From 244bdd7d9915e06f9a80711c01ed9cd69092dffd Mon Sep 17 00:00:00 2001 From: Jemoka Date: Tue, 19 Mar 2024 05:44:35 +0000 Subject: [PATCH] deploy: eb682f0590d6a4f7622f0ade0f063d64f99a32f0 --- index.json | 2 +- posts/kbhangelman_syndrome/index.html | 2 +- posts/kbhargmax/index.html | 2 +- posts/kbhautism/index.html | 2 +- posts/kbhbasis/index.html | 2 +- posts/kbhbernoulli_random_variable/index.html | 2 +- posts/kbhbetazero/index.html | 2 +- posts/kbhbioinformatics/index.html | 2 +- posts/kbhcantilever_beams/index.html | 2 +- posts/kbhchatbot/index.html | 2 +- posts/kbhcivil_rights/index.html | 2 +- posts/kbhcold_war_in_vietnam/index.html | 2 +- posts/kbhcomplex_exponential/index.html | 2 +- posts/kbhcomplex_number/index.html | 2 +- posts/kbhconditional_plan/index.html | 2 +- posts/kbhcross_product/index.html | 2 +- posts/kbhdecision_making/index.html | 2 +- posts/kbhdecision_making_index/index.html | 2 +- posts/kbhdot_product/index.html | 2 +- posts/kbhdrug_resistance/index.html | 2 +- posts/kbheigenvalue/index.html | 2 +- posts/kbhelectric_potential_energy/index.html | 2 +- posts/kbhexpectation/index.html | 2 +- .../index.html | 2 +- posts/kbhexponential_distribution/index.html | 2 +- posts/kbhfactored_mdps/index.html | 2 +- .../index.html | 2 +- posts/kbhfirst_order_odes/index.html | 2 +- posts/kbhfourier_series/index.html | 2 +- posts/kbhg_dice/index.html | 2 +- posts/kbhgarch/index.html | 2 +- posts/kbhgauss_law/index.html | 2 +- posts/kbhgaussian/index.html | 2 +- posts/kbhgram_schmidt/index.html | 2 +- posts/kbhgrammar/index.html | 2 +- posts/kbhguilded_age/index.html | 2 +- posts/kbhhindsight_optimization/index.html | 2 +- .../index.html | 2 +- posts/kbhilqr/index.html | 2 +- .../index.html | 2 +- posts/kbhinductors_in_circuits/index.html | 2 +- posts/kbhinjectivity/index.html | 2 +- posts/kbhinner_product/index.html | 2 +- posts/kbhinvariant_subspace/index.html | 2 +- posts/kbhinvertability/index.html | 2 +- .../index.html | 2 +- 
posts/kbhkernel_smoothing/index.html | 2 +- posts/kbhmapreduce/index.html | 2 +- posts/kbhmarkov_decision_process/index.html | 2 +- .../kbhmarkov_equivalence_classes/index.html | 2 +- posts/kbhmartingale_model/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- posts/kbhmodular_arithmetic/index.html | 2 +- posts/kbhmultithreading/index.html | 2 +- posts/kbhmutually_exclusive/index.html | 2 +- posts/kbhnaive_bayes/index.html | 2 +- posts/kbhneural_networks/index.html | 2 +- posts/kbhnlp/index.html | 2 +- .../index.html | 2 +- posts/kbhnon_linear_systems/index.html | 2 +- posts/kbhnull_space/index.html | 2 +- posts/kbhode_linearilzation/index.html | 2 +- posts/kbhparallel_linear_algebra/index.html | 2 +- posts/kbhparameter/index.html | 2 +- posts/kbhpegasus/index.html | 2 +- .../kbhpermittivity_of_free_space/index.html | 2 +- posts/kbhpointer/index.html | 2 +- posts/kbhpolicy/index.html | 2 +- posts/kbhpolicy_gradient/index.html | 2 +- posts/kbhpomdp_approximation/index.html | 2 +- posts/kbhproduct_summation_map/index.html | 2 +- posts/kbhproject80/index.html | 2 +- posts/kbhprojects/index.html | 2 +- posts/kbhquotient_group/index.html | 2 +- posts/kbhrational_preference/index.html | 2 +- posts/kbhresearch/index.html | 2 +- posts/kbhrfdiffusion/index.html | 2 +- posts/kbhrho_pomdps/index.html | 2 +- posts/kbhrosetta/index.html | 2 +- posts/kbhrosettafold2/index.html | 2 +- posts/kbhsarsa_lambda/index.html | 2 +- posts/kbhscheduling/index.html | 2 +- .../index.html | 2 +- posts/kbhsemantic_primes/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- posts/kbhsoftware_engineering/index.html | 2 +- .../index.html | 2 +- posts/kbhstable_matching_problem/index.html | 2 +- posts/kbhstring/index.html | 2 +- posts/kbhsu_cs111_final_sheet/index.html | 199 +++++++++++++++++- posts/kbhsu_math53_feb122024/index.html | 2 +- posts/kbhsu_math53_feb212024/index.html | 2 +- posts/kbhsu_math53_jan262023/index.html | 2 +- .../index.html | 42 ++++ 
posts/kbhsum_of_subsets/index.html | 2 +- posts/kbhsurjectivity/index.html | 2 +- posts/kbht_twiddle/index.html | 2 +- posts/kbhthoughts_on_axler_4/index.html | 2 +- posts/kbhtuning_forks/index.html | 2 +- posts/kbhuniqueness_and_existance/index.html | 2 +- posts/kbhupper_triangular_matrix/index.html | 2 +- posts/kbhutility_theory/index.html | 2 +- posts/kbhvalue_iteration/index.html | 2 +- posts/kbhvector_semantics/index.html | 2 +- posts/kbhwave_equation/index.html | 2 +- sitemap.xml | 2 +- 108 files changed, 345 insertions(+), 108 deletions(-) create mode 100644 posts/kbhsu_math53_practice_1_problem_4/index.html diff --git a/index.json b/index.json index cf16da2c0..801354684 100644 --- a/index.json +++ b/index.json @@ -1 +1 @@ -[{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhassembly/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhistudio_meeting_nodes/","tags":null,"title":""},{"categories":null,"contents":"Separated qubits don\u0026rsquo;t really like to interact. Instead, then, we just make them bigger and control them at the same time. We can implement gates via a sequence of pulses. If you work with interacting qubits a lot, you will end up with the APR Paradox.\nIf you take two qubits, and move them though two gates, you essentially will get entangled results.\nTo make this works, you will need to take some probability. Know correlation, expectation, etc.\n","html":"\u003cp\u003eSeparated qubits don\u0026rsquo;t really like to interact. Instead, then, we just make them bigger and control them at the same time. We can implement gates via a sequence of pulses. 
If you work with interacting \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003es a lot, you will end up with the \u003ca href=\"/posts/kbhapr_paradox/\"\u003eAPR Paradox\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you take two \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003es, and move them though two gates, you essentially will get \u003ca href=\"/posts/kbhentangled/\"\u003eentangled\u003c/a\u003e results.\u003c/p\u003e\n\u003cp\u003eTo make this works, you will need to take some probability. Know \u003ca href=\"/posts/kbhcorrelation/\"\u003ecorrelation\u003c/a\u003e, \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e, etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaking_qubits_interact/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpoint_estimate/","tags":null,"title":""},{"categories":null,"contents":" \\(A\\) does all the asking, \\(B\\) has all the decision making power Population \\(A\\)\u0026rsquo;s match never goes up at best, they stay the same Population \\(B\\)\u0026rsquo;s match can never go down. At worse, they stay the same. Population \\(A\\) always ends up with the highest-preferred person in their realm of possibility Population \\(B\\) always ends up with the lowest-preferred person in their realm of possibility ","html":"\u003cul\u003e\n\u003cli\u003e\\(A\\) does all the asking, \\(B\\) has all the decision making power\u003c/li\u003e\n\u003cli\u003ePopulation \\(A\\)\u0026rsquo;s match \u003cem\u003enever\u003c/em\u003e goes up at best, they stay the same\u003c/li\u003e\n\u003cli\u003ePopulation \\(B\\)\u0026rsquo;s match can \u003cem\u003enever\u003c/em\u003e go down. 
At worse, they stay the same.\u003c/li\u003e\n\u003cli\u003ePopulation \\(A\\) always ends up with the highest-preferred person in their realm of possibility\u003c/li\u003e\n\u003cli\u003ePopulation \\(B\\) always ends up with the lowest-preferred person in their realm of possibility\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproperties_of_the_stable_matching_algorithm/","tags":null,"title":""},{"categories":null,"contents":" \u0026ldquo;Are the nodes system independent of the class system?\u0026rdquo; Does the model require a set of L2 class? Can we build the model to take advantage of as many 10* things as possible? A preso Demo of a kid moving through MVP vis a vis advantage over just taking all classes Naming skills that would go on the graph Figuring: comparability with flattening like in a L1 system ","html":"\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Are the nodes system independent of the class system?\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eDoes the model require a set of L2 class?\n\u003cul\u003e\n\u003cli\u003eCan we build the model to take advantage of as many 10* things as possible?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eA preso\n\u003cul\u003e\n\u003cli\u003eDemo of a kid moving through MVP vis a vis advantage over just taking all classes\u003c/li\u003e\n\u003cli\u003eNaming skills that would go on the graph\u003c/li\u003e\n\u003cli\u003eFiguring: comparability with flattening like in a L1 system\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrnn_notes/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrural_hospitals_problem/","tags":null,"title":""},{"categories":null,"contents":"The Stable Matching Problem is Wes Chao\u0026rsquo;s favourite algorithm.\nConsider two populations, \\(A\\) and \\(B\\), who want to form paired relationships between a person 
\\(A\\) and \\(B\\). \\(A_i\\) has a list of their ranked order matches (I want to be paired with \\(B_1\\) most, \\(B_4\\) second, etc.), and so does \\(B_i\\) (I want to be paired with \\(A_4\\) most \\(A_9\\) second, etc.)\nWe want to discover a stable matching, where pairs are most unwilling to move. We can solve it using the stable matching algorithm.\nNueva Invention Studio speed-dating noises?\napplications of the stable matching problem Dating Applying to college Both of these are high-stress situations, especially if you are doing asking You can mathematically prove that person doing the asking gets the best result Hence, it shows us that the best possible outcomes go to the people who are willing to ask and get rejected.\nextensions to the stable matching problem the stable matching problem can be extended to the rural hospitals problem, which is slightly better.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhstable_matching_problem/\"\u003eStable Matching Problem\u003c/a\u003e is \u003ca href=\"\"\u003eWes Chao\u003c/a\u003e\u0026rsquo;s favourite algorithm.\u003c/p\u003e\n\u003cp\u003eConsider two populations, \\(A\\) and \\(B\\), who want to form paired relationships between a person \\(A\\) and \\(B\\). \\(A_i\\) has a list of their ranked order matches (I want to be paired with \\(B_1\\) most, \\(B_4\\) second, etc.), and so does \\(B_i\\) (I want to be paired with \\(A_4\\) most \\(A_9\\) second, etc.)\u003c/p\u003e\n\u003cp\u003eWe want to discover a \u003ca href=\"\"\u003estable matching\u003c/a\u003e, where pairs are most unwilling to move. 
We can solve it using the \u003ca href=\"\"\u003estable matching algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"\"\u003eNueva Invention Studio\u003c/a\u003e speed-dating noises?\u003c/p\u003e\n\u003ch2 id=\"applications-of-the-stable-matching-problem--kbhstable-matching-problem-dot-md\"\u003eapplications of the \u003ca href=\"/posts/kbhstable_matching_problem/\"\u003estable matching problem\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDating\u003c/li\u003e\n\u003cli\u003eApplying to college\u003c/li\u003e\n\u003cli\u003eBoth of these are high-stress situations, especially if you are doing asking\u003c/li\u003e\n\u003cli\u003eYou can mathematically prove that \u003cem\u003eperson doing the asking gets the best result\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHence, it shows us that the \u003cstrong\u003e\u003cstrong\u003ebest possible outcomes go to the people who are willing to ask and get rejected.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"extensions-to-the-stable-matching-problem\"\u003eextensions to the stable matching problem\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhstable_matching_problem/\"\u003estable matching problem\u003c/a\u003e can be extended to the \u003ca href=\"/posts/kbhrural_hospitals_problem/\"\u003erural hospitals problem\u003c/a\u003e, which is slightly better.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstable_matching_problem/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhz_score/","tags":null,"title":""},{"categories":null,"contents":"\\begin{align} v+(-1)v \u0026amp;= (1+(-1))v \\\\ \u0026amp;= 0v \\\\ \u0026amp;= 0 \\end{align}\nAs \\((-1)v=0\\), \\((-1)v\\) is the additive identity of \\(v\\) which we defined as \\(-v\\) \\(\\blacksquare\\).\n","html":"\u003cp\u003e\\begin{align}\nv+(-1)v \u0026amp;= (1+(-1))v \\\\\n\u0026amp;= 0v \\\\\n\u0026amp;= 
0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAs \\((-1)v=0\\), \\((-1)v\\) is the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e of \\(v\\) which we defined as \\(-v\\) \\(\\blacksquare\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbh1v_1/","tags":null,"title":"-1v=-v"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhq/","tags":null,"title":":q"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhw/","tags":null,"title":":w"},{"categories":null,"contents":"\\begin{align} 0v \u0026amp;= (0+0)v \\\\ \u0026amp;= 0v+0v \\end{align}\nGiven scalar multiplication is closed, \\(0v \\in V\\), which means \\(\\exists -0v:0v+(-0v)=0\\). Applying that to both sides:\n\\begin{equation} 0 = 0v\\ \\blacksquare \\end{equation}\nThe opposite proof of \\(\\lambda 0=0\\) but vectors work the same exact way.\n","html":"\u003cp\u003e\\begin{align}\n0v \u0026amp;= (0+0)v \\\\\n\u0026amp;= 0v+0v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eGiven \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e, \\(0v \\in V\\), which means \\(\\exists -0v:0v+(-0v)=0\\). 
Applying that to both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = 0v\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe opposite proof of \\(\\lambda 0=0\\) but vectors work the same exact way.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzero_times_vector/","tags":null,"title":"0v=0"},{"categories":null,"contents":"eigenvalue is the scalar needed to scale the basis element of a one dimensional invariant subspace of a Linear Map to represent the behavior of the map:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nNote we require \\(v \\neq 0\\) because otherwise all scalars count.\neigenvector is a vector that forms the basis list of length 1 of that 1-D invariant subspace under \\(T\\).\n\u0026ldquo;operators own eigenvalues, eigenvalues own eigenvectors\u0026rdquo;\nWhy is eigenvalue consistent per eigenvector? Because a linear map has to act on the same way to something\u0026rsquo;s basis as it does to the whole space.\nMotivation Take some subspace \\(U \\subset V\\):\n\\begin{equation} U = \\{\\lambda v\\ |\\ \\lambda \\in \\mathbb{F}, v \\in V\\} = span(v) \\end{equation}\nNow, if \\(T|_{U}\\) is an operator on \\(U\\), \\(U\\) would be an invariant subspace of \\(T\\) of dimension 1 (its basis being the list \\(\\{v\\}\\)).\nTherefore, for some vector \\(v \\in U\\) (basically like various scalings of \\(v\\)), \\(T\\) will always send back to \\(U\\) so we can represent it yet again with another scalar on \\(v\\), like \\(\\lambda v\\).\nIn this case, then, we can write that:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nAnd then the usual definition of eigenvalues persist.\nconstituents linear map \\(T \\in \\mathcal{L}(V)\\) vector \\(v \\in V\\), such that \\(v \\neq 0\\) scalar \\(\\lambda \\in \\mathbb{F}\\) requirements If there exists \\(v \\in V\\) such that \\(v\\neq 0\\) and:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nthen, \\(\\lambda\\) is called an eigenvalue, and \\(v\\) the 
eigenvector.\nadditional information properties of eigenvalues Suppose \\(V\\) in finite-dimensional, \\(T \\in \\mathcal{L}(V)\\) and \\(\\lambda \\in \\mathbb{F}\\), then:\n\\(\\lambda\\) is an eigenvalue of \\(T\\) \\(T - \\lambda I\\) is not injective \\(T - \\lambda I\\) is not surjective \\(T - \\lambda I\\) is not invertable Showing one shows all.\nProof:\n\\(1 \\implies 2\\) Suppose \\(\\lambda\\) is an eigenvalue of \\(T\\). Then, we have some \\(v \\in V\\) such that:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nNow:\n\\begin{align} \u0026amp;Tv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; Tv - \\lambda v = 0 \\\\ \\Rightarrow\\ \u0026amp; Tv - \\lambda Iv = 0 \\\\ \\Rightarrow\\ \u0026amp; (T-\\lambda I)v = 0 \\end{align}\nthe last step by \\((T+S)v = Tv+Sv\\), the property of the vector space of \\(\\mathcal{L}(V)\\) (or any \\(\\mathcal{L}\\)).\nAnd therefore, \\(v \\in null\\ (T-\\lambda I)\\), and \\(v\\neq 0\\). And so \\(null\\ (T-\\lambda I) \\neq \\{0\\}\\) and so \\(T-\\lambda I\\) is not injective, as desired.\nThe reverse of this result shows the opposite direction that \\(1 \\implies 2\\).\nThe others \\(I \\in \\mathcal{L}(V)\\), \\(T \\in \\mathcal{L}(V)\\), \\(\\mathcal{L}(V)\\) is closed, so \\((T - \\lambda I) \\in \\mathcal{L}(V)\\), and so it is an operator. Having 2) implies all other conditions of non-injectivity, non-surjectivity, non-invertiblility by injectivity is surjectivity in finite-dimensional operators\nlist of eigenvectors are linearly independent Let \\(T \\in \\mathcal{L}(V)\\), suppose \\(\\lambda_{j}\\) are distinct eigenvalues of \\(T\\), and \\(v_1, \\ldots, v_{m}\\) the corresponding eigenvectors, then \\(v_1, \\ldots, v_{m}\\) is linearly independent.\nproof:\nWe will show this by contradiction. 
Suppose \\(v_1, \\ldots, v_{m}\\) are linearly dependent; then, by the Linear Dependence Lemma, \\(\\exists v_{j}\\) such that:\n\\begin{equation} v_{j} \\in span(v_1, \\dots, v_{j-1}) \\end{equation}\nMeaning:\n\\begin{equation} v_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1} \\end{equation}\nGiven the list is a list of eigenvalues, we can apply \\(T\\) to both sides to get:\n\\begin{equation} \\lambda_{j}v_{j} = a_1\\lambda_{1}v_1 + \\dots + a_{j-1}\\lambda_{j-1}v_{j-1} \\end{equation}\nWe can also get another definition for \\(\\lambda_{j} v_{j}\\) by simply multiplying the definition for \\(v_{j}\\) above by \\(\\lambda_{j}\\):\n\\begin{align} \u0026amp;v_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1}\\ \\text{from above} \\\\ \\Rightarrow\\ \u0026amp; \\lambda_{j} v_{j} = a_1\\lambda_{j}v_1 + \\dots + a_{j-1}\\lambda_{j}v_{j-1} \\end{align}\nNow, subtracting our two definitions of \\(\\lambda_{j} v_{j}\\), we get:\n\\begin{equation} 0 = a_1 (\\lambda_{j} - \\lambda_{1})v_{1} + \\dots +a_{j-1} (\\lambda_{j} - \\lambda_{j-1})v_{j-1} \\end{equation}\nRecall now that the eigenvalue list \\(\\lambda_{j}\\) are distinct. This means all \\(\\lambda_{j} - \\lambda_{k \\neq j} \\neq 0\\). No \\(v_{j} =0\\); so if we choose the smallest positive integer for \\(j\\), the list before it \\(v_1, \\dots, v_{j-1}\\) is linearly independent (as no value in that list would satisfy the Linear Dependence Lemma). This makes \\(a_{j} =\\dots =a_{j-1} = 0\\).\nAnd yet, substituting this back into the expression for \\(v_{j}\\), we have \\(v_{j} = 0\\), reaching contradiction. So therefore, the list of eigenvectors are linearly independent. 
\\(\\blacksquare\\)\noperators on finite dimensional V has at most dim V eigenvalues As a corollary of the above result, suppose \\(V\\) is finite dimensional; then, each operator on \\(V\\) has at most \\(dim\\ V\\) distinct eigenvalues because their eigenvectors form an linearly independent list and length of linearly-independent list \\(\\leq\\) length of spanning list.\neigenspaces are disjoint the eigenspaces of a Linear Map form a direct sum:\nproof:\nCorollary of result above. Because eigenvectors (i.e. bases) from distinct eigenspaces are linearly independent. So the only way to write \\(0\\) is by taking each to \\(0\\). So by taking the bases all to \\(0\\), you take the \\(0\\) vector from each space, which shows that the eigenspaces are a direct sum. \\(\\blacksquare\\)\nfinding eigenvalues with actual numbers \\begin{equation} \\lambda_{j} \\in Spec(T) \\Rightarrow det(\\lambda_{j}I-T) = 0 \\end{equation}\nThe right polynomial \\(det(\\lambda_{j} I-T) = 0\\) is named the \u0026ldquo;characteristic polynomial.\u0026rdquo;\nnatural choordinates of a map Given the eigenvectors \\((x+,y+), (x-,y-)\\), we can change coordinates of your matrix into the natural choordinates.\n\\begin{equation} A = \\begin{pmatrix} x+ \u0026amp; x- \\\\y+ \u0026amp; y- \\end{pmatrix} \\begin{pmatrix} \\lambda+ \u0026amp; 0 \\\\ 0 \u0026amp; \\lambda- \\end{pmatrix} \\begin{pmatrix} x+ \u0026amp; x- \\\\y+ \u0026amp; y- \\end{pmatrix}^{-1} \\end{equation}\nThis makes scaling matricides much much easier. If you think about multiplying the above matrix \\(n\\) times, the inverse and non-inverse cancells out.\nsimilar matrices Let \\(A,B\\) be defined:\n\\begin{equation} A = C B C^{-1} \\end{equation}\nand of course:\n\\begin{equation} B = C^{-1} B C \\end{equation}\nwhere, \\(A,B,C \\in \\mathcal{L}(V)\\)\n\\(A, B\\) has the same eigenvalues.\ninvertable matricies Let \\(T \\in \\mathcal{L}(V)\\) be invertable. 
If \\(\\lambda\\) is an eigenvalue of \\(T\\), then \\(\\frac{1}{\\lambda}\\) is an eigenvalue of \\(T\\). Furthermore, \\(T\\) and \\(T^{-1}\\) share eigenvectors with eigenvalues \\(\\lambda\\) and \\(\\frac{1}{\\lambda}\\)\nsymmetric matricies have a real basis of eigenvalues this falls out of the real spectral theorem.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e is the scalar needed to scale the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e element of a one \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003eal \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e to represent the behavior of the map:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote we require \\(v \\neq 0\\) because otherwise all scalars count.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e that forms the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e list of length 1 of that 1-D \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e under \\(T\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es own \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es own \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWhy is \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e consistent per \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e? 
Because a linear map has to act on the same way to something\u0026rsquo;s basis as it does to the whole space.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eTake some subspace \\(U \\subset V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU = \\{\\lambda v\\ |\\ \\lambda \\in \\mathbb{F}, v \\in V\\} = span(v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, if \\(T|_{U}\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(U\\), \\(U\\) would be an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e of \\(T\\) of dimension 1 (its \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e being the list \\(\\{v\\}\\)).\u003c/p\u003e\n\u003cp\u003eTherefore, for some vector \\(v \\in U\\) (basically like various scalings of \\(v\\)), \\(T\\) will always send back to \\(U\\) so we can represent it yet again with another scalar on \\(v\\), like \\(\\lambda v\\).\u003c/p\u003e\n\u003cp\u003eIn this case, then, we can write that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then the usual definition of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es persist.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elinear map \\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003cli\u003evector \\(v \\in V\\), such that \\(v \\neq 0\\)\u003c/li\u003e\n\u003cli\u003escalar \\(\\lambda \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eIf there exists \\(v \\in V\\) such that \\(v\\neq 0\\) and:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethen, \\(\\lambda\\) is called an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e, and \\(v\\) the \u003ca 
href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-eigenvalue--kbheigenvalue-dot-md--s\"\u003eproperties of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eSuppose \\(V\\) in \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, \\(T \\in \\mathcal{L}(V)\\) and \\(\\lambda \\in \\mathbb{F}\\), then:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\)\u003c/li\u003e\n\u003cli\u003e\\(T - \\lambda I\\) is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T - \\lambda I\\) is not \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T - \\lambda I\\) is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eShowing one shows all.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003ch4 id=\"1-implies-2\"\u003e\\(1 \\implies 2\\)\u003c/h4\u003e\n\u003cp\u003eSuppose \\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
Then, we have some \\(v \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Tv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; Tv - \\lambda v = 0 \\\\\n\\Rightarrow\\ \u0026amp; Tv - \\lambda Iv = 0 \\\\\n\\Rightarrow\\ \u0026amp; (T-\\lambda I)v = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethe last step by \\((T+S)v = Tv+Sv\\), the property of the vector space of \\(\\mathcal{L}(V)\\) (or any \\(\\mathcal{L}\\)).\u003c/p\u003e\n\u003cp\u003eAnd therefore, \\(v \\in null\\ (T-\\lambda I)\\), and \\(v\\neq 0\\). And so \\(null\\ (T-\\lambda I) \\neq \\{0\\}\\) and so \\(T-\\lambda I\\) is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, as desired.\u003c/p\u003e\n\u003cp\u003eThe reverse of this result shows the opposite direction that \\(1 \\implies 2\\).\u003c/p\u003e\n\u003ch4 id=\"the-others\"\u003eThe others\u003c/h4\u003e\n\u003cp\u003e\\(I \\in \\mathcal{L}(V)\\), \\(T \\in \\mathcal{L}(V)\\), \\(\\mathcal{L}(V)\\) is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e, so \\((T - \\lambda I) \\in \\mathcal{L}(V)\\), and so it is an operator. 
Having 2) implies all other conditions of non-injectivity, non-surjectivity, non-invertiblility by \u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"list-of-eigenvectors-are-linearly-independent--kbhlinear-independence-dot-md\"\u003elist of eigenvectors are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eLet \\(T \\in \\mathcal{L}(V)\\), suppose \\(\\lambda_{j}\\) are distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(T\\), and \\(v_1, \\ldots, v_{m}\\) the corresponding \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, then \\(v_1, \\ldots, v_{m}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eproof:\u003c/p\u003e\n\u003cp\u003eWe will show this by contradiction. 
Suppose \\(v_1, \\ldots, v_{m}\\) are \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e; then, by the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, \\(\\exists v_{j}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j} \\in span(v_1, \\dots, v_{j-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven the list is a list of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, we can apply \\(T\\) to both sides to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{j}v_{j} = a_1\\lambda_{1}v_1 + \\dots + a_{j-1}\\lambda_{j-1}v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can also get another definition for \\(\\lambda_{j} v_{j}\\) by simply multiplying the definition for \\(v_{j}\\) above by \\(\\lambda_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;v_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1}\\ \\text{from above} \\\\\n\\Rightarrow\\ \u0026amp; \\lambda_{j} v_{j} = a_1\\lambda_{j}v_1 + \\dots + a_{j-1}\\lambda_{j}v_{j-1}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, subtracting our two definitions of \\(\\lambda_{j} v_{j}\\), we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_1 (\\lambda_{j} - \\lambda_{1})v_{1} + \\dots +a_{j-1} (\\lambda_{j} - \\lambda_{j-1})v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall now that the eigenvalue list \\(\\lambda_{j}\\) are distinct. This means all \\(\\lambda_{j} - \\lambda_{k \\neq j} \\neq 0\\). 
No \\(v_{j} =0\\); so if we choose the smallest positive integer for \\(j\\), the list before it \\(v_1, \\dots, v_{j-1}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e (as no value in that list would satisfy the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e). This makes \\(a_{j} =\\dots =a_{j-1} = 0\\).\u003c/p\u003e\n\u003cp\u003eAnd yet, substituting this back into the expression for \\(v_{j}\\), we have \\(v_{j} = 0\\), reaching contradiction. So therefore, the list of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent.\u003c/a\u003e \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch4 id=\"operators-on-finite-dimensional-v-has-at-most-dim-v-eigenvalue--kbheigenvalue-dot-md--s\"\u003eoperators on finite dimensional V has at most dim V \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003eAs a corollary of the above result, suppose \\(V\\) is finite dimensional; then, each \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(V\\) has at most \\(dim\\ V\\) distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es because their \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es form an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list and \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"eigenspaces-are-disjoint\"\u003eeigenspaces are disjoint\u003c/h4\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e form a \u003ca 
href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eproof:\u003c/p\u003e\n\u003cp\u003eCorollary of result above. Because \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es (i.e. bases) from distinct \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. So the only way to write \\(0\\) is by taking each to \\(0\\). So by taking the bases all to \\(0\\), you take the \\(0\\) vector from each space, which shows that the \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es are a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"finding-eigenvalues-with-actual-numbers\"\u003efinding eigenvalues with actual numbers\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{j} \\in Spec(T) \\Rightarrow det(\\lambda_{j}I-T) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe right polynomial \\(det(\\lambda_{j} I-T) = 0\\) is named the \u0026ldquo;characteristic polynomial.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"natural-choordinates-of-a-map\"\u003enatural choordinates of a map\u003c/h3\u003e\n\u003cp\u003eGiven the eigenvectors \\((x+,y+), (x-,y-)\\), we can change coordinates of your matrix into the natural choordinates.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\begin{pmatrix}\nx+ \u0026amp; x- \\\\y+ \u0026amp; y-\n\\end{pmatrix} \\begin{pmatrix}\n\\lambda+ \u0026amp; 0 \\\\ 0 \u0026amp; \\lambda-\n\\end{pmatrix} \\begin{pmatrix}\nx+ \u0026amp; x- \\\\y+ \u0026amp; y-\n\\end{pmatrix}^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis makes scaling matricides much much easier. 
If you think about multiplying the above matrix \\(n\\) times, the inverse and non-inverse cancells out.\u003c/p\u003e\n\u003ch3 id=\"similar-matrices\"\u003esimilar matrices\u003c/h3\u003e\n\u003cp\u003eLet \\(A,B\\) be defined:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = C B C^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand of course:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = C^{-1} B C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(A,B,C \\in \\mathcal{L}(V)\\)\u003c/p\u003e\n\u003cp\u003e\\(A, B\\) has the same \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"invertable-matricies\"\u003einvertable matricies\u003c/h3\u003e\n\u003cp\u003eLet \\(T \\in \\mathcal{L}(V)\\) be \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. If \\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\), then \\(\\frac{1}{\\lambda}\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
Furthermore, \\(T\\) and \\(T^{-1}\\) share \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es with eigenvalues \\(\\lambda\\) and \\(\\frac{1}{\\lambda}\\)\u003c/p\u003e\n\u003ch3 id=\"symmetric-matricies-have-a-real-basis-of-eigenvalues\"\u003esymmetric matricies have a real basis of eigenvalues\u003c/h3\u003e\n\u003cp\u003ethis falls out of the real \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheigenvalue/","tags":null,"title":"1-d invariant subspace"},{"categories":null,"contents":" New Deal ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbh1980s_political_alignment/","tags":null,"title":"1980s Political Alignment"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbh1a/","tags":null,"title":"1a"},{"categories":null,"contents":"Galactica test Ka\u0026rsquo;Chava\n","html":"\u003cp\u003e\u003ca href=\"\"\u003eGalactica\u003c/a\u003e test \u003ca href=\"/posts/kbhka_chava/\"\u003eKa\u0026rsquo;Chava\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/2023-02-26/","tags":null,"title":"2023-02-26"},{"categories":null,"contents":"Suppose \\(T\\) is a function from \\(V\\) to \\(W\\). 
Let the \u0026ldquo;graph\u0026rdquo; of \\(T\\) be the subset of \\(V \\times W\\) such that:\n\\begin{equation} graph\\ T = \\{(v,Tv) \\in V \\times W \\mid v \\in V\\} \\end{equation}\nShow that \\(T\\) is a linear map IFF the graph of \\(T\\) is a subspace of \\(V \\times W\\).\nReview: A Linear Map Recall that a function \\(T: V \\to W\\) is called a linear map if it is a map that\u0026hellip;\nis additive: so \\(Tv + Tu = T(v+u): v,u \\in V\\) is homogeneous, so \\(\\lambda Tv = T\\lambda v: \\lambda \\in \\mathbb{F}, v \\in V\\) Given Graph is Subspace Given the graph of \\(T\\) is a subspace of \\(V \\times W\\), we desire that the function \\(T\\) is a linear map and therefore additive and homogeneous.\nBy declaration before, \\(graph\\ T\\) is a subspace, meaning it would be closed under adddition and scalar multiplication. We will use this fact to show that \\(T\\) follows the properties of a linear map.\nAdditivity We first desire that \\(T\\) is additive, that is, for \\(v,u \\in V\\), we desire \\(Tv + Tu = T(v+u)\\).\nLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\n\\begin{equation} \\begin{cases} a = (v,Tv) \\in V \\times W \\\\ b = (u,Tu) \\in V \\times W \\end{cases} \\end{equation}\nWe are given that \\(graph\\ T\\) is a subspace of \\(T\\). As such, it is closed under addition; meaning, the sum of two elements from the space must remain in the space. Therefore:\n\\begin{equation} (v, Tv) + (u,Tu) = (v+u, Tv+Tu) \\in graph\\ T \\end{equation}\nAnd now, the latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\n\\begin{equation} c := (n, Tn) = (v+u, Tv+Tu) \\end{equation}\nTaking the latter equivalence and solving for \\(n\\), we have that \\(n = v+u\\). And so, we have that:\n\\begin{equation} (v+u, T(v+u)) = (v+u, Tv+Tu) \\end{equation}\nTherefore, \\(T(v+u) = Tv+Tu\\), as desired.\nHomogeneity We now desire that \\(T\\) is homogeneous. 
That is, for \\(v \\in V, \\lambda \\in \\mathbb{F}\\), we desire \\(\\lambda Tv = T\\lambda v\\).\nLet \\(v \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and \\(a \\in graph\\ T\\) declared as follows:\n\\begin{equation} a = (v, Tv) \\in V \\times W \\end{equation}\nBy the same logic before, \\(graph\\ T\\) is closed under scalar multiplication; meaning, the product of en element from the space to a scalar remain in the space. Therefore:\n\\begin{equation} \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) \\in graph\\ T \\end{equation}\nThe latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\n\\begin{equation} c :=(n,Tn) = (\\lambda v, \\lambda Tv) \\end{equation}\nTaking the latter equivalence and solving for \\(n\\), we have \\(n = \\lambda v\\). And so, we have:\n\\begin{equation} (\\lambda v, T \\lambda v) = (\\lambda v, \\lambda Tv) \\end{equation}\nAnd therefore, \\(T\\lambda v = \\lambda Tv\\), as desired.\nHaving shown that \\(T\\) is now both additive and homogeneous, we have that \\(T\\) is a linear map, as desired.\nGiven \\(T\\) is a Linear Map We will essentially prove the previous condition backwards.\nWe are given that the graph of \\(T\\) is a subset of \\(V \\times W\\), and that \\(T: V \\to W\\) is a linear map. We desire that the graph of \\(T\\) is a subspace of \\(V \\times W\\).\nRecall that to show that a subset is a subspace, on simply has to show that it has closed operations and that it contains the additive identity.\nAdditive Identity Recall that the additive identity in \\(V \\times W\\) is the tuple that\u0026rsquo;s identically \\((0,0) \\in V \\times W\\).\nAs \\(V\\) is a vector space, \\(0 \\in V\\). Any linear map will send \\(0\\) to \\(0\\). 
Therefore, \\(T 0 = 0\\).\nTherefore, construct \\(a \\in graph\\ T\\):\n\\begin{equation} a = (0, T 0) \\in V \\times W = (0, 0) \\end{equation}\nBy construction, we have shown that the additive identity of \\(V \\times W\\) is in \\(graph\\ T\\).\nClosure of Addition Given WLOG \\(a,b \\in graph\\ T\\), we desire that \\(a+b \\in graph\\ T\\).\nLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\n\\begin{equation} \\begin{cases} a = (v,Tv) \\in V \\times W \\\\ b = (u,Tu) \\in V \\times W \\end{cases} \\end{equation}\nNow:\n\\begin{equation} a+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu) \\end{equation}\nGiven \\(T\\) is a linear map, we have WLOG \\(Tv+Tu = T(v+u)\\). And therefore:\n\\begin{equation} a+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu) = (v+u, T(v+u)) \\in \\{(v,Tv) \\mid v \\in V\\} \\end{equation}\nHence, \\(graph\\ T\\) is closed under addition.\nClosure of Scalar Multiplication Given WLOG \\(a \\in graph\\ T, \\lambda \\in \\mathbb{F}\\), we desire that \\(\\lambda a \\in graph\\ T\\).\nLet \\(v \\in V, \\lambda \\in \\mathbb{F}\\), and let \\(a \\in graph\\ T\\) declared as follows:\n\\begin{equation} a = (v,Tv) \\in V \\times W \\end{equation}\nNow:\n\\begin{equation} \\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) \\end{equation}\nGiven \\(T\\) is a linear map, we have WLOG \\(\\lambda Tv = T\\lambda v\\). And therefore:\n\\begin{equation} \\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) = (\\lambda v, T \\lambda v)\\in \\{(v,Tv) \\mid v \\in V\\} \\end{equation}\nHence, \\(graph\\ T\\) is closed under scalar multiplication.\nHaving shown \\(graph\\ T\\) to be closed under addition and scalar multiplication, as well as containing the additive identity, we see that it is a subspace of \\(V \\times W\\) of which it is a subset.\nHaving shown both directions of the proof, \\(\\blacksquare\\)\n","html":"\u003cp\u003eSuppose \\(T\\) is a function from \\(V\\) to \\(W\\). 
Let the \u0026ldquo;graph\u0026rdquo; of \\(T\\) be the subset of \\(V \\times W\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ngraph\\ T = \\{(v,Tv) \\in V \\times W \\mid v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eShow that \\(T\\) is a linear map IFF the graph of \\(T\\) is a subspace of \\(V \\times W\\).\u003c/p\u003e\n\u003ch2 id=\"review-a-linear-map\"\u003eReview: A Linear Map\u003c/h2\u003e\n\u003cp\u003eRecall that a function \\(T: V \\to W\\) is called a linear map if it is a map that\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eis \u003cstrong\u003eadditive\u003c/strong\u003e: so \\(Tv + Tu = T(v+u): v,u \\in V\\)\u003c/li\u003e\n\u003cli\u003eis \u003cstrong\u003ehomogeneous\u003c/strong\u003e, so \\(\\lambda Tv = T\\lambda v: \\lambda \\in \\mathbb{F}, v \\in V\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"given-graph-is-subspace\"\u003eGiven Graph is Subspace\u003c/h2\u003e\n\u003cp\u003eGiven the graph of \\(T\\) is a subspace of \\(V \\times W\\), we desire that the function \\(T\\) is a linear map and therefore additive and homogeneous.\u003c/p\u003e\n\u003cp\u003eBy declaration before, \\(graph\\ T\\) is a subspace, meaning it would be closed under adddition and scalar multiplication. We will use this fact to show that \\(T\\) follows the properties of a linear map.\u003c/p\u003e\n\u003ch3 id=\"additivity\"\u003eAdditivity\u003c/h3\u003e\n\u003cp\u003eWe first desire that \\(T\\) is additive, that is, for \\(v,u \\in V\\), we desire \\(Tv + Tu = T(v+u)\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na = (v,Tv) \\in V \\times W \\\\\nb = (u,Tu) \\in V \\times W\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are given that \\(graph\\ T\\) is a subspace of \\(T\\). 
As such, it is closed under addition; meaning, the sum of two elements from the space must remain in the space. Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(v, Tv) + (u,Tu) = (v+u, Tv+Tu) \\in graph\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, the latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc := (n, Tn) = (v+u, Tv+Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the latter equivalence and solving for \\(n\\), we have that \\(n = v+u\\). And so, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(v+u, T(v+u)) = (v+u, Tv+Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, \\(T(v+u) = Tv+Tu\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"homogeneity\"\u003eHomogeneity\u003c/h3\u003e\n\u003cp\u003eWe now desire that \\(T\\) is homogeneous. That is, for \\(v \\in V, \\lambda \\in \\mathbb{F}\\), we desire \\(\\lambda Tv = T\\lambda v\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and \\(a \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (v, Tv) \\in V \\times W\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy the same logic before, \\(graph\\ T\\) is closed under scalar multiplication; meaning, the product of en element from the space to a scalar remain in the space. Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda (v,Tv) = (\\lambda v, \\lambda Tv) \\in graph\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc :=(n,Tn) = (\\lambda v, \\lambda Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the latter equivalence and solving for \\(n\\), we have \\(n = \\lambda v\\). 
And so, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda v, T \\lambda v) = (\\lambda v, \\lambda Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd therefore, \\(T\\lambda v = \\lambda Tv\\), as desired.\u003c/p\u003e\n\u003cp\u003eHaving shown that \\(T\\) is now both additive and homogeneous, we have that \\(T\\) is a linear map, as desired.\u003c/p\u003e\n\u003ch2 id=\"given-t-is-a-linear-map\"\u003eGiven \\(T\\) is a Linear Map\u003c/h2\u003e\n\u003cp\u003eWe will essentially prove the previous condition backwards.\u003c/p\u003e\n\u003cp\u003eWe are given that the graph of \\(T\\) is a subset of \\(V \\times W\\), and that \\(T: V \\to W\\) is a linear map. We desire that the graph of \\(T\\) is a subspace of \\(V \\times W\\).\u003c/p\u003e\n\u003cp\u003eRecall that to show that a subset is a subspace, on simply has to show that it has closed operations and that it contains the additive identity.\u003c/p\u003e\n\u003ch3 id=\"additive-identity\"\u003eAdditive Identity\u003c/h3\u003e\n\u003cp\u003eRecall that the additive identity in \\(V \\times W\\) is the tuple that\u0026rsquo;s identically \\((0,0) \\in V \\times W\\).\u003c/p\u003e\n\u003cp\u003eAs \\(V\\) is a vector space, \\(0 \\in V\\). Any linear map will send \\(0\\) to \\(0\\). 
Therefore, \\(T 0 = 0\\).\u003c/p\u003e\n\u003cp\u003eTherefore, construct \\(a \\in graph\\ T\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (0, T 0) \\in V \\times W = (0, 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy construction, we have shown that the additive identity of \\(V \\times W\\) is in \\(graph\\ T\\).\u003c/p\u003e\n\u003ch3 id=\"closure-of-addition\"\u003eClosure of Addition\u003c/h3\u003e\n\u003cp\u003eGiven WLOG \\(a,b \\in graph\\ T\\), we desire that \\(a+b \\in graph\\ T\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na = (v,Tv) \\in V \\times W \\\\\nb = (u,Tu) \\in V \\times W\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(T\\) is a linear map, we have WLOG \\(Tv+Tu = T(v+u)\\). And therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu) = (v+u, T(v+u)) \\in \\{(v,Tv) \\mid v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHence, \\(graph\\ T\\) is closed under addition.\u003c/p\u003e\n\u003ch3 id=\"closure-of-scalar-multiplication\"\u003eClosure of Scalar Multiplication\u003c/h3\u003e\n\u003cp\u003eGiven WLOG \\(a \\in graph\\ T, \\lambda \\in \\mathbb{F}\\), we desire that \\(\\lambda a \\in graph\\ T\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v \\in V, \\lambda \\in \\mathbb{F}\\), and let \\(a \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (v,Tv) \\in V \\times W\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(T\\) is a linear map, we have WLOG \\(\\lambda Tv = T\\lambda v\\). 
And therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) = (\\lambda v, T \\lambda v)\\in \\{(v,Tv) \\mid v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHence, \\(graph\\ T\\) is closed under scalar multiplication.\u003c/p\u003e\n\u003cp\u003eHaving shown \\(graph\\ T\\) to be closed under addition and scalar multiplication, as well as containing the additive identity, we see that it is a subspace of \\(V \\times W\\) of which it is a subset.\u003c/p\u003e\n\u003cp\u003eHaving shown both directions of the proof, \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_3_e_problem_1/","tags":null,"title":"3.E Problem 1"},{"categories":null,"contents":"776 is a VC firm lead by Reddit cofounder Alexis Ohanian.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbh776/\"\u003e776\u003c/a\u003e is a VC firm lead by Reddit cofounder \u003ca href=\"/posts/kbhalexis_ohanian/\"\u003eAlexis Ohanian\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbh776/","tags":null,"title":"776"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaaa/","tags":null,"title":"AAA"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaaai_talk_contacts/","tags":null,"title":"AAAI Talk Contacts"},{"categories":null,"contents":" Locale Speaker Topic + Link W3PHI-AI Elizabeth Broycki AI Healthcare Safety W3PHI-AI Jeff Clark Patient Risk Prediction W3PHI-AI Yasmine and Emily! 
Abulance Trajectories W3PHI-AI Simeon Allmendinger Diffusion Laproscopic Surgeries W3PHI-AI Andrea Borghesi Clinical Skin Disease Image Generation W3PHI-AI Hossein Jafarinia Multiple Instance Learning W3PHI-AI Thomas Kannampallil AI Medicine W3PHI-AI Soumadeep Saha DOST W3PHI-AI Dimitris Spathis Multimodal AI for Real-World Signals W3PHI-AI William Bolton Medical Knowledge Extraction W3PHI-AI Prajwal Panzade MedBlindTuner W3PHI-AI Hita Kambhamettu Medical Dialogue Generation W3PHI-AI Amarpal Sahota Parkingson\u0026rsquo;s Classification with EEG W3PHI-AI Yidou Weng Baysian Networks for Healthcare W3PHI-AI Cheng Huang Multi-LSTM for Clinical Report Generation W3PHI-AI Rickard Stureborg Hierarchical Multi-Label Clsf. for Vaccine W3PHI-AI Mbithe Nzomo Semantic Health Risk Prediction Talk Contact AAAI Talk Contacts\n","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eLocale\u003c/th\u003e\n\u003cth\u003eSpeaker\u003c/th\u003e\n\u003cth\u003eTopic + Link\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eElizabeth Broycki\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_healthcare_safety/\"\u003eAI Healthcare Safety\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eJeff Clark\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpatient_risk_prediction/\"\u003ePatient Risk Prediction\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eYasmine and Emily!\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhabulance_trajectories/\"\u003eAbulance Trajectories\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eSimeon Allmendinger\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdiffusion_models_for_laproscopic_surgeries/\"\u003eDiffusion Laproscopic 
Surgeries\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eAndrea Borghesi\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhclinical_skin_disease_imaging/\"\u003eClinical Skin Disease Image Generation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eHossein Jafarinia\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmultiple_instance_learning/\"\u003eMultiple Instance Learning\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eThomas Kannampallil\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_medicine/\"\u003eAI Medicine\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eSoumadeep Saha\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdost/\"\u003eDOST\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eDimitris Spathis\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmultimodal_ai_for_real_world_signals/\"\u003eMultimodal AI for Real-World Signals\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eWilliam Bolton\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmedical_knowledge_extraction/\"\u003eMedical Knowledge Extraction\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003ePrajwal Panzade\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmedblindtuner/\"\u003eMedBlindTuner\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eHita Kambhamettu\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmedical_dialogue_generation/\"\u003eMedical Dialogue 
Generation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eAmarpal Sahota\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhparkingson_s_classification_with_eeg/\"\u003eParkingson\u0026rsquo;s Classification with EEG\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eYidou Weng\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhbaysian_networks_for_healthcare/\"\u003eBaysian Networks for Healthcare\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eCheng Huang\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmulti_lstm_for_clinical_report_generation/\"\u003eMulti-LSTM for Clinical Report Generation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eRickard Stureborg\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhhierarchical_multi_label_clsf_for_vaccine/\"\u003eHierarchical Multi-Label Clsf. for Vaccine\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eMbithe Nzomo\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsemantic_health_risk_prediction/\"\u003eSemantic Health Risk Prediction\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"talk-contact\"\u003eTalk Contact\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhaaai_talk_contacts/\"\u003eAAAI Talk Contacts\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaaai2024_index/","tags":null,"title":"AAAI2024 Index"},{"categories":null,"contents":"Welcome to the personal site of Houjun \u0026ldquo;Jack\u0026rdquo; Liu.\nI\u0026rsquo;m on the blaggosphere as u/jemoka and @jemoka.\nWho\u0026rsquo;s this guy? I am a human interested in linguistic analysis, L2 learning, and user interfaces. 
AGI \u0026amp; Emacs are cool. I run Shabang, do do research in NLP and education, and am working for TalkBank on the intersection between speech and language. I\u0026rsquo;m currently doing my undergrad at Stanford, where I write some code for Stanza, a NLP package for many human languages, and a rover that we are sending to Antarctica.\nNeed to catch me? Email me at houjun@jemoka.com. Please do email me, I actually check.\nRecent Projects Take a look at my GitHub profile for programming projects. For larger scale things, take a look at the Projects Index on this site.\nNotes This site also contains the vast majority of my course notes. It is a organized in a zettlekasten format. To begin exploring, why don\u0026rsquo;t you check out Nueva Courses Index and Stanford UG Courses Index.\njklsnt Some friends and I started a small collection of fun internets that we made. Check it out!.\nHow do I know you are you? Good question! gpg --locate-keys houjun@jemoka.com. Note that GPG don\u0026rsquo;t actually check fingerprints you received so do that yourself (CA0D6B9C1EA1CD08F0AC1802E7EDDE691807A0C6).\nBugga Bugga Bontehu? Sometimes I use this domain as a downlink to fastcalculator to friends and coworkers. 
To achieve this, here are two links you could click on that I don\u0026rsquo;t always promise do anything: oliver and socks.\n","html":"\u003cp\u003eWelcome to the personal site of \u003cstrong\u003e\u003cstrong\u003eHoujun \u0026ldquo;Jack\u0026rdquo; Liu\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eI\u0026rsquo;m on the blaggosphere as \u003ca href=\"https://www.reddit.com/user/Jemoka/\"\u003eu/jemoka\u003c/a\u003e and \u003ca href=\"https://github.com/Jemoka/\"\u003e@jemoka\u003c/a\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"who-s-this-guy\"\u003eWho\u0026rsquo;s this guy?\u003c/h2\u003e\n\u003cp\u003eI am a \u003ca href=\"https://avatars.githubusercontent.com/u/28765741?v=4\"\u003ehuman\u003c/a\u003e interested in \u003ca href=\"http://pubs.asha.org/doi/10.1044/2023_JSLHR-22-00642\"\u003elinguistic analysis\u003c/a\u003e, \u003ca href=\"https://en.wikipedia.org/wiki/Second-language_acquisition\"\u003eL2 learning\u003c/a\u003e, and \u003ca href=\"https://www.shabang.io\"\u003euser interfaces\u003c/a\u003e. \u003ca href=\"https://en.wikipedia.org/wiki/Artificial_general_intelligence\"\u003eAGI\u003c/a\u003e \u0026amp; \u003ca href=\"https://github.com/Jemoka/.emacs.d\"\u003eEmacs\u003c/a\u003e are cool. I run \u003ca href=\"https://www.shabang.io\"\u003eShabang\u003c/a\u003e, do \u003ca href=\"/posts/kbhresearch_index/\"\u003edo research in NLP and education\u003c/a\u003e, and am working for \u003ca href=\"https://www.talkbank.org/\"\u003eTalkBank\u003c/a\u003e on the intersection between speech and language. I\u0026rsquo;m currently doing my undergrad at Stanford, where I write some code for \u003ca href=\"https://github.com/stanfordnlp/stanza\"\u003eStanza\u003c/a\u003e, a NLP package for many human languages, and a \u003ca href=\"https://github.com/stanford-ssi\"\u003erover that we are sending to Antarctica\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNeed to catch me? Email me at \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e. 
Please do email me, I actually check.\u003c/p\u003e\n\u003ch2 id=\"recent-projects\"\u003eRecent Projects\u003c/h2\u003e\n\u003cp\u003eTake a look at my \u003ca href=\"https://github.com/Jemoka/\"\u003eGitHub profile\u003c/a\u003e for programming projects. For larger scale things, take a look at the \u003ca href=\"/posts/kbhprojects/\"\u003eProjects Index\u003c/a\u003e on this site.\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eThis site also contains the vast majority of my course notes. It is a organized in a \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e format. To begin exploring, why don\u0026rsquo;t you check out \u003ca href=\"/posts/kbhnueva_courses_index/\"\u003eNueva Courses Index\u003c/a\u003e and \u003ca href=\"/posts/kbhstanford_courses_index/\"\u003eStanford UG Courses Index\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"jklsnt\"\u003ejklsnt\u003c/h2\u003e\n\u003cp\u003eSome friends and I started a small collection of fun internets that we made. \u003ca href=\"https://www.jklsnt.com/\"\u003eCheck it out!\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"how-do-i-know-you-are-you\"\u003eHow do I know you are you?\u003c/h2\u003e\n\u003cp\u003eGood question! \u003ccode\u003egpg --locate-keys houjun@jemoka.com\u003c/code\u003e. Note that GPG don\u0026rsquo;t actually check fingerprints you received so do that yourself (\u003ccode\u003eCA0D6B9C1EA1CD08F0AC1802E7EDDE691807A0C6\u003c/code\u003e).\u003c/p\u003e\n\u003ch2 id=\"bugga-bugga-bontehu\"\u003eBugga Bugga Bontehu?\u003c/h2\u003e\n\u003cp\u003eSometimes I use this domain as a downlink to fastcalculator to friends and coworkers. 
To achieve this, here are two links you could click on that I don\u0026rsquo;t always promise do anything: \u003ca href=\"https://oliver.jemoka.com/\"\u003eoliver\u003c/a\u003e and \u003ca href=\"https://socks.jemoka.com/\"\u003esocks\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhindex/","tags":null,"title":"About"},{"categories":null,"contents":"Here\u0026rsquo;s a fun implementation of absolute value.\nlong abs_value(long num) { long sign = num \u0026gt;\u0026gt; (sizeof(long)*CHAR_BIT - 1); // so you only get all 1s or all 0s return (num ^ sign) - sign; // sign is either -1 or 0. So, if num is non-negative // num^sign is not going to do anything (as 0^0 = 0, 0^1 = 1). // If num negative, num^sign is going to flip the bit AND subtract // negative on (i.e. add one) } ","html":"\u003cp\u003eHere\u0026rsquo;s a fun implementation of absolute value.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eabs_value\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esign\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u0026gt;\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCHAR_BIT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// so you only get all 1s or all 0s\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esign\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esign\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sign is either -1 or 0. 
So, if num is non-negative\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// num^sign is not going to do anything (as 0^0 = 0, 0^1 = 1).\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// If num negative, num^sign is going to flip the bit AND subtract\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// negative on (i.e. add one)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhabsolute_value_function/","tags":null,"title":"Absolute Value Function"},{"categories":null,"contents":"To determine\n","html":"\u003cp\u003eTo determine\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaccounting_price/","tags":null,"title":"accounting price"},{"categories":null,"contents":"Capecitabmine =\u0026gt; 5-Fluoropyrimidine =\u0026gt; Cancer cell death.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcapecitabmine/\"\u003eCapecitabmine\u003c/a\u003e =\u0026gt; 5-\u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e =\u0026gt; Cancer cell death.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaction_of_capecitabmine/","tags":null,"title":"action of 
Capecitabmine"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaction_research/","tags":null,"title":"action research"},{"categories":null,"contents":"Quality of taking a particular value at a function\u0026mdash;\u0026ldquo;expected discounted return when following a policy from \\(S\\) and taking \\(a\\)\u0026rdquo;:\n\\begin{equation} Q(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;) \\end{equation}\nwhere, \\(T\\) is the transition probability from \\(s\\) to \\(s\u0026rsquo;\\) given action \\(a\\).\nvalue function Therefore, the utility of being in a state (called the value function) is:\n\\begin{equation} U(s) = \\max_{a} Q(s,a) \\end{equation}\n\u0026ldquo;the utility that gains the best action-value\u0026rdquo;\nvalue-function policy A value-function policy is a policy that maximizes the action-value\n\\begin{equation} \\pi(s) = \\arg\\max_{a} Q(s,a) \\end{equation}\n\u0026ldquo;the policy that takes the best action to maximize action-value\u0026rdquo;\nwe call this \\(\\pi\\) \u0026ldquo;greedy policy with respect to \\(U\\)\u0026rdquo;\nadvantage see advantage function\n","html":"\u003cp\u003eQuality of taking a particular value at a function\u0026mdash;\u0026ldquo;expected discounted return when following a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from \\(S\\) and taking \\(a\\)\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T\\) is the transition probability from \\(s\\) to \\(s\u0026rsquo;\\) given action \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"value-function--kbhaction-value-function-dot-md\"\u003e\u003ca href=\"/posts/kbhaction_value_function/\"\u003evalue function\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eTherefore, the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of being in a 
state (called the \u003ca href=\"/posts/kbhaction_value_function/\"\u003evalue function\u003c/a\u003e) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) = \\max_{a} Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e that gains the best \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"value-function-policy\"\u003evalue-function policy\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#value-function-policy\"\u003evalue-function policy\u003c/a\u003e is a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that maximizes the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(s) = \\arg\\max_{a} Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that takes the best action to maximize \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ewe call this \\(\\pi\\) \u0026ldquo;\u003ca href=\"#value-function-policy\"\u003egreedy policy\u003c/a\u003e with respect to \\(U\\)\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"advantage\"\u003eadvantage\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhadvantage_function/\"\u003eadvantage function\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaction_value_function/","tags":null,"title":"action-value function"},{"categories":null,"contents":"Comes from doi.org/10.3389/fcomp.2020.00001\nADR is a vectorization/encoding technique whereby time-series data is segmented, clustered via solf-organizing maps, and the centroids of the clusters are used as the encoding\n","html":"\u003cp\u003eComes from \u003ca 
href=\"https://www.frontiersin.org/articles/10.3389/fcomp.2020.00001/full\"\u003edoi.org/10.3389/fcomp.2020.00001\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e is a vectorization/encoding technique whereby time-series data is segmented, clustered via solf-organizing maps, and the centroids of the clusters are used as the encoding\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactive_data_representation/","tags":null,"title":"Active Data Representation"},{"categories":null,"contents":"create a space of molecules of its ocnstructions, and use active learning to search through it.\n","html":"\u003cp\u003ecreate a space of molecules of its ocnstructions, and use active learning to search through it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactive_learning_molecule_iteration/","tags":null,"title":"Active Learning Molecule Iteration"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhactive_listening/","tags":null,"title":"active listening"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhactive_recall/","tags":null,"title":"active recall"},{"categories":null,"contents":"Create an approximation of the value function \\(U_{\\phi}\\) using Approximate Value Function, and use Policy Gradient to optimize an monte-carlo tree search policy\n","html":"\u003cp\u003eCreate an approximation of the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e \\(U_{\\phi}\\) using \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eApproximate Value Function\u003c/a\u003e, and use \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e to optimize an \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e 
policy\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactor_critic/","tags":null,"title":"Actor-Critic"},{"categories":null,"contents":"How do you sample particle filters? This doesn\u0026rsquo;t work for a continuous action space.\nContributions Uses KLD sampling\u0026mdash;adaptive sampling of particple filters \u0026ldquo;belief packing\u0026rdquo;\u0026mdash;pack similar beliefs together, making observation tree smaller KLD Sampling KLD Sampling uses KL Divergence to approximate difference between two probability distributions:\n\\begin{equation} N \\approx \\frac{k-1}{2\\xi} \\qty(1- \\frac{2}{9(k-1)} + \\sqrt{\\frac{2}{9(k-1)}} z_{1-\\eta})^{3} \\end{equation}\n\u0026ldquo;Propagation\u0026rdquo; We want to get a set of sampled observations from belief + action.\nBelief Packing L1 norm between beliefs. If its too small consider them the same beliefs.\n","html":"\u003cp\u003eHow do you sample particle filters? This doesn\u0026rsquo;t work for a continuous action space.\u003c/p\u003e\n\u003ch2 id=\"contributions\"\u003eContributions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUses KLD sampling\u0026mdash;adaptive sampling of particple filters\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;belief packing\u0026rdquo;\u0026mdash;pack similar beliefs together, making observation tree smaller\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"kld-sampling\"\u003eKLD Sampling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#kld-sampling\"\u003eKLD Sampling\u003c/a\u003e uses \u003ca href=\"/posts/kbhkl_divergence/\"\u003eKL Divergence\u003c/a\u003e to approximate difference between two probability distributions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN \\approx \\frac{k-1}{2\\xi} \\qty(1- \\frac{2}{9(k-1)} + \\sqrt{\\frac{2}{9(k-1)}} z_{1-\\eta})^{3}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"propagation\"\u003e\u0026ldquo;Propagation\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eWe want to get a set of sampled observations from belief + 
action.\u003c/p\u003e\n\u003ch2 id=\"belief-packing\"\u003eBelief Packing\u003c/h2\u003e\n\u003cp\u003eL1 norm between beliefs. If its too small consider them the same beliefs.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadaops/","tags":null,"title":"AdaOPS"},{"categories":null,"contents":"Operation that adds elements in a set\nconstituents A set \\(V\\) Each non-necessarily-distinct elements \\(u,v \\in V\\) requirements addition on a set \\(V\\) is defined by a function that assigned an element named \\(u+v \\in V\\) (its closed), \\(\\forall u,v\\in V\\)\nadditional information See also addition in \\(\\mathbb{F}^n\\)\n","html":"\u003cp\u003eOperation that adds elements in a set\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA set \\(V\\)\u003c/li\u003e\n\u003cli\u003eEach non-necessarily-distinct elements \\(u,v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e on a set \\(V\\) is defined by a function that assigned an element named \\(u+v \\in V\\) (its \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e), \\(\\forall u,v\\in V\\)\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadding/","tags":null,"title":"adding"},{"categories":null,"contents":"The additive identity allows another number to retain its identity after adding. 
That is: there exists an element \\(0\\) such that \\(v+0=v\\) for whatever structure \\(v\\) and addition \\(+\\) you are working with.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e allows another \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e to retain its identity after \u003ca href=\"/posts/kbhadding/\"\u003eadding\u003c/a\u003e. That is: there exists an element \\(0\\) such that \\(v+0=v\\) for whatever structure \\(v\\) and addition \\(+\\) you are working with.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadditive_identity/","tags":null,"title":"additive identity"},{"categories":null,"contents":"Assume for the sake of contradiction \\(\\exists\\ 0, 0\u0026rsquo;\\) both being additive identities in vector space \\(V\\).\nTherefore:\n\\begin{equation} 0+0\u0026rsquo; = 0\u0026rsquo; +0 \\end{equation}\ncommutativity.\nTherefore:\n\\begin{equation} 0+0\u0026rsquo; = 0 = 0\u0026rsquo;+0 = 0' \\end{equation}\ndefn. of identity.\nHence: \\(0=0\u0026rsquo;\\), \\(\\blacksquare\\).\n","html":"\u003cp\u003eAssume for the sake of contradiction \\(\\exists\\ 0, 0\u0026rsquo;\\) both being additive identities in vector space \\(V\\).\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+0\u0026rsquo; = 0\u0026rsquo; +0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+0\u0026rsquo; = 0 = 0\u0026rsquo;+0 = 0'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. 
of identity.\u003c/p\u003e\n\u003cp\u003eHence: \\(0=0\u0026rsquo;\\), \\(\\blacksquare\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadditive_identity_is_unique_in_a_vector_space/","tags":null,"title":"additive identity is unique in a vector space"},{"categories":null,"contents":"Take a vector \\(v \\in V\\) and additive inverses \\(a,b \\in V\\).\n\\begin{equation} a+0 = a \\end{equation}\ndefn. of additive identity\n\\begin{equation} a+(v+b) = a \\end{equation}\ndefn. of additive inverse\n\\begin{equation} (a+v)+b = a \\end{equation}\nassociativity\n\\begin{equation} 0+b = a \\end{equation}\ndefn. of additive inverse\n\\begin{equation} b=a\\ \\blacksquare \\end{equation}\n","html":"\u003cp\u003eTake a vector \\(v \\in V\\) and additive inverses \\(a,b \\in V\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+0 = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. of additive identity\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+(v+b) = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. of additive inverse\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(a+v)+b = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eassociativity\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+b = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. of additive inverse\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb=a\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadditive_inverse_is_unique_in_a_vector_space/","tags":null,"title":"additive inverse is unique in a vector space"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhadhd/","tags":null,"title":"ADHD"},{"categories":null,"contents":"adMe: absorbtion, distribution, metabolism, excretion.\nPharmacology treatment of diseases. The microbiome regulates metabolism.\n","html":"\u003cp\u003eadMe: absorbtion, distribution, metabolism, excretion.\u003c/p\u003e\n\u003cp\u003ePharmacology treatment of diseases. 
The \u003ca href=\"\"\u003emicrobiome\u003c/a\u003e regulates metabolism.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadme/","tags":null,"title":"adMe"},{"categories":null,"contents":"ADReSS Challenge is a Alzheimer\u0026rsquo;s Dementia Recognition challenge from the data available on DementiaBank.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e is a Alzheimer\u0026rsquo;s Dementia Recognition challenge from the data available on \u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadress_challenge/","tags":null,"title":"ADReSS Challenge"},{"categories":null,"contents":"The ADReSS Literature Survey is a literature survey for the results published during the ADReSS Challenge.\nAntonsson 2021: disfluency + SVF features trained on SVM: lexical \u0026gt; narrative qual. Chlasta 2021: features extracted from VGGish on SVM; also trained new CNN from .wav. Sadeghian 2021: Used GA for feature sel., achieved 94% w/ MMSE alone; dev\u0026rsquo;d ASR tool. Martinc 2021: CBOW (text) + ADR (sound) late fusion\u0026rsquo;d to a BERT, ablated for features. Meghanani 2021: spontaneous speech transcripts with fastText and CNN; 83.33% acc. Yuan 2021: ERNIE on transcripts with pause encoding; 89.6% acc. Jonell 2021: Developed a kitchen sink of diag. tools and correlated it with biomarkers. Laguarta 2021: multimodel (OVBM) to embed auditory info + biomarkers for clsf. Shah 2021: late fusion of n-gram and OpenSMILE on std. classifiers. Lindsay 2021: Cross-linguistic markers shared for AD patients between English and French. Zhu 2021: late fusion of CTP task for AD clsf. w/ transf., mobilenet, yamnet, mockingjay. Guo 2021: WLS data to augment CTP from ADReSS Challenge and trained it on a BERT. Balagopalan 2021: lexo. and synt. features trained on a BERT and other models. 
Mahajan 2021: a bimodal model on speech/text with GRU on speech and CNN-LSTM on text. Parvin 2020: excercize scheme effects on theta/alpha ratio and Brain wave frequency. Luz 2021: review paper presenting the ADReSSo challenge and current baselines. From Meghanani 2021, a review:\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhadress_literature_survey/\"\u003eADReSS Literature Survey\u003c/a\u003e is a literature survey for the results published during the \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e: disfluency + \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e features trained on SVM: lexical \u0026gt; narrative qual.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchlasta_2021/\"\u003eChlasta 2021\u003c/a\u003e: features extracted from \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e on SVM; also trained new CNN from .wav.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsadeghian_2021/\"\u003eSadeghian 2021\u003c/a\u003e: Used \u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003eGA\u003c/a\u003e for feature sel., achieved 94% w/ \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e alone; dev\u0026rsquo;d \u003ca href=\"/posts/kbhasr/\"\u003eASR\u003c/a\u003e tool.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e: CBOW (text) + \u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e (sound) \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u0026rsquo;d\u003c/a\u003e to a BERT, ablated for features.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmeghanani_2021/\"\u003eMeghanani 2021\u003c/a\u003e: spontaneous speech transcripts with fastText and CNN; 83.33% acc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 
2021\u003c/a\u003e: ERNIE on transcripts with pause encoding; 89.6% acc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjonell_2021/\"\u003eJonell 2021\u003c/a\u003e: Developed a kitchen sink of diag. tools and correlated it with biomarkers.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlaguarta_2021/\"\u003eLaguarta 2021\u003c/a\u003e: multimodel (\u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOVBM\u003c/a\u003e) to embed auditory info + biomarkers for clsf.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhshah_2021/\"\u003eShah 2021\u003c/a\u003e: \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of n-gram and \u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e on std. classifiers.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlindsay_2021/\"\u003eLindsay 2021\u003c/a\u003e: Cross-linguistic markers shared for AD patients between English and French.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzhu_2021/\"\u003eZhu 2021\u003c/a\u003e: \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task for AD clsf. w/ transf., mobilenet, yamnet, mockingjay.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhguo_2021/\"\u003eGuo 2021\u003c/a\u003e: WLS data to augment \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e from \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e and trained it on a BERT.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbalagopalan_2021/\"\u003eBalagopalan 2021\u003c/a\u003e: lexo. and synt. 
features trained on a BERT and other models.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmahajan_2021/\"\u003eMahajan 2021\u003c/a\u003e: a bimodal model on speech/text with GRU on speech and CNN-LSTM on text.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparvin_2020/\"\u003eParvin 2020\u003c/a\u003e: excercize scheme effects on \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e and Brain wave frequency.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhluz_2021/\"\u003eLuz 2021\u003c/a\u003e: review paper presenting the ADReSSo challenge and current baselines.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFrom \u003ca href=\"/posts/kbhmeghanani_2021/\"\u003eMeghanani 2021\u003c/a\u003e, a review:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-32-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhadress_literature_survey/","tags":["index"],"title":"ADReSS Literature Survey Index"},{"categories":null,"contents":"an advantage function is a method for scoring a policy based on how much additional value it provides compared to the greedy policy:\n\\begin{align} A(s,a) \u0026amp;= Q(s,a) - U(s) \\\\ \u0026amp;= Q(s,a) - \\max_{a}Q(s,a) \\end{align}\nthat is, how much does your policy\u0026rsquo;s action-value function differ from that of choosing the action that maximizes the utility.\nFor a greedy policy that just optimizes this exact metric, \\(A =0\\).\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhadvantage_function/\"\u003eadvantage function\u003c/a\u003e is a method for scoring a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e based on how much additional value it provides compared to the \u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003egreedy policy\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nA(s,a) \u0026amp;= Q(s,a) - U(s) \\\\\n\u0026amp;= Q(s,a) - 
\\max_{a}Q(s,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethat is, how much does your policy\u0026rsquo;s \u003ca href=\"/posts/kbhpolicy_evaluation/#action-value-function\"\u003eaction-value function\u003c/a\u003e differ from that of choosing the action that maximizes the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor a \u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003egreedy policy\u003c/a\u003e that just optimizes this exact metric, \\(A =0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadvantage_function/","tags":null,"title":"advantage function"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhadvertising/","tags":null,"title":"advertising"},{"categories":null,"contents":"an affine subset of \\(V\\) is a subset of \\(V\\) that is the sum of a vector and one of its subspace; that is, an affine subset of \\(V\\) is a subset of \\(V\\) of the form \\(v+U\\) for \\(v \\in V\\) and subspace \\(U \\subset V\\).\nfor \\(v \\in V\\) and \\(U \\subset V\\), an affine subset \\(v+U\\) is said to be parallel to \\(U\\).\nthat is, an affine subset for \\(U \\subset V\\) and \\(v \\in V\\):\n\\begin{equation} v + U = \\{v+u : u \\in U\\} \\end{equation}\nadditional information two affine subsets parallel to \\(U\\) are either equal or disjoint Suppose \\(U\\) is a subspace of \\(V\\); and \\(v,w \\in V\\), then, if one of the following is true all of them are true:\n\\(v-w \\in U\\) \\(v+U = w+U\\) \\((v+U) \\cap (w+U) \\neq \\emptyset\\) \\(1 \\implies 2\\) Given \\(v-w \\in U\\)\u0026hellip;.\nFor an element in \\(v+U\\), we have that \\(v+u = (w-w)+v+u = w+((v-w)+u) \\in w + U\\). This is because \\(U\\) is closed so adding \\(v-w \\in U\\) and \\(u\\) will remain being in \\(U\\). 
\\(w-w=0\\) just by everything being in \\(V\\).\nWe now have \\(v+u \\in w+U\\ \\forall u \\in U\\); we now can reverse the argument to argue in a similar fashion that \\(w+u \\in v+U\\ \\forall u \\in U\\). So, we have that \\(v+U \\subset w+U\\) and \\(w+U \\subset v+U\\). So \\(v+U = w+U\\), as desired.\n\\(2 \\implies 3\\) By definition of \\(v+U=w+U\\) as long as \\(v+U\\) and \\(w+U\\) is not empty sets, which they can\u0026rsquo;t be because \\(U\\) is a vector space so guaranteed nonempty.\n\\(3\\implies 1\\) Given \\((v+U) \\cap (w+U) \\neq \\emptyset\\), we have that there exists some \\(u_1, u_2 \\in U\\) such that \\(v+u_1 = w+u_2\\). Because everything here is in \\(V\\), we can add their respective inverses (\u0026ldquo;move them around\u0026rdquo;) such that: \\(v-w = u_2-u_1\\). Therefore \\(u_2-u_1 \\in U \\implies v-w \\in U\\).\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e of \\(V\\) is a subset of \\(V\\) that is the \u003ca href=\"/posts/kbhsum_of_vector_and_subspace/\"\u003esum of a vector and one of its subspace\u003c/a\u003e; that is, an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e of \\(V\\) is a subset of \\(V\\) of the form \\(v+U\\) for \\(v \\in V\\) and \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U \\subset V\\).\u003c/p\u003e\n\u003cp\u003efor \\(v \\in V\\) and \\(U \\subset V\\), an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e \\(v+U\\) is said to be \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e to \\(U\\).\u003c/p\u003e\n\u003cp\u003ethat is, an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e for \\(U \\subset V\\) and \\(v \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv + U = \\{v+u : u \\in U\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional 
information\u003c/h2\u003e\n\u003ch3 id=\"two-affine-subset--kbhparallel-linear-algebra-dot-md--s-parallel--kbhparallel-linear-algebra-dot-md--to-u-are-either-equal-or-disjoint\"\u003etwo \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e to \\(U\\) are either equal or disjoint\u003c/h3\u003e\n\u003cp\u003eSuppose \\(U\\) is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\); and \\(v,w \\in V\\), then, if one of the following is true all of them are true:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(v-w \\in U\\)\u003c/li\u003e\n\u003cli\u003e\\(v+U = w+U\\)\u003c/li\u003e\n\u003cli\u003e\\((v+U) \\cap (w+U) \\neq \\emptyset\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"1-implies-2\"\u003e\\(1 \\implies 2\\)\u003c/h4\u003e\n\u003cp\u003eGiven \\(v-w \\in U\\)\u0026hellip;.\u003c/p\u003e\n\u003cp\u003eFor an element in \\(v+U\\), we have that \\(v+u = (w-w)+v+u = w+((v-w)+u) \\in w + U\\). This is because \\(U\\) is closed so adding \\(v-w \\in U\\) and \\(u\\) will remain being in \\(U\\). \\(w-w=0\\) just by everything being in \\(V\\).\u003c/p\u003e\n\u003cp\u003eWe now have \\(v+u \\in w+U\\ \\forall u \\in U\\); we now can reverse the argument to argue in a similar fashion that \\(w+u \\in v+U\\ \\forall u \\in U\\). So, we have that \\(v+U \\subset w+U\\) and \\(w+U \\subset v+U\\). 
So \\(v+U = w+U\\), as desired.\u003c/p\u003e\n\u003ch4 id=\"2-implies-3\"\u003e\\(2 \\implies 3\\)\u003c/h4\u003e\n\u003cp\u003eBy definition of \\(v+U=w+U\\) as long as \\(v+U\\) and \\(w+U\\) is not empty sets, which they can\u0026rsquo;t be because \\(U\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e so guaranteed nonempty.\u003c/p\u003e\n\u003ch4 id=\"3-implies-1\"\u003e\\(3\\implies 1\\)\u003c/h4\u003e\n\u003cp\u003eGiven \\((v+U) \\cap (w+U) \\neq \\emptyset\\), we have that there exists some \\(u_1, u_2 \\in U\\) such that \\(v+u_1 = w+u_2\\). Because everything here is in \\(V\\), we can add their respective inverses (\u0026ldquo;move them around\u0026rdquo;) such that: \\(v-w = u_2-u_1\\). Therefore \\(u_2-u_1 \\in U \\implies v-w \\in U\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparallel_linear_algebra/","tags":null,"title":"affine subset"},{"categories":null,"contents":"In math, an affine transformation is a transformation that preserves lines and parallelism.\nFor instance, here is an affine transformation:\n\\begin{equation} U\u0026rsquo;(S) = mU(s) + b \\end{equation}\nwhere \\(m \u0026gt; 0\\), and \\(b\\) is unconstrained.\nhttps://en.wikipedia.org/wiki/Affine_transformation\n","html":"\u003cp\u003eIn math, an affine transformation is a transformation that preserves lines and parallelism.\u003c/p\u003e\n\u003cp\u003eFor instance, here is an \u003ca href=\"/posts/kbhaffine_transformation/\"\u003eaffine transformation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU\u0026rsquo;(S) = mU(s) + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(m \u0026gt; 0\\), and \\(b\\) is unconstrained.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Affine_transformation\"\u003ehttps://en.wikipedia.org/wiki/Affine_transformation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaffine_transformation/","tags":null,"title":"affine 
transformation"},{"categories":null,"contents":"An agent is an entity that act upon the observations of its environment.\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e is an entity that act upon the observations of its environment.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhagent/","tags":null,"title":"agent"},{"categories":null,"contents":"Agricultural Adjustment Administration is a part of the New Deal programs to support the agricultural sector and maintain supply. They regulated production of seven different crops to group increase farming income. It is very far-reaching of other parts of the economy.\nIt was ruled unconstitutional in 1936.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhagricultural_adjustment_administration/\"\u003eAgricultural Adjustment Administration\u003c/a\u003e is a part of the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e programs to support the agricultural sector and maintain supply. They regulated production of seven different crops to group increase farming income. 
It is very far-reaching of other parts of the economy.\u003c/p\u003e\n\u003cp\u003eIt was ruled unconstitutional in 1936.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhagricultural_adjustment_administration/","tags":null,"title":"Agricultural Adjustment Administration"},{"categories":null,"contents":"AgRP is a type of neurons that stimulates food intake.\nInhibit metacortin Activate NPY Release GABA Diet-induced obesity blunts AgRP response, and so, because AgRP plays a part in thermoregulation, diet-inducsed obesity responds less to temperature changes.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e is a type of neurons that stimulates food intake.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInhibit metacortin\u003c/li\u003e\n\u003cli\u003eActivate \u003ca href=\"\"\u003eNPY\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eRelease \u003ca href=\"\"\u003eGABA\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDiet-induced \u003ca href=\"\"\u003eobesity\u003c/a\u003e blunts \u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e response, and so, because \u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e plays a part in \u003ca href=\"/posts/kbhthermoregulation/\"\u003ethermoregulation\u003c/a\u003e, diet-inducsed \u003ca href=\"\"\u003eobesity\u003c/a\u003e responds less to temperature changes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhagrp/","tags":null,"title":"AgRP"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhai/","tags":null,"title":"AI"},{"categories":null,"contents":"AI Ethics is the Ethics of training AI models.\n","html":"\u003cp\u003eAI Ethics is the Ethics of training \u003ca href=\"/posts/kbhai/\"\u003eAI\u003c/a\u003e models.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_ethics/","tags":null,"title":"AI Ethics"},{"categories":null,"contents":"Iteration of Healthcare System\ndigital structure =\u0026gt; 
configuration =\u0026gt; activities or processes =\u0026gt; outcomes =\u0026gt; more structures\nEHR decision support systems Technology in general is becoming safer over time =\u0026gt; ideally, some form of high level safety is achieved.\nimportant: define a shared vocabulary between technologists and clinicians\nTechnology-Induced Errors Though EHS can serve to reduce medical errors, it can work in increase errors as well.\nbias ignorance of environmental factors Error reporting + incident reporting Resolution get rid of each problem help establish well-configured systems which could promote safety patient simulation with clinician + technology to see how it fits into process of care what could that look like from a home health-care perspective ","html":"\u003cp\u003eIteration of Healthcare System\u003c/p\u003e\n\u003cp\u003edigital structure =\u0026gt; configuration =\u0026gt; activities or processes =\u0026gt; outcomes =\u0026gt; more structures\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eEHR\u003c/li\u003e\n\u003cli\u003edecision support systems\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTechnology in general is becoming safer over time =\u0026gt; ideally, some form of high level safety is achieved.\u003c/p\u003e\n\u003cp\u003eimportant: \u003cstrong\u003edefine a shared vocabulary between technologists and clinicians\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"technology-induced-errors\"\u003eTechnology-Induced Errors\u003c/h2\u003e\n\u003cp\u003eThough EHS can serve to \u003cstrong\u003ereduce medical errors\u003c/strong\u003e, it can work in \u003cstrong\u003eincrease errors\u003c/strong\u003e as well.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebias\u003c/li\u003e\n\u003cli\u003eignorance of environmental factors\u003c/li\u003e\n\u003cli\u003eError reporting + incident reporting\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"resolution\"\u003eResolution\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eget rid of each 
problem\u003c/li\u003e\n\u003cli\u003ehelp establish well-configured systems which could promote safety\u003c/li\u003e\n\u003cli\u003epatient simulation with clinician + technology to see how it fits into process of care\u003c/li\u003e\n\u003cli\u003ewhat could that look like from a home health-care perspective\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_healthcare_safety/","tags":null,"title":"AI Healthcare Safety"},{"categories":null,"contents":"The BIG NEW THING in AI research. Because normal results for seq2sseq is already doing very well. Slightly more realistic models of language acquisition.\n\u0026ldquo;Linguistics is not just about human languages\u0026rdquo;: humans, animals, and machines.\nBIG AGI What: question is the difference between a dumb human and GPT?\nSpeech is Based for Interoperability continuous data (as opposed to text) much less complex with vision: vision is more complex speech is a nice controllable system (????) Models of Language Acquisition https://arxiv.org/pdf/2309.07861.pdf\nApproximation of spoken language: GAN generates ElectroMagnetic Articulagraphy; then, a pretrained model turns that into a spoken language.\nThen, a discriminator then perform discrimination; and a decoder decodes the speech and feeds it back into the gan with a reconstruction loss.\nYou can take a latent representation and stretch it real hard.\nYou can also feed the audio into \\(Q\\), and decode an latent encoding, and then performs changes.\nPer layer, you can also add the activations of all filters outputs together to get a pattern of activation (frequency vs. number of samples) to figure out what is being encoded. If you see high activations at vowels, it\u0026rsquo;d be values, etc.\nearlier layers correspond to brain stem layer layers correspond to acoustic envelope questions whale still has no semantics though? 
reconstruction loss between brain and intermediate layers ","html":"\u003cp\u003eThe BIG NEW THING in AI research. Because normal results for seq2sseq is already doing very well. Slightly more realistic models of language acquisition.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Linguistics is not just about human languages\u0026rdquo;: humans, animals, and machines.\u003c/p\u003e\n\u003cp\u003eBIG AGI What: question is the difference between a dumb human and GPT?\u003c/p\u003e\n\u003ch2 id=\"speech-is-based-for-interoperability\"\u003eSpeech is Based for Interoperability\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003econtinuous data (as opposed to text)\u003c/li\u003e\n\u003cli\u003emuch less complex with vision: vision is more complex\u003c/li\u003e\n\u003cli\u003espeech is a nice controllable system (????)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"models-of-language-acquisition\"\u003eModels of Language Acquisition\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://arxiv.org/pdf/2309.07861.pdf\"\u003ehttps://arxiv.org/pdf/2309.07861.pdf\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-26_11-13-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eApproximation of spoken language: GAN generates ElectroMagnetic Articulagraphy; then, a pretrained model turns that into a spoken language.\u003c/p\u003e\n\u003cp\u003eThen, a discriminator then perform discrimination; and a decoder decodes the speech and feeds it back into the gan with a reconstruction loss.\u003c/p\u003e\n\u003cp\u003eYou can take a latent representation and stretch it real hard.\u003c/p\u003e\n\u003cp\u003eYou can also feed the audio into \\(Q\\), and decode an latent encoding, and then performs changes.\u003c/p\u003e\n\u003cp\u003ePer layer, you can also add the activations of all filters outputs together to get a pattern of activation (frequency vs. number of samples) to figure out what is being encoded. 
If you see high activations at vowels, it\u0026rsquo;d be values, etc.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eearlier layers correspond to brain stem\u003c/li\u003e\n\u003cli\u003elayer layers correspond to acoustic envelope\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003equestions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ewhale still has no semantics though?\u003c/li\u003e\n\u003cli\u003ereconstruction loss between brain and intermediate layers\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_intepretability/","tags":null,"title":"AI Intepretability"},{"categories":null,"contents":"A lecture hosted by Cynthia Lee.\n\u0026ldquo;AI: how it works \u0026amp; why its often so biased\u0026rdquo;\nDefining Artificial Intelligence.\n","html":"\u003cp\u003eA lecture hosted by \u003ca href=\"/posts/kbhcynthia_lee/\"\u003eCynthia Lee\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;AI: how it works \u0026amp; why its often so biased\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eDefining \u003ca href=\"/posts/kbhartificial_intelligence/\"\u003eArtificial Intelligence\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_master_class/","tags":null,"title":"AI Master Class"},{"categories":null,"contents":"AI/Clinical Decision Support\nworkload measurement clinical wellness, etc. perioperative outcomes Big problem: integration is impossible; there\u0026rsquo;s lots of models. \u0026ldquo;Researched models are rarely implemented; implemented models are rarely researched\u0026rdquo;. Epic doesn\u0026rsquo;t stand behind its models.\nImplementation of AI based mechanisms though \u0026ldquo;saves time\u0026rdquo; on paper, results in more patient throughput and patient burn. 
At this point: harder question\u0026mdash;not whether you can make a model, but how do you govren their use and actually put them into implementation.\nMPOG: multi-centre perioperative outcomes group\n","html":"\u003cp\u003e\u003cstrong\u003eAI/Clinical Decision Support\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eworkload measurement\u003c/li\u003e\n\u003cli\u003eclinical wellness, etc.\u003c/li\u003e\n\u003cli\u003eperioperative outcomes\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBig problem: integration is impossible; there\u0026rsquo;s lots of models. \u0026ldquo;Researched models are \u003cstrong\u003erarely\u003c/strong\u003e implemented; implemented models are \u003cstrong\u003erarely\u003c/strong\u003e researched\u0026rdquo;. Epic doesn\u0026rsquo;t stand behind its models.\u003c/p\u003e\n\u003cp\u003eImplementation of AI based mechanisms though \u0026ldquo;saves time\u0026rdquo; on paper, results in more patient throughput and patient burn. At this point: harder question\u0026mdash;not whether you can make a model, but how do you govren their use and actually put them into implementation.\u003c/p\u003e\n\u003cp\u003eMPOG: multi-centre perioperative outcomes group\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_medicine/","tags":null,"title":"AI Medicine"},{"categories":null,"contents":"AIBridge is an introductory AI bootcamp developed and taught by Prof. Xin Liu, yours truly, and Samuel Ren in collaboration with AIFS.\nAIBRidge Notes Pause [more] to allow some time to see if people follow did y\u0026rsquo;all not introduce pandas? Closest to doing this without try/except:\nslide 49: what is conc? is this too much recap time? Haven\u0026rsquo;t we been recapping for a long while already? 
probably good to mention what is /content/iris.data, also, just opening from ./iris.data should work and will be probably more ergonomic read function confusion .read() =\u0026gt; str .readlines() =\u0026gt; [str] the pauses feel a tad ackward? speak up! SSE squares and lines need to be darker: increase opacity 39 \u0026ldquo;very common metric\u0026rdquo; \u0026mdash; not a metric motivate confidence value better; the \u0026ldquo;middle\u0026rdquo; question makes sense I think its actually probably good to explain cross-entropy in the future (i.e. its not a lot of fancy math + I think it provides a lot of intuition w.r.t. one-hot encoding, probablitiy distributions, etc.) Problem with how I made the old slides: multi-Class classification (1va, ava, etc.) needs better motivation before, otherwise throwing three classes on the screen is a tad confusing motivate that the whole random.seed business is so that the whole class can compare answers more effectively LogReg = LogisticRegression(), typically, name instance variables as lower snake case; so maybe call it my_log_reg or something ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhaibridge/\"\u003eAIBridge\u003c/a\u003e is an introductory AI bootcamp developed and taught by \u003ca href=\"/posts/kbhprof_xin_liu/\"\u003eProf. Xin Liu\u003c/a\u003e, yours truly, and Samuel Ren in collaboration with \u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"aibridge-notes\"\u003eAIBRidge Notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePause [more] to allow some time to see if people follow\u003c/li\u003e\n\u003cli\u003edid y\u0026rsquo;all not introduce pandas?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eClosest to doing this without try/except:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eslide 49: what is conc?\u003c/li\u003e\n\u003cli\u003eis this too much recap time? 
Haven\u0026rsquo;t we been recapping for a long while already?\u003c/li\u003e\n\u003cli\u003eprobably good to mention what is \u003ccode\u003e/content/iris.data\u003c/code\u003e, also, just opening from \u003ccode\u003e./iris.data\u003c/code\u003e should work and will be probably more ergonomic\u003c/li\u003e\n\u003cli\u003eread function confusion\n\u003cul\u003e\n\u003cli\u003e.read() =\u0026gt; str\u003c/li\u003e\n\u003cli\u003e.readlines() =\u0026gt; [str]\u003c/li\u003e\n\u003cli\u003ethe pauses feel a tad ackward?\u003c/li\u003e\n\u003cli\u003espeak up!\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cul\u003e\n\u003cli\u003eSSE squares and lines need to be darker: increase opacity 39\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;very common metric\u0026rdquo; \u0026mdash; not a metric\u003c/li\u003e\n\u003cli\u003emotivate confidence value better; the \u0026ldquo;middle\u0026rdquo; question makes sense\u003c/li\u003e\n\u003cli\u003eI think its actually probably good to explain cross-entropy in the future\n\u003cul\u003e\n\u003cli\u003e(i.e. its not a lot of fancy math + I think it provides a lot of intuition w.r.t. one-hot encoding, probablitiy distributions, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eProblem with how I made the old slides: multi-Class classification (1va, ava, etc.) 
needs better motivation before, otherwise throwing three classes on the screen is a tad confusing\u003c/li\u003e\n\u003cli\u003emotivate that the whole \u003ccode\u003erandom.seed\u003c/code\u003e business is so that the whole class can compare answers more effectively\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eLogReg = LogisticRegression()\u003c/code\u003e, typically, name instance variables as lower snake case; so maybe call it \u003ccode\u003emy_log_reg\u003c/code\u003e or something\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge/","tags":null,"title":"AIBridge"},{"categories":null,"contents":" Welcome to the AIBridge Course homepage.\nThe purpose of AIBridge is to bridge the gap between computer science and other disciplines. To many, working with AI might seem like an unreachable objective. However, in reality, one week is enough to get started. AIBridge will provide basic programming capability in Python and knowledge of object-oriented programming as well as the concepts behind machine learning and how to implement it using a popular toolbox, Scikit-Learn. Students work to complete a personally-defined project using techniques in AI, with data from their own research or with problems supplied by the Course. This one week course will be hosted in-person at UC Davis and will target mainly undergraduate and non-technical graduate students.\nThe course is taught by Prof. Xin Liu in collaboration with Houjun \u0026ldquo;Jack\u0026rdquo; Liu, Samuel Ren, and Albara Ah Ramli.\nEvergreen Resources Python Tutorial: W3 Schools Python Documentation: Python.org SciKit Documentation: scikit-learn.org Iris Dataset: UCI DB, or, for better user experience, scikit Wine Dataset: UCI DB Class Discord: Invite Data-Loading Cheat-Sheet: Colab When in doubt\u0026hellip;\nGoogle it! Try it! 
Andrew Ng\u0026rsquo;s Machine Learning Suite of Courses DONE Day 1: Python Basics On Monday, 06/27/2022, we covered the basics of Python so that we are all up to speed to perform basic ML with the Scikit Learn toolkit.\nIntroductory Remarks: Slides Lecture on Python Basics: Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Morning Lecture Notebook, Morning Lab Notebook, Afternoon Lecture Notebook, Afternoon Lab Notebook Day 1 feedback survey: Link\nDONE Day 2: OOP + Linear Models Today, we are going to cover the basic intuition and terminology behind Object Oriented Programming, as well as introduce two simple, linear approaches to Machine Learning tasks: linear regression and logistic regression.\nLecture on OOP and more on functions (morning): Slides Lecture on Linear and Logistic Regression (afternoon): Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Morning Lecture Notebook, Morning Lab Notebook, Afternoon Lab Notebook Day 2 feedback survey: Link\nDONE Day 3: Data + Classifier Today, we are going to cover data cleaning, and three more classifiers!\nLecture on data cleaning and pandas (morning): Slides Lecture on three classification algorithms (afternoon): Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Morning Lab Notebook, Afternoon Lab Notebook Day 3 feedback survey: Link\nDONE Day 4: Operations and Clustering Today, we are going to work on the validation operations tools, and talk about clustering\nLecture on training and data operations (morning): Slides Lecture on clustering and feature operations (afternoon): Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Afternoon Notebook Day 4 feedback survey: Link\nDay 5: Closing Thoughts Today, we are going to tie some loose ends with missing data, error analysis, semi supervised learning, cross validation, and ethics.\nClosing thoughts lecture (morning): Slides Final Project: AIBridge Final 
Project\nDay 5/Bootcamp feedback survey: Link\nOther Links and Resources Tools we use: AIBridge Packages and Tools Cleaned Wine Dataset (try cleaning it yourself before using!): Google Drive Iris Data with Temperature (don\u0026rsquo;t use without instructions, though!): Google Drive ","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-26_20-07-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWelcome to the AIBridge Course homepage.\u003c/p\u003e\n\u003cp\u003eThe purpose of AIBridge is to bridge the gap between computer science and other disciplines. To many, working with AI might seem like an unreachable objective. However, in reality, one week is enough to get started. AIBridge will provide basic programming capability in Python and knowledge of object-oriented programming as well as the concepts behind machine learning and how to implement it using a popular toolbox, Scikit-Learn. Students work to complete a personally-defined project using techniques in AI, with data from their own research or with problems supplied by the Course. This one week course will be hosted in-person at UC Davis and will target mainly undergraduate and non-technical graduate students.\u003c/p\u003e\n\u003cp\u003eThe course is taught by Prof. 
Xin Liu in collaboration with Houjun \u0026ldquo;Jack\u0026rdquo; Liu, Samuel Ren, and Albara Ah Ramli.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"evergreen-resources\"\u003eEvergreen Resources\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePython Tutorial: \u003ca href=\"https://www.w3schools.com/python/\"\u003eW3 Schools\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ePython Documentation: \u003ca href=\"https://docs.python.org/3/\"\u003ePython.org\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eSciKit Documentation: \u003ca href=\"https://scikit-learn.org/stable/getting_started.html\"\u003escikit-learn.org\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIris Dataset: \u003ca href=\"https://archive.ics.uci.edu/ml/datasets/iris\"\u003eUCI DB\u003c/a\u003e, or, for better user experience, \u003ca href=\"https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html\"\u003escikit\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWine Dataset: \u003ca href=\"https://archive.ics.uci.edu/ml/datasets/wine+quality\"\u003eUCI DB\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eClass Discord: \u003ca href=\"https://discord.gg/DNj7masa\"\u003eInvite\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eData-Loading Cheat-Sheet: \u003ca href=\"https://colab.research.google.com/drive/1VlnKSUgefcSUBPLgAvOYHvBjB9bjcUQh?usp=sharing\"\u003eColab\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhen in doubt\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGoogle it! 
Try it!\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.coursera.org/specializations/machine-learning-introduction#courses\"\u003eAndrew Ng\u0026rsquo;s Machine Learning Suite of Courses\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"day-1-python-basics\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 1: Python Basics\u003c/h2\u003e\n\u003cp\u003eOn Monday, 06/27/2022, we covered the basics of Python so that we are all up to speed to perform basic ML with the Scikit Learn toolkit.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIntroductory Remarks: \u003ca href=\"https://drive.google.com/file/d/1XPkB9GL6rG2F5s5ydTsJOMg33y87HEBB/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on Python Basics: \u003ca href=\"https://drive.google.com/file/d/1udI-c1roIS7Fb1cgGQOzRc7a6dfYZWu8/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"https://drive.google.com/file/d/1oomPZGg9NUgDhi6S_RuTH60Vzlv5kD8z/view?usp=sharing\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1nG_hQ02GDpHpIlwJ6VOGRqU8obv4dCx_/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1EKSvewySaceQqSzy_sNJTWeWuEjE-T1n?usp=sharing\"\u003eMorning Lecture Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1jo5MMQsfkQ3IQ0pYI9G0pgp5bea6lUnZ?usp=sharing\"\u003eMorning Lab Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1FuFlG5UnP3H0dgFyvBG9kb21deW2UHIU#scrollTo=rTxx-vWi-qct\"\u003eAfternoon Lecture Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1HxWScbDZ0AuBrIZ0N2QDlrVzJBXORFi_#scrollTo=TkGfQYJmI3j1\"\u003eAfternoon Lab Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 1 feedback 
survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/KAdWJLDM9saTZCrT8\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-2-oop-plus-linear-models\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 2: OOP + Linear Models\u003c/h2\u003e\n\u003cp\u003eToday, we are going to cover the basic intuition and terminology behind Object Oriented Programming, as well as introduce two simple, linear approaches to Machine Learning tasks: linear regression and logistic regression.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLecture on OOP and more on functions (morning): \u003ca href=\"https://drive.google.com/file/d/1udI-c1roIS7Fb1cgGQOzRc7a6dfYZWu8/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on Linear and Logistic Regression (afternoon): \u003ca href=\"https://drive.google.com/file/d/1HXn7aat_bGzUh3vpQ7vQxNQvp6GrIi-6/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"https://drive.google.com/file/d/1nidC7fOeHMWnD_QZcSasiqRxUOEx-9Cx/view?usp=sharing\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1-PD2ZRbxyZN3kclo4FPi-cbx-wBh5cbn/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1KFotnZcEKyiRjY5fRKwjLLUbaYzc6Ogi?usp=sharing\"\u003eMorning Lecture Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1gMAZPZs3y532sb3fdeXVqKNjOz-Ri8wa?usp=sharing\"\u003eMorning Lab Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/18f3vNcDg2WKRuip31TCPRHN_t7Fy07Q9?usp=sharing\"\u003eAfternoon Lab Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 2 feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca 
href=\"https://forms.gle/VtHtozjqsB9Y113F9\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-3-data-plus-classifier\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 3: Data + Classifier\u003c/h2\u003e\n\u003cp\u003eToday, we are going to cover data cleaning, and three more classifiers!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLecture on data cleaning and pandas (morning): \u003ca href=\"https://drive.google.com/file/d/1pMHtQo1iITFSMPRls2K7gc1JGd-LK2Nv/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on three classification algorithms (afternoon): \u003ca href=\"https://drive.google.com/file/d/16Vjr3sXnoBTv_2vaa7cEz_t9qn_3QsrC/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"https://drive.google.com/file/d/16ady6_tt96YgiraSzxtZ7CBdASy3SSOu/view?usp=sharing\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1yHaSL73Tki_WULN0k85RY3wmyV3tlpLd/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1i_OfqkrdfNU-fiIbz0bmrKMLf_sZV4xc?usp=sharing\"\u003eMorning Lab Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1aKjJVnmermrw5ysPQWHX5yUJHfVcR8FJ?usp=sharing\"\u003eAfternoon Lab Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 3 feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/GybrD48kDkQbdcMi7\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-4-operations-and-clustering\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 4: Operations and Clustering\u003c/h2\u003e\n\u003cp\u003eToday, we are going to work on the validation operations tools, and talk about clustering\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLecture on training and data 
operations (morning): \u003ca href=\"https://drive.google.com/file/d/13CXp1pcXLjyAKTGq2ifimVbSeRsDnlGa/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on clustering and feature operations (afternoon): \u003ca href=\"https://drive.google.com/file/d/147eyCXJKx2tTEX_wzY-6L8jPbsZwNKZ2/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"/posts/kbhaibridge_iris_variance_worksheet/\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1I61UAf1VnziMs7N7sTinXm-QXWzPKbyA/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1zSGk2e3vFzFliNiSLCs-HxOGm7e-caLC?usp=sharing\"\u003eAfternoon Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 4 feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/F7sGtFsJryeV3SEJ8\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-5-closing-thoughts\"\u003eDay 5: Closing Thoughts\u003c/h2\u003e\n\u003cp\u003eToday, we are going to tie some loose ends with missing data, error analysis, semi supervised learning, cross validation, and ethics.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eClosing thoughts lecture (morning): \u003ca href=\"https://drive.google.com/file/d/1-a6VSDlJRdUb9MPw1d6EMajld3Pnd86N/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eFinal Project\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"/posts/kbhaibridge_final_project/\"\u003eAIBridge Final Project\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 5/Bootcamp feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/qCA34bWjfFXxeAjZ8\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 
id=\"other-links-and-resources\"\u003eOther Links and Resources\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTools we use: \u003ca href=\"/posts/kbhaibridge_packages/\"\u003eAIBridge Packages and Tools\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eCleaned Wine Dataset (try cleaning it yourself before using!): \u003ca href=\"https://drive.google.com/file/d/1K54C6QOZ2xlGJls59RRCLXr4OOa-8D1l/view?usp=sharing\"\u003eGoogle Drive\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIris Data with Temperature (don\u0026rsquo;t use without instructions, though!): \u003ca href=\"https://drive.google.com/file/d/1WgruhndN1M1md4vgS87Ho9WS3wAshROP/view?usp=sharing\"\u003eGoogle Drive\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_course_website/","tags":null,"title":"AIBridge Course Website"},{"categories":null,"contents":"Part 1: ML Training Practice One of the things that makes a very good Sommelier is their ability to figure out as much details about a wine as possible with very little information.\nYou are tasked with making a Sommelier program that is able to figure both the type and quality of wine from available chemical information. Also, you have a \u0026ldquo;flavor-ater\u0026rdquo; machine that makes a linear combination of multiple chemical features together (similar to PCA), which is counted as one chemical feature after combination.\nA good Sommelier uses as little information as possible to deduce the quality and type. So, what is the best model(s) you can build for predicting quality and type of wine based on the least amount of features? What features should you choose?\nGood luck!\nPart 2: ML Project Walk-through Create your own machine learning experiement! Begin with a problem in your field; go through the available/your own data, determine what type of problem it is, and discuss why machine learning could be a good solution for the problem. 
Research/quantify the baselines in the field for the task (remembering our discussion on ML validation methods), and determine a list of possible features of your data.\nIf we were to help collect data together, how can we best collect a representative sample? How expensive (resources, monetary, or temporal) would it be? What are some ethical issues?\nSelect the features in the data available to you that would be most relavent (this time you are not trying to minimize the features, but select the most appropriate ones), and the model/training mechanism you think would be most appropriate.\nFinally, present your thinking! Share with us a few (1-3) slides on Friday afternoon. If you have additional time, possibly train the model on baseline data!\n","html":"\u003ch2 id=\"part-1-ml-training-practice\"\u003ePart 1: ML Training Practice\u003c/h2\u003e\n\u003cp\u003eOne of the things that makes a very good Sommelier is their ability to figure out as much details about a wine as possible with very little information.\u003c/p\u003e\n\u003cp\u003eYou are tasked with making a Sommelier program that is able to figure both the type and quality of wine from available chemical information. Also, you have a \u0026ldquo;flavor-ater\u0026rdquo; machine that makes a linear combination of multiple chemical features together (similar to PCA), which is counted as one chemical feature after combination.\u003c/p\u003e\n\u003cp\u003eA good Sommelier uses as little information as possible to deduce the quality and type. So, what is the best model(s) you can build for predicting quality and type of wine based on the least amount of features? What features should you choose?\u003c/p\u003e\n\u003cp\u003eGood luck!\u003c/p\u003e\n\u003ch2 id=\"part-2-ml-project-walk-through\"\u003ePart 2: ML Project Walk-through\u003c/h2\u003e\n\u003cp\u003eCreate your own machine learning experiement! 
Begin with a problem in your field; go through the available/your own data, determine what type of problem it is, and discuss why machine learning could be a good solution for the problem. Research/quantify the baselines in the field for the task (remembering our discussion on ML validation methods), and determine a list of possible features of your data.\u003c/p\u003e\n\u003cp\u003eIf we were to help collect data together, how can we best collect a representative sample? How expensive (resources, monetary, or temporal) would it be? What are some ethical issues?\u003c/p\u003e\n\u003cp\u003eSelect the features in the data available to you that would be most relavent (this time you are not trying to minimize the features, but select the most appropriate ones), and the model/training mechanism you think would be most appropriate.\u003c/p\u003e\n\u003cp\u003eFinally, present your thinking! Share with us a few (1-3) slides on Friday afternoon. If you have additional time, possibly train the model on baseline data!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_final_project/","tags":null,"title":"AIBridge Final Project"},{"categories":null,"contents":"SPOILER ALERT for future labs!! Don\u0026rsquo;t scroll down!\nWe are going to create a copy of the iris dataset with a random variance.\nimport sklearn from sklearn.datasets import load_iris Let\u0026rsquo;s load the iris dataset:\nx,y = load_iris(return_X_y=True) Because we need to generate a lot of random data, let\u0026rsquo;s import random\nimport random Put this in a df\nimport pandas as pd df = pd.DataFrame(x) df 0 1 2 3 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 .. ... ... ... ... 
145 6.7 3.0 5.2 2.3 146 6.3 2.5 5.0 1.9 147 6.5 3.0 5.2 2.0 148 6.2 3.4 5.4 2.3 149 5.9 3.0 5.1 1.8 [150 rows x 4 columns] Let\u0026rsquo;s make 150 random numbers with pretty low variance:\nrandom_ns = [random.uniform(65,65.2) for _ in range(0, 150)] random_series = pd.Series(random_ns) random_series 0 65.127515 1 65.034572 2 65.123271 3 65.043985 4 65.145743 ... 145 65.036410 146 65.157172 147 65.034925 148 65.037373 149 65.042466 Length: 150, dtype: float64 Excellent. Now let\u0026rsquo;s put the two things together!\ndf[\u0026#34;temp\u0026#34;] = random_series df 0 1 2 3 temp 0 5.1 3.5 1.4 0.2 65.127515 1 4.9 3.0 1.4 0.2 65.034572 2 4.7 3.2 1.3 0.2 65.123271 3 4.6 3.1 1.5 0.2 65.043985 4 5.0 3.6 1.4 0.2 65.145743 .. ... ... ... ... ... 145 6.7 3.0 5.2 2.3 65.036410 146 6.3 2.5 5.0 1.9 65.157172 147 6.5 3.0 5.2 2.0 65.034925 148 6.2 3.4 5.4 2.3 65.037373 149 5.9 3.0 5.1 1.8 65.042466 [150 rows x 5 columns] And, while we are at it, let\u0026rsquo;s make new labels\nnames = pd.Series([\u0026#34;sepal length\u0026#34;, \u0026#34;sepal width\u0026#34;, \u0026#34;pedal length\u0026#34;, \u0026#34;pedal width\u0026#34;, \u0026#34;temp\u0026#34;]) df.columns = names df sepal length sepal width pedal length pedal width temp 0 5.1 3.5 1.4 0.2 65.127515 1 4.9 3.0 1.4 0.2 65.034572 2 4.7 3.2 1.3 0.2 65.123271 3 4.6 3.1 1.5 0.2 65.043985 4 5.0 3.6 1.4 0.2 65.145743 .. ... ... ... ... ... 145 6.7 3.0 5.2 2.3 65.036410 146 6.3 2.5 5.0 1.9 65.157172 147 6.5 3.0 5.2 2.0 65.034925 148 6.2 3.4 5.4 2.3 65.037373 149 5.9 3.0 5.1 1.8 65.042466 [150 rows x 5 columns] Excellent. Let\u0026rsquo;s finally get the flower results.\ndf[\u0026#34;species\u0026#34;] = y df sepal length sepal width pedal length pedal width temp species 0 5.1 3.5 1.4 0.2 65.127515 0 1 4.9 3.0 1.4 0.2 65.034572 0 2 4.7 3.2 1.3 0.2 65.123271 0 3 4.6 3.1 1.5 0.2 65.043985 0 4 5.0 3.6 1.4 0.2 65.145743 0 .. ... ... ... ... ... ... 
145 6.7 3.0 5.2 2.3 65.036410 2 146 6.3 2.5 5.0 1.9 65.157172 2 147 6.5 3.0 5.2 2.0 65.034925 2 148 6.2 3.4 5.4 2.3 65.037373 2 149 5.9 3.0 5.1 1.8 65.042466 2 [150 rows x 6 columns] And dump it to a CSV.\ndf.to_csv(\u0026#34;./iris_variance.csv\u0026#34;, index=False) Let\u0026rsquo;s select for the input data again:\nX = df.iloc[:,0:5] y = df.iloc[:,5] X sepal length sepal width pedal length pedal width temp 0 5.1 3.5 1.4 0.2 65.127515 1 4.9 3.0 1.4 0.2 65.034572 2 4.7 3.2 1.3 0.2 65.123271 3 4.6 3.1 1.5 0.2 65.043985 4 5.0 3.6 1.4 0.2 65.145743 .. ... ... ... ... ... 145 6.7 3.0 5.2 2.3 65.036410 146 6.3 2.5 5.0 1.9 65.157172 147 6.5 3.0 5.2 2.0 65.034925 148 6.2 3.4 5.4 2.3 65.037373 149 5.9 3.0 5.1 1.8 65.042466 [150 rows x 5 columns] And use the variance threshold tool:\nfrom sklearn.feature_selection import VarianceThreshold sel = VarianceThreshold(0.1) sel.fit_transform(X) 5.1 3.5 1.4 0.2 4.9 3 1.4 0.2 4.7 3.2 1.3 0.2 4.6 3.1 1.5 0.2 5 3.6 1.4 0.2 5.4 3.9 1.7 0.4 4.6 3.4 1.4 0.3 \u0026hellip;\nAs we expected.\nAnd let\u0026rsquo;s use the select k best tool:\nfrom sklearn.feature_selection import SelectKBest, chi2 sel = SelectKBest(chi2, k=4) res = sel.fit_transform(X, y) res 5.1 3.5 1.4 0.2 4.9 3 1.4 0.2 4.7 3.2 1.3 0.2 4.6 3.1 1.5 0.2 5 3.6 1.4 0.2 5.4 3.9 1.7 0.4 4.6 3.4 1.4 0.3 5 3.4 1.5 0.2 \u0026hellip;\nAlso, as we expected. Got rid of temp.\n","html":"\u003cp\u003eSPOILER ALERT for future labs!! 
Don\u0026rsquo;t scroll down!\u003c/p\u003e\n\u003cp\u003eWe are going to create a copy of the iris dataset with a random variance.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s load the iris dataset:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBecause we need to generate a lot of random data, let\u0026rsquo;s import random\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ePut this in a df\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0 1 2 3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s make 150 random numbers with pretty low variance:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erandom_ns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003euniform\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e65\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e65.2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e150\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erandom_series\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom_ns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erandom_series\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Now let\u0026rsquo;s put the two things together!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;temp\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erandom_series\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0 1 2 3 
temp\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, while we are at it, let\u0026rsquo;s make new labels\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003enames\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;sepal length\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;sepal width\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;pedal length\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;pedal width\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;temp\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enames\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length sepal width pedal length pedal width 
temp\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
Let\u0026rsquo;s finally get the flower results.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;species\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length sepal width pedal length pedal width temp species\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd dump it to a CSV.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./iris_variance.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s select for the input data 
again:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length sepal width pedal length pedal width 
temp\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd use the variance threshold tool:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.feature_selection\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eVarianceThreshold\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eVarianceThreshold\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit_transform\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e5.1\u003c/th\u003e\n\u003cth\u003e3.5\u003c/th\u003e\n\u003cth\u003e1.4\u003c/th\u003e\n\u003cth\u003e0.2\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.9\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.7\u003c/td\u003e\n\u003ctd\u003e3.2\u003c/td\u003e\n\u003ctd\u003e1.3\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.1\u003c/td\u003e\n\u003ctd\u003e1.5\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e3.6\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.4\u003c/td\u003e\n\u003ctd\u003e3.9\u003c/td\u003e\n\u003ctd\u003e1.7\u003c/td\u003e\n\u003ctd\u003e0.4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.4\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eAs we expected.\u003c/p\u003e\n\u003cp\u003eAnd let\u0026rsquo;s use the select k best tool:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003esklearn.feature_selection\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echi2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echi2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit_transform\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eres\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e5.1\u003c/th\u003e\n\u003cth\u003e3.5\u003c/th\u003e\n\u003cth\u003e1.4\u003c/th\u003e\n\u003cth\u003e0.2\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.9\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.7\u003c/td\u003e\n\u003ctd\u003e3.2\u003c/td\u003e\n\u003ctd\u003e1.3\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.1\u003c/td\u003e\n\u003ctd\u003e1.5\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e3.6\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.4\u003c/td\u003e\n\u003ctd\u003e3.9\u003c/td\u003e\n\u003ctd\u003e1.7\u003c/td\u003e\n\u003ctd\u003e0.4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.4\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.3\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e3.4\u003c/td\u003e\n\u003ctd\u003e1.5\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eAlso, as we expected. Got rid of temp.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_iris_variance_worksheet/","tags":null,"title":"AIBridge Iris Variance Worksheet"},{"categories":null,"contents":"This is usually not needed if you are using Google Colab. 
If you are following the instructions provided during our lecture series, please disregard this page.\nHowever, students have expressed interest in working with their own system\u0026rsquo;s copy of Jupyter or local installation. We therefore provide a set of very tenuous instructions for installing the tools used in our session using vanilla C-Python (i.e. not anaconda/conda/miniconda.)\nPython Our tools target Python 3.8+. Use your system\u0026rsquo;s package manager to install Python at least version 3.8, or use Python Foundation\u0026rsquo;s universal installers.\nPackages Python sometimes ships pip, its packaging utility separately. Refer to your own distribution\u0026rsquo;s installation instructions if none of pip or pip3 or python -m pip or python -m pip.\nOnce your copy of pip has been identified, let\u0026rsquo;s move on to\u0026hellip;\nInstalling Packages Here are the packages we will need for our sessions:\nscikit-learn pandas numpy Along with its respective dependencies. Here\u0026rsquo;s a one-liner:\npython3 -m pip install scikit-learn pandas numpy Good luck!\n","html":"\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThis is usually not needed if you are using \u003ca href=\"https://colab.research.google.com/\"\u003eGoogle Colab\u003c/a\u003e.\u003c/strong\u003e\u003c/strong\u003e If you are following the instructions provided during our lecture series, please disregard this page.\u003c/p\u003e\n\u003cp\u003eHowever, students have expressed interest in working with their own system\u0026rsquo;s copy of Jupyter or local installation. We therefore provide a set of very tenuous instructions for installing the tools used in our session using \u003cem\u003evanilla C-Python\u003c/em\u003e (i.e. not anaconda/conda/miniconda.)\u003c/p\u003e\n\u003ch2 id=\"python\"\u003ePython\u003c/h2\u003e\n\u003cp\u003eOur tools target Python 3.8+. 
Use your system\u0026rsquo;s package manager to install Python at least version 3.8, or use \u003ca href=\"https://www.python.org/downloads/\"\u003ePython Foundation\u0026rsquo;s\u003c/a\u003e universal installers.\u003c/p\u003e\n\u003ch2 id=\"packages\"\u003ePackages\u003c/h2\u003e\n\u003cp\u003ePython sometimes ships \u003ccode\u003epip\u003c/code\u003e, its packaging utility separately. Refer to your own distribution\u0026rsquo;s installation instructions if none of \u003ccode\u003epip\u003c/code\u003e or \u003ccode\u003epip3\u003c/code\u003e or \u003ccode\u003epython -m pip\u003c/code\u003e or \u003ccode\u003epython -m pip\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eOnce your copy of pip has been identified, let\u0026rsquo;s move on to\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"installing-packages\"\u003eInstalling Packages\u003c/h2\u003e\n\u003cp\u003eHere are the packages we will need for our sessions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003escikit-learn\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003epandas\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003enumpy\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlong with its respective dependencies. Here\u0026rsquo;s a one-liner:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epython3 -m pip install scikit-learn pandas numpy\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGood luck!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_packages/","tags":null,"title":"AIBridge Packages and Tools"},{"categories":null,"contents":"Rewa Rai Nitin Lab, Dept. 
of Food Sci + Tech - Davis\nWine Classification Task Whole data:\nDecision Tree: 98.46% Random Forest: 99.84% Gaussian NB: 97.08% Regression Task Feature selection with 2 best features actually improved.\nTalkthrough Detecting berry infection by leaf classification. Use FTIR spectroscopy as a means of infection classification.\nTana Hernandez PHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\nTalkthrough Given input for reaction, predict resulting gell strength from protein+carbo+lactic acid.\nGoal to figure out what features are o predict gell formation. Use feature extraction to reduce the need of doing.\nWet lab task: use high-throughput 96 hole plates to measuring kinetics of absorborance and kinetics. In a single hour, 96 data points can be acquired.\nThen, droplet elements are added to the plates.\nModel: take feature inputs which was selected, classification on gell formation and regression for time for gell.\nJimmy Nguyen PHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\nTalk through Need: creating plant-based products which just feels and tastes like actual meet based food.\nTask: given molecular information, classify taste based on like-product and unlike\nLuyao Ma Postdoc Researcher, Nitin Lab, Dept. of Food Sci + Tech - Davis\nTalk thought Problem: lots of antimicrobian resistance in food: on track for 10 million deaths due to antimicrobial resistance. This is caused by antibiotics given to animals, which then is given indirectly to humans. Humans gut bactorials became more more resistant to antibiotics due to antimicrobial bacterial deveolping in animal guts.\nCurrent surveilance systems for antibiotic bacteria: require centralized lab for analysis, data collection is slow, and data integration is very slow (2ish years to publish final results), protocol also changes.\nGoal: rapid in field automatic detection scheme\nExpose wells of bacterial to detect color intensity\n? 
PHD Student, USDA\nWine Naive bayes (6 RFE features); XE Boost Random Forest + Search with 9 features\nTalkthrough Dietary data Random calls Interested in gut miocrobiome influences. Goal: which factors to predict CAZyme dyvirsetiy?\nRandom forest regression Need for prediction for which features: use Shapley Addadtive for result intepretation.\nYue Wine OH WOWO\nReg:\n99.98 train, 59.788 test.\nBalanced dataset Sequential feature selection PCA -\u0026gt; 3 features Random Forest Something else: ExhaustiveFeatureSelector\nClsf:\nstill 4 features.\nTalkthrough Deep learning, CV applications.\nNutrition product validation so far is entirely manual; current work in bias are mostly political, so finding a ground truth is difficult.\nSupervised is probability difficult; getting the data and cluster.\nSriya Sunil PhD Food Science, Cornell\nWine Decision tree classifier; resulted in 7 features.\n99.97% train, 97.08% test.\nSupport Vector Regression; resulted in 7 features as well.\n39.25% train, 32.79% test.\nTalkthrough Microbial growth on baby spinach. Features: initial counts, prevalence of bacteria, growth of bacteria.\nOutput regression to time to spoilage\n","html":"\u003ch2 id=\"rewa-rai\"\u003eRewa Rai\u003c/h2\u003e\n\u003cp\u003eNitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003ch4 id=\"classification-task\"\u003eClassification Task\u003c/h4\u003e\n\u003cp\u003eWhole data:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDecision Tree: 98.46%\u003c/li\u003e\n\u003cli\u003eRandom Forest: 99.84%\u003c/li\u003e\n\u003cli\u003eGaussian NB: 97.08%\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"regression-task\"\u003eRegression Task\u003c/h4\u003e\n\u003cp\u003eFeature selection with 2 best features actually improved.\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eDetecting berry infection by leaf classification. 
Use FTIR spectroscopy as a means of infection classification.\u003c/p\u003e\n\u003ch2 id=\"tana-hernandez\"\u003eTana Hernandez\u003c/h2\u003e\n\u003cp\u003ePHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eGiven input for reaction, predict resulting gell strength from protein+carbo+lactic acid.\u003c/p\u003e\n\u003cp\u003eGoal to figure out what features are o predict gell formation. Use feature extraction to reduce the need of doing.\u003c/p\u003e\n\u003cp\u003eWet lab task: use high-throughput 96 hole plates to measuring kinetics of absorborance and kinetics. In a single hour, 96 data points can be acquired.\u003c/p\u003e\n\u003cp\u003eThen, droplet elements are added to the plates.\u003c/p\u003e\n\u003cp\u003eModel: take feature inputs which was selected, classification on gell formation and regression for time for gell.\u003c/p\u003e\n\u003ch2 id=\"jimmy-nguyen\"\u003eJimmy Nguyen\u003c/h2\u003e\n\u003cp\u003ePHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"talk-through\"\u003eTalk through\u003c/h3\u003e\n\u003cp\u003eNeed: creating plant-based products which just feels and tastes like actual meet based food.\u003c/p\u003e\n\u003cp\u003eTask: given molecular information, classify taste based on like-product and unlike\u003c/p\u003e\n\u003ch2 id=\"luyao-ma\"\u003eLuyao Ma\u003c/h2\u003e\n\u003cp\u003ePostdoc Researcher, Nitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"talk-thought\"\u003eTalk thought\u003c/h3\u003e\n\u003cp\u003eProblem: lots of antimicrobian resistance in food: on track for 10 million deaths due to antimicrobial resistance. This is caused by antibiotics given to animals, which then is given indirectly to humans. 
Humans gut bactorials became more more resistant to antibiotics due to antimicrobial bacterial deveolping in animal guts.\u003c/p\u003e\n\u003cp\u003eCurrent surveilance systems for antibiotic bacteria: require centralized lab for analysis, data collection is slow, and data integration is very slow (2ish years to publish final results), protocol also changes.\u003c/p\u003e\n\u003cp\u003eGoal: rapid in field automatic detection scheme\u003c/p\u003e\n\u003cp\u003eExpose wells of bacterial to detect color intensity\u003c/p\u003e\n\u003ch2 id=\"d1457b\"\u003e?\u003c/h2\u003e\n\u003cp\u003ePHD Student, USDA\u003c/p\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003cp\u003eNaive bayes (6 RFE features); XE Boost Random Forest + Search with 9 features\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDietary data\u003c/li\u003e\n\u003cli\u003eRandom calls\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eInterested in gut miocrobiome influences. 
Goal: which factors to predict CAZyme dyvirsetiy?\u003c/p\u003e\n\u003cp\u003eRandom forest regression\nNeed for prediction for which features: use Shapley Addadtive for result intepretation.\u003c/p\u003e\n\u003ch2 id=\"yue\"\u003eYue\u003c/h2\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003cp\u003eOH WOWO\u003c/p\u003e\n\u003cp\u003eReg:\u003c/p\u003e\n\u003cp\u003e99.98 train, 59.788 test.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBalanced dataset\u003c/li\u003e\n\u003cli\u003eSequential feature selection\u003c/li\u003e\n\u003cli\u003ePCA -\u0026gt; 3 features\u003c/li\u003e\n\u003cli\u003eRandom Forest\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSomething else: \u003ccode\u003eExhaustiveFeatureSelector\u003c/code\u003e\u003c/p\u003e\n\u003cp\u003eClsf:\u003c/p\u003e\n\u003cp\u003estill 4 features.\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eDeep learning, CV applications.\u003c/p\u003e\n\u003cp\u003eNutrition product validation so far is entirely manual; current work in bias are mostly political, so finding a ground truth is difficult.\u003c/p\u003e\n\u003cp\u003eSupervised is probability difficult; getting the data and cluster.\u003c/p\u003e\n\u003ch2 id=\"sriya-sunil\"\u003eSriya Sunil\u003c/h2\u003e\n\u003cp\u003ePhD Food Science, Cornell\u003c/p\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003cp\u003eDecision tree classifier; resulted in 7 features.\u003c/p\u003e\n\u003cp\u003e99.97% train, 97.08% test.\u003c/p\u003e\n\u003cp\u003eSupport Vector Regression; resulted in 7 features as well.\u003c/p\u003e\n\u003cp\u003e39.25% train, 32.79% test.\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eMicrobial growth on baby spinach. 
Features: initial counts, prevalence of bacteria, growth of bacteria.\u003c/p\u003e\n\u003cp\u003eOutput regression to time to spoilage\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_student_presentations/","tags":null,"title":"AIBridge Student Presentations"},{"categories":null,"contents":"Welcome to the Day-2 Afternoon Lab! We are super excited to work through tasks in linear regression and logistic regression, as well as familiarize you with the Iris dataset.\nIris Dataset Let\u0026rsquo;s load the Iris dataset! Begin by importing the load_iris tool from sklearn. This is an easy loader scheme for the iris dataset.\nfrom sklearn.datasets import load_iris Then, we simply execute the following to load the data.\nx,y = load_iris(return_X_y=True) We use the return_X_y argument here so that, instead of dumping a large CSV, we get the neat-cleaned input and output values.\nLet\u0026rsquo;s inspect this data a little.\nx[0] 5.1 3.5 1.4 0.2 We can see that each sample of the data is a vector in \\(\\mathbb{R}^4\\). They correspond to four attributes:\nseptal length septal width pedal length pedal width What\u0026rsquo;s the output?\ny[0] 0 We can actually see all the possible values of the output by putting it into a set.\nset(y) 0 1 2 There are three different classes of outputs.\nIris Setosa Iris Versicolour Iris Virginica Excellent. So we can see that we have a dataset of four possible inputs and one possible output. Let\u0026rsquo;s see what we can do with it.\nLogistic Regression The simplest thing we can do is a logistic regression. We have a there categories for output and a lot of data for input. Let\u0026rsquo;s figure out if we can predict the output from the input!\nLet\u0026rsquo;s import logistic regression tool first, and instantiate it.\nfrom sklearn.linear_model import LogisticRegression reg = LogisticRegression() We will \u0026ldquo;fit\u0026rdquo; the data to the model: adjusting the model to best represent the data. 
Our data has 150 samples, so let\u0026rsquo;s fit the data on 140 of them.\ntesting_samples_x = x[-5:] testing_samples_y = y[-5:] x = x[:-5] y = y[:-5] Wonderful. Let\u0026rsquo;s fit the data onto the model.\nreg = reg.fit(x,y) Let\u0026rsquo;s go ahead and run the model on our 10 testing samples!\npredicted_y = reg.predict(testing_samples_x) predicted_y 2 2 2 2 2 And, let\u0026rsquo;s figure out what our actual results say:\ntesting_samples_y 2 2 2 2 2 Woah! That\u0026rsquo;s excellent.\nLinear Regression Instead of predicting the output classes, we can predict some values from the output. How about if we used septal length, width, and pedal length to predict petal width? The output now is a number, not some classes, which calls for linear regression!\nLet\u0026rsquo;s import linear regression tool first, and instantiate it.\nfrom sklearn.linear_model import LinearRegression reg = LinearRegression() We will \u0026ldquo;fit\u0026rdquo; the data to the model again. As we have cleaned out the testing_samples, we simply need to split out the fourth column for the new x and y:\nnew_x = x[:,:3] new_y = x[:,3] new_testing_samples_y = testing_samples_x[:,3] new_testing_samples_x = testing_samples_x[:,:3] Taking now our newly parsed data, let\u0026rsquo;s fit it to a linear model.\nreg = reg.fit(new_x,new_y) Let\u0026rsquo;s go ahead and run the model on our 10 testing samples!\nnew_predicted_y = reg.predict(new_testing_samples_x) new_predicted_y 1.7500734 1.61927061 1.79218767 2.04824364 1.86638164 And, let\u0026rsquo;s figure out what our actual results say:\nnew_testing_samples_y 2.3 1.9 2 2.3 1.8 Close on some samples, not quite there on others. How good does our model actually do? We can use .score() to figure out the \\(r^2\\) value of our line on some data.\nreg.score(new_x, new_y) 0.9405617534915884 Evidently, it seems like about \\(94\\%\\) of the variation in our output data can be explained by the input features. 
This means that the relationship between septals are not exactly a linear pattern!\nNow you try Download the wine quality dataset Predict the quality of wine given its chemical metrics Predict if its red or white wine given its chemical metrics Vary the amount of data used to .fit the model, how does that influence the results? Vary the amount in each \u0026ldquo;class\u0026rdquo; (red wine, white wine) to fit the model, how much does that influence the results. ","html":"\u003cp\u003eWelcome to the Day-2 Afternoon Lab! We are super excited to work through tasks in linear regression and logistic regression, as well as familiarize you with the Iris dataset.\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s load the Iris dataset! Begin by importing the \u003ccode\u003eload_iris\u003c/code\u003e tool from \u003ccode\u003esklearn\u003c/code\u003e. This is an easy loader scheme for the iris dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, we simply execute the following to load the data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the \u003ccode\u003ereturn_X_y\u003c/code\u003e argument here so that, instead of dumping a large \u003ccode\u003eCSV\u003c/code\u003e, we get the neat-cleaned input and output values.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s inspect this data a little.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.1\u003c/td\u003e\n\u003ctd\u003e3.5\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe can see 
that each \u003ccode\u003esample\u003c/code\u003e of the data is a vector in \\(\\mathbb{R}^4\\). They correspond to four attributes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eseptal length\u003c/li\u003e\n\u003cli\u003eseptal width\u003c/li\u003e\n\u003cli\u003epedal length\u003c/li\u003e\n\u003cli\u003epedal width\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat\u0026rsquo;s the output?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can actually see all the possible values of the output by putting it into a set.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eThere are three different \u003ccode\u003eclasses\u003c/code\u003e of outputs.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIris Setosa\u003c/li\u003e\n\u003cli\u003eIris Versicolour\u003c/li\u003e\n\u003cli\u003eIris Virginica\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eExcellent. So we can see that we have a dataset of four possible inputs and one possible output. Let\u0026rsquo;s see what we can do with it.\u003c/p\u003e\n\u003ch2 id=\"logistic-regression\"\u003eLogistic Regression\u003c/h2\u003e\n\u003cp\u003eThe simplest thing we can do is a logistic regression. We have a there \u003cem\u003ecategories\u003c/em\u003e for output and a lot of data for input. 
Let\u0026rsquo;s figure out if we can predict the output from the input!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s import logistic regression tool first, and instantiate it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.linear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLogisticRegression\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLogisticRegression\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will \u0026ldquo;fit\u0026rdquo; the data to the model: adjusting the model to best represent the data. 
Our data has 150 samples, so let\u0026rsquo;s fit the data on 140 of them.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etesting_samples_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful. Let\u0026rsquo;s fit the data onto the model.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s go ahead and run the model on our 10 testing samples!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epredicted_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003epredict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epredicted_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAnd, let\u0026rsquo;s figure out what our actual results say:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003etesting_samples_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWoah! That\u0026rsquo;s excellent.\u003c/p\u003e\n\u003ch2 id=\"linear-regression\"\u003eLinear Regression\u003c/h2\u003e\n\u003cp\u003eInstead of predicting the output \u003cem\u003eclasses\u003c/em\u003e, we can predict some values from the output. How about if we used septal length, width, and pedal length to predict petal width? The output now is a number, not some classes, which calls for linear regression!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s import linear regression tool first, and instantiate it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.linear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLinearRegression\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eLinearRegression\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will \u0026ldquo;fit\u0026rdquo; the data to the model again. As we have cleaned out the \u003ccode\u003etesting_samples\u003c/code\u003e, we simply need to split out the fourth column for the new x and y:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_testing_samples_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_testing_samples_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTaking now our newly parsed data, let\u0026rsquo;s fit it to a linear model.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_y\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s go ahead and run the model on our 10 testing samples!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_predicted_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epredict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_testing_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_predicted_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e1.7500734\u003c/td\u003e\n\u003ctd\u003e1.61927061\u003c/td\u003e\n\u003ctd\u003e1.79218767\u003c/td\u003e\n\u003ctd\u003e2.04824364\u003c/td\u003e\n\u003ctd\u003e1.86638164\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAnd, let\u0026rsquo;s figure out what our actual results say:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003enew_testing_samples_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2.3\u003c/td\u003e\n\u003ctd\u003e1.9\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2.3\u003c/td\u003e\n\u003ctd\u003e1.8\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eClose on some samples, not quite there on others. How good does our model actually do? We can use \u003ccode\u003e.score()\u003c/code\u003e to figure out the \\(r^2\\) value of our line on some data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enew_y\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e0.9405617534915884\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, it seems like about \\(94\\%\\) of the variation in our output data can be explained by the input features. This means that the relationship between septals are not \u003cem\u003eexactly\u003c/em\u003e a linear pattern!\u003c/p\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow you try\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDownload the \u003ca href=\"https://archive.ics.uci.edu/ml/datasets/Wine+Quality\"\u003ewine quality dataset\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ePredict the quality of wine given its chemical metrics\u003c/li\u003e\n\u003cli\u003ePredict if its red or white wine given its chemical metrics\u003c/li\u003e\n\u003cli\u003eVary the amount of data used to .fit the model, how does that influence the results?\u003c/li\u003e\n\u003cli\u003eVary the amount in each \u0026ldquo;class\u0026rdquo; (red wine, white wine) to fit the model, how much does that influence the results.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d1aft/","tags":null,"title":"AIBridgeLab D2Aft"},{"categories":null,"contents":"Woah! We talked about a lot of different ways of doing classification today! Let\u0026rsquo;s see what we can do about this for the Iris dataset!\nIris Dataset Let\u0026rsquo;s load the Iris dataset! Begin by importing the load_iris tool from sklearn. This is an easy loader scheme for the iris dataset.\nfrom sklearn.datasets import load_iris Then, we simply execute the following to load the data.\nx,y = load_iris(return_X_y=True) We use the return_X_y argument here so that, instead of dumping a large CSV, we get the neat-cleaned input and output values.\nA reminder that there is three possible flowers that we can sort by.\nDecision Trees Scikit learn has great facilities for using decision trees for classification! 
Let\u0026rsquo;s use some of them by fitting to the Iris dataset.\nLet us begin by importing the SciKit learn tree system:\nfrom sklearn.tree import DecisionTreeClassifier We will fit and instantiate this classifier and fit it to the data exactly!\nclf = DecisionTreeClassifier() clf = clf.fit(x,y) One cool thing about decision trees is that we can actually see what its doing! by looking at the series of splits and decisions. This is a function provided by tree too.\n# We first import the plotting utility from matplotlib import matplotlib.pyplot as plt # as well as the tree plotting tool from sklearn.tree import plot_tree # We call the tree plot tool, which puts it on teh matplotlib graph for side effects plot_tree(clf) # And we save the figure plt.savefig(\u0026#34;tree.png\u0026#34;) Cool! As you can see, by the end of the entire graph, the gini impurity of each node has been sorted to 0.\nApparently, if the third feature (pedal length) is smaller that 2.45, it is definitely the first type of flower!\nCan you explain the rest of the divisions?\nThere are some arguments available in .fit of a DecisionTreeClassifier which controls for when splitting ends; for instance, max_depth controls the maximum depth by which the tree can go.\nExtra Addition! Random Forests. If you recall, we make the initial splitting decisions fairly randomly, and simply select the one with the lowest Ginni impurity. Of course, this makes the selection of the initial sets of splits very important.\nWhat if, instead of needing to make a decision about that now, we can just deal with it later? Well, that\u0026rsquo;s where the addition of Random Forests come in.\nAs the name suggests, instead of having one great tree that does a \u0026ldquo;pretty good\u0026rdquo; job, we can have a lot of trees acting in ensemble! 
We can randomly start a bunch of random trees, and pick the selection that most would correspond with.\nRandom forests come from the ensemble package from sklearn; we can use it fairly simply:\nfrom sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() Wonderful! I bet you can guess what the syntax is. Instead of fitting on the whole dataset, though, we will fit on the first 145 items.\nclf = clf.fit(x[:-5],y[:-5]) We can go ahead and run predict on some samples, just to see how it does on data it has not already seen before!\nclf.score(x[-5:], y[-5:]) 1.0 As you can see, it still does pretty well!\nSVM Let\u0026rsquo;s put another classification technique we learned today to use! Support Vector Machines. The entire syntax to manipulate support vector machines is very simple; at this point, you can probably guess it in yours sleep :)\nLet\u0026rsquo;s import a SVM:\nfrom sklearn import svm Great. Now, we will instantiate it and fit it onto the data. SVC is the support-vector machine classifier.\nclf = svm.SVC() clf.fit(x,y) Excellent, now, let\u0026rsquo;s score our predictions:\nclf.score(x,y) 0.9733333333333334 As you can see, our data is not entirely linear! Fitting our entire dataset onto a linear SVM didn\u0026rsquo;t score perfectly, which means that the model is not complex enough to support our problem.\nScikit\u0026rsquo;s support vector machine supports lots of nonlinearity function; this is set by the argument kernel. For instance, if we wanted a nonlinear, exponential function kernel (where nonlinear function \\(f(x,x\u0026rsquo;)= e^{-\\gamma||\\big\u0026lt;x,x\u0026rsquo;\\big\u0026gt;||^2}\\)), we can say:\nclf = svm.SVC(kernel=\u0026#34;rbf\u0026#34;) clf.fit(x,y) clf.score(x,y) 0.9733333333333334 Looks like our results are fairly similar, though.\nNaive Bayes One last one! Its Bayes time. 
Let\u0026rsquo;s first take a look at how an Naive Bayes implementation can be done via Scikit learn.\nOne of the things that the Scikit Learn Naive Bayes estimator does differently than the one that we learned via probabilities is that it assumes that\u0026mdash;instead of a uniform distribution (and therefore \u0026ldquo;chance of occurrence\u0026rdquo; is just occurrence divided by count), our samples are normally distributed. Therefore, we have that\n\\begin{equation} P(x_i | y) = \\frac{1}{\\sqrt{2\\pi{\\sigma^2}_y}}e^{\\left(-\\frac{(x_i-\\mu_y)^2}{2{\\sigma^2}_y}\\right)} \\end{equation}\nWe can instantiate such a model with the same exact syntax.\nfrom sklearn.naive_bayes import GaussianNB clf = GaussianNB() clf = clf.fit(x,y) Let\u0026rsquo;s see how it does!\nclf.score(x,y) 0.96 Same thing as before, it seems simple probabilities can\u0026rsquo;t model our relationship super well. However, this is still a fairly accurate and powerful classifier.\nNow you try! Try all three classifiers on the Wine dataset for red-white divide! Which one does better on generalizing to data you haven\u0026rsquo;t seen before? Explain the results of the decision trees trained on the Wine data by plotting it. Is there anything interesting that the tree used as a heuristic that came up? The probabilistic, uniform Naive-Bayes is fairly simple to implement write if we are using the traditional version of the Bayes theorem. Can you use Pandas to implement one yourself? ","html":"\u003cp\u003eWoah! We talked about a lot of different ways of doing classification today! Let\u0026rsquo;s see what we can do about this for the Iris dataset!\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s load the Iris dataset! Begin by importing the \u003ccode\u003eload_iris\u003c/code\u003e tool from \u003ccode\u003esklearn\u003c/code\u003e. 
This is an easy loader scheme for the iris dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, we simply execute the following to load the data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the \u003ccode\u003ereturn_X_y\u003c/code\u003e argument here so that, instead of dumping a large \u003ccode\u003eCSV\u003c/code\u003e, we get the neat-cleaned input and output values.\u003c/p\u003e\n\u003cp\u003eA reminder that there is three possible flowers that 
we can sort by.\u003c/p\u003e\n\u003ch2 id=\"decision-trees\"\u003eDecision Trees\u003c/h2\u003e\n\u003cp\u003eScikit learn has great facilities for using decision trees for classification! Let\u0026rsquo;s use some of them by fitting to the Iris dataset.\u003c/p\u003e\n\u003cp\u003eLet us begin by importing the SciKit learn tree system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.tree\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDecisionTreeClassifier\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will fit and instantiate this classifier and fit it to the data exactly!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDecisionTreeClassifier\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOne cool thing about decision trees is that we can actually see what its \u003cem\u003edoing!\u003c/em\u003e by looking at the series of splits and decisions. This is a function provided by tree too.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# We first import the plotting utility from matplotlib\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# as well as the tree plotting tool\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.tree\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot_tree\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# We call the tree plot tool, which puts it on teh matplotlib graph for side effects\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplot_tree\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# And we save the figure\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esavefig\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;tree.png\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/tree.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCool! 
As you can see, by the end of the entire graph, the gini impurity of each node has been sorted to 0.\u003c/p\u003e\n\u003cp\u003eApparently, if the third feature (pedal length) is smaller that 2.45, it is definitely the first type of flower!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_11-46-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCan you explain the rest of the divisions?\u003c/p\u003e\n\u003cp\u003eThere are some arguments available in \u003ccode\u003e.fit\u003c/code\u003e of a \u003ccode\u003eDecisionTreeClassifier\u003c/code\u003e which controls for when splitting ends; for instance, \u003ccode\u003emax_depth\u003c/code\u003e controls the maximum depth by which the tree can go.\u003c/p\u003e\n\u003ch2 id=\"extra-addition-random-forests-dot\"\u003eExtra Addition! Random Forests.\u003c/h2\u003e\n\u003cp\u003eIf you recall, we make the initial splitting decisions fairly randomly, and simply select the one with the lowest Ginni impurity. Of course, this makes the selection of the initial sets of splits very important.\u003c/p\u003e\n\u003cp\u003eWhat if, instead of needing to make a decision about that now, we can just deal with it later? 
Well, that\u0026rsquo;s where the addition of Random Forests come in.\u003c/p\u003e\n\u003cp\u003eAs the name suggests, instead of having one great tree that does a \u0026ldquo;pretty good\u0026rdquo; job, we can have a lot of trees acting in \u003cem\u003eensemble!\u003c/em\u003e We can randomly start a bunch of random trees, and pick the selection that most would correspond with.\u003c/p\u003e\n\u003cp\u003eRandom forests come from the ensemble package from \u003ccode\u003esklearn\u003c/code\u003e; we can use it fairly simply:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.ensemble\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful! I bet you can guess what the syntax is. 
Instead of fitting on the whole dataset, though, we will fit on the first 145 items.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can go ahead and run predict on some samples, just to see how it does on data it has not already seen before!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1.0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAs you can see, it still does pretty well!\u003c/p\u003e\n\u003ch2 id=\"svm\"\u003eSVM\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s put another classification technique we learned today to use! Support Vector Machines. 
The entire syntax to manipulate support vector machines is very simple; at this point, you can probably guess it in yours sleep :)\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s import a SVM:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esvm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat. Now, we will instantiate it and fit it onto the data. \u003ccode\u003eSVC\u003c/code\u003e is the support-vector machine classifier.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esvm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent, now, let\u0026rsquo;s score our predictions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.9733333333333334\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAs you can see, our data is not entirely linear! Fitting our entire dataset onto a linear SVM didn\u0026rsquo;t score perfectly, which means that the model is not complex enough to support our problem.\u003c/p\u003e\n\u003cp\u003eScikit\u0026rsquo;s support vector machine supports lots of nonlinearity function; this is set by the argument \u003ccode\u003ekernel\u003c/code\u003e. 
For instance, if we wanted a nonlinear, exponential function kernel (where nonlinear function \\(f(x,x\u0026rsquo;)= e^{-\\gamma||\\big\u0026lt;x,x\u0026rsquo;\\big\u0026gt;||^2}\\)), we can say:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esvm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekernel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;rbf\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.9733333333333334\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLooks like our results are fairly similar, though.\u003c/p\u003e\n\u003ch2 id=\"naive-bayes\"\u003eNaive Bayes\u003c/h2\u003e\n\u003cp\u003eOne last one! Its Bayes time. Let\u0026rsquo;s first take a look at how an Naive Bayes implementation can be done via Scikit learn.\u003c/p\u003e\n\u003cp\u003eOne of the things that the Scikit Learn Naive Bayes estimator does differently than the one that we learned via probabilities is that it assumes that\u0026mdash;instead of a uniform distribution (and therefore \u0026ldquo;chance of occurrence\u0026rdquo; is just occurrence divided by count), our samples are normally distributed. 
Therefore, we have that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_i | y) = \\frac{1}{\\sqrt{2\\pi{\\sigma^2}_y}}e^{\\left(-\\frac{(x_i-\\mu_y)^2}{2{\\sigma^2}_y}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can instantiate such a model with the same exact syntax.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.naive_bayes\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eGaussianNB\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eGaussianNB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s see how it does!\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.96\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSame thing as before, it seems simple probabilities can\u0026rsquo;t model our relationship super well. However, this is still a fairly accurate and powerful classifier.\u003c/p\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow you try!\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTry all three classifiers on the Wine dataset for red-white divide! Which one does better on generalizing to data you haven\u0026rsquo;t seen before?\u003c/li\u003e\n\u003cli\u003eExplain the results of the decision trees trained on the Wine data by plotting it. Is there anything interesting that the tree used as a heuristic that came up?\u003c/li\u003e\n\u003cli\u003eThe \u003cem\u003eprobabilistic\u003c/em\u003e, uniform Naive-Bayes is fairly simple to implement write if we are using the traditional version of the Bayes theorem. 
Can you use Pandas to implement one yourself?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d3_d4/","tags":null,"title":"AIBridgeLab D3/D4"},{"categories":null,"contents":"Welcome to the Day-3 Morning Lab! We are glad for you to join us. Today, we are learning about how Pandas, a data manipulation tool, works, and working on cleaning some data of your own!\nIris Dataset We are going to lead the Iris dataset from sklearn again. This time, however, we will load the full dataset and parse it ourselves (instead of using return_X_y.)\nLet\u0026rsquo;s begin by importing the Iris dataset, as we expect.\nfrom sklearn.datasets import load_iris And, load the dataset to see what it looks like.\niris = load_iris() iris.keys() dict_keys([\u0026#39;data\u0026#39;, \u0026#39;target\u0026#39;, \u0026#39;frame\u0026#39;, \u0026#39;target_names\u0026#39;, \u0026#39;DESCR\u0026#39;, \u0026#39;feature_names\u0026#39;, \u0026#39;filename\u0026#39;, \u0026#39;data_module\u0026#39;]) We have a pretty large dictionary full of information! Let\u0026rsquo;s pull out data (our input data), target (our output data), and feature_names, the names of our feature.\niris_in = iris[\u0026#34;data\u0026#34;] iris_out = iris[\u0026#34;target\u0026#34;] iris_names = iris[\u0026#34;feature_names\u0026#34;] Data Manipulation pandas is a very helpful utility that allow us to see into data more conveniently. The object that we are usually working with, when using pandas, is called a DataFrame. We can actually create a DataFrame pretty easily. Let\u0026rsquo;s first import pandas\nimport pandas as pd Loading Data We have aliased it as pd so that its easier to type. Awesome! Let\u0026rsquo;s make a DataFrame.\ndf = pd.DataFrame(iris_in) df 0 1 2 3 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 .. ... ... ... ... 
145 6.7 3.0 5.2 2.3 146 6.3 2.5 5.0 1.9 147 6.5 3.0 5.2 2.0 148 6.2 3.4 5.4 2.3 149 5.9 3.0 5.1 1.8 [150 rows x 4 columns] Nice! We have our input data contained in a data frame and nicely printed in a table; cool! However, the column names 1, 2, 3, 4 aren\u0026rsquo;t exactly the most useful labels for us. Instead, then, let\u0026rsquo;s change the column headers to:\niris_names sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) How? We can both get and set the columns via df.columns:\ndf.columns = iris_names Let\u0026rsquo;s look at the DataFrame again!\ndf sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 .. ... ... ... ... 145 6.7 3.0 5.2 2.3 146 6.3 2.5 5.0 1.9 147 6.5 3.0 5.2 2.0 148 6.2 3.4 5.4 2.3 149 5.9 3.0 5.1 1.8 [150 rows x 4 columns] Excellent! Now our data frame looks much more reasonable.\nWranging Data How do we manipulate the data around? Well, we can index this data by both columns and rows.\nIndexing by columns first is very easy. Pandas tables are, by default, \u0026ldquo;column-major\u0026rdquo;. This means that we can just index the columns just like a list!\ndf[\u0026#34;petal width (cm)\u0026#34;] 0 0.2 1 0.2 2 0.2 3 0.2 4 0.2 ... 145 2.3 146 1.9 147 2.0 148 2.3 149 1.8 Name: petal width (cm), Length: 150, dtype: float64 Nice! I want to know introduce the idea of a \u0026ldquo;cursor\u0026rdquo;. A \u0026ldquo;cursor\u0026rdquo; is used to index this high-dimensional data; think about it as the way to turn this table into something like an indexable 1-D list.\nThe simplest cursor is .loc (\u0026ldquo;locator.\u0026rdquo;)\nUnlike list indexing directly, .loc is \u0026ldquo;row-major:\u0026rdquo; the first index selects rows instead of columns.\ndf.loc[0] sepal length (cm) 5.1 sepal width (cm) 3.5 petal length (cm) 1.4 petal width (cm) 0.2 Name: 0, dtype: float64 Nice! 
You can see that .loc turned our table into a list, with each \u0026ldquo;sample\u0026rdquo; of the data more clearly represented by indexing it like a list.\nWhat if, then, we want to select the \u0026ldquo;pedal width\u0026rdquo; value inside this sample? We just select the first index, a comma, then select the second index.\ndf.loc[0, \u0026#34;petal width (cm)\u0026#34;] 0.2 Excellent! We can see, because we changed the header columns to be strings, we have to index them like strings.\nWhat if, instead of the first row, we want to get\u0026hellip; say, the first, fifth, and sixth rows? Unlike traditional lists, Pandas\u0026rsquo; cursors can be indexed by a list.\nSo this:\ndf.loc[0] sepal length (cm) 5.1 sepal width (cm) 3.5 petal length (cm) 1.4 petal width (cm) 0.2 Name: 0, dtype: float64 turns into\ndf.loc[[0,2,8,9]] sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 2 4.7 3.2 1.3 0.2 8 4.4 2.9 1.4 0.2 9 4.9 3.1 1.5 0.1 This would give us the 0th, 2nd, 8th, and 9th row!\nThis is all good, but, it\u0026rsquo;s kind of annoying to type the column names (like \u0026ldquo;petal width (cm)\u0026rdquo;) every time! No worries, we can address this.\niloc is a variant of loc which uses integer indexes. For row indexing, the syntax remains exactly the same; iloc, however, converts all column indexes to integers sequentially. Therefore:\ndf.loc[0, \u0026#34;petal width (cm)\u0026#34;] becomes\ndf.iloc[0, 3] 0.2 Nice! Isn\u0026rsquo;t that convenient.\nSome statistics The main gist of the lab here is to manipulate the input data a little. Pandas provides many helpful utilities to help us with that. For instance, let\u0026rsquo;s take a single feature in the data, say, the pedal with:\npwidth = df[\u0026#34;petal width (cm)\u0026#34;] # same pwidth = df.iloc[:,3], where : returns everything in the row dimention pwidth 0 0.2 1 0.2 2 0.2 3 0.2 4 0.2 ... 
145 2.3 146 1.9 147 2.0 148 2.3 149 1.8 Name: petal width (cm), Length: 150, dtype: float64 We can now find out how distributed this data is, to glean some info about normalization! The most basic is for us to find the mean width of the petals:\npwidth.mean() 1.1993333333333336 Awesome! We can calculate the standard by applying this constant to that entire row. The syntax works just like how you expect\u0026mdash;subtracting a scalar from the whole column just subtracts that constant from every element\u0026mdash;without any fuss:\n(((pwidth-pwidth.mean())**2).sum()/len(pwidth))**0.5 0.7596926279021594 Cool! In the scheme of things, that\u0026rsquo;s actually a pretty good. However, if it was not, we could normalize the data!\nLet\u0026rsquo;s first get the norm of the vector\npwidth_norm = sum(pwidth**2)**0.5 pwidth_norm 17.38763928772391 And, let\u0026rsquo;s normalize our vector by this norm!\npwidth_normd = pwidth/pwidth_norm pwidth_normd 0 0.011502 1 0.011502 2 0.011502 3 0.011502 4 0.011502 ... 145 0.132278 146 0.109273 147 0.115024 148 0.132278 149 0.103522 Name: petal width (cm), Length: 150, dtype: float64 Excellent. Let\u0026rsquo;s find out its standard deviation again! This time we will use .std() instead.\npwidth_normd.std() 0.04383790440709825 Much better.\nNow you try Load the wine dataset into a DataFrame and manipulate it. Feed slices back into our functions yesterday! Can you make the subsets of the data you made yesterday via the .iloc notation to make slicing easier? Can you quantify the accuracy, precision, and recall on a shuffled version of the wine dataset and logistic regression? seed=0 Is there any columns that need normalisation? Any outliers (2 std. dev away)? Why/why not? Create a balanced version of the wine dataset between red and white classes. Does fitting this normalized version into our model makes training results better? ","html":"\u003cp\u003eWelcome to the Day-3 Morning Lab! We are glad for you to join us. 
Today, we are learning about how Pandas, a data manipulation tool, works, and working on cleaning some data of your own!\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eWe are going to lead the Iris dataset from \u003ccode\u003esklearn\u003c/code\u003e again. This time, however, we will load the full dataset and parse it ourselves (instead of using \u003ccode\u003ereturn_X_y\u003c/code\u003e.)\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by importing the Iris dataset, as we expect.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, load the dataset to see what it looks like.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ekeys\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edict_keys([\u0026#39;data\u0026#39;, \u0026#39;target\u0026#39;, \u0026#39;frame\u0026#39;, \u0026#39;target_names\u0026#39;, \u0026#39;DESCR\u0026#39;, \u0026#39;feature_names\u0026#39;, \u0026#39;filename\u0026#39;, \u0026#39;data_module\u0026#39;])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe have a pretty large dictionary full of information! Let\u0026rsquo;s pull out \u003ccode\u003edata\u003c/code\u003e (our input data), \u003ccode\u003etarget\u003c/code\u003e (our output data), and \u003ccode\u003efeature_names\u003c/code\u003e, the names of our feature.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_in\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;data\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_out\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_names\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;feature_names\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"data-manipulation\"\u003eData Manipulation\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-15_15-52-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ccode\u003epandas\u003c/code\u003e is a very helpful utility that allow us to see into data more conveniently. The object that we are usually working with, when using pandas, is called a \u003ccode\u003eDataFrame\u003c/code\u003e. We can actually create a \u003ccode\u003eDataFrame\u003c/code\u003e pretty easily. 
Let\u0026rsquo;s first import \u003ccode\u003epandas\u003c/code\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"loading-data\"\u003eLoading Data\u003c/h3\u003e\n\u003cp\u003eWe have aliased it as \u003ccode\u003epd\u003c/code\u003e so that its easier to type. Awesome! Let\u0026rsquo;s make a DataFrame.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiris_in\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0 1 2 3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! We have our input data contained in a data frame and nicely printed in a table; cool! 
However, the column names \u003ccode\u003e1\u003c/code\u003e, \u003ccode\u003e2\u003c/code\u003e, \u003ccode\u003e3\u003c/code\u003e, \u003ccode\u003e4\u003c/code\u003e aren\u0026rsquo;t exactly the most useful labels for us. Instead, then, let\u0026rsquo;s change the column headers to:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_names\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003esepal length (cm)\u003c/td\u003e\n\u003ctd\u003esepal width (cm)\u003c/td\u003e\n\u003ctd\u003epetal length (cm)\u003c/td\u003e\n\u003ctd\u003epetal width (cm)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eHow? 
We can both get and set the columns via \u003ccode\u003edf.columns\u003c/code\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris_names\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s look at the \u003ccode\u003eDataFrame\u003c/code\u003e again!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent! Now our data frame looks much more reasonable.\u003c/p\u003e\n\u003ch2 id=\"wranging-data\"\u003eWranging Data\u003c/h2\u003e\n\u003cp\u003eHow do we manipulate the data around? Well, we can index this data by both columns and rows.\u003c/p\u003e\n\u003cp\u003eIndexing by columns first is very easy. Pandas tables are, by default, \u0026ldquo;column-major\u0026rdquo;. 
This means that we can just index the columns just like a list!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eName: petal width (cm), Length: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! I want to know introduce the idea of a \u0026ldquo;cursor\u0026rdquo;. A \u0026ldquo;cursor\u0026rdquo; is used to index this high-dimensional data; think about it as the way to turn this table into something like an indexable 1-D list.\u003c/p\u003e\n\u003cp\u003eThe simplest cursor is \u003ccode\u003e.loc\u003c/code\u003e (\u0026ldquo;locator.\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eUnlike list indexing directly, \u003ccode\u003e.loc\u003c/code\u003e is \u0026ldquo;row-major:\u0026rdquo; the first index selects \u003cem\u003erows\u003c/em\u003e instead of columns.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal length (cm) 5.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal width (cm) 3.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal length 
(cm) 1.4\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal width (cm) 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: 0, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! You can see that \u003ccode\u003e.loc\u003c/code\u003e turned our table into a list, with each \u0026ldquo;sample\u0026rdquo; of the data more clearly represented by indexing it like a list.\u003c/p\u003e\n\u003cp\u003eWhat if, then, we want to select the \u0026ldquo;pedal width\u0026rdquo; value inside this sample? We just select the first index, a comma, then select the second index.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.2\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent! 
We can see, because we changed the header columns to be strings, we have to index them like strings.\u003c/p\u003e\n\u003cp\u003eWhat if, instead of the first row, we want to get\u0026hellip; say, the first, fifth, and sixth rows? Unlike traditional lists, Pandas\u0026rsquo; cursors can be \u003cem\u003eindexed by a list\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eSo this:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal length (cm) 5.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal width (cm) 3.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal length (cm) 1.4\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal width (cm) 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: 0, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eturns into\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e8 4.4 2.9 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e9 4.9 3.1 1.5 0.1\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis would give us the 0th, 2nd, 8th, and 9th row!\u003c/p\u003e\n\u003cp\u003eThis is all 
good, but, it\u0026rsquo;s kind of annoying to type the column names (like \u0026ldquo;petal width (cm)\u0026rdquo;) every time! No worries, we can address this.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003eiloc\u003c/code\u003e is a variant of \u003ccode\u003eloc\u003c/code\u003e which uses integer indexes. For row indexing, the syntax remains exactly the same; \u003ccode\u003eiloc\u003c/code\u003e, however, converts all column indexes to integers sequentially. Therefore:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ebecomes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.2\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! Isn\u0026rsquo;t that convenient.\u003c/p\u003e\n\u003ch2 id=\"some-statistics\"\u003eSome statistics\u003c/h2\u003e\n\u003cp\u003eThe main gist of the lab here is to manipulate the input data a little. Pandas provides many helpful utilities to help us with that. For instance, let\u0026rsquo;s take a single feature in the data, say, the pedal with:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# same pwidth = df.iloc[:,3], where : returns everything in the row dimention\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003epwidth\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: petal width (cm), Length: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can now find out how distributed this data is, to glean some info about normalization! 
The most basic is for us to find the mean width of the petals:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1.1993333333333336\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome! We can calculate the standard by applying this constant to that entire row. 
The syntax works just like how you expect\u0026mdash;subtracting a scalar from the whole column just subtracts that constant from every element\u0026mdash;without any fuss:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e0.7596926279021594\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eCool! In the scheme of things, that\u0026rsquo;s actually a pretty good. However, if it was not, we could normalize the data!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first get the norm of the vector\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_norm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_norm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e17.38763928772391\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, let\u0026rsquo;s normalize our vector by this norm!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_normd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth_norm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_normd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 0.132278\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 0.109273\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 0.115024\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 
0.132278\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 0.103522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: petal width (cm), Length: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let\u0026rsquo;s find out its standard deviation again! This time we will use \u003ccode\u003e.std()\u003c/code\u003e instead.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_normd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.04383790440709825\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMuch better.\u003c/p\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow you try\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLoad the wine dataset into a DataFrame and manipulate it.\u003c/li\u003e\n\u003cli\u003eFeed slices back into our functions yesterday! 
Can you make the subsets of the data you made yesterday via the \u003ccode\u003e.iloc\u003c/code\u003e notation to make slicing easier?\u003c/li\u003e\n\u003cli\u003eCan you quantify the accuracy, precision, and recall on a shuffled version of the wine dataset and logistic regression? \u003ccode\u003eseed=0\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eIs there any columns that need normalisation? Any outliers (2 std. dev away)? Why/why not?\u003c/li\u003e\n\u003cli\u003eCreate a balanced version of the wine dataset between red and white classes. Does fitting this normalized version into our model makes training results better?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d2aft/","tags":null,"title":"AIBridgeLab D3Morning"},{"categories":null,"contents":"Let\u0026rsquo;s run some clustering algorithms! We are still going to use the Iris data, because we are super familiar with it already. Loading it works the exactly in the same way; I will not repeat the notes but just copy the code and description from before here for your reference\nIris Dataset Let\u0026rsquo;s load the Iris dataset! Begin by importing the load_iris tool from sklearn. This is an easy loader scheme for the iris dataset.\nfrom sklearn.datasets import load_iris Then, we simply execute the following to load the data.\nx,y = load_iris(return_X_y=True) We use the return_X_y argument here so that, instead of dumping a large CSV, we get the neat-cleaned input and output values.\nk-means clustering The basics of k-means clustering works exactly the same as before, except this time we have to specify and get a few more parameters. Let\u0026rsquo;s begin by importing k-means and getting some clusters together!\nfrom sklearn.cluster import KMeans Let\u0026rsquo;s instantiate the KMeans cluster with 3 clusters, which is the number of classes there is.\nkmeans = KMeans(n_clusters=3) kmeans = kmeans.fit(x) Great! 
Let\u0026rsquo;s take a look at how it sorted all of our samples\nkmeans.labels_ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2 2 2 2 0 2 2 2 2 2 2 0 0 2 2 2 2 0 2 0 2 0 2 2 0 0 2 2 2 2 2 0 2 2 2 2 0 2 2 2 0 2 2 2 0 2 2 0 Let\u0026rsquo;s plot our results.\nimport matplotlib.pyplot as plt We then need to define some colours.\ncolors=[\u0026#34;red\u0026#34;, \u0026#34;green\u0026#34;, \u0026#34;blue\u0026#34;] Recall from yesterday that we realized that inner Septal/Pedal differences are not as variable as intra Septal/Pedal differences. So, we will plot the first and third columns next to each other, and use labels_ for coloring.\n# for each element for indx, element in enumerate(x): # add a scatter point plt.scatter(element[0], element[1], color=colors[kmeans.labels_[indx]]) # save our figure plt.savefig(\u0026#34;scatter.png\u0026#34;) Nice. These look like the main groups are captured!\nLet\u0026rsquo;s compare that to intended classes\ny 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 There are obviously some clustering mistakes. Woah! Without prompting with answers, our model was able to figure out much of the general clusters at which our data exists. Nice.\nWe can also see the \u0026ldquo;average\u0026rdquo;/\u0026ldquo;center\u0026rdquo; for each of the clusters:\nkmeans.cluster_centers_ 5.9016129 2.7483871 4.39354839 1.43387097 5.006 3.428 1.462 0.246 6.85 3.07368421 5.74210526 2.07105263 Nice! 
These are what our model thinks are the centers of each group.\nPrinciple Component Analysis Let\u0026rsquo;s try reducing the dimentionality of our data by one, so that we only have three dimensions. We do this, by, again, begin importing PCA.\nfrom sklearn.decomposition import PCA When we are instantiating, we need to create a PCA instance with a keyword n_components, which is the number of dimensions (\u0026ldquo;component vectors\u0026rdquo;) we want to keep.\npca = PCA(n_components=3) Great, let\u0026rsquo;s fit our data to this PCA.\npca.fit(x) Wonderful. singular_values_ is how we can get out of the PCA\u0026rsquo;d change of basis results:\ncob = pca.components_ cob 0.36138659 -0.08452251 0.85667061 0.3582892 0.65658877 0.73016143 -0.17337266 -0.07548102 -0.58202985 0.59791083 0.07623608 0.54583143 So, we can then take a change of basis matrix and apply it to some samples!\ncob@(x[0]) 2.81823951 5.64634982 -0.65976754 What\u0026rsquo;s @? Well\u0026hellip; Unfortunately, Python has different operator for matrix-operations (\u0026ldquo;dot\u0026rdquo;); otherwise, it will perform element-wise operations.\nWe can actually also see the \\(R^2\\) values on each of the axis: the variance explained by each of the dimensions.\npca.explained_variance_ 4.22824171 0.24267075 0.0782095 Nice! As you can see, much of the variance is contained in our first dimension here.\n","html":"\u003cp\u003eLet\u0026rsquo;s run some clustering algorithms! We are still going to use the Iris data, because we are super familiar with it already. Loading it works the exactly in the same way; I will not repeat the notes but just copy the code and description from before here for your reference\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s load the Iris dataset! Begin by importing the \u003ccode\u003eload_iris\u003c/code\u003e tool from \u003ccode\u003esklearn\u003c/code\u003e. 
This is an easy loader scheme for the iris dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, we simply execute the following to load the data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the \u003ccode\u003ereturn_X_y\u003c/code\u003e argument here so that, instead of dumping a large \u003ccode\u003eCSV\u003c/code\u003e, we get the neat-cleaned input and output values.\u003c/p\u003e\n\u003ch2 id=\"k-means-clustering\"\u003ek-means 
clustering\u003c/h2\u003e\n\u003cp\u003eThe basics of k-means clustering works exactly the same as before, except this time we have to specify and get a few more parameters. Let\u0026rsquo;s begin by importing k-means and getting some clusters together!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.cluster\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s instantiate the KMeans cluster with 3 clusters, which is the number of classes there is.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_clusters\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat! Let\u0026rsquo;s take a look at how it sorted all of our samples\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elabels_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u
003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u
003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u0
03e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u0
03e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u0
03e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eLet\u0026rsquo;s plot our results.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe then need to define some colours.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecolors\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;red\u0026#34;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;green\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;blue\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall from yesterday that we realized that inner Septal/Pedal differences are not as variable as intra Septal/Pedal differences. So, we will plot the first and third columns next to each other, and use \u003ccode\u003elabels_\u003c/code\u003e for coloring.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for each element\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eindx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eelement\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eenumerate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# add a scatter point\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eelement\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eelement\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolors\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elabels_\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# save our figure\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esavefig\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;scatter.png\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/scatter.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNice. These look like the main groups are captured!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s compare that to intended classes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u00
3e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003
e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\
u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n
\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\
u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eThere are obviously some clustering mistakes. Woah! Without prompting with answers, our model was able to figure out much of the general clusters at which our data exists. Nice.\u003c/p\u003e\n\u003cp\u003eWe can also see the \u0026ldquo;average\u0026rdquo;/\u0026ldquo;center\u0026rdquo; for each of the clusters:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecluster_centers_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e5.9016129\u003c/th\u003e\n\u003cth\u003e2.7483871\u003c/th\u003e\n\u003cth\u003e4.39354839\u003c/th\u003e\n\u003cth\u003e1.43387097\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.006\u003c/td\u003e\n\u003ctd\u003e3.428\u003c/td\u003e\n\u003ctd\u003e1.462\u003c/td\u003e\n\u003ctd\u003e0.246\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e6.85\u003c/td\u003e\n\u003ctd\u003e3.07368421\u003c/td\u003e\n\u003ctd\u003e5.74210526\u003c/td\u003e\n\u003ctd\u003e2.07105263\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNice! 
These are what our model thinks are the centers of each group.\u003c/p\u003e\n\u003ch2 id=\"principle-component-analysis\"\u003ePrinciple Component Analysis\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s try reducing the dimentionality of our data by one, so that we only have three dimensions. We do this, by, again, begin importing PCA.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.decomposition\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ePCA\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhen we are instantiating, we need to create a PCA instance with a keyword \u003ccode\u003en_components\u003c/code\u003e, which is the number of dimensions (\u0026ldquo;component vectors\u0026rdquo;) we want to keep.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ePCA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_components\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat, let\u0026rsquo;s fit our data to this PCA.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful. \u003ccode\u003esingular_values_\u003c/code\u003e is how we can get out of the PCA\u0026rsquo;d change of basis results:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecob\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecomponents_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ecob\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e0.36138659\u003c/th\u003e\n\u003cth\u003e-0.08452251\u003c/th\u003e\n\u003cth\u003e0.85667061\u003c/th\u003e\n\u003cth\u003e0.3582892\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.65658877\u003c/td\u003e\n\u003ctd\u003e0.73016143\u003c/td\u003e\n\u003ctd\u003e-0.17337266\u003c/td\u003e\n\u003ctd\u003e-0.07548102\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e-0.58202985\u003c/td\u003e\n\u003ctd\u003e0.59791083\u003c/td\u003e\n\u003ctd\u003e0.07623608\u003c/td\u003e\n\u003ctd\u003e0.54583143\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eSo, we can then take a change of basis matrix and apply it to some samples!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecob\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e@\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2.81823951\u003c/td\u003e\n\u003ctd\u003e5.64634982\u003c/td\u003e\n\u003ctd\u003e-0.65976754\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWhat\u0026rsquo;s \u003ccode\u003e@\u003c/code\u003e? Well\u0026hellip; Unfortunately, Python has different operator for matrix-operations (\u0026ldquo;dot\u0026rdquo;); otherwise, it will perform element-wise operations.\u003c/p\u003e\n\u003cp\u003eWe can actually also see the \\(R^2\\) values on each of the axis: the variance explained by each of the dimensions.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexplained_variance_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.22824171\u003c/td\u003e\n\u003ctd\u003e0.24267075\u003c/td\u003e\n\u003ctd\u003e0.0782095\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNice! 
As you can see, much of the variance is contained in our first dimension here.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d4aft/","tags":null,"title":"AIBridgeLab D4Aft"},{"categories":null,"contents":"AIFS is a food systems institute at UC Davis.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e is a food systems institute at \u003ca href=\"\"\u003eUC Davis\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaifs/","tags":null,"title":"AIFS"},{"categories":null,"contents":"I am honestly not entirely sure why or what state of mind I was in circa 2017 to write, edit, and act! in this video, but I did.\nThis is an adaption of a Greek-Style story which someone else wrote, I don\u0026rsquo;t know who.\nVideo produced mostly by myself in front of a green screen, with help from my lovely mother as well as a very nice teacher named Joseph O\u0026rsquo;Brian.\nhttps://youtu.be/b1YxOkcwtgw\nBe prepared. 前方高能\n","html":"\u003cp\u003eI am honestly \u003cem\u003enot entirely sure\u003c/em\u003e why or what state of mind I was in circa 2017 to write, edit, and \u003cstrong\u003eact!\u003c/strong\u003e in this video, but I did.\u003c/p\u003e\n\u003cp\u003eThis is an adaption of a Greek-Style story which someone else wrote, I don\u0026rsquo;t know who.\u003c/p\u003e\n\u003cp\u003eVideo produced mostly by myself in front of a green screen, with help from my lovely mother as well as a very nice teacher named Joseph O\u0026rsquo;Brian.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://youtu.be/b1YxOkcwtgw\"\u003ehttps://youtu.be/b1YxOkcwtgw\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eBe prepared. 
前方高能\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhair_a_greek_style_myth/","tags":null,"title":"Air: A Greek Style Myth"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhalexis_ohanian/","tags":null,"title":"Alexis Ohanian"},{"categories":null,"contents":"algebra is the study of\u0026hellip;\nsymbols/variables transformations/operations: \u0026ldquo;add\u0026rdquo;, \u0026ldquo;multiply\u0026rdquo; simple functions abstraction substitution ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e is the study of\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esymbols/variables\u003c/li\u003e\n\u003cli\u003etransformations/operations: \u0026ldquo;add\u0026rdquo;, \u0026ldquo;multiply\u0026rdquo;\u003c/li\u003e\n\u003cli\u003esimple functions\u003c/li\u003e\n\u003cli\u003eabstraction\u003c/li\u003e\n\u003cli\u003esubstitution\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalgebra/","tags":null,"title":"algebra"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhalgebreic_equation/","tags":null,"title":"algebreic equation"},{"categories":null,"contents":"The algebreic multiplicity for a given eigenvalue is the multiplicity for which the linear factor containing it shows up in the characteristic polynomial.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhalgebreic_multiplicity/\"\u003ealgebreic multiplicity\u003c/a\u003e for a given \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e is the multiplicity for which the linear factor containing it shows up in the \u003ca href=\"/posts/kbhcharacteristic_polynomial/\"\u003echaracteristic polynomial\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalgebreic_multiplicity/","tags":null,"title":"algebreic multiplicity"},{"categories":null,"contents":" Startups Kwotes \u0026ldquo;Working with big tech is a job closer 
to big tech.\u0026rdquo;\n","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstartup/\"\u003eStartups\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"kwotes\"\u003eKwotes\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Working with big tech is a job closer to big tech.\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhali_partovi/","tags":null,"title":"Ali Partovi"},{"categories":null,"contents":"Begin with a new installation of MFA, and head to the directory. First run validate with the original dictionary.\nmfa validate ~/Downloads/tb/my_corpus english_us_arpa english_us_arpa We see that there is in deed an section of corpus that is out-of-vocab.\nINFO - 11 OOV word types INFO - 18 total OOV tokens Therefore, we will generate a new dictionary based on the existing dictionary of english_us_arpa.\nFirst download the english_us_arpa model\nmfa model download g2p english_us_arpa Then, perform the actual dictionary generation:\nmfa g2p english_us_arpa ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt There is a chance this command fails with\nThere was an issue importing Pynini, please ensure that it is installed. If you are on Windows, please use the Windows Subsystem for Linux to use g2p functionality. If so, install pynini\nconda add pynini Finally, run the mfa g2p command above to generate pronunciations.\nYou should end up with a file named new_dict.txt, which should include missing words.\nFinally, perform alignment with this new dictionary.\nmfa align ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt english_us_arpa ~/Downloads/tb/my_corpus_output Notice here the second argument of mfa align is no longer english_us_arpa, our base dictionary. Instead, it is our custom dictionary.\n","html":"\u003cp\u003eBegin with a new installation of MFA, and head to the directory. 
First run validate with the original dictionary.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa validate ~/Downloads/tb/my_corpus english_us_arpa english_us_arpa\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe see that there is in deed an section of corpus that is out-of-vocab.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eINFO - 11 OOV word types\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eINFO - 18 total OOV tokens\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTherefore, we will generate a new dictionary based on the existing dictionary of \u003ccode\u003eenglish_us_arpa\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eFirst download the english_us_arpa model\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa model download g2p english_us_arpa\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, perform the actual dictionary generation:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" 
data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa g2p english_us_arpa ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThere is a chance this command fails with\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eThere was an issue importing Pynini, please ensure that it is installed. If you are on Windows, please use the Windows Subsystem for Linux to use g2p functionality.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eIf so, install pynini\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econda add pynini\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, run the \u003ccode\u003emfa g2p\u003c/code\u003e command above to generate pronunciations.\u003c/p\u003e\n\u003cp\u003eYou should end up with a file named \u003ccode\u003enew_dict.txt\u003c/code\u003e, which should include missing words.\u003c/p\u003e\n\u003cp\u003eFinally, perform alignment with this new dictionary.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa align ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt english_us_arpa 
~/Downloads/tb/my_corpus_output\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNotice here the second argument of \u003ccode\u003emfa align\u003c/code\u003e is no longer \u003ccode\u003eenglish_us_arpa\u003c/code\u003e, our base dictionary. Instead, it is our custom dictionary.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalign_with_new_vocab/","tags":null,"title":"Align with New Vocab"},{"categories":null,"contents":" Want to interview more severe ashma Want to find someone younger Difference between marketing and purchaser.\nTaking to people Spoke with Matt. Talked with more details with prototyping and how they can build a unique product.\nHave not gotten back to him yet.\n","html":"\u003cul\u003e\n\u003cli\u003eWant to interview more severe ashma\u003c/li\u003e\n\u003cli\u003eWant to find someone younger\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDifference between marketing and purchaser.\u003c/p\u003e\n\u003ch2 id=\"taking-to-people\"\u003eTaking to people\u003c/h2\u003e\n\u003cp\u003eSpoke with Matt. 
Talked with more details with prototyping and how they can build a unique product.\u003c/p\u003e\n\u003cp\u003eHave not gotten back to him yet.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalivio_april_checkin/","tags":null,"title":"Alivio April Checkin"},{"categories":null,"contents":"Recall, from conditional plan evaluation, we had that:\n\\begin{equation} U^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) \\end{equation}\nlet\u0026rsquo;s write it as:\n\\begin{equation} U^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) = {\\alpha_{\\pi}}^{\\top} b \\end{equation}\nwhere \\(\\U_{\\pi}(s)\\) is the conditional plan evaluation starting at each of the initial states.\n\\begin{equation} \\alpha_{\\pi} = \\qty[ U^{\\pi}(s_1), U^{\\pi}(s_2) ] \\end{equation}\nYou will notice, then the utility of \\(b\\) is linear on \\(b\\) for different policies \\(\\alpha_{\\pi}\\):\nAt every belief \\(b\\), there is a policy which has the highest \\(U(b)\\) at that \\(b\\) given be the alpha vector formulation.\nAdditional Information top action you can just represent a policy out of alpha vectors by taking the top (root) action of the conditional plan with the alpha vector on top.\noptimal value function for POMDP with alpha vector Recall:\n\\begin{equation} U^{*}(b) = \\max_{\\pi} U^{\\pi}(b) = \\max_{\\pi} \\alpha_{\\pi}^{\\top}b \\end{equation}\nNOTE! 
This function (look at the chart above from \\(b\\) to \\(u\\)) is:\npiecewise linear convex (because the \u0026ldquo;best\u0026rdquo; (highest) line) is always curving up and so, for a policy instantiated by a bunch of alpha vectors \\(\\Gamma\\), we have:\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b \\end{equation}\nTo actually extract a policy out of this set of vectors \\(\\Gamma\\), we turn to one-step lookahead in POMDP\none-step lookahead in POMDP Say you want to extract a policy out of a bunch of alpha vectors.\nLet \\(\\alpha \\in \\Gamma\\), a set of alpha vectors.\n\\begin{equation} \\pi^{\\Gamma}(b) = \\arg\\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o)))] \\end{equation}\nwhere:\n\\begin{equation} R(b,a) = \\sum_{s}^{} R(s,a)b(s) \\end{equation}\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\nand\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b \\end{equation}\nalpha vector pruning Say we had as set of alpha vectors \\(\\Gamma\\):\n\\(\\alpha_{3}\\) isn\u0026rsquo;t all that useful here. 
So we ask:\n\u0026ldquo;Is alpha dominated by some \\(\\alpha_{i}\\) everywhere?\u0026rdquo;\nWe formulate this question in terms of a linear program:\n\\begin{equation} \\max_{\\delta, b} \\delta \\end{equation}\nwhere \\(\\delta\\) is the gap between \\(\\alpha\\) and the utility o\nsubject to:\n\\begin{align} \u0026amp;1^{\\top} b = 1\\ \\text{(b adds up to 1)} \\\\ \u0026amp; b\\geq 0 \\\\ \u0026amp; \\alpha^{\\top} b \\geq \\alpha\u0026rsquo;^{\\top} b + \\delta, \\forall \\alpha\u0026rsquo; \\in \\Gamma \\end{align}\nif \\(\\delta \u0026lt; 0\\), then we can prune \\(\\alpha\\) because it had been dominated.\nif each value on the top of the set\n","html":"\u003cp\u003eRecall, from \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e, we had that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet\u0026rsquo;s write it as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) = {\\alpha_{\\pi}}^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\U_{\\pi}(s)\\) is the \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e starting at each of the initial states.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{\\pi} = \\qty[ U^{\\pi}(s_1), U^{\\pi}(s_2) ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will notice, then the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of \\(b\\) is linear on \\(b\\) for different policies \\(\\alpha_{\\pi}\\):\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-16_09-23-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAt every belief \\(b\\), there is a policy which has the highest \\(U(b)\\) at that \\(b\\) given be the 
\u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e formulation.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"top-action\"\u003etop action\u003c/h3\u003e\n\u003cp\u003eyou can just represent a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es by taking the top (root) action of the \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e with the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e on top.\u003c/p\u003e\n\u003ch3 id=\"optimal-value-function-for-pomdp--kbhconditional-plan-dot-md--with-alpha-vector--kbhalpha-vector-dot-md\"\u003e\u003ca href=\"/posts/kbhconditional_plan/#id-9ccda204-0967-44c8-a801-c92d0df154b5-optimal-value-function-for-id-130d5294-0274-422b-b395-7d6f7f75be7d-pomdp\"\u003eoptimal value function for POMDP\u003c/a\u003e with \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}(b) = \\max_{\\pi} U^{\\pi}(b) = \\max_{\\pi} \\alpha_{\\pi}^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNOTE! 
This function (look at the chart above from \\(b\\) to \\(u\\)) is:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003epiecewise linear\u003c/li\u003e\n\u003cli\u003econvex (because the \u0026ldquo;best\u0026rdquo; (highest) line) is always curving up\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eand so, for a policy instantiated by a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es \\(\\Gamma\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo actually extract a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of this set of vectors \\(\\Gamma\\), we turn to \u003ca href=\"#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/h3\u003e\n\u003cp\u003eSay you want to extract a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eLet \\(\\alpha \\in \\Gamma\\), a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi^{\\Gamma}(b) = \\arg\\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o)))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(b,a) = \\sum_{s}^{} R(s,a)b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003ch3 
id=\"alpha-vector--kbhalpha-vector-dot-md--pruning\"\u003e\u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e pruning\u003c/h3\u003e\n\u003cp\u003eSay we had as set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es \\(\\Gamma\\):\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-16_09-40-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\(\\alpha_{3}\\) isn\u0026rsquo;t all that useful here. So we ask:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Is alpha dominated by some \\(\\alpha_{i}\\) everywhere?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe formulate this question in terms of a linear program:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{\\delta, b} \\delta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\delta\\) is the gap between \\(\\alpha\\) and the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e o\u003c/p\u003e\n\u003cp\u003esubject to:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;1^{\\top} b = 1\\ \\text{(b adds up to 1)} \\\\\n\u0026amp; b\\geq 0 \\\\\n\u0026amp; \\alpha^{\\top} b \\geq \\alpha\u0026rsquo;^{\\top} b + \\delta, \\forall \\alpha\u0026rsquo; \\in \\Gamma\n\\end{align}\u003c/p\u003e\n\u003cp\u003eif \\(\\delta \u0026lt; 0\\), then we can prune \\(\\alpha\\) because it had been dominated.\u003c/p\u003e\n\u003cp\u003eif each value on the top of the set\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalpha_vector/","tags":null,"title":"alpha vector"},{"categories":null,"contents":"Alternating Least Squares is a method to Factoring a matrix into two components:\n\\begin{equation} \\mathcal{M}( R) \\approx \\mathcal{M}(U) \\cdot \\mathcal{M}(P) \\end{equation}\nwhere, we want to come up matricies \\(U\\) and \\(P\\) with a certain side length \\(k\\) that we exdogenously come up with\nTo perform Alternating Least Squares, we fix the values of either \\(U\\) or \\(P\\), then perform the least-squares optimization 
on\n(This is proven best-fit for \u0026ldquo;non-pathological matricies\u0026rdquo;)\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhalternating_least_squares/\"\u003eAlternating Least Squares\u003c/a\u003e is a method to \u003ca href=\"/posts/kbhthoughts_on_axler_4/#factoring\"\u003eFactoring\u003c/a\u003e a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e into two components:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{M}( R) \\approx \\mathcal{M}(U) \\cdot \\mathcal{M}(P)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, we want to come up matricies \\(U\\) and \\(P\\) with a certain side length \\(k\\) that we exdogenously come up with\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-08-01_11-10-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003chr\u003e\n\u003cp\u003eTo perform \u003ca href=\"/posts/kbhalternating_least_squares/\"\u003eAlternating Least Squares\u003c/a\u003e, we fix the values of either \\(U\\) or \\(P\\), then perform the least-squares optimization on\u003c/p\u003e\n\u003cp\u003e(This is proven best-fit for \u0026ldquo;\u003ca href=\"/posts/kbhnon_pathological_matricies/\"\u003enon-pathological matricies\u003c/a\u003e\u0026rdquo;)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalternating_least_squares/","tags":null,"title":"Alternating Least Squares"},{"categories":null,"contents":"Problem: current ambulance routing don\u0026rsquo;t optimize significantly on the contextual cases for stroke patients\nStroke hospitals: PSC is smaller than a CSC.\nPrevious work Routing methods\u0026mdash;\nroute all patient to nearest PSC, which is worse than route high risk patient to CSC, which is worse than always route to CSC This is counter-intuitive. 
How do we solve, given a stroke condition, available PSC/CSC locations, traffic, etc., for where and how to route a patient?\nAmbulance MDP formulation \\(S\\): (location, symptom onset, known stroke type, stroke type) \\(A\\): route to clinic, route to [specific] PSC, route to [specific] CSC will never be downrouted (for instance, if you are at a PSC you will always either stay or go to CSC) \\(T(s\u0026rsquo;|s,a)\\): location changes distance \\(R(s,a)\\): \u0026ldquo;probability of patient outcome\u0026rdquo; \\(P(success|time)\\) (Holodinsky, et. al. 2018) if stroke type is unknown, its a weighted average Solving Forward Search, depth of 2: patient will either get transported or bounced and transported.\nResults status quo: people near Stanford hospital/ChanZuck are better MDP: smoother gradient ","html":"\u003cp\u003eProblem: \u003cstrong\u003ecurrent ambulance routing don\u0026rsquo;t optimize significantly on the contextual cases for stroke patients\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eStroke hospitals: PSC is smaller than a CSC.\u003c/p\u003e\n\u003ch2 id=\"previous-work\"\u003ePrevious work\u003c/h2\u003e\n\u003cp\u003eRouting methods\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eroute all patient to nearest PSC, which is worse than\u003c/li\u003e\n\u003cli\u003eroute high risk patient to CSC, which is worse than\u003c/li\u003e\n\u003cli\u003ealways route to CSC\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is counter-intuitive. 
How do we solve, given a stroke condition, available PSC/CSC locations, traffic, etc., for where and how to route a patient?\u003c/p\u003e\n\u003ch2 id=\"ambulance-mdp-formulation\"\u003eAmbulance MDP formulation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S\\): (location, symptom onset, known stroke type, stroke type)\u003c/li\u003e\n\u003cli\u003e\\(A\\):\n\u003cul\u003e\n\u003cli\u003eroute to clinic, route to [specific] PSC, route to [specific] CSC\u003c/li\u003e\n\u003cli\u003ewill never be downrouted (for instance, if you are at a PSC you will always either stay or go to CSC)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\\(T(s\u0026rsquo;|s,a)\\):\n\u003cul\u003e\n\u003cli\u003elocation changes\u003c/li\u003e\n\u003cli\u003edistance\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\\(R(s,a)\\):\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;probability of patient outcome\u0026rdquo; \\(P(success|time)\\) (Holodinsky, et. al. 2018)\u003c/li\u003e\n\u003cli\u003eif stroke type is unknown, its a weighted average\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, depth of 2: patient will either get transported or bounced and transported.\u003c/p\u003e\n\u003ch2 id=\"results\"\u003eResults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003estatus quo: people near Stanford hospital/ChanZuck are better\u003c/li\u003e\n\u003cli\u003eMDP: smoother gradient\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhabulance_trajectories/","tags":null,"title":"Ambulance Trajectories"},{"categories":null,"contents":"Hello! Welcome to the series of guided code-along labs to introduce you to the basis of using the PyTorch library and its friends to create a neural network! 
We will dive deeply into Torch, focusing on how practically it can be used to build Neural Networks, as well as taking sideroads into how it works under the hood.\nGetting Started To get started, let\u0026rsquo;s open a colab and import Torch!\nimport torch import torch.nn as nn The top line here import PyTorch generally, and the bottom line imports the Neural Network libraries. We will need both for today and into the future!\nTensors and AutoGrad The most basic element we will be working with in Torch is something called a tensor. A tensor is a variable, which holds either a single number (scalar, or a single neuron) or a list of numbers (vector, or a layer of neurons), that can change. We will see what that means in a sec.\nYour First Tensors Everything that you are going to put through to PyTorch needs to be in a tensor. Therefore, we will need to get good at making them! As we discussed, a tensor can hold an number (scalar), a list (vector) or a (matrix).\nHere are a bunch of them!\nscalar_tensor = torch.tensor(2.2) vector_tensor = torch.tensor([1,3,4]) matrix_tensor = torch.tensor([[3,1,4],[1,7,4]]) You can perform operations on these tensors, like adding them together:\ntorch.tensor(2.2) + torch.tensor(5.1) tensor(7.3000) Vector and Matrix tensors work like NumPy arrays. You can add them pairwise:\ntorch.tensor([[3,1,4],[1,7,4]]) + torch.tensor([[0,2,1],[3,3,4]]) tensor([[ 3, 3, 5], [ 4, 10, 8]]) Connecting Tensors A single number can\u0026rsquo;t be a neural network! 
([citation needed]) So, to be able to actually build networks, we have to connect tensors together.\nSo, let\u0026rsquo;s create two tensors, each holding a neuron, and connect them together!\nHere are two lovely scalar tensors:\nvar_1 = torch.tensor(3.0, requires_grad=True) var_2 = torch.tensor(4.0, requires_grad=True) var_1, var_2 (tensor(3., requires_grad=True), tensor(4., requires_grad=True)) We initialized two numbers, 3, which we named var_1, and 4, which we named var_2.\nThe value requires_grad here tells PyTorch that these values can change, which we need it to do\u0026hellip; very shortly!\nFirst, though, let\u0026rsquo;s create a latent variable. A \u0026ldquo;latent\u0026rdquo; value is a value that is the result of operations on other non-latent tensors\u0026mdash;connecting the activation of some neurons together with a new one. For instance, if I multiplied our two tensors together, we can create our very own latent tensor.\nmy_latent_value = var_1*var_2 my_latent_value tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) Evidently, \\(3 \\cdot 4 = 12\\).\nAutograd Now! The beauty of PyTorch is that we can tell it to set any particular latent variable to \\(0\\) (Why only \\(0\\), and \\(0\\) specifically? Calculus; turns out this limitation doesn\u0026rsquo;t matter at all, as we will see), and it can update all of its constituent tensors with required_grad \u0026ldquo;True\u0026rdquo; such that the latent variable we told PyTorch to set to \\(0\\) indeed becomes \\(0\\)!\nThis process is called \u0026ldquo;automatic gradient calculation\u0026rdquo; and \u0026ldquo;backpropagation.\u0026rdquo; (Big asterisks throughout, but bear with us. Find Matt/Jack if you want more.)\nTo do this, we will leverage the help of a special optimization algorithm called stochastic gradient descent.\nLet\u0026rsquo;s get a box of this stuff first:\nfrom torch.optim import SGD SGD \u0026lt;class \u0026#39;torch.optim.sgd.SGD\u0026#39;\u0026gt; Excellent. 
By the way, from the torch.optim package, there\u0026rsquo;s tonnes (like at least 20) different \u0026ldquo;optimizer\u0026rdquo; algorithms that all do the same thing (\u0026ldquo;take this latent variable to \\(0\\) by updating its constituents\u0026rdquo;) but do them in important different ways. We will explore some of them through this semester, and others you can Google for yourself by looking up \u0026ldquo;PyTorch optimizers\u0026rdquo;.\nOk, to get this SGD thing up and spinning, we have to tell it every tensor it gets to play with in a list. For us, let\u0026rsquo;s ask PyTorch SGD to update var_1 and var_2 such that my_latent_value (which, remember, is var1 times var2) becomes a new value.\nAside: learning rate\nNow, if you recall the neural network simulation, our model does not reach the desired outcome immediately. It does so in steps. The size of these steps are called the learning rate; the LARGER these steps are, the quicker you will get close to your desired solution, but where you end up getting maybe farther away from the actual solution; and vise versa.\nThink about the learning rate as a hoppy frog: a frog that can hop a yard at a time (\u0026ldquo;high learning rate\u0026rdquo;) can probably hit a target a mile away much quicker, but will have a hard time actually hitting the foot-wide target precisely; a frog that can hop an inch at a time (\u0026ldquo;low learning rate\u0026rdquo;) can probably hit a target a mile away\u0026hellip;. years from now, but will definitely be precisely hitting the foot-wide target when it finally gets there.\nSo what does \u0026ldquo;high\u0026rdquo; and \u0026ldquo;low\u0026rdquo; mean? Usually, we adjust learning rate by considering the number of decimal places it has. \\(1\\) is considered a high learning rate, \\(1 \\times 10^{-3} = 0.001\\) as medium-ish learning rate, and \\(1 \\times 10^{-5}=0.00001\\) as a small one. 
There are, however, no hard and fast rules about this and it is subjcet to experimentation.\nSo, choose also an appropriate learning rate for our optimizer. I would usually start with \\(3 \\times 10^{-3}\\) and go from there. In Python, we write that as 3e-3.\nSo, let\u0026rsquo;s make a SGD, and give it var_1 and var_2 to play with, and set the learning rate to 3e-3:\nmy_sgd = SGD([var_1, var_2], lr=3e-3) my_sgd SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 0.003 maximize: False momentum: 0 nesterov: False weight_decay: 0 ) Wonderful. Don\u0026rsquo;t worry much about how many of these means for now; however, we will see it in action shortly.\nNow! Recall that we allowed my_sgd to mess with var_1 and var_2 to change the value of my_latent_value (the product of var_1 and var_2).\nCurrent, var_1 and var_2 carries the values of:\nvar_1, var_2 (tensor(3., requires_grad=True), tensor(4., requires_grad=True)) And, of course, their product my_latent_value carries the value of:\nmy_latent_value tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) What if we want my_latent_value to be\u0026hellip; \\(15\\)? That sounds like a good number. Let\u0026rsquo;s ask our SGD algorithm to update var_1 and var_2 such that my_latent_value will be \\(15\\)!\nWaaait. I mentioned that the optimizers can only take things to \\(0\\). How could it take my_latent_value to \\(15\\) then? Recall! I said SGD takes a latent variable to \\(0\\). So, we can just build another latent variable such that, when my_latent_value is \\(15\\), our new latent variable will be \\(0\\), and then ask SGD optimize on that!\nWhat could that be\u0026hellip; Well, the squared difference between \\(15\\) and my_latent_value is a good one. 
If my_latent_value is \\(15\\), the squared difference between it and \\(15\\) will be \\(0\\), as desired!\nSo, similar to what we explored last semester, we use sum of squared difference as our loss because it will be able to account for errors of fit in both directions: a \\(-4\\) difference in predicted and actual output is just as bad as a \\(+4\\) difference.\nTurns out, the \u0026ldquo;objective\u0026rdquo; for SGD optimization, the thing that we ask SGD to take to \\(0\\) on our behalf by updating the parameters we allowed it to update (again, they are var_1 and var_2 in our case here), is indeed the loss value of our model. Sum of squared errors is, therefore, called our loss function for this toy problem.\nSo let\u0026rsquo;s do it! Let\u0026rsquo;s create a tensor for our loss:\nloss = (15-my_latent_value)**2 loss tensor(9., grad_fn=\u0026lt;PowBackward0\u0026gt;) Nice. So our loss is at \\(9\\) right now; when my_latent_value is correctly at \\(15\\), our loss will be at \\(0\\)! So, to get my_latent_value to \\(15\\), we will ask SGD to take loss to \\(0\\).\nTo do this, there are three steps. COMMIT THIS TO MEMORY, as it will be the basis of literally everything else in the future.\nBackpropagate: \u0026ldquo;please tell SGD to take this variable to \\(0\\), and mark the correct tensors to change\u0026rdquo; Optimize: \u0026ldquo;SGD, please update the marked tensors such that the variable I asked you to take to \\(0\\) is closer to \\(0\\)\u0026rdquo; Reset: \u0026ldquo;SGD, please get ready for step 1 again by unmarking everything that you have changed\u0026rdquo; Again! Is it committed to memory yet?\nBackprop Optimize Reset I am stressing this here because a lot of people 1) miss one of these steps 2) do them out of order. Doing these in any other order will cause your desired result to not work. Why? Think about what each step does, and think about doing them out of order.\nOne more time for good luck:\nBackprop! Optimize! Reset! 
Let\u0026rsquo;s do it.\nBackprop! Backpropagation marks the correct loss value to minimize (optimize towards being \\(0\\)), and marks all tensors with requires_grad set to True which make up the value of that loss value for update.\nSecretly, this step takes the partial derivative of our loss against each of the tensors we marked requires_grad, allowing SGD to \u0026ldquo;slide down the gradient\u0026rdquo; based on those partial derivatives. Don\u0026rsquo;t worry if you didn\u0026rsquo;t get that sentence.\nTo do this, we call .backward() on the loss we want to take to \\(0\\):\nloss.backward() None This call will produce nothing. And that\u0026rsquo;s OK, because here comes\u0026hellip;\nOptimize! The next step is to tell SGD to update all of the tensors marked for update in the previous step to get loss closer to \\(0\\). To do this, we simply:\nmy_sgd.step() None This call will produce nothing. But, if you check now, the tensors should be updated.\nAlthough\u0026hellip; You shouldn\u0026rsquo;t check! Because we have one more step left:\nReset! my_sgd.zero_grad() None I cannot stress this enough. People often stop at the previous step because \u0026ldquo;ooo look my tensors updated!!!\u0026rdquo; and forget to do this step. THIS IS BAD. We won\u0026rsquo;t go into why for now, but basically not resetting the update mark results in a tensor being updated twice, then thrice, etc. each time you call .step(), which will cause double-updates, which will cause you to overshoot (handwavy, but roughly), which is bad.\nooo look my tensors updated!!! var_1, var_2 (tensor(3.0720, requires_grad=True), tensor(4.0540, requires_grad=True)) WOAH! Look at that! Without us telling SGD, it figured out that var_1 and var_2 both need to be BIGGER for my_latent_value, the product of var_1 and var_2 to change from \\(12\\) to \\(15\\). Yet, the product of \\(3.0720\\) and \\(4.0540\\) is hardly close to \\(15\\).\nWhy? Because of our step size. It was tiny! 
To get my_latent_value to be properly \\(15\\), we have to do the cycle of 1) calculating new latent value 2) calculating new loss 3) backprop, optimize, reset, a LOT of times.\nNow do that a lot of times. for _ in range(100): my_latent_value = var_1*var_2 loss = (15-my_latent_value)**2 loss.backward() # BACKPROP! my_sgd.step() # OPTIMIZE! my_sgd.zero_grad() # RESET! var_1, var_2 (tensor(3.4505, requires_grad=True), tensor(4.3472, requires_grad=True)) Weird solution, but we got there! The product of these two values is indeed very close to \\(15\\)! Give yourself a pat on the back.\nSo why the heck are we doing all this So why did we go through all the effort of like 25 lines of code to get two numbers to multiply to \\(15\\)? If you think about Neural Networks as a process of function fitting, we are essentially asking our very basic \u0026ldquo;network\u0026rdquo; (as indeed, the chain of tensors to build up to our latent value, then to compute our loss, is a network!) to achieve a measurable task (\u0026ldquo;take the product of these numbers to \\(15\\)\u0026rdquo;). Though the relationships we will be modeling in this class will be more complex than literal multiplication, it will be just using more fancy mechanics of doing the same thing\u0026mdash;taking tensors values which was undesirable, and moving them to more desirable values to model our relationship.\ny=mx+b and your first neural network \u0026ldquo;module\u0026rdquo; nn.Linear The power of neural networks actually comes when a BUNCH of numbers gets multiplied together, all at once! using\u0026hellip; VECTORS and MATRICIES! Don\u0026rsquo;t remember what they are? Ask your friendly neighborhood Matt/Jack.\nRecall, a matrix is how you can transform a vector from one space to another. Turns out, the brunt of everything you will be doing involves asking SGD to move a bunch of matricies around (like we did before!) 
such that our input vector(s) gets mapped to the right place.\nA matrix, in neural network world, is referred to as a linear layer. It holds a whole series of neurons, taking every single value of the input into account to produce a whole set of output. Because of this property, it is considered a fully connected layer.\nLet\u0026rsquo;s create such a fully-connected layer (matrix) in PyTorch! When you ask PyTorch to make a matrix for you, you use the nn sublibrary which we imported before. Furthermore, and this is confusing for many people who have worked with matrices before, you specify the input dimension first.\nmy_matrix_var_1 = nn.Linear(3, 2) my_matrix_var_1 Linear(in_features=3, out_features=2, bias=True) my_matrix_var_1 is a linear map from three dimensions to two dimensions; it will take a vector of three things as input and spit out a vector of two.\nNote! Although my_matrix_var_1 is a tensor under the hood just like var_1, we 1) didn\u0026rsquo;t have to set default values for it 2) didn\u0026rsquo;t have to mark it as requires_grad. This is because, unlike a raw Tensor which often does not need to be changed (such as, for instance, the input value, which you can\u0026rsquo;t change), a matrix is basically ALWAYS a tensor that encodes the weights of a model we are working with\u0026mdash;so it is always going to be something that we will ask SGD to change on our behalf.\nSo, since you are asking SGD to change it anyways, PyTorch just filled a bunch of random numbers in for you and set requires_grad on for my_matrix_var_1. If you want to see the actual underlying tensor, you can:\nmy_matrix_var_1.weight Parameter containing: tensor([[-0.2634, 0.3729, 0.5019], [ 0.2796, 0.5425, -0.4337]], requires_grad=True) As you can see, we have indeed what we expect: a tensor containing a \\(2\\times 3\\) matrix with requires_grad on filled with random values.\nHow do we actually optimize over this tensor? 
You can do all the shenanigans we did before and pass my_matrix_var_1 to SGD, but this will quickly get unwieldy as you have more parameters. Remember how we had to give SGD a list of EVERYTHING it had to keep track of? var_1 and var_2 were simple enough, but what if we had to do var_1.weight, var_2.weight, var_3.weight\u0026hellip; \u0026hellip; \u0026hellip; ad nauseam for every parameter we use on our large graph? GPT-3 has 175 billion parameters. Do you really want to type that?\nNo.\nThere is, of course, a better way.\nnn.Module This, by the way, is the standard of how a Neural Network is properly built from now on until the industry moves on from PyTorch. You will want to remember this.\nLet\u0026rsquo;s replicate the example of our previous 3=\u0026gt;2 dimensional linear map, but with a whole lot more code.\nclass MyNetwork(nn.Module): def __init__(self): # important: runs early calls to make sure that # the module is correct super().__init__() # we declare our layers. we will use them below self.m1 = nn.Linear(3,2) # this is a special function that you don\u0026#39;t actually call # manually, but as you use this module Torch will call # on your behalf. It passes the input through to the layers # of your network. def forward(self, x): # we want to pass whatever input we get, named x # through to every layer. right now there is only # one fully-connected layer x = self.m1(x) return x What this does, behind the scenes, is to wrap our matrix and all of its parameters into one giant module. (NOTE! This is PyTorch-specific language. Unlike all other vocab before, this term is specific to PyTorch.) A module is an operation on tensors which can retain gradients (i.e. it can change, i.e. requires_grad=True).\nLet\u0026rsquo;s see it in action. Recall that our matrix takes a vector of 3 things as input, and spits out a vector of 2 things. 
So let\u0026rsquo;s make a vector of three things:\nthree_vector = torch.tensor([1.,2.,3.]) three_vector tensor([1., 2., 3.]) By the way, notice the period I\u0026rsquo;m putting after numbers here? That\u0026rsquo;s a shorthand for .0. So 3.0 = 3.. I want to take this opportunity to remind you that the tensor operations all take FLOATING POINT tensors as input, because the matrices themselves as initialized with random floating points.\nLet\u0026rsquo;s get an instance of the new MyNetwork module.\nmy_network = MyNetwork() my_network MyNetwork( (m1): Linear(in_features=3, out_features=2, bias=True) ) And apply this operation we designed to our three-vector!\nmy_network(three_vector) tensor([0.3850, 1.4120], grad_fn=\u0026lt;AddBackward0\u0026gt;) Woah! It mapped our vector tensor in three dimensions to a vector tensor in two!\nThe above code, by the way, is how we actually use our model to run predictions: my_network is transforming the input vector to the desired output vector.\nCool. This may not seem all that amazing to you\u0026hellip; yet. But, remember, we can encode any number of matrix operations in our forward() function above. Let\u0026rsquo;s design another module that uses two matricies\u0026mdash;or two fully-connected layers, or layers for short (when we don\u0026rsquo;t specify what kind of layer it is, it is fully connected)\u0026mdash;to perform a transformation.\nWe will transform a vector from 3 dimensions to 2 dimensions, then from 2 dimensions to 5 dimensions:\nclass MyNetwork(nn.Module): def __init__(self): # important: runs early calls to make sure that # the module is correct super().__init__() # we declare our layers. we will use them below self.m1 = nn.Linear(3,2) self.m2 = nn.Linear(2,5) # this is a special function that you don\u0026#39;t actually call # manually, but as you use this module Torch will call # on your behalf. It passes the input through to the layers # of your network. 
def forward(self, x): # we want to pass whatever input we get, named x # through to every layer. right now there is only # one fully-connected layer x = self.m1(x) x = self.m2(x) return x Of course, this network topology is kind of randomly tossed into the network.\nDoing everything else we did before again, we should end up with a vector in 5 dimensions, having been transformed twice behind the scenes!\nmy_network = MyNetwork() my_network MyNetwork( (m1): Linear(in_features=3, out_features=2, bias=True) (m2): Linear(in_features=2, out_features=5, bias=True) ) And apply this operation we designed to our three-vector!\nmy_network(three_vector) tensor([ 0.8241, -0.1014, 0.2940, -0.2019, 0.6749], grad_fn=\u0026lt;AddBackward0\u0026gt;) Nice.\nAnd here\u0026rsquo;s the magical thing: when we are asking SGD to optimize this network, instead of needing to pass every darn parameter used in this network into SGD, we can just pass in:\nmy_network.parameters() \u0026lt;generator object Module.parameters at 0x115214270\u0026gt; This is actually a list of every single tensor that has requires_grad=True that we secretly created. No more typing out a list of every parameter to SGD like we did with var_1 and var_2! We will see this in action shortly.\nHow to Train Your Dragon Neural Network Note, the MyNetwork transformation is currently kind of useless. We know it maps the vector [1,2,3] to some arbitrary numbers above (i.e. 0.8241 and such). That\u0026rsquo;s quite lame.\nWe want our network to model some relationship between numbers, that\u0026rsquo;s why we are here. 
Let\u0026rsquo;s, arbitrarily and for fun, ask SGD to update my_network such that it will return [1,2,3,4,5] given [1,2,3].\nBy the way, from here on, I will use MyNetwork to refer to the model 3=\u0026gt;2=\u0026gt;5 network we made above generally, and my_network to refer to the specific instantiation of MyNetwork whose parameters we will ask SGD to update.\nLet\u0026rsquo;s get a clean copy of MyNetwork first:\nmy_network = MyNetwork() my_network MyNetwork( (m1): Linear(in_features=3, out_features=2, bias=True) (m2): Linear(in_features=2, out_features=5, bias=True) ) And, let\u0026rsquo;s create a static (i.e. SGD cannot change it) input and output vector pair which we will pass into our operation:\nmy_input = torch.tensor([1.,2.,3.]) my_desired_output = torch.tensor([1.,2.,3.,4.,5.]) my_input,my_desired_output (tensor([1., 2., 3.]), tensor([1., 2., 3., 4., 5.])) We will pass our input through the my_network operation, and figure out what our inputs currently map to:\nmy_network_output = my_network(my_input) my_network_output tensor([-1.4672, -0.7089, -0.2645, -0.0598, 0.1239], grad_fn=\u0026lt;AddBackward0\u0026gt;) Ah, clearly not [1,2,3,4,5]. Recall we want these values to be the same as my_desired_output, which they aren\u0026rsquo;t right now. Let\u0026rsquo;s fix that.\nCan you guess what loss function we will use? \u0026hellip; That\u0026rsquo;s right, the same exact thing as before! Squaring the difference.\nloss = (my_network_output-my_desired_output)**2 loss tensor([ 6.0869, 7.3380, 10.6571, 16.4821, 23.7766], grad_fn=\u0026lt;PowBackward0\u0026gt;) Waiiiit. There\u0026rsquo;s a problem. Remember, SGD can take a single latent value to \\(0\\). That\u0026rsquo;s a whole lotta latent values in a vector! Which one will it take to \\(0\\)? Stop to think about this for a bit: we want to take all of these values to \\(0\\), but we can take only a single value to \\(0\\) with SGD. 
How can we do it?\nTo do this, we just\u0026hellip; add the values up using the torch.sum function!\nloss = torch.sum((my_network_output-my_desired_output)**2) loss tensor(64.3406, grad_fn=\u0026lt;SumBackward0\u0026gt;) Nice. We now have something to optimize against, let\u0026rsquo;s actually create our optimizer! Remember that, instead of passing in every single parameter we want PyTorch to change manually, we just pass in my_network.parameters() and PyTorch will scan for every single parameter that lives in MyNetwork and give it all to SGD:\nmy_sgd = SGD(my_network.parameters(), lr=1e-6) my_sgd SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 1e-06 maximize: False momentum: 0 nesterov: False weight_decay: 0 ) Just for running this model, we are going to run our network with more steps (\\(50,000\\)), but with smaller step sizes (\\(1 \\times 10^{-6}\\)). We will not worry about it too much for now, and dive into discussing it further for network parameter tuning.\nSo, let\u0026rsquo;s make the actual training loop now that will take the latent variable named my_network_output, created by applying my_network on my_input, to take on the value of my_desired_output! Can you do it without looking? This will be almost the same as our first training loop, except we are asking our network to calculate the current latent output (instead of computing it from scratch each time.)\nfor _ in range(50000): # calculate new latent variable my_network_output = my_network(my_input) # calculate loss loss = torch.sum((my_network_output-my_desired_output)**2) # Backprop! loss.backward() # Optimize! my_sgd.step() # Reset! my_sgd.zero_grad() my_network(my_input) tensor([-0.9814, 0.4252, 1.8085, 2.7022, 3.5517], grad_fn=\u0026lt;AddBackward0\u0026gt;) Not great! But\u0026mdash;we are both ordered correctly and \u0026mdash; if you just kept running this loop, we will eventually converge (arrive at) the right answer! 
For kicks, let\u0026rsquo;s run it \\(50000\\) more times:\nfor _ in range(50000): # calculate new latent variable my_network_output = my_network(my_input) # calculate loss loss = torch.sum((my_network_output-my_desired_output)**2) # Backprop! loss.backward() # Optimize! my_sgd.step() # Reset! my_sgd.zero_grad() my_network(my_input) tensor([0.9975, 1.9986, 3.0006, 4.0026, 5.0052], grad_fn=\u0026lt;AddBackward0\u0026gt;) Would you look at that! What did I promise you :)\nYour network learned something! Specifically, the skill of mapping \\([1,2,3]\\) to \\([1,2,3,4,5]\\)! Congrats!\nChallenge Now that you know how to get the network to map a specific vector in three dimensions to a specific place in five dimensions, can you do that more generally? Can you generate and give your own network enough examples such that it will learn to do that for ALL vectors in three dimensions?\nSpecifically, generate a training set of in python and train your neural network now to perform the following operation:\nGiven a vector \\([a,b,c]\\), return \\([a,b,c,c+1,c+2]\\), for every integer \\([a,b,c]\\).\nHint: pass in many examples for correct behavior sequentially during each of your training loops, calculating loss and running the optimization step (i.e. back! optimize! reset!) after each example you give.\n","html":"\u003cp\u003eHello! Welcome to the series of guided code-along labs to introduce you to the basis of using the PyTorch library and its friends to create a neural network! 
We will dive deeply into Torch, focusing on how practically it can be used to build Neural Networks, as well as taking sideroads into how it works under the hood.\u003c/p\u003e\n\u003ch2 id=\"getting-started\"\u003eGetting Started\u003c/h2\u003e\n\u003cp\u003eTo get started, let\u0026rsquo;s open a \u003ca href=\"https://colab.research.google.com/\"\u003ecolab\u003c/a\u003e and import Torch!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe top line here import PyTorch generally, and the bottom line imports the Neural Network libraries. We will need both for today and into the future!\u003c/p\u003e\n\u003ch2 id=\"tensors-and-autograd\"\u003eTensors and AutoGrad\u003c/h2\u003e\n\u003cp\u003eThe most basic element we will be working with in Torch is something called a \u003cstrong\u003etensor\u003c/strong\u003e. A tensor is a \u003cstrong\u003evariable\u003c/strong\u003e, which holds either a single number (\u003cstrong\u003escalar\u003c/strong\u003e, or a single \u003cstrong\u003eneuron\u003c/strong\u003e) or a list of numbers (\u003cstrong\u003evector\u003c/strong\u003e, or a \u003cstrong\u003elayer\u003c/strong\u003e of neurons), that \u003cem\u003ecan change\u003c/em\u003e. 
We will see what that means in a sec.\u003c/p\u003e\n\u003ch3 id=\"your-first-tensors\"\u003eYour First Tensors\u003c/h3\u003e\n\u003cp\u003eEverything that you are going to put through to PyTorch needs to be in a tensor. Therefore, we will need to get good at making them! As we discussed, a tensor can hold an number (scalar), a list (vector) or a (matrix).\u003c/p\u003e\n\u003cp\u003eHere are a bunch of them!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003escalar_tensor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evector_tensor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ematrix_tensor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e7\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can perform operations on these tensors, like adding them together:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(7.3000)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eVector and Matrix tensors work like NumPy arrays. You can add them pairwise:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e7\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[ 3, 3, 5],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 4, 10, 8]])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"connecting-tensors\"\u003eConnecting Tensors\u003c/h3\u003e\n\u003cp\u003eA single number can\u0026rsquo;t be a neural network! 
([citation needed]) So, to be able to actually build networks, we have to connect tensors together.\u003c/p\u003e\n\u003cp\u003eSo, let\u0026rsquo;s create two tensors, each holding a neuron, and connect them together!\u003c/p\u003e\n\u003cp\u003eHere are two lovely scalar tensors:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3., requires_grad=True), tensor(4., requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe initialized two numbers, \u003ccode\u003e3\u003c/code\u003e, which we named \u003ccode\u003evar_1\u003c/code\u003e, and \u003ccode\u003e4\u003c/code\u003e, which we named \u003ccode\u003evar_2\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eThe value \u003ccode\u003erequires_grad\u003c/code\u003e here tells PyTorch that these values can change, which we need it to do\u0026hellip; very shortly!\u003c/p\u003e\n\u003cp\u003eFirst, though, let\u0026rsquo;s create a \u003cstrong\u003elatent\u003c/strong\u003e variable. A \u0026ldquo;latent\u0026rdquo; value is a value that is the \u003cem\u003eresult\u003c/em\u003e of operations on other non-latent tensors\u0026mdash;connecting the activation of some neurons together with a new one. 
For instance, if I multiplied our two tensors together, we can create our very own latent tensor.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, \\(3 \\cdot 4 = 12\\).\u003c/p\u003e\n\u003ch3 id=\"autograd\"\u003eAutograd\u003c/h3\u003e\n\u003cp\u003eNow! The beauty of PyTorch is that we can tell it to set any particular latent variable to \\(0\\) (Why only \\(0\\), and \\(0\\) specifically? 
Calculus; turns out this limitation doesn\u0026rsquo;t matter at all, as we will see), and it can update all of its constituent tensors with \u003ccode\u003erequired_grad\u003c/code\u003e \u0026ldquo;True\u0026rdquo; such that the latent variable we told PyTorch to set to \\(0\\) indeed becomes \\(0\\)!\u003c/p\u003e\n\u003cp\u003eThis process is called \u0026ldquo;automatic gradient calculation\u0026rdquo; and \u0026ldquo;backpropagation.\u0026rdquo; (Big asterisks throughout, but bear with us. Find Matt/Jack if you want more.)\u003c/p\u003e\n\u003cp\u003eTo do this, we will leverage the help of a special optimization algorithm called \u003cstrong\u003estochastic gradient descent\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get a box of this stuff first:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.optim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;class 
\u0026#39;torch.optim.sgd.SGD\u0026#39;\u0026gt;\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. By the way, from the \u003ccode\u003etorch.optim\u003c/code\u003e package, there\u0026rsquo;s tonnes (like at least 20) different \u0026ldquo;optimizer\u0026rdquo; algorithms that all do the same thing (\u0026ldquo;take this latent variable to \\(0\\) by updating its constituents\u0026rdquo;) but do them in important different ways. We will explore some of them through this semester, and others you can Google for yourself by looking up \u0026ldquo;PyTorch optimizers\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eOk, to get this SGD thing up and spinning, we have to tell it every tensor it gets to play with in a list. For us, let\u0026rsquo;s ask PyTorch SGD to update \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e such that \u003ccode\u003emy_latent_value\u003c/code\u003e (which, remember, is var1 times var2) becomes a new value.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003elearning rate\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eNow, if you recall the neural network simulation, our model does not reach the desired outcome immediately. It does so in \u003cem\u003esteps\u003c/em\u003e. 
The size of these steps are called the \u003cstrong\u003elearning rate\u003c/strong\u003e; the LARGER these steps are, the quicker you will get \u003cem\u003eclose\u003c/em\u003e to your desired solution, but where you end up getting maybe farther away from the actual solution; and vise versa.\u003c/p\u003e\n\u003cp\u003eThink about the learning rate as a hoppy frog: a frog that can hop a yard at a time (\u0026ldquo;high learning rate\u0026rdquo;) can probably hit a target a mile away much quicker, but will have a hard time actually hitting the foot-wide target precisely; a frog that can hop an inch at a time (\u0026ldquo;low learning rate\u0026rdquo;) can probably hit a target a mile away\u0026hellip;. years from now, but will definitely be precisely hitting the foot-wide target when it finally gets there.\u003c/p\u003e\n\u003cp\u003eSo what does \u0026ldquo;high\u0026rdquo; and \u0026ldquo;low\u0026rdquo; mean? Usually, we adjust learning rate by considering the number of decimal places it has. \\(1\\) is considered a high learning rate, \\(1 \\times 10^{-3} = 0.001\\) as medium-ish learning rate, and \\(1 \\times 10^{-5}=0.00001\\) as a small one. There are, however, no hard and fast rules about this and it is subjcet to experimentation.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eSo, choose also an appropriate \u003cstrong\u003elearning rate\u003c/strong\u003e for our optimizer. I would usually start with \\(3 \\times 10^{-3}\\) and go from there. 
In Python, we write that as \u003ccode\u003e3e-3\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eSo, let\u0026rsquo;s make a SGD, and give it \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e to play with, and set the learning rate to \u003ccode\u003e3e-3\u003c/code\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3e-3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 0.003\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful. Don\u0026rsquo;t worry much about how many of these means for now; however, we will see it in action shortly.\u003c/p\u003e\n\u003cp\u003eNow! 
Recall that we allowed \u003ccode\u003emy_sgd\u003c/code\u003e to mess with \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e to change the value of \u003ccode\u003emy_latent_value\u003c/code\u003e (the product of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e).\u003c/p\u003e\n\u003cp\u003eCurrent, \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e carries the values of:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3., requires_grad=True), tensor(4., requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, of course, their product \u003ccode\u003emy_latent_value\u003c/code\u003e carries the value of:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhat if we want \u003ccode\u003emy_latent_value\u003c/code\u003e to be\u0026hellip; \\(15\\)? That sounds like a good number. Let\u0026rsquo;s ask our SGD algorithm to update \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e such that \u003ccode\u003emy_latent_value\u003c/code\u003e will be \\(15\\)!\u003c/p\u003e\n\u003cp\u003eWaaait. I mentioned that the optimizers can only take things to \\(0\\). How could it take \u003ccode\u003emy_latent_value\u003c/code\u003e to \\(15\\) then? Recall! I said SGD takes \u003cem\u003ea\u003c/em\u003e latent variable to \\(0\\). So, we can just build another latent variable such that, when \u003ccode\u003emy_latent_value\u003c/code\u003e is \\(15\\), our new latent variable will be \\(0\\), and then ask SGD optimize on that!\u003c/p\u003e\n\u003cp\u003eWhat could that be\u0026hellip; Well, the \u003cem\u003esquared difference\u003c/em\u003e between \\(15\\) and \u003ccode\u003emy_latent_value\u003c/code\u003e is a good one. 
If \u003ccode\u003emy_latent_value\u003c/code\u003e is \\(15\\), the \u003cem\u003esquared difference\u003c/em\u003e between it and \\(15\\) will be \\(0\\), as desired!\u003c/p\u003e\n\u003cp\u003eSo, similar to what we explored last semester, we use \u003cstrong\u003esum of squared difference\u003c/strong\u003e as our \u003cstrong\u003eloss\u003c/strong\u003e because it will be able to account for errors of fit in both directions: a \\(-4\\) difference in predicted and actual output is just as bad as a \\(+4\\) difference.\u003c/p\u003e\n\u003cp\u003eTurns out, the \u0026ldquo;objective\u0026rdquo; for SGD optimization, the thing that we ask SGD to take to \\(0\\) on our behalf by updating the parameters we allowed it to update (again, they are \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e in our case here), is indeed the \u003cstrong\u003eloss\u003c/strong\u003e value of our model. \u003cstrong\u003eSum of squared errors\u003c/strong\u003e is, therefore, called our \u003cstrong\u003eloss function\u003c/strong\u003e for this toy problem.\u003c/p\u003e\n\u003cp\u003eSo let\u0026rsquo;s do it! 
Let\u0026rsquo;s create a tensor our loss:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e15\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(9., grad_fn=\u0026lt;PowBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice. So our loss is at \\(3\\) right now; when \u003ccode\u003emy_latent_value\u003c/code\u003e is correctly at \\(15\\), our loss will be at \\(0\\)! So, to get \u003ccode\u003emy_latent_value\u003c/code\u003e to \\(15\\), we will ask SGD to take \u003ccode\u003eloss\u003c/code\u003e to \\(0\\).\u003c/p\u003e\n\u003cp\u003eTo do this, there are three steps. 
\u003cstrong\u003eCOMMIT THIS TO MEMORY\u003c/strong\u003e, as it will be basis of literally everything else in the future.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBackpropagate: \u0026ldquo;please tell SGD to take this variable to \\(0\\), and mark the correct tensors to change\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eOptimize: \u0026ldquo;SGD, please update the marked tensors such that the variable I asked you to take to \\(0\\) is closer to \\(0\\)\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eReset: \u0026ldquo;SGD, please get ready for step 1 again by unmarking everything that you have changed\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAgain! Is it commited to memory yet?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBackprop\u003c/li\u003e\n\u003cli\u003eOptimize\u003c/li\u003e\n\u003cli\u003eReset\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI am stressing this here because a \u003cem\u003elot\u003c/em\u003e of people 1) miss one of these steps 2) do them out of order. Doing these in any other order will cause your desired result to not work. Why? 
Think about what each step does, and think about doing them out of order.\u003c/p\u003e\n\u003cp\u003eOne more time for good luck:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBackprop!\u003c/li\u003e\n\u003cli\u003eOptimize!\u003c/li\u003e\n\u003cli\u003eReset!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eLet\u0026rsquo;s do it.\u003c/p\u003e\n\u003ch4 id=\"backprop\"\u003eBackprop!\u003c/h4\u003e\n\u003cp\u003eBackpropergation marks the correct loss value to minimize (optimze towards being \\(0\\)), and marks all tensors with \u003ccode\u003erequires_grad\u003c/code\u003e set to True which make up the value of that loss value for update.\u003c/p\u003e\n\u003cp\u003eSecretly, this steps takes the \u003cstrong\u003epartial derivative\u003c/strong\u003e of our loss against each of the tensors we marked \u003ccode\u003erequires_grad\u003c/code\u003e, allowing SGD to \u0026ldquo;slide down the gradient\u0026rdquo; based on those partial derivatives. Don\u0026rsquo;t worry if you didn\u0026rsquo;t get that sentence.\u003c/p\u003e\n\u003cp\u003eTo do this, we call \u003ccode\u003e.backward()\u003c/code\u003e on the loss we want to take to \\(0\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis call will produce nothing. And that\u0026rsquo;s OK, because here comes\u0026hellip;\u003c/p\u003e\n\u003ch4 id=\"optimize\"\u003eOptimize!\u003c/h4\u003e\n\u003cp\u003eThe next step is tell SGD to update all of the tensors marked for update in the previous step to get \u003ccode\u003eloss\u003c/code\u003e closer to \\(0\\). To do this, we simply:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis call will produce nothing. But, if you check now, the tensors should updated.\u003c/p\u003e\n\u003cp\u003eAlthough\u0026hellip; You should\u0026rsquo;t check! 
Because we have one more step left:\u003c/p\u003e\n\u003ch4 id=\"reset\"\u003eReset!\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eI cannot stress this enough. People often stop at the previous step because \u0026ldquo;ooo look my tensors updated!!!\u0026rdquo; and forget to do this step. THIS IS BAD. We won\u0026rsquo;t go into why for now, but basically not resetting the update mark results in a tensor being updated twice, then thrice, etc. 
each time you call \u003ccode\u003e.step()\u003c/code\u003e, which will cause double-updates, which will cause you to overshoot (handwavy, but roughly), which is bad.\u003c/p\u003e\n\u003ch4 id=\"ooo-look-my-tensors-updated\"\u003eooo look my tensors updated!!!\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3.0720, requires_grad=True), tensor(4.0540, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWOAH! Look at that! Without us telling SGD, it figured out that \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e both need to be BIGGER for \u003ccode\u003emy_latent_value\u003c/code\u003e, the product of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e to change from \\(12\\) to \\(15\\). Yet, the product of \\(3.0720\\) and \\(4.0540\\) is hardly close to \\(15\\).\u003c/p\u003e\n\u003cp\u003eWhy? Because our step size. 
It was \u003cem\u003etiny!\u003c/em\u003e To get \u003ccode\u003emy_latent_value\u003c/code\u003e to be properly \\(15\\), we have to do the cycle of 1) calculating new latent value 2) calculating new loss 3) backprop, optimize, reset, a LOT of times.\u003c/p\u003e\n\u003ch3 id=\"now-do-that-a-lot-of-times-dot\"\u003eNow do that a lot of times.\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e15\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# BACKPROP!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# OPTIMIZE!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# RESET!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3.4505, requires_grad=True), tensor(4.3472, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWeird solution, but we got there! The product of these two values is indeed very close to \\(15\\)! Give yourself a pat on the back.\u003c/p\u003e\n\u003ch3 id=\"so-why-the-heck-are-we-doing-all-this\"\u003eSo why the heck are we doing all this\u003c/h3\u003e\n\u003cp\u003eSo why did we go through all the effort of like 25 lines of code to get two numbers to multiply to \\(15\\)? If you think about Neural Networks as a process of \u003cem\u003efunction fitting\u003c/em\u003e, we are essentially asking our very basic \u0026ldquo;network\u0026rdquo; (as indeed, the chain of tensors to build up to our latent value, then to compute our loss, \u003cem\u003eis\u003c/em\u003e a network!) to achieve a measurable task (\u0026ldquo;take the product of these numbers to \\(15\\)\u0026rdquo;). Though the relationships we will be modeling in this class will be more complex than literal multiplication, it will be just using more fancy mechanics of doing the same thing\u0026mdash;taking tensors values which was undesirable, and moving them to more desirable values to model our relationship.\u003c/p\u003e\n\u003ch2 id=\"y-mx-plus-b-and-your-first-neural-network-module\"\u003ey=mx+b and your first neural network \u0026ldquo;module\u0026rdquo;\u003c/h2\u003e\n\u003ch3 id=\"nn-dot-linear\"\u003e\u003ccode\u003enn.Linear\u003c/code\u003e\u003c/h3\u003e\n\u003cp\u003eThe power of neural networks actually comes when a BUNCH of numbers gets multiplied together, all at once! using\u0026hellip; VECTORS and MATRICIES! Don\u0026rsquo;t remember what they are? 
Ask your friendly neighborhood Matt/Jack.\u003c/p\u003e\n\u003cp\u003eRecall, a \u003cstrong\u003ematrix\u003c/strong\u003e is how you can transform a \u003cstrong\u003evector\u003c/strong\u003e from one space to another. Turns out, the brunt of everything you will be doing involves asking SGD to move a bunch of matricies around (like we did before!) such that our input vector(s) gets mapped to the right place.\u003c/p\u003e\n\u003cp\u003eA \u003cstrong\u003ematrix\u003c/strong\u003e, in neural network world, is referred to as a \u003cstrong\u003elinear layer\u003c/strong\u003e. It holds a whole \u003cem\u003eseries\u003c/em\u003e of neurons, taking every single value of the input into account to producing a whole set of output. Because of this property, it is considered a \u003cstrong\u003efully connected layer\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s create such a fully-connected layer (matrix) in PyTorch! When you ask PyTorch to make a matrix for you, you use the \u003ccode\u003enn\u003c/code\u003e sublibrary which we imported before. 
Furthermore, and this is confusing for many people who have worked with matricies before, you specify the \u003cstrong\u003einput dimension first\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_matrix_var_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_matrix_var_1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLinear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ccode\u003emy_matrix_var_1\u003c/code\u003e is a linear map from three dimensions to two dimensions; it will take a vector of three things as input and spit out a vector of two.\u003c/p\u003e\n\u003cp\u003eNote! 
Although \u003ccode\u003emy_matrix_var_1\u003c/code\u003e \u003cem\u003eis\u003c/em\u003e a tensor under the hood just like \u003ccode\u003evar_1\u003c/code\u003e, we 1) didn\u0026rsquo;t have to set default values for it 2) didn\u0026rsquo;t have to mark it as \u003ccode\u003erequires_grad\u003c/code\u003e. This is because, unlike a raw Tensor which often does not require to be changed (such as, for instance, the input value, which you can\u0026rsquo;t change), a matrix is basically ALWAYS a tensor that encodes the \u003cstrong\u003eweights\u003c/strong\u003e of a model we are working with\u0026mdash;so it is always going to be something that we will ask SGD to change on our behalf.\u003c/p\u003e\n\u003cp\u003eSo, since you are asking SGD to change it anyways, PyTorch just filled a bunch of random numbers in for you and set \u003ccode\u003erequires_grad\u003c/code\u003e on for you to \u003ccode\u003emy_matrix_var_1\u003c/code\u003e. If you want to see the actual underlying tensor, you can:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_matrix_var_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eweight\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter containing:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[-0.2634, 0.3729, 
0.5019],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.2796, 0.5425, -0.4337]], requires_grad=True)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAs you can see, we have indeed what we expect: a tensor containing a \\(2\\times 3\\) matrix with \u003ccode\u003erequires_grad\u003c/code\u003e on filled with random values.\u003c/p\u003e\n\u003cp\u003eHow do we actually optimize over this tensor? You can do all the shenanigans we did before and pass \u003ccode\u003emy_matrix_var_1\u003c/code\u003e to SGD, but this will \u003cem\u003equickly\u003c/em\u003e get unwieldy as you have more parameters. Remember how we had to give SVG a list of EVERYTHING it had to keep track of? \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e was simple enough, but what if we had to do \u003ccode\u003evar_1.weight\u003c/code\u003e, \u003ccode\u003evar_2.weight\u003c/code\u003e, \u003ccode\u003evar_3.weight\u003c/code\u003e\u0026hellip; \u0026hellip; \u0026hellip; \u003cem\u003ead nausium\u003c/em\u003e for every parameter we use on our large graph? GPT3 has 1.5 billion parameters. Do you really want to type that?\u003c/p\u003e\n\u003cp\u003eNo.\u003c/p\u003e\n\u003cp\u003eThere is, of course, a better way.\u003c/p\u003e\n\u003ch3 id=\"nn-dot-module\"\u003e\u003ccode\u003enn.Module\u003c/code\u003e\u003c/h3\u003e\n\u003cp\u003eThis, by the way, is the standard of how a Neural Network is properly built from now on until the industry moves on from PyTorch. 
You will want to remember this.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s replicate the example of our previous 3=\u0026gt;2 dimensional linear map, but with a whole lot more code.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eModule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# important: runs early calls to make sure that\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is correct\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esuper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we declare our layers. we will use them below\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a special function that you don\u0026#39;t actually call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# manually, but as you use this module Torch will call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# on your behalf. 
It passes the input through to the layers\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# of your network.\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eforward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to pass whatever input we get, named x\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# through to every layer. 
right now there is only\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# one fully-connected layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhat this does, behind the scenes, is to wrap our matrix and all of its parameters into one giant \u003cstrong\u003emodule\u003c/strong\u003e. (NOTE! This is PyTorch-specific language. Unlike all other vocab before, this term is specific to PyTorch.) A module is an operation on tensors which can retain gradients (i.e. it can change, i.e. \u003ccode\u003erequires_grad=True\u003c/code\u003e).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s see it in action. Recall that our matrix takes a vector of 3 things as input, and spits out a vector of 2 things. 
So let\u0026rsquo;s make a vector of three things:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ethree_vector\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ethree_vector\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([1., 2., 3.])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBy the way, notice the period I\u0026rsquo;m putting after numbers here? That\u0026rsquo;s a shorthand for \u003ccode\u003e.0\u003c/code\u003e. So \u003ccode\u003e3.0 = 3.\u003c/code\u003e. 
I want to take this opportunity to remind you that the tensor operations all take FLOATING POINT tensors as input, because the matrices themselves as initialized with random floating points.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get an instance of the new \u003ccode\u003eMyNetwork\u003c/code\u003e module.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMyNetwork(\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m1): Linear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd apply this operation we designed to our three-vector!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ethree_vector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([0.3850, 1.4120], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWoah! It mapped our vector tensor in three dimensions to a vector tensor in two!\u003c/p\u003e\n\u003cp\u003eThe above code, by the way, is how we actually use our model to run \u003cstrong\u003epredictions\u003c/strong\u003e: \u003ccode\u003emy_network\u003c/code\u003e is \u003cem\u003etransforming\u003c/em\u003e the input vector to the desired output vector.\u003c/p\u003e\n\u003cp\u003eCool. This may not seem all that amazing to you\u0026hellip; yet. But, remember, we can encode \u003cem\u003eany number\u003c/em\u003e of matrix operations in our \u003ccode\u003eforward()\u003c/code\u003e function above. 
Let\u0026rsquo;s design another module that uses two matricies\u0026mdash;or two \u003cstrong\u003efully-connected layers\u003c/strong\u003e, or \u003cstrong\u003elayers\u003c/strong\u003e for short (when we don\u0026rsquo;t specify what kind of layer it is, it is fully connected)\u0026mdash;to perform a transformation.\u003c/p\u003e\n\u003cp\u003eWe will transform a vector from 3 dimensions to 2 dimensions, then from 2 dimensions to 5 dimensions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eModule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# important: runs early calls to make sure that\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is correct\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esuper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we declare our layers. we will use them below\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a special function that you don\u0026#39;t actually call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# manually, but as you use this module Torch will call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# on your behalf. It passes the input through to the layers\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# of your network.\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eforward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to pass whatever input we get, named x\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# through to every layer. 
right now there is only\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# one fully-connected layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOf course, this network topology is kind of randomly tossed into the network.\u003c/p\u003e\n\u003cp\u003eDoing everything else we did before again, we should end up a vector in 5 dimensions, having been transformed twice behind the scenes!\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMyNetwork(\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m1): Linear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m2): Linear(in_features=2, out_features=5, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd apply this operation we designed to our three-vector!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ethree_vector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([ 0.8241, -0.1014, 0.2940, -0.2019, 0.6749], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice.\u003c/p\u003e\n\u003cp\u003eAnd here\u0026rsquo;s the magical thing: when we are asking SGD to optimize this network, instead of needing to pass every darn parameter used in this network into SVG, we can just pass in:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;generator object Module.parameters at 0x115214270\u0026gt;\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis is actually a list of every single 
\u003ccode\u003etensor\u003c/code\u003e that has \u003ccode\u003erequires_grad=True\u003c/code\u003e that we secretly created. No more typing out a list of every parameter to SGD like we did with \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e! We will see this in action shortly.\u003c/p\u003e\n\u003ch3 id=\"how-to-train-your-neural-network\"\u003eHow to Train Your \u003cdel\u003eDragon\u003c/del\u003e Neural Network\u003c/h3\u003e\n\u003cp\u003eNote, the \u003ccode\u003eMyNetwork\u003c/code\u003e transformation is currently kind of useless. We know it maps the vector \u003ccode\u003e[1,2,3]\u003c/code\u003e to some arbitrary numbers above (i.e. \u003ccode\u003e0.8241\u003c/code\u003e an such). That\u0026rsquo;s quite lame.\u003c/p\u003e\n\u003cp\u003eWe want our network to model some relationship between numbers, that\u0026rsquo;s why we are here. Let\u0026rsquo;s, arbitrarily and for fun, ask SGD to update \u003ccode\u003emy_network\u003c/code\u003e such that it will return \u003ccode\u003e[1,2,3,4,5]\u003c/code\u003e given \u003ccode\u003e[1,2,3]\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eBy the way, from here on, I will use \u003ccode\u003eMyNetwork\u003c/code\u003e to refer to the model 3=\u0026gt;2=\u0026gt;5 network we made above generally, and \u003ccode\u003emy_network\u003c/code\u003e the specific \u003cem\u003einstantiation\u003c/em\u003e of \u003ccode\u003eMyNetwork\u003c/code\u003e whose parameters we will ask SGD to update.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get a clean copy of \u003ccode\u003eMyNetwork\u003c/code\u003e first:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMyNetwork(\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m1): Linear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m2): Linear(in_features=2, out_features=5, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, let\u0026rsquo;s create a \u003cem\u003estatic\u003c/em\u003e (i.e. 
SGD cannot change it) input and output vector pair which we will pass into our operation:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor([1., 2., 3.]), tensor([1., 2., 3., 4., 5.]))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will pass our input through the \u003ccode\u003emy_network\u003c/code\u003e operation, and figure out what our inputs currently map to:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([-1.4672, -0.7089, -0.2645, -0.0598, 0.1239], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAh, clearly not \u003ccode\u003e[1,2,3,4,5]\u003c/code\u003e. Recall we want these values to be the same as \u003ccode\u003emy_output\u003c/code\u003e, which they isn\u0026rsquo;t doing right now. Let\u0026rsquo;s fix that.\u003c/p\u003e\n\u003cp\u003eCan you guess what loss function we will use? \u0026hellip; That\u0026rsquo;s right, the same exact thing as before! Squaring the difference.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eloss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([ 6.0869, 7.3380, 10.6571, 16.4821, 23.7766], grad_fn=\u0026lt;PowBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWaiiiit. There\u0026rsquo;s a problem. Remember, SGD can take a single latent value to \\(0\\). That\u0026rsquo;s a whole lotta latent values in a vector! Which one will it take to \\(0\\)? Stop to think about this for a bit: we \u003cem\u003ewant\u003c/em\u003e to take all of these values to \\(0\\), but we can take only a single value to \\(0\\) with SGD. How can we do it?\u003c/p\u003e\n\u003cp\u003eTo do this, we just\u0026hellip; add the values up using the \u003ccode\u003etorch.sum\u003c/code\u003e function!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(64.3406, grad_fn=\u0026lt;SumBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice. We now have something to optimize against, let\u0026rsquo;s actually create our optimizer! Remember that, instead of passing in every single parameter we want PyTorch to change manually, we just pass in \u003ccode\u003emy_network.parameters()\u003c/code\u003e and PyTorch will scan for every single parameter that lives in \u003ccode\u003eMyNetwork\u003c/code\u003e and give it all to SGD:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 1e-06\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eJust for running this model, we are going to run our 
network with more steps (\\(50,000\\)), but with smaller step sizes (\\(1 \\times 10^{-6}\\)). We will not worry about it too much for now, and dive into discussing it further for network parameter tuning.\u003c/p\u003e\n\u003cp\u003eSo, let\u0026rsquo;s make the actual training loop now that will take the latent variable named \u003ccode\u003emy_network_output\u003c/code\u003e, created by applying \u003ccode\u003emy_network\u003c/code\u003e on \u003ccode\u003emy_input\u003c/code\u003e, to take on the value of \u003ccode\u003emy_desired_output\u003c/code\u003e! Can you do it without looking? This will be \u003cem\u003ealmost\u003c/em\u003e the same as our first training loop, except we are asking our network to calculate the current latent output (instead of computing it from scratch each time.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e50000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new latent variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Backprop!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Optimize!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Reset!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([-0.9814, 0.4252, 1.8085, 2.7022, 3.5517], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNot great! But\u0026mdash;we are both \u003cem\u003eordered\u003c/em\u003e correctly and \u0026mdash; if you just kept running this loop, we will eventually \u003cstrong\u003econverge\u003c/strong\u003e (arrive at) the right answer! 
For kicks, let\u0026rsquo;s run it \\(50000\\) more times:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e50000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new latent variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Backprop!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Optimize!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Reset!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([0.9975, 1.9986, 3.0006, 4.0026, 5.0052], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWould you look at that! What did I promise you :)\u003c/p\u003e\n\u003cp\u003eYour network \u003cem\u003elearned\u003c/em\u003e something! Specifically, the skill of mapping \\([1,2,3]\\) to \\([1,2,3,4,5]\\)! Congrats!\u003c/p\u003e\n\u003ch2 id=\"challenge\"\u003eChallenge\u003c/h2\u003e\n\u003cp\u003eNow that you know how to get the network to map a specific vector in three dimensions to a specific place in five dimensions, can you do that more generally? 
Can you generate and give your own network enough examples such that it will learn to do that for ALL vectors in three dimensions?\u003c/p\u003e\n\u003cp\u003eSpecifically, generate a training set of in python and train your neural network now to perform the following operation:\u003c/p\u003e\n\u003cp\u003eGiven a vector \\([a,b,c]\\), return \\([a,b,c,c+1,c+2]\\), for every integer \\([a,b,c]\\).\u003c/p\u003e\n\u003cp\u003eHint: pass in many examples for correct behavior sequentially during each of your training loops, calculating loss and running the \u003cstrong\u003eoptimization step\u003c/strong\u003e (i.e. back! optimize! reset!) after each example you give.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_dipping_into_pytorch/","tags":["writing","aml"],"title":"AML: Dipping into PyTorch"},{"categories":null,"contents":"You are no doubt familiar with the Iris dataset: a dataset containing flower pedal shapes and their corresponding sub-type of Iris flower: Setosa, Versicolour, and Virginica.\nWe are going to take those pedal measurements, and predict the type of Iris we are looking at!\nLet\u0026rsquo;s get the Iris dataset first. Turns out, Scikit Learn (your old friend from last semester) ships a copy of the Iris dataset with itself. So, we will load the dataset from it.\nLet\u0026rsquo;s first import what we need:\nimport torch import torch.nn as nn import sklearn from sklearn import datasets import pandas as pd Excellent. To load the built-in Iris dataset from sklearn, we can:\n# load iris iris = datasets.load_iris() # put input features into a dataframe df = pd.DataFrame(data=iris.data, columns=iris.feature_names) # add targets column from iris data df[\u0026#34;target\u0026#34;] = iris.target df sepal length (cm) sepal width (cm) ... petal width (cm) target 0 5.1 3.5 ... 0.2 0 1 4.9 3.0 ... 0.2 0 2 4.7 3.2 ... 0.2 0 3 4.6 3.1 ... 0.2 0 4 5.0 3.6 ... 0.2 0 .. ... ... ... ... ... 145 6.7 3.0 ... 2.3 2 146 6.3 2.5 ... 
1.9 2 147 6.5 3.0 ... 2.0 2 148 6.2 3.4 ... 2.3 2 149 5.9 3.0 ... 1.8 2 [150 rows x 5 columns] You can imagine that this dataset could have been loaded from a CSV, etc.\nJust to recap, here are the columns of this dataset:\ndf.columns Index([\u0026#39;sepal length (cm)\u0026#39;, \u0026#39;sepal width (cm)\u0026#39;, \u0026#39;petal length (cm)\u0026#39;, \u0026#39;petal width (cm)\u0026#39;, \u0026#39;target\u0026#39;], dtype=\u0026#39;object\u0026#39;) Now, pause. Let\u0026rsquo;s think about two questions from last semester:\nWhat type of ML problem is this? (Classification? Regression? Clustering?) Before any engineering: How many input features are there? How many output features? \u0026hellip;\n\u0026hellip;\nWhat type of ML problem is this? Classification Before any engineering: 4 input features, 1 output feature Awesome. Let\u0026rsquo;s inspect this dataset again:\ndf sepal length (cm) sepal width (cm) ... petal width (cm) target 0 5.1 3.5 ... 0.2 0 1 4.9 3.0 ... 0.2 0 2 4.7 3.2 ... 0.2 0 3 4.6 3.1 ... 0.2 0 4 5.0 3.6 ... 0.2 0 .. ... ... ... ... ... 145 6.7 3.0 ... 2.3 2 146 6.3 2.5 ... 1.9 2 147 6.5 3.0 ... 2.0 2 148 6.2 3.4 ... 2.3 2 149 5.9 3.0 ... 1.8 2 [150 rows x 5 columns] You will notice that the targets are not shuffled. If we fit this into our neural network, it will overfit\u0026mdash;memorize output without generalization\u0026mdash;to one target, then to another, etc.\nSo first, let\u0026rsquo;s shuffle this table. To do so, we will simply ask Pandas to resample \\(100\\%\\) of the dataset; it will do this sampling randomly:\ndf = df.sample(frac=1) df sepal length (cm) sepal width (cm) ... petal width (cm) target 49 5.0 3.3 ... 0.2 0 93 5.0 2.3 ... 1.0 1 50 7.0 3.2 ... 1.4 1 145 6.7 3.0 ... 2.3 2 14 5.8 4.0 ... 0.2 0 .. ... ... ... ... ... 48 5.3 3.7 ... 0.2 0 91 6.1 3.0 ... 1.4 1 45 4.8 3.0 ... 0.3 0 131 7.9 3.8 ... 2.0 2 5 5.4 3.9 ... 0.4 0 [150 rows x 5 columns] You will note, however, that the indicies are reshuffled as well! 
This is actually Pandas being helpful\u0026mdash;allowing us to unshuffle the dataset if needed. But, we actually have no need to do this.\n","html":"\u003cp\u003eYou are no doubt familiar with the Iris dataset: a dataset containing flower pedal shapes and their corresponding sub-type of Iris flower: Setosa, Versicolour, and Virginica.\u003c/p\u003e\n\u003cp\u003eWe are going to take those pedal measurements, and predict the type of Iris we are looking at!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get the Iris dataset first. Turns out, Scikit Learn (your old friend from last semester) ships a copy of the Iris dataset with itself. So, we will load the dataset from it.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first import what we need:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003esklearn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatasets\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. To load the built-in Iris dataset from sklearn, we can:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# load iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatasets\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# put input features into a dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efeature_names\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# add targets column from iris data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) ... petal width (cm) target\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 ... 1.9 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 ... 2.0 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 ... 
1.8 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can imagine that this dataset could have been loaded from a CSV, etc.\u003c/p\u003e\n\u003cp\u003eJust to recap, here are the columns of this dataset:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eIndex([\u0026#39;sepal length (cm)\u0026#39;, \u0026#39;sepal width (cm)\u0026#39;, \u0026#39;petal length (cm)\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;petal width (cm)\u0026#39;, \u0026#39;target\u0026#39;],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dtype=\u0026#39;object\u0026#39;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, pause. Let\u0026rsquo;s think about two questions from last semester:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eWhat type of ML problem is this? (Classification? Regression? 
Clustering?)\u003c/li\u003e\n\u003cli\u003eBefore any engineering: How many \u003cstrong\u003einput\u003c/strong\u003e features are there? How many \u003cstrong\u003eoutput\u003c/strong\u003e features?\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eWhat type of ML problem is this? \u003cem\u003eClassification\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eBefore any engineering: \u003cem\u003e4 input features\u003c/em\u003e, \u003cem\u003e1 output feature\u003c/em\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAwesome. Let\u0026rsquo;s inspect this dataset again:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) ... petal width (cm) target\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 ... 
0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 ... 1.9 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 ... 2.0 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 ... 1.8 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou will notice that the targets are \u003cem\u003enot shuffled\u003c/em\u003e. If we fit this into our neural network, it will \u003cstrong\u003eoverfit\u003c/strong\u003e\u0026mdash;memorize output without generalization\u0026mdash;to one target, then to another, etc.\u003c/p\u003e\n\u003cp\u003eSo first, let\u0026rsquo;s shuffle this table. 
To do so, we will simply ask Pandas to resample \\(100\\%\\) of the dataset; it will do this sampling randomly:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esample\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efrac\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) ... petal width (cm) target\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e49 5.0 3.3 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e93 5.0 2.3 ... 1.0 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e50 7.0 3.2 ... 1.4 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 ... 
2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e14 5.8 4.0 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e48 5.3 3.7 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e91 6.1 3.0 ... 1.4 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e45 4.8 3.0 ... 0.3 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e131 7.9 3.8 ... 2.0 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 5.4 3.9 ... 0.4 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou will note, however, that the indicies are reshuffled as well! This is actually Pandas being helpful\u0026mdash;allowing us to unshuffle the dataset if needed. But, we actually have no need to do this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_iris_strikes_bath/","tags":null,"title":"AML: Iris Strikes Back"},{"categories":null,"contents":"Hello everyone! It\u0026rsquo;s April, which means we are ready again for a new unit. Let\u0026rsquo;s dive in.\nYou know what\u0026rsquo;s better than one neural network? TWO!!! Multi-modal approaches\u0026mdash;making two neural networks interact for a certain result\u0026mdash;dominate many of the current edge of neural network research. 
In this unit, we are going to introduce one such approach, Generative Adversarial Networks (GAN), but leave you with some food for thought for other possibilities for what training multiple networks together can do.\nBe aware that this unit will begin our more theory-focused discussions, and will leave more of the implementation up to your own explorations or a later fuller example. If you don\u0026rsquo;t understand the math or the theory, please do flag us down in class or out to get things clarified.\nMotivation Although we will provide motivations for the architecture of a GAN in a bit, let\u0026rsquo;s first provide a problem to ground ourselves.\nSay we want to build a neural network to generate pictures of mountain goats. How would you do that?\nYou can\u0026rsquo;t build a supervised model exactly: what\u0026rsquo;s the input, and what are the labels? No clear answer. Even if you have labels, you\u0026rsquo;d have infinitely many possible such mountain goats; how do you generate labels for all of those?\nTo help us in solving this problem, let us make a few related claims that may seem unmotivated for now:\nIt is easy to find images of mountain goats [citation needed] It is eas(ier) to train a model to classify if an image is a mountain goat or not It is easy to generate random noise We want more pictures of mountain goats because they are cool It maybe unclear how 1-3 help us solve the mountain-goat generation problem; to explain why they are all crucial, we have to first understand some hand wavy game theory.\n(Better) Motivation It\u0026rsquo;s storytime!\nAl Capone and Eliot Ness are playing a game. Al is trying to create counterfeit Franklins, and Eliot is trying to catch them out of circulation.\nAl first uses his HP Inkjet printer to print the currency. Eliot quickly wises up and uses a microscope to observe whether or not a piece of money in question is printed by ink or via color pigmented paper. 
Not wishing to foil his plan, Al asks his lab to develop new color pigmentation technology\u0026mdash;just like the US currency does!\nYet, Eliot outsmarts Al again\u0026mdash;he uses a spectrophotometer to analyze whether or not the money in question is made using paper or on cotton like the actual US currency. Seeing this, an angry Al purchases a tonne of cotton and starts printing his counterfeits on cotton.\nWanting to satisfy Jack\u0026rsquo;s uselessly long analogy, Doctor Strange comes and freezes time for everyone except Al and Eliot (and their respective teams). As the true US currency technology remains the same, Eliot and Al continue to play this game: both developing better technologies to make or catch counterfeits.\nAfter a billion years, Doctor Strange gets bored and looked into his frozen world. What does he see?\nAl Capone built an exact replica of the US Mint.\nWhy? Each time Al gets caught out by Eliot, Al learns one more aspect of how his counterfeit differs from actual US currency. In effect, he\u0026rsquo;s also learning one new detail of how the US currency is made. 
Therefore, if he keeps patching these tiny differences that Eliot helpfully pointed out for him for the span of a billion years, what Al will be producing will eventually be indistinguishable from US dollars as Eliot will be out of things to point out!\nAt this point, the Capone-Ness system has reached what we call Nash equilibrium: neither Eliot nor Al have a better move to make\u0026mdash;Eliot no longer has anything more he can possibly do to catch counterfeits as Al\u0026rsquo;s money is identical to US currency, and Al can no longer change his formula for money-making as any deviation will create another factor Eliot can latch onto.\nGANs A Generative Adversarial Network (GAN) is a multi-modal generation model.\nIt is made out of two interacting neural networks:\ngenerator \\(G(x)\\): Al Capone discriminator \\(D(x)\\): Eliot Ness Specifically, the generator is an unsupervised model trained on the task of generating the targets (\u0026ldquo;images of mountain goats\u0026rdquo;) from random noise, while the discriminator is a self-supervised model trained on the task of classifying whether or not something is actually the target (\u0026ldquo;actual images of mountain goats\u0026rdquo;) or the output of the generator.\nThe two models converge in tandem, in a similar fashion to the story discribed above.\nDiscriminator \\(D(x)\\) The discriminator \\(D(x)\\) is perhaps the more easily understandable model out of the two. 
It is a self-supervised model designed with the task of discriminating whether or not a particular input came from the actual world (\u0026ldquo;goat images\u0026rdquo;) or was the output of the generator.\nSpecifically, the discriminator is a neural network with any middle layers you\u0026rsquo;d like that takes the output of the generator or real images as input, and produces a single sigmoid activated feature (between 0-1) where \\(0\\) represents \u0026ldquo;definitely produced by generator\u0026rdquo; and \\(1\\) represents \u0026ldquo;definitely real world.\u0026rdquo;\nGenerator \\(G(x)\\) The generator \\(G(x)\\) is a model that takes a random tensor as input and attempts to produce a generated sample (\u0026ldquo;a picture of a goat\u0026rdquo;). As with the discriminator, it can have any middle layers you\u0026rsquo;d like but has to produce a tensor with the same shape and activation of an actual sample. For instance, if you are trying to produce images, the output of your generator has to be of shape \\((channels, x, y)\\) activated with sigmoid for brightness; if you are trying to produce single scalars, then the generator has to produce only value, etc.\nIt is perhaps very mystifying how we would ever build a magical box that takes a random tensor and turn it into a pretend image; looking at the loss functions (i.e. training objectives) of these two networks may perhaps help clarify this.\nLoss Functions Before we begin, I want to quickly reiterate something which will be crucial to your mental framework of the loss functions: THEY ARE NOT METRICS. 
The value of the loss functions\u0026mdash;especially these ones\u0026mdash;are now completely devoid of physical meaning; instead, the trend of the loss functions (\u0026ldquo;value goes down means model is doing better\u0026rdquo;) is what matters.\nWe are introducing the simplest form of GAN loss functions by Goodfellow, et al called \u0026ldquo;non-saturating loss.\u0026rdquo; There are better ones, but these ones are mathematically elegant and works most of the time\u0026mdash;and are the \u0026ldquo;base case\u0026rdquo; loss functions which other models improve on.\nDiscriminator Loss \\begin{equation} L_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i}))) \\end{equation}\nwhere, \\(\\bold{x}_{i}\\) is a tensor representing a real sample (for instance, again, an actual grid of pixels for a mountain goat image), and \\(\\bold{z}_{i}\\) is a tensor containing random noise.\nWoof. This is quite a scary loss function; let\u0026rsquo;s break it up into pieces.\n\\(-\\log D(\\bold{x}_{i})\\): \\(\\bold{x}_{i}\\) is a real sample, so we expect \\(D\\) to produce \\(1\\). Any value below \\(1\\) (i.e. the discriminator thinking a real image is generated) will produce negative values of increasingly larger magnitude as \\(D(\\bold{x}_{i})\\) approaches \\(0\\). If the discriminator produces \\(1\\) correctly, \\(\\log 1 = 0\\) and we indeed have converged.\n\\(-\\log (1- D(G(\\bold{z}_{i})))\\): on the flip side, we expect the generator to consider the output of the generator (i.e. \\(D(G(\\bold{z}_{i}))\\)) to be generated and produce \\(0\\). Therefore, we expect the same scheme as before but flipped (\\(1-D(G(\\bold{z}_{i})\\))\u0026mdash;if \\(D(G(\\bold{z}))\\) produces \\(1\\) (\u0026ldquo;the discriminator is fooled\u0026rdquo;), \\(1-D(G(\\bold{z}))\\) will produce \\(0\\) and the loss will be very high. 
Vise versa: if \\(D(G(\\bold{z}))\\) produces \\(0\\) (\u0026ldquo;the discriminator picked out the fake\u0026rdquo;), the loss will be \\(0\\).\nAdding the two values encourages our discriminator to both classify real samples as real \\(1\\), and generated samples as fake \\(0\\).\nGenerator Loss \\begin{equation} L_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i}))) \\end{equation}\nThe sharp-eyed among you may realize that this is just the right term from the above expression without the \\(1-\\) negation. Indeed, the training target for the generator is very simple: \u0026ldquo;did I fool the discriminator\u0026rdquo;: if \\(D\\) produces a large (close to \\(1\\)) output on the generated result\u0026mdash;indicating that it is indeed \u0026ldquo;fooled\u0026rdquo;\u0026mdash;our \\(log\\) will approach \\(0\\); whereas, if \\(D\\) produces a small (close to \\(0\\)) output on the generated result\u0026mdash;indicating that it correctly spotted the fake\u0026mdash;our \\(log\\) will produce a very negative value which creates high loss.\nThe GAN Training Loop Loss functions in place, we are almost ready to make the model. The thing that\u0026rsquo;s tricky about training a GAN is that we have to ensure that both the discriminator and generator are converging at the same exact time: ensuring that neither Capone nor Ness has dramatically better technology than the other. This requires a little bit of finesse on your part in terms of the training loop. Plus, our loss functions here are quite special, so their definitions will also need a little wrangling.\nAt this point, though, I hope we are all pretty confident in how to structure the basics of a ML model. Instead of going over that again, let\u0026rsquo;s go over some of the differences in Python pseudo-code (code that doesn\u0026rsquo;t run, but to illustrate how you would write it)\u0026mdash;specially in four focus areas.\nDataprep Just a short note here on GAN data prep. 
What\u0026rsquo;s the special thing about GANs? They are self-supervised\u0026mdash;meaning they make their own labels. Instead, all you need to provide is plenty of examples of the thing you want your model to generate.\nAs such, your batch wouldn\u0026rsquo;t contain x_data, y_data, etc. Instead, your dataset code should look something of the flavor:\nimage_grid = example_data_for_the_gan_numpy() dataset = TensorDataset(torch.tensor(image_grid).float()) # only one argument! dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True) You will notice that the TensorDataset here took only one argument as input, as opposed to the usual 2: this is, as we discussed before, as product of the fact that our GAN only needs examples of the thing you want it to generate\u0026mdash;no labels needed (or possible!)\nNetwork Construction Of course, a GAN consists of two different networks. Though the network construction is mostly arbitrary, there are some general constraints:\ngenerator input shape: arbitrary, but takes exclusively random values as input; ideally you want this to be the same number of dimensions as the output output shape: the output shape of your network has to be the shape of one sample of the real data as the generator should generate something that looks like real data output activation: whatever makes sense for the real data: if probabilities, then softmax; if images, then sigmoid (as normalized brightness), etc. discriminator input shape: the output shape of the generator, or the shape of one real sample of data. (Thinking Break: WHY? as usual, pause and chat) output shape: (batch_size, 1). 
We want to output a scalar between \\(0\\) (\u0026ldquo;probably fake\u0026rdquo;) and \\(1\\) (\u0026ldquo;probably real\u0026rdquo;) for every sample output activation: sigmoid to get those values actually between \\(0\\) and \\(1\\) Network Initialization Because the generator and discriminator are two different networks, they require different optimizers!\nSo, we have to go about making them. This is fortunately pretty direct:\n# initialize networks gen = GeneratorNetwork() disc = DiscriminatorNetwork() # initalize *two seperate optimizers* gen_optim = Adam(gen.parameters(), lr=LR1) disc_optim = Adam(disc.parameters(), lr=LR2) Nothing out of the ordinary here, but a worthy reminder that you need 2. This will become important shortly.\nTraining Loop This is the main event, and probably the bit that most people trip up the most: the training loop. Let\u0026rsquo;s see a pseudocode implementation of one, and we will discuss how its structured.\nNote that we will be making some adjustments to our tried-and-true backprop logic.\nfor _ in range(EPOCHS): for batch in iter(dataloader): # train generator first disc_score = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE))) # compute + backprop generator loss generator_loss = (-torch.log(disc_score)) generator_loss.backward() # disconnect discriminator gradients disc_optim.zero_grad() # step and clear gen_optim.step() gen_optim.zero_grad() # now, train discriminator disc_score_false = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE)).detach()) disc_score_true = disc(batch) # compute + backprop discriminator loss discriminator_loss = (-torch.log(disc_score_true)-torch.log(1-disc_score_false)) discriminator_loss.backward() # step and clear disc_optim.step() disc_optim.zero_grad() Woweee. Much to talk about. 
Let\u0026rsquo;s break it down.\nScoring on fake sample We first generate a fake sample from the generator by first passing it random noise from torch.rand, then passing its output to the discriminator to get a group of scores.\ndisc_score = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE))) Calculating the generator loss Next up, we will calculate the generator loss on the score that the discriminator gave for that fake sample we generated earlier.\nRecall that:\n\\begin{equation} L_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i}))) \\end{equation}\nand hence:\ngenerator_loss = (-torch.log(disc_score)) Thinking break!: why does implementing (-torch.log(disc_score)) accomplish the same thing as taking \\(-\\log (D(G(\\bold{z}_{i})))\\)? Specifically, how is disc_score calculated in our example?\nThe generator backprop step For all that drilling we did of BACKPROP! STEP! RESET!, the next step may feel sacrilegious:\ngenerator_loss.backward() # disconnect discriminator gradients disc_optim.zero_grad() # step and clear gen_optim.step() gen_optim.zero_grad() What is happening here? Let\u0026rsquo;s take it one step at a time.\nFirst, we call generator_loss.backward() to backprop the loss; nothing wrong here. But then, against all odds, we call .zero_grad() on the discriminator optimizer. What gives?\nRecall that, in this case, we are training the generator; as the loss-function literally asks the discriminator to be wrong, we mustn\u0026rsquo;t be updating the discriminator using the gradients computed against this function; instead, we simply want the generator to be updated to better fool the discriminator.\nTherefore, we immediately zero out all the gradients on the discriminator to prevent this step from updating the discriminator with the \u0026ldquo;fooling\u0026rdquo; loss function; and proceed to update the generator weights as usual.\nScoring on detached fake sample and real sample Next up, training the discriminator. 
We first obtain scores from the discriminator for a real sample and a fake sample separately:\ndisc_score_false = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE)).detach()) disc_score_true = disc(batch) You should notice that the code here for obtaining the fake sample is almost identical to the one before; except, we are calling this .detach() against the generator output. This is very functionally similar to the \u0026ldquo;calling .zero_grad() immediately\u0026rdquo; move we made earlier; called .detach() asks PyTorch to treat whatever tensor there as a constant, and not propagate gradients any more backwards into the generator, which in this case we do not want to change as we are optimizing the discriminator.\nCalculating the discriminator loss With all the pieces in place, this is again just a very directly implementation of:\n\\begin{equation} L_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i}))) \\end{equation}\nin code.\ndiscriminator_loss = (-torch.log(disc_score_true)-torch.log(1-disc_score_false)) Normal backprop Because we ran .detach() before on the generator output, the generator is treated as a constant through this second loss function; as such, our backpropegation step will normally update the discriminator\u0026rsquo;s weights without any fuss. We therefore go back to our tried-and-true formula:\ndiscriminator_loss.backward() disc_optim.step() disc_optim.zero_grad() Tada! That\u0026rsquo;s it; the GAN training loop.\nFinal Thoughts and Unit Challenge Sorry for the very theoretically dense unit; please don\u0026rsquo;t hesitate to flag us down if any questions take place. 
To leave you, here are a few final tips and tricks for making GANs.\nIf your model doesn\u0026rsquo;t work, try pretraining the discriminator: letting Eliot Ness get a bit of a head start by training the discriminator to recognize noise from real images; to do this, just don\u0026rsquo;t run the code that updates the generator weights. GANs are known to perform something called mode collapse: whereby, instead of reaching Nash equilibrium, one of the two networks crash while the other one completely converges. One attempt to solve this is something called Wassterstein Loss, which is discussed here (https://developers.google.com/machine-learning/gan/loss#wasserstein-loss). One important note, however, is that using this loss function makes your network technically not a GAN anymore (as the discriminator will not be actually usefully discriminating, instead acting as a \u0026ldquo;critic\u0026rdquo; for the generator only producing non-interpretable scores), but it has shown improved performance for the generator only. GANs are notoriously hard to make work. See this whole page from Google (https://developers.google.com/machine-learning/gan/loss) about the various ways GANs can fail and possible strategies to remedy them. Do not be scared if your model doesn\u0026rsquo;t work immediately or even after copious tuning. Ok, onto the challenge: make a GAN! There are two variants of this:\nEasier \u0026mdash; use a pair of dense neural networks to make a GAN to generate valid series of \\(5\\) numbers which we explored in the beginning of this class \\([a,b,c,c+1,c+2]\\) Harder \u0026mdash; use a pair of convolutional neural networks to make a GAN to generate these nice pictures of pets (https://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz). Sorry that this is not mountain goats: unfortunately, a dataset large enough is not available for this task :/ Good luck, and have fun!\n","html":"\u003cp\u003eHello everyone! 
It\u0026rsquo;s April, which means we are ready again for a new unit. Let\u0026rsquo;s dive in.\u003c/p\u003e\n\u003cp\u003eYou know what\u0026rsquo;s better than one neural network? TWO!!! Multi-modal approaches\u0026mdash;making two neural networks interact for a certain result\u0026mdash;dominate many of the current edge of neural network research. In this unit, we are going to introduce one such approach, \u003cstrong\u003eGenerative Adversarial Networks\u003c/strong\u003e (\u003cstrong\u003eGAN\u003c/strong\u003e), but leave you with some food for thought for other possibilities for what training multiple networks together can do.\u003c/p\u003e\n\u003cp\u003eBe aware that this unit will begin our more theory-focused discussions, and will leave more of the implementation up to your own explorations or a later fuller example. If you don\u0026rsquo;t understand the math or the theory, please do flag us down in class or out to get things clarified.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eAlthough we will provide motivations for the architecture of a \u003cstrong\u003eGAN\u003c/strong\u003e in a bit, let\u0026rsquo;s first provide a problem to ground ourselves.\u003c/p\u003e\n\u003cp\u003eSay we want to build a neural network to generate pictures of mountain goats. How would you do that?\u003c/p\u003e\n\u003cp\u003eYou can\u0026rsquo;t build a supervised model exactly: what\u0026rsquo;s the input, and what are the labels? No clear answer. 
Even if you have labels, you\u0026rsquo;d have infinitely many possible such mountain goats; how do you generate labels for all of those?\u003c/p\u003e\n\u003cp\u003eTo help us in solving this problem, let us make a few related claims that may seem unmotivated for now:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eIt is easy to find images of mountain goats \u003ccode\u003e[citation needed]\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eIt is eas(ier) to train a model to classify if an image is a mountain goat or not\u003c/li\u003e\n\u003cli\u003eIt is easy to generate random noise\u003c/li\u003e\n\u003cli\u003eWe want more pictures of mountain goats because they are cool\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIt maybe unclear how \u003ccode\u003e1-3\u003c/code\u003e help us solve the mountain-goat generation problem; to explain why they are all crucial, we have to first understand some hand wavy game theory.\u003c/p\u003e\n\u003ch2 id=\"better--motivation\"\u003e(Better) Motivation\u003c/h2\u003e\n\u003cp\u003eIt\u0026rsquo;s storytime!\u003c/p\u003e\n\u003cp\u003eAl Capone and Eliot Ness are playing a game. Al is trying to create counterfeit Franklins, and Eliot is trying to catch them out of circulation.\u003c/p\u003e\n\u003cp\u003eAl first uses his HP Inkjet printer to print the currency. Eliot quickly wises up and uses a microscope to observe whether or not a piece of money in question is printed by ink or via color pigmented paper. Not wishing to foil his plan, Al asks his lab to develop new color pigmentation technology\u0026mdash;just like the US currency does!\u003c/p\u003e\n\u003cp\u003eYet, Eliot outsmarts Al again\u0026mdash;he uses a spectrophotometer to analyze whether or not the money in question is made using paper or on cotton like the actual US currency. 
Seeing this, an angry Al purchases a tonne of cotton and starts printing his counterfeits on cotton.\u003c/p\u003e\n\u003cp\u003eWanting to satisfy Jack\u0026rsquo;s uselessly long analogy, Doctor Strange comes and freezes time for everyone except Al and Eliot (and their respective teams). As the true US currency technology remains the same, Eliot and Al continue to play this game: both developing better technologies to make or catch counterfeits.\u003c/p\u003e\n\u003cp\u003eAfter a billion years, Doctor Strange gets bored and looked into his frozen world. What does he see?\u003c/p\u003e\n\u003cp\u003eAl Capone built an exact replica of the US Mint.\u003c/p\u003e\n\u003cp\u003eWhy? Each time Al gets caught out by Eliot, Al learns one more aspect of how his counterfeit differs from actual US currency. In effect, he\u0026rsquo;s also learning one new detail of how the US currency is made. Therefore, if he keeps patching these tiny differences that Eliot helpfully pointed out for him for the span of a billion years, what Al will be producing will eventually be indistinguishable from US dollars as Eliot will be out of things to point out!\u003c/p\u003e\n\u003cp\u003eAt this point, the Capone-Ness system has reached what we call \u003cstrong\u003eNash equilibrium\u003c/strong\u003e: neither Eliot nor Al have a better move to make\u0026mdash;Eliot no longer has anything more he can possibly do to catch counterfeits as Al\u0026rsquo;s money is identical to US currency, and Al can no longer change his formula for money-making as any deviation will create another factor Eliot can latch onto.\u003c/p\u003e\n\u003ch2 id=\"gans\"\u003eGANs\u003c/h2\u003e\n\u003cp\u003eA \u003cstrong\u003eGenerative Adversarial Network\u003c/strong\u003e (\u003cstrong\u003eGAN\u003c/strong\u003e) is a multi-modal generation model.\u003c/p\u003e\n\u003cp\u003eIt is made out of two interacting neural 
networks:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003egenerator\u003c/strong\u003e \\(G(x)\\): Al Capone\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ediscriminator\u003c/strong\u003e \\(D(x)\\): Eliot Ness\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSpecifically, the \u003cstrong\u003egenerator\u003c/strong\u003e is an unsupervised model trained on the task of generating the targets (\u0026ldquo;images of mountain goats\u0026rdquo;) from random noise, while the \u003cstrong\u003ediscriminator\u003c/strong\u003e is a \u003cstrong\u003eself-supervised model\u003c/strong\u003e trained on the task of classifying whether or not something is actually the target (\u0026ldquo;actual images of mountain goats\u0026rdquo;) or the output of the generator.\u003c/p\u003e\n\u003cp\u003eThe two models converge in tandem, in a similar fashion to the story described above.\u003c/p\u003e\n\u003ch3 id=\"discriminator-d--x\"\u003eDiscriminator \\(D(x)\\)\u003c/h3\u003e\n\u003cp\u003eThe \u003cstrong\u003ediscriminator\u003c/strong\u003e \\(D(x)\\) is perhaps the more easily understandable model out of the two. 
It is a \u003cstrong\u003eself-supervised model\u003c/strong\u003e designed with the task of discriminating whether or not a particular input came from the actual world (\u0026ldquo;goat images\u0026rdquo;) or was the output of the \u003cstrong\u003egenerator\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eSpecifically, the \u003cstrong\u003ediscriminator\u003c/strong\u003e is a neural network with any middle layers you\u0026rsquo;d like that takes the output of the \u003cstrong\u003egenerator\u003c/strong\u003e \u003cem\u003eor\u003c/em\u003e real images as input, and produces a single \u003ccode\u003esigmoid\u003c/code\u003e activated feature (between 0-1) where \\(0\\) represents \u0026ldquo;definitely produced by \u003cstrong\u003egenerator\u003c/strong\u003e\u0026rdquo; and \\(1\\) represents \u0026ldquo;definitely real world.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"generator-g--x\"\u003eGenerator \\(G(x)\\)\u003c/h3\u003e\n\u003cp\u003eThe \u003cstrong\u003egenerator\u003c/strong\u003e \\(G(x)\\) is a model that takes a \u003cem\u003erandom tensor\u003c/em\u003e as input and attempts to produce a generated sample (\u0026ldquo;a picture of a goat\u0026rdquo;). As with the discriminator, it can have any middle layers you\u0026rsquo;d like but has to produce a tensor with the same shape and activation of an actual sample. For instance, if you are trying to produce images, the output of your \u003cstrong\u003egenerator\u003c/strong\u003e has to be of shape \\((channels, x, y)\\) activated with \u003ccode\u003esigmoid\u003c/code\u003e for brightness; if you are trying to produce single scalars, then the \u003cstrong\u003egenerator\u003c/strong\u003e has to produce only one value, etc.\u003c/p\u003e\n\u003cp\u003eIt is perhaps very mystifying how we would ever build a magical box that takes a random tensor and turns it into a pretend image; looking at the loss functions (i.e. 
training objectives) of these two networks may perhaps help clarify this.\u003c/p\u003e\n\u003ch3 id=\"loss-functions\"\u003eLoss Functions\u003c/h3\u003e\n\u003cp\u003eBefore we begin, I want to quickly reiterate something which will be crucial to your mental framework of the loss functions: \u003cstrong\u003eTHEY ARE NOT METRICS\u003c/strong\u003e. The \u003cem\u003evalue\u003c/em\u003e of the loss functions\u0026mdash;especially these ones\u0026mdash;is now completely devoid of physical meaning; instead, the \u003cem\u003etrend\u003c/em\u003e of the loss functions (\u0026ldquo;value goes down means model is doing better\u0026rdquo;) is what matters.\u003c/p\u003e\n\u003cp\u003eWe are introducing the simplest form of \u003cstrong\u003eGAN\u003c/strong\u003e loss functions by \u003ca href=\"https://arxiv.org/abs/1406.2661\"\u003eGoodfellow, et al\u003c/a\u003e called \u0026ldquo;non-saturating loss.\u0026rdquo; There are better ones, but these ones are mathematically elegant and work most of the time\u0026mdash;and are the \u0026ldquo;base case\u0026rdquo; loss functions which other models improve on.\u003c/p\u003e\n\u003ch4 id=\"discriminator-loss\"\u003eDiscriminator Loss\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nL_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\bold{x}_{i}\\) is a tensor representing a real sample (for instance, again, an actual grid of pixels for a mountain goat image), and \\(\\bold{z}_{i}\\) is a tensor containing random noise.\u003c/p\u003e\n\u003cp\u003eWoof. This is quite a scary loss function; let\u0026rsquo;s break it up into pieces.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\\(-\\log D(\\bold{x}_{i})\\): \\(\\bold{x}_{i}\\) is a real sample, so we expect \\(D\\) to produce \\(1\\). Any value below \\(1\\) (i.e. 
the \u003cstrong\u003ediscriminator\u003c/strong\u003e thinking a real image is generated) will produce loss values of increasingly larger magnitude as \\(D(\\bold{x}_{i})\\) approaches \\(0\\). If the discriminator produces \\(1\\) correctly, \\(\\log 1 = 0\\) and we indeed have converged.\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\\(-\\log (1- D(G(\\bold{z}_{i})))\\): on the flip side, we expect the discriminator to consider the output of the generator (i.e. \\(D(G(\\bold{z}_{i}))\\)) to be generated and produce \\(0\\). Therefore, we expect the same scheme as before but flipped (\\(1-D(G(\\bold{z}_{i})\\))\u0026mdash;if \\(D(G(\\bold{z}))\\) produces \\(1\\) (\u0026ldquo;the discriminator is fooled\u0026rdquo;), \\(1-D(G(\\bold{z}))\\) will produce \\(0\\) and the loss will be very high. Vice versa: if \\(D(G(\\bold{z}))\\) produces \\(0\\) (\u0026ldquo;the discriminator picked out the fake\u0026rdquo;), the loss will be \\(0\\).\u003c/p\u003e\n\u003cp\u003eAdding the two values encourages our discriminator to both classify real samples as real \\(1\\), and generated samples as fake \\(0\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"generator-loss\"\u003eGenerator Loss\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nL_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe sharp-eyed among you may realize that this is just the right term from the above expression without the \\(1-\\) negation. 
Indeed, the training target for the \u003cstrong\u003egenerator\u003c/strong\u003e is very simple: \u0026ldquo;did I fool the discriminator\u0026rdquo;: if \\(D\\) produces a large (close to \\(1\\)) output on the generated result\u0026mdash;indicating that it is indeed \u0026ldquo;fooled\u0026rdquo;\u0026mdash;our \\(\\log\\) will approach \\(0\\); whereas, if \\(D\\) produces a small (close to \\(0\\)) output on the generated result\u0026mdash;indicating that it correctly spotted the fake\u0026mdash;our \\(\\log\\) will produce a very negative value which creates high loss.\u003c/p\u003e\n\u003ch2 id=\"the-gan-training-loop\"\u003eThe GAN Training Loop\u003c/h2\u003e\n\u003cp\u003eLoss functions in place, we are almost ready to make the model. The thing that\u0026rsquo;s tricky about training a GAN is that we have to ensure that \u003cem\u003eboth\u003c/em\u003e the \u003cstrong\u003ediscriminator\u003c/strong\u003e and \u003cstrong\u003egenerator\u003c/strong\u003e are converging at the same exact time: ensuring that neither Capone nor Ness has \u003cem\u003edramatically\u003c/em\u003e better technology than the other. This requires a little bit of finesse on your part in terms of the training loop. Plus, our loss functions here are quite special, so their definitions will also need a little wrangling.\u003c/p\u003e\n\u003cp\u003eAt this point, though, I hope we are all pretty confident in how to structure the basics of an ML model. Instead of going over that again, let\u0026rsquo;s go over some of the differences in Python pseudo-code (code that doesn\u0026rsquo;t run, but to illustrate how you would write it)\u0026mdash;specifically in four focus areas.\u003c/p\u003e\n\u003ch3 id=\"dataprep\"\u003eDataprep\u003c/h3\u003e\n\u003cp\u003eJust a short note here on GAN data prep. What\u0026rsquo;s the special thing about GANs? They are \u003cstrong\u003eself-supervised\u003c/strong\u003e\u0026mdash;meaning they make their own labels. 
Instead, all you need to provide is plenty of examples of the thing you want your model to generate.\u003c/p\u003e\n\u003cp\u003eAs such, your batch wouldn\u0026rsquo;t contain \u003ccode\u003ex_data\u003c/code\u003e, \u003ccode\u003ey_data\u003c/code\u003e, etc. Instead, your dataset code should look something of the flavor:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimage_grid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexample_data_for_the_gan_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edataset\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eTensorDataset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimage_grid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# only one argument!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDataLoader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edataset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebatch_size\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eshuffle\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou will notice that the \u003ccode\u003eTensorDataset\u003c/code\u003e here took only \u003cem\u003eone\u003c/em\u003e argument as input, as opposed to the usual 2: this is, as we discussed before, as product of the fact that our GAN only needs examples of the thing you want it to generate\u0026mdash;no labels needed (or possible!)\u003c/p\u003e\n\u003ch3 id=\"network-construction\"\u003eNetwork Construction\u003c/h3\u003e\n\u003cp\u003eOf course, a GAN consists of two different networks. 
Though the network construction is mostly arbitrary, there are some general constraints:\u003c/p\u003e\n\u003ch4 id=\"generator\"\u003egenerator\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003einput shape\u003c/strong\u003e: arbitrary, but takes exclusively random values as input; ideally you want this to be the same number of dimensions as the output\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput shape\u003c/strong\u003e: the \u003cem\u003eoutput shape\u003c/em\u003e of your network has to be the shape of one sample of the real data as the generator should generate something that looks like real data\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput activation\u003c/strong\u003e: whatever makes sense for the real data: if probabilities, then \u003ccode\u003esoftmax\u003c/code\u003e; if images, then \u003ccode\u003esigmoid\u003c/code\u003e (as normalized brightness), etc.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"discriminator\"\u003ediscriminator\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003einput shape\u003c/strong\u003e: the \u003cem\u003eoutput shape\u003c/em\u003e of the generator, or the shape of one real sample of data. (\u003cem\u003eThinking Break\u003c/em\u003e: WHY? as usual, pause and chat)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput shape\u003c/strong\u003e: \u003ccode\u003e(batch_size, 1)\u003c/code\u003e. 
We want to output a scalar between \\(0\\) (\u0026ldquo;probably fake\u0026rdquo;) and \\(1\\) (\u0026ldquo;probably real\u0026rdquo;) for every sample\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput activation\u003c/strong\u003e: \u003ccode\u003esigmoid\u003c/code\u003e to get those values actually between \\(0\\) and \\(1\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"network-initialization\"\u003eNetwork Initialization\u003c/h3\u003e\n\u003cp\u003eBecause the generator and discriminator are two different networks, they require different optimizers!\u003c/p\u003e\n\u003cp\u003eSo, we have to go about making them. This is fortunately pretty direct:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# initialize networks\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eGeneratorNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDiscriminatorNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# initalize *two seperate 
optimizers*\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eAdam\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLR1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eAdam\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLR2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNothing out of the ordinary here, but a worthy reminder that you need 2. 
This will become important shortly.\u003c/p\u003e\n\u003ch3 id=\"training-loop\"\u003eTraining Loop\u003c/h3\u003e\n\u003cp\u003eThis is the main event, and probably the bit that most people trip up the most: the training loop. Let\u0026rsquo;s see a pseudocode implementation of one, and we will discuss how its structured.\u003c/p\u003e\n\u003cp\u003e\u003cem\u003eNote that we will be making some adjustments to our tried-and-true backprop logic.\u003c/em\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEPOCHS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebatch\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# train generator first\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e 
\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# compute + backprop generator loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# disconnect discriminator gradients\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# step and clear\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# now, train 
discriminator\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebatch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# compute + backprop discriminator loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# step and clear\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWoweee. Much to talk about. Let\u0026rsquo;s break it down.\u003c/p\u003e\n\u003ch4 id=\"scoring-on-fake-sample\"\u003eScoring on fake sample\u003c/h4\u003e\n\u003cp\u003eWe first generate a fake sample from the generator by first passing it random noise from \u003ccode\u003etorch.rand\u003c/code\u003e, then passing its output to the discriminator to get a group of scores.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"calculating-the-generator-loss\"\u003eCalculating the generator loss\u003c/h4\u003e\n\u003cp\u003eNext up, we will calculate the generator loss on the score that the discriminator gave for that fake sample we generated earlier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand hence:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cspan class=\"underline\"\u003eThinking 
break!\u003c/span\u003e: why does implementing \u003ccode\u003e(-torch.log(disc_score))\u003c/code\u003e accomplish the same thing as taking \\(-\\log (D(G(\\bold{z}_{i})))\\)? Specifically, how is \u003ccode\u003edisc_score\u003c/code\u003e calculated in our example?\u003c/p\u003e\n\u003ch4 id=\"the-generator-backprop-step\"\u003eThe generator backprop step\u003c/h4\u003e\n\u003cp\u003eFor all that drilling we did of BACKPROP! STEP! RESET!, the next step may feel sacrilegious:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# disconnect discriminator gradients\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# step and clear\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cem\u003eWhat is happening here?\u003c/em\u003e Let\u0026rsquo;s take it one step at a time.\u003c/p\u003e\n\u003cp\u003eFirst, we call \u003ccode\u003egenerator_loss.backward()\u003c/code\u003e to backprop the loss; nothing wrong here. But then, against all odds, we call \u003ccode\u003e.zero_grad()\u003c/code\u003e on the \u003cstrong\u003ediscriminator\u003c/strong\u003e optimizer. What gives?\u003c/p\u003e\n\u003cp\u003eRecall that, in this case, we are training the \u003cstrong\u003egenerator\u003c/strong\u003e; as the loss-function literally asks the \u003cstrong\u003ediscriminator\u003c/strong\u003e to be wrong, we mustn\u0026rsquo;t be updating the discriminator using the gradients computed against this function; instead, we simply want the generator to be updated to better fool the \u003cstrong\u003ediscriminator\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore, we immediately zero out all the gradients on the \u003cstrong\u003ediscriminator\u003c/strong\u003e to prevent this step from updating the \u003cstrong\u003ediscriminator\u003c/strong\u003e with the \u0026ldquo;fooling\u0026rdquo; loss function; and proceed to update the \u003cstrong\u003egenerator\u003c/strong\u003e weights as usual.\u003c/p\u003e\n\u003ch4 id=\"scoring-on-detached-fake-sample-and-real-sample\"\u003eScoring on detached fake sample and real sample\u003c/h4\u003e\n\u003cp\u003eNext up, training the \u003cstrong\u003ediscriminator\u003c/strong\u003e. 
We first obtain scores from the discriminator for a real sample and a fake sample separately:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebatch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou should notice that the code here for obtaining the fake sample is almost identical to the one before; except, we are calling this \u003ccode\u003e.detach()\u003c/code\u003e against the generator output. This is very functionally similar to the \u0026ldquo;calling \u003ccode\u003e.zero_grad()\u003c/code\u003e immediately\u0026rdquo; move we made earlier; called \u003ccode\u003e.detach()\u003c/code\u003e asks PyTorch to treat whatever tensor there as a constant, and not propagate gradients any more backwards into the \u003cstrong\u003egenerator\u003c/strong\u003e, which in this case we do not want to change as we are optimizing the \u003cstrong\u003ediscriminator\u003c/strong\u003e.\u003c/p\u003e\n\u003ch4 id=\"calculating-the-discriminator-loss\"\u003eCalculating the discriminator loss\u003c/h4\u003e\n\u003cp\u003eWith all the pieces in place, this is again just a very directly implementation of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein code.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"normal-backprop\"\u003eNormal backprop\u003c/h4\u003e\n\u003cp\u003eBecause we ran \u003ccode\u003e.detach()\u003c/code\u003e before on the \u003cstrong\u003egenerator\u003c/strong\u003e output, the \u003cstrong\u003egenerator\u003c/strong\u003e is treated as a constant through this second loss function; as such, our backpropegation step will normally update the \u003cstrong\u003ediscriminator\u003c/strong\u003e\u0026rsquo;s weights without any fuss. 
We therefore go back to our tried-and-true formula:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTada! That\u0026rsquo;s it; the GAN training loop.\u003c/p\u003e\n\u003ch2 id=\"final-thoughts-and-unit-challenge\"\u003eFinal Thoughts and Unit Challenge\u003c/h2\u003e\n\u003cp\u003eSorry for the very theoretically dense unit; please don\u0026rsquo;t hesitate to flag us down if any questions take place. 
To leave you, here are a few final tips and tricks for making GANs.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eIf your model doesn\u0026rsquo;t work, try \u003cstrong\u003epretraining\u003c/strong\u003e the \u003cstrong\u003ediscriminator\u003c/strong\u003e: letting Eliot Ness get a bit of a head start by training the discriminator to recognize noise from real images; to do this, just don\u0026rsquo;t run the code that updates the generator weights.\u003c/li\u003e\n\u003cli\u003eGANs are known to perform something called \u003cstrong\u003emode collapse\u003c/strong\u003e: whereby, instead of reaching \u003cstrong\u003eNash equilibrium\u003c/strong\u003e, one of the two networks crash while the other one completely converges. One attempt to solve this is something called \u003cstrong\u003eWassterstein Loss\u003c/strong\u003e, which is \u003ca href=\"https://developers.google.com/machine-learning/gan/loss#wasserstein-loss\"\u003ediscussed here\u003c/a\u003e (\u003ca href=\"https://developers.google.com/machine-learning/gan/loss#wasserstein-loss\"\u003ehttps://developers.google.com/machine-learning/gan/loss#wasserstein-loss\u003c/a\u003e). One important note, however, is that using this loss function makes your network \u003cem\u003etechnically\u003c/em\u003e not a GAN anymore (as the \u003cstrong\u003ediscriminator\u003c/strong\u003e will not be actually usefully discriminating, instead acting as a \u0026ldquo;\u003cstrong\u003ecritic\u003c/strong\u003e\u0026rdquo; for the generator only producing non-interpretable scores), but it has shown improved performance for the \u003cstrong\u003egenerator\u003c/strong\u003e only.\u003c/li\u003e\n\u003cli\u003eGANs are notoriously hard to make work. 
\u003ca href=\"https://developers.google.com/machine-learning/gan/problems\"\u003eSee this whole page from Google\u003c/a\u003e (\u003ca href=\"https://developers.google.com/machine-learning/gan/loss\"\u003ehttps://developers.google.com/machine-learning/gan/loss\u003c/a\u003e) about the various ways GANs can fail and possible strategies to remedy them. \u003cstrong\u003eDo not\u003c/strong\u003e be scared if your model doesn\u0026rsquo;t work immediately or even after copious tuning.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eOk, onto the challenge: make a GAN! There are two variants of this:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eEasier \u0026mdash; use a pair of \u003cstrong\u003edense neural networks\u003c/strong\u003e to make a GAN to generate valid series of \\(5\\) numbers which we explored in the beginning of this class \\([a,b,c,c+1,c+2]\\)\u003c/li\u003e\n\u003cli\u003eHarder \u0026mdash; use a pair of \u003cstrong\u003econvolutional neural networks\u003c/strong\u003e to make a GAN to generate \u003ca href=\"https://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz\"\u003ethese nice pictures of pets\u003c/a\u003e (\u003ca href=\"https://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz\"\u003ehttps://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz\u003c/a\u003e). Sorry that this is not mountain goats: unfortunately, a dataset large enough is not available for this task :/\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eGood luck, and have fun!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_it_takes_two/","tags":["writing","aml"],"title":"AML: It Takes Two"},{"categories":null,"contents":"Woof. As I begin to write this I should add that this unit is going to be conceptually dense. 
Though we are teaching one particular algorithm (incidentally, named, REINFORCE), the world of reinforcement learning is build by one, if not many, very advanced treatments in maths.\nSo if anything, I would focus on getting the conceptual flavor of how these problems are formulated and discuses. If you can be along for the mathematical and algorithmic journey, then even better \u0026mdash; but by no means required or expected\u0026hellip; There\u0026rsquo;s still lots for all of us to learn together.\nSpeaking of college level classes, I loved the detailed and accessible overview of Reinforcement Learning methods by Professors Charles Isbell and Michael Littlman from Georgia Tech CoC. If you find yourself gravitating towards the topic of this unit, go check them out:\nhttps://omscs.gatech.edu/cs-7642-reinforcement-learning-course-videos\nOk. Let\u0026rsquo;s dive in.\nMotivation We are used to a clear, differentiable loss function. One particular exercise in class we do a lot is to shout out a problem, and think about its loss function:\n\u0026ldquo;classifying Pokemon!\u0026rdquo; \u0026hellip; \u0026ldquo;cross entropy!\u0026rdquo; \u0026ldquo;generating stock price!\u0026rdquo; \u0026hellip; \u0026ldquo;MSE!\u0026rdquo; \u0026ldquo;making pictures of rice!\u0026rdquo; \u0026hellip; \u0026ldquo;GAN non-saturating loss!\u0026rdquo; and so on. 
Regardless of the classification/regression difference, you will note that these functions are all of the shape:\n\\begin{align} \u0026amp;f(\\hat{y}, y) = \\text{single float value} \\end{align}\nMeaning, it takes two vectors\u0026mdash;the output (\u0026ldquo;prediction\u0026rdquo;, \\(\\hat{y}\\)) of the network, and the desired output (\u0026ldquo;target\u0026rdquo;, \\(y\\)) in your training data, and produces (sometimes with much mathematical gymnastics) a single scalar value representing which we try to optimize to be lower.\nNote that, regardless of supervised learning (like Pokemon classification; we have input, desired targets, and actual output) or unsupervised learning (like GAN rice generation; we have only the desired targets and actual output), we have the desired targets in hand. We know what the model is supposed to do (i.e. have many examples of correct behavior), and are just teaching the model to do so one way or other.\nBut what if\u0026hellip;. we don\u0026rsquo;t know the correct behavior of the model? Can you brainstorm some tasks that would very well might want to automate using ML, but can\u0026rsquo;t provide precise labels for the desired action?\n\u0026hellip;\nTake, for instance, the task of teaching this poor stick figure how to stand up:\nFigure 1: aw so sad\nyou are given a list of forces currently hitting the figure, and you are to produce a list of forces the figure\u0026rsquo;s limbs should produce.\nOf course you can\u0026rsquo;t know precisely the labels at every given moment: there are no \u0026ldquo;best\u0026rdquo; or, arguably, even a \u0026ldquo;correct\u0026rdquo; strategy for standing the figure up. There\u0026rsquo;s no labels which you can use to even begin to approach this task!\nWhat to do?\nIn come Reinforcement Learning (RL)\nOk, this is where the math will begin. 
I encourage you to take a piece of paper and start writing down each symbol we define together, and refer to that piece of paper copiously to understand the expressions.\nIf you want to learn this more, the conceptual basis we are working with is called policy gradient, specifically the REINFORCE algorithm. This is not even close to being the only way to approach the Reinforcement Learning task; but its one fairly interesting and successful approach.\nThe Environment: Agent, State, Action, and Policy Three variables underlie the basics of Reinforcement Learning:\nstate \\(s_{t}\\): the \u0026ldquo;situation\u0026rdquo; of the environment, what can be \u0026ldquo;observed\u0026rdquo;; for our example above, this looks like the forces on each limb of our humanoid. action \\(a\\): a certain perturbation one can do to the agent which will influence its state; for our example, this looks like moving (\u0026ldquo;translating\u0026rdquo;/\u0026ldquo;applying force on\u0026rdquo;) one or many limbs. policy \\(\\pi\\): the policy is a function which takes the state as input, and produces a probability distribution (think \u0026ldquo;softmax\u0026rdquo;) over all the actions one could choose. We will talk extensively about this shortly. agent: a general term describing the actual thing being controlled; for instance, our humanoid. episode: an entire group of states, starting at the beginning and continuing for instance, for a fixed number of states or until a certain end is reached (for instance, for the humanoid walking task, when it falls). IMPORTANT NOTE: policy is as function \\(\\pi(s_{t})\\), literally a function named pi. It has nothing to do with the ratio between the radius and circumference of a circle. 
Its just called pi\u0026hellip; Unfortunately, we are working to stick to the language used by current literature, but sometimes their symbol choice is rather deranged.\nReward In lieu of a loss function, Reinforcement Learning is a class of models that learn from a numerical signal called reward. The reward function typically looks like this:\n\\begin{equation} r_{t}(s_{t},a_{t}) = \\text{single float value}\\ (-\\infty, +\\infty) \\end{equation}\nInstead of calculating the difference between the desired and actual output of the model, the reward signal scores how good taking a certain action is in an environment. It takes two vectors as input: the state and an action on the state, to produce a certain score.\nUnlike what we are used to with the loss, this reward value is not differentiable w.r.t. the parameters of the network! The action is a sample from the distribution; so this score can be generated however you\u0026rsquo;d like. Furthermore, unlike what we are used to with loss, a higher reward value means a better action.\nCumulative Discounted Reward Note again the expression for that reward statement:\n\\begin{equation} r_{t}(s_{t}, a_{t}) \\end{equation}\neach of these variables are parameterized by this subscript $t$\u0026mdash;meaning reward is calculated per time! This actually presents us a problem to describe the overall behavior of our agent. Pause and think why this may be.\n\u0026hellip;\nFor instance, the act of \u0026ldquo;standing up\u0026rdquo; often require multiple steps; many of which honestly doesn\u0026rsquo;t contribute at all to the act of standing up until many steps later! 
For instance, the act of propping up one\u0026rsquo;s hands to the ground\u0026mdash;which actually lowers your center of gravity, and hence naively should get a negative reward\u0026mdash;is actually critical in being able to stand up well.\nIf we train a model (somehow, ignoring the details for now) to maximize \\(r_{t}\\), then we will get an instant gratification machine: meaning, its pretty useless for any just a tad-bit complex task!\nTo deal with this, we need to introduce the idea of a trajectory (\\(\\tau\\)). A trajectory is a list of state-action pairs generated by the same exact policy (i.e. no learning at all) just playing a game out to completion\u0026mdash;i.e. until the end of the episode.\nThat is:\n\\begin{equation} \\tau = [(s_{0}, \\pi(s_{0})), \\dots, (s_{n}, \\pi(s_{n}))] = [(s_{0}, a_{0}), \\dots, (s_{n}, a_{n})] \\end{equation}\nWe then define a new-and-improved reward function \\(R_{t}(\\tau)\\) which models not just how good our policy is right now, but how good WILL our policy be given these set of actions.\nSpecifically, at every timestamp:\n\\begin{equation} R_{t}(\\tau) = r_{t}(s_{t}, a_{t}) + \\gamma r_{t+1}(s_{t+1}, a_{t+1})+ \\gamma^{2} r_{t+2}(s_{t+2}, a_{t+2}) + \\dots \\end{equation}\nwhere, \\(0 \\leq \\gamma \\leq 1\\) is a hyperparameter called a discount factor controlling how much more the current reward matters.\nWoof, the math here looks a bit scary; let\u0026rsquo;s break it down. 
We are defining a function $Rt(τ)$\u0026mdash;taking \\(\\tau\\) as input, meaning this function actually knows all of the model\u0026rsquo;s future behavior as well as current ones; each term of this function \\(R\\) multiplies \\(\\gamma\\) a certain number of times to the instantaneous reward at that point.\nThis function, then, essentially adds up all the future reward taking the current action will eventually lead to\u0026mdash;\u0026ldquo;how much reward does choosing this path afford you\u0026rdquo;\u0026mdash;discounting rewards earned in the future with a certain factor \\(\\gamma\\) because those are subject to change based on your agent\u0026rsquo;s future decisions. Things that are more the future gets discounted harder, by \\(\\gamma^{n}\\).\nThis expression for \\(R_{t}(\\tau)\\) is called the cumulative discounted reward, or \u0026ldquo;the reward\u0026rdquo; for short. When we refer to the reward in the rest of this write-up, this is probably the expression you are looking for.\nPolicy Gradient Theorem The policy gradient theorem is unfortunately not going to be very well motivated in the time that we have together. 
If you are curious, the proof, and some more discussion, can be found here or in my notes here.\nFor now, let\u0026rsquo;s just skip to the result\u0026hellip; The loss function objective \\(J\\) with which we can use to optimize a neural network, given a set of non-connected reward signals and a policy to optimize \\(\\pi\\), is:\n\\begin{equation} -\\sum_{t=0} \\log \\pi_{\\theta} (a_{t} | s_{t}) R_{t}(\\tau) \\end{equation}\nwhere, \\(\\theta\\) are the weights to policy \\(\\pi\\), and the rest are usual symbols defined above.\nLet\u0026rsquo;s break it down.\nThe rest of this is a summation over all time of the trajectory; meaning you have to first generate the entire trajectory \\(\\tau\\) first and then add this value per slice:\n\\(\\pi_{\\theta}(a_{t}|s_{t})\\): this is the probability (often called \u0026ldquo;confidence\u0026rdquo;) of the model to take action \\(a_{t}\\) at state \\(s_{t}\\); for a discrete set of actions (i.e. choosing/classification), we already know how to do this: torch.argmax. The code example below/in class explores how to do this for a continuous sample. \\(\\log \\pi_{t}(a_{t}|s_{t})\\): we want to take the log of this confidence score the model produced: bigger \u0026ldquo;confident\u0026rdquo; number, smaller magnitude log, smaller error \\(R_{t}(\\tau)\\) the Cumulative Discounted Reward from that timestamp on, as we discussed before The sharp-eyed among you may notice that this function is very similarly shaped as cross-entropy: except you swap out the ground truth \\(y\\) for the cumulative reward \\(R_{t}(\\tau)\\). Indeed that is the case! In fact, much of the similar motivations apply both functions.\nREINFORCE Loss function in hand, its time to actual perform the actual optimization. 
There\u0026rsquo;s three main steps to actually perform the REINFORCE algorithm optimization:\nPlay the game: generating entire episode worth of \\(s_{t}, a_{t}, r_{t}\\) using the same exact unoptimized policy \\(\\pi\\), storing a full trajectory \\(\\tau\\) Calculate the reward: calculate, using the discounting equation above, \\(R_{t}\\) from each \\(r_{t}\\). Remember that each \\(R_{t}\\) is a reward comprised of the current reward, plus \\(\\gamma\\) raised to a certain power to discount the future rewards \\(r_{t+n}\\). Replay and backprop: Compute the actual error above for each timeframe, backpropegating them all but don\u0026rsquo;t change the weights (i.e. call .backward() but not .step()) Change the weights all at once: call .step() and make our model better! As we have no fixed-length data, there are no epochs to this setup.; we will instead specify a number of times we want to run the above steps\u0026mdash;meaning we have a number of episodes you can tune while training the model.\nNext Steps Apart from the bunch of theory here, there still remain a lot of practical questions in how to make all this happen in PyTorch. We hope to discuss this together in class, and explore the wonderful set of tools Gym\u0026mdash;a RL state/reward calculation library\u0026mdash;can do for us!\nTo get started on this discussion, here\u0026rsquo;s one implementation of the humanoid-standup task we can be working from: https://github.com/Jemoka/demo_amlmod4rl/blob/master/main.py\n","html":"\u003cp\u003eWoof. As I begin to write this I should add that \u003cstrong\u003ethis unit is going to be conceptually dense\u003c/strong\u003e. Though we are teaching one particular algorithm (incidentally, named, REINFORCE), the world of reinforcement learning is build by one, if not many, very advanced treatments in maths.\u003c/p\u003e\n\u003cp\u003eSo if anything, I would focus on getting the conceptual flavor of how these problems are formulated and discuses. 
If you can be along for the mathematical and algorithmic journey, then even better \u0026mdash; but by no means required or expected\u0026hellip; There\u0026rsquo;s still lots for all of us to learn together.\u003c/p\u003e\n\u003cp\u003eSpeaking of college level classes, I \u003cem\u003eloved\u003c/em\u003e the detailed and accessible overview of Reinforcement Learning methods by Professors Charles Isbell and Michael Littlman from Georgia Tech CoC. If you find yourself gravitating towards the topic of this unit, go check them out:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://omscs.gatech.edu/cs-7642-reinforcement-learning-course-videos\"\u003ehttps://omscs.gatech.edu/cs-7642-reinforcement-learning-course-videos\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eOk. Let\u0026rsquo;s dive in.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eWe are used to a clear, \u003cstrong\u003edifferentiable\u003c/strong\u003e loss function. One particular exercise in class we do a lot is to shout out a problem, and think about its loss function:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;classifying Pokemon!\u0026rdquo; \u0026hellip; \u0026ldquo;cross entropy!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;generating stock price!\u0026rdquo; \u0026hellip; \u0026ldquo;MSE!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;making pictures of rice!\u0026rdquo; \u0026hellip; \u0026ldquo;GAN non-saturating loss!\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand so on. 
Regardless of the classification/regression difference, you will note that these functions are all of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;f(\\hat{y}, y) = \\text{single float value}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eMeaning, it takes \u003cstrong\u003etwo vectors\u003c/strong\u003e\u0026mdash;the \u003cem\u003eoutput\u003c/em\u003e (\u0026ldquo;prediction\u0026rdquo;, \\(\\hat{y}\\)) of the network, and the \u003cem\u003edesired output\u003c/em\u003e (\u0026ldquo;target\u0026rdquo;, \\(y\\)) in your training data, and produces (sometimes with much mathematical gymnastics) a single scalar value representing which we try to optimize to be lower.\u003c/p\u003e\n\u003cp\u003eNote that, regardless of \u003cstrong\u003esupervised learning\u003c/strong\u003e (like Pokemon classification; we have input, desired targets, and actual output) or \u003cstrong\u003eunsupervised learning\u003c/strong\u003e (like GAN rice generation; we have only the desired targets and actual output), we \u003cem\u003ehave the desired targets\u003c/em\u003e in hand. We \u003cem\u003eknow\u003c/em\u003e what the model is supposed to do (i.e. have many examples of correct behavior), and are just teaching the model to do so one way or other.\u003c/p\u003e\n\u003cp\u003eBut what if\u0026hellip;. we \u003cem\u003edon\u0026rsquo;t\u003c/em\u003e know the correct behavior of the model? 
Can you brainstorm some tasks that would very well might want to automate using ML, but can\u0026rsquo;t provide precise labels for the desired action?\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eTake, for instance, the task of \u003ca href=\"https://gymnasium.farama.org/environments/mujoco/humanoid_standup/\"\u003eteaching this poor stick figure how to stand up\u003c/a\u003e:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-30_12-37-54_screenshot.png\"\n alt=\"Figure 1: aw so sad\"\u003e\u003cfigcaption\u003e\n \u003cp\u003e\u003cspan class=\"figure-number\"\u003eFigure 1: \u003c/span\u003eaw so sad\u003c/p\u003e\n \u003c/figcaption\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eyou are given a list of forces currently hitting the figure, and you are to produce a list of forces the figure\u0026rsquo;s limbs should produce.\u003c/p\u003e\n\u003cp\u003eOf course you can\u0026rsquo;t know precisely the labels at every given moment: there are no \u0026ldquo;best\u0026rdquo; or, arguably, even a \u0026ldquo;correct\u0026rdquo; strategy for standing the figure up. There\u0026rsquo;s no labels which you can use to even begin to approach this task!\u003c/p\u003e\n\u003cp\u003eWhat to do?\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eIn come Reinforcement Learning (RL)\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eOk, this is where the math will begin. I encourage you to take a piece of paper and start writing down each symbol we define together, and refer to that piece of paper copiously to understand the expressions.\u003c/p\u003e\n\u003cp\u003eIf you want to learn this more, the conceptual basis we are working with is called \u003cstrong\u003epolicy gradient\u003c/strong\u003e, specifically the \u003cstrong\u003eREINFORCE\u003c/strong\u003e algorithm. 
This is \u003cem\u003enot even close\u003c/em\u003e to being the only way to approach the Reinforcement Learning task; but its one fairly interesting and successful approach.\u003c/p\u003e\n\u003ch2 id=\"the-environment-agent-state-action-and-policy\"\u003eThe Environment: Agent, State, Action, and Policy\u003c/h2\u003e\n\u003cp\u003eThree variables underlie the basics of \u003cstrong\u003eReinforcement Learning\u003c/strong\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003estate\u003c/strong\u003e \\(s_{t}\\): the \u0026ldquo;situation\u0026rdquo; of the \u003cstrong\u003eenvironment\u003c/strong\u003e, what can be \u0026ldquo;observed\u0026rdquo;; for our example above, this looks like the forces on each limb of our humanoid.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eaction\u003c/strong\u003e \\(a\\): a certain perturbation one can do to the agent which will influence its \u003cstrong\u003estate\u003c/strong\u003e; for our example, this looks like moving (\u0026ldquo;translating\u0026rdquo;/\u0026ldquo;applying force on\u0026rdquo;) one or many limbs.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003epolicy\u003c/strong\u003e \\(\\pi\\): the \u003cstrong\u003epolicy\u003c/strong\u003e is a function which takes the \u003cstrong\u003estate\u003c/strong\u003e as input, and produces a probability distribution (think \u0026ldquo;softmax\u0026rdquo;) over all the \u003cstrong\u003eactions\u003c/strong\u003e one could choose. 
We will talk extensively about this shortly.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eagent\u003c/strong\u003e: a general term describing the actual thing being controlled; for instance, our humanoid.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eepisode\u003c/strong\u003e: an entire group of \u003cstrong\u003estates\u003c/strong\u003e, starting at the beginning and continuing for instance, for a fixed number of \u003cstrong\u003estates\u003c/strong\u003e or until a certain end is reached (for instance, for the humanoid walking task, when it falls).\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eIMPORTANT NOTE: policy is as function \\(\\pi(s_{t})\\), literally a \u003cem\u003efunction named pi\u003c/em\u003e. It has nothing to do with the ratio between the radius and circumference of a circle. Its \u003cem\u003ejust called pi\u0026hellip;\u003c/em\u003e Unfortunately, we are working to stick to the language used by current literature, but sometimes their symbol choice is rather deranged.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"reward\"\u003eReward\u003c/h2\u003e\n\u003cp\u003eIn lieu of a loss function, \u003cstrong\u003eReinforcement Learning\u003c/strong\u003e is a class of models that learn from a numerical signal called \u003cstrong\u003ereward\u003c/strong\u003e. The reward function typically looks like this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{t}(s_{t},a_{t}) = \\text{single float value}\\ (-\\infty, +\\infty)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eInstead of calculating the difference between the desired and actual output of the model, the \u003cstrong\u003ereward\u003c/strong\u003e signal scores \u003cem\u003ehow good taking a certain action is\u003c/em\u003e in an environment. 
It takes two vectors as input: the \u003cstrong\u003estate\u003c/strong\u003e and an \u003cstrong\u003eaction\u003c/strong\u003e on the state, to produce a certain score.\u003c/p\u003e\n\u003cp\u003eUnlike what we are used to with the loss, this \u003cstrong\u003ereward\u003c/strong\u003e value is \u003cem\u003enot\u003c/em\u003e differentiable w.r.t. the parameters of the network! The action is a \u003cem\u003esample\u003c/em\u003e from the distribution; so this score can be generated however you\u0026rsquo;d like. Furthermore, unlike what we are used to with \u003cstrong\u003eloss\u003c/strong\u003e, a \u003cstrong\u003ehigher\u003c/strong\u003e \u003cstrong\u003ereward\u003c/strong\u003e value means a better action.\u003c/p\u003e\n\u003ch2 id=\"cumulative-discounted-reward\"\u003eCumulative Discounted Reward\u003c/h2\u003e\n\u003cp\u003eNote again the expression for that reward statement:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{t}(s_{t}, a_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeach of these variables are parameterized by this subscript $t$\u0026mdash;meaning reward is calculated \u003cem\u003eper time!\u003c/em\u003e This actually presents us a problem to describe the \u003cem\u003eoverall\u003c/em\u003e behavior of our agent. Pause and think why this may be.\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eFor instance, the act of \u0026ldquo;standing up\u0026rdquo; often require multiple steps; many of which honestly doesn\u0026rsquo;t contribute at all to the act of standing up until many steps later! 
For instance, the act of propping up one\u0026rsquo;s hands to the ground\u0026mdash;which actually \u003cem\u003elowers\u003c/em\u003e your center of gravity, and hence naively should get a negative reward\u0026mdash;is actually critical in being able to stand up well.\u003c/p\u003e\n\u003cp\u003eIf we train a model (somehow, ignoring the details for now) to maximize \\(r_{t}\\), then we will get an \u003cem\u003einstant gratification machine\u003c/em\u003e: meaning, its pretty useless for any just a tad-bit complex task!\u003c/p\u003e\n\u003cp\u003eTo deal with this, we need to introduce the idea of a \u003cstrong\u003etrajectory\u003c/strong\u003e (\\(\\tau\\)). A \u003cstrong\u003etrajectory\u003c/strong\u003e is a list of state-action pairs generated by the same exact policy (i.e. no learning at all) just playing a game out to completion\u0026mdash;i.e. until the end of the \u003cstrong\u003eepisode\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tau = [(s_{0}, \\pi(s_{0})), \\dots, (s_{n}, \\pi(s_{n}))] = [(s_{0}, a_{0}), \\dots, (s_{n}, a_{n})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe then define a new-and-improved reward function \\(R_{t}(\\tau)\\) which models not just \u003cem\u003ehow good our policy is right now\u003c/em\u003e, but \u003cem\u003ehow good WILL our policy be given these set of actions\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eSpecifically, at every timestamp:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_{t}(\\tau) = r_{t}(s_{t}, a_{t}) + \\gamma r_{t+1}(s_{t+1}, a_{t+1})+ \\gamma^{2} r_{t+2}(s_{t+2}, a_{t+2}) + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(0 \\leq \\gamma \\leq 1\\) is a hyperparameter called a \u003cstrong\u003ediscount factor\u003c/strong\u003e controlling how much more the current reward matters.\u003c/p\u003e\n\u003cp\u003eWoof, the math here looks a bit scary; let\u0026rsquo;s break it down. 
We are defining a function $R\u003csub\u003et\u003c/sub\u003e(τ)$\u0026mdash;taking \\(\\tau\\) as input, meaning this function actually knows all of the model\u0026rsquo;s future behavior as well as current ones; each term of this function \\(R\\) multiplies \\(\\gamma\\) a certain number of times to the instantaneous reward at that point.\u003c/p\u003e\n\u003cp\u003eThis function, then, essentially adds up all the \u003cem\u003efuture reward taking the current action will eventually lead to\u003c/em\u003e\u0026mdash;\u0026ldquo;how much reward does choosing this path afford you\u0026rdquo;\u0026mdash;discounting rewards earned in the future with a certain factor \\(\\gamma\\) because those are subject to change based on your \u003cstrong\u003eagent\u003c/strong\u003e\u0026rsquo;s future decisions. Things that are more the future gets discounted harder, by \\(\\gamma^{n}\\).\u003c/p\u003e\n\u003cp\u003eThis expression for \\(R_{t}(\\tau)\\) is called the \u003cstrong\u003ecumulative discounted reward\u003c/strong\u003e, or \u0026ldquo;the reward\u0026rdquo; for short. When we refer to the reward in the rest of this write-up, this is probably the expression you are looking for.\u003c/p\u003e\n\u003ch2 id=\"policy-gradient-theorem\"\u003ePolicy Gradient Theorem\u003c/h2\u003e\n\u003cp\u003eThe policy gradient theorem is unfortunately not going to be very well motivated in the time that we have together. 
If you are curious, the proof, and some more discussion, can be found \u003ca href=\"https://lilianweng.github.io/posts/2018-04-08-policy-gradient/#proof-of-policy-gradient-theorem\"\u003ehere\u003c/a\u003e or in my notes \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor now, let\u0026rsquo;s just skip to the result\u0026hellip; The loss function objective \\(J\\) with which we can use to optimize a neural network, given a set of \u003cem\u003enon-connected\u003c/em\u003e \u003cstrong\u003ereward\u003c/strong\u003e signals and a policy to optimize \\(\\pi\\), is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\sum_{t=0} \\log \\pi_{\\theta} (a_{t} | s_{t}) R_{t}(\\tau)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\theta\\) are the \u003cstrong\u003eweights\u003c/strong\u003e to \u003cstrong\u003epolicy\u003c/strong\u003e \\(\\pi\\), and the rest are usual symbols defined above.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s break it down.\u003c/p\u003e\n\u003cp\u003eThe rest of this is a summation over all time of the trajectory; meaning you have to first generate the entire trajectory \\(\\tau\\) first and then add this value per slice:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\pi_{\\theta}(a_{t}|s_{t})\\): this is the \u003cem\u003eprobability\u003c/em\u003e (often called \u0026ldquo;confidence\u0026rdquo;) of the model to take action \\(a_{t}\\) at state \\(s_{t}\\); for a discrete set of actions (i.e. choosing/classification), we already know how to do this: \u003ccode\u003etorch.argmax\u003c/code\u003e. 
The code example below/in class explores how to do this for a continuous sample.\u003c/li\u003e\n\u003cli\u003e\\(\\log \\pi_{t}(a_{t}|s_{t})\\): we want to take the log of this confidence score the model produced: bigger \u0026ldquo;confident\u0026rdquo; number, smaller magnitude log, smaller error\u003c/li\u003e\n\u003cli\u003e\\(R_{t}(\\tau)\\) the \u003cstrong\u003eCumulative Discounted Reward\u003c/strong\u003e from that timestamp on, as we discussed before\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe sharp-eyed among you may notice that this function is very similarly shaped as cross-entropy: except you swap out the ground truth \\(y\\) for the cumulative reward \\(R_{t}(\\tau)\\). Indeed that is the case! In fact, much of the similar motivations apply both functions.\u003c/p\u003e\n\u003ch2 id=\"reinforce\"\u003eREINFORCE\u003c/h2\u003e\n\u003cp\u003eLoss function in hand, its time to actual perform the actual optimization. There\u0026rsquo;s three main steps to actually perform the REINFORCE algorithm optimization:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003ePlay the game\u003c/strong\u003e: generating \u003cem\u003eentire \u003cstrong\u003eepisode\u003c/strong\u003e\u003c/em\u003e worth of \\(s_{t}, a_{t}, r_{t}\\) using the same exact unoptimized policy \\(\\pi\\), storing a full trajectory \\(\\tau\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eCalculate the reward\u003c/strong\u003e: calculate, using the discounting equation above, \\(R_{t}\\) from each \\(r_{t}\\). Remember that each \\(R_{t}\\) is a reward comprised of the current reward, plus \\(\\gamma\\) raised to a certain power to discount the future rewards \\(r_{t+n}\\).\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eReplay and backprop\u003c/strong\u003e: Compute the actual error above for each timeframe, backpropegating them all but \u003cem\u003edon\u0026rsquo;t change the weights\u003c/em\u003e (i.e. 
call \u003ccode\u003e.backward()\u003c/code\u003e but not \u003ccode\u003e.step()\u003c/code\u003e)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eChange the weights all at once\u003c/strong\u003e: call \u003ccode\u003e.step()\u003c/code\u003e and make our model better!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAs we have no fixed-length data, there are no \u003cstrong\u003eepochs\u003c/strong\u003e to this setup.; we will instead specify a number of times we want to run the above steps\u0026mdash;meaning we have a number of \u003cstrong\u003eepisodes\u003c/strong\u003e you can tune while training the model.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eApart from the bunch of theory here, there still remain a lot of practical questions in how to make all this happen in PyTorch. We hope to discuss this together in class, and explore the wonderful set of tools Gym\u0026mdash;a RL state/reward calculation library\u0026mdash;can do for us!\u003c/p\u003e\n\u003cp\u003eTo get started on this discussion, here\u0026rsquo;s one implementation of the humanoid-standup task we can be working from: \u003ca href=\"https://github.com/Jemoka/demo_amlmod4rl/blob/master/main.py\"\u003ehttps://github.com/Jemoka/demo_amlmod4rl/blob/master/main.py\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_reinforce/","tags":["writing","aml"],"title":"AML: REINFORCE(ment learning)"},{"categories":null,"contents":"Welcome back! I think, over the last few days, we have been hyping up convolutional neural networks enough such that you are probably ready to dive right in. So\u0026hellip; Let\u0026rsquo;s, uh, motivate it first!\nWhy do we use a CNN? Let\u0026rsquo;s think of a toy problem to play with. 
Given a pattern made using two colours (let\u0026rsquo;s name them a and b, or perhaps black and white), let\u0026rsquo;s classify whether it is the \u0026ldquo;zebra\u0026rdquo; pattern\u0026quot; or the \u0026ldquo;checkerboard\u0026rdquo; pattern.\nZebra\u0026mdash;aligned stripes:\na b a b a b a b a b a b a b a b Checkerboard\u0026mdash;alternating stripes:\na b a b b a b a a b a b b a b a We are already familiar with one neural-network architecture: stacked linear layers, also known as deep neural networks. If we are trying to process these two input samples for a linear layer, what would we do?\n\u0026hellip;\nWell, we would take each of the figures, and flatten it into a long row. Then, feed it into a layer of \\(4 \\times 4 = 16\\) input neurons.\nWhat would that look like? Well; let \\(a=0, b=1\\):\nzebra_sample = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] chessboard_sample = [0,1,0,1,1,0,1,0,0,1,0,1,1,0,1,0] Without looking very closely, those two very different patterns seem to yield pretty similar input samples! A dense neural network need to fit very well to notice the numerical trick of checking if two \\(1\\) or two \\(0\\) are next. That\u0026rsquo;s not good\u0026hellip; A human can spot the difference in the original, 2D figure very obviously!\nENTER CNNs\nWhat is a CNN? The take-home-message from the previous simple example is that 2D structures loose information when they are flattened. So, CNNs\u0026mdash;ultimately\u0026mdash;offer a way to process this 2D structural information with a neural network without flattening immediately. 
Generally, a CNN takes the following structure:\n2D-input fed into the model convolutional layers process small sections of the 2D input, projecting each section another section on a larger, 2D hidden grid; think about this as upsampling in images a pooling layer takes sections of the larger 2D grid of neurons then process each section into one value (usually by taking their maximum or average); think about this as downsampling in images Repeat steps 2-3 a flatten layer takes the now processed 2D grid and flattens it in the usual manner into a 1D tensor process the now information-rich, flat hidden representation as usual with a dense neural-network I would pause and ponder the above outline a little bit; but, no worries if this does immediately make sense; hopefully, as the layers are introduced progressively, what they do on various inputs will start to make more sense.\nI promise we will get to the actual layers soon, but before then, we have some vocabulary terms to go over.\nVocab Time! kernel Everything in the CNN world rests upon the idea of a kernel. A kernel is a sub-sample of the input of a certain fixed size (you can choose the size). Take our original checkerboard input:\na b a b b a b a a b a b b a b a An example \\((2 \\times 2)\\) kernel on this input could be:\na b b a that is:\na b a b b a b a a b a b b a b a Tada! A \\((2 \\times 2)\\) kernel is simply a \\((2\\times 2)\\) sample of the input.\nconvol-* and stride Convolving, convolutional, convoluting\u0026hellip; What does that mean? For a kernel size (i.e. 
dimensions of the kernel) that\u0026rsquo;s smaller than the size of the entire 2D input\u0026mdash;which, if you want your CNN to perform better than a DNN, it has to be (challenge question: why?)\u0026mdash;you need to move it around the input to capture the entirety of the input sample.\nThat movement is called convolution\nHere\u0026rsquo;s a \\((2\\times 2)\\) kernel!\na b a b b a b a a b a b b a b a and\u0026hellip; here it is convolving to the right!\na b a b b a b a a b a b b a b a Look! A moving kernel. That\u0026rsquo;s convolution.\nNow, how much the kernel moves at each step is called the stride, or the stride size. For a 2D input sample, this is usually specified as a 2D tuple: \\((x,y)\\), with \\(x\\) representing how much the kernel moves per step in the \\(x\\) direction, and \\(y\\) representing how much the kernel moves per step in the \\(y\\) direction.\nfilter So far we are doing nothing with the kernel: we are just taking convolving sub-samples, and doing a grand total of nothing with the array of subsamples. Filters are responsible of doing the actual processing.\nEach time a kernel is sampled, it is sent through a weight-matrix (just like what is stuck between two linear layers) which is called a filter. The output of this matrix is then reassembled into a 2D array after the sample kernel from each convolution is passed through the same filter, ready for more processing!\nchannel Here\u0026rsquo;s a little-known secret about the world: humans see colors! The toy example conveniently ignored this important fact: each pixel was simply a number, which\u0026mdash;in the real world\u0026mdash;would represent only one hue (think shades of gray). That\u0026rsquo;s an alright assumption to make if we are only encoding checkerboards or zebras, but not great if we want to recognize anything complicated. 
How would we represent colors in our input?\nMultiple channels to the rescue!\nA \u0026ldquo;2D\u0026rdquo; sample actually contains three dimensions: (channel_size, height, width). Namely, each convolutional layer actually take multiple of those grids we discussed above as input, each representing the saturation of a specific color at each pixel. Those separate grids representing the same input are called channels.\nA conventional \u0026ldquo;image\u0026rdquo;, then, is actually three samples masquerading as one:\na grid of the concentrations of the red channel a grid of the concentrations of the green channel a grid of the concentrations of the blue channel Say, a sample image is square and has side-length \\(20\\). Can you guess the actual dimensions of one \u0026ldquo;sample\u0026rdquo; tensor?\n\u0026hellip;\n\\((3, 20,20)\\): three channels RGB, height, width.\nLet\u0026rsquo;s get convolving Throughout this workbook, we are never actually going to build a neural network. You already know how to do that! In this section, let\u0026rsquo;s go through each of the layers discussed above that a CNN consists of, and we will leave you with the task of putting them together in the workbook challenge. Don\u0026rsquo;t worry, we will be here to help you through that process.\nEither way, however, let\u0026rsquo;s get PyTorch going:\nimport torch import torch.nn as nn sampling images We went through all this talk about images, but we never actually dealt with one. So, before we can actually do anything with CNNs, let\u0026rsquo;s see how we can actually turn an image into the numbered pixel-grid we discussed in the toy example above.\nTo do this, we will use the PythonImageLibrary (PIL), whose currently implementation is cutely named \u0026ldquo;Pillow\u0026rdquo;. 
If the following line does not work because you are not running Colab, run pip install pillow on your machine and you will be off to the races.\nfrom PIL import Image Let\u0026rsquo;s open an example image!\nimg = Image.open(\u0026#34;./beesnees.png\u0026#34;) img \u0026lt;PIL.PngImagePlugin.PngImageFile image mode=RGBA size=938x1436 at 0x12B293FD0\u0026gt; Nice. We just opened a local image on my computer, of size \\(938 \\times 1436\\), named \u0026ldquo;beesnees.png\u0026rdquo;.\nAside: loading images\nWhat? You don\u0026rsquo;t just conveniently have a file named \u0026ldquo;beesnees\u0026rdquo; located on your Colab instance? Well\u0026hellip; Let\u0026rsquo;s load it.\nLocate on the left side of your Colab window the left sidebar, where the fourth icon down is the \u0026ldquo;file\u0026rdquo; folder icon. Tap on that, and\u0026mdash;in the \u0026ldquo;files\u0026rdquo; pane that opens\u0026mdash;tap on the first of the four icons, shaped like a page with an arrow, below the word \u0026ldquo;files\u0026rdquo;. Select your file, and you are off to the races.\nOh, and here\u0026rsquo;s beesnees.jpg\nAnyways, now that we have an image, what can we do with it? Well, for starters, we can ask numpy to make an array out of it.\nimport numpy as np arr = np.array(img) arr [[[ 10 10 8 255] [ 10 10 8 255] [ 10 10 8 255] ... [ 7 7 5 255] [ 7 7 5 255] [ 7 7 5 255]] ... [[ 3 3 3 255] [ 3 3 3 255] [ 3 3 3 255] ... [ 12 16 21 255] [ 12 16 21 255] [ 12 16 21 255]]] Interested in what shape it is?\narr.shape (1436, 938, 4) Hmmm\u0026hellip; We know that it is a \\((1436 \\times 938)\\) image, and apparently numpy encoded each of the pixels with all the channels instead of separating them into one channel per grids. That\u0026rsquo;s all fine and good, but why are there \\(4\\) channels for an RGB image?\nTurns out, png images by default are RGB*A*\u0026mdash;the last channel being transparency. 
Staring at the array above, we can see that every single pixel\u0026rsquo;s fourth channel is \\(255\\), meaning this image is not transparent anywhere.\nTransparency info is not super useful info for most models, so let\u0026rsquo;s slice it apart: leaving us with \\((1436 \\times 938 \\times 3)\\):\narr = arr[:,:,:-1] arr.shape (1436, 938, 3) Aside: woah! what is that syntax?\nIn the example code above, we used array slicing notation: arr[:,:,:-1]. This is an extension on Python list slicing only valid on Numpy arrays and PyTorch tensors. To come up with your own, here are the rules:\nseparate each dimension with a comma, from outer to inner on each dimension, slice the dimension normally with Python slice syntax, remembering that it has a fencepost problem on the end index: [startIndex]:[endIndex+1]; recall that negative numbers loop around (i.e. -1 means the end of the list) recall that, if you want to start from the beginning or end at the end of an array, you can leave that side blank: :5 means \u0026ldquo;give me the first 5 elements 0,1,2,3,4\u0026rdquo; if you want to keep the entirety of a dimension, type an colon and move on So, decoding what we just typed: arr[:,:,:-1]:\narr (the Numpy array we are slicing) [ (slice!) :, (keep all of the H dimension) :, (keep all of the W dimension) :-1 (keep everything in the channels dimension except until the last element, hence removing that) ] (end slice) One more change before moving on. The arr matrix right now has shape \\(H \\times W \\times C\\), where \\(C\\) is the number of channels. However, recall that PyTorch (more reasonably, in my opinion), expects \\(C \\times H \\times W\\): where channels are the first dimension to show that each channel is an independent grid of pixels representing that color.\nSo, we need to swap the inner and outer dimensions of our array to separate the three channels into grids of \\(H \\times W\\).\narr = arr.swapaxes(0,2) arr.shape (3, 938, 1436) Excellent. 
We have now swapped the axes of the image such that each channel is by itself. Although\u0026hellip; we also messed up the orientation of the image: it is now \\(938 \\times 1436\\) instead of \\(1436 \\times 938\\). Turns out, this does not matter much\u0026mdash;your machine learning model does not care about what orientation things are as long as they are consistent between the images and labels (challengeish question: why is that?).\nUsually, when we deal with image inputs, we end up with color values between \\(0\\) and \\(255\\). Yet, as you probably saw already, neural networks are exceptionally bad at dealing with large integers such as \\(255\\). As such, we will squish the input into a matrix of small numbers by just reporting pixel values in terms of \u0026ldquo;brightness\u0026rdquo;, also known as \u0026ldquo;percentage until 255\u0026rdquo;, which would nicely normalize the input\nbrightness_arr = arr/255 brightness_arr [[[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471] ... [0.02745098 0.02745098 0.02745098 ... 0.04705882 0.04705882 0.04705882]] [[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471] ... [0.02745098 0.02745098 0.02745098 ... 0.0627451 0.0627451 0.0627451 ]] [[0.03137255 0.03137255 0.03137255 ... 0.01176471 0.01176471 0.01176471] ... [0.01960784 0.01960784 0.01960784 ... 0.08235294 0.08235294 0.08235294]]] One last step before we go forward: arr in as numpy array, which can\u0026rsquo;t be fed through Torch-accelerated objects. 
To get it to do things in a neural network, we need it to be a tensor:\nimg_tsr = torch.tensor(brightness_arr).float() img_tsr tensor([[[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118], ..., [0.0275, 0.0275, 0.0275, ..., 0.0471, 0.0471, 0.0471]], [[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118], ..., [0.0275, 0.0275, 0.0275, ..., 0.0627, 0.0627, 0.0627]], [[0.0314, 0.0314, 0.0314, ..., 0.0118, 0.0118, 0.0118], ..., [0.0196, 0.0196, 0.0196, ..., 0.0824, 0.0824, 0.0824]]]) For kicks, let\u0026rsquo;s also put it in a batch of \\(1\\) (as, recall, layers take an array of samples for input, so we just need to create a batch containing one element). .unsqueeze(dim) does this on a tensor; it just surrounds the desired dimension with another set of \u0026ldquo;brackets\u0026rdquo; (i.e. add a dimension of \\(1\\)).\nimg_tsr = img_tsr.unsqueeze(0) img_tsr.shape torch.Size([1, 3, 938, 1436]) Machine learning now awaits us\u0026hellip;\nconvolutional layer Finally, it is convolutioning time. The main object we will be using will be Conv2d, the 2D convolutional layer. If you care about the behind-the-scenes math, here it is: basically, it is the bias per channel, plus the weight of that channel (filter) times each kernel of that channel, cross-correlated across convolutions.\nTo instantiate a convolutional layer, here are what you need to figure out:\ninput channels: 3 for RGB, if you are not convolving across images, or your image is sepia or B\u0026amp;W, etc., your mileage will vary output channels: the output dimension of your weight matrix, what your convolving kernels get projected to kernel size: the width of your kernel, it is usually square so one number suffices stride size: how much your kernel should move per convolution (i.e. 
stride), default is \\((1\\times 1)\\) So, here goes; I gave an arbitrary hidden size of \\(5\\) and a kernel size of \\(4 \\times 4\\); we will talk about recommended topologies later.\ntest_convolution_layer = nn.Conv2d(in_channels=3, out_channels=5, kernel_size=4, stride=(1,1)) test_convolution_layer Conv2d(3, 5, kernel_size=(4, 4), stride=(1, 1)) Now, passing our input through our single layer:\nnet = test_convolution_layer(img_tsr) net tensor([[[[-0.0296, -0.0288, -0.0296, ..., -0.0249, -0.0249, -0.0249], [-0.0296, -0.0288, -0.0300, ..., -0.0249, -0.0249, -0.0249], [-0.0290, -0.0282, -0.0293, ..., -0.0249, -0.0249, -0.0249], ..., [ 0.0713, 0.0713, 0.0713, ..., -0.0086, -0.0080, -0.0093], [ 0.0713, 0.0713, 0.0713, ..., -0.0089, -0.0087, -0.0102], [ 0.0713, 0.0713, 0.0713, ..., -0.0113, -0.0113, -0.0113]]]], grad_fn=\u0026lt;ConvolutionBackward0\u0026gt;) Now, let\u0026rsquo;s take a look at the shape of this nice output:\nnet.shape torch.Size([1, 5, 935, 1433]) Look! Each of our kernels got projected from the \\(3\\) input channels into the \\(5\\) output channels; as our convolutions has stride size \\((1 \\times 1)\\), our kernel moves across our image and the filter takes each kernel and spits out a vector of length \\(5\\) at each step, resulting in \\((935 \\times 1433)\\) such steps and hence an output of \\((5 \\times 935 \\times 1433)\\).\nThinking break: kernel sizes and steps!\nNow, recall that our input has size \\((938 \\times 1436)\\), and yet our output has \\((935 \\times 1433)\\) as the last two dimensions, meaning the kernel only took \\(935\\) steps in the first dimension and \\(1433\\) steps in the second. What happened? Our step size is \\(1\\), so shouldn\u0026rsquo;t the steps span across the whole image?\nMess with the stride size and kernel size and figure this out. 
Shout a compelling answer in class before moving on.\nUnderstanding this will be critical to demonstrate your intuition of CNNs, pleeeese don\u0026rsquo;t move on until you think you have a good answer.\nLet\u0026rsquo;s think about this in context of a larger network. Our convolutional layer just took our input image, and processed it such that it ended up with a more, for the lack of a better word, \u0026ldquo;nuanced\u0026rdquo; 2D representation of our image. Instead of RGB being our three channels, our five channels will, after good training, contain more complex information such as \u0026ldquo;edges\u0026rdquo; or \u0026ldquo;blocks\u0026rdquo; that downstream networks can process.\nmaxpool/avgpool layer In traditional convolutional networks, each kernel is processed with a filter, projecting its channels into some larger space with its weights. What if instead, we took each kernel stride and squished it down into a single number?\nThat process is called pooling; correctly sequenced pooling layers acts as \u0026ldquo;information extraction\u0026rdquo; layers. Think of it as layers that asks question to the tune of \u0026ldquo;is anything in this sub-area of the image very bright?\u0026rdquo; or \u0026ldquo;what is the average color of this sub-area of the image?\u0026rdquo;: giving us actually more actionable information about the area than the pixels themselves.\nThere are two common pooling algorithms:\nMaxPool: take a kernel, squish it into one vector with one number each channel representing the maximum of the kernel in that channel AvgPool: take a kernel, squish it into one vector with one number each channel representing the average of the kernel in that channel To instantiate such a pooling layer, you need to figure out:\nkernel size: the width of your squishification kernel stride size: how much your squishing kernel should move per convolution (i.e. 
stride), default is \\((1\\times 1)\\) So, instantiating a MaxPool layer:\ntest_pooling_layer = nn.MaxPool2d(kernel_size=4, stride=(1,1)) test_pooling_layer MaxPool2d(kernel_size=4, stride=(1, 1), padding=0, dilation=1, ceil_mode=False) Now, applying our layer:\nnet = test_pooling_layer(net) net tensor([[[[-0.0277, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249], [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249], [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249], ..., [ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041], [ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041], [ 0.0713, 0.0713, 0.0713, ..., -0.0083, -0.0064, -0.0061]]]], grad_fn=\u0026lt;MaxPool2DWithIndicesBackward0\u0026gt;) net.shape torch.Size([1, 5, 932, 1430]) Thinking break: kernel sizes and steps, again!\nA very similar question as before. Why is it that, while pooling the input kernels (i.e. squishing every kernel of \\(4 \\times 4 = 16\\) pixels into one value), our side length didn\u0026rsquo;t, say, get divided by \\(4\\)? Why it is that our image is still almost as large?\nI will leave you to infer the calling convention of nn.AvgPool2d.\nGreat, one more layer before we are off to the races.\nflatten This one is really simple. All the 2D convolution work we did before is fine and good, but eventually we need to fall back on dense neural-networks to do the work of, for example, classification. Eventually, we need to flatten these tensors.\nFortunately, PyTorch has a layer for that (it seems like when it doubt, this is usually true\u0026hellip;)\nflat = nn.Flatten() flat(net).shape torch.Size([1, 6663800]) Nice and flat, just the way we expect for a dense-neural-network.\nAside: woah! that\u0026rsquo;s awfully large!\nThe natural next layer to this would be something like\nnn.Dense(6663800, 256) which would result in us using a matrix to project this GIGANTIC! processed input into a comparatively tiny output dimension. 
The whole point, as we discussed, of CNNs is to prevent the need to flatten an image right up front into giant, hard-to-process input vectors. How is this output serving that need?\nWe will discuss strategies of projecting large samples downwards very shortly, but even if we didn\u0026rsquo;t, this large input vector is not at all the same thing as just flattening the raw input vector: it has been processed with many filters and a pooling layer already, which means that the information contained in it is probably much more readily accessible for a neural network.\nA Typical CNN It is always very hard to name what exact architecture will work for a problem. However, these guidelines can help you architect a good CNN:\nStart with convolutional layers that has a tiny kernel, but projects the input into a large amount of channels (i.e. hyper-dimensional filters); good candidates looks like \\(32\\) output channels, but with kernels of \\(2\\times 2\\). Think of these as the \u0026ldquo;edge detection\u0026rdquo;, \u0026ldquo;face detection\u0026rdquo;, etc. layers as Grant outlined in his video we saw at the beginning of the class. Gradually decrease the number of output channels (filters), but increase your kernel size; good candidates look like \\(2\\) output channels, but with kernels of \\(32 \\times 32\\). Think of these as the \u0026ldquo;structural\u0026rdquo; layers that detect large structures like \u0026ldquo;loops\u0026rdquo; or \u0026ldquo;shadows\u0026rdquo;, etc. Put a pooling layer (which algorithm is to taste of the problem) between 3-5 convolutional layers If you want to end up with a smaller sample, try taking larger strides. Always try to keep your kernel size larger then your stride size, or you will end up missing values in the data (thinking break: why?)\nChallenge + First Project Now that you have learned three new layers, its time to see them in action. Build a neural network to classify tiny images! 
Use at least one convolutional layer, one pooling layer, and the typical architecture of the network that we discussed last time. No need to do a full write-up, just the model, explanation and the associated colab is fine.\nData: https://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj\n","html":"\u003cp\u003eWelcome back! I think, over the last few days, we have been hyping up convolutional neural networks enough such that you are probably ready to dive right in. So\u0026hellip; Let\u0026rsquo;s, uh, motivate it first!\u003c/p\u003e\n\u003ch2 id=\"why-do-we-use-a-cnn\"\u003eWhy do we use a CNN?\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s think of a toy problem to play with. Given a pattern made using two colours (let\u0026rsquo;s name them a and b, or perhaps black and white), let\u0026rsquo;s classify whether it is the \u0026ldquo;zebra\u0026rdquo; pattern\u0026quot; or the \u0026ldquo;checkerboard\u0026rdquo; pattern.\u003c/p\u003e\n\u003cp\u003eZebra\u0026mdash;aligned stripes:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eCheckerboard\u0026mdash;alternating 
stripes:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe are already familiar with one neural-network architecture: stacked \u003cstrong\u003elinear\u003c/strong\u003e layers, also known as \u003cstrong\u003edeep neural networks\u003c/strong\u003e. If we are trying to process these two input samples for a linear layer, what would we do?\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eWell, we would take each of the figures, and flatten it into a long row. Then, feed it into a layer of \\(4 \\times 4 = 16\\) input neurons.\u003c/p\u003e\n\u003cp\u003eWhat would that look like? 
Well; let \\(a=0, b=1\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ezebra_sample\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echessboard_sample\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWithout looking very closely, those two \u003cem\u003every\u003c/em\u003e different patterns seem to yield pretty similar input samples! A \u003cstrong\u003edense neural network\u003c/strong\u003e need to fit very well to notice the numerical trick of checking if two \\(1\\) or two \\(0\\) are next. That\u0026rsquo;s not good\u0026hellip; A human can spot the difference in the original, 2D figure very obviously!\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eENTER CNNs\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"what-is-a-cnn\"\u003eWhat is a CNN?\u003c/h2\u003e\n\u003cp\u003eThe take-home-message from the previous simple example is that \u003cem\u003e2D structures loose information when they are flattened\u003c/em\u003e. So, \u003cstrong\u003eCNNs\u003c/strong\u003e\u0026mdash;ultimately\u0026mdash;offer a way to process this 2D structural information with a \u003cstrong\u003eneural network\u003c/strong\u003e without flattening immediately. 
Generally, a \u003cstrong\u003eCNN\u003c/strong\u003e takes the following structure:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e2D-input fed into the model\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econvolutional layers\u003c/strong\u003e process small sections of the 2D input, projecting each section another section on a larger, 2D hidden grid; think about this as \u003cstrong\u003eupsampling\u003c/strong\u003e in images\u003c/li\u003e\n\u003cli\u003ea \u003cstrong\u003epooling layer\u003c/strong\u003e takes sections of the larger 2D grid of neurons then process each section into one value (usually by taking their maximum or average); think about this as \u003cstrong\u003edownsampling\u003c/strong\u003e in images\u003c/li\u003e\n\u003cli\u003eRepeat steps 2-3\u003c/li\u003e\n\u003cli\u003ea \u003cstrong\u003eflatten\u003c/strong\u003e layer takes the now \u003cem\u003eprocessed\u003c/em\u003e 2D grid and flattens it in the usual manner into a 1D tensor\u003c/li\u003e\n\u003cli\u003eprocess the now information-rich, flat hidden representation as usual with a \u003cstrong\u003edense neural-network\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI would pause and ponder the above outline a little bit; but, no worries if this does immediately make sense; hopefully, as the layers are introduced progressively, what they do on various inputs will start to make more sense.\u003c/p\u003e\n\u003cp\u003eI promise we will get to the actual layers soon, but before then, we have some vocabulary terms to go over.\u003c/p\u003e\n\u003ch2 id=\"vocab-time\"\u003eVocab Time!\u003c/h2\u003e\n\u003ch3 id=\"kernel\"\u003ekernel\u003c/h3\u003e\n\u003cp\u003eEverything in the \u003cstrong\u003eCNN\u003c/strong\u003e world rests upon the idea of a \u003cstrong\u003ekernel\u003c/strong\u003e. A \u003cstrong\u003ekernel\u003c/strong\u003e is a sub-sample of the input of a certain fixed size (you can choose the size). 
Take our original checkerboard input:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAn example \\((2 \\times 2)\\) \u003cstrong\u003ekernel\u003c/strong\u003e on this input could be:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ethat 
is:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eTada! A \\((2 \\times 2)\\) \u003cstrong\u003ekernel\u003c/strong\u003e is simply a \\((2\\times 2)\\) sample of the input.\u003c/p\u003e\n\u003ch3 id=\"convol-and-stride\"\u003econvol-* and stride\u003c/h3\u003e\n\u003cp\u003eConvolving, convolutional, convoluting\u0026hellip; What does that mean? For a \u003cstrong\u003ekernel size\u003c/strong\u003e (i.e. 
dimensions of the \u003cstrong\u003ekernel\u003c/strong\u003e) that\u0026rsquo;s smaller than the size of the entire 2D input\u0026mdash;which, if you want your \u003cstrong\u003eCNN\u003c/strong\u003e to perform better than a \u003cstrong\u003eDNN\u003c/strong\u003e, it has to be (challenge question: why?)\u0026mdash;you need to move it around the input to capture the entirety of the input sample.\u003c/p\u003e\n\u003cp\u003eThat movement is called \u003cstrong\u003econvolution\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a \\((2\\times 2)\\) \u003cstrong\u003ekernel\u003c/strong\u003e!\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eand\u0026hellip; here it is \u003cstrong\u003econvolving\u003c/strong\u003e to the 
right!\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eLook! A moving \u003cstrong\u003ekernel\u003c/strong\u003e. That\u0026rsquo;s \u003cstrong\u003econvolution\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eNow, how \u003cem\u003emuch\u003c/em\u003e the \u003cstrong\u003ekernel\u003c/strong\u003e moves at each step is called the \u003cstrong\u003estride\u003c/strong\u003e, or the \u003cstrong\u003estride size\u003c/strong\u003e. For a 2D input sample, this is usually specified as a 2D tuple: \\((x,y)\\), with \\(x\\) representing how much the kernel moves per step in the \\(x\\) direction, and \\(y\\) representing how much the kernel moves per step in the \\(y\\) direction.\u003c/p\u003e\n\u003ch3 id=\"filter\"\u003efilter\u003c/h3\u003e\n\u003cp\u003eSo far we are doing nothing with the \u003cstrong\u003ekernel\u003c/strong\u003e: we are just taking \u003cstrong\u003econvolving\u003c/strong\u003e sub-samples, and doing a grand total of nothing with the array of subsamples. 
\u003cstrong\u003eFilters\u003c/strong\u003e are responsible of doing the actual processing.\u003c/p\u003e\n\u003cp\u003eEach time a \u003cstrong\u003ekernel\u003c/strong\u003e is sampled, it is sent through a weight-matrix (just like what is stuck between two \u003cstrong\u003elinear\u003c/strong\u003e layers) which is called a \u003cstrong\u003efilter\u003c/strong\u003e. The output of this matrix is then reassembled into a 2D array after the sample kernel from each \u003cstrong\u003econvolution\u003c/strong\u003e is passed through the same filter, ready for more processing!\u003c/p\u003e\n\u003ch3 id=\"channel\"\u003echannel\u003c/h3\u003e\n\u003cp\u003eHere\u0026rsquo;s a little-known secret about the world: humans see colors! The toy example conveniently ignored this important fact: each pixel was simply a number, which\u0026mdash;in the real world\u0026mdash;would represent only one hue (think shades of gray). That\u0026rsquo;s an alright assumption to make if we are only encoding checkerboards or zebras, but not great if we want to recognize anything complicated. How would we represent colors in our input?\u003c/p\u003e\n\u003cp\u003e\u003cem\u003eMultiple \u003cstrong\u003echannels\u003c/strong\u003e to the rescue!\u003c/em\u003e\u003c/p\u003e\n\u003cp\u003eA \u0026ldquo;2D\u0026rdquo; sample actually contains three dimensions: \u003ccode\u003e(channel_size, height, width)\u003c/code\u003e. Namely, each \u003cstrong\u003econvolutional\u003c/strong\u003e layer actually take multiple of those grids we discussed above as input, each representing the saturation of a specific color at each pixel. 
Those separate grids representing the same input are called \u003cstrong\u003echannels\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eA conventional \u0026ldquo;image\u0026rdquo;, then, is actually three samples masquerading as one:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ea grid of the concentrations of the red \u003cstrong\u003echannel\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ea grid of the concentrations of the green \u003cstrong\u003echannel\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ea grid of the concentrations of the blue \u003cstrong\u003echannel\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSay, a sample image is square and has side-length \\(20\\). Can you guess the actual dimensions of one \u0026ldquo;sample\u0026rdquo; tensor?\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\\((3, 20,20)\\): three channels RGB, height, width.\u003c/p\u003e\n\u003ch2 id=\"let-s-get-convolving\"\u003eLet\u0026rsquo;s get convolving\u003c/h2\u003e\n\u003cp\u003eThroughout this workbook, we are never actually going to build a neural network. You already know how to do that! In this section, let\u0026rsquo;s go through each of the \u003cem\u003elayers\u003c/em\u003e discussed above that a CNN consists of, and we will leave you with the task of putting them together in the workbook challenge. 
Don\u0026rsquo;t worry, we will be here to help you through that process.\u003c/p\u003e\n\u003cp\u003eEither way, however, let\u0026rsquo;s get PyTorch going:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"sampling-images\"\u003esampling images\u003c/h3\u003e\n\u003cp\u003eWe went through all this talk about images, but we never actually dealt with one. So, before we can actually do anything with CNNs, let\u0026rsquo;s see how we can actually turn an image into the numbered pixel-grid we discussed in the toy example above.\u003c/p\u003e\n\u003cp\u003eTo do this, we will use the PythonImageLibrary (PIL), whose currently implementation is cutely named \u0026ldquo;Pillow\u0026rdquo;. 
If the following line does not work because you are not running Colab, run \u003ccode\u003epip install pillow\u003c/code\u003e on your machine and you will be off to the races.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ePIL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eImage\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s open an example image!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eImage\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eopen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./beesnees.png\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;PIL.PngImagePlugin.PngImageFile image mode=RGBA size=938x1436 at 0x12B293FD0\u0026gt;\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice. We just opened a local image on my computer, of size \\(938 \\times 1436\\), named \u0026ldquo;beesnees.png\u0026rdquo;.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003eloading images\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWhat? You don\u0026rsquo;t just conveniently have a file named \u0026ldquo;beesnees\u0026rdquo; located on your Colab instance? Well\u0026hellip; Let\u0026rsquo;s load it.\u003c/p\u003e\n\u003cp\u003eLocate on the left side of your Colab window the left sidebar, where the fourth icon down is the \u0026ldquo;file\u0026rdquo; folder icon. Tap on that, and\u0026mdash;in the \u0026ldquo;files\u0026rdquo; pane that opens\u0026mdash;tap on the first of the four icons, shaped like a page with an arrow, below the word \u0026ldquo;files\u0026rdquo;. Select your file, and you are off to the races.\u003c/p\u003e\n\u003cp\u003eOh, and here\u0026rsquo;s \u003ca href=\"https://haha.business/business.jpg\"\u003ebeesnees.jpg\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAnyways, now that we have an image, what can we do with it? 
Well, for starters, we can ask \u003ccode\u003enumpy\u003c/code\u003e to make an array out of it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003earray\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[[ 10 10 8 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 10 10 8 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 10 10 8 
255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7 7 5 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7 7 5 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7 7 5 255]]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[ 3 3 3 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 3 3 3 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 3 3 3 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 12 16 21 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 12 16 21 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 12 16 21 255]]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eInterested in what shape it is?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1436, 938, 4)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eHmmm\u0026hellip; We know that it is a \\((1436 \\times 938)\\) image, and apparently \u003ccode\u003enumpy\u003c/code\u003e encoded each of the pixels with all the channels instead of separating them into one channel per grids. That\u0026rsquo;s all fine and good, but why are there \\(4\\) channels for an RGB image?\u003c/p\u003e\n\u003cp\u003eTurns out, \u003ccode\u003epng\u003c/code\u003e images by default are RGB*A*\u0026mdash;the last channel being transparency. Staring at the array above, we can see that every single pixel\u0026rsquo;s fourth channel is \\(255\\), meaning this image is not transparent anywhere.\u003c/p\u003e\n\u003cp\u003eTransparency info is not super useful info for most models, so let\u0026rsquo;s slice it apart: leaving us with \\((1436 \\times 938 \\times 3)\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,:,:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1436, 938, 3)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003ewoah! what is that syntax?\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eIn the example code above, we used \u003cstrong\u003earray slicing notation\u003c/strong\u003e: \u003ccode\u003earr[:,:,:-1]\u003c/code\u003e. This is an extension on Python list slicing only valid on Numpy arrays and PyTorch tensors. To come up with your own, here are the rules:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eseparate each dimension with a comma, from outer to inner\u003c/li\u003e\n\u003cli\u003eon each dimension, slice the dimension normally with Python slice syntax, remembering that it has a fencepost problem on the end index: \u003ccode\u003e[startIndex]:[endIndex+1]\u003c/code\u003e; recall that negative numbers loop around (i.e. 
\u003ccode\u003e-1\u003c/code\u003e means the end of the list)\u003c/li\u003e\n\u003cli\u003erecall that, if you want to start from the beginning or end at the end of an array, you can leave that side blank: \u003ccode\u003e:5\u003c/code\u003e means \u0026ldquo;give me the first 5 elements 0,1,2,3,4\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eif you want to keep the entirety of a dimension, type an colon and move on\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, decoding what we just typed: \u003ccode\u003earr[:,:,:-1]\u003c/code\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003earr\u003c/code\u003e (the Numpy array we are slicing)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e[\u003c/code\u003e (slice!)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e:,\u003c/code\u003e (keep all of the H dimension)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e:,\u003c/code\u003e (keep all of the W dimension)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e:-1\u003c/code\u003e (keep everything in the channels dimension except until the last element, hence removing that)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e]\u003c/code\u003e (end slice)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eOne more change before moving on. The \u003ccode\u003earr\u003c/code\u003e matrix right now has shape \\(H \\times W \\times C\\), where \\(C\\) is the number of channels. 
However, recall that PyTorch (more reasonably, in my opinion), expects \\(C \\times H \\times W\\): where channels are the first dimension to show that each channel is an independent grid of pixels representing that color.\u003c/p\u003e\n\u003cp\u003eSo, we need to swap the inner and outer dimensions of our array to separate the three \u003cstrong\u003echannels\u003c/strong\u003e into grids of \\(H \\times W\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eswapaxes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(3, 938, 
1436)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. We have now swapped the axes of the image such that each \u003cstrong\u003echannel\u003c/strong\u003e is by itself. Although\u0026hellip; we also messed up the orientation of the image: it is now \\(938 \\times 1436\\) instead of \\(1436 \\times 938\\). Turns out, this does not matter much\u0026mdash;your machine learning model does not care about what orientation things are \u003cem\u003eas long as they are consistent between the images and labels\u003c/em\u003e (challengeish question: why is that?).\u003c/p\u003e\n\u003cp\u003eUsually, when we deal with image inputs, we end up with color values between \\(0\\) and \\(255\\). Yet, as you probably saw already, neural networks are exceptionally bad at dealing with large integers such as \\(255\\). As such, we will squish the input into a matrix of small numbers by just reporting pixel values in terms of \u0026ldquo;brightness\u0026rdquo;, also known as \u0026ldquo;percentage until 255\u0026rdquo;, which would nicely normalize the input\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebrightness_arr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e255\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebrightness_arr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.02745098 0.02745098 0.02745098 ... 0.04705882 0.04705882 0.04705882]]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.02745098 0.02745098 0.02745098 ... 0.0627451 0.0627451 0.0627451 ]]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.03137255 0.03137255 0.03137255 ... 0.01176471 0.01176471 0.01176471]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.01960784 0.01960784 0.01960784 ... 0.08235294 0.08235294 0.08235294]]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOne last step before we go forward: \u003ccode\u003earr\u003c/code\u003e in as numpy array, which can\u0026rsquo;t be fed through Torch-accelerated objects. 
To get it to do things in a neural network, we need it to be a \u003cstrong\u003etensor\u003c/strong\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebrightness_arr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.0275, 0.0275, 0.0275, ..., 0.0471, 0.0471, 0.0471]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.0275, 0.0275, 0.0275, ..., 0.0627, 0.0627, 0.0627]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.0314, 0.0314, 0.0314, ..., 0.0118, 0.0118, 0.0118],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.0196, 0.0196, 0.0196, ..., 0.0824, 0.0824, 0.0824]]])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFor kicks, let\u0026rsquo;s also put it in a batch of \\(1\\) (as, recall, layers take an \u003cem\u003earray\u003c/em\u003e of samples for input, so we just need to create a batch containing one element). \u003ccode\u003e.unsqueeze(dim)\u003c/code\u003e does this on a tensor; it just surrounds the desired dimension with another set of \u0026ldquo;brackets\u0026rdquo; (i.e. 
add a dimension of \\(1\\)).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunsqueeze\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 3, 938, 1436])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMachine learning now awaits us\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"convolutional-layer\"\u003econvolutional layer\u003c/h3\u003e\n\u003cp\u003eFinally, it is convolutioning time. The main object we will be using will be \u003ccode\u003eConv2d\u003c/code\u003e, the 2D convolutional layer. 
If you care about the behind-the-scenes math, \u003ca href=\"https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\"\u003ehere it is\u003c/a\u003e: basically, it is the bias per channel, plus the weight of that channel (\u003cstrong\u003efilter\u003c/strong\u003e) times each \u003cstrong\u003ekernel\u003c/strong\u003e of that channel, cross-correlated across \u003cstrong\u003econvolutions\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTo instantiate a convolutional layer, here are what you need to figure out:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003einput channels: \u003cem\u003e3\u003c/em\u003e for RGB, if you are not convolving across images, or your image is sepia or B\u0026amp;W, etc., your mileage will vary\u003c/li\u003e\n\u003cli\u003eoutput channels: the output dimension of your weight matrix, what your \u003cstrong\u003econvolving\u003c/strong\u003e \u003cstrong\u003ekernels\u003c/strong\u003e get projected to\u003c/li\u003e\n\u003cli\u003ekernel size: the width of your \u003cstrong\u003ekernel\u003c/strong\u003e, it is usually square so one number suffices\u003c/li\u003e\n\u003cli\u003estride size: how much your \u003cstrong\u003ekernel\u003c/strong\u003e should move per \u003cstrong\u003econvolution\u003c/strong\u003e (i.e. 
\u003cstrong\u003estride\u003c/strong\u003e), default is \\((1\\times 1)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, here goes; I gave an arbitrary hidden size of \\(5\\) and a kernel size of \\(4 \\times 4\\); we will talk about recommended topologies later.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_convolution_layer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eConv2d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ein_channels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eout_channels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ekernel_size\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estride\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_convolution_layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eConv2d(3, 5, kernel_size=(4, 4), stride=(1, 1))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, passing our input through our single layer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_convolution_layer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[[[-0.0296, -0.0288, -0.0296, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0296, -0.0288, -0.0300, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0290, -0.0282, -0.0293, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0086, -0.0080, -0.0093],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0089, -0.0087, -0.0102],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0113, -0.0113, -0.0113]]]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e grad_fn=\u0026lt;ConvolutionBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, let\u0026rsquo;s take a look at the shape of this nice output:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 5, 935, 1433])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLook! Each of our kernels got projected from the \\(3\\) input \u003cstrong\u003echannels\u003c/strong\u003e into the \\(5\\) output \u003cstrong\u003echannels\u003c/strong\u003e; as our \u003cstrong\u003econvolutions\u003c/strong\u003e has \u003cstrong\u003estride size\u003c/strong\u003e \\((1 \\times 1)\\), our \u003cstrong\u003ekernel\u003c/strong\u003e moves across our image and the \u003cstrong\u003efilter\u003c/strong\u003e takes each kernel and spits out a vector of length \\(5\\) at each step, resulting in \\((935 \\times 1433)\\) such steps and hence an output of \\((5 \\times 935 \\times 1433)\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThinking break: \u003cstrong\u003ekernel sizes and steps!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eNow, recall that our input has size \\((938 \\times 1436)\\), and yet our output has \\((935 \\times 1433)\\) as the last two dimensions, meaning the kernel only took \\(935\\) steps in the first dimension and \\(1433\\) steps in the second. What happened? Our step size is \\(1\\), so shouldn\u0026rsquo;t the steps span across the whole image?\u003c/p\u003e\n\u003cp\u003eMess with the stride size and kernel size and figure this out. 
Shout a compelling answer in class before moving on.\u003c/p\u003e\n\u003cp\u003eUnderstanding this will be \u003cem\u003ecritical\u003c/em\u003e to demonstrate your intuition of CNNs, \u003cem\u003epleeeese don\u0026rsquo;t move on until you think you have a good answer\u003c/em\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet\u0026rsquo;s think about this in context of a larger network. Our convolutional layer just took our input image, and processed it such that it ended up with a more, for the lack of a better word, \u0026ldquo;nuanced\u0026rdquo; 2D representation of our image. Instead of RGB being our three \u003cstrong\u003echannels\u003c/strong\u003e, our five \u003cstrong\u003echannels\u003c/strong\u003e will, after good training, contain more complex information such as \u0026ldquo;edges\u0026rdquo; or \u0026ldquo;blocks\u0026rdquo; that downstream networks can process.\u003c/p\u003e\n\u003ch3 id=\"maxpool-avgpool-layer\"\u003emaxpool/avgpool layer\u003c/h3\u003e\n\u003cp\u003eIn traditional convolutional networks, each \u003cstrong\u003ekernel\u003c/strong\u003e is processed with a \u003cstrong\u003efilter\u003c/strong\u003e, projecting its channels into some larger space with its weights. What if instead, we took each \u003cstrong\u003ekernel\u003c/strong\u003e stride and squished it down into a single \u003cstrong\u003enumber\u003c/strong\u003e?\u003c/p\u003e\n\u003cp\u003eThat process is called \u003cstrong\u003epooling\u003c/strong\u003e; correctly sequenced pooling layers acts as \u0026ldquo;information extraction\u0026rdquo; layers. 
Think of it as layers that asks question to the tune of \u0026ldquo;is anything in this sub-area of the image very bright?\u0026rdquo; or \u0026ldquo;what is the average color of this sub-area of the image?\u0026rdquo;: giving us actually \u003cem\u003emore\u003c/em\u003e actionable information about the area than the pixels themselves.\u003c/p\u003e\n\u003cp\u003eThere are two common pooling algorithms:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eMaxPool: take a \u003cstrong\u003ekernel\u003c/strong\u003e, squish it into one vector with one number each channel representing the maximum of the kernel in that channel\u003c/li\u003e\n\u003cli\u003eAvgPool: take a \u003cstrong\u003ekernel\u003c/strong\u003e, squish it into one vector with one number each channel representing the average of the kernel in that channel\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTo instantiate such a pooling layer, you need to figure out:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ekernel size: the width of your squishification \u003cstrong\u003ekernel\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003estride size: how much your squishing \u003cstrong\u003ekernel\u003c/strong\u003e should move per \u003cstrong\u003econvolution\u003c/strong\u003e (i.e. 
\u003cstrong\u003estride\u003c/strong\u003e), default is \\((1\\times 1)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, instantiating a MaxPool layer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_pooling_layer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMaxPool2d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekernel_size\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estride\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_pooling_layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" 
data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMaxPool2d(kernel_size=4, stride=(1, 1), padding=0, dilation=1, ceil_mode=False)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, applying our layer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_pooling_layer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[[[-0.0277, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
[ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0083, -0.0064, -0.0061]]]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e grad_fn=\u0026lt;MaxPool2DWithIndicesBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 5, 932, 1430])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cp\u003eThinking break: \u003cstrong\u003ekernel sizes and steps, again!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eA very similar question as before. Why is it that, while pooling the input kernels (i.e. squishing every kernel of \\(4 \\times 4 = 16\\) pixels into one value), our side length didn\u0026rsquo;t, say, get divided by \\(4\\)? 
Why it is that our image is still almost as large?\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eI will leave you to infer the calling convention of \u003ccode\u003enn.AvgPool2d\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eGreat, one more layer before we are off to the races.\u003c/p\u003e\n\u003ch3 id=\"flatten\"\u003eflatten\u003c/h3\u003e\n\u003cp\u003eThis one is \u003cem\u003ereally\u003c/em\u003e simple. All the 2D convolution work we did before is fine and good, but eventually we need to fall back on dense neural-networks to do the work of, for example, classification. \u003cem\u003eEventually\u003c/em\u003e, we need to flatten these tensors.\u003c/p\u003e\n\u003cp\u003eFortunately, PyTorch has a layer for that (it seems like when it doubt, this is usually true\u0026hellip;)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eflat\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFlatten\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eflat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 6663800])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice and flat, just the way we expect for a \u003cstrong\u003edense-neural-network\u003c/strong\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003ewoah! that\u0026rsquo;s awfully large!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThe natural next layer to this would be something like\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDense\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e6663800\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e256\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich would result in us using a matrix to project this \u003cstrong\u003eGIGANTIC!\u003c/strong\u003e processed input into a comparatively \u003cem\u003etiny\u003c/em\u003e output dimension. The whole point, as we discussed, of CNNs is to prevent the need to flatten an image right up front into giant, hard-to-process input vectors. 
How is this output serving that need?\u003c/p\u003e\n\u003cp\u003eWe will discuss strategies of projecting large samples downwards very shortly, but even if we didn\u0026rsquo;t, this large input vector is not at all the same thing as just flattening the raw input vector: it has been processed with many filters and a pooling layer already, which means that the information contained in it is probably much more readily accessible for a neural network.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"a-typical-cnn\"\u003eA Typical CNN\u003c/h2\u003e\n\u003cp\u003eIt is always very hard to name what exact architecture will work for a problem. However, these guidelines can help you architect a good CNN:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eStart with convolutional layers that has a \u003cem\u003etiny\u003c/em\u003e \u003cstrong\u003ekernel\u003c/strong\u003e, but projects the input into a \u003cem\u003elarge\u003c/em\u003e amount of channels (i.e. hyper-dimensional \u003cstrong\u003efilters\u003c/strong\u003e); good candidates looks like \\(32\\) output channels, but with kernels of \\(2\\times 2\\). Think of these as the \u0026ldquo;edge detection\u0026rdquo;, \u0026ldquo;face detection\u0026rdquo;, etc. layers as Grant outlined in his video we saw at the beginning of the class.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eGradually\u003c/strong\u003e \u003cem\u003edecrease\u003c/em\u003e the number of output channels (\u003cstrong\u003efilters\u003c/strong\u003e), but \u003cem\u003eincrease\u003c/em\u003e your \u003cstrong\u003ekernel\u003c/strong\u003e size; good candidates look like \\(2\\) output channels, but with kernels of \\(32 \\times 32\\). 
Think of these as the \u0026ldquo;structural\u0026rdquo; layers that detect large structures like \u0026ldquo;loops\u0026rdquo; or \u0026ldquo;shadows\u0026rdquo;, etc.\u003c/li\u003e\n\u003cli\u003ePut a pooling layer (which algorithm is to taste of the problem) between 3-5 convolutional layers\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf you want to end up with a smaller sample, try taking larger \u003cstrong\u003estrides\u003c/strong\u003e. Always try to keep your \u003cstrong\u003ekernel size\u003c/strong\u003e larger then your \u003cstrong\u003estride size\u003c/strong\u003e, or you will end up missing values in the data (thinking break: why?)\u003c/p\u003e\n\u003ch2 id=\"challenge-plus-first-project\"\u003eChallenge + First Project\u003c/h2\u003e\n\u003cp\u003eNow that you have learned three new layers, its time to see them in action. Build a neural network to \u003ca href=\"https://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj?usp=sharing\"\u003eclassify tiny images\u003c/a\u003e! Use at least one \u003cstrong\u003econvolutional\u003c/strong\u003e layer, one \u003cstrong\u003epooling\u003c/strong\u003e layer, and the typical architecture of the network that we discussed last time. No need to do a full write-up, just the model, explanation and the associated colab is fine.\u003c/p\u003e\n\u003cp\u003eData: \u003ca href=\"https://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj\"\u003ehttps://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_time_to_convolve/","tags":["writing","aml"],"title":"AML: Time to Convolve"},{"categories":null,"contents":"Hello y\u0026rsquo;all! This quick post about\u0026hellip; writing your first \u0026ldquo;article\u0026rdquo; (ahem, MA) for this class. 
To me, the most rewarding part of our journey together is to be able to support everyone through writing very presentable reports\u0026mdash;even if it is on old or simple problems\u0026mdash;but in the format from which you can easily jump off and write a fully-blown scientific article in the future.\nexemplar we discussed (Kameswari, Sravani, and Mamidi 2020)\nI should add that, despite the word being used, this is by no means the only way that you can write a wonderful scientific article. It just had the right structure for us to go over in class :)\noverall goals We want to make reports that are clear (easily understandable for audience), concise (only uses words when needed, prioritizing figures and intuition), and precise (when making claims, they are supported clearly with data). And so, the following sections focus mostly on improving those three criteria.\ndiscussion per section Let\u0026rsquo;s now switch to bullet-point format, going over some ideas of how to make strong sections:\nabstract Guiding Questions: What is your study about? What did you do? How well did it work?\nmore jargon here is OK, but should be immediately summative of your whole study only use enough jargon to make it concise: this section should be crystal clear for everyone of your academic peers (i.e. anyone in our class/familiar with ML) it should be easily skimmable intro/motivation/background Guiding Questions: Why are you making big picture choices you made? Did anyone do it before you? How did they do it? What changes are you making (for this class, \u0026ldquo;none\u0026rdquo; is acceptable)?\nimmediately, you should state what you are trying to do and why its worthwhile of your reader\u0026rsquo;s time keep the goal scope small: not \u0026ldquo;cancer is an sickness that affects a lot of people\u0026rdquo;, but \u0026ldquo;this specific gene is correlated with cancer prognosis\u0026rdquo; justify why you are using AML tools! 
if the relationship you are modeling is a line, deep learning is way overkill summarize previous work: if anyone did it before you, what approach did they do; why? Are you/why are you doing anything difference (i.e. why do you believe methods to be not as good?) methods Guiding Questions: How did you do your study?\ngive as much information for a peer (i.e. anyone in our class/familiar with ML) to be able to reproduce your entire study good to think about\u0026hellip; data sourcing detailed notes on data preparation and feature engineering (bonus points for code) model selection + motivation (should be broadly given in the prev. section already) model implementation (layer counts, activations, seeds) (hyper)parameters (LR, batch size, epoch, optimizer momentum/beta, layer seeds) \u0026mdash; saying \u0026ldquo;default\u0026rdquo; here is fine but be specific about what you are leaving to default training environment (hardware, library versions, time it took, etc.) Also, from a class standpoint, we want to see your hard work in actually practicing the skills we are learning!\nresults/data Guiding Questions: Why should your peer believe you did what you said you did?\nmotivate validation metrics used (i.e. why is success in this validation metric a measurement by proxy of success in the stated problem in the intro?) report the clear details the withholding scheme used\u0026mdash;simple train/val split? k-fold? leave-one-out? present in graphical or tabular form! 
your clear key takeaways; in general, keep words in this section to a minimum \u0026ldquo;these distributions look visually different!\u0026rdquo; \u0026ldquo;these lines are definitely parallel!\u0026rdquo; \u0026ldquo;this number is definitely larger than this other number!\u0026rdquo; During the process of \u0026ldquo;NTJ\u0026rdquo;, a paper reading methodology taught by the XRT lab, the skill of jumping abstract =\u0026gt; data (\u0026ldquo;figures\u0026rdquo;) =\u0026gt; takeaways (\u0026ldquo;novelty\u0026rdquo;) is greatly emphasized. Usually, the best papers will represent their key takeaways clearly and graphically in this section, so that the reader only need to go into the methods section strictly when needed to reproduce or clarify questions.\nconclusion/discussion Guiding Questions: Summarize.\nIt is often good to include future work here as well as well as fascinating extensions of your choosing. This section differs from the abstract in both the inclusion of future work, as well as its audience: while the abstract need only to be crystal clear for your peers, the conclusion should be clear to everyone in the field \u0026mdash; so redefinition of paper-specific jargon, etc.\nethics Guiding Questions: Where did your data come from; why is its collection and processing (they are independent permissions!) legal and ethical? Why are you not breaking the world?\nSee: this Medium article for more!\nfrom the experts NIPS (leading ML conference) rubric for a paper (jump to \u0026ldquo;Review Content\u0026rdquo; section)\nKameswari, Lalitha, Dama Sravani, and Radhika Mamidi. 2020. “Enhancing Bias Detection in Political News Using Pragmatic Presupposition.” In Proceedings of the Eighth International Workshop on Natural Language Processing for Social Media, nil. doi:10.18653/v1/2020.socialnlp-1.1. ","html":"\u003cp\u003eHello y\u0026rsquo;all! This quick post about\u0026hellip; writing your first \u0026ldquo;article\u0026rdquo; (ahem, MA) for this class. 
To me, the most rewarding part of our journey together is to be able to support everyone through writing very presentable reports\u0026mdash;even if it is on old or simple problems\u0026mdash;but in the format from which you can easily jump off and write a fully-blown scientific article in the future.\u003c/p\u003e\n\u003ch2 id=\"exemplar-we-discussed\"\u003eexemplar we discussed\u003c/h2\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eKameswari, Sravani, and Mamidi 2020\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eI should add that, despite the word being used, this is by \u003cem\u003eno means\u003c/em\u003e the only way that you can write a wonderful scientific article. It just had the right structure for us to go over in class :)\u003c/p\u003e\n\u003ch2 id=\"overall-goals\"\u003eoverall goals\u003c/h2\u003e\n\u003cp\u003eWe want to make reports that are \u003cem\u003eclear\u003c/em\u003e (easily understandable for audience), \u003cem\u003econcise\u003c/em\u003e (only uses words when needed, prioritizing figures and intuition), and \u003cem\u003eprecise\u003c/em\u003e (when making claims, they are supported clearly with data). And so, the following sections focus mostly on improving those three criteria.\u003c/p\u003e\n\u003ch2 id=\"discussion-per-section\"\u003ediscussion per section\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s now switch to bullet-point format, going over some ideas of how to make strong sections:\u003c/p\u003e\n\u003ch3 id=\"abstract\"\u003eabstract\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: What is your study about? What did you do? 
How well did it work?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emore jargon here is OK, but should be \u003cstrong\u003eimmediately\u003c/strong\u003e summative of your whole study\u003c/li\u003e\n\u003cli\u003eonly use enough jargon to make it concise: this section should be \u003cstrong\u003ecrystal clear\u003c/strong\u003e for everyone of your academic peers (i.e. anyone in our class/familiar with ML)\u003c/li\u003e\n\u003cli\u003eit should be easily skimmable\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"intro-motivation-background\"\u003eintro/motivation/background\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Why are you making big picture choices you made? Did anyone do it before you? How did they do it? What changes are you making (for this class, \u0026ldquo;none\u0026rdquo; is acceptable)?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eimmediately, you should state what you are trying to do and why its worthwhile of your reader\u0026rsquo;s time\u003c/li\u003e\n\u003cli\u003ekeep the goal scope small: not \u0026ldquo;cancer is an sickness that affects a lot of people\u0026rdquo;, but \u0026ldquo;this specific gene is correlated with cancer prognosis\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ejustify why you are using AML tools!\u003c/strong\u003e if the relationship you are modeling is a line, deep learning is way overkill\u003c/li\u003e\n\u003cli\u003esummarize previous work: if anyone did it before you, what approach did they do; why? Are you/why are you doing anything difference (i.e. why do you believe methods to be not as good?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"methods\"\u003emethods\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: How did you do your study?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003egive as much information for a peer (i.e. 
anyone in our class/familiar with ML) to be able to \u003cstrong\u003ereproduce your entire study\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003egood to think about\u0026hellip;\n\u003cul\u003e\n\u003cli\u003edata sourcing\u003c/li\u003e\n\u003cli\u003e\u003cem\u003edetailed\u003c/em\u003e notes on data preparation and feature engineering (bonus points for code)\u003c/li\u003e\n\u003cli\u003emodel selection + motivation (should be broadly given in the prev. section already)\u003c/li\u003e\n\u003cli\u003emodel implementation (layer counts, activations, seeds)\u003c/li\u003e\n\u003cli\u003e(hyper)parameters (LR, batch size, epoch, optimizer momentum/beta, layer seeds) \u0026mdash; saying \u0026ldquo;default\u0026rdquo; here is fine but be specific about what you are leaving to default\u003c/li\u003e\n\u003cli\u003etraining environment (hardware, library versions, time it took, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlso, from a class standpoint, we want to see your hard work in actually practicing the skills we are learning!\u003c/p\u003e\n\u003ch3 id=\"results-data\"\u003eresults/data\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Why should your peer believe you did what you said you did?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emotivate validation metrics used (i.e. why is success in this validation metric a measurement by proxy of success in the stated problem in the intro?)\u003c/li\u003e\n\u003cli\u003ereport the clear details the withholding scheme used\u0026mdash;simple train/val split? k-fold? 
leave-one-out?\u003c/li\u003e\n\u003cli\u003epresent in \u003cstrong\u003egraphical or tabular form!\u003c/strong\u003e your clear key takeaways; in general, keep words in this section to a minimum\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;these distributions look visually different!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;these lines are definitely parallel!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;this number is definitely larger than this other number!\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDuring the process of \u0026ldquo;NTJ\u0026rdquo;, a paper reading methodology taught by the XRT lab, the skill of jumping abstract =\u0026gt; data (\u0026ldquo;figures\u0026rdquo;) =\u0026gt; takeaways (\u0026ldquo;novelty\u0026rdquo;) is greatly emphasized. Usually, the best papers will represent their key takeaways clearly and graphically in this section, so that the reader only need to go into the methods section strictly when needed to reproduce or clarify questions.\u003c/p\u003e\n\u003ch3 id=\"conclusion-discussion\"\u003econclusion/discussion\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Summarize.\u003c/p\u003e\n\u003cp\u003eIt is often good to include future work here as well as well as fascinating extensions of your choosing. This section differs from the abstract in both the inclusion of future work, as well as its audience: while the abstract need only to be crystal clear for your peers, the conclusion should be clear to everyone in the \u003cem\u003efield\u003c/em\u003e \u0026mdash; so redefinition of paper-specific jargon, etc.\u003c/p\u003e\n\u003ch3 id=\"ethics\"\u003eethics\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Where did your data come from; why is its collection and \u003cstrong\u003eprocessing\u003c/strong\u003e (they are independent permissions!) legal and ethical? 
Why are you not breaking the world?\u003c/p\u003e\n\u003cp\u003eSee: \u003ca href=\"https://medium.com/@GovAI/a-guide-to-writing-the-neurips-impact-statement-4293b723f832\"\u003ethis Medium article\u003c/a\u003e for more!\u003c/p\u003e\n\u003ch2 id=\"from-the-experts\"\u003efrom the experts\u003c/h2\u003e\n\u003cp\u003eNIPS (leading ML conference) rubric for a paper (\u003ca href=\"https://nips.cc/Conferences/2020/PaperInformation/ReviewerGuidelines\"\u003ejump to \u0026ldquo;Review Content\u0026rdquo; section\u003c/a\u003e)\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eKameswari, Lalitha, Dama Sravani, and Radhika Mamidi. 2020. “Enhancing Bias Detection in Political News Using Pragmatic Presupposition.” In \u003ci\u003eProceedings of the Eighth International Workshop on Natural Language Processing for Social Media\u003c/i\u003e, nil. doi:\u003ca href=\"https://doi.org/10.18653/v1/2020.socialnlp-1.1\"\u003e10.18653/v1/2020.socialnlp-1.1\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_your_first_article/","tags":null,"title":"AML: Your First Article"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhanatomy/","tags":null,"title":"anatomy"},{"categories":null,"contents":"anatomy learning is the learning of anatomy.\nAnatomy information acquired prior to medical school has a positive correlation in medical school outcomes. 
Also leveraging anatomy information.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhanatomy_learning/\"\u003eanatomy learning\u003c/a\u003e is the learning of \u003ca href=\"/posts/kbhanatomy/\"\u003eanatomy\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhanatomy/\"\u003eAnatomy\u003c/a\u003e information acquired prior to medical school has a positive correlation in medical school outcomes. Also leveraging anatomy information.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanatomy_learning/","tags":null,"title":"anatomy learning"},{"categories":null,"contents":"ANCA-AE is a tool to use deep learning to take tedrehedral tessilated results + Finite Difference Method + [magical machine learning] to figure the\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhanca_ae/\"\u003eANCA-AE\u003c/a\u003e is a tool to use deep learning to take tedrehedral tessilated results + \u003ca href=\"/posts/kbhfinite_difference_method/\"\u003eFinite Difference Method\u003c/a\u003e + [magical machine learning] to figure the\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanca_ae/","tags":null,"title":"ANCA-AE"},{"categories":null,"contents":"Angelman Syndrome is a syndrome is ~1 in 15000, clinically recognizable, developmental delay syndrome.\ncause of Angelman Syndrome Angelman Syndrome is primarily caused by the UBE3A and the ubiquitin proteasome system. 
Poly-ubiquitin chain asks to discard cells.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e is a syndrome is ~1 in 15000, clinically recognizable, developmental delay syndrome.\u003c/p\u003e\n\u003ch2 id=\"cause-of-angelman-syndrome--kbhangelman-syndrome-dot-md\"\u003ecause of \u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e is primarily caused by the \u003ca href=\"\"\u003eUBE3A\u003c/a\u003e and the \u003ca href=\"\"\u003eubiquitin proteasome system.\u003c/a\u003e Poly-\u003ca href=\"\"\u003eubiquitin\u003c/a\u003e chain asks to discard cells.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhangelman_syndrome/","tags":null,"title":"Angelman Syndrome"},{"categories":null,"contents":"Need-finding conversation Main idea: testing?\u0026mdash;pregnancy testing and COVID testing\ntalking to longer-scope challenges in visually impaired community Navigation; transportation Cannot see markers on smaller steps; trying to find an uber drive and cannot reorient ","html":"\u003ch2 id=\"need-finding-conversation\"\u003eNeed-finding conversation\u003c/h2\u003e\n\u003cp\u003eMain idea: testing?\u0026mdash;pregnancy testing and COVID testing\u003c/p\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003etalking to longer-scope challenges in visually impaired community\u003c/li\u003e\n\u003cli\u003eNavigation; transportation\n\u003cul\u003e\n\u003cli\u003eCannot see markers on smaller steps; trying to find an uber drive and cannot reorient\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanna_s_team_checkin/","tags":null,"title":"Anna's Team 
Checkin"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhanotehuaoeu/","tags":null,"title":"anotehuaoeu"},{"categories":null,"contents":"Anoushka is a student at Nueva, also the host of Project80, among other things.\n","html":"\u003cp\u003eAnoushka is a student at Nueva, also the host of \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e, among other things.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanoushka_krishnan/","tags":null,"title":"Anoushka Krishnan"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhanthony_badger/","tags":null,"title":"Anthony Badger"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2020.607449\nOne-Liner oral lexical retrieval works better than qualitative narrative analysis to classify dementia; and semantic fluency + Disfluency features chucked on an SVM returns pretty good results.\nNovelty Tried two different assays of measuring linguistic ability: oral lexical retrieval metrics, and qualitative discourse features analysis of speech.\nNotable Methods Subjects divided into three groups\nGreat cog. decline Impaired but stable Healthy controls Administered BNT and SVF tests as baseline\nKey Figs Table 3 This figure tells us that the percentages of unrelated utterances was a statistically significant metric to figure differences between the three experimental groups.\n(CD, CS, HC: cognitive decline, cognitively stable (but declining normally), healthy control)\n(no other items are bolded)\nTable 4 This figure tells us the disfluency features analyzed. 
None of them were independently statistically significant.\nTable 5 This figure tells us that analyzing Semantic Verbal Fluency, plus the information of disfluency, trained on an SVM, actually shows \u0026gt;90% recall value?\nNew Concepts Discourse-Completion Task oral lexical retrieval discourse features modalization Semantic Verbal Fluency Boston Naming Test ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2020.607449\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e works better than qualitative narrative analysis to classify dementia; and semantic fluency + Disfluency features chucked on an SVM returns pretty good results.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eTried two different assays of measuring linguistic ability: \u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e metrics, and qualitative \u003ca href=\"/posts/kbhdiscourse_features/\"\u003ediscourse features\u003c/a\u003e analysis of speech.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSubjects divided into three groups\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGreat cog. 
decline\u003c/li\u003e\n\u003cli\u003eImpaired but stable\u003c/li\u003e\n\u003cli\u003eHealthy controls\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAdministered \u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBNT\u003c/a\u003e and \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e tests as baseline\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-23-08_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"table-3\"\u003eTable 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-02-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us that the percentages of unrelated utterances was a \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003estatistically significant\u003c/a\u003e metric to figure differences between the three experimental groups.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-02-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(\u003cstrong\u003e\u003cstrong\u003eCD\u003c/strong\u003e\u003c/strong\u003e, \u003cstrong\u003e\u003cstrong\u003eCS\u003c/strong\u003e\u003c/strong\u003e, \u003cstrong\u003e\u003cstrong\u003eHC\u003c/strong\u003e\u003c/strong\u003e: cognitive decline, cognitively stable (but declining normally), healthy control)\u003c/p\u003e\n\u003cp\u003e(no other items are bolded)\u003c/p\u003e\n\u003ch3 id=\"table-4\"\u003eTable 4\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-15-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the disfluency features analyzed. 
None of them were independently \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003estatistically significant\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"table-5\"\u003eTable 5\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-17-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us that analyzing \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e, plus the information of disfluency, trained on an SVM, actually shows \u0026gt;90% recall value?\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscourse_features/\"\u003ediscourse features\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodalization/\"\u003emodalization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBoston Naming Test\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhantonsson_2021/","tags":["ntj"],"title":"Antonsson 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhany_name_here/","tags":null,"title":"any name here"},{"categories":null,"contents":"Big picture: combining off-line and on-line approaches maybe the best way to tackle large POMDPs.\nTry planning:\nonly where we are only where we can reach Take into account three factors:\nuncertainty in the value function reachability from the current belief actions that are likely optimal It allows policy improvement on any base 
policy.\nSetup Discrete POMDPs:\n\\(L\\), lower bound \\(U\\), upper-bound \\(b_0\\): current belief Two main phases: the algorithm\nPlanning Phase at each belief point, choose a particular next node to expand (using the scheme below to score the nodes) expand that next node that are chosen propagate the value of the belief upwards through POMDP Bellman Backup up through the tree Best Node Selection We select the best node by three metrics:\nUncertainty: \\(\\epsilon(b) = U(b)-L(b)\\) we want small gap between upper and lower bound Optimality in actions: AEMS1: \\(p(a|b) = \\frac{U(a,b)-L(b)}{U(a^{*}, b)-L(b)}\\) (\u0026ldquo;what\u0026rsquo;s the relative optimality of our action, compared to best action\u0026rdquo;) AEMS2: \\(p(a|b)\\) = \\(1\\) if \\(a=A^{*}\\), \\(0\\) otherwise. (\u0026ldquo;just take best action\u0026rdquo;) Reachability: \\(p(b) = \\prod_{i=0}^{d} P(o^{(i)} | b^{(i)}, a^{(i)}) p(a^{(i)}|b^{(i)}})\\), where small \\(p\\) is either AIMS 1 or 2 above, where \\(a\\) comes from the best action conditional plan that came so far Combining the metrics gives:\n\\begin{equation} E(b) = \\gamma P(b) \\epsilon(b) \\end{equation}\nExecution execute the best action at \\(b_0\\) Perceive a new observation \\(b_0 \\leftarrow update(b_0,a,o)\\) ","html":"\u003cp\u003eBig picture: \u003cstrong\u003ecombining off-line and on-line approaches\u003c/strong\u003e maybe the best way to tackle large POMDPs.\u003c/p\u003e\n\u003cp\u003eTry planning:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eonly where we are\u003c/li\u003e\n\u003cli\u003eonly where we can reach\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTake into account three factors:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003euncertainty in the value function\u003c/li\u003e\n\u003cli\u003ereachability from the current belief\u003c/li\u003e\n\u003cli\u003eactions that are likely optimal\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIt allows policy improvement on any base policy.\u003c/p\u003e\n\u003ch2 
id=\"setup\"\u003eSetup\u003c/h2\u003e\n\u003cp\u003eDiscrete POMDPs:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(L\\), lower bound\u003c/li\u003e\n\u003cli\u003e\\(U\\), upper-bound\u003c/li\u003e\n\u003cli\u003e\\(b_0\\): current belief\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTwo main phases: the algorithm\u003c/p\u003e\n\u003ch3 id=\"planning-phase\"\u003ePlanning Phase\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eat each belief point, choose a particular next node to expand (using the scheme below to score the nodes)\u003c/li\u003e\n\u003cli\u003eexpand that next node that are chosen\u003c/li\u003e\n\u003cli\u003epropagate the value of the belief upwards through \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Backup\u003c/a\u003e up through the tree\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"best-node-selection\"\u003eBest Node Selection\u003c/h4\u003e\n\u003cp\u003eWe select the best node by three metrics:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eUncertainty: \\(\\epsilon(b) = U(b)-L(b)\\) we want small gap between upper and lower bound\u003c/li\u003e\n\u003cli\u003eOptimality in actions:\n\u003cul\u003e\n\u003cli\u003eAEMS1: \\(p(a|b) = \\frac{U(a,b)-L(b)}{U(a^{*}, b)-L(b)}\\) (\u0026ldquo;what\u0026rsquo;s the relative optimality of our action, compared to best action\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eAEMS2: \\(p(a|b)\\) = \\(1\\) if \\(a=A^{*}\\), \\(0\\) otherwise. 
(\u0026ldquo;just take best action\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eReachability: \\(p(b) = \\prod_{i=0}^{d} P(o^{(i)} | b^{(i)}, a^{(i)}) p(a^{(i)}|b^{(i)}})\\), where small \\(p\\) is either AIMS 1 or 2 above, where \\(a\\) comes from the best action conditional plan that came so far\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eCombining the metrics gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE(b) = \\gamma P(b) \\epsilon(b)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"execution\"\u003eExecution\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eexecute the best action at \\(b_0\\)\u003c/li\u003e\n\u003cli\u003ePerceive a new observation\u003c/li\u003e\n\u003cli\u003e\\(b_0 \\leftarrow update(b_0,a,o)\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaems/","tags":null,"title":"Anytime Error Minimization Search"},{"categories":null,"contents":"eansoetuhaosneu\n","html":"\u003cp\u003eeansoetuhaosneu\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaosneuhasoneuh/","tags":null,"title":"aosneuhasoneuh"},{"categories":null,"contents":"Other Factoids charged flux Chapters coulomb\u0026rsquo;s law superposition electric field Gauss\u0026rsquo; Law electric potential current Ohm\u0026rsquo;s Law resistor kirchoff\u0026rsquo;s laws Capacitor Dynamic RC Circuits magnetism faraday\u0026rsquo;s law Things to do AP Phys C EM Things to Do\n","html":"\u003ch2 id=\"other-factoids\"\u003eOther Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcharged/\"\u003echarged\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhflux/\"\u003eflux\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"chapters\"\u003eChapters\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003ecoulomb\u0026rsquo;s law\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhcoulomb_s_law/#superposition\"\u003esuperposition\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhohm_s_law/\"\u003eOhm\u0026rsquo;s Law\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhresistors/\"\u003eresistor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhkirchoff_s_laws/\"\u003ekirchoff\u0026rsquo;s laws\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcapacitor/\"\u003eCapacitor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdynamic_rc_circuts/\"\u003eDynamic RC Circuits\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmagnetism/\"\u003emagnetism\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfaraday_s_law/\"\u003efaraday\u0026rsquo;s law\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things-to-do\"\u003eThings to do\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhap_phys_c_em_things_to_do/\"\u003eAP Phys C EM Things to Do\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhap_phys_c_em_index/","tags":["index"],"title":"AP Phys C EM Index"},{"categories":null,"contents":" Review all the names of units, and their SI conversions Review all eqns of time constants \u0026ldquo;Amperian Loop\u0026rdquo; ","html":"\u003cul\u003e\n\u003cli\u003eReview all the names of units, and their SI conversions\u003c/li\u003e\n\u003cli\u003eReview all eqns of time constants\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Amperian 
Loop\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-07_18-06-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhap_phys_c_em_things_to_do/","tags":null,"title":"AP Phys C EM Things to Do"},{"categories":null,"contents":"AP Phys C Mech is an examination held by the CollegeBoard in mechanics.\nThings to Study Permittivity of free space Impulse Springs! In general. Perhaps review old notes. How to be faster? Kepler\u0026rsquo;s Laws of Planetary Motion\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhap_phys_c_mech_index/\"\u003eAP Phys C Mech\u003c/a\u003e is an examination held by the \u003ca href=\"/posts/kbhcollegeboard/\"\u003eCollegeBoard\u003c/a\u003e in mechanics.\u003c/p\u003e\n\u003ch2 id=\"things-to-study\"\u003eThings to Study\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePermittivity of free space\u003c/li\u003e\n\u003cli\u003eImpulse\u003c/li\u003e\n\u003cli\u003eSprings! In general. 
Perhaps \u003ca href=\"https://www.notion.so/shabangsystems/013cd5fdedda491b86eb45eb139813a5?v=0449600634bd485fbf0f6f7b8a0833a3\"\u003ereview old notes\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eHow to be faster?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhkepler_s_laws_of_planetary_motion/\"\u003eKepler\u0026rsquo;s Laws of Planetary Motion\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhap_phys_c_mech_index/","tags":["index"],"title":"AP Phys C Mech Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhap_physi/","tags":null,"title":"ap physi"},{"categories":null,"contents":"AP Statistics is an examination by the CollegeBoard.\nSee also crap to remember for AP Stats\nNon-Focus Mistakes file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf Interpretation of regression outputs Backlog Chi-square file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf Notes confidence interval hypothesis testing t-statistics chi-square data inference binomial distribution ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e is an examination by the \u003ca href=\"/posts/kbhcollegeboard/\"\u003eCollegeBoard\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhcrap_to_remember_for_ap_stats/\"\u003ecrap to remember for AP Stats\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"non-focus-mistakes\"\u003eNon-Focus Mistakes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The 
Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003eInterpretation of regression outputs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"backlog\"\u003eBacklog\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eChi-square\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconfidence_interval/\"\u003econfidence interval\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_statistics/\"\u003et-statistics\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdata_inference/\"\u003edata inference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapstats/","tags":["index"],"title":"AP Statistics Index"},{"categories":null,"contents":"Show that:\n\\begin{equation} \\dv t e^{tA} = e^{tA}A \\end{equation}\nWe can apply the result we shown in eigenvalue:\n\\begin{equation} \\dv t \\qty(e^{tA}) = \\dv t \\qty(I + \\sum_{k=1}^{\\infty} \\frac{t^{k}}{k!}A^{k}) = \\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A \\end{equation}\nWe do this separation because \\(k=0\\) would\u0026rsquo;t make sense to raise \\(A\\) (\\(k-1=-1\\)) to as we are unsure about the invertability of \\(A\\). Obviously \\(\\frac{1}{k!}k = \\frac{1}{(k-1)!}\\). 
Therefore, we can shift our index back yet again:\n\\begin{equation} \\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A = \\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A \\end{equation}\nAwesome. So now we have the taylor series in \\(e^{tA}\\) back, times \\(A\\).\nSo therefore:\n\\begin{equation} \\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A = e^{tA}A \\end{equation}\nBe forewarned:\n\\begin{equation} e^{A}e^{B} \\neq e^{A+B} \\end{equation}\nmostly because matrix multiplication is not commutative..\n","html":"\u003cp\u003eShow that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t e^{tA} = e^{tA}A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can apply the result we shown in \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\qty(e^{tA}) = \\dv t \\qty(I + \\sum_{k=1}^{\\infty} \\frac{t^{k}}{k!}A^{k}) = \\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe do this separation because \\(k=0\\) would\u0026rsquo;t make sense to raise \\(A\\) (\\(k-1=-1\\)) to as we are unsure about the \u003ca href=\"/posts/kbhmatricies/#invertability\"\u003einvertability\u003c/a\u003e of \\(A\\). Obviously \\(\\frac{1}{k!}k = \\frac{1}{(k-1)!}\\). Therefore, we can shift our index back yet again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A = \\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAwesome. 
So now we have the taylor series in \\(e^{tA}\\) back, times \\(A\\).\u003c/p\u003e\n\u003cp\u003eSo therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A = e^{tA}A\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eBe forewarned:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{A}e^{B} \\neq e^{A+B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emostly because matrix multiplication is not commutative..\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapplying_eigenspace/","tags":null,"title":"applying eigenspace"},{"categories":null,"contents":"Direct Sampling Direct Sampling is an approximate inference method where we pull samples from the given joint probability distribution.\nExample Suppose we are interested in:\nwhere we dare \\(P(B^{1}|D^{1},C^{1})\\).\nStep 1: sort We obtain a topological sort of this network:\n\\begin{equation} B, S, E, D, C \\end{equation}\nStep 2: sample from \\(B,S\\) We sample \\(B\\). We sampled that \\(B=1\\) today. We sample \\(S\\). We sampled that \\(S=0\\) today. Step 3: sample from \\(E\\) We sample \\(E\\) GIVEN what we already sampled, that \\(B=1, S=0\\), we sampled that that \\(E = 1\\) Step 4: sample from \\(D, C\\) We sample \\(D\\) given that \\(E=1\\) as we sampled. We sample \\(C\\) given that \\(E=1\\) as we sampled. Repeat Repeat steps 2-4\nStep n: Analyze B S E D C 1 0 1 0 1 0 1 1 0 0 1 1 1 1 0 0 0 1 1 0 1 0 1 1 1 We desire to know \\(P(b^{1}|d^{1}, c^{1})\\). Looks like, given this table, it would be \\(100\\%\\).\nLikelihood Weighted Sampling Likelihood Weighted Sampling is a sampling approach whereby you force values that you wont, and then weight the results by the chance of it happening.\nThis is super useful when our envidence is unlikely.\nExample Suppose again you are interested in \\(P(b^{1}|d^{1}, c^{1})\\). 
In this case, we only sample \\(B,S,E\\):\nB S E 0 1 0 1 0 1 Now, for each of these results, we the compute the chance of our priors happening given the samples.\nRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})\\) Row 2: \\(p(d^{1}|e^{1})p(c^{1}|e^{1})\\) Let\u0026rsquo;s say:\nRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})=0.3\\) Row 2: \\(p(d^{1}|e^{1})p(c^{1}|e^{1})=0.9\\) Finally, to compute \\(p(b^{1}|d^{1}c^{1})\\):\n\\begin{equation} \\frac{0.9}{0.9+0.3} \\end{equation}\nbecause only row \\(2\\) fit with our expectations.\n","html":"\u003ch2 id=\"direct-sampling--kbhdirect-sampling-dot-md\"\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e is an \u003ca href=\"/posts/kbhapproximate_inference/\"\u003eapproximate inference\u003c/a\u003e method where we pull samples from the given \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eSuppose we are interested in:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_09-19-51_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere we dare \\(P(B^{1}|D^{1},C^{1})\\).\u003c/p\u003e\n\u003ch4 id=\"step-1-sort\"\u003eStep 1: sort\u003c/h4\u003e\n\u003cp\u003eWe obtain a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e of this network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB, S, E, D, C\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"step-2-sample-from-b-s\"\u003eStep 2: sample from \\(B,S\\)\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eWe sample \\(B\\). We sampled that \\(B=1\\) today.\u003c/li\u003e\n\u003cli\u003eWe sample \\(S\\). 
We sampled that \\(S=0\\) today.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"step-3-sample-from-e\"\u003eStep 3: sample from \\(E\\)\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eWe sample \\(E\\) \u003cstrong\u003eGIVEN\u003c/strong\u003e what we already sampled, that \\(B=1, S=0\\), we sampled that that \\(E = 1\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"step-4-sample-from-d-c\"\u003eStep 4: sample from \\(D, C\\)\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eWe sample \\(D\\) given that \\(E=1\\) as we sampled.\u003c/li\u003e\n\u003cli\u003eWe sample \\(C\\) given that \\(E=1\\) as we sampled.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"repeat\"\u003eRepeat\u003c/h4\u003e\n\u003cp\u003eRepeat steps 2-4\u003c/p\u003e\n\u003ch4 id=\"step-n-analyze\"\u003eStep n: Analyze\u003c/h4\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eB\u003c/th\u003e\n\u003cth\u003eS\u003c/th\u003e\n\u003cth\u003eE\u003c/th\u003e\n\u003cth\u003eD\u003c/th\u003e\n\u003cth\u003eC\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u0
03ctd\u003e1\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe desire to know \\(P(b^{1}|d^{1}, c^{1})\\). Looks like, given this table, it would be \\(100\\%\\).\u003c/p\u003e\n\u003ch2 id=\"likelihood-weighted-sampling--kbhdirect-sampling-dot-md\"\u003e\u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e is a sampling approach whereby you force values that you wont, and then weight the results by the chance of it happening.\u003c/p\u003e\n\u003cp\u003eThis is \u003cstrong\u003esuper useful\u003c/strong\u003e when our envidence is unlikely.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eSuppose again you are interested in \\(P(b^{1}|d^{1}, c^{1})\\). In this case, we only sample \\(B,S,E\\):\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eB\u003c/th\u003e\n\u003cth\u003eS\u003c/th\u003e\n\u003cth\u003eE\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNow, for each of these results, we the compute the chance of our priors happening given the samples.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})\\)\u003c/li\u003e\n\u003cli\u003eRow 2: \\(p(d^{1}|e^{1})p(c^{1}|e^{1})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet\u0026rsquo;s say:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})=0.3\\)\u003c/li\u003e\n\u003cli\u003eRow 2: 
\\(p(d^{1}|e^{1})p(c^{1}|e^{1})=0.9\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFinally, to compute \\(p(b^{1}|d^{1}c^{1})\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{0.9}{0.9+0.3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause only row \\(2\\) fit with our expectations.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapproximate_inference/","tags":null,"title":"approximate inference"},{"categories":null,"contents":"How do we deal with Markov Decision Process solution with continuous state space?\nLet there be a value function parameterized on \\(\\theta\\):\n\\begin{equation} U_{\\theta}(s) \\end{equation}\nLet us find the value-function policy of this utility:\n\\begin{equation} \\pi(s) = \\arg\\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U_{\\theta}(s\u0026rsquo;)) \\end{equation}\nWe now create a finite sampling of our state space, which maybe infinitely large (for instance, continuous):\n\\begin{equation} S \\in \\mathcal{S} \\end{equation}\nwhere, \\(S\\) is a set of discrete states \\(\\{s_1, \\dots, s_{m}\\}\\).\nNow, what next?\ngenerally: Loop until convergence:\nInitialize \\(u_{\\theta}\\) For all \\(s_{i} \\in S\\), let \\(u_{i} = \\max_{a} R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) u_{\\theta}(s\u0026rsquo;)\\), the utility at those discrete state samples \\(s_{i}\\) Then, fit a \\(\\theta\\) so that \\(U_{\\theta}(s_{i})\\) is close to \\(u_{i}\\) to get \\(T\\): get a finite sampling of next states, or fit a function to it.\nBUT: Convergence is not guaranteed.\nThere are two main specific approaches to achieve this:\nglobal approximation linreg a best-fit line of state value vs. utility value polynomial fit a best-fit line, whereby \\(U_{\\theta}(s) = \\theta^{T}\\beta(s)\\), where each \\(\\beta_{j}(s)=s^{j-1}\\). 
a frigin neural network (train a model with parameters \\(\\theta\\) which produces the utility calculations for you \\(M_{\\theta}(s) = U_{\\theta}(s)\\)) local approximation make a sampling in your continuous state space to discretized it do any utility function thing you\u0026rsquo;d like (policy evaluation or value iteration) to get some set of \\(\\theta_{i}\\), which is the utility for being in each sampled discrete state \\(s_{i}\\) whenever you need to calculate \\(U(s)\\) of a particular state\u0026hellip; linearly interpolate k nearest neighbor kernel smoothing ","html":"\u003cp\u003eHow do we deal with \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e solution with \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state space?\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet there be a \u003ca href=\"/posts/kbhaction_value_function/\"\u003evalue function\u003c/a\u003e \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003eized on \\(\\theta\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{\\theta}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us find the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e of this utility:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(s) = \\arg\\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U_{\\theta}(s\u0026rsquo;))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now create a finite sampling of our state space, which maybe infinitely large (for instance, \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS \\in \\mathcal{S}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(S\\) is a set of discrete states \\(\\{s_1, \\dots, s_{m}\\}\\).\u003c/p\u003e\n\u003cp\u003eNow, what next?\u003c/p\u003e\n\u003ch2 
id=\"generally\"\u003egenerally:\u003c/h2\u003e\n\u003cp\u003eLoop until convergence:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInitialize \\(u_{\\theta}\\)\u003c/li\u003e\n\u003cli\u003eFor all \\(s_{i} \\in S\\), let \\(u_{i} = \\max_{a} R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) u_{\\theta}(s\u0026rsquo;)\\), the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e at those discrete state samples \\(s_{i}\\)\u003c/li\u003e\n\u003cli\u003eThen, fit a \\(\\theta\\) so that \\(U_{\\theta}(s_{i})\\) is close to \\(u_{i}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eto get \\(T\\)\u003c/strong\u003e: get a finite sampling of next states, or fit a function to it.\u003c/p\u003e\n\u003cp\u003eBUT: \u003cstrong\u003eConvergence is not guaranteed.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThere are two main specific approaches to achieve this:\u003c/p\u003e\n\u003ch2 id=\"global-approximation\"\u003eglobal approximation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elinreg a best-fit line of state value vs. 
\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e value\n\u003cul\u003e\n\u003cli\u003epolynomial fit a best-fit line, whereby \\(U_{\\theta}(s) = \\theta^{T}\\beta(s)\\), where each \\(\\beta_{j}(s)=s^{j-1}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ea frigin neural network (train a model with parameters \\(\\theta\\) which produces the utility calculations for you \\(M_{\\theta}(s) = U_{\\theta}(s)\\))\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"local-approximation\"\u003elocal approximation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emake a sampling in your \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state space to discretized it\u003c/li\u003e\n\u003cli\u003edo any \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e thing you\u0026rsquo;d like (\u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e) to get some set of \\(\\theta_{i}\\), which is the utility for being in each sampled discrete state \\(s_{i}\\)\u003c/li\u003e\n\u003cli\u003ewhenever you need to calculate \\(U(s)\\) of a particular state\u0026hellip;\n\u003cul\u003e\n\u003cli\u003elinearly interpolate\u003c/li\u003e\n\u003cli\u003ek nearest neighbor\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel smoothing\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapproximate_value_function/","tags":null,"title":"Approximate Value Function"},{"categories":null,"contents":"If we take entangled qubits, and separate them real far away, their behavior would be the same even despite it will take longer for light to travel.\n","html":"\u003cp\u003eIf we take \u003ca href=\"/posts/kbhentangled/\"\u003eentangled\u003c/a\u003e \u003ca 
href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003es, and separate them real far away, their behavior would be the same even despite it will take longer for light to travel.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapr_paradox/","tags":null,"title":"APR Paradox"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaps/","tags":null,"title":"APS"},{"categories":null,"contents":"Background In the 60s, economists that the pricing of options were independent of pricing of underlying assets. Nowadays, we can see that, if the underlying assets were obeying of a Brownian Motion, there is no additional degree of freedom that options can bring: that knowing the stocks will tell you exactly through a DiffEQ how the option will evolve.\nThe idea, then, is that you can replicate options: by dynamically buying and selling pairs of securities in the same way as the option, your new portfolio can track the option exactly.\nOf course, there is a certain amount of volatility associated with Brownian Motion markets.\nUnfortunately, there is no one fixed volatility which can be used to model all options; you can fit a volatility given all strike prices\u0026mdash;creating an implied volatility surface.\nOtherwise, you can also model volatility as a random variable, a stochastic process modeled by stochastic volatility.\nReading pg 350-352: diffusion are described by stochastic differential equations Option Pricing A Vanilla Call Given some current price \\(S\\), option price \\(K\\), time to maturity \\(T\\); the payoff increases linearly after the option matures. How much should the option be changed for the right to buy the option after \\(T\\) days?\nWe can use the option info to calculate the implied volatility.\n","html":"\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eIn the 60s, economists that the pricing of options were independent of pricing of underlying assets. 
Nowadays, we can see that, if the underlying assets were obeying of a Brownian Motion, there is no additional degree of freedom that options can bring: that knowing the stocks will tell you exactly through a \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDiffEQ\u003c/a\u003e how the option will evolve.\u003c/p\u003e\n\u003cp\u003eThe idea, then, is that you can replicate options: by dynamically buying and selling pairs of securities in the same way as the option, your new portfolio can track the option exactly.\u003c/p\u003e\n\u003cp\u003eOf course, there is a certain amount of volatility associated with \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e markets.\u003c/p\u003e\n\u003cp\u003eUnfortunately, there is no one fixed volatility which can be used to model all options; you can fit a volatility given all strike prices\u0026mdash;creating an implied \u003cstrong\u003evolatility surface\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eOtherwise, you can also model volatility as a random variable, a stochastic process modeled by \u003cstrong\u003estochastic volatility\u003c/strong\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"reading\"\u003eReading\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003epg 350-352: diffusion are described by stochastic differential equations\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"option-pricing\"\u003eOption Pricing\u003c/h2\u003e\n\u003ch3 id=\"a-vanilla-call\"\u003eA Vanilla Call\u003c/h3\u003e\n\u003cp\u003eGiven some current price \\(S\\), option price \\(K\\), time to maturity \\(T\\); the payoff increases linearly after the option matures. 
How much should the option be changed for the right to buy the option after \\(T\\) days?\u003c/p\u003e\n\u003cp\u003eWe can use the option info to calculate the implied volatility.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbharbitrage_pricing/","tags":null,"title":"Arbitrage Pricing"},{"categories":null,"contents":"function that returns the input that maximizes the expression.\nfinding argmax direct optimization Typical maximization system. Take derivative, set it to 0, solve, plug in, solve. THis is pretty bad during times are not differentiable.\ngradient ascent We take steps following the direction\n\\begin{equation} \\theta_{1j} = \\theta_{0j} + \\eta \\pdv{LL(\\theta_{0})}{\\theta_{0j}} \\end{equation}\nadditional information argmax of log see argmax of log\n","html":"\u003cp\u003efunction that returns the input that maximizes the expression.\u003c/p\u003e\n\u003ch2 id=\"finding-argmax\"\u003efinding argmax\u003c/h2\u003e\n\u003ch3 id=\"direct-optimization--kbhoptimization-dot-md\"\u003edirect \u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eTypical maximization system. Take derivative, set it to 0, solve, plug in, solve. 
THis is pretty bad during times are not differentiable.\u003c/p\u003e\n\u003ch3 id=\"gradient-ascent\"\u003egradient ascent\u003c/h3\u003e\n\u003cp\u003eWe take steps following the direction\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{1j} = \\theta_{0j} + \\eta \\pdv{LL(\\theta_{0})}{\\theta_{0j}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"argmax-of-log--kbhmaximum-likelihood-parameter-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/#argmax-of-log\"\u003eargmax of log\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/#argmax-of-log\"\u003eargmax of log\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhargmax/","tags":null,"title":"argmax"},{"categories":null,"contents":" When you make an array, you are making space for each element When you create a pointer, you are making space for 64 bit address arrays \u0026ldquo;decay to pointers\u0026rdquo;: when you identify an array by name, you are sharing the location of the leading element \u0026amp;arr gets an address to the FIRST element \u0026mdash; don\u0026rsquo;t do this, \u0026amp;ptr gets the pointers\u0026rsquo; address Array is a special type that represent a segment of contiguously allocated memory. You can\u0026rsquo;t reassign an array to be equal to a new array.\nint nums[] = {1, 2, 3}; int nums2[] = {4, 5, 6}; nums and nums2 has no memory set aside. Calling sizeof() on an array gets the length of the array; calling sizeof() on the pointer it decays to will get the word size. Therefore, when we pass an array to a function will require you giving the size to the function as well.\nTo get a pointer to the beginning of an array, write \u0026amp;str[0]. 
NEVER EVER write \u0026amp;str, even if its the same thing: the latter sounds like you are getting the address of an array which doesn\u0026rsquo;t exist.\nPointer Arithmetic If you add/subtract values to a pointer, the value you add/subtract gets scaled by the size of type so you can always add/subtract as needed.\n","html":"\u003cul\u003e\n\u003cli\u003eWhen you make an \u003ca href=\"/posts/kbharray/\"\u003earray\u003c/a\u003e, you are making space for each element\u003c/li\u003e\n\u003cli\u003eWhen you create a pointer, you are making space for 64 bit address\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharray/\"\u003earray\u003c/a\u003es \u0026ldquo;decay to pointers\u0026rdquo;: when you identify an array by name, you are sharing the location of the leading element\u003c/li\u003e\n\u003cli\u003e\u0026amp;arr gets an address to the FIRST element \u0026mdash; don\u0026rsquo;t do this, \u0026amp;ptr gets the pointers\u0026rsquo; address\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eArray is a special type that represent a segment of contiguously allocated memory. 
You can\u0026rsquo;t reassign an array to be equal to a new array.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enums\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enums2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ccode\u003enums\u003c/code\u003e and \u003ccode\u003enums2\u003c/code\u003e has no memory set aside. 
Calling \u003ccode\u003esizeof()\u003c/code\u003e on an array gets the length of the array; calling \u003ccode\u003esizeof()\u003c/code\u003e on the pointer it decays to will get the word size. Therefore, when we pass an array to a function will require you giving the size to the function as well.\u003c/p\u003e\n\u003cp\u003eTo get a pointer to the beginning of an array, write \u003ccode\u003e\u0026amp;str[0]\u003c/code\u003e. NEVER EVER write \u003ccode\u003e\u0026amp;str\u003c/code\u003e, even if its the same thing: the latter sounds like you are getting the address of an array which doesn\u0026rsquo;t exist.\u003c/p\u003e\n\u003ch2 id=\"pointer-arithmetic\"\u003ePointer Arithmetic\u003c/h2\u003e\n\u003cp\u003eIf you add/subtract values to a pointer, the value you add/subtract gets scaled by the size of type so you can always add/subtract as needed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbharray/","tags":null,"title":"array"},{"categories":null,"contents":"Require: analyze movie + quote [story + bellows]\nineffability of language vs. 
Sapire-Wolf\nForeignizing Time in Heptopod B\nLouise\u0026rsquo;s ability to re-express her temporally-independent thoughts in English after learning Heptopod B represents a successful act foreignization of Heptopod thought for an English L1 audience despite this audience\u0026rsquo;s supposed limitations in understanding temporally-independent concepts according to the Sapir-Wholf Hypothesis.\nHeptopod B does not have temporality RUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo; \u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw it.\u0026rdquo; So it seems like quintessential Sapir-Wholf: time hard to express with Heptopod, and so their way of thinking work around it.\n\u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo; and there are evidence for it\u0026hellip; Louis got this ability and then can think into the future.\n\u0026ldquo;It worked better when I didn\u0026rsquo;t think about it too much. \u0026hellip; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. 
I was developing a faculty like that of the heptapods.\u0026rdquo; Louise ditches temporality in her ENGLISH as well And yet, Louis very effectively translated HB into English by abusing English grammar.\n\u0026ldquo;you will be twenty-five then\u0026rdquo;\nLOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo;\nLOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo;\nSo this lack of temporality not ineffable: \u0026ldquo;translation is the enemy of the ineffable. One causes the other to cease to exist.\u0026rdquo;\nnobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze.\nSo despite English grammar\u0026rsquo;s temporality, it can be adapted.\nThis process of translation can be explained effectively in English, even if the original event is not:\nDR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo; Moving though over to another language is actually essentially just forenizing So: non-temporality vis a vi Heptopod B can be expressed in any language that you desire, except for the fact that it will be forenizing and hence require understanding of their culture. It is not that L1 itself limits understanding of L2 culture vis a vi S-W; instead, L2 culture needed to be understood correctly to foreignize for L1 audience. Louis does this.\nlouis is forenizing \u0026ldquo;As a result, the “foreign-soundingness” of a translation seeking to give the reader a glimpse of the authentic quality of the source can only reproduce and reinforce what the receiving culture already imagines the foreign to be. 
\u0026hellip; Foreign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship\u0026rdquo; Louise establishes this relationship LOUISE: \u0026ldquo;Language is the foundation of civilization. \u0026ldquo;It is the glue that holds a people together, and it is the first weapon drawn in a conflict.\u0026rdquo; \u0026ldquo;The only way to learn an unknown language is to interact with a native speaker, and by that I mean asking questions, holding a conversation \u0026hellip; without that, it\u0026rsquo;s simply not possible.\u0026rdquo; We get to hear it too: film VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; the music is forenizing\nStuff from Chaing \u0026ldquo;you will be twenty-five then\u0026rdquo; \u0026ldquo;Over time, the sentences I wrote grew shapelier, more cohesive. I had reached the point where it worked better when I didn\u0026rsquo;t think about it too much. Instead of carefully trying to design a sentence before writing, I could simply begin putting down strokes immediately; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. I was developing a faculty like that of the heptapods.\u0026rdquo; \u0026ldquo;I could understand that: the physical attributes that humans found intuitive, like kinetic energy or acceleration, were all properties of an objectat a given moment in time.\u0026rdquo; \u0026ldquo;And these were conducive to a teleological interpretation of events: by viewing events over a period of time, one recognized that there was a requirement that had to be satisfied, a goal of minimizing or maximizing. And one had to know the initial and final states to meet that goal; one needed knowledge of the effects before the causes could be initiated. 
I was growing to understand that, too.\u0026rdquo; \u0026ldquo;Gary once told me that the fundamental laws of physics were time-symmetric, that there was no physical difference between past and future.\u0026rdquo; \u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw it.\u0026rdquo; Stuff from Bellows \u0026ldquo;A reader who says that poetry is what has been lost in translation is also claiming to be simultaneously in full possession of the original (which is poetry) and of the translation (which is not). Otherwise there would be no knowing if anything has been lost, let alone knowing that it was poetry.\u0026rdquo; \u0026ldquo;Because if the inhabitants of the distant planet did have a language, and if the space crew had learned it, then it must be possible for them to say what the aliens had said. Must, not should: radically untranslatable sounds do not make a language simply because we could not know it was a language unless we could translate it, even if only roughly\u0026rdquo; \u0026ldquo;The tonal and rhythmic patterns of whale song are of such complexity as to make it quite impossible to believe that what we can hear (and pick up on instruments more sensitive than human ears) is just random noise.\u0026rdquo; nobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze. Translation is the enemy of the ineffable. One causes the other to cease to exist. Foreign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship. But there are significant differences between cultures and languages in how people do things with words. 
\u0026ldquo;Different languages, [Wilhelm] von Humboldt saw, were different worlds, and the great diversity of natural languages on the planet should be seen as a treasure house of tools for thinking in other ways.\u0026rdquo; Wilhelm von Humboldt, elder brother of the great explorer Alexander \u0026ldquo;The evidence itself brought [Sapir] to see that any attempt to match the grammar of a language with the culture of its speakers or their ethnic origins was completely impossible. “Language,” “culture,” and “race” were independent variables.\u0026rdquo; \u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo; \u0026ldquo;It means that speakers of what Sapir called “Average West European” are poorly equipped to engage in Hopi thought. To expand our minds and to become more fully civilized members of the human race, we should learn as many different languages as we can.\u0026rdquo; Movie Script Film VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; all caps meaning LOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo; DR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo; RUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo; LOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo; \u0026ldquo;This is the same scene as the first. 
Shot for shot.\u0026rdquo; \u0026ldquo;And now we stay here a moment longer than the opening scene, and see that while Louise is smiling, a tear slips down her cheek.\u0026rdquo; INT. LAKE HOUSE - LOUISE\u0026rsquo;S STUDY - NIGHT (FLASHBACK) Childhood Flashback \u0026ldquo;memory is a strange thing\u0026rdquo; \u0026ldquo;we are so bound by time, by its order\u0026rdquo;\u0026mdash;progression of time backed into the inherentness of language the story opens with an emotional connection to the audience for imprinting Lecture Scene\u0026mdash;Alien Invasion \u0026ldquo;Authority assess the object\u0026rdquo; emphasis a weird place Shadow framing of the events question: why is she so worried Second time with plan down from blackness Army Scene + Fly Away Scene \u0026ldquo;Fluttering\u0026rdquo; reproduced Army attempts to replicate the results Sanskrit word for \u0026ldquo;war\u0026rdquo; and its translation\u0026mdash;\u0026ldquo;Louis: desire for more cows; Cal: argument\u0026rdquo; Film environment from the back, filming forward Helicopter Scene Louis: communication as first priority vs. Army: understanding as first priority Approach Scene Music pitched the \u0026ldquo;alien\u0026rdquo; flutters tonally Kind of emotional communication with the audience, the Dalian sound + high highs EQs is a foreignization technique taller than it was in the movie Entry Scene Driving: perspective questions \u0026ldquo;every 18 hours\u0026rdquo; continuous panning from black downwards Sargent\u0026rsquo;s differing eye length Difference in gravity and perspectives? 
Camera angle trickery, panning down is no longer the same direction \u0026ldquo;light at the end of the tunnel\u0026rdquo; again, music + dissonant sounds create foreignization in the audience \u0026ldquo;they arrive\u0026rdquo;\nCommunication scene They are very animal-like as portrayed in the film \u0026ldquo;Visual Aid\u0026rdquo; scenes Pairwise matching \u0026ldquo;A more advanced race\u0026rdquo; Rosetta stone behavior Taking of headgear: contrast between small vs. large (size differences Increasing breathing during a moment of transition Introductory scene not sure if they have names making an assumption what is happening to Louise\u0026rsquo;s thoughts \u0026ldquo;As Banks studies the language, she starts to have flashback-like visions of her daughter. \u0026quot; Panic worried Scene Fear of the unknown Thoughts flashing back: symbols muting sounds Flashing back being real: Thoughts having panicked sensation of time Repeating single syllable\u0026mdash;foreignization \u0026ldquo;unline speech, a logogram is free of time\u0026rdquo; Voiceover Scene No directionality: complex understanding The repeated vocalizations help highlight the distantness Dialogue Between the two \u0026ldquo;Sapire-Wolf Hypothesis\u0026rdquo;: being used incorrectly; hallucinating Louis' China mobilizing forces Majiong is a form of mobiling forces Final Message Group \u0026ldquo;offer\u0026rdquo; vs \u0026ldquo;use\u0026rdquo; \u0026mdash; the US understands it as \u0026ldquo;offer\u0026rdquo; and China understands it as \u0026ldquo;use\u0026rdquo; contextual intepretation varies how the use of linguistics language as something contextually dependent large amounts of communication can be packed very densely Non-Zero Sum Game the ask to work together need to be interpreted differently different parts of time fold together to become hole: \u0026ldquo;non zero sum game\u0026rdquo; The alien speech is being subtitled! 
Final communication Result becomes \u0026ldquo;objective\u0026rdquo;: i.e. there is a direct understanding of the aliens, suddenly Also, palendromic names: \u0026ldquo;Hannah\u0026rdquo; for daughter, not translatable Seeing into the future Seeing into Time Being able to understand heptopod + time properly means that they are able to understand time Gave private number in the future allow you to see the past: General Shang can see into the future Why is the banquet colored yellow Finale Repeat of the opener scene: pan down as a trope that cycles from the beginning Ian is in Louise\u0026rsquo;s house! The house+baby scenes (which is different from baby nature scenes) is lit orange in the same way as the banquet scene whereas the hospital scene and the house scene were lit blue ","html":"\u003cp\u003eRequire: analyze movie + quote [story + bellows]\u003c/p\u003e\n\u003cp\u003eineffability of language vs. Sapire-Wolf\u003c/p\u003e\n\u003cp\u003eForeignizing Time in Heptopod B\u003c/p\u003e\n\u003cp\u003eLouise\u0026rsquo;s ability to re-express her temporally-independent thoughts in English after learning Heptopod B represents a successful act \u003cem\u003eforeignization\u003c/em\u003e of Heptopod thought for an English L1 audience despite this audience\u0026rsquo;s supposed limitations in understanding temporally-independent concepts according to the Sapir-Wholf Hypothesis.\u003c/p\u003e\n\u003ch2 id=\"heptopod-b-does-not-have-temporality\"\u003eHeptopod B does not have temporality\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw 
it.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo it seems like quintessential Sapir-Wholf: time hard to express with Heptopod, and so their way of thinking work around it.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand there are evidence for it\u0026hellip; Louis got this ability and then can think into the future.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;It worked better when I didn\u0026rsquo;t think about it too much. \u0026hellip; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. I was developing a faculty like that of the heptapods.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"louise-ditches-temporality-in-her-english-as-well\"\u003eLouise ditches temporality in her ENGLISH as well\u003c/h2\u003e\n\u003cp\u003eAnd yet, Louis very effectively translated HB into English by abusing English grammar.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;you will be twenty-five then\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSo this lack of temporality not ineffable: \u0026ldquo;translation is the enemy of the ineffable. 
One causes the other to cease to exist.\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003enobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo despite English grammar\u0026rsquo;s temporality, it can be adapted.\u003c/p\u003e\n\u003cp\u003eThis process of translation can be explained effectively in English, even if the original event is not:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"moving-though-over-to-another-language-is-actually-essentially-just-forenizing\"\u003eMoving though over to another language is actually essentially just forenizing\u003c/h2\u003e\n\u003cp\u003eSo: non-temporality vis a vi Heptopod B can be expressed in any language that you desire, except for the fact that it will be forenizing and hence require understanding of their \u003cem\u003eculture\u003c/em\u003e. It is \u003cstrong\u003enot\u003c/strong\u003e that L1 itself limits understanding of L2 culture vis a vi S-W; instead, L2 culture needed to be understood correctly to foreignize for L1 audience. Louis does this.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003elouis is forenizing\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;As a result, the “foreign-soundingness” of a translation seeking to give the reader a glimpse of the authentic quality of the source can only reproduce and reinforce what the receiving culture already imagines the foreign to be. 
\u0026hellip; Foreign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eLouise establishes this relationship\n\u003cul\u003e\n\u003cli\u003eLOUISE: \u0026ldquo;Language is the foundation of civilization. \u0026ldquo;It is the glue that holds a people together, and it is the first weapon drawn in a conflict.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The only way to learn an unknown language is to interact with a native speaker, and by that I mean asking questions, holding a conversation \u0026hellip; without that, it\u0026rsquo;s simply not possible.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe get to hear it too: film VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; the \u003cstrong\u003emusic\u003c/strong\u003e is forenizing\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"stuff-from-chaing\"\u003eStuff from Chaing\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;you will be twenty-five then\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Over time, the sentences I wrote grew shapelier, more cohesive. I had reached the point where it worked better when I didn\u0026rsquo;t think about it too much. Instead of carefully trying to design a sentence before writing, I could simply begin putting down strokes immediately; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. 
I was developing a faculty like that of the heptapods.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I could understand that: the physical attributes that humans found intuitive, like kinetic energy or acceleration, were all properties of an objectat a given moment in time.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;And these were conducive to a teleological interpretation of events: by viewing events over a period of time, one recognized that there was a requirement that had to be satisfied, a goal of minimizing or maximizing. And one had to know the initial and final states to meet that goal; one needed knowledge of the effects before the causes could be initiated. I was growing to understand that, too.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Gary once told me that the fundamental laws of physics were time-symmetric, that there was no physical difference between past and future.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw it.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stuff-from-bellows\"\u003eStuff from Bellows\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;A reader who says that poetry is what has been lost in translation is also claiming to be simultaneously in full possession of the original (which is poetry) and of the translation (which is not). Otherwise there would be no knowing if anything has been lost, let alone knowing that it was poetry.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Because if the inhabitants of the distant planet did have a language, and if the space crew had learned it, then it must be possible for them to say what the aliens had said. 
Must, not should: radically untranslatable sounds do not make a language simply because we could not know it was a language unless we could translate it, even if only roughly\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The tonal and rhythmic patterns of whale song are of such complexity as to make it quite impossible to believe that what we can hear (and pick up on instruments more sensitive than human ears) is just random noise.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003enobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze.\u003c/li\u003e\n\u003cli\u003eTranslation is the enemy of the ineffable. One causes the other to cease to exist.\u003c/li\u003e\n\u003cli\u003eForeign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship.\u003c/li\u003e\n\u003cli\u003eBut there are significant differences between cultures and languages in how people do things with words.\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Different languages, [Wilhelm] von Humboldt saw, were different worlds, and the great diversity of natural languages on the planet should be seen as a treasure house of tools for thinking in other ways.\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003eWilhelm von Humboldt, elder brother of the great explorer Alexander\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The evidence itself brought [Sapir] to see that any attempt to match the grammar of a language with the culture of its speakers or their ethnic origins was completely impossible. 
“Language,” “culture,” and “race” were independent variables.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;It means that speakers of what Sapir called “Average West European” are poorly equipped to engage in Hopi thought. To expand our minds and to become more fully civilized members of the human race, we should learn as many different languages as we can.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"movie-script\"\u003eMovie Script\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFilm VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; all caps meaning\u003c/li\u003e\n\u003cli\u003eLOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eDR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eRUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eLOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;This is the same scene as the first. 
Shot for shot.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;And now we stay here a moment longer than the opening scene, and see that while Louise is smiling, a tear slips down her cheek.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eINT. LAKE HOUSE - LOUISE\u0026rsquo;S STUDY - NIGHT (FLASHBACK)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"childhood-flashback\"\u003eChildhood Flashback\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;memory is a strange thing\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;we are so bound by time, by its order\u0026rdquo;\u0026mdash;progression of time backed into the inherentness of language\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ethe story opens with an emotional connection to the audience for imprinting\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"lecture-scene-alien-invasion\"\u003eLecture Scene\u0026mdash;Alien Invasion\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;\u003cstrong\u003eAuthority\u003c/strong\u003e assess the object\u0026rdquo; emphasis a weird place\u003c/li\u003e\n\u003cli\u003eShadow framing of the events\u003c/li\u003e\n\u003cli\u003equestion: why is \u003cem\u003eshe\u003c/em\u003e so worried\u003c/li\u003e\n\u003cli\u003eSecond time with plan down from blackness\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"army-scene-plus-fly-away-scene\"\u003eArmy Scene + Fly Away Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Fluttering\u0026rdquo; reproduced\n\u003cul\u003e\n\u003cli\u003eArmy attempts to replicate the results\u003c/li\u003e\n\u003cli\u003eSanskrit word for \u0026ldquo;war\u0026rdquo; and its translation\u0026mdash;\u0026ldquo;Louis: desire for more cows; Cal: argument\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eFilm environment from the back, filming forward\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"helicopter-scene\"\u003eHelicopter 
Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLouis: communication as first priority vs. Army: understanding as first priority\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"approach-scene\"\u003eApproach Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eMusic pitched the \u0026ldquo;alien\u0026rdquo; flutters tonally\n\u003cul\u003e\n\u003cli\u003eKind of emotional communication with the audience, the Dalian sound + high highs EQs is a \u003ca href=\"/posts/kbhtranslation_theory/#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e technique\u003c/li\u003e\n\u003cli\u003e\u003cem\u003etaller\u003c/em\u003e than it was in the movie\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"entry-scene\"\u003eEntry Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDriving: perspective questions\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;every 18 hours\u0026rdquo;\u003c/li\u003e\n\u003cli\u003econtinuous panning from black \u003cstrong\u003edownwards\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eSargent\u0026rsquo;s differing eye length\u003c/li\u003e\n\u003cli\u003eDifference in gravity and perspectives?\u003c/li\u003e\n\u003cli\u003eCamera angle trickery, panning \u003cstrong\u003edown\u003c/strong\u003e is no longer the same direction\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;light at the end of the tunnel\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eagain, music + dissonant sounds create \u003ca href=\"/posts/kbhtranslation_theory/#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e in the audience\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;they arrive\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"communication-scene\"\u003eCommunication scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThey are very animal-like as portrayed in the film\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"visual-aid-scenes\"\u003e\u0026ldquo;Visual Aid\u0026rdquo; scenes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePairwise 
matching\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;A more advanced race\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eRosetta stone behavior\u003c/li\u003e\n\u003cli\u003eTaking of headgear: contrast between small vs. large (size differences\u003c/li\u003e\n\u003cli\u003eIncreasing breathing during a moment of transition\u003c/li\u003e\n\u003cli\u003eIntroductory scene\n\u003cul\u003e\n\u003cli\u003enot sure if they have names\u003c/li\u003e\n\u003cli\u003emaking an assumption\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat is happening to Louise\u0026rsquo;s thoughts\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;As Banks studies the language, she starts to have flashback-like visions of her daughter. \u0026quot;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"panic-worried-scene\"\u003ePanic worried Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFear of the unknown\u003c/li\u003e\n\u003cli\u003eThoughts flashing back: symbols muting sounds\n\u003cul\u003e\n\u003cli\u003eFlashing back being real: Thoughts having panicked sensation of time\u003c/li\u003e\n\u003cli\u003eRepeating single syllable\u0026mdash;foreignization\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;unline speech, a logogram is free of time\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"voiceover-scene\"\u003eVoiceover Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNo directionality: complex understanding\u003c/li\u003e\n\u003cli\u003eThe repeated vocalizations help highlight the distantness\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dialogue-between-the-two\"\u003eDialogue Between the two\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Sapire-Wolf Hypothesis\u0026rdquo;: being used incorrectly; hallucinating Louis'\n\u003cul\u003e\n\u003cli\u003eChina mobilizing forces\u003c/li\u003e\n\u003cli\u003eMajiong is a form of mobiling 
forces\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-message-group\"\u003eFinal Message Group\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;offer\u0026rdquo; vs \u0026ldquo;use\u0026rdquo; \u0026mdash; the \u003cstrong\u003eUS\u003c/strong\u003e understands it as \u0026ldquo;offer\u0026rdquo; and \u003cstrong\u003eChina\u003c/strong\u003e understands it as \u0026ldquo;use\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003econtextual intepretation varies how the use of linguistics\u003c/li\u003e\n\u003cli\u003elanguage as something contextually dependent\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003elarge amounts of communication can be packed very densely\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"non-zero-sum-game\"\u003eNon-Zero Sum Game\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe ask to work together need to be interpreted differently\u003c/li\u003e\n\u003cli\u003edifferent parts of time fold together to become hole: \u0026ldquo;non zero sum game\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eThe alien speech is being subtitled!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-communication\"\u003eFinal communication\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eResult becomes \u0026ldquo;objective\u0026rdquo;: i.e. 
there is a direct understanding of the aliens, suddenly\u003c/li\u003e\n\u003cli\u003eAlso, palendromic names: \u0026ldquo;Hannah\u0026rdquo; for daughter, not translatable\u003c/li\u003e\n\u003cli\u003eSeeing into the future\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"seeing-into-time\"\u003eSeeing into Time\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eBeing able to understand heptopod + time properly means that they are able to understand time\u003c/li\u003e\n\u003cli\u003eGave private number in the future allow you to see the past: General Shang can see into the future\u003c/li\u003e\n\u003cli\u003eWhy is the banquet colored yellow\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"finale\"\u003eFinale\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRepeat of the opener scene: pan down as a trope that cycles from the beginning\u003c/li\u003e\n\u003cli\u003eIan is in Louise\u0026rsquo;s house!\u003c/li\u003e\n\u003cli\u003eThe house+baby scenes (which is different from baby nature scenes) is lit orange in the same way as the banquet scene\n\u003cul\u003e\n\u003cli\u003ewhereas the hospital scene and the house scene were lit blue\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbharrival_movie/","tags":null,"title":"Arrival Movie"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbharthur_m_schlesinger/","tags":null,"title":"Arthur M. Schlesinger"},{"categories":null,"contents":"Artificial Intelligence is defined as the act of parameter estimation.\nToy example for Linear Regression Imagine if we want to predict the price of a choclate bar We feed in a bunch of bar weight vs. 
chocolate price data; that\u0026rsquo;s the training data Then, we come up with a model of the training data We then throw away the training data Therefore, we can think of the model as a COMPRESSION of the training data; estimating parameters.\nBias in AI It is the ability of AI as a TOOL that causes it a very big UNDOING.\nBlueberry Muffin vs. Chuwawa Its quite difficult to come with codified rules; yet, we can just ask for it to compressive\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhartificial_intelligence/\"\u003eArtificial Intelligence\u003c/a\u003e is defined as the act of \u003cstrong\u003eparameter estimation\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"toy-example-for-linear-regression\"\u003eToy example for Linear Regression\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eImagine if we want to predict the price of a choclate bar\u003c/li\u003e\n\u003cli\u003eWe feed in a bunch of bar weight vs. chocolate price data; that\u0026rsquo;s the \u003cstrong\u003etraining data\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eThen, we come up with a \u003cstrong\u003emodel\u003c/strong\u003e of the \u003cstrong\u003etraining data\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eWe then throw away the \u003cstrong\u003etraining data\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore, we can think of the \u003cstrong\u003emodel\u003c/strong\u003e as a \u003cstrong\u003eCOMPRESSION\u003c/strong\u003e of the training data; \u003cstrong\u003eestimating\u003c/strong\u003e parameters.\u003c/p\u003e\n\u003ch2 id=\"bias-in-ai\"\u003eBias in AI\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eIt is the ability of AI as a TOOL that causes it a very big UNDOING\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"blueberry-muffin-vs-dot-chuwawa\"\u003eBlueberry Muffin vs. 
Chuwawa\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-29_09-46-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIts quite difficult to come with codified rules; yet, we can just ask for it to compressive\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhartificial_intelligence/","tags":null,"title":"Artificial Intelligence"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhasbmb/","tags":null,"title":"ASBMB"},{"categories":null,"contents":"2023 annual meeting of ASBMB.\nTalks Molecular Engineering\nChristopher Barnes, Stanford SARS-COV2 Structural Analysis 10.1126/sciimmunol.ade0958 Emma J. Chory, Duke Robotics-Assisted Directed Evolution 10.1038/s41592-021-01348-4 Daniel-Adriano Silva, Monod De novo biosensors 10.1038/s41586-021-03258-z Structure Determination and Machine Learning\nSonya Hanson, Flatiron cyro-EM + ensemble reweighting 10.1073/pnas.1419276111 Celia Schiffer, UMass Med Drug Resistance Analysis 10.7554/eLife.77433 Arvind Ramanathan, Argonne Lab Models of Interaction Analysis ?, see GenSLMs Jason K Perry, Gilead Structure of COVID Replication Protein Structure and Biophysics\nErik Yukl Zinc ABC Transporters ? Wentao Li Calpains \u0026hellip; AFIB Drug Design w/ AI\nAlpha Lee, PostEra ML COVID Drug Discovery 10.1101/2020.10.29.339317 David Baker, UW De Novo Protein Design RFDiffusion, RoseTTAFold2 Relay Therapeutics Active Learning Molecule Iteration Rommie E Amary, UCSD Immunogen Design Things to Google diffusion maps and https://www.charmm-gui.org/ GenSLMs: LLMs, but genome sequence representation learning RFDiffusion \u0026ldquo;quantum chemistry\u0026rdquo; https://www.biosolveit.de/infiniSee what tool do these people use to make these? 
","html":"\u003cp\u003e2023 annual meeting of \u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"talks\"\u003eTalks\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eMolecular Engineering\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eChristopher Barnes, \u003ca href=\"/posts/kbhstanford/\"\u003eStanford\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhsars_cov2_structural_analysis/\"\u003eSARS-COV2 Structural Analysis\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e10.1126/sciimmunol.ade0958\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eEmma J. Chory, Duke\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrobotics_assisted_directed_evolution/\"\u003eRobotics-Assisted Directed Evolution\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e10.1038/s41592-021-01348-4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eDaniel-Adriano Silva, Monod\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhde_novo_biosensors/\"\u003eDe novo biosensors\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e10.1038/s41586-021-03258-z\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eStructure Determination and Machine Learning\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSonya Hanson, Flatiron\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e + \u003ca href=\"/posts/kbhcyro_em/#ensemble-reweighting\"\u003eensemble reweighting\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e10.1073/pnas.1419276111\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCelia Schiffer, UMass Med\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmolecular_drug_resistance/\"\u003eDrug Resistance 
Analysis\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e10.7554/eLife.77433\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eArvind Ramanathan, Argonne Lab\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfundational_models_of_interaction_analysis/\"\u003eModels of Interaction Analysis\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e?, see \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJason K Perry, Gilead\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhstructure_of_covid_replication/\"\u003eStructure of COVID Replication\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eProtein Structure and Biophysics\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eErik Yukl\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhzinc_abc_transporters/\"\u003eZinc ABC Transporters\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e?\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eWentao Li\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcalpains_afib/\"\u003eCalpains \u0026hellip; AFIB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eDrug Design w/ AI\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eAlpha Lee, PostEra\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhml_drug_discovery/\"\u003eML COVID Drug Discovery\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e10.1101/2020.10.29.339317\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eDavid Baker, UW\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhde_novo_protein_design/\"\u003eDe Novo Protein 
Design\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e, \u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eRelay Therapeutics\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhactive_learning_molecule_iteration/\"\u003eActive Learning Molecule Iteration\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eRommie E Amary, UCSD\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhimmunogen_design/\"\u003eImmunogen Design\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"things-to-google\"\u003eThings to Google\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiffusion_map/\"\u003ediffusion map\u003c/a\u003es and\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.charmm-gui.org/\"\u003ehttps://www.charmm-gui.org/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e: LLMs, but genome sequence\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrepresentation_learning/\"\u003erepresentation learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;quantum chemistry\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.biosolveit.de/infiniSee\"\u003ehttps://www.biosolveit.de/infiniSee\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewhat tool do these people use to make these?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasbmb2023_index/","tags":null,"title":"ASBMB2023 Index"},{"categories":null,"contents":"ASCII represents each char as an integer, its \u0026ldquo;ascii value\u0026rdquo;.\nUppercase letters are sequentially numbered Lowercase 
letters are sequentially numbered Digits are sequentially numbered Lowercase letters are 32 more than their uppercases (which means its a single bit flip) char upper = \u0026#39;A\u0026#39;; // 65 char lower = \u0026#39;a\u0026#39;; // 97 char zero = \u0026#39;0\u0026#39;; // 48 ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhascii/\"\u003eASCII\u003c/a\u003e represents each \u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e as an \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e, its \u0026ldquo;ascii value\u0026rdquo;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eUppercase letters are sequentially numbered\u003c/li\u003e\n\u003cli\u003eLowercase letters are sequentially numbered\u003c/li\u003e\n\u003cli\u003eDigits are sequentially numbered\u003c/li\u003e\n\u003cli\u003eLowercase letters are 32 more than their uppercases (which means its a single bit flip)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eupper\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;A\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// 65\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elower\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#39;a\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// 97\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;0\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// 48\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhascii/","tags":null,"title":"ASCII"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhasee_prism/","tags":null,"title":"ASEE Prism"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhasip/","tags":null,"title":"ASIP"},{"categories":null,"contents":"ASR are tech that helps make transcripts from speech\n","html":"\u003cp\u003eASR are tech that helps make transcripts from speech\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasr/","tags":null,"title":"ASR"},{"categories":null,"contents":"After a brief survey of current literature, it appears that no standardized benchmarks for ASR on clinical data exists that are widely used. 
Given the vast resources available from the TalkBank corpus, it is not infeasible to build such a corpus and evaluate the performance of a few commercial ASR systems in its ability to perform such a task.\nDespite there not being a single baseline that works to benchmark ASR on clinical datasets, a few different subsets of efforts exists on each component of this front.\nEvaluation Datasets As perhaps an exemplar to the lack of standardization in ASR performance evaluation, the Whisper ASR model ((NO_ITEM_DATA:radford2022robust)) was not evaluated on one particular benchmark but instead a series of multi-domain benchmarks.\nThis is perhaps for good reason, recent results (discuses below) show that single-domain benchmarks do not describe performance well across other domains, or even other usage methods. Therefore, the battery of different tests done by (Radford et al. 2022) could be essentially thought of as a single battery of multi-usage tests that covers a good span of recent-standard ASR performance tests; among them:\nStandard Datasets CORAAL: a dataset of high-quality lower-fidelity recordings of African-American vernacular of varying degrees in conversation (incl. cross talk, etc.) ((Farrington and Kendall 2021)) EARNINGS: a set of benchmark datasets of earnings calls within various financial industries ((Rio et al. 2021)) TED-LIUM 3: a dataset of high-fidelity recordings of full-length TED talks ((Hernandez et al. 2018)) In addition to the three evaluation datasets provided, the entire model was also trained on (Panayotov et al. 2015), a gold-standard corpus of open-source high-fidelity recordings of audiobooks.\n\u0026ldquo;Home brew\u0026rdquo; Benchmarks In addition to the three published, standard datasets above, Radford et al. 
also used a series of self-selected datasets of varying quality.\nRev16: Rev.AI\u0026rsquo;s clean podcast transcription dataset https://www.rev.ai/blog/podcast-transcription-benchmark-part-1/ Meanwhile: Recordings of Stephen Colbert\u0026rsquo;s Meanwhile segments Kinkaid46: \u0026hellip;.apparently a selection of YouTube videos from this guy\u0026rsquo;s blog post: https://medium.com/descript/which-automatic-transcription-service-is-the-most-accurate-2018-2e859b23ed19 Evaluation Metrics Most benchmarks still report results in terms of word-error-rate (WER) and the standard lexical distance metric of BLEU ((Papineni et al. 2001)). These are two generally well-accepted ways of reporting ASR performance, and for most of the datasets cited above suffice.\nHowever, some very recent results ((Shor et al. 2023)) indicate that BLEU and WER themselves do not capture a good view for what would be clinically relevant data. Some ASR mistakes (such as that on the investigator, or that which doesn\u0026rsquo;t relate to the disfluency being observed) matter a lot less than others (errors on the participant, esp. missing filled pauses, wrongly coded utterances etc.). The work by Shor et al. presents also an alternative metric to quantify such errors: essentially training a BERT model ((Devlin et al. 2018)) to perform the classification task of \u0026ldquo;clinician preference\u0026rdquo; (i.e. \u0026ldquo;predict which of these errors would be less problematic to a clinician\u0026rdquo;), then using the results of that model to evaluate the ASR performance.\nThis last method is likely overkill. However, it is useful to discuss if richer information\u0026ndash;such as special binning for missing clinically significant markers, like filled pauses\u0026mdash;in addition to simple BLEU and WER will be useful as we develop our own benchmarks.\nDiscussion (Szymański et al. 2020) (sec. 
3, \u0026ldquo;Call to Action) offers some guidance with regards to the design of robust ASR benchmarks. Among which:\nHigher quality annotations, like morphology information we provide with %mor, to help aid language model training Broader range of human diects and variations covered Performance across many recording domains (various processing of audio signals, properties of the signal itself, etc.) Though TalkBank contains a wealth of data, individual corpuses often have little variation, which Szymańki et. al. shows cause degraded performance. Therefore, it is useful to create a benchmark that strives across multiple problem domains and recording schemes to be able to provide a reproducible and more accurate benchmark of a given model.\nSzymańki et. al. also brought up another issue through their paper: \u0026ldquo;due to legal constraints \u0026hellip; we are not able to provide the community with neither the benchmark data nor the detailed information about evaluated systems.\u0026rdquo; ((Szymański et al. 2020), section 2.) The anonymization of benchmarked models seen in both Szymańki et. al. and Radford et. al. may point to a certain legal barrier in specifically benchmarking existing, commercial ASR models.\nDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” arXiv Preprint arXiv:1810.04805. Farrington, Charlie, and Tyler Kendall. 2021. “The Corpus of Regional African American Language.” doi:10.7264/1AD5-6T35. Hernandez, François, Vincent Nguyen, Sahar Ghannay, Natalia Tomashenko, and Yannick Estève. 2018. “TED-LIUM 3: Twice as Much Data and Corpus Repartition for Experiments on Speaker Adaptation,” 11096:198–208. doi:10.1007/978-3-319-99579-3_21. Panayotov, Vassil, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. 2015. 
“Librispeech: An ASR Corpus Based on Public Domain Audio Books.” In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 5206–10. South Brisbane, Queensland, Australia: IEEE. doi:10.1109/ICASSP.2015.7178964. Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2001. “BLEU: A Method for Automatic Evaluation of Machine Translation.” In Proceedings of the 40th Annual Meeting on Association for Computational Linguistics - ACL ’02, 311. Philadelphia, Pennsylvania: Association for Computational Linguistics. doi:10.3115/1073083.1073135. Radford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. “Robust Speech Recognition via Large-Scale Weak Supervision.” arXiv Preprint arXiv:2212.04356. Rio, M., Natalie Delworth, R. Westerman, Michelle Huang, Nishchal Bhandari, Joseph Palakapilly, Quinten McNamara, Joshua Dong, Piotr Żelasko, and Miguel Jette. 2021. “Earnings-21: A Practical Benchmark for ASR in the Wild.” ArXiv. doi:10.21437/Interspeech.2021-1915. Shor, Joel, Ruyue Agnes Bi, Subhashini Venugopalan, Steven Ibara, Roman Goldenberg, and Ehud Rivlin. 2023. “Clinical BERTScore: An Improved Measure of Automatic Speech Recognition Performance in Clinical Settings.” arXiv. http://arxiv.org/abs/2303.05737. Szymański, Piotr, Piotr Żelasko, Mikolaj Morzy, Adrian Szymczak, Marzena Żyła-Hoppe, Joanna Banaszczak, Lukasz Augustyniak, Jan Mizgajski, and Yishay Carmiel. 2020. “WER We Are and WER We Think We Are.” Findings of the Association for Computational Linguistics: EMNLP 2020, 3290–95. doi:10.18653/v1/2020.findings-emnlp.295. NO_ITEM_DATA:radford2022robust) ","html":"\u003cp\u003eAfter a brief survey of current literature, it appears that no standardized benchmarks for ASR on clinical data exists that are widely used. 
Given the vast resources available from the TalkBank corpus, it is not infeasible to build such a corpus and evaluate the performance of a few commercial ASR systems in its ability to perform such a task.\u003c/p\u003e\n\u003cp\u003eDespite there not being a single baseline that works to benchmark ASR on clinical datasets, a few different subsets of efforts exists on each component of this front.\u003c/p\u003e\n\u003ch2 id=\"evaluation-datasets\"\u003eEvaluation Datasets\u003c/h2\u003e\n\u003cp\u003eAs perhaps an exemplar to the lack of standardization in ASR performance evaluation, the Whisper ASR model ((NO_ITEM_DATA:radford2022robust)) was not evaluated on one particular benchmark but instead a series of multi-domain benchmarks.\u003c/p\u003e\n\u003cp\u003eThis is perhaps for good reason, recent results (discuses below) show that single-domain benchmarks do not describe performance well across other domains, or even other usage methods. Therefore, the battery of different tests done by (\u003ca href=\"#citeproc_bib_item_6\"\u003eRadford et al. 2022\u003c/a\u003e) could be essentially thought of as a single battery of multi-usage tests that covers a good span of recent-standard ASR performance tests; among them:\u003c/p\u003e\n\u003ch3 id=\"standard-datasets\"\u003eStandard Datasets\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCORAAL: a dataset of high-quality \u003cstrong\u003elower-fidelity recordings\u003c/strong\u003e of African-American vernacular of varying degrees in conversation (incl. cross talk, etc.) ((\u003ca href=\"#citeproc_bib_item_2\"\u003eFarrington and Kendall 2021\u003c/a\u003e))\u003c/li\u003e\n\u003cli\u003eEARNINGS: a set of benchmark datasets of \u003cstrong\u003eearnings calls\u003c/strong\u003e within various financial industries ((\u003ca href=\"#citeproc_bib_item_7\"\u003eRio et al. 
2021\u003c/a\u003e))\u003c/li\u003e\n\u003cli\u003eTED-LIUM 3: a dataset of \u003cstrong\u003ehigh-fidelity recordings\u003c/strong\u003e of full-length TED talks ((\u003ca href=\"#citeproc_bib_item_3\"\u003eHernandez et al. 2018\u003c/a\u003e))\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn addition to the three evaluation datasets provided, the entire model was also trained on (\u003ca href=\"#citeproc_bib_item_4\"\u003ePanayotov et al. 2015\u003c/a\u003e), a gold-standard corpus of open-source \u003cstrong\u003ehigh-fidelity recordings\u003c/strong\u003e of audiobooks.\u003c/p\u003e\n\u003ch3 id=\"home-brew-benchmarks\"\u003e\u0026ldquo;Home brew\u0026rdquo; Benchmarks\u003c/h3\u003e\n\u003cp\u003eIn addition to the three published, standard datasets above, Radford et al. also used a series of self-selected datasets of varying quality.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eRev16: Rev.AI\u0026rsquo;s \u003cstrong\u003eclean podcast\u003c/strong\u003e transcription dataset \u003ca href=\"https://www.rev.ai/blog/podcast-transcription-benchmark-part-1/\"\u003ehttps://www.rev.ai/blog/podcast-transcription-benchmark-part-1/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eMeanwhile: Recordings of Stephen Colbert\u0026rsquo;s Meanwhile segments\u003c/li\u003e\n\u003cli\u003eKinkaid46: \u0026hellip;.apparently a selection of YouTube videos from this guy\u0026rsquo;s blog post: \u003ca href=\"https://medium.com/descript/which-automatic-transcription-service-is-the-most-accurate-2018-2e859b23ed19\"\u003ehttps://medium.com/descript/which-automatic-transcription-service-is-the-most-accurate-2018-2e859b23ed19\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"evaluation-metrics\"\u003eEvaluation Metrics\u003c/h2\u003e\n\u003cp\u003eMost benchmarks still report results in terms of word-error-rate (WER) and the standard lexical distance metric of BLEU ((\u003ca href=\"#citeproc_bib_item_5\"\u003ePapineni et al. 2001\u003c/a\u003e)). 
These are two generally well-accepted ways of reporting ASR performance, and for most of the datasets cited above suffice.\u003c/p\u003e\n\u003cp\u003eHowever, some very recent results ((\u003ca href=\"#citeproc_bib_item_8\"\u003eShor et al. 2023\u003c/a\u003e)) indicate that BLEU and WER themselves do not capture a good view for what would be clinically relevant data. Some ASR mistakes (such as that on the investigator, or that which doesn\u0026rsquo;t relate to the disfluency being observed) matter a lot less than others (errors on the participant, esp. missing filled pauses, wrongly coded utterances etc.). The work by Shor et al. presents also an alternative metric to quantify such errors: essentially training a BERT model ((\u003ca href=\"#citeproc_bib_item_1\"\u003eDevlin et al. 2018\u003c/a\u003e)) to perform the classification task of \u0026ldquo;clinician preference\u0026rdquo; (i.e. \u0026ldquo;predict which of these errors would be less problematic to a clinician\u0026rdquo;), then using the results of that model to evaluate the ASR performance.\u003c/p\u003e\n\u003cp\u003eThis last method is likely overkill. However, it is useful to discuss if richer information\u0026ndash;such as special binning for missing clinically significant markers, like filled pauses\u0026mdash;in addition to simple BLEU and WER will be useful as we develop our own benchmarks.\u003c/p\u003e\n\u003ch2 id=\"discussion\"\u003eDiscussion\u003c/h2\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_9\"\u003eSzymański et al. 2020\u003c/a\u003e) (sec. 3, \u0026ldquo;Call to Action) offers some guidance with regards to the design of robust ASR benchmarks. 
Among which:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eHigher quality annotations, like morphology information we provide with %mor, to help aid language model training\u003c/li\u003e\n\u003cli\u003eBroader range of human diects and variations covered\u003c/li\u003e\n\u003cli\u003ePerformance across many recording domains (various processing of audio signals, properties of the signal itself, etc.)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThough TalkBank contains a wealth of data, individual corpuses often have little variation, which Szymańki et. al. shows cause degraded performance. Therefore, it is useful to create a benchmark that strives across multiple problem domains and recording schemes to be able to provide a reproducible and more accurate benchmark of a given model.\u003c/p\u003e\n\u003cp\u003eSzymańki et. al. also brought up another issue through their paper: \u0026ldquo;due to legal constraints \u0026hellip; we are not able to provide the community with neither the benchmark data nor the detailed information about evaluated systems.\u0026rdquo; ((\u003ca href=\"#citeproc_bib_item_9\"\u003eSzymański et al. 2020\u003c/a\u003e), section 2.) The anonymization of benchmarked models seen in both Szymańki et. al. and Radford et. al. may point to a certain legal barrier in specifically benchmarking existing, commercial ASR models.\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” \u003ci\u003earXiv Preprint arXiv:1810.04805\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eFarrington, Charlie, and Tyler Kendall. 2021. 
“The Corpus of Regional African American Language.” doi:\u003ca href=\"https://doi.org/10.7264/1AD5-6T35\"\u003e10.7264/1AD5-6T35\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eHernandez, François, Vincent Nguyen, Sahar Ghannay, Natalia Tomashenko, and Yannick Estève. 2018. “TED-LIUM 3: Twice as Much Data and Corpus Repartition for Experiments on Speaker Adaptation,” 11096:198–208. doi:\u003ca href=\"https://doi.org/10.1007/978-3-319-99579-3_21\"\u003e10.1007/978-3-319-99579-3_21\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003ePanayotov, Vassil, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. 2015. “Librispeech: An ASR Corpus Based on Public Domain Audio Books.” In \u003ci\u003e2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)\u003c/i\u003e, 5206–10. South Brisbane, Queensland, Australia: IEEE. doi:\u003ca href=\"https://doi.org/10.1109/ICASSP.2015.7178964\"\u003e10.1109/ICASSP.2015.7178964\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003ePapineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2001. “BLEU: A Method for Automatic Evaluation of Machine Translation.” In \u003ci\u003eProceedings of the 40th Annual Meeting on Association for Computational Linguistics - ACL ’02\u003c/i\u003e, 311. Philadelphia, Pennsylvania: Association for Computational Linguistics. doi:\u003ca href=\"https://doi.org/10.3115/1073083.1073135\"\u003e10.3115/1073083.1073135\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eRadford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. 
“Robust Speech Recognition via Large-Scale Weak Supervision.” \u003ci\u003earXiv Preprint arXiv:2212.04356\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eRio, M., Natalie Delworth, R. Westerman, Michelle Huang, Nishchal Bhandari, Joseph Palakapilly, Quinten McNamara, Joshua Dong, Piotr Żelasko, and Miguel Jette. 2021. “Earnings-21: A Practical Benchmark for ASR in the Wild.” \u003ci\u003eArXiv\u003c/i\u003e. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2021-1915\"\u003e10.21437/Interspeech.2021-1915\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eShor, Joel, Ruyue Agnes Bi, Subhashini Venugopalan, Steven Ibara, Roman Goldenberg, and Ehud Rivlin. 2023. “Clinical BERTScore: An Improved Measure of Automatic Speech Recognition Performance in Clinical Settings.” arXiv. \u003ca href=\"http://arxiv.org/abs/2303.05737\"\u003ehttp://arxiv.org/abs/2303.05737\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003eSzymański, Piotr, Piotr Żelasko, Mikolaj Morzy, Adrian Szymczak, Marzena Żyła-Hoppe, Joanna Banaszczak, Lukasz Augustyniak, Jan Mizgajski, and Yishay Carmiel. 2020. “WER We Are and WER We Think We Are.” \u003ci\u003eFindings of the Association for Computational Linguistics: EMNLP 2020\u003c/i\u003e, 3290–95. 
doi:\u003ca href=\"https://doi.org/10.18653/v1/2020.findings-emnlp.295\"\u003e10.18653/v1/2020.findings-emnlp.295\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003eNO_ITEM_DATA:radford2022robust)\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasr_disordered_speech/","tags":null,"title":"ASR on Disordered Speech"},{"categories":null,"contents":"associative means that operations can be grouped in any way as long as order is preserved.\nThat is:\n\\begin{equation} (AB)C = A(BC) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e means that \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es can be grouped in any way as long as order is preserved.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(AB)C = A(BC)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhassociative/","tags":null,"title":"associative"},{"categories":null,"contents":"~ Given functions \\(f(n)\\) and \\(g(n)\\), if:\n\\begin{equation} \\lim_{n\\to \\infty} \\left(\\frac{f(n)}{g(n)}\\right) = 1 \\end{equation}\nwe say that \\(f \\sim g\\).\nThat \u0026ndash; the relationship between \\(f\\) and \\(g\\) grows in a similar fashion as \\(n\\) increases. For instance:\n\\(f(n) = n+1\\) \\(g(n) = n+2\\) Therefore:\n\\begin{equation} f\\sim g = \\lim_{n\\to \\infty} \\frac{f(n)}{g(n)} = \\lim_{n\\to \\infty} \\frac{n+1}{n+2} = 1 \\end{equation}\nThe \\(\\sim\\) operator is commutative (\\(f \\sim g \\Rightarrow g\\sim f\\)) and transitive (\\(f\\sim g, g\\sim h \\Rightarrow f \\sim h\\)).\no(n) Given two functions \\(f(n)\\), \\(g(n)\\), if their relationship shows:\n\\begin{equation} \\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} = 0 \\end{equation}\nwe can write it as\n\\begin{equation} f = o(g) \\end{equation}\nThis tells us that if \\(n\\) becomes very large, \\(g\\) becomes much larger than \\(f\\). 
\\(f\\) does not grow nearly as fast as \\(g\\).\nThe operation is not commutative, but is transitive (\\(f = o(g), g = o(h) \\Rightarrow f = o(h)\\))\nO(n) Given two functions \\(f(n)\\), \\(g(n)\\).\n\\begin{equation} \\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} \u0026lt; \\infty \\end{equation}\nthat the relationship between \\(f(n)\\) and \\(g(n)\\) is countable as \\(n\\) trends to infinity.\nWe can also say that, given \\(n\\), \\(n_0\\), and some \\(c\\) which \\(\\forall n, n \u0026gt; n_0\\), there is:\n\\begin{equation} |f(n)| \u0026lt; |cg(n)| \\end{equation}\nThis tells us that \\(f(n)\\) does not grow much much faster than \\(g(n)\\).\nTherefore:\nIf \\(f \\sim g\\), \\(f = O(g)\\) (as they grow together, \\(f\\) is not much faster) If \\(f = o(g)\\), \\(f=O(g)\\) (as \\(f\\) does not grow at all, \\(f\\) is not faster) \\(\\theta\\)(n) \\(f=\\theta(g)\\) IFF \\(f=O(g)\\) and \\(g=O(f)\\), its essentially \\(\\sim\\) but without the strict requirement of a 1:1 ratio.\n\\(\\omega\\)(n) and \\(\\Omega\\)(n) The inverses of \\(O\\) and \\(o\\):\n\\(f(n) = O(g(n)) \\Rightarrow g(n) = \\omega(f(n))\\) \\(f(n) = o(g(n)) \\Rightarrow g(n) = \\Omega(f(n))\\) ","html":"\u003ch2 id=\"4c761f\"\u003e~\u003c/h2\u003e\n\u003cp\u003eGiven functions \\(f(n)\\) and \\(g(n)\\), if:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n\\to \\infty} \\left(\\frac{f(n)}{g(n)}\\right) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe say that \\(f \\sim g\\).\u003c/p\u003e\n\u003cp\u003eThat \u0026ndash; the relationship between \\(f\\) and \\(g\\) grows in a similar fashion as \\(n\\) increases. 
For instance:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(f(n) = n+1\\)\u003c/li\u003e\n\u003cli\u003e\\(g(n) = n+2\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf\\sim g = \\lim_{n\\to \\infty} \\frac{f(n)}{g(n)} = \\lim_{n\\to \\infty} \\frac{n+1}{n+2} = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe \\(\\sim\\) operator is \u003cem\u003ecommutative\u003c/em\u003e (\\(f \\sim g \\Rightarrow g\\sim f\\)) and \u003cem\u003etransitive\u003c/em\u003e (\\(f\\sim g, g\\sim h \\Rightarrow f \\sim h\\)).\u003c/p\u003e\n\u003ch2 id=\"o--n\"\u003eo(n)\u003c/h2\u003e\n\u003cp\u003eGiven two functions \\(f(n)\\), \\(g(n)\\), if their relationship shows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write it as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf = o(g)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that if \\(n\\) becomes very large, \\(g\\) becomes much larger than \\(f\\). 
\\(f\\) does not grow nearly as fast as \\(g\\).\u003c/p\u003e\n\u003cp\u003eThe operation is \u003cem\u003enot\u003c/em\u003e commutative, but is \u003cem\u003etransitive\u003c/em\u003e (\\(f = o(g), g = o(h) \\Rightarrow f = o(h)\\))\u003c/p\u003e\n\u003ch2 id=\"o--n\"\u003eO(n)\u003c/h2\u003e\n\u003cp\u003eGiven two functions \\(f(n)\\), \\(g(n)\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} \u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat the relationship between \\(f(n)\\) and \\(g(n)\\) is countable as \\(n\\) trends to infinity.\u003c/p\u003e\n\u003cp\u003eWe can also say that, given \\(n\\), \\(n_0\\), and some \\(c\\) which \\(\\forall n, n \u0026gt; n_0\\), there is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|f(n)| \u0026lt; |cg(n)|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that \\(f(n)\\) does not grow much much faster than \\(g(n)\\).\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIf \\(f \\sim g\\), \\(f = O(g)\\) (as they grow together, \\(f\\) is not much faster)\u003c/li\u003e\n\u003cli\u003eIf \\(f = o(g)\\), \\(f=O(g)\\) (as \\(f\\) does not grow at all, \\(f\\) is not faster)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"theta--n\"\u003e\\(\\theta\\)(n)\u003c/h2\u003e\n\u003cp\u003e\\(f=\\theta(g)\\) IFF \\(f=O(g)\\) and \\(g=O(f)\\), its essentially \\(\\sim\\) but without the strict requirement of a 1:1 ratio.\u003c/p\u003e\n\u003ch2 id=\"omega--n--and-omega--n\"\u003e\\(\\omega\\)(n) and \\(\\Omega\\)(n)\u003c/h2\u003e\n\u003cp\u003eThe inverses of \\(O\\) and \\(o\\):\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(f(n) = O(g(n)) \\Rightarrow g(n) = \\omega(f(n))\\)\u003c/li\u003e\n\u003cli\u003e\\(f(n) = o(g(n)) \\Rightarrow g(n) = \\Omega(f(n))\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasymtotic_analysis/","tags":null,"title":"asymtotic 
analysis"},{"categories":null,"contents":"You can use atoms as many different types of qubits.\nmanipulating physical qubits To make physical qubits go to different states, we will again use something in the ancillary states. Rotating it to \\(z\\) \u0026mdash; leverage one lazer to make it fall; \\(rx\\), \\(ry\\), we leverage combinations of two light.\nvarious qubit implementations Implementations of physical qubits\nType Superconductor Ions Atoms Company Google, IBM, Rigetti IonQ, Honeywell Atom Computing, QuEra Nature Artifical Natural Natural Calibration Individual calibration Naturally calibrated Naturally calibrated Coherence Time Short Long Long Connectivity Adjacent connectivity All-to-all More than adjacent Scalability Compatible with existing tech Not easily scalable Potentially scalable Speed Fast gates Kinda fast Untested possible uses for qubits Here are some possible uses for physical qubits\nTraveling salesman Research + simulations Cryptography ","html":"\u003cp\u003eYou can use atoms as many different types of \u003ca href=\"/posts/kbhqubits/\"\u003equbits\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"manipulating-physical-qubits\"\u003emanipulating physical qubits\u003c/h2\u003e\n\u003cp\u003eTo make \u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e go to different states, we will again use something in the ancillary states. 
Rotating it to \\(z\\) \u0026mdash; leverage one lazer to make it fall; \\(rx\\), \\(ry\\), we leverage combinations of two light.\u003c/p\u003e\n\u003ch2 id=\"various-qubit-implementations\"\u003evarious qubit implementations\u003c/h2\u003e\n\u003cp\u003eImplementations of \u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eType\u003c/th\u003e\n\u003cth\u003eSuperconductor\u003c/th\u003e\n\u003cth\u003eIons\u003c/th\u003e\n\u003cth\u003eAtoms\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCompany\u003c/td\u003e\n\u003ctd\u003eGoogle, IBM, Rigetti\u003c/td\u003e\n\u003ctd\u003eIonQ, Honeywell\u003c/td\u003e\n\u003ctd\u003eAtom Computing, QuEra\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eNature\u003c/td\u003e\n\u003ctd\u003eArtifical\u003c/td\u003e\n\u003ctd\u003eNatural\u003c/td\u003e\n\u003ctd\u003eNatural\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eCalibration\u003c/td\u003e\n\u003ctd\u003eIndividual calibration\u003c/td\u003e\n\u003ctd\u003eNaturally calibrated\u003c/td\u003e\n\u003ctd\u003eNaturally calibrated\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcoherence_time/\"\u003eCoherence Time\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eShort\u003c/td\u003e\n\u003ctd\u003eLong\u003c/td\u003e\n\u003ctd\u003eLong\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eConnectivity\u003c/td\u003e\n\u003ctd\u003eAdjacent connectivity\u003c/td\u003e\n\u003ctd\u003eAll-to-all\u003c/td\u003e\n\u003ctd\u003eMore than adjacent\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eScalability\u003c/td\u003e\n\u003ctd\u003eCompatible with existing tech\u003c/td\u003e\n\u003ctd\u003eNot easily scalable\u003c/td\u003e\n\u003ctd\u003ePotentially 
scalable\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSpeed\u003c/td\u003e\n\u003ctd\u003eFast gates\u003c/td\u003e\n\u003ctd\u003eKinda fast\u003c/td\u003e\n\u003ctd\u003eUntested\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"possible-uses-for-qubits\"\u003epossible uses for qubits\u003c/h2\u003e\n\u003cp\u003eHere are some possible uses for \u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTraveling salesman\u003c/li\u003e\n\u003cli\u003eResearch + simulations\u003c/li\u003e\n\u003cli\u003eCryptography\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhatoms_as_qubits/","tags":null,"title":"atoms as qubits"},{"categories":null,"contents":"AFIB is a heart conditinos, which is augmented during heart surgery.\n\u0026ldquo;Endogeneous extrolluculor proteases damage to Kv1.5 in the atria contributes to AFIB\u0026rdquo;\nWBC\u0026rsquo;s secretion of proteases, such as calpain when it is inflamed ","html":"\u003cp\u003eAFIB is a heart conditinos, which is augmented during heart surgery.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Endogeneous extrolluculor proteases damage to Kv1.5 in the atria contributes to AFIB\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWBC\u0026rsquo;s secretion of proteases, such as calpain when it is inflamed\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhafib/","tags":null,"title":"Atrial Fibrillation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhauthoritarianism/","tags":null,"title":"Authoritarianism"},{"categories":null,"contents":"autism is a spectrum disorder that are caused by both environmental and genetic factors.\nKey Question: how can different chromatin regulators lead to the same set of symptoms named \u0026ldquo;autism\u0026rdquo;.\nautism gene signature The gene signature of autism can be 
measured in clean and quantitative assays.\n","html":"\u003cp\u003eautism is a spectrum disorder that are caused by both environmental and genetic factors.\u003c/p\u003e\n\u003cp\u003eKey Question: how can different \u003ca href=\"/posts/kbhchromatin/\"\u003echromatin\u003c/a\u003e regulators lead to the same set of symptoms named \u0026ldquo;\u003ca href=\"/posts/kbhautism/\"\u003eautism\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"autism--kbhautism-dot-md--gene-signature\"\u003e\u003ca href=\"/posts/kbhautism/\"\u003eautism\u003c/a\u003e gene signature\u003c/h2\u003e\n\u003cp\u003eThe gene signature of autism can be measured in clean and quantitative assays.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhautism/","tags":null,"title":"autism"},{"categories":null,"contents":"an First Order ODE is \u0026ldquo;autonomous\u0026rdquo; when:\n\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nfor some \\(f\\) of one variables. Meaning, it only depends on the independent variable \\(t\\) through the use of \\(y(t)\\) in context.\nThis is a special class of seperable diffequ.\nautonomous ODEs level off at stationary curves for autonomous ODEs can never level off at non-stationary points. Otherwise, that would be a stationary point.\nSee stability (ODEs)\ntime-invariant expressions For forms by which:\n\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nas in, the expression is time invariant.\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODE\u003c/a\u003e is \u0026ldquo;autonomous\u0026rdquo; when:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some \\(f\\) of one variables. 
Meaning, it only depends on the independent variable \\(t\\) through the use of \\(y(t)\\) in context.\u003c/p\u003e\n\u003cp\u003eThis is a special class of \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable diffequ\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"autonomous-odes-level-off-at-stationary-curves\"\u003eautonomous ODEs level off at stationary curves\u003c/h2\u003e\n\u003cp\u003efor autonomous ODEs can never level off at non-stationary points. Otherwise, that would be a stationary point.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhstability/\"\u003estability (ODEs)\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"time-invariant-expressions\"\u003etime-invariant expressions\u003c/h2\u003e\n\u003cp\u003eFor forms by which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas in, the expression is \u003cstrong\u003etime invariant\u003c/strong\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhautonomous_odes/","tags":null,"title":"autonomous ODEs"},{"categories":null,"contents":"Key sequence In this chapter, we defined complex numbers, their definition, their closeness under addition and multiplication, and their properties These properties make them a field: namely, they have, associativity, commutativity, identities, inverses, and distribution. notably, they are different from a group by having 1) two operations 2) additionally, commutativity and distributivity. We then defined \\(\\mathbb{F}^n\\), defined addition, additive inverse, and zero. These combined (with some algebra) shows that \\(\\mathbb{F}^n\\) under addition is a commutative group. Lastly, we show that there is this magical thing called scalar multiplication in \\(\\mathbb{F}^n\\) and that its associative, distributive, and has an identity. Technically scalar multiplication in \\(\\mathbb{F}^n\\) commutes too but extremely wonkily so we don\u0026rsquo;t really think about it. 
New Definitions complex number addition and multiplication of complex numbers subtraction and division of complex numbers field: \\(\\mathbb{F}\\) is \\(\\mathbb{R}\\) or \\(\\mathbb{C}\\) power list \\(\\mathbb{F}^n\\): F^n coordinate addition in \\(\\mathbb{F}^n\\) additive inverse of \\(\\mathbb{F}^n\\) \\(0\\): zero scalar multiplication in \\(\\mathbb{F}^n\\) Results and Their Proofs properties of complex arithmetic commutativity associativity identities additive inverse multiplicative inverse distributive property properties of \\(\\mathbb{F}^n\\) addition in \\(\\mathbb{F}^n\\) is associative addition in \\(\\mathbb{F}^n\\) is commutative addition in \\(\\mathbb{F}^n\\) has an identity (zero) addition in \\(\\mathbb{F}^n\\) has an inverse scalar multiplication in \\(\\mathbb{F}^n\\) is associative scalar multiplication in \\(\\mathbb{F}^n\\) has an identity (one) scalar multiplication in \\(\\mathbb{F}^n\\) is distributive Question for Jana No demonstration in exercises or book that scalar multiplication is commutative, why? 
Interesting Factoids You can take a field, look at an operation, and take that (minus the other op\u0026rsquo;s identity), and call it a group (groups (vector spaces (fields ))) ","html":"\u003ch2 id=\"key-sequence\"\u003eKey sequence\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eIn this chapter, we defined \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es, their definition, their closeness under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e, and their \u003ca href=\"/posts/kbhcomplex_number/#requirements\"\u003eproperties\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThese properties make them a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e: namely, they have, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e, \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003es, and distribution.\u003c/li\u003e\n\u003cli\u003enotably, they are different from a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e by having 1) two operations 2) additionally, \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e and distributivity. 
We then defined \\(\\mathbb{F}^n\\), defined \u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n\"\u003eaddition\u003c/a\u003e, \u003ca href=\"/posts/kbhlists_over_fields/#additive-inverse-of-mathbb-f-n\"\u003eadditive inverse\u003c/a\u003e, and \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eThese combined (with some \u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e) shows that \\(\\mathbb{F}^n\\) under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is a \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutative\u003c/a\u003e \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eLastly, we show that there is this magical thing called \u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e and that its \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, distributive, and has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e. 
Technically \u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e commutes too but extremely wonkily so we don\u0026rsquo;t really think about it.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/#addition-and-multiplication-of-id-7c982e7e-b8be-4053-a71e-fc0dba7a5da5-complex-number-s\"\u003eaddition and multiplication of complex numbers\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/#subtraction-and-division-of-id-7c982e7e-b8be-4053-a71e-fc0dba7a5da5-complex-number-s\"\u003esubtraction and division of complex numbers\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e: \\(\\mathbb{F}\\) is \\(\\mathbb{R}\\) or \\(\\mathbb{C}\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpower_math/\"\u003epower\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{F}^n\\): \u003ca href=\"/posts/kbhlists_over_fields/\"\u003eF^n\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/\"\u003ecoordinate\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#additive-inverse-of-mathbb-f-n\"\u003eadditive inverse of \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(0\\): \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/#requirements\"\u003eproperties of complex arithmetic\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ecommutativity\u003c/li\u003e\n\u003cli\u003eassociativity\u003c/li\u003e\n\u003cli\u003eidentities\u003c/li\u003e\n\u003cli\u003eadditive inverse\u003c/li\u003e\n\u003cli\u003emultiplicative inverse\u003c/li\u003e\n\u003cli\u003edistributive property\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eproperties of \\(\\mathbb{F}^n\\)\n\u003cul\u003e\n\u003cli\u003eaddition in \\(\\mathbb{F}^n\\) is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n-is-commutative\"\u003eaddition in \\(\\mathbb{F}^n\\) is commutative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e in \\(\\mathbb{F}^n\\) has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e (\u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#additive-inverse-of-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\) has an inverse\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003escalar multiplication in \\(\\mathbb{F}^n\\) has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e (one)\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e is distributive\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"question-for-jana\"\u003eQuestion for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cinput disabled=\"\" type=\"checkbox\"\u003e No demonstration in exercises or book that scalar \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e is commutative, why?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eYou can take a field, look at an operation, and take that (minus the other op\u0026rsquo;s identity), and call it a group\u003c/li\u003e\n\u003cli\u003e(groups (vector spaces (fields )))\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_a/","tags":null,"title":"Axler 1.A"},{"categories":null,"contents":"Key Sequence \\(\\mathbb{F}^{n}\\) not being a field kinda sucks, so we made an object called a \u0026ldquo;vector space\u0026rdquo; which essentially does everything a field does except without necessitating a multiplicative inverse Formally, a vector space is closed over addition and have a scalar multiplication. Its addition is commutative, both addition and scalar multiplication is associative, and distributivity holds. There is an additive identity, additive inverse, and multiplicative identity. We defined something called \\(\\mathbb{F}^{S}\\), which is the set of functions from a set \\(S\\) to \\(\\mathbb{F}\\). Turns out, \\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\) and we can secretly treat \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) as special cases of \\(\\mathbb{F}^{s}\\). We established that identity and inverse are unique additively in vector spaces. Lastly, we proved some expressions we already know: \\(0v=0\\), \\(-1v=-v\\). 
New Definitions addition and scalar multiplication vector space and vectors vector space \u0026ldquo;over\u0026rdquo; fields \\(V\\) denotes a vector space over \\(\\mathbb{F}\\) \\(-v\\) is defined as the additive inverse of \\(v \\in V\\) Results and Their Proofs \\(\\mathbb{F}^{\\infty}\\) is a Vector Space over \\(\\mathbb{F}\\) \\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\) All vector spaces \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) are just special cases \\(\\mathbb{F}^{S}\\): you can think about those as a mapping from coordinates \\((1,2,3, \\dots )\\) to their actual values in the \u0026ldquo;vector\u0026rdquo; additive identity is unique in a vector space additive inverse is unique in a vector space \\(0v=0\\), both ways (for zero scalars and vectors) \\(-1v=-v\\) Questions for Jana The way Axler presented the idea of \u0026ldquo;over\u0026rdquo; is a tad weird; is it really only scalar multiplication which hinders vector spaces without \\(\\mathbb{F}\\)? In other words, do the sets that form vector spaces, apart from the \\(\\lambda\\) used for scalar multiplication, need anything to do with the \\(\\mathbb{F}\\) they are \u0026ldquo;over\u0026rdquo;? The name of the field and what its over do not have to be the same\u0026mdash;\u0026ldquo;vector space \\(\\mathbb{C}^2\\) over \\(\\{0,1\\}\\)\u0026rdquo; is a perfectly valid statement If lists have finite length \\(n\\), then what are the elements of \\(\\mathbb{F}^{\\infty}\\) called? \u0026ldquo;we could think about \\(\\mathbb{F}^{\\infty}\\), but we aren\u0026rsquo;t gonna.\u0026rdquo; Why is \\(1v=v\\) an axiom, whereas we say that some \\(0\\) exists? 
because we know 1 already, and you can follow the behavor of scalar multiplication what\u0026rsquo;s that thing called again in proofs where you just steal the property of a constituent element?: inherits Interesting Factoids The simplest vector space is \\(\\{0\\}\\) ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathbb{F}^{n}\\) not being a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e kinda sucks, so we made an object called a \u0026ldquo;\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u0026rdquo; which essentially does everything a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e does except without necessitating a \u003ca href=\"/posts/kbhinverses/\"\u003emultiplicative inverse\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eFormally, a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e over \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and have a \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e. Its \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutative\u003c/a\u003e, both \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, and \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e holds. 
There is an \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e, \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e, and \u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eWe defined something called \\(\\mathbb{F}^{S}\\), which is the set of functions from a set \\(S\\) to \\(\\mathbb{F}\\). Turns out, \u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003e\\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\)\u003c/a\u003e and we can secretly treat \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) as special cases of \\(\\mathbb{F}^{s}\\).\u003c/li\u003e\n\u003cli\u003eWe established that \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e and \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e are unique additively in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es.\u003c/li\u003e\n\u003cli\u003eLastly, we proved some expressions we already know: \\(0v=0\\), \\(-1v=-v\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e and \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003evector space \u0026ldquo;over\u0026rdquo; fields\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(V\\) denotes a vector space over \\(\\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\\(-v\\) is defined as the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e of \\(v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinfty_is_a_vector_space_over_f/\"\u003e\\(\\mathbb{F}^{\\infty}\\) is a Vector Space over \\(\\mathbb{F}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003e\\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAll \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) are just special cases \\(\\mathbb{F}^{S}\\): you can think about those as a mapping from coordinates \\((1,2,3, \\dots )\\) to their actual values in the \u0026ldquo;vector\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity_is_unique_in_a_vector_space/\"\u003eadditive identity is unique in a vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003eadditive inverse is unique in a vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzero_times_vector/\"\u003e\\(0v=0\\)\u003c/a\u003e, both ways (for zero scalars and vectors)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbh1v_1/\"\u003e\\(-1v=-v\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eThe way Axler presented the idea of \u0026ldquo;over\u0026rdquo; is a tad weird; is it really only scalar multiplication which hinders vector spaces without \\(\\mathbb{F}\\)? 
In other words, do the sets that form vector spaces, apart from the \\(\\lambda\\) used for scalar multiplication, need anything to do with the \\(\\mathbb{F}\\) they are \u0026ldquo;over\u0026rdquo;?\u003c/del\u003e The \u003cstrong\u003ename\u003c/strong\u003e of the field and what its \u003cstrong\u003eover\u003c/strong\u003e do not have to be the same\u0026mdash;\u0026ldquo;vector space \\(\\mathbb{C}^2\\) over \\(\\{0,1\\}\\)\u0026rdquo; is a perfectly valid statement\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eIf lists have finite length \\(n\\), then what are the elements of \\(\\mathbb{F}^{\\infty}\\) called?\u003c/del\u003e \u0026ldquo;we could think about \\(\\mathbb{F}^{\\infty}\\), but we aren\u0026rsquo;t gonna.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eWhy is \\(1v=v\\) an axiom, whereas we say that \u003cem\u003esome\u003c/em\u003e \\(0\\) exists?\u003c/del\u003e because we know 1 already, and you can follow the behavor of scalar multiplication\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ewhat\u0026rsquo;s that thing called again in proofs where you just steal the property of a constituent element?\u003c/del\u003e: inherits\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe simplest vector space is \\(\\{0\\}\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_1_b/","tags":null,"title":"Axler 1.B"},{"categories":null,"contents":"Key Sequence we defined subspace and how to check for them we want to operate on subsets, so we defined the sum of subsets we saw that the sum of subspaces are the smallest containing subspace and finally, we defined direct sums and how to prove them New Definitions subspace sum of subsets direct sum Results and Their Proofs checking for subspaces simplified check for subspace sum of subspaces is the smallest subspace with both subspaces creating direct sums a sum of subsets is a 
direct sum IFF there is only one way to write \\(0\\) a sum of subsets is only a direct sum IFF their intersection is the set containing \\(0\\) Questions for Jana Does the additive identity have be the same between different subspaces of the same vector space? yes, otherwise the larger vector space has two additive identities. Does the addition and multiplication operations in a subspace have to be the same as its constituent vector space? by definition Why are direct sums defined on sub-spaces and not sum of subsets? because the union is usually not a subspace so we use sums and keep it in subspaces ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e and how to check for them\u003c/li\u003e\n\u003cli\u003ewe want to operate on subsets, so we defined the \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe saw that the \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subspaces\u003c/a\u003e are the \u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esmallest containing subspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand finally, we defined \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es and how to prove them\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003echecking for 
subspaces\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/#simplified-check-for-subspace\"\u003esimplified check for subspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esum of subspaces is the smallest subspace with both subspaces\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ecreating direct sums\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-there-is-only-one-way-to-write-0\"\u003ea sum of subsets is a direct sum IFF there is only one way to write \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-only-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-their-intersection-is-set-containing-0\"\u003ea sum of subsets is only a direct sum IFF their intersection is the set containing \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eDoes the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e have be the same between different \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of the same \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e?\u003c/del\u003e yes, otherwise the larger \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e has two \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identities\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eDoes the \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca 
href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e operations in a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e have to be the same as its constituent \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e?\u003c/del\u003e by definition\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eWhy are \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es defined on sub-\u003cstrong\u003e\u003cstrong\u003espaces\u003c/strong\u003e\u003c/strong\u003e and not \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e?\u003c/del\u003e because the union is usually not a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e so we use sums and keep it in subspaces\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_1_c/","tags":null,"title":"Axler 1.C"},{"categories":null,"contents":"3: Show that the set of differential real-valued functions \\(f\\) on the interval \\((-4,4)\\) such that \\(f\u0026rsquo;(-1)=3f(2)\\) is a subspace of \\(\\mathbb{R}^{(-4,4)}\\)\n4: Suppose \\(b \\in R\\). Show that the set of continuous real-valued functions \\(f\\) on the interval \\([0,1]\\) such that \\(\\int_{0}^{1}f=b\\) is a subspace of \\(\\mathbb{R}^{[0,1]}\\) IFF \\(b=0\\)\nAdditive Identity:\nassume \\(\\int_{0}^{1}f=b\\) is a subspace\n","html":"\u003cp\u003e3: Show that the set of differential real-valued functions \\(f\\) on the interval \\((-4,4)\\) such that \\(f\u0026rsquo;(-1)=3f(2)\\) is a subspace of \\(\\mathbb{R}^{(-4,4)}\\)\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e4: Suppose \\(b \\in R\\). 
Show that the set of continuous real-valued functions \\(f\\) on the interval \\([0,1]\\) such that \\(\\int_{0}^{1}f=b\\) is a subspace of \\(\\mathbb{R}^{[0,1]}\\) IFF \\(b=0\\)\u003c/p\u003e\n\u003cp\u003eAdditive Identity:\u003c/p\u003e\n\u003cp\u003eassume \\(\\int_{0}^{1}f=b\\) is a subspace\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_1_c_excercises/","tags":null,"title":"Axler 1.C Exercises"},{"categories":null,"contents":"Key Sequence we defined the combination of a list of vectors as a linear combination and defined set of all linear combination of vectors to be called a span we defined the idea of a finite-dimensional vector space vis a vi spanning we took a god-forsaken divergence into polynomials that will surely not come back and bite us in chapter 4 we defined linear independence + linear dependence and, from those definition, proved the actual usecase of these concepts which is the Linear Dependence Lemma we apply the Linear Dependence Lemma to show that length of linearly-independent list \\(\\leq\\) length of spanning list as well as that finite-dimensional vector spaces make finite subspaces. 
Both of these proofs work by making linearly independent lists\u0026mdash;the former by taking a spanning list and making it smaller and smaller, and the latter by taking a linearly independent list and making it bigger and bigger New Definitions linear combination span + \u0026ldquo;spans\u0026rdquo; finite-dimensional vector space infinite-demensional vector space finite-dimensional subspaces polynomial \\(\\mathcal{P}(\\mathbb{F})\\) \\(\\mathcal{P}_{m}(\\mathbb{F})\\) degree of a polynomial \\(\\deg p\\) linear independence and linear dependence Linear Dependence Lemma Results and Their Proofs span is the smallest subspace containing all vectors in the list \\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\) the world famous Linear Dependence Lemma and its fun issue length of linearly-independent list \\(\\leq\\) length of spanning list subspaces of inite-dimensional vector spaces is finite dimensional Questions for Jana obviously polynomials are non-linear structures; under what conditions make them nice to work with in linear algebra? what is the \u0026ldquo;obvious way\u0026rdquo; to change Linear Dependence Lemma\u0026rsquo;s part \\(b\\) to make \\(v_1=0\\) work? for the finite-dimensional subspaces proof, though we know that the process terminates, how do we know that it terminates at a spanning list of \\(U\\) and not just a linearly independent list in \\(U\\)? direct sum and linear independence related; how exactly? Interesting Factoids I just ate an entire Chinese new-year worth of food while typing this up. 
That\u0026rsquo;s worth something right\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined the combination of a list of vectors as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e and defined set of all \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es to be called a \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe defined the idea of a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e vis a vi \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe took a god-forsaken divergence into \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es that will surely not come back and bite us in chapter 4\u003c/li\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e + \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinear dependence\u003c/a\u003e and, from those definition, proved the actual usecase of these concepts which is the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe apply the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e to show that \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e as well as that \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional vector spaces make finite subspaces\u003c/a\u003e. 
Both of these proofs work by making \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e lists\u0026mdash;the former by taking a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list and making it smaller and smaller, and the latter by taking a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list and making it bigger and bigger\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e + \u0026ldquo;\u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#mathcal-p--mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#mathcal-p-m--mathbb-f\"\u003e\\(\\mathcal{P}_{m}(\\mathbb{F})\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#degree-of-a-polynomial-deg-p\"\u003edegree of a polynomial \\(\\deg p\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e and \u003ca 
href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinear dependence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspan/#span-is-the-smallest-subspace-containing-all-vectors-in-the-list\"\u003espan is the smallest subspace containing all vectors in the list\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#mathcal-p--mathbb-f--is-a-vector-space-over-mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethe world famous \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e and its fun \u003ca href=\"/posts/kbhlinear_dependence_lemma/#issue\"\u003eissue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003esubspaces of inite-dimensional vector spaces is finite dimensional\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eobviously \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es are non-linear structures; under what conditions make them nice to work with in linear algebra?\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ewhat is the \u0026ldquo;obvious way\u0026rdquo; to change \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u0026rsquo;s part \\(b\\) to make 
\\(v_1=0\\) work?\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003efor the \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e proof, though we know that the process terminates, how do we know that it terminates at a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(U\\) and not just a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(U\\)?\u003c/li\u003e\n\u003cli\u003edirect sum and linear independence related; how exactly?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eI just ate an entire Chinese new-year worth of food while typing this up. That\u0026rsquo;s worth \u003cem\u003esomething\u003c/em\u003e right\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_2_a/","tags":null,"title":"Axler 2.A"},{"categories":null,"contents":"Key Sequence we defined basis of a vector space\u0026mdash;a linearly independent spanning list of that vector space\u0026mdash;and shown that to be a basis one has to be able to write a write an unique spanning list we show that you can chop a spanning list of a space down to a basis or build a linearly independent list up to a basis because of this, you can make a spanning list of finite-dimensional vector spaces and chop it down to a basis: so every finite-dimensional vector space has a basis lastly, we can use the fact that you can grow list to basis to show that every subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\) New Definitions basis and criteria for basis\nI mean its a chapter on bases not sure what you are expecting.\nResults and Their Proofs a list is a basis if you can write every memeber of their span uniquely every finite-dimensional vector space has a basis dualing basis constructions all spanning lists contains a basis of which you are spanning a linearly independent list 
expends to a basis every subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\) Questions for Jana Is the subspace direct sum proof a unique relationship? That is, is every complement \\(W\\) for each \\(U \\subset V\\) unique? ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u0026mdash;a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of that \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u0026mdash;and shown that to \u003ca href=\"/posts/kbhbasis/#criteria-for-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ebe a basis\u003c/a\u003e one has to be able to write a write an unique \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list\u003c/li\u003e\n\u003cli\u003ewe show that you can \u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003echop a spanning list of a space down to a basis\u003c/a\u003e or \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ebuild a linearly independent list up to a basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebecause of this, you can make a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector spaces\u003c/a\u003e and \u003ca 
href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003echop it down to a basis\u003c/a\u003e: so every \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e has a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elastly, we can use the fact that you can grow \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e to \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e to show that \u003ca href=\"/posts/kbhdirect_sum/#every-id-345c37fa-5d4c-44e9-ad03-2fe7e5a37224-subspace-of-v-is-a-part-of-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-equaling-to-v\"\u003eevery subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e and \u003ca href=\"/posts/kbhbasis/#criteria-for-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ecriteria for basis\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eI mean its a chapter on \u003ca href=\"/posts/kbhbasis/\"\u003ebases\u003c/a\u003e not sure what you are expecting.\u003c/p\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis/#criteria-for-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea list is a basis if you can write every memeber of their span uniquely\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery finite-dimensional vector space has a basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edualing basis 
constructions\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea linearly independent list expends to a basis\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/#every-id-345c37fa-5d4c-44e9-ad03-2fe7e5a37224-subspace-of-v-is-a-part-of-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-equaling-to-v\"\u003eevery subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIs the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhproof/\"\u003eproof\u003c/a\u003e a unique relationship? That is, is every complement \\(W\\) for each \\(U \\subset V\\) unique?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_2_b/","tags":null,"title":"Axler 2.B"},{"categories":null,"contents":"Key Sequence Because Length of Basis Doesn\u0026rsquo;t Depend on Basis, we defined dimension as the same, shared length of basis in a vector space We shown that lists of the right length (i.e. 
dim that space) that is either spanning or linearly independent must be a basis\u0026mdash;\u0026ldquo;half is good enough\u0026rdquo; theorems we also shown that \\(dim(U_1+U_2) = dim(U_1)+dim(U_2) - dim(U_1 \\cap U_2)\\): dimension of sums New Definitions dimension Results and Their Proofs Length of Basis Doesn\u0026rsquo;t Depend on Basis lists of right length are basis linearly independent list of length dim V are a basis of V spanning list of length of dim V are a basis of V dimension of sums Questions for Jana Example 2.41: why is it that \\(\\dim U \\neq 4\\)? We only know that \\(\\dim \\mathcal{P}_{3}(\\mathbb{R}) = 4\\), and \\(\\dim U \\leq 4\\). Is it because \\(U\\) (i.e. basis of \\(U\\) doesn\u0026rsquo;t span the polynomial) is strictly a subset of \\(\\mathcal{P}_{3}(\\mathbb{R})\\), so there must be some extension needed? because we know that \\(U\\) isn\u0026rsquo;t all of \\(\\mathcal{P}_{3}\\). Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eBecause \u003ca href=\"/posts/kbhlength_of_basis_doesn_t_depend_on_basis/\"\u003eLength of Basis Doesn\u0026rsquo;t Depend on Basis\u003c/a\u003e, we defined \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e as the same, shared length of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWe shown that lists of the right length (i.e. 
dim that space) that is \u003cem\u003eeither\u003c/em\u003e \u003ca href=\"/posts/kbhdimension/#spanning-list-of-length-of-dim-v-are-a-basis-of-v\"\u003espanning\u003c/a\u003e or \u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent\u003c/a\u003e must be a basis\u0026mdash;\u0026ldquo;half is good enough\u0026rdquo; theorems\u003c/li\u003e\n\u003cli\u003ewe also shown that \\(dim(U_1+U_2) = dim(U_1)+dim(U_2) - dim(U_1 \\cap U_2)\\): \u003ca href=\"/posts/kbhsum_of_subsets/#id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-of-sums\"\u003edimension of sums\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlength_of_basis_doesn_t_depend_on_basis/\"\u003eLength of Basis Doesn\u0026rsquo;t Depend on Basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elists of right length are \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdimension/#spanning-list-of-length-of-dim-v-are-a-basis-of-v\"\u003espanning list of length of dim V are a basis of V\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-of-sums\"\u003edimension of sums\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for 
Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eExample 2.41: why is it that \\(\\dim U \\neq 4\\)? We only know that \\(\\dim \\mathcal{P}_{3}(\\mathbb{R}) = 4\\), and \\(\\dim U \\leq 4\\). Is it because \\(U\\) (i.e. \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U\\) doesn\u0026rsquo;t \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e the polynomial) is strictly a subset of \\(\\mathcal{P}_{3}(\\mathbb{R})\\), so there must be \u003cem\u003esome\u003c/em\u003e extension needed?\u003c/del\u003e because we know that \\(U\\) isn\u0026rsquo;t all of \\(\\mathcal{P}_{3}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_2_c/","tags":null,"title":"Axler 2.C"},{"categories":null,"contents":"OMGOMGOMG its Linear Maps time! \u0026ldquo;One of the key definitions in linear algebra.\u0026rdquo;\nKey Sequence We define these new-fangled functions called Linear Maps, which obey \\(T(u+v) = Tu+Tv\\) and \\(T(\\lambda v) = \\lambda Tv\\) We show that the set of all linear maps between two vector spaces \\(V,W\\) is denoted \\(\\mathcal{L}(V,W)\\); and, in fact, by defining addition and scalar multiplication of Linear Maps in the way you\u0026rsquo;d expect, \\(\\mathcal{L}(V,W)\\) is a vector space! this also means that we can use effectively the \\(0v=0\\) proof to show that linear maps take \\(0\\) to \\(0\\) we show that Linear Maps can be defined uniquely by where it takes the basis of a vector space; in fact, there exists a Linear Map to take the basis anywhere you want to go! 
though this doesn\u0026rsquo;t usually make sense, we call the \u0026ldquo;composition\u0026rdquo; operation on Linear Maps their \u0026ldquo;product\u0026rdquo; and show that this product is associative, distributive, and has an identity New Definitions Linear Map \u0026mdash; additivity (adding \u0026ldquo;distributes\u0026rdquo;) and homogeneity (scalar multiplication \u0026ldquo;factors\u0026rdquo;) \\(\\mathcal{L}(V,W)\\) any polynomial map from Fn to Fm is a linear map addition and scalar multiplication on \\(\\mathcal{L}(V,W)\\); and, as a bonus, \\(\\mathcal{L}(V,W)\\) a vector space! naturally (almost by the same \\(0v=0\\) proof), linear maps take \\(0\\) to \\(0\\) Product of Linear Maps is just composition. These operations are: associative distributive has an identity Results and Their Proofs technically a result: any polynomial map from Fn to Fm is a linear map basis of domain of linear maps uniquely determines them Questions for Jana why does the second part of the basis of domain proof make it unique? ","html":"\u003cp\u003eOMGOMGOMG its \u003cem\u003eLinear Maps\u003c/em\u003e time! 
\u0026ldquo;One of the key definitions in linear algebra.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWe define these new-fangled functions called \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Maps\u003c/a\u003e, which obey \\(T(u+v) = Tu+Tv\\) and \\(T(\\lambda v) = \\lambda Tv\\)\u003c/li\u003e\n\u003cli\u003eWe show that the set of all linear maps between two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(V,W\\) is denoted \\(\\mathcal{L}(V,W)\\); and, in fact, by defining \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es in the way you\u0026rsquo;d expect, \\(\\mathcal{L}(V,W)\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e!\n\u003cul\u003e\n\u003cli\u003ethis also means that we can use effectively the \\(0v=0\\) proof to show that \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe show that \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es can be defined uniquely by \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ewhere it takes the basis of a vector space\u003c/a\u003e; in fact, there exists a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e to take the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e \u003cem\u003eanywhere\u003c/em\u003e you want to go!\u003c/li\u003e\n\u003cli\u003ethough this doesn\u0026rsquo;t usually make sense, we call the \u0026ldquo;composition\u0026rdquo; operation on \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es their \u0026ldquo;product\u0026rdquo; and show that this product is \u003ca 
href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributive\u003c/a\u003e, and has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \u0026mdash; additivity (adding \u0026ldquo;distributes\u0026rdquo;) and homogeneity (scalar multiplication \u0026ldquo;factors\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#mathcal-l--v-w\"\u003e\\(\\mathcal{L}(V,W)\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#any-map-from-mathbb-f-n-to-mathbb-f-m\"\u003eany polynomial map from Fn to Fm is a linear map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#addition-and-scalar-multiplication-on-mathcal-l--v-w\"\u003eaddition and scalar multiplication on \\(\\mathcal{L}(V,W)\\)\u003c/a\u003e; and, as a bonus, \\(\\mathcal{L}(V,W)\\) a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e!\u003c/li\u003e\n\u003cli\u003enaturally (almost by the same \\(0v=0\\) proof), \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_linear_maps/\"\u003eProduct of Linear Maps\u003c/a\u003e is just composition. 
These operations are:\n\u003cul\u003e\n\u003cli\u003eassociative\u003c/li\u003e\n\u003cli\u003edistributive\u003c/li\u003e\n\u003cli\u003ehas an identity\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etechnically a result: \u003ca href=\"/posts/kbhlinear_map/#any-map-from-mathbb-f-n-to-mathbb-f-m\"\u003eany polynomial map from Fn to Fm is a linear map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain of linear maps uniquely determines them\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy does the second part of the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e proof make it unique?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_a/","tags":null,"title":"Axler 3.A"},{"categories":null,"contents":"Key Sequence we defined the null space and injectivity from that, we showed that injectivity IFF implies that null space is \\(\\{0\\}\\), essentially because if \\(T0=0\\) already, there cannot be another one that also is taken to \\(0\\) in an injective function we defined range and surjectivity we showed that these concepts are strongly related by the fundamental theorem of linear maps: if \\(T \\in \\mathcal{L}(V,W)\\), then \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\) from the fundamental theorem, we showed the somewhat intuitive pair about the sizes of maps: map to smaller space is not injective, map to bigger space is not surjective we then applied that result to show results about homogeneous systems homogenous system with more variables than equations has nonzero solutions inhomogenous system with more equations than variables has no solutions for an arbitrary set of constants New Definitions 
null space injectivity range surjectivity homogeneous system Results and Their Proofs the null space is a subspace of the domain injectivity IFF implies that null space is \\(\\{0\\}\\) the fundamental theorem of linear maps \u0026ldquo;sizes\u0026rdquo; of maps map to smaller space is not injective map to bigger space is not surjective solving systems of equations: homogenous system with more variables than equations has nonzero solutions inhomogenous system with more equations than variables has no solutions for an arbitrary set of constants Questions for Jana \u0026ldquo;To prove the inclusion in the other direction, suppose v 2 null T.\u0026rdquo; for 3.16; what is the first direction? maybe nothing maps to \\(0\\) ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e and \u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003efrom that, we showed that \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity IFF implies that null space is \\(\\{0\\}\\)\u003c/a\u003e, essentially because if \\(T0=0\\) already, there cannot be another one that also is taken to \\(0\\) in an \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e function\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe showed that these concepts are strongly related by the \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003efundamental theorem of linear maps\u003c/a\u003e: if \\(T \\in \\mathcal{L}(V,W)\\), then \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\)\u003c/li\u003e\n\u003cli\u003efrom the fundamental theorem, we 
showed the somewhat intuitive pair about the sizes of maps: \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e, \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then applied that result to show results about \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous\u003c/a\u003e systems\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-variables-than-equations-has-nonzero-solutions\"\u003ehomogenous system with more variables than equations has nonzero solutions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#in-id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-equations-than-variables-has-no-solutions-for-an-arbitrary-set-of-constants\"\u003einhomogenous system with more equations than variables has no solutions for an arbitrary set of constants\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous system\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their 
Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnull_space/#the-null-space-is-a-id-345c37fa-5d4c-44e9-ad03-2fe7e5a37224-subspace-of-the-domain\"\u003ethe null space is a subspace of the domain\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity IFF implies that null space is \\(\\{0\\}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003efundamental theorem of linear maps\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;sizes\u0026rdquo; of maps\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003esolving systems of equations:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-variables-than-equations-has-nonzero-solutions\"\u003ehomogenous system with more variables than equations has nonzero solutions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#in-id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-equations-than-variables-has-no-solutions-for-an-arbitrary-set-of-constants\"\u003einhomogenous system with more equations than variables has no solutions for an arbitrary set of constants\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for 
Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003e\u0026ldquo;To prove the inclusion in the other direction, suppose v 2 null T.\u0026rdquo; for 3.16; what is the \u003cem\u003efirst\u003c/em\u003e direction?\u003c/del\u003e maybe nothing maps to \\(0\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_b/","tags":null,"title":"Axler 3.B"},{"categories":null,"contents":"matricies!!!!\nKey Sequence matricies exist, you can add them, scalarly multiply them, and actually multiply them they can represent Linear Maps by showing where they take basis unsurprisingly, the set of matricies of a shape is a vector space New Definitions matricies matrix of Linear Map matrix addition and scalar multiplications matrix multiplication \\(\\mathbb{F}^{m,n}\\) Results and Their Proofs sums and scalar multiplication of matricies, and why they work to represent Linear Maps \\(\\mathbb{F}^{m,n}\\) is a vector space Interesting Factoids its literally matricies\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e!!!!\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e exist, you can \u003ca href=\"/posts/kbhmatricies/#sums-and-scalar-multiplication-of-id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies\"\u003eadd them, scalarly multiply them\u003c/a\u003e, and \u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003eactually multiply them\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethey \u003ca href=\"/posts/kbhmatricies/#matrix-of-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map\"\u003ecan represent Linear Maps by showing where they take basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eunsurprisingly, the \u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003eset of matricies of a shape is a vector space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#matrix-of-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map\"\u003ematrix of Linear Map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#sums-and-scalar-multiplication-of-id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies\"\u003ematrix addition and scalar multiplications\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#sums-and-scalar-multiplication-of-id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies\"\u003esums and scalar multiplication of matricies\u003c/a\u003e, and why they work to represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\) is a vector space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eits literally \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_c/","tags":null,"title":"Axler 3.C"},{"categories":null,"contents":"isomorphisms. 
Somebody\u0026rsquo;s new favourite word since last year.\nKey Sequence we showed that a linear map\u0026rsquo;s inverse is unique, and so named the inverse \\(T^{-1}\\) we then showed an important result, that injectivity and surjectivity implies invertability this property allowed us to use invertable maps to define isomorphic spaces, naming the invertable map between them as the isomorphism we see that having the same dimension is enough to show invertability (IFF), because we can use basis of domain to map the basis of one space to another we then use that property to establish that matricies and linear maps have an isomorphism between them: namely, the matrixify operator \\(\\mathcal{M}\\). this isomorphism allow us to show that the dimension of a set of Linear Maps is the product of the dimensions of their domain and codomain (that \\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)) We then, for some unknown reason, decided that right this second we gotta define matrix of a vector, and that linear map applications are like matrix multiplication because of it. 
Not sure how this relates finally, we defined a Linear Map from a space to itself as an operator we finally show an important result that, despite not being true for infinite-demensional vector space, injectivity is surjectivity in finite-dimensional operators New Definitions invertability isomorphism + isomorphic vector spaces matrix of a vector operator Results and Their Proofs linear map inverse is unique injectivity and surjectivity implies invertability two vector spaces are isomorphic IFF they have the same dimension matricies and Linear Maps from the right dimensions are isomorphic \\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\) \\(\\mathcal{M}(T)_{.,k} = \\mathcal{M}(Tv_{k})\\), a result of how everything is defined (see matrix of a vector) \u0026ldquo;each column of a matrix represents where each of the basis of the input gets taken to\u0026rdquo; So applying a vector to a matrix shows the linear combination of what where the basis sent linear maps are like matrix multiplication injectivity is surjectivity in finite-dimensional operators Questions for Jana why doesn\u0026rsquo;t axler just say the \u0026ldquo;basis of domain\u0026rdquo; directly (i.e. he did a lin comb instead) for the second direction for the two vector spaces are isomorphic IFF they have the same dimension proof? because the next steps for spanning (surjectivity) and linear independence (injectivity) is made more obvious clarify the matricies and Linear Maps from the right dimensions are isomorphic proof what is the \u0026ldquo;multiplication by \\(x^{2}\\)\u0026rdquo; operator? literally multiplying by \\(x^{2}\\) how does the matrix of a vector detour relate to the content before and after? 
I suppose an isomorphism exists but it isn\u0026rsquo;t explicitly used in the linear maps are like matrix multiplication proof, which is the whole point because we needed to close the loop of being able to linear algebra with matricies completely, which we didn\u0026rsquo;t know without the isomorphism between matricies and maps Interesting Factoids ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003es. Somebody\u0026rsquo;s new favourite word since last year.\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe showed that a \u003ca href=\"/posts/kbhinvertability/#linear-map-inverse-is-unique\"\u003elinear map\u0026rsquo;s inverse is unique\u003c/a\u003e, and so named the inverse \\(T^{-1}\\)\u003c/li\u003e\n\u003cli\u003ewe then showed an important result, that \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethis property allowed us to use \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e maps to define \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e spaces, naming the \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e map between them as the \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ewe see that \u003ca href=\"/posts/kbhisomorphism/#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003ehaving the same dimension is enough to show invertability (IFF)\u003c/a\u003e, because we can use \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e to map the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of one space to 
another\u003c/li\u003e\n\u003cli\u003ewe then use that property to establish that \u003ca href=\"/posts/kbhisomorphism/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies-and-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map-s-from-the-right-id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-s-are-id-3f5ba3a5-15d4-4b58-99de-09eb1e4713cb-isomorphic\"\u003ematricies and linear maps have an isomorphism between them\u003c/a\u003e: namely, the matrixify operator \\(\\mathcal{M}\\).\u003c/li\u003e\n\u003cli\u003ethis \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e allow us to show that the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of a set of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es is the product of the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003es of their domain and codomain (that \u003ca href=\"/posts/kbhisomorphism/#dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWe then, for some unknown reason, decided that right this second we gotta define \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e, and that \u003ca href=\"/posts/kbhmatrix_multiplication/#linear-maps-are-like-matrix-multiplication\"\u003elinear map applications are like matrix multiplication\u003c/a\u003e because of it. 
Not sure how this relates\u003c/li\u003e\n\u003cli\u003efinally, we defined a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e from a space to itself as an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ewe finally show an important result that, despite not being true for \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e, \u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e + \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinvertability/#linear-map-inverse-is-unique\"\u003elinear map inverse is unique\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies-and-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map-s-from-the-right-id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-s-are-id-3f5ba3a5-15d4-4b58-99de-09eb1e4713cb-isomorphic\"\u003ematricies and Linear Maps from the right dimensions are isomorphic\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/#dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{M}(T)_{.,k} = \\mathcal{M}(Tv_{k})\\), a result of how everything is defined (see \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e)\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;each column of a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e represents where each of the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the input gets taken to\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eSo applying a vector to a matrix shows the linear combination of what where the basis sent\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/#linear-maps-are-like-matrix-multiplication\"\u003elinear maps are like matrix multiplication\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003ewhy doesn\u0026rsquo;t axler just say the \u0026ldquo;\u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e\u0026rdquo; directly (i.e. he did a lin comb instead) for the second direction for the \u003ca href=\"/posts/kbhisomorphism/#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/a\u003e proof?\u003c/del\u003e because the next steps for \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e (\u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e) and \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e (\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e) is made more obvious\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eclarify the \u003ca href=\"/posts/kbhisomorphism/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies-and-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map-s-from-the-right-id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-s-are-id-3f5ba3a5-15d4-4b58-99de-09eb1e4713cb-isomorphic\"\u003ematricies and Linear Maps from the right dimensions are isomorphic\u003c/a\u003e proof\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ewhat is the \u0026ldquo;multiplication by \\(x^{2}\\)\u0026rdquo; \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e?\u003c/del\u003e literally multiplying by \\(x^{2}\\)\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ehow does the \u003ca 
href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e detour relate to the content before and after? I suppose an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e exists but it isn\u0026rsquo;t explicitly used in the \u003ca href=\"/posts/kbhmatrix_multiplication/#linear-maps-are-like-matrix-multiplication\"\u003elinear maps are like matrix multiplication\u003c/a\u003e proof, which is the whole point\u003c/del\u003e because we needed to close the loop of being able to linear algebra with \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e completely, which we didn\u0026rsquo;t know without the \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between matricies and maps\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_d/","tags":null,"title":"Axler 3.D"},{"categories":null,"contents":"No idea why this is so long!!!\nKey Sequence Firehose of a chapter.\nWe first began an unrelated exploration in Product of Vector Spaces (\u0026ldquo;tuples\u0026rdquo;): we show that the Product of Vector Spaces is a vector space because you can build a list out of zeroing every element except each one on each basis of each element of the tuple sequentially, we learned that the dimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension. 
we defined the product-to-sum map \\(\\Gamma\\) \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective and, as a result, \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) We then tackled the fun part of this chapter, which is affine subsets, parallel structures, quotient spaces, quotient map (affine subsetification maps) we learned an important and useful result that two affine subsets parallel to \\(U\\) are either equal or disjoint (\\(v-w \\in U\\) means \\(v+U = w+U\\) means \\(v+U \\cap w+U \\neq \\emptyset\\), means the first thing) we defined the operations on quotient space, and showed that quotient space operations behave uniformly on equivalent affine subsets. This, and the usual closer proof, demonstrates that quotient spaces is a vector space with the help of the affine subsetification map (the quotient map \\(\\pi\\)), we show that the dimension of a quotient space is the difference between dimensions of its constituents essentially by invoking rank-nullity theorem after knowing the fact that \\(null\\ \\pi = U\\) (because \\(u+U\\) is an affine subset that has not been shifted (think about a line moving along itself\u0026hellip; it doesn\u0026rsquo;t move)) Then, and I\u0026rsquo;m not quite sure why, we defined \\(\\widetilde{T}: V / null\\ T \\to W\\), for some \\(T: V\\to W\\), defined as \\(\\widetilde{T}(v+null\\ T) = Tv\\). We show that the map is Linear, injective, its range is \\(range\\ T\\), and so it forms an isomorphism between \\(V / null\\ T\\) and \\(range\\ T\\). 
Here\u0026rsquo;s something: products and quotients, the intuition\nNew Definitions Product of Vector Spaces operations on Product of Vector Spaces product summation map \\(\\Gamma\\) sum of vector and subspace parallel + affine subset quotient space operations on the quotient space quotient map \\(\\widetilde{T}\\) Results and Their Proofs Product of Vector Spaces is a vector space dimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension Results relating to \\(\\Gamma\\) \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) results relating to affine subsets and quotient spaces two affine subsets parallel to \\(U\\) are either equal or disjoint quotient space operations behave uniformly on equivalent affine subsets quotient space is a vector space: bleh just prove it yourself. additive identity is \\(0+U\\) and additive inverse is \\(-v + U\\). dimension of a quotient space is the difference between dimensions of its constituents results relating to \\(\\widetilde{T}\\) \\(\\widetilde{T}\\) is well defined properties of \\(\\widetilde{T}\\) it is linear it is injective its range is the range of \\(range\\ T\\) it is an isomorphism between \\(V / null\\ T\\) and \\(range\\ T\\) Questions for Jana what\u0026rsquo;s the point of learning about \\(\\widetilde{T}\\)? how are Product of Vector Spaces and quotient space opposites of each other?: products and quotients, the intuition Interesting Factoids Happy Lunar New Year! 
Also, let\u0026rsquo;s hope this is not a trend:\n","html":"\u003cp\u003eNo idea why this is so long!!!\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cp\u003eFirehose of a chapter.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWe first began an unrelated exploration in \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es (\u0026ldquo;tuples\u0026rdquo;):\n\u003cul\u003e\n\u003cli\u003ewe show that the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebecause you can build a list out of zeroing every element except each one on each \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each element of the tuple sequentially, we learned that the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/#dimension-of-the-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003ewe defined the product-to-sum map \\(\\Gamma\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-gamma-is-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand, as a result, \u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim 
U_{m}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWe then tackled the fun part of this chapter, which is \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es, \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e structures, \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003es, \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e (\u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003eification maps)\n\u003cul\u003e\n\u003cli\u003ewe learned an important and useful result that \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e (\\(v-w \\in U\\) means \\(v+U = w+U\\) means \\(v+U \\cap w+U \\neq \\emptyset\\), means the first thing)\u003c/li\u003e\n\u003cli\u003ewe defined the \u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on quotient space\u003c/a\u003e, and showed that \u003ca href=\"/posts/kbhquotient_space/#id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space-operations-behave-uniformly-on-equivalent-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s\"\u003equotient space operations behave uniformly on equivalent affine subsets\u003c/a\u003e. 
This, and the usual closer proof, demonstrates that \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003es is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewith the help of the \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003eification map (the \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e \\(\\pi\\)), we show that the \u003ca href=\"/posts/kbhquotient_space/#dimension-of-a-quotient-space-is-the-difference-between-dimensions-of-its-constituents\"\u003edimension of a quotient space is the difference between dimensions of its constituents\u003c/a\u003e essentially by invoking \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e after knowing the fact that \\(null\\ \\pi = U\\) (because \\(u+U\\) is an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e that has not been shifted (think about a line moving along itself\u0026hellip; it doesn\u0026rsquo;t move))\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThen, and I\u0026rsquo;m not quite sure why, we defined \\(\\widetilde{T}: V / null\\ T \\to W\\), for some \\(T: V\\to W\\), defined as \\(\\widetilde{T}(v+null\\ T) = Tv\\).\n\u003cul\u003e\n\u003cli\u003eWe show that the map is \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear\u003c/a\u003e, \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, its range is \\(range\\ T\\), and so it forms an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between \\(V / null\\ T\\) and \\(range\\ T\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHere\u0026rsquo;s something: \u003ca href=\"/posts/kbhproducts_and_quotients_the_intuition/\"\u003eproducts and quotients, the intuition\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew 
Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Spaces\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/#id-9700ea39-282d-48ef-a959-a416eee0d3ec-operation-s-on-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s\"\u003eoperations on Product of Vector Spaces\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/\"\u003eproduct summation map\u003c/a\u003e \\(\\Gamma\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_vector_and_subspace/\"\u003esum of vector and subspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e + \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on the quotient space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_twiddle/\"\u003e\\(\\widetilde{T}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/#id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-a-id-123d705f-7ede-44bf-882a-04c2f123f7fc-vector-space\"\u003eProduct of Vector Spaces is a vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhproduct_of_vector_spaces/#dimension-of-the-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eResults relating to \\(\\Gamma\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-gamma-is-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eresults relating to \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es and \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/#id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space-operations-behave-uniformly-on-equivalent-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s\"\u003equotient space operations behave uniformly on equivalent affine subsets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e: bleh just prove it yourself. \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e is \\(0+U\\) and \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e is \\(-v + U\\).\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/#dimension-of-a-quotient-space-is-the-difference-between-dimensions-of-its-constituents\"\u003edimension of a quotient space is the difference between dimensions of its constituents\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eresults relating to \\(\\widetilde{T}\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_twiddle/#widetilde-t-is-well-defined\"\u003e\\(\\widetilde{T}\\) is well defined\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_twiddle/#properties-of-widetilde-t\"\u003eproperties of \\(\\widetilde{T}\\)\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eit is linear\u003c/li\u003e\n\u003cli\u003eit is injective\u003c/li\u003e\n\u003cli\u003eits range is the range of \\(range\\ T\\)\u003c/li\u003e\n\u003cli\u003eit is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between \\(V / null\\ T\\) and \\(range\\ T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhat\u0026rsquo;s the point of learning about \\(\\widetilde{T}\\)?\u003c/li\u003e\n\u003cli\u003ehow are \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es and \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e opposites of each other?: \u003ca href=\"/posts/kbhproducts_and_quotients_the_intuition/\"\u003eproducts and quotients, the 
intuition\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eHappy Lunar New Year! Also, let\u0026rsquo;s hope this is not a trend:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-21_00-33-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_e/","tags":null,"title":"Axler 3.E"},{"categories":null,"contents":"Because duality is fun and I\u0026rsquo;m bored and houjun-being-obtuse.\nKey Sequence New Definitions linear functional dual space Results and Their Proofs dimension of dual space is equivalent to the original space Questions for Jana Interesting Factoids Hello from onboard NH107! Or perhaps my next connecting flight, or from China.\n","html":"\u003cp\u003eBecause duality is fun and I\u0026rsquo;m bored and \u003ccode\u003ehoujun-being-obtuse\u003c/code\u003e.\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdual_space/\"\u003edual space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdual_space/#dimension-of-dual-space-is-equivalent-to-the-original-space\"\u003edimension of dual space is equivalent to the original space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eHello from onboard NH107! 
Or perhaps my next connecting flight, or from China.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_f/","tags":null,"title":"Axler 3.F"},{"categories":null,"contents":"EIGENSTUFF and OPERATORS! Invariant subspaces are nice.\nSometimes, if we can break the domain of a linear map down to its eigenvalues, we can understand what its doing on a component-wise level.\nKey Sequence we defined an invariant subspace, and gave a name to 1-D invariant subspaces: the span of eigenvectors we showed some properties of eigenvalues and showed that a list of eigenvectors are linearly independent a correlate of this is that operators on finite dimensional V has at most dim V eigenvalues finally, we defined map restriction operator and quotient operator, and showed that they were well-defined New Definitions invariant subspace conditions for nontrivial invariant subspace eigenvalues + eigenvectors + eigenspace two new operators: map restriction operator and quotient operator Results and Their Proofs properties of eigenvalues list of eigenvectors are linearly independent eigenspaces are disjoint operators on finite dimensional V has at most dim V eigenvalues quotient operator is well-defined Questions for Jana Interesting Factoids \u0026ldquo;eigenvalue\u0026rdquo; is sometimes called the \u0026ldquo;characterizing value\u0026rdquo; of a map\nfinding eigenvalues with actual numbers natural choordinates of a map ","html":"\u003cp\u003eEIGENSTUFF and \u003ca href=\"/posts/kbhoperator/\"\u003eOPERATOR\u003c/a\u003eS! 
Invariant subspaces are nice.\u003c/p\u003e\n\u003cp\u003eSometimes, if we can break the domain of a linear map down to its eigenvalues, we can understand what its doing on a component-wise level.\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e, and gave a name to 1-D \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es: the span of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003ewe showed some \u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e and showed that a \u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003elist of eigenvectors are linearly independent\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ea correlate of this is that \u003ca href=\"/posts/kbheigenvalue/#operators-on-finite-dimensional-v-has-at-most-dim-v-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eoperators on finite dimensional V has at most dim V eigenvalues\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003efinally, we defined \u003ca href=\"/posts/kbhmap_restriction_operator/\"\u003emap restriction operator\u003c/a\u003e and \u003ca href=\"/posts/kbhquotient_operator/\"\u003equotient operator\u003c/a\u003e, and showed that they were well-defined\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003econditions for \u003ca href=\"/posts/kbhinvariant_subspace/#nontrivial-id-731fad15-1ec3-4619-8532-1290fefd3b89-invariant-subspace\"\u003enontrivial 
invariant subspace\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es + \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es + \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003etwo new operators: \u003ca href=\"/posts/kbhmap_restriction_operator/\"\u003emap restriction operator\u003c/a\u003e and \u003ca href=\"/posts/kbhquotient_operator/\"\u003equotient operator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003elist of eigenvectors are linearly independent\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003eeigenspaces are disjoint\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#operators-on-finite-dimensional-v-has-at-most-dim-v-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eoperators on finite dimensional V has at most dim V eigenvalues\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_operator/#id-84dca125-e64f-48d7-b71e-858ad5c3db6c-quotient-operator-is-well-defined\"\u003equotient operator is well-defined\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cpre tabindex=\"0\"\u003e\u003ccode class=\"language-Doesn't\" 
data-lang=\"Doesn't\"\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e\u0026rdquo; is sometimes called the \u0026ldquo;characterizing value\u0026rdquo; of a map\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#finding-eigenvalues-with-actual-numbers\"\u003efinding eigenvalues with actual numbers\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#natural-choordinates-of-a-map\"\u003enatural choordinates of a map\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_5_a/","tags":null,"title":"Axler 5.A"},{"categories":null,"contents":"Key Sequence we began the chapter defining \\(T^m\\) (reminding ourselves the usual rules of \\(T^{m+n} = T^{m}T^{n}\\), \\((T^{m})^{n} = T^{mn}\\), and, for invertible maps, \\(T^{-m} = (T^{-1})^{m}\\)) and \\(p(T)\\), wrapping copies of \\(T\\) into coefficients of a polynomial, and from those definitions showed that polynomial of operator is commutative we then used those results + fundamental theorem of algebra to show that operators on complex vector spaces have an eigenvalue that previous, important result in hand, we then dove into upper-triangular matricies specifically, we learned the properties of upper-triangular matrix, that if \\(v_1 \u0026hellip; v_{n}\\) is a basis of \\(V\\) then \\(\\mathcal{M}(T)\\) is upper-triangular if \\(Tv_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for all \\(j \\leq n\\); and, equivalently, \\(T\\) in invariant under the span of \\(v_{j}\\) using that result, we show that every complex operator has an upper-triangular matrix using some neat tricks of algebra, we then establish that operator is only invertible if diagonal of its upper-triangular matrix is nonzero, which seems awfully unmotivated 
until you learn that\u0026hellip; eigenvalues of a map are the entries of the diagonal of its upper-triangular matrix, and that basically is a direct correlary from the upper-triangular matrix of \\(T-\\lambda I\\) New Definitions \\(T^m\\) \\(p(T)\\) technically also product of polynomials matrix of an operator diagonal of a matrix upper-triangular matrix Results and Their Proofs \\(p(z) \\to p(T)\\) is a linear function polynomial of operator is commutative operators on complex vector spaces have an eigenvalue properties of upper-triangular matrix every complex operator has an upper-triangular matrix operator is only invertible if diagonal of its upper-triangular matrix is nonzero eigenvalues of a map are the entries of the diagonal of its upper-triangular matrix Questions for Jana why define the matrix of an operator again?? just to stress that its square for the second flavor of the proof that every complex operator has an upper-triangular matrix, why is \\(v_1 \u0026hellip; v_{j}\\) a basis of \\(V\\)? Interesting Factoids Its 12:18AM and I read this chapter for 5 hours. I also just got jumpscared by my phone notification. 
What\u0026rsquo;s happening?\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe began the chapter defining \u003ca href=\"/posts/kbhraising_operators_to_powers/\"\u003e\\(T^m\\)\u003c/a\u003e (reminding ourselves the usual rules of \\(T^{m+n} = T^{m}T^{n}\\), \\((T^{m})^{n} = T^{mn}\\), and, for invertible maps, \\(T^{-m} = (T^{-1})^{m}\\)) and \u003ca href=\"/posts/kbhpolynomial_operator/\"\u003e\\(p(T)\\)\u003c/a\u003e, wrapping copies of \\(T\\) into coefficients of a polynomial, and from those definitions showed that \u003ca href=\"/posts/kbhpolynomial_operator/#id-fbaf420a-6345-417b-8016-a976e7b155be-polynomial-of-operator-is-commutative\"\u003epolynomial of operator is commutative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then used those results + \u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e to show that \u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethat previous, important result in hand, we then dove into \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matricies\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003especifically, we learned the \u003ca href=\"/posts/kbhupper_triangular_matrix/#properties-of-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eproperties of upper-triangular matrix\u003c/a\u003e, that if \\(v_1 \u0026hellip; v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) then \\(\\mathcal{M}(T)\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e if \\(Tv_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for all \\(j \\leq n\\); and, equivalently, \\(T\\) in \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of 
\\(v_{j}\\)\u003c/li\u003e\n\u003cli\u003eusing that result, we show that \u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eusing some neat tricks of algebra, we then establish that \u003ca href=\"/posts/kbhupper_triangular_matrix/#operator-is-only-invertible-if-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix-is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e, which seems awfully unmotivated until you learn that\u0026hellip;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#eigenvalues-of-a-map-are-the-entries-of-the-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eeigenvalues of a map are the entries of the diagonal of its upper-triangular matrix\u003c/a\u003e, and that basically is a direct correlary from the \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e of \\(T-\\lambda I\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhraising_operators_to_powers/\"\u003e\\(T^m\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial_operator/\"\u003e\\(p(T)\\)\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003etechnically also \u003ca href=\"\"\u003eproduct of polynomials\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of a matrix\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial_operator/#p--z--to-p--t--is-a-linear-id-d782b5f7-29b5-4f70-a058-f15c0162cbef-function\"\u003e\\(p(z) \\to p(T)\\) is a linear function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial_operator/#id-fbaf420a-6345-417b-8016-a976e7b155be-polynomial-of-operator-is-commutative\"\u003epolynomial of operator is commutative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#properties-of-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eproperties of upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#operator-is-only-invertible-if-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix-is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhupper_triangular_matrix/#eigenvalues-of-a-map-are-the-entries-of-the-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eeigenvalues of a map are the entries of the diagonal of its upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003ewhy define the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e again??\u003c/del\u003e just to stress that its square\u003c/li\u003e\n\u003cli\u003efor the second flavor of the proof that \u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e, why is \\(v_1 \u0026hellip; v_{j}\\) a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\)?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eIts 12:18AM and I read this chapter for 5 hours. I also just got jumpscared by my phone notification. 
What\u0026rsquo;s happening?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_5_b/","tags":null,"title":"Axler 5.B"},{"categories":null,"contents":"Key Sequence we defined an eigenspace, which is the space of all eigenvalues of a distinct eigenvector, and show that they form a direct sum to the whole space and, as a correlate to how direct sums are kind of like disjoint sets, we have the perhaps expected result of dimension, that the sum of the eigenspace\u0026rsquo; dimensions must be smaller than or equal than that of \\(V\\) we defined a Diagonal Matrix, which by its structure + calculation can be shown to require that it is formed by a basis of eigenvalues and from there, and the properties of eigenspaces above, we deduce some conditions equal to diagonalizability a direct correlary of the last point (perhaps more straightforwardly intuited by just lining eigenvalue up diagonally in a matrix) is that enough eigenvalues implies diagonalizability New Definitions diagonal matrix properties of diagonal matrices eigenspace Results and Their Proofs eigenspaces are a direct sum dimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space conditions equal to diagonalizability enough eigenvalues implies diagonalizability Questions for Jana for diagonalizability, shouldn\u0026rsquo;t \\(n\\) be \\(m\\) on item 3? 
Interesting Factoids Short!\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined an \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e, which is the space of all \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of a distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e, and show that\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003ethey form a direct sum\u003c/a\u003e to the whole space\u003c/li\u003e\n\u003cli\u003eand, as a correlate to how \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es are kind of like disjoint sets, we have the \u003ca href=\"/posts/kbheigenspace/#dimension-of-sum-of-eigenspaces-is-smaller-than-or-equal-to-the-dimension-of-the-whole-space\"\u003eperhaps expected result of dimension\u003c/a\u003e, that the sum of the \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e\u0026rsquo; dimensions must be smaller than or equal than that of \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe defined a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003eDiagonal Matrix\u003c/a\u003e, which by its structure + calculation can be shown to require that it is formed by a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003eand from there, and the properties of \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es above, we deduce \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003esome conditions equal to diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ea direct correlary of the last point (perhaps more straightforwardly intuited by just lining \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e up diagonally in a matrix) is that \u003ca 
href=\"/posts/kbhdiagonal_matrix/#enough-eigenvalues-implies-diagonalizability\"\u003eenough eigenvalues implies diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal matrix\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003eproperties of diagonal matrices\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003eeigenspaces are a direct sum\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenspace/#dimension-of-sum-of-eigenspaces-is-smaller-than-or-equal-to-the-dimension-of-the-whole-space\"\u003edimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003econditions equal to diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#enough-eigenvalues-implies-diagonalizability\"\u003eenough eigenvalues implies diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizability\u003c/a\u003e, shouldn\u0026rsquo;t \\(n\\) be \\(m\\) on item 3?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting 
Factoids\u003c/h2\u003e\n\u003cp\u003eShort!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_5_c/","tags":null,"title":"Axler 5.C"},{"categories":null,"contents":"Hear ye, hear ye! Length and angles are a thing now!!\nKey Sequence We remembered how dot products work, then proceeded to generalize them into inner products\u0026mdash;this is needed because complex numbers don\u0026rsquo;t behave well when squared so we need to add in special guardrails We then learned that dot products is just an instantiation of Euclidean Inner Product, which itself is simply one of many inner products. A vector space that has a well-defined inner product is now called an Inner Product Space Along with revisiting our definition of dot products to include complexes, we changed our definition of norm to be in terms of inner products \\(\\sqrt{\\langle v,v \\rangle}\\) to help support complex vector spaces better; we then also redefined orthogonality and showed a few results regarding them Then, we did a bunch of analysis-y work to understand some properties of norms and inner products: Pythagorean Theorem, Cauchy-Schwartz Inequality, triangle inequality and parallelogram equality. New Definitions dot product and the more generally important\u0026hellip; inner product!, Euclidean Inner Product, Inner Product Space norm orthogonal and now, a cornucopia of analysis Pythagorean Theorem Cauchy-Schwartz Inequality triangle inequality parallelogram equality Results and Their Proofs properties of the dot product properties of inner product properties of the norm orthogonality and \\(0\\) Questions for Jana How much of the analysis-y proof work do we have to remember for the analysis-y results? Interesting Factoids ","html":"\u003cp\u003eHear ye, hear ye! 
Length and angles are a thing now!!\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWe remembered how \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es work, then proceeded to generalize them into \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es\u0026mdash;this is needed because \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es don\u0026rsquo;t behave well when squared so we need to add in special guardrails\u003c/li\u003e\n\u003cli\u003eWe then learned that \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es is just an instantiation of \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e, which itself is simply one of many \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es. A \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e that has a well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is now called an \u003ca href=\"/posts/kbhinner_product/#inner-product-space\"\u003eInner Product Space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAlong with revisiting our definition of \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es to include complexes, we changed our definition of \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e to be in terms of \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es \\(\\sqrt{\\langle v,v \\rangle}\\) to help support \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003es better; we then also redefined \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonality\u003c/a\u003e and showed a few results regarding them\u003c/li\u003e\n\u003cli\u003eThen, we did a bunch of analysis-y work to understand some properties of norms and \u003ca 
href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es: \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003ePythagorean Theorem\u003c/a\u003e, \u003ca href=\"/posts/kbhcornucopia_of_analysis/#cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/a\u003e, \u003ca href=\"/posts/kbhcornucopia_of_analysis/#triangle-inequality--vectors\"\u003etriangle inequality\u003c/a\u003e and \u003ca href=\"/posts/kbhcornucopia_of_analysis/#parallelogram-equality\"\u003eparallelogram equality\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e and the more generally important\u0026hellip;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e!, \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e, \u003ca href=\"/posts/kbhinner_product/#inner-product-space\"\u003eInner Product Space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand now, a cornucopia of analysis\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003ePythagorean Theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#triangle-inequality--vectors\"\u003etriangle inequality\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#parallelogram-equality\"\u003eparallelogram 
equality\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdot_product/#properties-of-the-dot-product\"\u003eproperties of the dot product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinner_product/#properties-of-inner-product\"\u003eproperties of inner product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnorm/#properties-of-the-norm\"\u003eproperties of the norm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthogonal/#orthogonality-and-0\"\u003eorthogonality and \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHow much of the analysis-y proof work do we have to remember for the analysis-y results?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_6_a/","tags":null,"title":"Axler 6.A"},{"categories":null,"contents":"OMG its Gram-Schmidtting\nKey Sequence we defined lists of vectors that all have norm 1 and are all orthogonal to each other as orthonormal; we showed orthonormal list is linearly independent by hijacking pythagoras of course, once we have a finitely long linearly independent thing we must be able to build a basis. The nice thing about such an orthonormal basis is that for every vector we know precisely what its coefficients have to be! Specifically, \\(a_{j} = \\langle v, e_{j} \\rangle\\). That\u0026rsquo;s cool. What we really want, though, is to be able to get an orthonormal basis from a regular basis, which we can do via Gram-Schmidt. 
In fact, this gives us some useful correlaries regarding the existance of orthonormal basis (just Gram-Schmidt a normal one), or extending a orthonormal list to a basis, etc. There are also important implications (still along the veins of \u0026ldquo;just Gram-Schmidt it!\u0026rdquo;) for upper-traingular matricies as well We also learned, as a result of orthonormal basis, any finite-dimensional linear functional (Linear Maps to scalars) can be represented as an inner product via the Riesz Representation Theorem, which is honestly kinda epic. New Definitions orthonormal + orthonormal basis Gram-Schmidt (i.e. orthonormalization) linear functional and Riesz Representation Theorem Results and Their Proofs Norm of an Orthogonal Linear Combination An orthonormal list is linearly independent An orthonormal list of the right length is a basis Writing a vector as a linear combination of orthonormal basis Corollaries of Gram-Schmidt Every Inner Product Space has an orthonormal basis Orthonormal list extended to orthonormal basis Orthonormal upper-triangular matrix basis exists if normal upper-triangular exists Schur\u0026rsquo;s Theorem Riesz Representation Theorem Questions for Jana Interesting Factoids ","html":"\u003cp\u003eOMG its \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidtting\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined lists of vectors that all have \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1 and are all \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to each other as \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e; we showed \u003ca href=\"/posts/kbhorthonormal/#orthonormal-list-is-linearly-independent\"\u003eorthonormal list is linearly independent\u003c/a\u003e by hijacking \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003epythagoras\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eof 
course, once we have a finitely long \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e thing we must be able to \u003ca href=\"/posts/kbhorthonormal_basis/#orthonormal-list-of-the-right-length-is-a-basis\"\u003ebuild a basis\u003c/a\u003e. The nice thing about such an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e is that for every vector \u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003ewe know precisely what its coefficients have to be\u003c/a\u003e! Specifically, \\(a_{j} = \\langle v, e_{j} \\rangle\\). That\u0026rsquo;s cool.\u003c/li\u003e\n\u003cli\u003eWhat we really want, though, is to be able to get an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e from a regular \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, which we can do via \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e. In fact, this gives us some useful correlaries regarding the existance of \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e (just \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e a normal one), or extending a \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e list to a basis, etc. 
There are also important implications (still along the veins of \u0026ldquo;just \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e it!\u0026rdquo;) for \u003ca href=\"/posts/kbhgram_schmidt/#schur-s-theorem\"\u003eupper-traingular matricies\u003c/a\u003e as well\u003c/li\u003e\n\u003cli\u003eWe also learned, as a result of \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e, any finite-dimensional \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e (\u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es to scalars) can be represented as an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e via the \u003ca href=\"/posts/kbhlinear_functional/#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e, which is honestly kinda epic.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e + \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e (i.e. 
\u003ca href=\"/posts/kbhgram_schmidt/\"\u003eorthonormalization\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e and \u003ca href=\"/posts/kbhlinear_functional/#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal/#norm-of-an-orthogonal-linear-combination\"\u003eNorm of an Orthogonal Linear Combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal/#orthonormal-list-is-linearly-independent\"\u003eAn orthonormal list is linearly independent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal_basis/#orthonormal-list-of-the-right-length-is-a-basis\"\u003eAn orthonormal list of the right length is a basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003eWriting a vector as a linear combination of orthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eCorollaries of \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#every-id-4a788e29-a3e9-4c13-8c97-08746878966e-inner-product-space-has-an-id-2a1eecb2-f23a-469f-a860-b561a9197906-orthonormal-basis\"\u003eEvery Inner Product Space has an orthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#orthonormal-list-extended-to-id-2a1eecb2-f23a-469f-a860-b561a9197906-orthonormal-basis\"\u003eOrthonormal list extended to orthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#orthonormal-upper-triangular-matrix-basis-exists-if-normal-upper-triangular-exists\"\u003eOrthonormal 
upper-triangular matrix basis exists if normal upper-triangular exists\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#schur-s-theorem\"\u003eSchur\u0026rsquo;s Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_functional/#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_6_b/","tags":null,"title":"Axler 6.B"},{"categories":null,"contents":"This is not actually like a proper review of a chapter, instead, it is an opinionated review of what I think Jana thinks Axler thinks is important about 7.A.\nNote that all of the \u0026ldquo;proofy things\u0026rdquo; in this section are poofy because problems with putting trips prior to the end of the year.\nHere\u0026rsquo;s an outline:\nWe defined the adjoint We learned some properties of the adjoint; importantly, that \\((A+B)^{*} = A^{*} + B^{*}\\), \\((AB)^{*} = B^{*} A^{*}\\), \\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\); a correlary is that \\(M^{*}M\\) is self-adjoint We defined normal, self-adjoint, and unitary With those definitions, we showed that eigenvalues of self-adjoint matricies are real Then, we created two mildly interesting intermediate results Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix The latter of which shows that Eigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal This, all, builds up to the result of the Complex Spectral Theorem, which you should know adjoint Suppose \\(T \\in \\mathcal{L}(V,W)\\), we 
define the adjoint as a \\(T^{*} \\in \\mathcal{L}(W,V)\\) that:\n\\begin{equation} \\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle \\end{equation}\nthe usual verifications could be made (setting \\(w = w_1+w_2\\), then applying inner product additivity, etc.) to show that \\(T^{*}\\) is a linear map.\nproperties of the adjoint \\((S+T)^{*} = S^{*} + T^{*}\\) \\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\) \\((T^{*})^{*} = T\\) \\(I^{*} = I\\) \u0026ldquo;identity is self adjoint\u0026rdquo; and, importantly, \\((ST)^{*} = T^{*} S^{*}\\) All of these results are findable by chonking through the expressions for inner product properties.\nadjoint is the conjugate transpose For complex-valued matricies, and the Euclidean Inner Product, the adjoint is the conjugate transpose of the matrix.\nTo show this ham-fistedly, first convince yourself that the property of \\((ST)^{*} = T^{*} S^{*}\\) holds for the act of conjugate transpose of matricies as well. Now, we will show how we can get to the adjoint definition from that result:\nConsider:\n\\begin{equation} \\langle v,w \\rangle \\end{equation}\nwe can represent this as the product of two \u0026ldquo;matricies\u0026rdquo;: an \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(v\\), and a \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(w\\); specifically:\n\\begin{equation} \\langle v,w \\rangle = v^{*} w \\end{equation}\nwhere \\(v^{*}\\) is the conjugate-transpose of \\(v\\) (the dagger is nonstandard notation, to distinguish from \\(*\\) the adjoint defined above which we haven\u0026rsquo;t shown yet). This is by definition of how matricies multiply and how the Euclidean Inner Product works.\nSo then consider the same for the above:\n\\begin{equation} \\langle Tv,w \\rangle = (Tv)^{*}w = v^{*} T^{*}w = \\langle v, T^{*}w \\rangle \\end{equation}\nAxler gives an arguably better proof involving representing the matricies w.r.t. 
the orthonomal bases, and then showing that the inner products just swapped slots:\nBuncha matrix adjectives And given we now know what the adjoint is, we can make some definitions:\nself-adjoint \\begin{equation} A = A^{*} \\end{equation}\nwow.\nNamely:\n\\begin{equation} \\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle = \\langle v, Tw \\rangle \\end{equation}\nnormal \\begin{equation} A A^{*} = A^{*} A \\end{equation}\nAs in, if the operator commutes with its own adjoint.\nunitary \\begin{equation} A^{*} = A^{-1} \\end{equation}\nor, that \\(A\\) has orthonormal columns: an unitary operator is invertible, and the inverse of its matrix representation is its transpose\nEigenvalues of self-adjoint matricies are real So, if we have:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nwe want to show that \\(\\lambda\\) is real. To do this, we can show that \\(\\lambda = \\bar{\\lambda}\\) which would mean the \\(b\\) component is \\(0\\).\nNow, recall that self-adjoint means \\(\\langle Tv,w \\rangle = \\langle v, Tw \\rangle\\).\nConstruct now: \\(\\lambda \\|v\\|^{2}\\) \u0026mdash;\n\\begin{equation} \\lambda \\|v\\|^{2} = \\lambda \\langle v,v \\rangle = \\langle \\lambda v,v \\rangle = \\langle Tv, v \\rangle = \\langle v, Tv \\rangle = \\langle v, \\lambda v \\rangle = \\bar{\\lambda} \\langle v,v \\rangle = \\bar{\\lambda} \\|v\\|^{2} \\end{equation}\nSo we have \\(\\lambda \\|v\\|^{2} = \\bar{\\lambda} \\|v\\|^{2}\\), which means \\(\\lambda = \\bar{\\lambda}\\), as desired.\nTwo less important intermediate results \u0026hellip;that we just trust Axler\u0026rsquo;s word + our intuition for:\n7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix 7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix Why is it different? 
ASK JANA IDK\nEigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal Prove depended on the minor results from before\nand then voodo whitchcraft.\nComplex Spectral Theorem On \\(\\mathbb{C}\\), and with \\(T \\in \\mathcal{L}(V)\\), the following statements are equivalent:\n\\(T\\) is normal \\(T\\) has an orthonormal basis of eigenvectors and so \\(T\\) is diagonalizable w.r.t. that orthonormal basis of eigenvectors This proof depends on Schur\u0026rsquo;s Theorem.\nThe real number version requires that \\(T\\) is self-adjoint.\nThings to ask jana why is 7.14 and 7.16 different ","html":"\u003cp\u003eThis is not actually like a proper review of a chapter, instead, it is an \u003cstrong\u003eopinionated review\u003c/strong\u003e of what I think Jana thinks Axler thinks is important about 7.A.\u003c/p\u003e\n\u003cp\u003eNote that all of the \u0026ldquo;proofy things\u0026rdquo; in this section are poofy because problems with putting trips prior to the end of the year.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s an outline:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWe defined the \u003ca href=\"#adjoint\"\u003eadjoint\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eWe learned some \u003ca href=\"#properties-of-the-adjoint\"\u003eproperties of the adjoint\u003c/a\u003e; importantly, that \\((A+B)^{*} = A^{*} + B^{*}\\), \\((AB)^{*} = B^{*} A^{*}\\), \\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\); a correlary is that \\(M^{*}M\\) is \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWe defined \u003ca href=\"#buncha-matrix-adjectives\"\u003enormal, self-adjoint, and unitary\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWith those definitions, we showed that \u003ca href=\"#eigenvalues-of-self-adjoint--org71f113d--matricies-are-real\"\u003eeigenvalues of self-adjoint matricies are 
real\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThen, we created two mildly interesting intermediate results\n\u003cul\u003e\n\u003cli\u003eOver \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/li\u003e\n\u003cli\u003eOver \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) \u003cstrong\u003eand \\(T\\) is self-adjoint\u003c/strong\u003e, then \\(T\\) is the zero matrix\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThe latter of which shows that \u003ca href=\"#eigenvectors-of-t-corresponding-to-distinct-eigenvalues-are-orthogonal-if-t-in-mathcal-l--v--is-normal\"\u003eEigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThis, all, builds up to the result of the \u003cstrong\u003e\u003ca href=\"#complex-spectral-theorem\"\u003eComplex Spectral Theorem\u003c/a\u003e\u003c/strong\u003e, which you should know\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"adjoint\"\u003eadjoint\u003c/h2\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\), we define the \u003cstrong\u003eadjoint\u003c/strong\u003e as a \\(T^{*} \\in \\mathcal{L}(W,V)\\) that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe usual verifications could be made (setting \\(w = w_1+w_2\\), then applying \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e additivity, etc.) 
to show that \\(T^{*}\\) is a linear map.\u003c/p\u003e\n\u003ch3 id=\"properties-of-the-adjoint\"\u003eproperties of the adjoint\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\((S+T)^{*} = S^{*} + T^{*}\\)\u003c/li\u003e\n\u003cli\u003e\\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\)\u003c/li\u003e\n\u003cli\u003e\\((T^{*})^{*} = T\\)\u003c/li\u003e\n\u003cli\u003e\\(I^{*} = I\\) \u0026ldquo;identity is self adjoint\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eand, importantly, \\((ST)^{*} = T^{*} S^{*}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAll of these results are findable by chonking through the expressions for inner product properties.\u003c/p\u003e\n\u003ch3 id=\"adjoint-is-the-conjugate-transpose\"\u003eadjoint is the conjugate transpose\u003c/h3\u003e\n\u003cp\u003eFor complex-valued \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e, and the \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e, the adjoint is the conjugate transpose of the matrix.\u003c/p\u003e\n\u003cp\u003eTo show this ham-fistedly, first convince yourself that the property of \\((ST)^{*} = T^{*} S^{*}\\) holds for the act of conjugate transpose of matricies as well. 
Now, we will show how we can get to the adjoint definition from that result:\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v,w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can represent this as the product of two \u0026ldquo;matricies\u0026rdquo;: an \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(v\\), and a \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(w\\); specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v,w \\rangle = v^{*} w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(v^{*}\\) is the conjugate-transpose of \\(v\\) (the dagger is nonstandard notation, to distinguish from \\(*\\) the adjoint defined above which we haven\u0026rsquo;t shown yet). This is by definition of how matricies multiply and how the \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e works.\u003c/p\u003e\n\u003cp\u003eSo then consider the same for the above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv,w \\rangle = (Tv)^{*}w = v^{*} T^{*}w = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAxler gives an arguably better proof involving representing the matricies w.r.t. 
the orthonomal bases, and then showing that the inner products just swapped slots:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-10_20-53-51_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"buncha-matrix-adjectives\"\u003eBuncha matrix adjectives\u003c/h2\u003e\n\u003cp\u003eAnd given we now know what the \u003ca href=\"#adjoint\"\u003eadjoint\u003c/a\u003e is, we can make some definitions:\u003c/p\u003e\n\u003ch3 id=\"self-adjoint\"\u003eself-adjoint\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nA = A^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewow.\u003c/p\u003e\n\u003cp\u003eNamely:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle = \\langle v, Tw \\rangle\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"normal\"\u003enormal\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nA A^{*} = A^{*} A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs in, if the operator commutes with its own adjoint.\u003c/p\u003e\n\u003ch3 id=\"unitary\"\u003eunitary\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nA^{*} = A^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor, that \\(A\\) has orthonormal columns: \u003ca href=\"/posts/kbhnus_math530_matrix_adjectives/#an-unitary-operator-is-invertible-and-the-inverse-of-its-matrix-representation-is-its-transpose\"\u003ean unitary operator is invertible, and the inverse of its matrix representation is its transpose\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"eigenvalues-of-self-adjoint--org71f113d--matricies-are-real\"\u003eEigenvalues of \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e matricies are real\u003c/h2\u003e\n\u003cp\u003eSo, if we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe want to show that \\(\\lambda\\) is real. 
To do this, we can show that \\(\\lambda = \\bar{\\lambda}\\) which would mean the \\(b\\) component is \\(0\\).\u003c/p\u003e\n\u003cp\u003eNow, recall that \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e means \\(\\langle Tv,w \\rangle = \\langle v, Tw \\rangle\\).\u003c/p\u003e\n\u003cp\u003eConstruct now: \\(\\lambda \\|v\\|^{2}\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\|v\\|^{2} = \\lambda \\langle v,v \\rangle = \\langle \\lambda v,v \\rangle = \\langle Tv, v \\rangle = \\langle v, Tv \\rangle = \\langle v, \\lambda v \\rangle = \\bar{\\lambda} \\langle v,v \\rangle = \\bar{\\lambda} \\|v\\|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we have \\(\\lambda \\|v\\|^{2} = \\bar{\\lambda} \\|v\\|^{2}\\), which means \\(\\lambda = \\bar{\\lambda}\\), as desired.\u003c/p\u003e\n\u003ch2 id=\"two-less-important-intermediate-results\"\u003eTwo less important intermediate results\u003c/h2\u003e\n\u003cp\u003e\u0026hellip;that we just trust Axler\u0026rsquo;s word + our intuition for:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/li\u003e\n\u003cli\u003e7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) \u003cstrong\u003eand \\(T\\) is self-adjoint\u003c/strong\u003e, then \\(T\\) is the zero matrix\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhy is it different? 
\u003cstrong\u003e\u003cstrong\u003eASK JANA IDK\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"eigenvectors-of-t-corresponding-to-distinct-eigenvalues-are-orthogonal-if-t-in-mathcal-l--v--is-normal\"\u003eEigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal\u003c/h2\u003e\n\u003cp\u003eProve depended on the minor results from before\u003c/p\u003e\n\u003cp\u003eand then voodo whitchcraft.\u003c/p\u003e\n\u003ch2 id=\"complex-spectral-theorem\"\u003eComplex Spectral Theorem\u003c/h2\u003e\n\u003cp\u003eOn \\(\\mathbb{C}\\), and with \\(T \\in \\mathcal{L}(V)\\), the following statements are equivalent:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(T\\) is normal\u003c/li\u003e\n\u003cli\u003e\\(T\\) has an orthonormal basis of eigenvectors\u003c/li\u003e\n\u003cli\u003eand so \\(T\\) is diagonalizable w.r.t. that orthonormal \u003cstrong\u003ebasis of eigenvectors\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis proof depends on \u003ca href=\"/posts/kbhgram_schmidt/#schur-s-theorem\"\u003eSchur\u0026rsquo;s Theorem\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe real number version requires that \\(T\\) is \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"things-to-ask-jana\"\u003eThings to ask jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy is 7.14 and 7.16 different\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_7_a/","tags":null,"title":"Axler 7.A"},{"categories":null,"contents":"AAAAA I want a good backpack.\nrequirements\nexplicit laptop compartment (whether intentional or not; water bladder component that fits a laptop is fine) earbags (those fannypack things on the side of the bottom belt); needs to be large (i.e. 
enough to fit an iphone 5) raincover at least 3 compartments, ideally one with a pen holder and key ring, and the outermost being very accessible (think mesh bag) basically I want an exact replica of the columbia silver ridge 30L from 2012 which they don\u0026rsquo;t sell anymore; the new one breaks 4) slightly and is also $150 and I got mine for ilke $60-70 (it was like 300-350 rmb) max in 2012\nhttps://www.amazon.com/FENGDONG-Waterproof-Lightweight-Outdoor-Backpack/dp/B07T9KJ87Y/ref=sr_1_8?keywords=Ergonomic%2BBackpack%2Bwith%2BWaist%2BBelt\u0026amp;qid=1681186885\u0026amp;sr=8-8\u0026amp;th=1 https://www.thenorthface.com/en-us/bags-and-gear/technical-packs/hike-c224525/basin-36-backpack-pNF0A52CX?color=KX7 https://www.patagonia.com/product/altvia-pack-36-liters/195699592506.html or just https://www.ebay.com/itm/314416731511?hash=item4934b29577:g:~qEAAOSwxPFkGep5\u0026amp;amdata=enc%3AAQAHAAAA8KUu6vJvKh7jVrgZg0FOdwH29YdndgW0Z%2FMxdEOH15jv1RGQN6DetZ4Lt8NqSwRFCH3wCCJe6cBmKvSysTNf2QUIYAv%2FKaRUGkeKX4XTkMwQRtAP67yRMdZJEu3GRcuLgjv7QRmPS6BmydT1NUBi4ULiHVupVFYMv9ei%2BJK2JGjf53DJJ7JqFjftWdQ%2F6mfoEdjsmZ5LZbMKP0XFCtyt6OojQKE63GcQt16lXGdfjlZuGpHJ6znv%2BhAHiHyV9FlguEi1v3hHSn1zx9qmfzriXvh1vbqlguRc2uiNVz3dJ52A7WYzvvWVkHZ1Vo8SekjByQ%3D%3D%7Ctkp%3ABk9SR97_n-ntYQ ","html":"\u003cp\u003eAAAAA I want a good backpack.\u003c/p\u003e\n\u003cp\u003erequirements\u003c/p\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003eexplicit laptop compartment (whether intentional or not; water bladder component that fits a laptop is fine)\u003c/li\u003e\n\u003cli\u003eearbags (those fannypack things on the side of the bottom belt); needs to be large (i.e. 
enough to fit an iphone 5)\u003c/li\u003e\n\u003cli\u003eraincover\u003c/li\u003e\n\u003cli\u003eat least 3 compartments, ideally one with a pen holder and key ring, and the outermost being very accessible (think mesh bag)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ebasically I want an exact replica of the columbia silver ridge 30L from 2012 which they don\u0026rsquo;t sell anymore; the new one breaks 4) slightly and is also $150 and I got mine for ilke $60-70 (it was like 300-350 rmb) max in 2012\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://www.amazon.com/FENGDONG-Waterproof-Lightweight-Outdoor-Backpack/dp/B07T9KJ87Y/ref=sr_1_8?keywords=Ergonomic%2BBackpack%2Bwith%2BWaist%2BBelt\u0026amp;qid=1681186885\u0026amp;sr=8-8\u0026amp;th=1\"\u003ehttps://www.amazon.com/FENGDONG-Waterproof-Lightweight-Outdoor-Backpack/dp/B07T9KJ87Y/ref=sr_1_8?keywords=Ergonomic%2BBackpack%2Bwith%2BWaist%2BBelt\u0026amp;qid=1681186885\u0026amp;sr=8-8\u0026amp;th=1\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.thenorthface.com/en-us/bags-and-gear/technical-packs/hike-c224525/basin-36-backpack-pNF0A52CX?color=KX7\"\u003ehttps://www.thenorthface.com/en-us/bags-and-gear/technical-packs/hike-c224525/basin-36-backpack-pNF0A52CX?color=KX7\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.patagonia.com/product/altvia-pack-36-liters/195699592506.html\"\u003ehttps://www.patagonia.com/product/altvia-pack-36-liters/195699592506.html\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eor just \u003ca 
href=\"https://www.ebay.com/itm/314416731511?hash=item4934b29577:g:~qEAAOSwxPFkGep5\u0026amp;amdata=enc%3AAQAHAAAA8KUu6vJvKh7jVrgZg0FOdwH29YdndgW0Z%2FMxdEOH15jv1RGQN6DetZ4Lt8NqSwRFCH3wCCJe6cBmKvSysTNf2QUIYAv%2FKaRUGkeKX4XTkMwQRtAP67yRMdZJEu3GRcuLgjv7QRmPS6BmydT1NUBi4ULiHVupVFYMv9ei%2BJK2JGjf53DJJ7JqFjftWdQ%2F6mfoEdjsmZ5LZbMKP0XFCtyt6OojQKE63GcQt16lXGdfjlZuGpHJ6znv%2BhAHiHyV9FlguEi1v3hHSn1zx9qmfzriXvh1vbqlguRc2uiNVz3dJ52A7WYzvvWVkHZ1Vo8SekjByQ%3D%3D%7Ctkp%3ABk9SR97_n-ntYQ\"\u003ehttps://www.ebay.com/itm/314416731511?hash=item4934b29577:g:~qEAAOSwxPFkGep5\u0026amp;amdata=enc%3AAQAHAAAA8KUu6vJvKh7jVrgZg0FOdwH29YdndgW0Z%2FMxdEOH15jv1RGQN6DetZ4Lt8NqSwRFCH3wCCJe6cBmKvSysTNf2QUIYAv%2FKaRUGkeKX4XTkMwQRtAP67yRMdZJEu3GRcuLgjv7QRmPS6BmydT1NUBi4ULiHVupVFYMv9ei%2BJK2JGjf53DJJ7JqFjftWdQ%2F6mfoEdjsmZ5LZbMKP0XFCtyt6OojQKE63GcQt16lXGdfjlZuGpHJ6znv%2BhAHiHyV9FlguEi1v3hHSn1zx9qmfzriXvh1vbqlguRc2uiNVz3dJ52A7WYzvvWVkHZ1Vo8SekjByQ%3D%3D%7Ctkp%3ABk9SR97_n-ntYQ\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbackpacks/","tags":null,"title":"Backpacks"},{"categories":null,"contents":"we need to keep two sequences aligned; so in addition to minimum edit distance we need to know how to transform one sequence into another.\nTo do this, we keep a pointer of what cell we came from.\nThis is similar to edit distance with DP, but we keep a pointer of each cell of the action: point DOWN (less j) if inserting, point LEFT (less i) if deleting, and point diagonally if substituting.\nFor two strings, let\u0026rsquo;s define:\n\\(X\\) of length \\(n\\) \\(Y\\) of length \\(m\\) we define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\nLet:\n\\(D(i,0) = i\\) \\(D(0,j) = j\\) for i in range(1,M): for j in range(1,N): # deletion: ignoring one char of previous string d1 = D(i-1,j) + 1 # (cost) # insertion: insertion into string before using rest of j d2 = D(i,j-1) + 1 # (cost) # keep same if char is same or substitute 
current d3 = D(i-1,j-1) + (0 if X[i] == Y[j] else 2) # cache D(i,j) = min(d1, d2, d3) ","html":"\u003cp\u003ewe need to keep two sequences aligned; so in addition to \u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e we need to know how to transform one sequence into another.\u003c/p\u003e\n\u003cp\u003eTo do this, we keep a pointer of what cell we came from.\u003c/p\u003e\n\u003cp\u003eThis is similar to \u003ca href=\"/posts/kbhedit_distance_with_dp/\"\u003eedit distance with DP\u003c/a\u003e, but we keep a pointer of each cell of the action: point DOWN (less j) if inserting, point LEFT (less i) if deleting, and point diagonally if substituting.\u003c/p\u003e\n\u003cp\u003eFor two strings, let\u0026rsquo;s define:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) of length \\(n\\)\u003c/li\u003e\n\u003cli\u003e\\(Y\\) of length \\(m\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(D(i,0) = i\\)\u003c/li\u003e\n\u003cli\u003e\\(D(0,j) = j\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# deletion: ignoring one char of previous string\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# insertion: insertion into string before using rest of j\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# keep same if char is same or substitute current\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbacktracing/","tags":null,"title":"backtracing"},{"categories":null,"contents":"Bag of Words is an order-free representation of a corpus. Specifically, each word has a count which we assign to each word without any other information about ordering, etc.\nWith the Bayes Theorem, we have:\n\\begin{equation} C_{MAP} = \\arg\\max_{c \\in C} P(d|c)P( c) \\end{equation}\nwhere \\(d\\) is the document, and \\(c\\) is the class.\nSo, given a document is a set of a bunch of words:\n\\begin{equation} C_{MAP} = \\arg\\max_{c\\in C} P(x_1, \\dots, x_{n}|c)P( c) \\end{equation}\nNaive Bayes for Text Classification Assumptions of Bag of Words for Naive Bayes\n\\(P( c)\\) The right side is just relative frequencies (count of freq divided by count of class).\n\\(P(x_1, \u0026hellip;, x_{n})\\) Naive Bayes assumption (each word\u0026rsquo;s position doesn\u0026rsquo;t matter) Bag of Words assumption (assume position doesn\u0026rsquo;t matter) So we have:\n\\begin{equation} C_{NB} = \\arg\\max_{c\\in C} P(c_{j}) \\prod_{x\\in X} P(x|c) \\end{equation}\nWe eventually use logs to prevent underflow:\n\\begin{equation} C_{NB} = \\arg\\max_{c\\in C}\\log P(c_{j}) +\\sum_{x\\in X} \\log P(x|c) \\end{equation}\nParameter Estimation Because we don\u0026rsquo;t know new words completely decimating probability, we want to establish a Beta Distribution prior which smoothes the outcomes, meaning:\n\\begin{equation} P(w_{k}|c_{j}) = \\frac{n_{k} + \\alpha }{n + \\alpha |V|} \\end{equation}\nwhere \\(n_{k}\\) is the number of occurrences of word \\(k\\) in class \\(C\\), and \\(n\\) is the number of words in total that occurs in class \\(C\\).\nUnknown Words We ignore them. 
Because knowing a class has lots of unknown words don\u0026rsquo;t help.\nBinary Naive Bayes There is another version, which simply clip all the word count \\(n_{k}\\) to \\(1\\) for both train and test. You do this by de-duplicating the entire corpus by DOCUMENT (i.e. if a word appears twice in the same document, we count it only once).\nBenefits Doesn\u0026rsquo;t have significant fragmentation problems (i.e. many important features clotting up decision) Robust to irrelevant features (which cancel each other out) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e is an order-free representation of a \u003ca href=\"/posts/kbhcorpus/\"\u003ecorpus\u003c/a\u003e. Specifically, each word has a count which we assign to each word without any other information about ordering, etc.\u003c/p\u003e\n\u003cp\u003eWith the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{MAP} = \\arg\\max_{c \\in C} P(d|c)P( c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is the document, and \\(c\\) is the class.\u003c/p\u003e\n\u003cp\u003eSo, given a document is a set of a bunch of words:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{MAP} = \\arg\\max_{c\\in C} P(x_1, \\dots, x_{n}|c)P( c)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"naive-bayes-for-text-classification\"\u003eNaive Bayes for Text Classification\u003c/h2\u003e\n\u003cp\u003eAssumptions of \u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e for \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"p--c\"\u003e\\(P( c)\\)\u003c/h3\u003e\n\u003cp\u003eThe right side is just relative frequencies (count of freq divided by count of class).\u003c/p\u003e\n\u003ch3 id=\"p--x-1-dot-dot-dot-x-n\"\u003e\\(P(x_1, \u0026hellip;, x_{n})\\)\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhnaive_bayes/#id-6cdf74a2-2451-47d1-8a62-62aa6dff62c6-naive-bayes-assumption\"\u003eNaive Bayes assumption\u003c/a\u003e (each word\u0026rsquo;s position doesn\u0026rsquo;t matter)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e assumption (assume position doesn\u0026rsquo;t matter)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{NB} = \\arg\\max_{c\\in C} P(c_{j}) \\prod_{x\\in X} P(x|c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe eventually use logs to prevent underflow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{NB} = \\arg\\max_{c\\in C}\\log P(c_{j}) +\\sum_{x\\in X} \\log P(x|c)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"parameter-estimation\"\u003eParameter Estimation\u003c/h3\u003e\n\u003cp\u003eBecause we don\u0026rsquo;t know new words completely decimating probability, we want to establish a \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e prior which smoothes the outcomes, meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{k}|c_{j}) = \\frac{n_{k} + \\alpha }{n + \\alpha |V|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(n_{k}\\) is the number of occurrences of word \\(k\\) in class \\(C\\), and \\(n\\) is the number of words in total that occurs in class \\(C\\).\u003c/p\u003e\n\u003ch3 id=\"unknown-words\"\u003eUnknown Words\u003c/h3\u003e\n\u003cp\u003eWe ignore them. Because knowing a class has lots of unknown words don\u0026rsquo;t help.\u003c/p\u003e\n\u003ch3 id=\"binary-naive-bayes\"\u003eBinary Naive Bayes\u003c/h3\u003e\n\u003cp\u003eThere is another version, which simply clip all the word count \\(n_{k}\\) to \\(1\\) for both train and test. You do this by de-duplicating the entire corpus by \u003cstrong\u003eDOCUMENT\u003c/strong\u003e (i.e. 
if a word appears twice in the same document, we count it only once).\u003c/p\u003e\n\u003ch3 id=\"benefits\"\u003eBenefits\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDoesn\u0026rsquo;t have significant \u003cstrong\u003efragmentation\u003c/strong\u003e problems (i.e. many important features clotting up decision)\u003c/li\u003e\n\u003cli\u003eRobust to irrelevant features (which cancel each other out)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbag_of_words/","tags":null,"title":"Bag of Words"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.635945\nOne-Liner extracted lexicographic and syntactical features from ADReSS Challenge data and trained it on various models, with BERT performing the best.\nNovelty ???????\nSeems like results here are a strict subset of Zhu 2021. Same sets of dataprep of Antonsson 2021 but trained on a BERT now. Seem to do worse than Antonsson 2021 too.\nNotable Methods Essentially Antonsson 2021\nAlso performed MMSE score regression. Key Figs Table 7 training result This figure shows us that the results attained by training on extracted feature is past the state-of-the-art at the time.\nTable 4 These tables tells us the feature extracted\n","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.635945\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eextracted lexicographic and syntactical features from \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e data and trained it on various models, with BERT performing the best.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003e???????\u003c/p\u003e\n\u003cp\u003eSeems like results here are a strict subset of \u003ca href=\"/posts/kbhzhu_2021/\"\u003eZhu 2021\u003c/a\u003e. Same sets of dataprep of \u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e but trained on a BERT now. 
Seem to do worse than \u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e too.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eEssentially \u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAlso performed \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e score regression.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"table-7-training-result\"\u003eTable 7 training result\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-47-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure shows us that the results attained by training on extracted feature is past the state-of-the-art at the time.\u003c/p\u003e\n\u003ch3 id=\"table-4\"\u003eTable 4\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-48-55_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-48-59_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThese tables tells us the feature extracted\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbalagopalan_2021/","tags":["ntj"],"title":"Balagopalan 2021"},{"categories":null,"contents":"A basis is a list of vectors in \\(V\\) that spans \\(V\\) and is linearly independent\nconstituents a LIST! of vectors in vector space \\(V\\) requirements the list is\u0026hellip; linear independent spans \\(V\\) additional information criteria for basis A list \\(v_1, \\dots v_{n}\\) of vectors in \\(V\\) is a basis of \\(V\\) IFF every \\(v \\in V\\) can be written uniquely as:\n\\begin{equation} v = a_1v_1+ \\dots + a_{n}v_{n} \\end{equation}\nwhere \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\).\nforward direction Suppose we have \\(v_1, \\dots, v_{n}\\) as the basis in \\(V\\). 
We desire that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\).\nBy definition, they span \\(V\\) and are linear independent in \\(V\\).\nBecause of the spanning quality, there exists at least one set of \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\) such that we can write:\n\\begin{equation} v \\in V = a_1v_1+ \\dots + a_{n}v_{n} \\end{equation}\nSuppose now that we have another representation of \\(v\\) via scalars \\(c_1, \\dots, c_{n}\\) and our same list of vectors:\n\\begin{equation} v \\in V =^{?} c_1v_1+ \\dots + c_{n}v_{n} \\end{equation}\nSubtracting the two expressions, we have that:\n\\begin{equation} 0 = (a_1-c_1)v_1 + \\dots +(a_{n}-c_{n}) v_{n} \\end{equation}\nBy definition that \\(v_1 \\dots v_{n}\\) is linearly independent, we have that \\(a_j-c_j=0 \\implies a_{j}=c_{j}\\). Therefore, there is only one unique representation for \\(v\\) as a linear combination of vectors \\(v_1, \\dots v_{n}\\).\n(to be honest, we could have just applied that as the definition of linear independence that the scalars in a linear combo of linearly independent list is unique but this is the more careful definition.)\nbackward direction Suppose we have a list \\(v_1, \\dots v_{n}\\) which uniquely constructs each \\(v \\in V\\). We desire that \\(v_1, \\dots v_{n}\\) is a basis in \\(V\\). Given a linear combination thereof can construct all \\(v \\in V\\), we can say that \\(v_1, \\dots v_{n}\\) spans \\(V\\).\nAs \\(V\\) is a vector space, we have \\(0 \\in V\\). Therefore, there exists some scalars \\(a_1, \\dots a_{n}\\) for which:\n\\begin{equation} 0 = a_1v_1 + \\dots +a_{n}v_{n} \\end{equation}\n(as we already established \\(v_1, \\dots, v_{n}\\) spans \\(V\\) and \\(0 \\in V\\))\nOf course, we are given that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\). 
As the trivial solution does exist: that \\(a_1 = \\dots = a_{n} = 0\\), it is the only solution.\nBy definition of linear independence, then, \\(v_1, \\dots v_{n}\\) is linearly independent. Having constructed that \\(v_1, \\dots v_{n}\\) is both a spanning set in \\(V\\) and are linearly independent, we have that they are a basis of \\(V\\). \\(\\blacksquare\\)\nDualing Basis Construction These are two results that says: \u0026ldquo;you can build up a linearly independent list to a basis or you can pluck away a spanning list to a basis\u0026rdquo;.\nall spanning lists contains a basis of which you are spanning Every spanning list in \\(V\\) contains the basis (and possibly some more) in \\(V\\).\nRead: \u0026ldquo;apply Linear Dependence Lemma your way to success\u0026rdquo;.\nBegin with a spanning list \\(v_1, \\dots v_{m}\\) of \\(V\\). We run a for loop for the list.\nStep 0:\nIf \\(v_1=0\\) (i.e. \\(v_1 \\in span(\\{\\})\\)), delete \\(v_1\\). Otherwise, do nothing.\nStep \\(j\\):\nIf \\(v_{j}\\) is in \\(span(v_1, \\dots v_{j-1})\\), \\(v_{j}\\) satisfies the Linear Dependence Lemma\u0026rsquo;s first condition, and therefore naturally satisfies the second condition (removal from list keeps the same span because \\(v_{j}\\) can just be rewritten from \\(v_1, \\dots v_{j-1}\\)).\nSo we remove \\(v_{j}\\) if it is indeed in the span of the previous vectors. By the Linear Dependence Lemma, the new list spans the same space the old list.\nConclusion\nBy the end of this process, no vectors left in the list will satisfy the Linear Dependence Lemma (read: we got rid of all of them.) Therefore, the list is linearly independent. However, every step of the way the Linear Dependence Lemma ensures that the new list spans the same space; therefore, the new list still spans \\(V\\). 
Having constructed a linearly independent list that spans \\(V\\), we declare the new list as a basis of \\(V\\).\nAs all we did was pluck vectors out of the old list, the new list is a sublist of the old list. This means that the spanning list (old list) contains the new list, which is a basis. \\(\\blacksquare\\)\na linearly independent list expends to a basis Every linearly independent list of vectors in finite-dimensional vector spaces can be extended to a basis.\nRecall first that every finite-dimensional vector space has a basis.\nLet\u0026rsquo;s begin with a linearly independent list in \\(V\\) \\(u_1, \\dots u_{m}\\). Let\u0026rsquo;s recruit also a basis of \\(V\\): \\(w_{1}, \\dots w_{m}\\).\nNaturally: \\(u_1, \\dots u_{m}, w_1, \\dots w_{m}\\) spans \\(V\\) (as the \\(w\\) vectors already span \\(V\\)). We will now apply the fact that all spanning lists contains a basis of which you are spanning (the order of \\(u\\) vectors first and \\(w\\) vectors second ensuring that you try to remove the \\(w\\), and, as \\(u\\) are linearly independent, none of them will be removed) to get back a basis in \\(V\\) consisting of all \\(u\\) and some \\(w\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eA basis is a list of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(V\\) that \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\) and is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea LIST! 
of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe list is\u0026hellip;\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"criteria-for-basis--kbhbasis-dot-md\"\u003ecriteria for \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eA list \\(v_1, \\dots v_{n}\\) of vectors in \\(V\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e every \\(v \\in V\\) can be written uniquely as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1v_1+ \\dots + a_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\).\u003c/p\u003e\n\u003ch4 id=\"forward-direction\"\u003eforward direction\u003c/h4\u003e\n\u003cp\u003eSuppose we have \\(v_1, \\dots, v_{n}\\) as the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\). 
We desire that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eBy definition, they \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e \\(V\\) and are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independent\u003c/a\u003e in \\(V\\).\u003c/p\u003e\n\u003cp\u003eBecause of the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e quality, there exists \u003cem\u003eat least\u003c/em\u003e one set of \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\) such that we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv \\in V = a_1v_1+ \\dots + a_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSuppose now that we have another representation of \\(v\\) via scalars \\(c_1, \\dots, c_{n}\\) and our same list of vectors:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv \\in V =^{?} c_1v_1+ \\dots + c_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubtracting the two expressions, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (a_1-c_1)v_1 + \\dots +(a_{n}-c_{n}) v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy definition that \\(v_1 \\dots v_{n}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, we have that \\(a_j-c_j=0 \\implies a_{j}=c_{j}\\). Therefore, there is only one unique representation for \\(v\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of vectors \\(v_1, \\dots v_{n}\\).\u003c/p\u003e\n\u003cp\u003e(to be honest, we could have just applied that as the definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e that the scalars in a linear combo of linearly independent list is unique but this is the more careful definition.)\u003c/p\u003e\n\u003ch4 id=\"backward-direction\"\u003ebackward direction\u003c/h4\u003e\n\u003cp\u003eSuppose we have a list \\(v_1, \\dots v_{n}\\) which uniquely constructs each \\(v \\in V\\). 
We desire that \\(v_1, \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\). Given a linear combination thereof can construct all \\(v \\in V\\), we can say that \\(v_1, \\dots v_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs \\(V\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e, we have \\(0 \\in V\\). Therefore, there exists some scalars \\(a_1, \\dots a_{n}\\) for which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_1v_1 + \\dots +a_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(as we already established \\(v_1, \\dots, v_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\) and \\(0 \\in V\\))\u003c/p\u003e\n\u003cp\u003eOf course, we are given that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\). As the trivial solution \u003cem\u003edoes\u003c/em\u003e exist: that \\(a_1 = \\dots = a_{n} = 0\\), it is the only solution.\u003c/p\u003e\n\u003cp\u003eBy definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e, then, \\(v_1, \\dots v_{n}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. Having constructed that \\(v_1, \\dots v_{n}\\) is both a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e set in \\(V\\) and are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, we have that they are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"dualing-basis-construction\"\u003eDualing Basis Construction\u003c/h3\u003e\n\u003cp\u003eThese are two results that says: \u0026ldquo;you can build up a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e or you can pluck away a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch4 id=\"all-spanning--kbhspan-dot-md--lists-contains-a-basis--kbhbasis-dot-md--of-which-you-are-spanning--kbhspan-dot-md\"\u003eall \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e lists contains a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of which you are \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list in \\(V\\) contains the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e (and possibly some more) in \\(V\\).\u003c/p\u003e\n\u003cp\u003eRead: \u0026ldquo;apply \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e your way to success\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eBegin with a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list \\(v_1, \\dots v_{m}\\) of \\(V\\). We run a for loop for the list.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eStep 0:\u003c/p\u003e\n\u003cp\u003eIf \\(v_1=0\\) (i.e. \\(v_1 \\in span(\\{\\})\\)), delete \\(v_1\\). 
Otherwise, do nothing.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eStep \\(j\\):\u003c/p\u003e\n\u003cp\u003eIf \\(v_{j}\\) is in \\(span(v_1, \\dots v_{j-1})\\), \\(v_{j}\\) satisfies the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u0026rsquo;s first condition, and therefore naturally satisfies the second condition (removal from list keeps the same \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e because \\(v_{j}\\) can just be rewritten from \\(v_1, \\dots v_{j-1}\\)).\u003c/p\u003e\n\u003cp\u003eSo we remove \\(v_{j}\\) if it is indeed in the span of the previous vectors. By the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, the new list \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e the same space the old list.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eConclusion\u003c/p\u003e\n\u003cp\u003eBy the end of this process, no vectors left in the list will satisfy the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e (read: we got rid of all of them.) Therefore, the list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. However, every step of the way the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e ensures that the new list \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e the same space; therefore, the new list still \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). 
Having constructed a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list that \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\), we declare the new list as a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs all we did was pluck vectors out of the old list, the new list is a sublist of the old list. This means that the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list (old list) contains the new list, which is a basis. \\(\\blacksquare\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"a-linearly-independent--kbhlinear-independence-dot-md--list-expends-to-a-basis--kbhbasis-dot-md\"\u003ea \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list expends to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003es can be extended to a basis.\u003c/p\u003e\n\u003cp\u003eRecall first that \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery finite-dimensional vector space has a basis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin with a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\) \\(u_1, \\dots u_{m}\\). 
Let\u0026rsquo;s recruit also a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\): \\(w_{1}, \\dots w_{m}\\).\u003c/p\u003e\n\u003cp\u003eNaturally: \\(u_1, \\dots u_{m}, w_1, \\dots w_{m}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\) (as the \\(w\\) vectors already \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e \\(V\\)). We will now apply the fact that \u003ca href=\"#all-spanning--kbhspan-dot-md--lists-contains-a-basis--kbhbasis-dot-md--of-which-you-are-spanning--kbhspan-dot-md\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e (the order of \\(u\\) vectors first and \\(w\\) vectors second ensuring that you try to remove the \\(w\\), and, as \\(u\\) are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, none of them will be removed) to get back a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\) consisting of all \\(u\\) and some \\(w\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbasis/","tags":null,"title":"basis"},{"categories":null,"contents":"Suppose \\(v_1, \\dots v_{n} \\in V\\) is a basis of some vector space \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) is just a good\u0026rsquo;ol list of length \\(n= \\dim V\\) in \\(W\\).\nThere exists a unique linear map \\(T \\in \\mathcal{L}(V,W)\\) such that\u0026hellip;\n\\begin{equation} Tv_{j} = w_{j} \\end{equation}\nfor each \\(j = 1, \\dots n\\)\nIntuition The layperson\u0026rsquo;s explanation of this result: 1) that, for everywhere you want to take the basis of one space, there\u0026rsquo;s always a unique linear map to take you there. 
2) that, a linear map is determined uniquely by what it does to the basis of its domain.\nProof We have two vector spaces, \\(V\\) and \\(W\\); \\(v_1, \\dots v_{n} \\in V\\) forms a basis of \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) are just some vectors in \\(W\\).\nDefinition We define some \\(T: V \\to W\\) as follows:\n\\begin{equation} T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n} \\end{equation}\nwhere, \\(c_1, \\dots c_{n} \\in \\mathbb{F}\\). Note that the actual values of \\(c\\) doesn\u0026rsquo;t actually matter here.\nExistence We now show that the \\(T\\) defined above has the property of mapping \\(Tv_{j} \\to w_{j}\\).\nAs the basis \\(v_1, \\dots v_{n}\\) is a spanning list of \\(V\\), some \\(T\\) that takes an arbitrary linear combination of \\(v\\) as input does indeed have domain \\(V\\). Due to addition\u0026rsquo;s closure, a linear combination of \\(w\\) is \\(\\in W\\). This makes \\(T\\) at least a function from \\(V \\to W\\).\nOf course, by taking all \\(c_{i}\\) to \\(0\\) except for the index \\(c_{j}\\) you are interested in to \\(1\\), you can show that this \\(T\\) takes \\(v_{j}\\) to \\(w_{j}\\).\nWe now show that \\(T\\) is a Linear Map. This part proof is just route algebra so I won\u0026rsquo;t type it again.\nUniqueness Suppose there is a Linear Map that has the desired property: that \\(T \\in \\mathcal{L}(V,W)\\) and that \\(Tv_{j}=w_{j}, \\forall j=1, \\dots n\\). For any scalar \\(c_{j}\\), the homogeneity of \\(T\\) indicates that this same \\(T\\) has to take \\(T(c_{j}v_{j}) = c_{j}Tv_{j} = c_{j}w_{j}\\).\nNow, the additivity of \\(T\\) also indicates that we can string these \\(c_{j} v_{j}\\) together in the same \\(T\\); that:\ngiven \\(T(c_{j}v_{j}) = c_{j}w_{j}\\), we can just string it all together to get \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1w_1+ \\dots + c_{n}w_{n}\\).\nThis means that there is only one \\(T\\) that behaves in the way that we desire, on the span of \\(v_1 \\dots v_{n}\\). 
Those vectors being the basis, their span is just the domain \\(V\\). This makes \\(T\\) uniquely determined on \\(V\\) as we were able to construct the original given map simply by following the rules of the Linear Map.\n","html":"\u003cp\u003eSuppose \\(v_1, \\dots v_{n} \\in V\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of some \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) is just a good\u0026rsquo;ol list of length \\(n= \\dim V\\) in \\(W\\).\u003c/p\u003e\n\u003cp\u003eThere exists a unique linear map \\(T \\in \\mathcal{L}(V,W)\\) such that\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv_{j} = w_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor each \\(j = 1, \\dots n\\)\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eThe layperson\u0026rsquo;s explanation of this result: 1) that, for everywhere you want to take the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of one space, there\u0026rsquo;s always a unique linear map to take you there. 2) that, a linear map is determined uniquely by what it does to the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of its domain\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003cp\u003eWe have two vector spaces, \\(V\\) and \\(W\\); \\(v_1, \\dots v_{n} \\in V\\) forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) are just some \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(W\\).\u003c/p\u003e\n\u003ch3 id=\"definition\"\u003eDefinition\u003c/h3\u003e\n\u003cp\u003eWe define some \\(T: V \\to W\\) as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(c_1, \\dots c_{n} \\in \\mathbb{F}\\). 
Note that the actual \u003cem\u003evalues\u003c/em\u003e of \\(c\\) doesn\u0026rsquo;t actually matter here.\u003c/p\u003e\n\u003ch3 id=\"existence\"\u003eExistence\u003c/h3\u003e\n\u003cp\u003eWe now show that the \\(T\\) defined above has the property of mapping \\(Tv_{j} \\to w_{j}\\).\u003c/p\u003e\n\u003cp\u003eAs the basis \\(v_1, \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\), some \\(T\\) that takes an arbitrary \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v\\) as input does indeed have domain \\(V\\). Due to \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e\u0026rsquo;s closure, a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(w\\) is \\(\\in W\\). This makes \\(T\\) at least a function from \\(V \\to W\\).\u003c/p\u003e\n\u003cp\u003eOf course, by taking all \\(c_{i}\\) to \\(0\\) except for the index \\(c_{j}\\) you are interested in to \\(1\\), you can show that this \\(T\\) takes \\(v_{j}\\) to \\(w_{j}\\).\u003c/p\u003e\n\u003cp\u003eWe now show that \\(T\\) is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e. This part proof is just route algebra so I won\u0026rsquo;t type it again.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-02_22-26-02_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"uniqueness\"\u003eUniqueness\u003c/h3\u003e\n\u003cp\u003eSuppose there is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e that has the desired property: that \\(T \\in \\mathcal{L}(V,W)\\) and that \\(Tv_{j}=w_{j}, \\forall j=1, \\dots n\\). 
For any scalar \\(c_{j}\\), the homogeneity of \\(T\\) indicates that this same \\(T\\) has to take \\(T(c_{j}v_{j}) = c_{j}Tv_{j} = c_{j}w_{j}\\).\u003c/p\u003e\n\u003cp\u003eNow, the additivity of \\(T\\) also indicates that we can string these \\(c_{j} v_{j}\\) together in the same \\(T\\); that:\u003c/p\u003e\n\u003cp\u003egiven \\(T(c_{j}v_{j}) = c_{j}w_{j}\\), we can just string it all together to get \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1w_1+ \\dots + c_{n}w_{n}\\).\u003c/p\u003e\n\u003cp\u003eThis means that there is only one \\(T\\) that behaves in the way that we desire, on the span of \\(v_1 \\dots v_{n}\\). Those vectors being the basis, their span is just the domain \\(V\\). This makes \\(T\\) uniquely determined on \\(V\\) as we were able to construct the original given map simply by following the rules of the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbasis_of_domain/","tags":null,"title":"basis of domain"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhbatchalign/","tags":null,"title":"batchalign"},{"categories":null,"contents":"Things to include Rev How to handle interspersed results Utterance segmentation Why --prealigned and the overall performance of MFA Beginning/End Bullet and why we throw away Rev\u0026rsquo;s output fixbullets and manual utterance segmentation \u0026amp;*INV= interspersed comments ","html":"\u003ch2 id=\"things-to-include\"\u003eThings to include\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRev\u003c/li\u003e\n\u003cli\u003eHow to handle interspersed results\u003c/li\u003e\n\u003cli\u003eUtterance segmentation\u003c/li\u003e\n\u003cli\u003eWhy \u003ccode\u003e--prealigned\u003c/code\u003e and the overall performance of MFA\u003c/li\u003e\n\u003cli\u003eBeginning/End Bullet and why we throw away Rev\u0026rsquo;s output\u003c/li\u003e\n\u003cli\u003efixbullets and manual utterance 
segmentation\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e\u0026amp;*INV=\u003c/code\u003e interspersed comments\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbatchalign_paper_outline/","tags":null,"title":"Batchalign Paper Outline"},{"categories":null,"contents":"For some Baysian Network situation, you will note that there\u0026rsquo;s some bodge of values below:\n\\begin{equation} P(A|M) = \\frac{P(M|A)P(A)}{P(M)} \\end{equation}\nif we are only interested in a function in terms of different values of \\(a\\), \\(P(M)\\) is not that interesting. Therefore, we can just calculate \\(A\\) for all \\(a\\), and then normalize it to sum to 1:\n\\begin{equation} P(A|M) \\propto P(M|A)P(A) \\end{equation}\nand then, after calculating each \\(P(M|A)P(A)\\) , we just ensure that each thing sums to one.\n","html":"\u003cp\u003eFor some \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e situation, you will note that there\u0026rsquo;s some bodge of values below:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A|M) = \\frac{P(M|A)P(A)}{P(M)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we are only interested in a function in terms of different values of \\(a\\), \\(P(M)\\) is not that interesting. 
Therefore, we can just calculate \\(A\\) for all \\(a\\), and then normalize it to sum to 1:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A|M) \\propto P(M|A)P(A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, after calculating each \\(P(M|A)P(A)\\) , we just ensure that each thing sums to one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbayes_normalization_constant/","tags":null,"title":"Bayes Normalization Constant"},{"categories":null,"contents":"\\begin{align} p(x\\mid y) = \\frac{p(y \\mid x) p(x)}{p(y)} \\end{align}\nthis is a direct result of the probability chain rule.\nTypically, we name \\(p(y|x)\\) the \u0026ldquo;likelihood\u0026rdquo;, \\(p(x)\\) the \u0026ldquo;prior\u0026rdquo;.\nBetter normalization What if you don\u0026rsquo;t fully know \\(p(y)\\), say it was parameterized over \\(x\\)?\n\\begin{align} p(x|y) \u0026amp;= \\frac{p(y|x) \\cdot p(x)}{p(y)} \\\\ \u0026amp;= \\frac{p(y|x) \\cdot p(x)}{\\sum_{X_{i}} p(y|X_{i})} \\end{align}\njust apply law of total probability! taad\n","html":"\u003cp\u003e\\begin{align}\np(x\\mid y) = \\frac{p(y \\mid x) p(x)}{p(y)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethis is a direct result of the \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTypically, we name \\(p(y|x)\\) the \u0026ldquo;likelihood\u0026rdquo;, \\(p(x)\\) the \u0026ldquo;prior\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"better-normalization\"\u003eBetter normalization\u003c/h2\u003e\n\u003cp\u003eWhat if you don\u0026rsquo;t fully know \\(p(y)\\), say it was parameterized over \\(x\\)?\u003c/p\u003e\n\u003cp\u003e\\begin{align}\np(x|y) \u0026amp;= \\frac{p(y|x) \\cdot p(x)}{p(y)} \\\\\n\u0026amp;= \\frac{p(y|x) \\cdot p(x)}{\\sum_{X_{i}} p(y|X_{i})}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ejust apply \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e! 
taad\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbayes_theorem/","tags":null,"title":"Bayes Theorem"},{"categories":null,"contents":"\\begin{equation} P(B=b | D=d) = P(D=d|B=b) P(B=b) k \\end{equation}\nwhere, \\(P(B=b | D=d)\\) is your \u0026ldquo;posterior\u0026rdquo;; \\(P(D=d|B=b)\\) is your likelyhood; and \\(P(B=b)\\) is your prior.\n","html":"\u003cp\u003e\\begin{equation}\nP(B=b | D=d) = P(D=d|B=b) P(B=b) k\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(P(B=b | D=d)\\) is your \u0026ldquo;posterior\u0026rdquo;; \\(P(D=d|B=b)\\) is your likelyhood; and \\(P(B=b)\\) is your prior.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbayes_theorem_over_random_variable/","tags":null,"title":"Bayes Theorem Over Random Variable"},{"categories":null,"contents":"A Baysian Network is composed of:\na directed, acyclic graph a set of conditional probabilities acting as factors. You generally want arrows to go in the direction of causality.\nVia the chain rule of Bayes nets, we can write this equivalently as:\n\\begin{equation} (P(B) \\cdot P(S)) \\cdot P(E \\mid B,S) \\cdot P(D \\mid E) \\cdot P(C \\mid E) \\end{equation}\ngenerally, for \\(n\\) different variables,\n\\begin{equation} \\prod_{i=1}^{n} p(X_{i} \\mid pa(x_{i})) \\end{equation}\nwhere, \\(pa(x_{i})\\) are the parent values of \\(x_{i}\\).\nconditional independence \\(X\\) and \\(Y\\) are conditionally independent given \\(Z\\) IFF:\n\\begin{equation} P(X, Y|Z) = P(X|Z) \\cdot P(Y|Z) \\end{equation}\n(\u0026ldquo;two variables are conditionally independent if they exhibit independence conditioned on \\(Z\\)\u0026rdquo;)\nthis is equivalent to saying:\n\\begin{equation} P(X|Z) = P(X|Y,Z) \\end{equation}\n(\u0026ldquo;two variables are conditionally independent if the inclusion of the evidence of another set into the condition doesn\u0026rsquo;t influence the outcome if they are both conditioned on \\(Z\\)\u0026rdquo;)\nWe write:\n\\begin{equation} X \\perp Y \\mid Z 
\\end{equation}\nThe network above has an important property: conditions \\(B\\) and \\(S\\) are independent; and conditions \\(D\\) and \\(C\\) are independent. Though they all depended on \\(E\\), each pair is conditionally independent.\nchecking for conditional independence \\((A \\perp B \\mid C)\\) IFF ALL undirected paths from \\(A\\) to \\(B\\) on a Baysian Network exhibits d seperation, whose conditions are below:\nA path is d-seperated by \\(C\\), the set of evidence if ANY of the following:\nthe path contains a chain of nodes: \\(X \\to Y \\to Z\\) where \\(Y \\in C\\) the path contains a fork: \\(X \\leftarrow C \\to Z\\), where \\(Y \\in C\\) the path contains a inverted fork: \\(X \\to Y \\leftarrow Z\\), where \\(Y\\) is not in \\(C\\) and no descendent of \\(Y\\) is in \\(C\\). Note that \\(C\\) can be empty. This is why, \\(B,S\\) is conditionally independent on nothing on that graph above, so they are just actually independent.\nIf the structure does not imply conditional independence, it does NOT mean that the structure is conditionally dependent. It could still be conditionally independent. 
end{equation}\nmarkov blanket the markov blanket of node \\(X\\) is the minimal set of nodes on a Baysian Network which renders \\(X\\) conditionally independent from all other nodes not in the blanket.\nIt includes, at most:\nnode\u0026rsquo;s parenst node\u0026rsquo;s chlidren other parents of node\u0026rsquo;s children Check that you need all of these values: frequently, you don\u0026rsquo;t\u0026mdash;simply selecting a subset of this often d-seperates the node from everyone else.\nparameter learning in Baysian Network Let:\n\\(x_{1:n}\\) be variables \\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took \\(G\\) is the graph \\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\)) \\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\) \\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator) What we want to learn from the graph, is:\n\\begin{equation} P(x_{i}=k | \\pi_{i,j}) = \\theta_{i,j,k} \\end{equation}\n\u0026ldquo;what\u0026rsquo;s the probability that \\(x_{i}\\) takes on value \\(k\\), given the state of \\(x_{i}\\)\u0026rsquo;s parents are \\(\\pi_{i,j}\\) right now?\u0026rdquo;\nLet us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\). 
This is usually represented programmatically as a set of matrices:\nTo learn the parameter as desired, we use:\n\\begin{equation} MLE\\ \\hat{\\theta}_{i,j,k} = \\frac{m_{i,j,k}}{\\sum_{k\u0026rsquo;} m_{i,j,k\u0026rsquo;}} \\end{equation}\nIn that: we want to sum up all possible value \\(x_{i}\\) takes on, and check how many times it takes on a certain value, given the conditions are the same.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e is composed of:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ea directed, acyclic graph\u003c/li\u003e\n\u003cli\u003ea set of \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probabilities\u003c/a\u003e acting as \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYou generally want arrows to go in the direction of causality.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-28_10-20-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eVia the chain rule of Bayes nets, we can write this equivalently as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(P(B) \\cdot P(S)) \\cdot P(E \\mid B,S) \\cdot P(D \\mid E) \\cdot P(C \\mid E)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egenerally, for \\(n\\) different variables,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\prod_{i=1}^{n} p(X_{i} \\mid pa(x_{i}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(pa(x_{i})\\) are the parent values of \\(x_{i}\\).\u003c/p\u003e\n\u003ch2 id=\"conditional-independence\"\u003econditional independence\u003c/h2\u003e\n\u003cp\u003e\\(X\\) and \\(Y\\) are \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e given \\(Z\\) IFF:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X, Y|Z) = P(X|Z) \\cdot P(Y|Z)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;two variables are \u003ca 
href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e if they exhibit \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e conditioned on \\(Z\\)\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003ethis is equivalent to saying:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X|Z) = P(X|Y,Z)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;two variables are \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e if the inclusion of the evidence of another set into the condition doesn\u0026rsquo;t influence the outcome if they are both conditioned on \\(Z\\)\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eWe write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\perp Y \\mid Z\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe network above has an important property: conditions \\(B\\) and \\(S\\) are independent; and conditions \\(D\\) and \\(C\\) are independent. Though they all depended on \\(E\\), each pair is \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"checking-for-conditional-independence\"\u003echecking for conditional independence\u003c/h3\u003e\n\u003cp\u003e\\((A \\perp B \\mid C)\\) IFF ALL undirected paths from \\(A\\) to \\(B\\) on a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e exhibits \u003ca href=\"#checking-for-conditional-independence\"\u003ed seperation\u003c/a\u003e, whose conditions are below:\u003c/p\u003e\n\u003cp\u003eA path is d-seperated by \\(C\\), the set of evidence if ANY of the following:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe path contains a chain of nodes: \\(X \\to Y \\to Z\\) where \\(Y \\in C\\)\u003c/li\u003e\n\u003cli\u003ethe path contains a fork: \\(X \\leftarrow C \\to Z\\), where \\(Y \\in C\\)\u003c/li\u003e\n\u003cli\u003ethe path contains a \u003ca href=\"#checking-for-conditional-independence\"\u003einverted 
fork\u003c/a\u003e: \\(X \\to Y \\leftarrow Z\\), where \\(Y\\) is \u003cstrong\u003enot\u003c/strong\u003e in \\(C\\) and no descendent of \\(Y\\) is in \\(C\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eNote that \\(C\\) can be empty. This is why, \\(B,S\\) is \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e on \u003cstrong\u003enothing\u003c/strong\u003e on that graph above, so they are just actually independent.\u003c/p\u003e\n\u003cp\u003eIf the structure does not imply \u003ca href=\"#conditional-independence\"\u003econditional independence\u003c/a\u003e, it does \u003cstrong\u003eNOT\u003c/strong\u003e mean that the structure is conditionally dependent. It could still be \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e.\nend{equation}\u003c/p\u003e\n\u003ch4 id=\"markov-blanket\"\u003emarkov blanket\u003c/h4\u003e\n\u003cp\u003ethe \u003ca href=\"#markov-blanket\"\u003emarkov blanket\u003c/a\u003e of node \\(X\\) is the minimal set of nodes on a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e which renders \\(X\\) \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e from all other nodes not in the blanket.\u003c/p\u003e\n\u003cp\u003eIt includes, at most:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003enode\u0026rsquo;s parenst\u003c/li\u003e\n\u003cli\u003enode\u0026rsquo;s chlidren\u003c/li\u003e\n\u003cli\u003eother parents of node\u0026rsquo;s children\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCheck that you need all of these values: frequently, you don\u0026rsquo;t\u0026mdash;simply selecting a subset of this often d-seperates the node from everyone else.\u003c/p\u003e\n\u003ch2 id=\"parameter-learning--kbhparameter-learning-dot-md--in-baysian-network--kbhbaysian-network-dot-md\"\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e in \u003ca 
href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x_{1:n}\\) be variables\u003c/li\u003e\n\u003cli\u003e\\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took\u003c/li\u003e\n\u003cli\u003e\\(G\\) is the graph\u003c/li\u003e\n\u003cli\u003e\\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\))\u003c/li\u003e\n\u003cli\u003e\\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\)\u003c/li\u003e\n\u003cli\u003e\\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat we want to learn from the graph, is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_{i}=k | \\pi_{i,j}) = \\theta_{i,j,k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability that \\(x_{i}\\) takes on value \\(k\\), given the state of \\(x_{i}\\)\u0026rsquo;s parents are \\(\\pi_{i,j}\\) right now?\u0026rdquo;\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\). 
This is usually represented programmatically as a set of matrices:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-10_09-47-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eTo learn the parameter as desired, we use:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nMLE\\ \\hat{\\theta}_{i,j,k} = \\frac{m_{i,j,k}}{\\sum_{k\u0026rsquo;} m_{i,j,k\u0026rsquo;}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn that: we want to sum up all possible value \\(x_{i}\\) takes on, and check how many times it takes on a certain value, given the conditions are the same.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbaysian_network/","tags":null,"title":"Baysian Network"},{"categories":null,"contents":"Representing conditional dependencies.\nSemiparametric Expert Bayes Net We use a Semiparametric Expert Bayes Net to learn the structure of the dynamics\u0026hellip;. of medicine somewhere?\nAtienza et al. 2022\nlearns semiparemetic relations in expert basian networks uses gaussian rocesses for modeling no-linear performances + horseshoe regularization 2401.16419\nResults UCI Liver Disorder Dataste\nwhat is the oracle graph? what specific dynamics did the model learn? ","html":"\u003cp\u003eRepresenting conditional dependencies.\u003c/p\u003e\n\u003ch2 id=\"semiparametric-expert-bayes-net\"\u003eSemiparametric Expert Bayes Net\u003c/h2\u003e\n\u003cp\u003eWe use a \u003ca href=\"#semiparametric-expert-bayes-net\"\u003eSemiparametric Expert Bayes Net\u003c/a\u003e to learn the structure of the dynamics\u0026hellip;. of medicine somewhere?\u003c/p\u003e\n\u003cp\u003eAtienza et al. 
2022\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003elearns semiparemetic relations in expert basian networks\u003c/li\u003e\n\u003cli\u003euses gaussian rocesses for modeling no-linear performances + horseshoe regularization\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e2401.16419\u003c/p\u003e\n\u003ch2 id=\"results\"\u003eResults\u003c/h2\u003e\n\u003cp\u003eUCI Liver Disorder Dataste\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhat is the oracle graph?\u003c/li\u003e\n\u003cli\u003ewhat specific dynamics did the model learn?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbaysian_networks_for_healthcare/","tags":null,"title":"Baysian Networks for Healthcare"},{"categories":null,"contents":"We treat this as an inference problem in Naive Bayes: observations are independent from each other.\nInstead of trying to compute a \\(\\theta\\) that works for Maximum Likelihood Parameter Learning, what we instead do is try to understand what \\(\\theta\\) can be in terms of a distribution.\nThat is, we want to get some:\n\u0026ldquo;for each value of \\(\\theta\\), what\u0026rsquo;s the chance that that is the actual value\u0026rdquo;\nTo do this, we desire:\n\\begin{equation} p(\\theta | D) \\end{equation}\n\u0026ldquo;what\u0026rsquo;s the probability of theta being at a certain value given the observations we had.\u0026rdquo;\nAnd to obtain the actual the actual value, we calculate the expectation of this distribution:\n\\begin{equation} \\hat{\\theta} = \\mathbb{E}[\\theta] = \\int \\theta p(\\theta | D) \\dd{\\theta} \\end{equation}\nIf its not possible to obtain such an expected value, we then calculate just the mode of the distribution (like where the peak probability of \\(\\theta\\) is) by:\n\\begin{equation} \\hat{\\theta} = \\arg\\max_{\\theta} p(\\theta | D) \\end{equation}\nBayesian Parameter Learning on Binary Distributions We are working in a Naive Bayes environment, where we assume that \\(o_{1:m}\\) are conditionally 
independent. Then, we essentially consider each class as carrying some parameter \\(\\theta\\) which contains the possibility of that class happening.\nUsing the same steps as inference with Naive Bayes and some algebra:\n\\begin{equation} p(\\theta | o_{1:m}) \\propto p(\\theta, o_{1:m}) \\end{equation}\nNow, we would like to normalize this function for \\(\\theta \\in [0,1]\\), so, we get:\n\\begin{equation} \\int_{0}^{1} \\theta^{n}(1-\\theta)^{m-n}\\dd{\\theta} = \\frac{\\Gamma(n+1) \\Gamma(m-n+1)}{\\Gamma(m+2)} \\end{equation}\nwhere, \\(\\Gamma\\) is a real valued factorial generalization, and this entire integral is often called the \u0026ldquo;Beta Function\u0026rdquo;\nNormalizing the output, we have that:\n\\begin{align} p(\\theta | o_{1:m}) \u0026amp;\\propto p(\\theta, o_{1:m}) \\\\ \u0026amp;= \\frac{\\Gamma(m+2)}{\\Gamma(n+1) \\Gamma(m-n+1)} \\theta^{n} (1-\\theta)^{m-n} \\\\ \u0026amp;= Beta(\\theta | n+1, m-n +1) \\end{align}\nwhere \\(m\\) is the sample size and \\(n\\) is the number of events in the sample space.\nBeta Distribution Suppose you had a non-uniform prior:\nPrior: \\(Beta(\\alpha, \\beta)\\) Observe: \\(m_1\\) positive outcomes, \\(m_2\\) negative outcomes Posterior: \\(Beta(\\alpha+m_1, \\beta+m_2)\\) That is: for binary outcomes, the beta distribution can be updated without doing any math.\nFor instance, say we had:\n\\begin{equation} \\theta_{t} = Beta(\\alpha, \\beta) \\end{equation}\nand we observed that \\(o_{i} = 1\\), then:\n\\begin{equation} \\theta_{t+1} = Beta(\\alpha+1, \\beta) \\end{equation}\ninstead, if we observed that \\(o_{i} = 0\\), then:\n\\begin{equation} \\theta_{t+1} = Beta(\\alpha, \\beta+1) \\end{equation}\nEssentially: MAGNITUDE of beta distribution governs how small the spread is (higher magnitude smaller spread), and the balance between the two values represents how much skew there is.\nBeta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has mean:\n\\begin{equation} 
\\frac{\\alpha}{\\alpha + \\beta} \\end{equation}\nand variance:\n\\begin{equation} \\frac{ab}{(a+b)^{2}(a+b+1)} \\end{equation}\nand has mode:\n\\begin{equation} \\frac{\\alpha -1 }{\\alpha + \\beta -2} \\end{equation}\nwhen \\(\\alpha \u0026gt; 1\\) and \\(\\beta \u0026gt; 1\\).\nThis means that, at \\(beta(1,1)\\), we have a inform distribution\nLaplace Smoothing Laplace Smoothing is a prior where:\n\\begin{equation} prior\\ X \\sim Beta(2,2) \\end{equation}\nso you just add \\(2\\) to each of our output pseudo counts.\nsee also Laplace prior, where you use Laplace Smoothing for your prior\nTotal Probability in beta distributions Recall, for total probability, beta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has expectation:\n\\begin{equation} \\frac{\\alpha}{\\alpha + \\beta} \\end{equation}\nand has mode:\n\\begin{equation} \\frac{\\alpha -1 }{\\alpha + \\beta -2} \\end{equation}\nChoosing a prior do it with only the problem and no knowledge of the data uniform typically works well, but if you have any reason why it won\u0026rsquo;t be uniform (say coin flip), you should count accordingly such as making the distribution more normal with \\(Beta(1,1)\\) Dirichlet Distribution We can generalize the Bayesian Parameter Learning on Binary Distributions with the Dirichlet Distribution.\nFor \\(n\\) parameters \\(\\theta_{1:n}\\) (\\(n-1\\) of which independent, because we know that \\(\\sum \\theta_{i} = 1\\)), where \\(\\theta_{j}\\) is the probability that the \\(j\\) th case of the categorical distribution happening.\nNow:\n\\begin{equation} Dir(\\theta_{1:n} | \\alpha) = \\frac{\\Gamma(\\alpha_{0})}{\\prod_{i=1}^{n} \\Gamma(\\alpha_{i})} \\prod_{i=1}^{n} \\theta_{i}^{\\alpha_{i}-1} \\end{equation}\nwhereby:\n\\begin{equation} \\alpha_{j} = prior + count \\end{equation}\nfor \\(j \\geq 1\\), and\n\\begin{equation} \\alpha_{0} = prior + total_{}count \\end{equation}\nwhereby prior is your initial distribution. 
If its uniform, then all prior equals one.\nThe expectation for each \\(\\theta_{i}\\) happening is:\n\\begin{equation} \\mathbb{E}[\\theta_{i}] = \\frac{a_{i}}{\\sum_{j=1}^{n} \\alpha_{j}} \\end{equation}\nand, with \\(a_{i} \u0026gt; 1\\), the $i$th mode is:\n\\begin{equation} \\frac{a_{i}-1 }{\\sum_{j=1}^{n} a_{j}-n} \\end{equation}\nexpectation of a distribution For Beta Distribution and Dirichlet Distribution, the expectation of their distribution is simply their mean.\nif you say want to know what the probability of \\(P(thing|D)\\), you can integrate over all \\(P(thing|\\theta)\\):\n\\begin{equation} \\int^{1}_{0} P(thing|\\theta)P(\\theta)d\\theta \\end{equation}\nThe first thing is just the actual value of \\(\\theta\\) (because \\(\\theta\\) is literally the probability of \\(thing\\) happening). The second thing is the probability of that \\(\\theta\\) actually happening.\nThis, of course, just add up to the expected value of \\(\\theta\\), which is given above:\n\\begin{equation} \\frac{\\alpha}{\\alpha + \\beta} \\end{equation}\n","html":"\u003cp\u003eWe treat this as an inference problem in \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e: \u003cstrong\u003eobservations are independent from each other\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eInstead of trying to compute a \\(\\theta\\) that works for \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e, what we instead do is try to understand what \\(\\theta\\) can be in terms of a distribution.\u003c/p\u003e\n\u003cp\u003eThat is, we want to get some:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_10-22-12_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u0026ldquo;for each value of \\(\\theta\\), what\u0026rsquo;s the chance that that is the actual value\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eTo do this, we 
desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\theta | D)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability of theta being at a certain value given the observations we had.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eAnd to obtain the actual the actual value, we calculate the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of this distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\mathbb{E}[\\theta] = \\int \\theta p(\\theta | D) \\dd{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf its not possible to obtain such an expected value, we then calculate just the mode of the distribution (like where the peak probability of \\(\\theta\\) is) by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\arg\\max_{\\theta} p(\\theta | D)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"bayesian-parameter-learning-on-binary-distributions\"\u003eBayesian Parameter Learning on Binary Distributions\u003c/h2\u003e\n\u003cp\u003eWe are working in a \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e environment, where we assume that \\(o_{1:m}\\) are \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditionally independent\u003c/a\u003e. 
Then, we essentially consider each class as carrying some parameter \\(\\theta\\) which contains the possibility of that class happening.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_14-55-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eUsing the same steps as \u003ca href=\"/posts/kbhnaive_bayes/#id-76165699-9f9a-4b7e-a081-c8462cece2ee-inference-with-id-6cdf74a2-2451-47d1-8a62-62aa6dff62c6-naive-bayes\"\u003einference with Naive Bayes\u003c/a\u003e and some algebra:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\theta | o_{1:m}) \\propto p(\\theta, o_{1:m})\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_17-20-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNow, we would like to normalize this function for \\(\\theta \\in [0,1]\\), so, we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{1} \\theta^{n}(1-\\theta)^{m-n}\\dd{\\theta} = \\frac{\\Gamma(n+1) \\Gamma(m-n+1)}{\\Gamma(m+2)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\Gamma\\) is a real valued factorial generalization, and this entire integral is often called the \u0026ldquo;\u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBeta Function\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eNormalizing the output, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\np(\\theta | o_{1:m}) \u0026amp;\\propto p(\\theta, o_{1:m}) \\\\\n\u0026amp;= \\frac{\\Gamma(m+2)}{\\Gamma(n+1) \\Gamma(m-n+1)} \\theta^{n} (1-\\theta)^{m-n} \\\\\n\u0026amp;= Beta(\\theta | n+1, m-n +1)\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere \\(m\\) is the sample size and \\(n\\) is the number of events in the sample space.\u003c/p\u003e\n\u003ch3 id=\"beta-distribution\"\u003eBeta Distribution\u003c/h3\u003e\n\u003cp\u003eSuppose you had a non-uniform prior:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ePrior: \\(Beta(\\alpha, \\beta)\\)\u003c/li\u003e\n\u003cli\u003eObserve: \\(m_1\\) positive 
outcomes, \\(m_2\\) negative outcomes\u003c/li\u003e\n\u003cli\u003ePosterior: \\(Beta(\\alpha+m_1, \\beta+m_2)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThat is: for binary outcomes, the beta distribution can be updated without doing any math.\u003c/p\u003e\n\u003cp\u003eFor instance, say we had:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{t} = Beta(\\alpha, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand we observed that \\(o_{i} = 1\\), then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{t+1} = Beta(\\alpha+1, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003einstead, if we observed that \\(o_{i} = 0\\), then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{t+1} = Beta(\\alpha, \\beta+1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEssentially: MAGNITUDE of beta distribution governs how small the spread is (higher magnitude smaller spread), and the balance between the two values represents how much skew there is.\u003c/p\u003e\n\u003cp\u003eBeta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has mean:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha}{\\alpha + \\beta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand variance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{ab}{(a+b)^{2}(a+b+1)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand has mode:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha -1 }{\\alpha + \\beta -2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhen \\(\\alpha \u0026gt; 1\\) and \\(\\beta \u0026gt; 1\\).\u003c/p\u003e\n\u003cp\u003eThis means that, at \\(beta(1,1)\\), we have a inform distribution\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_21-32-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"laplace-smoothing\"\u003eLaplace Smoothing\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#laplace-smoothing\"\u003eLaplace Smoothing\u003c/a\u003e is a prior 
where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nprior\\ X \\sim Beta(2,2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso you just add \\(2\\) to each of our output pseudo counts.\u003c/p\u003e\n\u003cp\u003esee also \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/#map-for-bernoulli-and-binomial-p\"\u003eLaplace prior\u003c/a\u003e, where you use \u003ca href=\"#laplace-smoothing\"\u003eLaplace Smoothing\u003c/a\u003e for your prior\u003c/p\u003e\n\u003ch3 id=\"total-probability-in-beta-distributions\"\u003eTotal Probability in beta distributions\u003c/h3\u003e\n\u003cp\u003eRecall, for total probability, beta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha}{\\alpha + \\beta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand has mode:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha -1 }{\\alpha + \\beta -2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"choosing-a-prior\"\u003eChoosing a prior\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edo it with only the problem and no knowledge of the data\u003c/li\u003e\n\u003cli\u003euniform typically works well, but if you have any reason why it won\u0026rsquo;t be uniform (say coin flip), you should count accordingly such as making the distribution more normal with \\(Beta(1,1)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dirichlet-distribution\"\u003eDirichlet Distribution\u003c/h2\u003e\n\u003cp\u003eWe can generalize the \u003ca href=\"#bayesian-parameter-learning-on-binary-distributions\"\u003eBayesian Parameter Learning on Binary Distributions\u003c/a\u003e with the \u003ca href=\"#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor \\(n\\) parameters \\(\\theta_{1:n}\\) (\\(n-1\\) of which independent, because we know that \\(\\sum \\theta_{i} = 1\\)), where \\(\\theta_{j}\\) is 
the probability that the \\(j\\) th case of the categorical distribution happening.\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nDir(\\theta_{1:n} | \\alpha) = \\frac{\\Gamma(\\alpha_{0})}{\\prod_{i=1}^{n} \\Gamma(\\alpha_{i})} \\prod_{i=1}^{n} \\theta_{i}^{\\alpha_{i}-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{j} = prior + count\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(j \\geq 1\\), and\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{0} = prior + total_{}count\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby prior is your initial distribution. If its uniform, then all prior equals one.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e for each \\(\\theta_{i}\\) happening is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[\\theta_{i}] = \\frac{a_{i}}{\\sum_{j=1}^{n} \\alpha_{j}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, with \\(a_{i} \u0026gt; 1\\), the $i$th mode is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{a_{i}-1 }{\\sum_{j=1}^{n} a_{j}-n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"expectation--kbhexpectation-dot-md--of-a-distribution\"\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of a distribution\u003c/h2\u003e\n\u003cp\u003eFor \u003ca href=\"#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e and \u003ca href=\"#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e, the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of their distribution is simply their mean.\u003c/p\u003e\n\u003cp\u003eif you say want to know what the probability of \\(P(thing|D)\\), you can integrate over all \\(P(thing|\\theta)\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int^{1}_{0} P(thing|\\theta)P(\\theta)d\\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe first thing is just the 
actual value of \\(\\theta\\) (because \\(\\theta\\) is literally the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \\(thing\\) happening). The second thing is the probability of that \\(\\theta\\) actually happening.\u003c/p\u003e\n\u003cp\u003eThis, of course, just add up to the expected value of \\(\\theta\\), which is given above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha}{\\alpha + \\beta}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbaysian_parameter_learning/","tags":null,"title":"Baysian Parameter Learning"},{"categories":null,"contents":"belief is a probability distribution over your states.\n\u0026ldquo;an informational state decoupled from motivational states\u0026rdquo;\n\\begin{equation} b \\leftarrow update(b,a,o) \\end{equation}\nThere are two main flavours of how to represent beliefs\nparametric: belief distribution is fully represented over all states by a set of parameters (categorical, gaussian, etc.) non-parametric: belief is represented by a non-weighted list of possible locations of where you are; such as a Particle Filter To update parametric beliefs, we can use a discrete state filter (for categorical belief distributions) or a Kalman Filter (for linear Gaussian). To update non-parametric beliefs, we can use a Particle Filter.\nIf we have an parametric belief that\u0026rsquo;s not categorical nor linear Gaussian, we can use Extended Kalman Filter or Unscented Kalman Filter to approximate a belief update.\nbelief update To update belief, we need to initialize it somehow. 
If you have no knowledge of the situation, you want to diffuse your initial distributions because you don\u0026rsquo;t want to be overconfident For non-parametric situations, this may cause logistical problems; so, you may need to make many observations before you can be confident enough to seed a belief observation model \\(O(o|a,s\u0026rsquo;)\\) is a model for what observations we may get if we are in a particular state/action.\nerror model there is some model which is a probability distribution over the state given observation:\nlet orange \\(d\\) be state, the green would be the error model\nfilters filters are how beliefs are updated from observation. \u0026ldquo;we want to perform localization\u0026rdquo;\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e is a \u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distribution\u003c/a\u003e over your states.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;an informational state decoupled from motivational states\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\leftarrow update(b,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThere are two main flavours of how to represent beliefs\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eparametric\u003c/strong\u003e: belief distribution is fully represented over all states by a set of parameters (categorical, \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enon-parametric\u003c/strong\u003e: belief is represented by a non-weighted list of possible locations of where you are; such as a \u003ca href=\"/posts/kbhfilters/#particle-filter\"\u003eParticle Filter\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eTo update \u003cstrong\u003eparametric\u003c/strong\u003e beliefs, we can use a \u003ca href=\"/posts/kbhfilters/#discrete-state-filter\"\u003ediscrete state filter\u003c/a\u003e (for 
categorical belief distributions) or a \u003ca href=\"/posts/kbhfilters/#kalman-filter\"\u003eKalman Filter\u003c/a\u003e (for linear Gaussian). To update \u003cstrong\u003enon-parametric\u003c/strong\u003e beliefs, we can use a \u003ca href=\"/posts/kbhfilters/#particle-filter\"\u003eParticle Filter\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf we have an \u003cstrong\u003eparametric\u003c/strong\u003e belief that\u0026rsquo;s not categorical nor linear Gaussian, we can use \u003ca href=\"/posts/kbhfilters/#extended-kalman-filter--kbhfilters-dot-md\"\u003eExtended Kalman Filter\u003c/a\u003e or \u003ca href=\"/posts/kbhfilters/#unscented-id-6800e7a8-729c-4654-adcc-e0f877079b6a-kalman-filter\"\u003eUnscented Kalman Filter\u003c/a\u003e to approximate a belief update.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"belief-update\"\u003ebelief update\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTo \u003ca href=\"#belief-update\"\u003eupdate belief\u003c/a\u003e, we need to initialize it somehow.\n\u003cul\u003e\n\u003cli\u003eIf you have no knowledge of the situation, you want to \u003cstrong\u003ediffuse\u003c/strong\u003e your initial distributions because you don\u0026rsquo;t want to be overconfident\u003c/li\u003e\n\u003cli\u003eFor non-parametric situations, this may cause logistical problems; so, you may need to make many observations before you can be confident enough to seed a belief\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"observation-model\"\u003eobservation model\u003c/h2\u003e\n\u003cp\u003e\\(O(o|a,s\u0026rsquo;)\\) is a model for what observations we may get if we are in a particular state/action.\u003c/p\u003e\n\u003ch3 id=\"error-model\"\u003eerror model\u003c/h3\u003e\n\u003cp\u003ethere is some model which is a probability distribution over the state given observation:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-11-09_10-01-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003elet orange \\(d\\) be state, the green would be the \u003ca href=\"#error-model\"\u003eerror model\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"filters--kbhfilters-dot-md\"\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e are how \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es are updated from observation. \u0026ldquo;we want to perform localization\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbelief/","tags":null,"title":"belief"},{"categories":null,"contents":"Motivation Imperfect sensors in robot control: partial observations Manipulators face tradeoff between sensing + acting curse of dimensionality and curse of history.\nBelief-Space Planning Perhaps we should plan over all possible distributions of state space, making a belief-state MDP.\nBut: this is a nonlinear, stochastic dynamic. In fact: there maybe stochastic events that affects dynamics.\nBig problem:\ndim(belief) \u0026gt;\u0026gt; dim(state) dim(belief) \u0026gt;\u0026gt; dim(action) Belief iLQR \u0026ldquo;determinize and replan\u0026rdquo;: simplify the dynamics at each step, plan, take action, and replan\ntracks belief via observations simplifies belief state dynamics based on linear MLE When the dynamics is linear, you can use Linear-Quadratic Regulator to solve. This results in a worse policy but will give you a policy.\nPrevious Work \u0026ldquo;just solve most-likely state\u0026rdquo;: doesn\u0026rsquo;t take action to explore and understand the state. 
\u0026ldquo;belief roadmap\u0026rdquo;: not really planning in the belief space itself Approach Belief Update We use Baysian updates for the state probably updates:\n\\begin{equation} P(s_{t+1}) = \\eta P(o_{t+1}|s_{t+1}) \\int_{x} p(_{t+1}|x, a_{t}) P(s) \\end{equation}\nand then the actual beliefs are updated with Extended Kalman Filter.\nImportantly, the Extended Kalman Filter usually requires us to take an expectation of each observation O over all O; instead, we assume that the future states are uniform linearly distributed.\nBelief Update Cost Ideally, we want to lower covariance of the belief vectors in order to be more confident.\nfirst term: reduce large trajectories (verify) second: stabilization Replanning Strategy while b not at goal: # replan at where we are at now (b, a, mean_b) = create_initial_plan(b); for depth d: a_t = solve_lqr_for_plan_at_time(b, a, mean_b) o = environment.step(a_t) b = extended_kalman(b, a, o) if mean(b) \u0026gt; max_allowed_belief_uncertainty: break ","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eImperfect sensors in robot control: \u003cstrong\u003epartial observations\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eManipulators face tradeoff between \u003cstrong\u003esensing\u003c/strong\u003e + \u003cstrong\u003eacting\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcurse_of_dimensionality/\"\u003ecurse of dimensionality\u003c/a\u003e and curse of history.\u003c/p\u003e\n\u003ch2 id=\"belief-space-planning\"\u003eBelief-Space Planning\u003c/h2\u003e\n\u003cp\u003ePerhaps we should plan over all possible distributions of state space, making a \u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBut: this is a \u003cstrong\u003enonlinear\u003c/strong\u003e, \u003cstrong\u003estochastic\u003c/strong\u003e dynamic. 
In fact: there maybe stochastic events that affects dynamics.\u003c/p\u003e\n\u003cp\u003eBig problem:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edim(\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e) \u0026gt;\u0026gt; dim(\u003ca href=\"\"\u003estate\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003edim(\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e) \u0026gt;\u0026gt; dim(\u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"belief-ilqr--kbhilqr-dot-md\"\u003e\u003ca href=\"/posts/kbhilqr/\"\u003eBelief iLQR\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;determinize and replan\u0026rdquo;: simplify the dynamics at each step, plan, take action, and replan\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etracks belief via observations\u003c/li\u003e\n\u003cli\u003esimplifies belief state dynamics based on linear \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhen the dynamics is linear, you can use \u003ca href=\"/posts/kbhlinear_quadratic_regulator/\"\u003eLinear-Quadratic Regulator\u003c/a\u003e to solve. 
This results in a worse policy but will give you a policy.\u003c/p\u003e\n\u003ch3 id=\"previous-work\"\u003ePrevious Work\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;just solve most-likely state\u0026rdquo;: doesn\u0026rsquo;t take action to explore and understand the state.\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;belief roadmap\u0026rdquo;: not really planning in the belief space itself\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"approach\"\u003eApproach\u003c/h3\u003e\n\u003ch4 id=\"belief-update\"\u003eBelief Update\u003c/h4\u003e\n\u003cp\u003eWe use Baysian updates for the state probably updates:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(s_{t+1}) = \\eta P(o_{t+1}|s_{t+1}) \\int_{x} p(_{t+1}|x, a_{t}) P(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then the actual beliefs are updated with \u003ca href=\"/posts/kbhfilters/#extended\"\u003eExtended Kalman Filter\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eImportantly, the \u003ca href=\"/posts/kbhfilters/#extended\"\u003eExtended Kalman Filter\u003c/a\u003e usually requires us to take an expectation of each observation O over all O; instead, we assume that the future states are uniform linearly distributed.\u003c/p\u003e\n\u003ch4 id=\"belief-update-cost\"\u003eBelief Update Cost\u003c/h4\u003e\n\u003cp\u003eIdeally, we want to lower \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of the \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e vectors in order to be more confident.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_09-32-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003efirst term: reduce large trajectories (verify)\u003c/li\u003e\n\u003cli\u003esecond: stabilization\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"replanning-strategy\"\u003eReplanning Strategy\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_09-35-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003enot\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eat\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egoal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# replan at where we are at now\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emean_b\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecreate_initial_plan\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edepth\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea_t\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolve_lqr_for_plan_at_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emean_b\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eo\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eenvironment\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea_t\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eextended_kalman\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emax_allowed_belief_uncertainty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ebreak\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhilqr/","tags":null,"title":"Belief iLQR"},{"categories":null,"contents":"Our belief can be represented as vectors as the probability of us being in each state. If we have that, we can just use our belief vector as our state vector. Now use MDP any solving you\u0026rsquo;d like, keeping in mind that the reward is just the expected reward:\n\\begin{equation} \\mathbb{E}[R(b,a)] = \\sum_{s} R(s,a) b(s) \\end{equation}\nwe can estimate our transition between belief-states like so:\n\\begin{align} T(b\u0026rsquo;|b,a) \u0026amp;= P(b\u0026rsquo;|b,a) \\\\ \u0026amp;= \\sum_{o}^{} P(b\u0026rsquo;|b,a,o) P(o|b,a) \\\\ \u0026amp;= \\sum_{o}^{} P(b\u0026rsquo; = Update(b,a,o)) \\sum_{s\u0026rsquo;}^{}O(o|a,s\u0026rsquo;) \\sum_{s}^{}T(s\u0026rsquo;|s,a)b(s) \\end{align}\n\u0026ldquo;the probability of the next belief being \\(b\u0026rsquo;\\) is equal to how probable it is to get state b\u0026rsquo; from conditions b,a,o, times the probability of getting that particular observation.\u0026rdquo;.\nHowever, this expression is quite unwheldy if your state-space is large. 
Hence, we turn to a technique like conditional plans which foregos considering individual states altogether.\n","html":"\u003cp\u003eOur \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e can be represented as \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es as the probability of us being in each state. If we have that, we can just use our \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e vector as our state vector. Now use \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e any solving you\u0026rsquo;d like, keeping in mind that the reward is just the expected reward:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[R(b,a)] = \\sum_{s} R(s,a) b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can estimate our transition between belief-states like so:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nT(b\u0026rsquo;|b,a) \u0026amp;= P(b\u0026rsquo;|b,a) \\\\\n\u0026amp;= \\sum_{o}^{} P(b\u0026rsquo;|b,a,o) P(o|b,a) \\\\\n\u0026amp;= \\sum_{o}^{} P(b\u0026rsquo; = Update(b,a,o)) \\sum_{s\u0026rsquo;}^{}O(o|a,s\u0026rsquo;) \\sum_{s}^{}T(s\u0026rsquo;|s,a)b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the probability of the next belief being \\(b\u0026rsquo;\\) is equal to how probable it is to get state b\u0026rsquo; from conditions b,a,o, times the probability of getting that particular observation.\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eHowever, this expression is quite unwheldy if your state-space is large. Hence, we turn to a technique like \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es which foregos considering individual states altogether.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbelief_state_mdp/","tags":null,"title":"belief-state MDP"},{"categories":null,"contents":"Bending is what happens when you apply a transverse load to an object and it goes wooosh.\nThat\u0026rsquo;s cool. Now how does it work? 
see Euler-Bernoulli Theory\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbending/\"\u003eBending\u003c/a\u003e is what happens when you apply a \u003ca href=\"/posts/kbhtransverse_loaod/\"\u003etransverse load\u003c/a\u003e to an object and it goes wooosh.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_22-22-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThat\u0026rsquo;s cool. Now how does it work? see \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbending/","tags":null,"title":"bending"},{"categories":null,"contents":"Consider a case where there\u0026rsquo;s only a single binary outcome:\n\u0026ldquo;success\u0026rdquo;, with probability \\(p\\) \u0026ldquo;failure\u0026rdquo;, with probability \\(1-p\\) constituents \\begin{equation} X \\sim Bern(p) \\end{equation}\nrequirements the probability mass function:\n\\begin{equation} P(X=k) = \\begin{cases} p,\\ if\\ k=1\\\\ 1-p,\\ if\\ k=0\\\\ \\end{cases} \\end{equation}\nThis is sadly not Differentiable, which is sad for Maximum Likelihood Parameter Learning. 
Therefore, we write:\n\\begin{equation} P(X=k) = p^{k} (1-p)^{1-k} \\end{equation}\nWhich emulates the behavior of your function at \\(0\\) and \\(1\\) and we kinda don\u0026rsquo;t care any other place.\nWe can use it\nadditional information properties of Bernoulli distribution expected value: \\(p\\) variance: \\(p(1-p)\\) Bernoulli as indicator If there\u0026rsquo;s a series of event whose probability you are given, you can use a Bernoulli to model each one and add/subtract\nMLE for Bernouli \\begin{equation} p_{MLE} = \\frac{m}{n} \\end{equation}\n\\(m\\) is the number of events\n","html":"\u003cp\u003eConsider a case where there\u0026rsquo;s only a single \u003ca href=\"/posts/kbhbinary_number_system/#base-2\"\u003ebinary\u003c/a\u003e outcome:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;success\u0026rdquo;, with probability \\(p\\)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;failure\u0026rdquo;, with probability \\(1-p\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Bern(p)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) =\n\\begin{cases}\np,\\ if\\ k=1\\\\\n1-p,\\ if\\ k=0\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is sadly not \u003ca href=\"/posts/kbhuniqueness_and_existance/#differentiable\"\u003eDifferentiable\u003c/a\u003e, which is sad for \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e. 
Therefore, we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = p^{k} (1-p)^{1-k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich emulates the behavior of your function at \\(0\\) and \\(1\\) and we kinda don\u0026rsquo;t care any other place.\u003c/p\u003e\n\u003cp\u003eWe can use it\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-bernoulli-distribution--kbhbernoulli-random-variable-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/strong\u003e: \\(p\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/strong\u003e: \\(p(1-p)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bernoulli--kbhbernoulli-random-variable-dot-md--as-indicator\"\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli\u003c/a\u003e as indicator\u003c/h3\u003e\n\u003cp\u003eIf there\u0026rsquo;s a series of event whose probability you are given, you can use a \u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli\u003c/a\u003e to model each one and add/subtract\u003c/p\u003e\n\u003ch3 id=\"mle-for-bernouli\"\u003eMLE for Bernouli\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\np_{MLE} = \\frac{m}{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(m\\) is the number of events\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbernoulli_random_variable/","tags":null,"title":"Bernoulli distribution"},{"categories":null,"contents":"\\begin{equation} x^{2}y\u0026rsquo;\u0026rsquo; + xy\u0026rsquo; + (x^{2}-n^{2})y = 0 \\end{equation}\nthis function is very useful, they have no well defined elementary result.\n","html":"\u003cp\u003e\\begin{equation}\nx^{2}y\u0026rsquo;\u0026rsquo; 
+ xy\u0026rsquo; + (x^{2}-n^{2})y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis function is very useful, they have no well defined elementary result.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbessel_s_equation/","tags":null,"title":"Bessel's Equation"},{"categories":null,"contents":"best-action worst-state is a lower bound for alpha vectors:\n\\begin{equation} r_{baws} = \\max_{a} \\sum_{k=1}^{\\infty} \\gamma^{k-1} \\min_{s}R(s,a) \\end{equation}\nThe alpha vector corresponding to this system would be the same \\(r_{baws}\\) at each slot.\nwhich should give us the highest possible reward possible given we always pick the most optimal actions while being stuck in the worst state\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e is a lower bound for \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{baws} = \\max_{a} \\sum_{k=1}^{\\infty} \\gamma^{k-1} \\min_{s}R(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e corresponding to this system would be the same \\(r_{baws}\\) at each slot.\u003c/p\u003e\n\u003cp\u003ewhich should give us the highest possible reward possible given we always pick the most optimal actions while being stuck in the worst state\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhworst_possible_state/","tags":null,"title":"best-action worst-state"},{"categories":null,"contents":"Background recall AlphaZero\nSelection (UCB 1, or DTW, etc.) Expansion (generate possible belief notes) Simulation (if its a brand new node, Rollout, etc.) 
Backpropegation (backpropegate your values up) Key Idea Remove the need for heuristics for MCTS\u0026mdash;removing inductive bias\nApproach We keep the ol\u0026rsquo; neural network:\n\\begin{equation} f_{\\theta}(b_{t}) = (p_{t}, v_{t}) \\end{equation}\nPolicy Evaluation Do \\(n\\) episodes of MCTS, then use cross entropy to improve \\(f\\)\nGround truth policy Action Selection Uses Double Progressive Widening\nImportantly, no need to use a heuristic (or worst yet random Rollouts) for action selection.\nDifference vs. LetsDrive LetsDrive uses DESPOT BetaZero uses MCTS with belief states. ","html":"\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003erecall \u003ca href=\"/posts/kbhlm_alignment/#alphazero\"\u003eAlphaZero\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eSelection (\u003ca href=\"/posts/kbhdirected_exploration/#ucb-1\"\u003eUCB 1\u003c/a\u003e, or \u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDTW\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003eExpansion (generate possible belief notes)\u003c/li\u003e\n\u003cli\u003eSimulation (if its a brand new node, \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003eBackpropegation (backpropegate your values up)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"key-idea\"\u003eKey Idea\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eRemove the need for heuristics for \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e\u0026mdash;removing inductive bias\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003cp\u003eWe keep the ol\u0026rsquo; neural network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf_{\\theta}(b_{t}) = (p_{t}, v_{t})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"policy-evaluation\"\u003ePolicy Evaluation\u003c/h3\u003e\n\u003cp\u003eDo \\(n\\) episodes of \u003ca 
href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e, then use cross entropy to improve \\(f\\)\u003c/p\u003e\n\u003ch3 id=\"ground-truth-policy\"\u003eGround truth policy\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_10-05-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"action-selection\"\u003eAction Selection\u003c/h3\u003e\n\u003cp\u003eUses \u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDouble Progressive Widening\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eImportantly, \u003cstrong\u003eno need to use a heuristic (or worst yet random \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003es) for action selection\u003c/strong\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_10-11-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"difference-vs-dot-letsdrive--kbhletsdrive-dot-md\"\u003eDifference vs. \u003ca href=\"/posts/kbhletsdrive/\"\u003eLetsDrive\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhletsdrive/\"\u003eLetsDrive\u003c/a\u003e uses \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbetazero/\"\u003eBetaZero\u003c/a\u003e uses \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e with \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e states.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbetazero/","tags":null,"title":"BetaZero"},{"categories":null,"contents":"Big Data is a term for datasets large enough that traditional data processing applications are inadequate. i.e. when non-parallel processing is inadequate.\nThat is: \u0026ldquo;Big Data\u0026rdquo; is when Pandas and SQL is inadequate. To handle big data, its very difficult to sequentially go through and process stuff. 
To make it work, you usually have to perform parallel processing under the hood.\nRules of Thumb of Datasets 1000 Genomes (AWS, 260TB) CommonCraw - the entire web (On PSC! 300-800 TB) GDELT - https://www.gdeltproject.org/ a dataset that contains everything that\u0026rsquo;s happening in the world right now in terms of news (small!! 2.5 TB per year; however, there is a LOT of fields: 250 Million fields) Evolution of Big Data Good Ol\u0026rsquo; SQL schemas are too set in stone (\u0026ldquo;not a fit for Agile development\u0026rdquo; \u0026mdash; a research scientist) SQL sharding, when working correctly, is KV Stores And this is why we gave up and made Redis (or Amazon DynamoDB, Riak, Memcached) which keeps only Key/Value information. We just make the key really really complicated to support structures: GET cart:joe:15~4...\nBut the problem with key-value stores isn\u0026rsquo;t good at indexing at all: if we want like to get all of Joe\u0026rsquo;s cart, you can\u0026rsquo;t just GET cart:joe because you can\u0026rsquo;t compare partial hashes.\nDocument Stores [And something something mongo\u0026rsquo;s document stores but something its bad about those too but CMU can\u0026rsquo;t do tech and the speakers died]\nWide Column Stores Google BigTable type thing\nJust have a wide column of arbitrary width with no schema:\nCart Joe 15~4 Cart Robert 15~3 More Things! Chicken 15~2 Etc. No idea how you query this but google does it and CMU\u0026rsquo;s speakers died again good on them.\nGraphs! Neo4j: don\u0026rsquo;t store triples, and have better schemes for encoding. You can then use nice graph query schemes.\nHard to visualize VERY hard to serialize Queries are hard And so: And sooooo. Introducing Spark. 
We don\u0026rsquo;t want to do parallel programming, we want to use traditional databases, so we just make someone else do it on an adapter and just query boring databases with lots of parallel connections.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbig_data/\"\u003eBig Data\u003c/a\u003e is a term for datasets large enough that traditional data processing applications are inadequate. i.e. when non-parallel processing is inadequate.\u003c/p\u003e\n\u003cp\u003eThat is: \u0026ldquo;\u003ca href=\"/posts/kbhbig_data/\"\u003eBig Data\u003c/a\u003e\u0026rdquo; is when Pandas and SQL is inadequate. To handle big data, its very difficult to sequentially go through and process stuff. To make it work, you usually have to perform parallel processing under the hood.\u003c/p\u003e\n\u003ch2 id=\"rules-of-thumb-of-datasets\"\u003eRules of Thumb of Datasets\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e1000 Genomes (AWS, 260TB)\u003c/li\u003e\n\u003cli\u003eCommonCraw - the entire web (On PSC! 300-800 TB)\u003c/li\u003e\n\u003cli\u003eGDELT - \u003ca href=\"https://www.gdeltproject.org/\"\u003ehttps://www.gdeltproject.org/\u003c/a\u003e a dataset that contains everything that\u0026rsquo;s happening in the world right now in terms of news (small!! 
2.5 TB per year; however, there is a LOT of fields: 250 Million fields)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"evolution-of-big-data\"\u003eEvolution of Big Data\u003c/h2\u003e\n\u003ch3 id=\"good-ol-sql\"\u003eGood Ol\u0026rsquo; SQL\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eschemas are too set in stone (\u0026ldquo;not a fit for \u003ca href=\"/posts/kbhsoftware_development_methodologies/#agile\"\u003eAgile\u003c/a\u003e development\u0026rdquo; \u0026mdash; a research scientist)\u003c/li\u003e\n\u003cli\u003eSQL sharding, when working correctly, is\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"kv-stores\"\u003eKV Stores\u003c/h3\u003e\n\u003cp\u003eAnd this is why we gave up and made Redis (or Amazon DynamoDB, Riak, Memcached) which keeps only Key/Value information. We just make the key really really complicated to support structures: \u003ccode\u003eGET cart:joe:15~4...\u003c/code\u003e\u003c/p\u003e\n\u003cp\u003eBut the problem with key-value stores isn\u0026rsquo;t good at indexing at all: if we want like to get all of Joe\u0026rsquo;s cart, you can\u0026rsquo;t just \u003ccode\u003eGET cart:joe\u003c/code\u003e because you can\u0026rsquo;t compare partial hashes.\u003c/p\u003e\n\u003ch3 id=\"document-stores\"\u003eDocument Stores\u003c/h3\u003e\n\u003cp\u003e[And something something mongo\u0026rsquo;s document stores but something its bad about those too but CMU can\u0026rsquo;t do tech and the speakers died]\u003c/p\u003e\n\u003ch3 id=\"wide-column-stores\"\u003eWide Column Stores\u003c/h3\u003e\n\u003cp\u003eGoogle BigTable type thing\u003c/p\u003e\n\u003cp\u003eJust have a wide column of arbitrary width with no 
schema:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eCart\u003c/th\u003e\n\u003cth\u003eJoe\u003c/th\u003e\n\u003cth\u003e15~4\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCart\u003c/td\u003e\n\u003ctd\u003eRobert\u003c/td\u003e\n\u003ctd\u003e15~3\u003c/td\u003e\n\u003ctd\u003eMore Things!\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003eChicken\u003c/td\u003e\n\u003ctd\u003e15~2\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eEtc. No idea how you query this but google does it and CMU\u0026rsquo;s speakers died again good on them.\u003c/p\u003e\n\u003ch3 id=\"graphs\"\u003eGraphs!\u003c/h3\u003e\n\u003cp\u003eNeo4j: don\u0026rsquo;t store triples, and have better schemes for encoding. You can then use nice graph query schemes.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eHard to visualize\u003c/li\u003e\n\u003cli\u003eVERY hard to serialize\u003c/li\u003e\n\u003cli\u003eQueries are hard\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"and-so\"\u003eAnd so:\u003c/h3\u003e\n\u003cp\u003eAnd sooooo. Introducing \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e. 
We don\u0026rsquo;t want to do parallel programming, we want to use traditional databases, so we just make someone else do it on an adapter and just query boring databases with lots of parallel connections.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbig_data/","tags":null,"title":"Big Data"},{"categories":null,"contents":"A binary operation means that you are taking two things in and you are getting one thing out; for instance:\n\\begin{equation} f: (\\mathbb{F},\\mathbb{F}) \\to \\mathbb{F} \\end{equation}\nThis is also closed, but binary operations dons\u0026rsquo;t have to be.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhbinary_operation/\"\u003ebinary operation\u003c/a\u003e means that you are taking two things in and you are getting one thing out; for instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf: (\\mathbb{F},\\mathbb{F}) \\to \\mathbb{F}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is also \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e, but \u003ca href=\"/posts/kbhbinary_operation/\"\u003ebinary operation\u003c/a\u003es dons\u0026rsquo;t have to be.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbinary_operation/","tags":null,"title":"binary operation"},{"categories":null,"contents":"A binomial distribution is a typo of distribution whose contents are:\nBinary Independent Fixed number Same probability: \u0026ldquo;That means: WITH REPLACEMENT\u0026rdquo; Think: \u0026ldquo;what\u0026rsquo;s the probability of \\(n\\) coin flips getting \\(k\\) heads given the head\u0026rsquo;s probability is \\(p\\)\u0026rdquo;.\nconstituents We write:\n\\begin{equation} X \\sim Bin(n,p) \\end{equation}\nwhere, \\(n\\) is the number of trials, \\(p\\) is the probability of success on each trial.\nrequirements Here is the probability mass function:\n\\begin{equation} P(X=k) = {n \\choose k} p^{k}(1-p)^{n-k} \\end{equation}\nadditional information properties of binomial distribution expected value: 
\\(np\\) variance: \\(np(1-p)\\) deriving the expectation The expectation of the binomial distribution is derivable from the fact:\n\\begin{equation} X = \\sum_{i=1}^{n} Y_{i} \\end{equation}\nwhere,\n\\begin{equation} \\begin{cases} X \\sim Bin(n,p) \\\\ Y_{i} \\sim Bern(p) \\end{cases} \\end{equation}\nNow, recall that expected value is linear.\nTherefore, we can write that:\napproximating binomial normal distribution approximation: \\(n \u0026gt; 20\\), variance large \\((np(1-p)) \u0026gt; 10\\), absolute independence; beware of continuity correction poisson distribution approximation: \\(n \u0026gt; 20\\), p small \\(p \u0026lt; 0.05\\) adding binomial distribution For \\(X\\) and \\(Y\\) independent binomial distributions, with equivalent probability:\n\\begin{equation} X \\sim Bin(a, p), Y \\sim Bin(b, p) \\end{equation}\nThen:\n\\begin{equation} X+Y \\sim Bin(a+b, p) \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e is a typo of distribution whose contents are:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBinary\u003c/li\u003e\n\u003cli\u003eIndependent\u003c/li\u003e\n\u003cli\u003eFixed number\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eSame probability\u003c/strong\u003e: \u0026ldquo;That means: WITH REPLACEMENT\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThink: \u0026ldquo;what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \\(n\\) coin flips getting \\(k\\) heads given the head\u0026rsquo;s probability is \\(p\\)\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eWe write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Bin(n,p)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n\\) is the number of trials, \\(p\\) is the probability of success on each trial.\u003c/p\u003e\n\u003ch2 
id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eHere is the \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = {n \\choose k} p^{k}(1-p)^{n-k}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-binomial-distribution--kbhbinomial-distribution-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/strong\u003e: \\(np\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/strong\u003e: \\(np(1-p)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"deriving-the-expectation--kbhexpectation-dot-md\"\u003ederiving the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe expectation of the \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e is derivable from the fact:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = \\sum_{i=1}^{n} Y_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nX \\sim Bin(n,p) \\\\\nY_{i} \\sim Bern(p)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall that \u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e is linear.\u003c/p\u003e\n\u003cp\u003eTherefore, we can write that:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-11_16-46-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"approximating-binomial\"\u003eapproximating binomial\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal 
distribution\u003c/a\u003e approximation: \\(n \u0026gt; 20\\), variance large \\((np(1-p)) \u0026gt; 10\\), absolute independence; beware of \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e approximation: \\(n \u0026gt; 20\\), p small \\(p \u0026lt; 0.05\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"adding-binomial-distribution--kbhbinomial-distribution-dot-md\"\u003eadding \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \\(X\\) and \\(Y\\) independent \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003es, with \u003cstrong\u003eequivalent\u003c/strong\u003e probability:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Bin(a, p), Y \\sim Bin(b, p)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX+Y \\sim Bin(a+b, p)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbinomial_distribution/","tags":null,"title":"binomial distribution"},{"categories":null,"contents":"bioinformatics is a field of biology that deals with biology information. Blending CS, Data, Strategies and of course biology into one thing.\nFirst, let\u0026rsquo;s review genetic information\npossible use for bioinformatics Find the start/stop codons of known gene, and determine the gene and protein length ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbioinformatics/\"\u003ebioinformatics\u003c/a\u003e is a field of \u003ca href=\"\"\u003ebiology\u003c/a\u003e that deals with \u003ca href=\"\"\u003ebiology\u003c/a\u003e information. 
Blending CS, Data, Strategies and of course \u003ca href=\"\"\u003ebiology\u003c/a\u003e into one thing.\u003c/p\u003e\n\u003cp\u003eFirst, let\u0026rsquo;s review \u003ca href=\"\"\u003egenetic information\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"possible-use-for-bioinformatics--kbhbioinformatics-dot-md\"\u003epossible use for \u003ca href=\"/posts/kbhbioinformatics/\"\u003ebioinformatics\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFind the start/stop codons of known gene, and determine the gene and protein length\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbioinformatics/","tags":null,"title":"bioinformatics"},{"categories":null,"contents":"bitmasking is a very helpful to create bit vectors.\n| with a 1-mask is useful to turning things on \u0026amp; with a 0-mask is useful to turning things off (bitvector \u0026amp; not(1-mask)) | is useful for set unions \u0026amp; is useful for intersections of bits ^ is useful for flipping isolated bits: 0 is bit preserving, 1 is bit negating ~ is useful for flipping all bits ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbitmask/\"\u003ebitmask\u003c/a\u003eing is a very helpful to create \u003ca href=\"/posts/kbhbinary_number_system/#bit\"\u003ebit\u003c/a\u003e \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e| with a 1-mask is useful to turning things on\u003c/li\u003e\n\u003cli\u003e\u0026amp; with a 0-mask is useful to turning things off (bitvector \u0026amp; not(1-mask))\u003c/li\u003e\n\u003cli\u003e| is useful for set unions\u003c/li\u003e\n\u003cli\u003e\u0026amp; is useful for intersections of bits\u003c/li\u003e\n\u003cli\u003e^ is useful for flipping isolated bits: 0 is bit preserving, 1 is bit negating\u003c/li\u003e\n\u003cli\u003e~ is useful for flipping all 
bits\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbitmask/","tags":null,"title":"bitmask"},{"categories":null,"contents":"\u0026amp; | ~ ^ \u0026lt;\u0026lt; \u0026gt;\u0026gt;\n\u0026amp; Bitwise level AND\n| Bitwise level OR\n~ Unary bitwise negation\n^ Unary XOR\n\u0026lt;\u0026lt; Shift the number to the left. Fill unused slots with 0.\n\u0026gt;\u0026gt; Shift the number to the right\nfor signed values, we perform an arithmetic right shift: fill the unused slots with the most significant bit from before (\u0026ldquo;fill with 1s\u0026rdquo;) for unsigned values, we perform a logical right shift ","html":"\u003cp\u003e\u0026amp; | ~ ^ \u0026lt;\u0026lt; \u0026gt;\u0026gt;\u003c/p\u003e\n\u003ch2 id=\"and\"\u003e\u0026amp;\u003c/h2\u003e\n\u003cp\u003eBitwise level AND\u003c/p\u003e\n\u003ch2 id=\"b99834\"\u003e|\u003c/h2\u003e\n\u003cp\u003eBitwise level OR\u003c/p\u003e\n\u003ch2 id=\"4c761f\"\u003e~\u003c/h2\u003e\n\u003cp\u003eUnary bitwise negation\u003c/p\u003e\n\u003ch2 id=\"7e6a2a\"\u003e^\u003c/h2\u003e\n\u003cp\u003eUnary XOR\u003c/p\u003e\n\u003ch2 id=\"9c1628\"\u003e\u0026lt;\u0026lt;\u003c/h2\u003e\n\u003cp\u003eShift the number to the left. 
Fill unused slots with 0.\u003c/p\u003e\n\u003ch2 id=\"22a1da\"\u003e\u0026gt;\u0026gt;\u003c/h2\u003e\n\u003cp\u003eShift the number to the right\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003cstrong\u003esigned\u003c/strong\u003e values, we perform an \u003ca href=\"#22a1da\"\u003earithmetic right shift\u003c/a\u003e: fill the unused slots with the most significant bit from before (\u0026ldquo;fill with 1s\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003efor \u003cstrong\u003eunsigned\u003c/strong\u003e values, we perform a \u003ca href=\"#22a1da\"\u003elogical right shift\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbitwise_operations/","tags":null,"title":"bitwise operations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhblack_thursday/","tags":null,"title":"Black Thursday"},{"categories":null,"contents":"People have been trading options for a very long time, but there wasn\u0026rsquo;t a good way of quantify the value of an option.\nThere are two main types of uses for Black-Scholes Formula\nyou can use all variables and determine the value of options you can get the price of options being traded, then compute the $σ$\u0026mdash;the market\u0026rsquo;s estimation of volatility (how much they want the insurance policy that is the options) constituents \\(S_0\\): stock price \\(X\\): exercise price \\(r\\): risk-free interest rate \\(T\\): maturity time \\(\\sigma\\): standard-deviation of log returns\u0026mdash;\u0026ldquo;volatility\u0026rdquo; Black-Scholes Formula for an European \u0026ldquo;Call\u0026rdquo; Option Here is the scary formula:\n\\begin{equation} C_0 = S_0 \\mathcal{N}(d_{1})-Xe^{-rT}\\mathcal{N}(d_{2}) \\end{equation}\nwhere, the variables are defined above, and:\n\\begin{equation} \\begin{cases} d_1 = \\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r+\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}}\\\\ d_2 = 
\\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r-\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}} \\end{cases} \\end{equation}\nand \\(\\mathcal{N}\\) is the area at point under the standard normal distribution.\noh god So let\u0026rsquo;s dissect this a little.\nThe first term:\n\\begin{equation} S_0\\mathcal{N}(d_{1}) \\end{equation}\nis the \u0026ldquo;current\u0026rdquo; stock price, weighted by the probability of you being willing to exercise it.\nand the second term:\n\\begin{equation} Xe^{-rT}\\mathcal{N}(d_{2}) \\end{equation}\nis the \u0026ldquo;price\u0026rdquo; of the exercise (what you need to pay, if exercising the option, to get the stock.)\nThis strike price \\(X\\) is discounted by \\(e^{-rT}\\), which is like a time machine that rolls that strike price back to what it would be today (so that it\u0026rsquo;s comparable to \\(S_0\\).) As \\(r\\) is the risk free interest rate, we are essentially saying: \u0026ldquo;in a perfectly functional market, over the next \\(T\\) days, how will our asset grow?\u0026rdquo;\nThis is again weighted by the probability of you being willing to exercise it\u0026mdash;through modified slightly differently.\nTherefore, subtracting the two terms, we get the actual value of the option\u0026mdash;the money you would gain by exercising it, then immediately selling the stock, weighted by how willing you are actually to excercise it.\nLet\u0026rsquo;s now take a look at those \u0026ldquo;probabilities\u0026rdquo; \\(d_{\\{1,2\\}}\\). These factors essentially provide quantification of the statement that: \u0026ldquo;the higher our current price is ABOVE the excrecise price\u0026mdash;accounting for volatility\u0026mdash;the more willing we are to excercise the option.\u0026rdquo;\nNote then, \\(\\ln\\qty(\\frac{S_{0}}{X})\\) form the top of both expressions. 
That essentially measures how high the current price \\(S_0\\) deviates from the strike price \\(X\\).\nNow, as volatility \\(\\sigma\\) increases, \\(d_1\\) increases and \\(d_2\\) decreases (as \\(\\frac{\\sigma^{2}}{2}\\) is being added in \\(d_1\\) and subtracted in \\(d_2\\)). This is because, as volatility increase, you are less certain about what the actual \u0026ldquo;pay\u0026rdquo; (price) is, but your option\u0026mdash;given its constant strike price\u0026mdash;provides the certainty in gain.\n","html":"\u003cp\u003ePeople have been trading \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es for a very long time, but there wasn\u0026rsquo;t a good way of quantify the value of an \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThere are two main types of uses for \u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eyou can use all variables and determine the value of \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003eyou can get the price of \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es being traded, then compute the $σ$\u0026mdash;the market\u0026rsquo;s estimation of volatility (how much they want the \u003ca href=\"/posts/kbhoptions/#analyze-options-as-insurance\"\u003einsurance policy\u003c/a\u003e that is the options)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S_0\\): stock price\u003c/li\u003e\n\u003cli\u003e\\(X\\): exercise price\u003c/li\u003e\n\u003cli\u003e\\(r\\): risk-free interest rate\u003c/li\u003e\n\u003cli\u003e\\(T\\): maturity time\u003c/li\u003e\n\u003cli\u003e\\(\\sigma\\): standard-deviation of log \u003ca 
href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003es\u0026mdash;\u0026ldquo;volatility\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"black-scholes-formula--kbhblack-scholes-formula-dot-md--for-an-european-call-option--kbhoptions-dot-md\"\u003e\u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e for an \u003ca href=\"/posts/kbhoptions/#american-vs-european-options\"\u003eEuropean \u0026ldquo;Call\u0026rdquo; Option\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eHere is the scary formula:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_0 = S_0 \\mathcal{N}(d_{1})-Xe^{-rT}\\mathcal{N}(d_{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, the variables are defined above, and:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_1 = \\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r+\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}}\\\\\nd_2 = \\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r-\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(\\mathcal{N}\\) is the area at point under the standard \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"oh-god\"\u003eoh god\u003c/h3\u003e\n\u003cp\u003eSo let\u0026rsquo;s dissect this a little.\u003c/p\u003e\n\u003cp\u003eThe first term:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS_0\\mathcal{N}(d_{1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the \u0026ldquo;current\u0026rdquo; stock price, weighted by the probability of you being willing to exercise it.\u003c/p\u003e\n\u003cp\u003eand the second term:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nXe^{-rT}\\mathcal{N}(d_{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the \u0026ldquo;price\u0026rdquo; of the exercise (what you need to pay, if exercising the option, to get the stock.)\u003c/p\u003e\n\u003cp\u003eThis strike price \\(X\\) is discounted by \\(e^{-rT}\\), 
which is like a time machine that rolls that strike price back to what it would be today (so that it\u0026rsquo;s comparable to \\(S_0\\).) As \\(r\\) is the risk free interest rate, we are essentially saying: \u0026ldquo;in a perfectly functional market, over the next \\(T\\) days, how will our asset grow?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThis is again weighted by the probability of you being willing to exercise it\u0026mdash;through modified slightly differently.\u003c/p\u003e\n\u003cp\u003eTherefore, subtracting the two terms, we get the actual value of the option\u0026mdash;the money you would gain by exercising it, then immediately selling the stock, weighted by how willing you are actually to excercise it.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now take a look at those \u0026ldquo;probabilities\u0026rdquo; \\(d_{\\{1,2\\}}\\). These factors essentially provide quantification of the statement that: \u0026ldquo;the higher our current price is ABOVE the excrecise price\u0026mdash;accounting for volatility\u0026mdash;the more willing we are to excercise the option.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eNote then, \\(\\ln\\qty(\\frac{S_{0}}{X})\\) form the top of both expressions. That essentially measures how high the current price \\(S_0\\) deviates from the strike price \\(X\\).\u003c/p\u003e\n\u003cp\u003eNow, as volatility \\(\\sigma\\) increases, \\(d_1\\) increases and \\(d_2\\) decreases (as \\(\\frac{\\sigma^{2}}{2}\\) is being \u003cem\u003eadded\u003c/em\u003e in \\(d_1\\) and \u003cem\u003esubtracted\u003c/em\u003e in \\(d_2\\)). 
This is because, as volatility increase, you are \u003cem\u003eless\u003c/em\u003e certain about what the actual \u0026ldquo;pay\u0026rdquo; (price) is, but your option\u0026mdash;given its constant strike price\u0026mdash;provides the certainty in gain.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhblack_scholes_formula/","tags":null,"title":"Black-Scholes Formula"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhblb/","tags":null,"title":"BLB"},{"categories":null,"contents":"To evaluate the lower bound:\n\\begin{equation} \\alpha_{a}^{k+1} (s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\alpha_{a}^{k}(s\u0026rsquo;) \\end{equation}\nwe are essentially sticking with an action and do conditional plan evaluation of a policy that do one action into the future\n","html":"\u003cp\u003eTo evaluate the lower bound:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{a}^{k+1} (s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\alpha_{a}^{k}(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe are essentially sticking with an action and do \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e of a policy that do one action into the future\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhblind_lower_bound/","tags":null,"title":"blind lower bound"},{"categories":null,"contents":"The bloch sphere is a sphere encoding all possible probabilities of a qubit shared between two axis, \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\).\nYou will notice that its a unit sphere, in which any magnitude has size \\(1\\). 
Hence, probabilities would result as projected onto each of the directions.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhbloch_sphere/\"\u003ebloch sphere\u003c/a\u003e is a sphere encoding all possible probabilities of a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e shared between two axis, \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-03-19_21-56-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eYou will notice that its a unit sphere, in which any magnitude has size \\(1\\). Hence, probabilities would result as projected onto each of the directions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbloch_sphere/","tags":null,"title":"bloch sphere"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhbluest_eye/","tags":null,"title":"Bluest Eye"},{"categories":null,"contents":"General Information Due Date Topic Important Documents \u0026lt;2022-05-06 Fri\u0026gt; Bluest Eye Essay Bluest Eye Prompt Beauty: discuss Morrison’s treatment of the idea of beauty. From what, where, or whom does this notion come? What effect does it have on the way one perceives the world? On the way others perceive an individual?\nHow does beauty (the acquisition of it, the lack of it, or the presence of it) determine one’s fate in America? Is beauty a necessarily fixed entity or does it fluctuate at the whim of society? How much or to what extent does one’s perception of beauty contribute to one’s sense of self-worth?\nQuotes Bin Beauty Claudia: I had only one desire: to dismember it. To see of what it was made, to discover the dearness, to find the beauty, the desirability that had escaped me, but apparently only me. Pecola: Thrown, in this way, into the binding conviction that only a miracle could relieve her, she would never know her beauty. She would see only what there was to see: the eyes of other people. Maureen: Maureen agreed. 
\u0026ldquo;Ooooo yes. My mother told me that a girl named Audrey, she went to the beauty parlor where we lived before, and asked the lady to fix her hair like Hedy Lamarr’s, and the lady said, \u0026lsquo;Yeah, when you grow some hair like Hedy Lamarr’s.\u0026rsquo;\u0026rdquo; She laughed long and sweet. (post pecola beat-up) Pauline (Polly): Along with the idea of romantic love, she was introduced to another—physical beauty. In equating physical beauty with virtue, she stripped her mind, bound it, and collected self-contempt by the heap. Pauline (Polly) cont\u0026rsquo;d: She was never able, after her education in the movies, to look at a face and not assign it some category in the scale of absolute beauty, and the scale was one she absorbed in full from the silver screen. Pauline (Polly): More and more she neglected her house, her children, her man\u0026mdash;\u0026hellip;the dark edges that made the daily life with the Fishers lighter, more delicate, more lovely \u0026hellip; Here she found beauty, order, cleanliness, and praise. Pauline (Polly): Pauline kept this order, this beauty, for herself, a private world, and never introduced it into her storefront, or to her children. Cholly after Aunt Death: The funeral banquet was a peal of joy after the thunderous beauty of the funeral. It was like a street tragedy with spontaneity tucked softly into the corners of a highly formal structure. Soaphead Church: He thought it was at once the most fantastic and the most logical petition he had ever received. Here was an ugly little girl asking for beauty. A surge of love and understanding swept through him, but was quickly replaced by anger. Claudia (reflecting on Pecola): All of our waste which we dumped on her and which she absorbed. And all of our beauty, which was hers first and which she gave to us. Eyes a: Her eyes are full of sorrow. 
She sings to me: \u0026ldquo;When the deep purple falls over sleepy garden walls, someone thinks of me\u0026hellip;.\u0026rdquo; ** Sub-Claim Synthesis There\u0026rsquo;s always the UCLA Writing Lab.\n","html":"\u003ch2 id=\"general-information\"\u003eGeneral Information\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDue Date\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003eImportant Documents\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-06 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eBluest Eye Essay\u003c/td\u003e\n\u003ctd\u003eBluest Eye\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"prompt\"\u003ePrompt\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eBeauty\u003c/strong\u003e\u003c/strong\u003e: discuss Morrison’s treatment of the idea of beauty. From what, where, or whom does this notion come? What effect does it have on the way one perceives the world? On the way others perceive an individual?\u003c/p\u003e\n\u003cp\u003eHow does beauty (the acquisition of it, the lack of it, or the presence of it) determine one’s fate in America? Is beauty a necessarily fixed entity or does it fluctuate at the whim of society? How much or to what extent does one’s perception of beauty contribute to one’s sense of self-worth?\u003c/p\u003e\n\u003ch2 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h2\u003e\n\u003ch3 id=\"beauty\"\u003eBeauty\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eClaudia\u003c/strong\u003e\u003c/strong\u003e: I had only one desire: to dismember it. 
To see of what it was made, to discover the dearness, to find the beauty, the desirability that had escaped me, but apparently only me.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePecola\u003c/strong\u003e\u003c/strong\u003e: Thrown, in this way, into the binding conviction that only a miracle could relieve her, she would never know her beauty. She would see only what there was to see: the eyes of other people.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eMaureen\u003c/strong\u003e\u003c/strong\u003e: Maureen agreed. \u0026ldquo;Ooooo yes. My mother told me that a girl named Audrey, she went to the beauty parlor where we lived before, and asked the lady to fix her hair like Hedy Lamarr’s, and the lady said, \u0026lsquo;Yeah, when you grow some hair like Hedy Lamarr’s.\u0026rsquo;\u0026rdquo; She laughed long and sweet. (post pecola beat-up)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly): Along with the idea of romantic love, she was introduced to another—physical beauty. 
In equating physical beauty with virtue, she stripped her mind, bound it, and collected self-contempt by the heap.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly) cont\u0026rsquo;d: She was never able, after her education in the movies, to look at a face and not assign it some category in the scale of absolute beauty, and the scale was one she absorbed in full from the silver screen.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly): More and more she neglected her house, her children, her man\u0026mdash;\u0026hellip;the dark edges that made the daily life with the Fishers lighter, more delicate, more lovely \u0026hellip; Here she found beauty, order, cleanliness, and praise.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly): Pauline kept this order, this beauty, for herself, a private world, and never introduced it into her storefront, or to her children.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eCholly\u003c/strong\u003e\u003c/strong\u003e after Aunt Death: The funeral banquet was a peal of joy after the thunderous beauty of the funeral. It was like a street tragedy with spontaneity tucked softly into the corners of a highly formal structure.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eSoaphead Church\u003c/strong\u003e\u003c/strong\u003e: He thought it was at once the most fantastic and the most logical petition he had ever received. Here was an ugly little girl asking for beauty. A surge of love and understanding swept through him, but was quickly replaced by anger.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eClaudia\u003c/strong\u003e\u003c/strong\u003e (reflecting on Pecola): All of our waste which we dumped on her and which she absorbed. 
And all of our beauty, which was hers first and which she gave to us.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"eyes\"\u003eEyes\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/strong\u003e: Her eyes are full of sorrow. She sings to me: \u0026ldquo;When the deep purple falls over sleepy garden walls, someone thinks of me\u0026hellip;.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e**\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"sub-claim-synthesis\"\u003eSub-Claim Synthesis\u003c/h2\u003e\n\u003chr\u003e\n\u003cp\u003eThere\u0026rsquo;s always the \u003ca href=\"https://wp.ucla.edu/wp-content/uploads/2016/01/UWC_handouts_What-How-So-What-Thesis-revised-5-4-15-RZ.pdf\"\u003eUCLA Writing Lab\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhenglish_bluest_eye/","tags":null,"title":"Bluest Eye Essay Planning"},{"categories":null,"contents":"A secondary source comparison activity for the Bluest Eye\nTony Morrison\u0026rsquo;s Rootedness That, if an action were to be done as in a community, its regarded as safer It is a very personal grief and a personal statement done among people you trust. Done within the context of the community, therefore safe.\nPublic (white-washed) and private image, by necessesity, is separated it\u0026rsquo;s just important that it be private. And then, whatever I do that is public can be done seriously.\nthat people are only defined by the uniqueness they have out of the tribe My single solitary and individual Jifejs like the lives of the tribe; it differs in these specific ways, but it is a balanced life because it is both solitary and representative\nPurpose of the novel is enlightening as well as an art form It should have something in it that enlightens; something in it that opens the door arid points the way. 
Something in it that suggests what the conflicts are, what the problems are.\nThe Novel is a middle class art form The history of the novel as a form began when there was a new class, a middle class, to read it; it was an art form that they needed.\nThat there is already a form of artistry for the lower class, but not middle class The lower classes didn\u0026rsquo;t need novels at that time because they had an art form already they had songs and dances, and ceremony, and gossip, and celebrations.\nnovels of manners tell people of a different world we call 1t the novel of manners, an art form designed to tell peole something they didn\u0026rsquo;t know.\nPortrays quintessential forms of connection How to get married. What a good living was.\nThe African Americans became unexclusive For a long time, the art form that was healing for Black people was music. That music is no longer exclusively ours; we don\u0026rsquo;t have exclusive rights to it.\nThat the story of the novel is told where the reader constructs the story together To construct the dialogue so that it is heard. So that there are no adverbs attached to them: \u0026ldquo;loudly,\u0026rdquo; \u0026ldquo;softly,\u0026rdquo; \u0026ldquo;he said menacingly.'\nThat the artistry is not described as Black but inherently black Black, because it uses the characteristics of Black art\n","html":"\u003cp\u003eA \u003ca href=\"\"\u003esecondary source\u003c/a\u003e comparison activity for the \u003ca href=\"/posts/kbhbluest_eye/\"\u003eBluest Eye\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"tony-morrison-s-rootedness\"\u003eTony Morrison\u0026rsquo;s Rootedness\u003c/h2\u003e\n\u003ch3 id=\"that-if-an-action-were-to-be-done-as-in-a-community-its-regarded-as-safer\"\u003eThat, if an action were to be done as in a community, its regarded as safer\u003c/h3\u003e\n\u003cp\u003eIt is a very personal grief and a personal statement done among people you trust. 
Done within the context of the community, therefore safe.\u003c/p\u003e\n\u003ch3 id=\"public--white-washed--and-private-image-by-necessesity-is-separated\"\u003ePublic (white-washed) and private image, by necessesity, is separated\u003c/h3\u003e\n\u003cp\u003eit\u0026rsquo;s just important that it be private. And then, whatever I do that is public can be done seriously.\u003c/p\u003e\n\u003ch3 id=\"that-people-are-only-defined-by-the-uniqueness-they-have-out-of-the-tribe\"\u003ethat people are only defined by the uniqueness they have out of the tribe\u003c/h3\u003e\n\u003cp\u003eMy single solitary and individual Jifejs like the lives of the tribe; it differs in these specific ways, but it is a balanced life because it is both solitary and representative\u003c/p\u003e\n\u003ch3 id=\"purpose-of-the-novel-is-enlightening-as-well-as-an-art-form\"\u003ePurpose of the novel is enlightening as well as an art form\u003c/h3\u003e\n\u003cp\u003eIt should have something in it that enlightens; something in it that opens the door arid points the way. 
Something in it that suggests what the conflicts are, what the problems are.\u003c/p\u003e\n\u003ch3 id=\"the-novel-is-a-middle-class-art-form\"\u003eThe Novel is a middle class art form\u003c/h3\u003e\n\u003cp\u003eThe history of the novel as a form began when there was a new class, a middle class, to read it; it was an art form that they needed.\u003c/p\u003e\n\u003ch3 id=\"that-there-is-already-a-form-of-artistry-for-the-lower-class-but-not-middle-class\"\u003eThat there is already a form of artistry for the lower class, but not middle class\u003c/h3\u003e\n\u003cp\u003eThe lower classes didn\u0026rsquo;t need novels at that time because they had an art form already they had songs and dances, and ceremony, and gossip, and celebrations.\u003c/p\u003e\n\u003ch3 id=\"novels-of-manners-tell-people-of-a-different-world\"\u003enovels of manners tell people of a different world\u003c/h3\u003e\n\u003cp\u003ewe call 1t the novel of manners, an art form designed to tell peole something they didn\u0026rsquo;t know.\u003c/p\u003e\n\u003ch3 id=\"portrays-quintessential-forms-of-connection\"\u003ePortrays quintessential forms of connection\u003c/h3\u003e\n\u003cp\u003eHow to get married. What a good living was.\u003c/p\u003e\n\u003ch3 id=\"the-african-americans-became-unexclusive\"\u003eThe African Americans became unexclusive\u003c/h3\u003e\n\u003cp\u003eFor a long time, the art form that was healing for Black people was music. That music is no longer exclusively ours; we don\u0026rsquo;t have exclusive rights to it.\u003c/p\u003e\n\u003ch3 id=\"that-the-story-of-the-novel-is-told-where-the-reader-constructs-the-story-together\"\u003eThat the story of the novel is told where the reader constructs the story together\u003c/h3\u003e\n\u003cp\u003eTo construct the dialogue so that it is heard. 
So that there are no adverbs attached to them: \u0026ldquo;loudly,\u0026rdquo; \u0026ldquo;softly,\u0026rdquo; \u0026ldquo;he said menacingly.'\u003c/p\u003e\n\u003ch3 id=\"that-the-artistry-is-not-described-as-black-but-inherently-black\"\u003eThat the artistry is not described as Black but inherently black\u003c/h3\u003e\n\u003cp\u003eBlack, because it uses the characteristics of Black art\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecondary_source_comparison_activity/","tags":null,"title":"Bluest Eye: secondary source comparison activity"},{"categories":null,"contents":"bool does not belong in pure C.\n#include \u0026lt;stdio.h\u0026gt; #include \u0026lt;stdbool.h\u0026gt; // you need to include this to get bools to work. int main(int argc, char *argv[]) { bool test = true; if (test) printf(\u0026#34;its true\\n\u0026#34;) } ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbool/\"\u003ebool\u003c/a\u003e does \u003cstrong\u003enot\u003c/strong\u003e belong in pure C.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;stdio.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;stdbool.h\u0026gt; // you need to include this to get bools to work.\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eargc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[])\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;its true\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhbool/","tags":null,"title":"bool"},{"categories":null,"contents":"bootstrap allows you to know distribution statistics, calculate p-value, etc, with NO statistical testing like t test, etc.\nBig idea: treat your sample space as your population, and sample from it to obtain an estimate of the properties of the sample distribution.\n\\begin{equation} D \\approx \\hat{D} \\end{equation}\nso, to calculate the distribution of any given statistic via a sample:\nestimate the PMF using sample my_statistic_dist = [] (like sample mean, sample variance, etc.) for i in (N \u0026gt;\u0026gt; 10000) take a subsample of len(sample) samples from PMFu my_statistic_dist.append(my_statistic=(=subsample)) (recall it has to be a sampling statistic (like N-1 for sample variance) how you have a distribution of my_statistic We know that taking mean and var re drawn as a statistic of the same random variable, \\(N\\) times. So, central limit theorem holds. 
Therefore, these are normal and you can deal with them.\nIn terms of step 3.1, the subsample of len sample can be given by:\nnp.random.choice(sample_pop, len(sample_pop), replace=True) because we essentilaly want to draw from a weighted distribution of your input sample, WITH REPLACEMENT (otherwise it\u0026rsquo;d be the same exact set of data instead of a sample from it).\np-value from bootstrap p-value is defined as \u0026ldquo;probability of having an difference in sample means (called Effecient Frontier) greater than that observed in samples of the null hypothesis, that the two sames came from the same distribution\u0026rdquo;.\nso:\n\\begin{equation} P(|\\mu_{1} - \\mu_{2}|\u0026gt;x | \\text{null}\\)) \\end{equation}\nWe can simply calculate an effect size distribution via the bootstrapping on the combined population of both distributions, to see what the probability above is where \\(x\\) is the actual effect size we got.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003e allows you to know distribution statistics, calculate \u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003e, etc, with NO \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003eal testing like t test, etc.\u003c/p\u003e\n\u003cp\u003eBig idea: treat your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e as your population, and sample from it to obtain an estimate of the properties of the sample distribution.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD \\approx \\hat{D}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso, to calculate the distribution of any given \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e via a sample:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eestimate the \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e using sample\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003emy_statistic_dist\u003c/code\u003e = [] (like \u003ca 
href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e, \u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003efor i in (N \u0026gt;\u0026gt; 10000)\n\u003col\u003e\n\u003cli\u003etake a \u003ccode\u003esubsample\u003c/code\u003e of len(sample) samples from \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003eu\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003emy_statistic_dist\u003c/code\u003e.append(\u003ccode\u003emy_statistic=(=subsample\u003c/code\u003e)) (recall it has to be a \u003ca href=\"/posts/kbhrandom_variables/#sampling-statistics\"\u003esampling statistic\u003c/a\u003e (like N-1 for \u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ehow you have a distribution of \u003ccode\u003emy_statistic\u003c/code\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe know that taking mean and var re drawn as a statistic of the same random variable, \\(N\\) times. So, \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e holds. 
Therefore, these are normal and you can deal with them.\u003c/p\u003e\n\u003cp\u003eIn terms of step 3.1, the subsample of len sample can be given by:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echoice\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esample_pop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esample_pop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ebecause we essentilaly want to draw from a weighted distribution of your input sample, WITH REPLACEMENT (otherwise it\u0026rsquo;d be the same exact set of data instead of a sample from it).\u003c/p\u003e\n\u003ch2 id=\"p-value-from-bootstrap\"\u003ep-value from bootstrap\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003e is defined as \u0026ldquo;\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of having an difference in sample means 
(called \u003ca href=\"/posts/kbhcapm/#minimum-variance-boundary\"\u003eEffecient Frontier\u003c/a\u003e) greater than that observed in samples of the \u003ca href=\"/posts/kbhhypothesis_testing/#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e, that the two sames came from the same distribution\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eso:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(|\\mu_{1} - \\mu_{2}|\u0026gt;x | \\text{null}\\))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can simply calculate an \u003ca href=\"#p-value-from-bootstrap\"\u003eeffect size\u003c/a\u003e distribution via the \u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003eping on the combined population of both distributions, to see what the probability above is where \\(x\\) is the actual effect size we got.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhboostrap/","tags":null,"title":"bootstrap"},{"categories":null,"contents":"BNT is a discourse task where subjects are shown 60 pictures decreasing frequency and asked to recall the word.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBNT\u003c/a\u003e is a \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003ediscourse task\u003c/a\u003e where subjects are shown 60 pictures decreasing frequency and asked to recall the word.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhboston_naming_test/","tags":null,"title":"Boston Naming Test"},{"categories":null,"contents":"(Bouton et al. 
2018)\nOne-Liner Uses the single-user avoidance POMDP formulation presented in (Bouton, Cosgun, and Kochenderfer 2017) to extend to multiple road users\nNovelty Uses Single-User Model of Road Navigation to extend general POMDP formulation into multi-pedestrian/multi road user casesroad user cases\nPrevious Work Imagine worst-case scenario always: set upper bound and always imagine it; could cause gridlock if situation never resolves.\nNotable Methods Uses QMDP and SARSOP to perform optimization\nSingle-User Model of Road Navigation See Single-User Model of Road Navigation\nScaling to multiple road users make an aggregate utility which is a function across all the single-user avoidance strategies (i.e. the aggregate utiltiy of mulitlpe road user is the utility of avoiding each individual user) \\(U^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a)\\). this is called utility fusion two possible approaches: either minimum of all the utilities, or the sum of them; the former is more risk averse (we want to hit no one), and latter treats each user is independent. 
further, the number of users in the road is modeled by a belief Evaluation \u0026ldquo;the evaluation models are different to find the optimal policy, and are also higher fidelity\u0026rdquo;\nWe want to evaluate our POMDP on a higher fidelity model to check if the system can generalize to harder environments.\nBaselines: random actions, or hand crafted rules-based policy.\nKey Figs New Concepts Single-User Model of Road Navigation POMDP formulation; we only care about one road user\naction: a finite set of change in acceleration -4m/s2, -2m/s2, 0m/s2, 2m/s2, 4m/s2 states and transitions: poses (position + velocity) of the car and the road user; position are velocities are discretized observation: measured position and velocity of the one other road user with a pm 1 meter variance for crosswalks and pm 2 meter variance for intersection users in non-occluded area will always be detected user in an occluded area will not be detected position and velocity of road users are uncertain pm 1 meter and pm 1 meter / second belief: categorical distribution over states dynamics: physics + kinematics for car; pedestrians have stochastic velocity reward: unit reward for final position, tuned penalty for collision Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_2\"\u003eBouton et al. 
2018\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUses the single-user avoidance POMDP formulation presented in (\u003ca href=\"#citeproc_bib_item_1\"\u003eBouton, Cosgun, and Kochenderfer 2017\u003c/a\u003e) to extend to multiple road users\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eUses \u003ca href=\"#single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/a\u003e to extend general \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e formulation into multi-pedestrian/multi road user casesroad user cases\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_09-18-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"previous-work\"\u003ePrevious Work\u003c/h2\u003e\n\u003cp\u003eImagine worst-case scenario always: set upper bound and always imagine it; could cause gridlock if situation never resolves.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eUses \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e and \u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e to perform optimization\u003c/p\u003e\n\u003ch3 id=\"single-user-model-of-road-navigation--orgea079d6\"\u003e\u003ca href=\"#single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"#single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"scaling-to-multiple-road-users\"\u003eScaling to multiple road users\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003emake an aggregate utility which is a function across all the single-user avoidance strategies (i.e. the aggregate utiltiy of mulitlpe road user is the utility of avoiding each individual user) \\(U^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a)\\). 
this is called \u003ca href=\"/posts/kbhutility_fusion/\"\u003eutility fusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003etwo possible approaches: either minimum of all the utilities, or the sum of them; the former is more risk averse (we want to hit no one), and latter treats each user is independent.\u003c/li\u003e\n\u003cli\u003efurther, the number of users in the road is modeled by a belief\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"evaluation\"\u003eEvaluation\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;the evaluation models are different to find the optimal policy, and are also higher fidelity\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe want to evaluate our \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e on a higher fidelity model to check if the system can generalize to harder environments.\u003c/p\u003e\n\u003cp\u003eBaselines: random actions, or hand crafted rules-based policy.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-09_13-00-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_09-35-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003ch3 id=\"single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/h3\u003e\n\u003cp\u003ePOMDP formulation; we only care about \u003cstrong\u003eone road user\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eaction: a finite set of change in acceleration -4m/s2, -2m/s2, 0m/s2, 2m/s2, 4m/s2\u003c/li\u003e\n\u003cli\u003estates and transitions: poses (position + velocity) of the car and the road user; position are velocities are discretized\u003c/li\u003e\n\u003cli\u003eobservation: measured position and velocity of the one other road user with a pm 1 meter variance for crosswalks and pm 2 meter variance for 
intersection\n\u003cul\u003e\n\u003cli\u003eusers in non-occluded area will always be detected\u003c/li\u003e\n\u003cli\u003euser in an occluded area will not be detected\u003c/li\u003e\n\u003cli\u003eposition and velocity of road users are uncertain pm 1 meter and pm 1 meter / second\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ebelief: categorical distribution over states\u003c/li\u003e\n\u003cli\u003edynamics: physics + kinematics for car; pedestrians have stochastic velocity\u003c/li\u003e\n\u003cli\u003ereward: unit reward for final position, tuned penalty for collision\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbouton_2018/","tags":null,"title":"Bouton 2018"},{"categories":null,"contents":"Big idea: keep branching/selecting until a tally hits an upper/lower bound\nIngredients:\n\\(Ulo(s)\\): lower bound function of value function \\(Qhi(s,a)\\): upper bound function of action-value function \\(\\mathcal{P}\\) problem (states, transitions, etc.) \\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower Its Forward Search, but with bounds instead of exponentially looking into every possible next state, we only check the actions in the order of their bounded value. 
We start with the actions with the highest bound (most possible value), and if its already better than the upper bound, we can be done because we know everything else will have lower value as their bounds are lower.\nDefine subroutine branch_and_bound(depth_remaining, utility_lower_bound, q_upper_bound, state).\nif depth_remaining=0; return (action=None, utility=utility_lower_bound(s)) otherwise, let best=(action = None, utility = -infiny) for each possible action at our state, SORTED from highest q_upper_bound(s,a) to lowest if best.utility is higher than the q_upper_bound(s,a) return best (because its not worth to search any other action anymore because anything else would have a lower max bound) get an action-value for our current state where the utility of each next state is the utility given by branch_and_bound(depth_remaining-1, utility_lower_bound, q_upper_bound, next_state) if the action-value is higher than what we have, then we set best (a, action-value) return best This is basically the same thing as Forward Search, but you get the bonus benefit of being able to early-terminate if you bettered your max bounds\n","html":"\u003cp\u003eBig idea: keep branching/selecting until a tally hits an upper/lower bound\u003c/p\u003e\n\u003cp\u003eIngredients:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(Ulo(s)\\): lower bound function of \u003ca href=\"/posts/kbhaction_value_function/#value-function--kbhaction-value-function-dot-md\"\u003evalue function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(Qhi(s,a)\\): upper bound function of \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIts \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but with 
bounds instead of exponentially looking into every possible next state, we only check the actions in the order of their bounded value. We start with the actions with the highest bound (most possible value), and if its already better than the upper bound, we can be done because we know everything else will have lower value as their bounds are lower.\u003c/p\u003e\n\u003cp\u003eDefine subroutine \u003ccode\u003ebranch_and_bound(depth_remaining, utility_lower_bound, q_upper_bound, state)\u003c/code\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif depth_remaining=0; return (action=None, utility=utility_lower_bound(s))\u003c/li\u003e\n\u003cli\u003eotherwise,\n\u003col\u003e\n\u003cli\u003elet \u003ccode\u003ebest=(action = None, utility = -infiny)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003efor each possible action at our state, SORTED from highest \u003ccode\u003eq_upper_bound(s,a)\u003c/code\u003e to lowest\n\u003col\u003e\n\u003cli\u003eif \u003ccode\u003ebest.utility\u003c/code\u003e is higher than the \u003ccode\u003eq_upper_bound(s,a)\u003c/code\u003e return best (because its not worth to search any other action anymore because anything else would have a lower max bound)\u003c/li\u003e\n\u003cli\u003eget an \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e for our current state where the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of each next state is the utility given by \u003ccode\u003ebranch_and_bound(depth_remaining-1, utility_lower_bound, q_upper_bound, next_state)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e is higher than what we have, then we set best (a, action-value)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ereturn best\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is basically the same thing as \u003ca 
href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but you get the bonus benefit of being able to early-terminate if you bettered your max bounds\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbranch_and_bound/","tags":null,"title":"Branch and Bound"},{"categories":null,"contents":"Way of performing action research developed by Victoria Clarke and Virginia Braun in 2006\n","html":"\u003cp\u003eWay of performing \u003ca href=\"/posts/kbhaction_research/\"\u003eaction research\u003c/a\u003e developed by Victoria Clarke and Virginia Braun in 2006\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbraun_and_clarke_thematic_analysis/","tags":null,"title":"Braun and Clarke thematic analysis"},{"categories":null,"contents":"Professor Brian MacWhinney is a professor of psychology, modern languages, and language technology at CMU.\n","html":"\u003cp\u003eProfessor \u003ca href=\"/posts/kbhbrian_macwinney/\"\u003eBrian MacWhinney\u003c/a\u003e is a professor of psychology, modern languages, and language technology at \u003ca href=\"/posts/kbhcmu/\"\u003eCMU\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbrian_macwinney/","tags":null,"title":"Brian MacWhinney"},{"categories":null,"contents":"Brown v. Board of Education is a landmark case in the US. This lead for schools to be integrated, and many children were taken out of school out of protest due to the subsequent integration movement between schools.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbrown_v_board_of_education/\"\u003eBrown v. Board of Education\u003c/a\u003e is a landmark case in the US. This lead for schools to be integrated, and many children were taken out of school out of protest due to the subsequent integration movement between schools.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbrown_v_board_of_education/","tags":null,"title":"Brown v. 
Board of Education"},{"categories":null,"contents":"Brownian Motion is the pattern for measuring the convergence of random walk through continuous timing.\ndiscrete random walk discrete random walk is a tool used to construct Brownian Motion. It is a random walk which only takes on two discrete values at any given time: \\(\\Delta\\) and its additive inverse \\(-\\Delta\\). These two cases take place at probabilities \\(\\pi\\) and \\(1-\\pi\\).\nTherefore, the expected return over each time \\(k\\) is:\n\\begin{equation} \\epsilon_{k} = \\begin{cases} \\Delta, p(\\pi) \\\\ -\\Delta, p(1-\\pi) \\end{cases} \\end{equation}\n(that, at any given time, the expectation of return is either\u0026mdash;with probability $π$\u0026mdash;\\(\\Delta\\), or\u0026ndash;with probability $1-π$\u0026mdash;\\(-\\Delta\\).\nThis makes \\(\\epsilon_{k}\\) independently and identically distributed. The price, then, is formed by:\n\\begin{equation} p_{k} = p_{k-1}+\\epsilon_{k} \\end{equation}\nand therefore the price follows a random walk.\nSuch a discrete random walk can look like this:\nWe can split this time from \\([0,T]\\) into \\(n\\) pieces; making each segment with length \\(h=\\frac{T}{n}\\). Then, we can parcel out:\n\\begin{equation} p_{n}(t) = p_{[\\frac{t}{h}]} = p_{[\\frac{nt}{T}]} \\end{equation}\nDescretized at integer intervals.\nAt this current, discrete moments have expected value \\(E[p_{n}(T)] = n(\\pi -(1-\\pi))\\Delta\\) and variance \\(Var[p_{n}(T)]=4n\\pi (1-\\pi)\\Delta^{2}\\). #why\nNow, if we want to have a continuous version of the descretized interval above, we will maintain the finiteness of \\(p_{n}(T)\\) but take \\(n\\) to \\(\\infty\\). 
To get a continuous random walk needed for Brownian Motion, we adjust \\(\\Delta\\), \\(\\pi\\), and \\(1-\\pi\\) such that the expected value and variance tends towards the normal (as we expect for a random walk); that is, we hope to see that:\n\\begin{equation} \\begin{cases} n(\\pi -(1-\\pi))\\Delta \\to \\mu T \\\\ 4n\\pi (1-\\pi )\\Delta ^{2} \\to \\sigma^{2} T \\end{cases} \\end{equation}\nTo solve for these desired convergences into the normal, we have probabilities \\(\\pi, (1-\\pi), \\Delta\\) such that:\n\\begin{equation} \\begin{cases} \\pi = \\frac{1}{2}\\qty(1+\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\ (1-\\pi) = \\frac{1}{2}\\qty(1-\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\ \\Delta = \\sigma \\sqrt{h} \\end{cases} \\end{equation}\nwhere, \\(h = \\frac{1}{n}\\).\nSo looking at the expression for \\(\\Delta\\), we can see that as \\(n\\) in increases, \\(h =\\frac{1}{n}\\) decreases and therefore \\(\\Delta\\) decreases. In fact, we can see that the change in all three variables track the change in the rate of \\(\\sqrt{h}\\); namely, they vary with O(h).\n\\begin{equation} \\pi = (1-\\pi) = \\frac{1}{2}+\\frac{\\mu \\sqrt{h}}{2\\sigma} = \\frac{1}{2}+O\\qty(\\sqrt{h}) \\end{equation}\nOf course:\n\\begin{equation} \\Delta = O\\qty(\\sqrt{h}) \\end{equation}\nSo, finally, we have the conclusion that:\nas \\(n\\) (number of subdivision pieces of the time domain \\(T\\)) increases, \\(\\frac{1}{n}\\) decreases, \\(O\\qty(\\sqrt{h})\\) decreases with the same proportion. Therefore, as \\(\\lim_{n \\to \\infty}\\) in the continuous-time case, the probability of either positive or negative delta (\\(\\pi\\) and \\(-\\pi\\) trends towards each to \\(\\frac{1}{2}\\)) by the same vein, as \\(\\lim_{n \\to \\infty}\\), \\(\\Delta \\to 0\\) Therefore, this is a cool result: in a continuous-time case of a discrete random walk, the returns (NOT! 
just the expect value, but literal \\(\\Delta\\)) trend towards \\(+0\\) and \\(-0\\) each with \\(\\frac{1}{2}\\) probability.\nactual Brownian motion Given the final results above for the limits of discrete random walk, we can see that the price moment traced from the returns (i.e. \\(p_{k} = p_{k-1}+\\epsilon_{k}\\)) have the properties of normality (\\(p_{n}(T) \\to \\mathcal{N}(\\mu T, \\sigma^{2}T)\\))\nTrue Brownian Motion follows, therefore, three basic properties:\n\\(B_{t}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t\\) For some \\(s\u0026lt;t\\), \\(B_{t}-B_{s}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t-s\\) Distributions \\(B_{j}\\) and \\(B_{t}-B_{s}\\) is independent Standard Brownian Motion Brownian motion that starts at \\(B_0=0\\) is called Standard Brownian Motion\nquadratic variation The quadratic variation of a sequence of values is the expression that:\n\\begin{equation} \\sum_{i=0}^{N-1} (x_{i+1}-x_i)^{2} \\end{equation}\nOn any sequence of values \\(x_0=0,\\dots,x_{N}=1\\) (with defined bounds), the quadratic variation becomes bounded.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e is the pattern for measuring the convergence of \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e through continuous timing.\u003c/p\u003e\n\u003ch2 id=\"discrete-random-walk\"\u003ediscrete random walk\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e is a tool used to construct \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e. It is a \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e which only takes on two discrete values at any given time: \\(\\Delta\\) and its \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e \\(-\\Delta\\). 
These two cases take place at probabilities \\(\\pi\\) and \\(1-\\pi\\).\u003c/p\u003e\n\u003cp\u003eTherefore, the expected return over each time \\(k\\) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon_{k} = \\begin{cases}\n\\Delta, p(\\pi) \\\\\n-\\Delta, p(1-\\pi)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(that, at any given time, the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of return is either\u0026mdash;with probability $π$\u0026mdash;\\(\\Delta\\), or\u0026ndash;with probability $1-π$\u0026mdash;\\(-\\Delta\\).\u003c/p\u003e\n\u003cp\u003eThis makes \\(\\epsilon_{k}\\) independently and identically distributed. The price, then, is formed by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{k} = p_{k-1}+\\epsilon_{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand therefore the price follows a \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuch a \u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e can look like this:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-19_10-53-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe can split this time from \\([0,T]\\) into \\(n\\) pieces; making each segment with length \\(h=\\frac{T}{n}\\). Then, we can parcel out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{n}(t) = p_{[\\frac{t}{h}]} = p_{[\\frac{nt}{T}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDescretized at integer intervals.\u003c/p\u003e\n\u003cp\u003eAt this current, discrete moments have expected value \\(E[p_{n}(T)] = n(\\pi -(1-\\pi))\\Delta\\) and variance \\(Var[p_{n}(T)]=4n\\pi (1-\\pi)\\Delta^{2}\\). 
#why\u003c/p\u003e\n\u003cp\u003eNow, if we want to have a \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e version of the descretized interval above, we will maintain the finiteness of \\(p_{n}(T)\\) but take \\(n\\) to \\(\\infty\\). To get a continuous \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e needed for \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e, we adjust \\(\\Delta\\), \\(\\pi\\), and \\(1-\\pi\\) such that the expected value and variance tends towards the normal (as we expect for a \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e); that is, we hope to see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nn(\\pi -(1-\\pi))\\Delta \\to \\mu T \\\\\n4n\\pi (1-\\pi )\\Delta ^{2} \\to \\sigma^{2} T\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo solve for these desired convergences into the normal, we have probabilities \\(\\pi, (1-\\pi), \\Delta\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\pi = \\frac{1}{2}\\qty(1+\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\\n(1-\\pi) = \\frac{1}{2}\\qty(1-\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\\n\\Delta = \\sigma \\sqrt{h}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(h = \\frac{1}{n}\\).\u003c/p\u003e\n\u003cp\u003eSo looking at the expression for \\(\\Delta\\), we can see that as \\(n\\) in increases, \\(h =\\frac{1}{n}\\) decreases and therefore \\(\\Delta\\) decreases. 
In fact, we can see that the change in all three variables track the change in the rate of \\(\\sqrt{h}\\); namely, they vary with \u003ca href=\"/posts/kbhasymtotic_analysis/#o--n\"\u003eO(h)\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi = (1-\\pi) = \\frac{1}{2}+\\frac{\\mu \\sqrt{h}}{2\\sigma} = \\frac{1}{2}+O\\qty(\\sqrt{h})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta = O\\qty(\\sqrt{h})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, finally, we have the conclusion that:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eas \\(n\\) (number of subdivision pieces of the time domain \\(T\\)) increases, \\(\\frac{1}{n}\\) decreases, \\(O\\qty(\\sqrt{h})\\) decreases with the same proportion. Therefore, as \\(\\lim_{n \\to \\infty}\\) in the continuous-time case, the probability of \u003cem\u003eeither\u003c/em\u003e positive or negative delta (\\(\\pi\\) and \\(-\\pi\\) trends towards each to \\(\\frac{1}{2}\\))\u003c/li\u003e\n\u003cli\u003eby the same vein, as \\(\\lim_{n \\to \\infty}\\), \\(\\Delta \\to 0\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTherefore, this is a cool result: in a continuous-time case of a \u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e, the returns (NOT! just the expect value, but literal \\(\\Delta\\)) trend towards \\(+0\\) and \\(-0\\) each with \\(\\frac{1}{2}\\) probability.\u003c/p\u003e\n\u003ch2 id=\"actual-brownian-motion\"\u003eactual Brownian motion\u003c/h2\u003e\n\u003cp\u003eGiven the final results above for the limits of \u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e, we can see that the price moment traced from the returns (i.e. 
\\(p_{k} = p_{k-1}+\\epsilon_{k}\\)) have the properties of normality (\\(p_{n}(T) \\to \\mathcal{N}(\\mu T, \\sigma^{2}T)\\))\u003c/p\u003e\n\u003cp\u003eTrue \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e follows, therefore, three basic properties:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(B_{t}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t\\)\u003c/li\u003e\n\u003cli\u003eFor some \\(s\u0026lt;t\\), \\(B_{t}-B_{s}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t-s\\)\u003c/li\u003e\n\u003cli\u003eDistributions \\(B_{j}\\) and \\(B_{t}-B_{s}\\) is independent\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"standard-brownian-motion\"\u003eStandard Brownian Motion\u003c/h2\u003e\n\u003cp\u003eBrownian motion that starts at \\(B_0=0\\) is called \u003ca href=\"#standard-brownian-motion\"\u003eStandard Brownian Motion\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"quadratic-variation\"\u003equadratic variation\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#quadratic-variation\"\u003equadratic variation\u003c/a\u003e of a sequence of values is the expression that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=0}^{N-1} (x_{i+1}-x_i)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOn any sequence of values \\(x_0=0,\\dots,x_{N}=1\\) (with defined bounds), the quadratic variation becomes bounded.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbrownian_motion/","tags":null,"title":"Brownian Motion"},{"categories":null,"contents":"buffer overflow happens when operations like stcpy runs beyond the edge of the allocated buffer. We need to find and fix buffer overflows, which causes people who use o\nbuffer overflow horror stories AOL messanger identifying buffer overflows Think about whether or not what you are going to do will cause buffer overflows. 
There are stuff which you shouldn\u0026rsquo;t do:\nstrcpy: which keeps copying strcat: gets: it keeps taking input forever and forever https://www.acm.org/code-of-ethics \u0026ldquo;Design and implement systems that are robustly and usably secure.\u0026rdquo;\nvalgrind valgrind is a good tool to check whether or not something will buffer overflow.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003e happens when operations like \u003ccode\u003estcpy\u003c/code\u003e runs beyond the edge of the allocated buffer. We need to find and fix \u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003es, which causes people who use o\u003c/p\u003e\n\u003ch2 id=\"buffer-overflow--kbhbuffer-overflow-dot-md--horror-stories\"\u003e\u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003e horror stories\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAOL messanger\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"identifying-buffer-overflow--kbhbuffer-overflow-dot-md--s\"\u003eidentifying \u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eThink about whether or not what you are going to do will cause \u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003es. 
There are stuff which you shouldn\u0026rsquo;t do:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcpy\"\u003estrcpy\u003c/a\u003e: which keeps copying\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcat\"\u003estrcat\u003c/a\u003e:\u003c/li\u003e\n\u003cli\u003egets: it keeps taking input forever and forever\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"https://www.acm.org/code-of-ethics\"\u003ehttps://www.acm.org/code-of-ethics\u003c/a\u003e \u0026ldquo;Design and implement systems that are robustly and usably secure.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"valgrind\"\u003evalgrind\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#valgrind\"\u003evalgrind\u003c/a\u003e is a good tool to check whether or not something will buffer overflow.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbuffer_overflow/","tags":null,"title":"buffer overflow"},{"categories":null,"contents":"\u0026ldquo;How do we build well developed AI systems without a bangin\u0026rsquo; company\u0026rdquo;\nTwo main paradigms transfer learning: (pretrain a model, and) faster convergence, better performance *monolithic models: (pretrain a model, and) just use the pretrained model Problems with monolythic models Continual development of large language models mostly don\u0026rsquo;t exist: no incremental updates To get better improvements, we throw out the old monolythic model Most of the research community can\u0026rsquo;t participate in their development New Alternative Paradigm A very simple routing layer A very large collection of specialist models all from a base model Collaborative model development means that a large amount of contributors can band together to contribute to the development of the models Why Specialist models are cheaper and better to train few shot parameter efficient fine tuning is better liu et al few shot fine-tuning is better than few-shot in-context learning Specialist models can be communicable, 
incremental updates to a base model think: PEFT each of the specialist models can only need to update a small percent of the weights think \u0026ldquo;adapters\u0026rdquo;: parameter efficient updates Routing task2vec: task embedding for meta learning Achille et al efficiently tuned parameters are task embeddings Zhou et al distinction between MoE instead of routing in sub-layer level routing, we are routing at the input level we look at the Novel Tasks (Model Merging) Tasks can be considered as a composition of skills.\neach task can be encoded as a composition of skills we can merge the skills of sub-models Usual updates we take a pretrained model we adapt it to some target task Model Merging Fisher-weight averaging\n\u0026ldquo;Merging models with fisher-weight averaging\u0026rdquo;, Matena et al Merging can be shown as an optimization problem:\n\\begin{equation} argmax_{\\theta} \\sum_{i-1}^{M} \\lambda_{i} \\log p(\\theta \\mid \\mathcal{D}_{i}) \\end{equation}\n\u0026ldquo;a merged model is the set of parameters which would maximize the log-posterior of each model \\(\\mathcal{D}_{i}\\), controlled by \\(\\lambda_{i}\\)\u0026rdquo;\nTask arthmetic\n\u0026ldquo;Editing models with Task Arthmetic\u0026rdquo;, llharco et al \u0026ldquo;Resolving inference when merging models\u0026rdquo; by Yadev et al\nYou can create multi-task models by just doing maff:\n\\begin{equation} \\tau_{1} = \\theta_{finetune_{1}} - \\theta_{pretrain} \\end{equation}\n\\begin{equation} \\tau_{2} = \\theta_{finetune_{2}} - \\theta_{pretrain} \\end{equation}\n\\begin{equation} \\theta_{finetune_{1+2}} = (\\tau_{1} + \\tau_{2}) + \\theta_{pretrain} \\end{equation}\nthis apparently works ok.\nSoft MoE\nSoft merging of experts with adaptive routing, Muqeeth et al\nMoE, but instead of choosing an expert to activate, the router\u0026rsquo;s probability densities will result in a mixture of the experts\u0026rsquo; weights. 
So, mulitple experts can be invoked in a linear way.\nGit-Theta Git-Theta: A Git Extension for Collaborative Development of Machine Learning Models, Kandpal et al\nCommunal and iterative development of model checkpoints. Saves only LoRA\u0026rsquo;d parameters, and removes any weights that didn\u0026rsquo;t change between diffs.\nPetals Petals: Collaborative Inference and Fine-Tuning of Large Models, Borzunov et al.\nDistributed fine-tuning and model inference by using different sub-worker nodes to run different layers of the model.\nhttps://health.petals.dev/\n","html":"\u003cp\u003e\u0026ldquo;How do we build well developed AI systems without a bangin\u0026rsquo; company\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"two-main-paradigms\"\u003eTwo main paradigms\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003etransfer learning\u003c/strong\u003e: (pretrain a model, and) faster convergence, better performance\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e*monolithic models\u003c/strong\u003e: (pretrain a model, and) just use the pretrained model\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"problems-with-monolythic-models\"\u003eProblems with monolythic models\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eContinual development of large language models mostly don\u0026rsquo;t exist: no incremental updates\u003c/li\u003e\n\u003cli\u003eTo get better improvements, we throw out the old monolythic model\u003c/li\u003e\n\u003cli\u003eMost of the research community can\u0026rsquo;t participate in their development\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-alternative-paradigm\"\u003eNew Alternative Paradigm\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA very simple routing layer\u003c/li\u003e\n\u003cli\u003eA very large collection of specialist models all from a base model\u003c/li\u003e\n\u003cli\u003eCollaborative model development means that a large amount of contributors can band together to contribute to the development of the 
models\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSpecialist models are cheaper and better to train\n\u003cul\u003e\n\u003cli\u003efew shot parameter efficient fine tuning is better liu et al\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efew shot fine-tuning is better than few-shot in-context learning\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eSpecialist models can be communicable, incremental updates to a base model\n\u003cul\u003e\n\u003cli\u003ethink: \u003ca href=\"/posts/kbhpeft/\"\u003ePEFT\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eeach of the specialist models can only need to update a small percent of the weights\u003c/li\u003e\n\u003cli\u003ethink \u0026ldquo;adapters\u0026rdquo;: parameter efficient updates\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"routing\"\u003eRouting\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003etask2vec: task embedding for meta learning Achille et al\u003c/li\u003e\n\u003cli\u003eefficiently tuned parameters are task embeddings Zhou et al\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"distinction-between-moe\"\u003edistinction between MoE\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003einstead of routing in sub-layer level routing, we are routing at the \u003cstrong\u003einput level\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ewe look at the\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"novel-tasks--model-merging\"\u003eNovel Tasks (Model Merging)\u003c/h3\u003e\n\u003cp\u003eTasks can be considered as a composition of skills.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eeach task can be encoded as a composition of skills\u003c/li\u003e\n\u003cli\u003ewe can merge the skills of sub-models\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"usual-updates\"\u003eUsual updates\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003ewe take a pretrained model\u003c/li\u003e\n\u003cli\u003ewe adapt it to 
some target task\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"model-merging\"\u003eModel Merging\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eFisher-weight averaging\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u0026ldquo;Merging models with fisher-weight averaging\u0026rdquo;, Matena et al\u003c/strong\u003e\nMerging can be shown as an optimization problem:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nargmax_{\\theta} \\sum_{i-1}^{M} \\lambda_{i} \\log p(\\theta \\mid \\mathcal{D}_{i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a merged model is the set of parameters which would maximize the log-posterior of each model \\(\\mathcal{D}_{i}\\), controlled by \\(\\lambda_{i}\\)\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTask arthmetic\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u0026ldquo;Editing models with Task Arthmetic\u0026rdquo;, llharco et al\u003c/strong\u003e\n\u003cstrong\u003e\u0026ldquo;Resolving inference when merging models\u0026rdquo; by Yadev et al\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eYou can create multi-task models by just doing maff:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tau_{1} = \\theta_{finetune_{1}} - \\theta_{pretrain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tau_{2} = \\theta_{finetune_{2}} - \\theta_{pretrain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{finetune_{1+2}} = (\\tau_{1} + \\tau_{2}) + \\theta_{pretrain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis apparently works ok.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSoft MoE\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eSoft merging of experts with adaptive routing, Muqeeth et 
al\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eMoE, but instead of choosing an expert to activate, the router\u0026rsquo;s probability densities will result in a mixture of the experts\u0026rsquo; weights. So, mulitple experts can be invoked in a linear way.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"git-theta\"\u003eGit-Theta\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eGit-Theta: A Git Extension for Collaborative Development of Machine Learning Models, Kandpal et al\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eCommunal and iterative development of model checkpoints. Saves only LoRA\u0026rsquo;d parameters, and removes any weights that didn\u0026rsquo;t change between diffs.\u003c/p\u003e\n\u003ch2 id=\"petals\"\u003ePetals\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003ePetals: Collaborative Inference and Fine-Tuning of Large Models, Borzunov et al.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eDistributed fine-tuning and model inference by using different sub-worker nodes to run different layers of the model.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://health.petals.dev/\"\u003ehttps://health.petals.dev/\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbuild_a_system_not_a_monolyth/","tags":null,"title":"Build a System, Not a Monolith"},{"categories":null,"contents":"BPE is a common Subword Tokenization scheme.\nTraining choose two symbols that are most frequency adjacent merge those two symbols as one symbol throughout the text repeat to step \\(1\\) until we merge \\(k\\) times v = set(corpus.characters()) for i in range(k): tl, tr = get_most_common_bigram(v) tnew = f\u0026#34;{tl}{tr}\u0026#34; v.push(tnew) corpus.replace((tl,tr), tnew) return v Most commonly, BPE is not ran alone: it usually run inside space separation systems. 
Hence, after each word we usually put a special _ token which delineates end of word.\nHence: \u0026ldquo;pink fluffy unicorn dancing on rainbows\u0026rdquo; becomes\np i n k _ f l u f f y _ u n i c o r n _ d a n c i n g _ o n _ r a i n b o w s Inference During inference time, we apply our stored merges in the order we learned them. As in, if we merged er first during training, we should do that first during inference before merging say n er.\nFrequent subwords often ends up being morphemes.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e is a common \u003ca href=\"/posts/kbhtokenization/#subword-tokenization\"\u003eSubword Tokenization\u003c/a\u003e scheme.\u003c/p\u003e\n\u003ch2 id=\"training\"\u003eTraining\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003echoose two symbols that are most frequency adjacent\u003c/li\u003e\n\u003cli\u003emerge those two symbols as one symbol throughout the text\u003c/li\u003e\n\u003cli\u003erepeat to step \\(1\\) until we merge \\(k\\) times\u003c/li\u003e\n\u003c/ol\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecorpus\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etl\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eget_most_common_bigram\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etnew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etl\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etr\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etnew\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ecorpus\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etl\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etnew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMost commonly, \u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e is not ran alone: it usually run \u003cstrong\u003einside\u003c/strong\u003e space separation systems. Hence, after each word we usually put a special \u003ccode\u003e_\u003c/code\u003e token which delineates end of word.\u003c/p\u003e\n\u003cp\u003eHence: \u0026ldquo;pink fluffy unicorn dancing on rainbows\u0026rdquo; becomes\u003c/p\u003e\n\u003cpre tabindex=\"0\"\u003e\u003ccode class=\"language-nil\" data-lang=\"nil\"\u003ep i n k _ f l u f f y _ u n i c o r n _ d a n c i n g _ o n _ r a i n b o w s\n\u003c/code\u003e\u003c/pre\u003e\u003ch2 id=\"inference\"\u003eInference\u003c/h2\u003e\n\u003cp\u003eDuring inference time, we apply our stored merges \u003cstrong\u003ein the order we learned them\u003c/strong\u003e. 
As in, if we merged \u003ccode\u003eer\u003c/code\u003e first during training, we should do that first during inference before merging say \u003ccode\u003en er\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eFrequent subwords often ends up being \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbpe/","tags":null,"title":"Byte-Pair Encoding"},{"categories":null,"contents":"C was created around 1970s to make Unix tool writing easier. It was meant to the be the simplest, lightest, human readable code on top of assembly.\nC is procedural: you write functions and compose them, instead of defining types C++ is procedural (mostly), but you can have objects: you still write functions, and can build objects and call methods Java is actual oop We use C because its fast, highly efficient. C is popular for systems programming, OSes, networking, etc. Lets you work at a lower level to understand/manipulate the underlying systems.\nprinciples of C small, simple abstractions minimalist aesthetic efficiency and simplicity over safety and high-level abstractions C limitations no advanced features operator overloading default arguments pass by reference classes, objects abstract data types no extensive libs no networking no graphics no safety (\u0026ldquo;people who write C think they don\u0026rsquo;t make mistakes\u0026rdquo;) weak compiler no runtime checks at all ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhc/\"\u003eC\u003c/a\u003e was created around 1970s to make \u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e tool writing easier. 
It was meant to the be the simplest, lightest, human readable code on top of assembly.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eC is procedural\u003c/strong\u003e: you write functions and compose them, instead of defining types\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eC++ is procedural (mostly), but you can have objects\u003c/strong\u003e: you still write functions, and \u003cem\u003ecan\u003c/em\u003e build objects and call methods\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eJava is actual oop\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe use C because its fast, highly efficient. C is popular for systems programming, OSes, networking, etc. Lets you work at a lower level to understand/manipulate the underlying systems.\u003c/p\u003e\n\u003ch2 id=\"principles-of-c\"\u003eprinciples of C\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esmall, simple abstractions\u003c/li\u003e\n\u003cli\u003eminimalist aesthetic\u003c/li\u003e\n\u003cli\u003eefficiency and simplicity over safety and high-level abstractions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"c-limitations\"\u003eC limitations\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eno advanced features\n\u003cul\u003e\n\u003cli\u003eoperator overloading\u003c/li\u003e\n\u003cli\u003edefault arguments\u003c/li\u003e\n\u003cli\u003epass by reference\u003c/li\u003e\n\u003cli\u003eclasses, objects\u003c/li\u003e\n\u003cli\u003eabstract data types\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eno extensive libs\n\u003cul\u003e\n\u003cli\u003eno networking\u003c/li\u003e\n\u003cli\u003eno graphics\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eno safety (\u0026ldquo;people who write C think they don\u0026rsquo;t make mistakes\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003eweak compiler\u003c/li\u003e\n\u003cli\u003eno runtime checks at 
all\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhc/","tags":null,"title":"C"},{"categories":null,"contents":"All caches, uses similar memory addressing scheme (i.e. each address can map to one of each device); each \u0026ldquo;line\u0026rdquo; of cache is usually 64 bytes. This means that each time you grab something from memory, the 64 bytes surrounding that area of memory will be cached. You are most likely to do this because of iterating through an array.\nL1-D cache: on the CPU, used for staging for registers L1-I cache: on the CPU, used for staging for assembly instructions L2 cache: L1 staging L3 cache: L2 staging Main memory: memory cache locality temporal locality \u0026ldquo;Things I have used recently, I\u0026rsquo;m likely to use again soon\u0026rdquo;\nspacial locality \u0026ldquo;Things next to what I got I\u0026rsquo;m likely going to use soon\u0026rdquo;\n","html":"\u003cp\u003eAll caches, uses similar memory addressing scheme (i.e. each address can map to one of each device); each \u0026ldquo;line\u0026rdquo; of cache is usually 64 bytes. This means that each time you grab something from memory, the 64 bytes surrounding that area of memory will be cached. 
You are most likely to do this because of iterating through an array.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eL1-D cache: on the CPU, used for staging for registers\u003c/li\u003e\n\u003cli\u003eL1-I cache: on the CPU, used for staging for assembly instructions\u003c/li\u003e\n\u003cli\u003eL2 cache: L1 staging\u003c/li\u003e\n\u003cli\u003eL3 cache: L2 staging\u003c/li\u003e\n\u003cli\u003eMain memory: memory\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"cache-locality\"\u003ecache locality\u003c/h2\u003e\n\u003ch3 id=\"temporal-locality\"\u003etemporal locality\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Things I have used recently, I\u0026rsquo;m likely to use again soon\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"spacial-locality\"\u003espacial locality\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Things next to what I got I\u0026rsquo;m likely going to use soon\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcaching/","tags":null,"title":"caching"},{"categories":null,"contents":"cal.com is an automating calendar service funded by the VC firm 776.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcal_com/\"\u003ecal.com\u003c/a\u003e is an automating calendar service funded by the VC firm \u003ca href=\"/posts/kbh776/\"\u003e776\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcal_com/","tags":null,"title":"cal.com"},{"categories":null,"contents":" ","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-08_12-59-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-08_12-59-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcalculating_shear_s_modulus/","tags":null,"title":"calculating shear's modulus"},{"categories":null,"contents":"Contraindicated offline POMDP solver.\nContrained belief state MDP Linear Programming belief set generation Approximate POMDP with Contrainst CPOMDPs are Hard Can\u0026rsquo;t do DP with 
pruning: optimal policies may be stochastic Minimax quadratically contained program: computational intractable Contained PBVI struggles with contraint satisfaction CALP Core Idea Recast CPOMDP as a contrained belief-state MDP.\nWe replace our state-space with our belief space:\n\\(S = B\\) \\(s_0 = b_0\\) You essentially assume here that there is some finite belief space.\n","html":"\u003cp\u003e\u003cstrong\u003eContraindicated\u003c/strong\u003e offline POMDP solver.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eContrained belief state MDP\u003c/li\u003e\n\u003cli\u003eLinear Programming\u003c/li\u003e\n\u003cli\u003ebelief set generation\u003c/li\u003e\n\u003cli\u003eApproximate POMDP with Contrainst\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"cpomdps-are-hard\"\u003eCPOMDPs are Hard\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCan\u0026rsquo;t do DP with pruning: optimal policies may be stochastic\u003c/li\u003e\n\u003cli\u003eMinimax quadratically contained program: computational intractable\u003c/li\u003e\n\u003cli\u003eContained \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e struggles with contraint satisfaction\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"calp-core-idea\"\u003eCALP Core Idea\u003c/h2\u003e\n\u003cp\u003eRecast \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003e as a contrained \u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe replace our state-space with our belief space:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S = B\\)\u003c/li\u003e\n\u003cli\u003e\\(s_0 = b_0\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou essentially assume here that there is some finite belief space.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcalp/","tags":null,"title":"CALP"},{"categories":null,"contents":"Calpains something something something AFIB\n","html":"\u003cp\u003eCalpains something something something \u003ca 
href=\"/posts/kbhafib/\"\u003eAFIB\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcalpains_afib/","tags":null,"title":"Calpains … AFIB"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcanciones/","tags":null,"title":"Canciones"},{"categories":null,"contents":"duplicate article creation. see Cantilever Beams\n","html":"\u003cp\u003eduplicate article creation. see \u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever Beams\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcantilever_beam/","tags":null,"title":"cantilever beam"},{"categories":null,"contents":"A Cantilever beam is a rigid structure which is extended horizontally and supported on one end.\nWorking with Cantilever Beams curvature Let\u0026rsquo;s first define a function:\n\\begin{equation} w(x) \\end{equation}\nthis represents the deflection of the beam at point \\(x\\). We will begin by taking its derivative by location:\n\\begin{equation} \\Delta w = \\pdv{w}{x} \\end{equation}\nis the change in deflection over location. \u0026ldquo;How much deviation of the beam from the resting axi is there as you run along it?\u0026rdquo;\nWe now take another derivative:\n\\begin{equation} k = \\pdv[2]{w}{x} \\end{equation}\n\\(k\\) is defined as the \u0026ldquo;Curvature\u0026rdquo; of the beam: the \u0026ldquo;change in the change of bentness.\u0026rdquo; The intuition is essentially this:\na straight, flat beam fixed an one end has \\(\\Delta w=0\\), \\(k=0\\). It does not change from its resting axis, and its rate of change from resting does not change a straight, slanted beam fixed at one end has \\(\\Delta w=C, k=0\\). It changes from its resting axis with a linear rate, and its rate of change from resting does not change. a curved, slanted beam fixed at one end has \\(\\Delta \\omega = f(x), k=C\\). 
It changes from its resting axis non-linearly (hence curving at a function of \\(x\\)), and its rate of change from resting is changing at a constant \\(c\\). flexural rigidity Flexural Rigidity is the \u0026ldquo;force couple\u0026rdquo; (\u0026ldquo;rate\u0026rdquo;) which relates the Curvature of an non-rigid body and how much torque it actually generates given the object\u0026rsquo;s properties.\nRecall first our Elastic Modulus \\(E\\): it is a fraction of \\(\\frac{stress}{strain}\\) measured in Pascals (force per unit area, i.e. \\(\\frac{N}{m^{2}} = \\frac{kg}{ms^{2}}\\)).\nFind also second moment of area \\(I\\): a value in units \\(m^{4}\\) which is the sum (by area) of the squared displacement of each infinitesimal area to the axis of origin.\nAnd we bam! we multiply the two things together, creating a value \\(EI\\) in units \\(Nm^{2}\\).\nbending moment bending moment is the torque from bending. It is expressed usually in \\(M\\). As mentioned in the section about Flexural Rigidity, we can use that value to relate \\(M\\) with the actual Curvature of your object.\nSpecifically, that:\n\\begin{equation} M = -(EI)k = -EI\\pdv[2]{w}{x} \\end{equation}\n\u0026ldquo;bending moment is flexural rigidity times curvature\u0026rdquo; =\u0026gt; \u0026ldquo;[how much force per distance you exert] is the result of [how bendy your thing is] times [how much you bent it].\u0026rdquo;\nThere is a negative in front because if you pull out your lovely little right hand, point your thumb forward (+y), start curling your nice fingers around your nice hand (-z), you will notice that you are wrapping them downwards (the - part of the z) which is rather not positive. If we want \\(\\pdv[2]{w}{x}\\) to be positive (bend up), we will need to chuck a negative in front of it to make both things positive.\nThis relation, while intuitive, is not from first-principles. 
In order to get such a derivation, you read Wikipedia.\nmagic We can take two derivatives by location\u0026mdash;\n\\begin{equation} \\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x) \\end{equation}\nwhere \\(\\mu\\) is the mass density, \\(q(x)\\) is the force applied (in Newtons) by area. this is magic. Will come back to it.\nSolving this? See Finite Difference Method\nActually attempting to solve it Numerical Cantileaver Simulations\nWorking on Deformation ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever\u003c/a\u003e beam is a rigid structure which is extended horizontally and supported on one end.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"working-with-cantilever-beams--kbhcantilever-beams-dot-md\"\u003eWorking with \u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever Beams\u003c/a\u003e\u003c/h2\u003e\n\u003ch3 id=\"curvature\"\u003ecurvature\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s first define a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis represents the deflection of the beam at point \\(x\\). We will begin by taking its derivative by location:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta w = \\pdv{w}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the change in deflection over location. \u0026ldquo;How much deviation of the beam from the resting axi is there as you run along it?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe now take another derivative:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk = \\pdv[2]{w}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(k\\) is defined as the \u0026ldquo;\u003ca href=\"#curvature\"\u003eCurvature\u003c/a\u003e\u0026rdquo; of the beam: the \u0026ldquo;change in the change of bentness.\u0026rdquo; The intuition is essentially this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ea straight, flat beam fixed an one end has \\(\\Delta w=0\\), \\(k=0\\). 
It does \u003cstrong\u003enot change\u003c/strong\u003e from its resting axis, and its rate of change from resting does \u003cstrong\u003e\u003cstrong\u003enot change\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ea straight, slanted beam fixed at one end has \\(\\Delta w=C, k=0\\). It \u003cstrong\u003e\u003cstrong\u003echanges\u003c/strong\u003e\u003c/strong\u003e from its resting axis with a linear rate, and its rate of change from resting does \u003cstrong\u003e\u003cstrong\u003enot change\u003c/strong\u003e\u003c/strong\u003e.\u003c/li\u003e\n\u003cli\u003ea \u003cem\u003ecurved\u003c/em\u003e, slanted beam fixed at one end has \\(\\Delta \\omega = f(x), k=C\\). It \u003cstrong\u003e\u003cstrong\u003echanges\u003c/strong\u003e\u003c/strong\u003e from its resting axis non-linearly (hence curving at a function of \\(x\\)), and its rate of change from resting is \u003cstrong\u003echanging\u003c/strong\u003e at a constant \\(c\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"flexural-rigidity\"\u003eflexural rigidity\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#flexural-rigidity\"\u003eFlexural Rigidity\u003c/a\u003e is the \u0026ldquo;force couple\u0026rdquo; (\u0026ldquo;rate\u0026rdquo;) which relates the \u003ca href=\"#curvature\"\u003eCurvature\u003c/a\u003e of an non-rigid body and how much torque it actually generates given the object\u0026rsquo;s properties.\u003c/p\u003e\n\u003cp\u003eRecall first our \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e \\(E\\): it is a fraction of \\(\\frac{stress}{strain}\\) measured in Pascals (force per unit area, i.e. 
\\(\\frac{N}{m^{2}} = \\frac{kg}{ms^{2}}\\)).\u003c/p\u003e\n\u003cp\u003eFind also \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e \\(I\\): a value in units \\(m^{4}\\) which is the sum (by area) of the squared displacement of each infinitesimal area to the axis of origin.\u003c/p\u003e\n\u003cp\u003eAnd we bam! we multiply the two things together, creating a value \\(EI\\) in units \\(Nm^{2}\\).\u003c/p\u003e\n\u003ch3 id=\"bending-moment\"\u003ebending moment\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#bending-moment\"\u003ebending moment\u003c/a\u003e is the \u003ca href=\"#bending-moment\"\u003etorque from bending\u003c/a\u003e. It is expressed usually in \\(M\\). As mentioned in the section about \u003ca href=\"#flexural-rigidity\"\u003eFlexural Rigidity\u003c/a\u003e, we can use that value to relate \\(M\\) with the actual \u003ca href=\"#curvature\"\u003eCurvature\u003c/a\u003e of your object.\u003c/p\u003e\n\u003cp\u003eSpecifically, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = -(EI)k = -EI\\pdv[2]{w}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"#bending-moment\"\u003ebending moment\u003c/a\u003e is \u003ca href=\"#flexural-rigidity\"\u003eflexural rigidity\u003c/a\u003e times \u003ca href=\"#curvature\"\u003ecurvature\u003c/a\u003e\u0026rdquo; =\u0026gt; \u0026ldquo;[how much force per distance you exert] is the result of [how bendy your thing is] times [how much you bent it].\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThere is a negative in front because if you pull out your lovely little right hand, point your thumb forward (+y), start curling your nice fingers around your nice hand (-z), you will notice that you are wrapping them downwards (the - part of the z) which is rather not positive. 
If we want \\(\\pdv[2]{w}{x}\\) to be positive (bend up), we will need to chuck a negative in front of it to make both things positive.\u003c/p\u003e\n\u003cp\u003eThis relation, while intuitive, is not from first-principles. In order to get such a derivation, \u003ca href=\"https://en.wikipedia.org/wiki/Euler%E2%80%93Bernoulli_beam_theory#Derivation_of_the_bending_equation\"\u003eyou read Wikipedia\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"magic\"\u003emagic\u003c/h3\u003e\n\u003cp\u003eWe can take two derivatives by location\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\mu\\) is the mass density, \\(q(x)\\) is the force applied (in Newtons) by area. this is magic. Will come back to it.\u003c/p\u003e\n\u003ch2 id=\"solving-this\"\u003eSolving this?\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhfinite_difference_method/\"\u003eFinite Difference Method\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"actually-attempting-to-solve-it\"\u003eActually attempting to solve it\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnumerical_cantileaver_simulations/\"\u003eNumerical Cantileaver Simulations\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"working-on-deformation\"\u003eWorking on Deformation\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcantilever_beams/","tags":null,"title":"Cantilever Beams"},{"categories":null,"contents":"The capacitance is the amount of change something can hold; this scales based on how much electric potential is being applied.\nParallel plates \\begin{equation} C = \\frac{\\epsilon_{0} A}{d} \\end{equation}\nwhere, \\(e_0\\) is the permittivity of free space, \\(A\\) the area of the plates, and \\(d\\) their distance.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhcapacitance/\"\u003ecapacitance\u003c/a\u003e is the amount of change something can hold; this scales based on how much 
\u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e is being applied.\u003c/p\u003e\n\u003ch2 id=\"parallel-plates\"\u003eParallel plates\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nC = \\frac{\\epsilon_{0} A}{d}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(e_0\\) is the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e, \\(A\\) the area of the plates, and \\(d\\) their distance.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcapacitance/","tags":null,"title":"capacitance"},{"categories":null,"contents":"A capacitor changes, then resists being charged further. Their rules work opposite to resistors.\ncapacitor in series \\begin{equation} \\frac{1}{C_{eq}} = \\frac{1}{C_1} + \\frac{1}{C_2} + \\frac{1}{C_3} \\end{equation}\nand yet,\ncapacitor in parallel \\begin{equation} C_{eq} = C_1 + C_2 + C_3 \\end{equation}\nenergy stored by a capacitor \\begin{equation} E = \\frac{1}{2} CV^{2} \\end{equation}\nwhere, \\(E\\) is the energy stored, \\(C\\) the capacitance, and \\(V\\) the voltage across the capacitor.\nWhich, subbing the formula below:\n\\begin{equation} U = \\frac{1}{2} \\frac{Q^{2}}{C} \\end{equation}\nvoltage across and max charge stored on a capacitor \\begin{equation} C = \\frac{Q}{V} \\end{equation}\nwhere, \\(Q\\) is the change and \\(V\\) the voltage\n\u0026ldquo;the more change the capacitor can store given a voltage, the higher the capacitance.\u0026rdquo;\n\\begin{equation} Q = CV \\end{equation}\n","html":"\u003cp\u003eA capacitor changes, then resists being charged further. 
Their rules work opposite to \u003ca href=\"/posts/kbhresistors/\"\u003eresistor\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"capacitor-in-series\"\u003ecapacitor in series\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{C_{eq}} = \\frac{1}{C_1} + \\frac{1}{C_2} + \\frac{1}{C_3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand yet,\u003c/p\u003e\n\u003ch2 id=\"capacitor-in-parallel\"\u003ecapacitor in parallel\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nC_{eq} = C_1 + C_2 + C_3\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"energy-stored-by-a-capacitor\"\u003eenergy stored by a capacitor\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{1}{2} CV^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(E\\) is the energy stored, \\(C\\) the capacitance, and \\(V\\) the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e across the capacitor.\u003c/p\u003e\n\u003cp\u003eWhich, subbing the formula below:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU = \\frac{1}{2} \\frac{Q^{2}}{C}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"voltage-across-and-max-charge-stored-on-a-capacitor\"\u003evoltage across and max charge stored on a capacitor\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nC = \\frac{Q}{V}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(Q\\) is the change and \\(V\\) the voltage\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the more change the capacitor can store given a voltage, the higher the capacitance.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ = CV\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcapacitor/","tags":null,"title":"Capacitor"},{"categories":null,"contents":"A cancer drug to synthesize Fluoropyrimidine.\n","html":"\u003cp\u003eA cancer drug to synthesize \u003ca 
href=\"\"\u003eFluoropyrimidine\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcapecitabmine/","tags":null,"title":"Capecitabmine"},{"categories":null,"contents":"CAPM is a method of portfolio selection analysis which focuses on maximizing return given some fixed variance.\nIt deals with optimal Capital Market Line, given here:\n\\begin{equation} E[R_{p}] = r_{f}+\\frac{\\sigma_{p}}{\\sigma_{T}}\\qty(E[R_{T}]-r_{f}) \\end{equation}\nWhich describes \\(E[R_{p}]\\), the expected return of an optimal portfolio in a market, given, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, \\(\\sigma_{p}\\) is the portfolio returns, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\nSharpe Ratio The Sharpe Ratio is a measure of the risk-adjusted performance of an asset\u0026mdash;given the rate of return of some risk-free asset.\nIt is defined as:\n\\begin{equation} S_{a} = \\frac{E[R_{a}-R_{b}]}{\\sigma_{a}} \\end{equation}\nwhere, \\(R_{a}\\) is the raw return of the asset, \\(R_{b}\\) is the risk-free rate of return, and \\(\\sigma_{a}\\) is the standard deviation of the asset \u0026ldquo;excess\u0026rdquo; return (i.e. standard deviation actual return - expected return\u0026mdash;how much extra there is).\nMinimum-Variance Boundary For a given a weighted-average portfolio of stocks their waited averages, and correlations between the stocks you can draw this curvy curve. Let pink dots represent the two securities in your portfolio, and various curves highlighting possible linear combinations thereof\u0026mdash;\nLet\u0026rsquo;s observe the boundary conditions of this curve.\nIf the two stocks are exactly negatively correlated, then the more risk you take the more return you have for one while less return you have for the other (hence, two straight divergent lines.)\nIf you have an exactly correlated portfolio, the two assets will will form a line.\nThe Effecient Frontier is the top half of this curve (i.e. 
higher risk/higher return is not a fun place to be, so that\u0026rsquo;s an inefficient frontier.)\nCapital Market Line The Capital Market Line is a line that uses the Sharpe Ratio of a market as a whole (how the market is performing against the risk-free rate) to analyze the performance of portfolio. It plots the performance of an \u0026ldquo;optimal portfolio\u0026rdquo; in a given market.\nLet\u0026rsquo;s construct first the Sharpe Ratio of a hypothetical market:\n\\begin{equation} \\frac{R_{t}-r_{f}}{\\sigma_{t}} \\end{equation}\nwhere \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\nWe will multiply this value by the standard-deviation of your portfolio to calculate what the market claims should be your expected return. Then, we shift the line by the risk-free rate (as you are expected also to get that rate back in your return.\nSo an \u0026ldquo;effecient\u0026rdquo; portfolio (getting the max expected return per unit risk as measured by the market Sharpe Ratio) should behave like:\n\\begin{equation} E[R_{p}] = r_{f}+\\frac{E[R_{T}]-r_{f}}{\\sigma_{T}}\\sigma_{p} \\end{equation}\nagain, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\nThe one liner is: \u0026ldquo;the return of your portfolio should be the base return by risk-free rate, plus how much excess risk you are taking on (and therefore return you should be getting back by the Sharpe Ratio)\u0026rdquo;\n(how much you are expected to get (i.e. market Sharpe Ratio times your portfolio volatility), shifted back up by the risk-free rate.\nSharpe-Lintner CAPM A linear formulation of CAPM base on market-excess return (i.e. if you want to beat the market, you will have to sustain proportionally the same amount of risk.)\nTangency Portfolio There is a portfolio, which is named the Tangency Portfolio. 
This portfolio is the tangent point between the Capital Market Line and the Effecient Frontier.\nIt represents the point where you can get the highest return given some risk, but also control the risk at the market\u0026rsquo;s Sharpe Ratio.\nBlack\u0026rsquo;s CAPM CAPM depends on a risk-free asset. Black of Black-Scholes Formula fame derived another formulation of CAPM which doesn\u0026rsquo;t dependent on a risk-free asset.\nZero-Beta Portfolio To work with Black\u0026rsquo;s CAPM, we first define \\(0m\\), the Zero-Beta Portfolio (used in the formula as \\(R_{0m}\\), the return of the Zero-Beta Portfolio).\nIt is defined to be the portfolio with the minimum variance of all portfolios not correlated with \\(m\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e is a method of portfolio selection analysis which focuses on \u003cem\u003emaximizing\u003c/em\u003e \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e given some fixed variance.\u003c/p\u003e\n\u003cp\u003eIt deals with optimal \u003ca href=\"#capital-market-line\"\u003eCapital Market Line\u003c/a\u003e, given here:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{p}] = r_{f}+\\frac{\\sigma_{p}}{\\sigma_{T}}\\qty(E[R_{T}]-r_{f})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich describes \\(E[R_{p}]\\), the expected return of an optimal portfolio in a market, given, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, \\(\\sigma_{p}\\) is the portfolio returns, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\u003c/p\u003e\n\u003ch2 id=\"sharpe-ratio\"\u003eSharpe Ratio\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e is a measure of the risk-adjusted performance of an asset\u0026mdash;given the rate of return of some risk-free asset.\u003c/p\u003e\n\u003cp\u003eIt is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS_{a} = 
\\frac{E[R_{a}-R_{b}]}{\\sigma_{a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(R_{a}\\) is the raw \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e of the asset, \\(R_{b}\\) is the risk-free rate of \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e, and \\(\\sigma_{a}\\) is the standard deviation of the asset \u0026ldquo;excess\u0026rdquo; return (i.e. standard deviation actual return - expected return\u0026mdash;how much extra there is).\u003c/p\u003e\n\u003ch2 id=\"minimum-variance-boundary\"\u003eMinimum-Variance Boundary\u003c/h2\u003e\n\u003cp\u003eFor a given a weighted-average portfolio of stocks their waited averages, and correlations between the stocks you can draw this curvy curve. Let pink dots represent the two securities in your portfolio, and various curves highlighting possible linear combinations thereof\u0026mdash;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_16-04-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLet\u0026rsquo;s observe the boundary conditions of this curve.\u003c/p\u003e\n\u003cp\u003eIf the two stocks are exactly negatively correlated, then the more risk you take the more return you have for one while less return you have for the other (hence, two straight divergent lines.)\u003c/p\u003e\n\u003cp\u003eIf you have an exactly correlated portfolio, the two assets will will form a line.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"#minimum-variance-boundary\"\u003eEffecient Frontier\u003c/a\u003e is the top half of this curve (i.e. 
higher risk/higher return is not a fun place to be, so that\u0026rsquo;s an inefficient frontier.)\u003c/p\u003e\n\u003ch2 id=\"capital-market-line\"\u003eCapital Market Line\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#capital-market-line\"\u003eCapital Market Line\u003c/a\u003e is a line that uses the \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e of a \u003cstrong\u003emarket\u003c/strong\u003e as a whole (how the market is performing against the risk-free rate) to analyze the performance of portfolio. It plots the performance of an \u0026ldquo;optimal portfolio\u0026rdquo; in a given market.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s construct first the \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e of a hypothetical market:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{R_{t}-r_{f}}{\\sigma_{t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\u003c/p\u003e\n\u003cp\u003eWe will multiply this value by the standard-deviation of your portfolio to calculate what the market claims should be your expected return. 
Then, we shift the line by the risk-free rate (as you are expected also to get that rate back in your return.\u003c/p\u003e\n\u003cp\u003eSo an \u0026ldquo;effecient\u0026rdquo; portfolio (getting the max expected return per unit risk as measured by the market \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e) should behave like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{p}] = r_{f}+\\frac{E[R_{T}]-r_{f}}{\\sigma_{T}}\\sigma_{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eagain, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\u003c/p\u003e\n\u003cp\u003eThe one liner is: \u0026ldquo;the return of your portfolio should be the base return by risk-free rate, plus how much excess risk you are taking on (and therefore \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e you should be getting back by the \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e(how much you are expected to get (i.e. market \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e times your portfolio volatility), shifted back up by the risk-free rate.\u003c/p\u003e\n\u003ch2 id=\"sharpe-lintner-capm\"\u003eSharpe-Lintner CAPM\u003c/h2\u003e\n\u003cp\u003eA linear formulation of CAPM base on market-excess return (i.e. 
if you want to beat the market, you will have to sustain proportionally the same amount of risk.)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_18-44-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_18-45-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"tangency-portfolio\"\u003eTangency Portfolio\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-27_10-35-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThere is a portfolio, which is named the \u003ca href=\"#tangency-portfolio\"\u003eTangency Portfolio\u003c/a\u003e. This portfolio is the tangent point between the \u003ca href=\"#capital-market-line\"\u003eCapital Market Line\u003c/a\u003e and the \u003ca href=\"#minimum-variance-boundary\"\u003eEffecient Frontier\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIt represents the point where you can get the highest return given some risk, but also control the risk at the market\u0026rsquo;s \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"black-s-capm\"\u003eBlack\u0026rsquo;s CAPM\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e depends on a risk-free asset. 
Black of \u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e fame derived another formulation of CAPM which doesn\u0026rsquo;t dependent on a risk-free asset.\u003c/p\u003e\n\u003ch3 id=\"zero-beta-portfolio\"\u003eZero-Beta Portfolio\u003c/h3\u003e\n\u003cp\u003eTo work with \u003ca href=\"#black-s-capm\"\u003eBlack\u0026rsquo;s CAPM\u003c/a\u003e, we first define \\(0m\\), the \u003ca href=\"#zero-beta-portfolio\"\u003eZero-Beta Portfolio\u003c/a\u003e (used in the formula as \\(R_{0m}\\), the return of the \u003ca href=\"#zero-beta-portfolio\"\u003eZero-Beta Portfolio\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eIt is defined to be the portfolio with the minimum variance of all portfolios not correlated with \\(m\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_16-52-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcapm/","tags":null,"title":"Capital-Asset Pricing Model"},{"categories":null,"contents":"Pitfalls The bytes remains the same despite copying, so you can get too funky:\nint v = -12345; unsigned int uv = v; printf(\u0026#34;v = %d, uv = %d\\n\u0026#34;, v, uv); This prints \u0026ldquo;v = -12345, uv=4294954951\u0026rdquo;. 
As in: when you copy rvalues, the bit pattern gets copied and not the numerical number itself; so, it will overflow.\nYou can use U to force an signed quantity to be unsigned:\nunsigned int uv = -12345U; sign promotion If you have the nerve of putting a comparing things of different types (don\u0026rsquo;t), then, the signed quantities gets promoted to be unsigned.\nThat is, we get that:\n-1 \u0026lt; 0U is false because the -1 is promoted to an unsigned integer 2\u0026hellip;.7 \u0026gt; -2\u0026hellip;.7 is true because nothing is converted type size promotion If you have the nerve of putting a comparing things of different types (don\u0026rsquo;t), then, the smaller types get promoted to being a bigger types.\ncasting from small unsigned value to larger unsigned value just requires us prepending a buncha zeros as needed casting from a small signed value to larger signed value requires us repeating the left most value to fill out the rest of the variable (-1 = 11111, so bigger -1 = 11111 (repeated) 111) lasting from a large value to a smaller value will cause truncation type size truncation Take, for instance:\nint x = 53191; short sx = x; int y = sx; The short is 2 byte, which means that 2 of the left bytes of the int got dropped.\n","html":"\u003ch2 id=\"pitfalls\"\u003ePitfalls\u003c/h2\u003e\n\u003cp\u003eThe bytes remains the same despite copying, so you can get too funky:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12345\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eunsigned\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003euv\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;v = %d, uv = %d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003euv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis prints \u0026ldquo;v = -12345, uv=4294954951\u0026rdquo;. 
As in: \u003cstrong\u003ewhen you copy rvalues, the \u003ca href=\"/posts/kbhbinary_number_system/#bit\"\u003ebit\u003c/a\u003e pattern gets copied and not the numerical number itself\u003c/strong\u003e; so, it will overflow.\u003c/p\u003e\n\u003cp\u003eYou can use U to force an signed quantity to be unsigned:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eunsigned\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003euv\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12345U\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"sign-promotion\"\u003esign promotion\u003c/h2\u003e\n\u003cp\u003eIf you have the nerve of putting a comparing things of different types (don\u0026rsquo;t), then, the \u003cstrong\u003esigned quantities gets promoted to be unsigned\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThat is, we get that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e-1 \u0026lt; 0U is \u003cstrong\u003efalse\u003c/strong\u003e because the -1 is promoted to an \u003ca href=\"/posts/kbhbinary_number_system/#unsigned-integers\"\u003eunsigned integer\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e2\u0026hellip;.7 \u0026gt; -2\u0026hellip;.7 is \u003cstrong\u003etrue\u003c/strong\u003e because nothing is converted\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"type-size-promotion\"\u003etype size promotion\u003c/h2\u003e\n\u003cp\u003eIf you have the nerve of putting a 
comparing things of different types (don\u0026rsquo;t), then, the \u003cstrong\u003esmaller types get promoted to being a bigger types\u003c/strong\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecasting from small unsigned value to larger unsigned value just requires us prepending a buncha zeros as needed\u003c/li\u003e\n\u003cli\u003ecasting from a small signed value to larger signed value requires us repeating the left most value to fill out the rest of the variable (-1 = 11111, so bigger -1 = 11111 (repeated) 111)\u003c/li\u003e\n\u003cli\u003elasting from a large value to a smaller value will cause \u003cstrong\u003etruncation\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"type-size-truncation\"\u003etype size truncation\u003c/h2\u003e\n\u003cp\u003eTake, for instance:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e53191\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eshort\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esx\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe short is 2 \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003e, which means that 2 of the left \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es of the int got dropped.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcasting/","tags":null,"title":"casting"},{"categories":null,"contents":"categorical grammar is a grammar in the language of categories.\nconstituents \\(A\\), a set of \u0026ldquo;expressions\u0026rdquo; \\(C\\), a set of categories of \u0026ldquo;syntax\u0026rdquo; \\(\\varphi: A \\to Pow( C)\\), assigning each \\(a \\in A\\) to a set of categories \\(c \\subset C\\) \\(G\\): a family of sets of n-place operations where \\(n=1, 2, \\ldots\\) (what does a \u0026ldquo;3-place\u0026rdquo; op mean? idk) \\(R\\): a set of rules encoded as tuples: \\((f; \\{c_1, \\dots c_{k}\\}; c_{k+1})\\), where \\(f\\) is a \\(k\\) place operation, and \\(c_{j} \\in C\\) requirements The operations of this grammar behaves like so:\ngiven a rule \\(r \\in R\\), it tells you that given WLOG an expression in \\(c_{1}, c_2, \\ldots c_{k} \\in C\\) (i.e. 
they were mapped to that set \\(\\varphi\\)), \\(f\\) will map that set of expressions into the same new category \\(c_{k+1}\\).\nadditional information a basic categorical grammar one implementation of a basic categorical grammar is as follows:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcategorical_grammar/\"\u003ecategorical grammar\u003c/a\u003e is a \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e in the language of \u003ca href=\"/posts/kbhcategory/\"\u003ecategories\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A\\), a \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003e of \u0026ldquo;expressions\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(C\\), a set of \u003ca href=\"/posts/kbhcategory/\"\u003ecategories\u003c/a\u003e of \u0026ldquo;syntax\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(\\varphi: A \\to Pow( C)\\), assigning each \\(a \\in A\\) to a \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e of categories \\(c \\subset C\\)\u003c/li\u003e\n\u003cli\u003e\\(G\\): a family of sets of n-place operations where \\(n=1, 2, \\ldots\\) (what does a \u0026ldquo;3-place\u0026rdquo; op mean? idk)\u003c/li\u003e\n\u003cli\u003e\\(R\\): a set of rules encoded as tuples: \\((f; \\{c_1, \\dots c_{k}\\}; c_{k+1})\\), where \\(f\\) is a \\(k\\) place operation, and \\(c_{j} \\in C\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe operations of this grammar behaves like so:\u003c/p\u003e\n\u003cp\u003egiven a rule \\(r \\in R\\), it tells you that given WLOG an expression in \\(c_{1}, c_2, \\ldots c_{k} \\in C\\) (i.e. 
they were mapped to that set \\(\\varphi\\)), \\(f\\) will map that set of expressions into the same new category \\(c_{k+1}\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"a-basic-categorical-grammar\"\u003ea basic categorical grammar\u003c/h3\u003e\n\u003cp\u003eone implementation of a basic \u003ca href=\"/posts/kbhcategorical_grammar/\"\u003ecategorical grammar\u003c/a\u003e is as follows:\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategorical_grammar/","tags":null,"title":"categorical grammar"},{"categories":null,"contents":" categorical grammar ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcategorical_grammar/\"\u003ecategorical grammar\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategorical_grammars_index/","tags":null,"title":"Categorical Grammars Index"},{"categories":null,"contents":"A category is an abstract collection of objects\nconstituents collection of objects, where if \\(X\\) is an object of \\(C\\) we write \\(X \\in C\\) for a pair of objects \\(X, Y \\in C\\), a set of morphisms acting upon the objects which we call the homset additional information requirements there exists the identity morphism; that is, \\(\\forall X \\in C, \\exists I_{X}: X\\to X\\) morphisms are always composable: given \\(f: X\\to Y\\), and \\(g: Y\\to Z\\), exists \\(gf: X \\to Z\\) the identity morphism can compose in either direction: given \\(f: X \\to Y\\), then \\(f I_{x} = f = I_{y} f\\) morphism composition is associative: \\((hg)f=h(gf)\\) ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcategory/\"\u003ecategory\u003c/a\u003e is an abstract collection of \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecollection of \u003ca href=\"/posts/kbhobjects/\"\u003eobjects\u003c/a\u003e, where if 
\\(X\\) is an \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e of \\(C\\) we write \\(X \\in C\\)\u003c/li\u003e\n\u003cli\u003efor a pair of objects \\(X, Y \\in C\\), a set of \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003es acting upon the objects which we call the \u003ca href=\"/posts/kbhhomset/\"\u003ehomset\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethere exists the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e; that is, \\(\\forall X \\in C, \\exists I_{X}: X\\to X\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003es are always composable: given \\(f: X\\to Y\\), and \\(g: Y\\to Z\\), exists \\(gf: X \\to Z\\)\u003c/li\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e can compose in either direction: given \\(f: X \\to Y\\), then \\(f I_{x} = f = I_{y} f\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e composition is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e: \\((hg)f=h(gf)\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategory/","tags":null,"title":"category"},{"categories":null,"contents":"An abstract study of mathematics based on categories, functors, and natural transformations.\n","html":"\u003cp\u003eAn abstract study of mathematics based on \u003ca href=\"/posts/kbhcategory/\"\u003ecategories\u003c/a\u003e, \u003ca href=\"/posts/kbhfunctor/\"\u003efunctors\u003c/a\u003e, and \u003ca href=\"/posts/kbhnatural_transformations/\"\u003enatural 
transformations\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategory_theory/","tags":null,"title":"category theory"},{"categories":null,"contents":"stock market crash of 1929 At October 24th, 1929, Black Thursday took place, and the stock market crashed. During this time, a record of 13 million shares traded, over $3b of losses. This began a 4 year slide of the global economy.\nCrash theories:\ndemand-driven theory Monetarist theory bank failures of 1929 Banks became irrelevant. Lots of risky loans given out, farmers are taken out huge loans and the banks can\u0026rsquo;t deal.\nother factors economy of credit tariffs ","html":"\u003ch2 id=\"stock-market-crash-of-1929\"\u003estock market crash of 1929\u003c/h2\u003e\n\u003cp\u003eAt October 24th, 1929, \u003ca href=\"/posts/kbhblack_thursday/\"\u003eBlack Thursday\u003c/a\u003e took place, and the stock market crashed. During this time, a record of 13 million shares traded, over $3b of losses. This began a 4 year slide of the global economy.\u003c/p\u003e\n\u003cp\u003eCrash theories:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdemand_driven_theory/\"\u003edemand-driven theory\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bank-failures-of-1929\"\u003ebank failures of 1929\u003c/h2\u003e\n\u003cp\u003eBanks became irrelevant. 
Lots of risky loans given out, farmers are taken out huge loans and the banks can\u0026rsquo;t deal.\u003c/p\u003e\n\u003ch2 id=\"other-factors\"\u003eother factors\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheconomy_of_credit/\"\u003eeconomy of credit\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtariffs/\"\u003etariffs\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcauses_of_the_great_depression/","tags":null,"title":"causes of the Great Depression"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcell/","tags":null,"title":"cell"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcell_free_biocatalysis/","tags":null,"title":"cell-free biocatalysis"},{"categories":null,"contents":"\u0026ldquo;If sample size is large and IID, the sampling distribution is normal. The larger \\(N\\) is, the more normal the resulting shape is.\u0026rdquo;\nWe can use the central limit theorem to estimate the sum of IID random variables:\nLet there be \\(n\\) random variables named \\(X_{j}\\), they are IID, and they have \\(E[x] = \\mu\\), and \\(Var(x) = \\sigma^{2}\\)\nWe have that:\n\\begin{equation} \\sum_{i=1}^{N} X_{n} \\sim N(n\\mu, n \\sigma^{2}), \\text{as}\\ n \\to \\infty \\end{equation}\nThat, as long as you normalize a random variable and have enough of it, you get closer and closer to the normal distribution.\nNotably, for the central limit theorem to hold, the variance has to be finite (that the results vary in a certain finite value \\(\\sigma\\). With that \\(\\sigma\\) value, we can see above that the central limit theorem will eventually converge to the normal. 
THis is useful for the Random Walk Hypothesis.\nREMEMBER THAT IF YOU ARE APPROXIMATIGN DISCRETE THINGS YOU NEED continuity correction!!!\n","html":"\u003cp\u003e\u0026ldquo;If sample size is large and \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, the sampling distribution is normal. The larger \\(N\\) is, the more normal the resulting shape is.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe can use the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e to estimate the sum of \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003eLet there be \\(n\\) \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es named \\(X_{j}\\), they are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, and they have \\(E[x] = \\mu\\), and \\(Var(x) = \\sigma^{2}\\)\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=1}^{N} X_{n} \\sim N(n\\mu, n \\sigma^{2}), \\text{as}\\ n \\to \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat, as long as you normalize a random variable and have enough of it, you get closer and closer to the normal distribution.\u003c/p\u003e\n\u003cp\u003eNotably, for the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e to hold, the variance has to be finite (that the results vary in a certain finite value \\(\\sigma\\). With that \\(\\sigma\\) value, we can see above that the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e will eventually converge to the normal. 
THis is useful for the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003eREMEMBER THAT IF YOU ARE APPROXIMATIGN DISCRETE THINGS YOU NEED \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e!!!\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcentral_limit_theorem/","tags":null,"title":"central limit theorem"},{"categories":null,"contents":" 80% of the human genome is actually transcribed very little \u0026ldquo;junk DNA\u0026rdquo; 40% IncRNA are gene specific ","html":"\u003cul\u003e\n\u003cli\u003e80% of the human genome is actually transcribed\u003c/li\u003e\n\u003cli\u003every little \u0026ldquo;junk DNA\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e40% \u003ca href=\"\"\u003eIncRNA\u003c/a\u003e are gene specific\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchanges_to_central_dogma/","tags":null,"title":"changes to central dogma"},{"categories":null,"contents":"char is a character that represents a glypth:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e is a character that represents a glypth:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-05-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhchar/","tags":null,"title":"char"},{"categories":null,"contents":"The polynomial given by the determinant of:\n\\begin{equation} det(A-\\lambda I) \\end{equation}\nfor some Linear Map \\(A\\). Solutions for \\(\\lambda\\) are the eigenvalues. 
This is because something is an eigenvalue IFF \\((A-\\lambda I)v = 0\\) for some \\(\\lambda, v\\), so we need \\((A-\\lambda I)\\) to be singular.\nCharacteristic polynomial of a 2x2 matrix is given by \\(\\lambda^{2}-tr(A)\\lambda + det(A)\\).\n","html":"\u003cp\u003eThe polynomial given by the determinant of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ndet(A-\\lambda I)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(A\\). Solutions for \\(\\lambda\\) are the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es. This is because something is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e IFF \\((A-\\lambda I)v = 0\\) for some \\(\\lambda, v\\), so we need \\((A-\\lambda I)\\) to be singular.\u003c/p\u003e\n\u003cp\u003eCharacteristic \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e of a 2x2 matrix is given by \\(\\lambda^{2}-tr(A)\\lambda + det(A)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcharacteristic_polynomial/","tags":null,"title":"characteristic polynomial"},{"categories":null,"contents":"an atom is said to be charged when there is an imbalance between its number of protons and electrons.\nadditional information units of charge charge is measured in SI unit \\(C\\), coulomb. However, we are often dealing with \\(e\\), the charge of an electron (as ultimate that\u0026rsquo;s the principle way by which charge moves around). 
\\(e \\approx 1.6 \\times 10^{-19} C\\).\nnet charge can be neither created nor destroyed Unsurprisingly, though you can move electrons around, they will be conserved across a system.\n","html":"\u003cp\u003ean atom is said to be \u003ca href=\"/posts/kbhcharged/\"\u003echarged\u003c/a\u003e when there is an imbalance between its number of \u003ca href=\"/posts/kbhprotons/\"\u003eproton\u003c/a\u003es and \u003ca href=\"/posts/kbhelectron/\"\u003eelectron\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"units-of-charge\"\u003eunits of charge\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e is measured in SI unit \\(C\\), coulomb. However, we are often dealing with \\(e\\), the charge of an \u003ca href=\"/posts/kbhelectron/\"\u003eelectron\u003c/a\u003e (as ultimate that\u0026rsquo;s the principle way by which charge moves around). \\(e \\approx 1.6 \\times 10^{-19} C\\).\u003c/p\u003e\n\u003ch3 id=\"net-charge-can-be-neither-created-nor-destroyed\"\u003enet charge can be neither created nor destroyed\u003c/h3\u003e\n\u003cp\u003eUnsurprisingly, though you can move \u003ca href=\"/posts/kbhelectron/\"\u003eelectron\u003c/a\u003es around, they will be conserved across a system.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcharged/","tags":null,"title":"charged"},{"categories":null,"contents":"Two main Dialogue Systems architectures:\nframe based systems: talk to users + accomplish specific tasks LLM: reasoning as agents Dialogue Systems vs Chatbot Previously, when we say Chatbot we mean task-based systems\nhumans and chat humans tend to think of Dialogue Systems as human-like even if they know its not. 
this makes users more prone to share private information and worry less about its disclosure.\nELIZA see ELIZA\nLLM Chatbots Training Corpus C4: colossal clean crawled corpus\npatent, wikipedia, news\nChatbots EmphaticDialogues SaFeRDialogues Pseudo-conversations: reddit, twitter, weibo Fine-Tuning quality: improving sensible and interesting responses safety: prevention of suggesting harmful actions IFT: perhaps you can add positive data as fine tuning as a part of instruction-finetuning step.\nFiltering: build a filter for whether something is safe/unsafe, etc.\nRetrieval Augmented Generation call search engine get back a retrieved passages shove them into prompt \u0026ldquo;based on this tasks, answer:\u0026rdquo; we can make Chatbots use RAG by adding \u0026ldquo;pseudo-participants\u0026rdquo; to make the chat bots, which the system should add.\nEvaluation task based systems: measure task performance chatbot: enjoyability by humans we evaluate chatbots by asking a human to assign a score, and observer is a third party that assigns a score via a transcript of a conversation.\nparticipants scoring interact with 6 turns, then score:\navoiding repetition interestingness sensemaking fluency listening inquisitiveness humanness engagingness ACUTE-EVAL: choosing who you would like to speak to\nadversarial evaluation train a human/robot classifier, use it, use the inverse of its score at the metric of the chat bot\ntask evaluatino measure overall task success, or measure slot error rate\ndesign system design Don\u0026rsquo;t build Frankenstein: safety (ensure people aren\u0026rsquo;t crashing cars), limiting representation harm (don\u0026rsquo;t demean social groups), privacy\nstudy users and task what are their values? how do they interact?\nbuild simulations wizard of oz study: observe user interaction with a HUMAN pretending to be a chat bot\ntest the design test on users\ninfo leakage accidentally leaking information (microphone, etc.) 
intentionally leaking information due to advertising, etc. ","html":"\u003cp\u003eTwo main \u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e architectures:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eframe based\u003c/strong\u003e systems: talk to users + accomplish specific tasks\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLLM\u003c/strong\u003e: reasoning as agents\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dialogue-systems--kbhchatbot-dot-md--vs-chatbot--kbhchatbot-dot-md\"\u003e\u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e vs \u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ePreviously, when we say \u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003e we mean task-based systems\u003c/p\u003e\n\u003ch2 id=\"humans-and-chat\"\u003ehumans and chat\u003c/h2\u003e\n\u003cp\u003ehumans tend to think of \u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e as human-like even if they know its not. 
this makes users more prone to share private information and worry less about its disclosure.\u003c/p\u003e\n\u003ch2 id=\"eliza--kbheliza-dot-md\"\u003e\u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"llm-chatbots\"\u003eLLM Chatbots\u003c/h2\u003e\n\u003ch3 id=\"training-corpus\"\u003eTraining Corpus\u003c/h3\u003e\n\u003cp\u003eC4: colossal clean crawled corpus\u003c/p\u003e\n\u003cp\u003epatent, wikipedia, news\u003c/p\u003e\n\u003ch3 id=\"chatbots\"\u003eChatbots\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eEmphaticDialogues\u003c/li\u003e\n\u003cli\u003eSaFeRDialogues\u003c/li\u003e\n\u003cli\u003ePseudo-conversations: reddit, twitter, weibo\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"fine-tuning\"\u003eFine-Tuning\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003equality\u003c/strong\u003e: improving sensible and interesting responses\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esafety\u003c/strong\u003e: prevention of suggesting harmful actions\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eIFT\u003c/strong\u003e: perhaps you can add positive data as fine tuning as a part of instruction-finetuning step.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eFiltering\u003c/strong\u003e: build a filter for whether something is safe/unsafe, etc.\u003c/p\u003e\n\u003ch3 id=\"retrieval-augmented-generation\"\u003eRetrieval Augmented Generation\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003ecall search engine\u003c/li\u003e\n\u003cli\u003eget back a retrieved passages\u003c/li\u003e\n\u003cli\u003eshove them into prompt\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;based on this tasks, answer:\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ewe can make \u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003es use \u003ca href=\"#retrieval-augmented-generation\"\u003eRAG\u003c/a\u003e by adding 
\u0026ldquo;pseudo-participants\u0026rdquo; to make the chat bots, which the system should add.\u003c/p\u003e\n\u003ch2 id=\"evaluation\"\u003eEvaluation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003etask based systems\u003c/strong\u003e: measure task performance\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003echatbot\u003c/strong\u003e: enjoyability by humans\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe evaluate chatbots by asking a human to assign a score, and observer is a third party that assigns a score via a transcript of a conversation.\u003c/p\u003e\n\u003ch3 id=\"participants-scoring\"\u003eparticipants scoring\u003c/h3\u003e\n\u003cp\u003einteract with 6 turns, then score:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eavoiding repetition\u003c/li\u003e\n\u003cli\u003einterestingness\u003c/li\u003e\n\u003cli\u003esensemaking\u003c/li\u003e\n\u003cli\u003efluency\u003c/li\u003e\n\u003cli\u003elistening\u003c/li\u003e\n\u003cli\u003einquisitiveness\u003c/li\u003e\n\u003cli\u003ehumanness\u003c/li\u003e\n\u003cli\u003eengagingness\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eACUTE-EVAL: \u003cstrong\u003echoosing who you would like to speak to\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"adversarial-evaluation\"\u003eadversarial evaluation\u003c/h3\u003e\n\u003cp\u003etrain a human/robot classifier, use it, use the inverse of its score at the metric of the chat bot\u003c/p\u003e\n\u003ch3 id=\"task-evaluatino\"\u003etask evaluatino\u003c/h3\u003e\n\u003cp\u003emeasure overall task success, or measure slot error rate\u003c/p\u003e\n\u003ch2 id=\"design-system-design\"\u003edesign system design\u003c/h2\u003e\n\u003cp\u003eDon\u0026rsquo;t build \u003cstrong\u003eFrankenstein\u003c/strong\u003e: safety (ensure people aren\u0026rsquo;t crashing cars), limiting representation harm (don\u0026rsquo;t demean social groups), privacy\u003c/p\u003e\n\u003ch3 id=\"study-users-and-task\"\u003estudy users and task\u003c/h3\u003e\n\u003cp\u003ewhat 
are their values? how do they interact?\u003c/p\u003e\n\u003ch3 id=\"build-simulations\"\u003ebuild simulations\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003ewizard of oz\u003c/strong\u003e study: observe user interaction with a \u003cstrong\u003eHUMAN\u003c/strong\u003e pretending to be a chat bot\u003c/p\u003e\n\u003ch3 id=\"test-the-design\"\u003etest the design\u003c/h3\u003e\n\u003cp\u003etest on users\u003c/p\u003e\n\u003ch3 id=\"info-leakage\"\u003einfo leakage\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eaccidentally leaking information (microphone, etc.)\u003c/li\u003e\n\u003cli\u003eintentionally leaking information due to advertising, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchatbot/","tags":null,"title":"Chatbot"},{"categories":null,"contents":"\\(\\chi^2\\) is a test statistic for hypothesis testing.\nmotivation for chi-square The motivation for chi-square is because t-test (means, \u0026ldquo;is the value significantly different\u0026rdquo;) and z-test (proportion, \u0026ldquo;is the incidence percentage significantly different\u0026rdquo;) all don\u0026rsquo;t really cover categorical data samples: \u0026ldquo;the categories are distributed in this way.\u0026rdquo;\nTake, for instance, if we want to test the following null hypothesis:\nCategory Expected Actual A 25 20 B 25 20 C 25 25 D 25 25 \\(\\alpha = 0.05\\). What do we use to test this??\n(hint: we can\u0026rsquo;t, unless\u0026hellip;)\nEnter chi-square.\nchi-square test chi-square test is a hypothesis test for categorical data. It is responsible to translate differences in distributions into p-values for significance.\nBegin by calculating chi-square after you confirmed that your experiment meets conditions for inference (chi-square test).\nOnce you have that, look it up at a chi-square table to figure the appropriate p-value. 
Then, proceed with normal hypothesis testing.\nBecause of this categorical nature, chi-square test can also be used as a homogeneity test.\nconditions for inference (chi-square test) random sampling expected value for data must be \\(\\geq 5\\) sampling should be \\(\u0026lt;10\\%\\) or independent chi-square test for homogeneity The chi-square test for homogeneity is a test for homogeneity via the chi-square statistic.\nTo do this, we take the probability of a certain outcome happening\u0026mdash;if distributed equally\u0026mdash;and apply it to the samples to compare.\nTake, for instance:\nSubject Right Hand Left Hand Total STEM 30 10 40 Humanities 15 25 40 Equal 15 5 20 Total 60 40 100 We will then figure the expected outcomes:\nRight Left 24 16 24 16 12 8 Awesome! Now, calculate chi-square with each cell of measured outcomes. Calculate degrees of freedom by (num_row-1)*(num_col-1).\nchi-square test for independence The chi-square test for independence is a test designed to accept-reject the null hypothesis of \u0026ldquo;no association between two variables.\u0026rdquo;\nEssentially, you leverage the fact that \u0026ldquo;AND\u0026rdquo; relationships are multiplicative probabilities. 
Therefore, the expected outcomes are simply the multiplied/fraction of sums:\ncalculating chi-square \\begin{equation} \\chi^2 = \\frac{(\\hat{x}_0-x_0)^2}{x_0} +\\frac{(\\hat{x}_1-x_1)^2}{x_1} + \\cdots + \\frac{(\\hat{x}_n-x_n)^2}{x_n} \\end{equation}\nWhere, \\(\\hat{x}_i\\) is the measured value and \\(x_i\\) is the expected value.\n","html":"\u003cp\u003e\\(\\chi^2\\) is a test \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e for \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"motivation-for-chi-square\"\u003emotivation for chi-square\u003c/h2\u003e\n\u003cp\u003eThe motivation for \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e is because \u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e (means, \u0026ldquo;is the value significantly different\u0026rdquo;) and \u003ca href=\"/posts/kbhz_test/\"\u003ez-test\u003c/a\u003e (proportion, \u0026ldquo;is the incidence percentage significantly different\u0026rdquo;) all don\u0026rsquo;t really cover categorical data samples: \u0026ldquo;the categories are distributed in this way.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eTake, for instance, if we want to test the following \u003ca href=\"/posts/kbhhypothesis_testing/#null-hypothesis\"\u003enull 
hypothesis\u003c/a\u003e:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eCategory\u003c/th\u003e\n\u003cth\u003eExpected\u003c/th\u003e\n\u003cth\u003eActual\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eA\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e20\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eB\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e20\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eC\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eD\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\\(\\alpha = 0.05\\). What do we use to test this??\u003c/p\u003e\n\u003cp\u003e(hint: we can\u0026rsquo;t, unless\u0026hellip;)\u003c/p\u003e\n\u003cp\u003eEnter \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"chi-square-test\"\u003echi-square test\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#chi-square-test\"\u003echi-square test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e for categorical data. It is responsible to translate differences in distributions into \u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003es for significance.\u003c/p\u003e\n\u003cp\u003eBegin by \u003ca href=\"#calculating-chi-square\"\u003ecalculating chi-square\u003c/a\u003e after you confirmed that your experiment meets \u003ca href=\"#conditions-for-inference--chi-square-test\"\u003econditions for inference (chi-square test)\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eOnce you have that, look it up at a chi-square table to figure the appropriate \u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003e. 
Then, proceed with normal \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBecause of this categorical nature, \u003ca href=\"#chi-square-test\"\u003echi-square test\u003c/a\u003e can also be used as a homogeneity test.\u003c/p\u003e\n\u003ch2 id=\"conditions-for-inference--chi-square-test\"\u003econditions for inference (chi-square test)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003erandom sampling\u003c/li\u003e\n\u003cli\u003eexpected value for data must be \\(\\geq 5\\)\u003c/li\u003e\n\u003cli\u003esampling should be \\(\u0026lt;10\\%\\) or independent\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"chi-square-test-for-homogeneity\"\u003echi-square test for homogeneity\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#chi-square-test-for-homogeneity\"\u003echi-square test for homogeneity\u003c/a\u003e is a test for \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e via the \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo do this, we take the probability of a certain outcome happening\u0026mdash;if distributed equally\u0026mdash;and apply it to the samples to compare.\u003c/p\u003e\n\u003cp\u003eTake, for instance:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSubject\u003c/th\u003e\n\u003cth\u003eRight Hand\u003c/th\u003e\n\u003cth\u003eLeft 
Hand\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003e\u003cstrong\u003eTotal\u003c/strong\u003e\u003c/strong\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSTEM\u003c/td\u003e\n\u003ctd\u003e30\u003c/td\u003e\n\u003ctd\u003e10\u003c/td\u003e\n\u003ctd\u003e40\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eHumanities\u003c/td\u003e\n\u003ctd\u003e15\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e40\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eEqual\u003c/td\u003e\n\u003ctd\u003e15\u003c/td\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e20\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cstrong\u003e\u003cstrong\u003eTotal\u003c/strong\u003e\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e60\u003c/td\u003e\n\u003ctd\u003e40\u003c/td\u003e\n\u003ctd\u003e100\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe will then figure the expected outcomes:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eRight\u003c/th\u003e\n\u003cth\u003eLeft\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e24\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e24\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e12\u003c/td\u003e\n\u003ctd\u003e8\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAwesome! Now, \u003ca href=\"#calculating-chi-square\"\u003ecalculate chi-square\u003c/a\u003e with each cell of measured outcomes. 
Calculate degrees of freedom by (num_row-1)*(num_col-1).\u003c/p\u003e\n\u003ch2 id=\"chi-square-test-for-independence\"\u003echi-square test for independence\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#chi-square-test-for-independence\"\u003echi-square test for independence\u003c/a\u003e is a test designed to accept-reject the null hypothesis of \u0026ldquo;no association between two variables.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eEssentially, you leverage the fact that \u0026ldquo;AND\u0026rdquo; relationships are multiplicative probabilities. Therefore, the expected outcomes are simply the multiplied/fraction of sums:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-37-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"calculating-chi-square\"\u003ecalculating chi-square\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\chi^2 = \\frac{(\\hat{x}_0-x_0)^2}{x_0} +\\frac{(\\hat{x}_1-x_1)^2}{x_1} + \\cdots + \\frac{(\\hat{x}_n-x_n)^2}{x_n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere, \\(\\hat{x}_i\\) is the measured value and \\(x_i\\) is the expected value.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchi_square/","tags":null,"title":"chi-square"},{"categories":null,"contents":"Chiara Marletto is an physicist working on Quantum mechanics working in D. of Physics, Wolfson College, University of Oxford.\nSubfield: constructor theory. She studies quantum theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhchiara_marletto/\"\u003eChiara Marletto\u003c/a\u003e is an physicist working on Quantum mechanics working in D. of Physics, Wolfson College, University of Oxford.\u003c/p\u003e\n\u003cp\u003eSubfield: \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e. 
She studies \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchiara_marletto/","tags":null,"title":"Chiara Marletto"},{"categories":null,"contents":"I was digging through my OneDrive recently for work, and found this piece of writing.\nThere is naught but a small, dirt-filled puddle in front of this lawn. Yet only here – by the puddle – can Gary find a small, much-needed respite from the neverending work. Of course, without the hours he has committed to the sweatshop, his mother would have died ages ago from colora.\nBut how does it matter now? Rarely now \u0026ndash; once every year \u0026ndash; does he even earn the privilege to exit the heavily-guarded area to visit his mother; and how little time he has during such visits: each visit seems to just be a long walk, a knock, a kiss on the cheek \u0026ndash; then back to the workhouse he goes.\nNo, he must push on. Focusing his tired mind back to the concrete structure in front of him, he sees the supervisor hollering the same old phrase. Back to work! Back to work! Move! Move! Break is over!\nWhat is this break, even? The notable lack of timepieces around the lawn means the important task of timekeeping falls to the supervisors \u0026ndash; who, notably, have an obvious interest in shortening the break. And ‘lo, the breaks are shortened: Gary has always remembered the session as until the bottom of the clock, yet doubtless he will find himself staring at a clock hand pointing to the horizontal upon walking into the building.\nHe can do nothing now: there is one \u0026ndash; and the ultimate \u0026ndash; sanction for not listening to the supervisor, and he wants nothing to do with it: beating. Beating that gets harder, faster, as time progresses is the one, the only, and the final answer to all cases of disobedience. 
Heck, if the supervisor demands time run backwards during breaks, Cronus will listen and obey \u0026ndash; for even he, a god, is probably as scared of these “correctional sessions” as anyone else.\nThere is, then, no time to be wasted. Up towards the factory he walks \u0026ndash; joined by hundreds of others suffering a similar fate, doing the same tedious, repetitive tasks as him. If he hadn\u0026rsquo;t been made dependent \u0026ndash; addicted! in fact \u0026ndash; to the meager wages he received, he could have achieved greatness the world has yet to see.\nBut, alas, towards the factory he walks, steps. Timidly, slowly, shuffling his feet quickly enough so as to not anger the increasingly stressed supervisor. Stressed understandably, perhaps, due to the increasing external talk of organizing such congregations as the “Child Labour Committee”, which Gary himself isn’t sure to what extent he should trust.\nThe quarter-bell strikes. Indeed, his suspicions were correct \u0026ndash; yet superfluous. When all was thought and done, he couldn\u0026rsquo;t possibly have even produced the thought of defying the wishes of the supervisor, let along execute it. But now, he has not even the physical capacity to escape \u0026ndash; the door was locked, and locked means work eternal \u0026ndash; at least until the next meager halt seemingly few decades later.\nSuddenly, a clicking occurs. A machine screeching to a halt, perhaps due to the same overwork and misuse. In walks the supervisor: nevermind that: the work must go on!\nIt is now down to the same routine \u0026ndash; picking the smallest, nimblist of the bunch \u0026ndash; Gary, of course \u0026ndash; to, through great persuasion and threatenings of beatings, climb under the mechanical beast and undo the mess. It’s a dance of oil and gear that Gary has rehearsed many times before, each time dreading the next. 
Yet he still brought himself to perform the task time and time again for it, although dreadful, seems to be heavenly compared to the alternate: getting the “correctional session.”\nDown the cover he goes: a little pulling there, a little dabbing there, and Hark! The machine jumped to a start with a splash of brilliant pink hue, announcing \u0026ndash; celebrating, it seems \u0026ndash; itself as Gary’s final quarters.\nNevermind that: the work must go on!\n","html":"\u003cp\u003eI was digging through my OneDrive recently for work, and found this piece of writing.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThere is naught but a small, dirt-filled puddle in front of this lawn. Yet only here – by the puddle – can Gary find a small, much-needed respite from the neverending work. Of course, without the hours he has committed to the sweatshop, his mother would have died ages ago from colora.\u003c/p\u003e\n\u003cp\u003eBut how does it matter now? Rarely now \u0026ndash; once every year \u0026ndash; does he even earn the privilege to exit the heavily-guarded area to visit his mother; and how little time he has during such visits: each visit seems to just be a long walk, a knock, a kiss on the cheek \u0026ndash; then back to the workhouse he goes.\u003c/p\u003e\n\u003cp\u003eNo, he must push on. Focusing his tired mind back to the concrete structure in front of him, he sees the supervisor hollering the same old phrase. Back to work! Back to work! Move! Move! Break is over!\u003c/p\u003e\n\u003cp\u003eWhat is this break, even? The notable lack of timepieces around the lawn means the important task of timekeeping falls to the supervisors \u0026ndash; who, notably, have an obvious interest in shortening the break. 
And ‘lo, the breaks are shortened: Gary has always remembered the session as until the bottom of the clock, yet doubtless he will find himself staring at a clock hand pointing to the horizontal upon walking into the building.\u003c/p\u003e\n\u003cp\u003eHe can do nothing now: there is one \u0026ndash; and the ultimate \u0026ndash; sanction for not listening to the supervisor, and he wants nothing to do with it: beating. Beating that gets harder, faster, as time progresses is the one, the only, and the final answer to all cases of disobedience. Heck, if the supervisor demands time run backwards during breaks, Cronus will listen and obey \u0026ndash; for even he, a god, is probably as scared of these “correctional sessions” as anyone else.\u003c/p\u003e\n\u003cp\u003eThere is, then, no time to be wasted. Up towards the factory he walks \u0026ndash; joined by hundreds of others suffering a similar fate, doing the same tedious, repetitive tasks as him. If he hadn\u0026rsquo;t been made dependent \u0026ndash; addicted! in fact \u0026ndash; to the meager wages he received, he could have achieved greatness the world has yet to see.\u003c/p\u003e\n\u003cp\u003eBut, alas, towards the factory he walks, steps. Timidly, slowly, shuffling his feet quickly enough so as to not anger the increasingly stressed supervisor. Stressed understandably, perhaps, due to the increasing external talk of organizing such congregations as the  “Child Labour Committee”, which Gary himself isn’t sure to what extent he should trust.\u003c/p\u003e\n\u003cp\u003eThe quarter-bell strikes. Indeed, his suspicions were correct \u0026ndash; yet superfluous. When all was thought and done, he couldn\u0026rsquo;t possibly have even produced the thought of defying the wishes of the supervisor, let along execute it. 
But now, he has not even the physical capacity to escape \u0026ndash; the door was locked, and locked means work eternal \u0026ndash; at least until the next meager halt seemingly few decades later.\u003c/p\u003e\n\u003cp\u003eSuddenly, a clicking occurs. A machine screeching to a halt, perhaps due to the same overwork and misuse. In walks the supervisor: nevermind that: the work must go on!\u003c/p\u003e\n\u003cp\u003eIt is now down to the same routine \u0026ndash; picking the smallest, nimblist of the bunch \u0026ndash; Gary, of course \u0026ndash; to, through great persuasion and threatenings of beatings, climb under the mechanical beast and undo the mess. It’s a dance of oil and gear that Gary has rehearsed many times before, each time dreading the next. Yet he still brought himself to perform the task time and time again for it, although dreadful, seems to be heavenly compared to the alternate: getting the “correctional session.”\u003c/p\u003e\n\u003cp\u003eDown the cover he goes: a little pulling there, a little dabbing there, and Hark! The machine jumped to a start with a splash of brilliant pink hue, announcing \u0026ndash; celebrating, it seems \u0026ndash; itself as Gary’s final quarters.\u003c/p\u003e\n\u003cp\u003eNevermind that: the work must go on!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchild_labour/","tags":null,"title":"Child Labour: A Short Story"},{"categories":null,"contents":"DOI: 10.3389/fpsyg.2020.623237\nOne-Liner (thrice) Used features extracted by VGGish from raw acoustic audio against a SVM, Perceptron, 1NN; got \\(59.1\\%\\) classif. accuracy for dementia Then, trained a CNN on raw wave-forms and got \\(63.6\\%\\) accuracy Then, they fine-tuned a VGGish on the raw wave-forms and didn\u0026rsquo;t report their results and just said \u0026ldquo;we discovered that audio transfer learning with a pretrained VGGish feature extractor performs better\u0026rdquo; Gah! 
Novelty Threw the kitchen sink to process only raw acoustic input, most of it missed; wanted 0 human involvement. It seems like last method is promising.\nNotable Methods fine-tuning VGGish against raw acoustic waveforms to build a classifier via a CNN.\nKey Figs Their fancy network Its just a CNN afaik with much maxpooling; could have used some skipped connections. I wonder if it overfit?\nTheir actual training results Looks generally pretty bad, but a run of their DemCNN seem to have gotten state-of-the-art results. Not sure where transfer training data went.\nNew Concepts VGGish Notes Accuracy question According to this the state of the art at the time from pure audio was 56.6%? For a binary classifier isn\u0026rsquo;t that just doing nothing?\nSo somebody did get better before?\n","html":"\u003cp\u003eDOI: 10.3389/fpsyg.2020.623237\u003c/p\u003e\n\u003ch2 id=\"one-liner--thrice\"\u003eOne-Liner (thrice)\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eUsed features extracted by \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e from raw acoustic audio against a SVM, Perceptron, 1NN; got \\(59.1\\%\\) classif. accuracy for dementia\u003c/li\u003e\n\u003cli\u003eThen, trained a CNN on raw wave-forms and got \\(63.6\\%\\) accuracy\u003c/li\u003e\n\u003cli\u003eThen, they fine-tuned a \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e on the raw wave-forms and didn\u0026rsquo;t report their results and just said \u0026ldquo;we discovered that audio transfer learning with a pretrained VGGish feature extractor performs better\u0026rdquo; Gah!\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eThrew the kitchen sink to process only raw acoustic input, most of it missed; wanted 0 human involvement. 
It seems like last method is promising.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003efine-tuning \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e against raw acoustic waveforms to build a classifier via a CNN.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"their-fancy-network\"\u003eTheir fancy network\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-37-51_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIts just a CNN afaik with much maxpooling; could have used some skipped connections. I wonder if it overfit?\u003c/p\u003e\n\u003ch3 id=\"their-actual-training-results\"\u003eTheir actual training results\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-38-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLooks generally pretty bad, but a run of their DemCNN seem to have gotten state-of-the-art results. Not sure where transfer training data went.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003ch3 id=\"accuracy-question\"\u003eAccuracy question\u003c/h3\u003e\n\u003cp\u003eAccording to this the state of the art at the time from pure audio was 56.6%? 
For a binary classifier isn\u0026rsquo;t that just doing nothing?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-39-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo somebody did get better before?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchlasta_2021/","tags":["ntj"],"title":"Chlasta 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhchromatin/","tags":null,"title":"chromatin"},{"categories":null,"contents":"civil rights movement starting civil rights moment was kicked off by the Rosa Parks incident, which caused the Montomery Bus Boycott.\nMartin Luther King capitalized the incident to kick start civil rights movement. He employed the method of nonviolence movement.\neducational integration in the civil rights movement K-12 disintegration: Brown v. Board of Education University of Georgia was the first disintegrated university in the south service integration in the civil rights movement Lunch counter boycotts. Nashville became the first desegregated lunch counter.\nSNICK SNICK is a student organization founded by Ella Baker in the civil rights movement that sent students into the most dangerous areas of segregation and leading protests.\nMotown Records Motown Records is an African-American owned Detroit record business\nMalcom X A civil rights movement activist, calling for more violent forms of protest and prosecuting specific white actions. Malcom X and Martin Luther King contradicted each other in methods of active persecution vs. nonviolent integration.\nBloody Sunday Bloody Sunday was a voting rights march from Selma to Montgomery. Peaceful protesters were attacked with nightsticks and tear gas. 
The event was widely televised: transforming the movement as a televised morality play.\nNonviolence helps getting the clergy leaders as a form of leveraging religion in a show of unity.\nBlack Power Movement A new chapter in the civil rights movement which incorporated less of the elements of integration but instead in wanted more sense of self-determination. nonviolence movement, which the Black Power Movement overrided, had ran its course when Martin Luther King was assassinated.\n","html":"\u003ch2 id=\"civil-rights-movement-starting\"\u003ecivil rights movement starting\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights\u003c/a\u003e moment was kicked off by the \u003ca href=\"/posts/kbhrosa_parks/\"\u003eRosa Parks\u003c/a\u003e incident, which caused the \u003ca href=\"/posts/kbhmontomery_bus_boycott/\"\u003eMontomery Bus Boycott.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e capitalized the incident to kick start civil rights movement. He employed the method of \u003ca href=\"/posts/kbhnonviolence_movement/\"\u003enonviolence movement\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"educational-integration-in-the-civil-rights-movement\"\u003eeducational integration in the civil rights movement\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eK-12 disintegration: \u003ca href=\"/posts/kbhbrown_v_board_of_education/\"\u003eBrown v. Board of Education\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuniversity_of_georgia/\"\u003eUniversity of Georgia\u003c/a\u003e was the first disintegrated university in the south\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"service-integration-in-the-civil-rights-movement--kbhcivil-rights-dot-md\"\u003eservice integration in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eLunch counter boycotts. 
Nashville became the first desegregated lunch counter.\u003c/p\u003e\n\u003ch2 id=\"snick\"\u003eSNICK\u003c/h2\u003e\n\u003cp\u003eSNICK is a student organization founded by \u003ca href=\"/posts/kbhella_baker/\"\u003eElla Baker\u003c/a\u003e in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e that sent students into the most dangerous areas of segregation and leading protests.\u003c/p\u003e\n\u003ch2 id=\"motown-records\"\u003eMotown Records\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#motown-records\"\u003eMotown Records\u003c/a\u003e is an African-American owned Detroit record business\u003c/p\u003e\n\u003ch2 id=\"malcom-x\"\u003eMalcom X\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e activist, calling for more violent forms of protest and prosecuting specific white actions. \u003ca href=\"#malcom-x\"\u003eMalcom X\u003c/a\u003e and \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e contradicted each other in methods of active persecution vs. nonviolent integration.\u003c/p\u003e\n\u003ch2 id=\"bloody-sunday\"\u003eBloody Sunday\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#bloody-sunday\"\u003eBloody Sunday\u003c/a\u003e was a voting rights march from Selma to Montgomery. Peaceful protesters were attacked with nightsticks and tear gas. The event was widely televised: transforming the movement as a televised morality play.\u003c/p\u003e\n\u003cp\u003eNonviolence helps getting the clergy leaders as a form of leveraging religion in a show of unity.\u003c/p\u003e\n\u003ch2 id=\"black-power-movement\"\u003eBlack Power Movement\u003c/h2\u003e\n\u003cp\u003eA new chapter in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e which incorporated less of the elements of integration but instead in wanted more sense of self-determination. 
\u003ca href=\"/posts/kbhnonviolence_movement/\"\u003enonviolence movement\u003c/a\u003e, which the \u003ca href=\"#black-power-movement\"\u003eBlack Power Movement\u003c/a\u003e overrided, had ran its course when \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e was assassinated.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcivil_rights/","tags":null,"title":"civil rights movement"},{"categories":null,"contents":"A part of the New Deal programs for unmarried men to go and build American infrastructure outdoors under reasonably harsh conditions. \u0026ldquo;Kind of like boy scouts for adults.\u0026rdquo; It is structured like the military; Black men were segregated and not given leadership roles.\n1933-1942.\n","html":"\u003cp\u003eA part of the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e programs for unmarried men to go and build American infrastructure outdoors under reasonably harsh conditions. \u0026ldquo;Kind of like boy scouts for adults.\u0026rdquo; It is structured like the military; Black men were segregated and not given leadership roles.\u003c/p\u003e\n\u003cp\u003e1933-1942.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcivillian_conservation_corps/","tags":null,"title":"Civillian Conservation Corps"},{"categories":null,"contents":"key question: are there bias and changes in young children + changes in skin color to generate more samples of skin disease.\nprevious work: DermGAN (Ghorbani 2020), this is not pediatric and also a bit deterministic.\nkey problems data is scarce data is not available and lack of data sharing.\ndata is sensitive especially children.\npediatric specificity we want to generate children\u0026rsquo;s skin disease samples, which as vastly out of sample. 
The work is therefore trained on only 1000-2000ish samples.\nmodeling latent diffusion model (LDF) ControlNet (Zhang 2023)\u0026mdash;allows specific conditioning of the generation by exogenous rules data gym cleaning get rid of face crop for specific body part ensure anonymity, etc. patch extraction start in the upper left corner, ensure that the resulting patch has at least n% of diseases available\nfurther, create a mask for where the disease should live\nmodeling train a diffusion model yay\nresults the diffusion model by itself does nothing combined with ControlNet, life is much better and achieve higher than SOTA ","html":"\u003cp\u003e\u003cstrong\u003ekey question\u003c/strong\u003e: are there bias and changes in young children + changes in skin color to generate more samples of skin disease.\u003c/p\u003e\n\u003cp\u003eprevious work: DermGAN (Ghorbani 2020), this is not pediatric and also a bit deterministic.\u003c/p\u003e\n\u003ch2 id=\"key-problems\"\u003ekey problems\u003c/h2\u003e\n\u003ch3 id=\"data-is-scarce\"\u003edata is scarce\u003c/h3\u003e\n\u003cp\u003edata is not available and lack of data sharing.\u003c/p\u003e\n\u003ch3 id=\"data-is-sensitive\"\u003edata is sensitive\u003c/h3\u003e\n\u003cp\u003eespecially children.\u003c/p\u003e\n\u003ch3 id=\"pediatric-specificity\"\u003epediatric specificity\u003c/h3\u003e\n\u003cp\u003ewe want to generate children\u0026rsquo;s skin disease samples, which as vastly out of sample. 
The work is therefore trained on only 1000-2000ish samples.\u003c/p\u003e\n\u003ch2 id=\"modeling\"\u003emodeling\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elatent diffusion model (LDF)\u003c/li\u003e\n\u003cli\u003eControlNet (Zhang 2023)\u0026mdash;allows specific conditioning of the generation by exogenous rules\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"data-gym\"\u003edata gym\u003c/h2\u003e\n\u003ch3 id=\"cleaning\"\u003ecleaning\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eget rid of face\u003c/li\u003e\n\u003cli\u003ecrop for specific body part\u003c/li\u003e\n\u003cli\u003eensure anonymity, etc.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"patch-extraction\"\u003epatch extraction\u003c/h3\u003e\n\u003cp\u003estart in the upper left corner, ensure that the resulting patch has at least n% of diseases available\u003c/p\u003e\n\u003cp\u003efurther, create a mask for where the disease should live\u003c/p\u003e\n\u003ch3 id=\"modeling\"\u003emodeling\u003c/h3\u003e\n\u003cp\u003etrain a diffusion model yay\u003c/p\u003e\n\u003ch2 id=\"results\"\u003eresults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe diffusion model by itself does nothing\u003c/li\u003e\n\u003cli\u003ecombined with ControlNet, life is much better and achieve higher than SOTA\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclinical_skin_disease_imaging/","tags":null,"title":"Clinical Skin Disease Image Generation"},{"categories":null,"contents":"in demand paging, if we have to kick out a page, which one do we kick?\npossible basic approaches random page (this works surprisingly well) throw out the page that\u0026rsquo;s the longest in memory (this is BAD because if a page is there for a long time, its probably accessed a lot) oracle (pick the page whose next accesses is farest in the future\u0026hellip; we can\u0026rsquo;t predict the future) LRU (replace the page that\u0026rsquo;s accessed longest time ago) LRU sounds decently good, but recall that 
\\(2^{36}\\) wall times to store wall time for each page are needed which is bad\nclock algorithm rotate through all pages until we find one that hasn\u0026rsquo;t been referenced since last time\nwe add a reference bit to the page table\u0026mdash;its set to \\(1\\) if the program wrote or read each page, otherwise its set to \\(0\\) when page kick is needed, clock algorithm starts where it left off before and scan through physical pages each page it checks with reference bit 1, it sets the reference bit as 0 if it checked a page and its reference bit is 0, we kick it out (because we\u0026rsquo;ve gone through two ) We now save the position of the hand\u0026mdash;we want to begin checking with the page that hasn\u0026rsquo;t been checked for the longest time.\nIf every page has a reference bit is one, running this algorithm doesn\u0026rsquo;t break because it would set its immediately next bit of memory.\npage replacement model per-process replacement THIS IS NOT USED: we only kick out our own pages (but\u0026hellip; how would we know how many pages we allocate max per process before we start kicking?).\nglobal replacement THIS IS USED: a page fault in one process can kick out a page from another process. 
all pages from all processes in a single pool.\nrecall: demand paging get space:\npick a page to kick out write it to disk mark the old page map entry as \u0026ldquo;not present\u0026rdquo; update the new page map entry for the new process + point to our reused page restore kicked page:\ntrigger page fault check swap (if it isn\u0026rsquo;t present, page fault end) get a new physical page by kicking another page out load the data from disk into that reused page update page map yet again ","html":"\u003cp\u003ein \u003ca href=\"/posts/kbhdemand_paging/\"\u003edemand paging\u003c/a\u003e, if we have to kick out a page, which one do we kick?\u003c/p\u003e\n\u003ch2 id=\"possible-basic-approaches\"\u003epossible basic approaches\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003erandom page (this works surprisingly well)\u003c/li\u003e\n\u003cli\u003ethrow out the page that\u0026rsquo;s the longest in memory (this is BAD because if a page is there for a long time, its probably accessed a lot)\u003c/li\u003e\n\u003cli\u003eoracle (pick the page whose next accesses is farest in the future\u0026hellip; we can\u0026rsquo;t predict the future)\u003c/li\u003e\n\u003cli\u003eLRU (replace the page that\u0026rsquo;s accessed \u003cstrong\u003elongest time ago\u003c/strong\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eLRU\u003c/strong\u003e sounds decently good, but recall that \\(2^{36}\\) wall times to store wall time for each page are needed which is bad\u003c/p\u003e\n\u003ch2 id=\"clock-algorithm\"\u003eclock algorithm\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003erotate through all pages until we find one that hasn\u0026rsquo;t been referenced since last time\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe add a \u003cstrong\u003ereference bit\u003c/strong\u003e to the \u003ca href=\"/posts/kbhvirtual_memory/#paging\"\u003epage table\u003c/a\u003e\u0026mdash;its set to \\(1\\) if the program wrote or read each page, otherwise its set 
to \\(0\\)\u003c/li\u003e\n\u003cli\u003ewhen page kick is needed, clock algorithm starts where it left off before and scan through physical pages\n\u003col\u003e\n\u003cli\u003eeach page it checks with reference bit 1, it sets the \u003cstrong\u003ereference bit\u003c/strong\u003e as 0\u003c/li\u003e\n\u003cli\u003eif it checked a page and its reference bit is 0, we kick it out (because we\u0026rsquo;ve gone through two )\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe now \u003cstrong\u003esave the position of the hand\u003c/strong\u003e\u0026mdash;we want to begin checking with the page that hasn\u0026rsquo;t been checked for the longest time.\u003c/p\u003e\n\u003cp\u003eIf every page has a \u003cstrong\u003ereference bit\u003c/strong\u003e is one, running this algorithm doesn\u0026rsquo;t break because it would set its immediately next bit of memory.\u003c/p\u003e\n\u003ch2 id=\"page-replacement-model\"\u003epage replacement model\u003c/h2\u003e\n\u003ch3 id=\"per-process-replacement\"\u003eper-process replacement\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eTHIS IS NOT USED\u003c/strong\u003e: we only kick out our own pages (but\u0026hellip; how would we know how many pages we allocate max per process before we start kicking?).\u003c/p\u003e\n\u003ch3 id=\"global-replacement\"\u003eglobal replacement\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eTHIS IS USED\u003c/strong\u003e: a page fault in one process can kick out a page from another process. 
all pages from all processes in a single pool.\u003c/p\u003e\n\u003ch2 id=\"recall-demand-paging\"\u003erecall: demand paging\u003c/h2\u003e\n\u003cp\u003eget space:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003epick a page to kick out\u003c/li\u003e\n\u003cli\u003ewrite it to disk\u003c/li\u003e\n\u003cli\u003emark the old page map entry as \u0026ldquo;not present\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eupdate the new page map entry for the new process + point to our reused page\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003erestore kicked page:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etrigger page fault\u003c/li\u003e\n\u003cli\u003echeck swap (if it isn\u0026rsquo;t present, page fault end)\u003c/li\u003e\n\u003cli\u003eget a new physical page by kicking another page out\u003c/li\u003e\n\u003cli\u003eload the data from disk into that reused page\u003c/li\u003e\n\u003cli\u003eupdate page map yet again\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclock_algorthium/","tags":null,"title":"clock algorithm"},{"categories":null,"contents":"to be closed means that the operation of a group applied to an element of a group would produce another element of the group.\n","html":"\u003cp\u003eto be \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e means that the \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e of a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e applied to an element of a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e would produce another element of the \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclosed/","tags":null,"title":"closed"},{"categories":null,"contents":"Exploring CLRS.\n2 insertion sort ","html":"\u003cp\u003eExploring CLRS.\u003c/p\u003e\n\u003ch2 id=\"2\"\u003e2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinsertion_sort/\"\u003einsertion sort\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclrs_index/","tags":["index"],"title":"CLRS Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhclustering/","tags":null,"title":"clustering"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcmu/","tags":null,"title":"CMU"},{"categories":null,"contents":"A brain signal to help maintain glucose homeostatis\nBrain takes glucose product + glucose uptake to control energy balance in food intake and energy expenditure.\nThe brain takes:\nNeural Behavioral Hormonal responses to maintain glucode uptake.\n","html":"\u003cp\u003eA brain signal to help maintain \u003ca href=\"\"\u003eglucose homeostatis\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eBrain takes glucose product + glucose uptake to control energy balance in food intake and energy expenditure.\u003c/p\u003e\n\u003cp\u003eThe brain takes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eNeural\u003c/li\u003e\n\u003cli\u003eBehavioral\u003c/li\u003e\n\u003cli\u003eHormonal\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eresponses to maintain glucode uptake.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcns_regulation/","tags":null,"title":"CNS regulation"},{"categories":null,"contents":"A Code Review is a systematic study code by others\u0026mdash;like proofreading an essay. There\u0026rsquo;s a few different ways of doing Code Review.\nWhy do we code review? catch bugs, style deviations, design + convention violations security trade-off: having someone who is well-versed in security is useful to know how other people\u0026rsquo;s code work to learn additional skills, languages, frameworks Code Review Methodology Don\u0026rsquo;t do it Very fast! 
None of the benefits of code review Over-the-Shoulder Code Review Over-the-Shoulder Code Review typically is done over someone\u0026rsquo;s shoulder\u0026mdash;author walking the reviewer through code.\nProps Typically catch major + obvious issues Lightweight and fast\u0026mdash;most likely to get done Cons Author explains code as they go; biasing the reviewer Author knows the code, so may gloss over the parts they are familiar with + move at a pace faster than the reviewer can process The author only has two solders: can\u0026rsquo;t involve more than 2 people Usually real-time: the author is waiting for reviewer, which is blocking both author and reviewer\u0026rsquo;s schedules Pair Programming Pair Programming is a two-brains, one keyboard paradigm. The less experienced person types (moves at the pace of the slower person)\nPros Real-time feedback/correction; good for learning new things Writing code + code review at the same time\u0026mdash;total time saved Cons Two people working together are susceptible to bind slots still! Remember that it will take four times as long to do something: this trade-off is worth it! Formal Code Review Tools: Phabricator, Gerrit\nThe process of Formal Code Review is a very formal process.\nAuthor writes and commits code The diff of the commit is sent to the reviewer Reviewer reads through the code at their own pace Reviewer can comment on the entire diff, or on specific lines of code This can involve multiple reviewers This is basically informal code review, but solving the original issues.\nBest Practices Every commit must be code reviewed before pushed to other people The larger the commit, the more reviewers should be on it Reviewer Approval Levels +2: code can be pushed +1: code looks good, someone else should make the call 0: why am I here? 
I dunno -1: this code smells funny, but I\u0026rsquo;m willing to be overruled -2: this code must be changed before being pushed -1/-2: comment should clearly indicate whether they are blocking push.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcode_review/\"\u003eCode Review\u003c/a\u003e is a systematic study code by others\u0026mdash;like proofreading an essay. There\u0026rsquo;s a few different ways of doing \u003ca href=\"/posts/kbhcode_review/\"\u003eCode Review\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"why-do-we-code-review\"\u003eWhy do we code review?\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecatch bugs, style deviations, design + convention violations\u003c/li\u003e\n\u003cli\u003esecurity trade-off: having someone who is well-versed in security is useful\u003c/li\u003e\n\u003cli\u003eto know how other people\u0026rsquo;s code work\u003c/li\u003e\n\u003cli\u003eto learn additional skills, languages, frameworks\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"code-review-methodology\"\u003eCode Review Methodology\u003c/h2\u003e\n\u003ch3 id=\"don-t-do-it\"\u003eDon\u0026rsquo;t do it\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eVery fast!\u003c/li\u003e\n\u003cli\u003eNone of the benefits of code review\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"over-the-shoulder-code-review\"\u003eOver-the-Shoulder Code Review\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#over-the-shoulder-code-review\"\u003eOver-the-Shoulder Code Review\u003c/a\u003e typically is done over someone\u0026rsquo;s shoulder\u0026mdash;author walking the reviewer through code.\u003c/p\u003e\n\u003ch4 id=\"props\"\u003eProps\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eTypically catch major + obvious issues\u003c/li\u003e\n\u003cli\u003eLightweight and fast\u0026mdash;most likely to get done\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"cons\"\u003eCons\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eAuthor explains code as they go; biasing the 
reviewer\u003c/li\u003e\n\u003cli\u003eAuthor knows the code, so may gloss over the parts they are familiar with + move at a pace faster than the reviewer can process\u003c/li\u003e\n\u003cli\u003eThe author only has two solders: can\u0026rsquo;t involve more than 2 people\u003c/li\u003e\n\u003cli\u003eUsually real-time: the author is waiting for reviewer, which is blocking both author and reviewer\u0026rsquo;s schedules\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pair-programming\"\u003ePair Programming\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#pair-programming\"\u003ePair Programming\u003c/a\u003e is a two-brains, one keyboard paradigm. The less experienced person types (moves at the pace of the slower person)\u003c/p\u003e\n\u003ch4 id=\"pros\"\u003ePros\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eReal-time feedback/correction; good for learning new things\u003c/li\u003e\n\u003cli\u003eWriting code + code review at the same time\u0026mdash;total time saved\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"cons\"\u003eCons\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eTwo people working together are susceptible to bind slots still!\u003c/li\u003e\n\u003cli\u003eRemember that it will take four times as long to do something: this trade-off is worth it!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"formal-code-review\"\u003eFormal Code Review\u003c/h3\u003e\n\u003cp\u003eTools: Phabricator, Gerrit\u003c/p\u003e\n\u003cp\u003eThe process of \u003ca href=\"#formal-code-review\"\u003eFormal Code Review\u003c/a\u003e is a very formal process.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAuthor writes and commits code\u003c/li\u003e\n\u003cli\u003eThe diff of the commit is sent to the reviewer\u003c/li\u003e\n\u003cli\u003eReviewer reads through the code at their own pace\u003c/li\u003e\n\u003cli\u003eReviewer can comment on the entire diff, or on specific lines of code\u003c/li\u003e\n\u003cli\u003eThis can involve multiple 
reviewers\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is \u003cem\u003ebasically\u003c/em\u003e informal code review, but solving the original issues.\u003c/p\u003e\n\u003ch4 id=\"best-practices\"\u003eBest Practices\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eEvery commit must be code reviewed before pushed to other people\u003c/li\u003e\n\u003cli\u003eThe larger the commit, the more reviewers should be on it\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"reviewer-approval-levels\"\u003eReviewer Approval Levels\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e+2: code can be pushed\u003c/li\u003e\n\u003cli\u003e+1: code looks good, someone else should make the call\u003c/li\u003e\n\u003cli\u003e0: why am I here? I dunno\u003c/li\u003e\n\u003cli\u003e-1: this code smells funny, but I\u0026rsquo;m willing to be overruled\u003c/li\u003e\n\u003cli\u003e-2: this code must be changed before being pushed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e-1/-2: comment should clearly indicate whether they are blocking push.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcode_review/","tags":null,"title":"Code Review"},{"categories":null,"contents":"The time it takes for a qubit to oscillate between two states between damping down.\n","html":"\u003cp\u003eThe time it takes for a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e to oscillate between two states between damping down.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcoherence_time/","tags":null,"title":"coherence time"},{"categories":null,"contents":"A family wide between SARS-COV2 variances identified specific sites which are maintained across variance of concerns suggesting why specific antibidies targeting them maybe able to render higher neutralizing potential\n","html":"\u003cp\u003eA family wide between \u003ca href=\"/posts/kbhsars_cov2/\"\u003eSARS-COV2\u003c/a\u003e variances identified specific sites which are maintained across variance of concerns suggesting 
why specific antibidies targeting them maybe able to render higher neutralizing potential\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcold_sites/","tags":null,"title":"cold sites"},{"categories":null,"contents":"The cold war is a period of time in which there is blocks of conflict. This is after WWII.\nSee also:\ncold war in vietnam ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e is a period of time in which there is blocks of conflict. This is after \u003ca href=\"\"\u003eWWII\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003ecold war in vietnam\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcold_war/","tags":null,"title":"cold war"},{"categories":null,"contents":"A fact sheet on the progress of the cold war in Vietnam.\nprogression of US escalation in the war, an overview Reading: encyclopedia Britannica\n1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off 1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership 1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region 1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change 1963: Kennedy assisinated 1964: Vietnam situation worsens 1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control 1965: US fighting was effective though unpresistent; viet cong just went in after US leaves 1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on 1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam. Though it failed, strong anti-war sentiments were stirred. 
1969: Anti-War protests pick up force 1970: Ohio National Guard opens fire on unarmed protesters 1973: Peace Pact Signed after the US giving up, essentially 1975: Saigon falls, US evacuates anti-war protest motivation in Vietnam Reading: Protest against the War in Vietnam\nThe first protests rose in the 1950 and picked up force by the late 1960s when LBJ decided not to seek re-election.\nForeign policy is usually hard to change, but the strength of domestic dissent in Vietnam represents an usual shift which drove foreign policy changes.\nRight-wing sentiment: seeing the war as a means of future-proofing the American government from Communistic influences. Left-wing protest More organized than the spontaneous of the right-wing protest Split between moralistic + legalistic interests vs. national interest domestic political influence of the Vietnam War Reading: The War that Killed Trust, Karl Marlantes, 2017\n\u0026ldquo;Of course presidents lie\u0026rdquo;\u0026mdash;that the Vietnam War represented the shift away from genuine truthfulness as a part of American politics Killed 58,000 service-members, and made Americans cynical and distrustful of governmental institutions Systemic Cynicism Johnson\u0026rsquo;s \u0026ldquo;credibility gap\u0026rdquo;: that the president maybe lying. Nowadays this is commonplace, but back then it was quite unusual.\nCLAIM: engendered Cynicism threatened inaction.\nRacial Integration The cold war promised higher degrees of racial integration because of collective service.\nRepeated Touring That, post-draft, the American working class became much more likely to serve \u0026ldquo;voluntarily\u0026rdquo; by being recruited. 
Unlike the draft, which is some ways is universal service, the volunteer system is much more reliant upon th emiddle class.\nsocial impacts of the Vietnam War Reading: The Social Impact of War, Modell and Haggerty, 1991\nWars\u0026rsquo; effects can be treated with a lens of social manifestation The Vietnam war had an impact on the last 20 years of primary war literature draft The draft is the principle mechanism by which people into the war. The system facilitating the draft in the United States, the Selective Service System, is a good case study for such a system in the Vietnam War.\nBy its design, the draft is supposed to be an equitable process (baring gender and age.) However, the Vietnam War reveals that the military services was not straightforwardly distributed: often drafting children of lower socioeconomic status.\nexperience of servicemen in Vietnam Soldiers in the Vietnam War have shown some negative psychological side effects. Solders are shown to be \u0026ldquo;working through\u0026rdquo; the ideas to process, creating a larger effects.\neffects on the economy War veterans generally had higher incomes than non-vets, mostly because they have more income per level of educational attanment.\nhistoriographical school of Vietnam War Reading: James McLeroy, Small Wars Journal, (Army Special Forces Officer in I Corps, Vietnam, in 1968)\nOrthodox treatment Vietnam War as an extension/afterthought of late-20th century cold war history\nVietnam War escalated only because of United States involvement \u0026ldquo;anti-war\u0026rdquo; is not opposition against communistic conquest but opposition against war in itself Revisionist treatment Vietnam War as a calculable implementation of escalator revolutionary strategy modeled after Mao.\nVietnam War is not an insurgency or a civil war, but instead a part of the three-step guerrilla warfare Provocation of the United States is a part of the strategy\u0026mdash;to force them to move out of Vietnam and to encourage the 
communist bloc to provide more support ","html":"\u003cp\u003eA fact sheet on the progress of the \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e in \u003ca href=\"/posts/kbhvietnam/\"\u003eVietnam\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"progression-of-us-escalation-in-the-war-an-overview\"\u003eprogression of US escalation in the war, an overview\u003c/h2\u003e\n\u003cp\u003eReading: encyclopedia Britannica\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off\u003c/li\u003e\n\u003cli\u003e1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership\u003c/li\u003e\n\u003cli\u003e1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region\u003c/li\u003e\n\u003cli\u003e1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change\u003c/li\u003e\n\u003cli\u003e1963: Kennedy assisinated\u003c/li\u003e\n\u003cli\u003e1964: Vietnam situation worsens\u003c/li\u003e\n\u003cli\u003e1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control\u003c/li\u003e\n\u003cli\u003e1965: US fighting was effective though unpresistent; viet cong just went in after US leaves\u003c/li\u003e\n\u003cli\u003e1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on\u003c/li\u003e\n\u003cli\u003e1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam. 
Though it failed, strong anti-war sentiments were stirred.\u003c/li\u003e\n\u003cli\u003e1969: Anti-War protests pick up force\u003c/li\u003e\n\u003cli\u003e1970: Ohio National Guard opens fire on unarmed protesters\u003c/li\u003e\n\u003cli\u003e1973: Peace Pact Signed after the US giving up, essentially\u003c/li\u003e\n\u003cli\u003e1975: Saigon falls, US evacuates\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"anti-war-protest-motivation-in-vietnam\"\u003eanti-war protest motivation in Vietnam\u003c/h2\u003e\n\u003cp\u003eReading: Protest against the War in Vietnam\u003c/p\u003e\n\u003cp\u003eThe first protests rose in the 1950 and picked up force by the late 1960s when LBJ decided not to seek re-election.\u003c/p\u003e\n\u003cp\u003eForeign policy is usually hard to change, but the strength of domestic dissent in Vietnam represents an usual shift which drove foreign policy changes.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRight-wing sentiment: seeing the war as a means of future-proofing the American government from Communistic influences.\u003c/li\u003e\n\u003cli\u003eLeft-wing protest\n\u003cul\u003e\n\u003cli\u003eMore organized than the spontaneous of the right-wing protest\u003c/li\u003e\n\u003cli\u003eSplit between moralistic + legalistic interests vs. 
national interest\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"domestic-political-influence-of-the-vietnam-war--kbhcold-war-in-vietnam-dot-md\"\u003edomestic political influence of the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eReading: The War that Killed Trust, Karl Marlantes, 2017\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Of course presidents lie\u0026rdquo;\u0026mdash;that the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e represented the shift away from genuine truthfulness as a part of American politics\u003c/li\u003e\n\u003cli\u003eKilled 58,000 service-members, and made Americans cynical and distrustful of governmental institutions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"systemic-cynicism\"\u003eSystemic Cynicism\u003c/h3\u003e\n\u003cp\u003eJohnson\u0026rsquo;s \u0026ldquo;credibility gap\u0026rdquo;: that the president maybe lying. Nowadays this is commonplace, but back then it was quite unusual.\u003c/p\u003e\n\u003cp\u003eCLAIM: engendered Cynicism threatened inaction.\u003c/p\u003e\n\u003ch3 id=\"racial-integration\"\u003eRacial Integration\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e promised higher degrees of racial integration because of collective service.\u003c/p\u003e\n\u003ch3 id=\"repeated-touring\"\u003eRepeated Touring\u003c/h3\u003e\n\u003cp\u003eThat, post-draft, the American working class became much more likely to serve \u0026ldquo;voluntarily\u0026rdquo; by being recruited. 
Unlike the draft, which is some ways is universal service, the volunteer system is much more reliant upon th emiddle class.\u003c/p\u003e\n\u003ch2 id=\"social-impacts-of-the-vietnam-war--kbhcold-war-in-vietnam-dot-md\"\u003esocial impacts of the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eReading: The Social Impact of War, Modell and Haggerty, 1991\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWars\u0026rsquo; effects can be treated with a lens of social manifestation\u003c/li\u003e\n\u003cli\u003eThe \u003ca href=\"/posts/kbhvietnam/\"\u003eVietnam\u003c/a\u003e war had an impact on the last 20 years of primary war literature\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"draft\"\u003edraft\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#draft\"\u003edraft\u003c/a\u003e is the principle mechanism by which people into the war. The system facilitating the \u003ca href=\"#draft\"\u003edraft\u003c/a\u003e in the \u003ca href=\"\"\u003eUnited States\u003c/a\u003e, the \u003ca href=\"/posts/kbhselective_service_system/\"\u003eSelective Service System\u003c/a\u003e, is a good case study for such a system in the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBy its design, the \u003ca href=\"#draft\"\u003edraft\u003c/a\u003e is supposed to be an equitable process (baring gender and age.) However, the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e reveals that the military services was not straightforwardly distributed: often \u003ca href=\"#draft\"\u003edrafting\u003c/a\u003e children of lower socioeconomic status.\u003c/p\u003e\n\u003ch3 id=\"experience-of-servicemen-in-vietnam\"\u003eexperience of servicemen in Vietnam\u003c/h3\u003e\n\u003cp\u003eSoldiers in the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e have shown some negative psychological side effects. 
Solders are shown to be \u0026ldquo;working through\u0026rdquo; the ideas to process, creating a larger effects.\u003c/p\u003e\n\u003ch3 id=\"effects-on-the-economy\"\u003eeffects on the economy\u003c/h3\u003e\n\u003cp\u003eWar veterans generally had higher incomes than non-vets, mostly because they have more income per level of educational attanment.\u003c/p\u003e\n\u003ch2 id=\"historiographical-school-of-vietnam-war--kbhcold-war-in-vietnam-dot-md\"\u003ehistoriographical school of \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eReading: James McLeroy, Small Wars Journal, (Army Special Forces Officer in I Corps, Vietnam, in 1968)\u003c/p\u003e\n\u003ch3 id=\"orthodox-treatment\"\u003eOrthodox treatment\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e as an extension/afterthought of late-20th century \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e history\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e escalated only because of \u003ca href=\"\"\u003eUnited States\u003c/a\u003e involvement\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;anti-war\u0026rdquo; is not opposition against communistic conquest but opposition against war in itself\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"revisionist-treatment\"\u003eRevisionist treatment\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e as a calculable implementation of escalator revolutionary strategy modeled after Mao.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e is not an insurgency or a civil war, but instead a part of the three-step guerrilla warfare\u003c/li\u003e\n\u003cli\u003eProvocation of the \u003ca href=\"\"\u003eUnited States\u003c/a\u003e is a part of the 
strategy\u0026mdash;to force them to move out of Vietnam and to encourage the communist bloc to provide more support\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcold_war_in_vietnam/","tags":null,"title":"cold war in vietnam"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcollectivist_economy/","tags":null,"title":"Collectivist Economy"},{"categories":null,"contents":"College application is the process of applying to an American college.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcollege_application/\"\u003eCollege application\u003c/a\u003e is the process of applying to an American college.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcollege_application/","tags":null,"title":"college application"},{"categories":null,"contents":"COLLEGE101 Readings These links will not be online, as they contain actual notes from my reading.\nReading Date Link W.E.B. DuBois, Of Our Spiritual Strivings \u0026lt;2023-09-24 Sun\u0026gt; Of Our Spiritual Strivings ","html":"\u003ch2 id=\"college101-readings\"\u003eCOLLEGE101 Readings\u003c/h2\u003e\n\u003cp\u003eThese links will not be online, as they contain actual notes from my reading.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eReading\u003c/th\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eW.E.B. 
DuBois, Of Our Spiritual Strivings\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-09-24 Sun\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhof_our_spiritual_strivings/\"\u003eOf Our Spiritual Strivings\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcollege101_index/","tags":null,"title":"COLLEGE101 Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcollegeboard/","tags":null,"title":"CollegeBoard"},{"categories":null,"contents":"collocation extraction is the task of extracting n-grams from text that would co-occur next to each other more often than chance.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcollocation_extractio/\"\u003ecollocation extraction\u003c/a\u003e is the task of extracting n-grams from text that would co-occur next to each other more often than chance.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcollocation_extractio/","tags":null,"title":"collocation extraction"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcolumn_space/","tags":null,"title":"column space"},{"categories":null,"contents":"A combination is a choice task which shows that order does not matter.\n\\begin{equation} \\mqty(n \\\\k) = \\frac{n!}{k!(n-k)!} = n! 
\\times 1 \\times \\frac{1}{k!} \\times \\frac{1}{(n-k)!} \\end{equation}\nThis could be shown as follows: we first permute the group of people \\(n\\) (\\(n!\\)); take the first \\(k\\) of them (only 1 chose); we remove the overcounted order from the \\(k\\) subset chosen (\\(\\frac{1}{k!}\\)),; we remove the overcounted order from the \\(n-k\\) subset (\\(\\frac{1}{(n-k)!}\\)).\nThere are many ways of making this happen in code:\nn_choose_k = math.factorial(n) / (math.factorial(k) * math.factorial(n-k)) n_choose_k = math.comb(n,k) n_choose_k = itertools.combinations(range(n), k) ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003e is a choice task which shows that order does not matter.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(n \\\\k) = \\frac{n!}{k!(n-k)!} = n! \\times 1 \\times \\frac{1}{k!} \\times \\frac{1}{(n-k)!}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis could be shown as follows: we first \u003ca href=\"/posts/kbhpermutation/\"\u003epermute\u003c/a\u003e the group of people \\(n\\) (\\(n!\\)); take the first \\(k\\) of them (only 1 chose); we remove the overcounted order from the \\(k\\) subset chosen (\\(\\frac{1}{k!}\\)),; we remove the overcounted order from the \\(n-k\\) subset (\\(\\frac{1}{(n-k)!}\\)).\u003c/p\u003e\n\u003cp\u003eThere are many ways of making this happen in code:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_choose_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efactorial\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efactorial\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efactorial\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_choose_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecomb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_choose_k\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eitertools\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecombinations\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhcombination/","tags":null,"title":"combination"},{"categories":null,"contents":" collect(): get all of your data count(): get a count of the elements in the RDD countByValue(): list the times each value appears reduce(func): the reduce part of MapReduce first(), take(n): return some number of elements top(n): return the highest n values in the list ","html":"\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ecollect()\u003c/code\u003e: get all of your data\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecount()\u003c/code\u003e: get a count of the elements in the RDD\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecountByValue()\u003c/code\u003e: list the times each value appears\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ereduce(func)\u003c/code\u003e: the \u003ca href=\"/posts/kbhreduce/\"\u003ereduce\u003c/a\u003e part of \u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003efirst(), take(n)\u003c/code\u003e: return some number of elements\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003etop(n)\u003c/code\u003e: return the highest n values in the 
list\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcommon_spark_actions/","tags":null,"title":"Common Spark Actions"},{"categories":null,"contents":" map(func): apply a function on all functions filter(func): filter based on function flatMap(func): flatten returned lists into one giant list union(rdd): create a union of multiple RDD0 subtract(rdd): subtract RDDs cartesian(rdd): cartesian product of rdd parallelize(list): make an RDD from list Special transformations for Pair RDDs reduceByKey(func): key things groupByKey(func): key things sortByKey(func): key things See also Database \u0026ldquo;Join\u0026rdquo;\n","html":"\u003cul\u003e\n\u003cli\u003e\u003ccode\u003emap(func)\u003c/code\u003e: apply a function on all functions\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003efilter(func)\u003c/code\u003e: filter based on function\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eflatMap(func)\u003c/code\u003e: flatten returned lists into one giant list\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eunion(rdd)\u003c/code\u003e: create a union of multiple RDD0\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003esubtract(rdd)\u003c/code\u003e: subtract RDDs\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecartesian(rdd)\u003c/code\u003e: cartesian product of rdd\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eparallelize(list)\u003c/code\u003e: make an RDD from list\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"special-transformations-for-pair-rdd--kbhrdd-dot-md--s\"\u003eSpecial transformations for \u003ca href=\"/posts/kbhrdd/#pair-rdd\"\u003ePair RDD\u003c/a\u003es\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ereduceByKey(func)\u003c/code\u003e: key things\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003egroupByKey(func)\u003c/code\u003e: key things\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003esortByKey(func)\u003c/code\u003e: key things\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"\"\u003eDatabase 
\u0026ldquo;Join\u0026rdquo;\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcommon_spark_transformations/","tags":null,"title":"Common Spark Transformations"},{"categories":null,"contents":"commutativity means that the same operation can be ran in any order.\nThat is:\n\\begin{equation} ABC = ACB \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e means that the same \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e can be ran in any order.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nABC = ACB\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcommutivity/","tags":null,"title":"commutativity"},{"categories":null,"contents":" return \u0026lt; 0 if first value should come before second value return \u0026gt; 0 if first value should come AFTEr second value 0 if the first and second value are equivalent ","html":"\u003col\u003e\n\u003cli\u003ereturn \u0026lt; 0 if first value should come before second value\u003c/li\u003e\n\u003cli\u003ereturn \u0026gt; 0 if first value should come AFTEr second value\u003c/li\u003e\n\u003cli\u003e0 if the first and second value are equivalent\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomparison_function/","tags":null,"title":"comparison function"},{"categories":null,"contents":"Recall that Euler\u0026rsquo;s Equation exists:\n\\begin{equation} f(x) = e^{i k \\omega x} = \\cos (k\\omega x) + i \\sin(k\\omega x) \\end{equation}\nand, for \\(\\omega = \\frac{2\\pi}{L}\\), this is still \\(L\\) periodic!\nNext up, we make an important note:\n\\begin{equation} e^{ik\\omega x}, e^{-i k \\omega x} \\end{equation}\nis linearly independent over \\(x\\).\ninner product over complex-valued functions recall all of the inner product properties. 
Now, for functions periodic over \\([0,L]\\) (recall we have double this if the function is period over \\([-L, L]\\):\n\\begin{equation} \\langle f, g \\rangle = \\frac{1}{L} \\int_{0}^{L} f(x) \\overline{g(x)} \\dd{x} \\end{equation}\nsimilar to all other inner products, \\(\\langle f,f \\rangle = 0\\) IFF \\(f = 0\\), and \\(\\langle f,g \\rangle = 0\\) implies that \\(f\\) and \\(g\\) are orthogonal.\ncomplex exponentials are orthonormal For \\(L \u0026gt; 0\\), and \\(\\omega = \\frac{2\\pi}{L}\\), consider:\n\\begin{equation} \\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle \\end{equation}\nImportantly, we have the property that:\n\\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 0\\) if \\(k_1 \\neq k_2\\) \\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 1\\) if \\(k_1 = 1\\) ","html":"\u003cp\u003eRecall that \u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e exists:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = e^{i k \\omega x} = \\cos (k\\omega x) + i \\sin(k\\omega x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, for \\(\\omega = \\frac{2\\pi}{L}\\), this is \u003cstrong\u003estill\u003c/strong\u003e \\(L\\) periodic!\u003c/p\u003e\n\u003cp\u003eNext up, we make an important note:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{ik\\omega x}, e^{-i k \\omega x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e over \\(x\\).\u003c/p\u003e\n\u003ch2 id=\"inner-product--kbhinner-product-dot-md--over-complex-valued-functions\"\u003e\u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e over complex-valued functions\u003c/h2\u003e\n\u003cp\u003erecall all of the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e properties. 
Now, for functions periodic over \\([0,L]\\) (recall we have double this if the function is period over \\([-L, L]\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f, g \\rangle = \\frac{1}{L} \\int_{0}^{L} f(x) \\overline{g(x)} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esimilar to all other \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es, \\(\\langle f,f \\rangle = 0\\) IFF \\(f = 0\\), and \\(\\langle f,g \\rangle = 0\\) implies that \\(f\\) and \\(g\\) are orthogonal.\u003c/p\u003e\n\u003ch2 id=\"complex-exponentials-are-orthonormal\"\u003ecomplex exponentials are orthonormal\u003c/h2\u003e\n\u003cp\u003eFor \\(L \u0026gt; 0\\), and \\(\\omega = \\frac{2\\pi}{L}\\), consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eImportantly, we have the property that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 0\\) if \\(k_1 \\neq k_2\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 1\\) if \\(k_1 = 1\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomplex_exponential/","tags":null,"title":"Complex Exponential"},{"categories":null,"contents":"A complex number is a type of number. They are usually written as \\(a+bi\\).\nFormally\u0026mdash;\n\\begin{equation} \\mathbb{C} = \\left\\{a+bi\\ \\middle |\\ a,b \\in \\mathbb{R} \\right\\} \\end{equation}\nThis set generates solutions to every single polynomial with unique solutions. Its plane looks like \\(\\mathbb{R}^{2}\\).\nconstituents an order pair of two elements \\((a,b)\\) where \\(a,b\\in \\mathbb{R}\\).\nproperties of complex arithmetic there are 6. 
For all statements below, we assume \\(\\alpha = a+bi\\) and \\(\\beta=c+di\\), \\(\\lambda = e+fi\\), where \\(a,b,c,d,e,f \\in \\mathbb{R}\\) and therefore \\(\\alpha, \\beta,\\lambda \\in \\mathbb{C}\\).\ncommutativity \\(\\alpha + \\beta = \\beta + \\alpha\\) and \\(\\alpha\\beta = \\beta\\alpha\\) for all \\(\\alpha,\\beta \\in \\mathbb{C}\\).\nProof of complex number commutativity We desire \\(\\alpha + \\beta = \\beta + \\alpha\\).\n\\begin{align} \\alpha + \\beta \u0026amp;= (a+bi)+(c+di) \\\\ \u0026amp;=(a+c)+(b+d)i \\\\ \u0026amp;=(c+a)+(d+b)i \\\\ \u0026amp;=(c+di) + (a+bi) \\\\ \u0026amp;=\\beta+\\alpha\\ \\blacksquare \\end{align}\nleveraging the commutativity inside real numbers.\nInsights: combining and splitting\nThis proof has the feature of combining, operating (commuting, here), the splitting.\nassociativity \\((\\alpha +\\beta) + \\lambda = \\alpha + (\\beta +\\lambda)\\) and \\((\\alpha\\beta) \\lambda = (\\alpha \\beta) \\lambda\\)\nProven via the same trick from last time\nidentities \\(\\lambda + 0 = \\lambda\\), \\(\\lambda 1 = \\lambda\\)\nProof of complex number additive identity We desire that \\(\\lambda + 0 = 0\\).\n\\begin{align} \\lambda + 0 \u0026amp;= (e+fi) + (0+0i) \\\\ \u0026amp;= (e+0) + (f+0)i \\\\ \u0026amp;= e+fi\\ \\blacksquare \\end{align}\nmultiplicative identity is proven in the same way\nadditive inverse \\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\)\nProof of complex number additive inverse We desire to claim that \\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\), specifically that there is a unique \\(\\beta\\) which is the additive inverse of every \\(\\alpha\\).\nTake a number \\(\\alpha \\in \\mathbb{C}\\). 
We have that \\(\\alpha\\) would then by definition be some \\((a+bi)\\) where \\(a,b \\in \\mathbb{R}\\).\nTake some \\(\\beta\\) for which \\(\\alpha + \\beta = 0\\); by definition we again have \\(\\beta\\) equals some \\((c+di)\\) where \\(c,d \\in \\mathbb{R}\\).\n\\(\\because \\alpha + \\beta =0\\), \\(\\therefore (a+bi) + (c+di) = 0\\). \\(\\therefore (a+c) + (b+d)i = 0\\) \\(\\therefore a+c = 0, b+d = 0\\) \\(\\therefore c = -a, d = -b\\) We have created a unique definition of \\(c,d\\) and therefore \\(\\beta\\) given any \\(\\alpha\\), implying both uniqueness and existence.\nInsights: construct then generalize\nIn this case, the cool insight is the construct and generalize pattern. We are taking a single case \\(\\alpha\\), manipulating it, and wrote the result we want in terms of the constituents of \\(\\alpha\\). This creates both an existence and uniqueness proof.\nmultiplicative inverse \\(\\forall \\alpha \\in \\mathbb{C}, \\alpha \\neq 0, \\exists!\\ \\beta \\in \\mathbb{C} : \\alpha\\beta =1\\)\nThis is proven exactly in the same way as before.\ndistributive property \\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\ \\forall\\ \\lambda, \\alpha, \\beta \\in \\mathbb{C}\\)\nProof of complex number distributive property We desire to claim that \\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\).\n\\begin{align} \\lambda(\\alpha+\\beta) \u0026amp;= (e+fi)((a+bi)+(c+di))\\\\ \u0026amp;=(e+fi)((a+c)+(b+d)i)\\\\ \u0026amp;=((ea+ec)-(fb+fd))+((eb+ed)+(fa+fc))i\\\\ \u0026amp;=ea+ec-fb-fd+(eb+ed+fa+fc)i\\\\ \u0026amp;=ea-fb+ec-fd+(eb+fa+ed+fc)i\\\\ \u0026amp;=(ea-fb)+(ec-fd)+((eb+fa)+(ed+fc))i\\\\ \u0026amp;=((ea-fb)+(eb+fa)i) + ((ec-fd)+(ed+fc)i)\\\\ \u0026amp;=(e+fi)(a+bi) + (e+fi)(c+di)\\\\ \u0026amp;=\\lambda \\alpha + \\lambda \\beta\\ \\blacksquare \\end{align}\nInsights: try to remember to go backwards\nAt some point in this proof I had to reverse complex addition then multiplication, which actually tripped me up for 
a bit (\u0026ldquo;how does i distribute!!!\u0026rdquo;, etc.) Turns out, there was already a definition for addition and multiplication of complex numbers so we just needed to use that.\nadditional information addition and multiplication of complex numbers \\begin{align} (a+bi) + (c+di) \u0026amp;= (a+c)+(b+d)i \\\\ (a+bi)(c+di) \u0026amp;= (ac-bd)+(ad+bc)i \\end{align}\nwhere, \\(a,b,c,d\\in\\mathbb{R}\\).\nsubtraction and division of complex numbers Let \\(\\alpha, \\beta \\in \\mathbb{C}\\), and \\(-a\\) be the additive inverse of \\(\\alpha\\) and \\(\\frac{1}{\\alpha}\\) be the multiplicative inverse of \\(\\alpha\\).\nsubtraction: \\(\\beta-\\alpha = \\beta + (-\\alpha)\\) division: \\(\\frac{\\beta}{\\alpha} = \\beta\\frac{1}{\\alpha}\\) Simple enough, subtraction and division of complex numbers is just defined by applying the inverses of a number to a different number.\ncomplex numbers form a field See properties of complex arithmetic, how we proved that it satisfies a field.\ncomplex conjugate The complex conjugate of a complex number is defined as\n\\begin{equation} \\bar{z} = \\text{Re}\\ z - (\\text{Im}\\ z)i \\end{equation}\ni.e. taking the complex part to be negative. Say, \\(z = 3+2i\\), then \\(\\bar{z}=3-2i\\).\nabsolute value (complex numbers) The absolute value (complex numbers) of a complex number is:\n\\begin{equation} |z| = \\sqrt{{(\\text{Re}\\ z)^{2} + (\\text{Im}\\ z)^{2}}} \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e is a type of \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e. They are usually written as \\(a+bi\\).\u003c/p\u003e\n\u003cp\u003eFormally\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{C} = \\left\\{a+bi\\ \\middle |\\ a,b \\in \\mathbb{R} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis set generates solutions to every single polynomial with unique solutions. 
Its plane looks like \\(\\mathbb{R}^{2}\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003ean order pair of two elements \\((a,b)\\) where \\(a,b\\in \\mathbb{R}\\).\u003c/p\u003e\n\u003ch2 id=\"properties-of-complex-arithmetic\"\u003eproperties of complex arithmetic\u003c/h2\u003e\n\u003cp\u003ethere are 6. For all statements below, we assume \\(\\alpha = a+bi\\) and \\(\\beta=c+di\\), \\(\\lambda = e+fi\\), where \\(a,b,c,d,e,f \\in \\mathbb{R}\\) and therefore \\(\\alpha, \\beta,\\lambda \\in \\mathbb{C}\\).\u003c/p\u003e\n\u003ch3 id=\"commutativity--kbhcommutivity-dot-md\"\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\(\\alpha + \\beta = \\beta + \\alpha\\) and \\(\\alpha\\beta = \\beta\\alpha\\) for all \\(\\alpha,\\beta \\in \\mathbb{C}\\).\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--commutativity--kbhcommutivity-dot-md\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eWe desire \\(\\alpha + \\beta = \\beta + \\alpha\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\alpha + \\beta \u0026amp;= (a+bi)+(c+di) \\\\\n\u0026amp;=(a+c)+(b+d)i \\\\\n\u0026amp;=(c+a)+(d+b)i \\\\\n\u0026amp;=(c+di) + (a+bi) \\\\\n\u0026amp;=\\beta+\\alpha\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003cp\u003eleveraging the \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e inside \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInsights: combining and splitting\u003c/p\u003e\n\u003cp\u003eThis proof has the feature of combining, operating (commuting, here), the splitting.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"associativity--kbhassociative-dot-md\"\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\((\\alpha +\\beta) + \\lambda = \\alpha + (\\beta +\\lambda)\\) and \\((\\alpha\\beta) \\lambda = (\\alpha \\beta) \\lambda\\)\u003c/p\u003e\n\u003cp\u003eProven via the same trick from last time\u003c/p\u003e\n\u003ch3 id=\"identities--kbhidentity-dot-md\"\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\(\\lambda + 0 = \\lambda\\), \\(\\lambda 1 = \\lambda\\)\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--additive-identity--kbhadditive-identity-dot-md\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eWe desire that \\(\\lambda + 0 = 0\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\lambda + 0 \u0026amp;= (e+fi) + (0+0i) \\\\\n\u0026amp;= (e+0) + (f+0)i \\\\\n\u0026amp;= e+fi\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e is proven in the same way\u003c/p\u003e\n\u003ch3 id=\"additive-inverse--kbhinverses-dot-md\"\u003eadditive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\)\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--additive-inverse--kbhinverses-dot-md\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e additive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eWe desire to claim that \\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\), specifically that there 
\u003cem\u003eis\u003c/em\u003e a \u003cem\u003eunique\u003c/em\u003e \\(\\beta\\) which is the additive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e of every \\(\\alpha\\).\u003c/p\u003e\n\u003cp\u003eTake a number \\(\\alpha \\in \\mathbb{C}\\). We have that \\(\\alpha\\) would then by definition be some \\((a+bi)\\) where \\(a,b \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003cp\u003eTake some \\(\\beta\\) for which \\(\\alpha + \\beta = 0\\); by definition we again have \\(\\beta\\) equals some \\((c+di)\\) where \\(c,d \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\because \\alpha + \\beta =0\\), \\(\\therefore (a+bi) + (c+di) = 0\\).\u003c/li\u003e\n\u003cli\u003e\\(\\therefore (a+c) + (b+d)i = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\therefore a+c = 0, b+d = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\therefore c = -a, d = -b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe have created a unique definition of \\(c,d\\) and therefore \\(\\beta\\) given any \\(\\alpha\\), implying both uniqueness and existence.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInsights: construct then generalize\u003c/p\u003e\n\u003cp\u003eIn this case, the cool insight is the construct and generalize pattern. We are taking a single case \\(\\alpha\\), manipulating it, and wrote the result we want in terms of the constituents of \\(\\alpha\\). 
This creates both an existence and uniqueness proof.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multiplicative-inverse\"\u003emultiplicative inverse\u003c/h3\u003e\n\u003cp\u003e\\(\\forall \\alpha \\in \\mathbb{C}, \\alpha \\neq 0, \\exists!\\ \\beta \\in \\mathbb{C} : \\alpha\\beta =1\\)\u003c/p\u003e\n\u003cp\u003eThis is proven exactly in the same way as before.\u003c/p\u003e\n\u003ch3 id=\"distributive-property\"\u003edistributive property\u003c/h3\u003e\n\u003cp\u003e\\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\ \\forall\\ \\lambda, \\alpha, \\beta \\in \\mathbb{C}\\)\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--distributive-property\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e distributive property\u003c/h4\u003e\n\u003cp\u003eWe desire to claim that \\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\lambda(\\alpha+\\beta) \u0026amp;= (e+fi)((a+bi)+(c+di))\\\\\n\u0026amp;=(e+fi)((a+c)+(b+d)i)\\\\\n\u0026amp;=((ea+ec)-(fb+fd))+((eb+ed)+(fa+fc))i\\\\\n\u0026amp;=ea+ec-fb-fd+(eb+ed+fa+fc)i\\\\\n\u0026amp;=ea-fb+ec-fd+(eb+fa+ed+fc)i\\\\\n\u0026amp;=(ea-fb)+(ec-fd)+((eb+fa)+(ed+fc))i\\\\\n\u0026amp;=((ea-fb)+(eb+fa)i) + ((ec-fd)+(ed+fc)i)\\\\\n\u0026amp;=(e+fi)(a+bi) + (e+fi)(c+di)\\\\\n\u0026amp;=\\lambda \\alpha + \\lambda \\beta\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInsights: try to remember to go backwards\u003c/p\u003e\n\u003cp\u003eAt some point in this proof I had to reverse complex addition then multiplication, which actually tripped me up for a bit (\u0026ldquo;how does \u003ccode\u003ei\u003c/code\u003e distribute!!!\u0026rdquo;, etc.) 
Turns out, there was already a definition for \u003ca href=\"#addition-and-multiplication-of-complex-number--kbhcomplex-number-dot-md--s\"\u003eaddition and multiplication of complex numbers\u003c/a\u003e so we just needed to use that.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"addition-and-multiplication-of-complex-number--kbhcomplex-number-dot-md--s\"\u003eaddition and multiplication of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n(a+bi) + (c+di) \u0026amp;= (a+c)+(b+d)i \\\\\n(a+bi)(c+di) \u0026amp;= (ac-bd)+(ad+bc)i\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, \\(a,b,c,d\\in\\mathbb{R}\\).\u003c/p\u003e\n\u003ch3 id=\"subtraction-and-division-of-complex-number--kbhcomplex-number-dot-md--s\"\u003esubtraction and division of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eLet \\(\\alpha, \\beta \\in \\mathbb{C}\\), and \\(-a\\) be the additive inverse of \\(\\alpha\\) and \\(\\frac{1}{\\alpha}\\) be the multiplicative inverse of \\(\\alpha\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003esubtraction\u003c/strong\u003e\u003c/strong\u003e: \\(\\beta-\\alpha = \\beta + (-\\alpha)\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003edivision\u003c/strong\u003e\u003c/strong\u003e: \\(\\frac{\\beta}{\\alpha} = \\beta\\frac{1}{\\alpha}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSimple enough, \u003ca href=\"#subtraction-and-division-of-complex-number--kbhcomplex-number-dot-md--s\"\u003esubtraction and division of complex numbers\u003c/a\u003e is just defined by applying the inverses of a number to a different number.\u003c/p\u003e\n\u003ch3 id=\"complex-number--kbhcomplex-number-dot-md--s-form-a-field--kbhfield-dot-md\"\u003e\u003ca 
href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es form a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"#properties-of-complex-arithmetic\"\u003eproperties of complex arithmetic\u003c/a\u003e, how we proved that it satisfies a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"complex-conjugate\"\u003ecomplex conjugate\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#complex-conjugate\"\u003ecomplex conjugate\u003c/a\u003e of a \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e is defined as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{z} = \\text{Re}\\ z - (\\text{Im}\\ z)i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. taking the complex part to be negative. Say, \\(z = 3+2i\\), then \\(\\bar{z}=3-2i\\).\u003c/p\u003e\n\u003ch3 id=\"absolute-value--complex-number-kbhcomplex-number-dot-md--s\"\u003eabsolute value (\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es)\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#absolute-value--complex-number-kbhcomplex-number-dot-md--s\"\u003eabsolute value (complex numbers)\u003c/a\u003e of a \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|z| = \\sqrt{{(\\text{Re}\\ z)^{2} + (\\text{Im}\\ z)^{2}}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomplex_number/","tags":null,"title":"complex number"},{"categories":null,"contents":"\\begin{equation} \\begin{cases} x_1\u0026rsquo; = 5x_1 - 5x_2 \\\\ x_2\u0026rsquo; = 2x_1 -x_2 \\end{cases} \\end{equation}\nThis gives rise to:\n\\begin{equation} A = \\mqty(5 \u0026amp; -5 \\\\ 2 \u0026amp;-1) \\end{equation}\nSolving the characteristic polynomial gives:\n\\begin{equation} (5-\\lambda)(-1-\\lambda) + 10 = \\lambda^{2} - 4\\lambda +5 \\end{equation}\nTherefore, our solutions are 
imaginary!\n\\begin{equation} \\lambda_{1}, \\lambda_{2} = 2 \\pm i \\end{equation}\nAside: we only need to deal with one\nNotably, anything that satisfies the original polynomial, its conjugates also satisfies:\n\\begin{equation} \\bar{\\lambda^{2}-4\\lambda +5} = 0= {\\bar{\\lambda}}^{2} - 4\\bar{\\lambda} + 5 \\end{equation}\nFurther, for some:\n\\begin{equation} Av = \\lambda v \\end{equation}\nwe have:\n\\begin{equation} A \\bar{v} = \\lambda \\bar{v} \\end{equation}\nmeaning if we just figured the eigenvector of one of the lambdas we are good\nNow, let us consider the case before with \\(\\lambda = 2 +i\\). We therefore have:\n\\begin{equation} \\mqty(3-i \u0026amp; -5 \\\\ 2 \u0026amp; -3-i) \\mqty(a \\\\ b) = \\mqty(0 \\\\ 0) \\end{equation}\nThis gives one particular null space, such as:\n\\begin{equation} v = \\mqty(5 \\\\ 3-i) \\end{equation}\nThis gives rise to:\n\\begin{equation} u\u0026rsquo; = (2+i)u \\end{equation}\nwhich means:\n\\begin{equation} u(t) = ce^{(2+i)t} \\end{equation}\nfinally, resulting in:\n\\begin{equation} x(t) = ce^{(2+i)t} \\mqty(5 \\\\ 3-i) \\end{equation}\nwhich is a particular solution. 
Now, the general solution would tack on a complex conjugate, which doesn\u0026rsquo;t actually add any new information.\nInstead, we can actually use Euler to break this into two, independent, and equally valid solutions:\n\\begin{equation} x(t) = e^{2t} \\qty(\\cos t + i \\sin t) \\qty( \\mqty(5 \\\\3) - i \\mqty(0 \\\\ 1)) \\end{equation}\nfinally, we obtain:\n\\begin{equation} x(t) = e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1)) + i e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1)) \\end{equation}\neach of which individual is a solution:\n\\begin{equation} x_1(t) =e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1)) \\end{equation}\nand:\n\\begin{equation} x_2(t) = e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1)) \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = 5x_1 - 5x_2 \\\\\nx_2\u0026rsquo; = 2x_1 -x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(5 \u0026amp; -5 \\\\ 2 \u0026amp;-1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving the \u003ca href=\"/posts/kbhcharacteristic_polynomial/\"\u003echaracteristic polynomial\u003c/a\u003e gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(5-\\lambda)(-1-\\lambda) + 10 = \\lambda^{2} - 4\\lambda +5\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, our solutions are \u003cstrong\u003eimaginary!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{1}, \\lambda_{2} = 2 \\pm i\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: we only need to deal with one\u003c/p\u003e\n\u003cp\u003eNotably, anything that satisfies the original polynomial, its conjugates also satisfies:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{\\lambda^{2}-4\\lambda +5} = 0= {\\bar{\\lambda}}^{2} - 4\\bar{\\lambda} + 5\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFurther, for 
some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\bar{v} = \\lambda \\bar{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning if we just figured the eigenvector of one of the lambdas we are good\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNow, let us consider the case before with \\(\\lambda = 2 +i\\). We therefore have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(3-i \u0026amp; -5 \\\\ 2 \u0026amp; -3-i) \\mqty(a \\\\ b) = \\mqty(0 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives one particular null space, such as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = \\mqty(5 \\\\ 3-i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu\u0026rsquo; = (2+i)u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t) = ce^{(2+i)t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efinally, resulting in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = ce^{(2+i)t} \\mqty(5 \\\\ 3-i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a particular solution. 
Now, the general solution would tack on a complex conjugate, which doesn\u0026rsquo;t actually add any new information.\u003c/p\u003e\n\u003cp\u003eInstead, we can actually use Euler to break this into two, independent, and equally valid solutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{2t} \\qty(\\cos t + i \\sin t) \\qty( \\mqty(5 \\\\3) - i \\mqty(0 \\\\ 1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efinally, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1)) + i e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeach of which individual is a solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1(t) =e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_2(t) = e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1))\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunderdetermined_ode_system/","tags":null,"title":"Complex ODE System"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcomplex_system/","tags":null,"title":"Complex System"},{"categories":null,"contents":"complexity theory is a theory in algorithms to analyze time classes.\nWe know that \\(O(n\\ log\\ n)\\) is between \\(O(n)\\) and \\(O(n^2)\\) \u0026mdash; so we can roughly call it \u0026ldquo;polynomial time.\u0026rdquo;\nSince the optimal comparison cannot be faster than polynomial time, we say that comparison-based sorting is a polynomial-time algorithm.\nFrom this information, we can come up with two main time classes: \\(P\\) for solutions with known polynomial time, \\(NP\\) for non-deterministic polynomial time.\nThink of it as \\(P\\) is solvable with polynomial time and \\(NP\\) is verifiable with polynomial time.\nThe cool thing about 
\\(NP\\) problems is that solving a subset of them (\u0026quot;\\(NP\\) hard\u0026quot; problems) solves all \\(NP\\) problems.\nreduction (algorithms) reduction is how you can use \\(NP-hard\\) problems to solve all \\(NP\\) problems in complexity theory.\nSay, multiplication:\nsay you have a basic algorithm to add we can perform multiplication by asking our black box addition algorithm to add \\(n\\) times in complexity theory terms, this means addition is \u0026ldquo;at least as hard\u0026rdquo; as multiplication. Because, if we can solve any addition problem, we can solve any multiplication problem. \u0026ldquo;Given this, do that.\u0026rdquo;\nproblem classes (see above)\n\u0026ldquo;Polynomial time\u0026rdquo; \\(P\\) \u0026mdash; problems solvable with polynomial time \u0026ldquo;Non-deterministic polynomial time\u0026rdquo; \\(NP\\) \u0026mdash; problem verifiable with polynomial time \u0026ldquo;Exponential time\u0026rdquo; \\(EXPTIME\\) \u0026mdash; problems that can only be solved in exponential time \u0026ldquo;2 Exponential time\u0026rdquo; \\(2EXPTIME\\) \u0026mdash; class of problems that takes \\(2^{2^n}\\) time to solve Space complexity works in a similar way.\n\\(P\\) and \\(NP\\) are deterministic and non-deterministic in context to a Turing machine.\nFundamentally, \\(P\\) and \\(NP\\) only apply to decision problems\u0026mdash;given a problem, output \u0026ldquo;yes\u0026rdquo; or \u0026ldquo;no.\u0026rdquo; However, this definition can be stretched: sorting is a decision problem, because it can be stated as \u0026ldquo;given an unsorted array, can you verify whether or not an array is sorted\u0026rdquo;\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e is a theory in algorithms to analyze time classes.\u003c/p\u003e\n\u003cp\u003eWe know that \\(O(n\\ log\\ n)\\) is between \\(O(n)\\) and \\(O(n^2)\\) \u0026mdash; so we can roughly call it \u0026ldquo;polynomial 
time.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSince the optimal comparison cannot be faster than polynomial time, we say that comparison-based sorting is a \u003cem\u003epolynomial-time\u003c/em\u003e algorithm.\u003c/p\u003e\n\u003cp\u003eFrom this information, we can come up with two main time classes: \\(P\\) for solutions with known polynomial time, \\(NP\\) for non-deterministic polynomial time.\u003c/p\u003e\n\u003cp\u003eThink of it as \\(P\\) is solvable with polynomial time and \\(NP\\) is verifiable with polynomial time.\u003c/p\u003e\n\u003cp\u003eThe cool thing about \\(NP\\) problems is that solving a subset of them (\u0026quot;\\(NP\\) hard\u0026quot; problems) solves all \\(NP\\) problems.\u003c/p\u003e\n\u003ch2 id=\"reduction--algorithms\"\u003ereduction (algorithms)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#reduction--algorithms\"\u003ereduction\u003c/a\u003e is how you can use \\(NP-hard\\) problems to solve all \\(NP\\) problems in \u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSay, multiplication:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esay you have a basic algorithm to add\u003c/li\u003e\n\u003cli\u003ewe can perform multiplication by asking our black box addition algorithm to add \\(n\\) times\u003c/li\u003e\n\u003cli\u003ein \u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e terms, this means addition is \u0026ldquo;at least as hard\u0026rdquo; as multiplication. 
Because, if we can solve any addition problem, we can solve any multiplication problem.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;Given this, do that.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"problem-classes\"\u003eproblem classes\u003c/h2\u003e\n\u003cp\u003e(see above)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Polynomial time\u0026rdquo; \\(P\\) \u0026mdash; problems solvable with polynomial time\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Non-deterministic polynomial time\u0026rdquo; \\(NP\\) \u0026mdash; problem verifiable with polynomial time\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Exponential time\u0026rdquo; \\(EXPTIME\\) \u0026mdash; problems that can only be solved in exponential time\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;2 Exponential time\u0026rdquo; \\(2EXPTIME\\) \u0026mdash; class of problems that takes \\(2^{2^n}\\) time to solve\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSpace complexity works in a similar way.\u003c/p\u003e\n\u003cp\u003e\\(P\\) and \\(NP\\) are deterministic and non-deterministic \u003cem\u003ein context\u003c/em\u003e to a Turing machine.\u003c/p\u003e\n\u003cp\u003eFundamentally, \\(P\\) and \\(NP\\) only apply to \u003cem\u003edecision problems\u003c/em\u003e\u0026mdash;given a problem, output \u0026ldquo;yes\u0026rdquo; or \u0026ldquo;no.\u0026rdquo; However, this definition can be stretched: sorting is a decision problem, because it can be stated as \u0026ldquo;given an unsorted array, can you verify whether or not an array is sorted\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomplexity_theory/","tags":null,"title":"complexity theory"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcomposite_system/","tags":null,"title":"composite system"},{"categories":null,"contents":"compositional scene representation is the process of trying to represent a certain visual signal into its constituent parts.\nAim: unsupervised 
segmentation + representation\nthe model finds the most intuitive representations of the scene train segmentation and representation together Autoencoding segmentation! Segment =\u0026gt; Represent =\u0026gt; Resegment =\u0026gt; etc.\nGaussian Mixture Model???? over pixels: regularizes by taking KL Divergence between latent and predicted output, to force them to be similar.\nLoss: error in RECONSTRUCTION and KL-Divergence of latent space\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhscene_representation/\"\u003ecompositional scene representation\u003c/a\u003e is the process of trying to represent a certain visual signal into its constituent parts.\u003c/p\u003e\n\u003cp\u003eAim: unsupervised segmentation + representation\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe model finds the most intuitive representations of the scene\u003c/li\u003e\n\u003cli\u003etrain segmentation and representation together\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAutoencoding segmentation! Segment =\u0026gt; Represent =\u0026gt; Resegment =\u0026gt; etc.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgaussian_mixture_model/\"\u003eGaussian Mixture Model\u003c/a\u003e???? 
over pixels: regularizes by taking \u003ca href=\"/posts/kbhkl_divergence/\"\u003eKL Divergence\u003c/a\u003e between latent and predicted output, to force them to be similar.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eLoss: error in RECONSTRUCTION and KL-Divergence of latent space\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscene_representation/","tags":null,"title":"compositional scene representation"},{"categories":null,"contents":"Computational Biology is a the study of biology using computation.\nRather that starting from the properties, start with the end states or what properties it has; instead, we define the initial values based on the edges.\nConstructor theory: https://en.wikipedia.org/wiki/Constructor_theory?\nthe relationship between temperature and occurance of a uniform gas is actually right-skewed; the mean temperature in a uniform closed system will be higher than the median temperature molecules are not static: at best, molecules are static when frozen in place; yet, generally it is not in their nature to stay solidly in place; they just shuffle around but maintain the molecular sturucture If the energy level is higher, it will ignore various troughs\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcomputational_biology_index/\"\u003eComputational Biology\u003c/a\u003e is a the study of biology using computation.\u003c/p\u003e\n\u003cp\u003eRather that starting from the properties, start with the end states or what properties it has; instead, we define the initial values based on the edges.\u003c/p\u003e\n\u003cp\u003eConstructor theory: \u003ca href=\"https://en.wikipedia.org/wiki/Constructor_theory\"\u003ehttps://en.wikipedia.org/wiki/Constructor_theory\u003c/a\u003e?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe relationship between temperature and occurance of a uniform gas is actually right-skewed; the mean temperature in a uniform closed system will be higher than the median 
temperature\u003c/li\u003e\n\u003cli\u003emolecules are not static: at best, molecules are static when frozen in place; yet, generally it is not in their nature to stay solidly in place; they just shuffle around but maintain the molecular sturucture\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf the energy level is higher, it will ignore various troughs\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomputational_biology_index/","tags":null,"title":"Computational Biology Index"},{"categories":null,"contents":"bit A computer is built out of binary gates:\nSo, having voltage into \\(B\\) allows current to pass through between \\(S\\) and \\(D\\), it could be on/off.\nbyte Accumulation of \\(8\\) bits\nComputer memory is a large array of bytes. It is only BYTE ADDRESSABLE: you can\u0026rsquo;t address a bit in isolation.\nbases Generate, each base uses digits \\(0\\) to \\(base-1\\).\nWe prefix 0x to represent hexadecimal, and 0b to represent binary.\nbase 10 base 10 uses digits \\(0-9\\). Each place value represents to a certain power of \\(10\\): \\(10^{n}\\) at each place value \\(n\\), starting at \\(0\\).\nbase 2 base 2 uses digits \\(0-1\\). Each place value represents a certain power of \\(2\\): \\(2^{n}\\) at each place \\(n\\), starting at \\(0\\).\nThe leftmost (largest value) is considered most-significant bit, right most is the least-significant bit.\nconversion from base 10 to base 2 \u0026ldquo;What is \\(6\\) is base 2?\u0026rdquo;\nWhat\u0026rsquo;s the largest power of \\(2 \\leq 6\\)? Well, we have \\(2^{2}\\). Therefore, the first place value is \\(2\\), which is the third digit. now, we subtract the remainder, we now have \\(6-4=2\\) , which is \\(2^{1}\\) min and max of binary The maximum value could be one minus the extra place value. For instance, if you have \\(8\\) digits (i.e. 
7 place values), you would only be able to represent:\n\\begin{equation} 2^{8}-1 = 255 \\end{equation}\nmultiplying and dividing by base It works in the way you expect.\nbase 16 We can use base 16 essentially to divide base 2 numbers into groups of \\(4\\).\nEach quartet of bits can be converted separately\n\u0026ldquo;Which bit is missing\u0026rdquo; The way you can do conversion in your head more simply is to stare at a binary number in groups of \\(4\\), and see which missing bytes are there and subtract that much.\nnumerical representations unsigned integers Positive numbers and 0. A number is either \\(0\\) or some positive integer.\nThe range of is \\(2^{w}-1\\) where \\(w\\) is the number of bits, because we are cramming the entire number from \\(0\\) to \\(2^{w}-1\\).\nsigned integers Negative, positive, and \\(0\\).\na bad system The fact that \\(0\\) is signed is quite bad. And like adding negative numbers to positive number is very hard because you need another processor to figure out what the sign is.\ntwo\u0026rsquo;s complement See two\u0026rsquo;s complement.\nsizes of stuff (bytes)\nint: 4 float: 4 double: 8 char: 1 pointer: 8 (for 64 bit systems) short: 2 long: 8 overflow If you exceed the maximum value of bit representation, it rolls over to becoming negative. If you subtract one, you have to borrow from an imaginary\n","html":"\u003ch2 id=\"bit\"\u003ebit\u003c/h2\u003e\n\u003cp\u003eA computer is built out of binary gates:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-36-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo, having voltage into \\(B\\) allows current to pass through between \\(S\\) and \\(D\\), it could be on/off.\u003c/p\u003e\n\u003ch2 id=\"byte\"\u003ebyte\u003c/h2\u003e\n\u003cp\u003eAccumulation of \\(8\\) \u003ca href=\"#bit\"\u003ebit\u003c/a\u003es\u003c/p\u003e\n\u003cp\u003eComputer memory is a large array of \u003ca href=\"#byte\"\u003ebyte\u003c/a\u003es. 
It is only \u003cstrong\u003eBYTE ADDRESSABLE\u003c/strong\u003e: you can\u0026rsquo;t address a bit in isolation.\u003c/p\u003e\n\u003ch2 id=\"bases\"\u003ebases\u003c/h2\u003e\n\u003cp\u003eGenerate, each base uses digits \\(0\\) to \\(base-1\\).\u003c/p\u003e\n\u003cp\u003eWe prefix \u003ccode\u003e0x\u003c/code\u003e to represent hexadecimal, and \u003ccode\u003e0b\u003c/code\u003e to represent binary.\u003c/p\u003e\n\u003ch3 id=\"base-10\"\u003ebase 10\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#base-10\"\u003ebase 10\u003c/a\u003e uses digits \\(0-9\\). Each place value represents to a certain power of \\(10\\): \\(10^{n}\\) at each place value \\(n\\), starting at \\(0\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-40-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"base-2\"\u003ebase 2\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#base-2\"\u003ebase 2\u003c/a\u003e uses digits \\(0-1\\). Each place value represents a certain power of \\(2\\): \\(2^{n}\\) at each place \\(n\\), starting at \\(0\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-41-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe leftmost (largest value) is considered most-significant bit, right most is the least-significant bit.\u003c/p\u003e\n\u003ch4 id=\"conversion-from-base-10-to-base-2\"\u003econversion from base 10 to base 2\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;What is \\(6\\) is base 2?\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWhat\u0026rsquo;s the largest power of \\(2 \\leq 6\\)? Well, we have \\(2^{2}\\). Therefore, the first place value is \\(2\\), which is the third digit.\u003c/li\u003e\n\u003cli\u003enow, we subtract the remainder, we now have \\(6-4=2\\) , which is \\(2^{1}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"min-and-max-of-binary\"\u003emin and max of binary\u003c/h4\u003e\n\u003cp\u003eThe maximum value could be one minus the extra place value. 
For instance, if you have \\(8\\) digits (i.e. 7 place values), you would only be able to represent:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2^{8}-1 = 255\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"multiplying-and-dividing-by-base\"\u003emultiplying and dividing by base\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-48-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-49-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIt works in the way you expect.\u003c/p\u003e\n\u003ch3 id=\"base-16\"\u003ebase 16\u003c/h3\u003e\n\u003cp\u003eWe can use \u003ca href=\"#base-16\"\u003ebase 16\u003c/a\u003e essentially to divide \u003ca href=\"#base-2\"\u003ebase 2\u003c/a\u003e numbers into groups of \\(4\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-54-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eEach quartet of bits can be converted separately\u003c/p\u003e\n\u003ch3 id=\"which-bit-is-missing\"\u003e\u0026ldquo;Which bit is missing\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eThe way you can do conversion in your head more simply is to stare at a binary number in groups of \\(4\\), and see which missing bytes are there and subtract that much.\u003c/p\u003e\n\u003ch2 id=\"numerical-representations\"\u003enumerical representations\u003c/h2\u003e\n\u003ch3 id=\"unsigned-integers\"\u003eunsigned integers\u003c/h3\u003e\n\u003cp\u003ePositive numbers and 0. 
A number is either \\(0\\) or some positive integer.\u003c/p\u003e\n\u003cp\u003eThe range of is \\(2^{w}-1\\) where \\(w\\) is the number of bits, because we are cramming the entire number from \\(0\\) to \\(2^{w}-1\\).\u003c/p\u003e\n\u003ch3 id=\"signed-integers\"\u003esigned integers\u003c/h3\u003e\n\u003cp\u003eNegative, positive, and \\(0\\).\u003c/p\u003e\n\u003ch4 id=\"a-bad-system\"\u003ea bad system\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_11-13-22_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe fact that \\(0\\) is signed is quite bad. And like adding negative numbers to positive number is very hard because you need another processor to figure out what the sign is.\u003c/p\u003e\n\u003ch4 id=\"two-s-complement\"\u003etwo\u0026rsquo;s complement\u003c/h4\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement.\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"sizes-of-stuff\"\u003esizes of stuff\u003c/h3\u003e\n\u003cp\u003e(\u003ca href=\"#byte\"\u003ebyte\u003c/a\u003es)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-49-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eint: 4\u003c/li\u003e\n\u003cli\u003efloat: 4\u003c/li\u003e\n\u003cli\u003edouble: 8\u003c/li\u003e\n\u003cli\u003echar: 1\u003c/li\u003e\n\u003cli\u003epointer: 8 (for 64 bit systems)\u003c/li\u003e\n\u003cli\u003eshort: 2\u003c/li\u003e\n\u003cli\u003elong: 8\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"overflow\"\u003eoverflow\u003c/h2\u003e\n\u003cp\u003eIf you exceed the maximum value of bit representation, it rolls over to becoming negative. 
If you subtract one, you have to borrow from an imaginary\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbinary_number_system/","tags":null,"title":"computer number system"},{"categories":null,"contents":"Notes on CS 107, C, MIPS, and computational systems.\nLectures SU-CS107 SEP272023 SU-CS107 SEP292023 SU-CS107 OCT022023 SU-CS107 OCT032023 SU-CS107 OCT042023 SU-CS107 OCT062023 SU-CS107 OCT092023 SU-CS107 OCT112023 SU-CS107 OCT132023 SU-CS107 OCT182023 SU-CS107 OCT202023 SU-CS107 OCT232023 SU-CS107 OCT252023 SU-CS107 OCT272023 SU-CS107 NOV102023 SU-CS107 NOV132023 SU-CS107 DEC012023 Worksheets SU-CS107 Midterm Sheet ","html":"\u003cp\u003eNotes on CS 107, C, MIPS, and computational systems.\u003c/p\u003e\n\u003ch2 id=\"lectures\"\u003eLectures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_sep272023/\"\u003eSU-CS107 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_sep292023/\"\u003eSU-CS107 SEP292023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct022023/\"\u003eSU-CS107 OCT022023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct032023/\"\u003eSU-CS107 OCT032023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct042023/\"\u003eSU-CS107 OCT042023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct062023/\"\u003eSU-CS107 OCT062023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct092023/\"\u003eSU-CS107 OCT092023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct112023/\"\u003eSU-CS107 OCT112023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct132023/\"\u003eSU-CS107 OCT132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct182023/\"\u003eSU-CS107 OCT182023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs107_oct2023/\"\u003eSU-CS107 OCT202023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct232023/\"\u003eSU-CS107 OCT232023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct252023/\"\u003eSU-CS107 OCT252023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct272023/\"\u003eSU-CS107 OCT272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_nov102023/\"\u003eSU-CS107 NOV102023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_nov132023/\"\u003eSU-CS107 NOV132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_dec012023/\"\u003eSU-CS107 DEC012023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"worksheets\"\u003eWorksheets\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_midterm_sheet/\"\u003eSU-CS107 Midterm Sheet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomputer_systems_index/","tags":["index"],"title":"Computer Systems Index"},{"categories":null,"contents":"conceptual grammar is the proposed universal grammar which connects semantic primes. In theory, this grammar is universal across languages.\nThere are three main categories of conceptual grammars:\nCombinatorics (connecting one idea to another) Account of valancies? #what Propositional complementation (location \u0026ldquo;something that happen in this place\u0026rdquo; ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconceptual_grammar/\"\u003econceptual grammar\u003c/a\u003e is the proposed universal grammar which connects \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es. 
In theory, this grammar is universal across languages.\u003c/p\u003e\n\u003cp\u003eThere are three main categories of \u003ca href=\"/posts/kbhconceptual_grammar/\"\u003econceptual grammar\u003c/a\u003es:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCombinatorics (connecting one idea to another)\u003c/li\u003e\n\u003cli\u003eAccount of valancies? #what\u003c/li\u003e\n\u003cli\u003ePropositional complementation (location \u0026ldquo;something that happen in this place\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconceptual_grammar/","tags":null,"title":"conceptual grammar"},{"categories":null,"contents":"Current automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.Accepted to the 2022 SAI Computing Conference, to be published on Springer Nature\u0026rsquo;s Lecture Notes on Networks and Systems Current automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. 
Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.\n","html":"\u003cp\u003eCurrent automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.Accepted to the 2022 SAI Computing Conference, to be published on Springer Nature\u0026rsquo;s Lecture Notes on Networks and Systems Current automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. 
ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcondef_abstract/","tags":null,"title":"ConDef Abstract"},{"categories":null,"contents":"Say you have one continuous variable \\(X\\), and one discrete variable \\(Y\\), and you desire to express the probability of \\(X\\) conditioned upon \\(Y\\) using a gaussian model:\n\\begin{equation} p(x|y) = \\begin{cases} \\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{1} \\\\ \\dots \\\\ \\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{n} \\\\ \\end{cases} \\end{equation}\n","html":"\u003cp\u003eSay you have one continuous variable \\(X\\), and one discrete variable \\(Y\\), and you desire to express the probability of \\(X\\) conditioned upon \\(Y\\) using a \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003egaussian model\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x|y) = \\begin{cases}\n\\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{1} \\\\\n\\dots \\\\\n\\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{n} \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconditional_gaussian_models/","tags":null,"title":"conditional Gaussian model"},{"categories":null,"contents":"conditional plan is a POMDP representation technique. 
We can represent a conditional plan as a tree.\ntoy problem crying baby POMDP problem:\nactions: feed, ignore reward: if hungry, negative reward state: two states: is the baby hungry or not observation: noisy crying (she maybe crying because she\u0026rsquo;s genuinely hungry or crying just for kicks) formulate a conditional plan we can create a conditional plan by generating a exponential tree based on the observations. This is a policy which tells you what you should do given the sequence of observations you get, with no knowledge of the underlying state.\nWe call this plan \\(\\pi\\) (shock suprise). We define two notations:\n\\(\\pi()\\): the ACTION at the head of this tree (in this case, \u0026ldquo;ignore\u0026rdquo;) \\(\\pi(o)\\): the SUBTREE which is one-level below the first action. For instance, for both observations of the tree above, \\(\\pi(o)()\\) is ignore for both \\(o\\). conditional plan evaluation Assume we have a starting at some given true state \\(s\\). We can evaluate a conditional plan at that state by formulating:\n\\begin{equation} U^{\\pi} (s) = R(s, \\pi()) + \\gamma \\qty[\\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s, \\pi()) \\sum_{o} O(o|\\pi(), s\u0026rsquo;) U^{\\pi(o)}(s\u0026rsquo;)] \\end{equation}\nwhere, \\(\\pi()\\) is the action at the root node of the tree; and \\(\\pi(o)\\) is the subtree for subplan at observation \\(o\\); essentially, at each point where we evaluate \\(U\\), we move the root node forward and recalculate. If we run out of depth, the utility is \\(0\\) and hence the whole right term is \\(0\\).\nOf course this assumes we know what our initial state is. Which is lame. 
So now:\n\\begin{equation} U^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) \\end{equation}\nwhich will give us the utility of our policy given a belief about wher ewe are.\nso literally take our belief about the probability of us being in each initial state and calculate it for each of our initial states.\noptimal value function for POMDP \\begin{equation} U^{*}(b) = \\max_{\\pi} U^{\\pi}(b) \\end{equation}\nOf course, trying to actually do this is impossible because you have to iterate over all possible policies and then calculate every utility from them.\nThis is practically untenable, because the space of \\(\\pi\\) is wayyy too big. Hence, we turn to alpha vectors.\nSee also optimal value function for POMDP with alpha vector\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e is a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e representation technique. We can represent a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e as a tree.\u003c/p\u003e\n\u003ch2 id=\"toy-problem\"\u003etoy problem\u003c/h2\u003e\n\u003cp\u003ecrying baby \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e problem:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eactions\u003c/strong\u003e: feed, ignore\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ereward\u003c/strong\u003e: if hungry, negative reward\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003estate\u003c/strong\u003e: two states: is the baby hungry or not\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eobservation\u003c/strong\u003e: noisy crying (she maybe crying because she\u0026rsquo;s genuinely hungry or crying just for kicks)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"formulate-a-conditional-plan--kbhconditional-plan-dot-md\"\u003eformulate a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional 
plan\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ewe can create a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e by generating a exponential tree based on the \u003cstrong\u003eobservations\u003c/strong\u003e. This is a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e which tells you what you should do given the sequence of observations you get, with no knowledge of the underlying state.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-14_10-04-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe call this plan \\(\\pi\\) (shock suprise). We define two notations:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\pi()\\): the \u003cstrong\u003e\u003cstrong\u003eACTION\u003c/strong\u003e\u003c/strong\u003e at the head of this tree (in this case, \u0026ldquo;ignore\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003e\\(\\pi(o)\\): the \u003cstrong\u003e\u003cstrong\u003eSUBTREE\u003c/strong\u003e\u003c/strong\u003e which is one-level below the first action. For instance, for both observations of the tree above, \\(\\pi(o)()\\) is ignore for both \\(o\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"conditional-plan--kbhconditional-plan-dot-md--evaluation\"\u003e\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e evaluation\u003c/h2\u003e\n\u003cp\u003eAssume we have a starting at some given true state \\(s\\). 
We can evaluate a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e at that state by formulating:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi} (s) = R(s, \\pi()) + \\gamma \\qty[\\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s, \\pi()) \\sum_{o} O(o|\\pi(), s\u0026rsquo;) U^{\\pi(o)}(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\pi()\\) is the action at the root node of the tree; and \\(\\pi(o)\\) is the subtree for subplan at observation \\(o\\); essentially, at each point where we evaluate \\(U\\), we move the root node forward and recalculate. If we run out of depth, the utility is \\(0\\) and hence the whole right term is \\(0\\).\u003c/p\u003e\n\u003cp\u003eOf course this assumes we know what our initial state is. Which is lame. So now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich will give us the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of our policy given a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e about wher ewe are.\u003c/p\u003e\n\u003cp\u003eso literally take our belief about the probability of us being in each initial state and calculate it for each of our initial states.\u003c/p\u003e\n\u003ch2 id=\"optimal-value-function--kbhpolicy-dot-md--for-pomdp--kbhpartially-observable-markov-decision-process-dot-md\"\u003e\u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal value function\u003c/a\u003e for \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}(b) = \\max_{\\pi} U^{\\pi}(b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, trying to actually do this is impossible because you have to iterate over all possible policies and then calculate every utility from them.\u003c/p\u003e\n\u003cp\u003eThis is practically untenable, because the 
space of \\(\\pi\\) is wayyy too big. Hence, we turn to \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhalpha_vector/#id-a2dee193-65b1-47ed-8dbc-aa362b28b451-optimal-value-function-for-pomdp-with-id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector\"\u003eoptimal value function for POMDP with alpha vector\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconditional_plan/","tags":null,"title":"conditional plan"},{"categories":null,"contents":"There are many condition in the Great Depression caused\nby 1932, 1/4 had no work emigration exceeded immigration decrease in American birth increase of mental illness and suicide people create Hooverviles movies and radio became much more popular ","html":"\u003cp\u003eThere are many condition in the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e caused\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eby 1932, 1/4 had no work\u003c/li\u003e\n\u003cli\u003eemigration exceeded immigration\u003c/li\u003e\n\u003cli\u003edecrease in American birth\u003c/li\u003e\n\u003cli\u003eincrease of mental illness and suicide\u003c/li\u003e\n\u003cli\u003epeople create \u003ca href=\"/posts/kbhhooverviles/\"\u003eHooverviles\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003emovies and radio became much more popular\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconditions_in_the_great_depression/","tags":null,"title":"conditions in the Great Depression"},{"categories":null,"contents":"proportional confidence intervals We will measure a single stastistic from a large population, and call it the point estimate. 
This is usually denoted as \\(\\hat{p}\\).\nGiven a proportion \\(\\hat{p}\\) (\u0026ldquo;95% of sample), the range which would possibly contain it as part of its \\(2\\sigma\\) range is the \\(95\\%\\) confidence interval.\nTherefore, given a \\(\\hat{p}\\) the plausible interval for its confidence is:\n\\begin{equation} \\hat{p} \\pm z^* \\sqrt{\\frac{\\hat{p}(1-\\hat{p})}{n}} \\end{equation}\nwhere, \\(n\\) is the sample size, \\(\\hat{p}\\) is the point estimate, and \\(z*=1.96\\) is the critical value, the z-score denoting \\(95\\%\\) confidence (or any other desired confidence level).\nconditions for proportional confidence interval There are the conditions that make a proportional confidence interval work\ndistribution is normal \\(n\\hat{p}\\) and \\(n(1-\\hat{p})\\) are both \\(\u0026gt;10\\) we are sampling with replacement, or otherwise sampling \\(\u0026lt;10\\%\\) of population (otherwise, we need to apply a finite population correction value confidence intervals The expression is:\n\\begin{equation} \\bar{x} \\pm t^* \\frac{s}{\\sqrt{n}} \\end{equation}\nwhere \\(t*\\) is the \\(t\\) score of the desired power level with the correct degrees of freedom; \\(s\\) the sample standard deviation, \\(n\\) the sample size, and \\(\\har{x}\\) the mean.\n","html":"\u003ch2 id=\"proportional-confidence-intervals\"\u003eproportional confidence intervals\u003c/h2\u003e\n\u003cp\u003eWe will measure a single \u003ca href=\"/posts/kbhstastistic/\"\u003estastistic\u003c/a\u003e from a large population, and call it the \u003ca href=\"/posts/kbhpoint_estimate/\"\u003epoint estimate\u003c/a\u003e. 
This is usually denoted as \\(\\hat{p}\\).\u003c/p\u003e\n\u003cp\u003eGiven a proportion \\(\\hat{p}\\) (\u0026ldquo;95% of sample), the range which would possibly contain it as part of its \\(2\\sigma\\) range is the \\(95\\%\\) confidence interval.\u003c/p\u003e\n\u003cp\u003eTherefore, given a \\(\\hat{p}\\) the plausible interval for its confidence is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{p} \\pm z^* \\sqrt{\\frac{\\hat{p}(1-\\hat{p})}{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n\\) is the sample size, \\(\\hat{p}\\) is the \u003ca href=\"/posts/kbhpoint_estimate/\"\u003epoint estimate\u003c/a\u003e, and \\(z*=1.96\\) is the \u003ca href=\"/posts/kbhcritical_value/\"\u003ecritical value\u003c/a\u003e, the \u003ca href=\"/posts/kbhz_score/\"\u003ez-score\u003c/a\u003e denoting \\(95\\%\\) confidence (or any other desired confidence level).\u003c/p\u003e\n\u003ch2 id=\"conditions-for-proportional-confidence-interval\"\u003econditions for proportional confidence interval\u003c/h2\u003e\n\u003cp\u003eThere are the conditions that make a \u003ca href=\"#proportional-confidence-intervals\"\u003eproportional confidence interval\u003c/a\u003e work\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edistribution is normal\u003c/li\u003e\n\u003cli\u003e\\(n\\hat{p}\\) and \\(n(1-\\hat{p})\\) are both \\(\u0026gt;10\\)\u003c/li\u003e\n\u003cli\u003ewe are sampling with replacement, or otherwise sampling \\(\u0026lt;10\\%\\) of population (otherwise, we need to apply a finite population correction\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"value-confidence-intervals\"\u003evalue confidence intervals\u003c/h2\u003e\n\u003cp\u003eThe expression is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{x} \\pm t^* \\frac{s}{\\sqrt{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(t*\\) is the \\(t\\) score of the desired power level with the correct degrees of freedom; \\(s\\) the sample standard deviation, \\(n\\) the sample size, and 
\\(\\har{x}\\) the mean.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconfidence_interval/","tags":null,"title":"confidence interval"},{"categories":null,"contents":"conjugation is the process of building a similar matricies.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconjugation/\"\u003econjugation\u003c/a\u003e is the process of building a \u003ca href=\"/posts/kbheigenvalue/#similar-matrices\"\u003esimilar\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconjugation/","tags":null,"title":"conjugation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhconnectionism/","tags":null,"title":"connectionism"},{"categories":null,"contents":"constructor theory deals with \u0026ldquo;constructors\u0026rdquo;, a general type of computer.\nconstructor theory can give us a theory of the universal quantum constructor by expanding upon quantum information theory. It allows us to unify quantum and classical information by simply defining operations in terms of counterfactuals exclusively: that a space is entirely defined by what\u0026rsquo;s possible and what\u0026rsquo;s not possible.\nAccording to constructor theory, fundamental laws are not dynamical laws instead are boundary conditions. 
We can take the boundary conditions to form the most general set of initial conditions.\nyou can conjecture a set of laws is fully complete at some point, you will find something that hits the bounds then you revise the theory ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e deals with \u0026ldquo;\u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003es\u0026rdquo;, a general type of computer.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e can give us a theory of the \u003ca href=\"/posts/kbhuniversal_quantum_constructor/\"\u003euniversal quantum constructor\u003c/a\u003e by expanding upon \u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e. It allows us to unify quantum and classical information by simply defining operations in terms of \u003ca href=\"/posts/kbhcounterfactual/\"\u003ecounterfactual\u003c/a\u003es exclusively: that a space is entirely defined by what\u0026rsquo;s possible and what\u0026rsquo;s not possible.\u003c/p\u003e\n\u003cp\u003eAccording to \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003e theory, fundamental laws are not dynamical laws instead are boundary conditions. 
We can take the boundary conditions to form the most general set of initial conditions.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eyou can conjecture a set of laws is fully complete\u003c/li\u003e\n\u003cli\u003eat some point, you will find something that hits the bounds\u003c/li\u003e\n\u003cli\u003ethen you revise the theory\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconstructor_theory/","tags":null,"title":"constructor"},{"categories":null,"contents":"Used: IBM/OS360\ncontiguous allocation puts the files and metadata together, and implements a Explicit Free List Allocator across the file.\nbenefits simple problems external fragmentation: little pockets of data is everywhere editing: hard to grow files ","html":"\u003cp\u003eUsed: IBM/OS360\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcontiguous_allocation/\"\u003econtiguous allocation\u003c/a\u003e puts the files and metadata together, and implements a \u003ca href=\"/posts/kbhheap_allocator/#explicit-free-list-allocator\"\u003eExplicit Free List Allocator\u003c/a\u003e across the file.\u003c/p\u003e\n\u003ch2 id=\"benefits\"\u003ebenefits\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esimple\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"problems\"\u003eproblems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eexternal fragmentation: little pockets of data is everywhere\u003c/li\u003e\n\u003cli\u003eediting: hard to grow files\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontiguous_allocation/","tags":null,"title":"contiguous allocation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcontinuity_correct/","tags":null,"title":"continuity correct"},{"categories":null,"contents":"Because we want to including rounding during continuity correction to account for things discretized to certain values.\nDiscrete Continuous P(X = 6) P( 5.5 \u0026lt;= X \u0026lt;= 6.5) P(X \u0026gt;= 6) P (X \u0026gt;= 
5.5) P(X \u0026gt; 6) P (X \u0026gt;= 6.5) basically \u0026ldquo;less than\n","html":"\u003cp\u003eBecause we want to including rounding during \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e to account for things discretized to certain values.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDiscrete\u003c/th\u003e\n\u003cth\u003eContinuous\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(X = 6)\u003c/td\u003e\n\u003ctd\u003eP( 5.5 \u0026lt;= X \u0026lt;= 6.5)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(X \u0026gt;= 6)\u003c/td\u003e\n\u003ctd\u003eP (X \u0026gt;= 5.5)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(X \u0026gt; 6)\u003c/td\u003e\n\u003ctd\u003eP (X \u0026gt;= 6.5)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ebasically \u0026ldquo;less than\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontinuity_correction/","tags":null,"title":"continuity correction"},{"categories":null,"contents":"This is a continuous distribution for which the probability can be quantified as:\n\\begin{equation} p(x) \\dd{x} \\end{equation}\nYou will note that, at any given exact point, the probability is \\(\\lim_{\\dd{x} \\to 0} p(x)\\dd{x} = 0\\). However, to get the actual probability, we take an integral over some range:\n\\begin{equation} \\int_{-\\infty}^{\\infty} p(x) \\dd{x} = 1 \\end{equation}\nSee also cumulative distribution function which represents the chance of something happening up to a threshold.\n","html":"\u003cp\u003eThis is a continuous distribution for which the probability can be quantified as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that, at any given exact point, the probability is \\(\\lim_{\\dd{x} \\to 0} p(x)\\dd{x} = 0\\). 
However, to get the actual probability, we take an integral over some range:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{-\\infty}^{\\infty} p(x) \\dd{x} = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003ecumulative distribution function\u003c/a\u003e which represents the chance of something happening up to a threshold.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontinuous_distribution/","tags":null,"title":"continuous distribution"},{"categories":null,"contents":"a controller is a that maintains its own state.\nconstituents \\(X\\): a set of nodes (hidden, internal states) \\(\\Psi(a|x)\\): probability of taking an action \\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states requirements Controllers are nice because we:\ndon\u0026rsquo;t have to maintain a belief over time: we need an initial belief, and then we can create beliefs as we\u0026rsquo;d like without much worry controllers can be made shorter than conditional plans additional information finite state controller A finite state controller has a finite amount of hidden internal state.\nConsider the crying baby problem. We will declare two internal state:\n\\begin{equation} x_1, x_2 \\end{equation}\nGiven our observations and our internal states, we can declare transitions and an action probability \\(\\Psi\\):\nWe essentially declare a policy vis a vi your observations. 
It can be a sequence, for instance, if we want to declare a policy whereby if you cry twice then you feed, you can declare:\nfinite state controller evaluation \\begin{equation} U(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ] \\end{equation}\nwhich is a conditional plan evaluation but we know even litle\nand, to construct alpha vectors:\n\\begin{equation} \\alpha_{x} = \\qty[U(x, s_1), \\dots, U(x, s_{n})] \\end{equation}\nwe just make one alpha vector per node. So the entire plan is represented as usual by \\(\\Gamma\\) a set of alpha vectors. And yes you can alpha vector pruning.\n\\begin{align} U(x,b) = b^{\\top} \\alpha_{x} \\end{align}\nnode we want to start at:\n\\begin{equation} X^{*} = \\arg\\max_{x} U(x,b) \\end{equation}\nsolving for \\(\\Psi\\) and \\(\\eta\\) policy iteration: incrementally add nodes and evaluate it nonlinear programming: this can be a nonlinear optimization problem controller gradient ascent ","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e is a that maintains its own state.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\): a set of nodes (hidden, internal states)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(a|x)\\): probability of taking an action\u003c/li\u003e\n\u003cli\u003e\\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eControllers are nice because we:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003edon\u0026rsquo;t have to maintain a belief over time: we need an initial belief, and then we can create beliefs as we\u0026rsquo;d like without much worry\u003c/li\u003e\n\u003cli\u003econtrollers can be made shorter 
than \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"finite-state-controller\"\u003efinite state controller\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#finite-state-controller\"\u003efinite state controller\u003c/a\u003e has a finite amount of hidden internal state.\u003c/p\u003e\n\u003cp\u003eConsider the crying baby problem. We will declare two internal state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1, x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven our observations and our internal states, we can declare transitions and an action probability \\(\\Psi\\):\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-30_09-07-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe essentially declare a policy vis a vi your observations. It can be a sequence, for instance, if we want to declare a policy whereby if you cry twice then you feed, you can declare:\u003c/p\u003e\n\u003ch3 id=\"finite-state-controller-evaluation\"\u003efinite state controller evaluation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nU(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e but we know even litle\u003c/p\u003e\n\u003cp\u003eand, to construct \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{x} = \\qty[U(x, s_1), \\dots, U(x, s_{n})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe just make one alpha vector per node. 
So the entire plan is represented as usual by \\(\\Gamma\\) a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es. And yes you can \u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU(x,b) = b^{\\top} \\alpha_{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003enode we want to start at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX^{*} = \\arg\\max_{x} U(x,b)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"solving-for-psi-and-eta\"\u003esolving for \\(\\Psi\\) and \\(\\eta\\)\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e: incrementally add nodes and evaluate it\u003c/li\u003e\n\u003cli\u003enonlinear programming: this can be a nonlinear optimization problem\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller_gradient_ascent/\"\u003econtroller gradient ascent\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontroller/","tags":null,"title":"controller"},{"categories":null,"contents":"We aim to solve for a fixed-sized controller based policy using gradient ascent. 
This is the unconstrained variation on PGA.\nRecall that we seek to optimize, for some initial node \\(x^{(1)}\\) and belief-state \\(b\\), we want to find the distribution of actions and transitions \\(\\Psi\\) and \\(\\eta\\), which maximizes the utility we can obtain based on initial state:\n\\begin{equation} \\sum_{s}b(s) U(x^{(1)}, s) \\end{equation}\nRecall that \\(U(x,s)\\) is given by:\n\\begin{equation} U(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ] \\end{equation}\nwhere\n\\(X\\): a set of nodes (hidden, internal states) \\(\\Psi(a|x)\\): probability of taking an action \\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states Let\u0026rsquo;s first develop some tools which can help us linearize the objective equation given above.\nWe can define a transition map (matrix) between any two controller-states (latent + state) as:\n\\begin{equation} T_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta (x\u0026rsquo;|x,a,o) \\end{equation}\nwhere \\(\\bold{T}_{\\theta} \\in \\mathbb{R}^{|X \\times S| \\times |X \\times S|}\\) .\nFurther, we can parameterize reward over \\(R(s,a)\\) for:\n\\begin{equation} R_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a) \\end{equation}\nwhere \\(R_{\\theta}\\in \\mathbb{R}^{|X \\times S|}\\)\n(i.e. 
the reward of being in each controller state is the expected reward over all possible actions at that controller state).\nAnd now, recall the procedure for Bellman Expectation Equation; having formulated the transition and reward at any given controller state \\(X \\times S\\), we can write:\n\\begin{equation} \\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T_{\\theta}}\\bold{u}_{\\theta} \\end{equation}\nnote that this vector \\(\\bold{U} \\in \\mathbb{R}^{|X \\times S}}\\). Therefore, to write out an \u0026ldquo;utility of belief\u0026rdquo; (prev. \\(b^{\\top} U\\) where \\(U \\in \\alpha\\) some alpha vector over states), we have to redefine a:\n\\begin{equation} \\bold{\\beta}_{xs}, \\text{where} \\begin{cases} \\bold{\\beta}_{xs} = b(s), if\\ x = x^{(1)} \\\\ 0 \\end{cases} \\end{equation}\nFinally, then we can rewrite the objective as:\n\\begin{equation} \\beta^{\\top} \\bold{U}_{\\theta} \\end{equation}\nwhere we seek to use gradient ascend to maximize \\(\\bold{U}_{\\theta}\\).\nWriting this out, we have:\n\\begin{equation} \\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T}_{\\theta} \\bold{u}_{\\theta} \\end{equation}\nwhich gives:\n\\begin{equation} \\bold{u}_{\\theta} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta} \\end{equation}\nLet\u0026rsquo;s call \\(\\bold{Z} = (\\bold{I}-\\gamma \\bold{T}_{\\theta})\\), meaning:\n\\begin{equation} \\bold{u}_{\\theta} = \\bold{Z}^{-1} \\bold{r}_{\\theta} \\end{equation}\nFinally, to gradient ascent, we better get the gradient. So\u0026hellip; its CHAIN RULE TIME\nRecall that \\(\\theta\\) at this point refers to both \\(\\eta\\) and \\(\\Psi\\), so we need to take a partial against each of those variables. 
After doing copious calculus in Alg4DM pp 485, we arrive at the update expressions.\n","html":"\u003cp\u003eWe aim to solve for a fixed-sized \u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e based policy using \u003ca href=\"/posts/kbhargmax/#gradient-ascent\"\u003egradient ascent\u003c/a\u003e. This is the \u003cstrong\u003eunconstrained\u003c/strong\u003e variation on \u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eRecall that we seek to optimize, for some initial node \\(x^{(1)}\\) and belief-state \\(b\\), we want to find the distribution of actions and transitions \\(\\Psi\\) and \\(\\eta\\), which maximizes the utility we can obtain based on initial state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{s}b(s) U(x^{(1)}, s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(U(x,s)\\) is given by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\): a set of nodes (hidden, internal states)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(a|x)\\): probability of taking an action\u003c/li\u003e\n\u003cli\u003e\\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet\u0026rsquo;s first develop some tools which can help us linearize the objective equation given above.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe can define a transition map (matrix) between any two controller-states (latent + state) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta 
(x\u0026rsquo;|x,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bold{T}_{\\theta} \\in \\mathbb{R}^{|X \\times S| \\times |X \\times S|}\\) .\u003c/p\u003e\n\u003cp\u003eFurther, we can parameterize reward over \\(R(s,a)\\) for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(R_{\\theta}\\in \\mathbb{R}^{|X \\times S|}\\)\u003c/p\u003e\n\u003cp\u003e(i.e. the reward of being in each controller state is the expected reward over all possible actions at that controller state).\u003c/p\u003e\n\u003cp\u003eAnd now, recall the procedure for \u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e; having formulated the transition and reward at any given controller state \\(X \\times S\\), we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T_{\\theta}}\\bold{u}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote that this vector \\(\\bold{U} \\in \\mathbb{R}^{|X \\times S}}\\). Therefore, to write out an \u0026ldquo;utility of belief\u0026rdquo; (prev. 
\\(b^{\\top} U\\) where \\(U \\in \\alpha\\) some alpha vector over \u003cstrong\u003estates\u003c/strong\u003e), we have to redefine a:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{\\beta}_{xs}, \\text{where} \\begin{cases}\n\\bold{\\beta}_{xs} = b(s), if\\ x = x^{(1)} \\\\\n0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eFinally, then we can rewrite the objective as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta^{\\top} \\bold{U}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere we seek to use gradient ascend to maximize \\(\\bold{U}_{\\theta}\\).\u003c/p\u003e\n\u003cp\u003eWriting this out, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T}_{\\theta} \\bold{u}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s call \\(\\bold{Z} = (\\bold{I}-\\gamma \\bold{T}_{\\theta})\\), meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = \\bold{Z}^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, to \u003ca href=\"/posts/kbhargmax/#gradient-ascent\"\u003egradient ascent\u003c/a\u003e, we better get the gradient. So\u0026hellip; its \u003cstrong\u003e\u003cstrong\u003eCHAIN RULE TIME\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-13_12-16-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that \\(\\theta\\) at this point refers to both \\(\\eta\\) and \\(\\Psi\\), so we need to take a partial against each of those variables. 
After doing copious calculus in Alg4DM pp 485, we arrive at the update expressions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontroller_gradient_ascent/","tags":null,"title":"controller gradient ascent"},{"categories":null,"contents":"For \\(f,g : \\mathbb{R} \\to \\mathbb{C}\\), we have:\n\\begin{equation} (f * g)(x) = \\int_{\\mathbb{R}} f(x-y) g(y) \\dd{y} = \\int_{\\mathbb{R}} f(y) g(x-y) \\dd{y} \\end{equation}\nproperties of convolution \\((g * f) (x) = (f * g) (x)\\) \\(\\mathcal{F}(f * g) = \\mathcal{F}(f)\\mathcal{F}(g)\\) \\(\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\\) \\((f * g)\u0026rsquo; = f * g\u0026rsquo; = f\u0026rsquo; * g\\) \\(\\lambda ( f * g ) = (\\lambda f) * g = f * (\\lambda g)\\) =\u0026gt; \u0026ldquo;in a convolution, if ANY ONE of the two functions are Differentiable, both are Differentiable.\u0026rdquo;; think about smoothing a jagged function using a Gaussian.\nexamples rolling average \\begin{align} U_{L}(x) = \\begin{cases} L, |x| \\leq \\frac{1}{2L} \\\\ 0, |x| \u0026gt; \\frac{1}{2L} \\end{cases} \\end{align}\nThe width of the area for which the expression is positive is \\(2L\\), and the height is \\(L\\), so the area (integral) is \\(1\\).\nSo now let\u0026rsquo;s consider:\n\\begin{equation} (f * U_{L})(x) \\end{equation}\nwhich is:\n\\begin{equation} \\int_{\\mathbb{R}} f(x-y) U_{L}(y) \\dd{y} \\end{equation}\nmeaning:\n\\begin{equation} L \\int_{-\\frac{1}{2}L}^{\\frac{1}{2}L} f(x-y) \\dd{y} \\end{equation}\nYou will note that we are sweeping something of window width \\(\\frac{1}{L}\\) over the function, which averages the function \\(f\\) over the window \\(L\\).\nSo convolving with this function essentially smoothes function over a window \\(\\frac{1}{L}\\); as \\(L\\) decreases, we are averaging over a greater interval; vise versa.\nsignal compression Write your signal in terms of its Fourier transform:\n\\begin{equation} f(t) = \\frac{1}{2\\pi} \\int_{-\\infty}^{\\infty} e^{it\\lambda} 
\\hat{f}(\\lambda) \\dd{\\lambda} \\end{equation}\nWe can write:\n\\begin{equation} \\hat{f}(\\lambda) \\cdot 1_{J}(\\lambda) \\end{equation}\nwhose inverse Fourier transform would be:\n\\begin{equation} f(x) * \\mathcal{F}\\qty(1_{J}(\\lambda)) \\end{equation}\nmotivation What if we want the Fourier Transform of \\(\\hat{f}(\\lambda)\\hat{g}(\\lambda)\\) in terms of one expression?\nConsider:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\qty(\\int_{\\mathbb{R}} f(x) e^{-i\\lambda x} \\dd{x}) \\qty(\\int_{\\mathbb{R}} g(y) e^{-i\\lambda y} \\dd{y}) \\end{equation}\nNotice that because neither integral have dependence on the other, we can actually:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\int_{\\mathbb{R}} f(x) g(y) e^{-i\\lambda (x+y)} \\dd{x}\\dd{y} \\end{equation}\nwriting this as a change of variable:\n\\begin{equation} \\begin{cases} u = x+y \\\\ x = u-y \\\\ \\dd{x} = \\dd{u} \\end{cases} \\end{equation}\nwe can write:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) e^{-i\\lambda (u)} \\dd{u})\\dd{y} \\end{equation}\nConsidering they the integrands are isolated and decaying, we can swap them, pulling out also \\(e^{-i\\lambda(u)}\\) because it has no \\(y\\) dependence:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) \\dd{y})e^{-i\\lambda (u)} \\dd{u} \\end{equation}\nNotice! The inner part is a function, and the outer part is a Fourier transform! 
This is similar to a convolution (probability)!\nMeaning:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\mathcal{F}(f * g) = \\mathcal{F}(f) \\mathcal{F}(g) \\end{equation}\nOperating on the inverse, we can obtain a similar result:\n\\begin{equation} \\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g \\end{equation}\n","html":"\u003cp\u003eFor \\(f,g : \\mathbb{R} \\to \\mathbb{C}\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(f * g)(x) = \\int_{\\mathbb{R}} f(x-y) g(y) \\dd{y} = \\int_{\\mathbb{R}} f(y) g(x-y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"properties-of-convolution\"\u003eproperties of convolution\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\((g * f) (x) = (f * g) (x)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{F}(f * g) = \\mathcal{F}(f)\\mathcal{F}(g)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\\)\u003c/li\u003e\n\u003cli\u003e\\((f * g)\u0026rsquo; = f * g\u0026rsquo; = f\u0026rsquo; * g\\)\u003c/li\u003e\n\u003cli\u003e\\(\\lambda ( f * g ) = (\\lambda f) * g = f * (\\lambda g)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e=\u0026gt; \u0026ldquo;in a convolution, if \u003cstrong\u003eANY ONE\u003c/strong\u003e of the two functions are \u003ca href=\"/posts/kbhuniqueness_and_existance/#differentiable\"\u003eDifferentiable\u003c/a\u003e, both are \u003ca href=\"/posts/kbhuniqueness_and_existance/#differentiable\"\u003eDifferentiable\u003c/a\u003e.\u0026rdquo;; think about smoothing a jagged function using a \u003ca href=\"/posts/kbhgaussian/\"\u003eGaussian\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"examples\"\u003eexamples\u003c/h2\u003e\n\u003ch3 id=\"rolling-average\"\u003erolling average\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\nU_{L}(x) = \\begin{cases}\nL, |x| \\leq \\frac{1}{2L} \\\\\n0, |x| \u0026gt; \\frac{1}{2L}\n\\end{cases}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThe width of the area for which the expression is positive is \\(2L\\), and the height is 
\\(L\\), so the area (integral) is \\(1\\).\u003c/p\u003e\n\u003cp\u003eSo now let\u0026rsquo;s consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(f * U_{L})(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\mathbb{R}} f(x-y) U_{L}(y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL \\int_{-\\frac{1}{2}L}^{\\frac{1}{2}L} f(x-y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that we are sweeping something of window width \\(\\frac{1}{L}\\) over the function, which averages the function \\(f\\) over the window \\(L\\).\u003c/p\u003e\n\u003cp\u003eSo convolving with this function essentially smoothes function over a window \\(\\frac{1}{L}\\); as \\(L\\) decreases, we are averaging over a greater interval; vise versa.\u003c/p\u003e\n\u003ch3 id=\"signal-compression\"\u003esignal compression\u003c/h3\u003e\n\u003cp\u003eWrite your signal in terms of its Fourier transform:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(t) = \\frac{1}{2\\pi} \\int_{-\\infty}^{\\infty} e^{it\\lambda} \\hat{f}(\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\cdot 1_{J}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhose inverse Fourier transform would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) * \\mathcal{F}\\qty(1_{J}(\\lambda))\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003emotivation\u003c/h2\u003e\n\u003cp\u003eWhat if we want the \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e of \\(\\hat{f}(\\lambda)\\hat{g}(\\lambda)\\) in terms of one expression?\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\qty(\\int_{\\mathbb{R}} f(x) e^{-i\\lambda x} \\dd{x}) \\qty(\\int_{\\mathbb{R}} g(y) 
e^{-i\\lambda y} \\dd{y})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotice that because neither integral have dependence on the other, we can actually:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\int_{\\mathbb{R}} f(x) g(y) e^{-i\\lambda (x+y)} \\dd{x}\\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewriting this as a change of variable:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nu = x+y \\\\\nx = u-y \\\\\n\\dd{x} = \\dd{u}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) e^{-i\\lambda (u)} \\dd{u})\\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsidering they the integrands are isolated and decaying, we can swap them, pulling out also \\(e^{-i\\lambda(u)}\\) because it has no \\(y\\) dependence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) \\dd{y})e^{-i\\lambda (u)} \\dd{u}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotice! The inner part is a function, and the outer part is a Fourier transform! 
This is similar to a \u003ca href=\"/posts/kbhrandom_variables/#adding-random-variables\"\u003econvolution (probability)\u003c/a\u003e!\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\mathcal{F}(f * g) = \\mathcal{F}(f) \\mathcal{F}(g)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOperating on the inverse, we can obtain a similar result:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconvolution/","tags":null,"title":"convolution"},{"categories":null,"contents":"Cookie Theft is a Discourse-Completion Task that involves describing the following picture:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhctp/\"\u003eCookie Theft\u003c/a\u003e is a \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e that involves describing the following picture:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-28-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhctp/","tags":null,"title":"Cookie Theft Picture Description Task"},{"categories":null,"contents":"Pythagorean Theorem \\begin{equation} \\|u + v\\|^{2} = \\|u \\|^{2} + \\|v\\|^{2} \\end{equation}\nif \\(v\\) and \\(u\\) are orthogonal vectors.\nProof:\nAn Useful Orthogonal Decomposition Suppose we have a vector \\(u\\), and another \\(v\\), both belonging to \\(V\\). 
We can decompose \\(u\\) as a sum of two vectors given a choice of \\(v\\): one a scalar multiple of \\(v\\), and another orthogonal to \\(v\\).\nThat is: we can write \\(u = cv + w\\), where \\(c \\in \\mathbb{F}\\) and \\(w \\in V\\), such that \\(\\langle w,v \\rangle = 0\\).\nHere\u0026rsquo;s how:\nFor nonzero \\(v\\)\n\\begin{equation} c = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\end{equation}\nand\n\\begin{equation} w = (u - cv) \\end{equation}\nWe can show \\(\\langle w,v \\rangle=0\\) as follows:\n\\begin{align} \\langle (u-cv), v \\rangle \u0026amp;= \\langle u,v \\rangle - \\langle cv, v \\rangle \\\\ \u0026amp;= \\langle u,v \\rangle - c \\langle v,v \\rangle \\\\ \u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\langle v,v \\rangle \\\\ \u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\|v\\|^{2} \\\\ \u0026amp;= 0 \\end{align}\nCauchy-Schwartz Inequality \\begin{equation} | \\langle u,v \\rangle | \\leq \\|u\\| \\|v\\| \\end{equation}\nand the expression is an equality of each vector \\(u,v\\) is the scalar multiple of the other.\nProof:\nPick some set of \\(v\\) and \\(u\\) and write out the orthogonal decomposition we had outlined above:\n\\begin{equation} u = cv + w \\end{equation}\nNow, recall \\(c = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}}\\). We now apply Pythagorean Theorem:\nNow we just multiply \\(\\|v\\|^{2}\\) to both sides and take square roots.\nIf \\(w = 0\\) (i.e. 
\\(v\\) and \\(w\\) have no othogonal component, and therefore they are scalar multiples), then this would turn into an equality as desired.\ntriangle inequality (vectors) See also triangle inequality (complexes)\n\u0026ldquo;The length of \\(u+v\\) is always less than the length of each \\(u\\) plus \\(v\\); the third side length is always shorter than the sum of both other sides\u0026rsquo; lengths.\u0026rdquo;\n\\begin{equation} \\|u\\| + \\|v\\| \\geq \\|u+v\\| \\end{equation}\nNotably, the two lines between \\(2|\\langle u,v \\rangle|\\) and \\(2 \\|u\\| \\|v\\|\\) holds because of the Cauchy-Schwartz Inequality.\nThis inequality becomes an equality if \\(u\\) and \\(v\\) are a non-negative multiple of the other.\nparallelogram equality The sums of squared side lengths of a parallelogram is equal to the sum of the squares of the length of diagonals:\n\\begin{equation} \\|u + v\\|^{2} + \\|u-v\\|^{2} = 2(\\|u\\|^{2} + \\|v\\|^{2}) \\end{equation}\n","html":"\u003ch2 id=\"pythagorean-theorem\"\u003ePythagorean Theorem\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\|u + v\\|^{2} = \\|u \\|^{2} + \\|v\\|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(v\\) and \\(u\\) are orthogonal vectors.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_22-53-17_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"an-useful-orthogonal-decomposition\"\u003eAn Useful Orthogonal Decomposition\u003c/h2\u003e\n\u003cp\u003eSuppose we have a vector \\(u\\), and another \\(v\\), both belonging to \\(V\\). 
We can decompose \\(u\\) as a sum of two vectors given a choice of \\(v\\): one a scalar multiple of \\(v\\), and another orthogonal to \\(v\\).\u003c/p\u003e\n\u003cp\u003eThat is: we can write \\(u = cv + w\\), where \\(c \\in \\mathbb{F}\\) and \\(w \\in V\\), such that \\(\\langle w,v \\rangle = 0\\).\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s how:\u003c/p\u003e\n\u003cp\u003eFor nonzero \\(v\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw = (u - cv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can show \\(\\langle w,v \\rangle=0\\) as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\langle (u-cv), v \\rangle \u0026amp;= \\langle u,v \\rangle - \\langle cv, v \\rangle \\\\\n\u0026amp;= \\langle u,v \\rangle - c \\langle v,v \\rangle \\\\\n\u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\langle v,v \\rangle \\\\\n\u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\|v\\|^{2} \\\\\n\u0026amp;= 0\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n| \\langle u,v \\rangle | \\leq \\|u\\| \\|v\\|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand the expression is an equality of each vector \\(u,v\\) is the scalar multiple of the other.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003ePick some set of \\(v\\) and \\(u\\) and write out the orthogonal decomposition we had outlined above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = cv + w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall \\(c = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}}\\). 
We now apply \u003ca href=\"#pythagorean-theorem\"\u003ePythagorean Theorem\u003c/a\u003e:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-13-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNow we just multiply \\(\\|v\\|^{2}\\) to both sides and take square roots.\u003c/p\u003e\n\u003cp\u003eIf \\(w = 0\\) (i.e. \\(v\\) and \\(w\\) have no othogonal component, and therefore they are scalar multiples), then this would turn into an equality as desired.\u003c/p\u003e\n\u003ch2 id=\"triangle-inequality--vectors\"\u003etriangle inequality (vectors)\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhthoughts_on_axler_4/#triangle-inequality--complexes\"\u003etriangle inequality (complexes)\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-23-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u0026ldquo;The length of \\(u+v\\) is always less than the length of each \\(u\\) plus \\(v\\); the third side length is always shorter than the sum of both other sides\u0026rsquo; lengths.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|u\\| + \\|v\\| \\geq \\|u+v\\|\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-26-02_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNotably, the two lines between \\(2|\\langle u,v \\rangle|\\) and \\(2 \\|u\\| \\|v\\|\\) holds because of the \u003ca href=\"#cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis inequality becomes an equality if \\(u\\) and \\(v\\) are a \u003cstrong\u003enon-negative\u003c/strong\u003e multiple of the other.\u003c/p\u003e\n\u003ch2 id=\"parallelogram-equality\"\u003eparallelogram equality\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-32-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe sums of squared side lengths of a parallelogram is equal to the sum of 
the squares of the length of diagonals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|u + v\\|^{2} + \\|u-v\\|^{2} = 2(\\|u\\|^{2} + \\|v\\|^{2})\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-33-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcornucopia_of_analysis/","tags":null,"title":"cornucopia of analysis"},{"categories":null,"contents":"usually we use \\(N\\) to denote the number of tokens, and \\(V\\) the \u0026ldquo;vocab\u0026rdquo; or set of word types.\nCorpora is usually considered in context of:\nspecific writers at specific time for specific varieties of specific languages for a specific function Particularly hard: code switching, gender, demographics, variety, etc.\nHerdan\u0026rsquo;s Law \\begin{equation} |V| = kN^{\\beta} \\end{equation}\nwith \\(\\beta\\) being a constant between \\(0.67 \u0026lt; \\beta \u0026lt; 0.75\\).\nThe vocab size is roughly proportional to the number of tokens.\n","html":"\u003cp\u003eusually we use \\(N\\) to denote the number of \u003ca href=\"/posts/kbhtokenization/\"\u003etoken\u003c/a\u003es, and \\(V\\) the \u0026ldquo;vocab\u0026rdquo; or set of \u003ca href=\"/posts/kbhtokenization/\"\u003eword type\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eCorpora is usually considered in context of:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003especific writers\u003c/li\u003e\n\u003cli\u003eat specific time\u003c/li\u003e\n\u003cli\u003efor specific varieties\u003c/li\u003e\n\u003cli\u003eof specific languages\u003c/li\u003e\n\u003cli\u003efor a specific function\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eParticularly hard: code switching, gender, demographics, variety, etc.\u003c/p\u003e\n\u003ch2 id=\"herdan-s-law\"\u003eHerdan\u0026rsquo;s Law\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n|V| = kN^{\\beta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(\\beta\\) being a constant between \\(0.67 \u0026lt; \\beta 
\u0026lt; 0.75\\).\u003c/p\u003e\n\u003cp\u003eThe vocab size is roughly proportional to the number of tokens.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcorpus/","tags":null,"title":"corpus"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcortex/","tags":null,"title":"cortex"},{"categories":null,"contents":"coulomb\u0026rsquo;s law is a principle that deals with the force that two charged particles exhibit to each other.\nconstituents \\(k\\), Coulomb\u0026rsquo;s Constant, found roughly to be \\(9 \\times 10^{9} \\frac{N m^{2}}{C}\\) \\(q_{1,2}\\), the charge of the two particles you are analyzing \\(r\\), distance between particles requirements \\begin{equation} \\vec{F_{E}} = k \\frac{q_1q_2}{r^{2}} \\end{equation}\nadditional information interpreting signs on \\(F_{e}\\) negative: attraction force between changes (the points have opposite signed charges, and so attract) positive: repulsion force between changes (the point have the same signed change, so repel) alternative formulation of Coulomb\u0026rsquo;s Law The law is often redefined with the language of the premittivity of free space:\n\\begin{equation} \\vec{F_{E}} = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q_1q_2}{r^{2}} \\end{equation}\nsuperposition The net electric force on a test change is simply the sum of the electric forces which other particles exhibit on the test change. 
That is:\n\\begin{equation} F_{on\\ 2} = F_{1 \\to 2} + F_{3 \\to 2} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003ecoulomb\u0026rsquo;s law\u003c/a\u003e is a principle that deals with the force that two charged particles exhibit to each other.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(k\\), \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eCoulomb\u0026rsquo;s Constant\u003c/a\u003e, found roughly to be \\(9 \\times 10^{9} \\frac{N m^{2}}{C}\\)\u003c/li\u003e\n\u003cli\u003e\\(q_{1,2}\\), the \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e of the two particles you are analyzing\u003c/li\u003e\n\u003cli\u003e\\(r\\), distance between particles\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F_{E}} = k \\frac{q_1q_2}{r^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"interpreting-signs-on-f-e\"\u003einterpreting signs on \\(F_{e}\\)\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enegative: attraction force between changes (the points have opposite signed charges, and so attract)\u003c/li\u003e\n\u003cli\u003epositive: repulsion force between changes (the point have the same signed change, so repel)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"alternative-formulation-of-coulomb-s-law\"\u003ealternative formulation of Coulomb\u0026rsquo;s Law\u003c/h3\u003e\n\u003cp\u003eThe law is often \u003ca href=\"/posts/kbhpermittivity_of_free_space/#redefinition-of-id-e9d5f9b8-f44f-4ec2-a3ca-eddf236efe1b-coulomb-s-constant-based-on-id-d9abe64a-7668-490b-a297-af51c8859624-permittivity-of-free-space\"\u003eredefined with the language of the premittivity of free space\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F_{E}} = \\frac{1}{4\\pi \\epsilon_{0}} 
\\frac{q_1q_2}{r^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"superposition\"\u003esuperposition\u003c/h3\u003e\n\u003cp\u003eThe net \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eelectric force\u003c/a\u003e on a test change is simply the sum of the \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eelectric force\u003c/a\u003es which other particles exhibit on the test change. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF_{on\\ 2} = F_{1 \\to 2} + F_{3 \\to 2}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcoulomb_s_law/","tags":null,"title":"Coulomb's Law"},{"categories":null,"contents":"quantum information theory requires manipulating counterfactual information\u0026mdash;not what the current known states are, but what are the next possible states.\nInside physics, there is already a few principles which are counterfactual.\nConservation of energy: a perpetual machine is *impossible Second law: its impossible to convert all heat into useful work Heisenberg\u0026rsquo;s uncertainty: its impossible to copy reliable all states of a qubit With the impossibles, we can make the possible.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e requires manipulating counterfactual information\u0026mdash;not what the current known states are, but what are the \u003cem\u003enext possible states\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eInside \u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e, there is already a few principles which are counterfactual.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eConservation of energy: a perpetual machine is \u003cstrong\u003e*impossible\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eSecond law: its \u003cstrong\u003e\u003cstrong\u003eimpossible\u003c/strong\u003e\u003c/strong\u003e to convert all heat into useful work\u003c/li\u003e\n\u003cli\u003eHeisenberg\u0026rsquo;s uncertainty: its 
\u003cstrong\u003e\u003cstrong\u003eimpossible\u003c/strong\u003e\u003c/strong\u003e to copy reliable all states of a qubit\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWith the impossibles, we can make the possible.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcounterfactual/","tags":null,"title":"counterfactual"},{"categories":null,"contents":"counting asks: \u0026ldquo;how many possible outcomes satisfy an event?\u0026rdquo; You create a \u0026ldquo;generative story\u0026rdquo; to think about how you can count for the total choices.\n(like, 3 times will we roll even in a regular fair dice)\nstep rule of counting ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e asks: \u0026ldquo;how many possible outcomes satisfy an event?\u0026rdquo; You create a \u0026ldquo;generative story\u0026rdquo; to think about how you can count for the total choices.\u003c/p\u003e\n\u003cp\u003e(like, 3 times will we roll even in a regular fair dice)\u003c/p\u003e\n\u003ch2 id=\"step-rule-of-counting\"\u003estep rule of counting\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_16-28-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcounting/","tags":null,"title":"counting"},{"categories":null,"contents":" CS157 Introduction to Logic; Carta average 3.9, \u0026ldquo;Only take if you are actually interested in logic or this is an absolute requirement for your degree that you can\u0026rsquo;t dodge. Class structure is very tricky as your entire grade depends on how you do on 3 exams of 5 questions each.\u0026rdquo; ","html":"\u003cul\u003e\n\u003cli\u003eCS157 Introduction to Logic; Carta average 3.9, \u0026ldquo;Only take if you are actually interested in logic or this is an absolute requirement for your degree that you can\u0026rsquo;t dodge. 
Class structure is very tricky as your entire grade depends on how you do on 3 exams of 5 questions each.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcourses_to_take_for_qnlp/","tags":null,"title":"Courses to Take for QNLP"},{"categories":null,"contents":"\\begin{equation} cov(x,y) = E[(X-E[X])(Y-E[Y])] = E[XY]-E[X]E[Y] \\end{equation}\n(the derivation comes from FOIling the two terms and applying properties of expectation.\nwe want to consider: if a point goes way beyond its expectation, does the corresponding point change for another?\n\\begin{equation} (x-E[x])(y-E[y]) \\end{equation}\nif both points are varying .\nInstead of using this unbounded value, we sometimes use a normalized value named correlation:\n\\begin{equation} \\rho(X,Y) = \\frac{Cov(X,Y)}{\\sqrt{Var(X)Var(Y)}} \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\ncov(x,y) = E[(X-E[X])(Y-E[Y])] = E[XY]-E[X]E[Y]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(the derivation comes from FOIling the two terms and applying \u003ca href=\"/posts/kbhexpectation/#properties-of-id-24e5fb5b-b0b2-4872-adf2-398e91c3ee0e-expectation\"\u003eproperties of expectation\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ewe want to consider: if a point goes way beyond its expectation, does the corresponding point change for another?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x-E[x])(y-E[y])\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif both points are varying .\u003c/p\u003e\n\u003cp\u003eInstead of using this unbounded value, we sometimes use a normalized value named \u003ca href=\"/posts/kbhcovariance/\"\u003ecorrelation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho(X,Y) = \\frac{Cov(X,Y)}{\\sqrt{Var(X)Var(Y)}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcovariance/","tags":null,"title":"covariance"},{"categories":null,"contents":"coveather is a novel consensus algorithm based on the proof of work 
mechanism.\nSee also minimum user base requirements for coveather and Coveather Abstract.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcoveather/\"\u003ecoveather\u003c/a\u003e is a novel consensus algorithm based on the \u003ca href=\"/posts/kbhproof_of_work/\"\u003eproof of work\u003c/a\u003e mechanism.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhminimum_user_base_requirements_for_coveather/\"\u003eminimum user base requirements for coveather\u003c/a\u003e and \u003ca href=\"/posts/kbhcoveather_abstract/\"\u003eCoveather Abstract\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-22_22-22-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcoveather/","tags":null,"title":"coveather"},{"categories":null,"contents":"Digital Health Passes (DHP), systems of digitally validating quarantine and vaccination status such as the New York IBM Excelsior Pass, demonstrate a lawful means to approach some benefits offered by \u0026ldquo;true elimination\u0026rdquo; treatment strategies-which focus on the complete elimination of cases instead of investing more in controlling the progression of the disease-of COVID-19. Current implementations of DHPs require region-based control and central storage of Protected Health Information (PHI)-creating a challenge to widespread use across different jurisdictions with incompatible data management systems and a lack of standardized patient privacy controls. In this work, a mechanism for decentralized PHI storage and validation is proposed through a novel two-stage handshaking mechanism update to blockchain proof-of-stake consensus. The proposed mechanism, when used to support a DHP, allows individuals to validate their quarantine and testing universally with any jurisdiction while allowing their right of independent movement and the protection of their PHI. 
Implementational details on the protocol are given, and the protocol is shown to withstand a 1% disturbance attack at only 923 participants via a Monte-Carlo simulation: further validating its stability.\n","html":"\u003cp\u003eDigital Health Passes (DHP), systems of digitally validating quarantine and vaccination status such as the New York IBM Excelsior Pass, demonstrate a lawful means to approach some benefits offered by \u0026ldquo;true elimination\u0026rdquo; treatment strategies-which focus on the complete elimination of cases instead of investing more in controlling the progression of the disease-of COVID-19. Current implementations of DHPs require region-based control and central storage of Protected Health Information (PHI)-creating a challenge to widespread use across different jurisdictions with incompatible data management systems and a lack of standardized patient privacy controls. In this work, a mechanism for decentralized PHI storage and validation is proposed through a novel two-stage handshaking mechanism update to blockchain proof-of-stake consensus. The proposed mechanism, when used to support a DHP, allows individuals to validate their quarantine and testing universally with any jurisdiction while allowing their right of independent movement and the protection of their PHI. 
Implementational details on the protocol are given, and the protocol is shown to withstand a 1% disturbance attack at only 923 participants via a Monte-Carlo simulation: further validating its stability.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcoveather_abstract/","tags":null,"title":"Coveather Abstract"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcovid_19/","tags":null,"title":"COVID-19"},{"categories":null,"contents":"A CPOMDP, or Constrained Partially Observable Markov Decision Process, gives two objectives for the system to optimize upon:\nan reward function \\(r(s,a)\\) and a set of constraints \\(c(s,a) \\geq 0\\). Specifically, we formulate it as a POMDP: \\((S,A,\\Omega), T, O ,R\\), with an additional set of constraints \\(\\bold{C}\\) and budgets \\(\\beta\\).\nWhereby, we seek to maximize the infinite-horizon reward \\(\\mathbb{E}_{t} \\qty[R(a_{t}, s_{t})]\\) subject to discounting, subject to:\n\\begin{equation} C_{i}(s,a) \\leq \\beta_{i}, \\forall C_{i},\\beta_{i} \\in \\bold{C}, \\beta \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003e, or \u003ca href=\"/posts/kbhcpomdp/\"\u003eConstrained Partially Observable Markov Decision Process\u003c/a\u003e, gives two objectives for the system to optimize upon:\u003c/p\u003e\n\u003cp\u003ean reward function \\(r(s,a)\\) and a set of constraints \\(c(s,a) \\geq 0\\). 
Specifically, we formulate it as a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e: \\((S,A,\\Omega), T, O ,R\\), with an additional set of constraints \\(\\bold{C}\\) and budgets \\(\\beta\\).\u003c/p\u003e\n\u003cp\u003eWhereby, we seek to maximize the infinite-horizon reward \\(\\mathbb{E}_{t} \\qty[R(a_{t}, s_{t})]\\) subject to discounting, subject to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{i}(s,a) \\leq \\beta_{i}, \\forall C_{i},\\beta_{i} \\in \\bold{C}, \\beta\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcpomdp/","tags":null,"title":"CPOMDP"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcram/","tags":null,"title":"cram"},{"categories":null,"contents":"crap to remember for AP Stats is a cram sheet for the AP Statistics exam.\n95% confidence: \\(z^*=1.96\\)\n\\(r=1\\): perfect positive correlation \\(r=-1\\): perfect negative correlation \\(r=0\\): no correlation S: standard deviation of residuals R-sq: how much of varience in dep. var can be explained by indp. var SE: estimate of standard deviation of the random var. that is slope.\nFor lines:\nNote that p value from regression outputs are two-tailed. So divide by 2 if you want a one-tail result.\nMultiplication changes mean as well as well as standard deviation. Adding changes mean but not standard deviation.\nExpected value of the sum and differences of random variables are just the sums and differences of their expected value. \\(S = X+Y, \\bar{S} = \\bar{X}+\\bar{Y}\\).\nVariance of random variables are just the sum and differences of their variance. \\(S=X+Y,{\\sigma^2}_S = {\\sigma^2}_X+{\\sigma^2}_Y\\).\n#WHAPS\nwhat test what hypothesis and what significance level assumptions and conditions; state! random independent: \\(\\le 10\\%\\) of population. 
t and z special: normal (z tests: \\(np, n(1-p) \\geq 10\\), t tests: \\(n\u0026gt;30\\) or given) chi-square special: \\(\\forall\\ EV \u0026gt; 5\\) p: z-statistic that would XD: Control (control for confounding and bias, placebo, etc.), Randomization (spread uncontrolled variability), Replication (need to have adequate units and ability to be repeated)\n=\u0026gt; Describing a distribution\nCenter: Mean, Median, or Mode? figure by skew Shape: Symmetric vs Skewed? Unimodal vs Bimodal Spread: Range and Inter-Quartile Range Outlier: anything more than 1.5*IQR away Context: what the distribution shows \u0026ldquo;Experimental Unit\u0026rdquo;: a physic entity that\u0026rsquo;s the primary unit of interest in a research objective.\nConditions for binomial distribution:\nBinary Independent Fixed number of trials All trials with same probability Conditions for geometric distrubiton\nBinary Independent Fixed number of successes All trials with same probability state the thing, state the conditions: \u0026ldquo;normal distribution with n= s=\u0026rdquo;, binomial distribution with n= p= etc.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcrap_to_remember_for_ap_stats/\"\u003ecrap to remember for AP Stats\u003c/a\u003e is a cram sheet for the \u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e exam.\u003c/p\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-46-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-47-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-47-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e95% confidence: \\(z^*=1.96\\)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-56-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\\(r=1\\): perfect positive correlation\u003c/li\u003e\n\u003cli\u003e\\(r=-1\\): perfect negative 
correlation\u003c/li\u003e\n\u003cli\u003e\\(r=0\\): no correlation\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-57-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-48-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-48-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-49-30_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-19-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-20-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eS: standard deviation of residuals\u003c/li\u003e\n\u003cli\u003eR-sq: how much of varience in dep. var can be explained by indp. var\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-21-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSE: estimate of standard deviation of the random var. that is slope.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-34-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFor lines:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-39-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNote that p value from regression outputs are two-tailed. So divide by 2 if you want a one-tail result.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-45-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eMultiplication changes mean as well as well as standard deviation. Adding changes mean but not standard deviation.\u003c/p\u003e\n\u003cp\u003eExpected value of the sum and differences of random variables are just the sums and differences of their expected value. 
\\(S = X+Y, \\bar{S} = \\bar{X}+\\bar{Y}\\).\u003c/p\u003e\n\u003cp\u003eVariance of random variables are just the sum and differences of their variance. \\(S=X+Y,{\\sigma^2}_S = {\\sigma^2}_X+{\\sigma^2}_Y\\).\u003c/p\u003e\n\u003cp\u003e#WHAPS\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhat test\u003c/li\u003e\n\u003cli\u003ewhat hypothesis and what \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003esignificance level\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eassumptions and conditions; state!\n\u003cul\u003e\n\u003cli\u003erandom\u003c/li\u003e\n\u003cli\u003eindependent: \\(\\le 10\\%\\) of population.\u003c/li\u003e\n\u003cli\u003et and z special: normal (z tests: \\(np, n(1-p) \\geq 10\\), t tests: \\(n\u0026gt;30\\) or given)\u003c/li\u003e\n\u003cli\u003echi-square special: \\(\\forall\\ EV \u0026gt; 5\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ep: z-statistic that would\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-21_21-28-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eXD: Control (control for confounding and bias, placebo, etc.), Randomization (spread uncontrolled variability), Replication (need to have adequate units and ability to be repeated)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-21_22-30-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e=\u0026gt; Describing a distribution\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCenter: Mean, Median, or Mode? figure by skew\u003c/li\u003e\n\u003cli\u003eShape: Symmetric vs Skewed? 
Unimodal vs Bimodal\u003c/li\u003e\n\u003cli\u003eSpread: Range and Inter-Quartile Range\u003c/li\u003e\n\u003cli\u003eOutlier: anything more than 1.5*IQR away\u003c/li\u003e\n\u003cli\u003eContext: what the distribution shows\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;Experimental Unit\u0026rdquo;: a physic entity that\u0026rsquo;s the primary unit of interest in a research objective.\u003c/p\u003e\n\u003cp\u003eConditions for \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBinary\u003c/li\u003e\n\u003cli\u003eIndependent\u003c/li\u003e\n\u003cli\u003eFixed number of trials\u003c/li\u003e\n\u003cli\u003eAll trials with same probability\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eConditions for geometric distrubiton\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBinary\u003c/li\u003e\n\u003cli\u003eIndependent\u003c/li\u003e\n\u003cli\u003eFixed number of successes\u003c/li\u003e\n\u003cli\u003eAll trials with same probability\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003estate the thing, state the conditions\u003c/strong\u003e\u003c/strong\u003e: \u0026ldquo;normal distribution with n= s=\u0026rdquo;, \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e with n= p= etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcrap_to_remember_for_ap_stats/","tags":null,"title":"crap to remember for AP Stats"},{"categories":null,"contents":"Major Challenges data loss: crashes can happen, and not all data could be saved to disk inconsistency: crashes can happen in the middle of operations crashes could occur when someone of the blocks that have been written to disk, but not others inode and free lists don\u0026rsquo;t agree. Ideally, filesystem operations should be atomic. 
Every operation should happen or not happen at all\u0026mdash;but not halfway.\nCase study: Block Cache Modification\nTradeoffs The overall design tradeoffs between this:\ndurability - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.)) performance - it needs to be fast (which may mean less error checking) consistency - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked) Also the disks themselves can still fail.\nfsck Don\u0026rsquo;t make any changes to filesystem at all. At the system boot time, check filesystem for consistency.\nmain limitation:\ncan\u0026rsquo;t restart filesystem until it completes: this process takes forever restores consistency, but doesn\u0026rsquo;t prevent loss of info restores consistency, but filesystem may still be unusable (core files moved to lost+found) a block could migrate from a password file to some other random file, hence removing info Check whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn\u0026rsquo;t set there isn\u0026rsquo;t a clean shutdown. 
If it wasn\u0026rsquo;t a clean shutdown, identify inconsistencies Scans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations\u0026mdash; block in an inode and in free list Solution: pull the block off of free list\nblock is a part of two inodes Solutions:\ngive to newest randomly pick make a copy remove (generally a bad idea, we don\u0026rsquo;t want to destroy data) inode claims one dirent refers to it, but there are no such dirent put the file under the lost+found folder\nordered writes Example: block is in file and also in the free list.\nThis basically removes the need to wait for fsck on reboot.\nWe can use a certain order of operations to prevent these types of errors from occurring:\nAlways initialize the TARGET before initializing the REFERENCE Initialize inode before initalize directory entry to it Never reuse a resource before NULLIFYING all existing REFERENCES Remove the inode reference before putting a block on the free list Never clear the LAST REFERENCE to a live resource before setting a NEW REFERENCE (\u0026ldquo;its better to have 2 copies instead of none\u0026rdquo;) Make the new directory entry before get rid of the old one Limitations:\nperformance: we need to do operations synchronously if we really want to do caching async, we can track dependencies circular dependencies are possible leak: it could leak resources (reference nullification happens but resource not added) We can run fsck in the background journaling journaling keeps a paper trail of disk appertains in the event of a crash. We have an append-only log on disk that stores disk operations.\nbefore performing an operation, record its info in the log and write that to disk The log will always record what\u0026rsquo;s happening ahead. The actual block updates can eventually be carried out in any order.\nwhat do we log? we only log metadata changes (inodes, moving stuff around, etc.) 
payload operations are not saved structure We typically have a LSN: log serial number, operations, and metadata.\n#+begin_src toml [offset 335050] LSN 18384030 operation = \u0026ldquo;LogBlockAlloc\u0026rdquo; blockno = 1027 zero_on_replay = 0\n[offset 23232] LSN N operation = \u0026ldquo;LogPatch\u0026rdquo; blockno = 8 offset = 137 bytes = 0.04 inode = 52 #+end_arc\n\u0026ldquo;zero-on-replay\u0026rdquo;:\nSpecifies that block number blockno, which was previously free, should now be marked as allocated (0 or false in the freemap). If zero_on_replay is non-zero, it means that the block is being used for metadata\u0026mdash;i.e., as an indirect (or double-indirect) block, or for a directory.\nlimitations checkpoints Its an add-only paper trial. We can truncate the log occasionally at a \u0026ldquo;checkpoint\u0026rdquo;, and truncate the log which is no longer needed.\nmultiple log entries An atomic operations may have multiple log entries corresponding to it (because they have many steps). We need to make sure that the entire operation is replayed or none at all.\nSo, we introduce transactions: each atomic operation will be wrapped into a unit transaction.\nwhere do we start replaying You don\u0026rsquo;t know where exactly you crashed.\nSo, log entries should be idempotent: doing something multiple times should have the same effect of doing them once. To make this happen, we need to cache all the data that\u0026rsquo;s needed to write to the log in the log itself. It cannot have external dependencies.\nSo we just replay the entire log. To save time every so often you trim the logs via checkpoints\nlog entries may take time We can also make log entry writes in the block cache too. This doesn\u0026rsquo;t matter too much: if both the log and the actual data is wiped from the cache, the filesystem is still consistent (we just lost data).\nWhen finally we write stuff to disk, we write the logs first. 
So no problems there.\n","html":"\u003ch2 id=\"major-challenges\"\u003eMajor Challenges\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edata loss\u003c/strong\u003e: crashes can happen, and not all data could be saved to disk\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003einconsistency\u003c/strong\u003e: crashes can happen in the middle of operations\n\u003cul\u003e\n\u003cli\u003ecrashes could occur when someone of the blocks that have been written to disk, but not others\u003c/li\u003e\n\u003cli\u003einode and free lists don\u0026rsquo;t agree.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIdeally, filesystem operations should be \u003cstrong\u003eatomic\u003c/strong\u003e. Every operation should happen or not happen at all\u0026mdash;but not halfway.\u003c/p\u003e\n\u003cp\u003eCase study: \u003ca href=\"/posts/kbhunix_v6_filesystem/#block-cache-modification\"\u003eBlock Cache Modification\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"tradeoffs\"\u003eTradeoffs\u003c/h2\u003e\n\u003cp\u003eThe overall design tradeoffs between this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edurability - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.))\u003c/li\u003e\n\u003cli\u003eperformance - it needs to be fast (which may mean less error checking)\u003c/li\u003e\n\u003cli\u003econsistency - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlso the disks themselves can still fail.\u003c/p\u003e\n\u003ch2 id=\"fsck\"\u003efsck\u003c/h2\u003e\n\u003cp\u003eDon\u0026rsquo;t make any changes to filesystem at all. 
At the system boot time, check filesystem for consistency.\u003c/p\u003e\n\u003cp\u003emain limitation:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecan\u0026rsquo;t restart filesystem until it completes: this process \u003cstrong\u003e\u003cstrong\u003etakes forever\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erestores consistency, but doesn\u0026rsquo;t prevent loss of info\u003c/li\u003e\n\u003cli\u003erestores consistency, but filesystem may still be unusable (core files moved to lost+found)\u003c/li\u003e\n\u003cli\u003ea block could migrate from a password file to some other random file, hence removing info\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003eCheck whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn\u0026rsquo;t set there isn\u0026rsquo;t a clean shutdown.\u003c/li\u003e\n\u003cli\u003eIf it wasn\u0026rsquo;t a clean shutdown, identify inconsistencies\u003c/li\u003e\n\u003cli\u003eScans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations\u0026mdash;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"block-in-an-inode-and-in-free-list\"\u003eblock in an inode and in free list\u003c/h3\u003e\n\u003cp\u003eSolution: pull the block off of free list\u003c/p\u003e\n\u003ch3 id=\"block-is-a-part-of-two-inodes\"\u003eblock is a part of two inodes\u003c/h3\u003e\n\u003cp\u003eSolutions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003egive to newest\u003c/li\u003e\n\u003cli\u003erandomly pick\u003c/li\u003e\n\u003cli\u003emake a copy\u003c/li\u003e\n\u003cli\u003eremove (generally a bad idea, we don\u0026rsquo;t want to destroy data)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"inode-claims-one-dirent-refers-to-it-but-there-are-no-such-dirent\"\u003einode claims one dirent refers to it, but there are no such dirent\u003c/h3\u003e\n\u003cp\u003eput the file under the 
\u003ccode\u003elost+found\u003c/code\u003e folder\u003c/p\u003e\n\u003ch2 id=\"ordered-writes\"\u003eordered writes\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eExample\u003c/strong\u003e\u003c/strong\u003e: block is in file and also in the free list.\u003c/p\u003e\n\u003cp\u003eThis basically removes the need to wait for fsck on reboot.\u003c/p\u003e\n\u003cp\u003eWe can use a certain \u003cstrong\u003eorder\u003c/strong\u003e of operations to prevent these types of errors from occurring:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eAlways initialize the \u003cstrong\u003eTARGET\u003c/strong\u003e before initializing the \u003cstrong\u003eREFERENCE\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003eInitialize inode before initalize directory entry to it\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever reuse a resource before \u003cstrong\u003eNULLIFYING\u003c/strong\u003e all existing \u003cstrong\u003e\u003cstrong\u003eREFERENCES\u003c/strong\u003e\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003eRemove the inode reference before putting a block on the free list\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever clear the \u003cstrong\u003e\u003cstrong\u003eLAST REFERENCE\u003c/strong\u003e\u003c/strong\u003e to a live resource before setting a \u003cstrong\u003e\u003cstrong\u003eNEW REFERENCE\u003c/strong\u003e\u003c/strong\u003e (\u0026ldquo;its better to have 2 copies instead of none\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003eMake the new directory entry before get rid of the old one\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eLimitations:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eperformance\u003c/strong\u003e: we need to do operations synchronously\n\u003cul\u003e\n\u003cli\u003eif we really want to do caching async, we can track dependencies\u003c/li\u003e\n\u003cli\u003ecircular dependencies are 
possible\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eleak\u003c/strong\u003e\u003c/strong\u003e: it could leak resources (reference nullification happens but resource not added)\n\u003cul\u003e\n\u003cli\u003eWe can run fsck in the background\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"journaling\"\u003ejournaling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#journaling\"\u003ejournaling\u003c/a\u003e keeps a paper trail of disk appertains in the event of a crash. We have an append-only log on disk that stores disk operations.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebefore performing an operation, record its info in the log\u003c/li\u003e\n\u003cli\u003eand write that to disk\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe log will always record what\u0026rsquo;s happening ahead. The actual block updates can eventually be carried out in any order.\u003c/p\u003e\n\u003ch3 id=\"what-do-we-log\"\u003ewhat do we log?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe only log \u003cstrong\u003emetadata\u003c/strong\u003e changes (inodes, moving stuff around, etc.)\u003c/li\u003e\n\u003cli\u003epayload operations are not saved\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"structure\"\u003estructure\u003c/h3\u003e\n\u003cp\u003eWe typically have a LSN: log serial number, operations, and metadata.\u003c/p\u003e\n\u003cp\u003e#+begin_src toml\n[offset 335050]\nLSN 18384030\noperation = \u0026ldquo;LogBlockAlloc\u0026rdquo;\nblockno = 1027\nzero_on_replay = 0\u003c/p\u003e\n\u003cp\u003e[offset 23232]\nLSN N\noperation = \u0026ldquo;LogPatch\u0026rdquo;\nblockno = 8\noffset = 137\nbytes = 0.04\ninode = 52\n#+end_arc\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;zero-on-replay\u0026rdquo;:\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003eSpecifies that block number blockno, which was previously free, should now be marked as allocated (0 or false in the freemap). 
If zero_on_replay is non-zero, it means that the block is being used for metadata\u0026mdash;i.e., as an indirect (or double-indirect) block, or for a directory.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"limitations\"\u003elimitations\u003c/h3\u003e\n\u003ch4 id=\"checkpoints\"\u003echeckpoints\u003c/h4\u003e\n\u003cp\u003eIts an add-only paper trial. We can truncate the log occasionally at a \u0026ldquo;checkpoint\u0026rdquo;, and truncate the log which is no longer needed.\u003c/p\u003e\n\u003ch4 id=\"multiple-log-entries\"\u003emultiple log entries\u003c/h4\u003e\n\u003cp\u003eAn atomic operations may have multiple log entries corresponding to it (because they have many steps). We need to make sure that the entire operation is replayed or none at all.\u003c/p\u003e\n\u003cp\u003eSo, we introduce \u003cstrong\u003etransactions\u003c/strong\u003e: each atomic operation will be wrapped into a unit transaction.\u003c/p\u003e\n\u003ch4 id=\"where-do-we-start-replaying\"\u003ewhere do we start replaying\u003c/h4\u003e\n\u003cp\u003eYou don\u0026rsquo;t know where \u003cstrong\u003eexactly\u003c/strong\u003e you crashed.\u003c/p\u003e\n\u003cp\u003eSo, log entries should be \u003cstrong\u003eidempotent\u003c/strong\u003e: doing something multiple times should have the same effect of doing them once. To make this happen, we need to cache all the data that\u0026rsquo;s needed to write to the log in the log itself. It cannot have external dependencies.\u003c/p\u003e\n\u003cp\u003eSo we just replay the entire log. To save time every so often you trim the logs via \u003ca href=\"#checkpoints\"\u003echeckpoints\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"log-entries-may-take-time\"\u003elog entries may take time\u003c/h4\u003e\n\u003cp\u003eWe can also make log entry writes in the block cache too. 
This doesn\u0026rsquo;t matter too much: if both the log and the actual data is wiped from the cache, the filesystem is \u003cstrong\u003estill consistent\u003c/strong\u003e (we just lost data).\u003c/p\u003e\n\u003cp\u003eWhen finally we write stuff to disk, we write the logs first. So no problems there.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcrash_recovery/","tags":null,"title":"crash recovery"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcredit/","tags":null,"title":"credit"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcredit_suisse/","tags":null,"title":"Credit Suisse"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcritical_value/","tags":null,"title":"critical value"},{"categories":null,"contents":"criticized the New Deal from all sides. Senator Huy P. Long claimed to \u0026ldquo;show our wealth.\u0026rdquo; nullification from conservative supreme court, FDR threatened to restructure + hurts his coalition.\nFDR ordered cuts in spending 1938 midterms: Republicans can block programs \u0026mdash; gained control of congress + created ability to gain control ","html":"\u003cp\u003ecriticized the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e from all sides. Senator Huy P. 
Long claimed to \u0026ldquo;show our wealth.\u0026rdquo; nullification from conservative supreme court, FDR threatened to restructure + hurts his coalition.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFDR ordered cuts in spending\u003c/li\u003e\n\u003cli\u003e1938 midterms: Republicans can block programs \u0026mdash; gained control of congress + created ability to gain control\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcriticism_of_the_new_deal/","tags":null,"title":"criticism of the New Deal (See file KBhnew_deal.org)"},{"categories":null,"contents":"Cross Entropy Method is a \u0026ldquo;conditional MLE\u0026rdquo; objective; whereby we try to maximize:\nthe log prob of the true y labels in the training data given the observations Derivation Recall the Bernoulli distribution, and specifically:\n\\begin{equation} P(Y=y) = p^{y} (1-p)^{1-y} \\end{equation}\nMeaning, we want to maximize:\n\\begin{equation} \\log P(y=y) = y \\log p + (1-y)\\log (1-y) \\end{equation}\nspecifically, we\u0026rsquo;d like to minimize:\n\\begin{equation} -[y \\log p + (1-y)\\log (1-y)] \\end{equation}\nIntuition This function should be\nsmaller when the model estimate is close to correct bigger if the model is confused or wrong ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e is a \u0026ldquo;conditional \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u0026rdquo; objective; whereby we try to maximize:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe log prob\u003c/li\u003e\n\u003cli\u003eof the true y labels in the training data\u003c/li\u003e\n\u003cli\u003egiven the observations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"derivation\"\u003eDerivation\u003c/h2\u003e\n\u003cp\u003eRecall the \u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e, and 
specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y) = p^{y} (1-p)^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, we want to maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log P(y=y) = y \\log p + (1-y)\\log (1-y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003especifically, we\u0026rsquo;d like to \u003cstrong\u003eminimize\u003c/strong\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-[y \\log p + (1-y)\\log (1-y)]\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eThis function should be\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esmaller when the model estimate is close to correct\u003c/li\u003e\n\u003cli\u003ebigger if the model is confused or wrong\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcross_entropy_loss/","tags":null,"title":"cross entropy loss"},{"categories":null,"contents":"This method introduces a search distribution instead of discrete points:\n\\begin{equation} p(\\theta | \\psi) \\end{equation}\nWe want to know how parameters \\(\\theta\\) are distributed, given some input parameters \\(\\psi\\) (for instance, we assume parameters are gaussian distributed such as the mean/variance).\nGiven this distribution, we sample \\(m\\) samples of \\(\\theta\\) from the distribution. Those are our starting candidate points. 
We then check its policy for its utility via the Roll-out utility We want to take top \\(k\\) of our best performers, called \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\) Use the set of \\(m_{elite}\\) points, we fit a new distribution parameter \\(\\psi\\) that describes those sample This allows us to bound how many Roll-out utilities we are doing.\nFor each dimension, we should have 10x elite sample points (1d should have 10 samples, 2d should have 20, etc.)\n","html":"\u003cp\u003eThis method introduces a search distribution instead of discrete points:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\theta | \\psi)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to know how parameters \\(\\theta\\) are distributed, given some input parameters \\(\\psi\\) (for instance, we assume parameters are \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e distributed such as the mean/variance).\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eGiven this distribution, we sample \\(m\\) samples of \\(\\theta\\) from the distribution. 
Those are our starting candidate points.\u003c/li\u003e\n\u003cli\u003eWe then check its policy for its \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e via the \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWe want to take top \\(k\\) of our best performers, called \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\)\u003c/li\u003e\n\u003cli\u003eUse the set of \\(m_{elite}\\) points, we fit a new distribution parameter \\(\\psi\\) that describes those sample\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis allows us to bound how many \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utilitie\u003c/a\u003es we are doing.\u003c/p\u003e\n\u003cp\u003eFor each dimension, we should have 10x elite sample points (1d should have 10 samples, 2d should have 20, etc.)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcross_entropy_method/","tags":null,"title":"Cross Entropy Method"},{"categories":null,"contents":"constituents additional information lack of inverse of cross product The cross product doesn\u0026rsquo;t have an inverse\ngeometric interpretation of cross product \\begin{equation} a \\times b = |\\vec{a}| |\\vec{b}| \\sin \\theta n \\end{equation}\nwhere, \\(n\\) is the unit vector in some direction.\nThe length of the resulting vector in the cross product is the area of the parallelogram formed by the two vectors.\n","html":"\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"lack-of-inverse-of-cross-product\"\u003elack of inverse of cross product\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e doesn\u0026rsquo;t have an inverse\u003c/p\u003e\n\u003ch3 id=\"geometric-interpretation-of-cross-product--kbhcross-product-dot-md\"\u003egeometric interpretation 
of \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\na \\times b = |\\vec{a}| |\\vec{b}| \\sin \\theta n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n\\) is the unit vector in some direction.\u003c/p\u003e\n\u003cp\u003eThe length of the resulting vector in the \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e is the area of the parallelogram formed by the two vectors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcross_product/","tags":null,"title":"cross product"},{"categories":null,"contents":"CrossFinder is a darkpool owned by Credit Suisse.\nFeatures:\nNormal darkpooling Routing the transaction out to other exchanges and dark-pools if needed Measuring the latency of each other exchange, etc. ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcrossfinder/\"\u003eCrossFinder\u003c/a\u003e is a \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003e owned by \u003ca href=\"/posts/kbhcredit_suisse/\"\u003eCredit Suisse\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFeatures:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eNormal \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003eing\u003c/li\u003e\n\u003cli\u003eRouting the transaction out to other exchanges and dark-pools if needed\u003c/li\u003e\n\u003cli\u003eMeasuring the latency of each other exchange, etc.\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcrossfinder/","tags":null,"title":"CrossFinder"},{"categories":null,"contents":"cyro-EM is a structure determination system of a solution (Dutta, M. 2018. 
J indian inst sci 98) to analyze a structural population of particles from TEM; the resulting 3-D structures obtained can be analyzed and classified.\n\u0026ldquo;The Resolution Revolution\u0026rdquo;: much better structures to analyze because of high-fidelity cyro-EM\ncyro-EM vs x-ray crystallography cyro-EM can identify heterogeneous motions throughout the structure, instead of averaging out multiple structural combinations; instead of the \u0026ldquo;general\u0026rdquo; structure on average, we can get a collection of various states the particle can be in.\nmanifold embedding manifold embedding is set of methods using diffusion maps to analyze the primary dynamics behavior, not sure what exactly are the methods\nhttps://cryosparc.com/\nensemble reweighting http://arxiv.org/abs/2212.05320\ntake MD =\u0026gt; create \u0026ldquo;fake\u0026rdquo; cyro-EM images [something happens I didn\u0026rsquo;t catch for cyro-em] then, project back to MD. misfolded elements would then be removed CHARMM-GUI\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e is a structure determination system of a \u003cstrong\u003esolution\u003c/strong\u003e (Dutta, M. 2018. 
J indian inst sci 98) to analyze a structural population of particles from TEM; the resulting 3-D structures obtained can be analyzed and classified.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;The Resolution Revolution\u0026rdquo;: much better structures to analyze because of high-fidelity \u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"cyro-em-vs-x-ray-crystallography\"\u003ecyro-EM vs x-ray crystallography\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e can identify heterogeneous motions throughout the structure, instead of averaging out multiple structural combinations; instead of the \u0026ldquo;general\u0026rdquo; structure on average, we can get a collection of various states the particle can be in.\u003c/p\u003e\n\u003ch2 id=\"manifold-embedding\"\u003emanifold embedding\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#manifold-embedding\"\u003emanifold embedding\u003c/a\u003e is set of methods using \u003ca href=\"/posts/kbhdiffusion_map/\"\u003ediffusion map\u003c/a\u003es to analyze the primary dynamics behavior, not sure what exactly are the methods\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://cryosparc.com/\"\u003ehttps://cryosparc.com/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"ensemble-reweighting\"\u003eensemble reweighting\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"http://arxiv.org/abs/2212.05320\"\u003ehttp://arxiv.org/abs/2212.05320\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etake MD =\u0026gt; create \u0026ldquo;fake\u0026rdquo; \u003ca href=\"#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e images\u003c/li\u003e\n\u003cli\u003e[something happens I didn\u0026rsquo;t catch for cyro-em]\u003c/li\u003e\n\u003cli\u003ethen, project back to MD. 
misfolded elements would then be removed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCHARMM-GUI\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcyro_em/","tags":null,"title":"cryo-electron microscopy"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcrystels/","tags":null,"title":"Crystals"},{"categories":null,"contents":"What random variable should I use? Unit 1 core probability, axiom of probability, counting, combination, permutation, DeMorgan\u0026rsquo;s Law.\nSU-CS109 SEP272023 SU-CS109 SEP292023 SU-CS109 OCT022023 SU-CS109 OCT042023 SU-CS109 OCT062023 SU-CS109 OCT112023 Unit 2 random variables\nSU-CS109 OCT092023 SU-CS109 OCT132023 SU-CS109 OCT162023 SU-CS109 OCT182023 Unit 3 inference, General Inference\nSU-CS109 OCT202023 SU-CS109 OCT232023 SU-CS109 OCT252023 SU-CS109 OCT272023 Unit 4 Beta Distribution, central limit theorem, bootstrapping\nSU-CS109 NOV012023 SU-CS109 NOV032023 SU-CS109 NOV102023 Unit 5 Apredezahe de machinas: Naive Bayes, logistic regression, deep learning\nSU-CS109 NOV132023 SU-CS109 NOV292023 SU-CS109 DEC012023 SU-CS109 DEC042023 Notes SU-CS109 Midterm SU-CS109 Midterm Sheet ","html":"\u003ch2 id=\"what-random-variable-should-i-use\"\u003eWhat random variable should I use?\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-16_15-40-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"unit-1\"\u003eUnit 1\u003c/h2\u003e\n\u003cp\u003ecore \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e, \u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e, \u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e, \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003e, \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e, \u003ca href=\"/posts/kbhdemorgan_s_law/\"\u003eDeMorgan\u0026rsquo;s Law\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs109_sep272023/\"\u003eSU-CS109 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_sep292023/\"\u003eSU-CS109 SEP292023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct022023/\"\u003eSU-CS109 OCT022023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct042023/\"\u003eSU-CS109 OCT042023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct062023/\"\u003eSU-CS109 OCT062023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct112023/\"\u003eSU-CS109 OCT112023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-2\"\u003eUnit 2\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variables\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct092023/\"\u003eSU-CS109 OCT092023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_109_oct132023/\"\u003eSU-CS109 OCT132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct162023/\"\u003eSU-CS109 OCT162023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct182023/\"\u003eSU-CS109 OCT182023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-3\"\u003eUnit 3\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e, \u003ca href=\"/posts/kbhgeneral_inference/\"\u003eGeneral Inference\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct202023/\"\u003eSU-CS109 OCT202023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct232023/\"\u003eSU-CS109 OCT232023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct252023/\"\u003eSU-CS109 OCT252023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs109_oct272023/\"\u003eSU-CS109 OCT272023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-4\"\u003eUnit 4\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e, \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e, \u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003eping\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov012023/\"\u003eSU-CS109 NOV012023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov032023/\"\u003eSU-CS109 NOV032023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov102023/\"\u003eSU-CS109 NOV102023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-5\"\u003eUnit 5\u003c/h2\u003e\n\u003cp\u003eApredezahe de machinas: \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e, \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e, \u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov132023/\"\u003eSU-CS109 NOV132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov292023/\"\u003eSU-CS109 NOV292023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_dec012023/\"\u003eSU-CS109 DEC012023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_dec042023/\"\u003eSU-CS109 DEC042023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_midterm/\"\u003eSU-CS109 Midterm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_midterm_sheet/\"\u003eSU-CS109 Midterm 
Sheet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcs_probability_index/","tags":["index"],"title":"CS Probability Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcs124/","tags":null,"title":"cs124"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcultural_revolution/","tags":null,"title":"Cultural Revolution"},{"categories":null,"contents":"current is defined as the flow of positive charge. Specifically:\n\\begin{equation} I = \\frac{\\Delta Q}{\\Delta t} \\end{equation}\nresistance of a wire (if ever you come across needing to calculate the resistance of the wire from scratch)\n\\begin{equation} R = \\rho \\frac{L}{A} \\end{equation}\nwhere, \\(\\rho\\) is the material resistivity, \\(L\\) the length, and \\(A\\) the cross-sectional area.\nyou rarely need to do this!\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e is defined as the flow of positive \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e. 
Specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = \\frac{\\Delta Q}{\\Delta t}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"resistance-of-a-wire\"\u003eresistance of a wire\u003c/h2\u003e\n\u003cp\u003e(if ever you come across needing to calculate the resistance of the wire from scratch)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR = \\rho \\frac{L}{A}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\rho\\) is the material resistivity, \\(L\\) the length, and \\(A\\) the cross-sectional area.\u003c/p\u003e\n\u003cp\u003eyou rarely need to do this!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcurrent/","tags":null,"title":"current"},{"categories":null,"contents":"The curse of dimensionality is the result of correlatives of the fact that:\nat higher dimensions, most random data points become equidistant from each other \u0026mdash; you can prove this to yourself pythagoras and some math of expectation\nrandom vectors are almost orthogonal unit sphere takes almost no volume in unit square (?) 
","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhcurse_of_dimensionality/\"\u003ecurse of dimensionality\u003c/a\u003e is the result of correlatives of the fact that:\u003c/p\u003e\n\u003cp\u003eat higher dimensions, \u003cstrong\u003emost random data points become equidistant from each other\u003c/strong\u003e \u0026mdash; you can prove this to yourself \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003epythagoras\u003c/a\u003e and some math of \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-31_15-16-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003erandom vectors are almost orthogonal\u003c/li\u003e\n\u003cli\u003eunit sphere takes almost no volume in unit square (?)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcurse_of_dimensionality/","tags":null,"title":"curse of dimensionality"},{"categories":null,"contents":"User Stories but harder and more rigerous: https://www.visual-paradigm.com/guide/customer-experience/what-is-customer-journey-mapping/\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhuser_interviews/#user-story\"\u003eUser Stories\u003c/a\u003e but harder and more rigerous: \u003ca href=\"https://www.visual-paradigm.com/guide/customer-experience/what-is-customer-journey-mapping/\"\u003ehttps://www.visual-paradigm.com/guide/customer-experience/what-is-customer-journey-mapping/\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcustomer_journey_map/","tags":null,"title":"Customer Journey Map"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcynthia_lee/","tags":null,"title":"Cynthia 
Lee"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcyrodrgn/","tags":null,"title":"cyroDRGN"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhd_see/","tags":null,"title":"d-see"},{"categories":null,"contents":"we can also damp the heat equation:\n\\begin{equation} \\pdv{u}{t} + ku = \\pdv[2]{u}{x} \\end{equation}\nwe note that substituting \\(u(t,x) = e^{-kt}w(t,x)\\) into the expression, we yield:\n\\begin{equation} \\pdv{w}{t} = \\pdv[2]{w}{t} \\end{equation}\ntherefore, we simply have to solve the system normally on \\(w\\), then multiply the solution by \\(e^{-kt}\\) to obtain our solution for the damped equation.\n","html":"\u003cp\u003ewe can also damp the heat equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} + ku = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe note that substituting \\(u(t,x) = e^{-kt}w(t,x)\\) into the expression, we yield:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{w}{t} = \\pdv[2]{w}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore, we simply have to solve the system normally on \\(w\\), then multiply the solution by \\(e^{-kt}\\) to obtain our solution for the damped equation.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdamped_heat_equation/","tags":null,"title":"damped heat equation"},{"categories":null,"contents":"darkpools are non-exchange, non-published exchange which doesn\u0026rsquo;t have the same reporting obligations of a stock market. 
The only thing they have to report is the actual filled transactions after 90 seconds.\ndarkpools are used because the order book/bid-ask spread is not leaked, which means large transactions will not be able to influence the market as much.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003es are non-exchange, non-published exchange which doesn\u0026rsquo;t have the same reporting obligations of a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003estock market\u003c/a\u003e. The only thing they have to report is the actual filled transactions after 90 seconds.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003es are used because the order book/bid-ask spread is not leaked, which means large transactions will not be able to influence the market as much.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdarkpool/","tags":null,"title":"darkpool"},{"categories":null,"contents":"For data inference tasks, categorical data\n","html":"\u003cp\u003eFor \u003ca href=\"/posts/kbhdata_inference/\"\u003edata inference\u003c/a\u003e tasks, categorical data\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdata_inference/","tags":null,"title":"data inference"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdcgan/","tags":null,"title":"DCGAN"},{"categories":null,"contents":"Speed up biosensor development? Preso felt like an ad.\nLucCage\nAnd then we throw ML at designing LucCage binding structures. Using LucCage florescence as a reporter tool for detection of the molecule\nwe found LucCage used the platform + ML to engineer binding sites to things we want to bio on we tacked a light on it florescence now you have a lightbulb as an assay ","html":"\u003cp\u003eSpeed up biosensor development? 
Preso felt like an ad.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eAnd then we throw ML at designing \u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e binding structures. Using \u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e florescence as a reporter tool for detection of the molecule\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe found \u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eused the platform + ML to engineer binding sites to things we want to bio on\u003c/li\u003e\n\u003cli\u003ewe tacked a light on it\u003c/li\u003e\n\u003cli\u003eflorescence\u003c/li\u003e\n\u003cli\u003enow you have a lightbulb as an assay\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhde_novo_biosensors/","tags":null,"title":"De novo biosensors"},{"categories":null,"contents":"hype hype hype\nprotein binding with Rosetta RoseTTAFold2 RFDiffusion ","html":"\u003cp\u003ehype hype hype\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrosetta/#protein-binding-with-id-dc7d9d61-130a-435d-bd45-9757aed9555a-rosetta\"\u003eprotein binding with Rosetta\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhde_novo_protein_design/","tags":null,"title":"De Novo Protein Design"},{"categories":null,"contents":"deadlock is when mutexes lock in a circular order:\nthread 1:\nm1.lock(); m2.lock(); thread 2:\nm2.lock(); m3.lock(); We prevent this by locking things in the same order. Which maybe hard: because loops.\nWe need, also, to limit the number of threads competing for a shared resource: imagine all of your threads doing a thing, will it deadlock? 
If so, limit.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003e is when \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003ees lock in a circular order:\u003c/p\u003e\n\u003cp\u003ethread 1:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethread 2:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe prevent this by locking things in the same order. Which maybe hard: because loops.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe need, also, to \u003cstrong\u003elimit the number of threads competing for a shared resource\u003c/strong\u003e: imagine all of your threads doing a thing, will it deadlock? If so, limit.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeadlock/","tags":null,"title":"deadlock"},{"categories":null,"contents":"Key components Task/Objective (\u0026ldquo;Automated Driving to reach destination [here]\u0026rdquo;) Resources (state) (\u0026ldquo;sensors, fuel, etc.\u0026rdquo;) Uncertainties (\u0026ldquo;What in the world is happening\u0026rdquo;) Actions (\u0026ldquo;turn left\u0026rdquo;) In one line: an agent makes decisions via the balance of observation with uncertainty. This is called the observe-act cycle.\nSee also connectionism\nApplications Stock shelving Automated driving Space missions Sports Congestion modeling Online dating Traffic light control decision making methods explicit programming: \u0026ldquo;just code it up\u0026rdquo; \u0026mdash; try this first if you are building something, which should establish a baseline: guess all possible states, and hard code strategies for all of them supervised learning: manually solve representative states, hard code strategies for them, make model interpolate between them optimization: create optimization objective connected to a model of the environment, optimize that objective planning: using model of the environment directly to predict best moves reinforcement learning: make agent interact with environment directly, and optimize its score of success in the environment without a model Method Model Visible? Strategy Hard-Coded? 
explicit programming yes, all states fully known yes supervised learning no, only a sample of it yes, only a sample of it optimization no, except reward no planning yes no reinforcement learning history see decision making history\n","html":"\u003ch2 id=\"key-components\"\u003eKey components\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTask/Objective (\u0026ldquo;Automated Driving to reach destination [here]\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eResources (state) (\u0026ldquo;sensors, fuel, etc.\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eUncertainties (\u0026ldquo;What in the world is happening\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eActions (\u0026ldquo;turn left\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn one line: an \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e makes decisions via the balance of \u003cstrong\u003eobservation\u003c/strong\u003e with \u003cstrong\u003e\u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e\u003c/strong\u003e. 
This is called the \u003ca href=\"/posts/kbhobserve_act_cycle/\"\u003eobserve-act cycle\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhconnectionism/\"\u003econnectionism\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"applications\"\u003eApplications\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eStock shelving\u003c/li\u003e\n\u003cli\u003eAutomated driving\u003c/li\u003e\n\u003cli\u003eSpace missions\u003c/li\u003e\n\u003cli\u003eSports\u003c/li\u003e\n\u003cli\u003eCongestion modeling\u003c/li\u003e\n\u003cli\u003eOnline dating\u003c/li\u003e\n\u003cli\u003eTraffic light control\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"decision-making--kbhdecision-making-dot-md--methods\"\u003e\u003ca href=\"/posts/kbhdecision_making/\"\u003edecision making\u003c/a\u003e methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e: \u0026ldquo;just code it up\u0026rdquo; \u0026mdash; try this first if you are building something, which should establish a \u003cstrong\u003ebaseline\u003c/strong\u003e: guess all possible states, and hard code strategies for all of them\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsupervised_learning/\"\u003esupervised learning\u003c/a\u003e: manually solve representative states, hard code strategies for them, make model interpolate between them\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e: create optimization objective connected to a model of the environment, optimize that objective\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhplanning/\"\u003eplanning\u003c/a\u003e: using model of the environment directly to predict best moves\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e: make agent interact with environment directly, and optimize its score of success in the environment without a 
model\u003c/li\u003e\n\u003c/ul\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eMethod\u003c/th\u003e\n\u003cth\u003eModel Visible?\u003c/th\u003e\n\u003cth\u003eStrategy Hard-Coded?\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eyes, all states fully known\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsupervised_learning/\"\u003esupervised learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eno, only a sample of it\u003c/td\u003e\n\u003ctd\u003eyes, only a sample of it\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eno, except reward\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhplanning/\"\u003eplanning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"history\"\u003ehistory\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhdecision_making_history/\"\u003edecision making history\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_making/","tags":null,"title":"decision making"},{"categories":null,"contents":"Lecture notes taking during CS238, decision making. 
Stanford Intelligence Systems Laboratory (SISL: planning and validation of intelligent systems).\nBig Ideas Themes There\u0026rsquo;s a principled mathematical framework for defining rational behavior There are computational techniques that could lead to better, and perhaps counter-intuitive decisions Successful application depends on your choice of representation and approximation you typically can\u0026rsquo;t solve mathematical models exactly so, we have to rely on good models of approximations The same computational approaches can be applied to different application domains the same set of abstractions can be carried through life send Mykel a note about how these topics about where this stuff is applied These algorithms drive high quality decisions on a tight timeline. You can\u0026rsquo;t fuck up: people die.\nContents Fundamental understanding of mathematical models and solution methods\u0026mdash;ungraded book exercises Three quizzes: one question per chapter chapters 2, 3, 5 Implement and extend key algorithms for learning and decision making Identify an application of the theory of this course and formulate it mathematically (proposal) what are the i/o what are the sensors measurements what are the decisions to be made [one other thing] Course Outline 1-shot: Probabilistic Reasoning models of distributions over many variables using distributions to make inferences utility theory n-shot: Sequential Problems we now 1-shot decision networks into making a series of decisions assume: model of environment is known (no Model Uncertainty), and environment is fully observable (no State Uncertainty) this introduces a Markov Decision Process (MDP) approximation solutions for observing the environment both online and offline Model Uncertainty deal with situations where we don\u0026rsquo;t know what the best action is at any given step i.e.: future rewards, etc. 
introduce reinforcement learning and its challenges Rewards may be received long after important decisions Agents must generalized from limited exploration experience State Uncertainty deal with situations where we don\u0026rsquo;t know what is actually happening: we only have a probabilistic state introduce Partially Observable Markov Decision Process keep a distribution of believes update the distribution of believes make decisions based the distribution Multiagent Systems challenges of Interaction Uncertainty building up interaction complexity simple games: many agents, each with individual rewards, acting to make a single joint action markov games: many agents, many states, multiple outcomes in a stochastic environment; Interaction Uncertainty arises out of unknowns about what other agents will do partially observable markov game: markov games with State Uncertainty decentralized partially observable markov game: POMGs with shared rewards between agents instead of individual rewards Lectures probabilistic reasoning relating to single decisions Baysian Networks, and how to deal with them.\nSU-CS238 SEP262023 SU-CS238 SEP272023 SU-CS238 OCT032023 SU-CS238 OCT052023 SU-CS238 OCT102023 SU-CS238 OCT122023 a chain of reasoning with feedback Markov Decision Process uses policies that are evaluated with policy evaluation via utility, Bellman Equation, value function, etc.\nIf we know the state space fully, we can use policy iteration and value iteration to determine an objectively optimal policy. If we don\u0026rsquo;t (or if the state space is too large), we can try to discretize our state space and appropriate through Approximate Value Functions, or use online planning approaches to compute good policy as we go.\nIf none of those things are feasible (i.e. your state space is too big or complex to be discretized (i.e. 
sampling will cause you to loose the structure of the problem)), you can do some lovely Policy Optimization which will keep you in continuous space while iterating on the policy directly. Some nerds lmao like Policy Gradient methods if your policy is differentiable.\nNow, Policy Optimization methods all require sampling a certain set of trajectories and optimizing over them in order to work. How do we know how much sampling to do before we start optimizing? That\u0026rsquo;s an Exploration and Exploitation question. We can try really hard to collect trajectories, but then we\u0026rsquo;d loose out on collecting intermediate reward.\nSU-CS238 OCT172023 SU-CS238 OCT192023 SU-CS238 OCT242023 SU-CS238 OCT262023 SU-CS238 OCT312023 SU-CS238 NOV022023 POMDP bomp bomp bomp SU-CS238 NOV092023 SU-CS238 NOV142023 SU-CS238 NOV162023 SU-CS238 NOV282023 SU-CS238 NOV302023 Failures? Change the action space Change the reward function Change the transition function Improve the solver Don\u0026rsquo;t worry about it Don\u0026rsquo;t deploy the system Words of Wisdom from Mykel \u0026ldquo;The belief update is central to learning. The point of education is to change your beliefs; look for opportunities to change your belief.\u0026rdquo;\n\u0026ldquo;What\u0026rsquo;s in the action space, how do we maximize it?\u0026rdquo;\nFrom MDPs, \u0026ldquo;we can learn from the past, but the past doesn\u0026rsquo;t influence you.\u0026rdquo;\n\u0026ldquo;Optimism under uncertainty\u0026rdquo;: Exploration and Exploitation \u0026ldquo;you should try things\u0026rdquo;\nWorksheets SU-CS238 Q0Q3 ","html":"\u003cp\u003eLecture notes taking during CS238, decision making. 
Stanford Intelligence Systems Laboratory (SISL: planning and validation of intelligent systems).\u003c/p\u003e\n\u003ch2 id=\"big-ideas\"\u003eBig Ideas\u003c/h2\u003e\n\u003ch3 id=\"themes\"\u003eThemes\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eThere\u0026rsquo;s a principled mathematical framework for defining rational behavior\u003c/li\u003e\n\u003cli\u003eThere are computational techniques that could lead to better, and perhaps counter-intuitive decisions\u003c/li\u003e\n\u003cli\u003eSuccessful application depends on your choice of representation and approximation\n\u003cul\u003e\n\u003cli\u003eyou typically can\u0026rsquo;t solve mathematical models \u003cstrong\u003eexactly\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eso, we have to rely on good models of approximations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThe same computational approaches can be applied to different application domains\n\u003cul\u003e\n\u003cli\u003ethe same set of abstractions can be carried through life\u003c/li\u003e\n\u003cli\u003esend Mykel a note about how these topics about where this stuff is applied\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThese algorithms drive \u003cstrong\u003ehigh quality\u003c/strong\u003e decisions on a \u003cstrong\u003etight timeline\u003c/strong\u003e. 
You can\u0026rsquo;t fuck up: people die.\u003c/p\u003e\n\u003ch3 id=\"contents\"\u003eContents\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFundamental understanding of mathematical models and solution methods\u0026mdash;ungraded book exercises\n\u003cul\u003e\n\u003cli\u003eThree quizzes: one question per chapter\n\u003col\u003e\n\u003cli\u003echapters 2, 3, 5\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eImplement and extend key algorithms for learning and decision making\u003c/li\u003e\n\u003cli\u003eIdentify an application of the theory of this course and formulate it mathematically (proposal)\n\u003cul\u003e\n\u003cli\u003ewhat are the i/o\u003c/li\u003e\n\u003cli\u003ewhat are the sensors measurements\u003c/li\u003e\n\u003cli\u003ewhat are the decisions to be made\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e[one other thing]\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"course-outline\"\u003eCourse Outline\u003c/h2\u003e\n\u003ch3 id=\"1-shot-probabilistic-reasoning\"\u003e1-shot: Probabilistic Reasoning\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003emodels of distributions over many variables\u003c/li\u003e\n\u003cli\u003eusing distributions to make inferences\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility theory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"n-shot-sequential-problems\"\u003en-shot: Sequential Problems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe now 1-shot \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003es into making a series of decisions\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eassume\u003c/strong\u003e: model of environment is known (no \u003ca href=\"\"\u003eModel Uncertainty\u003c/a\u003e), and environment is fully observable (no \u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003ethis introduces a \u003ca 
href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e (MDP)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eapproximation solutions for observing the environment both online and offline\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"model-uncertainty\"\u003eModel Uncertainty\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edeal with situations where we don\u0026rsquo;t know what the best action is at any given step\u003c/li\u003e\n\u003cli\u003ei.e.: future rewards, etc.\u003c/li\u003e\n\u003cli\u003eintroduce \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e and its challenges\n\u003col\u003e\n\u003cli\u003eRewards may be received long after important decisions\u003c/li\u003e\n\u003cli\u003eAgents must generalized from limited exploration experience\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"state-uncertainty\"\u003eState Uncertainty\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edeal with situations where we don\u0026rsquo;t know what is actually happening: we only have a \u003cstrong\u003eprobabilistic\u003c/strong\u003e state\u003c/li\u003e\n\u003cli\u003eintroduce \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePartially Observable Markov Decision Process\u003c/a\u003e\n\u003col\u003e\n\u003cli\u003ekeep a distribution of believes\u003c/li\u003e\n\u003cli\u003eupdate the distribution of believes\u003c/li\u003e\n\u003cli\u003emake decisions based the distribution\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multiagent-systems\"\u003eMultiagent Systems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003echallenges of \u003ca href=\"/posts/kbhinteraction_uncertainty/\"\u003eInteraction Uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebuilding up interaction complexity\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsimple_game/\"\u003esimple game\u003c/a\u003es: 
many \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003es, each with individual rewards, acting to make a single joint action\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_game/\"\u003emarkov game\u003c/a\u003es: many agents, many states, multiple outcomes in a stochastic environment; \u003ca href=\"/posts/kbhinteraction_uncertainty/\"\u003eInteraction Uncertainty\u003c/a\u003e arises out of unknowns about what other agents will do\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_game/\"\u003epartially observable markov game\u003c/a\u003e: \u003ca href=\"/posts/kbhmarkov_game/\"\u003emarkov game\u003c/a\u003es with \u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edecentralized \u003ca href=\"/posts/kbhpartially_observable_markov_game/\"\u003epartially observable markov game\u003c/a\u003e: \u003ca href=\"/posts/kbhpartially_observable_markov_game/\"\u003ePOMG\u003c/a\u003es with shared rewards between \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003es instead of individual rewards\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"lectures\"\u003eLectures\u003c/h2\u003e\n\u003ch3 id=\"probabilistic-reasoning-relating-to-single-decisions\"\u003eprobabilistic reasoning relating to single decisions\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003es, and how to deal with them.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_sep262023/\"\u003eSU-CS238 SEP262023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_sep272023/\"\u003eSU-CS238 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct032023/\"\u003eSU-CS238 OCT032023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct052023/\"\u003eSU-CS238 OCT052023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs238_oct102023/\"\u003eSU-CS238 OCT102023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct122023/\"\u003eSU-CS238 OCT122023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"a-chain-of-reasoning-with-feedback\"\u003ea chain of reasoning with feedback\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e uses \u003ca href=\"/posts/kbhpolicy/\"\u003epolicies\u003c/a\u003e that are evaluated with \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e via \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e, \u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Equation\u003c/a\u003e, \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e, etc.\u003c/p\u003e\n\u003cp\u003eIf we know the state space fully, we can use \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e and \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e to determine an objectively \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e. If we don\u0026rsquo;t (or if the state space is too large), we can try to discretize our state space and appropriate through \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eApproximate Value Function\u003c/a\u003es, or use \u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e approaches to compute good policy as we go.\u003c/p\u003e\n\u003cp\u003eIf none of those things are feasible (i.e. your state space is too big or complex to be discretized (i.e. 
sampling will cause you to loose the structure of the problem)), you can do some lovely \u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e which will keep you in continuous space while iterating on the policy directly. Some nerds lmao like \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e methods if your policy is differentiable.\u003c/p\u003e\n\u003cp\u003eNow, \u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e methods all require sampling a certain set of \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003etrajectories\u003c/a\u003e and optimizing over them in order to work. How do we know how much sampling to do before we start optimizing? That\u0026rsquo;s an \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e question. We can try really hard to collect \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003etrajectories\u003c/a\u003e, but then we\u0026rsquo;d loose out on collecting intermediate reward.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct172023/\"\u003eSU-CS238 OCT172023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct192023/\"\u003eSU-CS238 OCT192023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct242023/\"\u003eSU-CS238 OCT242023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct262023/\"\u003eSU-CS238 OCT262023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct212023/\"\u003eSU-CS238 OCT312023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov022023/\"\u003eSU-CS238 NOV022023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pomdp--kbhpartially-observable-markov-decision-process-dot-md--bomp-bomp-bomp\"\u003e\u003ca 
href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e bomp bomp bomp\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov092023/\"\u003eSU-CS238 NOV092023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov142023/\"\u003eSU-CS238 NOV142023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov162023/\"\u003eSU-CS238 NOV162023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov282023/\"\u003eSU-CS238 NOV282023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov302023/\"\u003eSU-CS238 NOV302023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"failures\"\u003eFailures?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eChange the action space\u003c/li\u003e\n\u003cli\u003eChange the reward function\u003c/li\u003e\n\u003cli\u003eChange the transition function\u003c/li\u003e\n\u003cli\u003eImprove the solver\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t worry about it\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t deploy the system\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"words-of-wisdom-from-mykel\"\u003eWords of Wisdom from Mykel\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;The belief update is central to learning. 
The point of education is to change your beliefs; look for opportunities to change your belief.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;What\u0026rsquo;s in the action space, how do we maximize it?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eFrom MDPs, \u0026ldquo;we can learn from the past, but the past doesn\u0026rsquo;t influence you.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Optimism under uncertainty\u0026rdquo;: \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e \u0026ldquo;you should try things\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"worksheets\"\u003eWorksheets\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_q0q3/\"\u003eSU-CS238 Q0Q3\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_making_index/","tags":["index"],"title":"Decision Making Index"},{"categories":null,"contents":"A decision network is a Baysian Network which is used to make decisions based on optimizing utility.\nTo solve a problem, we iterate through all possible decision parameters to find the one that maximizes utility.\nNodes chance nodes: random variables \u0026mdash; some inputs we can observe, some are latent variables we can\u0026rsquo;t observe \u0026mdash; circles action nodes: what we have control over \u0026mdash; squares utility nodes: output, what the results would be; we typically sum utilities together if you have multiple of them \u0026mdash; diamonds Edges conditional edge - arrows to chance nodes: conditional probability edges informational edge - arrows to action nodes: this information is used to inform choice of action functional edge - arrows to utility nodes: computes how the action affects the world Example For \\(U\\), for instance, you can have a factor that loks ilke:\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003e is a \u003ca 
href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e which is used to make decisions based on optimizing \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo solve a problem, we iterate through all possible decision parameters to find the one that maximizes utility.\u003c/p\u003e\n\u003ch2 id=\"nodes\"\u003eNodes\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003echance nodes: random variables \u0026mdash; some inputs we can observe, some are latent variables we can\u0026rsquo;t observe \u0026mdash; circles\u003c/li\u003e\n\u003cli\u003eaction nodes: what we have control over \u0026mdash; squares\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e nodes: output, what the results would be; we typically sum utilities together if you have multiple of them \u0026mdash; diamonds\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"edges\"\u003eEdges\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003econditional edge - arrows to chance nodes: conditional \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e edges\u003c/li\u003e\n\u003cli\u003einformational edge - arrows to action nodes: this information is used to inform choice of action\u003c/li\u003e\n\u003cli\u003efunctional edge - arrows to utility nodes: computes how the action affects the world\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-12_12-27-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFor \\(U\\), for instance, you can have a \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003e that loks ilke:\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_networks/","tags":null,"title":"decision network"},{"categories":null,"contents":"a student approach to learning where learning outcomes are driven by student\u0026rsquo;s own experience to deeply drive educational 
results independenlty\n","html":"\u003cp\u003ea \u003ca href=\"\"\u003estudent approach to learning\u003c/a\u003e where learning outcomes are driven by student\u0026rsquo;s own experience to deeply drive educational results independenlty\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeep_approach/","tags":null,"title":"deep approach"},{"categories":null,"contents":"deep learning is MLE performed with neural networks. A neural network is many logistic regression pieces (sic.?) stack on top of each other.\nWe begin motivating this with trying to solve MNIST with logistic regression. What a time to be alive. After each layer of deep learning, we are going to use a layer of \u0026ldquo;hidden variable\u0026rdquo;, made of singular logistic regressions,\nNotation:\n\\(x\\) is the input, \\(h\\) is the hidden layers, and \\(\\hat{y}\\) is the prediction.\nWe call each weight, at each layer, from \\(x_{i}\\) to \\(h_{j}\\), \\(\\theta_{i,j}^{(h)}\\). At every neuron on each layer, we calculate:\n\\begin{equation} h_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}] \\end{equation}\n\\begin{equation} \\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}] \\end{equation}\nnote! 
we often\nbackpropegation backpropegation is a special case of \u0026ldquo;backwards differentiation\u0026rdquo; to update a computation grap.h\nToy Consider:\n\\begin{equation} L(a,b,c) = c(a+2b) \\end{equation}\nmeaning, we obtain a graph that looks like:\nin three steps, we have:\n\\(d = 2b\\) \\(e = a+d\\) \\(L = e\\cdot e\\) To perform backpropagation, we compute derivatives from right to left, computing first \\(\\pdv{L}{L}= 1\\), then, moving slowly towards the left to obtain \\(\\pdv{L}{c} = \\pdv{L}{L}\\pdv{L}{c}\\), and then \\(\\pdv{L}{e} = \\pdv{L}{L}\\pdv{L}{c}\\) , and then \\(\\pdv{L}{d} = \\pdv{L}{L}\\pdv{L}{e}\\pdv{e}{d}\\) and so forth.\nMotivation deep learning is useful by having good \\(\\theta\\) we can find useful thetas by MLE we MLE by doing optimization to maximize the likelyhood Example For one data point, let us define our neural network:\n\\begin{equation} h_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}] \\end{equation}\n\\begin{equation} \\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}] \\end{equation}\nwe can define our network:\n\\begin{equation} L(\\theta) = P(Y=y|X=x) = (\\hat{y})^{y} (1-\\hat{y})^{1-y} \\end{equation}\nfrom IID datasets, we can multiply the probablities together:\n\\begin{equation} L(\\theta) = \\prod_{i=1}^{n} (\\hat{y_{i}})^{y_{i}} (1-\\hat{y_{i}})^{1-y_{i}} \\end{equation}\nand, to prevent calculus and derivative instability, we take the log:\n\\begin{equation} LL(\\theta) = \\sum_{i=1}^{n}{y_{i}}\\log (\\hat{y_{i}}) \\cdot ( 1-y_{i} )\\log (1-\\hat{y_{i}}) \\end{equation}\nWe want to maximise this, meaning we perform gradient ascent on this statement. 
Recall the chain rule; so we can break each layer down:\n\\begin{equation} \\pdv{LL(\\theta)}{\\theta_{ij}^{h}} = \\pdv{LL(\\theta)}{\\hat{y}} \\pdv{\\hat{y}}{h_{j}} \\pdv{h_{j}}{\\theta_{ij}^{h}} \\end{equation}\nfurthermore, for any summation,\n\\begin{equation} \\dv x \\sum_{i=0}^{} x = \\sum_{i=0}^{}\\dv x x \\end{equation}\nSo we can consider our derivatives with respect to each data point. When going about the second part, recall an important trick:\n\\begin{equation} \\pdv{h_{i}} \\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}] \\end{equation}\nyou will note that, for the inside derivative, much the summation expands\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e is \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e performed with neural networks. A \u003ca href=\"/posts/kbhdeep_learning/\"\u003eneural network\u003c/a\u003e is many \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e pieces (sic.?) stack on top of each other.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe begin motivating this with trying to solve MNIST with \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e. What a time to be alive. After each layer of \u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e, we are going to use a layer of \u0026ldquo;\u003ca href=\"/posts/kbhinference/\"\u003ehidden variable\u003c/a\u003e\u0026rdquo;, made of singular \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003es,\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNotation:\u003c/p\u003e\n\u003cp\u003e\\(x\\) is the input, \\(h\\) is the hidden layers, and \\(\\hat{y}\\) is the prediction.\u003c/p\u003e\n\u003cp\u003eWe call each weight, at each layer, from \\(x_{i}\\) to \\(h_{j}\\), \\(\\theta_{i,j}^{(h)}\\). 
At every neuron on each layer, we calculate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote! we often\u003c/p\u003e\n\u003ch2 id=\"backpropegation\"\u003ebackpropegation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#backpropegation\"\u003ebackpropegation\u003c/a\u003e is a special case of \u0026ldquo;backwards differentiation\u0026rdquo; to update a computation grap.h\u003c/p\u003e\n\u003ch3 id=\"toy\"\u003eToy\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(a,b,c) = c(a+2b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, we obtain a graph that looks like:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_23-46-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ein three steps, we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(d = 2b\\)\u003c/li\u003e\n\u003cli\u003e\\(e = a+d\\)\u003c/li\u003e\n\u003cli\u003e\\(L = e\\cdot e\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTo perform backpropagation, we compute derivatives from right to left, computing first \\(\\pdv{L}{L}= 1\\), then, moving slowly towards the left to obtain \\(\\pdv{L}{c} = \\pdv{L}{L}\\pdv{L}{c}\\), and then \\(\\pdv{L}{e} = \\pdv{L}{L}\\pdv{L}{c}\\) , and then \\(\\pdv{L}{d} = \\pdv{L}{L}\\pdv{L}{e}\\pdv{e}{d}\\) and so forth.\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003eMotivation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edeep learning is useful by having good \\(\\theta\\)\u003c/li\u003e\n\u003cli\u003ewe can find useful thetas by \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e by doing optimization to maximize 
the \u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eFor one data point, let us define our neural network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can define our network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(\\theta) = P(Y=y|X=x) = (\\hat{y})^{y} (1-\\hat{y})^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efrom IID datasets, we can multiply the probablities together:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(\\theta) = \\prod_{i=1}^{n} (\\hat{y_{i}})^{y_{i}} (1-\\hat{y_{i}})^{1-y_{i}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, to prevent calculus and derivative instability, we take the log:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nLL(\\theta) = \\sum_{i=1}^{n}{y_{i}}\\log (\\hat{y_{i}}) \\cdot ( 1-y_{i} )\\log (1-\\hat{y_{i}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to maximise this, meaning we perform \u003ca href=\"/posts/kbhargmax/#gradient-ascent\"\u003egradient ascent\u003c/a\u003e on this statement. Recall the chain rule; so we can break each layer down:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{LL(\\theta)}{\\theta_{ij}^{h}} = \\pdv{LL(\\theta)}{\\hat{y}} \\pdv{\\hat{y}}{h_{j}} \\pdv{h_{j}}{\\theta_{ij}^{h}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efurthermore, for any summation,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv x \\sum_{i=0}^{} x = \\sum_{i=0}^{}\\dv x x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we can consider our derivatives with respect to each data point. 
When going about the second part, recall an important trick:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{h_{i}} \\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that, for the inside derivative, much the summation expands\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeep_learning/","tags":null,"title":"deep learning"},{"categories":null,"contents":"Facts Everybody writes bugs Debugging sucks Defensive Programming Tools + Techniques Use language features Specs, documentations, Test-Driven Development, unit testing Fail fast and loudly Systematic debugging Investing in tools Use Language Features Descriptors: static, final, pub./priv. Type checking: prevent type errors Automatic array bounds checking Memory management Compiler optimization Key idea: know what language features are available, why/when to use them. don\u0026rsquo;t work against the language in circumventing them\nSpecs, Docs., TDD, Unit Tests How should it work: specs How does it work: docs How will I know it works: TDD How do I know it still works: unit tests These all force you to think about your code before!! 
you write it so then you can correct them as soon as possible.\nFailing Fast and Failing Loudly The earlier you recognize there is a problem, the easier it is to fix it Problems not fixed can be lost, covered up, or even relied upon Learn from every failure How do we put this into practice Use asserts, exceptions, logging Fix/diagnose/track every bug, even if you choose not to fix it Add regression tests for every bug + run them regularly Systematic Debugging Systematic Debugging is a framework for debugging software.\nReproduce the bug Reduce the bug to the smallest possible, repeatable test case Faster test cases mean faster iterations in debugging Smaller test cases help eliminate possible causes for error Find the root cause Study data (logs, behavior, etc.), hypothesis, experiment, repeat Change code and data to get more information FIXING SYMPTOM IS NOT ENOUGH Fix the bug Add a regression test, and run all tests Reducing Test Case Start with the data that uncovered the bug Remove pieces of data until the bug no longer occurs Bracketing: create both a test case that fails and similar test cases that pass Binary search: remove/add back half of the data at a time Can work from either end: start with everything and reduce until disappearance, or start with only one line and build until bug Finding the Cause Trace through the program View intermediate results Every iteration of a for loop Input and output of a given function Tools to use assert() printing/logging a debugger binary search Tooling! 
Linter Fuzzer Sanitizer Valgrind DTrace ","html":"\u003ch2 id=\"facts\"\u003eFacts\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eEverybody writes bugs\u003c/li\u003e\n\u003cli\u003eDebugging \u003cem\u003esucks\u003c/em\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"defensive-programming-tools-plus-techniques\"\u003eDefensive Programming Tools + Techniques\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUse language features\u003c/li\u003e\n\u003cli\u003eSpecs, documentations, \u003ca href=\"/posts/kbhtesting/#test-driven-development\"\u003eTest-Driven Development\u003c/a\u003e, unit testing\u003c/li\u003e\n\u003cli\u003eFail fast and loudly\u003c/li\u003e\n\u003cli\u003eSystematic debugging\u003c/li\u003e\n\u003cli\u003eInvesting in tools\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"use-language-features\"\u003eUse Language Features\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDescriptors: static, final, pub./priv.\u003c/li\u003e\n\u003cli\u003eType checking: prevent type errors\u003c/li\u003e\n\u003cli\u003eAutomatic array bounds checking\u003c/li\u003e\n\u003cli\u003eMemory management\u003c/li\u003e\n\u003cli\u003eCompiler optimization\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eKey idea: know what language features are available, why/when to use them. 
\u003cstrong\u003edon\u0026rsquo;t work against the language in circumventing them\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"specs-docs-dot-tdd-unit-tests\"\u003eSpecs, Docs., TDD, Unit Tests\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHow should it work: specs\u003c/li\u003e\n\u003cli\u003eHow does it work: docs\u003c/li\u003e\n\u003cli\u003eHow will I know it works: TDD\u003c/li\u003e\n\u003cli\u003eHow do I know it still works: unit tests\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThese all force you to \u003cem\u003ethink\u003c/em\u003e about your code \u003cem\u003ebefore!!\u003c/em\u003e you write it so then you can correct them as soon as possible.\u003c/p\u003e\n\u003ch2 id=\"failing-fast-and-failing-loudly\"\u003eFailing Fast and Failing Loudly\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eThe earlier you recognize there is a problem, the easier it is to fix it\u003c/li\u003e\n\u003cli\u003eProblems not fixed can be lost, covered up, or even \u003cstrong\u003erelied upon\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eLearn from every failure\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"how-do-we-put-this-into-practice\"\u003eHow do we put this into practice\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eUse asserts, exceptions, logging\u003c/li\u003e\n\u003cli\u003eFix/diagnose/track every bug, even if you choose not to fix it\u003c/li\u003e\n\u003cli\u003eAdd regression tests for every bug + run them regularly\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"systematic-debugging\"\u003eSystematic Debugging\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#systematic-debugging\"\u003eSystematic Debugging\u003c/a\u003e is a framework for debugging software.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eReproduce the bug\u003c/li\u003e\n\u003cli\u003eReduce the bug to the smallest possible, repeatable test case\n\u003col\u003e\n\u003cli\u003eFaster test cases mean faster iterations in debugging\u003c/li\u003e\n\u003cli\u003eSmaller test cases help 
eliminate possible causes for error\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eFind the root cause\n\u003col\u003e\n\u003cli\u003eStudy data (logs, behavior, etc.), hypothesis, experiment, repeat\u003c/li\u003e\n\u003cli\u003eChange code and data to get more information\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eFIXING SYMPTOM IS NOT ENOUGH\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eFix the bug\u003c/li\u003e\n\u003cli\u003eAdd a regression test, and run all tests\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"reducing-test-case\"\u003eReducing Test Case\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStart with the data that uncovered the bug\u003c/li\u003e\n\u003cli\u003eRemove pieces of data until the bug no longer occurs\n\u003cul\u003e\n\u003cli\u003eBracketing: create both a test case that fails and similar test cases that pass\u003c/li\u003e\n\u003cli\u003eBinary search: remove/add back half of the data at a time\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eCan work from either end: start with everything and reduce until disappearance, or start with only one line and build until bug\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"finding-the-cause\"\u003eFinding the Cause\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTrace through the program\u003c/li\u003e\n\u003cli\u003eView intermediate results\n\u003cul\u003e\n\u003cli\u003eEvery iteration of a for loop\u003c/li\u003e\n\u003cli\u003eInput and output of a given function\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eTools to use\n\u003cul\u003e\n\u003cli\u003eassert()\u003c/li\u003e\n\u003cli\u003eprinting/logging\u003c/li\u003e\n\u003cli\u003ea debugger\u003c/li\u003e\n\u003cli\u003ebinary search\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"tooling\"\u003eTooling!\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLinter\u003c/li\u003e\n\u003cli\u003eFuzzer\u003c/li\u003e\n\u003cli\u003eSanitizer\u003c/li\u003e\n\u003cli\u003eValgrind\u003c/li\u003e\n\u003cli\u003eDTrace\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"d41d8c\"\u003e\u003c/h2\u003e\n\u003ch2 id=\"d41d8c\"\u003e\u003c/h2\u003e\n\u003ch2 id=\"d41d8c\"\u003e\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdefensive_programming/","tags":null,"title":"Defensive Programming"},{"categories":null,"contents":"degrees of belief help us quantify how much we believe some event \\(A\\) is more/less plausible than some event \\(B\\).\nLet us take two statements:\n\\(A\\) Taylor gets Nobel Prize in Literature \\(B\\) Han shot first For instance, if we want to express \u0026ldquo;I think its more likely that Taylor gets the prize than Han shot first\u0026rdquo;:\n\\begin{equation} A \\succ B \\end{equation}\naxioms of degrees of belief universal comparability for two statements \\(A, B\\), only three states can exist:\n\\(A \\succ B\\) (A more likely) \\(A \\prec B\\) (B more likely) \\(A \\sim B\\) (equally likely) transitivity if \\(A \\succeq B\\) and \\(B \\succeq C\\), then \\(A \\succeq C\\)\nlanguage of probability using this framework, we can then describe the events in terms of probability\n\\(P(A) \u0026gt; P(B) \\Leftrightarrow A \\succ B\\) \\(P(A) = P(B) \\Leftrightarrow A \\sim B\\) See also axiom of probability\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability_theory/\"\u003edegrees of belief\u003c/a\u003e help us quantify how much we believe some event \\(A\\) is more/less plausible than some event \\(B\\).\u003c/p\u003e\n\u003cp\u003eLet us take two statements:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A\\) Taylor gets Nobel Prize in Literature\u003c/li\u003e\n\u003cli\u003e\\(B\\) Han shot first\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFor instance, if we want to express \u0026ldquo;I think its 
more likely that Taylor gets the prize than Han shot first\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\succ B\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"axioms-of-degrees-of-belief--kbhprobability-theory-dot-md\"\u003eaxioms of \u003ca href=\"/posts/kbhprobability_theory/\"\u003edegrees of belief\u003c/a\u003e\u003c/h2\u003e\n\u003ch3 id=\"universal-comparability\"\u003euniversal comparability\u003c/h3\u003e\n\u003cp\u003efor two statements \\(A, B\\), only three states can exist:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A \\succ B\\) (A more likely)\u003c/li\u003e\n\u003cli\u003e\\(A \\prec B\\) (B more likely)\u003c/li\u003e\n\u003cli\u003e\\(A \\sim B\\) (equally likely)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"transitivity\"\u003etransitivity\u003c/h3\u003e\n\u003cp\u003eif \\(A \\succeq B\\) and \\(B \\succeq C\\), then \\(A \\succeq C\\)\u003c/p\u003e\n\u003ch3 id=\"language-of-probability\"\u003elanguage of probability\u003c/h3\u003e\n\u003cp\u003eusing this framework, we can then describe the events in terms of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P(A) \u0026gt; P(B) \\Leftrightarrow A \\succ B\\)\u003c/li\u003e\n\u003cli\u003e\\(P(A) = P(B) \\Leftrightarrow A \\sim B\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_theory/","tags":null,"title":"degrees of belief"},{"categories":null,"contents":" use efficient page maps too translate virtual to physical addresses kick things off to disk when memory runs out Every process has its own page map.\ndemand paging Key idea: physical representation of virtual memory does not have to be on actual memory.\nif memory fills out, kick a page to disk if the program asks for memory again, kick another page to disk and load 
its memory back Keep in memory the information that\u0026rsquo;s being used, kick the rest to swap space/\u0026quot;paging file\u0026quot;. Ideally: we have a performance of main memory and capacity of disk.\ndemand fetching most modern OSes start with no pages loaded\u0026mdash;load pages only when referenced; this is tempered by the type of page that\u0026rsquo;s needed:\nread only code pages (program code, doesn\u0026rsquo;t change) \u0026mdash; do NOT save to swap; executable will always be there so you can just reload from disk program will expect code pages too contain executable data initialized data pages \u0026mdash; save to swap because contents may have changed frorm initial values program expects them to contain data on load, so we need to load them ahead of time unitialized data pages save to swap no initial content Page Type Need Content on First Load Save to Swap (\u0026ldquo;Swap?\u0026rdquo;) code yes no (read from exe) data yes yes stack/heap no yes We only write to disk if its dirty.\nusing swap to get extra memory pick a page to kick out write kicked page to disk mark the old page entry as not present give the physical address to the new virtual page If we ever ask for the old page back, trigger page fault:\npage fault (to recover a page that\u0026rsquo;s on swap)\ncheck with swap for the data get new physical page (perhaps kicking out another page) load data into page update page map as present and with new address choosing what to swap thrashing downside of demand paging\nWhen the pages being actively used don\u0026rsquo;t fit in memory: you will have to get the page, kick it out immediately, get it back again, etc. This basically make memory as fast as disk; which is really slow.\nSolution: download more RAM. 
\u0026ldquo;buy more memory, or use task manager\u0026rdquo; - nick\npage map A page map, to keep track of something is valid/invalid, we have to store information about EVERY PAGE for EVERY PROCESS.\nEach entry in the page:\nIndex Physical Address Writable Present/Mapped? Last Access Kernel Dirty 0 0x2023 1 0 0 0 0 1 0x0023 1 1 1 0 0 Dirty: the content matters and it needs to be written out.\nThis is, of course, very big if stored densely. Consider 36 bit page numbers, 8 byte entries, it requires \\(2^{36} \\cdot 8 = 512GB\\) worth of space per process. This is sad.\nPRESENT simply means if the segment of memory is MAPPED. ITs possible for a not present index to be in SWAP instead.\npage map tree implementation To resolve this, we have entry for RANGES of virtual pages; there\u0026rsquo;s about \\(4\\) levels. If everything is invalid in a range, we just consider the whole range invalid using one row.\nTherefore, we lazily make space for this tree.\n","html":"\u003col\u003e\n\u003cli\u003euse efficient \u003cstrong\u003epage maps\u003c/strong\u003e too translate virtual to physical addresses\u003c/li\u003e\n\u003cli\u003ekick things off to \u003cstrong\u003edisk\u003c/strong\u003e when memory runs out\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eEvery process has its own \u003ca href=\"#page-map\"\u003epage map\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"demand-paging\"\u003edemand paging\u003c/h2\u003e\n\u003cp\u003eKey idea: physical representation of virtual memory does \u003cstrong\u003enot have to be on actual memory\u003c/strong\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif memory fills out, kick a page to disk\u003c/li\u003e\n\u003cli\u003eif the program asks for memory again, kick another page to disk and load its memory back\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eKeep in memory the information that\u0026rsquo;s being \u003cstrong\u003eused\u003c/strong\u003e, kick the rest to \u003ca href=\"/posts/kbhdemand_paging/\"\u003eswap 
space\u003c/a\u003e/\u0026quot;\u003ca href=\"/posts/kbhdemand_paging/\"\u003epaging file\u003c/a\u003e\u0026quot;. Ideally: we have a performance of main memory and capacity of disk.\u003c/p\u003e\n\u003ch3 id=\"demand-fetching\"\u003edemand fetching\u003c/h3\u003e\n\u003cp\u003emost modern OSes start with \u003cstrong\u003eno pages loaded\u003c/strong\u003e\u0026mdash;load pages only when referenced; this is tempered by the type of page that\u0026rsquo;s needed:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eread only code pages (program code, doesn\u0026rsquo;t change) \u0026mdash;\n\u003cul\u003e\n\u003cli\u003edo \u003cstrong\u003eNOT\u003c/strong\u003e save to swap; executable will always be there so you can just reload from disk\u003c/li\u003e\n\u003cli\u003eprogram will expect code pages too contain executable data\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003einitialized data pages \u0026mdash;\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esave to swap\u003c/strong\u003e because contents may have changed frorm initial values\u003c/li\u003e\n\u003cli\u003eprogram expects them to contain data on load, so we need to load them ahead of time\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eunitialized data pages\n\u003cul\u003e\n\u003cli\u003esave to swap\u003c/li\u003e\n\u003cli\u003eno initial content\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePage Type\u003c/th\u003e\n\u003cth\u003eNeed Content on First Load\u003c/th\u003e\n\u003cth\u003eSave to Swap (\u0026ldquo;Swap?\u0026rdquo;)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ecode\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eno (read from 
exe)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edata\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003estack/heap\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe only write to disk if its \u003cstrong\u003edirty\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"using-swap-to-get-extra-memory\"\u003eusing swap to get extra memory\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003epick a page to kick out\u003c/li\u003e\n\u003cli\u003ewrite kicked page to disk\u003c/li\u003e\n\u003cli\u003emark the old page entry as not present\u003c/li\u003e\n\u003cli\u003egive the physical address to the new virtual page\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf we ever ask for the old page back, trigger \u003ca href=\"#page-fault\"\u003epage fault\u003c/a\u003e:\u003c/p\u003e\n\u003ch3 id=\"page-fault\"\u003epage fault\u003c/h3\u003e\n\u003cp\u003e(to recover a page that\u0026rsquo;s on swap)\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echeck with swap for the data\u003c/li\u003e\n\u003cli\u003eget new physical page (perhaps kicking out another page)\u003c/li\u003e\n\u003cli\u003eload data into page\u003c/li\u003e\n\u003cli\u003eupdate page map as present and with new address\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"choosing-what-to-swap\"\u003echoosing what to swap\u003c/h3\u003e\n\u003ch3 id=\"thrashing\"\u003ethrashing\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003edownside of \u003ca href=\"/posts/kbhdemand_paging/\"\u003edemand paging\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWhen the pages being actively used don\u0026rsquo;t fit in memory: you will have to get the page, kick it out immediately, get it back again, etc. 
This basically make memory as fast as disk; which is really slow.\u003c/p\u003e\n\u003cp\u003eSolution: download more RAM. \u0026ldquo;buy more memory, or use task manager\u0026rdquo; - nick\u003c/p\u003e\n\u003ch2 id=\"page-map\"\u003epage map\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#page-map\"\u003epage map\u003c/a\u003e, to keep track of something is valid/invalid, we have to store information about \u003cstrong\u003eEVERY PAGE\u003c/strong\u003e for \u003cstrong\u003eEVERY PROCESS\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eEach entry in the page:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eIndex\u003c/th\u003e\n\u003cth\u003ePhysical Address\u003c/th\u003e\n\u003cth\u003eWritable\u003c/th\u003e\n\u003cth\u003ePresent/Mapped?\u003c/th\u003e\n\u003cth\u003eLast Access\u003c/th\u003e\n\u003cth\u003eKernel\u003c/th\u003e\n\u003cth\u003eDirty\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0x2023\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0x0023\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eDirty\u003c/strong\u003e: the content matters and it needs to be written out.\u003c/p\u003e\n\u003cp\u003eThis is, of course, very big if stored densely. Consider 36 bit page numbers, 8 byte entries, it requires \\(2^{36} \\cdot 8 = 512GB\\) worth of space per process. 
This is sad.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ePRESENT\u003c/strong\u003e simply means if the segment of memory is \u003cstrong\u003eMAPPED\u003c/strong\u003e. ITs possible for a not present index to be in SWAP instead.\u003c/p\u003e\n\u003ch3 id=\"page-map-tree-implementation\"\u003epage map tree implementation\u003c/h3\u003e\n\u003cp\u003eTo resolve this, we have entry for RANGES of virtual pages; there\u0026rsquo;s about \\(4\\) levels. If everything is invalid in a range, we just consider the whole range invalid using one row.\u003c/p\u003e\n\u003cp\u003eTherefore, we lazily make space for this tree.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdemand_paging/","tags":null,"title":"demand paging"},{"categories":null,"contents":"demand-driven theory hypothesis that the reason why the Great Depression took place was because people were not buying stocks, etc, and there was no demand.\nSee also: Monetarist theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdemand_driven_theory/\"\u003edemand-driven theory\u003c/a\u003e hypothesis that the reason why the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e took place was because people were not buying stocks, etc, and there was no demand.\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theory.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdemand_driven_theory/","tags":null,"title":"demand-driven theory"},{"categories":null,"contents":"DementiaBank is a shared database of multimedia interactions for the study of communication in dementia. There are a few projects being explored for DementiaBank.\nSee also: ADReSS Literature Survey\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e is a shared database of multimedia interactions for the study of communication in dementia. 
There are a few projects being explored for \u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhadress_literature_survey/\"\u003eADReSS Literature Survey\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdementiabank/","tags":null,"title":"DementiaBank"},{"categories":null,"contents":"Ideas Can we correlate any longitudinal data with NACC?\nData dementia/English/Lanzi: Alyssa Lanzi\u0026rsquo;s new data\ndementia/English/Delaware\nWhat are the standard for acoustic features?\nMotor cortex/frontal control may also be impacted\nVocal tremer\nWhat are the predictors? How automatic can we make it?\n","html":"\u003ch2 id=\"ideas\"\u003eIdeas\u003c/h2\u003e\n\u003cp\u003eCan we correlate any longitudinal data with \u003ca href=\"/posts/kbhnacc/\"\u003eNACC\u003c/a\u003e?\u003c/p\u003e\n\u003ch2 id=\"data\"\u003eData\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edementia/English/Lanzi: Alyssa Lanzi\u0026rsquo;s new data\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edementia/English/Delaware\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhat are the standard for acoustic features?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eMotor cortex/frontal control may also be impacted\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eVocal tremer\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhat are the predictors? How automatic can we make it?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdementiabank_acoustics_brainstoming/","tags":null,"title":"DementiaBank Acoustics Brainstoming"},{"categories":null,"contents":"The DementiaBank Acoustics Project is a working title for an acoustic-only challenge for AD detection. 
This document serves as the lab notebook for this project.\nThis project will attempt to replicate some of the results of Wang 2019 and Martinc 2021, but focusing on minimizing human involvement; we will first work on raw transcript classification with ERNIE (cutting all CHAT annotations), then introduce pause-encoding in a manner similar to Yuan 2021 which is automated by MFA. Goal is to replicate the results of Yuan 2021/or even Martinc 2021 in a completely automated manner.\nBackground Reading I first began by doing a literature survey on the ADReSS Challenge results published in the Frontiers AD special interest group issue.\nProposal And then, we wrote a proposal: DementiaBank Acoustics Project Proposal\nBrainstoming More notes from the meeting: DementiaBank Acoustics Brainstoming\nProtocol Notes July 1st Began by moving a subsample of Pitt\u0026rsquo;s Cookie Theft to pitt-7-1 in the raw data folder Ran flo on all collected samples. Arguments used are the same as that for batchalign, except we filter out the INV tier as we are detecting AD on patient and not investigator: so flo +d +ca +t* -tINV Moved all collected samples (and changed extension to .txt) to the same sub-folder, but in transcripts_nodisfluency July 2nd Created a dataprep script dataprep.py which dumps a pickled copy of cleaned data to transcripts_nodisfluency/pitt-7-1.dat. Created sliding windows of 5 pieces of dialogue concatenated, stored it in transcripts_nodisfluency/pitt-7-1-windowed.dat Used tencent/HuYong\u0026rsquo;s nghuyong/ernie-2.0-en Ernie 2.0 model, the continuous language model from Baidu (Layer:12, Hidden:768, Heads:12) July 4th Finalized training code. Selected base hyperparameters {bs: 8, epochs: 2, lr: 3e-3, length: 60}. Again, we are using Baidu\u0026rsquo;s nghuyong/ernie-2.0-en. 
Started training fastcalculator on 24bc812 train: faithful-frog-3 {bs: 8, epochs: 2, lr: 3e-3, length: 60, pitt-7-1-windowed.dat }\nCommentary: LR could be too high, looking at the divergent loss behavior. Decision: dropping bs to 4 and lr to 1e-5, similar to previous transformers. Also training for 3 epochs. train: revived-disco-5 {bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-1-windowed.dat }\nCommentary: quintessential overfitting Decision: Made the corpus bigger cleaned the entire Pitt corpus (pitt-7-4 in the raw folder) to become training data. Similar to pitt-7-1, ran flo on all collected samples; arguments used are the same as that for batchalign, except we filter out the INV tier as we are detecting AD on patient and not investigator: so flo +d +ca +t* -tINV; the flo\u0026rsquo;d results are in transcripts_nodisfluency. the notable difference between the previous dataset 7-1 and the current one 7-4 is that the 7-4 are prepended numbered by the task (cookie/100-01.cha \u0026gt; =cookie-100-01.txt) New (full) Pitt data as prepared above is ran though the dataprep script as of b325514cfad79da82d7a519ed29ea19ed87b2be4 (difference is that empty/dummy files are ignored), and pickled at transcripts_nodisfluency/pitt-7-4.dat and transcripts_nodisfluency/pitt-7-4-windowed.dat respectively. For new data, window size is still 5, splitting 10 cases out for testing now instead of 5. train: vocal-oath-6 {bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed.dat}\nCommentary: high recall, low precision. Perhaps classes aren\u0026rsquo;t balanced? Spoiler alert: they are not. An inspection of data reveals that there is 3211 rows of dementia, 2397 rows of control Decision: Created pitt-7-4-bal and pitt-7-4-windowed-bal series of data based on dataprep.py on 703f79248a20fd7a13a5033ca2bf7f691f42c941. This version force-crops to make sure that the dementia and control indicies have the exact same length for each class. 
train: helpful-leaf-7 {bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\nBeautiful. Question now is whether or not there is data leakage/external heuristics. It is a good time to do some LOOCV. Getting this result without any disfluency calculations seems unlikely.\nBut anyways, going to discuss these results as they seem to meet results we see in Yuan 2021, even without top-N ensemble; though this is one trial, LOOCV may still show that we actually need it.\nJuly 5th Began the day with creating the script k-fold validation; I originally hoped to exactly replicate the procedure of Yuan 2021 for comparability, but, not sure how they got the actual result of a min/max range with LOOCV on binary; therefore, we will instead create a 95% confidence interval analysis via a single-variable t test on standard k-fold validation. K=50 During one-off testing, another set of hyperparameters seems to work too: {bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}. As we have not begun tuning for hyperparameters, we are just going to use this set, K=50, for the first k-fold trial. k-fold: F4ZVbGfdBAQvtvXemWZCZD code: 55f77ff1dea03c3ed66967864dc52fd2c0062f23\n{bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat} K = 50\nIt seems like the results we got is consistent and validates in a manner which we expect.\nJuly 7th Yesterday was a day filled with working on batchalign, but we are back now. Today, I aim to look into the heuristic that I identified yesterday by playing with the model, which is that it seems like the model prefers the use of long-focused sentences about cookies, so the heruistic its picking up is probably on-topicness.\nI am going to first leverage the lovely cdpierse/transformers-interpret tool to help build some explainability by adding it to validate.py. Upon some human validation with random sampling, the model seem to do less well than I\u0026rsquo;d hoped. 
Running a train cycle with the new results/params seen above to see if it does better.\ntrain: brisk-oath-10 {bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\nCommentary: It seems like the model is doing overall worse from validation data, but it does fairly well during test data. Decision: I can fairly confidently claim that the model is just fitting on topic. As in, if the topic is about cookies (theft/taking/cookie/mother/etc.), it will be classified as control. One thing that we can do is to claim this task as directly task-controlled: that is, include no data except cookie and control for that difference Then, the model would\u0026rsquo;t be able to predict the result b/c the variation in topic won\u0026rsquo;t have influence. This is going to be prepared in the cookiepitt-7-7-bal* based on dataprep.py in commit 518dec82bb961c0a8ad02e3080289b56102aa1a2 train: super-durian-11 {bs: 72, epochs: 3, lr: 1e-5, length: 60, cookiepitt-7-7-windowed-bal.dat}\nCommentary: the model is no where near convergence Decision: multiplying the LR by 10 train: floral-sunset-12 {bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-bal.dat}\nCommentary: There we go. This seem to be more in line with what we see in Yuan 2021 Decision: ok, let\u0026rsquo;s elongate the actual content. Perhaps we can try a 7-element search instead? This is written as cookiepitt-7-7-*-long. Code based on 9e31f4bc13c4bfe193dcc049059c3d9bda46c8d0 train: sweet-plasma-13 {bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\nCommentary: underfitting Dropping batch size down to 64 to add more steps train: smart-river-14 {bs: 64, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\nCommentary: this finally fits to the specifications which Yuan 2021 have revealed Decision: running k-fold on this architecture k-fold: XgsP4FVS6ScFxCZKFJoVQ5. 
Code: 3870651ba71da8ddb3f481a7c3e046397a09d8b2\nJuly 8th Began the day with aligning the entirety of cookie for both control and dementia, named the dataset alignedpitt-7-8 in the RAW folder\nPer what we discussed, will add [pause] as a token to the model. Then, transcript the text such that it would contain normalized values to the pauses for pauses \u0026gt; 0.250 seconds. Therefore, the data would look like\n\u0026ldquo;hello my name is [pause] 262 [pause] bob\u0026rdquo;\nJuly 9th Created transcript.py, which coverts the data in raw to transcripts_pauses, which contains pause values \u0026gt; 250 msc and prepends them with [pause] tokens The code from above is taken from check.py in batchalign, used transcript.py from 7e19a4912cf0ad5d269c139da5ce018615495ebb to clean out the dataset; placed it in similar txt format to alignedpitt-7-8 Ran dataprep with window size of 5, created alignedpitt-7-8.bat and alignedpitt-7-8-windowed.bat as the dataprep file starting a new training run, with [pause] added as a new token, code 06846c6c95e6b1ccf17f0660c5da76aa50231567 train: golden-tree-16 {bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}\nSo realistically, we have the same F1 between the two, but pause encoding increased the accuracy of prediction yet dropped recall dramatically.\nAs a random check, let\u0026rsquo;s find out if simple fine-tuning (only training on classifier) would work, so:\ntrain: jumping-blaze-17 {bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\nCommentary: we did not like. start coverging Bumping LR by a factor of 10 train: vital-water-18 {bs: 64, epochs: 3, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\nCommentary: barely started converging, seem to be a local Training for 2 more epochs train: fiery-smoke-19 {bs: 64, epochs: 5, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. 
This time with only training classifier.\nCommentary: classic overfitting At this point, unlocking the model would probably be a good bet\ntrain: leafy-deluge-20 {bs: 64, epochs: 5, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\nTraining once again with code without locking, and bump LR down\nCommentary: classic the recall is slowly creeping up Decision: let\u0026rsquo;s go for 8 epochs train: royal-pond-21 {bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\nCommentary: let\u0026rsquo;s run k-fold now, with these settings.\nk-fold: QskZWfEsML52ofcQgGujE2. {bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\nOk, the base hypothesis from Yuan 2021 is very much confirmed here. The same training, same content, but pause encoding is very beneficial to the quality of the results. The results that they reported contained an ensemble data, which is in the high 80s; we can now continue doing something new as Yuan 2021\u0026rsquo;s conclusion is fairly achieved.\nWe can probably call the replication stage done, with no dramatically better effect.\nJuly 10th FluCalc! Leonid\u0026rsquo;s lovely new program can be an uberuseful feature extraction tool Let\u0026rsquo;s try using to build a new dataset, and network. FluCalc + Pause Encoding + Textual Data late fusion This is becoming alignedpitt-7-8-flucalc. As the program is currently under heavy development to include results from batchalign, we will specify version V 09-Jul-2022 11:00 for now. Done, the new data has the same i/o shape, but then has a bunch of features filtered for nulls which contains outputs from flucalc. Again, alignedpitt-7-8-flucalc from 4346fc07c4707343c507e32786b6769b6bd6fb49 does not take into account results from the %wor tier! 
July 11th ab19abd6486884141c9ab4e4e185255a77ae833e is the final-ish version of the late fusion model We are going to use alignedpitt-7-8-flucalc to start training train: royal-pond-21 {bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-flucalc-windowed.dat}.\nCommentary: overfitting Decision, droping lr by a factor of 10, also increasing length to 70 train: fallen-dust-25 {bs: 64, epochs: 8, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\nCommentary: overfitting Decision, droping lr by a factor of 10, dropping batch size to 32, training more to 10 train: dainty-meadow-26 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\nah\nAt this point, I think it\u0026rsquo;d be good to do some feature selection Let\u0026rsquo;s do a chi^2 correlation, and select 3 best features import pandas as pd DATA = \u0026#34;/Users/houliu/Documents/Projects/DBC/data/transcripts_pauses/alignedpitt-7-8-flucalc-windowed.bat\u0026#34; # read pickle df = pd.read_pickle(DATA) # test test_data = df[df.split==\u0026#34;test\u0026#34;] # also, get only train data df = df[df.split==\u0026#34;train\u0026#34;] df target mor_Utts ... split utterance trial sample ... 120-2 1049 1 -0.179084 ... train well the boy is getting some cookies handing o... 336-1 2492 0 -0.481740 ... train +oh okay, the the little girl askin(g) for the... 076-4 786 1 -0.179084 ... train well the little boy was looking at that cookie... 279-0 2250 1 1.980274 ... train kid\u0026#39;s stool turnin(g) [pause]540[pause] over s... 014-2 151 1 0.746355 ... train he\u0026#39;s fallin(g) off the chair down here or try... ... ... ... ... ... ... 208-0 1655 0 -0.481740 ... train the boy [pause]920[pause] is going after [paus... 492-0 2696 1 -0.179084 ... train oh yes quite a_lot the kid\u0026#39;s tryin(g) to get t... 497-1 2727 1 0.129396 ... train what else ? \u0026amp;uh the see the [pause]2400[pause]... 175-2 1535 0 0.863668 ... 
train the window is open you can see out the curtain... 279-0 2261 1 1.980274 ... train the other kid with [pause]610[pause] the stool... [2848 rows x 44 columns] Let\u0026rsquo;s slice out the bits which is labels, etc.\nin_data = df.drop(columns=[\u0026#34;utterance\u0026#34;, \u0026#34;target\u0026#34;, \u0026#34;split\u0026#34;]) in_data.columns Index([\u0026#39;mor_Utts\u0026#39;, \u0026#39;mor_Words\u0026#39;, \u0026#39;mor_syllables\u0026#39;, \u0026#39;#_Prolongation\u0026#39;, \u0026#39;%_Prolongation\u0026#39;, \u0026#39;#_Broken_word\u0026#39;, \u0026#39;%_Broken_word\u0026#39;, \u0026#39;#_Block\u0026#39;, \u0026#39;%_Block\u0026#39;, \u0026#39;#_PWR\u0026#39;, \u0026#39;%_PWR\u0026#39;, \u0026#39;#_PWR-RU\u0026#39;, \u0026#39;%_PWR-RU\u0026#39;, \u0026#39;#_WWR\u0026#39;, \u0026#39;%_WWR\u0026#39;, \u0026#39;#_mono-WWR\u0026#39;, \u0026#39;%_mono-WWR\u0026#39;, \u0026#39;#_WWR-RU\u0026#39;, \u0026#39;%_WWR-RU\u0026#39;, \u0026#39;#_mono-WWR-RU\u0026#39;, \u0026#39;%_mono-WWR-RU\u0026#39;, \u0026#39;Mean_RU\u0026#39;, \u0026#39;#_Phonological_fragment\u0026#39;, \u0026#39;%_Phonological_fragment\u0026#39;, \u0026#39;#_Phrase_repetitions\u0026#39;, \u0026#39;%_Phrase_repetitions\u0026#39;, \u0026#39;#_Word_revisions\u0026#39;, \u0026#39;%_Word_revisions\u0026#39;, \u0026#39;#_Phrase_revisions\u0026#39;, \u0026#39;%_Phrase_revisions\u0026#39;, \u0026#39;#_Pauses\u0026#39;, \u0026#39;%_Pauses\u0026#39;, \u0026#39;#_Filled_pauses\u0026#39;, \u0026#39;%_Filled_pauses\u0026#39;, \u0026#39;#_TD\u0026#39;, \u0026#39;%_TD\u0026#39;, \u0026#39;#_SLD\u0026#39;, \u0026#39;%_SLD\u0026#39;, \u0026#39;#_Total_(SLD+TD)\u0026#39;, \u0026#39;%_Total_(SLD+TD)\u0026#39;, \u0026#39;Weighted_SLD\u0026#39;], dtype=\u0026#39;object\u0026#39;) And the labels:\nout_data = df[\u0026#34;target\u0026#34;] out_data trial sample 120-2 1049 1 336-1 2492 0 076-4 786 1 279-0 2250 1 014-2 151 1 .. 
208-0 1655 0 492-0 2696 1 497-1 2727 1 175-2 1535 0 279-0 2261 1 Name: target, Length: 2848, dtype: int64 And now, let\u0026rsquo;s select 3 best features.\nfrom sklearn.feature_selection import SelectKBest, f_classif k_best_tool = SelectKBest(f_classif, k=3) k_best_tool.fit(in_data, out_data) best_features = k_best_tool.get_feature_names_out() best_features %_WWR %_mono-WWR %Total(SLD+TD) OD = other disfluencies; SLD = stuttering-like disfluencies; TD = total disfluencies; WWR = whole-word-repetition\nok, let\u0026rsquo;s select those features\ntrain: visionary-plasma-27 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}. Also with feature selection.\nhmmm.\nI am curious if we just ran something like a decision tree, what happens.\nin_features = df.drop(columns=[\u0026#34;utterance\u0026#34;, \u0026#34;target\u0026#34;, \u0026#34;split\u0026#34;]) test_features = test_data.drop(columns=[\u0026#34;utterance\u0026#34;, \u0026#34;target\u0026#34;, \u0026#34;split\u0026#34;]) in_targets = df[\u0026#34;target\u0026#34;] test_targets = test_data[\u0026#34;target\u0026#34;] seed the classifier, and fit.\nfrom sklearn.ensemble import RandomForestClassifier clsf = RandomForestClassifier() clsf.fit(in_features, in_targets) clsf.score(test_features, test_targets) 0.5932203389830508 OK nevermind. What about SVC?\nfrom sklearn.svm import SVC clsf = SVC() clsf.fit(in_features, in_targets) clsf.score(test_features, test_targets) 0.5932203389830508 Turns out, deep learning still does better. I\u0026rsquo;m thinking maybe the output is being faulty, say, for something like the loss function.\nDecision: switching activation to sigmoid.\ntrain: sunny-bush-31 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nOk let\u0026rsquo;s think about this. 
Decision: added batch normalization.\ntrain: autumn-jazz-32 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nThe model maybe overfitting on some simple heuristic; some basic statistics revealed that these variables are actually quite differently distributed.\nPerhaps we should increase the complexity of the model?\ntrain: fallen-microwave-33 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nJust to test, I am bumping the LR to 1e-5, just to see what happens. I am very confused.\ntrain: upbeat-flower-35 {bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nThe more we work on this, the more overfit it gets. (I FORGOT A RELUCTIFIER)\na note {bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-11-flucalc-windowed.dat}, selected features\nPauses, no meta:\nPauses, meta:\nso effectively cointoss\nConcerns and Questions July 2nd pitt7-1/dementia/493-0 PAR tier \u0026ldquo;tell me everything you see going on in that picture\u0026rdquo; doesn\u0026rsquo;t seem to be labeled correctly; I am guessing that\u0026rsquo;s supposed to be INV? Has anyone tried to include investigator/participant cross-dialogue? July 4th Is the model overfitting on antiquated language? Is the model overfitting on cooke-theft on-topic-ness? July 11th LSTM only on pauses? ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdementiabank_acoustics_project/\"\u003eDementiaBank Acoustics Project\u003c/a\u003e is a working title for an acoustic-only challenge for AD detection. 
This document serves as the lab notebook for this project.\u003c/p\u003e\n\u003cp\u003eThis project will attempt to replicate some of the results of \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e and \u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e, but focusing on minimizing human involvement; we will first work on raw transcript classification with ERNIE (cutting all CHAT annotations), then introduce pause-encoding in a manner similar to \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e which is automated by MFA. Goal is to replicate the results of \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e/or even \u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e in a completely automated manner.\u003c/p\u003e\n\u003ch2 id=\"background-reading\"\u003eBackground Reading\u003c/h2\u003e\n\u003cp\u003eI first began by doing a literature survey on the \u003ca href=\"/posts/kbhadress_literature_survey/\"\u003eADReSS Challenge\u003c/a\u003e results published in the Frontiers AD special interest group issue.\u003c/p\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eAnd then, we wrote a proposal: \u003ca href=\"/posts/kbhdementiabank_acoustics_project_proposal/\"\u003eDementiaBank Acoustics Project Proposal\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"brainstoming\"\u003eBrainstoming\u003c/h2\u003e\n\u003cp\u003eMore notes from the meeting: \u003ca href=\"/posts/kbhdementiabank_acoustics_brainstoming/\"\u003eDementiaBank Acoustics Brainstoming\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"protocol-notes\"\u003eProtocol Notes\u003c/h2\u003e\n\u003ch3 id=\"july-1st\"\u003eJuly 1st\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBegan by moving a subsample of \u003ca href=\"https://dementia.talkbank.org/access/English/Pitt.html\"\u003ePitt\u003c/a\u003e\u0026rsquo;s \u003ca href=\"/posts/kbhctp/\"\u003eCookie Theft\u003c/a\u003e to 
\u003ccode\u003epitt-7-1\u003c/code\u003e in the \u003ccode\u003eraw\u003c/code\u003e data folder\u003c/li\u003e\n\u003cli\u003eRan \u003ccode\u003eflo\u003c/code\u003e on all collected samples. Arguments used are the same as that for \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, except \u003cem\u003ewe filter out the \u003ccode\u003eINV\u003c/code\u003e tier\u003c/em\u003e as we are detecting AD on patient and not investigator: so \u003ccode\u003eflo +d +ca +t* -tINV\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eMoved all collected samples (and changed extension to .txt) to the same sub-folder, but in \u003ccode\u003etranscripts_nodisfluency\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-2nd\"\u003eJuly 2nd\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCreated a dataprep script \u003ccode\u003edataprep.py\u003c/code\u003e which dumps a pickled copy of cleaned data to \u003ccode\u003etranscripts_nodisfluency/pitt-7-1.dat\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eCreated sliding windows of 5 pieces of dialogue concatenated, stored it in \u003ccode\u003etranscripts_nodisfluency/pitt-7-1-windowed.dat\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eUsed tencent/HuYong\u0026rsquo;s \u003ccode\u003enghuyong/ernie-2.0-en\u003c/code\u003e Ernie 2.0 model, the continuous language model from Baidu (Layer:12, Hidden:768, Heads:12)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-4th\"\u003eJuly 4th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFinalized training code. Selected base hyperparameters {bs: 8, epochs: 2, lr: 3e-3, length: 60}. 
Again, we are using Baidu\u0026rsquo;s \u003ccode\u003enghuyong/ernie-2.0-en\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eStarted training fastcalculator on \u003ccode\u003e24bc812\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-faithful-frog-3\"\u003etrain: faithful-frog-3\u003c/h4\u003e\n\u003cp\u003e{bs: 8, epochs: 2, lr: 3e-3, length: 60, pitt-7-1-windowed.dat }\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_19-20-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: LR could be too high, looking at the divergent loss behavior.\u003c/li\u003e\n\u003cli\u003eDecision: dropping bs to \u003ccode\u003e4\u003c/code\u003e and lr to \u003ccode\u003e1e-5\u003c/code\u003e, similar to previous transformers. Also training for 3 epochs.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-revived-disco-5\"\u003etrain: revived-disco-5\u003c/h4\u003e\n\u003cp\u003e{bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-1-windowed.dat }\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_19-28-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: quintessential overfitting\u003c/li\u003e\n\u003cli\u003eDecision:\n\u003cul\u003e\n\u003cli\u003eMade the corpus bigger\n\u003cul\u003e\n\u003cli\u003ecleaned the entire \u003ca href=\"https://dementia.talkbank.org/access/English/Pitt.html\"\u003ePitt\u003c/a\u003e corpus (\u003ccode\u003epitt-7-4\u003c/code\u003e in the \u003ccode\u003eraw\u003c/code\u003e folder) to become training data. 
Similar to \u003ccode\u003epitt-7-1\u003c/code\u003e, ran \u003ccode\u003eflo\u003c/code\u003e on all collected samples; arguments used are the same as that for \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, except \u003cem\u003ewe filter out the \u003ccode\u003eINV\u003c/code\u003e tier\u003c/em\u003e as we are detecting AD on patient and not investigator: so \u003ccode\u003eflo +d +ca +t* -tINV\u003c/code\u003e; the \u003ccode\u003eflo\u003c/code\u003e\u0026rsquo;d results are in \u003ccode\u003etranscripts_nodisfluency\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003ethe notable difference between the previous dataset \u003ccode\u003e7-1\u003c/code\u003e and the current one \u003ccode\u003e7-4\u003c/code\u003e is that the \u003ccode\u003e7-4\u003c/code\u003e are prepended numbered by the task (\u003ccode\u003ecookie/100-01.cha\u003c/code\u003e \u003ccode\u003e\u0026gt; =cookie-100-01.txt\u003c/code\u003e)\u003c/li\u003e\n\u003cli\u003eNew (full) Pitt data as prepared above is ran though the dataprep script as of \u003ccode\u003eb325514cfad79da82d7a519ed29ea19ed87b2be4\u003c/code\u003e (difference is that empty/dummy files are ignored), and pickled at \u003ccode\u003etranscripts_nodisfluency/pitt-7-4.dat\u003c/code\u003e and \u003ccode\u003etranscripts_nodisfluency/pitt-7-4-windowed.dat\u003c/code\u003e respectively.\u003c/li\u003e\n\u003cli\u003eFor new data, window size is still \u003ccode\u003e5\u003c/code\u003e, splitting \u003ccode\u003e10\u003c/code\u003e cases out for testing now instead of \u003ccode\u003e5\u003c/code\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-vocal-oath-6\"\u003etrain: vocal-oath-6\u003c/h4\u003e\n\u003cp\u003e{bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_20-20-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-07-04_20-35-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: high recall, low precision. Perhaps classes aren\u0026rsquo;t balanced?\n\u003cul\u003e\n\u003cli\u003eSpoiler alert: they are not.\u003c/li\u003e\n\u003cli\u003eAn inspection of data reveals that there is 3211 rows of dementia, 2397 rows of control\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDecision:\n\u003cul\u003e\n\u003cli\u003eCreated \u003ccode\u003epitt-7-4-bal\u003c/code\u003e and \u003ccode\u003epitt-7-4-windowed-bal\u003c/code\u003e series of data based on dataprep.py on \u003ccode\u003e703f79248a20fd7a13a5033ca2bf7f691f42c941\u003c/code\u003e. This version force-crops to make sure that the dementia and control indicies have the exact same length for each class.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-helpful-leaf-7\"\u003etrain: helpful-leaf-7\u003c/h4\u003e\n\u003cp\u003e{bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_21-31-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_21-35-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eBeautiful. Question now is whether or not there is data leakage/external heuristics. It is a good time to do some \u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e. 
Getting this result without any disfluency calculations seems unlikely.\u003c/p\u003e\n\u003cp\u003eBut anyways, going to discuss these results as they seem to meet results we see in \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e, even without top-N ensemble; though this is one trial, \u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e may still show that we actually need it.\u003c/p\u003e\n\u003ch3 id=\"july-5th\"\u003eJuly 5th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBegan the day with creating the script k-fold validation; I originally hoped to exactly replicate the procedure of \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e for comparability, but, not sure how they got the actual result of a min/max range with \u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e on binary; therefore, we will instead create a 95% \u003ca href=\"/posts/kbhconfidence_interval/\"\u003econfidence interval\u003c/a\u003e analysis via a single-variable \u003ca href=\"/posts/kbht_statistics/\"\u003et test\u003c/a\u003e on standard k-fold validation. K=50\u003c/li\u003e\n\u003cli\u003eDuring one-off testing, another set of hyperparameters seems to work too: {bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}. 
As we have not begun tuning for hyperparameters, we are just going to use this set, K=50, for the first k-fold trial.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"k-fold-f4zvbgfdbaqvtvxemwzczd\"\u003ek-fold: F4ZVbGfdBAQvtvXemWZCZD\u003c/h4\u003e\n\u003cp\u003ecode: 55f77ff1dea03c3ed66967864dc52fd2c0062f23\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-05_13-22-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\nK = 50\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-05_14-25-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-05_14-26-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIt seems like the results we got is consistent and validates in a manner which we expect.\u003c/p\u003e\n\u003ch3 id=\"july-7th\"\u003eJuly 7th\u003c/h3\u003e\n\u003cp\u003eYesterday was a day filled with working on \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, but we are back now. Today, I aim to look into the heuristic that I identified yesterday by playing with the model, which is that it seems like the model prefers the use of long-focused sentences \u003cem\u003eabout\u003c/em\u003e cookies, so the heruistic its picking up is probably on-topicness.\u003c/p\u003e\n\u003cp\u003eI am going to first leverage the lovely \u003ccode\u003ecdpierse/transformers-interpret\u003c/code\u003e tool to help build some explainability by adding it to validate.py. Upon some human validation with random sampling, the model seem to do less well than I\u0026rsquo;d hoped. 
Running a train cycle with the new results/params seen above to see if it does better.\u003c/p\u003e\n\u003ch4 id=\"train-brisk-oath-10\"\u003etrain: brisk-oath-10\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_11-39-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_11-48-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: It seems like the model is doing overall worse from validation data, but it does fairly well during test data.\u003c/li\u003e\n\u003cli\u003eDecision:\n\u003cul\u003e\n\u003cli\u003eI can fairly confidently claim that the model is just fitting on topic. As in, if the topic is about cookies (theft/taking/cookie/mother/etc.), it will be classified as control.\u003c/li\u003e\n\u003cli\u003eOne thing that we can do is to claim this task as directly task-controlled: that is, include \u003cstrong\u003eno\u003c/strong\u003e data except cookie and control for that difference\u003c/li\u003e\n\u003cli\u003eThen, the model would\u0026rsquo;t be able to predict the result b/c the variation in topic won\u0026rsquo;t have influence.\u003c/li\u003e\n\u003cli\u003eThis is going to be prepared in the \u003ccode\u003ecookiepitt-7-7-bal*\u003c/code\u003e based on \u003ccode\u003edataprep.py\u003c/code\u003e in commit \u003ccode\u003e518dec82bb961c0a8ad02e3080289b56102aa1a2\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-super-durian-11\"\u003etrain: super-durian-11\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-5, length: 60, cookiepitt-7-7-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_13-51-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: the model is \u003cem\u003eno where near 
convergence\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eDecision: multiplying the LR by 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-floral-sunset-12\"\u003etrain: floral-sunset-12\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_13-54-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_14-02-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: There we go. This seem to be more in line with what we see in \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eDecision: ok, let\u0026rsquo;s elongate the actual content. Perhaps we can try a 7-element search instead? This is written as \u003ccode\u003ecookiepitt-7-7-*-long\u003c/code\u003e. Code based on \u003ccode\u003e9e31f4bc13c4bfe193dcc049059c3d9bda46c8d0\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-sweet-plasma-13\"\u003etrain: sweet-plasma-13\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-05-28_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: underfitting\u003c/li\u003e\n\u003cli\u003eDropping batch size down to 64 to add more steps\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-smart-river-14\"\u003etrain: smart-river-14\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-13-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-20-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: this finally fits to the specifications which 
\u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e have revealed\u003c/li\u003e\n\u003cli\u003eDecision: running k-fold on this architecture\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"k-fold-xgsp4fvs6scfxczkfjovq5-dot\"\u003ek-fold: XgsP4FVS6ScFxCZKFJoVQ5.\u003c/h4\u003e\n\u003cp\u003eCode: 3870651ba71da8ddb3f481a7c3e046397a09d8b2\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-30-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_16-18-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_16-20-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"july-8th\"\u003eJuly 8th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBegan the day with aligning the entirety of cookie for both control and dementia, named the dataset \u003ccode\u003ealignedpitt-7-8\u003c/code\u003e in the RAW folder\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePer what we discussed, will add [pause] as a token to the model. Then, transcript the text such that it would contain normalized values to the pauses for pauses \u0026gt; 0.250 seconds. 
Therefore, the data would look like\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;hello my name is [pause] 262 [pause] bob\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-9th\"\u003eJuly 9th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCreated transcript.py, which coverts the data in \u003ccode\u003eraw\u003c/code\u003e to \u003ccode\u003etranscripts_pauses\u003c/code\u003e, which contains pause values \u0026gt; 250 msc and prepends them with [pause] tokens\u003c/li\u003e\n\u003cli\u003eThe code from above is taken from \u003ccode\u003echeck.py\u003c/code\u003e in \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, used \u003ccode\u003etranscript.py\u003c/code\u003e from \u003ccode\u003e7e19a4912cf0ad5d269c139da5ce018615495ebb\u003c/code\u003e to clean out the dataset; placed it in similar txt format to \u003ccode\u003ealignedpitt-7-8\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eRan dataprep with window size of 5, created \u003ccode\u003ealignedpitt-7-8.bat\u003c/code\u003e and \u003ccode\u003ealignedpitt-7-8-windowed.bat\u003c/code\u003e as the dataprep file\u003c/li\u003e\n\u003cli\u003estarting a new training run, with \u003ccode\u003e[pause]\u003c/code\u003e added as a new token, code \u003ccode\u003e06846c6c95e6b1ccf17f0660c5da76aa50231567\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-golden-tree-16\"\u003etrain: golden-tree-16\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_11-48-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_11-51-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo realistically, we have the same F1 between the two, but pause encoding increased the accuracy of prediction yet dropped recall dramatically.\u003c/p\u003e\n\u003cp\u003eAs a random check, let\u0026rsquo;s find out if 
simple fine-tuning (only training on classifier) would work, so:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-07-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"train-jumping-blaze-17\"\u003etrain: jumping-blaze-17\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-09-22_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: we did not like. start coverging\u003c/li\u003e\n\u003cli\u003eBumping LR by a factor of 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-vital-water-18\"\u003etrain: vital-water-18\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-11-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: barely started converging, seem to be a local\u003c/li\u003e\n\u003cli\u003eTraining for 2 more epochs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-fiery-smoke-19\"\u003etrain: fiery-smoke-19\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 5, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. 
This time with only training classifier.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-14-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: classic overfitting\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAt this point, unlocking the model would probably be a good bet\u003c/p\u003e\n\u003ch4 id=\"train-leafy-deluge-20\"\u003etrain: leafy-deluge-20\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 5, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\u003c/p\u003e\n\u003cp\u003eTraining once again with code without locking, and bump LR down\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-14-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-17-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: classic the recall is slowly creeping up\u003c/li\u003e\n\u003cli\u003eDecision: let\u0026rsquo;s go for 8 epochs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-royal-pond-21\"\u003etrain: royal-pond-21\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-22-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-24-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCommentary: let\u0026rsquo;s run k-fold now, with these settings.\u003c/p\u003e\n\u003ch4 id=\"k-fold-qskzwfesml52ofcqgguje2-dot\"\u003ek-fold: QskZWfEsML52ofcQgGujE2.\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_14-06-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_16-08-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-07-09_16-08-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOk, the base hypothesis from \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e is very much confirmed here. The same training, same content, but pause encoding is very beneficial to the quality of the results. The results that they reported contained an ensemble data, which is in the high 80s; we can now continue doing something new as \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e\u0026rsquo;s conclusion is fairly achieved.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_16-15-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_18-26-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe can probably call the replication stage done, with no dramatically better effect.\u003c/p\u003e\n\u003ch3 id=\"july-10th\"\u003eJuly 10th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFluCalc! Leonid\u0026rsquo;s lovely new program can be an uberuseful feature extraction tool\u003c/li\u003e\n\u003cli\u003eLet\u0026rsquo;s try using to build a new dataset, and network. FluCalc + Pause Encoding + Textual Data \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThis is becoming \u003ccode\u003ealignedpitt-7-8-flucalc\u003c/code\u003e. As the program is currently under heavy development to include results from \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, we will specify version \u003ccode\u003eV 09-Jul-2022 11:00\u003c/code\u003e for now.\u003c/li\u003e\n\u003cli\u003eDone, the new data has the same i/o shape, but then has a bunch of features filtered for nulls which contains outputs from flucalc. 
Again, \u003ccode\u003ealignedpitt-7-8-flucalc\u003c/code\u003e from \u003ccode\u003e4346fc07c4707343c507e32786b6769b6bd6fb49\u003c/code\u003e does not take into account results from the \u003ccode\u003e%wor\u003c/code\u003e tier!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-11th\"\u003eJuly 11th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eab19abd6486884141c9ab4e4e185255a77ae833e\u003c/code\u003e is the final-ish version of the late fusion model\u003c/li\u003e\n\u003cli\u003eWe are going to use \u003ccode\u003ealignedpitt-7-8-flucalc\u003c/code\u003e to start training\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-royal-pond-21\"\u003etrain: royal-pond-21\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-flucalc-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-15-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: overfitting\u003c/li\u003e\n\u003cli\u003eDecision, droping lr by a factor of 10, also increasing length to 70\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-fallen-dust-25\"\u003etrain: fallen-dust-25\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-37-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-38-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: overfitting\u003c/li\u003e\n\u003cli\u003eDecision, droping lr by a factor of 10, dropping batch size to 32, training more to 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-dainty-meadow-26\"\u003etrain: dainty-meadow-26\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-07-11_10-45-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-46-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eah\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAt this point, I think it\u0026rsquo;d be good to do some feature selection\u003c/li\u003e\n\u003cli\u003eLet\u0026rsquo;s do a chi^2 correlation, and select 3 best features\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eDATA\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;/Users/houliu/Documents/Projects/DBC/data/transcripts_pauses/alignedpitt-7-8-flucalc-windowed.bat\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# read pickle\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_pickle\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDATA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# test\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_data\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esplit\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;test\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# also, get only train data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esplit\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;train\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e target mor_Utts ... split utterance\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etrial sample ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e120-2 1049 1 -0.179084 ... train well the boy is getting some cookies handing o...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e336-1 2492 0 -0.481740 ... train +oh okay, the the little girl askin(g) for the...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e076-4 786 1 -0.179084 ... train well the little boy was looking at that cookie...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2250 1 1.980274 ... train kid\u0026#39;s stool turnin(g) [pause]540[pause] over s...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e014-2 151 1 0.746355 ... train he\u0026#39;s fallin(g) off the chair down here or try...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e208-0 1655 0 -0.481740 ... 
train the boy [pause]920[pause] is going after [paus...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e492-0 2696 1 -0.179084 ... train oh yes quite a_lot the kid\u0026#39;s tryin(g) to get t...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e497-1 2727 1 0.129396 ... train what else ? \u0026amp;uh the see the [pause]2400[pause]...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e175-2 1535 0 0.863668 ... train the window is open you can see out the curtain...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2261 1 1.980274 ... train the other kid with [pause]610[pause] the stool...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2848 rows x 44 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s slice out the bits which is labels, etc.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_data\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;utterance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;split\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_data\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eIndex([\u0026#39;mor_Utts\u0026#39;, \u0026#39;mor_Words\u0026#39;, \u0026#39;mor_syllables\u0026#39;, \u0026#39;#_Prolongation\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Prolongation\u0026#39;, \u0026#39;#_Broken_word\u0026#39;, \u0026#39;%_Broken_word\u0026#39;, \u0026#39;#_Block\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Block\u0026#39;, \u0026#39;#_PWR\u0026#39;, \u0026#39;%_PWR\u0026#39;, \u0026#39;#_PWR-RU\u0026#39;, \u0026#39;%_PWR-RU\u0026#39;, \u0026#39;#_WWR\u0026#39;, \u0026#39;%_WWR\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_mono-WWR\u0026#39;, \u0026#39;%_mono-WWR\u0026#39;, \u0026#39;#_WWR-RU\u0026#39;, \u0026#39;%_WWR-RU\u0026#39;, \u0026#39;#_mono-WWR-RU\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_mono-WWR-RU\u0026#39;, \u0026#39;Mean_RU\u0026#39;, \u0026#39;#_Phonological_fragment\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Phonological_fragment\u0026#39;, \u0026#39;#_Phrase_repetitions\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Phrase_repetitions\u0026#39;, \u0026#39;#_Word_revisions\u0026#39;, \u0026#39;%_Word_revisions\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_Phrase_revisions\u0026#39;, \u0026#39;%_Phrase_revisions\u0026#39;, \u0026#39;#_Pauses\u0026#39;, \u0026#39;%_Pauses\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_Filled_pauses\u0026#39;, \u0026#39;%_Filled_pauses\u0026#39;, \u0026#39;#_TD\u0026#39;, \u0026#39;%_TD\u0026#39;, \u0026#39;#_SLD\u0026#39;, \u0026#39;%_SLD\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_Total_(SLD+TD)\u0026#39;, \u0026#39;%_Total_(SLD+TD)\u0026#39;, \u0026#39;Weighted_SLD\u0026#39;],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dtype=\u0026#39;object\u0026#39;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd the labels:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eout_data\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eout_data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etrial sample\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e120-2 1049 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e336-1 2492 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e076-4 786 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2250 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e014-2 151 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ..\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e208-0 1655 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e492-0 2696 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e497-1 2727 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e175-2 1535 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2261 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: target, Length: 2848, dtype: 
int64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, let\u0026rsquo;s select 3 best features.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.feature_selection\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef_classif\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_best_tool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef_classif\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_best_tool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ein_data\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eout_data\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebest_features\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_best_tool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eget_feature_names_out\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebest_features\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e%_WWR\u003c/td\u003e\n\u003ctd\u003e%_mono-WWR\u003c/td\u003e\n\u003ctd\u003e%\u003cem\u003eTotal\u003c/em\u003e(SLD+TD)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eOD = other disfluencies; SLD = stuttering-like disfluencies; TD = total disfluencies; WWR = whole-word-repetition\u003c/p\u003e\n\u003cp\u003eok, let\u0026rsquo;s select those features\u003c/p\u003e\n\u003ch4 id=\"train-visionary-plasma-27\"\u003etrain: visionary-plasma-27\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}. 
Also with feature selection.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_11-27-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_11-28-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ehmmm.\u003c/p\u003e\n\u003cp\u003eI am curious if we just ran something like a decision tree, what happens.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_features\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;utterance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;split\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_features\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_data\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;utterance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;split\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_targets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_targets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_data\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eseed the classifier, and fit.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.ensemble\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ein_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ein_targets\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etest_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_targets\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.5932203389830508\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOK nevermind. What about SVC?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.svm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ein_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ein_targets\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etest_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_targets\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.5932203389830508\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTurns out, deep learning still does better. I\u0026rsquo;m thinking maybe the output is being faulty, say, for something like the loss function.\u003c/p\u003e\n\u003cp\u003eDecision: switching activation to sigmoid.\u003c/p\u003e\n\u003ch4 id=\"train-sunny-bush-31\"\u003etrain: sunny-bush-31\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-35-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-37-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOk let\u0026rsquo;s think about this. 
Decision: added batch normalization.\u003c/p\u003e\n\u003ch4 id=\"train-autumn-jazz-32\"\u003etrain: autumn-jazz-32\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-50-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-50-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe model maybe overfitting on some simple heuristic; some basic statistics revealed that these variables are actually quite differently distributed.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-06-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePerhaps we should increase the complexity of the model?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-08-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"train-fallen-microwave-33\"\u003etrain: fallen-microwave-33\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cp\u003eJust to test, I am bumping the LR to 1e-5, just to see what happens. I am very confused.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-14-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"train-upbeat-flower-35\"\u003etrain: upbeat-flower-35\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-21-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-23-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe more we work on this, the more overfit it gets. 
(I FORGOT A RELUCTIFIER)\u003c/p\u003e\n\u003ch4 id=\"a-note\"\u003ea note\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-11-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_17-07-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePauses, no meta:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_17-09-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePauses, meta:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_17-08-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eso effectively cointoss\u003c/p\u003e\n\u003ch2 id=\"concerns-and-questions\"\u003eConcerns and Questions\u003c/h2\u003e\n\u003ch3 id=\"july-2nd\"\u003eJuly 2nd\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003epitt7-1/dementia/493-0\u003c/code\u003e PAR tier \u0026ldquo;tell me everything you see going on in that picture\u0026rdquo; doesn\u0026rsquo;t seem to be labeled correctly; I am guessing that\u0026rsquo;s supposed to be INV?\u003c/li\u003e\n\u003cli\u003eHas anyone tried to include investigator/participant cross-dialogue?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-4th\"\u003eJuly 4th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIs the model overfitting on antiquated language?\u003c/li\u003e\n\u003cli\u003eIs the model overfitting on cooke-theft on-topic-ness?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-11th\"\u003eJuly 11th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eLSTM only on pauses?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdementiabank_acoustics_project/","tags":null,"title":"DementiaBank Acoustics Project"},{"categories":null,"contents":"Suppose you have two non mutually exclusive sets \\(E\\) or \\(F\\).\nDeMorgan\u0026rsquo;s Law:\n\\begin{equation} (E\\ and\\ F)^{C} = (E^{C}\\ or\\ F^{C}) \\end{equation}\n\\begin{equation} (E\\ or\\ F)^{C} = 
(E^{C}\\ and\\ F^{C}) \\end{equation}\n","html":"\u003cp\u003eSuppose you have two non \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e sets \\(E\\) or \\(F\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_16-36-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ca href=\"/posts/kbhdemorgan_s_law/\"\u003eDeMorgan\u0026rsquo;s Law\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(E\\ and\\ F)^{C} = (E^{C}\\ or\\ F^{C})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(E\\ or\\ F)^{C} = (E^{C}\\ and\\ F^{C})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdemorgan_s_law/","tags":null,"title":"DeMorgan's Law"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdepression/","tags":null,"title":"depression"},{"categories":null,"contents":"Derivat\n","html":"\u003cp\u003eDerivat\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderivational_words/","tags":null,"title":"derivational words"},{"categories":null,"contents":"a\n","html":"\u003cp\u003ea\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderivatives/","tags":null,"title":"derivative (finance)"},{"categories":null,"contents":"We will take \\(G(P(t),t)\\) to figure the price of an option, with \\(t\\) being time, strike price \\(X\\) (not introduced yet), and expiration date \\(T \u0026gt; t\\) on a stock with price \\(P(t)\\) at time \\(t\\).\nThis representation does something really important: it expresses \\(G\\) as a function of only the current stock price \\(P(t)\\).\n","html":"\u003cp\u003eWe will take \\(G(P(t),t)\\) to figure the price of an option, with \\(t\\) being time, strike price \\(X\\) (not introduced yet), and expiration date \\(T \u0026gt; t\\) on a stock with price \\(P(t)\\) at time \\(t\\).\u003c/p\u003e\n\u003cp\u003eThis representation does something really important: it expresses \\(G\\) as a 
function of only the \u003cem\u003ecurrent\u003c/em\u003e stock price \\(P(t)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderivative_pricing/","tags":null,"title":"Derivative Pricing"},{"categories":null,"contents":"A derived variable is a mapping between states to a set, usually the natural numbers. Remember, if we can, given a state and match it to a number and show a relation which would iterate the state and decrease the states\u0026rsquo; number. We can show that the algorithm terminates.\n","html":"\u003cp\u003eA derived variable is a mapping between states to a set, usually the natural numbers. Remember, if we can, given a state and match it to a number and show a relation which would iterate the state and decrease the states\u0026rsquo; number. We can show that the algorithm terminates.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderived_variable/","tags":null,"title":"derived variable"},{"categories":null,"contents":"For a matrix, for instance, like:\n\\begin{equation} \\begin{bmatrix} a \u0026amp; b \\\\ c \u0026amp; d \\end{bmatrix} \\end{equation}\nWe wish to find the matrix\u0026rsquo;s determinant; we write it down as:\n\\begin{equation} \\begin{vmatrix} a \u0026amp; b \\\\ c \u0026amp; d \\end{vmatrix} \\end{equation}\ngeometric interpretation of determinants Geometrically, determinants are how matrices send a unit object after its mapping; i.e. 
how does it transform the area of a unit square.\ndeterminants can be computed along any axes You can pick any row or column as the \u0026ldquo;axes\u0026rdquo;, and expand the matrix along any direction\n","html":"\u003cp\u003eFor a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e, for instance, like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{bmatrix}\na \u0026amp; b \\\\\nc \u0026amp; d\n\\end{bmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe wish to find the matrix\u0026rsquo;s determinant; we write it down as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{vmatrix}\na \u0026amp; b \\\\\nc \u0026amp; d\n\\end{vmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"geometric-interpretation-of-determinants\"\u003egeometric interpretation of determinants\u003c/h2\u003e\n\u003cp\u003eGeometrically, determinants are how matrices send a unit object after its mapping; i.e. how does it transform the area of a unit square.\u003c/p\u003e\n\u003ch2 id=\"determinants-can-be-computed-along-any-axes\"\u003edeterminants can be computed along any axes\u003c/h2\u003e\n\u003cp\u003eYou can pick any row or column as the \u0026ldquo;axes\u0026rdquo;, and expand the matrix along any direction\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeterminants/","tags":null,"title":"determinants"},{"categories":null,"contents":"key idea: let\u0026rsquo;s build a tree such that, after taking the action, the observation is deterministic. Therefore, you get a belief tree with no branching on observations.\nDESPOT trees We make an assumption, that the actual observation given are fixed given belief. That is:\n\\begin{equation} O(o|b,a) = 1 \\end{equation}\nfor some specific \\(o\\), everything else is \\(0\\) for every b,a.\nSample Scenarios To make such a tree, let\u0026rsquo;s sample of set of scenarios: sequences of actions and observations (because, given a belief and action, we assume observation is fixed. 
So, given an initial belief and an action, you will always go down a single \u0026ldquo;scenario\u0026rdquo;).\nBuild Tree Do a bunch of scenario sampling\nUse Tree Starting at where you are in terms of initial belief, greedily choose the \u0026ldquo;best action\u0026rdquo;.\nEvaluate Tree Average discounted future reward of the scenarios that relate to your starting states.\nDrawbacks Normal DESPOT is very very easy to overfit.\nRegularized DESPOT Build a DESPOT until depth \\(D\\), with \\(K\\) senarios, then, treat the resulting tree as a conditional plan, do bottom-up DP to optimize the plan.\nGiven a set of senarios \\(\\Phi\\), we write:\nAnytime DESPOT We build up the despot tree by maintaining upper and lower bounds on the value function, and try to expand on scenarios that would help us lower the gap between them.\nFirst, pick an upper and lower bound. The note on HSVI may help.\nBuild and Optimize Bounded DESPOT tree (see below) starting at \\(b_{0}\\) Compute the optimal policy using Regularized DESPOT expression above Execute best action Get observation \\(update(b,a,o)\\) Building Bounded DESPOT sample a set of \\(\\Phi\\) senarios at \\(b_0\\) insert \\(b_0\\) into the tree as the root node let \\(b \\leftarrow b_0\\), and, as time permits: tighten bounds on \\(b\\) back up the upper and lower bounds you found all the way up the tree as you would with HSVI Tightening Bounds if \\(b\\) is a leaf on the tree, then add new belief nodes for every action and every observation as children of \\(b\\).\nthen,\n\\begin{equation} b \\leftarrow update(b, a^{*}, o^{*}) \\end{equation}\nwhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\n\\(a^{*}\\): IE-MAX Heuristic, where the original upper-bound is \\(Q\\) \\(o^{*}\\): weighted excess uncertainty If the weighted excess uncertainty we got is non-zero, we repeat this tightening bounds process until it is zero.\nDESPOT Theoretic Guarantees It is near-optimal as a policy.\n","html":"\u003cp\u003ekey 
idea: let\u0026rsquo;s build a tree such that, after taking the action, the \u003cstrong\u003eobservation is deterministic\u003c/strong\u003e. Therefore, you get a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e tree with no branching on observations.\u003c/p\u003e\n\u003ch2 id=\"despot--kbhdespot-dot-md--trees\"\u003e\u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e trees\u003c/h2\u003e\n\u003cp\u003eWe make an assumption, that the actual observation given are fixed given belief. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nO(o|b,a) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some specific \\(o\\), everything else is \\(0\\) for every b,a.\u003c/p\u003e\n\u003ch3 id=\"sample-scenarios\"\u003eSample Scenarios\u003c/h3\u003e\n\u003cp\u003eTo make such a tree, let\u0026rsquo;s sample of set of \u003ca href=\"#despot--kbhdespot-dot-md--trees\"\u003escenarios\u003c/a\u003e: sequences of actions and observations (because, given a belief and action, we assume observation is fixed. 
So, given an initial belief and an action, you will always go down a single \u0026ldquo;scenario\u0026rdquo;).\u003c/p\u003e\n\u003ch3 id=\"build-tree\"\u003eBuild Tree\u003c/h3\u003e\n\u003cp\u003eDo a bunch of scenario sampling\u003c/p\u003e\n\u003ch3 id=\"use-tree\"\u003eUse Tree\u003c/h3\u003e\n\u003cp\u003eStarting at where you are in terms of initial belief, greedily choose the \u0026ldquo;best action\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"evaluate-tree\"\u003eEvaluate Tree\u003c/h3\u003e\n\u003cp\u003eAverage discounted future reward of the scenarios that relate to your starting states.\u003c/p\u003e\n\u003ch3 id=\"drawbacks\"\u003eDrawbacks\u003c/h3\u003e\n\u003cp\u003eNormal \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e is very very easy to overfit.\u003c/p\u003e\n\u003ch2 id=\"regularized-despot--kbhdespot-dot-md\"\u003eRegularized \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eBuild a \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e until depth \\(D\\), with \\(K\\) senarios, then, treat the resulting tree as a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e, do bottom-up DP to optimize the plan.\u003c/p\u003e\n\u003cp\u003eGiven a set of senarios \\(\\Phi\\), we write:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_23-48-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"anytime-despot\"\u003eAnytime DESPOT\u003c/h2\u003e\n\u003cp\u003eWe build up the despot tree by maintaining upper and lower bounds on the value function, and try to expand on \u003ca href=\"#despot--kbhdespot-dot-md--trees\"\u003escenarios\u003c/a\u003e that would help us lower the gap between them.\u003c/p\u003e\n\u003cp\u003eFirst, pick an upper and lower bound. 
The note on \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e may help.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBuild and Optimize Bounded \u003ca href=\"#despot--kbhdespot-dot-md--trees\"\u003eDESPOT tree\u003c/a\u003e (see below) starting at \\(b_{0}\\)\u003c/li\u003e\n\u003cli\u003eCompute the optimal policy using \u003ca href=\"#regularized-despot--kbhdespot-dot-md\"\u003eRegularized DESPOT\u003c/a\u003e expression above\u003c/li\u003e\n\u003cli\u003eExecute best action\u003c/li\u003e\n\u003cli\u003eGet observation\u003c/li\u003e\n\u003cli\u003e\\(update(b,a,o)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"building-bounded-despot--kbhdespot-dot-md\"\u003eBuilding Bounded \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003esample a set of \\(\\Phi\\) senarios at \\(b_0\\)\u003c/li\u003e\n\u003cli\u003einsert \\(b_0\\) into the tree as the root node\u003c/li\u003e\n\u003cli\u003elet \\(b \\leftarrow b_0\\), and, as time permits:\n\u003col\u003e\n\u003cli\u003etighten bounds on \\(b\\)\u003c/li\u003e\n\u003cli\u003eback up the upper and lower bounds you found all the way up the tree as you would with \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"tightening-bounds\"\u003eTightening Bounds\u003c/h3\u003e\n\u003cp\u003eif \\(b\\) is a leaf on the tree, then add new belief nodes for every action and every observation as children of \\(b\\).\u003c/p\u003e\n\u003cp\u003ethen,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\leftarrow update(b, a^{*}, o^{*})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(a^{*}\\): \u003ca href=\"/posts/kbhhsvi/#ie-max-heuristic\"\u003eIE-MAX Heuristic\u003c/a\u003e, where the original upper-bound is \\(Q\\)\u003c/li\u003e\n\u003cli\u003e\\(o^{*}\\): \u003ca 
href=\"/posts/kbhhsvi/#weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eIf the \u003ca href=\"/posts/kbhhsvi/#weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/a\u003e we got is non-zero, we repeat this tightening bounds process until it is zero.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"despot-theoretic-guarantees\"\u003eDESPOT Theoretic Guarantees\u003c/h2\u003e\n\u003cp\u003eIt is near-optimal as a policy.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdespot/","tags":null,"title":"Determinized Sparse Partially Observable Tree"},{"categories":null,"contents":"A health concern relating to glucose and obesity.\n","html":"\u003cp\u003eA health concern relating to \u003ca href=\"\"\u003eglucose\u003c/a\u003e and \u003ca href=\"\"\u003eobesity.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiabetes/","tags":null,"title":"diabetes"},{"categories":null,"contents":"The diagonal of a square matrix consists of entries from the upper-left to the bottom-right\nFurthermore, because eigenvalues of a map are the entries of the diagonal of its upper-triangular matrix, and this is technically an upper-triangular matrix, the entries on the diagonal are exactly the eigenvalues of the Linear Map.\nproperties of diagonal matrices Suppose \\(V\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V)\\); and let \\(\\lambda_{1}, \u0026hellip; \\lambda_{m}\\) be distinct eigenvalues of \\(T\\). 
Then, the following are equivalent:\n\\(T\\) is diagonalizable \\(V\\) has a basis containing of eigenvectors of \\(T\\) there exists 1-dim subspaces \\(U_{1}, \u0026hellip;, U_{n}\\) of \\(V\\), each invariant under \\(T\\), such that \\(V = U_1 \\oplus \u0026hellip; \\oplus U_{n}\\) specifically, those \\(U\\) are eigenspaces; that is: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\) \\(\\dim V = \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T)\\) Proof:\n\\(1 \\implies 2\\), \\(2 \\implies 1\\)\nBy a moment\u0026rsquo;s fucking thought. hehe my notes my rule. jkjkjk\nBy calculation this is true; if you apply a standard basis to the matrix, it will simply be scaled; therefore, you can think of each slot as an eigenvector of \\(T\\).\n\\(2 \\implies 3\\)\nCreate \\(U_{j} = span(v_{j})\\) where \\(v_{j}\\) is the \\(j\\) eigenvalue of \\(T\\). Now, given \\(v_{j}\\) forms a basis, then, \\(v_1 \u0026hellip; v_{n}\\) not only is linearly independent but span. Therefore, each vector in \\(V\\) can be written uniquely by a linear combination of \\(v_{j}\\) (i.e. taking one \\(v_{j}\\) from each \\(U\\)). Hence, by definition, \\(U_{j}\\) form a direct sum to \\(V\\), hence showing \\(3\\).\n\\(3\\implies 2\\)\nNow, suppose you have a bunch of 1-d invariant subspaces \\(U_1 \u0026hellip; U_{n}\\) and they form a direct sum; because they are invariant subspaces, picking any \\(v_{j} \\in U_{j}\\) would be an eigenvector (because \\(T v_{j} = a_{j} v_{j}\\), as applying \\(T\\) is invariant so it\u0026rsquo;d return to the same space, just at a different place). Now, because they form a direct sum on \\(V\\), taking \\(v_{j}\\) from each \\(U_{j}\\) would result in a linearly independent list which\u0026mdash;because they sum up to $V$\u0026mdash;span all of \\(V\\) as each \\(U\\) is simply spanning by scaling \\(v_{j}\\). 
So, \\(v_{j}\\) together forms a basis.\n\\(2 \\implies 4\\)\nGiven \\(V\\) has a basis formed by eigenvectors of \\(T\\), the sum of all scales of eigenvectors in \\(T\\) can be written by the sum of all eigenspaces: that is \\(V = null(T-\\lambda_{1} I) + \u0026hellip; null(T- \\lambda_{m} I)\\) (recall that \\(E(\\lambda_{j}, T) = null(T- \\lambda_{j}I)\\)); as each eigenvalue for which the basis is formed can be found in each of these spaces, their sum would therefore equal to \\(V\\) as this sum represents an linear combination of eigenvectors in \\(T\\).\nNow, sum of eigenspaces form a direct sum so we have that the sum is direct sum. Hence: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\)\n\\(4 \\implies 5\\)\n\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) (see link for proof).\n\\(5 \\implies 2\\)\nWe are given that:\n\\begin{equation} \\dim V = \\dim E(\\lambda_{1}, T) + \\dots + \\dim E(\\lambda_{m}, T) \\end{equation}\nwhich means that taking a basis of each subspace provides a list of \\(\\dim n\\) long of eigenvectors. Now, each sub-list belonging to each space is linearly independent amongst themselves, and they will each be linearly independent against others as list of eigenvectors are linearly independent.\ni.e.: if \\(a_1v_1 + \u0026hellip; + a_{n} v_{n} = 0\\), we can treat each chunk from each eigenspace as \\(u\\), making \\(u_1 + \u0026hellip; u_{m} = 0\\); as they are eigenvectors from distinct eigenvalues, they are linearly independent so each will be \\(0\\). Now, collapsing it into the basis of each eigenspace, this makes \\(a_{j}\\) of the coefficients \\(0\\) as well.\nAnd all of this makes \\(v_1 \u0026hellip; v_{n}\\) a list of \\(\\dim n\\) long that is linearly independent; hence, it is a basis of \\(V\\), as desired. 
\\(\\blacksquare\\)\nenough eigenvalues implies diagonalizability If \\(T \\in \\mathcal{L}(V)\\) has \\(\\dim V\\) distinct eigenvalues, then \\(T\\) is diagonalizable.\nProof:\nlet \\(\\dim V = n\\). Pick eigenvectors \\(v_1 \u0026hellip; v_{n}\\) corresponding to distinct eigenvalues \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\). Now, eigenvectors coorsponding to distinct eigenvalues are linearly independent, and this is a list of \\(\\dim n\\) long that is linearly independent, so it is a basis of eigenvectors. Now, that means that the matrix coorsponding to \\(T\\) is diagonalizable.\nNOTE THAT THE CONVERSE IS NOT TRUE! as each eigenspace can have a dimension of more than 1 so 1 eigenvalue can generate two linearly independent eigenvectors belonging to it.\nFor instance:\n\\begin{equation} T (z_1, z_2, z_3) = (4z_1, 4z_2, 5z_3) \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e of a square matrix consists of entries from the upper-left to the bottom-right\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-20_20-16-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFurthermore, because \u003ca href=\"/posts/kbhupper_triangular_matrix/#eigenvalues-of-a-map-are-the-entries-of-the-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eeigenvalues of a map are the entries of the diagonal of its upper-triangular matrix\u003c/a\u003e, and this is \u003cem\u003etechnically\u003c/em\u003e an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e, the entries on the diagonal are exactly the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"properties-of-diagonal-matrices\"\u003eproperties of diagonal matrices\u003c/h2\u003e\n\u003cp\u003eSuppose 
\\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, and \\(T \\in \\mathcal{L}(V)\\); and let \\(\\lambda_{1}, \u0026hellip; \\lambda_{m}\\) be distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(T\\). Then, the following are equivalent:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(T\\) is diagonalizable\u003c/li\u003e\n\u003cli\u003e\\(V\\) has a basis containing of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(T\\)\u003c/li\u003e\n\u003cli\u003ethere exists 1-dim subspaces \\(U_{1}, \u0026hellip;, U_{n}\\) of \\(V\\), each invariant under \\(T\\), such that \\(V = U_1 \\oplus \u0026hellip; \\oplus U_{n}\\)\u003c/li\u003e\n\u003cli\u003especifically, those \\(U\\) are \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es; that is: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\dim V = \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\(1 \\implies 2\\), \\(2 \\implies 1\\)\u003c/p\u003e\n\u003cp\u003eBy a moment\u0026rsquo;s fucking thought. hehe my notes my rule. jkjkjk\u003c/p\u003e\n\u003cp\u003eBy calculation this is true; if you apply a standard basis to the matrix, it will simply be scaled; therefore, you can think of each slot as an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003e\\(2 \\implies 3\\)\u003c/p\u003e\n\u003cp\u003eCreate \\(U_{j} = span(v_{j})\\) where \\(v_{j}\\) is the \\(j\\) \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
Now, given \\(v_{j}\\) forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, then, \\(v_1 \u0026hellip; v_{n}\\) not only is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e but \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e. Therefore, each \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e in \\(V\\) can be written uniquely by a linear combination of \\(v_{j}\\) (i.e. taking one \\(v_{j}\\) from each \\(U\\)). Hence, by definition, \\(U_{j}\\) form a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e to \\(V\\), hence showing \\(3\\).\u003c/p\u003e\n\u003cp\u003e\\(3\\implies 2\\)\u003c/p\u003e\n\u003cp\u003eNow, suppose you have a bunch of 1-d \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es \\(U_1 \u0026hellip; U_{n}\\) and they form a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e; because they are \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es, picking any \\(v_{j} \\in U_{j}\\) would be an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e (because \\(T v_{j} = a_{j} v_{j}\\), as applying \\(T\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e so it\u0026rsquo;d return to the same space, just at a different place). Now, because they form a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e on \\(V\\), taking \\(v_{j}\\) from each \\(U_{j}\\) would result in a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list which\u0026mdash;because they sum up to $V$\u0026mdash;\u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e all of \\(V\\) as each \\(U\\) is simply spanning by scaling \\(v_{j}\\). 
So, \\(v_{j}\\) together forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\(2 \\implies 4\\)\u003c/p\u003e\n\u003cp\u003eGiven \\(V\\) has a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e formed by \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(T\\), the sum of all scales of eigenvectors in \\(T\\) can be written by the sum of all \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es: that is \\(V = null(T-\\lambda_{1} I) + \u0026hellip; null(T- \\lambda_{m} I)\\) (recall that \\(E(\\lambda_{j}, T) = null(T- \\lambda_{j}I)\\)); as each \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e for which the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e is formed can be found in each of these spaces, their sum would therefore equal to \\(V\\) as this sum represents an \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es in \\(T\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003esum of eigenspaces form a direct sum\u003c/a\u003e so we have that the sum is \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. 
Hence: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\)\u003c/p\u003e\n\u003cp\u003e\\(4 \\implies 5\\)\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/a\u003e (see link for proof).\u003c/p\u003e\n\u003cp\u003e\\(5 \\implies 2\\)\u003c/p\u003e\n\u003cp\u003eWe are given that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V = \\dim E(\\lambda_{1}, T) + \\dots + \\dim E(\\lambda_{m}, T)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means that taking a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e provides a list of \\(\\dim n\\) long of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es. 
Now, each sub-list belonging to each space is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e amongst themselves, and they will each be \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e against others as \u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003elist of eigenvectors are linearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ei.e.: if \\(a_1v_1 + \u0026hellip; + a_{n} v_{n} = 0\\), we can treat each chunk from each \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e as \\(u\\), making \\(u_1 + \u0026hellip; u_{m} = 0\\); as they are \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es from distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, they are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e so each will be \\(0\\). Now, collapsing it into the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e, this makes \\(a_{j}\\) of the coefficients \\(0\\) as well.\u003c/p\u003e\n\u003cp\u003eAnd all of this makes \\(v_1 \u0026hellip; v_{n}\\) a list of \\(\\dim n\\) long that is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e; hence, it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"enough-eigenvalues-implies-diagonalizability\"\u003eenough eigenvalues implies diagonalizability\u003c/h2\u003e\n\u003cp\u003eIf \\(T \\in \\mathcal{L}(V)\\) has \\(\\dim V\\) distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, then \\(T\\) is \u003ca href=\"#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003elet \\(\\dim V = n\\). Pick \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es \\(v_1 \u0026hellip; v_{n}\\) corresponding to distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\). Now, \u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003eeigenvectors coorsponding to distinct eigenvalues are linearly independent\u003c/a\u003e, and this is a list of \\(\\dim n\\) long that is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, so it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es. 
Now, that means that the matrix coorsponding to \\(T\\) is \u003ca href=\"#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eNOTE THAT THE CONVERSE IS NOT TRUE!\u003c/strong\u003e as each \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e can have a dimension of more than 1 so 1 \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e can generate two \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvectors\u003c/a\u003e belonging to it.\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT (z_1, z_2, z_3) = (4z_1, 4z_2, 5z_3)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiagonal_matrix/","tags":null,"title":"Diagonal Matrix"},{"categories":null,"contents":"A human Dialogue is a human to human interaction.\nturn each contributino to a conversation is called a \u0026ldquo;turn\u0026rdquo;, which contains a sentence, multiple sentences, or a single word\nturn-taking when to take the floor who takes the floor what happens during interruptions? barge-in barge-in is the property to allow the user to interrupt the system\nend-pointing deciding when a human has stopped talking, compute, etc.\nspeech-act each turn is actually an \u0026ldquo;action\u0026rdquo; performed by the user\nconstatives: committing the speaker to something being the case (answering, denying) directives: ask the addressee to do something (advising, ordering) com missives: commuting the speaker to future action (planning, voving) acknowledgement: reflecting the speaker\u0026rsquo;s attitude for something (apologizing, greeting, etc.) 
common ground grounding is the problem of acknowledging and reflecting the state of interaction; such as the elevator lighting up when pressed.\nacknowledgements and repeats is a way of grounding.\nwe need to make sure that the system acknowledges user interaction\nadjacency pairs question =\u0026gt; answer proposal =\u0026gt; acceptance/rejection complements =\u0026gt; downplay two-pair composition maybe interrupted or separated by a sub-dialogue\nconversational initiative Sometimes, such as during interviews, only one agent has initiative. This is not true most of the time during human-human interactions.\nmixed initiative is hard to achieve, usually we make dialogue systems as passive environments\u0026mdash;only user and system understanding.\n","html":"\u003cp\u003eA human \u003ca href=\"/posts/kbhdialogue/\"\u003eDialogue\u003c/a\u003e is a human to human interaction.\u003c/p\u003e\n\u003ch2 id=\"turn\"\u003eturn\u003c/h2\u003e\n\u003cp\u003eeach contributino to a conversation is called a \u0026ldquo;turn\u0026rdquo;, which contains a sentence, multiple sentences, or a single word\u003c/p\u003e\n\u003ch2 id=\"turn-taking\"\u003eturn-taking\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhen to take the floor\u003c/li\u003e\n\u003cli\u003ewho takes the floor\u003c/li\u003e\n\u003cli\u003ewhat happens during interruptions?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"barge-in\"\u003ebarge-in\u003c/h2\u003e\n\u003cp\u003ebarge-in is the property to allow the user to interrupt the system\u003c/p\u003e\n\u003ch2 id=\"end-pointing\"\u003eend-pointing\u003c/h2\u003e\n\u003cp\u003edeciding when a human has stopped talking, compute, etc.\u003c/p\u003e\n\u003ch2 id=\"speech-act\"\u003espeech-act\u003c/h2\u003e\n\u003cp\u003eeach \u003ca href=\"#turn\"\u003eturn\u003c/a\u003e is actually an \u0026ldquo;action\u0026rdquo; performed by the user\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003econstatives\u003c/strong\u003e: committing the speaker to 
something being the case (answering, denying)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edirectives\u003c/strong\u003e: ask the addressee to do something (advising, ordering)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ecom missives\u003c/strong\u003e: commuting the speaker to future action (planning, voving)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eacknowledgement\u003c/strong\u003e: reflecting the speaker\u0026rsquo;s attitude for something (apologizing, greeting, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"common-ground\"\u003ecommon ground\u003c/h2\u003e\n\u003cp\u003egrounding is the problem of acknowledging and reflecting the state of interaction; such as the elevator lighting up when pressed.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eacknowledgements\u003c/strong\u003e and repeats is a way of grounding.\u003c/p\u003e\n\u003cp\u003ewe need to make sure that the system acknowledges user interaction\u003c/p\u003e\n\u003ch2 id=\"adjacency-pairs\"\u003eadjacency pairs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003equestion =\u0026gt; answer\u003c/li\u003e\n\u003cli\u003eproposal =\u0026gt; acceptance/rejection\u003c/li\u003e\n\u003cli\u003ecomplements =\u0026gt; downplay\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003etwo-pair composition maybe interrupted or separated by a sub-dialogue\u003c/p\u003e\n\u003ch2 id=\"conversational-initiative\"\u003econversational initiative\u003c/h2\u003e\n\u003cp\u003eSometimes, such as during interviews, only one agent has initiative. 
This is not true most of the time during human-human interactions.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003emixed initiative\u003c/strong\u003e is hard to achieve, usually we make dialogue systems as passive environments\u0026mdash;only user and system understanding.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdialogue/","tags":null,"title":"Dialogue"},{"categories":null,"contents":"Dialogue State Architecture uses dialogue acts instead of simple frame filling to perform generation; used currently more in research.\nNLU: slot fillers to extract user\u0026rsquo;s utterance, using ML Dialogue State Tracker: maintains current state of dialogue Dialogue policy: decides what to do next (think GUS\u0026rsquo; policy: ask, fill, respond)\u0026mdash;but nowaday we have more complex dynamics NLG: respond dialogue acts dialogue acts combines speech-acts with underlying states\nslot filing we typically do this with BIO Tagging with a BERT just like NER Tagging, but we tag for frame slots.\nthe final \u0026lt;cls\u0026gt; token may also work to classify domain + intent.\ncorrections are hard folks sometimes uses hyperarticulation (\u0026ldquo;exaggerated prosody\u0026rdquo;) for correction, which trip up ASR\ncorrection acts may need to be detected explicitly as a speech act:\ndialogue policy we can choose over the last frame, agent and user utterances:\n\\begin{equation} A = \\arg\\max_{a} P(A|F_{i-1}, A_{i-1}, U_{i-1}) \\end{equation}\nwe can probably use a neural architecture to do this.\nwhether to confirm via ASR confirm:\n\\(\u0026lt;\\alpha\\): reject \\(\\geq \\alpha\\): confirm explicitly \\(\\geq \\beta\\): confirm implicitly \\(\\geq \\gamma\\): no need to confirm NLG once the speech act is determined, we need to actually go generate it: 1) choose some attributes 2) generate utterance\nWe typically want to delexicalize the keywords (Henry serves French food =\u0026gt; [restraunt] serves [cruisine] food), then run through NLG, then rehydrate 
with frame.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdialogue_state_architecture/\"\u003eDialogue State Architecture\u003c/a\u003e uses \u003ca href=\"#dialogue-acts\"\u003edialogue acts\u003c/a\u003e instead of simple \u003ca href=\"/posts/kbhgus/#frame\"\u003eframe\u003c/a\u003e filling to perform generation; used currently more in research.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-52-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eNLU\u003c/strong\u003e: slot fillers to extract user\u0026rsquo;s utterance, using ML\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDialogue State Tracker\u003c/strong\u003e: maintains current state of dialogue\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDialogue policy\u003c/strong\u003e: decides what to do next (think \u003ca href=\"/posts/kbhgus/\"\u003eGUS\u003c/a\u003e\u0026rsquo; policy: ask, fill, respond)\u0026mdash;but nowaday we have more complex dynamics\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eNLG\u003c/strong\u003e: respond\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dialogue-acts\"\u003edialogue acts\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#dialogue-acts\"\u003edialogue acts\u003c/a\u003e combines \u003ca href=\"/posts/kbhdialogue/#speech-act\"\u003espeech-act\u003c/a\u003es with underlying states\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-55-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-55-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"slot-filing\"\u003eslot filing\u003c/h2\u003e\n\u003cp\u003ewe typically do this with \u003ca href=\"/posts/kbhner_tagging/#bio-tagging\"\u003eBIO Tagging\u003c/a\u003e with a BERT just like \u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e, but we tag for frame slots.\u003c/p\u003e\n\u003cp\u003ethe final \u0026lt;cls\u0026gt; token may also work to 
classify domain + intent.\u003c/p\u003e\n\u003ch2 id=\"corrections-are-hard\"\u003ecorrections are hard\u003c/h2\u003e\n\u003cp\u003efolks sometimes uses \u003ca href=\"#corrections-are-hard\"\u003ehyperarticulation\u003c/a\u003e (\u0026ldquo;exaggerated prosody\u0026rdquo;) for correction, which trip up ASR\u003c/p\u003e\n\u003cp\u003ecorrection acts may need to be detected explicitly as a speech act:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_10-00-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"dialogue-policy\"\u003edialogue policy\u003c/h2\u003e\n\u003cp\u003ewe can choose over the last frame, agent and user utterances:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\arg\\max_{a} P(A|F_{i-1}, A_{i-1}, U_{i-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can probably use a neural architecture to do this.\u003c/p\u003e\n\u003cp\u003ewhether to confirm via ASR confirm:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\u0026lt;\\alpha\\): reject\u003c/li\u003e\n\u003cli\u003e\\(\\geq \\alpha\\): confirm explicitly\u003c/li\u003e\n\u003cli\u003e\\(\\geq \\beta\\): confirm implicitly\u003c/li\u003e\n\u003cli\u003e\\(\\geq \\gamma\\): no need to confirm\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"nlg\"\u003eNLG\u003c/h2\u003e\n\u003cp\u003eonce the speech act is determined, we need to actually go generate it: 1) choose some attributes 2) generate utterance\u003c/p\u003e\n\u003cp\u003eWe typically want to \u003cstrong\u003edelexicalize\u003c/strong\u003e the keywords (Henry serves French food =\u0026gt; [restraunt] serves [cruisine] food), then run through NLG, then rehydrate with frame.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdialogue_state_architecture/","tags":null,"title":"Dialogue State 
Architecture"},{"categories":null,"contents":"An\n","html":"\u003cp\u003eAn\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiffeomorphism/","tags":null,"title":"Diffeomorphism"},{"categories":null,"contents":"We have a function:\n\\begin{equation} |x|+|y|\\frac{dy}{dx} = \\sin \\left(\\frac{x}{n}\\right) \\end{equation}\nWe are to attempt to express the solution analytically and also approximate them.\nTo develop a basic approximate solution, we will leverage a recursive simulation approach.\nWe first set a constant \\(N\\) which in the \\(N\\) value which we will eventually vary.\nN = 0.5 We can get some values by stepping through \\(x\\) and \\(y\\) through which we can then figure \\(\\frac{dy}{dx}\\), namely, how the function evolves.\n# cache res = [] # number of steps steps = 1000 # seed values x = -5 y = 5 # step size step = 1/100 # for number of setps for _ in range(steps): # get the current equation and slope solution dydx = (sin(x/N)-abs(x))/abs(y) # append result res.append((x,y,dydx)) # apply the slope solution to iterate next y # step size is defined by `step` x += step y += dydx*step We have now a set of analytic solutions \\((x,y,\\frac{dy}{dx})\\). Let\u0026rsquo;s plot them!\nscatter_plot([i[0:2] for i in res]) Great, now we have a fairly non-specific but \u0026ldquo;correct\u0026rdquo; solution. We are now going to try to derive an analytic solution.\nWait\u0026hellip; That\u0026rsquo;s not the solution we got! But\u0026hellip; its close: the blue line simply need to be reflected across the \\(x\\) axis.\nIts actually fairly apparent why we will need this negative. 
We just declared that \\(y\\) was negative for that portion of the solution; the output of a square root could never be negative, so of course to achieve \\(y\\) being negative we have to take into account that square roots have a possible negative output as well.\nNice; now our analytical results agree with out numerical results.\n\\begin{equation} \\begin{cases} y\u0026gt;0 \u0026amp; y=\\sqrt{-2n\\cos\\left(\\frac{x}{n}\\right)-x\\vert x\\vert} +C \\\\ y\u0026lt;0 \u0026amp; y=-\\sqrt{2n\\cos\\left(\\frac{x}{n}\\right)+x\\vert x\\vert}+C \\end{cases} \\end{equation}\nMoving on to the result of the questions.\nSolution behavior The solution are unbounded and mostly decreasing. As \\(n\\in [-1,1]\\), the solution becomes unstable; a solution does not exist at \\(n=0\\).\nAt \\(n=0.5\\), a solution passes through \\((0,-1)\\).\n","html":"\u003cp\u003eWe have a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x|+|y|\\frac{dy}{dx} = \\sin \\left(\\frac{x}{n}\\right)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are to attempt to express the solution analytically and also approximate them.\u003c/p\u003e\n\u003cp\u003eTo develop a basic approximate solution, we will leverage a recursive simulation approach.\u003c/p\u003e\n\u003cp\u003eWe first set a constant \\(N\\) which in the \\(N\\) value which we will eventually vary.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can get some values by stepping through \\(x\\) and \\(y\\) through which we can then 
figure \\(\\frac{dy}{dx}\\), namely, how the function evolves.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# number of steps\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esteps\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# seed values\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e 
\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# step size\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for number of setps\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esteps\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get the current equation and slope solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edydx\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# append result\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edydx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# apply the slope solution to 
iterate next y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# step size is defined by `step`\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edydx\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe have now a set of analytic solutions \\((x,y,\\frac{dy}{dx})\\). 
Let\u0026rsquo;s plot them!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003escatter_plot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-08-30_23-22-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eGreat, now we have a fairly non-specific but \u0026ldquo;correct\u0026rdquo; solution. We are now going to try to derive an analytic solution.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-01_22-01-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWait\u0026hellip; That\u0026rsquo;s not the solution we got! But\u0026hellip; its \u003cem\u003eclose\u003c/em\u003e: the blue line simply need to be reflected across the \\(x\\) axis.\u003c/p\u003e\n\u003cp\u003eIts actually fairly apparent why we will need this negative. 
We just declared that \\(y\\) was negative for that portion of the solution; the output of a square root could never be negative, so of course to achieve \\(y\\) being negative we have to take into account that square roots have a possible negative output as well.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-01_22-21-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNice; now our analytical results agree with out numerical results.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026gt;0 \u0026amp; y=\\sqrt{-2n\\cos\\left(\\frac{x}{n}\\right)-x\\vert x\\vert} +C \\\\\ny\u0026lt;0 \u0026amp; y=-\\sqrt{2n\\cos\\left(\\frac{x}{n}\\right)+x\\vert x\\vert}+C\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMoving on to the result of the questions.\u003c/p\u003e\n\u003ch2 id=\"solution-behavior\"\u003eSolution behavior\u003c/h2\u003e\n\u003cp\u003eThe solution are unbounded and mostly decreasing. As \\(n\\in [-1,1]\\), the solution becomes unstable; a solution does not exist at \\(n=0\\).\u003c/p\u003e\n\u003cp\u003eAt \\(n=0.5\\), a solution passes through \\((0,-1)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchallenge_1/","tags":null,"title":"DiffEq: Challenge #1"},{"categories":null,"contents":"Generative Classifier A Generative Classifier builds a good model of a class, and use that to assign how \u0026ldquo;class-y\u0026rdquo; is that image.\nFor instance, to categorize cats vs. dogs, we build a cat model and dog model. 
To classify, then, we see if a particular image is more \u0026ldquo;cat-y\u0026rdquo; or \u0026ldquo;dog-y\u0026rdquo;.\nDiscriminative Classifier A Discriminative Classifier observes the differences between two classes, instead of trying to model each one.\n","html":"\u003ch2 id=\"generative-classifier\"\u003eGenerative Classifier\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#generative-classifier\"\u003eGenerative Classifier\u003c/a\u003e builds a good model of a class, and use that to assign how \u0026ldquo;class-y\u0026rdquo; is that image.\u003c/p\u003e\n\u003cp\u003eFor instance, to categorize cats vs. dogs, we build a cat model and dog model. To classify, then, we see if a particular image is more \u0026ldquo;cat-y\u0026rdquo; or \u0026ldquo;dog-y\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"discriminative-classifier\"\u003eDiscriminative Classifier\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#discriminative-classifier\"\u003eDiscriminative Classifier\u003c/a\u003e observes the differences between two classes, instead of trying to model each one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenerative_vs_discriminitive_classifier/","tags":null,"title":"Difference Between Logistic Regression and Naive Bayes"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdifference_equation/","tags":null,"title":"difference equation"},{"categories":null,"contents":"A Differential Equation is a function-valued algebreic equation whose unknown is an entire function \\(y(x)\\), where the equation involves a combination of derivatives $y(x), y\u0026rsquo;(x), \u0026hellip;$.\nSee Differential Equations Index\nand Uniqueness and Existance\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e is a function-valued \u003ca href=\"/posts/kbhalgebreic_equation/\"\u003ealgebreic equation\u003c/a\u003e whose unknown is an entire \u003ca 
href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(y(x)\\), where the equation involves a combination of derivatives $y(x), y\u0026rsquo;(x), \u0026hellip;$.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhodes_index/\"\u003eDifferential Equations Index\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eand \u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiffeq_intro/","tags":["Index"],"title":"Differential Equations"},{"categories":null,"contents":"Differential Equations. math53.stanford.edu.\nLogistics Prof. Rafe Mazzeo\nTAs Rodrigo Angelo Zhenyuan Zhang Assignments Pre-lecture reading + questionnaire PSets: Wed 9A 2 Midterms + 1 Final: wk 4 + wk 7, Thurs Evening; Tuesday 12:15 Review it suffices to study First Order ODEs because we can convert all higher order functions into a First Order ODEs homogeneous linear systems \\(y\u0026rsquo;=Ay\\) can be solved using eigenvalue, matrix exponentiation, etc. (recall that special cases exists where repeated eigenvalues, etc.) inhomogeneous systems \\(y\u0026rsquo; = Ay +f(t)\\) can be solved using intergrating factor or variation of parameters method general analysis of non-linear \\(y\u0026rsquo;=f(y)\\): we can talk about stationary solutions (1. linearize each \\(y_0\\) stationary solutions to figure local behavior 2. 
away from stationary solutions, use Lyapunov Functions to discuss), or liapenov functions for variable-coefficient ODEs, we decry sadness and Solving ODEs via power series Content Ordinary Differential Equations Partial Differential Equations What we want to understand:\nqualitative behaviors and values writing it as an elementary function is lame Linear ODEs SU-MATH53 JAN082024 SU-MATH53 JAN102024 SU-MATH53 JAN122024 SU-MATH53 JAN172024 SU-MATH53 JAN192024 SU-MATH53 JAN222024 Linear Second Order ODEs (and how to first-order them) SU-MATH53 JAN262024 SU-MATH53 JAN292024 SU-MATH53 FEB022024 SU-MATH53 FEB052024 Non-linear ODEs SU-MATH53 FEB072024 SU-MATH53 FEB092024 Linear Non-Constant Coefficient ODEs SU-MATH53 FEB122024 SU-MATH53 FEB142024 SU-MATH53 FEB162024 Fourier Series SU-MATH53 FEB212024 SU-MATH53 FEB232024 SU-MATH53 FEB252024 SU-MATH53 FEB282024 SU-MATH53 MAR042024 Fourier Transform SU-MATH53 MAR062024 SU-MATH53 MAR082024 SU-MATH53 MAR112024 Midterm Sheet SU-MATH53 Midterm Sheet Other Stuff Bessel\u0026rsquo;s Equation ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e. math53.stanford.edu.\u003c/p\u003e\n\u003ch2 id=\"logistics\"\u003eLogistics\u003c/h2\u003e\n\u003cp\u003eProf. 
Rafe Mazzeo\u003c/p\u003e\n\u003ch3 id=\"tas\"\u003eTAs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRodrigo Angelo\u003c/li\u003e\n\u003cli\u003eZhenyuan Zhang\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"assignments\"\u003eAssignments\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePre-lecture reading + questionnaire\u003c/li\u003e\n\u003cli\u003ePSets: Wed 9A\u003c/li\u003e\n\u003cli\u003e2 Midterms + 1 Final: wk 4 + wk 7, Thurs Evening; Tuesday 12:15\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"review\"\u003eReview\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eit suffices to study \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e because we can convert all \u003ca href=\"/posts/kbhgeneric/#higher-order-functions\"\u003ehigher order functions\u003c/a\u003e into a \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ehomogeneous linear systems \\(y\u0026rsquo;=Ay\\) can be solved using \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e, \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e, etc. (recall that \u003cstrong\u003especial cases exists\u003c/strong\u003e where repeated eigenvalues, etc.)\u003c/li\u003e\n\u003cli\u003einhomogeneous systems \\(y\u0026rsquo; = Ay +f(t)\\) can be solved using \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eintergrating factor\u003c/a\u003e or \u003ca href=\"/posts/kbhnon_homogeneous_linear_differential_equation/#variation-of-parameters-method\"\u003evariation of parameters method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003egeneral analysis of non-linear \\(y\u0026rsquo;=f(y)\\): we can talk about stationary solutions (1. linearize each \\(y_0\\) stationary solutions to figure local behavior 2. 
away from stationary solutions, use \u003ca href=\"/posts/kbhnon_linear_ode/#monotone-function\"\u003eLyapunov Function\u003c/a\u003es to discuss), or liapenov functions\u003c/li\u003e\n\u003cli\u003efor variable-coefficient ODEs, we decry sadness and \u003ca href=\"/posts/kbhsu_math53_feb122024/#solving-odes-via\"\u003eSolving ODEs via power series\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"content\"\u003eContent\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eOrdinary Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePartial Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat we want to understand:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003equalitative behaviors and values\u003c/li\u003e\n\u003cli\u003ewriting it as an elementary function is lame\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"linear-odes\"\u003eLinear ODEs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan082023/\"\u003eSU-MATH53 JAN082024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan102023/\"\u003eSU-MATH53 JAN102024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan122023/\"\u003eSU-MATH53 JAN122024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan172024/\"\u003eSU-MATH53 JAN172024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan192023/\"\u003eSU-MATH53 JAN192024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan202024/\"\u003eSU-MATH53 JAN222024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"linear-second-order-odes--and-how-to-first-order-them\"\u003eLinear Second Order ODEs (and how to first-order them)\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_math53_jan262023/\"\u003eSU-MATH53 JAN262024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan292024/\"\u003eSU-MATH53 JAN292024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb022024/\"\u003eSU-MATH53 FEB022024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb052024/\"\u003eSU-MATH53 FEB052024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"non-linear-odes\"\u003eNon-linear ODEs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb072024/\"\u003eSU-MATH53 FEB072024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb092024/\"\u003eSU-MATH53 FEB092024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"linear-non-constant-coefficient-odes\"\u003eLinear Non-Constant Coefficient ODEs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb122024/\"\u003eSU-MATH53 FEB122024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb142024/\"\u003eSU-MATH53 FEB142024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb162024/\"\u003eSU-MATH53 FEB162024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"fourier-series\"\u003eFourier Series\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb212024/\"\u003eSU-MATH53 FEB212024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb232024/\"\u003eSU-MATH53 FEB232024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb252024/\"\u003eSU-MATH53 FEB252024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb282024/\"\u003eSU-MATH53 FEB282024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar042024/\"\u003eSU-MATH53 MAR042024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"fourier-transform\"\u003eFourier Transform\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar062024/\"\u003eSU-MATH53 MAR062024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar082024/\"\u003eSU-MATH53 MAR082024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar112024/\"\u003eSU-MATH53 MAR112024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"midterm-sheet\"\u003eMidterm Sheet\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_midterm_sheet/\"\u003eSU-MATH53 Midterm Sheet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-stuff\"\u003eOther Stuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbessel_s_equation/\"\u003eBessel\u0026rsquo;s Equation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhodes_index/","tags":null,"title":"Differential Equations Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdifferential_privacy/","tags":null,"title":"Differential Privacy"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdiffusion_map/","tags":null,"title":"diffusion map"},{"categories":null,"contents":"What if we can use diffusion models to generate Laproscopic surgeries to train surgeons?\nProblem Asking dalle to just \u0026ldquo;generate a Laproscopic surgery\u0026rdquo; is not going to work. It will give you cartoons.\nApproach text problem formulation: \u0026ldquo;grasper grasp gallbladder\u0026rdquo; encode text into latents do diffusion with late fusion of latents Data: Cholec T-45\nWeighting Scoring: Perception Prioritized Weighting + Prioritization for Signal-to-Noise\n(Ho et al, 2020)\nText \u0026ldquo;[subject] [verb] [object] [surgical phase]\u0026rdquo;\n\u0026ldquo;grasper grasp gallbladder in preparation\u0026rdquo;\nModel Elucidated Imagen. 
Dall-E is very bad; Imagen-class models works better because (why?).\nAdded Value to Physicians using Generated Images Train a Classifier Rendevouz Network: train a discriminator for procedure based on data augmented with generated images; 5% improvement.\nMedical Expert Survey \u0026ldquo;yo mr doctor man can you spot which one of these are generated?\u0026rdquo;\n45% success rate.\n","html":"\u003cp\u003eWhat if we can use diffusion models to generate Laproscopic surgeries to train surgeons?\u003c/p\u003e\n\u003ch2 id=\"problem\"\u003eProblem\u003c/h2\u003e\n\u003cp\u003eAsking dalle to just \u0026ldquo;generate a Laproscopic surgery\u0026rdquo; is not going to work. It will give you cartoons.\u003c/p\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003etext problem formulation\u003c/strong\u003e: \u0026ldquo;grasper grasp gallbladder\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eencode text into latents\u003c/li\u003e\n\u003cli\u003edo diffusion with late fusion of latents\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eData: Cholec T-45\u003c/p\u003e\n\u003ch3 id=\"weighting\"\u003eWeighting\u003c/h3\u003e\n\u003cp\u003eScoring: Perception Prioritized Weighting + Prioritization for Signal-to-Noise\u003c/p\u003e\n\u003cp\u003e(Ho et al, 2020)\u003c/p\u003e\n\u003ch3 id=\"text\"\u003eText\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;[subject] [verb] [object] [surgical phase]\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;grasper grasp gallbladder in preparation\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"model\"\u003eModel\u003c/h3\u003e\n\u003cp\u003eElucidated Imagen. 
Dall-E is very bad; Imagen-class models works better because (why?).\u003c/p\u003e\n\u003ch2 id=\"added-value-to-physicians-using-generated-images\"\u003eAdded Value to Physicians using Generated Images\u003c/h2\u003e\n\u003ch3 id=\"train-a-classifier\"\u003eTrain a Classifier\u003c/h3\u003e\n\u003cp\u003eRendevouz Network: train a discriminator for procedure based on data augmented with generated images; 5% improvement.\u003c/p\u003e\n\u003ch3 id=\"medical-expert-survey\"\u003eMedical Expert Survey\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;yo mr doctor man can you spot which one of these are generated?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e45% success rate.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiffusion_models_for_laproscopic_surgeries/","tags":null,"title":"Diffusion Models for Laproscopic Surgeries"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdigital_origin_for_life/","tags":null,"title":"Digital Origin for Life"},{"categories":null,"contents":"The dimension of a vector space is the length of any basis in the vector space. It is denoted as \\(\\dim V\\).\nadditional information See also finite-dimensional vector space and infinite-demensional vector space\ndimension of subspace is smaller or equal to that of its parent If we have a finite-dimensional \\(V\\) and a subspace thereof \\(U\\), then \\(\\dim U \\leq \\dim V\\).\nFirstly, the every subspace of a finite-dimensional vector space is a finite-dimensional vector space is itself a finite-dimensional vector space. Therefore, it has a finite dimension.\nThen, we will simply think of the basis of \\(U\\) as an linearly independent list in \\(V\\); and of course, the basis of \\(V\\) spans \\(V\\). As length of linearly-independent list \\(\\leq\\) length of spanning list, we have that length of basis of \\(U \\leq\\) length of basis of \\(V\\).\nThis makes \\(\\dim U \\leq \\dim V\\), as desired. 
\\(\\blacksquare\\)\nlists of right length are a basis These are two results that tell us if you are given a list of list of right length, one condition (spanning or linear independence) can tell you that they are a basis. It\u0026rsquo;s also known (as a John McHugh special:tm:) as the Half Is Good Enough theorems.\nlinearly independent list of length dim V are a basis of V Begin with an linearly independent list in \\(V\\) of length \\(\\dim V\\). We aim to extend this list into a basis of \\(V\\).\nAs we know all basis in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no extension is needed to form a basis.\nAs every linearly independent list expends to a basis, we conclude that the list is already a basis of \\(V\\), as desired \\(\\blacksquare\\).\nspanning list of length of dim V are a basis of V Begin with a spanning list in \\(V\\) of length \\(\\dim V\\). We aim to reduce this list into a basis of \\(V\\).\nAs we know all basis in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no reduction is needed to form a basis.\nAs all spanning lists contains a basis of which you are spanning, we conclude that the list is a basis of \\(V\\), as desired \\(\\blacksquare\\).\ndimension of sums See dimension of sums\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e is the length of any \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in the \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. 
It is denoted as \\(\\dim V\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e and \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"dimension-of-subspace-is-smaller-or-equal-to-that-of-its-parent\"\u003edimension of subspace is smaller or equal to that of its parent\u003c/h3\u003e\n\u003cp\u003eIf we have a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e \\(V\\) and a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e thereof \\(U\\), then \\(\\dim U \\leq \\dim V\\).\u003c/p\u003e\n\u003cp\u003eFirstly, the \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003eevery subspace of a finite-dimensional vector space is a finite-dimensional vector space\u003c/a\u003e is itself a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e. Therefore, it has a finite dimension.\u003c/p\u003e\n\u003cp\u003eThen, we will simply think of the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U\\) as an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\); and of course, the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). 
As \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e, we have that length of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U \\leq\\) length of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eThis makes \\(\\dim U \\leq \\dim V\\), as desired. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"lists-of-right-length-are-a-basis--kbhbasis-dot-md\"\u003elists of right length are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThese are two results that tell us if you are given a list of \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of right length, one condition (\u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e or \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e) can tell you that they are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e. It\u0026rsquo;s also known (as a John McHugh special:tm:) as the Half Is Good Enough theorems.\u003c/p\u003e\n\u003ch4 id=\"linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/h4\u003e\n\u003cp\u003eBegin with an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\) of length \\(\\dim V\\). 
We aim to extend this list into a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs we know all \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no extension is needed to form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery linearly independent list expends to a basis\u003c/a\u003e, we conclude that the list is already a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), as desired \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch4 id=\"spanning-list-of-length-of-dim-v-are-a-basis-of-v\"\u003espanning list of length of dim V are a basis of V\u003c/h4\u003e\n\u003cp\u003eBegin with a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list in \\(V\\) of length \\(\\dim V\\). 
We aim to reduce this list into a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs we know all \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no reduction is needed to form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e, we conclude that the list is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), as desired \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch3 id=\"dimension--kbhdimension-dot-md--of-sums\"\u003e\u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of sums\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhsum_of_subsets/#id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-of-sums\"\u003edimension of sums\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdimension/","tags":null,"title":"dimension"},{"categories":null,"contents":"Direct Sampling is the act in probability to sample what you want from the distribution. This is often used when actual inference impossible. It involves. well. 
sampling from the distribution to compute a conditional probability that you want.\nIt basically involves invoking the Frequentist Definition of Probability without letting \\(n \\to \\infty\\), instead just sampling some \\(n \u0026lt; \\infty\\) and dividing the event space by your sample space.\nSo, for instance, to compute inference on \\(b^{1}\\) given observations \\(d^{1}c^{1}\\), we can write:\n\\begin{equation} P(b^{1} | d^{1}, c^{1}) = \\frac{P(b^{1}, d^{1}, c^{1})}{P(d^{1})P(c^{1})} \\approx \\frac{\\sum_{i}^{} b^{i} = 1 \\land d^{i} = i \\land c^{i} = 1}{\\sum_{i}^{} d^{i} =1 \\land c^{i} = 1} \\end{equation}\nwhere \\(a^{i}\\) is the \\(i\\) th sample.\nDirect Sampling a Baysian Network We first obtain a topological sort of the system. For a graph with \\(n\\) nodes, we then obtain a list \\(X_{1:n}\\).\nWe can then obtain a Direct Sampling via simply sampling from this list. Whenever we need to sample some kind of conditional probability, we know that for every \\(k_{i}\\) we need to sample from, its parent conditions would have already been sampled because we are sampling in order of a topological sort so we can just sample the values from a subset of the conditioned set.\nLikelihood Weighted Sampling Likelihood Weighted Sampling is a change to the Direct Sampling approach which deals with the fact that Direct Sampling may oversample conditional probabilities as it is sampling sub-nodes an equal amount.\nIt is particularly useful when our priors are unlikely.\nTo do this, we first perform Direct Sampling as how you would normally. Now, say we get \\(D=1\\), \\(C=1\\), \\(E=1\\) for the Baysian Network presented below, the actual value we return would be whatever \\(P(D|E) P(C|E)\\).\nSee an example here.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e is the act in \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e to sample what you want from the distribution. 
This is often used when actual \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e impossible. It involves. well. sampling from the distribution to compute a \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e that you want.\u003c/p\u003e\n\u003cp\u003eIt basically involves invoking the \u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/a\u003e without letting \\(n \\to \\infty\\), instead just sampling some \\(n \u0026lt; \\infty\\) and dividing the event space by your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo, for instance, to compute \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e on \\(b^{1}\\) given observations \\(d^{1}c^{1}\\), we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(b^{1} | d^{1}, c^{1}) = \\frac{P(b^{1}, d^{1}, c^{1})}{P(d^{1})P(c^{1})} \\approx \\frac{\\sum_{i}^{} b^{i} = 1 \\land d^{i} = i \\land c^{i} = 1}{\\sum_{i}^{} d^{i} =1 \\land c^{i} = 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a^{i}\\) is the \\(i\\) th sample.\u003c/p\u003e\n\u003ch2 id=\"direct-sampling-a-baysian-network--kbhbaysian-network-dot-md\"\u003eDirect Sampling a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWe first obtain a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e of the system. For a graph with \\(n\\) nodes, we then obtain a list \\(X_{1:n}\\).\u003c/p\u003e\n\u003cp\u003eWe can then obtain a \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e via simply sampling from this list. 
Whenever we need to sample some kind of \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e, we know that for every \\(k_{i}\\) we need to sample from, its parent conditions would have already been sampled because we are sampling in order of a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e so we can just sample the values from a subset of the conditioned set.\u003c/p\u003e\n\u003ch2 id=\"likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e is a change to the \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e approach which deals with the fact that \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e may oversample \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probabilities\u003c/a\u003e as it is sampling sub-nodes an equal amount.\u003c/p\u003e\n\u003cp\u003eIt is particularly useful when our priors are unlikely.\u003c/p\u003e\n\u003cp\u003eTo do this, we first perform \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e as how you would normally. Now, say we get \\(D=1\\), \\(C=1\\), \\(E=1\\) for the \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e presented below, the actual value we return would be whatever \\(P(D|E) P(C|E)\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-28_10-20-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSee \u003ca href=\"/posts/kbhapproximate_inference/#example\"\u003ean example here\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirect_sampling/","tags":null,"title":"Direct Sampling"},{"categories":null,"contents":"A direct sum is a sum of subspaces (not just subsets!!) 
where there\u0026rsquo;s only one way to represent each element.\nconstituents subspaces of \\(V\\) named \\(U_1, \\dots, U_{m}\\)\nrequirements The sum of subsets of \\(U_1+\\dots+U_{m}\\) is called a direct sum IFF:\neach element in \\(U_1+\\dots +U_{m}\\) can only be written in one way as a sum \\(u_1 +\\dots +u_{m}\\) (as in, they are linearly independent?)\nWe use \\(\\oplus\\) to represent direct sum.\nadditional information why is it called a direct sum? Something is not a direct sum if any of its components can be described using the others. Its kind of line linear independence but! on entire spaces.\na sum of subsets is a direct sum IFF there is only one way to write \\(0\\) Given \\(U_1, \\dots, U_{m}\\) are subspaces of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is a direct sum IFF the only way to write \\(0\\) as a sum \\(u_1 +\\dots +u_{m}\\) is by taking each element to \\(0\\).\nProof:\nif\u0026mdash; If some \\(U_1 + \\dots +U_{m}\\) is a direct sum, definitionally there is only one way to write \\(0\\). And you can always write \\(0\\) by taking all the constituents to \\(0\\) as they are subspaces, so the additive identity exists.\nonly if\u0026mdash; We are given that there is only one way to write \\(0\\), that:\n\\begin{equation} 0 = u_1+ u_2+ \\dots+ u_{m}: u_j \\in U_{j} \\end{equation}\nas \\(U_{j}\\) are all subspaces, and the additive identity exists, we can say that \\(u_1=u_2=\\dots =0\\).\nAssume for the sake of contradiction that \\(U_1 + \\dots +U_{m}\\) is not a direct sum. 
Therefore:\n\\begin{equation} \\exists\\ v_1 = u_1+u_2+\\dots + u_{m}: u_{j} \\in U_{j} \\end{equation}\nand\n\\begin{equation} \\exists\\ v_1 = w_1+w_2+\\dots + w_{m}: w_{j} \\in U_{j} \\end{equation}\n\u0026ldquo;there are two unique representations of a vector given the sum of subsets\u0026rdquo;\nSubtracting these representations, then:\n\\begin{equation} (v_1-v_1) = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j} \\end{equation}\nFinally, then:\n\\begin{equation} 0 = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j} \\end{equation}\nWe have established that each slot that makes up this particular sum \\(=0\\). Therefore, \\(u_{i}-w_{i} = 0\\). This means $ui=wi$\u0026mdash;there are no two unique representations of \\(v_{1}\\). Reaching contradiction. \\(\\blacksquare\\)\na sum of subsets is only a direct sum IFF their intersection is set containing \\(0\\) Take \\(U\\) and \\(W\\), two subspaces of \\(V\\). \\(U+V\\) is a direct sum IFF \\(U \\cap W = \\{0\\}\\).\nProof:\nif\u0026mdash; Suppose \\(U+V\\) is a direct sum. \\(\\forall v \\in U \\cap V\\), as \\(v\\) is equal to itself, we have that:\n\\begin{equation} 0 = v+(-v) \\end{equation}\nwhere, \\(v\\) is in \\(U\\) and \\(-v\\) is in \\(V\\) (as both \\(U\\) and \\(V\\) are vector spaces, both would contain \\(-1v=-v\\) as we are given \\(v \\in U \\cap V\\) and scalar multiplication is closed on both.)\nBy the unique representation in the definition of direct sums, you have only one way to construct this expression: namely, that \\(v=0\\) as both are vector spaces so the additive identity exists on both.\nHence:\n\\begin{equation} \\{0\\} = U \\cap V \\end{equation}\nonly if\u0026mdash; Suppose \\(U \\cap W = \\{0\\}\\). 
Take also \\(u \\in U\\) and \\(w \\in W\\); we can construct an expression:\n\\begin{equation} u + w = 0 \\end{equation}\nIf we can show that there is only one unique combination of \\(u\\) and \\(w\\) to write \\(0\\), we satisfy the previous proof and therefore \\(U+W\\) is a direct sum.\nThe expression above implies that \\(w\\) is the additive inverse of \\(u\\); therefore; \\(u = -w\\). As both \\(U\\) and \\(W\\) are vector spaces, their elements all have inverses. As \\(u\\) is the inverse of \\(w\\), and given the definition of sum of subsets that \\(u \\in U\\) and \\(w \\in W\\), \\(u\\) and \\(w\\) are both in both \\(U\\) and \\(W\\).\nAs the intersection of \\(U\\) and \\(V\\) is \\(0\\), \\(u=w=0\\). Therefore, there is only one unique representation of \\(0\\), namely with \\(u=0,w=0\\), making \\(U+W\\) a direct sum. \\(\\blacksquare\\)\ndirect sum proofs are not pairwise! Those two proofs above only deal with pairs of sum of subsets. If you have multiple subsets, they don\u0026rsquo;t apply!\nevery subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\) For every subspace \\(U\\) of a finite-dimensional \\(V\\), there is a subspace \\(W\\) of \\(V\\) for which \\(V = U \\oplus W\\).\nBecause \\(V\\) is defined to be finite-dimensional, and the fact that a finite-dimensional subspace is finite-dimensional, \\(U\\) is finite-dimensional.\nTherefore, because every finite-dimensional vector space has a basis, \\(U\\) has a basis \\(u_1, \\dots u_{m}\\).\nBecause bases are linearly independent, and \\(U \\subset V\\), \\(u_1, \\dots u_{m}\\) is a linearly independent list in \\(V\\).\nBecause a linearly independent list expends to a basis, we can construct \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) as the basis of \\(V\\). 
We will construct a \\(W = span(w_1, \\dots w_{n})\\) \u0026mdash; the space formed as the span of the \u0026ldquo;extension\u0026rdquo; vectors to make the basis in \\(V\\).\nBecause the list \\(u_{j}\\dots w_{k}\\) we made is a basis in \\(V\\), \\(U+W=V\\).\nYou can see this because every element \\(v \\in V\\) can be constructed with a linear combination \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) (again, because this list shown to be a basis of \\(V\\) therefore it spans \\(V\\).) Then, to show that \\(U+W=V\\), we can collapse \\(a_{1}u_1\\dots + a_{m}u_{m}=u \\in U\\), and \\(c_{1}w_1 \\dots +c_{m}w_{m} = w \\in W\\). Hence, every element \\(v \\in V\\) can be constructed by some \\(u \\in U + w \\in W\\), making \\(U+W=V\\).\nNow, we have to show that the combination is a direct sum. There is a few ways of going about this, the one presented by Axler is leveraging the fact that a sum of subsets is only a direct sum IFF their intersection is set containing \\(0\\)\u0026mdash;that \\(U \\cap W = \\{0\\}\\).\nGiven some element \\(v\\) that lives in the intersection between \\(U\\) and \\(W\\), it must be formed as a linear combination of two linearly independent lists (as \\(u_j, \\dots w_{j}\\) is a basis, they are linearly independent.)\nIntuition: if an non-zero element lives in the intersection between two linearly independent lists which together is still linearly independent, it must be able to be written by a linear combination of other elements of that linearly independent list to live in the intersection of the two lists\u0026mdash;which is absurd (violates the definition of linearly dependent). The only element for which this is an exception is \\(0\\).\nActual proof:\nsuppose \\(v \\in U \\cap W\\), so \\(v = a_1u_1\\dots +a_{m}v_{m}\\) as well as \\(v=b_1w_{1} + \\dots b_{n}w_{n}\\). 
Subtracting the two lists results in:\n\\begin{equation} 0 = a_1u_1+ \\dots a_{m} u_{m} - b_1w_1+ \\dots +b_{n}w_{n} \\end{equation}\nhaving already declared this list linearly independent, we see that each scalar \\(a_1, \\dots -b_{n}\\) must equal to \\(0\\) for this expression. Therefore, the intersection \\(v\\) must be \\(\\{0\\}\\) as \\(0u_1 + \\dots +0u_{m}=0\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e is a sum of \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es\u003c/strong\u003e\u003c/strong\u003e (not just subsets!!) where there\u0026rsquo;s only one way to represent each element.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\) named \\(U_1, \\dots, U_{m}\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e of \\(U_1+\\dots+U_{m}\\) is called a \u003cem\u003edirect sum\u003c/em\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eeach element in \\(U_1+\\dots +U_{m}\\) can only be written in one way as a sum \\(u_1 +\\dots +u_{m}\\) (as in, they are linearly independent?)\u003c/p\u003e\n\u003cp\u003eWe use \\(\\oplus\\) to represent \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"why-is-it-called-a-direct-sum--kbhdirect-sum-dot-md\"\u003ewhy is it \u003cem\u003ecalled\u003c/em\u003e a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum?\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSomething is \u003cem\u003enot\u003c/em\u003e a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e if any of its components can be 
described using the others. Its kind of line linear independence but! on entire spaces.\u003c/p\u003e\n\u003ch3 id=\"a-sum-of-subsets--kbhsum-of-subsets-dot-md--is-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--there-is-only-one-way-to-write-0\"\u003ea \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e there is only one way to write \\(0\\)\u003c/h3\u003e\n\u003cp\u003eGiven \\(U_1, \\dots, U_{m}\\) are subspaces of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is a direct sum IFF the only way to write \\(0\\) as a sum \\(u_1 +\\dots +u_{m}\\) is by taking each element to \\(0\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eif\u0026mdash;\nIf some \\(U_1 + \\dots +U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e, definitionally there is only one way to write \\(0\\). And you can always write \\(0\\) by taking all the constituents to \\(0\\) as they are \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es, so the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e exists.\u003c/p\u003e\n\u003cp\u003eonly if\u0026mdash;\nWe are given that there is only one way to write \\(0\\), that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = u_1+ u_2+ \\dots+ u_{m}: u_j \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas \\(U_{j}\\) are all subspaces, and the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e exists, we can say that \\(u_1=u_2=\\dots =0\\).\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction that \\(U_1 + \\dots +U_{m}\\) is not a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. 
Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\exists\\ v_1 = u_1+u_2+\\dots + u_{m}: u_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\exists\\ v_1 = w_1+w_2+\\dots + w_{m}: w_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;there are two unique representations of a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e given the \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSubtracting these representations, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(v_1-v_1) = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have established that each slot that makes up this particular sum \\(=0\\). Therefore, \\(u_{i}-w_{i} = 0\\). This means $u\u003csub\u003ei\u003c/sub\u003e=w\u003csub\u003ei\u003c/sub\u003e$\u0026mdash;there are no two unique representations of \\(v_{1}\\). Reaching contradiction. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"a-sum-of-subsets--kbhsum-of-subsets-dot-md--is-only-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--their-intersection-is-set-containing-0\"\u003ea \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e is only a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e their intersection is set containing \\(0\\)\u003c/h3\u003e\n\u003cp\u003eTake \\(U\\) and \\(W\\), two \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\). 
\\(U+V\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(U \\cap W = \\{0\\}\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eif\u0026mdash;\nSuppose \\(U+V\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. \\(\\forall v \\in U \\cap V\\), as \\(v\\) is equal to itself, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = v+(-v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v\\) is in \\(U\\) and \\(-v\\) is in \\(V\\) (as both \\(U\\) and \\(V\\) are vector spaces, both would contain \\(-1v=-v\\) as we are given \\(v \\in U \\cap V\\) and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is closed on both.)\u003c/p\u003e\n\u003cp\u003eBy the unique representation in the definition of \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es, you have only one way to construct this expression: namely, that \\(v=0\\) as both are \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es so the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e exists on both.\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\{0\\} = U \\cap V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eonly if\u0026mdash;\nSuppose \\(U \\cap W = \\{0\\}\\). Take also \\(u \\in U\\) and \\(w \\in W\\); we can construct an expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu + w = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we can show that there is only one unique combination of \\(u\\) and \\(w\\) to write \\(0\\), we satisfy the previous proof and therefore \\(U+W\\) is a direct sum.\u003c/p\u003e\n\u003cp\u003eThe expression above implies that \\(w\\) is the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e of \\(u\\); therefore; \\(u = -w\\). 
As both \\(U\\) and \\(W\\) are \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es, their elements all have \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003es. As \\(u\\) is the inverse of \\(w\\), and given the definition of \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e that \\(u \\in U\\) and \\(w \\in W\\), \\(u\\) and \\(w\\) are both in both \\(U\\) and \\(W\\).\u003c/p\u003e\n\u003cp\u003eAs the intersection of \\(U\\) and \\(V\\) is \\(0\\), \\(u=w=0\\). Therefore, there is only one unique representation of \\(0\\), namely with \\(u=0,w=0\\), making \\(U+W\\) a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"direct-sum--kbhdirect-sum-dot-md--proofs-are-not-pairwise\"\u003e\u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e proofs are not pairwise!\u003c/h3\u003e\n\u003cp\u003eThose two proofs above only deal with pairs of \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e. 
If you have multiple subsets, they don\u0026rsquo;t apply!\u003c/p\u003e\n\u003ch3 id=\"every-subspace--kbhsubspace-dot-md--of-v-is-a-part-of-a-direct-sum--kbhdirect-sum-dot-md--equaling-to-v\"\u003eevery \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\) is a part of a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e equaling to \\(V\\)\u003c/h3\u003e\n\u003cp\u003eFor every \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U\\) of a \u003cstrong\u003e\u003cstrong\u003efinite-dimensional\u003c/strong\u003e\u003c/strong\u003e \\(V\\), there is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(W\\) of \\(V\\) for which \\(V = U \\oplus W\\).\u003c/p\u003e\n\u003cp\u003eBecause \\(V\\) is defined to be \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, and the fact that a \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspace\u003c/a\u003e is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, \\(U\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore, because \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery finite-dimensional vector space has a basis\u003c/a\u003e, \\(U\\) has a basis \\(u_1, \\dots u_{m}\\).\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhbasis/\"\u003ebases\u003c/a\u003e are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, and \\(U \\subset V\\), \\(u_1, \\dots u_{m}\\) is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\).\u003c/p\u003e\n\u003cp\u003eBecause \u003ca 
href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea linearly independent list expends to a basis\u003c/a\u003e, we can construct \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) as the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). We will construct a \\(W = span(w_1, \\dots w_{n})\\) \u0026mdash; the space formed as the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of the \u0026ldquo;extension\u0026rdquo; vectors to make the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\).\u003c/p\u003e\n\u003cp\u003eBecause the list \\(u_{j}\\dots w_{k}\\) we made is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\), \\(U+W=V\\).\u003c/p\u003e\n\u003cp\u003eYou can see this because every element \\(v \\in V\\) can be constructed with a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) (again, because this list shown to be a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) therefore it \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003es \\(V\\).) Then, to show that \\(U+W=V\\), we can collapse \\(a_{1}u_1\\dots + a_{m}u_{m}=u \\in U\\), and \\(c_{1}w_1 \\dots +c_{m}w_{m} = w \\in W\\). Hence, every element \\(v \\in V\\) can be constructed by some \\(u \\in U + w \\in W\\), making \\(U+W=V\\).\u003c/p\u003e\n\u003cp\u003eNow, we have to show that the combination is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. 
There is a few ways of going about this, the one presented by \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eAxler\u003c/a\u003e is leveraging the fact that \u003ca href=\"#a-sum-of-subsets--kbhsum-of-subsets-dot-md--is-only-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--their-intersection-is-set-containing-0\"\u003ea sum of subsets is only a direct sum IFF their intersection is set containing \\(0\\)\u003c/a\u003e\u0026mdash;that \\(U \\cap W = \\{0\\}\\).\u003c/p\u003e\n\u003cp\u003eGiven some element \\(v\\) that lives in the intersection between \\(U\\) and \\(W\\), it must be formed as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of two \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e lists (as \\(u_j, \\dots w_{j}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, they are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.)\u003c/p\u003e\n\u003cp\u003eIntuition: if an non-zero element lives in the intersection between two \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e lists which together is still \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, it must be able to be written by a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of other elements of that \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list to live in the intersection of the two lists\u0026mdash;which is absurd (violates the definition of \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e). 
The only element for which this is an exception is \\(0\\).\u003c/p\u003e\n\u003cp\u003eActual proof:\u003c/p\u003e\n\u003cp\u003esuppose \\(v \\in U \\cap W\\), so \\(v = a_1u_1\\dots +a_{m}v_{m}\\) as well as \\(v=b_1w_{1} + \\dots b_{n}w_{n}\\). Subtracting the two lists results in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_1u_1+ \\dots a_{m} u_{m} - b_1w_1+ \\dots +b_{n}w_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehaving already declared this list \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, we see that each scalar \\(a_1, \\dots -b_{n}\\) must equal to \\(0\\) for this expression. Therefore, the intersection \\(v\\) must be \\(\\{0\\}\\) as \\(0u_1 + \\dots +0u_{m}=0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirect_sum/","tags":null,"title":"direct sum"},{"categories":null,"contents":"Directed Evolution is a process of recreating Darwinian processes in a lab setting\nmutation: make select mutation selection: selection specific changes replication: make more of it Examples: PACE\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdirected_evolution/\"\u003eDirected Evolution\u003c/a\u003e is a process of recreating Darwinian processes in a lab setting\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003emutation: make select mutation\u003c/li\u003e\n\u003cli\u003eselection: selection specific changes\u003c/li\u003e\n\u003cli\u003ereplication: make more of it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eExamples: \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirected_evolution/","tags":null,"title":"Directed Evolution"},{"categories":null,"contents":"Softmax Method Pull arm \\(a\\) with probability \\(\\propto \\exp (\\lambda \\rho_{a})\\), where \\(\\lambda \\geq 0\\) is the \u0026ldquo;precision parameter\u0026rdquo;.\nWhen \\(\\lambda \\to 0\\), this system uses the same rate for each of the actions, so you are 
essentially randomly sampling; when \\(\\lambda \\to \\infty\\), the system will use only the greedy action because only the element with the biggest \\(\\rho_{a}\\) gets selected.\nFor a multi-state case:\n\\begin{equation} \\propto \\exp (\\lambda Q(s,a)) \\end{equation}\nQuantile Exploration Choose the arm with the largest \\(\\theta\\) at the highest \\(\\alpha\\) percentile of its beta distribution, pull that arm, update priors\n\u0026ldquo;choose the arm with the highest \\(\\theta\\) for the \\(90\\%\\) percentile, then update the distribution\u0026rdquo;\nUCB 1 Inspired by monte-carlo exploration\ntake action \\(a\\) such that\n\\begin{equation} \\max_{a} \\rho_{a} + c \\sqrt{ \\frac{\\log N}{N(a)}} \\end{equation}\nwhere, \\(c\\) is the exploration factor, \\(N\\) is the total number of trials, \\(N(a)\\) is the number of trials for \\(a\\) we have done.\nThis value is considered the \u0026ldquo;upper confidence bound\u0026rdquo;; hence \u0026ldquo;UCB\u0026rdquo;\nPosterior Sampling Same one point from each Beta Distribution for each of your slot machines; then you pick the result that is the highest.\nDoes not require any parameter.\nThis is proven to do some over-exploration. 
But that\u0026rsquo;s (mostly) just fine.\nR-Max See R-Max\n","html":"\u003ch2 id=\"softmax-method\"\u003eSoftmax Method\u003c/h2\u003e\n\u003cp\u003ePull arm \\(a\\) with probability \\(\\propto \\exp (\\lambda \\rho_{a})\\), where \\(\\lambda \\geq 0\\) is the \u0026ldquo;precision parameter\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eWhen \\(\\lambda \\to 0\\), this system uses the same rate for each of the actions, so you are essentially randomly sampling; when \\(\\lambda \\to \\infty\\), the system will use only the \u003ca href=\"/posts/kbhexploration_and_exploitation/#bayesian-model-estimation\"\u003egreedy action\u003c/a\u003e because only the element with the biggest \\(\\rho_{a}\\) gets selected.\u003c/p\u003e\n\u003cp\u003eFor a multi-state case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\propto \\exp (\\lambda Q(s,a))\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"quantile-exploration\"\u003eQuantile Exploration\u003c/h2\u003e\n\u003cp\u003eChoose the arm with the largest \\(\\theta\\) at the highest \\(\\alpha\\) percentile of its beta distribution, pull that arm, update priors\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;choose the arm with the highest \\(\\theta\\) for the \\(90\\%\\) percentile, then update the distribution\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"ucb-1\"\u003eUCB 1\u003c/h2\u003e\n\u003cp\u003eInspired by \u003ca href=\"/posts/kbhmonte_carlo_tree_search/#monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003etake action \\(a\\) such that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{a} \\rho_{a} + c \\sqrt{ \\frac{\\log N}{N(a)}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(c\\) is the exploration factor, \\(N\\) is the total number of trials, \\(N(a)\\) is the number of trials for \\(a\\) we have done.\u003c/p\u003e\n\u003cp\u003eThis value is considered the \u0026ldquo;upper confidence bound\u0026rdquo;; hence \u0026ldquo;UCB\u0026rdquo;\u003c/p\u003e\n\u003ch2 
id=\"posterior-sampling\"\u003ePosterior Sampling\u003c/h2\u003e\n\u003cp\u003eSame one point from each \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e for each of your slot machines; then you pick the result that is the highest.\u003c/p\u003e\n\u003cp\u003eDoes not require any parameter.\u003c/p\u003e\n\u003cp\u003eThis is proven to do some over-exploration. But that\u0026rsquo;s (mostly) just fine.\u003c/p\u003e\n\u003ch2 id=\"r-max\"\u003eR-Max\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/#r-max\"\u003eR-Max\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirected_exploration/","tags":null,"title":"Directed Exploration"},{"categories":null,"contents":"discourse features are marks of fluency/etc. which mark one\u0026rsquo;s speech.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdiscourse_features/\"\u003ediscourse features\u003c/a\u003e are marks of fluency/etc. which mark one\u0026rsquo;s speech.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiscourse_features/","tags":null,"title":"discourse features"},{"categories":null,"contents":"A Discourse-Completion Task is a tool used to elicit speech acts, such as showing an image, etc. For instance,\ntypes of Discourse-Completion Tasks oral lexical retrival Cookie Theft ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e is a tool used to elicit speech acts, such as showing an image, etc. 
For instance,\u003c/p\u003e\n\u003ch2 id=\"types-of-discourse-completion-task--kbhdiscourse-completion-task-dot-md--s\"\u003etypes of \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003es\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrival\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhctp/\"\u003eCookie Theft\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiscourse_completion_task/","tags":null,"title":"Discourse-Completion Task"},{"categories":null,"contents":"A discrete set of chances: die, coin flip, etc.\nWe use probability mass function to model such a distribution:\n\\begin{equation} \\sum_{i=1}^{n}P(X=i) = 1 \\end{equation}\nTo each member of the distribution, we assign a factor. The parameters of this distribution are the probability values you assign to each group.\n","html":"\u003cp\u003eA discrete set of chances: die, coin flip, etc.\u003c/p\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhprobability_distributions/#probability-mass-function\"\u003eprobability mass function\u003c/a\u003e to model such a distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=1}^{n}P(X=i) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo each member of the distribution, we assign a \u003ca href=\"/posts/kbhfactor/\"\u003efactor.\u003c/a\u003e The \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es of this distribution are the probability values you assign to each group.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiscrete_distribution/","tags":null,"title":"discrete distribution"},{"categories":null,"contents":"\u0026ldquo;how does an operating system track threads and processes\u0026rdquo;\nsee process control block\ntraps and interrups Bad problem: the operating system can\u0026rsquo;t be running when a user thread is running. 
We can\u0026rsquo;t do thread bookeeping if a user thread is running.\ntrap a trap is a scheme to request OS attention explicitly from the user thread, swapping the user process off the CPU.\nsystem calls errors page fault (memory errors) interrupt a interrupt takes place outside the current thread, it forces the OS\u0026rsquo; attention even if the user thread isn\u0026rsquo;t asking for it\ncharacter typed at keyboard completion of a disk operations a hardware timer that fires an interrupt interrupts enable preemption to happen, so see also preemption for interrupt handling pattern.\nwhat if a timer goes off during an interrupt interrupts are disabled during interrupt handling, otherwise, this causes an infinite loop.\nsolution: interrupts are disabled during timer handling.\nthis causes a problem: if you preempt into a brand new thread\nmain idea there are race condition we cannot solve with mutexes because we are the OS so, we implement mutexes by enabling/disabling interrupts dispatcher a dispatcher performs a context switch, which\ncontext switch (in asm) push all registers except %rsp into the bottom of the old thread\u0026rsquo;s stack store the stack pointer %rsp into the process control block for that process corresponding to thread read the new thread\u0026rsquo;s stack pointer from the process control block, and load that into %rsp (in asm) pop all registers stored on the bottom of our new stack back onto the registers remember to push and pop the registers in the same order\u0026hellip;. 
otherwise the registers won\u0026rsquo;t be in the right order.\nthis makes a context switch a function that calls on one thread and returns on another thread\u0026mdash;\u0026ldquo;we start executing from one stack, and end executing from another\u0026rdquo;.\nExample:\ncontext switch\nNotice that we only store callee saved registers because its the responsibility of whomever called context switch to save the register of the caller saved registers.\npushq %rbp pushq %rbx pushq %r14 pushq %r15 ;; pushq all of em callee saved ... movq %rsp, [somewhere in PCB, thread 1] ; the process control block movq [somewhere else in PCB, thread 2], %rsp ; the stack is now somewhere else ;; now we pop backwards up from the stack ;; popq all of em calee saved ... popq %r15 popq %r14 popq %rbx popq %rbp ;; this will RETURN to the last call *or* top of context_switch() of the ;; **THREAD 2**, because we moved the stack pointer by movq into ;; %rsp, we will return to the NEW thread\u0026#39;s last executed position ret what if the thread is new? We can\u0026rsquo;t ret to a function that never called context_switch, which is the case for new threads.\nTo do this, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls context_switch normally.\nyield yield is a user function that one could implement, which acts like a blocking action, but instead of doing that we just add ourselves directly to the end of the ready queue again. (i.e. 
give up CPU voluntarily, but don\u0026rsquo;t block0.\n","html":"\u003cp\u003e\u0026ldquo;how does an operating system track threads and processes\u0026rdquo;\u003c/p\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"traps-and-interrups\"\u003etraps and interrups\u003c/h2\u003e\n\u003cp\u003eBad problem: \u003cstrong\u003ethe operating system can\u0026rsquo;t be running when a user thread is running\u003c/strong\u003e. We can\u0026rsquo;t do thread bookeeping if a user thread is running.\u003c/p\u003e\n\u003ch3 id=\"trap\"\u003etrap\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"#trap\"\u003etrap\u003c/a\u003e is a scheme to request OS attention explicitly from the user thread, swapping the user process off the CPU.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esystem calls\u003c/li\u003e\n\u003cli\u003eerrors\u003c/li\u003e\n\u003cli\u003epage fault (memory errors)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"interrupt\"\u003einterrupt\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"#interrupt\"\u003einterrupt\u003c/a\u003e takes place outside the current thread, it forces the OS\u0026rsquo; attention even if the user thread isn\u0026rsquo;t asking for it\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echaracter typed at keyboard\u003c/li\u003e\n\u003cli\u003ecompletion of a disk operations\u003c/li\u003e\n\u003cli\u003ea hardware timer that fires an interrupt\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003ca href=\"#interrupt\"\u003einterrupt\u003c/a\u003es enable \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e to happen, so see also \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e for interrupt handling pattern.\u003c/p\u003e\n\u003ch4 id=\"what-if-a-timer-goes-off-during-an-interrupt--orge7a39af\"\u003ewhat if a timer goes off during an \u003ca 
href=\"#interrupt\"\u003einterrupt\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003einterrupts are disabled during interrupt handling\u003c/strong\u003e, otherwise, this causes an infinite loop.\u003c/p\u003e\n\u003cp\u003esolution: \u003cem\u003einterrupts are disabled during timer handling\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003ethis causes a problem: if you \u003ca href=\"/posts/kbhpreemption/#preempting-into-a-brand-new-thread\"\u003epreempt into a brand new thread\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"main-idea\"\u003emain idea\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ethere are \u003ca href=\"/posts/kbhmultithreading/#race-condition\"\u003erace condition\u003c/a\u003e we cannot solve with \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003ees because we are the OS\u003c/li\u003e\n\u003cli\u003eso, \u003cstrong\u003ewe\u003c/strong\u003e implement mutexes by enabling/disabling \u003ca href=\"#interrupt\"\u003einterrupt\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dispatcher\"\u003edispatcher\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#dispatcher\"\u003edispatcher\u003c/a\u003e performs a \u003ca href=\"#context-switch\"\u003econtext switch\u003c/a\u003e, which\u003c/p\u003e\n\u003ch3 id=\"context-switch\"\u003econtext switch\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e(in asm) push \u003cstrong\u003eall \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es\u003c/strong\u003e except \u003ccode\u003e%rsp\u003c/code\u003e into the bottom of the old thread\u0026rsquo;s \u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estore the \u003ca href=\"/posts/kbhassembly/#stack-pointer\"\u003estack pointer\u003c/a\u003e \u003ccode\u003e%rsp\u003c/code\u003e into the \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e for that process corresponding to thread\u003c/li\u003e\n\u003cli\u003eread the new 
thread\u0026rsquo;s stack pointer from the \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e, and load that into \u003ccode\u003e%rsp\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e(in asm) pop \u003cstrong\u003eall \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es\u003c/strong\u003e stored on the bottom of our new stack back onto the registers\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eremember to push and pop the \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es in the same order\u0026hellip;. otherwise the registers won\u0026rsquo;t be in the right order.\u003c/p\u003e\n\u003cp\u003ethis makes a \u003ca href=\"#context-switch\"\u003econtext switch\u003c/a\u003e a function that \u003cstrong\u003ecalls on one thread\u003c/strong\u003e and \u003cstrong\u003ereturns on another thread\u003c/strong\u003e\u0026mdash;\u0026ldquo;we start executing from one stack, and end executing from another\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eExample:\u003c/p\u003e\n\u003cp\u003econtext switch\u003c/p\u003e\n\u003cp\u003eNotice that we only store \u003cstrong\u003ecallee saved registers\u003c/strong\u003e because its the responsibility of whomever called context switch to save the register of the \u003cstrong\u003ecaller saved registers\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-asm\" data-lang=\"asm\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rbp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e%rbx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r14\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r15\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e;; pushq all of em callee saved ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emovq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rsp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esomewhere\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ePCB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e; the process control block\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emovq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esomewhere\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ePCB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rsp\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e; the stack is now somewhere else\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; now we pop backwards up from the stack\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; popq all of em calee saved ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r15\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r14\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rbx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rbp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75715e\"\u003e;; this will RETURN to the last call *or* top of context_switch() of the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; **THREAD 2**, because we moved the stack pointer by movq into\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; %rsp, we will return to the NEW thread\u0026#39;s last executed position\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eret\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"what-if-the-thread-is-new\"\u003ewhat if the thread is new?\u003c/h4\u003e\n\u003cp\u003eWe can\u0026rsquo;t \u003ccode\u003eret\u003c/code\u003e to a function that never called \u003ccode\u003econtext_switch\u003c/code\u003e, which is the case for \u003cstrong\u003enew threads\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTo do this, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls \u003ccode\u003econtext_switch\u003c/code\u003e normally.\u003c/p\u003e\n\u003ch3 id=\"yield\"\u003eyield\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#yield\"\u003eyield\u003c/a\u003e is a user function that one could implement, which acts like a blocking action, but instead of doing that we just add ourselves directly to the end of the ready queue again. (i.e. 
give up CPU voluntarily, but don\u0026rsquo;t block0.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdispatching/","tags":null,"title":"dispatching"},{"categories":null,"contents":"Big Idea Motivation: Touring Test Point of the Turing test: we can use language to get the underlying thought and the underlying cognition. However, language IS NOT thought.\nLanguage is not thought Good at language != good at thought =\u0026gt; public speaking settings\nBad at language != bad at thought =\u0026gt; language models?\nLLM eval should separate language and thought formal vs functional linguistic confidence (good at speaking and speaking is useful) generalized world knowledge Detour and motivation: cognitive science language centre in brain is specific to a location, and changes in language doesn\u0026rsquo;t change what region gets activated language shows little/no response when we are thinking of cognitively challenging tasks lik emaffs Key examples: aphasics can still think. So each skill is in a separate brain-place.\nFormal and Functional Competence Mahowold, Ivanlova, et al.\nCan we find out what parts of the network separately process core language skills (syntax and formal grammar) vs. \u0026ldquo;functional\u0026rdquo; skills (semantics and mathematical reasoning), and how does LLMs perform in each area?\nFormal Competence Unsurprisingly for us but surprisingly for linguists, GPT can grammar real good. No surprises there.\nFunctional Competence It can memorize the output, and it doesn\u0026rsquo;t perform well for out of sample math/reasoning cases\nGeneralized World Knowledge Two types of knowledge\nFactual: Paris is the capital of France; Birds lay eggs Distributional: the sky is {blue, black, pink}. the first two are largely more likely The factual side LLMs are very good at and that\u0026rsquo;s again unsurpsininlgy. But for #2\u0026hellip;\nLLM embeddings have similar colours close together, and similar animals close together. 
LLMs are subject to reporter bias: we talk less about obvious things, yet LLMs are only trained on what we talk about.\nQuestion 1: does the generalized world model require languages?\n\u0026ldquo;The fox is chasing a planet.\u0026rdquo; \u0026mdash; there is a logical failure here.\nHowever, when shown semantically incompatible events, the language centre activates, but not very much. Doing this to aphasics still showed that having no difference.\nSO: Language network is recruited but not required for event semantics\nEvent knowledge evaluations:\nThis is actually formal and so LLMs do very well: \u0026ldquo;The laptop ate the teacher\u0026rdquo; (inanimate objects cannot eat, formal issue) This is perceptual and LLMs do poorly: \u0026ldquo;The fox chased the rabbit\u0026rdquo; (foxes can\u0026rsquo;t be slower than a fox) Question: what happens if you in-context complete the recruitment of counterfactuals\n","html":"\u003ch2 id=\"big-idea\"\u003eBig Idea\u003c/h2\u003e\n\u003ch3 id=\"motivation-touring-test\"\u003eMotivation: Touring Test\u003c/h3\u003e\n\u003cp\u003ePoint of the Turing test: we can use language to get the underlying thought and the underlying cognition. 
However, language IS NOT thought.\u003c/p\u003e\n\u003ch3 id=\"language-is-not-thought\"\u003eLanguage is not thought\u003c/h3\u003e\n\u003cp\u003eGood at language != good at thought =\u0026gt; public speaking settings\u003c/p\u003e\n\u003cp\u003eBad at language != bad at thought =\u0026gt; language models?\u003c/p\u003e\n\u003ch3 id=\"llm-eval-should-separate-language-and-thought\"\u003eLLM eval should separate language and thought\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eformal vs functional linguistic confidence (good at speaking and speaking is useful)\u003c/li\u003e\n\u003cli\u003egeneralized world knowledge\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"detour-and-motivation-cognitive-science\"\u003eDetour and motivation: cognitive science\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elanguage centre in brain is specific to a location, and changes in language doesn\u0026rsquo;t change what region gets activated\u003c/li\u003e\n\u003cli\u003elanguage shows little/no response when we are thinking of cognitively challenging tasks lik emaffs\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eKey examples: aphasics can still think. So each skill is in a separate brain-place.\u003c/p\u003e\n\u003ch2 id=\"formal-and-functional-competence\"\u003eFormal and Functional Competence\u003c/h2\u003e\n\u003cp\u003eMahowold, Ivanlova, et al.\u003c/p\u003e\n\u003cp\u003eCan we find out what parts of the network separately process core language skills (syntax and formal grammar) vs. \u0026ldquo;functional\u0026rdquo; skills (semantics and mathematical reasoning), and how does LLMs perform in each area?\u003c/p\u003e\n\u003ch3 id=\"formal-competence\"\u003eFormal Competence\u003c/h3\u003e\n\u003cp\u003eUnsurprisingly for us but surprisingly for linguists, GPT can grammar real good. 
No surprises there.\u003c/p\u003e\n\u003ch3 id=\"functional-competence\"\u003eFunctional Competence\u003c/h3\u003e\n\u003cp\u003eIt can memorize the output, and it doesn\u0026rsquo;t perform well for out of sample math/reasoning cases\u003c/p\u003e\n\u003ch2 id=\"generalized-world-knowledge\"\u003eGeneralized World Knowledge\u003c/h2\u003e\n\u003cp\u003eTwo types of knowledge\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eFactual\u003c/strong\u003e\u003c/strong\u003e: Paris is the capital of France; Birds lay eggs\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDistributional\u003c/strong\u003e: the sky is {blue, black, pink}. the first two are largely more likely\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u003cstrong\u003efactual\u003c/strong\u003e side LLMs are very good at and that\u0026rsquo;s again unsurpsininlgy. But for #2\u0026hellip;\u003c/p\u003e\n\u003cp\u003eLLM embeddings have similar colours close together, and similar animals close together. LLMs are subject to reporter bias: we talk less about obvious things, yet LLMs are only trained on what we talk about.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eQuestion 1: does the generalized world model require languages?\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;The fox is chasing a planet.\u0026rdquo; \u0026mdash; there is a logical failure here.\u003c/p\u003e\n\u003cp\u003eHowever, when shown semantically incompatible \u003cstrong\u003eevents\u003c/strong\u003e, the language centre activates, but not very much. 
Doing this to aphasics still showed that having no difference.\u003c/p\u003e\n\u003cp\u003eSO: \u003cstrong\u003e\u003cstrong\u003eLanguage network is recruited but not required for event semantics\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eEvent knowledge evaluations:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThis is actually formal and so LLMs do very well: \u0026ldquo;The laptop ate the teacher\u0026rdquo; (inanimate objects cannot eat, formal issue)\u003c/li\u003e\n\u003cli\u003eThis is perceptual and LLMs do poorly: \u0026ldquo;The fox chased the rabbit\u0026rdquo; (foxes can\u0026rsquo;t be slower than a fox)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eQuestion: what happens if you in-context complete the recruitment of \u003ca href=\"/posts/kbhcounterfactual/\"\u003ecounterfactual\u003c/a\u003es\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdissociating_language_and_thought/","tags":null,"title":"Dissociating Language and Thought"},{"categories":null,"contents":"distributed algorithm is a type of algorithm that can be distributed across many modules.\nThere are a few core areas of research:\nfailure-proofing nodes is a distributed algorithm What if one processor fails? communication in a distributed algorithm What if communication between processors fails? What if timing fails? atomicity atomicity is a property of distributed algorithm where, for a set of steps, a processor can only do one or all of the steps. 
i.e.: if you are asking a node to do something, it can either do all of the thing or be able to roll back as if the entire thing didn\u0026rsquo;t happen.\nleader election (algorithms) leader election is the process by which a distributed algorithm elects the driving node among similar nodes.\nconsensus (algorithms) consensus is a mechanism in a distributed algorithm where the solution requires multiple processes to do the same calculation to confirm.\nalgorithms designed to be distributed MapReduce ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e is a type of algorithm that can be distributed across many modules.\u003c/p\u003e\n\u003cp\u003eThere are a few core areas of research:\u003c/p\u003e\n\u003ch2 id=\"failure-proofing-nodes-is-a-distributed-algorithm--kbhdistributed-algorithum-dot-md\"\u003efailure-proofing nodes is a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhat if one processor fails?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"communication-in-a-distributed-algorithm--kbhdistributed-algorithum-dot-md\"\u003ecommunication in a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhat if communication between processors fails?\u003c/li\u003e\n\u003cli\u003eWhat if timing fails?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"atomicity\"\u003eatomicity\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#atomicity\"\u003eatomicity\u003c/a\u003e is a property of \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e where, for a set of steps, a processor can only do \u003cem\u003eone\u003c/em\u003e or \u003cem\u003eall\u003c/em\u003e of the steps. 
i.e.: if you are asking a node to do something, it can either do all of the thing or be able to roll back as if the entire thing didn\u0026rsquo;t happen.\u003c/p\u003e\n\u003ch2 id=\"leader-election--algorithms\"\u003eleader election (algorithms)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#leader-election--algorithms\"\u003eleader election\u003c/a\u003e is the process by which a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e elects the driving node among similar nodes.\u003c/p\u003e\n\u003ch2 id=\"consensus--algorithms\"\u003econsensus (algorithms)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#consensus--algorithms\"\u003econsensus\u003c/a\u003e is a mechanism in a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e where the solution requires multiple processes to do the same calculation to confirm.\u003c/p\u003e\n\u003ch2 id=\"algorithms-designed-to-be-distributed\"\u003ealgorithms designed to be distributed\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdistributed_algorithum/","tags":null,"title":"distributed algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdistributed_morphology/","tags":null,"title":"distributed morphology"},{"categories":null,"contents":"distributive harm is a harm where a system extends, withhold opportunities given a specific group of people\nSt. George\u0026rsquo;s hospital: overweighting\nobserving the weights observing the input, consider whether or not input has sensitive features ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdistributive_harm/\"\u003edistributive harm\u003c/a\u003e is a harm where a system extends, withhold opportunities given a specific group of people\u003c/p\u003e\n\u003cp\u003eSt. 
George\u0026rsquo;s hospital: overweighting\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eobserving the weights\u003c/li\u003e\n\u003cli\u003eobserving the input, consider whether or not input has sensitive features\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdistributive_harm/","tags":null,"title":"distributive harm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdistributivity/","tags":null,"title":"distributivity"},{"categories":null,"contents":"Let integer \\(a,b \\in \\mathbb{Z}\\), where \\(b \\neq 0\\). We say \\(b\\) divides \\(a\\) (i.e. \\(b|a\\)) if there\u0026rsquo;s some \\(m \\in \\mathbb{Z}\\) such that \\(a = bm\\).\nadditional information division algorithm Let \\(a,b \\in \\mathbb{Z}\\), \\(b \u0026gt; 0\\). Then, there exists, uniquely, some \\(q,r \\in \\mathbb{Z}\\) such that \\(a = bq + r\\) with \\(0 \\leq r \u0026lt;b\\).\n\u0026ldquo;division with remainder\u0026rdquo;\nYou will note that, if \\(a \u0026lt; b\\), we can just say \\(q = 0\\).\nProof:\nExistence Let us define some:\n\\begin{equation} S = \\{a - bk: k \\in \\mathbb{Z}, a-bk \\geq 0\\} \\end{equation}\nWe will note that this set is definitely non-empty:\nIf \\(a \\geq 0\\), then \\(a = a-b\\cdot 0 \\in S\\) If \\(a \u0026lt; 0\\) , then \\(a-b(2a) = a(1-2b)\\), we note that \\(a \u0026lt; 0\\) and since \\(b \u0026gt;0\\), \\((1-2b) \u0026lt;0\\), so \\(a(1-2b) \u0026gt; 0\\) and so \\(a(1-2b) \\in S\\) So by the WOP, \\(S\\) has a smallest element. Let us, by WOP, define \\(r\\) to be the smallest element in \\(S\\).\nTherefore, we can make some \\(r = a-bq \\in S\\). We also know that \\(r\\) is non-negative, as that is the constraint of \\(S\\). Finally, we have to ensure that \\(r\\) is the actual remainder, we have to ensure that \\(r \u0026lt; b\\).\nAssume for contradiction \\(r \\geq b\\). Then, \\(r-b = (a-qb)-b = a-(q+1)b \\geq 0\\). Therefore, \\(r-b \\in S\\). 
Yet, we said that \\(r\\) was the smallest element, reaching contradiction. Therefore, \\(r\u0026lt;b\\) . \\(\\blacksquare\\)\nUniqueness Let us have:\n\\begin{equation} a = bq+r = bq\u0026rsquo; + r' \\end{equation}\nRecall that, \\(0 \\leq r \u0026lt; b\\). We desire that \\(q = q\u0026rsquo;\\), \\(r = r\u0026rsquo;\\).\nWLOG let \\(r \\leq r\u0026rsquo;\\). So, \\(0 \\leq r\u0026rsquo; - r\\). Both \\(r\u0026rsquo;\\) and \\(r\\) are remainders after dividing by \\(b\\), so \\(r\u0026rsquo; \u0026lt; b\\) and \\(r \u0026lt; b\\). Therefore, we have:\n\\begin{equation} 0 \\leq r\u0026rsquo; - r \u0026lt; b \\end{equation}\nNow, recall that:\n\\begin{align} \u0026amp;bq+r = bq\u0026rsquo; + r\u0026rsquo;\\\\ \\Rightarrow\\ \u0026amp;b(q-q\u0026rsquo;) = r\u0026rsquo; - r \\end{align}\nNow, we have that \\(b|(r\u0026rsquo; - r)\\). Hence, we have some positive \\(r\u0026rsquo; - r\\), which is smaller than b, but which is divisible by \\(b\\). This forces us to conclude that \\(r\u0026rsquo; - r = 0\\).\nGiven \\(r\u0026rsquo; = r\\), now, we can see that \\(q = q\u0026rsquo;\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eLet integer \\(a,b \\in \\mathbb{Z}\\), where \\(b \\neq 0\\). We say \\(b\\) \u003ca href=\"/posts/kbhdivide/\"\u003edivide\u003c/a\u003es \\(a\\) (i.e. \\(b|a\\)) if there\u0026rsquo;s some \\(m \\in \\mathbb{Z}\\) such that \\(a = bm\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"division-algorithm\"\u003edivision algorithm\u003c/h3\u003e\n\u003cp\u003eLet \\(a,b \\in \\mathbb{Z}\\), \\(b \u0026gt; 0\\). 
Then, there exists, uniquely, some \\(q,r \\in \\mathbb{Z}\\) such that \\(a = bq + r\\) with \\(0 \\leq r \u0026lt;b\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;division with remainder\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eYou will note that, if \\(a \u0026lt; b\\), we can just say \\(q = 0\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003ch4 id=\"existence\"\u003eExistence\u003c/h4\u003e\n\u003cp\u003eLet us define some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS = \\{a - bk: k \\in \\mathbb{Z}, a-bk \\geq 0\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will note that this set is definitely non-empty:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIf \\(a \\geq 0\\), then \\(a = a-b\\cdot 0 \\in S\\)\u003c/li\u003e\n\u003cli\u003eIf \\(a \u0026lt; 0\\) , then \\(a-b(2a) = a(1-2b)\\), we note that \\(a \u0026lt; 0\\) and since \\(b \u0026gt;0\\), \\((1-2b) \u0026lt;0\\), so \\(a(1-2b) \u0026gt; 0\\) and so \\(a(1-2b) \\in S\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo by the \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e, \\(S\\) has a smallest element. Let us, by \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e, define \\(r\\) to be the smallest element in \\(S\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we can make some \\(r = a-bq \\in S\\). We also know that \\(r\\) is non-negative, as that is the constraint of \\(S\\). Finally, we have to ensure that \\(r\\) is the actual remainder, we have to ensure that \\(r \u0026lt; b\\).\u003c/p\u003e\n\u003cp\u003eAssume for contradiction \\(r \\geq b\\). Then, \\(r-b = (a-qb)-b = a-(q+1)b \\geq 0\\). Therefore, \\(r-b \\in S\\). Yet, we said that \\(r\\) was the smallest element, reaching contradiction. Therefore, \\(r\u0026lt;b\\) . 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch4 id=\"uniqueness\"\u003eUniqueness\u003c/h4\u003e\n\u003cp\u003eLet us have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = bq+r = bq\u0026rsquo; + r'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that, \\(0 \\leq r \u0026lt; b\\). We desire that \\(q = q\u0026rsquo;\\), \\(r = r\u0026rsquo;\\).\u003c/p\u003e\n\u003cp\u003eWLOG let \\(r \\leq r\u0026rsquo;\\). So, \\(0 \\leq r\u0026rsquo; - r\\). Both \\(r\u0026rsquo;\\) and \\(r\\) are remainders after dividing by \\(b\\), so \\(r\u0026rsquo; \u0026lt; b\\) and \\(r \u0026lt; b\\). Therefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq r\u0026rsquo; - r \u0026lt; b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;bq+r = bq\u0026rsquo; + r\u0026rsquo;\\\\\n\\Rightarrow\\ \u0026amp;b(q-q\u0026rsquo;) = r\u0026rsquo; - r\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we have that \\(b|(r\u0026rsquo; - r)\\). Hence, we have some positive \\(r\u0026rsquo; - r\\), which is smaller than b, but which is divisible by \\(b\\). This forces us to conclude that \\(r\u0026rsquo; - r = 0\\).\u003c/p\u003e\n\u003cp\u003eGiven \\(r\u0026rsquo; = r\\), now, we can see that \\(q = q\u0026rsquo;\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdivide/","tags":null,"title":"divide"},{"categories":null,"contents":"Divide by \\(2\\pi\\), or, how I learned to start worrying and hate Fourier Transforms.\nHello all. Good news first: our frequency theory is now correctly validated by data.\nIf you want a band-aid for the answer, here it is: divide everything we get out of the cantilever equations by \\(2\\pi\\); then, use the correct linear mass density: our Google sheets was off by a factor of almost \\(4\\) because of later-corrected apparent measurement error.\nThe bad news? 
You get pages of algebra to justify how, while getting a whole run-down of our entire theory so far for kicks.\nOur story begins at what popped out of the other end of the Euler-Lagrange equations (if you want the start of the Lagrangian analysis, read this from Mark, and plug the resulting Lagrangian into the Euler-Lagrange equation of the right shape.) But, either way, out will pop this fourth-order partial differential equation:\n\\begin{equation} EI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t} \\end{equation}\nwhere, \\(w(x,t)\\) is a function of displacement by location by time, \\(E\\) the elastic modulus, and \\(I\\) the second moment of bending area.\nNow, fourth-order diffequs are already pain. PARTIAL forth order diffequs sounds darn near impossible. Wikipedia, to their credit, helpfully suggests the following to help tackle this problem:\nYou see, as we are trying to isolate possible individual frequencies, it make sense to essentially run a Fourier Transform on our algebra, to get the possible amplitude at each frequency \\(\\hat{w}(x)\\), given some frequency \\(\\omega\\) (no idea why they use \\(\\omega\\), I will use \\(f\\) for the rest of this article.)\nTo perform this analysis, Wikipedia suggests that we substitute our \\(w(x,t)\\) with its Fourier Definition, which is written as a function of the Fourier-decomposed version of the function \\(\\hat{w}(x)\\) (real component only, as imaginary pairs serve only as the conjugate), and then re-isolate those decomposed \\(\\hat{w}(x)\\). In this way, we get rid of the time dimension as sine waves oscillate ad infinium. Makes total sense.\nEXCEPT WHAT WIKIPEDIA GAVE ABOVE TO SUBSTITUTE IN ISN\u0026rsquo;T THE CORRECT FOURIER DECOMPOSITION\nHere\u0026rsquo;s the actual Fourier Transform intergral:\nwhere, \\(\\zeta=\\omega=f\\) , \\(f(x) = \\hat{w}(x)\\).\nWHAT DO YOU NOTICE? AN EXTRA \\(2\\pi\\).\nTHIS ALSO MEANS THAT THE FREQUENCY ANALYSTS IN THE REST OF THAT WIKI ARTICLE IS WRONG\nOk. 
I collect myself.\nSo, we now have that:\n\\begin{equation} w(x,t) = Re\\qty[\\hat{w}(x)e^{-i 2\\pi ft}] \\end{equation}\nRecall that we are trying to substitute this into\n\\begin{equation} EI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t} \\end{equation}\nTaking two derivatives of the above Fourier decomposition equation by time (which is the dimension we are trying to get rid of to make the diffequ not partial), we have:\n\\begin{align} \\pdv[2]{w(x,t)}{t} \u0026amp;= \\pdv[2] t Re\\qty[\\hat{w}(x)e^{-i2\\pi ft}] \\\\ \u0026amp;= Re\\qty[\\hat{w}(x)\\pdv[2] t e^{-i2\\pi ft}] \\\\ \u0026amp;= Re\\qty[\\hat{w}(x)(2\\pi f)^{2} \\cdot e^{-i2\\pi ft}\\dots] \\end{align}\nNow, given we are only dealing with the real components of these things, everything on the \\(e^{-i\\dots }\\) part of the function wooshes away, cleanly leaving us with:\n\\begin{equation} \\pdv[2]{w(x,t)}{t} \u0026amp;= \\hat{w}(x)(2\\pi f)^{2} \\end{equation}\nYay! No longer differential. Substituting that into our original expression, and making the partials not partial anymore:\n\\begin{equation} EI\\pdv[4]{w}{x} + \\mu \\hat{w}(x)(2\\pi f)^{2}= 0 \\end{equation}\nExcellent. Now, onto solving this. The basic way to solve this is essentially to split the fourth-order differential into a 4x4 matrix, each one taking another derivative of the past. Then, to get a characteristic solution, you take its eigenvalues.\nBut instead of going about doing that, I\u0026rsquo;m going to give up and ask a computer. 
In the code, I am going to substitute \\(p\\) for \\(2\\pi\\) temporarily because FriCAS gets a little to eager to convert things into their sinusoidal forms if we leave it as \\(2\\pi\\).\nE,I,u,f = var(\u0026#34;E I u f\u0026#34;) x, L = var(\u0026#34;x L\u0026#34;) w = function(\u0026#39;w\u0026#39;)(x) p = var(\u0026#34;p\u0026#34;) _c0, _c1, _c2, _c3 = var(\u0026#34;_C0 _C1 _C2 _C3\u0026#34;) fourier_cantileaver = (E*I*diff(w, x, 4) - u*(p*f)^2*w == 0) fourier_cantileaver -f^2*p^2*u*w(x) + E*I*diff(w(x), x, x, x, x) == 0 solution = desolve(fourier_cantileaver, w, ivar=x, algorithm=\u0026#34;fricas\u0026#34;).expand() latex(solution) \\begin{equation} \\hat{w}(x) = _{C_{1}} e^{\\left(\\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} \\sqrt{2 \\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} \\end{equation}\nOk, so we have that each component solution is a combination of a bunch of stuff, times \\(\\pm i\\) or \\(\\pm 1\\). We are going to declare everything that\u0026rsquo;s invariant in the exponent to be named \\(\\beta\\):\n\\begin{equation} \\beta := \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} \\end{equation}\nAnd given this, we can then write the general solution for displacement by location (\\(w\\)) determined above more cleanly as:\n\\begin{equation} \\hat{w}(x) = _{C_{1}} e^{\\beta x} + _{C_{0}} e^{ \\beta ix} + _{C_{2}} e^{-\\beta ix} + _{C_{3}} e^{-\\beta x} \\end{equation}\nWe will make one more substitution\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. 
We know this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\nRecall that:\n\\begin{equation} \\begin{cases} \\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\ \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nWith a new set of scaling constants \\(d_0\\dots d_3\\) (because these substitutions essentially ignore any factors its being multiplied, but we don\u0026rsquo;t actually care about modeling amplitude with these expressions anyways, so we can just change the arbitrary initial-conditions scalars on the fly), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(\\hat{w}(x)\\) at a specific frequency \\(f\\) can be written as:\n\\begin{equation} d_0\\cosh \\beta x +d_1\\sinh \\beta x +d_2\\cos \\beta x +d_3\\sin \\beta x = \\hat{w}(x) \\end{equation}\nfor some arbitrary initial conditions \\(d_0\\dots d_3\\). Significantly cleaner.\nSo, what frequencies will our fork oscillate at? Well, a mode for our fork is any set of \\(d_0 \\dots d_3\\) for which a solution for \\(\\hat{w}(x)\\) exists given our constants.\nAs it stands right now, it seems like we have four unknowns (\\(d_0 \\dots d_3\\)) but only one equation to solve with. 
That\u0026rsquo;s no bueno.\nEnter our initial conditions:\nThe top line states that: at \\(x=0\\), the bottom of the fork, our beam does not travel away from its natural axis (yes, because its a solid hunk of metal connected to the base), and it does not deflect (slope).\nThe bottom line stats that: at \\(x=L\\), the top of the fork is straight (which is true, the tip-top of the fork does indeed not bend, only the middleish parts bend.)\nSo, to get at the hidden system of four elements, we will take some derivatives of our original \\(\\hat{w}(x)\\) equation by \\(x\\), as prescribed by our initial conditions.\nwp = diff(w,x,1) wpp = diff(w,x,2) wppp = diff(w,x,3) (wp, wpp, wppp) (b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x), -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x), -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x)) And then, we have a system:\ncond_1 = w.subs(x=0) == 0 cond_2 = wp.subs(x=0) == 0 cond_3 = wpp.subs(x=L) == 0 cond_4 = wppp.subs(x=L) == 0 conds = (cond_1, cond_2, cond_3, cond_4) conds (d0 + d2 == 0, b*d1 + b*d3 == 0, -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0, -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0) solve(conds, d0, d1, d2, d3).full_simplify() Ok so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\n\\begin{equation} \\begin{cases} d_0 + d_2 = 0 \\\\ d_1 + d_3 = 0 \\\\ -d_2 \\cos L\\beta + d_0 \\cosh L\\beta - d_3 \\sin L\\beta + d_1 \\sinh L\\beta = 0 \\\\ -d_3 \\cos L\\beta + d_1 \\cosh L\\beta + d_2 \\sin L\\beta + d_0 \\sinh L\\beta = 0 \\\\ \\end{cases} \\end{equation}\nGreat. Four unknowns, four equations. 
We can now figure out when a solution for \\(d_0, \\dots d_3\\) exists (or go about solving it, but turns out that\u0026rsquo;s significantly harder and wildly useless.)\nI will spare you the pages of route algebra needed to figure out when a solution exists. Suffice to say its lots of trig identities.\nBut, the satisfying conclusion is that, given the equations above, a solution exists for \\(d_0 \\dots d_3\\) (read: a mode for the beam exists), when:\n\\begin{equation} \\cos L\\beta \\cdot \\cosh L\\beta +1 = 0 \\end{equation}\nSo, any valid solutions for the expression \\(\\cos x \\cdot \\cosh x + 1 = 0\\) will be a valid product between \\(L\\beta\\). We can use this information to figure out the right frequencies by then solving for \\(f\\) embedded in \\(\\beta\\).\nSo, onto solving for \\(\\cos L\\beta \\cdot \\cosh L\\beta +1 = 0\\).\nWe again give up and ask a computer to do it.We will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\nintervals = [jj*pi for jj in range(0, 5)] intervals [0, pi, 2*pi, 3*pi, 4*pi] We will now declare \\(x=L\\beta\\), and create a nonlinear expression in it:\nx = var(\u0026#34;x\u0026#34;) characteristic_eqn = 1 + cos(x)*cosh(x) == 0 characteristic_eqn cos(x)*cosh(x) + 1 == 0 Root finding time!\ncharacteristic_solutions = [characteristic_eqn.find_root(i,j) for (i,j) in zip(intervals,intervals[1:])] characteristic_solutions [1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457] These are possible candidate values for \\(L\\beta\\). 
We will declare these values \\(s\\).\nSo, we now have that:\n\\begin{equation} L \\beta = s \\end{equation}\nSubstituting back our original definition for \\(\\beta\\), we have that:\n\\begin{equation} L \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s \\end{equation}\nNow, we will try to get \\(f\\) by itself:\n\\begin{align} \u0026amp;L \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s \\\\ \\Rightarrow\\ \u0026amp; \\sqrt{2\\pi f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}} \\\\ \\Rightarrow\\ \u0026amp; 2\\pi f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\end{align}\nFinally, we have that \\(I = \\frac{1}{12} bh^{3}\\) for a rectangular prism; and that linear density is cross-sectional area times volumetric density \\(\\mu = \\rho \\cdot bh\\). Making these substitutions:\n\\begin{align} f \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\ \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Ebh^{3}}{12 \\rho bh})^{\\frac{1}{2}} \\\\ \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Eh^{2}}{12 \\rho})^{\\frac{1}{2}} \\\\ \\end{align}\nWithout even getting to the frequency-based payoff, we immediately notice two takeaways.\nThe frequency of our fork is inversely proportional to length (i.e. \\(f = \\frac{1}{L^{2}}\\dots\\)) The first overtone of the tuning fork is \\(s^{2} = (\\frac{4.694}{1.875})^{2} \\approx 6.27\\) times higher than the fundamental\u0026mdash;meaning its significantly higher energy so it dissipates significantly faster; it is also not an integer multiple, which means its much less likely to be confused to be a harmonic; making a tuning fork essentially a pure-frequency oscillator Given equal conditions, only the thickness in one dimension (the one perpendicular to the bending axis) matters But, enough idling, onto our main event. 
Using standard reference values for aluminum, as well as our measured length and thickness of a \\(C\u0026rsquo;\\ 512hz\\) tuning fork, we have that\n# measured values---- # thickness h = 0.0065 # meters # length L0 = 0.09373 # meters L1 = 0.08496 # meters # theoretical values--- # elastic modulus E = 46203293995 # pascals = kg/m^2 # density p = 2597 # kg/m^3 # our solved characteristic value (s) # mode to index nth_mode = 0 s = characteristic_solutions[nth_mode] zero = (((s^2)/(2*pi*L0^2))*((E*h^2)/(12*p))^(1/2)).n() one = (((s^2)/(2*pi*L1^2))*((E*h^2)/(12*p))^(1/2)).n() zero, one mean([zero,one]) (504.123425101814, 613.571395642254) 558.847410372034 Close enough for a night. Thank you sorry about everything.\ntemperature # mode to index nth_mode = 0 # s = characteristic_solutions[nth_mode] s = var(\u0026#34;s\u0026#34;) # change to L and h (distances measures) by increases in degrees C d(t,x) = (2.4e-5)*x rho(t,x) = ((2.4e-5))*x a,b = var(\u0026#34;a b\u0026#34;) # a = -3.9 # b = 0.0033 Ed(t) = ((a*b)*e^(b*t))*1e9 f(E, L, h, p) = (((s^2)/(2*pi*L^2))*((E*h^2)/(12*p))^(1/2)) E,L,h,p = var(\u0026#34;E L h p\u0026#34;) # (a * b * /g)c t = var(\u0026#34;t\u0026#34;) # diff(t, dt, E,L,h,p) = sqrt((f.diff(E)*Ed(t)*dt)^2 + (f.diff(E)*Ed(t)*dt)^2 + (f.diff(L)*d(t)*dt)^2 + (f.diff(h)*d(t)*dt)^2) # diff(10, 1, 42661456706, 0.09833, 0.00643, 2545.454545).n() subdict = { a: -3.9, b:0.0033, L:0.09833, h:0.00643, p:2545.454545, E:42661456706, s:characteristic_solutions[nth_mode], t:50 } (f.diff(E)*Ed(t)).subs(subdict).full_simplify().n() (f.diff(L)*d(t,L)).subs(subdict).full_simplify().n() (f.diff(h)*d(t,h)).subs(subdict).full_simplify().n() (f.diff(p)*rho(t,p)).subs(subdict).full_simplify().n() -0.0782394489394635 -0.0211103561467420 0.0105551780733710 -0.00527758903668550 expansion = var(\u0026#34;x\u0026#34;) l,w,h,m = var(\u0026#34;l w h m\u0026#34;) density(l,w,h) = (l*w*h)/m density.diff(l)*expansion + density.diff(w)*expansion + density.diff(h)*expansion (l, w, h) 
|--\u0026gt; h*l*x/m + h*w*x/m + l*w*x/m ","html":"\u003cp\u003e\u003cstrong\u003eDivide by \\(2\\pi\\)\u003c/strong\u003e, or, how I learned to start worrying and hate Fourier Transforms.\u003c/p\u003e\n\u003cp\u003eHello all. Good news first: our frequency theory is now correctly validated by data.\u003c/p\u003e\n\u003cp\u003eIf you want a band-aid for the answer, here it is: \u003cstrong\u003edivide everything we get out of the cantilever equations by \\(2\\pi\\)\u003c/strong\u003e; then, use the \u003cstrong\u003ecorrect\u003c/strong\u003e linear mass density: our Google sheets was off by a factor of almost \\(4\\) because of later-corrected apparent measurement error.\u003c/p\u003e\n\u003cp\u003eThe bad news? You get pages of algebra to justify how, while getting a whole run-down of our entire theory so far for kicks.\u003c/p\u003e\n\u003cp\u003eOur story begins at what popped out of the other end of the Euler-Lagrange equations (if you want the start of the Lagrangian analysis, read \u003ca href=\"https://drive.google.com/file/d/182FLTSs2DziJcY4rnZkcTmBUwB3ek_Nv/view?usp=sharing\"\u003ethis from Mark\u003c/a\u003e, and plug the resulting Lagrangian into the Euler-Lagrange equation of the right shape.) But, either way, out will pop this fourth-order partial differential equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(w(x,t)\\) is a function of displacement by location by time, \\(E\\) the elastic modulus, and \\(I\\) the second moment of bending area.\u003c/p\u003e\n\u003cp\u003eNow, fourth-order diffequs are already pain. PARTIAL forth order diffequs sounds darn near impossible. 
Wikipedia, to their credit, helpfully suggests the following to help tackle this problem:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-18_00-15-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eYou see, as we are trying to isolate possible individual frequencies, it make sense to essentially run a Fourier Transform on our algebra, to get the possible amplitude at each frequency \\(\\hat{w}(x)\\), given some frequency \\(\\omega\\) (no idea why they use \\(\\omega\\), I will use \\(f\\) for the rest of this article.)\u003c/p\u003e\n\u003cp\u003eTo perform this analysis, Wikipedia suggests that we substitute our \\(w(x,t)\\) with its Fourier Definition, which is written as a function of the Fourier-decomposed version of the function \\(\\hat{w}(x)\\) (real component only, as imaginary pairs serve only as the conjugate), and then re-isolate those decomposed \\(\\hat{w}(x)\\). In this way, we get rid of the time dimension as sine waves oscillate \u003cem\u003ead infinium\u003c/em\u003e. Makes total sense.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eEXCEPT WHAT WIKIPEDIA GAVE ABOVE TO SUBSTITUTE IN ISN\u0026rsquo;T THE CORRECT FOURIER DECOMPOSITION\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s the actual Fourier Transform intergral:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-18_00-21-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere, \\(\\zeta=\\omega=f\\) , \\(f(x) = \\hat{w}(x)\\).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eWHAT DO YOU NOTICE? AN EXTRA \\(2\\pi\\).\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eTHIS ALSO MEANS THAT THE FREQUENCY ANALYSTS IN THE REST OF THAT WIKI ARTICLE IS WRONG\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eOk. 
I collect myself.\u003c/p\u003e\n\u003cp\u003eSo, we \u003cem\u003enow\u003c/em\u003e have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(x,t) = Re\\qty[\\hat{w}(x)e^{-i 2\\pi ft}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that we are trying to substitute this into\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking two derivatives of the above Fourier decomposition equation by \u003cem\u003etime\u003c/em\u003e (which is the dimension we are trying to get rid of to make the diffequ not partial), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\pdv[2]{w(x,t)}{t} \u0026amp;= \\pdv[2] t Re\\qty[\\hat{w}(x)e^{-i2\\pi ft}] \\\\\n\u0026amp;= Re\\qty[\\hat{w}(x)\\pdv[2] t e^{-i2\\pi ft}] \\\\\n\u0026amp;= Re\\qty[\\hat{w}(x)(2\\pi f)^{2} \\cdot e^{-i2\\pi ft}\\dots]\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, given we are only dealing with the real components of these things, everything on the \\(e^{-i\\dots }\\) part of the function wooshes away, cleanly leaving us with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{w(x,t)}{t} \u0026amp;= \\hat{w}(x)(2\\pi f)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYay! No longer differential. Substituting that into our original expression, and making the partials not partial anymore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI\\pdv[4]{w}{x} + \\mu \\hat{w}(x)(2\\pi f)^{2}= 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExcellent. Now, onto solving this. The basic way to solve this is essentially to split the fourth-order differential into a 4x4 matrix, each one taking another derivative of the past. Then, to get a characteristic solution, you take its eigenvalues.\u003c/p\u003e\n\u003cp\u003eBut instead of going about doing that, I\u0026rsquo;m going to give up and ask a computer. 
In the code, I am going to substitute \\(p\\) for \\(2\\pi\\) temporarily because FriCAS gets a little to eager to convert things into their sinusoidal forms if we leave it as \\(2\\pi\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E I u f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x L\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;w\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;p\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_C0 _C1 _C2 _C3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-f^2*p^2*u*w(x) + E*I*diff(w(x), x, x, x, x) == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elatex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n\\hat{w}(x) = _{C_{1}} e^{\\left(\\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} \\sqrt{2 \\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, so we have that each component solution is a combination of a bunch of stuff, times \\(\\pm i\\) or \\(\\pm 1\\). We are going to declare everything that\u0026rsquo;s invariant in the exponent to be named \\(\\beta\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta := \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd given this, we can then write the general solution for displacement by location (\\(w\\)) determined above more cleanly as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{w}(x) = _{C_{1}} e^{\\beta x} + _{C_{0}} e^{ \\beta ix} + _{C_{2}} e^{-\\beta ix} + _{C_{3}} e^{-\\beta x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will make one more substitution\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. 
We \u003cem\u003eknow\u003c/em\u003e this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith a new set of scaling constants \\(d_0\\dots d_3\\) (because these substitutions essentially ignore any factors its being multiplied, but we don\u0026rsquo;t actually care about modeling amplitude with these expressions anyways, so we can just change the arbitrary initial-conditions scalars on the fly), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(\\hat{w}(x)\\) at a specific frequency \\(f\\) can be written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_0\\cosh \\beta x +d_1\\sinh \\beta x +d_2\\cos \\beta x +d_3\\sin \\beta x = \\hat{w}(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some arbitrary initial conditions \\(d_0\\dots d_3\\). \u003cem\u003eSignificantly\u003c/em\u003e cleaner.\u003c/p\u003e\n\u003cp\u003eSo, what frequencies will our fork oscillate at? Well, a \u003cstrong\u003emode\u003c/strong\u003e for our fork is any set of \\(d_0 \\dots d_3\\) for which a solution for \\(\\hat{w}(x)\\) exists given our constants.\u003c/p\u003e\n\u003cp\u003eAs it stands right now, it seems like we have four unknowns (\\(d_0 \\dots d_3\\)) but only one equation to solve with. 
That\u0026rsquo;s no bueno.\u003c/p\u003e\n\u003cp\u003eEnter our initial conditions:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-10_13-38-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe top line states that: at \\(x=0\\), the bottom of the fork, our beam does not travel away from its natural axis (yes, because its a solid hunk of metal connected to the base), and it does not deflect (slope).\u003c/p\u003e\n\u003cp\u003eThe bottom line stats that: at \\(x=L\\), the top of the fork is straight (which is true, the tip-top of the fork does indeed not bend, only the middleish parts bend.)\u003c/p\u003e\n\u003cp\u003eSo, to get at the hidden system of four elements, we will take some derivatives of our original \\(\\hat{w}(x)\\) equation by \\(x\\), as prescribed by our initial conditions.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, we have a system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003econds\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(d0 + d2 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e b*d1 + b*d3 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOk so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 + d_2 = 0 \\\\\nd_1 + d_3 = 0 \\\\\n-d_2 \\cos L\\beta + d_0 \\cosh L\\beta - d_3 \\sin L\\beta + d_1 \\sinh L\\beta = 0 \\\\\n-d_3 \\cos L\\beta + d_1 \\cosh L\\beta + d_2 \\sin L\\beta + d_0 \\sinh L\\beta = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. Four unknowns, four equations. We can now figure out when a solution for \\(d_0, \\dots d_3\\) exists (or go about solving it, but turns out that\u0026rsquo;s significantly harder and wildly useless.)\u003c/p\u003e\n\u003cp\u003eI will spare you the pages of route algebra needed to figure out when a solution exists. 
Suffice to say its lots of trig identities.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-18_00-47-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eBut, the satisfying conclusion is that, given the equations above, a solution \u003cem\u003eexists\u003c/em\u003e for \\(d_0 \\dots d_3\\) (read: a \u003cstrong\u003emode\u003c/strong\u003e for the beam exists), when:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos L\\beta \\cdot \\cosh L\\beta +1 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, any valid solutions for the expression \\(\\cos x \\cdot \\cosh x + 1 = 0\\) will be a valid product between \\(L\\beta\\). We can use this information to figure out the right frequencies by then solving for \\(f\\) embedded in \\(\\beta\\).\u003c/p\u003e\n\u003cp\u003eSo, onto solving for \\(\\cos L\\beta \\cdot \\cosh L\\beta +1 = 0\\).\u003c/p\u003e\n\u003cp\u003eWe again give up and ask a computer to do it.We will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ejj\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0, pi, 2*pi, 3*pi, 4*pi]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now declare \\(x=L\\beta\\), and create a nonlinear expression in it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ecos(x)*cosh(x) + 1 == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRoot finding time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efind_root\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1.8751040687120917, 4.6940911329739246, 7.854757438237603, 
10.995540734875457]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThese are possible candidate values for \\(L\\beta\\). We will declare these values \\(s\\).\u003c/p\u003e\n\u003cp\u003eSo, we now have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL \\beta = s\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting back our original definition for \\(\\beta\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we will try to get \\(f\\) by itself:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;L \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s \\\\\n\\Rightarrow\\ \u0026amp; \\sqrt{2\\pi f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}} \\\\\n\\Rightarrow\\ \u0026amp; 2\\pi f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinally, we have that \\(I = \\frac{1}{12} bh^{3}\\) for a rectangular prism; and that linear density is cross-sectional area times volumetric density \\(\\mu = \\rho \\cdot bh\\). Making these substitutions:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nf \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\\n\u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Ebh^{3}}{12 \\rho bh})^{\\frac{1}{2}} \\\\\n\u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Eh^{2}}{12 \\rho})^{\\frac{1}{2}} \\\\\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWithout even getting to the frequency-based payoff, we immediately notice two takeaways.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThe frequency of our fork is inversely proportional to length (i.e. 
\\(f = \\frac{1}{L^{2}}\\dots\\))\u003c/li\u003e\n\u003cli\u003eThe first overtone of the tuning fork is \\(s^{2} = (\\frac{4.694}{1.875})^{2} \\approx 6.27\\) times higher than the fundamental\u0026mdash;meaning its significantly higher energy so it dissipates significantly faster; it is also not an integer multiple, which means its much less likely to be confused to be a harmonic; making a tuning fork essentially a pure-frequency oscillator\u003c/li\u003e\n\u003cli\u003eGiven equal conditions, only the thickness in one dimension (the one perpendicular to the bending axis) matters\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBut, enough idling, onto our main event. Using standard reference values for aluminum, as well as our measured length and thickness of a \\(C\u0026rsquo;\\ 512hz\\) tuning fork, we have that\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# measured values----\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# thickness\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0065\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# meters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# length\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eL0\u003c/span\u003e 
\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.09373\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# meters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eL1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.08496\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# meters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# theoretical values---\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# elastic modulus\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e46203293995\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals = kg/m^2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# density\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2597\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e# our solved characteristic value (s)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL0\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eone\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eone\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eone\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(504.123425101814, 613.571395642254)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e558.847410372034\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eClose enough for a night. 
Thank you \u003cdel\u003esorry\u003c/del\u003e about everything.\u003c/p\u003e\n\u003ch2 id=\"temperature\"\u003etemperature\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# s = characteristic_solutions[nth_mode]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;s\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# change to L and h (distances measures) by increases in degrees C\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.4e-5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.4e-5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;a b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# a = 
-3.9\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# b = 0.0033\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e9\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E L h p\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# (a * b * /g)c\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;t\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# diff(t, dt, E,L,h,p) = sqrt((f.diff(E)*Ed(t)*dt)^2 + (f.diff(E)*Ed(t)*dt)^2 + (f.diff(L)*d(t)*dt)^2 + (f.diff(h)*d(t)*dt)^2)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# diff(10, 1, 42661456706, 0.09833, 0.00643, 2545.454545).n()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.0033\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.09833\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.00643\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2545.454545\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e42661456706\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e50\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-0.0782394489394635\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-0.0211103561467420\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0105551780733710\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e-0.00527758903668550\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;l w h m\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(l, w, h) |--\u0026gt; h*l*x/m + h*w*x/m + l*w*x/m\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhdivide_by_2pi/","tags":null,"title":"Divide by 2pi"},{"categories":null,"contents":"Dopamine optical sensor. When dopamine is bound, it floreses and can detect micromolar changes and dopamine concentration.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdopamine/\"\u003eDopamine\u003c/a\u003e optical sensor. When dopamine is bound, it floreses and can detect micromolar changes and dopamine concentration.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdlight_1/","tags":null,"title":"dLight 1"},{"categories":null,"contents":"See also Software Development Methodologies\ndocumentation Comments Readme Wiki specification UX UI High-Level Architecture (libraries, external APIs) Low-Level Architecture (modules, functions, internal APIs) commenting Almost anything hardcoded (constants, strings, etc.) Anything confusing, tricky, nonstandard Historical notes: if something is added/removed, write it down TODO for bugs or hacks README Files Best used as a quick-start guide What are key pieces of info they will pieces of info they will need? What is your code supposed to do? How does someone run your code? 
How does a new engineer get set up? General overview of how things are laid out, with links to wiki pages with details Wiki In-depth explanation of subsystems and modules Separate pages for each subsystem Include decisions of their design decisions Discussions of why systems are not designed differently UI/UX Spec How do we know what the software is supposed to do? Varying levels of resolution User stories All the way up to granular details of UI elements Don\u0026rsquo;t forgot to document defaults!\n","html":"\u003cp\u003eSee also \u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eSoftware Development Methodologies\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"documentation\"\u003edocumentation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eComments\u003c/li\u003e\n\u003cli\u003eReadme\u003c/li\u003e\n\u003cli\u003eWiki\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"specification\"\u003especification\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUX\u003c/li\u003e\n\u003cli\u003eUI\u003c/li\u003e\n\u003cli\u003eHigh-Level Architecture (libraries, external APIs)\u003c/li\u003e\n\u003cli\u003eLow-Level Architecture (modules, functions, internal APIs)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"commenting\"\u003ecommenting\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAlmost anything hardcoded (constants, strings, etc.)\u003c/li\u003e\n\u003cli\u003eAnything confusing, tricky, nonstandard\u003c/li\u003e\n\u003cli\u003eHistorical notes: if something is added/removed, write it down\u003c/li\u003e\n\u003cli\u003eTODO for bugs or hacks\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"readme-files\"\u003eREADME Files\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eBest used as a quick-start guide\u003c/li\u003e\n\u003cli\u003eWhat are key pieces of info they will pieces of info they will need?\n\u003cul\u003e\n\u003cli\u003eWhat is your code supposed to do?\u003c/li\u003e\n\u003cli\u003eHow does someone run your code?\u003c/li\u003e\n\u003cli\u003eHow does a 
new engineer get set up?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eGeneral overview of how things are laid out, with links to wiki pages with details\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"wiki\"\u003eWiki\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIn-depth explanation of subsystems and modules\u003c/li\u003e\n\u003cli\u003eSeparate pages for each subsystem\u003c/li\u003e\n\u003cli\u003eInclude decisions of their design decisions\u003c/li\u003e\n\u003cli\u003eDiscussions of why systems are not designed differently\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ui-ux-spec\"\u003eUI/UX Spec\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHow do we know what the software is \u003cem\u003esupposed\u003c/em\u003e to do?\u003c/li\u003e\n\u003cli\u003eVarying levels of resolution\n\u003cul\u003e\n\u003cli\u003eUser stories\u003c/li\u003e\n\u003cli\u003eAll the way up to granular details of UI elements\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDon\u0026rsquo;t forgot to document defaults!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdocumentation_and_specification/","tags":null,"title":"Documentation and Specification"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdopamine/","tags":null,"title":"dopamine"},{"categories":null,"contents":"The dopamine circuitry in NF1.\nGenetically encoded \u0026ldquo;sensors\u0026rdquo; to measure circuits.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdopamine/\"\u003edopamine\u003c/a\u003e circuitry in \u003ca href=\"\"\u003eNF1.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eGenetically encoded \u0026ldquo;sensors\u0026rdquo; to measure circuits.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdopamine_circuitry_in_nf1/","tags":null,"title":"dopamine circuitry in NF1"},{"categories":null,"contents":"There is extreme noise in the patient annotations: for instance, in their health rhythm labels 
there\u0026rsquo;s around 20% contradictions in the dataset.\naccurate diagnosis limiting \u0026ldquo;domain rule violations\u0026rdquo; approach take your dataset, and validate rules IF validation is successful, train model normally with that sample IF the validation is unsuccessful, then use the output samples as negative examples ","html":"\u003cp\u003eThere is \u003cstrong\u003eextreme noise\u003c/strong\u003e in the patient annotations: for instance, in their health rhythm labels there\u0026rsquo;s around 20% contradictions in the dataset.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eaccurate diagnosis\u003c/li\u003e\n\u003cli\u003elimiting \u0026ldquo;domain rule violations\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"approach\"\u003eapproach\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etake your dataset, and validate rules\u003c/li\u003e\n\u003cli\u003eIF validation is successful, train model normally with that sample\u003c/li\u003e\n\u003cli\u003eIF the validation is unsuccessful, then use the output samples as negative examples\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdost/","tags":null,"title":"DOST"},{"categories":null,"contents":"The dot product is a property of real vector spaces which is a simplified version of an inner product; specifically, it obviates the need to complex-conjugate anything because, well, \\(\\bar{n} = n, n \\in \\mathbb{R}\\). The dot-product also yield a real number.\nconstituents \\(x, y \\in \\mathbb{R}^{n}\\) (NOTE the realness) where, \\(x = (x_1, \\dots, x_{n})\\) and \\(y = (y_1, \u0026hellip;, y_{n})\\) requirements As we are familiar with, element-wise product and sum\n\\begin{equation} x\\cdot y = x_1y_1 + \\dots + x_{n}y_{n} \\end{equation}\nadditional information properties of the dot product For fixed \\(y \\in \\mathbb{R}^{n}\\), the dot product map that sends \\(x\\) to \\(x \\cdot y\\) is linear (inheriting add. and homo. 
from algebra) \\(x \\cdot x = 0\\) IFF \\(x =0\\) (no negs allowed (above), so every slot has to have a zero to multiply to 0) \\(x \\cdot x \u0026gt; 0\\) for all \\(x \\in \\mathbb{R}^{n}\\) (neg times neg is pos) \\(x \\cdot y = y \\cdot x\\) for reals; by inheriting from each element\u0026rsquo;s field orthogonality test The dot product is an orthogonality test. If the dot product between the two vectors is \\(0\\), they are definitely orthogonal.\ngeometric interpretation of the dot product Well, we have some shape between two vectors; then, we can first write out the law of cosines. Then, we can see that, for two vectors from the same origin, we can say that the projection of vector \\(\\vec{A}\\) onto \\(\\vec{B}\\) is written as:\n\\begin{equation} |\\vec{A}||\\vec{B}|\\cos \\theta \\end{equation}\nwhere, \\(\\theta\\) is the angle between the two vectors.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e is a property of \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ereal vector space\u003c/a\u003es which is a simplified version of an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e; specifically, it obviates the need to complex-conjugate anything because, well, \\(\\bar{n} = n, n \\in \\mathbb{R}\\). 
The dot-product also yield a real number.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x, y \\in \\mathbb{R}^{n}\\) (NOTE the realness)\n\u003cul\u003e\n\u003cli\u003ewhere, \\(x = (x_1, \\dots, x_{n})\\) and \\(y = (y_1, \u0026hellip;, y_{n})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eAs we are familiar with, element-wise product and sum\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\\cdot y = x_1y_1 + \\dots + x_{n}y_{n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-the-dot-product\"\u003eproperties of the dot product\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eFor fixed \\(y \\in \\mathbb{R}^{n}\\), the dot product map that sends \\(x\\) to \\(x \\cdot y\\) is linear (inheriting add. and homo. from algebra)\u003c/li\u003e\n\u003cli\u003e\\(x \\cdot x = 0\\) IFF \\(x =0\\) (no negs allowed (above), so every slot has to have a zero to multiply to 0)\u003c/li\u003e\n\u003cli\u003e\\(x \\cdot x \u0026gt; 0\\) for all \\(x \\in \\mathbb{R}^{n}\\) (neg times neg is pos)\u003c/li\u003e\n\u003cli\u003e\\(x \\cdot y = y \\cdot x\\) for reals; by inheriting from each element\u0026rsquo;s \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"orthogonality-test\"\u003eorthogonality test\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e is an \u003ca href=\"#orthogonality-test\"\u003eorthogonality test\u003c/a\u003e. 
If the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e between the two vectors is \\(0\\), they are definitely orthogonal.\u003c/p\u003e\n\u003ch3 id=\"geometric-interpretation-of-the-dot-product--kbhdot-product-dot-md\"\u003egeometric interpretation of the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eWell, we have some shape between two vectors; then, we can first write out the \u003ca href=\"/posts/kbhlaw_of_cosines/\"\u003elaw of cosines\u003c/a\u003e. Then, we can see that, for two vectors from the same origin, we can say that the projection of vector \\(\\vec{A}\\) onto \\(\\vec{B}\\) is written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|\\vec{A}||\\vec{B}|\\cos \\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\theta\\) is the angle between the two vectors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdot_product/","tags":null,"title":"dot product"},{"categories":null,"contents":"One envelope has 10 times the money in the other money.\nWLOG let \\(x\\) be the envelope in Cary\u0026rsquo;s hand. The money in \\(y\\), then, \\(y = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x) = 0.05x+5x = 5.05x\\). Wat.\nBasically; regardless if Cary took the envelope \\(x\\) or \\(y\\), the other envelope is expected to have \\(5\\times\\) more money. What.\nWhy? There\u0026rsquo;s a bug in this:\n\\begin{equation} y = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x) \\end{equation}\nis not true! There is a human PRIOR BELIEF!! Its very unlikely that mykel/chris put 10000 dollars into an envelope; so each individual amount in an envelope has an exogenous probability of it happening!\n","html":"\u003cp\u003eOne envelope has 10 times the money in the other money.\u003c/p\u003e\n\u003cp\u003eWLOG let \\(x\\) be the envelope in Cary\u0026rsquo;s hand. 
The money in \\(y\\), then, \\(y = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x) = 0.05x+5x = 5.05x\\). Wat.\u003c/p\u003e\n\u003cp\u003eBasically; regardless if Cary took the envelope \\(x\\) or \\(y\\), the \u003cem\u003eother\u003c/em\u003e envelope is expected to have \\(5\\times\\) more money. What.\u003c/p\u003e\n\u003ch2 id=\"why\"\u003eWhy?\u003c/h2\u003e\n\u003cp\u003eThere\u0026rsquo;s a bug in this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis not true! There is a human \u003cstrong\u003e\u003cstrong\u003ePRIOR BELIEF\u003c/strong\u003e\u003c/strong\u003e!! Its very unlikely that mykel/chris put 10000 dollars into an envelope; so each individual amount in an envelope has an exogenous probability of it happening!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdouble_envelope_problem/","tags":null,"title":"Double Envelope Problem"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdouble_progressive_widening/","tags":null,"title":"Double Progressive Widening"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdouble_slit_experiment/","tags":null,"title":"double slit experiment"},{"categories":null,"contents":"A human gene similar to the gene PreTA found in E. Coli, a bacterial found in microbiome. See effects of PreTA on Fluoropyrimidine, and by proxy Capecitabmine for implications on cancer treatment.\n","html":"\u003cp\u003eA human gene similar to the gene \u003ca href=\"\"\u003ePreTA\u003c/a\u003e found in \u003ca href=\"/posts/kbhe_coli/\"\u003eE. Coli\u003c/a\u003e, a bacterial found in \u003ca href=\"\"\u003emicrobiome\u003c/a\u003e. 
See effects of \u003ca href=\"\"\u003ePreTA\u003c/a\u003e on \u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e, and by proxy \u003ca href=\"/posts/kbhcapecitabmine/\"\u003eCapecitabmine\u003c/a\u003e for implications on cancer treatment.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdpyd/","tags":null,"title":"DPYD"},{"categories":null,"contents":"Gah I have to do this. Not for public consumption. California laws 2022 DL600 R7 2022.\nConsequences Not licensed If unlicensed person is drivnig your car, it maybe impounded for 30 days Hired to drive interstate commercially need to be older than 21, also need to be older than 21 to transport hazardous materials Class C License Driving #knw Two axle vehicle with a GVWL of 26,000 lbs or less Three axle vehicle weighing 6,000 lbs or less House car \u0026lt; 40 feet or less Three wheel motocycles Vanpool vehicle designed to carry between 10 and no more than 15 people Towing #knw Single vehicle of 10,000 or less Vehicle weighing 4000 lbs or more unladen Trailer coach under 10,000 lbs Fifth wheel trailer exceeding 10,000 lbs but under 15,000 lbs, with endorsement Mor ethings Class C drivers can\u0026rsquo;t tow more than one Motor vehile weigning under 4000 lbs cannot tow more than 6000 lbs Getting in trouble Get a traffic ticket and fail to show up to court: suspend driving One at fault collision or one at fault traffic violation: may take action? Two of either at fault collision or violation conviction: no driving for 30 days unless accompanied by 25 year old adult Three of \u0026ldquo;\u0026rdquo;: no driving for 6 months, on probation for a year. 
Drugs or alcohol between 13-21: suspension for a year Minor driving Not sure if this applies\nPractice for 50 hours, 10 hours at night #knw\nPass knowledge test\nPass driving test\nCannot drive between 11P and 5A during the first year #knw\nCannot drive with under 20 Y/O unless 25 Y/O licensed accompanied #knw\nUnless\u0026mdash;\nMedical need with doctor\u0026rsquo;s note and end date School and dean\u0026rsquo;s note Work and employer\u0026rsquo;s note and employment status Family need and parent\u0026rsquo;s note Minors can\u0026rsquo;t use a phone while driving.\nSafe car #knw Working driver\u0026rsquo;s window, brake lights, horn, parking brake, turn signals Safe tire (1/32 inch tread) Full windshield Two rear view mirrors, incl. one on left side Working seatbelts Check: clean windows and mirrors, adjust seat and mirrors, check tires.\nSafe personage Vision Hearing Not tired Not medicated Health: no Lapses of conciseness AD \u0026ldquo;related disorders\u0026rdquo; \u0026mdash; anything the doctor reports to DMV Steering Hand to Hand hands 9/3 or 8/4 oclock Push and pull, hands stay where they are Hand over hand Start 9/3 or 8/4 Turn, but leave wheel sliding under Sliding under hand reach over, pull the wheel up One-hand Turning or backing up to turn back Hand at 12 oclock Limeted use Signaling Arm signals when lights are hard to see because of bright sun\nMotorcyclists use these signals, and bikers point their hand straight up to turning direction\nWhen to signal #knw Signal when: turn, change lanes, slow down, stop.\n100 feet before turning Before every lane change: look over and check blind spot 5 seconds before lane change on highway Pulling next to or away curb Signal even if no cars around you Horning \u0026ldquo;It is safer to slow down or stop instead of honking your horn.\u0026rdquo;\nWhen to horn #knw Avoid collisions Alert hazard Alert oncoming traffic on narrow mountain roads when you cannot see at least 200 feet in front of vehicle 
Don\u0026rsquo;t use horn to move people along, or \u0026ldquo;express anger.\u0026rdquo; The more ya know.\nHeadlights They are bright.\nWhen to headlight #knw When its too dark to see: if you can\u0026rsquo;t see a person 1000 feet away Beginning 30 minutes after sunset until 30 minutes before sunrise Adverse weather: windshield wipers on = low-beam headlights on Clouds dust smoke or fog prevent seeing other cars On sunny days on country or mountain roads When a white regulatory sign says so To help others see your car, when sun is low on horizon When not to high-beam headlight Dim when 500 feet of car coming towards you or 300 feet of a car you are following Emergency flashers If you can see a collision ahead, do:\nTurn on flashers #knw Lightly tap brake pedal three/four times Use hand signals How to stop in a middle of the road during an emergency #knw Start breaking early.\nGive drivers warning\nTurn on emergency flashers if you earn\u0026rsquo;t moving, or use turn signals\nPull off the road\nStop not on the road or, if isn\u0026rsquo;t possible, stop where people can see\nDon\u0026rsquo;t stop just over a hill\nLift the hook to signal an emergency\nPlace emergency triangles 200-300 feet behind vehicle; use flares if needed but be careful b/c they may cause fire\nCall for roadside assistance\n63, 92\nLanes! 
Reading \u0026rsquo;em Yellow: different directions Single yellow is the center of the road; cannot cross into oncoming traffic Double solid yellow line: not to be crossed \u0026hellip;except hov entrace lane which has a left entrance Instructed to cross because the road is blocked Entering or exiting a driveway, private road, or making a u-turn 2 double yellow line groups spaced 2 feet or more apart are considered a barrier; under no circumstance is to cross Broken yellow line: you may pass if the broken line is next to you White: same directions Single solid white line: traffic lanes in the same direction Double solid white lines: not to be crossed, regular use vs. preferential use lanes (carpool, etc.) Broken white lines: separate roads with two or more lines in the same direction White triangles: yield lines A line where you should yield. Triangles point to the direction of oncoming traffic (\u0026ldquo;towards you.\u0026rdquo;).\nChoosing \u0026rsquo;em Leftmost lane is lane 1, rightmost is lane n\nUse the left lane to pass or turn left Use the right lane to enter or exit traffic Change lanes when Moving from one lane to another Entering freeway Exiting freeway Entering the road from curb or shoulder Protocol for lane change #knw Signal Look in all mirrors Check traffic beside and behind you Look over solder in direction of desired lane change Check blind spots for other vehicles, motorcyclists, and bicycilsts Ensure room Tips stay in one lane don\u0026rsquo;t weave if you start a change, finish it Types of them Lane closest to the center divider is the \u0026ldquo;passing lane\u0026rdquo; HOV lanes is for high occupancy Center left turn lanes The center of some two-way streets has a left turn lane; marked on both sides by two painted lines. 
Inner line is broken and outer line is solid.\nYou may only drive 200 feet in the center left turn lane #knw\nProtocol for using this lane\nLook for other vehicles coming towards you in the center left turn lane Signal Look over shoulder Merge completely into the center left turn lane Turn when its safe. Turnouts Areas or lanes for turning that are marked? Use when:\nDriving slowly on a two-lane road where passing is unsafe, AND There are 5 or more vehicles following #knw Bike lanes Bike lanes\nBuffered bike lanes: uses chevrons or diagonals to buffer the bikes\nBike route: shared road markings to designate a preferred route\nBike boulevard: bike travel on streets with cars\nSeperated bikeways: completely different\nBikes share the road \u0026ldquo;sharrows!\u0026rdquo;\nCannot drive in bike lane unless\u0026hellip;.\nParking Entering or leaving road Turning (within 200 feet of intersection) Turning Right Drive close to the edge Drive in a bike lane, wait until about 200 feet to make turn #knw Watch for everybody Signal about 100 feet before #knw Look over sholder Stop behind limit line (/before entering crosswalk or intersection) Look both ways and turn when its safe; don\u0026rsquo;t turn into another lane Complete turn Details:\nCan\u0026rsquo;t turn when red arrow, but you can turn against red light You could cross a bus lane to make a right turn, but you can\u0026rsquo;t drive in it There could be designated right turn lanes which let you make a \u0026ldquo;free right turn\u0026rdquo; Left Drive close to the center divider or left turn lane Signal about 100 feed Look over sholder Stop behind limit line (/before entering crosswalk or intersection) Look left, right, then left Turn Details\nonly can turn against light when single-lane-to-single-lane U Conditions Across double-yellow line In a residential district No cars for 200 feet Whenever a sign or light protects against approachng cars At an intersection On a divided driveway, if opening provided 
Anticonditions WHen \u0026ldquo;no u-turn\u0026rdquo; is posted\nAt a railroad crossing\nOn a divided highway if needed to cross things\nCannot see 200 feet in each direction\nWhen other cars may hit you\nOn a one-way street\nIn front of a fire station\nooo. scary\nIn business districts, including churches apartments and buildings (except for schools); turn only at an intersection or opening if allowed. Merging Highways Enter at or near traffic speed Merge onto highway when safe to do so, don\u0026rsquo;t stop unless needed Merge into a space large enough for your car to join the lane Use mirrors and turn signals Watch for cars Leave three seconds of space (\u0026ldquo;three second rule\u0026rdquo;) between you and the car in front of you Exiting Know the exist Signal, look over sholder, etc. Change lanes Signal intention for 5 seconds Leave Space for entering You will need about a half a block on city streets Or, a full block on the highway Passing If anybody wants to pass, let them pass\nSpace for passing Don\u0026rsquo;t pass if\u0026hellip;\nYou are approaching a hill and cannot see oncoming traffic Within 100 feet of an intersection #knw At crossroads or driveways Condition of Passing You pass on the left, unless\u0026hellip;\nOpen highway with two or more lanes going in your direction Driver ahead of you is turning left, and you don\u0026rsquo;t have to drive off the road to pass You are on a one-way street Never drive off the road to pass.\nProtocol for passing Signal Shoulder Turn Speed up and pass Retturn Parking Find a space #knw three feet longer that your vehicle Turn on turn signal Pull up alongside the vehicle in front; leave about two feet between you and the car to your right. Stop when you rear bumper is aligned with the front of the space Check rearview mirror, look over sholder, keep foot on break and reverse Back up, 45% When rear view is within 18 inches from the curb, straighten out Set parking break. Leave when safe. 
Parking on a hill \u0026ldquo;Your car may roll when you breaks fail.\u0026rdquo;\nDownhill: wheels towards the curb Uphill: wheels away from curb No curb: turn towards the sholder of the road \u0026ldquo;towards the sholder, except when uphill with curb\u0026rdquo;\nColors White curb: stop for picking up or dropping off passengers or mails Green curb: park for limited time Yellow: load and unload, staying in the vehicle Red: no stopping Blue: disabled\u0026mdash;fine of $1,000, 6 months in county jail #knw Can\u0026rsquo;t park when No marking Unmarked or marked crosswalk Sidewalk, partially blocking sidewalk, or in front of driveway Within 3 feet of disabled sidewalk ramp #knw On diagnal lines next to disabled space Within 15 feet of a fire hydrant #knw Double parking On the wrong side of the street or freeway, except: 1) emergency 2) law enforcement officer 3) specificaly permitted stop.\nTo stop and park then, park off the pavement, stay with the car and lock the doors until help arrives; visibility is 200 feet in each direction required. 
#knw\nLights Flashing red: stop sign\u0026ndash;stop and go when its safe Flashing yellow: yield sign\u0026mdash;proceed with caution Flashing yellow arrow: unprotected turn Broken traffic lights become a four way stop sign.\nSigns Stop sign is stop; there should be a limit line; if no limit line, stop before intesection Yield sign is to yield; slow down Right of Way Without stop/yield signs Whomever gets to the intersection first has right of way T intersection without stop/yield signs The through road have right of way Stop signs Stop first, then follow right of way rules as if no intersection Turning left Right of way to anyone approaching that\u0026rsquo;s \u0026ldquo;close enough to be dangerous\u0026rdquo; Turning right Check for pedestrians crossing the street, and bikes and motors next to you Green light Pedestrians Divided highways Vehicles coming in the lane you are about to enter Entering traffic The traffic you are entering Roundabouts The logistics of using a roundabout\nSlow down Yield to traffic Watch for signs Travel in counter-clockwise direction, don\u0026rsquo;t stop or pass Signal when you change lanes or exit If you miss your exit, try again Choosing lane Rightmost for turning right Either lane (\u0026ldquo;middle\u0026rdquo;, if exists) for straight Innermost for left turn or u turn Pedestrians Pedestrians have right-of-way Pedestrian crossing need to cross first, you yield or slow to them Which means\u0026hellip;\nDo not pass a stopped vehicle Don\u0026rsquo;t drive on a sidewalk except to cross it or enter/exit it Don\u0026rsquo;t stop in a crosswalk If people make eyecontact, they are crossing the street Obey pedestrian\u0026rsquo;s signs Watch for seniors, people with disabilities, young children.\nCrosswalks Crosswalks are marked (but not all) School crossings have yellow lines Pedestrians have right of wall in all crosswalks Flashing light crosswalks exists to, just be prepared to stop regardless Blind White canes and guide dogs have 
absolute right of way Stop at all stop walks Don\u0026rsquo;t stop in the middle of stop walk Don\u0026rsquo;t give verbal directions to blind pedestrian Don\u0026rsquo;t turn right w/o looking for pedestrians Don\u0026rsquo;t honk at a blind person Don\u0026rsquo;t block sidewalk Pulling in cane + stepping away: you may go Mountain roads Uphill car has right of way Downhill car has more control backing up the hill Roadsharing Large cars Average passenter car at 55mph has 400 feet before stopping Large car takes 800 feet Don\u0026rsquo;t move in front of a large car and suddenly stop.\nLook at turn signals: large vehicles may swing their back, say, left in order to turn right.\nDon\u0026rsquo;t\nChange lanes in front of them to reach an exit or turn (tight spaces around large vehicles is dangerous) Drive next to them (unless passing); after you pass, move ahead of it Follow too closely: that\u0026rsquo;s tailgating. Give more space Underestimate the size and speed of the vehicle \u0026ldquo;If you can\u0026rsquo;t see a truck\u0026rsquo;s side mirrors, it can\u0026rsquo;t see you.\u0026rdquo;\nalways pass it on the left side\nBuses and rails when loading is happening without a safety zone, stop behind the nearest door Stopped busses can only be passed at 10mph Don\u0026rsquo;t pass on the left side, unless\u0026hellip; you are on a one-way street tracks are so close to the right you can\u0026rsquo;t pass on the right traffic officer directs you to Never turn in front of a light rail vehicle Check for traffic lights (light rails can interrupt them) Motocycles 4 second following distance Given a motocycle a full lane; its legal to share but its unsafe Don\u0026rsquo;t try to pass a motorcycle in the same lane When possible, move to one side of your lane Check for motocyclists Emergency vehicles Give them right of way: drive to the edge until they\u0026rsquo;ve passed \u0026hellip;except in intersections: never stop in an intersection (continue through and stop) Obey 
loudspeaker orders Illegal to follow 300 feet of any emergency vehicles with flashing siren Slow cars Slow down for them NEV LSV Like gold carts\nThey have max speed 25mph They can\u0026rsquo;t drive in roads with speed limit larger than 35 mph Bikes Front lamp with white light visible for 300 feet Rear red reflector (visible from 500 feet) White or yellow reflector on each pedal (visible for 200 feet) Travel lanes Must ride to the curb if slow, unless\nPassing in the same direction Preparing to turn left Avoiding a hazard/road condition Approaching right turn On a one way road with two or more lanes (if so, bikers may right next to left curb) Passing bikers 3 feet clearance\nSchool buses Yellow lights flashing is to slow Red lights flashing is to stop If you fail to stop, you can be fined up to $1,000 and driving maybe suspended for a year Workzone fines Traffic violations have fines of $1,000 or more Assulting a worker has a fine of $2,000 plus imprisonment for up to on year Some regions are double-fine zones Speed Limit \u0026ldquo;Basic speed law\u0026rdquo;: you may never drive faster than its safe.\n10mph to pas a roadcar\n15 mph in blind intersections (cannot see 100 in both directions when within 100 feet)\nif your view is blocked in a blind intersection, inch forward until you can see 15 mph also in some school, alleys (roads no wider 25 feet), 100 feet of railroad tracks if visiblity less then 400 feet\n25 mph when you are 500-1000 feet of a school, when crossing the street, residential\n55 mph on two lane undivided highway\nYou cannot block traffic flow\nDrive far-right lane of you are towing\nRailroad Look in both directions Except train anytime Don\u0026rsquo;t stop in traintracks Watch for other cars Stop between 15-50 feet from the neearest tracks Fines and Stuff Smoking with a minor: $100\nDumping animals: $1,000, six months in jail\nEvading law enforcement:\nstate prison up to 7 years, county jail for 1 year Fine between $2,000 and $10,000 Or both 
Evading law enforcement and commiting manslauter\nImprisonment for 4-10 years Speed content and reckless driving: fine and imprsionment\nTexting\nWear earplugs in bot hyears\nCarry anything that extends beyond the fenders on the left side, or more then 6 inches on the right side\nCargo more the 4 feet must display a 1 feet red or flourencesnt flag\nTransport animals unless secured\nAllow a person to be in a back of a pickup truck unless secured\nDrive a car with a video monitor except when it doesn\u0026rsquo;t face driver\nThrow a cig from the car\nCut signs that block the windshiled\nDon\u0026rsquo;t hang objects on the mirror\nDon\u0026rsquo;t sticker, unless\n7 inch square on lower corner of passengers or rear window 5 inch square on the lower corner of the driver window Side windows behind driver 5 inch located in the center uppermost portion Funeral pocessions have right of way\nPoints 36 month record Suspension when: 4 points in 12 months, 6 in 24, or 8 in 36 Once 18 months to earn back points via traffic school Best Practices Scan road 10-15 seconds ahead of you Don\u0026rsquo;t stare Don\u0026rsquo;t tailgate: 3 seconds between you and the car ahead passes Allow extra space when\u0026hellip; If you have a tailgator, (and move! 
if you can) The driver behind you wants to pass Slippery Following on icy or wet Towing a trailer Followiing a car that blocks you ahead Merging onto freeway Following Don\u0026rsquo;t stay in the blind spot Don\u0026rsquo;t driving alongside cars Make space when possible Keep space between you and parked cars Be careful when nearing motorcyclists and bicyclists At intersections Look both ways Look left first (vehicles coming from the left are closer) Look right Take one more look to the left 5-10mph on wet road, reduce speed by half on snow, tiny very slow on ice Don\u0026rsquo;t use breaks if starting to hydroplone If you can\u0026rsquo;t see farther than 100 feet, its unsafe to drive faster than 30mph Seat belts Click it or ticket Under 16 years old, you may also get ticket Child safety Under 2 years old: secure in a real facing child restraight system (unless child weighs more than 40 pounds or is more that 3 ft 4 inches taller)\nChilden under 8 years old, less than 4 feet 9 inches tall: secure in a front-facing restraight system\nCould use front seat if there\u0026rsquo;s no rear seat or if they are side facing jump seat\n8 years old or older, or 4 feet 9 inches tall: use seat belts\n6 y/o or younger unattended illegal to leave in car; supervision could be 12 year old.\nHot vehicle can kill\nEmergencies Skids Slippery surface Slowly remove foot from gas pedal Don\u0026rsquo;t use breaks Turn the steering wheel in the direction of the skid If your breaches get wet, dry them by pressing gas and brake at the same time.\nLock wheel Breaking too hard when going to fast: skid no matter steering wheel\nRemove foot from break Straighten front wheel If ABS not working, step on brake gradually until safe speed. 
If the brake petal sinks to the floor, bump the brakes.\nDriving off pavement Grip wheel slowly Remove your foot from gas Brake gently Check for traffic Steer back Accelerator mallfunction Shift to neutral Apply breakes Look for traffic Honk horn and emergency flashers Drive car off the road Turn of ignition Collision If collision causes more than $1000 in property damage, you msut report to DMV Driving is suspended for 4 years of no insurance Disabled Vehicle Safely pull over Exit on the right side Find assistance Return no vehicle Stay inside with your seat belt Uuse flashers Railroad If a train is coming, get out and run in a 45 degree away from the train and tracks. Dial 911 If train not coming, exit vehicle, dial emergency number on the railroad crossing box, and then call 911 DUI Don\u0026rsquo;t drink and drive Don\u0026rsquo;t take drugs Use any combination of drugs Illegal to drink alcohol or smoke or eat cannabis products while in a car, whether self or passenger. If you are carrying it, it must be full and unopened. If its open, keep it in the trunk.\nLimits 0.08% over 21 0.01% under 21 0.01% under DUI probation 0.04% if commercial 0.04% if driving for hire DUI Arrests Hold license for 30 days Hearing from 10 days DUI Convictions Completion of DUI program Install Ignition Interlock Device 6 months in jail $390-$1000 May inpound vehicle Carrying under 21 May not carry unless someone older Fine up to $1000 and impound for 30 days, suspencion for 1 year 0.01% or higher you have to complete program, 0.05% suspension ","html":"\u003cp\u003eGah I have to do this. Not for public consumption. 
California laws 2022 DL600 R7 2022.\u003c/p\u003e\n\u003ch2 id=\"consequences\"\u003eConsequences\u003c/h2\u003e\n\u003ch3 id=\"not-licensed\"\u003eNot licensed\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIf unlicensed person is drivnig your car, it maybe impounded for 30 days\u003c/li\u003e\n\u003cli\u003eHired to drive interstate commercially need to be older than 21, also need to be older than 21 to transport hazardous materials\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"class-c-license\"\u003eClass C License\u003c/h3\u003e\n\u003ch4 id=\"driving-knw\"\u003eDriving #knw\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eTwo axle vehicle with a GVWL of 26,000 lbs or less\u003c/li\u003e\n\u003cli\u003eThree axle vehicle weighing 6,000 lbs or less\u003c/li\u003e\n\u003cli\u003eHouse car \u0026lt; 40 feet or less\u003c/li\u003e\n\u003cli\u003eThree wheel motocycles\u003c/li\u003e\n\u003cli\u003eVanpool vehicle designed to carry between 10 and no more than 15 people\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"towing-knw\"\u003eTowing #knw\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSingle vehicle of 10,000 or less\u003c/li\u003e\n\u003cli\u003eVehicle weighing 4000 lbs or more unladen\n\u003cul\u003e\n\u003cli\u003eTrailer coach under 10,000 lbs\u003c/li\u003e\n\u003cli\u003eFifth wheel trailer exceeding 10,000 lbs but under 15,000 lbs, with endorsement\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"mor-ethings\"\u003eMor ethings\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eClass C drivers can\u0026rsquo;t tow more than one\u003c/li\u003e\n\u003cli\u003eMotor vehile weigning under 4000 lbs cannot tow more than 6000 lbs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"getting-in-trouble\"\u003eGetting in trouble\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGet a traffic ticket and fail to show up to court: suspend driving\u003c/li\u003e\n\u003cli\u003eOne at fault collision or one at fault traffic violation: may take 
action?\u003c/li\u003e\n\u003cli\u003eTwo of either at fault collision or violation conviction: no driving for 30 days unless accompanied by 25 year old adult\u003c/li\u003e\n\u003cli\u003eThree of \u0026ldquo;\u0026rdquo;: no driving for 6 months, on probation for a year.\u003c/li\u003e\n\u003cli\u003eDrugs or alcohol between 13-21: suspension for a year\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"minor-driving\"\u003eMinor driving\u003c/h2\u003e\n\u003cp\u003eNot sure if this applies\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ePractice for 50 hours, 10 hours at night #knw\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePass knowledge test\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePass driving test\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCannot drive between 11P and 5A during the first year #knw\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCannot drive with under 20 Y/O unless 25 Y/O licensed accompanied #knw\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eUnless\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMedical need with doctor\u0026rsquo;s note and end date\u003c/li\u003e\n\u003cli\u003eSchool and dean\u0026rsquo;s note\u003c/li\u003e\n\u003cli\u003eWork and employer\u0026rsquo;s note and employment status\u003c/li\u003e\n\u003cli\u003eFamily need and parent\u0026rsquo;s note\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMinors can\u0026rsquo;t use a phone while driving.\u003c/p\u003e\n\u003ch2 id=\"safe-car-knw\"\u003eSafe car #knw\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWorking driver\u0026rsquo;s window, brake lights, horn, parking brake, turn signals\u003c/li\u003e\n\u003cli\u003eSafe tire (1/32 inch tread)\u003c/li\u003e\n\u003cli\u003eFull windshield\u003c/li\u003e\n\u003cli\u003eTwo rear view mirrors, incl. 
one on left side\u003c/li\u003e\n\u003cli\u003eWorking seatbelts\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eCheck:\u003c/strong\u003e\u003c/strong\u003e clean windows and mirrors, adjust seat and mirrors, check tires.\u003c/p\u003e\n\u003ch2 id=\"safe-personage\"\u003eSafe personage\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eVision\u003c/li\u003e\n\u003cli\u003eHearing\u003c/li\u003e\n\u003cli\u003eNot tired\u003c/li\u003e\n\u003cli\u003eNot medicated\u003c/li\u003e\n\u003cli\u003eHealth: no\n\u003cul\u003e\n\u003cli\u003eLapses of conciseness\u003c/li\u003e\n\u003cli\u003eAD\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;related disorders\u0026rdquo; \u0026mdash; anything the doctor reports to DMV\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"steering\"\u003eSteering\u003c/h2\u003e\n\u003ch3 id=\"hand-to-hand\"\u003eHand to Hand\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ehands 9/3 or 8/4 oclock\u003c/li\u003e\n\u003cli\u003ePush and pull, hands stay where they are\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"hand-over-hand\"\u003eHand over hand\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStart 9/3 or 8/4\u003c/li\u003e\n\u003cli\u003eTurn, but leave wheel sliding under\u003c/li\u003e\n\u003cli\u003eSliding under hand reach over, pull the wheel up\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"one-hand\"\u003eOne-hand\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTurning or backing up to turn back\u003c/li\u003e\n\u003cli\u003eHand at 12 oclock\u003c/li\u003e\n\u003cli\u003eLimeted use\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"signaling\"\u003eSignaling\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eArm signals when lights are hard to see because of bright sun\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-19_15-43-18_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003cp\u003eMotorcyclists use these signals, and bikers point their hand straight up 
to turning direction\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"when-to-signal-knw\"\u003eWhen to signal #knw\u003c/h3\u003e\n\u003cp\u003eSignal when: turn, change lanes, slow down, stop.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e100 feet before turning\u003c/li\u003e\n\u003cli\u003eBefore every lane change: look over and check blind spot\u003c/li\u003e\n\u003cli\u003e5 seconds before lane change on highway\u003c/li\u003e\n\u003cli\u003ePulling next to or away curb\u003c/li\u003e\n\u003cli\u003eSignal even if no cars around you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"horning\"\u003eHorning\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;It is safer to slow down or stop instead of honking your horn.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"when-to-horn-knw\"\u003eWhen to horn #knw\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eAvoid collisions\u003c/li\u003e\n\u003cli\u003eAlert hazard\u003c/li\u003e\n\u003cli\u003eAlert oncoming traffic on narrow mountain roads when you cannot see at least 200 feet in front of vehicle\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDon\u0026rsquo;t use horn to move people along, or \u0026ldquo;express anger.\u0026rdquo; The more ya know.\u003c/p\u003e\n\u003ch2 id=\"headlights\"\u003eHeadlights\u003c/h2\u003e\n\u003cp\u003eThey are bright.\u003c/p\u003e\n\u003ch3 id=\"when-to-headlight-knw\"\u003eWhen to headlight #knw\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhen its \u003cstrong\u003e\u003cstrong\u003etoo dark to see\u003c/strong\u003e\u003c/strong\u003e: if you can\u0026rsquo;t see a person 1000 feet away\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eBeginning 30 minutes after sunset until 30 minutes before sunrise\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eAdverse weather\u003c/strong\u003e\u003c/strong\u003e: windshield wipers on = low-beam headlights 
on\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eClouds dust smoke\u003c/strong\u003e\u003c/strong\u003e or fog prevent seeing other cars\u003c/li\u003e\n\u003cli\u003eOn sunny days \u003cstrong\u003e\u003cstrong\u003eon country or mountain roads\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eWhen a \u003cstrong\u003e\u003cstrong\u003ewhite regulatory sign says so\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eTo help others see your car, when \u003cstrong\u003e\u003cstrong\u003esun is low on horizon\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"when-not-to-high-beam-headlight\"\u003eWhen not to high-beam headlight\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDim when 500 feet of car coming towards you or 300 feet of a car you are following\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"emergency-flashers\"\u003eEmergency flashers\u003c/h2\u003e\n\u003cp\u003eIf you can see a collision ahead, do:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTurn on flashers #knw\u003c/li\u003e\n\u003cli\u003eLightly tap brake pedal three/four times\u003c/li\u003e\n\u003cli\u003eUse hand signals\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-stop-in-a-middle-of-the-road-during-an-emergency-knw\"\u003eHow to stop in a middle of the road during an emergency #knw\u003c/h2\u003e\n\u003cp\u003eStart breaking early.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eGive drivers warning\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTurn on emergency flashers if you earn\u0026rsquo;t moving, or use turn signals\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePull off the road\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eStop not on the road or, if isn\u0026rsquo;t possible, stop where people can see\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDon\u0026rsquo;t stop just over a 
hill\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLift the hook to signal an emergency\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePlace emergency triangles 200-300 feet behind vehicle; use flares if needed but be careful b/c they may cause fire\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCall for roadside assistance\u003c/p\u003e\n\u003cp\u003e63, 92\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"lanes\"\u003eLanes!\u003c/h2\u003e\n\u003ch3 id=\"reading-em\"\u003eReading \u0026rsquo;em\u003c/h3\u003e\n\u003ch4 id=\"yellow-different-directions\"\u003eYellow: different directions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSingle yellow is the center of the road; cannot cross into oncoming traffic\u003c/li\u003e\n\u003cli\u003eDouble solid yellow line: not to be crossed\n\u003cul\u003e\n\u003cli\u003e\u0026hellip;except\n\u003cul\u003e\n\u003cli\u003ehov entrace lane which has a left entrance\u003c/li\u003e\n\u003cli\u003eInstructed to cross because the road is blocked\u003c/li\u003e\n\u003cli\u003eEntering or exiting a driveway, private road, or making a u-turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e2 double yellow line groups spaced 2 feet or more apart are considered a barrier; under no circumstance is to cross\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBroken yellow line: you may pass if the broken line is next to you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"white-same-directions\"\u003eWhite: same directions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSingle solid white line: traffic lanes in the same direction\u003c/li\u003e\n\u003cli\u003eDouble solid white lines: not to be crossed, regular use vs. 
preferential use lanes (carpool, etc.)\u003c/li\u003e\n\u003cli\u003eBroken white lines: separate roads with two or more lines in the same direction\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"white-triangles-yield-lines\"\u003eWhite triangles: yield lines\u003c/h4\u003e\n\u003cp\u003eA line where you should yield. Triangles point to the direction of oncoming traffic (\u0026ldquo;towards you.\u0026rdquo;).\u003c/p\u003e\n\u003ch3 id=\"choosing-em\"\u003eChoosing \u0026rsquo;em\u003c/h3\u003e\n\u003cp\u003eLeftmost lane is lane 1, rightmost is lane n\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eUse the left lane to pass or turn left\u003c/li\u003e\n\u003cli\u003eUse the right lane to enter or exit traffic\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"change-lanes-when\"\u003eChange lanes when\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eMoving from one lane to another\u003c/li\u003e\n\u003cli\u003eEntering freeway\u003c/li\u003e\n\u003cli\u003eExiting freeway\u003c/li\u003e\n\u003cli\u003eEntering the road from curb or shoulder\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"protocol-for-lane-change-knw\"\u003eProtocol for lane change #knw\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSignal\u003c/li\u003e\n\u003cli\u003eLook in all mirrors\u003c/li\u003e\n\u003cli\u003eCheck traffic beside and behind you\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eLook over solder in direction of desired lane change\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eCheck blind spots for other vehicles, motorcyclists, and bicycilsts\u003c/li\u003e\n\u003cli\u003eEnsure room\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"tips\"\u003eTips\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003estay in one lane\u003c/li\u003e\n\u003cli\u003edon\u0026rsquo;t weave\u003c/li\u003e\n\u003cli\u003eif you start a change, finish it\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"types-of-them\"\u003eTypes of 
them\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eLane closest to the center divider is the \u0026ldquo;passing lane\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eHOV lanes is for high occupancy\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"center-left-turn-lanes\"\u003eCenter left turn lanes\u003c/h4\u003e\n\u003cp\u003eThe center of some two-way streets has a left turn lane; marked on both sides by two painted lines. Inner line is broken and outer line is solid.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-21_23-52-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eYou may only drive 200 feet in the center left turn lane #knw\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eProtocol for using this lane\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLook for other vehicles coming towards you in the center left turn lane\u003c/li\u003e\n\u003cli\u003eSignal\u003c/li\u003e\n\u003cli\u003eLook over shoulder\u003c/li\u003e\n\u003cli\u003eMerge completely into the center left turn lane\u003c/li\u003e\n\u003cli\u003eTurn when its safe.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"turnouts\"\u003eTurnouts\u003c/h4\u003e\n\u003cp\u003eAreas or lanes for turning that are marked? 
Use when:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDriving slowly on a two-lane road where passing is unsafe, AND\u003c/li\u003e\n\u003cli\u003eThere are 5 or more vehicles following #knw\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"bike-lanes\"\u003eBike lanes\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBike lanes\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eBuffered bike lanes: uses chevrons or diagonals to buffer the bikes\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-21_23-55-41_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBike route: shared road markings to designate a preferred route\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eBike boulevard: bike travel on streets with cars\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSeperated bikeways: completely different\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eBikes share the road \u0026ldquo;sharrows!\u0026rdquo;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-21_23-57-32_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCannot drive in bike lane unless\u0026hellip;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eParking\u003c/li\u003e\n\u003cli\u003eEntering or leaving road\u003c/li\u003e\n\u003cli\u003eTurning (within 200 feet of intersection)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"turning\"\u003eTurning\u003c/h2\u003e\n\u003ch3 id=\"right\"\u003eRight\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDrive close to the edge\u003c/li\u003e\n\u003cli\u003eDrive in a bike lane, wait until about 200 feet to make turn #knw\u003c/li\u003e\n\u003cli\u003eWatch for everybody\u003c/li\u003e\n\u003cli\u003eSignal about 100 feet before #knw\u003c/li\u003e\n\u003cli\u003eLook over sholder\u003c/li\u003e\n\u003cli\u003eStop behind limit line (/before entering crosswalk or 
intersection)\u003c/li\u003e\n\u003cli\u003eLook both ways and turn when its safe; don\u0026rsquo;t turn into another lane\u003c/li\u003e\n\u003cli\u003eComplete turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDetails:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCan\u0026rsquo;t turn when red arrow, but you can turn against red light\u003c/li\u003e\n\u003cli\u003eYou could cross a bus lane to make a right turn, but you can\u0026rsquo;t drive in it\u003c/li\u003e\n\u003cli\u003eThere could be designated right turn lanes which let you make a \u0026ldquo;free right turn\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"left\"\u003eLeft\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDrive close to the center divider or left turn lane\u003c/li\u003e\n\u003cli\u003eSignal about 100 feed\u003c/li\u003e\n\u003cli\u003eLook over sholder\u003c/li\u003e\n\u003cli\u003eStop behind limit line (/before entering crosswalk or intersection)\u003c/li\u003e\n\u003cli\u003eLook left, right, then left\u003c/li\u003e\n\u003cli\u003eTurn\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDetails\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eonly can turn against light when single-lane-to-single-lane\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"u\"\u003eU\u003c/h3\u003e\n\u003ch4 id=\"conditions\"\u003eConditions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eAcross double-yellow line\u003c/li\u003e\n\u003cli\u003eIn a residential district\n\u003cul\u003e\n\u003cli\u003eNo cars for 200 feet\u003c/li\u003e\n\u003cli\u003eWhenever a sign or light protects against approachng cars\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eAt an intersection\u003c/li\u003e\n\u003cli\u003eOn a divided driveway, if opening provided\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"anticonditions\"\u003eAnticonditions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eWHen \u0026ldquo;no u-turn\u0026rdquo; is 
posted\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAt a railroad crossing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOn a divided highway if needed to cross things\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCannot see 200 feet in each direction\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eWhen other cars may hit you\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOn a one-way street\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIn front of a fire station\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-10-35_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003cp\u003eooo. scary\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIn business districts, including churches apartments and buildings (except for schools); turn only at an intersection or opening if allowed.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"merging\"\u003eMerging\u003c/h2\u003e\n\u003ch3 id=\"highways\"\u003eHighways\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eEnter at or near traffic speed\u003c/li\u003e\n\u003cli\u003eMerge onto highway when safe to do so, don\u0026rsquo;t stop unless needed\u003c/li\u003e\n\u003cli\u003eMerge into a space large enough for your car to join the lane\u003c/li\u003e\n\u003cli\u003eUse mirrors and turn signals\u003c/li\u003e\n\u003cli\u003eWatch for cars\u003c/li\u003e\n\u003cli\u003eLeave \u003cstrong\u003e\u003cstrong\u003ethree seconds of space\u003c/strong\u003e\u003c/strong\u003e (\u0026ldquo;three second rule\u0026rdquo;) between you and the car in front of you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exiting\"\u003eExiting\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eKnow the exist\u003c/li\u003e\n\u003cli\u003eSignal, look over sholder, etc.\u003c/li\u003e\n\u003cli\u003eChange lanes\u003c/li\u003e\n\u003cli\u003eSignal 
intention for 5 seconds\u003c/li\u003e\n\u003cli\u003eLeave\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"space-for-entering\"\u003eSpace for entering\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eYou will need about a half a block on city streets\u003c/li\u003e\n\u003cli\u003eOr, a full block on the highway\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"passing\"\u003ePassing\u003c/h2\u003e\n\u003cp\u003eIf anybody wants to pass, let them pass\u003c/p\u003e\n\u003ch3 id=\"space-for-passing\"\u003eSpace for passing\u003c/h3\u003e\n\u003cp\u003eDon\u0026rsquo;t pass if\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eYou are approaching a hill and cannot see oncoming traffic\u003c/li\u003e\n\u003cli\u003eWithin 100 feet of an intersection #knw\u003c/li\u003e\n\u003cli\u003eAt crossroads or driveways\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"condition-of-passing\"\u003eCondition of Passing\u003c/h3\u003e\n\u003cp\u003eYou pass on the left, unless\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eOpen highway with two or more lanes going in your direction\u003c/li\u003e\n\u003cli\u003eDriver ahead of you is turning left, and you don\u0026rsquo;t have to drive off the road to pass\u003c/li\u003e\n\u003cli\u003eYou are on a one-way street\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eNever drive off the road to pass.\u003c/p\u003e\n\u003ch3 id=\"protocol-for-passing\"\u003eProtocol for passing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSignal\u003c/li\u003e\n\u003cli\u003eShoulder\u003c/li\u003e\n\u003cli\u003eTurn\u003c/li\u003e\n\u003cli\u003eSpeed up and pass\u003c/li\u003e\n\u003cli\u003eRetturn\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"parking\"\u003eParking\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFind a space #knw three feet longer that your vehicle\u003c/li\u003e\n\u003cli\u003eTurn on turn signal\u003c/li\u003e\n\u003cli\u003ePull up alongside the vehicle in front; leave about two feet between you and the car to your right. 
Stop when you rear bumper is aligned with the front of the space\u003c/li\u003e\n\u003cli\u003eCheck rearview mirror, look over sholder, keep foot on break and reverse\u003c/li\u003e\n\u003cli\u003eBack up, 45%\u003c/li\u003e\n\u003cli\u003eWhen rear view is within 18 inches from the curb, straighten out\u003c/li\u003e\n\u003cli\u003eSet parking break. Leave when safe.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"parking-on-a-hill\"\u003eParking on a hill\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Your car may roll when you breaks fail.\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eDownhill\u003c/strong\u003e\u003c/strong\u003e: wheels towards the curb\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eUphill\u003c/strong\u003e\u003c/strong\u003e: wheels away from curb\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eNo curb\u003c/strong\u003e\u003c/strong\u003e: turn towards the sholder of the road\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;towards the sholder, except when uphill with curb\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"colors\"\u003eColors\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhite curb: stop for picking up or dropping off passengers or mails\u003c/li\u003e\n\u003cli\u003eGreen curb: park for limited time\u003c/li\u003e\n\u003cli\u003eYellow: load and unload, staying in the vehicle\u003c/li\u003e\n\u003cli\u003eRed: no stopping\u003c/li\u003e\n\u003cli\u003eBlue: disabled\u0026mdash;fine of $1,000, 6 months in county jail #knw\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"can-t-park-when\"\u003eCan\u0026rsquo;t park when\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eNo marking\u003c/li\u003e\n\u003cli\u003eUnmarked or marked crosswalk\u003c/li\u003e\n\u003cli\u003eSidewalk, partially blocking sidewalk, or in front of driveway\u003c/li\u003e\n\u003cli\u003eWithin 3 feet of disabled sidewalk ramp #knw\u003c/li\u003e\n\u003cli\u003eOn diagnal lines next 
to disabled space\u003c/li\u003e\n\u003cli\u003eWithin 15 feet of a fire hydrant #knw\u003c/li\u003e\n\u003cli\u003eDouble parking\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOn the wrong side of the street or freeway, except: 1) emergency 2) law enforcement officer 3) specificaly permitted stop.\u003c/p\u003e\n\u003cp\u003eTo stop and park then, park off the pavement, stay with the car and lock the doors until help arrives; visibility is 200 feet in each direction required. #knw\u003c/p\u003e\n\u003ch2 id=\"lights\"\u003eLights\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFlashing red: stop sign\u0026ndash;stop and go when its safe\u003c/li\u003e\n\u003cli\u003eFlashing yellow: yield sign\u0026mdash;proceed with caution\u003c/li\u003e\n\u003cli\u003eFlashing yellow arrow: unprotected turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBroken traffic lights become a four way stop sign.\u003c/p\u003e\n\u003ch2 id=\"signs\"\u003eSigns\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eStop sign is stop; there should be a limit line; if no limit line, stop before intesection\u003c/li\u003e\n\u003cli\u003eYield sign is to yield; slow down\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-41-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-42-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-42-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-43-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"right-of-way\"\u003eRight of Way\u003c/h2\u003e\n\u003ch3 id=\"without-stop-yield-signs\"\u003eWithout stop/yield signs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhomever gets to the intersection first has right of way\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"t-intersection-without-stop-yield-signs\"\u003eT intersection without stop/yield 
signs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe through road have right of way\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"stop-signs\"\u003eStop signs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStop first, then follow right of way rules as if no intersection\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"turning-left\"\u003eTurning left\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRight of way to anyone approaching that\u0026rsquo;s \u0026ldquo;close enough to be dangerous\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"turning-right\"\u003eTurning right\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCheck for pedestrians crossing the street, and bikes and motors next to you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"green-light\"\u003eGreen light\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePedestrians\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"divided-highways\"\u003eDivided highways\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eVehicles coming in the lane you are about to enter\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"entering-traffic\"\u003eEntering traffic\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe traffic you are entering\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"roundabouts\"\u003eRoundabouts\u003c/h2\u003e\n\u003cp\u003eThe logistics of using a roundabout\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSlow down\u003c/li\u003e\n\u003cli\u003eYield to traffic\u003c/li\u003e\n\u003cli\u003eWatch for signs\u003c/li\u003e\n\u003cli\u003eTravel in counter-clockwise direction, don\u0026rsquo;t stop or pass\u003c/li\u003e\n\u003cli\u003eSignal when you change lanes or exit\u003c/li\u003e\n\u003cli\u003eIf you miss your exit, try again\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"choosing-lane\"\u003eChoosing lane\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRightmost for turning right\u003c/li\u003e\n\u003cli\u003eEither lane (\u0026ldquo;middle\u0026rdquo;, if exists) for 
straight\u003c/li\u003e\n\u003cli\u003eInnermost for left turn or u turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pedestrians\"\u003ePedestrians\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePedestrians have right-of-way\u003c/li\u003e\n\u003cli\u003ePedestrian crossing need to cross first, you yield or slow to them\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhich means\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDo not pass a stopped vehicle\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t drive on a sidewalk except to cross it or enter/exit it\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stop in a crosswalk\u003c/li\u003e\n\u003cli\u003eIf people make eyecontact, they are crossing the street\u003c/li\u003e\n\u003cli\u003eObey pedestrian\u0026rsquo;s signs\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWatch for seniors, people with disabilities, young children.\u003c/p\u003e\n\u003ch3 id=\"crosswalks\"\u003eCrosswalks\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCrosswalks are marked (but not all)\u003c/li\u003e\n\u003cli\u003eSchool crossings have yellow lines\u003c/li\u003e\n\u003cli\u003ePedestrians have right of wall in all crosswalks\u003c/li\u003e\n\u003cli\u003eFlashing light crosswalks exists to, just be prepared to stop regardless\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"blind\"\u003eBlind\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhite canes and guide dogs have absolute right of way\n\u003cul\u003e\n\u003cli\u003eStop at all stop walks\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stop in the middle of stop walk\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t give verbal directions to blind pedestrian\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t turn right w/o looking for pedestrians\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t honk at a blind person\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t block sidewalk\u003c/li\u003e\n\u003cli\u003ePulling in cane + stepping away: you may 
go\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mountain-roads\"\u003eMountain roads\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUphill car has right of way\u003c/li\u003e\n\u003cli\u003eDownhill car has more control backing up the hill\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"roadsharing\"\u003eRoadsharing\u003c/h2\u003e\n\u003ch3 id=\"large-cars\"\u003eLarge cars\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eAverage passenter car at 55mph has 400 feet before stopping\u003c/li\u003e\n\u003cli\u003eLarge car takes 800 feet\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDon\u0026rsquo;t move in front of a large car and suddenly stop.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-23_15-53-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLook at turn signals: large vehicles may swing their back, say, left in order to turn right.\u003c/p\u003e\n\u003cp\u003eDon\u0026rsquo;t\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eChange lanes in front of them to reach an exit or turn (tight spaces around large vehicles is dangerous)\u003c/li\u003e\n\u003cli\u003eDrive next to them (unless passing); after you pass, move ahead of it\u003c/li\u003e\n\u003cli\u003eFollow too closely: that\u0026rsquo;s tailgating. 
Give more space\u003c/li\u003e\n\u003cli\u003eUnderestimate the size and speed of the vehicle\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;If you can\u0026rsquo;t see a truck\u0026rsquo;s side mirrors, it can\u0026rsquo;t see you.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ealways pass it on the left side\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"buses-and-rails\"\u003eBuses and rails\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewhen loading is happening without a safety zone, stop behind the nearest door\u003c/li\u003e\n\u003cli\u003eStopped busses can only be passed at 10mph\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t pass on the left side, unless\u0026hellip;\n\u003cul\u003e\n\u003cli\u003eyou are on a one-way street\u003c/li\u003e\n\u003cli\u003etracks are so close to the right you can\u0026rsquo;t pass on the right\u003c/li\u003e\n\u003cli\u003etraffic officer directs you to\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever turn in front of a light rail vehicle\u003c/li\u003e\n\u003cli\u003eCheck for traffic lights (light rails can interrupt them)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"motocycles\"\u003eMotocycles\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e4 second following distance\u003c/li\u003e\n\u003cli\u003eGiven a motocycle a full lane; its legal to share but its unsafe\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t try to pass a motorcycle in the same lane\u003c/li\u003e\n\u003cli\u003eWhen possible, move to one side of your lane\u003c/li\u003e\n\u003cli\u003eCheck for motocyclists\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"emergency-vehicles\"\u003eEmergency vehicles\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGive them right of way: drive to the edge until they\u0026rsquo;ve passed\u003c/li\u003e\n\u003cli\u003e\u0026hellip;except in intersections: never stop in an intersection (continue through and stop)\u003c/li\u003e\n\u003cli\u003eObey 
loudspeaker orders\u003c/li\u003e\n\u003cli\u003eIllegal to follow 300 feet of any emergency vehicles with flashing siren\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"slow-cars\"\u003eSlow cars\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSlow down for them\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"nev-lsv\"\u003eNEV LSV\u003c/h3\u003e\n\u003cp\u003eLike gold carts\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThey have max speed 25mph\u003c/li\u003e\n\u003cli\u003eThey can\u0026rsquo;t drive in roads with speed limit larger than 35 mph\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bikes\"\u003eBikes\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFront lamp with white light visible for 300 feet\u003c/li\u003e\n\u003cli\u003eRear red reflector (visible from 500 feet)\u003c/li\u003e\n\u003cli\u003eWhite or yellow reflector on each pedal (visible for 200 feet)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"travel-lanes\"\u003eTravel lanes\u003c/h4\u003e\n\u003cp\u003eMust ride to the curb if slow, unless\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ePassing in the same direction\u003c/li\u003e\n\u003cli\u003ePreparing to turn left\u003c/li\u003e\n\u003cli\u003eAvoiding a hazard/road condition\u003c/li\u003e\n\u003cli\u003eApproaching right turn\u003c/li\u003e\n\u003cli\u003eOn a one way road with two or more lanes (if so, bikers may right next to left curb)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"passing-bikers\"\u003ePassing bikers\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-23_16-21-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e3 feet clearance\u003c/p\u003e\n\u003ch4 id=\"school-buses\"\u003eSchool buses\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eYellow lights flashing is to slow\u003c/li\u003e\n\u003cli\u003eRed lights flashing is to stop\u003c/li\u003e\n\u003cli\u003eIf you fail to stop, you can be fined up to $1,000 and driving maybe suspended for a year\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"workzone-fines\"\u003eWorkzone fines\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTraffic violations have fines of $1,000 or more\u003c/li\u003e\n\u003cli\u003eAssulting a worker has a fine of $2,000 plus imprisonment for up to on year\u003c/li\u003e\n\u003cli\u003eSome regions are double-fine zones\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"speed-limit\"\u003eSpeed Limit\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Basic speed law\u0026rdquo;: you may never drive faster than its safe.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e10mph to pas a roadcar\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e15 mph in blind intersections (cannot see 100 in both directions when within 100 feet)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif your view is blocked in a blind intersection, inch forward until you can see\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e15 mph also in some school, alleys (roads no wider 25 feet), 100 feet of railroad tracks if visiblity less then 400 feet\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e25 mph when you are 500-1000 feet of a school, when crossing the street, residential\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e55 mph on two lane undivided highway\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eYou cannot block traffic flow\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDrive far-right lane of you are towing\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"railroad\"\u003eRailroad\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLook in both directions\u003c/li\u003e\n\u003cli\u003eExcept train anytime\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stop in traintracks\u003c/li\u003e\n\u003cli\u003eWatch for other cars\u003c/li\u003e\n\u003cli\u003eStop between 15-50 feet from the neearest tracks\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fines-and-stuff\"\u003eFines and 
Stuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSmoking with a minor: $100\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDumping animals: $1,000, six months in jail\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEvading law enforcement:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estate prison up to 7 years, county jail for 1 year\u003c/li\u003e\n\u003cli\u003eFine between $2,000 and $10,000\u003c/li\u003e\n\u003cli\u003eOr both\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEvading law enforcement and commiting manslauter\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eImprisonment for 4-10 years\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpeed content and reckless driving: fine and imprsionment\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTexting\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWear earplugs in bot hyears\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCarry anything that extends beyond the fenders on the left side, or more then 6 inches on the right side\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCargo more the 4 feet must display a 1 feet red or flourencesnt flag\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTransport animals unless secured\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAllow a person to be in a back of a pickup truck unless secured\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDrive a car with a video monitor except when it doesn\u0026rsquo;t face driver\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eThrow a cig from the car\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCut signs that block the windshiled\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDon\u0026rsquo;t hang objects on the mirror\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDon\u0026rsquo;t 
sticker, unless\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e7 inch square on lower corner of passengers or rear window\u003c/li\u003e\n\u003cli\u003e5 inch square on the lower corner of the driver window\u003c/li\u003e\n\u003cli\u003eSide windows behind driver\u003c/li\u003e\n\u003cli\u003e5 inch located in the center uppermost portion\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eFuneral pocessions have right of way\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"points\"\u003ePoints\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e36 month record\u003c/li\u003e\n\u003cli\u003eSuspension when: 4 points in 12 months, 6 in 24, or 8 in 36\u003c/li\u003e\n\u003cli\u003eOnce 18 months to earn back points via traffic school\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"best-practices\"\u003eBest Practices\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eScan road 10-15 seconds ahead of you\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stare\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t tailgate: 3 seconds between you and the car ahead passes\n\u003cul\u003e\n\u003cli\u003eAllow extra space when\u0026hellip;\n\u003cul\u003e\n\u003cli\u003eIf you have a tailgator, (and move! 
if you can)\u003c/li\u003e\n\u003cli\u003eThe driver behind you wants to pass\u003c/li\u003e\n\u003cli\u003eSlippery\u003c/li\u003e\n\u003cli\u003eFollowing on icy or wet\u003c/li\u003e\n\u003cli\u003eTowing a trailer\u003c/li\u003e\n\u003cli\u003eFollowiing a car that blocks you ahead\u003c/li\u003e\n\u003cli\u003eMerging onto freeway\u003c/li\u003e\n\u003cli\u003eFollowing\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stay in the blind spot\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t driving alongside cars\u003c/li\u003e\n\u003cli\u003eMake space when possible\u003c/li\u003e\n\u003cli\u003eKeep space between you and parked cars\u003c/li\u003e\n\u003cli\u003eBe careful when nearing motorcyclists and bicyclists\u003c/li\u003e\n\u003cli\u003eAt intersections\n\u003cul\u003e\n\u003cli\u003eLook both ways\u003c/li\u003e\n\u003cli\u003eLook left first (vehicles coming from the left are closer)\u003c/li\u003e\n\u003cli\u003eLook right\u003c/li\u003e\n\u003cli\u003eTake one more look to the left\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e5-10mph on wet road, reduce speed by half on snow, tiny very slow on ice\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t use breaks if starting to hydroplone\u003c/li\u003e\n\u003cli\u003eIf you can\u0026rsquo;t see farther than 100 feet, its unsafe to drive faster than 30mph\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"seat-belts\"\u003eSeat belts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eClick it or ticket\u003c/li\u003e\n\u003cli\u003eUnder 16 years old, you may also get ticket\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"child-safety\"\u003eChild safety\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eUnder 2 years old: secure in a real facing child restraight system (unless child weighs more than 40 pounds or is more that 3 ft 4 inches taller)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eChilden under 8 
years old, less than 4 feet 9 inches tall: secure in a front-facing restraight system\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCould use front seat if there\u0026rsquo;s no rear seat or if they are side facing jump seat\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e8 years old or older, or 4 feet 9 inches tall: use seat belts\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e6 y/o or younger unattended illegal to leave in car; supervision could be 12 year old.\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHot vehicle can kill\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"emergencies\"\u003eEmergencies\u003c/h2\u003e\n\u003ch3 id=\"skids\"\u003eSkids\u003c/h3\u003e\n\u003ch4 id=\"slippery-surface\"\u003eSlippery surface\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSlowly remove foot from gas pedal\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t use breaks\u003c/li\u003e\n\u003cli\u003eTurn the steering wheel in the direction of the skid\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf your breaches get wet, dry them by pressing gas and brake at the same time.\u003c/p\u003e\n\u003ch4 id=\"lock-wheel\"\u003eLock wheel\u003c/h4\u003e\n\u003cp\u003eBreaking too hard when going to fast: skid no matter steering wheel\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRemove foot from break\u003c/li\u003e\n\u003cli\u003eStraighten front wheel\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf ABS not working, step on brake gradually until safe speed. 
If the brake petal sinks to the floor, bump the brakes.\u003c/p\u003e\n\u003ch3 id=\"driving-off-pavement\"\u003eDriving off pavement\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGrip wheel slowly\u003c/li\u003e\n\u003cli\u003eRemove your foot from gas\u003c/li\u003e\n\u003cli\u003eBrake gently\u003c/li\u003e\n\u003cli\u003eCheck for traffic\u003c/li\u003e\n\u003cli\u003eSteer back\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"accelerator-mallfunction\"\u003eAccelerator mallfunction\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eShift to neutral\u003c/li\u003e\n\u003cli\u003eApply breakes\u003c/li\u003e\n\u003cli\u003eLook for traffic\u003c/li\u003e\n\u003cli\u003eHonk horn and emergency flashers\u003c/li\u003e\n\u003cli\u003eDrive car off the road\u003c/li\u003e\n\u003cli\u003eTurn of ignition\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"collision\"\u003eCollision\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIf collision causes more than $1000 in property damage, you msut report to DMV\u003c/li\u003e\n\u003cli\u003eDriving is suspended for 4 years of no insurance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"disabled-vehicle\"\u003eDisabled Vehicle\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSafely pull over\u003c/li\u003e\n\u003cli\u003eExit on the right side\u003c/li\u003e\n\u003cli\u003eFind assistance\u003c/li\u003e\n\u003cli\u003eReturn no vehicle\u003c/li\u003e\n\u003cli\u003eStay inside with your seat belt\u003c/li\u003e\n\u003cli\u003eUuse flashers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"railroad\"\u003eRailroad\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIf a train is coming, get out and run in a 45 degree away from the train and tracks. 
Dial 911\u003c/li\u003e\n\u003cli\u003eIf train not coming, exit vehicle, dial emergency number on the railroad crossing box, and then call 911\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dui\"\u003eDUI\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDon\u0026rsquo;t drink and drive\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t take drugs\u003c/li\u003e\n\u003cli\u003eUse any combination of drugs\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIllegal to drink alcohol or smoke or eat cannabis products while in a car, whether self or passenger. If you are carrying it, it must be full and unopened. If its open, keep it in the trunk.\u003c/p\u003e\n\u003ch3 id=\"limits\"\u003eLimits\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e0.08% over 21\u003c/li\u003e\n\u003cli\u003e0.01% under 21\u003c/li\u003e\n\u003cli\u003e0.01% under DUI probation\u003c/li\u003e\n\u003cli\u003e0.04% if commercial\u003c/li\u003e\n\u003cli\u003e0.04% if driving for hire\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dui-arrests\"\u003eDUI Arrests\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eHold license for 30 days\u003c/li\u003e\n\u003cli\u003eHearing from 10 days\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dui-convictions\"\u003eDUI Convictions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCompletion of DUI program\u003c/li\u003e\n\u003cli\u003eInstall Ignition Interlock Device\u003c/li\u003e\n\u003cli\u003e6 months in jail\u003c/li\u003e\n\u003cli\u003e$390-$1000\u003c/li\u003e\n\u003cli\u003eMay inpound vehicle\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"carrying-under-21\"\u003eCarrying under 21\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eMay not carry unless someone older\u003c/li\u003e\n\u003cli\u003eFine up to $1000 and impound for 30 days, suspencion for 1 year\u003c/li\u003e\n\u003cli\u003e0.01% or higher you have to complete program, 0.05% 
suspension\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdriving/","tags":null,"title":"Driving"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdriving_practice/","tags":null,"title":"Driving Practice"},{"categories":null,"contents":"Drug Resistance is the process of developing resistance to drugs after some time of use\noccurrence of Drug Resistance Drug Resistance occurs when there\u0026rsquo;s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e is the process of developing resistance to drugs after some time of use\u003c/p\u003e\n\u003ch2 id=\"occurrence-of-drug-resistance--kbhdrug-resistance-dot-md\"\u003eoccurrence of \u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e occurs when there\u0026rsquo;s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdrug_resistance/","tags":null,"title":"Drug Resistance"},{"categories":null,"contents":"The dual space of \\(V\\), named \\(V\u0026rsquo;\\), is the vector space formed by linear functionals on \\(V\\) (because recall set of linear maps between two vector spaces form a vector space).\nconstituents A vector space \\(V\\)\nrequirements \\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\) , and its a vector space.\nadditional information dimension of dual space is equivalent to the original space \\begin{equation} \\dim V\u0026rsquo; = \\dim V \\end{equation}\nProof:\nBecause \\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\), and \\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\). 
Now, \\(\\dim V\u0026rsquo; = \\dim \\mathcal{L}(V,\\mathbb{F}) = (\\dim V)(\\dim \\mathbb{F}) = \\dim V \\cdot 1 = \\dim V\\).\ndual basis Let \\(v_1, \u0026hellip;, v_{n}\\) be a basis of \\(V\\), then, we can construct a basis of \\(V\u0026rsquo;\\) with linear functionals \\(\\varphi_{1}, \u0026hellip;, \\varphi_{n}\\):\n\\begin{equation} \\varphi_{j}(v_{k}) = \\begin{cases} 1, if\\ k=j \\\\ 0, if\\ k \\neq j \\end{cases} \\end{equation}\nNow, we can show that \\(\\varphi_{j}\\) are indeed linear functionals by basis of domain: we defined its behavior of each \\(\\varphi_{j}\\) based on where it sends each \\(v_{j}\\) (i.e. the basis of \\(V\\), the domain of elements in \\(V\u0026rsquo;\\)) into values in \\(\\mathbb{F}\\) (i.e. \\(1\\) or \\(0\\)).\nWe can now show that these \\(\\varphi_{j}\\) is indeed a basis of \\(V\u0026rsquo;\\) by only showing that it is linearly independent because we have already a list of \\(n\\) \\(\\varphi_{j}\\) elements (i.e. \\(\\dim V\u0026rsquo;=\\dim V = n\\) number of \\(\\varphi_{j}\\)), and linearly independent list of length dim V are a basis of V.\n","html":"\u003cp\u003eThe dual space of \\(V\\), named \\(V\u0026rsquo;\\), is the vector space formed by \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003es on \\(V\\) (because recall \u003ca href=\"/posts/kbhlinear_map/#addition-and-scalar-multiplication-on-mathcal-l--v-w\"\u003eset of linear maps between two vector spaces form a vector space\u003c/a\u003e).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\) , and its a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional 
information\u003c/h2\u003e\n\u003ch3 id=\"dimension-of-dual-space-is-equivalent-to-the-original-space\"\u003edimension of dual space is equivalent to the original space\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V\u0026rsquo; = \\dim V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhisomorphism/#dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/a\u003e, and \\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\). Now, \\(\\dim V\u0026rsquo; = \\dim \\mathcal{L}(V,\\mathbb{F}) = (\\dim V)(\\dim \\mathbb{F}) = \\dim V \\cdot 1 = \\dim V\\).\u003c/p\u003e\n\u003ch3 id=\"dual-basis\"\u003edual basis\u003c/h3\u003e\n\u003cp\u003eLet \\(v_1, \u0026hellip;, v_{n}\\) be a basis of \\(V\\), then, we can construct a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\u0026rsquo;\\) with \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003es \\(\\varphi_{1}, \u0026hellip;, \\varphi_{n}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi_{j}(v_{k}) =\n\\begin{cases}\n1, if\\ k=j \\\\\n0, if\\ k \\neq j\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we can show that \\(\\varphi_{j}\\) are indeed \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003es by \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e: we defined its behavior of each \\(\\varphi_{j}\\) based on where it sends each \\(v_{j}\\) (i.e. the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), the domain of elements in \\(V\u0026rsquo;\\)) into \u003cem\u003evalues\u003c/em\u003e in \\(\\mathbb{F}\\) (i.e. 
\\(1\\) or \\(0\\)).\u003c/p\u003e\n\u003cp\u003eWe can now show that these \\(\\varphi_{j}\\) is indeed a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\u0026rsquo;\\) by only showing that it is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e because we have already a list of \\(n\\) \\(\\varphi_{j}\\) elements (i.e. \\(\\dim V\u0026rsquo;=\\dim V = n\\) number of \\(\\varphi_{j}\\)), and \u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdual_space/","tags":null,"title":"dual space"},{"categories":null,"contents":"Dup15q Syndrome is an autistic syndrome associated with a gain of variant function in UBE3A. It is the opposite of Angelman Syndrome, which is a loss of function result on UBE3A.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdup15q/\"\u003eDup15q Syndrome\u003c/a\u003e is an autistic syndrome associated with a gain of variant function in \u003ca href=\"\"\u003eUBE3A.\u003c/a\u003e It is the opposite of \u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e, which is a loss of function result on UBE3A.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdup15q/","tags":null,"title":"Dup15q Syndrome"},{"categories":null,"contents":"dynamic programming is a three-step algorithm to tackle large, multi-step problems; high level idea: guessing + caching + recursion.\ndynamic programming can sometimes not be good enough, and it doesn\u0026rsquo;t really give us fast enough to get what we need to use. 
That\u0026rsquo;s when we need to deal with relaxation, or possibly greedy programming.\nmain steps of dynamic programming Break a hard problem into sub-problems Guess what sub-problem to solve Solve the sub-problem and store the solution Repeat #2 and #3 Combine sub-problem solutions to solve the hard problem analyzing runtime of dynamic programming To analyze runtime of dynamic programming problems, you ask:\nHow many sub-problems are there? How long does it take to solve each sub-problem? How long does it take to combine sub-problems? fibonacchi numbers: dynamic programming here\u0026rsquo;s an example top-down dynamic programming problem:\nThere are \\(n\\) sub-problems: \\(F_1, F_2, \\ldots, F_{n-1}\\). Solve a sub-problem, then store the solution \\(F_{n-1} = F_{n-2}+F_{n-3}\\) Continue until \\(F_1 =1\\). Now, we can recurs back up (popping the call stack) and cache all calculated results So then we can just look up any \\(F_k\\). shortest path: dynamic programming here\u0026rsquo;s a graph! how do we get to node \\(6\\)?\nGuess that the shortest path goes through 10 Go recursively until you get to root, cache the solution Do it again until you got to all subproblems Look up cached result ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e is a three-step algorithm to tackle large, multi-step problems; high level idea: guessing + caching + recursion.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e can sometimes not be good enough, and it doesn\u0026rsquo;t really give us fast enough to get what we need to use. 
That\u0026rsquo;s when we need to deal with \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e, or possibly \u003ca href=\"/posts/kbhgreedy_programming/\"\u003egreedy programming\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"main-steps-of-dynamic-programming\"\u003emain steps of dynamic programming\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eBreak a hard problem into sub-problems\u003c/li\u003e\n\u003cli\u003eGuess what sub-problem to solve\u003c/li\u003e\n\u003cli\u003eSolve the sub-problem and store the solution\u003c/li\u003e\n\u003cli\u003eRepeat #2 and #3\u003c/li\u003e\n\u003cli\u003eCombine sub-problem solutions to solve the hard problem\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"analyzing-runtime-of-dynamic-programming\"\u003eanalyzing runtime of dynamic programming\u003c/h2\u003e\n\u003cp\u003eTo analyze runtime of \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e problems, you ask:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eHow many sub-problems are there?\u003c/li\u003e\n\u003cli\u003eHow long does it take to solve each sub-problem?\u003c/li\u003e\n\u003cli\u003eHow long does it take to combine sub-problems?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fibonacchi-numbers-dynamic-programming\"\u003efibonacchi numbers: dynamic programming\u003c/h2\u003e\n\u003cp\u003ehere\u0026rsquo;s an example top-down \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e problem:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThere are \\(n\\) sub-problems: \\(F_1, F_2, \\ldots, F_{n-1}\\).\u003c/li\u003e\n\u003cli\u003eSolve a sub-problem, then store the solution\n\u003col\u003e\n\u003cli\u003e\\(F_{n-1} = F_{n-2}+F_{n-3}\\)\u003c/li\u003e\n\u003cli\u003eContinue until \\(F_1 =1\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eNow, we can recurs back up (popping the call stack) and cache all calculated results\u003c/li\u003e\n\u003cli\u003eSo 
then we can just look up any \\(F_k\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"shortest-path-dynamic-programming\"\u003eshortest path: dynamic programming\u003c/h2\u003e\n\u003cp\u003ehere\u0026rsquo;s a graph! how do we get to node \\(6\\)?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-05-02_10-28-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eGuess that the shortest path goes through 10\u003c/li\u003e\n\u003cli\u003eGo recursively until you get to root, cache the solution\u003c/li\u003e\n\u003cli\u003eDo it again until you got to all subproblems\u003c/li\u003e\n\u003cli\u003eLook up cached result\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdynamic_programming/","tags":null,"title":"dynamic programming"},{"categories":null,"contents":"Capacitor charging:\n\\begin{equation} Q = Q_0 (1-e^{-\\frac{t}{RC}}) \\end{equation}\nwhere \\(Q\\) is capacitor change at time \\(t\\), and \\(Q_0\\) initial change, and \\(RC\\) the resistance and capacitance.\nWhere, \\(RC\\) is called the \u0026ldquo;time constant\u0026rdquo;.\n\\begin{equation} I = \\frac{V}{R} (e^{-\\frac{t}{RC}}) \\end{equation}\nNote! these are inverse relationships: as a capacitor CHARGE, the current DROPS.\n","html":"\u003cp\u003eCapacitor charging:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ = Q_0 (1-e^{-\\frac{t}{RC}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(Q\\) is capacitor change at time \\(t\\), and \\(Q_0\\) initial change, and \\(RC\\) the \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e and \u003ca href=\"/posts/kbhcapacitance/\"\u003ecapacitance\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhere, \\(RC\\) is called the \u0026ldquo;time constant\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = \\frac{V}{R} (e^{-\\frac{t}{RC}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote! 
these are \u003cem\u003einverse\u003c/em\u003e relationships: as a capacitor CHARGE, the current DROPS.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdynamic_rc_circuts/","tags":null,"title":"Dynamic RC Circuits"},{"categories":null,"contents":"Dyson\u0026rsquo;s Model of Life is a theory of origin of life:\ncells form as machines that perform tasks genes show up later as parasites, eventually forming symbiosis with cells Read: and so, we can essentially ditch trying to find things characteristics of \u0026ldquo;cells\u0026rdquo; per-se like RNA, instead we can go about finding generic boxes of containers called \u0026ldquo;cells\u0026rdquo; and see how they evolve.\nSee also: high chemical activity, metabolism, Stepwise Evolution, and Two Main Functions of Life\nconstituents \\(x\\): percentage of active binding sites \\(w\\): percentage of inactive binding sites \\(z\\): percentage of \u0026ldquo;empty binding sites\u0026rdquo; \\(p(k)\\): probability distribution for a site to be in any given state at time \\(k\\) \\(\\Psi(x)\\): rate of activation \u0026ldquo;efficiency in active monomers in acceleration monomer absorption\u0026rdquo; requirements evidently, because percentages: \\(x+w+z = 1\\) Active monomer absorption: \\(\\Psi(x) \\cdot p\\) Inactive monomer absorption: \\(p\\) additional information general intuition some kind of isolated droplet contains a population of molecules chemical reactions occur to the whole droplet such that its state changes Recall the Two Main Functions of Life: metabolism and replication. So, if we want our Dyson\u0026rsquo;s Model to capture life, we should try to encode them into our model. Turns out, we can use the language of Stepwise Evolution to describe our model.\nTherefore, let\u0026rsquo;s declare that there is only two states to our system, in which our particle is quasi-stationary (it wiggles but doesn\u0026rsquo;t go anywhere):\n\u0026ldquo;high chemical activity\u0026rdquo;, a.k.a. 
\u0026ldquo;metabolism\u0026rdquo; \u0026mdash; \u0026ldquo;ordered\u0026rdquo; state \u0026ldquo;low chemical activity\u0026rdquo; \u0026mdash; disordered state Our transition \\(M\\), then, only has to encode transitions between these two states. Dyson claims that, in his model, this transition happens spontaneously when the circumstances is correct.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model of Life\u003c/a\u003e is a \u003ca href=\"/posts/kbhliving/#theories-of-origin-of-life\"\u003etheory of origin of life\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es form as machines that perform tasks\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003egene\u003c/a\u003es show up later as parasites, eventually forming symbiosis with \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eRead: and so, we can essentially ditch trying to find things characteristics of \u0026ldquo;\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es\u0026rdquo; per-se like RNA, instead we can go about finding generic boxes of containers called \u0026ldquo;\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es\u0026rdquo; and see how they evolve.\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003ehigh chemical activity\u003c/a\u003e, \u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e, \u003ca href=\"/posts/kbhstepwise_evolution/\"\u003eStepwise Evolution\u003c/a\u003e, and \u003ca href=\"/posts/kbhliving/#two-main-functions-of-life\"\u003eTwo Main Functions of Life\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x\\): percentage of \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactive\u003c/a\u003e binding sites\u003c/li\u003e\n\u003cli\u003e\\(w\\): percentage of 
inactive binding sites\u003c/li\u003e\n\u003cli\u003e\\(z\\): percentage of \u0026ldquo;\u003ca href=\"/posts/kbhempty_binding_site/\"\u003eempty binding site\u003c/a\u003es\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(p(k)\\): probability distribution for a site to be in any given state at time \\(k\\)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(x)\\): rate of \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactivation\u003c/a\u003e \u0026ldquo;efficiency in active monomers in acceleration monomer absorption\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eevidently, because percentages: \\(x+w+z = 1\\)\u003c/li\u003e\n\u003cli\u003eActive monomer absorption: \\(\\Psi(x) \\cdot p\\)\u003c/li\u003e\n\u003cli\u003eInactive monomer absorption: \\(p\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"general-intuition\"\u003egeneral intuition\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003esome kind of isolated droplet contains a population of molecules\u003c/li\u003e\n\u003cli\u003echemical reactions occur to the whole droplet such that its state changes\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-11_19-37-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall the \u003ca href=\"/posts/kbhliving/#two-main-functions-of-life\"\u003eTwo Main Functions of Life\u003c/a\u003e: \u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e and \u003ca href=\"/posts/kbhreplication/\"\u003ereplication\u003c/a\u003e. So, if we want our \u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model\u003c/a\u003e to capture life, we should try to encode them into our model. 
Turns out, we can use the language of \u003ca href=\"/posts/kbhstepwise_evolution/\"\u003eStepwise Evolution\u003c/a\u003e to describe our model.\u003c/p\u003e\n\u003cp\u003eTherefore, let\u0026rsquo;s declare that there is only two states to our system, in which our particle is quasi-stationary (it wiggles but doesn\u0026rsquo;t go anywhere):\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003ehigh chemical activity\u003c/a\u003e\u0026rdquo;, a.k.a. \u0026ldquo;\u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e\u0026rdquo; \u0026mdash; \u0026ldquo;ordered\u0026rdquo; state\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;low chemical activity\u0026rdquo; \u0026mdash; disordered state\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eOur transition \\(M\\), then, only has to encode transitions between these two states. Dyson claims that, in his model, this transition happens spontaneously when the circumstances is correct.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdyson_s_model_of_life/","tags":null,"title":"Dyson's Model of Life"},{"categories":null,"contents":"It a constant.\n\\begin{equation} \\lim_{n \\to \\infty} \\qty(1- \\frac{\\lambda}{n})^{n} = e^{-\\lambda} \\end{equation}\n","html":"\u003cp\u003eIt a constant.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\qty(1- \\frac{\\lambda}{n})^{n} = e^{-\\lambda}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhe/","tags":null,"title":"e"},{"categories":null,"contents":"We want to solve huge POMDP in the real world, but the belief states are huge. Notably, reachable beliefs are very small given an initial belief.\nWhy is vanilla PCA bad PCA as a denoising procedure: the underlying data is some data which is normally noised. 
This is not strictly true, the points don\u0026rsquo;t have normal noise.\nBetter PCA: E-PCA Instead of Euclidean distance, we use\n\\begin{equation} L(U,V) = \\mid X-UV\\mid^{2} \\end{equation}\nas a metric, where:\n\\(U\\) the feature\nspecifically:\n\\begin{equation} F(z) - yz + F^{*}(y) \\end{equation}\nwhere \\(F\\) is any convex objective that is problem specific that you choose,\nBregman Divergence forces the underlying matricies\u0026rsquo; bases to be non-negative\nOverall Methods collect sample beliefs apply the belifs into E-PCA Discretize the E-PCA\u0026rsquo;d belifs into a new state space \\(S\\) Recalculate R (\\(R(b) = b \\cdot R(s)\\)) and T (we simply sample \\(b,o\\) and calculate \\(update(b,a,o)\\)) for that state space S; congratulations, you are now solving an MDP value iteration ","html":"\u003cp\u003eWe want to solve huge \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e in the real world, but the belief states are huge. Notably, reachable beliefs are very small given an initial belief.\u003c/p\u003e\n\u003ch2 id=\"why-is-vanilla-pca-bad\"\u003eWhy is vanilla PCA bad\u003c/h2\u003e\n\u003cp\u003ePCA as a denoising procedure: the underlying data is some data which is normally noised. 
This is not strictly true, the points don\u0026rsquo;t have normal noise.\u003c/p\u003e\n\u003ch2 id=\"better-pca-e-pca\"\u003eBetter PCA: E-PCA\u003c/h2\u003e\n\u003cp\u003eInstead of Euclidean distance, we use\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(U,V) = \\mid X-UV\\mid^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas a metric, where:\u003c/p\u003e\n\u003cp\u003e\\(U\\) the feature\u003c/p\u003e\n\u003cp\u003especifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(z) - yz + F^{*}(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(F\\) is any convex objective that is problem specific that you choose,\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eBregman Divergence\u003c/strong\u003e forces the underlying matricies\u0026rsquo; bases to be non-negative\u003c/p\u003e\n\u003ch2 id=\"overall-methods\"\u003eOverall Methods\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecollect sample beliefs\u003c/li\u003e\n\u003cli\u003eapply the belifs into E-PCA\u003c/li\u003e\n\u003cli\u003eDiscretize the E-PCA\u0026rsquo;d belifs into a new state space \\(S\\)\u003c/li\u003e\n\u003cli\u003eRecalculate R (\\(R(b) = b \\cdot R(s)\\)) and T (we simply sample \\(b,o\\) and calculate \\(update(b,a,o)\\)) for that state space S; congratulations, you are now solving an MDP\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhe_pca/","tags":null,"title":"E-PCA"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhe_coli/","tags":null,"title":"E. Coli"},{"categories":null,"contents":"Presented Project80 Talks Person Society Keywords Email Chhavi Chauhuan, PhD, ELS ASIP AI Ethics, Pathology cchauhan@asip.org J. 
Elliott Robinson, PhD, MD ASBMB NF1, Dopamine, ADHD elliott.robinson@cchmc.org Jason Yi, PhD ASBMB UBE3A, Recklinghaus, Dup15q domain Erica Korb, PhD ASBMB Autism, Chromatin ekorb@pennmedicine.upenn.edu Catherine Wang AAA student approach to learning ??? Megan Fagalde, PhD Candidate AAA anatomy learning mfagalde@iu.edu Michelle A. Sveistrup AAA haptic abilities, HAT msveistr@uwo.ca AAA anatomy learning Alam Boyd AAA partner vs. individual work Magnus ??? AAA Orna Issler ASBMB IncRNA, LINC00473, FEDORA orna.issler@mssm.edu Kaushik Ragunathan ASBMB whimsical adaptations ragunath@med.umich.edu Tracy l. Bale ASBMB i think like P80 scary tbale@som.umaryland.edu Gregory Morton APS thermoregulation, glucose gjmorton@uw.edu Peter Turnbaugh ASBMB Fluoropyrimidine, PreTA, DPYD peter.turnbaugh@ucsf.edu Ralph DeBernandis ASBMB metabolic alterations, LIPT1 People Meeters Person Place Email Job Followup Jay Pieczynski Rollins jpieczynski@rollings.edu Assist. Prof. P80, College Apps Sebastian Hernandez Rollins shernandez1@rollings.edu Undergrad \u0026quot;\u0026quot; Bryson Arnett U of Kentucky Undegrad Jennifer Pousont Pingry Eric P. Chang Pace U echang@pace.edu Assist. 
Prof P80 ","html":"\u003ch2 id=\"presented\"\u003ePresented\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"talks\"\u003eTalks\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePerson\u003c/th\u003e\n\u003cth\u003eSociety\u003c/th\u003e\n\u003cth\u003eKeywords\u003c/th\u003e\n\u003cth\u003eEmail\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eChhavi Chauhuan, PhD, ELS\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasip/\"\u003eASIP\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_ethics/\"\u003eAI Ethics\u003c/a\u003e, \u003ca href=\"\"\u003ePathology\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:cchauhan@asip.org\"\u003ecchauhan@asip.org\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJ. Elliott Robinson, PhD, MD\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eNF1\u003c/a\u003e, \u003ca href=\"/posts/kbhdopamine/\"\u003eDopamine\u003c/a\u003e, \u003ca href=\"/posts/kbhadhd/\"\u003eADHD\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:elliott.robinson@cchmc.org\"\u003eelliott.robinson@cchmc.org\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJason Yi, PhD\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eUBE3A\u003c/a\u003e, \u003ca href=\"\"\u003eRecklinghaus\u003c/a\u003e, \u003ca href=\"/posts/kbhdup15q/\"\u003eDup15q\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"https://www.jasonyilab.org/\"\u003edomain\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eErica Korb, PhD\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhautism/\"\u003eAutism\u003c/a\u003e, \u003ca href=\"/posts/kbhchromatin/\"\u003eChromatin\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:ekorb@pennmedicine.upenn.edu\"\u003eekorb@pennmedicine.upenn.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eCatherine Wang\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003estudent approach to learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e???\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMegan Fagalde, PhD Candidate\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhanatomy_learning/\"\u003eanatomy learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:mfagalde@iu.edu\"\u003emfagalde@iu.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMichelle A. Sveistrup\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003ehaptic abilities\u003c/a\u003e, \u003ca href=\"\"\u003eHAT\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:msveistr@uwo.ca\"\u003emsveistr@uwo.ca\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhanatomy_learning/\"\u003eanatomy learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eAlam Boyd\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003epartner vs. 
individual work\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMagnus ???\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOrna Issler\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eIncRNA\u003c/a\u003e, \u003ca href=\"\"\u003eLINC00473\u003c/a\u003e, \u003ca href=\"\"\u003eFEDORA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:orna.issler@mssm.edu\"\u003eorna.issler@mssm.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eKaushik Ragunathan\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhepigenetics/#whimsical-adaptations\"\u003ewhimsical adaptations\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:ragunath@med.umich.edu\"\u003eragunath@med.umich.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eTracy l. 
Bale\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003ei think like P80 scary\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:tbale@som.umaryland.edu\"\u003etbale@som.umaryland.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eGregory Morton\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaps/\"\u003eAPS\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhthermoregulation/\"\u003ethermoregulation\u003c/a\u003e, \u003ca href=\"\"\u003eglucose\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:gjmorton@uw.edu\"\u003egjmorton@uw.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ePeter Turnbaugh\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e, \u003ca href=\"\"\u003ePreTA\u003c/a\u003e, \u003ca href=\"/posts/kbhdpyd/\"\u003eDPYD\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:peter.turnbaugh@ucsf.edu\"\u003epeter.turnbaugh@ucsf.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eRalph DeBernandis\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003emetabolic alterations\u003c/a\u003e, \u003ca href=\"\"\u003eLIPT1\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"people-meeters\"\u003ePeople Meeters\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePerson\u003c/th\u003e\n\u003cth\u003ePlace\u003c/th\u003e\n\u003cth\u003eEmail\u003c/th\u003e\n\u003cth\u003eJob\u003c/th\u003e\n\u003cth\u003eFollowup\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eJay 
Pieczynski\u003c/td\u003e\n\u003ctd\u003eRollins\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:jpieczynski@rollings.edu\"\u003ejpieczynski@rollings.edu\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eAssist. Prof.\u003c/td\u003e\n\u003ctd\u003eP80, College Apps\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSebastian Hernandez\u003c/td\u003e\n\u003ctd\u003eRollins\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:shernandez1@rollings.edu\"\u003eshernandez1@rollings.edu\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eUndergrad\u003c/td\u003e\n\u003ctd\u003e\u0026quot;\u0026quot;\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eBryson Arnett\u003c/td\u003e\n\u003ctd\u003eU of Kentucky\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003eUndegrad\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJennifer Pousont\u003c/td\u003e\n\u003ctd\u003ePingry\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eEric P. Chang\u003c/td\u003e\n\u003ctd\u003ePace U\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:echang@pace.edu\"\u003eechang@pace.edu\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eAssist. Prof\u003c/td\u003e\n\u003ctd\u003eP80\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbheb_emails/","tags":["index"],"title":"EB2022 Index"},{"categories":null,"contents":"Slightly nontraditional Ted class, which is that it is in complete modular architecture: no large group lectures, work is done in 2-3 week sprints.\nFirst two days, we will be doing intro together. There are 12 modules, and you do 6. There will be core modules and branches.\nThere are 3 symposiums which the groups share out. This class is very hard; we are using a graduate school textbook. 
We will be sidestepping some depth: main idea is to show the big area.\nTracks 1 =\u0026gt; {2,4,5} 4 =\u0026gt; 5 7 =\u0026gt; 8 8 =\u0026gt; {11,12} 3 =\u0026gt; 6 6 =\u0026gt; 9 9 =\u0026gt; 10 Good to learn MatLab.\nLogistics Create a portfolio journal; supply one entry a week.\nIntroductory Reading How Did Economists Get It So Wrong?\n","html":"\u003cp\u003eSlightly nontraditional Ted class, which is that it is in complete modular architecture: no large group lectures, work is done in 2-3 week sprints.\u003c/p\u003e\n\u003cp\u003eFirst two days, we will be doing intro together. There are 12 modules, and you do 6. There will be core modules and branches.\u003c/p\u003e\n\u003cp\u003eThere are 3 symposiums which the groups share out. This class is very hard; we are using a graduate school textbook. We will be sidestepping some depth: main idea is to show the big area.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"tracks\"\u003eTracks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e1 =\u0026gt; {2,4,5}\u003c/li\u003e\n\u003cli\u003e4 =\u0026gt; 5\u003c/li\u003e\n\u003cli\u003e7 =\u0026gt; 8\u003c/li\u003e\n\u003cli\u003e8 =\u0026gt; {11,12}\u003c/li\u003e\n\u003cli\u003e3 =\u0026gt; 6\u003c/li\u003e\n\u003cli\u003e6 =\u0026gt; 9\u003c/li\u003e\n\u003cli\u003e9 =\u0026gt; 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eGood to learn MatLab.\u003c/p\u003e\n\u003ch2 id=\"logistics\"\u003eLogistics\u003c/h2\u003e\n\u003cp\u003eCreate a portfolio journal; supply one entry a week.\u003c/p\u003e\n\u003ch2 id=\"introductory-reading\"\u003eIntroductory Reading\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhow_did_economists_get_it_so_wrong/\"\u003eHow Did Economists Get It So Wrong?\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhecon320_architecture/","tags":null,"title":"ECON320 Architecture"},{"categories":null,"contents":"The economy of credit is an effect where credit is being traded liberally, and people are buying stocks on large 
margins and unable to pay back.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbheconomy_of_credit/\"\u003eeconomy of credit\u003c/a\u003e is an effect where \u003ca href=\"/posts/kbhcredit/\"\u003ecredit\u003c/a\u003e is being traded liberally, and people are buying stocks on large margins and unable to pay back.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheconomy_of_credit/","tags":null,"title":"economy of credit"},{"categories":null,"contents":"Goal: search for a path (sequence of edits) from start to final string, whereby:\ninitial state is the word we are transforming operators: insert, delete, substitute goal state: the word we end up at path cost: cost of the path we are trying to minimize Sequence of all edits is huge! so DP.\nFor two strings, let\u0026rsquo;s define:\n\\(X\\) of length \\(n\\) \\(Y\\) of length \\(m\\) we define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\nLet:\n\\(D(i,0) = i, \\forall i\\) \\(D(0,j) = j, \\forall j\\) for i in range(1,M): for j in range(1,N): # deletion: ignoring one char of previous string d1 = D(i-1,j) + 1 # (cost) # insertion: insertion into string before using rest of j d2 = D(i,j-1) + 1 # (cost) # keep same if char is same or substitute current d3 = D(i-1,j-1) + (0 if X[i] == Y[j] else 2) # cache D(i,j) = min(d1, d2, d3) ","html":"\u003cp\u003eGoal: search for a path (sequence of edits) from start to final string, whereby:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einitial state\u003c/strong\u003e is the word we are transforming\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoperators\u003c/strong\u003e: insert, delete, substitute\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003egoal state\u003c/strong\u003e: the word we end up at\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003epath cost\u003c/strong\u003e: cost of the path we are trying to minimize\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSequence of all edits is huge! 
so DP.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eFor two strings, let\u0026rsquo;s define:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) of length \\(n\\)\u003c/li\u003e\n\u003cli\u003e\\(Y\\) of length \\(m\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(D(i,0) = i, \\forall i\\)\u003c/li\u003e\n\u003cli\u003e\\(D(0,j) = j, \\forall j\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# deletion: ignoring one char of previous string\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# insertion: insertion into string before using rest of j\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# keep same if char is same or substitute current\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbhedit_distance_with_dp/","tags":null,"title":"edit distance with DP"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbheffability/","tags":null,"title":"effability"},{"categories":null,"contents":" Many Mexican-Americans worked as migratory laborers + outside programs Indian Reorganization Act of 1934 Woman were paied less Environmental cost of damns and public projects commentary on the effects of the New Deal Incorporating aspects of Arthur M. Schlesinger\u0026rsquo;s Appraisal of the New Deal, William E. 
Leuchtenburg\u0026rsquo;s Appraisal of the New Deal, Anthony Badger\u0026rsquo;s Appraisal of the New Deal.\nThrough the analysis of the New Deal programs, what was particularly salient was Anthony Badger\u0026rsquo;s framing of the event as not one that is ultimately \u0026ldquo;successful\u0026rdquo; or \u0026ldquo;failed\u0026rdquo; but instead one which focuses on its long-term effects in context with the future policies. The equivocal labeling allows nuance that places the Deal properly in its historical content. According to Badger, helping the poor, a significant policy goal of the deals, were left as \u0026ldquo;unfinished business\u0026rdquo; when going to war. This idea contrasts with William E. Leuchtenburg\u0026rsquo;s framing of the same event\u0026mdash;that it was never the true intention of the deal to assist in subsidies on a humane level, but that which supported the economy and incidentally those that reaped benefits on it.\nThis new frame is much more useful when analyzing the deal. In fact, Leuchtenburg took this a step further and claimed that the New Deal didn\u0026rsquo;t work largely because it was impossible for it to have repaired the damage by the Hoover administration. Furthermore, according to Schlesinger, programs like the NRA were created with already the clear assumption that there were not enough policy tools in place to actually achieve it to the fullest extent. Under this mind frame, then, it is not difficult to see the New Deal as one that intentionally brought a failing US economy\u0026mdash;and those participating in it\u0026mdash;to full swing whilst ignoring those that didn\u0026rsquo;t have an economic influence. 
It was, therefore, never about helping \u0026ldquo;people\u0026rdquo;: it is a policy and economic tool like any other.\nThrough this somewhat revisionist view, it is much easier to place into perspective New Deal\u0026rsquo;s zealot focus on young men, strange deficiency in some areas, and central focus on infrastructure. In that regard, the New Deal worked very well to bring a failing economy back to a semblance of normalcy for the privileged few.\n","html":"\u003cul\u003e\n\u003cli\u003eMany Mexican-Americans worked as migratory laborers + outside programs\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eIndian Reorganization Act of 1934\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWoman were paied less\u003c/li\u003e\n\u003cli\u003eEnvironmental cost of damns and public projects\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"commentary-on-the-effects-of-the-new-deal\"\u003ecommentary on the effects of the New Deal\u003c/h2\u003e\n\u003cp\u003eIncorporating aspects of \u003ca href=\"\"\u003eArthur M. Schlesinger\u0026rsquo;s Appraisal of the New Deal\u003c/a\u003e, \u003ca href=\"\"\u003eWilliam E. Leuchtenburg\u0026rsquo;s Appraisal of the New Deal\u003c/a\u003e, \u003ca href=\"\"\u003eAnthony Badger\u0026rsquo;s Appraisal of the New Deal.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eThrough the analysis of the New Deal programs, what was particularly salient was Anthony Badger\u0026rsquo;s framing of the event as not one that is ultimately \u0026ldquo;successful\u0026rdquo; or \u0026ldquo;failed\u0026rdquo; but instead one which focuses on its long-term effects \u003cem\u003ein context\u003c/em\u003e with the future policies. The equivocal labeling allows nuance that places the Deal properly in its historical content. According to Badger, helping the poor, a significant policy goal of the deals, were left as \u0026ldquo;unfinished business\u0026rdquo; when going to war. This idea contrasts with William E. 
Leuchtenburg\u0026rsquo;s framing of the same event\u0026mdash;that it was never the true intention of the deal to assist in subsidies on a humane level, but that which supported the economy and incidentally those that reaped benefits on it.\u003c/p\u003e\n\u003cp\u003eThis new frame is much more useful when analyzing the deal. In fact, Leuchtenburg took this a step further and claimed that the New Deal didn\u0026rsquo;t work largely because it was impossible for it to have repaired the damage by the Hoover administration. Furthermore, according to Schlesinger, programs like the NRA were created with already the clear assumption that there were not enough policy tools in place to actually achieve it to the fullest extent. Under this mind frame, then, it is not difficult to see the New Deal as one that intentionally brought a failing US economy\u0026mdash;and those participating in it\u0026mdash;to full swing whilst ignoring those that didn\u0026rsquo;t have an economic influence. It was, therefore, never about helping \u0026ldquo;people\u0026rdquo;: it is a policy and economic tool like any other.\u003c/p\u003e\n\u003cp\u003eThrough this somewhat revisionist view, it is much easier to place into perspective New Deal\u0026rsquo;s zealot focus on young men, strange deficiency in some areas, and central focus on infrastructure. 
In that regard, the New Deal worked very well to bring a failing economy back to a semblance of normalcy for the privileged few.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheffects_of_the_new_deal/","tags":null,"title":"effects of the New Deal"},{"categories":null,"contents":"For an ODE, eigensolutions of some expression \\(x\u0026rsquo;=Ax\\) consists of the class of solutions which remains in a line through the origin, which consists of the family which:\n\\begin{equation} x(t) = ke^{\\lambda t} v \\end{equation}\nwhere \\(\\lambda\\) is an eigenvalue of \\(A\\), and \\(v\\) its corresponding eigenvector.\nmotivation \\begin{equation} y\u0026rsquo; = F(y) \\end{equation}\nan autonomous ODE, suppose we have some solution \\(y=a\\) for which \\(y\u0026rsquo; = 0\\), that is, \\(F(a) = 0\\), we know that the system will be trapped there.\nNear such a stationary solution \\(a\\), we can use a Taylor expansion to linearize:\n\\begin{equation} F(a+b) = F(a) + Jac(a)x + \\dots \\end{equation}\nThe first term, we are given, is \\(0\\). The second term indicates that our derivative near the stationary point seems to be some matrix \\(A\\) of \\(a\\).\nsketching solutions along eigenlines For eigenlines, we can observe the sign of the eigenline to see how it behaves, and\u0026mdash;in conjuction\u0026mdash;how other solutions behave. 
In particular, in the x1, x2 plane for two orders, the solutions are tangent to the eigensolutions.\nWith an negative eigenvalue, the eigensolution arrows will point towards the origin, whereas with positive eigenvalues the eigensolutions will point away.\nsaddle case: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have opposite signs, then the paths look like half-parabolas matching the eigensolutions; it will approach the larger eigenvalue more rapidly node/source/sink case: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have the same sign, then the solutions look like half-parabolas tangent only to the eigenline which has a smaller \\(\\lambda\\) \u0026mdash; in this case, if the eigenvalues happens to be both negative you can work things out for \\(-A\\) and then flip the paths on all the lines\u0026mdash;at smaller values of \\(t\\) (specifically \\(t\u0026lt;1\\)), the curve tends closer to the \\(\\lambda\\) with the smaller eigenvalue (because raising the power to a larger number actually makes the power smaller); at \\(t\u0026gt;1\\), the curve moves towards that of the bigger eigenvalue complex/spiral case: in this case, we can write some answer with Euler\u0026rsquo;s Equation to get two real solutions in trig \\(P(t) + iQ(t)\\), where each \\(P,Q\\) is some function is cos plus sin times \\(ae^{t}\\). Therefore, it will be a spiral outwards Flipping You will note that, in all of these cases \\(x=0\\) is a stationary solution, as \\(A0 = 0\\). As \\(t \\to -\\infty\\), we end up kissing the side with the smaller eigenvalue, and as \\(t \\to +\\infty\\), we end up going towards the side with the bigger eigenvalue.\nNonlinear Non-linear systems can be figured by the motivation above, linearizing into groups, and figuring out each local area.\nFlipping This is because:\n\\begin{equation} (-A)v = -(Av) = (-\\lambda) v \\end{equation}\nmeaning the directions of eigenvectors don\u0026rsquo;t change while their corresponding eigenvalues change. 
If we define some \\(y(t) = x(-t)\\), where \\(Ax = x\u0026rsquo;\\), we can work out that \\(y\u0026rsquo;(t) = -Ay(t)\\), meaning that \\(y\u0026rsquo;\\)\u0026rsquo;s graphs are just flipped versions of \\(x\\)\u0026rsquo;s graphs.\nHence we can just flip everything.\nDrawing By tracing those patterns, you can draw other solutions over time:\n","html":"\u003cp\u003eFor an ODE, \u003ca href=\"/posts/kbheigensolutions/\"\u003eeigensolutions\u003c/a\u003e of some expression \\(x\u0026rsquo;=Ax\\) consists of the class of solutions which remains in a line through the origin, which consists of the family which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = ke^{\\lambda t} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda\\) is an eigenvalue of \\(A\\), and \\(v\\) its corresponding \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003emotivation\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = F(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ean \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODE\u003c/a\u003e, suppose we have some solution \\(y=a\\) for which \\(y\u0026rsquo; = 0\\), that is, \\(F(a) = 0\\), we know that the system will be trapped there.\u003c/p\u003e\n\u003cp\u003eNear such a stationary solution \\(a\\), we can use a Taylor expansion to linearize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(a+b) = F(a) + Jac(a)x + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe first term, we are given, is \\(0\\). The second term indicates that our derivative near the stationary point seems to be some matrix \\(A\\) of \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"sketching-solutions-along-eigenlines\"\u003esketching solutions along eigenlines\u003c/h2\u003e\n\u003cp\u003eFor eigenlines, we can observe the sign of the eigenline to see how it behaves, and\u0026mdash;in conjuction\u0026mdash;how other solutions behave. 
In particular, in the x1, x2 plane for two orders, the solutions are tangent to the eigensolutions.\u003c/p\u003e\n\u003cp\u003eWith an negative eigenvalue, the eigensolution arrows will point towards the origin, whereas with positive eigenvalues the eigensolutions will point away.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esaddle case\u003c/strong\u003e: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have opposite signs, then the paths look like half-parabolas matching the eigensolutions; it will approach the larger eigenvalue more rapidly\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enode/source/sink case\u003c/strong\u003e: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have the same sign, then the solutions look like half-parabolas tangent only to the eigenline which has a smaller \\(\\lambda\\) \u0026mdash; in this case, if the eigenvalues happens to be both negative you can work things out for \\(-A\\) and then flip the paths on all the lines\u0026mdash;at smaller values of \\(t\\) (specifically \\(t\u0026lt;1\\)), the curve tends closer to the \\(\\lambda\\) with the smaller eigenvalue (because raising the power to a larger number actually makes the power smaller); at \\(t\u0026gt;1\\), the curve moves towards that of the bigger eigenvalue\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ecomplex/spiral case\u003c/strong\u003e: in this case, we can write some answer with \u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e to get two real solutions in trig \\(P(t) + iQ(t)\\), where each \\(P,Q\\) is some function is cos plus sin times \\(ae^{t}\\). Therefore, it will be a spiral outwards\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"flipping\"\u003eFlipping\u003c/h3\u003e\n\u003cp\u003eYou will note that, in all of these cases \\(x=0\\) is a stationary solution, as \\(A0 = 0\\). 
As \\(t \\to -\\infty\\), we end up kissing the side with the smaller eigenvalue, and as \\(t \\to +\\infty\\), we end up going towards the side with the bigger eigenvalue.\u003c/p\u003e\n\u003ch3 id=\"nonlinear\"\u003eNonlinear\u003c/h3\u003e\n\u003cp\u003eNon-linear systems can be figured by the motivation above, linearizing into groups, and figuring out each local area.\u003c/p\u003e\n\u003ch2 id=\"flipping\"\u003eFlipping\u003c/h2\u003e\n\u003cp\u003eThis is because:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-A)v = -(Av) = (-\\lambda) v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning the directions of eigenvectors don\u0026rsquo;t change while their corresponding eigenvalues change. If we define some \\(y(t) = x(-t)\\), where \\(Ax = x\u0026rsquo;\\), we can work out that \\(y\u0026rsquo;(t) = -Ay(t)\\), meaning that \\(y\u0026rsquo;\\)\u0026rsquo;s graphs are just flipped versions of \\(x\\)\u0026rsquo;s graphs.\u003c/p\u003e\n\u003cp\u003eHence we can just flip everything.\u003c/p\u003e\n\u003ch2 id=\"drawing\"\u003eDrawing\u003c/h2\u003e\n\u003cp\u003eBy tracing those patterns, you can draw other solutions over time:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_15-13-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbheigensolutions/","tags":null,"title":"eigensolutions"},{"categories":null,"contents":"The eigenspace of \\(T, \\lambda\\) is the set of all eigenvectors of \\(T\\) corresponding to \\(\\lambda\\), plus the \\(0\\) vector.\nconstituents \\(T \\in \\mathcal{L}(V)\\) \\(\\lambda \\in \\mathbb{F}\\), an eigenvalue of \\(T\\) requirements \\begin{equation} E(\\lambda, T) = \\text{null}\\ (T - \\lambda I) \\end{equation}\ni.e. 
all vectors such that \\((T- \\lambda I) v = 0\\).\nwhere, \\(E\\) is an eigenspace of \\(T\\).\nadditional information sum of eigenspaces is a direct sum \\(E(\\lambda_{1}, T) + \u0026hellip; + E(\\lambda_{m}, T)\\) is a direct sum.\nSee eigenspaces are disjoint.\ndimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space A correlate of the above is that:\n\\begin{equation} \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V \\end{equation}\nProof:\nRecall that:\n\\begin{equation} \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) = \\dim (E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) ) \\end{equation}\nbecause \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\).\nNow, the sum of subspaces is the smallest subspace, so \\(\\dim (E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) ) \\leq \\dim V\\).\nAnd hence:\n\\begin{equation} \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V \\end{equation}\nas desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e of \\(T, \\lambda\\) is the set of all \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(T\\) corresponding to \\(\\lambda\\), plus the \\(0\\) vector.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\lambda \\in \\mathbb{F}\\), an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nE(\\lambda, T) = \\text{null}\\ (T - \\lambda I)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. 
all vectors such that \\((T- \\lambda I) v = 0\\).\u003c/p\u003e\n\u003cp\u003ewhere, \\(E\\) is an \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"sum-of-eigenspaces-is-a-direct-sum\"\u003esum of eigenspaces is a direct sum\u003c/h3\u003e\n\u003cp\u003e\\(E(\\lambda_{1}, T) + \u0026hellip; + E(\\lambda_{m}, T)\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003eeigenspaces are disjoint\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"dimension-of-sum-of-eigenspaces-is-smaller-than-or-equal-to-the-dimension-of-the-whole-space\"\u003edimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space\u003c/h3\u003e\n\u003cp\u003eA correlate of the above is that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) = \\dim (E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause \u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNow, the \u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esum of subspaces is the smallest subspace\u003c/a\u003e, so \\(\\dim (E(\\lambda_{1}, T) 
\\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) ) \\leq \\dim V\\).\u003c/p\u003e\n\u003cp\u003eAnd hence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheigenspace/","tags":null,"title":"eigenspace"},{"categories":null,"contents":"see Extended Kalman Filter\n","html":"\u003cp\u003esee \u003ca href=\"/posts/kbhfilters/#extended\"\u003eExtended Kalman Filter\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhekf/","tags":null,"title":"EKF"},{"categories":null,"contents":"The Elastic Modulus is a measurement of how much deformation takes place given some force on the system. Formally, it is the slope of the stress-strain curve, defined by:\n\\begin{equation} E = \\frac{stress}{strain} \\end{equation}\nThe units in pascals as it is: force per area (pascals) divided by deformation (dimensionless, as it is a fraction of old shape over new shape \\(\\frac{V}{V}=1\\)).\nDepending on how its measured, it is called different things:\nYoung\u0026rsquo;s Modulus: tensile elasticity\u0026mdash;tendency for object to deform along an axis with force applied (usually that is just called the Elastic Modulus) Shear\u0026rsquo;s Modulus: shear elasticity\u0026mdash;tendency of an object to shear (deform in shape with the constant volume) with force applied Bulk Modulus: volumetric elasticity\u0026mdash;tendency for an object to deform in all directions when uniformly loaded ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e is a measurement of how much deformation takes place given some force on the system. 
Formally, it is the slope of the \u003ca href=\"/posts/kbhstress/\"\u003estress\u003c/a\u003e-\u003ca href=\"/posts/kbhstrain/\"\u003estrain\u003c/a\u003e curve, defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{stress}{strain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe units in pascals as it is: force per area (pascals) divided by deformation (dimensionless, as it is a fraction of old shape over new shape \\(\\frac{V}{V}=1\\)).\u003c/p\u003e\n\u003cp\u003eDepending on how its measured, it is called different things:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e: tensile elasticity\u0026mdash;tendency for object to deform along an axis with force applied (usually that \u003cem\u003eis\u003c/em\u003e just called the \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eShear\u0026rsquo;s Modulus: shear elasticity\u0026mdash;tendency of an object to shear (deform in shape with the constant volume) with force applied\u003c/li\u003e\n\u003cli\u003eBulk Modulus: volumetric elasticity\u0026mdash;tendency for an object to deform in all directions when uniformly loaded\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelastic_modulus/","tags":null,"title":"Elastic Modulus"},{"categories":null,"contents":"Eleanor Roosevelt is the first lady of the US.\nCreated minimum wage Wrote a weekly column named My Day, in 135 newspapers 2x a week broadcast ","html":"\u003cp\u003e\u003ca href=\"/posts/kbheleanor_roosevelt/\"\u003eEleanor Roosevelt\u003c/a\u003e is the first lady of the US.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCreated \u003ca href=\"/posts/kbhminimum_wage/\"\u003eminimum wage\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWrote a weekly column named \u003ca href=\"/posts/kbhmy_day/\"\u003eMy Day\u003c/a\u003e, in 135 newspapers\u003c/li\u003e\n\u003cli\u003e2x a week 
broadcast\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbheleanor_roosevelt/","tags":null,"title":"Eleanor Roosevelt"},{"categories":null,"contents":"Though Coulomb\u0026rsquo;s Law allow us to calculate the force between any two individual charges, one can note that most of it is independent of the second test charge. In fact, each charge emits a field around itself of the shape:\n\\begin{equation} \\vec{E( r)} = k \\frac{q}{r^{2}} = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q}{r^{2}} \\end{equation}\nOr, you can think of it as moving a test charge \\(q\\) around the charge of interest, then calculating:\n\\begin{equation} \\vec{E} = \\frac{\\vec{F_{e}}}{q} \\end{equation}\nAs you can see, if the source charge were to be positive, you have a positive \\(E\\) which will point away from the charge, and negative charge \\(E\\) will point towards the charge.\nOne institution here that the above statement provides is that electric fields are drawn positive to negative; so, if you placed a positive test charge in the field, it will experience a force tangent to and in the same direction that traced out by the field lines. If you have a negative test change, then it will experience a force in the opposite direction.\nadditional information tracing electric field lines By tracing out electric fields and observing the lines\u0026rsquo; density, we can make a guess about how long the fields are.\ncomposing electric fields Unsurprisingly, superposition matters here as well:\nIf your system has multiple charges, then \\(E_{total} = E_{q_1} + E_{q_2}\\). No surprise here.\n","html":"\u003cp\u003eThough \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eCoulomb\u0026rsquo;s Law\u003c/a\u003e allow us to calculate the force between any two individual charges, one can note that most of it is independent of the second test charge. 
In fact, each charge emits a field around itself of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{E( r)} = k \\frac{q}{r^{2}} = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q}{r^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOr, you can think of it as moving a test charge \\(q\\) around the charge of interest, then calculating:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{E} = \\frac{\\vec{F_{e}}}{q}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs you can see, if the source charge were to be positive, you have a \u003cstrong\u003epositive\u003c/strong\u003e \\(E\\) which will point \u003cem\u003eaway\u003c/em\u003e from the charge, and \u003cstrong\u003enegative\u003c/strong\u003e charge \\(E\\) will point \u003cem\u003etowards\u003c/em\u003e the charge.\u003c/p\u003e\n\u003cp\u003eOne institution here that the above statement provides is that \u003cstrong\u003eelectric fields are drawn positive to negative\u003c/strong\u003e; so, if you placed a positive test charge in the field, it will experience a force tangent to and in the same direction that traced out by the field lines. 
If you have a negative test change, then it will experience a force in the opposite direction.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"tracing-electric-field-lines\"\u003etracing electric field lines\u003c/h3\u003e\n\u003cp\u003eBy tracing out electric fields and observing the lines\u0026rsquo; density, we can make a guess about how long the fields are.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-12_15-10-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"composing-electric-fields\"\u003ecomposing electric fields\u003c/h3\u003e\n\u003cp\u003eUnsurprisingly, \u003ca href=\"/posts/kbhcoulomb_s_law/#superposition\"\u003esuperposition\u003c/a\u003e matters here as well:\u003c/p\u003e\n\u003cp\u003eIf your system has multiple charges, then \\(E_{total} = E_{q_1} + E_{q_2}\\). No surprise here.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelectric_field/","tags":null,"title":"electric field"},{"categories":null,"contents":"electric potential is analogous to gravitational potential energy, but with electrostatics!\n\\begin{equation} P_{E} = qV \\end{equation}\nwhere \\(q\\) is the change on the particle in question, and \\(V\\) is the voltage, the difference in electric potential between two places.\nYes, voltage is defined vis a vi electric potential: that is, it represents a differential of electric potential.\nadditional information electric potential is analogous to gravitational potential Let \\(A, B, C\\) be positrons, and the lines are the electric field. Which one has the highest electric potential? 
\\(A\\), because it has the most distance to travel to until it can get all the way to the right.\nconnecting electric potential and electric field parallel plates \\begin{equation} E = \\frac{V}{d} \\end{equation}\nwhere \\(d\\) is the distance between the plates, \\(E\\) the uniform electric field between the plates.\nThe amount of charge on each plate is described by:\n\\begin{equation} Q = CV \\end{equation}\nwhere, \\(C\\) is the capacitance of each plate, and \\(V\\) the voltage across the plates.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e is analogous to \u003ca href=\"/posts/kbhgravitational_potential_energy/\"\u003egravitational potential energy\u003c/a\u003e, but with electrostatics!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP_{E} = qV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(q\\) is the change on the particle in question, and \\(V\\) is the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e, the difference in \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e between two places.\u003c/p\u003e\n\u003cp\u003eYes, \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e is defined vis a vi \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e: that is, it represents a \u003cem\u003edifferential\u003c/em\u003e of \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"electric-potential-is-analogous-to-gravitational-potential\"\u003eelectric potential is analogous to gravitational potential\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-05_20-19-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLet \\(A, B, C\\) be positrons, and the lines are the \u003ca 
href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e. Which one has the highest electric potential? \\(A\\), because it has the most distance to travel to until it can get all the way to the right.\u003c/p\u003e\n\u003ch3 id=\"connecting-electric-potential--kbhelectric-potential-energy-dot-md--and-electric-field--kbhelectric-field-dot-md\"\u003econnecting \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e and \u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e\u003c/h3\u003e\n\u003ch4 id=\"parallel-plates\"\u003eparallel plates\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-18_13-40-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\nE = \\frac{V}{d}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is the distance between the plates, \\(E\\) the uniform \u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e between the plates.\u003c/p\u003e\n\u003cp\u003eThe amount of \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e on each plate is described by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ = CV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(C\\) is the \u003ca href=\"/posts/kbhcapacitance/\"\u003ecapacitance\u003c/a\u003e of each plate, and \\(V\\) the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e across the plates.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelectric_potential_energy/","tags":null,"title":"electric potential energy"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhelectron/","tags":null,"title":"electron"},{"categories":null,"contents":"Elie Adam is a research scientist in brain dynamics and neuro-science at MIT.\nMathematical Systems Systemic influences with various pieces Hopfield Networks Mouse Video Games Derivatives are essentially a high pass filter\nMethods of neuro imaging 
calcium channel florescence Electrode measurements Optogenetics primary methods analyzing monke with neuro-imaging methods above creating in silico models based on those responses, in a large systems of differential equations play with those equations to figure possible novel responses try them on monke ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhelie_adam/\"\u003eElie Adam\u003c/a\u003e is a research scientist in brain dynamics and neuro-science at MIT.\u003c/p\u003e\n\u003ch2 id=\"mathematical-systems\"\u003eMathematical Systems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSystemic influences with various pieces\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhopfield_networks/\"\u003eHopfield Networks\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mouse-video-games\"\u003eMouse Video Games\u003c/h2\u003e\n\u003cp\u003eDerivatives are essentially a high pass filter\u003c/p\u003e\n\u003ch2 id=\"methods-of-neuro-imaging\"\u003eMethods of neuro imaging\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecalcium channel florescence\u003c/li\u003e\n\u003cli\u003eElectrode measurements\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptogenetics/\"\u003eOptogenetics\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"primary-methods\"\u003eprimary methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eanalyzing monke with neuro-imaging methods above\u003c/li\u003e\n\u003cli\u003ecreating \u003cem\u003ein silico\u003c/em\u003e models based on those responses, in a large systems of differential equations\u003c/li\u003e\n\u003cli\u003eplay with those equations to figure possible novel responses\u003c/li\u003e\n\u003cli\u003etry them on monke\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelie_adam/","tags":null,"title":"Elie Adam"},{"categories":null,"contents":"Wizenbaum (1966)\nworks pattern-action rules by rephrasing user\u0026rsquo;s questions\n\u0026ldquo;You hate me\u0026rdquo; =\u0026gt; 
\u0026ldquo;what makes you think I hate you\u0026rdquo;\nRogerian psycotherapy: assume no real-world knowledge; simply draws out patient\u0026rsquo;s statements\nI need X =\u0026gt; what would it mean to you if you got X.\nuses regex\ncapture specific adjectives, \u0026ldquo;all\u0026rdquo;, \u0026ldquo;always\u0026rdquo;, etc. and responds accordingly\nEliza Rules patterns are organized by keywords: a keyword has a pattern and a list of transforms:\ne.g.:\nkeyword: you\n(0 you 0 me) (what makes you think I 3 you) (why do you think I 3 you) Keywords are ranked from specific to general: each keyword has a rank, where most specific keywords words are most highly ranked, and then expansions are picked with the one with the highest keyword rank.\n\u0026ldquo;my transform\u0026rdquo; whenever the keyword \u0026ldquo;my\u0026rdquo; is used, we will pop a transformerd utterance onto the memory list (\u0026ldquo;earlier you said your\u0026rdquo;\u0026hellip;) in a FIFO queue.\nlater if we don\u0026rsquo;t know what to say we just pop something off\nethical implications people maybe mislead by computer understanding face to face interaction is vital people develop specific relationships with artifacts: such as a diary value sensitive design: consider benifits, harms, etc. 
","html":"\u003cp\u003eWizenbaum (1966)\u003c/p\u003e\n\u003cp\u003eworks pattern-action rules by rephrasing user\u0026rsquo;s questions\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;You hate me\u0026rdquo; =\u0026gt; \u0026ldquo;what makes you think I hate you\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eRogerian psycotherapy: \u003cstrong\u003eassume no real-world knowledge\u003c/strong\u003e; simply draws out patient\u0026rsquo;s statements\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eI need X =\u0026gt; what would it mean to you if you got X.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-12_09-37-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003euses \u003ca href=\"/posts/kbhregex/\"\u003eregex\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003ecapture specific adjectives, \u0026ldquo;all\u0026rdquo;, \u0026ldquo;always\u0026rdquo;, etc. and responds accordingly\u003c/p\u003e\n\u003ch2 id=\"eliza-rules\"\u003eEliza Rules\u003c/h2\u003e\n\u003cp\u003epatterns are organized by \u003cstrong\u003ekeywords\u003c/strong\u003e: a keyword has a pattern and a list of transforms:\u003c/p\u003e\n\u003cp\u003ee.g.:\u003c/p\u003e\n\u003cp\u003ekeyword: \u003cstrong\u003eyou\u003c/strong\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-lisp\" data-lang=\"lisp\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eme\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewhat\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emakes\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethink\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewhy\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edo\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethink\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eKeywords are ranked from specific to general: each keyword has a rank, where most specific keywords words are most highly ranked, and then expansions are picked with the one with the highest keyword rank.\u003c/p\u003e\n\u003ch2 id=\"my-transform\"\u003e\u0026ldquo;my transform\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003ewhenever the keyword \u0026ldquo;my\u0026rdquo; is used, we will pop a transformerd utterance onto the memory list (\u0026ldquo;earlier you said your\u0026rdquo;\u0026hellip;) in a FIFO queue.\u003c/p\u003e\n\u003cp\u003elater if we don\u0026rsquo;t know what to say we just pop something off\u003c/p\u003e\n\u003ch2 id=\"ethical-implications\"\u003eethical implications\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003epeople maybe mislead by computer 
understanding\u003c/li\u003e\n\u003cli\u003eface to face interaction is vital\u003c/li\u003e\n\u003cli\u003epeople develop specific relationships with artifacts: such as a diary\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evalue sensitive design\u003c/strong\u003e: consider benifits, harms, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbheliza/","tags":null,"title":"ELIZA"},{"categories":null,"contents":"A civil rights movement organizer that founded SNICK.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e organizer that founded \u003ca href=\"/posts/kbhcivil_rights/#snick\"\u003eSNICK\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhella_baker/","tags":null,"title":"Ella Baker"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhelo_ratings/","tags":null,"title":"Elo Ratings"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhempty_binding_site/","tags":null,"title":"empty binding site"},{"categories":null,"contents":"Your brain maintaing a stable level of energy. Closely related to glucose homeostatis.\nmethods to achive energy homeostasis by the CNS regulation of the brain AgRP signaling is activated to stimulate food intake when hypoglycemic. ","html":"\u003cp\u003eYour brain maintaing a stable level of energy. 
Closely related to \u003ca href=\"\"\u003eglucose homeostatis\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"methods-to-achive-energy-homeostasis--kbhenergy-homeostasis-dot-md--by-the-cns-regulation--kbhcns-regulation-dot-md--of-the-brain\"\u003emethods to achive \u003ca href=\"/posts/kbhenergy_homeostasis/\"\u003eenergy homeostasis\u003c/a\u003e by the \u003ca href=\"/posts/kbhcns_regulation/\"\u003eCNS regulation\u003c/a\u003e of the brain\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e signaling is activated to stimulate food intake when hypoglycemic.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhenergy_homeostasis/","tags":null,"title":"energy homeostasis"},{"categories":null,"contents":"english is a great language.\nFor Orthello:\nPronouns: 23 \u0026ldquo;Moor:\u0026rdquo;: 18 Orthello: 7\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhenglish/\"\u003eenglish\u003c/a\u003e is a great language.\u003c/p\u003e\n\u003cp\u003eFor Orthello:\u003c/p\u003e\n\u003cp\u003ePronouns: 23\n\u0026ldquo;Moor:\u0026rdquo;: 18\nOrthello: 7\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhenglish/","tags":null,"title":"english"},{"categories":null,"contents":"motivating entanglement file:///Users/houliu/Documents/School Work/The Bible/Quantum/Leonard Susskind, Art Friedman - Quantum Mechanics_ The Theoretical Minimum-Basic Books (2014).pdf\nTake two actors, Alice \\(A\\) and Bob \\(B\\). They each have a space \\(S_A\\) and \\(S_B\\). What if, for instance, we want to create a composite system out of Alice and Bob?\nWe will define elements in the Alice space as being defined by bases \\(H\\) and \\(T\\), where each element \\(a \\in S_a\\) is defined as:\n\\begin{equation} \\alpha_H | H \\big\\} + \\alpha_T | T \\big\\} \\end{equation}\nWhy the weird kets? 
We will use different kets to be aware of where bases came from; as in, elements in Alicespace is not elements in Bobspace.\nLet\u0026rsquo;s take Bobspace to be a higher dimension, as in, using normal ket vectors:\n\\begin{align} |1\\big\u0026gt; \\\\ |2\\big\u0026gt; \\\\ |3\\big\u0026gt; \\\\ \\cdots \\\\ |6\\big\u0026gt; \\end{align}\n","html":"\u003ch2 id=\"motivating-entanglement--kbhentangled-dot-md\"\u003emotivating \u003ca href=\"/posts/kbhentangled/\"\u003eentanglement\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003efile:///Users/houliu/Documents/School Work/The Bible/Quantum/Leonard Susskind, Art Friedman - Quantum Mechanics_ The Theoretical Minimum-Basic Books (2014).pdf\u003c/p\u003e\n\u003cp\u003eTake two actors, Alice \\(A\\) and Bob \\(B\\). They each have a space \\(S_A\\) and \\(S_B\\). What if, for instance, we want to create a \u003ca href=\"/posts/kbhcomposite_system/\"\u003ecomposite system\u003c/a\u003e out of Alice and Bob?\u003c/p\u003e\n\u003cp\u003eWe will define elements in the Alice space as being defined by bases \\(H\\) and \\(T\\), where each element \\(a \\in S_a\\) is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_H | H \\big\\} + \\alpha_T | T \\big\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhy the weird kets? We will use different kets to be aware of where bases came from; as in, elements in Alicespace is not elements in Bobspace.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take Bobspace to be a higher dimension, as in, using normal ket vectors:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n|1\\big\u0026gt; \\\\\n|2\\big\u0026gt; \\\\\n|3\\big\u0026gt; \\\\\n\\cdots \\\\\n|6\\big\u0026gt;\n\\end{align}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhentangled/","tags":null,"title":"entanglement"},{"categories":null,"contents":"epigenetics is the ability to make identical cells present distinct phenotipic states.\nWhy? 
DNA is packaged by charged histone proteins, and they wrap around the nucleosome. Upon acute changes in the environment, cells can change their epigenic states.\nwhimsical adaptations Epigenetic adaptive states in organisms with no clear path adaptation. For instance, a certain lung cancer cell has this ability. So, how do cells decide what genes they would activate?\nAnother example: treating fisheries in caffine\nGrowing in caffine will trigger caffine resistance Remove the caffine would cause some of them to default back, some of them to stay the same way ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhepigenetics/\"\u003eepigenetics\u003c/a\u003e is the ability to make identical cells present distinct phenotipic states.\u003c/p\u003e\n\u003ch2 id=\"why\"\u003eWhy?\u003c/h2\u003e\n\u003cp\u003eDNA is packaged by charged \u003ca href=\"\"\u003ehistone\u003c/a\u003e proteins, and they wrap around the nucleosome. Upon acute changes in the environment, cells can change their epigenic states.\u003c/p\u003e\n\u003ch2 id=\"whimsical-adaptations\"\u003ewhimsical adaptations\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhepigenetics/\"\u003eEpigenetic\u003c/a\u003e adaptive states in organisms with no clear path adaptation. For instance, a certain lung cancer cell has this ability. 
So, how do cells decide what genes they would activate?\u003c/p\u003e\n\u003cp\u003eAnother example: treating fisheries in caffine\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGrowing in caffine will trigger caffine resistance\u003c/li\u003e\n\u003cli\u003eRemove the caffine would cause some of them to default back, some of them to stay the same way\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhepigenetics/","tags":null,"title":"epigenetics"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhepitophs/","tags":null,"title":"epitopes"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhequal_rights_act/","tags":null,"title":"Equal Rights Act"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhetf/","tags":null,"title":"ETF"},{"categories":null,"contents":"A corollary of greatest common divisor and division.\nSay you have some \\(b|a\\) such that:\n\\begin{equation} a = bq + r \\end{equation}\nNow, \\(d|a,b \\Leftrightarrow d|b,r\\) (because \\(d|b,r\\) implies there\u0026rsquo;s some \\(x, x\u0026rsquo;\\) such that \\(a = (dx)q+dx\u0026rsquo;\\), and so \\(a = d(xq + x\u0026rsquo;)\\) and so \\(d|a\\); the logic goes the other way too).\nThis finally implies that \\(\\gcd (a,b)= \\gcd (b,r)\\) because any divisor that works for one works for both.\n","html":"\u003cp\u003eA corollary of \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e and \u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSay you have some \\(b|a\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = bq + r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, \\(d|a,b \\Leftrightarrow d|b,r\\) (because \\(d|b,r\\) implies there\u0026rsquo;s some \\(x, x\u0026rsquo;\\) such that \\(a = (dx)q+dx\u0026rsquo;\\), and so \\(a = d(xq + x\u0026rsquo;)\\) and so 
\\(d|a\\); the logic goes the other way too).\u003c/p\u003e\n\u003cp\u003eThis finally implies that \\(\\gcd (a,b)= \\gcd (b,r)\\) because any divisor that works for one works for both.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheuclidean_algorithm/","tags":null,"title":"Euclidean Algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbheugene_wigner/","tags":null,"title":"Eugene Wigner"},{"categories":null,"contents":"A type of cell.\nSample eukareotyic cell gene:\nTATA box promoter 5\u0026rsquo; non-coding sequence Non-coding introns interlaced between exons, unique to eukareotyic cells. Bacteria (prokateotic cells don\u0026rsquo;t contain introns or have small them) 3\u0026rsquo; non-coding sequence ","html":"\u003cp\u003eA type of \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSample \u003ca href=\"/posts/kbheukareotyic_cell/\"\u003eeukareotyic cell\u003c/a\u003e gene:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eTATA box\u003c/a\u003e \u003ca href=\"\"\u003epromoter\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e5\u0026rsquo; non-coding sequence\u003c/li\u003e\n\u003cli\u003eNon-coding \u003ca href=\"\"\u003eintrons\u003c/a\u003e interlaced between \u003ca href=\"\"\u003eexons\u003c/a\u003e, unique to \u003ca href=\"/posts/kbheukareotyic_cell/\"\u003eeukareotyic cell\u003c/a\u003es. 
Bacteria (\u003ca href=\"/posts/kbhprokateotic_cell/\"\u003eprokateotic cell\u003c/a\u003es don\u0026rsquo;t contain \u003ca href=\"\"\u003eintron\u003c/a\u003es or have small them)\u003c/li\u003e\n\u003cli\u003e3\u0026rsquo; non-coding sequence\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbheukareotyic_cell/","tags":null,"title":"eukareotyic cell"},{"categories":null,"contents":"The Euler-Bernoulli Theory is a theory in dynamics which describes how much a beam deflect given an applied load.\nAssumptions For Euler-Bernoulli Theory to apply in its basic form, we make assumptions.\nThe \u0026ldquo;beam\u0026rdquo; you are bending is modeled as a 1d object; it is only long and is not wide For this page, \\(+x\\) is \u0026ldquo;right\u0026rdquo;, \\(+y\\) is \u0026ldquo;in\u0026rdquo;, and \\(+z\\) is \u0026ldquo;up\u0026rdquo; Probably more, but we only have this so far. the general form of the Euler-Bernoulli Theory assumes a freestanding beam Basic Statement The most basic for the Euler-Bernoulli Equation looks like this:\n\\begin{equation} \\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x) \\end{equation}\nwhere, \\(w(x)\\) is the deflection of the beam at some direction \\(z\\) at position \\(x\\). \\(q\\) is the load distribution (force per unit length, similar to pressure which is force per unit area, at each point \\(x\\)). \\(E\\) is the Elastic Modulus of the beam, and \\(I\\) the second moment of area of the beam\u0026rsquo;s cross section.\nNote that \\(I\\) must be calculated with respect to the axis perpendicular to the load. 
So, for a beam placed longside by the \\(x\\) axis, and pressed down on the \\(z\\) axis, \\(I\\) should be calculated as: \\(\\iint z^{2}\\dd{y}\\dd{z}\\).\nPretty much all the time, the Elastic Modulus \\(E\\) (how rigid your thing is) and second moment of area \\(I\\) (how distributed are the cross-section\u0026rsquo;s mass) are constant; therefore, we factor them out, making:\n\\begin{align} \u0026amp;\\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x) \\\\ \\Rightarrow\\ \u0026amp; EI \\qty(\\dv[2]x \\dv[2]{w}{x} )=q(x) \\\\ \\Rightarrow\\ \u0026amp; EI \\dv[4]{w}{x} = q(x) \\end{align}\nThis is also apparently used everywhere in engineering to figure out how bendy something will be given some \\(q\\) put along the beam.\nOk, let\u0026rsquo;s take the original form of this equation and take some integrals to see the edges of this thing:\n\\begin{equation} \\dv[2]{x} \\qty(EI \\dv[2]{w}{x}) = q(x) \\end{equation}\nFirst things first, let\u0026rsquo;s take a single integral:\n\\begin{equation} \\dv{x} \\qty(EI \\dv[2]{w}{x}) = -Q \\end{equation}\nThis is the total shear force on the material (the sum of all forces applied to all points \\(\\int q(x)\\).) We have a sign difference\nold notes\nLet\u0026rsquo;s take some transverse load \\(q(x,t)\\), applied at time \\(t\\) at location \\(x\\). To model the load/bending/vibration of the rod, we first have to know a few more things.\nFirst, figure the Young\u0026rsquo;s Modulus \\(E\\) of the thing that you are bending.\nOf course, we also want to know what shape our thing is; more specifically, we want to know how the point masses in our thing is distributed. 
So we will also need the second moment of area \\(I\\).\nFinally, we should have \\(m\\) mass per unit length of the rod we are bending.\nThe Euler-Bernoulli Theory tells us that the deflection (distance from the neutral-axis) each point \\(x\\) in the material should get is:\n\\begin{equation} EI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = q(x,t) \\end{equation}\nSolving this lovely Differential Equation would tell you how far away each point diverges from the neutral point.\nTracing this out over \\((x,t)\\), we can get some trace of how the thing vibrates by measuring the behavior of \\(\\omega\\).\nfree vibrations in Euler-Bernoulli Theory If no time-varying \\(q\\) exists, we then have:\n\\begin{equation} EI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = 0 \\end{equation}\nAnd then some magical Differential Equations happen. I hope to learn them soon.\nThe result here is significant: if we can figure the actual rate of vibrations which we expect.\nHowever, this doesn\u0026rsquo;t really decay\u0026mdash;but funing torks do. How?\nApparently because air resistance\u0026mdash;Zachary Sayyah. 
So Sasha time.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e is a theory in dynamics which describes how much a beam deflect given an applied load.\u003c/p\u003e\n\u003ch2 id=\"assumptions\"\u003eAssumptions\u003c/h2\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e to apply in its basic form, we make assumptions.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThe \u0026ldquo;beam\u0026rdquo; you are bending is modeled as a 1d object; it is only long and is not wide\u003c/li\u003e\n\u003cli\u003eFor this page, \\(+x\\) is \u0026ldquo;right\u0026rdquo;, \\(+y\\) is \u0026ldquo;in\u0026rdquo;, and \\(+z\\) is \u0026ldquo;up\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eProbably more, but we only have this so far.\u003c/li\u003e\n\u003cli\u003ethe general form of the \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e assumes a freestanding beam\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basic-statement\"\u003eBasic Statement\u003c/h2\u003e\n\u003cp\u003eThe most basic for the \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Equation\u003c/a\u003e looks like this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(w(x)\\) is the deflection of the beam at some direction \\(z\\) at position \\(x\\). \\(q\\) is the load distribution (force per unit length, similar to pressure which is force per unit area, at each point \\(x\\)). 
\\(E\\) is the \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e of the beam, and \\(I\\) the \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e of the beam\u0026rsquo;s cross section.\u003c/p\u003e\n\u003cp\u003eNote that \\(I\\) must be calculated with respect to the axis perpendicular to the load. So, for a beam placed longside by the \\(x\\) axis, and pressed down on the \\(z\\) axis, \\(I\\) should be calculated as: \\(\\iint z^{2}\\dd{y}\\dd{z}\\).\u003c/p\u003e\n\u003cp\u003ePretty much all the time, the \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e \\(E\\) (how rigid your thing is) and \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e \\(I\\) (how distributed are the cross-section\u0026rsquo;s mass) are constant; therefore, we factor them out, making:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x) \\\\\n\\Rightarrow\\ \u0026amp; EI \\qty(\\dv[2]x \\dv[2]{w}{x} )=q(x) \\\\\n\\Rightarrow\\ \u0026amp; EI \\dv[4]{w}{x} = q(x)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis is also apparently used everywhere in engineering to figure out how bendy something will be given some \\(q\\) put along the beam.\u003c/p\u003e\n\u003cp\u003eOk, let\u0026rsquo;s take the original form of this equation and take some integrals to see the edges of this thing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{x} \\qty(EI \\dv[2]{w}{x}) = q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFirst things first, let\u0026rsquo;s take a single integral:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x} \\qty(EI \\dv[2]{w}{x}) = -Q\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is the total shear force on the material (the sum of all forces applied to all points \\(\\int q(x)\\).) 
We have a sign difference\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eold notes\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take some \u003ca href=\"/posts/kbhtransverse_loaod/\"\u003etransverse load\u003c/a\u003e \\(q(x,t)\\), applied at time \\(t\\) at location \\(x\\). To model the load/bending/vibration of the rod, we first have to know a few more things.\u003c/p\u003e\n\u003cp\u003eFirst, figure the \u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e \\(E\\) of the thing that you are bending.\u003c/p\u003e\n\u003cp\u003eOf course, we also want to know what shape our thing is; more specifically, we want to know how the point masses in our thing is distributed. So we will also need the \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e \\(I\\).\u003c/p\u003e\n\u003cp\u003eFinally, we should have \\(m\\) mass per unit length of the rod we are bending.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e tells us that the deflection (distance from the neutral-axis) each point \\(x\\) in the material should get is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = q(x,t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving this lovely \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e would tell you how far away each point diverges from the neutral point.\u003c/p\u003e\n\u003cp\u003eTracing this out over \\((x,t)\\), we can get some trace of how the thing vibrates by measuring the behavior of \\(\\omega\\).\u003c/p\u003e\n\u003ch2 id=\"free-vibrations-in-euler-bernoulli-theory--kbheuler-bernoulli-theory-dot-md\"\u003efree vibrations in \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eIf no time-varying \\(q\\) exists, we then 
have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then some magical \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e happen. I hope to learn them soon.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_23-15-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_23-16-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe result here is significant: if we can figure the actual rate of vibrations which we expect.\u003c/p\u003e\n\u003cp\u003eHowever, this doesn\u0026rsquo;t really decay\u0026mdash;but \u003ca href=\"/posts/kbhtuning_forks/\"\u003efuning tork\u003c/a\u003es do. How?\u003c/p\u003e\n\u003cp\u003eApparently because air resistance\u0026mdash;Zachary Sayyah. So Sasha time.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheuler_bernoulli_theory/","tags":null,"title":"Euler-Bernoulli Theory"},{"categories":null,"contents":"\\begin{equation} f(x) = e^{ix} = \\cos (x) + i\\sin (x) \\end{equation}\nthis brings a circle of radius one, because in every point, velocity is orthogonal to where you are (because \\(f\u0026rsquo;(x) = if(x)\\), and multiplying by \\(i\\) accounts for a rotation of 90 degrees.\nAnd so,\n\\begin{equation} z = re^{i\\theta} \\end{equation}\ngives any point in the imaginary polar plane.\n","html":"\u003cp\u003e\\begin{equation}\nf(x) = e^{ix} = \\cos (x) + i\\sin (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis brings a circle of radius one, because in every point, velocity is orthogonal to where you are (because \\(f\u0026rsquo;(x) = if(x)\\), and multiplying by \\(i\\) accounts for a rotation of 90 degrees.\u003c/p\u003e\n\u003cp\u003eAnd so,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = re^{i\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egives any point in the imaginary polar 
plane.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheuler_s_equation/","tags":null,"title":"Euler's Equation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbheurope/","tags":null,"title":"Europe"},{"categories":null,"contents":"An event is sub-subset of the sample space \\(E \\in S\\). These are some subset to which you ascribe some meaning.\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhevent/\"\u003eevent\u003c/a\u003e is sub-subset of the \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e \\(E \\in S\\). These are some subset to which you ascribe some meaning.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhevent/","tags":null,"title":"event"},{"categories":null,"contents":"Preamble As notation differs between Alg4DM (which the presentation and notes use) and the paper, we provide a note here to standardize the notation of the PGA formulation to avoid confusion.\nRecall that the non-linear program formulation of the naive PGA implementation gives:\n\\begin{align} \\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\ \\text{such that}\\ \u0026amp;J\\theta = \\bold{1} \\\\ \u0026amp; \\theta \\geq \\bold{0} \\\\ \u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{align}\nfor:\n\\begin{equation} f(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta} \\end{equation}\nand\n\\begin{equation} h_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i} \\end{equation}\nwhere \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\), and \\(\\bold{T}_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;))\\) is a transition matrix between state and controller latent pairs.\nQuestion 1 In the Non Linear Program (NLP) formulation above, the constraint \\(J\\theta = \\bold{1}\\) is a block-diagonal matrix filled with only ones and zeros that serve a particular purpose.\nThough we didn\u0026rsquo;t describe \\(J\\) in detail in the talk apart from its function, recall that its 
job is to add up certain elements in the \\(\\theta\\) vector to ensure they satisfy certain constraints. What breaks down about PGA if that constraint is removed (i.e. what does it do)? Given your answer in 1, what should be the input and output dimensions of the \\(J\\) map? You may use in your answer, as needed, any expression that involves \\(|X|\\) the number of nodes in the controller, \\(|A|\\) the size of the action space, \\(|O|\\) the size of the observation space and \\(|S|\\) the size of the state space. Answer Recall that:\n\\begin{equation} \\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f)) \\end{equation}\nWe want to ensure that each output \\(\\sum_{a}\\Psi(a | x_{j}) = 1\\), and \\(\\sum_{x} \\eta(x|x_{i},a_{j},o_{k}) = 1\\) (that both the action distributions at a node and the node transitions are indeed categirocial probability distributions).\nAs we flattened every combination of possible outputs of \\(\\Psi\\) and \\(\\eta\\) of output onto \\(\\theta\\), the constraint \\(J\\theta = \\bold{1}\\) ensures that each \\(\\Psi\\) and \\(\\eta\\) conditioned on a pair of current condition remains a probability distribution. 
Otherwise, the model will likely collapse to taking advantage of impossible probabilities (\u0026ldquo;we have \\(300\\%\\) chance of a highly valuable state!\u0026rdquo;) to maximize the utility.\nIts signature is:\n\\begin{equation} J: \\mathbb{R}^{|A \\times X \\times X \\times X \\times A \\times O|} \\to \\mathbb{R}^{|X \\times X \\times A \\times O|} \\end{equation}\nEach input dimension corresponds to a slot on \\(\\theta\\), and each output dimension corresponds to a thing we want to add up to \\(1\\), which means each pair of conditions to the distributions of \\(\\Psi\\) and \\(\\eta\\).\nAs there are \\(A \\cdot X\\) possible combinations of \\(a,x_{j}\\) for \\(\\Psi(a|x_{j})\\) and \\(X \\cdot X\\cdot A\\cdot O\\) possible combinations of \\(x, x_{i}, a_{j},o_{k}\\) for \\(\\eta(x|x_i, a_{j}, o_{k})\\) to be tabulated for probability in \\(\\theta\\), this matrix has \\(A^{2}X^{3}O\\) columns as input.\nAs there are \\(X\\) possible prior conditions to \\(\\Psi\\) and \\(X \\cdot A \\cdot O\\) possible prior conditions to \\(\\eta\\), this means the matrix should have \\(X^{2}AO\\) rows of output.\nQuestion 2 The constraints to the optimization objective for controller C-POMDPs is extremely similar to non-constrained controller POMDPs. 
In effect, they only differ by the third constraint:\n\\begin{equation} h_{i}(\\theta) \\leq \\varepsilon_{i} \\end{equation}\nIn fact, both systems have the same, exact, unchanged optimization objective that doesn\u0026rsquo;t change regardless of constraint: \\(\\max_{\\theta}\\ \u0026amp;f(\\theta)\\).\nWhy is solving C-POMDPs using controllers gradient methods much harder than solving POMDPs using a similar objective, and how exactly does the PGA authors\u0026rsquo; contributions address this issue to make its computation feasible?\nAnswer In order to ensure that the output distribution by gradient descent actually fits the constraints provided (and other constraints regarding them being probability distributions), the constraints to the optimization problem\u0026mdash;\\(h_{i}(\\theta)\\) included\u0026mdash;needs to be computed per step to project the raw output parameters down into valid parameters.\n\\(h_{i}\\), importantly, is non-linear and non-convex. Removing this constraint makes the optimization bounds linear which drastically speeds up computation. 
This is why POMDPs leveraging similar approaches don\u0026rsquo;t have as much computational intractability.\nTo solve this, the PGA authors linearise the \\(h_{i}\\) function using a first-order Taylor expansion in order to make this last constraint linear as well, which makes the entire projection problem have linear constraints: vastly improving computational efficiency.\n","html":"\u003ch2 id=\"preamble\"\u003ePreamble\u003c/h2\u003e\n\u003cp\u003eAs notation differs between Alg4DM (which the presentation and notes use) and the paper, we provide a note here to standardize the notation of the PGA formulation to avoid confusion.\u003c/p\u003e\n\u003cp\u003eRecall that the non-linear program formulation of the naive PGA implementation gives:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\\n\\text{such that}\\ \u0026amp;J\\theta = \\bold{1} \\\\\n\u0026amp; \\theta \\geq \\bold{0} \\\\\n\u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{align}\u003c/p\u003e\n\u003cp\u003efor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\), and \\(\\bold{T}_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;))\\) is a transition matrix between state and controller latent pairs.\u003c/p\u003e\n\u003ch2 id=\"question-1\"\u003eQuestion 1\u003c/h2\u003e\n\u003cp\u003eIn the Non Linear Program (NLP) formulation above, the constraint \\(J\\theta = \\bold{1}\\) is a block-diagonal matrix filled with only ones and zeros that serve a particular purpose.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThough we didn\u0026rsquo;t describe \\(J\\) in detail in the talk apart from its function, recall that its job is to add up 
certain elements in the \\(\\theta\\) vector to ensure they satisfy certain constraints. What breaks down about PGA if that constraint is removed (i.e. what does it do)?\u003c/li\u003e\n\u003cli\u003eGiven your answer in 1, what should be the input and output dimensions of the \\(J\\) map? You may use in your answer, as needed, any expression that involves \\(|X|\\) the number of nodes in the controller, \\(|A|\\) the size of the action space, \\(|O|\\) the size of the observation space and \\(|S|\\) the size of the state space.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"answer\"\u003eAnswer\u003c/h3\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to ensure that each output \\(\\sum_{a}\\Psi(a | x_{j}) = 1\\), and \\(\\sum_{x} \\eta(x|x_{i},a_{j},o_{k}) = 1\\) (that both the action distributions at a node and the node transitions are indeed categirocial probability distributions).\u003c/p\u003e\n\u003cp\u003eAs we flattened every combination of possible outputs of \\(\\Psi\\) and \\(\\eta\\) of output onto \\(\\theta\\), the constraint \\(J\\theta = \\bold{1}\\) ensures that each \\(\\Psi\\) and \\(\\eta\\) conditioned on a pair of current condition remains a probability distribution. 
Otherwise, the model will likely collapse to taking advantage of impossible probabilities (\u0026ldquo;we have \\(300\\%\\) chance of a highly valuable state!\u0026rdquo;) to maximize the utility.\u003c/p\u003e\n\u003cp\u003eIts signature is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ: \\mathbb{R}^{|A \\times X \\times X \\times X \\times A \\times O|} \\to \\mathbb{R}^{|X \\times X \\times A \\times O|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach input dimension corresponds to a slot on \\(\\theta\\), and each output dimension corresponds to a thing we want to add up to \\(1\\), which means each pair of conditions to the distributions of \\(\\Psi\\) and \\(\\eta\\).\u003c/p\u003e\n\u003cp\u003eAs there are \\(A \\cdot X\\) possible combinations of \\(a,x_{j}\\) for \\(\\Psi(a|x_{j})\\) and \\(X \\cdot X\\cdot A\\cdot O\\) possible combinations of \\(x, x_{i}, a_{j},o_{k}\\) for \\(\\eta(x|x_i, a_{j}, o_{k})\\) to be tabulated for probability in \\(\\theta\\), this matrix has \\(A^{2}X^{3}O\\) columns as input.\u003c/p\u003e\n\u003cp\u003eAs there are \\(X\\) possible prior conditions to \\(\\Psi\\) and \\(X \\cdot A \\cdot O\\) possible prior conditions to \\(\\eta\\), this means the matrix should have \\(X^{2}AO\\) rows of output.\u003c/p\u003e\n\u003ch2 id=\"question-2\"\u003eQuestion 2\u003c/h2\u003e\n\u003cp\u003eThe constraints to the optimization objective for controller C-POMDPs is extremely similar to non-constrained controller POMDPs. 
In effect, they only differ by the third constraint:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta) \\leq \\varepsilon_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn fact, both systems have the same, exact, unchanged optimization objective that doesn\u0026rsquo;t change regardless of constraint: \\(\\max_{\\theta}\\ \u0026amp;f(\\theta)\\).\u003c/p\u003e\n\u003cp\u003eWhy is solving C-POMDPs using controllers gradient methods much harder than solving POMDPs using a similar objective, and how exactly does the PGA authors\u0026rsquo; contributions address this issue to make its computation feasible?\u003c/p\u003e\n\u003ch3 id=\"answer\"\u003eAnswer\u003c/h3\u003e\n\u003cp\u003eIn order to ensure that the output distribution by gradient descent actually fits the constraints provided (and other constraints regarding them being probability distributions), the constraints to the optimization problem\u0026mdash;\\(h_{i}(\\theta)\\) included\u0026mdash;needs to be computed per step to project the raw output parameters down into valid parameters.\u003c/p\u003e\n\u003cp\u003e\\(h_{i}\\), importantly, is \u003cstrong\u003enon-linear\u003c/strong\u003e and \u003cstrong\u003enon-convex\u003c/strong\u003e. Removing this constraint makes the optimization bounds linear which drastically speeds up computation. 
This is why POMDPs leveraging similar approaches don\u0026rsquo;t have as much computational intractability.\u003c/p\u003e\n\u003cp\u003eTo solve this, the PGA authors linearise the \\(h_{i}\\) function using a first-order Taylor expansion in order to make this last constraint linear as well, which makes the entire projection problem have linear constraints: vastly improving computational efficiency.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpgapset/","tags":null,"title":"Exercises in PGA"},{"categories":null,"contents":"A result so important it gets a page.\nEvery operator on a finite-dimensional, non-zero, complex vector space has an eigenvalue.\nProof:\nSuppose \\(V\\) is a complex vector space with dimension \\(n \u0026gt; 0\\), and \\(T \\in \\mathcal{L}(V)\\). Choose \\(v \\in V, v\\neq 0\\) (possible as \\(V\\) is non-zero):\nConstruct a list of \\(n+1\\) vectors:\n\\begin{equation} v, Tv, \\dots T^{n} v \\end{equation}\nbecause we managed to cram \\(n+1\\) vectors into a list for a vector space with dimension \\(n\\), that list is linearly dependent.\nAnd thus, by definition of linearly dependence, exists a set of \\(a_0, \u0026hellip; a_{n} \\in \\mathbb{C}\\), which not all are \\(0\\), such that:\n\\begin{equation} 0 = a_0 v + a_1 T v + \\dots + a_{n} T^{n} v \\end{equation}\nNote that, because \\(v \\neq 0\\), \\(a_{1} \u0026hellip; a_{n}\\) can\u0026rsquo;t all be \\(0\\) either because otherwise \\(a_0 = 0\\) making all \\(a_{j}=0\\).\nNow, this polynomial can be completely factored because of the fundamental theorem of algebra into linear factors, \\(a_{0} + a_{1}z + \u0026hellip; a_{n}z^{n} = c(z-\\lambda_{1}) \\dots (z- \\lambda_{m})\\). 
We have to invoke the fundamental theorem of algebra with complex factors \\(z\\) because we haven\u0026rsquo;t shown it holds for polynomial operators yet.\nHowever, the existence of such a complete factoring over the complex numbers means that, with possibly complex number \\(\\lambda_{j}\\) values:\n\\begin{align} 0 \u0026amp;= a_{0} v + a_{1} Tv + \\dots a_{n} T^{n} v \\\\ \u0026amp;= (a_{0} I + a_{1} T + \\dots a_{n} T^{n}) v \\\\ \u0026amp;= c(T - \\lambda_{1} I) \\dots (T- \\lambda_{m} I)v \\end{align}\nnote that \\(m\\) is not necessarily \\(n\\) because different multiplicities.\nNow, \\(c\\) cannot be \\(0\\) because \\(a_0 \\neq 0\\), and multiplying everything out out\u0026hellip; makes the ending not zero?\nGiven \\(c \\neq 0\\), \\(v \\neq 0\\), and yet the map maps \\(v\\) to \\(0\\), at least one of the maps has to be non-injective. And because the properties of eigenvalues, some \\((T- \\lambda_{j} I)\\) being non-injective for a finite-dimensional vector space means that \\(\\lambda_{j}\\) is an eigenvalue of \\(T\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eA result so important it gets a page.\u003c/p\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, non-zero, \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e has an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eSuppose \\(V\\) is a \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e with \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e \\(n \u0026gt; 0\\), and \\(T \\in \\mathcal{L}(V)\\). 
Choose \\(v \\in V, v\\neq 0\\) (possible as \\(V\\) is non-zero):\u003c/p\u003e\n\u003cp\u003eConstruct a list of \\(n+1\\) \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv, Tv, \\dots T^{n} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause we managed to cram \\(n+1\\) \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es into a \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e for a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e with \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e \\(n\\), that list is \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAnd thus, by definition of \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependence\u003c/a\u003e, exists a set of \\(a_0, \u0026hellip; a_{n} \\in \\mathbb{C}\\), which not all are \\(0\\), such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_0 v + a_1 T v + \\dots + a_{n} T^{n} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote that, because \\(v \\neq 0\\), \\(a_{1} \u0026hellip; a_{n}\\) can\u0026rsquo;t all be \\(0\\) either because otherwise \\(a_0 = 0\\) making all \\(a_{j}=0\\).\u003c/p\u003e\n\u003cp\u003eNow, this \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e can be completely factored because of the \u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e into linear factors, \\(a_{0} + a_{1}z + \u0026hellip; a_{n}z^{n} = c(z-\\lambda_{1}) \\dots (z- \\lambda_{m})\\). 
We have to invoke the \u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e with complex factors \\(z\\) because we haven\u0026rsquo;t shown it holds for \u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial operator\u003c/a\u003es yet.\u003c/p\u003e\n\u003cp\u003eHowever, the existence of such a complete \u003ca href=\"/posts/kbhthoughts_on_axler_4/#factoring\"\u003efactoring\u003c/a\u003e over the \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es means that, with possibly \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \\(\\lambda_{j}\\) values:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n0 \u0026amp;= a_{0} v + a_{1} Tv + \\dots a_{n} T^{n} v \\\\\n\u0026amp;= (a_{0} I + a_{1} T + \\dots a_{n} T^{n}) v \\\\\n\u0026amp;= c(T - \\lambda_{1} I) \\dots (T- \\lambda_{m} I)v\n\\end{align}\u003c/p\u003e\n\u003cp\u003enote that \\(m\\) is not necessarily \\(n\\) because different multiplicities.\u003c/p\u003e\n\u003cp\u003eNow, \\(c\\) cannot be \\(0\\) because \\(a_0 \\neq 0\\), and multiplying everything out out\u0026hellip; makes the ending not zero?\u003c/p\u003e\n\u003cp\u003eGiven \\(c \\neq 0\\), \\(v \\neq 0\\), and yet the map maps \\(v\\) to \\(0\\), at least one of the maps has to be non-injective. And because the \u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e, some \\((T- \\lambda_{j} I)\\) being non-injective for a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e means that \\(\\lambda_{j}\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/","tags":null,"title":"existence of eigenvalue of operators"},{"categories":null,"contents":"expectation is the calculation of the \u0026ldquo;intended\u0026rdquo; or \u0026ldquo;target\u0026rdquo; value given a random variable:\n\\begin{equation} \\mathbb{E}[M] = \\sum_{x} x\\ p(X=x) \\end{equation}\nStandardize variables to \\(z\\) by dividing The correlation is simply their \u0026ldquo;product\u0026rdquo;: means of positive and negative groups The expectation is the average of the counts of the data you have.\nproperties of expectation these holds REGARDLESS of whether or not the variables you are doing is independent, IID, etc.\nLinearity in the first slot expectation has additivity and homogeneity.\n\\begin{equation} \\mathbb{E}[aX+b] = a\\mathbb{E}[X] + b \\end{equation}\nClosure under expectation \\begin{equation} E[X+Y] = E[X]+E[Y] \\end{equation}\nUnconscious statistician \\begin{equation} \\mathbb{E}[g(x)] = \\sum_{x \\in X}^{} g(x) P(X=x) \\end{equation}\nwhereby, if \\(g\\) is a normal function, you can just add up all the possible output. This property can be used to show the firts results.\nconditional expectation We can perform expectation via conditional probability.\n\\begin{equation} E[X|Y=y] = \\sum_{x}^{} x \\cdot p(X=x|Y=y) \\end{equation}\nlaw of total expectation \\begin{equation} \\mathbb{E}[X] = \\sum_{y}^{}\\mathbb{E}[X|Y=y] P(Y=y) \\end{equation}\nwhat is the \u0026ldquo;background variable\u0026rdquo;? 
the \\(y\\) value above.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e is the calculation of the \u0026ldquo;intended\u0026rdquo; or \u0026ldquo;target\u0026rdquo; value given a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[M] = \\sum_{x} x\\ p(X=x)\n\\end{equation}\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eStandardize variables to \\(z\\) by dividing\u003c/li\u003e\n\u003cli\u003eThe correlation is simply their \u0026ldquo;product\u0026rdquo;: means of positive and negative groups\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e is the average of the counts of the data you have.\u003c/p\u003e\n\u003ch2 id=\"properties-of-expectation--kbhexpectation-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ethese holds \u003cstrong\u003eREGARDLESS\u003c/strong\u003e of whether or not the variables you are doing is \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e, \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, etc.\u003c/p\u003e\n\u003ch3 id=\"linearity-in-the-first-slot\"\u003eLinearity in the first slot\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e has additivity and \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[aX+b] = a\\mathbb{E}[X] + b\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"closure-under-expectation\"\u003eClosure under expectation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nE[X+Y] = E[X]+E[Y]\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"unconscious-statistician\"\u003eUnconscious 
statistician\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[g(x)] = \\sum_{x \\in X}^{} g(x) P(X=x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, if \\(g\\) is a normal function, you can just add up all the possible output. This property can be used to show the firts results.\u003c/p\u003e\n\u003ch2 id=\"conditional-expectation\"\u003econditional expectation\u003c/h2\u003e\n\u003cp\u003eWe can perform \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e via \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[X|Y=y] = \\sum_{x}^{} x \\cdot p(X=x|Y=y)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"law-of-total-expectation\"\u003elaw of total expectation\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[X] = \\sum_{y}^{}\\mathbb{E}[X|Y=y] P(Y=y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhat is the \u0026ldquo;background variable\u0026rdquo;? 
the \\(y\\) value above.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexpectation/","tags":null,"title":"expectation"},{"categories":null,"contents":" anticipate all states that the agent may find itself in hard-code responses to each one This is bad because you have to have big brain to think about and anticipate all the possible states (to provide a \u0026ldquo;complete strategy\u0026rdquo;), which is often impractical if not impossible.\nDisadvantages You have to know the finite possible state space, and solve them \u0026ldquo;correctly\u0026rdquo;\n","html":"\u003col\u003e\n\u003cli\u003eanticipate all states that the \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e may find itself in\u003c/li\u003e\n\u003cli\u003ehard-code responses to each one\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is bad because you have to have big brain to think about and anticipate all the possible states (to provide a \u0026ldquo;complete strategy\u0026rdquo;), which is often impractical if not impossible.\u003c/p\u003e\n\u003ch2 id=\"disadvantages\"\u003eDisadvantages\u003c/h2\u003e\n\u003cp\u003eYou have to know the finite possible state space, and solve them \u0026ldquo;correctly\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexplicit_programming/","tags":null,"title":"explicit programming"},{"categories":null,"contents":"You are the president, and you are trying to choose the secretary of state. You can only interview people in sequence, and you have to hire on the spot. There are a known number of candidates. We want to maximize the probability of selecting the best candidate. You are given no priors.\nHow do we know which candidates we explore, and which candidates we exploit?\nSometimes, you don\u0026rsquo;t have a way of getting data.\nBinary Bandit We are playing with \\(n\\) binary slot machines.\narm \\(j\\) pays off \\(1\\) with probability \\(\\theta_{j}\\), and pays of \\(0\\) otherwise. 
we do not know $θj$s exogenously and have to learn it we only have \\(h\\) pulls in total across all \\(n\\) slot machines As we perform \\(k\\) pulls, we can keep track of a separate Beta Distribution representing the probability of success for each of the slot machines.\nEssentially, we have a problem whereby we are at a stationary Markov Decision Process whereby the only difference between actions is how much reward we get.\nBayesian Model Estimation We don\u0026rsquo;t actually know the probability of winning (called \u0026ldquo;\\(\\theta\\)\u0026rdquo; in the figure above), and therefore have to \u0026ldquo;explore\u0026rdquo; the system to actually know about it.\nWe want to compute \\(\\rho_{a}\\):\n\\begin{equation} \\rho_{a} = P(win_{a} | w_{a}, l_{a}) = \\int_{0}^{1} \\theta \\times Beta(\\theta | w_{a}+1, l_{a}+1) \\dd{\\theta} \\end{equation}\nwhere, \\(w_{a}\\) is the number of successes for arm \\(a\\), and \\(l_{a}\\) is the number of failures observed.\nThis is exactly the \\(\\mathbb{E}[Beta(w_{a}+1, l_{a}+1)] = \\frac{w_{a}+1}{(w_{a}+1)+(l_{a}+1)}\\)\nA \u0026ldquo;greedy action\u0026rdquo; is an action which simply chooses the \\(a\\) out of all \\(\\rho_{a}\\) which maximizes this probability. We often don\u0026rsquo;t want that because we want to explore the space.\nApproximate Exploration Strategies Undirected Exploration Directed Exploration Optimal Exploration Optimal Exploration is not always possible because its computationally to complex. But its in theory possible. See Optimal Exploration.\n","html":"\u003cp\u003eYou are the president, and you are trying to choose the secretary of state. You can only interview people in sequence, and you have to hire on the spot. There are a known number of candidates. We want to maximize the probability of selecting the best candidate. 
You are given no priors.\u003c/p\u003e\n\u003cp\u003eHow do we know which candidates we explore, and which candidates we exploit?\u003c/p\u003e\n\u003cp\u003eSometimes, you don\u0026rsquo;t have a way of getting data.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"binary-bandit\"\u003eBinary Bandit\u003c/h2\u003e\n\u003cp\u003eWe are playing with \\(n\\) binary slot machines.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003earm \\(j\\) pays off \\(1\\) with probability \\(\\theta_{j}\\), and pays of \\(0\\) otherwise. we do not know $θ\u003csub\u003ej\u003c/sub\u003e$s exogenously and have to learn it\u003c/li\u003e\n\u003cli\u003ewe only have \\(h\\) pulls in total across all \\(n\\) slot machines\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAs we perform \\(k\\) pulls, we can keep track of a separate \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e representing the probability of success for each of the slot machines.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-31_13-01-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eEssentially, we have a problem whereby we are at a \u003ca href=\"/posts/kbhmarkov_decision_process/#stationary-id-5bb5350e-04e4-46dc-9ea8-cb7bb09edd42-markov-decision-process\"\u003estationary Markov Decision Process\u003c/a\u003e whereby the only difference between actions is how much reward we get.\u003c/p\u003e\n\u003ch3 id=\"bayesian-model-estimation\"\u003eBayesian Model Estimation\u003c/h3\u003e\n\u003cp\u003eWe don\u0026rsquo;t actually know the probability of winning (called \u0026ldquo;\\(\\theta\\)\u0026rdquo; in the figure above), and therefore have to \u0026ldquo;explore\u0026rdquo; the system to actually know about it.\u003c/p\u003e\n\u003cp\u003eWe want to compute \\(\\rho_{a}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho_{a} = P(win_{a} | w_{a}, l_{a}) = \\int_{0}^{1} \\theta \\times Beta(\\theta | w_{a}+1, l_{a}+1) 
\\dd{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(w_{a}\\) is the number of successes for arm \\(a\\), and \\(l_{a}\\) is the number of failures observed.\u003c/p\u003e\n\u003cp\u003eThis is exactly the \\(\\mathbb{E}[Beta(w_{a}+1, l_{a}+1)] = \\frac{w_{a}+1}{(w_{a}+1)+(l_{a}+1)}\\)\u003c/p\u003e\n\u003cp\u003eA \u0026ldquo;\u003ca href=\"#bayesian-model-estimation\"\u003egreedy action\u003c/a\u003e\u0026rdquo; is an action which simply chooses the \\(a\\) out of all \\(\\rho_{a}\\) which maximizes this probability. We often don\u0026rsquo;t want that because we want to explore the space.\u003c/p\u003e\n\u003ch3 id=\"approximate-exploration-strategies\"\u003eApproximate Exploration Strategies\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/\"\u003eUndirected Exploration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/\"\u003eDirected Exploration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"optimal-exploration--kbhoptimal-exploration-dot-md\"\u003e\u003ca href=\"/posts/kbhoptimal_exploration/\"\u003eOptimal Exploration\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhoptimal_exploration/\"\u003eOptimal Exploration\u003c/a\u003e is not always possible because its computationally to complex. But its in theory possible. See \u003ca href=\"/posts/kbhoptimal_exploration/\"\u003eOptimal Exploration\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexploration_and_exploitation/","tags":null,"title":"Exploration and Exploitation"},{"categories":null,"contents":"Analogous to poisson distribution, but for continuous random variable. 
Consider a distribution which lasts a duration of time until success; what\u0026rsquo;s the probability that success is found in some range of times:\n\u0026ldquo;What\u0026rsquo;s the probability that there is an earthquake in \\(k\\) years if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\nconstituents $λ$\u0026mdash;\u0026ldquo;rate\u0026rdquo;: event rate (mean occurrence per time) requirements \\begin{equation} f(x) = \\begin{cases} \\lambda e^{-\\lambda x}, x\\geq 0\\\\ 0, x\u0026lt; 0 \\end{cases} \\end{equation}\nadditional information expectation: \\(\\frac{1}{\\lambda}\\) variance: \\(\\frac{1}{\\lambda^{2}}\\) exponential distribution is memoryless An exponential distribution doesn\u0026rsquo;t care about what happened before.\n\u0026ldquo;On average, we have a request every 5 minutes. There have been 2 minutes with no requests. What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\nis the same statement as\n\u0026ldquo;On average, we have a request every 5 minutes. There have been 2 minutes with no requests. What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\nThat is:\n\\begin{equation} P(s+t|s) = P(t) \\end{equation}\n","html":"\u003cp\u003eAnalogous to \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e, but for \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e. 
Consider a distribution which lasts a duration of time until success; what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e that success is found in some range of times:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;What\u0026rsquo;s the probability that there are an earthquake in \\(k\\) years if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e$λ$\u0026mdash;\u0026ldquo;rate\u0026rdquo;: event rate (mean occurrence per time)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\begin{cases}\n\\lambda e^{-\\lambda x}, x\\geq 0\\\\\n0, x\u0026lt; 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eexpectation\u003c/strong\u003e: \\(\\frac{1}{\\lambda}\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evariance\u003c/strong\u003e: \\(\\frac{1}{\\lambda^{2}}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exponential-distribution--kbhexponential-distribution-dot-md--is-memoryless\"\u003e\u003ca href=\"/posts/kbhexponential_distribution/\"\u003eexponential distribution\u003c/a\u003e is memoryless\u003c/h3\u003e\n\u003cp\u003eAn \u003ca href=\"/posts/kbhexponential_distribution/\"\u003eexponential distribution\u003c/a\u003e doesn\u0026rsquo;t care about what happened before.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;On average, we have a request every 5 minutes. There have been 2 minutes with no requests. What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eis the same statement as\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;On average, we have a request every 5 minutes. 
\u003cdel\u003eThere have been 2 minutes with no requests.\u003c/del\u003e What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(s+t|s) = P(t)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexponential_distribution/","tags":null,"title":"exponential distribution"},{"categories":null,"contents":"\\(\\mathbb{F}^n\\) is the set of all lists of length \\(n\\) with elements of \\(\\mathbb{F}\\). These are a special case of matricies.\nFormally\u0026mdash;\n\\begin{equation} \\mathbb{F}^n = \\{(x1,\\ldots,x_n):x_j\\in\\mathbb{F}, \\forall j =1,\\ldots,n\\} \\end{equation}\nFor some \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\) and \\(j \\in \\{1,\\ldots,n\\}\\), we say \\(x_j\\) is the \\(j^{th}\\) coordinate in \\((x_1,\\ldots,x_n)\\).\nadditional information addition in \\(\\mathbb{F}^n\\) Addition is defined by adding corresponding coordinates:\n\\begin{equation} (x1,\\ldots,x_n) + (y_1,\\ldots,y_n) = (x_1+y_1, \\ldots,x_n+y_n) \\end{equation}\naddition in \\(\\mathbb{F}^n\\) is commutative If we have \\(x,y\\in \\mathbb{F}^n\\), then \\(x+y = y+x\\).\nThe proof of this holds because of how addition works and the fact that you can pairwise commute addition in \\(\\mathbb{F}\\).\n\\begin{align} x+y \u0026amp;= (x_1,\\ldots,x_n) + (y_1,\\ldots,y_n)\\\\ \u0026amp;= (x_1+y_1,\\ldots,x_n+y_n)\\\\ \u0026amp;= (y_1+x_1,\\ldots,y_n+x_n)\\\\ \u0026amp;= (y_1,\\ldots,y_n) + (x_1,\\ldots,x_n)\\\\ \u0026amp;= y+x \\end{align}\nThis is a lesson is why avoiding explicit coordinates is good.\nadditive inverse of \\(\\mathbb{F}^n\\) For \\(x \\in \\mathbb{F}^n\\), the additive inverse of \\(x\\), written as \\(-x\\) is the vector \\(-x\\in \\mathbb{F}^n\\) such that:\n\\begin{equation} x+(-x) = 0 \\end{equation}\nWhich really means that its the additive inverse of each of the coordinates.\nscalar multiplication in \\(\\mathbb{F}^n\\) At 
present, we are only going to concern ourselves with the product of a number \\(\\lambda\\) and a vector \\(\\mathbb{F}^n\\). This is done by multiplying each coordinate of the vector by \\(\\lambda\\).\n\\begin{equation} \\lambda (x_1,\\ldots,x_n) = (\\lambda x_1, \\ldots, \\lambda x_n) \\end{equation}\nwhere, \\(\\lambda \\in \\mathbb{F}\\), and \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\).\nThe geometric interpretation of this is a scaling operation of vectors.\n","html":"\u003cp\u003e\\(\\mathbb{F}^n\\) is the set of all lists of length \\(n\\) with elements of \\(\\mathbb{F}\\). These are a special case of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFormally\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{F}^n = \\{(x_1,\\ldots,x_n):x_j\\in\\mathbb{F}, \\forall j =1,\\ldots,n\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\) and \\(j \\in \\{1,\\ldots,n\\}\\), we say \\(x_j\\) is the \\(j^{th}\\) \u003cstrong\u003ecoordinate\u003c/strong\u003e in \\((x_1,\\ldots,x_n)\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"addition-in-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\)\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eAddition\u003c/em\u003e is defined by adding corresponding coordinates:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x_1,\\ldots,x_n) + (y_1,\\ldots,y_n) = (x_1+y_1, \\ldots,x_n+y_n)\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"addition-in-mathbb-f-n-is-commutative\"\u003eaddition in \\(\\mathbb{F}^n\\) is commutative\u003c/h4\u003e\n\u003cp\u003eIf we have \\(x,y\\in \\mathbb{F}^n\\), then \\(x+y = y+x\\).\u003c/p\u003e\n\u003cp\u003eThe proof of this holds because of how addition works and the fact that you can pairwise commute addition in \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nx+y \u0026amp;= (x_1,\\ldots,x_n) + 
(y_1,\\ldots,y_n)\\\\\n\u0026amp;= (x_1+y_1,\\ldots,x_n+y_n)\\\\\n\u0026amp;= (y_1+x_1,\\ldots,y_n+x_n)\\\\\n\u0026amp;= (y_1,\\ldots,y_n) + (x_1,\\ldots,x_n)\\\\\n\u0026amp;= y+x\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis is a lesson is why avoiding explicit coordinates is good.\u003c/p\u003e\n\u003ch3 id=\"additive-inverse-of-mathbb-f-n\"\u003eadditive inverse of \\(\\mathbb{F}^n\\)\u003c/h3\u003e\n\u003cp\u003eFor \\(x \\in \\mathbb{F}^n\\), the \u003cstrong\u003eadditive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e\u003c/strong\u003e of \\(x\\), written as \\(-x\\) is the vector \\(-x\\in \\mathbb{F}^n\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx+(-x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich really means that its the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e of each of the \u003ca href=\"/posts/kbhlists_over_fields/\"\u003ecoordinate\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/h3\u003e\n\u003cp\u003eAt present, we are only going to concern ourselves with the product of a number \\(\\lambda\\) and a vector \\(\\mathbb{F}^n\\). 
This is done by multiplying each coordinate of the vector by \\(\\lambda\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda (x_1,\\ldots,x_n) = (\\lambda x_1, \\ldots, \\lambda x_n)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\lambda \\in \\mathbb{F}\\), and \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\).\u003c/p\u003e\n\u003cp\u003eThe geometric interpretation of this is a scaling operation of vectors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlists_over_fields/","tags":null,"title":"F^n"},{"categories":null,"contents":"We define a set \\(\\mathbb{F}^{s}\\), which is the set of unit functions that maps from any set \\(S\\) to \\(\\mathbb{F}\\).\ncloseness of addition \\begin{equation} (f+g)(x) = f(x)+g(x), \\forall f,g \\in \\mathbb{F}^{S}, x \\in S \\end{equation}\ncloseness of scalar multiplication \\begin{equation} (\\lambda f)(x)=\\lambda f(x), \\forall \\lambda \\in \\mathbb{F}, f \\in \\mathbb{F}^{S}, x \\in S \\end{equation}\ncommutativity inherits \\(\\mathbb{F}\\) (for the codomain of functions \\(f\\) and \\(g\\))\nassociativity inherits \\(\\mathbb{F}\\) for codomain or is just \\(\\mathbb{F}\\) for scalar\ndistribution inherits distribution in \\(\\mathbb{F}\\) on the codomain again\nadditive identity \\begin{equation} 0(x) = 0 \\end{equation}\nadditive inverse \\begin{equation} (-f)(x) = -f(x) \\end{equation}\nmultiplicative identity \\(1\\) hee hee\n","html":"\u003cp\u003eWe define a set \\(\\mathbb{F}^{s}\\), which is the set of unit functions that maps from any set \\(S\\) to \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003ch2 id=\"closeness-of-addition\"\u003ecloseness of addition\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(f+g)(x) = f(x)+g(x), \\forall f,g \\in \\mathbb{F}^{S}, x \\in S\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"closeness-of-scalar-multiplication\"\u003ecloseness of scalar multiplication\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda f)(x)=\\lambda f(x), \\forall \\lambda \\in 
\\mathbb{F}, f \\in \\mathbb{F}^{S}, x \\in S\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"commutativity\"\u003ecommutativity\u003c/h2\u003e\n\u003cp\u003einherits \\(\\mathbb{F}\\) (for the codomain of functions \\(f\\) and \\(g\\))\u003c/p\u003e\n\u003ch2 id=\"associativity\"\u003eassociativity\u003c/h2\u003e\n\u003cp\u003einherits \\(\\mathbb{F}\\) for codomain or is just \\(\\mathbb{F}\\) for scalar\u003c/p\u003e\n\u003ch2 id=\"distribution\"\u003edistribution\u003c/h2\u003e\n\u003cp\u003einherits distribution in \\(\\mathbb{F}\\) on the codomain again\u003c/p\u003e\n\u003ch2 id=\"additive-identity\"\u003eadditive identity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n0(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additive-inverse\"\u003eadditive inverse\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(-f)(x) = -f(x)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"multiplicative-identity\"\u003emultiplicative identity\u003c/h2\u003e\n\u003cp\u003e\\(1\\) hee hee\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfs_is_a_vector_space/","tags":null,"title":"F^s is a Vector Space Over F"},{"categories":null,"contents":"in probability, a factor \\(\\phi\\) is a value you can assign to each distinct value in a discrete distribution which acts as the probability of that value occurring. They are considered parameters of the discrete distribution.\nIf you don\u0026rsquo;t have discrete variables, factors allow you to state \\(p(x|y)\\) in terms of a function \\(\\phi(x,y)\\).\nSee also Rejection Sampling\nfactor operations factor product \\begin{equation} \\phi_{3} (x,y,z) = \\phi_{1} (x,y) \\cdot \\phi_{2}(y,z) \\end{equation}\nfactor marginalization \\begin{equation} \\phi(x) = \\sum_{y=Y} \\phi(x,y) \\end{equation}\nfactor conditioning Removing any rows not consistent with evidence. 
Say you know \\(Y=1\\), remove all rows that say \\(Y=0\\).\n","html":"\u003cp\u003ein \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e, a \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003e \\(\\phi\\) is a value you can assign to each distinct value in a \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e which acts as the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of that value occurring. They are considered \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es of the \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you don\u0026rsquo;t have discrete variables, \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es allow you to state \\(p(x|y)\\) in terms of a function \\(\\phi(x,y)\\).\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhrejection_sampling/\"\u003eRejection Sampling\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"factor-operations\"\u003efactor operations\u003c/h2\u003e\n\u003ch3 id=\"factor-product\"\u003efactor product\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{3} (x,y,z) = \\phi_{1} (x,y) \\cdot \\phi_{2}(y,z)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"factor-marginalization\"\u003efactor marginalization\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\phi(x) = \\sum_{y=Y} \\phi(x,y)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"factor-conditioning\"\u003efactor conditioning\u003c/h3\u003e\n\u003cp\u003eRemoving any rows not consistent with evidence. 
Say you know \\(Y=1\\), remove all rows that say \\(Y=0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfactor/","tags":null,"title":"factor"},{"categories":null,"contents":"Motivation Multiple agents need to collaborate to achieve common goal.\nJoint Utility Maximization: maximize the joint utility between various agents.\nPossible Approaches Using a traditional MDP: an MDP considers \u0026ldquo;action\u0026rdquo; as a joint action between all agents (exponential blow up because the agent actions multiply) Local Optimization: share rewards/values among agents Local Optimization: search and maximize joint utility explicitly (no need to model the entire action space) Problems with single Reward Sharing:\nCredit Assignment Problem In collective reward situations, determining which action out of the cohort actually contributed to the award is hard.\nFree Ride Problem Agents can benefit from reward without actually doing anything by being carried.\nFactored MDPs Representation Using factored linear value function to approximate the joint value function Using linear programming to avoid exponential blow up Background Coordination Graphs modeling each agent as a node each edge is a dependency factored Markov Decision Process MDPs are not good at large problems factor the state and action spaces as a random variable factors, etc. 
action selection each agent maintains a local \\(Q\\) function indicating its population the \\(Q\\) function of each agent maybe influenced by other agents: the coordination graph of the agent is used to calculate contribution We optimize by using one agent at a time: we optimize one agent, then\n","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eMultiple agents need to collaborate to achieve common goal.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eJoint Utility Maximization\u003c/strong\u003e: maximize the joint utility between various agents.\u003c/p\u003e\n\u003ch2 id=\"possible-approaches\"\u003ePossible Approaches\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eUsing a traditional MDP\u003c/strong\u003e: an MDP considers \u0026ldquo;action\u0026rdquo; as a joint action between all agents (exponential blow up because the agent actions multiply)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLocal Optimization\u003c/strong\u003e: share rewards/values among agents\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLocal Optimization\u003c/strong\u003e: search and maximize joint utility explicitly (no need to model the entire action space)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eProblems with single Reward Sharing:\u003c/p\u003e\n\u003ch3 id=\"credit-assignment-problem\"\u003eCredit Assignment Problem\u003c/h3\u003e\n\u003cp\u003eIn collective reward situations, determining which action out of the cohort actually contributed to the award is hard.\u003c/p\u003e\n\u003ch3 id=\"free-ride-problem\"\u003eFree Ride Problem\u003c/h3\u003e\n\u003cp\u003eAgents can benefit from reward without actually doing anything by being carried.\u003c/p\u003e\n\u003ch2 id=\"factored-mdps--kbhfactored-mdps-dot-md--representation\"\u003e\u003ca href=\"/posts/kbhfactored_mdps/\"\u003eFactored MDPs\u003c/a\u003e Representation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsing factored linear value function to approximate the joint 
value function\u003c/li\u003e\n\u003cli\u003eUsing linear programming to avoid exponential blow up\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"background\"\u003eBackground\u003c/h3\u003e\n\u003ch4 id=\"coordination-graphs\"\u003eCoordination Graphs\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003emodeling each agent as a node\u003c/li\u003e\n\u003cli\u003eeach edge is a dependency\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"factored-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003efactored \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eMDPs are not good at large problems\u003c/li\u003e\n\u003cli\u003efactor the state and action spaces as a random variable factors, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"action-selection\"\u003eaction selection\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eeach agent maintains a local \\(Q\\) function indicating its population\u003c/li\u003e\n\u003cli\u003ethe \\(Q\\) function of each agent maybe influenced by other agents:\n\u003cul\u003e\n\u003cli\u003ethe coordination graph of the agent is used to calculate contribution\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe optimize by using \u003cstrong\u003eone agent at a time\u003c/strong\u003e: we optimize one agent, then\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfactored_mdps/","tags":null,"title":"Factored MDPs"},{"categories":null,"contents":"fairness through unawareness procedural fairness, or fairness through unawareness is a fairness system\nIf you have no idea about the demographics of protected groups, you will make better decisions.\nexclude sensitive features from datasets exclude proxies of protected groups Problem: deeply correlated information (such as stuff that people like) is hard to get rid of\u0026mdash;individual features does nothing with respect to predicting gender, but taken in 
groups it can recover protected group information.\nfairness through awareness we only care about the outcome\nfairness through parity that the prediction for different groups\n\\begin{equation} P(G=1|D=0) = P(G=1|D=1) \\end{equation}\nfairness through calibration We want the CORRECTNESS of the algorithm to be similar between protected groups.\ndisparate impact \\begin{equation} \\frac{P(G=G^{*}|D=0)}{P(G=G^{*}|D=1)} \\leq \\epsilon \\end{equation}\nwhere, by US law, disparate impact states \\(\\epsilon\\) must be 0.2 or smaller for protected groups \\(D\\).\nwhere \\(G^{*}\\) is the correct prediction.\n","html":"\u003ch2 id=\"fairness-through-unawareness\"\u003efairness through unawareness\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#fairness-through-unawareness\"\u003eprocedural fairness\u003c/a\u003e, or \u003ca href=\"#fairness-through-unawareness\"\u003efairness through unawareness\u003c/a\u003e is a \u003ca href=\"/posts/kbhprocedural_vs_distributive_fairness/\"\u003efairness\u003c/a\u003e system\u003c/p\u003e\n\u003cp\u003eIf you have no idea about the demographics of \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es, you will make better decisions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eexclude sensitive features from datasets\u003c/li\u003e\n\u003cli\u003eexclude proxies of \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProblem: deeply correlated information (such as stuff that people like) is hard to get rid of\u0026mdash;individual features does nothing with respect to predicting gender, but taken in groups it can recover \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003e information.\u003c/p\u003e\n\u003ch2 id=\"fairness-through-awareness\"\u003efairness through awareness\u003c/h2\u003e\n\u003cp\u003ewe only care about the outcome\u003c/p\u003e\n\u003ch3 id=\"fairness-through-parity\"\u003efairness through 
parity\u003c/h3\u003e\n\u003cp\u003ethat the prediction for different groups\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(G=1|D=0) = P(G=1|D=1)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"fairness-through-calibration\"\u003efairness through calibration\u003c/h3\u003e\n\u003cp\u003eWe want the CORRECTNESS of the algorithm to be similar between \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es.\u003c/p\u003e\n\u003ch4 id=\"disparate-impact\"\u003edisparate impact\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{P(G=G^{*}|D=0)}{P(G=G^{*}|D=1)} \\leq \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, by US law, \u003ca href=\"#disparate-impact\"\u003edisparate impact\u003c/a\u003e states \\(\\epsilon\\) must be 0.2 or smaller for protected groups \\(D\\).\u003c/p\u003e\n\u003cp\u003ewhere \\(G^{*}\\) is the correct prediction.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprocedural_vs_distributive_fairness/","tags":null,"title":"fairness"},{"categories":null,"contents":"We have that:\nThe change in volts in a relationship to the magnetic flux.\n\\begin{equation} \\epsilon = \\oint \\vec{E} \\cdot \\dd{\\vec{l}} = - \\dv{\\Phi_{b}}{t} \\end{equation}\nwhere, \\(\\Phi_{b}\\) is the magnetic flux, namely how much magnetic field is through a surface:\n\\begin{equation} \\Phi_{b} = \\int \\vec{B} \\cdot \\dd{\\vec{A}} \\end{equation}\nusually, this is just \\(BA\\).\nNote! 
This tells us that the EMF (electric field per length) is just negative the change of magnetic flux.\n","html":"\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003eThe change in volts in a relationship to the magnetic flux.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon = \\oint \\vec{E} \\cdot \\dd{\\vec{l}} = - \\dv{\\Phi_{b}}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\Phi_{b}\\) is the magnetic flux, namely how much magnetic field is through a surface:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Phi_{b} = \\int \\vec{B} \\cdot \\dd{\\vec{A}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eusually, this is just \\(BA\\).\u003c/p\u003e\n\u003cp\u003eNote! This tells us that the EMF (electric field per length) is just negative the change of magnetic flux.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfaraday_s_law/","tags":null,"title":"faraday's law"},{"categories":null,"contents":"One alpha vector per action:\n\\begin{equation} \\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{o}^{} \\max_{a\u0026rsquo;} \\sum_{s\u0026rsquo;}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a) \\alpha_{a\u0026rsquo;}^{k}(s\u0026rsquo;) \\end{equation}\ntime complexity: \\(O(|S|^{2}|A|^{2}|O|)\\)\n","html":"\u003cp\u003eOne \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e per action:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{o}^{} \\max_{a\u0026rsquo;} \\sum_{s\u0026rsquo;}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a) \\alpha_{a\u0026rsquo;}^{k}(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etime complexity: \\(O(|S|^{2}|A|^{2}|O|)\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfast_informed_bound/","tags":null,"title":"Fast Informed Bound"},{"categories":null,"contents":"A New Deal program to help long-term families to have homes. The program lowered down-payment for homes down from \\(50\\%\\) down to only \\(\u0026lt;10\\%\\). 
This is part of Roosevelt\u0026rsquo;s New Deal to lower interest rates and increase national home ownership rates. This could have been attributed to programs to stabilize home prices. This specifically helped white families: favoured single-family homes.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e program to help long-term families to have homes. The program lowered down-payment for homes down from \\(50\\%\\) down to only \\(\u0026lt;10\\%\\). This is part of Roosevelt\u0026rsquo;s New Deal to lower interest rates and increase national home ownership rates. This could have been attributed to programs to stabilize home prices. This specifically helped white families: favoured single-family homes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfederal_housing_administration/","tags":null,"title":"Federal Housing Administration"},{"categories":null,"contents":"The Federal Project Number One is a branch of projects under the WPA which created opportunities for writers, musicians, artists, etc.\n","html":"\u003cp\u003eThe Federal Project Number One is a branch of projects under the \u003ca href=\"/posts/kbhwpa/\"\u003eWPA\u003c/a\u003e which created opportunities for writers, musicians, artists, etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfederal_project_number_one/","tags":null,"title":"Federal Project Number One"},{"categories":null,"contents":"A field is a special set.\nconstituents distinct elements of at least \\(0\\) and \\(1\\) operations of addition and multiplication requirements closed commutativity associativity identities (both additive and multiplicative) inverses (both additive and multiplicative) distribution Therefore, \\(\\mathbb{R}\\) is a field, and so is \\(\\mathbb{C}\\) (which we proved in properties of complex arithmetic).\nadditional information Main difference between group: there is one operation in a group, a field has two 
operations.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e is a special set.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edistinct elements of at least \\(0\\) and \\(1\\)\u003c/li\u003e\n\u003cli\u003eoperations of addition and multiplication\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e (both additive and multiplicative)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003es (both additive and multiplicative)\u003c/li\u003e\n\u003cli\u003edistribution\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore, \\(\\mathbb{R}\\) is a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e, and so is \\(\\mathbb{C}\\) (which we proved in \u003ca href=\"/posts/kbhcomplex_number/#requirements\"\u003eproperties of complex arithmetic\u003c/a\u003e).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eMain difference between \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e: there is \u003cem\u003eone\u003c/em\u003e operation is \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e, a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e has two operations.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfield/","tags":null,"title":"field"},{"categories":null,"contents":"Store files strided across the disk, and store the blocks which the file uses as a list of block ids. 
We can then jump on that block IDs and then jump to there\n","html":"\u003cp\u003eStore files strided across the disk, and store the blocks which the file uses as a list of block ids. We can then jump on that block IDs and then jump to there\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfile_payload_data/","tags":null,"title":"File Payload Data"},{"categories":null,"contents":"The filesystem is the only thing that can store anything across power offs.\ndisk Unlike memory, its \u0026ldquo;sector-addressable\u0026rdquo;: you cannot read or write individual bytes. The disk is divided into sectors, which you have to wholesale read and write.\nseeking Because disks are mostly moving, reading and writing requires seeking: to wait until the platter go under the arm and read.\nfilesystems are designed to minimize the seek time.\nfunctionality creating looknig up reading: sequential + random file access; access either all of a file or a part of the file editing creating folders main challenges disk space management: minimize seeks, sharing space, efficient use of disk naming: how do users name files reliability: surviving OS crashes and hardware failures protection: isolation between users, controlled sharing block a block is a group of one or more sectors, which issued to abstract away chunks of sectors.\nfragmentation internal fragmentation A file can be no less than a single block of text.\nexternal fragmentation \u0026ldquo;no space is available even if the space in aggregate is available\u0026rdquo;\nmodels of storage We typically put two things into the block:\nfile payload data file meta-data there is a few ways to do this:\ncontiguous allocation linked files Windows FAT File Payload Data Unix V6 Filesystem: inode, block, Block Cache ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e is the only thing that can store anything across power offs.\u003c/p\u003e\n\u003ch2 
id=\"disk\"\u003edisk\u003c/h2\u003e\n\u003cp\u003eUnlike memory, its \u0026ldquo;sector-addressable\u0026rdquo;: you cannot read or write individual bytes. The \u003ca href=\"#disk\"\u003edisk\u003c/a\u003e is divided into \u003ca href=\"#disk\"\u003esector\u003c/a\u003es, which you have to wholesale read and write.\u003c/p\u003e\n\u003ch3 id=\"seeking\"\u003eseeking\u003c/h3\u003e\n\u003cp\u003eBecause disks are mostly moving, reading and writing requires \u003ca href=\"#seeking\"\u003eseeking\u003c/a\u003e: to wait until the platter go under the arm and read.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003es are designed to minimize the seek time.\u003c/p\u003e\n\u003ch2 id=\"functionality\"\u003efunctionality\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecreating\u003c/li\u003e\n\u003cli\u003elooknig up\u003c/li\u003e\n\u003cli\u003ereading: sequential + random file access; access either all of a file or a part of the file\u003c/li\u003e\n\u003cli\u003eediting\u003c/li\u003e\n\u003cli\u003ecreating folders\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edisk space management\u003c/strong\u003e: minimize seeks, sharing space, efficient use of disk\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enaming\u003c/strong\u003e: how do users name files\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ereliability\u003c/strong\u003e: surviving OS crashes and hardware failures\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eprotection\u003c/strong\u003e: isolation between users, controlled sharing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"block\"\u003eblock\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#block\"\u003eblock\u003c/a\u003e is a group of one or more \u003ca href=\"#disk\"\u003esector\u003c/a\u003es, which issued to abstract away chunks of sectors.\u003c/p\u003e\n\u003ch2 
id=\"fragmentation\"\u003efragmentation\u003c/h2\u003e\n\u003ch3 id=\"internal-fragmentation\"\u003einternal fragmentation\u003c/h3\u003e\n\u003cp\u003eA file can be no less than a single \u003ca href=\"#block\"\u003eblock\u003c/a\u003e of text.\u003c/p\u003e\n\u003ch3 id=\"external-fragmentation\"\u003eexternal fragmentation\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;no space is available even if the space in aggregate is available\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"models-of-storage\"\u003emodels of storage\u003c/h2\u003e\n\u003cp\u003eWe typically put two things into the block:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efile payload data\u003c/li\u003e\n\u003cli\u003efile meta-data\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ethere is a few ways to do this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontiguous_allocation/\"\u003econtiguous allocation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinked_files/\"\u003elinked files\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhwindows_fat/\"\u003eWindows FAT\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfile_payload_data/\"\u003eFile Payload Data\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003eUnix V6 Filesystem\u003c/a\u003e: \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e, \u003ca href=\"#block\"\u003eblock\u003c/a\u003e, \u003ca href=\"/posts/kbhunix_v6_filesystem/#block-cache\"\u003eBlock Cache\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfilesystem/","tags":null,"title":"filesystem"},{"categories":null,"contents":"filters are how beliefs are updated from observation\ndiscrete state filter \\begin{equation} b\u0026rsquo;(s\u0026rsquo;) = P(s\u0026rsquo;|b,a,o) \\end{equation}\n\\(b\u0026rsquo;\\) is what state we think we are in next, and its a probability distribution over all states, 
calculated given from \\(b,a,o\\) our current belief about our state, our action, and our observation.\nWe can perform this belief update by performing Bayes Theorem over \\(o\\):\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;= P(s\u0026rsquo;|b,a,o) \\\\ \u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\end{align}\nNow, consider\n\\(b\\) is a representation of \\(s\\) (\u0026ldquo;belief is a representation of what previous state you are in.\u0026rdquo;) However, you will note that \\(s\\) is conditionally independent to \\(o\\) through d-seperation as there is a chain \\(s \\to s\u0026rsquo; \\to o\\). So:\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\ \u0026amp;= P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\end{align}\nThis first term is by definition the observation model, so we have:\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\ \u0026amp;= O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a) \\end{align}\nWe now invoke the law of total probability over the second term, over all states:\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a) \\\\ \u0026amp;= O(o|a,s\u0026rsquo;) \\sum_{s}^{} P(s\u0026rsquo;|b,a,s)P(s|b,a) \\end{align}\nIf we know \\(s\\) and \\(a\\) in the \\(P(s\u0026rsquo;|b,a,s)\\) terms, we can drop \\(b\\) because if we already know \\(a,s\\) knowing what probability we are in \\(s\\) (i.e. \\(b(s)\\)) is lame. 
Furthermore, \\(P(s|b,a)=b(s)\\) because the action we take is irrelavent to what CURRENT state we are in, if we already are given a distribution about what state we are in through \\(b\\).\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;) \\sum_{s}^{} T(s\u0026rsquo;|s,a)b(s) \\end{align}\nKalman Filter A Kalman Filter is a continous state-filter where by each of our \\(T, O, b\\) is represented via a Gaussian distribution. Kalman Filter is discrete state filter but continuous. Consider the final, belief-updating result of the discrete state filter above, and port it to be continous:\n\\begin{equation} b\u0026rsquo;(s\u0026rsquo;) \\propto O(o|a,s\u0026rsquo;) \\int_{s} T(s\u0026rsquo;|s,a) b(s) ds \\end{equation}\nif we modeled our transition probabilties, observations, and initial belief with a gaussian whereby each parameter is a gaussian model parameterized upon a few matricies.\n\\begin{equation} T(s\u0026rsquo;|s,a) = \\mathcal{N}(s\u0026rsquo;|T_{s} s + T_{a} a, \\Sigma_{s}) \\end{equation}\n\\begin{equation} O(o|s\u0026rsquo;) = \\mathcal{N}(o|O_{s}s\u0026rsquo;, \\Sigma_{o}) \\end{equation}\n\\begin{equation} b(s) = \\mathcal{N}(s | \\mu_{b}, \\Sigma_{b}) \\end{equation}\nwhere, \\(T, O\\) are matricies that maps vectors states to vectors. \\(\\Sigma\\) are covariances matricies. Finally, \\(\\mu\\) is a mean belief vector.\nTwo main steps:\npredict \\begin{equation} \\mu_{p} \\leftarrow T_{s} \\mu_{b} + T_{a}a \\end{equation}\n\\begin{equation} \\Sigma_{p} \\leftarrow T_{s} \\Sigma_{b} T_{s}^{T} + \\Sigma_{s} \\end{equation}\ngiven our current belief \\(b\\) and its parameters, and our current situation \\(s,a\\), we want to make a prediction about where we should be next. 
We should be somewhere on: \\(b\u0026rsquo;_{p} = \\mathcal{N}(\\mu_{p}, \\Sigma_{p})\\).\nupdate \\begin{equation} \\mu_{b} \\leftarrow \\mu_{p}+K(o-O_{s}\\mu_{p}) \\end{equation}\n\\begin{equation} \\Sigma_{b} \\leftarrow (I-KO_{s})\\Sigma_{p} \\end{equation}\nwhere \\(K\\) is Kalmain gain\nWe are now going to take an observation \\(o\\), and update our belief about where we should be now given our new observation.\nKalmain gain \\begin{equation} K \\leftarrow \\Sigma_{p} O_{s}^{T} (O_{s}\\Sigma_{p}O_{s}^{T}+\\Sigma_{O})^{-1}} \\end{equation}\nAdditional Information Extended Kalman Filter Kalman Filter, but no linearity required by forcing linearity by a point-jacobian estimate about the mean of the belief state.\nUnscented Kalman Filter its Extended Kalman Filter but derivative free, which means its clean and hence its unscented.\nIts achieved through using \u0026ldquo;sigma point samples\u0026rdquo;: just taking some representative points (mean + 2 points in each direction), and draw a line.\nParticle Filter Its a filter with Likelihood Weighted Sampling.\nSay we are flying a plane; we want to use our height measures to infer our horizontal location. Let us take an observation model: \\(O(o|s,a) = \\mathcal{N}(o|h(s), \\sigma)\\) (\u0026ldquo;the probability of getting an observation given we are in the state\u0026rdquo;)\nstart off with a prior distribution over the states you have: a distribution over the possible states make \\(N\\) monte-calro samples from our prior. These are our particles. use the transition model to propagate the \\(N\\) samples forward to get \\(N\\) new next-state samples take action \\(a\\); calculate \\(O(o|s,a)\\) for each of your proper gated samples \\(s\\) (\u0026ldquo;how likely is our observed altitude given each of our sampled states?\u0026rdquo;) normalise the resulting probabilities into a single distribution re-sample \\(N\\) samples that from the resulting distribution. these are our updated belief. 
repeat from step 3 main pitfalls: if we don\u0026rsquo;t have enough sampled particles, you may get condensations that doesn\u0026rsquo;t make sense\nparticle filter with rejection This is used almost never but if you really want to you can. You\u0026rsquo;d get a bunch of particles and take an action. You propagate the particles forward.\nFor each propergated state \\(s\\), if what you observed \\(o\\) is equal to (or close to, for continuous cases) \\(sample(o|s,a)\\), you keep it around. Otherwise, you discard it.\nYou keep doing this until you have kept enough states to do this again, and repeat.\nInjection particle filter Add a random new particle every so often to prevent particle deprivation.\nAdaptive injection particle filter We perform injection based on the ratio of two moving averages of particle weights\u0026mdash;if all weights are too low, we chuck in some to perturb it\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e are how \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es are updated from observation\u003c/p\u003e\n\u003ch2 id=\"discrete-state-filter\"\u003ediscrete state filter\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nb\u0026rsquo;(s\u0026rsquo;) = P(s\u0026rsquo;|b,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(b\u0026rsquo;\\) is what state we think we are in next, and its a probability distribution over all states, calculated given from \\(b,a,o\\) our current belief about our state, our action, and our observation.\u003c/p\u003e\n\u003cp\u003eWe can perform this belief update by performing \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e over \\(o\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;= P(s\u0026rsquo;|b,a,o) \\\\\n\u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, consider\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-11-09_09-52-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\(b\\) is a representation of \\(s\\) (\u0026ldquo;belief is a representation of what previous state you are in.\u0026rdquo;) However, you will note that \\(s\\) is \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditionally independent\u003c/a\u003e to \\(o\\) through \u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperation\u003c/a\u003e as there is a chain \\(s \\to s\u0026rsquo; \\to o\\). So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\\n\u0026amp;= P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis first term is by definition the \u003ca href=\"/posts/kbhbelief/#observation-model\"\u003eobservation model\u003c/a\u003e, so we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\\n\u0026amp;= O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe now invoke the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e over the second term, over all states:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a) \\\\\n\u0026amp;= O(o|a,s\u0026rsquo;) \\sum_{s}^{} P(s\u0026rsquo;|b,a,s)P(s|b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eIf we know \\(s\\) and \\(a\\) in the \\(P(s\u0026rsquo;|b,a,s)\\) terms, we can drop \\(b\\) because if we already know \\(a,s\\) knowing what probability we are in \\(s\\) (i.e. \\(b(s)\\)) is lame. 
Furthermore, \\(P(s|b,a)=b(s)\\) because the action we take is irrelavent to what CURRENT state we are in, if we already are given a distribution about what state we are in through \\(b\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;) \\sum_{s}^{} T(s\u0026rsquo;|s,a)b(s)\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"kalman-filter\"\u003eKalman Filter\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e is a continous state-filter where by each of our \\(T, O, b\\) is represented via a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003e. \u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e is \u003ca href=\"#discrete-state-filter\"\u003ediscrete state filter\u003c/a\u003e but continuous. Consider the final, belief-updating result of the \u003ca href=\"#discrete-state-filter\"\u003ediscrete state filter\u003c/a\u003e above, and port it to be continous:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb\u0026rsquo;(s\u0026rsquo;) \\propto O(o|a,s\u0026rsquo;) \\int_{s} T(s\u0026rsquo;|s,a) b(s) ds\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we modeled our transition probabilties, observations, and initial belief with a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e whereby each parameter is a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian model\u003c/a\u003e parameterized upon a few matricies.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo;|s,a) = \\mathcal{N}(s\u0026rsquo;|T_{s} s + T_{a} a, \\Sigma_{s})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nO(o|s\u0026rsquo;) = \\mathcal{N}(o|O_{s}s\u0026rsquo;, \\Sigma_{o})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb(s) = \\mathcal{N}(s | \\mu_{b}, \\Sigma_{b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T, O\\) are matricies that maps vectors states to 
vectors. \\(\\Sigma\\) are covariances matricies. Finally, \\(\\mu\\) is a mean belief vector.\u003c/p\u003e\n\u003cp\u003eTwo main steps:\u003c/p\u003e\n\u003ch3 id=\"predict\"\u003epredict\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mu_{p} \\leftarrow T_{s} \\mu_{b} + T_{a}a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Sigma_{p} \\leftarrow T_{s} \\Sigma_{b} T_{s}^{T} + \\Sigma_{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egiven our current belief \\(b\\) and its parameters, and our current situation \\(s,a\\), we want to make a prediction about where we \u003cstrong\u003eshould\u003c/strong\u003e be next. We should be somewhere on: \\(b\u0026rsquo;_{p} = \\mathcal{N}(\\mu_{p}, \\Sigma_{p})\\).\u003c/p\u003e\n\u003ch3 id=\"update\"\u003eupdate\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mu_{b} \\leftarrow \\mu_{p}+K(o-O_{s}\\mu_{p})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Sigma_{b} \\leftarrow (I-KO_{s})\\Sigma_{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(K\\) is \u003ca href=\"#kalmain-gain\"\u003eKalmain gain\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eWe are now going to take an observation \\(o\\), and update our belief about where we should be now given our new observation.\u003c/p\u003e\n\u003ch4 id=\"kalmain-gain\"\u003eKalmain gain\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nK \\leftarrow \\Sigma_{p} O_{s}^{T} (O_{s}\\Sigma_{p}O_{s}^{T}+\\Sigma_{O})^{-1}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"additional-information\"\u003eAdditional Information\u003c/h3\u003e\n\u003ch4 id=\"extended-kalman-filter--org1c38d38\"\u003eExtended \u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e, but no linearity required by forcing linearity by a point-jacobian estimate about the mean of the belief state.\u003c/p\u003e\n\u003ch4 id=\"unscented-kalman-filter--org1c38d38\"\u003eUnscented 
\u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eits \u003ca href=\"#extended-kalman-filter--org1c38d38\"\u003eExtended Kalman Filter\u003c/a\u003e but derivative free, which means its clean and hence its unscented.\u003c/p\u003e\n\u003cp\u003eIts achieved through using \u0026ldquo;sigma point samples\u0026rdquo;: just taking some representative points (mean + 2 points in each direction), and draw a line.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-16_18-27-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"particle-filter\"\u003eParticle Filter\u003c/h2\u003e\n\u003cp\u003eIts a filter with \u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSay we are flying a plane; we want to use our height measures to infer our horizontal location. Let us take an observation model: \\(O(o|s,a) = \\mathcal{N}(o|h(s), \\sigma)\\) (\u0026ldquo;the probability of getting an observation given we are in the state\u0026rdquo;)\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart off with a prior distribution over the states you have: a distribution over the possible states\u003c/li\u003e\n\u003cli\u003emake \\(N\\) monte-calro samples from our prior. These are our particles.\u003c/li\u003e\n\u003cli\u003euse the transition model to propagate the \\(N\\) samples forward to get \\(N\\) new next-state samples\u003c/li\u003e\n\u003cli\u003etake action \\(a\\); calculate \\(O(o|s,a)\\) for each of your proper gated samples \\(s\\) (\u0026ldquo;how likely is our observed altitude given each of our sampled states?\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003enormalise the resulting probabilities into a single distribution\u003c/li\u003e\n\u003cli\u003ere-sample \\(N\\) samples that from the resulting distribution. 
these are our updated belief.\u003c/li\u003e\n\u003cli\u003erepeat from step 3\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003emain pitfalls\u003c/strong\u003e\u003c/strong\u003e: if we don\u0026rsquo;t have enough sampled particles, you may get condensations that doesn\u0026rsquo;t make sense\u003c/p\u003e\n\u003ch3 id=\"particle-filter-with-rejection\"\u003eparticle filter with rejection\u003c/h3\u003e\n\u003cp\u003eThis is used almost never but if you really want to you can. You\u0026rsquo;d get a bunch of particles and take an action. You propagate the particles forward.\u003c/p\u003e\n\u003cp\u003eFor each propergated state \\(s\\), if what you observed \\(o\\) is equal to (or close to, for continuous cases) \\(sample(o|s,a)\\), you keep it around. Otherwise, you discard it.\u003c/p\u003e\n\u003cp\u003eYou keep doing this until you have kept enough states to do this again, and repeat.\u003c/p\u003e\n\u003ch3 id=\"injection-particle-filter\"\u003eInjection particle filter\u003c/h3\u003e\n\u003cp\u003eAdd a random new particle every so often to prevent particle deprivation.\u003c/p\u003e\n\u003ch3 id=\"adaptive-injection-particle-filter\"\u003eAdaptive injection particle filter\u003c/h3\u003e\n\u003cp\u003eWe perform injection based on the ratio of two moving averages of particle weights\u0026mdash;if all weights are too low, we chuck in some to perturb it\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfilters/","tags":null,"title":"filter"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfilter_bank/","tags":null,"title":"Filter Bank"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfilterb/","tags":null,"title":"filterb"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfilterba/","tags":null,"title":"filterba"},{"categories":null,"contents":"We have a system of differential 
equations:\n\\begin{equation} \\begin{cases} \\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\ \\dv{U}{t} = 0.4I-0.012 \\\\ \\dv{G}{t} = \\dv{M}{t} - I(t) \\end{cases} \\end{equation}\nwhere, \\(M\\) is a sinusoidal function which we can control.\nWe hope for this system to be as stable as possible.\nFirst, let\u0026rsquo;s try to get a general solution of the system. The linearized(ish) solution takes the shape of:\n\\begin{equation} \\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0) \\end{equation}\nWith:\n\\begin{equation} \\begin{cases} x_1 = 0.73 \\\\ x_2 = 0.0438 \\\\ x_3 = 0.4 \\\\ x_4 = 0.4 \\\\ x_5 = 0.012 \\end{cases} \\end{equation}\nas input parameters. We will follow the method of underdetermined coefficients: taking the homogeneous solution first and then using it to get the general solution.\nHomogeneous System To get the characteristic equation of the homogeneous system, we take the eigenvalue of the system:\nx1,x2,x3,x4,x5 = var(\u0026#34;x1 x2 x3 x4 x5\u0026#34;) matrix = matrix([[0, -x1, 0], [x4, 0,0], [-1, 0,0]]) matrix.eigenvalues() [-sqrt(-x1*x4), sqrt(-x1*x4), 0] Awesome. 
So we can see that our characteristic equation will be:\n\\begin{align} \\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\ \u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t} \\end{align}\nNow, the two \\(e^{ix}\\) functions, one positive and one negative, inspires us to the following results:\n\\begin{equation} \\begin{cases} \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nTreating \\(\\frac{1}{2}\\) and \\(\\frac{1}{2i}\\) (which we can do, because the constants can be defined on any space desired), we have:\n\\begin{align} \\cos x + \\sin x \u0026amp;= \\frac{e^{ix}+e^{-ix}}{2} + \\frac{e^{ix}-e^{-ix}}{2i} \\\\ \u0026amp;= A_1e^{ix}+A_2e^{-ix} \\end{align}\nfor some constant scalars \\(A_1\\) and \\(A_2\\)\n\u0026ldquo;Wait, doesn\u0026rsquo;t the \\(e^{-ix}\\) and \\(-e^{-ix}\\) subtract each other out on the numerator? 
No, notice the denominator is different, so we will have \\((A-B)e^{-ix}\\) after we add the two expressions for some constants \\(A\\) and \\(B\\), it doesn\u0026rsquo;t cancel out.\u0026rdquo;\nPerforming this substitution allows us to reveal the sinusoidal nature of our characteristic equation, and get rid of those pesky \\(i\\).\n\\begin{align} \\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\ \u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t} \\\\ \u0026amp;= \\vec{c_0} + \\vec{c_1\u0026rsquo;} \\cos (\\sqrt{x_1x_4} t)+ \\vec{c_2\u0026rsquo;} \\sin (\\sqrt{x_1x_4} t) \\end{align}\nThe primes here indicate that \\(\\vec{c_1} \\neq \\vec{c_1\u0026rsquo;}\\) because the initial conditions shift when we move to sinusoidal functions.\nWriting this out completely, ditching the vector expressions, we have\n\\begin{equation} \\begin{cases} I_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\ U_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\ G_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) \\end{cases} \\end{equation}\nas the homogenous solutions for the equation.\nUnderdetermined Coefficients Recall the expression we are trying to solve is:\n\\begin{equation} \\begin{cases} \\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\ \\dv{U}{t} = 0.4I-0.012 \\\\ \\dv{G}{t} = \\dv{M}{t} - I(t) \\end{cases} \\end{equation}\nWe dealt with the homongenous part\u0026hellip; but not the next two parts! Let\u0026rsquo;s do that.\nIn order to do that, we will use the method of underdetermined coefficients. 
Recall that:\n\\begin{equation} \\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0) \\end{equation}\nFor now, we will add the extra \\(\\dv{M}{t}\\) term explicitly later. Let us solve for the undetermined coefficients based on the assumption that each function (except for the attenuation by \\(M\\)) is linear:\n\\begin{equation} y(t) = at \\end{equation}\n(\u0026ldquo;it linearly changes over time\u0026rdquo;)\nIts derivative by time is:\n\\begin{equation} y\u0026rsquo;(t) = a \\end{equation}\nPlugging that into our expressions above:\n\\begin{equation} \\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t \\\\ a_{U}t \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0) \\end{equation}\nAnd now, arranging the right expressions such that we can clearly see each coefficient line up, relegating \\(M\\) to the side, and actually multiplying:\n\\begin{equation} \\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t + x_2 \\\\ a_{U}t + x_5 \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) \\end{equation}\n\\begin{equation} \\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(-x_1 (a_{U}t + x_5) \\\\ x_4 (a_{I}t + x_2 )\\\\ 1 a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) \\end{equation}\nAwesome, so now, matching coefficients, we have:\n\\begin{equation} \\begin{cases} a_{I} = -x_1x_5 \\\\ a_{U} = x_4x_2 \\\\ a_{G} = 0 \\end{cases} \\end{equation}\nWhich I honestly could have told you by\u0026hellip;. Just staring at the equations. 
furthermore, we will add the requisite shift of \\(\\dv{M}{t}\\) to the right equations when appropriate.\nSo, adding the \\(M(t)\\) in place, our particular solutions are:\n\\begin{equation} \\begin{cases} I_{p}(t) = -x_1x_5 t + x_3 M(t) \\\\ U_{p}(t) = x_4x_2t \\\\ G_{p}(t) = M(t) \\end{cases} \\end{equation}\nas the homogenous solutions for the equation.\nGeneral Solution Let us know put the general and particular solutions together:\nRecall that:\n\\begin{equation} \\begin{cases} I_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\ U_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\ G_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) \\end{cases} \\end{equation}\n\\begin{equation} \\begin{cases} I_{p}(t) = -x_1x_5 t + 0.4M(t) \\\\ U_{p}(t) = x_4x_2t \\\\ G_{p}(t) = M(t) \\end{cases} \\end{equation}\nSo, by linear additivity, we have:\n\\begin{equation} \\begin{cases} I}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\ U}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\ G}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t) \\end{cases} \\end{equation}\nSimplification Recall that our function \\(M(t)\\) is a sinusoidal function. And it is being added to some linear combination of sinusoidal functions in each term of our general solution above. Meaning, each of our equations are of the shape:\n[some vertical shift] + [cosine something] + [sine something] + [optional linear drift] + [M(t)]\nFor us to use \\(M(t)\\) to attenuate/stabilize the system, the best we can do is to dampen the sinusoidal part (because \\(M\\) itself is sinusoidal). 
We can\u0026rsquo;t do much of anything else.\nTo do this, we want ideally \\(M(t)\\) be \\(\\pi\\) ahead of the \\(\\cos + \\sin\\) waves in each of the functions; that is, we want \\(M\\) to be out of phase exactly.\n\\(\\cos +\\sin\\) is harder to be out of phase than just \\(\\sin\\); if the latter, we can just figure out its frequency and shift, and be \\(\\pi\\) ahead of it.\nFortunately, our \\(\\cos\\) and \\(\\sin\\) terms have exactly the same contents; therefore, their sum form just another shifted sine wave (don\u0026rsquo;t believe me, plot it!). Therefore, we will now endeavor to combine them.\nAside: \\(A\\cos (x)+B\\sin (x)\\) Here\u0026rsquo;s how you go about the combination. We desire that \\(A\\cos (x) + B \\sin (x)\\) be a single shifted sine function; we know this is true (by plottingish or using imaginary numbers), so we will set the sum to some arbitrary sine function and solve for its correct coefficients to mimic the sum; that is:\n\\begin{equation} r \\sin (x + \\alpha) := A\\cos (x) + B \\sin (x) \\end{equation}\nwe know desire the coefficients \\(r, \\alpha\\) that would make this true.\nRecall \\(\\sin a+b = \\cos a\\sin b + \\sin a\\cos b\\); so:\n\\begin{align} r \\sin (x+\\alpha) \u0026amp; = r(\\cos x \\sin \\alpha + \\sin x \\cos \\alpha ) \\\\ \u0026amp;= r \\sin x \\cos \\alpha + r \\cos x \\sin \\alpha \\\\ \u0026amp;= (r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x \\end{align}\nNow, we have:\n\\begin{equation} (r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x := A\\cos (x) + B \\sin (x) \\end{equation}\nTherefore:\n\\begin{equation} \\begin{cases} r\\sin \\alpha = A \\\\ r \\cos \\alpha = B \\end{cases} \\end{equation}\nAnd we desire correct coefficients \\(r, \\alpha\\) in terms of \\(A, B\\).\nDividing the two expressions:\n\\begin{equation} \\frac{\\sin \\alpha }{\\cos \\alpha } = \\frac{A}{B} \\end{equation}\nTherefore, \\(\\alpha = \\tan^{-1}\\qty(\\frac{A}{B})\\).\nFinally, recall that \\(\\sin^{2} x 
+\\cos^{2} x =1\\) for any \\(x\\). We will use this fact to get \\(r\\).\n\\begin{align} \u0026amp;\\sin^{2} \\alpha + \\cos^{2} \\alpha = 1 \\\\ \\Rightarrow\\ \u0026amp; \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} =1 \\end{align}\nBy rearranging our pair of expressions above to get \\(\\sin \\alpha\\) and \\(\\cos \\alpha\\) by itself.\nFinally, we have:\n\\begin{align} 1 \u0026amp;= \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} \\\\ \u0026amp;= \\frac{A^{2} + B^{2}}{r^{2}} \\end{align}\nSo:\n\\begin{equation} r^{2} = \\sqrt{A^{2}+B^{2}} \\end{equation}\nFinally, we have that:\n\\begin{equation} A\\cos (x)+B\\sin (x) = \\sqrt{A^{2}+B^{2}} \\sin \\qty(x + \\tan^{-1}\\qty(\\frac{A}{B})) \\end{equation}\nUsing the above result Recall we are working with:\n\\begin{equation} \\begin{cases} {I}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\ {U}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\ {G}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t) \\end{cases} \\end{equation}\nAnd we desire to use the above to simplify it. Plugging this expression directly in, for instance, to the first expression, we have:\n\\begin{equation} I(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4M(t) \\end{equation}\nNotice! 
Even if the shift changes based on each function, the frequency of the oscillation of each function is the same\u0026mdash;\nas each \\(\\cos x + \\sin x\\) sinusoidal, after applying the identity derived above, takes the form of:\n\\begin{equation} A\\sin (\\sqrt{x_1x_4}t + \\tan^{-1}(B)) \\end{equation}\nwe can see that they all oscillate with frequency of\n\\begin{equation} \\frac{\\sqrt{x_1x_4}}{2\\pi} \\end{equation}\n\u0026ldquo;how many \\(2\\pi\\) can our function go in \\(1\\) second?\u0026rdquo;\nTherefore, the control mechanism must work in frequencies of \\(\\frac{\\sqrt{x_1x_4}}{2\\pi}\\) (and best be exactly or as best as possible out of phase by being phase shifted by \\(\\tan^{-1}(B) + \\pi\\)) to be able to attenuate the sinusoidal the best.\nWe can allow \\(M(t)\\) to go to any sinusoidal function, and compose them together:\n\\begin{equation} I(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4 (c \\sin(ax+b)) \\end{equation}\nOk, let us now spend another aside to figure out the frequency and amplitude of this new curve, which will be our target upon which we are optimizing:\nAttenuating the Sums of Sinusoidals We now have:\n\\begin{equation} a_1\\sin (b_1t + c_1) + a_2 \\sin (b_2t+c_2) \\end{equation}\nThe question is how we can make the first wave destructively interfere with the second one.\n","html":"\u003cp\u003eWe have a system of differential equations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\\n\\dv{U}{t} = 0.4I-0.012 \\\\\n\\dv{G}{t} = \\dv{M}{t} - I(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(M\\) is a sinusoidal function which we can control.\u003c/p\u003e\n\u003cp\u003eWe hope for this system to be as stable as possible.\u003c/p\u003e\n\u003cp\u003eFirst, let\u0026rsquo;s try to get a general solution of the system. 
The linearized(ish) solution takes the shape of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1 = 0.73 \\\\\nx_2 = 0.0438 \\\\\nx_3 = 0.4 \\\\\nx_4 = 0.4 \\\\\nx_5 = 0.012\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas input parameters. We will follow the method of underdetermined coefficients: taking the homogeneous solution first and then using it to get the general solution.\u003c/p\u003e\n\u003ch2 id=\"homogeneous-system\"\u003eHomogeneous System\u003c/h2\u003e\n\u003cp\u003eTo get the characteristic equation of the \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous\u003c/a\u003e system, we take the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of the system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex5\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x1 x2 x3 x4 x5\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eeigenvalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[-sqrt(-x1*x4), sqrt(-x1*x4), 0]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome. So we can see that our characteristic equation will be:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\\n\u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, the two \\(e^{ix}\\) functions, one positive and one negative, inspires us to the following results:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTreating \\(\\frac{1}{2}\\) and \\(\\frac{1}{2i}\\) (which we can do, because the constants can be defined on any space desired), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\cos x + \\sin x \u0026amp;= \\frac{e^{ix}+e^{-ix}}{2} + \\frac{e^{ix}-e^{-ix}}{2i} \\\\\n\u0026amp;= A_1e^{ix}+A_2e^{-ix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003efor some constant scalars \\(A_1\\) and \\(A_2\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Wait, doesn\u0026rsquo;t the \\(e^{-ix}\\) and \\(-e^{-ix}\\) subtract each other out on the numerator? 
No, notice the denominator is different, so we will have \\((A-B)e^{-ix}\\) after we add the two expressions for some constants \\(A\\) and \\(B\\), it doesn\u0026rsquo;t cancel out.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ePerforming this substitution allows us to reveal the sinusoidal nature of our characteristic equation, and get rid of those pesky \\(i\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\\n\u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t} \\\\\n\u0026amp;= \\vec{c_0} + \\vec{c_1\u0026rsquo;} \\cos (\\sqrt{x_1x_4} t)+ \\vec{c_2\u0026rsquo;} \\sin (\\sqrt{x_1x_4} t)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThe primes here indicate that \\(\\vec{c_1} \\neq \\vec{c_1\u0026rsquo;}\\) because the initial conditions shift when we move to sinusoidal functions.\u003c/p\u003e\n\u003cp\u003eWriting this out completely, ditching the vector expressions, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\\nU_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\\nG_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas the homogenous solutions for the equation.\u003c/p\u003e\n\u003ch2 id=\"underdetermined-coefficients\"\u003eUnderdetermined Coefficients\u003c/h2\u003e\n\u003cp\u003eRecall the expression we are trying to solve is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\\n\\dv{U}{t} = 0.4I-0.012 \\\\\n\\dv{G}{t} = \\dv{M}{t} - I(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe dealt with the homongenous part\u0026hellip; but \u003cem\u003enot\u003c/em\u003e the next two parts! 
Let\u0026rsquo;s do that.\u003c/p\u003e\n\u003cp\u003eIn order to do that, we will use the method of underdetermined coefficients. Recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor now, we will add the extra \\(\\dv{M}{t}\\) term explicitly later. Let us solve for the undetermined coefficients based on the assumption that each function (except for the attenuation by \\(M\\)) is linear:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = at\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;it linearly changes over time\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eIts derivative by time is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(t) = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePlugging that into our expressions above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t \\\\ a_{U}t \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, arranging the right expressions such that we can clearly see each coefficient line up, relegating \\(M\\) to the side, and actually multiplying:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t + x_2 \\\\ a_{U}t + x_5 \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(-x_1 (a_{U}t + x_5) \\\\ x_4 (a_{I}t + x_2 )\\\\ 1 
a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAwesome, so now, matching coefficients, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na_{I} = -x_1x_5 \\\\\na_{U} = x_4x_2 \\\\\na_{G} = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich I honestly could have told you by\u0026hellip;. Just staring at the equations. furthermore, we will add the requisite shift of \\(\\dv{M}{t}\\) to the right equations when appropriate.\u003c/p\u003e\n\u003cp\u003eSo, adding the \\(M(t)\\) in place, our particular solutions are:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{p}(t) = -x_1x_5 t + x_3 M(t) \\\\\nU_{p}(t) = x_4x_2t \\\\\nG_{p}(t) = M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas the homogenous solutions for the equation.\u003c/p\u003e\n\u003ch2 id=\"general-solution\"\u003eGeneral Solution\u003c/h2\u003e\n\u003cp\u003eLet us know put the general and particular solutions together:\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\\nU_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\\nG_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{p}(t) = -x_1x_5 t + 0.4M(t) \\\\\nU_{p}(t) = x_4x_2t \\\\\nG_{p}(t) = M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, by linear additivity, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\\nU}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\\nG}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 
id=\"simplification\"\u003eSimplification\u003c/h2\u003e\n\u003cp\u003eRecall that our function \\(M(t)\\) is a sinusoidal function. And it is being added to some linear combination of sinusoidal functions in each term of our general solution above. Meaning, each of our equations are of the shape:\u003c/p\u003e\n\u003cp\u003e[some vertical shift] + [cosine something] + [sine something] + [optional linear drift] + [M(t)]\u003c/p\u003e\n\u003cp\u003eFor us to use \\(M(t)\\) to attenuate/stabilize the system, the best we can do is to dampen the sinusoidal part (because \\(M\\) itself is sinusoidal). We can\u0026rsquo;t do much of anything else.\u003c/p\u003e\n\u003cp\u003eTo do this, we want ideally \\(M(t)\\) be \\(\\pi\\) ahead of the \\(\\cos + \\sin\\) waves in each of the functions; that is, we want \\(M\\) to be out of phase exactly.\u003c/p\u003e\n\u003cp\u003e\\(\\cos +\\sin\\) is harder to be out of phase than just \\(\\sin\\); if the latter, we can just figure out its frequency and shift, and be \\(\\pi\\) ahead of it.\u003c/p\u003e\n\u003cp\u003eFortunately, our \\(\\cos\\) and \\(\\sin\\) terms have exactly the same contents; therefore, their sum form just another shifted sine wave (don\u0026rsquo;t believe me, plot it!). Therefore, we will now endeavor to combine them.\u003c/p\u003e\n\u003ch3 id=\"aside-a-cos--x--plus-b-sin--x\"\u003eAside: \\(A\\cos (x)+B\\sin (x)\\)\u003c/h3\u003e\n\u003cp\u003eHere\u0026rsquo;s how you go about the combination. 
We desire that \\(A\\cos (x) + B \\sin (x)\\) be a single shifted sine function; we know this is true (by plottingish or using imaginary numbers), so we will set the sum to some arbitrary sine function and solve for its correct coefficients to mimic the sum; that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr \\sin (x + \\alpha) := A\\cos (x) + B \\sin (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe know desire the coefficients \\(r, \\alpha\\) that would make this true.\u003c/p\u003e\n\u003cp\u003eRecall \\(\\sin a+b = \\cos a\\sin b + \\sin a\\cos b\\); so:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nr \\sin (x+\\alpha) \u0026amp; = r(\\cos x \\sin \\alpha + \\sin x \\cos \\alpha ) \\\\\n\u0026amp;= r \\sin x \\cos \\alpha + r \\cos x \\sin \\alpha \\\\\n\u0026amp;= (r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x := A\\cos (x) + B \\sin (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nr\\sin \\alpha = A \\\\\nr \\cos \\alpha = B\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd we desire correct coefficients \\(r, \\alpha\\) in terms of \\(A, B\\).\u003c/p\u003e\n\u003cp\u003eDividing the two expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\sin \\alpha }{\\cos \\alpha } = \\frac{A}{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, \\(\\alpha = \\tan^{-1}\\qty(\\frac{A}{B})\\).\u003c/p\u003e\n\u003cp\u003eFinally, recall that \\(\\sin^{2} x +\\cos^{2} x =1\\) for any \\(x\\). 
We will use this fact to get \\(r\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\sin^{2} \\alpha + \\cos^{2} \\alpha = 1 \\\\\n\\Rightarrow\\ \u0026amp; \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} =1\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBy rearranging our pair of expressions above to get \\(\\sin \\alpha\\) and \\(\\cos \\alpha\\) by itself.\u003c/p\u003e\n\u003cp\u003eFinally, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n1 \u0026amp;= \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} \\\\\n\u0026amp;= \\frac{A^{2} + B^{2}}{r^{2}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr^{2} = \\sqrt{A^{2}+B^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\\cos (x)+B\\sin (x) = \\sqrt{A^{2}+B^{2}} \\sin \\qty(x + \\tan^{-1}\\qty(\\frac{A}{B}))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"using-the-above-result\"\u003eUsing the above result\u003c/h3\u003e\n\u003cp\u003eRecall we are working with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n{I}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\\n{U}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\\n{G}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd we desire to use the above to simplify it. Plugging this expression directly in, for instance, to the first expression, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4M(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotice! 
Even if the shift changes based on each function, the \u003cem\u003efrequency\u003c/em\u003e of the oscillation of each function is the same\u0026mdash;\u003c/p\u003e\n\u003cp\u003eas each \\(\\cos x + \\sin x\\) sinusoidal, after applying the identity derived above, takes the form of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\\sin (\\sqrt{x_1x_4}t + \\tan^{-1}(B))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can see that they all oscillate with frequency of\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\sqrt{x_1x_4}}{2\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;how many \\(2\\pi\\) can our function go in \\(1\\) second?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eTherefore, the control mechanism must work in frequencies of \\(\\frac{\\sqrt{x_1x_4}}{2\\pi}\\) (and best be exactly or as best as possible out of phase by being phase shifted by \\(\\tan^{-1}(B) + \\pi\\)) to be able to attenuate the sinusoidal the best.\u003c/p\u003e\n\u003cp\u003eWe can allow \\(M(t)\\) to go to any sinusoidal function, and compose them together:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4 (c \\sin(ax+b))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, let us now spend another aside to figure out the frequency and amplitude of this new curve, which will be our target upon which we are optimizing:\u003c/p\u003e\n\u003ch4 id=\"attenuating-the-sums-of-sinusoidals\"\u003eAttenuating the Sums of Sinusoidals\u003c/h4\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1\\sin (b_1t + c_1) + a_2 \\sin (b_2t+c_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe question is how we can make the first wave destructively interfere with the second one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_finance_eigen/","tags":null,"title":"Finance 
(Eigen)"},{"categories":null,"contents":"Why do we have a market? Basically: it allows society to make decisions about the value of things\u0026mdash;with the wisdom of the crowd. The stock market is how we (as people) decide what to make and how to make it.\nMisc. Questions About the Market Misc. Financial Market Questions\nKnowledge price Random Walk Hypothesis Brownian Motion Arbitrage Pricing Derivative Pricing options CAPM Stochastic Discount Factor GARCH ETF accounting price stock indicies VWAP short selling darkpool fundamental investing Second-Level Thinking stock market survey OTC markets NBBO LiquidNet ","html":"\u003ch2 id=\"why-do-we-have-a-market\"\u003eWhy do we have a market?\u003c/h2\u003e\n\u003cp\u003eBasically: it allows society to make decisions about the value of things\u0026mdash;with the wisdom of the crowd. The stock market is how we (as people) decide what to make and how to make it.\u003c/p\u003e\n\u003ch2 id=\"misc-dot-questions-about-the-market\"\u003eMisc. Questions About the Market\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmisc_financial_market_questions/\"\u003eMisc. 
Financial Market Questions\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"knowledge\"\u003eKnowledge\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharbitrage_pricing/\"\u003eArbitrage Pricing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhderivative_pricing/\"\u003eDerivative Pricing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptions/\"\u003eoptions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstochastic_discount_factor/\"\u003eStochastic Discount Factor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgarch/\"\u003eGARCH\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhetf/\"\u003eETF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaccounting_price/\"\u003eaccounting price\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstock_indicies/\"\u003estock indicies\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvwap/\"\u003eVWAP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhshort_selling/\"\u003eshort selling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundimental_investing/#second-level-thinking\"\u003eSecond-Level Thinking\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhstock_market_survey/\"\u003estock market survey\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhotc_markets/\"\u003eOTC markets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnbbo/\"\u003eNBBO\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhliquidnet/\"\u003eLiquidNet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinancial_markets_intro/","tags":["index"],"title":"Financial Market"},{"categories":null,"contents":"We define:\n\\begin{equation} \\mathbb{F}^{\\infty} = \\{(x_1, x_2, \\dots): x_{j} \\in \\mathbb{F}, \\forall j=1,2,\\dots\\} \\end{equation}\nclosure of addition We define addition:\n\\begin{equation} (x_1,x_2,\\dots)+(y_1,y_2, \\dots) = (x_1+y_1,x_2+y_2, \\dots ) \\end{equation}\nEvidently, the output is also of infinite length, and as addition in \\(\\mathbb{F}\\) is closed, then also closed.\nclosure of scalar multiplication We define scalar multiplication:\n\\begin{equation} \\lambda (x_1,x_2, \\dots) = (\\lambda x_1, \\lambda x_2, \\dots ) \\end{equation}\nditto. as above\ncommutativity extensible from commutativity of \\(\\mathbb{F}\\)\nassociativity extensible from associativity of \\(\\mathbb{F}\\), for both operations\ndistribution \\begin{align} \\lambda ((x_1,x_2,\\dots)+(y_1,y_2, \\dots)) \u0026amp;= \\lambda (x_1+y_1,x_2+y_2, \\dots ) \\\\ \u0026amp;= (\\lambda (x_1+y_1),\\lambda (x_2+y_2), \\dots ) \\\\ \u0026amp;= (\\lambda x_1+\\lambda y_1,\\lambda x_2+\\lambda y_2, \\dots) \\\\ \u0026amp;= (\\lambda x_1, \\lambda x_2, \\dots) + (\\lambda y_1, \\lambda y_2, \\dots) \\\\ \u0026amp;= \\lambda (x_1, x_2, \\dots) + \\lambda (y_1, y_2, \\dots) \\end{align}\nditto. 
for the other direction.\nadditive ID \\begin{equation} (0,0, \\dots ) \\end{equation}\nadditive inverse extensive from \\(\\mathbb{F}\\)\n\\begin{equation} (-a, -b, \\dots ) + (a,b, \\dots ) = 0 \\end{equation}\nscalar multiplicative ID \\(1\\)\n","html":"\u003cp\u003eWe define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{F}^{\\infty} = \\{(x_1, x_2, \\dots): x_{j} \\in \\mathbb{F}, \\forall j=1,2,\\dots\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"closure-of-addition\"\u003eclosure of addition\u003c/h2\u003e\n\u003cp\u003eWe define addition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x_1,x_2,\\dots)+(y_1,y_2, \\dots) = (x_1+y_1,x_2+y_2, \\dots )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEvidently, the output is also of infinite length, and as addition in \\(\\mathbb{F}\\) is closed, then also closed.\u003c/p\u003e\n\u003ch2 id=\"closure-of-scalar-multiplication\"\u003eclosure of scalar multiplication\u003c/h2\u003e\n\u003cp\u003eWe define scalar multiplication:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda (x_1,x_2, \\dots) = (\\lambda x_1, \\lambda x_2, \\dots )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cem\u003editto.\u003c/em\u003e as above\u003c/p\u003e\n\u003ch2 id=\"commutativity--kbhcommutivity-dot-md\"\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eextensible from \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e of \\(\\mathbb{F}\\)\u003c/p\u003e\n\u003ch2 id=\"associativity--kbhassociative-dot-md\"\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eextensible from \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e of \\(\\mathbb{F}\\), for both operations\u003c/p\u003e\n\u003ch2 id=\"distribution\"\u003edistribution\u003c/h2\u003e\n\u003cp\u003e\\begin{align}\n\\lambda ((x_1,x_2,\\dots)+(y_1,y_2, \\dots)) \u0026amp;= \\lambda (x_1+y_1,x_2+y_2, \\dots ) 
\\\\\n\u0026amp;= (\\lambda (x_1+y_1),\\lambda (x_2+y_2), \\dots ) \\\\\n\u0026amp;= (\\lambda x_1+\\lambda y_1,\\lambda x_2+\\lambda y_2, \\dots) \\\\\n\u0026amp;= (\\lambda x_1, \\lambda x_2, \\dots) + (\\lambda y_1, \\lambda y_2, \\dots) \\\\\n\u0026amp;= \\lambda (x_1, x_2, \\dots) + \\lambda (y_1, y_2, \\dots)\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u003cem\u003editto.\u003c/em\u003e for the other direction.\u003c/p\u003e\n\u003ch2 id=\"additive-id\"\u003eadditive ID\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(0,0, \\dots )\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additive-inverse\"\u003eadditive inverse\u003c/h2\u003e\n\u003cp\u003eextensive from \\(\\mathbb{F}\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-a, -b, \\dots ) + (a,b, \\dots ) = 0\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"scalar-multiplicative-id\"\u003escalar multiplicative ID\u003c/h2\u003e\n\u003cp\u003e\\(1\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinfty_is_a_vector_space_over_f/","tags":null,"title":"Finfinity is a Vector Space over F"},{"categories":null,"contents":"The Finite Difference Method is a method of solving partial Differential Equations. It follows two steps:\nDevelop discrete difference equations for the desired expression Algebraically solve these equations to yield stepped solutions https://www.youtube.com/watch?v=ZSNl5crAvsw\nFollow Along We will try to solve:\n\\begin{equation} \\pdv{p(t,x)}{t} = \\frac{1}{2}\\pdv[2]{p(t,x)}{x} \\end{equation}\nTo aid in notation, let us:\n\\begin{equation} p(t_{i}, x_{j}) := p_{i,j} \\end{equation}\nto represent one distinct value of our function \\(p\\).\nLet\u0026rsquo;s begin by writing our expression above via our new notation:\n\\begin{equation} \\pdv{p_{i,j}}{t}= \\frac{1}{2} \\pdv[2]{p_{i,j}}{x} \\end{equation}\nGreat. 
Now, let\u0026rsquo;s think about the left side and try to turn it into a difference eqn:\nWhat exactly is\u0026mdash;\n\\begin{equation} \\pdv{p_{i,j}}{t} \\end{equation}\nas a finite difference? Well, it is just:\n\\begin{equation} \\frac{p_{i+1,j}-p_{i,{j}}}{\\Delta t} \\end{equation}\nWhat about second partials?\nWell, what is\u0026mdash;\n\\begin{equation} \\pdv[2]{p_{i,j}}{x} \\end{equation}\nIt is:\n\\begin{equation} \\frac{\\pdv{p_{i,j+1}}{x}- \\pdv{p_{i,j}}{x}}{\\Delta x} \\end{equation}\nExpanding the top expressions even more difference expressions:\n\\begin{equation} \\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}}}{\\Delta x}- \\frac{p_{i,{j+1}}-p_{i,{j}}}{\\Delta x}}{\\Delta x} \\end{equation}\nThis equals to:\n\\begin{equation} \\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}} - p_{i,{j+1}}+p_{i,{j}}}{(\\Delta x)^{2}} \\end{equation}\nFinally, substitute this into our expression, then solve for some \\(p_{{i+1}, j}\\) in terms of \\(p_{i, ?}\\). We will treat the entire \u0026ldquo;row\u0026rdquo; of \\(p_{i,?}\\) as our initial condition, then solve for the rest + propagate forward.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhfinite_difference_method/\"\u003eFinite Difference Method\u003c/a\u003e is a method of solving partial \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e. 
It follows two steps:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eDevelop discrete \u003ca href=\"/posts/kbhdifference_equation/\"\u003edifference equation\u003c/a\u003es for the desired expression\u003c/li\u003e\n\u003cli\u003eAlgebraically solve these equations to yield stepped solutions\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003ca href=\"https://www.youtube.com/watch?v=ZSNl5crAvsw\"\u003ehttps://www.youtube.com/watch?v=ZSNl5crAvsw\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"follow-along\"\u003eFollow Along\u003c/h2\u003e\n\u003cp\u003eWe will try to solve:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{p(t,x)}{t} = \\frac{1}{2}\\pdv[2]{p(t,x)}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo aid in notation, let us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(t_{i}, x_{j}) := p_{i,j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto represent one distinct value of our function \\(p\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by writing our expression above via our new notation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{p_{i,j}}{t}= \\frac{1}{2} \\pdv[2]{p_{i,j}}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. Now, let\u0026rsquo;s think about the left side and try to turn it into a difference eqn:\u003c/p\u003e\n\u003cp\u003eWhat exactly is\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{p_{i,j}}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas a finite difference? 
Well, it is just:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{p_{i+1,j}-p_{i,{j}}}{\\Delta t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhat about second partials?\u003c/p\u003e\n\u003cp\u003eWell, what is\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{p_{i,j}}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIt is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\pdv{p_{i,j+1}}{x}- \\pdv{p_{i,j}}{x}}{\\Delta x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding the top expressions even more difference expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}}}{\\Delta x}- \\frac{p_{i,{j+1}}-p_{i,{j}}}{\\Delta x}}{\\Delta x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis equals to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}} - p_{i,{j+1}}+p_{i,{j}}}{(\\Delta x)^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, substitute this into our expression, then solve for some \\(p_{{i+1}, j}\\) in terms of \\(p_{i, ?}\\). We will treat the entire \u0026ldquo;row\u0026rdquo; of \\(p_{i,?}\\) as our initial condition, then solve for the rest + propagate forward.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinite_difference_method/","tags":null,"title":"Finite Difference Method"},{"categories":null,"contents":"A graph of states which is closed and connected.\nAlso relating to this is a derived variable. One way to prove reaching any state is via Floyd\u0026rsquo;s Invariant Method.\n","html":"\u003cp\u003eA graph of states which is closed and connected.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-07_10-50-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAlso relating to this is a \u003ca href=\"/posts/kbhderived_variable/\"\u003ederived variable\u003c/a\u003e. 
One way to prove reaching any state is via \u003ca href=\"/posts/kbhfloyd_s_invariant_method/\"\u003eFloyd\u0026rsquo;s Invariant Method\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinite_state_machine/","tags":null,"title":"Finite State Machine"},{"categories":null,"contents":"A finite-dimensional vector space is a vector space where some actual list (which remember, has finite length) of vectors spans the space.\nAn infinite-demensional vector space is a vector space that\u0026rsquo;s not a finite-dimensional vector space.\nadditional information every finite-dimensional vector space has a basis Begin with a spanning list in the finite-dimensional vector space you are working with. Apply the fact that all spanning lists contains a basis of which you are spanning. Therefore, some elements of that list form a basis of the finite-dimensional vector space you are working with. \\(\\blacksquare\\)\nfinite-dimensional subspaces finite-dimensional subspaces\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e where some actual list (which remember, has finite length) of vectors \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e the space.\u003c/p\u003e\n\u003cp\u003eAn \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e that\u0026rsquo;s not a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"every-finite-dimensional-vector-space--kbhfinite-dimensional-vector-space-dot-md--has-a-basis--kbhbasis-dot-md\"\u003eevery \u003ca 
href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e has a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eBegin with a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list in the \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e you are working with. Apply the fact that \u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e. Therefore, some elements of that list form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e you are working with. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"finite-dimensional-subspaces--kbhsubspace-dot-md\"\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinite_dimensional_vector_space/","tags":null,"title":"finite-dimensional vector space"},{"categories":null,"contents":"Fireside Chats are a group of broadcasts by Franklin D. Roosevelt (FDR) which allowed him to speak directly to the people.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfireside_chats/\"\u003eFireside Chats\u003c/a\u003e are a group of broadcasts by \u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. 
Roosevelt (FDR)\u003c/a\u003e which allowed him to speak directly to the people.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfireside_chats/","tags":null,"title":"Fireside Chats"},{"categories":null,"contents":"Below you will find a list of the Fireside articles.\nArticle Date Welcome to the Fireside \u0026lt;2023-10-16 Mon\u0026gt; Make Models Go Brrr \u0026lt;2023-10-23 Mon\u0026gt; Todo Lists \u0026lt;2023-10-30 Mon\u0026gt; ","html":"\u003cp\u003eBelow you will find a list of the Fireside articles.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eArticle\u003c/th\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfireside_article/\"\u003eWelcome to the Fireside\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-16 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmake_models_go_brrr/\"\u003eMake Models Go Brrr\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-23 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtodo_lists/\"\u003eTodo Lists\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-30 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfireside/","tags":["index"],"title":"Fireside Index"},{"categories":null,"contents":"First Order ODEs are Differential Equations that only takes one derivative.\nTypically, by the nature of how they are modeled, we usually 
state it in a equation between three things:\n\\begin{equation} t, y(t), y\u0026rsquo;(t) \\end{equation}\nas in\u0026mdash;we only take one derivative.\nSometimes the solution may not be analytic, but is well-defined:\n\\begin{equation} y\u0026rsquo; = e^{-x^{2}} \\end{equation}\nwe know that, by the fundamental theorem of calculus, gives us:\n\\begin{equation} y(x) = \\int_{0}^{x} e^{-s{2}} \\dd{s} \\end{equation}\nIndeed this function doesn\u0026rsquo;t have an elementary integral, but still has a well defined result. Almost always differential equations doesn\u0026rsquo;t have elementary solutions.\nSeparated Equations There is a very nice class of this type of first-order equations:\n\\begin{equation} y\u0026rsquo; = f(t,y) \\end{equation}\nA general function here are not these cases.\nMentally, we think of this structure on a \\(t,y\\) plane, where at each point \\((t,y)\\) the slope of the graph matches the slope given by \\(f(t,y)\\). To solve for the rest of the evolution, we consider an initial state of this system, say \\(y(1) = 3\\).\nsymbolic methods: generally, you are the happiest when you find specific formulas for \\(y(t)\\). 
qualitative methods: for instance, slope fields autonomous ODEs This is a special case of these types of equations, called autonomous ODEs\n\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nIn most cases, this resolves into some \\(y(t) = T_0+Ce^{-ht}\\).\nseperable \\begin{equation} y\u0026rsquo; = f(y)g(t) \\end{equation}\ngenerally, this can be solved with division algorithm.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e are \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e that only takes one derivative.\u003c/p\u003e\n\u003cp\u003eTypically, by the nature of how they are modeled, we usually state it in a equation between three things:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt, y(t), y\u0026rsquo;(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas in\u0026mdash;we only take one derivative.\u003c/p\u003e\n\u003cp\u003eSometimes the solution may not be analytic, but is well-defined:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = e^{-x^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe know that, by the fundamental theorem of calculus, gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x) = \\int_{0}^{x} e^{-s{2}} \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIndeed this function doesn\u0026rsquo;t have an elementary integral, but still has a well defined result. 
Almost always differential equations doesn\u0026rsquo;t have elementary solutions.\u003c/p\u003e\n\u003ch2 id=\"separated-equations\"\u003eSeparated Equations\u003c/h2\u003e\n\u003cp\u003eThere is a very nice class of this type of first-order equations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(t,y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eA general function here are not these cases.\u003c/p\u003e\n\u003cp\u003eMentally, we think of this structure on a \\(t,y\\) plane, where at each point \\((t,y)\\) the slope of the graph matches the slope given by \\(f(t,y)\\). To solve for the rest of the evolution, we consider an initial state of this system, say \\(y(1) = 3\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esymbolic methods\u003c/strong\u003e: generally, you are the happiest when you find specific formulas for \\(y(t)\\).\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003equalitative methods\u003c/strong\u003e: for instance, slope fields\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"autonomous-odes--kbhautonomous-odes-dot-md\"\u003e\u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis is a special case of these types of equations, called \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn most cases, this resolves into some \\(y(t) = T_0+Ce^{-ht}\\).\u003c/p\u003e\n\u003ch3 id=\"seperable--kbhseperable-diffequ-dot-md\"\u003e\u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)g(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egenerally, this can be solved with \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision 
algorithm\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfirst_order_odes/","tags":null,"title":"First Order ODEs"},{"categories":null,"contents":"Consider the case where there are two functions interacting with each other:\n\\begin{equation} y_1(t) \\dots y_{2}(t) \\end{equation}\nSo we have more than one dependent function, with functions \\(y_1, y_1\u0026rsquo;, y_2, y_2\u0026rsquo;\\) and so forth. To deal with this, we simply make it into a matrix system:\n\\begin{equation} y(t) = \\mqty(y_1(t) \\\\ \\dots \\\\ y_{n}(t)) \\end{equation}\nFor instance, should we have:\n\\begin{equation} \\begin{cases} y_1\u0026rsquo; = 3y_1 - 2y_2 \\\\ y_2\u0026rsquo; = -y_1 + 5y_2 \\end{cases} \\end{equation}\nWe can write this system in a matrix like such:\n\\begin{equation} y\u0026rsquo;(t) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) y(t) \\end{equation}\nMeaning:\n\\begin{equation} y\u0026rsquo; = Ay \\end{equation}\nwhich is a single linear equation.\nRecall that we had:\n\\begin{equation} y\u0026rsquo; = Ay \\end{equation}\nLet \\(v\\) be an eigenvector of \\(A\\) with \\(\\lambda\\) be an eigenvalue. Let us guess that \\(y = e^{\\lambda t} v\\) is a solution.\nPlugging this in, we have:\n\\begin{equation} y\u0026rsquo; = Ay = A(e^{\\lambda t} v) = e^{\\lambda t} Av = \\lambda e^{\\lambda t} v \\end{equation}\nOf course, \\(y\u0026rsquo; = \\lambda e^{\\lambda t} v\\).\nMeaning this is a solution of our system. Recall finding eigenvalues with actual numbers, so we want some \\(\\lambda\\) for which \\(det(A-\\lambda I)=0\\).\nPlugging the eigenvalues back, and recalling the superposition principle, we are left with some:\n\\begin{equation} y(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n} \\end{equation}\nThis is true if we have enough eigenvalues which forms a basis. 
Now, at \\(y(0)\\), we have some \\(y_0 = c_1v_1 + \u0026hellip; + c_{n}v_{n}\\).\nThis yields a system \\(y_{0} = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] \\mqty[c_1 \\\\ \\dots \\\\ c_{n}]\\).\nWe call this matrix written in terms of eigenvectors \\(E\\), that is:\n\\begin{equation} E = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] \\end{equation}\nFinally, we have:\n\\begin{equation} \\mqty[c_1 \\\\ \\dots \\\\ c_{n}] = E^{-1} y_0 \\end{equation}\nThis method works for cases where we have enough independent eigenvectors to admit enough initial conditions. Otherwise, matrix exponentiation.\nSpecial Cases 2x2 with \\(\\lambda_{2} = \\bar{\\lambda_{1}}\\) For any two by two system, where there the eigenvalues are conjugates of each other, we can formulate a solution in the form:\n\\begin{equation} y(t) = c_1 Re(e^{\\lambda t} v) + c_2 Im(e^{\\lambda t}v) \\end{equation}\nif the matrix representing the system admits two eigenvalues, \\(\\lambda\\) and \\(\\bar{\\lambda}\\). We can obtain this by rephrasing one solution as \\(e^{\\lambda t} = e^{a + ib} e^{t} = e^{a+t}(\\cos b + i\\sin b)\\).\nTips and Tricks Changing higher order system into lower orders We can actually write higher order linear system this way too:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nwe can actually construct:\n\\begin{align} \u0026amp; y_1(t) = y(t) \\\\ \u0026amp; y_2(t) = y\u0026rsquo;(t) \\end{align}\nAnd therefore, we can construct:\n\\begin{equation} \\mqty(y_1 \\\\ y_2)\u0026rsquo; = \\mqty(y_2 \\\\ -by1 - ay2) = \\mqty(0 \u0026amp; 1 \\\\ -b \u0026amp;-a) \\mqty(y_1 \\\\ y_2) \\end{equation}\n","html":"\u003cp\u003eConsider the case where there are two functions interacting with each other:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_1(t) \\dots y_{2}(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we have more than one dependent function, with functions \\(y_1, y_1\u0026rsquo;, y_2, y_2\u0026rsquo;\\) and so forth. 
To deal with this, we simply make it into a matrix system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = \\mqty(y_1(t) \\\\ \\dots \\\\ y_{n}(t))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor instance, should we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny_1\u0026rsquo; = 3y_1 - 2y_2 \\\\\ny_2\u0026rsquo; = -y_1 + 5y_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can write this system in a matrix like such:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(t) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) y(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = Ay\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a single linear equation.\u003c/p\u003e\n\u003cp\u003eRecall that we had:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = Ay\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet \\(v\\) be an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e of \\(A\\) with \\(\\lambda\\) be an eigenvalue. Let us guess that \\(y = e^{\\lambda t} v\\) is a solution.\u003c/p\u003e\n\u003cp\u003ePlugging this in, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = Ay = A(e^{\\lambda t} v) = e^{\\lambda t} Av = \\lambda e^{\\lambda t} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, \\(y\u0026rsquo; = \\lambda e^{\\lambda t} v\\).\u003c/p\u003e\n\u003cp\u003eMeaning this is a solution of our system. 
Recall \u003ca href=\"/posts/kbheigenvalue/#finding-eigenvalues-with-actual-numbers\"\u003efinding eigenvalues with actual numbers\u003c/a\u003e, so we want some \\(\\lambda\\) for which \\(det(A-\\lambda I)=0\\).\u003c/p\u003e\n\u003cp\u003ePlugging the eigenvalues back, and recalling the \u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e, we are left with some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is true if we have enough \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es which forms a basis. Now, at \\(y(0)\\), we have some \\(y_0 = c_1v_1 + \u0026hellip; + c_{n}v_{n}\\).\u003c/p\u003e\n\u003cp\u003eThis yields a system \\(y_{0} = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] \\mqty[c_1 \\\\ \\dots \\\\ c_{n}]\\).\u003c/p\u003e\n\u003cp\u003eWe call this matrix written in terms of eigenvectors \\(E\\), that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[c_1 \\\\ \\dots \\\\ c_{n}] = E^{-1} y_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis method works for cases where we have enough \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es to admit enough initial conditions. 
Otherwise, \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation.\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"special-cases\"\u003eSpecial Cases\u003c/h2\u003e\n\u003ch3 id=\"2x2-with-lambda-2-bar-lambda-1\"\u003e2x2 with \\(\\lambda_{2} = \\bar{\\lambda_{1}}\\)\u003c/h3\u003e\n\u003cp\u003eFor any two by two system, where there the eigenvalues are conjugates of each other, we can formulate a solution in the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = c_1 Re(e^{\\lambda t} v) + c_2 Im(e^{\\lambda t}v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif the matrix representing the system admits two eigenvalues, \\(\\lambda\\) and \\(\\bar{\\lambda}\\). We can obtain this by rephrasing one solution as \\(e^{\\lambda t} = e^{a + ib} e^{t} = e^{a+t}(\\cos b + i\\sin b)\\).\u003c/p\u003e\n\u003ch2 id=\"tips-and-tricks\"\u003eTips and Tricks\u003c/h2\u003e\n\u003ch3 id=\"changing-higher-order-system-into-lower-orders\"\u003eChanging higher order system into lower orders\u003c/h3\u003e\n\u003cp\u003eWe can actually write higher order linear system this way too:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can actually construct:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; y_1(t) = y(t) \\\\\n\u0026amp; y_2(t) = y\u0026rsquo;(t)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd therefore, we can construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(y_1 \\\\ y_2)\u0026rsquo; = \\mqty(y_2 \\\\ -by1 - ay2) = \\mqty(0 \u0026amp; 1 \\\\ -b \u0026amp;-a) \\mqty(y_1 \\\\ y_2)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsystems_of_odes/","tags":null,"title":"First-Order Linear Systems of ODEs"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhflexua/","tags":null,"title":"flexua"},{"categories":null,"contents":"To prove properties on Finite 
State Machines, we can construct a proof:\nstating an invariant proving that the invarient is true for all states for all transitions: assume invarient is true before transition and prove that its true after So, essentially induction.\n","html":"\u003cp\u003eTo prove properties on \u003ca href=\"/posts/kbhfinite_state_machine/\"\u003eFinite State Machine\u003c/a\u003es, we can construct a proof:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estating an invariant\u003c/li\u003e\n\u003cli\u003eproving that the invarient is true for all states\u003c/li\u003e\n\u003cli\u003efor all transitions: assume invarient is true before transition and prove that its true after\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo, essentially induction.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfloyd_s_invariant_method/","tags":null,"title":"Floyd's Invariant Method"},{"categories":null,"contents":"flux is the volume of flow per unit time: multiplying the speed of flow \\(\\frac{m}{s}\\) against the area \\(m^{2}\\) gives you the volume flowed per second \\(\\frac{m^{3}}{s}\\).\ntilted flux Flow, however, is not necessarily perpendicular to the plain. Therefore, we only analyze the perpendicular component of the flow: that is \u0026mdash; \\(\\Phi = Av \\cos \\theta\\). Why? 
If we tipped the plane (of certain area) up, the flow that used to cross the bottom of the plane now will not go through the plane, so we want to account for that.\nelectric flux The electric flux through an are, hopefully not unexpectedly, is:\n\\begin{equation} \\Phi_{E} = \\int E \\cdot dA \\end{equation}\nwhere, \\(E\\) is the electric field strength though that differential area, and \\(dA\\) is the actual differential area.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhflux/\"\u003eflux\u003c/a\u003e is the volume of flow per unit time: multiplying the speed of flow \\(\\frac{m}{s}\\) against the area \\(m^{2}\\) gives you the volume flowed per second \\(\\frac{m^{3}}{s}\\).\u003c/p\u003e\n\u003ch2 id=\"tilted-flux\"\u003etilted flux\u003c/h2\u003e\n\u003cp\u003eFlow, however, is not necessarily \u003cstrong\u003eperpendicular\u003c/strong\u003e to the plain. Therefore, we only analyze the perpendicular component of the flow: that is \u0026mdash; \\(\\Phi = Av \\cos \\theta\\). Why? If we tipped the plane (of certain area) up, the flow that used to cross the bottom of the plane now will not go through the plane, so we want to account for that.\u003c/p\u003e\n\u003ch2 id=\"electric-flux\"\u003eelectric flux\u003c/h2\u003e\n\u003cp\u003eThe electric flux through an are, hopefully not unexpectedly, is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Phi_{E} = \\int E \\cdot dA\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(E\\) is the \u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e strength though that differential area, and \\(dA\\) is the actual differential area.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhflux/","tags":null,"title":"flux"},{"categories":null,"contents":"Abstract Alzheimer\u0026rsquo;s Disease (AD) is a demonstrativeness disease marked by declines in cognitive function. 
Despite early diagnoses being critical for AD prognosis and treatment, currently accepted diagnoses mechanisms for AD requires clinical outpatient testing with a medical professional, which reduces its accessibility. In this work, we propose a possible feature extraction mechanism leveraging the previously demonstrated errors of Hidden Markov-based forced alignment (FA) tools upon cognitively impaired patients as an automated means to quantify linguistic disfluency.\nBackground Annotated linguistic disfluency features, used in combination with semantic features, have been shown ((Antonsson et al. 2021)) to improve the accuracy of AD classification systems. However, manual annotation of disfluency hinders the throughput of AD detection systems. Furthermore, there is a dearth ((Guo et al. 2021)) of data provided with preexisting annotated results.\nExisting acoustic-only approaches ((Lindsay, Tröger, and König 2021; Shah et al. 2021)) frequently places focus on the actual speech features such as silence, energy, rate, or loudness. While this approach has returned promising results ((Wang et al. 2019)), it renders the acoustic data features extracted independent of actual linguistic disfluency. Of course, some approaches (including that in (Wang et al. 2019)) perform separate, manual annotation on both aspects and treat them jointly with late fusion. However, no existing approaches have an effective feature representation that bridges the acoustic-linguistic gap.\nAn incidental effect of Hidden Markov Model (HMM) based Viterbi forced alignment (FA) tools (such as P2FA) is that its quality is shown ((Saz et al. 2009)) to be lowered in cognitively impaired speakers, resulting from a roughly \\(50\\%\\) decrease in power of discrimination between stressed and unstressed vowels. 
Other ASR and FA approaches ((Tao, Xueqing, and Bian 2010)) has since been designed discriminate against such changes more effectively.\nProposal By encoding FA results of HMM based approaches in embedding space, we introduce a novel feature representation of acoustic information. As FA requires an existing transcript, this method is considered semi-automated because the test must be either administered via a common-transcript, transcribed manually later, or transcribed using ASR techniques. After encoding, the proposed feature can be used in a few ways.\nEuclidean distance The Euclidean Distance approach compares the embedding of the HMM FA vector with a \u0026ldquo;reference\u0026rdquo; benchmark via pythagoras in high dimension.\nThere are two possible modalities by which the \u0026ldquo;reference\u0026rdquo; can be acquired; if the data was sourced via the patient sample reading a standardized transcript, a reference FA sample could be provided via the audio of another individual reading the same transcript screened traditionally screened without AD. Therefore, the \u0026ldquo;deviation from reference\u0026rdquo; would be used as an input feature group to any proposed model architectures.\nAlternatively, as stated before, other FA approaches are less susceptible to lexical hindrances with decreased discriminatory power. Therefore, we could equally take the Euclidean distance between embedded results of two different FA mechanisms\u0026mdash;one shown to be more sustainable to cognitively impaired speakers and one not\u0026mdash;as input features to training architectures.\nCross-Attention One key issue with the Euclidean Distance approach is that the difference between \u0026ldquo;normal\u0026rdquo; pauses, changes in speaker pace, etc. which would be variable between different speakers even controlling for AD prognoses.\nIn computer vision, few-shot classification cross-attention ((Hou et al. 
2019)) has shown promising results in discrimination; furthermore, trainable cross-attention ensures more flexible control to non-prognostic verbal disturbances such as a normal change in pace which would otherwise cause a large difference in the Euclidean Distance approach.\nIn practice, a model similar to that proposed by ((Hou et al. 2019)) would be used as the basis to encode (or even discriminate) between pairwise samples of different FA approaches or against a non-AD control, as per highlighted in the section above.\nAs input features Of course, the raw FA embedding can be used as an input feature. There are less prior work on this front as this project would be, as far as we know, proposing the use of forced aligner outputs as a feature input heuristic.\nReferences Antonsson, Malin, Kristina Lundholm Fors, Marie Eckerström, and Dimitrios Kokkinakis. 2021. “Using a Discourse Task to Explore Semantic Ability in Persons with Cognitive Impairment.” Frontiers in Aging Neuroscience 12 (January): 607449. doi:10.3389/fnagi.2020.607449. Guo, Yue, Changye Li, Carol Roan, Serguei Pakhomov, and Trevor Cohen. 2021. “Crossing the ‘Cookie Theft’ Corpus Chasm: Applying What BERT Learns from Outside Data to the ADReSS Challenge Dementia Detection Task.” Frontiers in Computer Science 3 (April): 642517. doi:10.3389/fcomp.2021.642517. Hou, Ruibing, Hong Chang, Bingpeng Ma, Shiguang Shan, and Xilin Chen. 2019. “Cross Attention Network for Few-Shot Classification.” Advances in Neural Information Processing Systems 32. Lindsay, Hali, Johannes Tröger, and Alexandra König. 2021. “Language Impairment in Alzheimer’s Disease—Robust and Explainable Evidence for AD-Related Deterioration of Spontaneous Speech through Multilingual Machine Learning.” Frontiers in Aging Neuroscience 13 (May): 642033. doi:10.3389/fnagi.2021.642033. Saz, Oscar, Javier Simón, W Ricardo Rodr\\’ıguez, Eduardo Lleida, and Carlos Vaquero. 2009. 
“Analysis of Acoustic Features in Speakers with Cognitive Disorders and Speech Impairments.” Eurasip Journal on Advances in Signal Processing 2009. Springer: 1–11. Shah, Zehra, Jeffrey Sawalha, Mashrura Tasnim, Shi-ang Qi, Eleni Stroulia, and Russell Greiner. 2021. “Learning Language and Acoustic Models for Identifying Alzheimer’s Dementia from Speech.” Frontiers in Computer Science 3 (February): 624659. doi:10.3389/fcomp.2021.624659. Tao, Ye, Li Xueqing, and Wu Bian. 2010. “A Dynamic Alignment Algorithm for Imperfect Speech and Transcript.” Computer Science and Information Systems 7 (1): 75–84. doi:10.2298/CSIS1001075T. Wang, Tianqi, Chongyuan Lian, Jingshen Pan, Quanlei Yan, Feiqi Zhu, Manwa L. Ng, Lan Wang, and Nan Yan. 2019. “Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese.” In Interspeech 2019, 3880–84. ISCA. doi:10.21437/Interspeech.2019-2414. ","html":"\u003ch2 id=\"abstract\"\u003eAbstract\u003c/h2\u003e\n\u003cp\u003eAlzheimer\u0026rsquo;s Disease (AD) is a demonstrativeness disease marked by declines in cognitive function. Despite early diagnoses being critical for AD prognosis and treatment, currently accepted diagnoses mechanisms for AD requires clinical outpatient testing with a medical professional, which reduces its accessibility. In this work, we propose a possible feature extraction mechanism leveraging the previously demonstrated errors of Hidden Markov-based forced alignment (FA) tools upon cognitively impaired patients as an automated means to quantify linguistic disfluency.\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eAnnotated linguistic disfluency features, used in combination with semantic features, have been shown ((\u003ca href=\"#citeproc_bib_item_1\"\u003eAntonsson et al. 2021\u003c/a\u003e)) to improve the accuracy of AD classification systems. 
However, manual annotation of disfluency hinders the throughput of AD detection systems. Furthermore, there is a dearth ((\u003ca href=\"#citeproc_bib_item_2\"\u003eGuo et al. 2021\u003c/a\u003e)) of data provided with preexisting annotated results.\u003c/p\u003e\n\u003cp\u003eExisting acoustic-only approaches ((\u003ca href=\"#citeproc_bib_item_4\"\u003eLindsay, Tröger, and König 2021\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_6\"\u003eShah et al. 2021\u003c/a\u003e)) frequently places focus on the actual speech features such as silence, energy, rate, or loudness. While this approach has returned promising results ((\u003ca href=\"#citeproc_bib_item_8\"\u003eWang et al. 2019\u003c/a\u003e)), it renders the acoustic data features extracted independent of actual linguistic disfluency. Of course, some approaches (including that in (\u003ca href=\"#citeproc_bib_item_8\"\u003eWang et al. 2019\u003c/a\u003e)) perform separate, manual annotation on both aspects and treat them jointly with late fusion. However, no existing approaches have an effective feature representation that bridges the acoustic-linguistic gap.\u003c/p\u003e\n\u003cp\u003eAn incidental effect of Hidden Markov Model (HMM) based Viterbi forced alignment (FA) tools (such as P2FA) is that its quality is shown ((\u003ca href=\"#citeproc_bib_item_5\"\u003eSaz et al. 2009\u003c/a\u003e)) to be lowered in cognitively impaired speakers, resulting from a roughly \\(50\\%\\) decrease in power of discrimination between stressed and unstressed vowels. Other ASR and FA approaches ((\u003ca href=\"#citeproc_bib_item_7\"\u003eTao, Xueqing, and Bian 2010\u003c/a\u003e)) has since been designed discriminate against such changes more effectively.\u003c/p\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eBy encoding FA results of HMM based approaches in embedding space, we introduce a novel feature representation of acoustic information. 
As FA requires an existing transcript, this method is considered semi-automated because the test must be either administered via a common-transcript, transcribed manually later, or transcribed using ASR techniques. After encoding, the proposed feature can be used in a few ways.\u003c/p\u003e\n\u003ch3 id=\"euclidean-distance\"\u003eEuclidean distance\u003c/h3\u003e\n\u003cp\u003eThe Euclidean Distance approach compares the embedding of the HMM FA vector with a \u0026ldquo;reference\u0026rdquo; benchmark via pythagoras in high dimension.\u003c/p\u003e\n\u003cp\u003eThere are two possible modalities by which the \u0026ldquo;reference\u0026rdquo; can be acquired; if the data was sourced via the patient sample reading a standardized transcript, a reference FA sample could be provided via the audio of another individual reading the same transcript screened traditionally screened without AD. Therefore, the \u0026ldquo;deviation from reference\u0026rdquo; would be used as an input feature group to any proposed model architectures.\u003c/p\u003e\n\u003cp\u003eAlternatively, as stated before, other FA approaches are less susceptible to lexical hindrances with decreased discriminatory power. Therefore, we could equally take the Euclidean distance between embedded results of two different FA mechanisms\u0026mdash;one shown to be more sustainable to cognitively impaired speakers and one not\u0026mdash;as input features to training architectures.\u003c/p\u003e\n\u003ch3 id=\"cross-attention\"\u003eCross-Attention\u003c/h3\u003e\n\u003cp\u003eOne key issue with the Euclidean Distance approach is that the difference between \u0026ldquo;normal\u0026rdquo; pauses, changes in speaker pace, etc. which would be variable between different speakers even controlling for AD prognoses.\u003c/p\u003e\n\u003cp\u003eIn computer vision, few-shot classification cross-attention ((\u003ca href=\"#citeproc_bib_item_3\"\u003eHou et al. 
2019\u003c/a\u003e)) has shown promising results in discrimination; furthermore, trainable cross-attention ensures more flexible control to non-prognostic verbal disturbances such as a normal change in pace which would otherwise cause a large difference in the Euclidean Distance approach.\u003c/p\u003e\n\u003cp\u003eIn practice, a model similar to that proposed by ((\u003ca href=\"#citeproc_bib_item_3\"\u003eHou et al. 2019\u003c/a\u003e)) would be used as the basis to encode (or even discriminate) between pairwise samples of different FA approaches or against a non-AD control, as per highlighted in the section above.\u003c/p\u003e\n\u003ch3 id=\"as-input-features\"\u003eAs input features\u003c/h3\u003e\n\u003cp\u003eOf course, the raw FA embedding can be used as an input feature. There are less prior work on this front as this project would be, as far as we know, proposing the use of forced aligner outputs as a feature input heuristic.\u003c/p\u003e\n\u003ch2 id=\"references\"\u003eReferences\u003c/h2\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eAntonsson, Malin, Kristina Lundholm Fors, Marie Eckerström, and Dimitrios Kokkinakis. 2021. “Using a Discourse Task to Explore Semantic Ability in Persons with Cognitive Impairment.” \u003ci\u003eFrontiers in Aging Neuroscience\u003c/i\u003e 12 (January): 607449. doi:\u003ca href=\"https://doi.org/10.3389/fnagi.2020.607449\"\u003e10.3389/fnagi.2020.607449\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eGuo, Yue, Changye Li, Carol Roan, Serguei Pakhomov, and Trevor Cohen. 2021. 
“Crossing the ‘Cookie Theft’ Corpus Chasm: Applying What BERT Learns from Outside Data to the ADReSS Challenge Dementia Detection Task.” \u003ci\u003eFrontiers in Computer Science\u003c/i\u003e 3 (April): 642517. doi:\u003ca href=\"https://doi.org/10.3389/fcomp.2021.642517\"\u003e10.3389/fcomp.2021.642517\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eHou, Ruibing, Hong Chang, Bingpeng Ma, Shiguang Shan, and Xilin Chen. 2019. “Cross Attention Network for Few-Shot Classification.” \u003ci\u003eAdvances in Neural Information Processing Systems\u003c/i\u003e 32.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eLindsay, Hali, Johannes Tröger, and Alexandra König. 2021. “Language Impairment in Alzheimer’s Disease—Robust and Explainable Evidence for AD-Related Deterioration of Spontaneous Speech through Multilingual Machine Learning.” \u003ci\u003eFrontiers in Aging Neuroscience\u003c/i\u003e 13 (May): 642033. doi:\u003ca href=\"https://doi.org/10.3389/fnagi.2021.642033\"\u003e10.3389/fnagi.2021.642033\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eSaz, Oscar, Javier Simón, W Ricardo Rodr\\’ıguez, Eduardo Lleida, and Carlos Vaquero. 2009. “Analysis of Acoustic Features in Speakers with Cognitive Disorders and Speech Impairments.” \u003ci\u003eEurasip Journal on Advances in Signal Processing\u003c/i\u003e 2009. Springer: 1–11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eShah, Zehra, Jeffrey Sawalha, Mashrura Tasnim, Shi-ang Qi, Eleni Stroulia, and Russell Greiner. 2021. “Learning Language and Acoustic Models for Identifying Alzheimer’s Dementia from Speech.” \u003ci\u003eFrontiers in Computer Science\u003c/i\u003e 3 (February): 624659. 
doi:\u003ca href=\"https://doi.org/10.3389/fcomp.2021.624659\"\u003e10.3389/fcomp.2021.624659\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eTao, Ye, Li Xueqing, and Wu Bian. 2010. “A Dynamic Alignment Algorithm for Imperfect Speech and Transcript.” \u003ci\u003eComputer Science and Information Systems\u003c/i\u003e 7 (1): 75–84. doi:\u003ca href=\"https://doi.org/10.2298/CSIS1001075T\"\u003e10.2298/CSIS1001075T\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eWang, Tianqi, Chongyuan Lian, Jingshen Pan, Quanlei Yan, Feiqi Zhu, Manwa L. Ng, Lan Wang, and Nan Yan. 2019. “Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese.” In \u003ci\u003eInterspeech 2019\u003c/i\u003e, 3880–84. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2019-2414\"\u003e10.21437/Interspeech.2019-2414\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhdementiabank_acoustics_project_proposal/","tags":null,"title":"Forced-Alignment Error for Feature Extraction for Acoustic AD Detection"},{"categories":null,"contents":"fork creates a second process that is an exact clone from the first.\nThe original process is called the parent, the child process is called the child. The child comes in at the next instruction after fork. This means that fork calls once, returns twice. After fork, the execution order between both processes is completely up to the OS. After fork, we cannot assume execution order.\nFork\u0026rsquo;s return value is different between parent and child:\nin parent, fork will return the PID of the child process in the child, fork will return \\(0\\), you can get PID by calling getpid, and get parent ID through getppid. 
if its \\(-1\\), something failed things that are duplicated fire descriptor table mapped memory regions (both stack and heap) shell a shell forks off a child to run the command.\nwhile (true) { char *command = { \u0026#34;ls\u0026#34;, \u0026#34;things\u0026#34; }; pid_t child_pid = fork(); if (!child_pid) { // this is the child; execvp will check PATH for you execvp(command.argv[0], command.argv); // if we got here, the PID didn\u0026#39;t do well throw STSHException(string(command.argv[0])+\u0026#34;: not found or didn\u0026#39;t succeed to fork.\u0026#34;); } waitpid(child_pid); // do cleanup } This is because the act of running a subprogram from a program requires taking over the current PID with a different program. If we don\u0026rsquo;t fork, once the takeover happens, we don\u0026rsquo;t have a shell anymore.\nexecvp execvp takes over the current PID with another executable.\nint execvp(const char *path, char *argv[]); if execvp works, obviously it never returns. If it is unsuccessful, it returns -1.\nThe arguments list have to BEGIN WITH EXECUTABLE NAME and END WITH NULL.\nchar *args[] = { \u0026#34;/bin/ls\u0026#34;, \u0026#34;-l\u0026#34;, \u0026#34;~/hewo\u0026#34;, NULL }; execvp(args[0], args); This is how we run other programs. After this happens, recall that the process is the SAME PROCESS, so we can still wait on this process.\nexecvp LEAVES THE FILE DESCRIPTOR TABLE.\nwaitpid waitpid waits for a subprocess and frees information from the OS to store the information about the child process\u0026rsquo; exit code. waitpid can ONLY ALLOW YOU TO WAIT ON DIRECT CHILDREN*.\npid_t waitpid(pid_t pid, int *status, int options); pid status: pointer to store return about the child options (0 for now) if the PID has died, this returns immediately. 
Otherwise, this blocks.\nthe status int is a bitmap with a bunch of stuff, which we can check with a series of macros\nif (WIFEXISTED(status)) { // child normal exit int statuscode = WEXITSTATUS(status); } else { // abnormal exist } wait on any children If want to deal with the children as they exit, whichever one finishes first, you can write:\nint pid = waitpid(-1, ...); which will wait on any of the process\u0026rsquo; direct children, returning whichever one finishes first and returning its PID. If pid-1= and errcode is ECHILD, this means that there\u0026rsquo;s no more children to be waited on.\nwords zombie a child process which exists which hasn\u0026rsquo;t been blocked by a parent process using waitpid, where its exit code is stored and taking up resources forever.\norphan a child process whose parent exited; which the bootloader takes care of.\nfork mechanics The act of copying stack and heap sounds really really expensive. So\u0026hellip;. Whapppens?\nEach program thinks its is given all memory addresses to use; the OS maps the \u0026ldquo;virtual addresses\u0026rdquo; to the main address. So, when the fork happens, the virtual address space stays the same. The child will map the parent\u0026rsquo;s memory addresses to different physical addresses than for the parent.\nThe copies are LAZY\u0026mdash;if the child writes to an area in memory, its virtual address are mapped to different addresses. 
If no writes by the child happen, the virtual address are mapped to the same address.\nduring file reading, the file descriptors gets cloned, the underlying open file table doesn\u0026rsquo;t close.\ntypical mp pattern int main() { // fork off first child pid_t f1 = fork(); if (f1 == 0) { dispatch_1(); return 0; } // fork off the process, if // we are still in main (meaning we are not a child) pid_t f2 = fork(); if (f2 == 0) { dispatch_2(); return 0; } // this is equivalent to .join() // recall that even if f1 returns later // its ok, becasue we can let f2 be zombie for a bit waitpid(f1, NULL, 0); waitpid(f2, NULL, 0); return 0; } ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfork/\"\u003efork\u003c/a\u003e creates a second process that is an exact \u003cstrong\u003eclone\u003c/strong\u003e from the first.\u003c/p\u003e\n\u003cp\u003eThe original process is called the \u003cstrong\u003eparent\u003c/strong\u003e, the child process is called the \u003cstrong\u003echild\u003c/strong\u003e. The \u003cstrong\u003echild\u003c/strong\u003e comes in at the next instruction after fork. This means that fork \u003cstrong\u003ecalls once, returns twice\u003c/strong\u003e. 
\u003cstrong\u003eAfter \u003ccode\u003efork\u003c/code\u003e, the execution order between both processes is completely up to the OS.\u003c/strong\u003e After fork, we cannot assume execution order.\u003c/p\u003e\n\u003cp\u003eFork\u0026rsquo;s \u003cstrong\u003ereturn value\u003c/strong\u003e is different between parent and child:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ein parent, fork will return the PID of the child process\u003c/li\u003e\n\u003cli\u003ein the child, fork will return \\(0\\), you can get PID by calling \u003ccode\u003egetpid\u003c/code\u003e, and get parent ID through \u003ccode\u003egetppid\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eif its \\(-1\\), something failed\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things-that-are-duplicated\"\u003ethings that are duplicated\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efire descriptor table\u003c/li\u003e\n\u003cli\u003emapped memory regions (both stack and heap)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"shell\"\u003eshell\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#shell\"\u003eshell\u003c/a\u003e forks off a child to run the command.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;ls\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;things\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// this is the child; execvp will check PATH for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if we got here, the PID didn\u0026#39;t do well\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethrow\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eSTSHException\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;: not found or didn\u0026#39;t succeed to fork.\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do cleanup\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis is because the act of running a subprogram from a program requires \u003cstrong\u003etaking over the current PID with a different program\u003c/strong\u003e. If we don\u0026rsquo;t fork, once the takeover happens, we don\u0026rsquo;t have a shell anymore.\u003c/p\u003e\n\u003ch2 id=\"execvp\"\u003eexecvp\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#execvp\"\u003eexecvp\u003c/a\u003e takes over the current PID with another executable.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epath\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eif \u003ca href=\"#execvp\"\u003eexecvp\u003c/a\u003e works, obviously it never returns. If it is unsuccessful, it returns \u003ccode\u003e-1\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eThe arguments list have to \u003cstrong\u003eBEGIN WITH EXECUTABLE NAME\u003c/strong\u003e and \u003cstrong\u003eEND WITH NULL\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;/bin/ls\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;-l\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;~/hewo\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis is how we run other programs. After this happens, recall that the process is the \u003cstrong\u003eSAME PROCESS\u003c/strong\u003e, so we can still wait on this process.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eexecvp LEAVES THE FILE DESCRIPTOR TABLE\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"waitpid\"\u003ewaitpid\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#waitpid\"\u003ewaitpid\u003c/a\u003e waits for a subprocess and frees information from the OS to store the information about the child process\u0026rsquo; exit code. 
\u003ca href=\"#waitpid\"\u003ewaitpid\u003c/a\u003e can \u003cstrong\u003eONLY ALLOW YOU TO WAIT ON DIRECT CHILDREN*\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003epid\u003c/li\u003e\n\u003cli\u003estatus: pointer to store return about the child\u003c/li\u003e\n\u003cli\u003eoptions (0 for now)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eif the PID has died, this returns immediately. 
Otherwise, this blocks.\u003c/p\u003e\n\u003ch3 id=\"the-status-int\"\u003ethe \u003ccode\u003estatus\u003c/code\u003e int\u003c/h3\u003e\n\u003cp\u003eis a bitmap with a bunch of stuff, which we can check with a series of macros\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eWIFEXISTED\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// child normal exit\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatuscode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eWEXITSTATUS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// abnormal exist\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"wait-on-any-children\"\u003ewait on any children\u003c/h3\u003e\n\u003cp\u003eIf want to deal with the children as they exit, whichever one finishes first, you can write:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich will wait on any of the process\u0026rsquo; direct children, returning whichever one finishes first and returning its PID. 
If \u003ccode\u003epid\u003c/code\u003e-1= and errcode is ECHILD, this means that there\u0026rsquo;s no more children to be waited on.\u003c/p\u003e\n\u003ch2 id=\"words\"\u003ewords\u003c/h2\u003e\n\u003ch3 id=\"zombie\"\u003ezombie\u003c/h3\u003e\n\u003cp\u003ea child process which exists which hasn\u0026rsquo;t been blocked by a parent process using \u003ccode\u003ewaitpid\u003c/code\u003e, where its exit code is stored and taking up resources forever.\u003c/p\u003e\n\u003ch3 id=\"orphan\"\u003eorphan\u003c/h3\u003e\n\u003cp\u003ea child process whose parent exited; which the bootloader takes care of.\u003c/p\u003e\n\u003ch2 id=\"fork-mechanics\"\u003efork mechanics\u003c/h2\u003e\n\u003cp\u003eThe act of copying stack and heap sounds really really expensive. So\u0026hellip;. Whapppens?\u003c/p\u003e\n\u003cp\u003eEach program thinks its is given all memory addresses to use; the OS maps the \u0026ldquo;virtual addresses\u0026rdquo; to the main address. So, when the fork happens, the virtual address space stays the same. The child will map the parent\u0026rsquo;s memory addresses to \u003cstrong\u003edifferent\u003c/strong\u003e physical addresses than for the parent.\u003c/p\u003e\n\u003cp\u003eThe copies are \u003cstrong\u003eLAZY\u003c/strong\u003e\u0026mdash;if the child writes to an area in memory, its virtual address are mapped to different addresses. 
If no writes by the child happen, the virtual address are mapped to the same address.\u003c/p\u003e\n\u003cp\u003eduring file reading, the file descriptors gets cloned, the underlying \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e doesn\u0026rsquo;t close.\u003c/p\u003e\n\u003ch2 id=\"typical-mp-pattern\"\u003etypical mp pattern\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// fork off first child\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003edispatch_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// fork off the process, if\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// we are still in main (meaning we are not a child)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003edispatch_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// this is equivalent to .join()\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// recall that even if f1 returns later\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// its ok, becasue we can let f2 be zombie for a bit\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhfork/","tags":null,"title":"fork"},{"categories":null,"contents":"Ingredients:\n\\(\\mathcal{P}\\) problem (states, transitions, etc.) 
\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower \\(U\\) value function estimate at depth \\(d\\) We essentially roll forward into all possible next states up to depth \\(d\\), and tabulate our value function.\nDefine subroutine forward_search(depth_remaining, value_function_estimate_at_d, state).\nif depth_remaining=0; return (action=None, utility=value_function_estimate_at_d(state)) otherwise, let best = (action = None, utility = -infinity) for each possible action at our state get an action-value for our current state where the utility of each next state is the utility given by forward_search(depth_remaining-1, value_function_estimate_at_d, next_state) if the action-value is higher than what we have, then we set best=(a, action-value) return best What this essentially does is to Dijkstra an optimal path towards the highest final utility \\(U(s)\\) om your current state, by trying all states.\n","html":"\u003cp\u003eIngredients:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003cli\u003e\\(U\\) \u003ca href=\"/posts/kbhaction_value_function/#value-function--kbhaction-value-function-dot-md\"\u003evalue function\u003c/a\u003e estimate at depth \\(d\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe essentially roll forward into all possible next states up to depth \\(d\\), and tabulate our \u003ca href=\"/posts/kbhaction_value_function/#value-function--kbhaction-value-function-dot-md\"\u003evalue function\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eDefine subroutine \u003ccode\u003eforward_search(depth_remaining, value_function_estimate_at_d, state)\u003c/code\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif \u003ccode\u003edepth_remaining=0\u003c/code\u003e; return \u003ccode\u003e(action=None, 
utility=value_function_estimate_at_d(state))\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eotherwise,\n\u003col\u003e\n\u003cli\u003elet \u003ccode\u003ebest = (action = None, utility = -infinity)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003efor each possible action at our state\n\u003col\u003e\n\u003cli\u003eget an \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e for our current state where the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of each next state is the utility given by \u003ccode\u003eforward_search(depth_remaining-1, value_function_estimate_at_d, next_state)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e is higher than what we have, then we set \u003ccode\u003ebest=(a, action-value)\u003c/code\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ereturn \u003ccode\u003ebest\u003c/code\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhat this essentially does is to Dijkstra an optimal path towards the highest final utility \\(U(s)\\) om your current state, by trying all states.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhforward_search/","tags":null,"title":"Forward Search"},{"categories":null,"contents":"The Forw\n","html":"\u003cp\u003eThe Forw\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhforward_forward_algorithm/","tags":null,"title":"Forward-Forward Algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfoundational_model/","tags":null,"title":"foundational model"},{"categories":null,"contents":"Problem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. 
Also, some of them have time, some of them are frozen, etc.\nSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\nproteins can be encoded as hierarchies protein functional behavior secondary structure/primary structure amino acids sequences! Slicing through the embedding space of GenSLMs can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\n","html":"\u003cp\u003eProblem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. Also, some of them have \u003cem\u003etime\u003c/em\u003e, some of them are frozen, etc.\u003c/p\u003e\n\u003cp\u003eSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\u003c/p\u003e\n\u003ch2 id=\"proteins-can-be-encoded-as-hierarchies\"\u003eproteins can be encoded as hierarchies\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eprotein functional behavior\u003c/li\u003e\n\u003cli\u003esecondary structure/primary structure\u003c/li\u003e\n\u003cli\u003eamino acids\u003c/li\u003e\n\u003cli\u003esequences!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSlicing through the embedding space of \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis-1/","tags":null,"title":"Foundational Models of Interaction Analysis"},{"categories":null,"contents":"Problem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. 
Also, some of them have time, some of them are frozen, etc.\nSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\nstory 1: proteins can be encoded as hierarchies protein functional behavior secondary structure/primary structure amino acids sequences! Slicing through the embedding space of GenSLMs can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\nstory 2: tetrahedron tessellations and finite-element methods to analyze dynamics behavior Resolving cyro-EM dynamics to be able to capture binding behavior\nANCA-AE\napplications training GenSLMs can help identify covid variantts ","html":"\u003cp\u003eProblem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. Also, some of them have \u003cem\u003etime\u003c/em\u003e, some of them are frozen, etc.\u003c/p\u003e\n\u003cp\u003eSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\u003c/p\u003e\n\u003ch2 id=\"story-1-proteins-can-be-encoded-as-hierarchies\"\u003estory 1: proteins can be encoded as hierarchies\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eprotein functional behavior\u003c/li\u003e\n\u003cli\u003esecondary structure/primary structure\u003c/li\u003e\n\u003cli\u003eamino acids\u003c/li\u003e\n\u003cli\u003esequences!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSlicing through the embedding space of \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\u003c/p\u003e\n\u003ch2 id=\"story-2-tetrahedron-tessellations-and-finite-element-methods-to-analyze-dynamics-behavior\"\u003estory 2: tetrahedron tessellations and finite-element methods to analyze dynamics 
behavior\u003c/h2\u003e\n\u003cp\u003eResolving \u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e dynamics to be able to capture binding behavior\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhanca_ae/\"\u003eANCA-AE\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"applications\"\u003eapplications\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etraining \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e can help identify covid variantts\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis/","tags":null,"title":"Foundational Models of Interaction Analysis"},{"categories":null,"contents":"For vector \\(v\\) in the span of orthogonal basis \\(v_1, ..v_{n}\\):\n\\begin{equation} v = c_1 v_1 + \\dots + c_{n} v_{n} \\end{equation}\nwe can write:\n\\begin{equation} c_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot v_{j}} \\end{equation}\nProof:\n\\begin{equation} \\langle v, v_{j} \\rangle = c_{n} \\langle v_{1}, v_{j} \\rangle \\dots \\end{equation}\nwhich is \\(0\\) for all cases that\u0026rsquo;s not \\(\\langle v_{j}, v_{j} \\rangle\\) as the \\(v\\) are orthogonal, and \\(\\mid v_{j} \\mid^{2}\\) for the case where it is.\nHence, we see that:\n\\begin{equation} \\langle v, v_{j} \\rangle = c_{j} \\mid v_{j}\\mid^{2} \\end{equation}\nWhich gives:\n\\begin{equation} c_{j} = \\frac{\\langle v,v_{j} \\rangle}{\\mid v_{j}\\mid^{2}} \\end{equation}\nas desired.\n","html":"\u003cp\u003eFor vector \\(v\\) in the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e basis \\(v_1, ..v_{n}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = c_1 v_1 + \\dots + c_{n} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot 
v_{j}}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, v_{j} \\rangle = c_{n} \\langle v_{1}, v_{j} \\rangle \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is \\(0\\) for all cases that\u0026rsquo;s not \\(\\langle v_{j}, v_{j} \\rangle\\) as the \\(v\\) are orthogonal, and \\(\\mid v_{j} \\mid^{2}\\) for the case where it is.\u003c/p\u003e\n\u003cp\u003eHence, we see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, v_{j} \\rangle = c_{j} \\mid v_{j}\\mid^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{j} = \\frac{\\langle v,v_{j} \\rangle}{\\mid v_{j}\\mid^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlearn_more/","tags":null,"title":"Fourier formula"},{"categories":null,"contents":"Fourier Series and how to find them.\nFor a function given at some interval of length \\(l\\), then the function can be written at:\n\\begin{equation} f(x) = \\sum_{k=1}^{\\infty} a_{k} \\sin \\qty( \\frac{k\\pi x}{l}) \\end{equation}\nor\n\\begin{equation} f(x) = \\sum_{k=1}^{\\infty} b_{k} \\cos \\qty( \\frac{k\\pi x}{l}) \\end{equation}\nRecall that because sin and cos are even and odd parts, the functions above force an even and oddness to your expansions. 
They will be particularly helpful for Dirichlet Conditions and Neumann Conditions.\nBut, in general, you can use a linear combination of the two by doubling the frequency over your interval \\(l\\):\n\\begin{equation} f(x) = a_0 + \\sum_{k=1}^{\\infty} \\qty( a_{k} \\cos(k \\omega x) + b_{k} \\sin(k \\omega x)) \\end{equation}\nwhere \\(\\omega = \\frac{2\\pi}{L}\\).\nStatement Suppose we have a function that satisfies:\nRecall that each \\(\\cos(\\dots)\\) and \\(\\sin (\u0026hellip;)\\) are orthogonal, we can then use the Fourier formula to figure the coefficients \\(a_{k}\\), \\(b_{k}\\).\nAside: why is \\(a_0\\) also orthogonal?\n\\begin{equation} a_0 = a_0 \\cos (0 \\omega x) = a_0 \\cdot 1 = a_0 \\end{equation}\nGeneral Fourier Decomposition Therefore, by the Fourier formula, we expect that:\n\\begin{equation} a_0 = \\frac{\\langle f, 1 \\rangle}{ \\langle 1,1 \\rangle} = \\frac{1}{L} \\int_{0}^{L} f(x) \\dd{x} \\end{equation}\n\\begin{equation} a_{k} = \\frac{\\langle f, \\cos (k\\omega x) \\rangle}{\\langle \\cos (k\\omega x), \\cos (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\cos (k\\omega x) \\dd{x} \\end{equation}\n\\begin{equation} b_{k} = \\frac{\\langle f, \\sin (k\\omega x) \\rangle}{\\langle \\sin (k\\omega x), \\sin (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\sin (k\\omega x) \\dd{x} \\end{equation}\nWhen computing this, recall that:\n\\begin{equation} \\omega = \\frac{2\\pi}{L} \\end{equation}\nwhere \\(L\\) the period of your \\(L\\) periodic function.\nOdd and Even Break If you have an even or odd \\(f(x)\\), we can refine the series even more into simply a sine or cosine only series.\nFor even \\(f(x)\\), we can write:\n\\begin{equation} a_{0} + \\sum_{k=1}^{\\infty} a_{k} \\cos (k\\omega x) \\end{equation}\nwhere:\n\\begin{equation} a_0 = \\frac{1}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\dd{x} \\end{equation}\n\\begin{equation} a_{k} = \\frac{2}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\cos (k \\omega x) \\dd{x} 
\\end{equation}\nWhereas for odd \\(f(x)\\), we write:\n\\begin{equation} \\sum_{k=1}^{\\infty} b_{k} \\sin (k\\omega x) \\end{equation}\n\\begin{equation} b_{k} = \\frac{2}{L / 2} \\int_{0}^{L / 2} f(x) \\sin (k\\omega x) \\dd{x} \\end{equation}\nover any function Suppose we have a function with two roots:\n\\begin{equation} f(0) = 0 = f(l) \\end{equation}\nthen, we can write it in terms of a Fourier Series by odd-extending the function to the negative direction (see \u0026ldquo;odd extensions below\u0026rdquo;).\nThis makes us be able to write \\(f\\) over \\([0,l]\\) as:\n\\begin{equation} f(x) = \\sum_{n=1}^{\\infty} b_{n} \\sin \\qty( \\frac{n\\pi}{l} x) \\end{equation}\nwhere:\n\\begin{equation} b_{n} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{n \\pi}{l} x) \\dd{x} \\end{equation}\nthis is just the \\(l\\) extension function above, but with small \\(l\\) as the function is odd to one side.\nHere\u0026rsquo;s the motivation:\nsin and cos are even and odd parts odd extensions\nImportant note: this function seems to vanish at endpoints \\(0\\) and \\(l\\), and not all functions do that.\nSo, instead, let\u0026rsquo;s consider the odd extension of \\(f\\):\n\\begin{equation} \\hat{f}(x) = f(x), x \\geq 0 \\end{equation}\nand\n\\begin{equation} \\hat{f}(x) = -f(-x), x \u0026lt; 0 \\end{equation}\nThere will therefore be a discontinuous jump at \\(0\\).\nUsing the \\(\\sin\\) function, which are odd, recall that Fourier Series Converges with \\(L\\) Periodic Function, so at \\(0\\) given Gibbs Phenomena, the jump will average the discontinouity down to \\(0\\) (given our extensions are odd).\neven extensions\ninstead, if you want to use \\(\\cos\\), you can make an even extension:\n\\begin{equation} \\hat{f}(x) = f(x), x \\geq 0 \\end{equation}\nand\n\\begin{equation} \\hat{f}(x) = f(-x), x \u0026lt; 0 \\end{equation}\nwhich shouldn\u0026rsquo;t be discontinuous at \\(0\\) at all.\nAdditional Informatino Fourier Series Converges with \\(L\\) Periodic 
Function Suppose \\(f(x)\\) is an \\(L\\) periodic function with at most jump discountinuty, and \\(f\u0026rsquo;\\) continuous everywhere. Then, the associated Fourier Series converges everywhere and coincides with \\(f\\) except for jump discontinuances, where the values are the average of limits from either side. This is called the Gibbs Phenomena\nBackground Fourier formula Consider some orthogonal basis \\(v_1, \u0026hellip; v_{n}\\), recall that if we have:\n\\begin{equation} v = c_1 v_1 + \\dots + c_{n} v_{n} \\end{equation}\nwe can write:\n\\begin{equation} c_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot v_{j}} \\end{equation}\n(this is similar to Writing a vector as a linear combination of orthonormal basis, but recall the \\(v_{j}\\) are orthogonal and not orthonormal, so we have to divide by the square of the norm of \\(v\\). learn more)\ninner product of \\(L\\) periodic functions For \\(f,g : [0,L] \\to \\mathbb{R}\\), which are L-periodic, we write:\n\\begin{equation} \\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x} \\end{equation}\nproperties worth noting for continuous functions \\([0,L]\\) \\(g_1, g_2, h_1, h_2, g, h\\), the inner product rules hold, which gives\n\\(\\langle c_1 g_1 + c_2 g_2, h \\rangle = c_1 \\langle g_1, h \\rangle + c_2 \\langle g_2, h \\rangle\\) \\(\\langle g, c_1h_1 + c_2h_2 \\rangle = c_1 \\langle g, h_1 \\rangle + c_2 \\langle g, h_2 \\rangle\\) \\(\\langle h,g \\rangle = \\langle g,h \\rangle\\), as the functions are over the reals \\(\\langle g,g \\rangle\\) is zero only when \\(g\\) is zero \\(L\\) periodic sinusoids are orthogonal Recall that we have two basic L-periodic sinusoids:\n\\(\\sin \\qty(\\frac{2\\pi k }{L}x)\\) \\(\\cos \\qty(\\frac{2\\pi k }{L}x)\\) Let\u0026rsquo;s write:\n\\begin{equation} \\omega = \\frac{2\\pi}{L} \\end{equation}\nthen, for any distinct integer \\(k_1 \\neq k_2, k_1, k_2 \u0026gt; 0\\), we see that:\n\\begin{equation} \\int_{0}^{L} \\cos (k_1 \\omega x) \\cos (k_2 
\\omega x) = \\int_{0}^{L} \\sin (k_1 \\omega x) \\sin (k_2 \\omega x) = 0 \\end{equation}\nand further for any \\(k\\):\n\\begin{equation} \\int_{0}^{L} \\cos (k_1 \\omega x) \\sin (k_2 \\omega x) = 0 \\end{equation}\nMeaning, every pair of \\(\\langle \\{\\sin, \\cos\\} (k_1 \\omega x), \\{\\sin, \\cos\\} (k_1 \\omega x) \\rangle\\) are orthogonal.\nFurther, for the same \\(k\\),\n\\begin{equation} \\langle \\cos (k\\omega x), \\cos (k \\omega x) \\rangle = \\langle \\sin (k\\omega x), \\sin (k \\omega x) \\rangle = \\frac{L}{2} \\end{equation}\nin partiular:\n\\begin{equation} \\int_{0}^{\\frac{L}{2}} \\sin (k \\omega x) \\sin (k \\omega x) = \\frac{L}{4} \\end{equation}\nif \\(k\\) are equal.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e and how to find them.\u003c/p\u003e\n\u003cp\u003eFor a function given at some interval of length \\(l\\), then the function can be written at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{k=1}^{\\infty} a_{k} \\sin \\qty( \\frac{k\\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{k=1}^{\\infty} b_{k} \\cos \\qty( \\frac{k\\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that because \u003ca href=\"#sin-and-cos-are-even-and-odd-parts\"\u003esin and cos are even and odd parts\u003c/a\u003e, the functions above force an even and oddness to your expansions. 
They will be particularly helpful for \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e and \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBut, in general, you can use a linear combination of the two by doubling the frequency over your interval \\(l\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = a_0 + \\sum_{k=1}^{\\infty} \\qty( a_{k} \\cos(k \\omega x) + b_{k} \\sin(k \\omega x))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\omega = \\frac{2\\pi}{L}\\).\u003c/p\u003e\n\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSuppose we have a function that satisfies:\u003c/p\u003e\n\u003cp\u003eRecall that each \\(\\cos(\\dots)\\) and \\(\\sin (\u0026hellip;)\\) are \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e, we can then use the \u003ca href=\"/posts/kbhlearn_more/\"\u003eFourier formula\u003c/a\u003e to figure the coefficients \\(a_{k}\\), \\(b_{k}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: why is \\(a_0\\) also orthogonal?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_0 = a_0 \\cos (0 \\omega x) = a_0 \\cdot 1 = a_0\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"general-fourier-decomposition\"\u003eGeneral Fourier Decomposition\u003c/h3\u003e\n\u003cp\u003eTherefore, by the \u003ca href=\"/posts/kbhlearn_more/\"\u003eFourier formula\u003c/a\u003e, we expect that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_0 = \\frac{\\langle f, 1 \\rangle}{ \\langle 1,1 \\rangle} = \\frac{1}{L} \\int_{0}^{L} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\frac{\\langle f, \\cos (k\\omega x) \\rangle}{\\langle \\cos (k\\omega x), \\cos (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\cos (k\\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{k} = \\frac{\\langle f, \\sin 
(k\\omega x) \\rangle}{\\langle \\sin (k\\omega x), \\sin (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\sin (k\\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhen computing this, recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\omega = \\frac{2\\pi}{L}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(L\\) the period of your \\(L\\) periodic function.\u003c/p\u003e\n\u003ch3 id=\"odd-and-even-break\"\u003eOdd and Even Break\u003c/h3\u003e\n\u003cp\u003eIf you have an even or odd \\(f(x)\\), we can refine the series even more into simply a sine or cosine only series.\u003c/p\u003e\n\u003cp\u003eFor even \\(f(x)\\), we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{0} + \\sum_{k=1}^{\\infty} a_{k} \\cos (k\\omega x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_0 = \\frac{1}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\frac{2}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\cos (k \\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhereas for odd \\(f(x)\\), we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{k=1}^{\\infty} b_{k} \\sin (k\\omega x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{k} = \\frac{2}{L / 2} \\int_{0}^{L / 2} f(x) \\sin (k\\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"over-any-function\"\u003eover any function\u003c/h3\u003e\n\u003cp\u003eSuppose we have a function with two roots:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(0) = 0 = f(l)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethen, we can write it in terms of a \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e by odd-extending the function to the negative direction (see \u0026ldquo;odd extensions below\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eThis makes us be able to write \\(f\\) over \\([0,l]\\) 
as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{n=1}^{\\infty} b_{n} \\sin \\qty( \\frac{n\\pi}{l} x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{n} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{n \\pi}{l} x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is just the \\(l\\) extension function above, but with small \\(l\\) as the function is odd to one side.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s the motivation:\u003c/p\u003e\n\u003ch4 id=\"sin-and-cos-are-even-and-odd-parts\"\u003esin and cos are even and odd parts\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eodd extensions\u003c/p\u003e\n\u003cp\u003eImportant note: this function seems to vanish at endpoints \\(0\\) and \\(l\\), and not all functions do that.\u003c/p\u003e\n\u003cp\u003eSo, instead, let\u0026rsquo;s consider the odd extension of \\(f\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = f(x), x \\geq 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = -f(-x), x \u0026lt; 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThere will therefore be a discontinuous jump at \\(0\\).\u003c/p\u003e\n\u003cp\u003eUsing the \\(\\sin\\) function, which are odd, recall that \u003ca href=\"#fourier-series-converges-with-l-periodic-function\"\u003eFourier Series Converges with \\(L\\) Periodic Function\u003c/a\u003e, so at \\(0\\) given \u003ca href=\"#fourier-series-converges-with-l-periodic-function\"\u003eGibbs Phenomena\u003c/a\u003e, the jump will average the discontinouity down to \\(0\\) (given our extensions are odd).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eeven extensions\u003c/p\u003e\n\u003cp\u003einstead, if you want to use \\(\\cos\\), you can make an even 
extension:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = f(x), x \\geq 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = f(-x), x \u0026lt; 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich shouldn\u0026rsquo;t be discontinuous at \\(0\\) at all.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-informatino\"\u003eAdditional Informatino\u003c/h2\u003e\n\u003ch3 id=\"fourier-series-converges-with-l-periodic-function\"\u003eFourier Series Converges with \\(L\\) Periodic Function\u003c/h3\u003e\n\u003cp\u003eSuppose \\(f(x)\\) is an \\(L\\) periodic function with at most jump discountinuty, and \\(f\u0026rsquo;\\) continuous everywhere. Then, the associated \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e converges everywhere and coincides with \\(f\\) except for jump discontinuances, where the values are the average of limits from either side. This is called the \u003ca href=\"#fourier-series-converges-with-l-periodic-function\"\u003eGibbs Phenomena\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003ch3 id=\"fourier-formula--kbhlearn-more-dot-md\"\u003e\u003ca href=\"/posts/kbhlearn_more/\"\u003eFourier formula\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eConsider some \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e basis \\(v_1, \u0026hellip; v_{n}\\), recall that if we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = c_1 v_1 + \\dots + c_{n} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot v_{j}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(this is similar to \u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003eWriting a vector as a linear combination of orthonormal basis\u003c/a\u003e, but recall the 
\\(v_{j}\\) are \u003cstrong\u003eorthogonal\u003c/strong\u003e and not \u003cstrong\u003eorthonormal\u003c/strong\u003e, so we have to divide by the square of the norm of \\(v\\). \u003ca href=\"/posts/kbhlearn_more/\"\u003elearn more\u003c/a\u003e)\u003c/p\u003e\n\u003ch3 id=\"inner-product-of-l-periodic-functions--kbhinner-product-dot-md\"\u003e\u003ca href=\"/posts/kbhinner_product/#inner-product-of-l-periodic-functions\"\u003einner product of \\(L\\) periodic functions\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \\(f,g : [0,L] \\to \\mathbb{R}\\), which are \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e, we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"properties-worth-noting\"\u003eproperties worth noting\u003c/h4\u003e\n\u003cp\u003efor continuous functions \\([0,L]\\) \\(g_1, g_2, h_1, h_2, g, h\\), the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e rules hold, which gives\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\langle c_1 g_1 + c_2 g_2, h \\rangle = c_1 \\langle g_1, h \\rangle + c_2 \\langle g_2, h \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle g, c_1h_1 + c_2h_2 \\rangle = c_1 \\langle g, h_1 \\rangle + c_2 \\langle g, h_2 \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle h,g \\rangle = \\langle g,h \\rangle\\), as the functions are over the reals\u003c/li\u003e\n\u003cli\u003e\\(\\langle g,g \\rangle\\) is zero only when \\(g\\) is zero\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"l-periodic-sinusoids-are-orthogonal\"\u003e\\(L\\) periodic sinusoids are orthogonal\u003c/h4\u003e\n\u003cp\u003eRecall that we have two basic \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e sinusoids:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\sin \\qty(\\frac{2\\pi k 
}{L}x)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\cos \\qty(\\frac{2\\pi k }{L}x)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet\u0026rsquo;s write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\omega = \\frac{2\\pi}{L}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethen, for any distinct integer \\(k_1 \\neq k_2, k_1, k_2 \u0026gt; 0\\), we see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{L} \\cos (k_1 \\omega x) \\cos (k_2 \\omega x) = \\int_{0}^{L} \\sin (k_1 \\omega x) \\sin (k_2 \\omega x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand further for any \\(k\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{L} \\cos (k_1 \\omega x) \\sin (k_2 \\omega x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, every pair of \\(\\langle \\{\\sin, \\cos\\} (k_1 \\omega x), \\{\\sin, \\cos\\} (k_1 \\omega x) \\rangle\\) are orthogonal.\u003c/p\u003e\n\u003cp\u003eFurther, for the same \\(k\\),\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle \\cos (k\\omega x), \\cos (k \\omega x) \\rangle = \\langle \\sin (k\\omega x), \\sin (k \\omega x) \\rangle = \\frac{L}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein partiular:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{\\frac{L}{2}} \\sin (k \\omega x) \\sin (k \\omega x) = \\frac{L}{4}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(k\\) are equal.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfourier_series/","tags":null,"title":"Fourier Series"},{"categories":null,"contents":"requirements Consider a function that has no periodicity, but that:\n\\begin{equation} f(x), -\\infty \u0026lt; x \u0026lt; \\infty \\end{equation}\nAnd assume that:\n\\begin{equation} \\int_{\\infty}^{\\infty} |f(x)| \\dd{x}, \u0026lt; \\infty \\end{equation}\nimportant: look up! 
the integral of \\(f(x)\\) has to converge AND this means that the \\(f(x)\\) goes to \\(0\\) actually at boundaries.\n(meaning the function decays as you go towards the end)\ndefinition a Fourier transform is an invertible transformation:\n\\begin{equation} f(x) \\to \\hat{f}(\\lambda) \\end{equation}\nand\n\\begin{equation} \\hat{f}(\\lambda) \\to f(x) \\end{equation}\nWhere,\n\\begin{equation} \\hat{f}(\\lambda) = \\int_{-\\infty}^{\\infty } e^{-i\\lambda x} f(x) \\dd{x} \\end{equation}\n\\begin{equation} f(x) = \\frac{1}{2\\pi} \\int_{\\infty}^{-\\infty} e^{ix\\lambda} \\hat{f}(\\lambda) \\dd{\\lambda} \\end{equation}\nWe sometimes write:\n\\begin{equation} \\hat{f}(\\lambda) = \\mathcal{F}(f)(\\lambda) \\end{equation}\nwhere \\(\\mathcal{F}\\) is an invertible map that gives you the Fourier Series.\nadditional information Properties of \\(\\mathcal{F}\\) it\u0026rsquo;s a Linear Map: \\(\\mathcal{F}(c_1 f_1 + c_2 f_2) = c_1\\mathcal{F}(f_1) + c_2 \\mathcal{F}(f_2)\\) it\u0026rsquo;s recenter able: \\(\\mathcal{F}(f(x+c)) = e^{i c \\lambda}\\mathcal{F}(f)(\\lambda)\\) it\u0026rsquo;s reverse-shift-able: \\(\\mathcal{F}\\qty(e^{i \\lambda_{0} x} f(x)) = \\mathcal{F}(f) (\\lambda -\\lambda_{0})\\) Proof:\nbecause integrals are linear \\(\\int_{-\\infty}^{\\lambda} e^{-i(t-c)\\lambda}f(t) \\dd{t} = e^{ic\\lambda} \\mathcal{F}(f)(\\lambda)\\), where we define \\(t = x+c\\) try it Derivative of Fourier Transform Suppose we want:\n\\begin{equation} \\mathcal{F}(f\u0026rsquo;(x)) = \\int_{\\infty}^{\\infty} e^{-ix\\lambda} f\u0026rsquo;(x) \\dd{x} = \\left e^{-ix\\lambda} f(x)\\right|_{-\\infty}^{\\infty} + i \\lambda \\int_{-\\infty}^{\\infty} e^{-ix\\lambda} f(x) \\dd{x} = i \\lambda \\mathcal{F}(f) (\\lambda) \\end{equation}\nBecause we are guaranteed \\(f(x)\\) evaluated at infinity is \\(0\\), the first term drops out. 
The important conclusion here: *Fourier transforms change a derivative into ALGEBRA of multiplying by \\(i\\lambda\\).\nConsider also:\n\\begin{equation} \\mathcal{F}(x f(x)) = i \\dv \\lambda \\mathcal{F}(f)(\\lambda) \\end{equation}\nyou can show this in a similar way, by attempting to distribute a \\(\\dv \\lambda\\) into the Fourier transform and showing that they are equal.\nFourier Transform of a Gaussian \\begin{equation} \\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = \\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}} \\end{equation}\nand:\n\\begin{equation} \\mathcal{F}^{-1}\\qty(e^{-a\\frac{\\lambda^{2}}{2}}) = \\frac{e^{-\\frac{x^{2}}{2a}}}{\\sqrt{2\\pi a}} \\end{equation}\nwe obtain this:\n\\begin{equation} u = e^{-\\frac{x^{2}}{2}} \\end{equation}\nand:\n\\begin{equation} \\dv{u}{x} = -xe^{-\\frac{x^{2}}{2}} = -xu \\end{equation}\nand if we took a Fourier transform on both sides, we obtain:\n\\begin{equation} \\mathcal{F}\\qty(\\dv{u}{x} + xu) = 0 = i \\lambda \\hat{u} + i \\pdv{\\hat{u}} = 0 \\end{equation}\nand note that this is the same equation. Meaning:\n\\begin{equation} \\mathcal{F}\\qty(\\dv{u}{x} + xu) = \\dv{\\lambda}{x} + \\lambda u \\end{equation}\nthis gives:\n\\begin{equation} \\mathcal{F}(u) = Cu \\end{equation}\nwhich is what we see.\nLook! 
A table where:\n\\begin{equation} \\Lambda_{a} \\end{equation}\nis the triangle between \\([-a, a]\\), that goes up to \\(1\\).\ninterpreting \\(\\lambda\\) if \\(f(x)\\) is a function in time \\(\\lambda\\) could be thought of analogous to frequency\nif \\(f(x)\\) is a function of space \\(\\lambda\\) could be thought of analogous to momentum\nFourier transform of step function if you have a function:\n\\begin{equation} f(x) = \\begin{cases} 1, |x| \u0026lt; a \\\\ 0 \\end{cases} \\end{equation}\nits Fourier transform is sinc function:\n\\begin{equation} \\hat{f}(\\lambda) = \\frac{i \\sin (a \\lambda)}{\\lambda} \\end{equation}\nintuitive understandings the formula sines stuck between Consider what:\n\\begin{equation} f(x) \\cos (x) \\end{equation}\nlooks like.\nEffectively, you are stenching the \\(\\cos(x)\\) between \\(f(x)\\) and its reflection across \\(x\\). As you integrate, the majority of the up and downs cancel out, and the only thing you are left is the bits where \\(f(x)\\) peak up!\nas you increase \\(k\\):\n\\begin{equation} f(x) \\cos (kx) \\end{equation}\nyou obtain more cancellations and it will eventually integrate to \\(0\\).\nFourier transform properties As a function gets smoother, its Fourier transform is more concentrated at one point (closer to a single frequency).\nConversely, as a function gets more jagged, its Fourier transform is smoother (closer to a composition of sinusoids).\nFourier Transform as Quantization Consider:\nthe big fun idea\u0026mdash;-we can transform:\n\\(L\\) periodic function on \\(f(x)\\) \\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} \\right\\}\\) This series exists for all function, converges exceedingly quickly, and has great properties. 
It should look like the form:\n\\begin{equation} f(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x i} \\end{equation}\nFourier norm of a function After you do this, and obtain\n\\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} \\right\\}\\)\nwe call the \u0026ldquo;size\u0026rdquo; of this function:\n\\begin{equation} \\sum_{-\\infty}^{\\infty} | c_{n}|^{2} \\end{equation}\nPlanchrel\u0026rsquo;s Formula For a usual \\(L\\) periodic function, size agrees:\n\\begin{equation} \\langle f,f \\rangle= \\int_{0}^{L} |f(x)|^{2} \\dd{x} = L\\sum_{-\\infty}^{\\infty} | c_{n}|^{2} \\end{equation}\nYou can show this by plugging in the Complex Fourier Series in \\(f\\).\nmotivation Consider a function period of period \\(L\\)\n\\begin{equation} a_{k} = \\int_{0}^{L} F(x) e^{-i\\omega kx} \\dd{x} \\end{equation}\nwhere:\n\\begin{equation} f(x) = ?\\sum_{k} a_{n} e^{i \\omega kx} \\end{equation}\nAnd the BIG PICTURE: if we took the period \\(L \\to \\infty\\), we end up with the Fourier Transform.\n","html":"\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eConsider a function that has no periodicity, but that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x), -\\infty \u0026lt; x \u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd assume that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\infty}^{\\infty} |f(x)| \\dd{x}, \u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eimportant: \u003cstrong\u003e\u003cstrong\u003elook up! 
the integral of \\(f(x)\\) has to converge AND this means that the \\(f(x)\\) goes to \\(0\\) actually at boundaries\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e(meaning the function decays as you go towards the end)\u003c/p\u003e\n\u003ch2 id=\"definition\"\u003edefinition\u003c/h2\u003e\n\u003cp\u003ea Fourier transform is an invertible transformation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) \\to \\hat{f}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\to f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) = \\int_{-\\infty}^{\\infty } e^{-i\\lambda x} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\frac{1}{2\\pi} \\int_{\\infty}^{-\\infty} e^{ix\\lambda} \\hat{f}(\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe sometimes write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) = \\mathcal{F}(f)(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\mathcal{F}\\) is an invertible map that gives you the \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-mathcal-f\"\u003eProperties of \\(\\mathcal{F}\\)\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eit\u0026rsquo;s a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e: \\(\\mathcal{F}(c_1 f_1 + c_2 f_2) = c_1\\mathcal{F}(f_1) + c_2 \\mathcal{F}(f_2)\\)\u003c/li\u003e\n\u003cli\u003eit\u0026rsquo;s recenter able: \\(\\mathcal{F}(f(x+c)) = e^{i c \\lambda}\\mathcal{F}(f)(\\lambda)\\)\u003c/li\u003e\n\u003cli\u003eit\u0026rsquo;s reverse-shift-able: \\(\\mathcal{F}\\qty(e^{i \\lambda_{0} x} f(x)) = \\mathcal{F}(f) (\\lambda 
-\\lambda_{0})\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ebecause integrals are linear\u003c/li\u003e\n\u003cli\u003e\\(\\int_{-\\infty}^{\\lambda} e^{-i(t-c)\\lambda}f(t) \\dd{t} = e^{ic\\lambda} \\mathcal{F}(f)(\\lambda)\\), where we define \\(t = x+c\\)\u003c/li\u003e\n\u003cli\u003etry it\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"derivative-of-fourier-transform\"\u003eDerivative of Fourier Transform\u003c/h3\u003e\n\u003cp\u003eSuppose we want:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}(f\u0026rsquo;(x)) = \\int_{\\infty}^{\\infty} e^{-ix\\lambda} f\u0026rsquo;(x) \\dd{x} = \\left e^{-ix\\lambda} f(x)\\right|_{-\\infty}^{\\infty} + i \\lambda \\int_{-\\infty}^{\\infty} e^{-ix\\lambda} f(x) \\dd{x} = i \\lambda \\mathcal{F}(f) (\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause we are guaranteed \\(f(x)\\) evaluated at infinity is \\(0\\), the first term drops out. The important conclusion here: \u003cstrong\u003e*Fourier transforms change a derivative into ALGEBRA of multiplying by \\(i\\lambda\\)\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eConsider also:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}(x f(x)) = i \\dv \\lambda \\mathcal{F}(f)(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou can show this in a similar way, by attempting to distribute a \\(\\dv \\lambda\\) into the Fourier transform and showing that they are equal.\u003c/p\u003e\n\u003ch3 id=\"fourier-transform-of-a-gaussian\"\u003eFourier Transform of a Gaussian\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = \\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}^{-1}\\qty(e^{-a\\frac{\\lambda^{2}}{2}}) = \\frac{e^{-\\frac{x^{2}}{2a}}}{\\sqrt{2\\pi a}}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ewe obtain 
this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = e^{-\\frac{x^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{u}{x} = -xe^{-\\frac{x^{2}}{2}} = -xu\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand if we took a Fourier transform on both sides, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(\\dv{u}{x} + xu) = 0 = i \\lambda \\hat{u} + i \\pdv{\\hat{u}} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand note that this is the same equation. Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(\\dv{u}{x} + xu) = \\dv{\\lambda}{x} + \\lambda u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}(u) = Cu\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is what we see.\u003c/p\u003e\n\u003ch3 id=\"look-a-table\"\u003eLook! A table\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-06_21-36-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Lambda_{a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the triangle between \\([-a, a]\\), that goes up to \\(1\\).\u003c/p\u003e\n\u003ch3 id=\"interpreting-lambda\"\u003einterpreting \\(\\lambda\\)\u003c/h3\u003e\n\u003ch4 id=\"if-f--x--is-a-function-in-time\"\u003eif \\(f(x)\\) is a function in time\u003c/h4\u003e\n\u003cp\u003e\\(\\lambda\\) could be thought of analogous to frequency\u003c/p\u003e\n\u003ch4 id=\"if-f--x--is-a-function-of-space\"\u003eif \\(f(x)\\) is a function of space\u003c/h4\u003e\n\u003cp\u003e\\(\\lambda\\) could be thought of analogous to momentum\u003c/p\u003e\n\u003ch3 id=\"fourier-transform-of-step-function\"\u003eFourier transform of step function\u003c/h3\u003e\n\u003cp\u003eif you have a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) =\n\\begin{cases}\n1, |x| \u0026lt; a 
\\\\\n0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eits Fourier transform is \u003ca href=\"\"\u003esinc function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) = \\frac{i \\sin (a \\lambda)}{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"intuitive-understandings-the-formula\"\u003eintuitive understandings the formula\u003c/h3\u003e\n\u003ch4 id=\"sines-stuck-between\"\u003esines stuck between\u003c/h4\u003e\n\u003cp\u003eConsider what:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) \\cos (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elooks like.\u003c/p\u003e\n\u003cp\u003eEffectively, you are stenching the \\(\\cos(x)\\) between \\(f(x)\\) and its reflection across \\(x\\). As you integrate, the majority of the up and downs cancel out, and the only thing you are left is the bits where \\(f(x)\\) peak up!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-06_10-48-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eas you increase \\(k\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) \\cos (kx)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou obtain more cancellations and it will eventually integrate to \\(0\\).\u003c/p\u003e\n\u003ch4 id=\"fourier-transform-properties\"\u003eFourier transform properties\u003c/h4\u003e\n\u003cp\u003eAs a function gets smoother, its Fourier transform is more concentrated at one point (closer to a single frequency).\u003c/p\u003e\n\u003cp\u003eConversely, as a function gets more jagged, its Fourier transform is smoother (closer to a composition of sinusoids).\u003c/p\u003e\n\u003ch3 id=\"fourier-transform-as-quantization\"\u003eFourier Transform as Quantization\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003ethe big fun idea\u0026mdash;-we can transform:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(L\\) periodic function on \\(f(x)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} 
\\right\\}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis series exists for all function, converges exceedingly quickly, and has great properties. It should look like the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x i}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"fourier-norm-of-a-function\"\u003eFourier norm of a function\u003c/h4\u003e\n\u003cp\u003eAfter you do this, and obtain\u003c/p\u003e\n\u003cp\u003e\\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} \\right\\}\\)\u003c/p\u003e\n\u003cp\u003ewe call the \u0026ldquo;size\u0026rdquo; of this function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{-\\infty}^{\\infty} | c_{n}|^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"planchrel-s-formula\"\u003ePlanchrel\u0026rsquo;s Formula\u003c/h4\u003e\n\u003cp\u003eFor a usual \\(L\\) periodic function, size agrees:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,f \\rangle= \\int_{0}^{L} |f(x)|^{2} \\dd{x} = L\\sum_{-\\infty}^{\\infty} | c_{n}|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou can show this by plugging in the \u003ca href=\"/posts/kbhsu_math53_mar042024/#complex-fourier-series\"\u003eComplex Fourier Series\u003c/a\u003e in \\(f\\).\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003eConsider a function period of period \\(L\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\int_{0}^{L} F(x) e^{-i\\omega kx} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = ?\\sum_{k} a_{n} e^{i \\omega kx}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd the BIG PICTURE: if we took the period \\(L \\to \\infty\\), we end up with the \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfourier_transform/","tags":null,"title":"Fourier Transform"},{"categories":null,"contents":"FDR is an 
American president.\nFDR and Teddy Roosevelt is Got Polio, which played in his favor =\u0026gt; press agree to not photograph him when he was in a wheelchair Created the New Deal Models himself after his cousin Teddy Roosevelt, and believed that charisma and moral leadership work. \u0026ldquo;Above all, try something\u0026hellip; let the court shoot it if need to.\u0026rdquo;\nHe was able to gain single party control, wh.\nCreated Fireside Chats.\nHis wife, Eleanor Roosevelt, was very controversial.\nlegacy of FDR Never spent enough to end the depression Expanded government regulation, government size, and social welfare Modernization of presidency: sets agenda, initiates legislation Realigned the democratic party (created the progressive democrats) Maintained democracy \u0026lt;=== compared to Authoritarianism ","html":"\u003cp\u003eFDR is an American president.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFDR and \u003ca href=\"/posts/kbhteddy_roosevelt/\"\u003eTeddy Roosevelt\u003c/a\u003e is\u003c/li\u003e\n\u003cli\u003eGot \u003ca href=\"/posts/kbhpolio/\"\u003ePolio\u003c/a\u003e, which played in his favor =\u0026gt; press agree to not photograph him when he was in a wheelchair\u003c/li\u003e\n\u003cli\u003eCreated the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eModels himself after his cousin \u003ca href=\"/posts/kbhteddy_roosevelt/\"\u003eTeddy Roosevelt\u003c/a\u003e, and believed that charisma and moral leadership work. 
\u0026ldquo;Above all, try something\u0026hellip; let the court shoot it if need to.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eHe was able to gain \u003ca href=\"/posts/kbhsingle_party_control/\"\u003esingle party control\u003c/a\u003e, wh.\u003c/p\u003e\n\u003cp\u003eCreated \u003ca href=\"/posts/kbhfireside_chats/\"\u003eFireside Chats.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eHis wife, \u003ca href=\"/posts/kbheleanor_roosevelt/\"\u003eEleanor Roosevelt\u003c/a\u003e, was very controversial.\u003c/p\u003e\n\u003ch2 id=\"legacy-of-fdr\"\u003elegacy of FDR\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNever spent enough to end the depression\u003c/li\u003e\n\u003cli\u003eExpanded government regulation, government size, and social welfare\u003c/li\u003e\n\u003cli\u003eModernization of presidency: sets agenda, initiates legislation\u003c/li\u003e\n\u003cli\u003eRealigned the democratic party (created the progressive democrats)\u003c/li\u003e\n\u003cli\u003eMaintained democracy \u0026lt;=== compared to \u003ca href=\"/posts/kbhauthoritarianism/\"\u003eAuthoritarianism\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfdr/","tags":null,"title":"Franklin D. 
Roosevelt (FDR)"},{"categories":null,"contents":"Saltwater economists are economists from coastal schools that are mostly classical Keynsians\nFreshwater economists are economists who are mostly Neoclassical Economists\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfreshwater_economists/\"\u003eSaltwater economists\u003c/a\u003e are economists from coastal schools that are mostly classical \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsians\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfreshwater_economists/\"\u003eFreshwater economists\u003c/a\u003e are economists who are mostly \u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economists\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfreshwater_economists/","tags":null,"title":"Freshwater economists"},{"categories":null,"contents":"function pointers typedef bool (*should_swap) (int, int); all function\u0026rsquo;s names are pointers to first address of the function\u0026rsquo;s machine code in memory.\nWhen writing a generic function, if we don\u0026rsquo;t know about the behavior of something (such as comparison), etc., we have to rely on the client to specify the information in terms of a function.\nfunction writer: writes algorithmic function, relies on caller data function caller: knows data, and doesn\u0026rsquo;t know how algorithm knows ","html":"\u003ch2 id=\"function-pointers\"\u003efunction pointers\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003etypedef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eshould_swap\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eall \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e\u0026rsquo;s names are \u003cstrong\u003epointers\u003c/strong\u003e to first address of the function\u0026rsquo;s machine code in memory.\u003c/p\u003e\n\u003cp\u003eWhen writing a \u003ca href=\"/posts/kbhgeneric/\"\u003egeneric\u003c/a\u003e function, if we don\u0026rsquo;t know about the behavior of something (such as comparison), etc., we have to rely on the client to specify the information in terms of a function.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efunction writer\u003c/strong\u003e: writes algorithmic function, relies on caller data\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efunction caller\u003c/strong\u003e: knows data, and doesn\u0026rsquo;t know how algorithm knows\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfunction/","tags":null,"title":"function"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfunctor/","tags":null,"title":"functor"},{"categories":null,"contents":"If your style is not math driven, fundamental investing are the strategies you can use.\nBTW: the game of investing in the stock market has gotten harder because we have too much data with the historical data. 
(Efficient) markets tend to eliminate opportunities to make a profit.\nLooking at 300 banks would be a good idea.\nValue Investing Value Investing is a fundamental investing strategy to value each company by going through the accounting/books of the company and buying companies that are theoretically undervalued.\nDistressed Investing Distressed Investing is the extreme version of Value Investing. Buy something that has a bad asset, a bad structure, and a bad holders: buy things when \u0026ldquo;what do they care what price they sell at\u0026rdquo;, which means you may be able to buy it at less than its worth.\nGrowth Investing Growth Investing is a fundamental investing strategy to bet in the future growth of a company given its performance and technology: Tesla, Teledoc, etc.\nQuality Investing Quality Investing is a fundamental investing strategy to buy stocks even despite high prices that has the most dependable market share: Coke, P\u0026amp;G, etc.\nSecond-Level Thinking A meta-level way of looking at decisions: \u0026ldquo;how many people know what I know.\u0026rdquo; Your strategy has to be both 1) DIFFERENT and 2) BETTER than what other people are doing.\n\u0026ldquo;The correctness of a decision cannot be judged by the outcome.\u0026rdquo;\n","html":"\u003cp\u003eIf your style is not math driven, \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e are the strategies you can use.\u003c/p\u003e\n\u003cp\u003eBTW: the game of investing in the \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003estock market\u003c/a\u003e has gotten harder because we have too much data with the historical data. 
(Efficient) markets tend to eliminate opportunities to make a profit.\u003c/p\u003e\n\u003cp\u003eLooking at 300 banks would be a good idea.\u003c/p\u003e\n\u003ch2 id=\"value-investing\"\u003eValue Investing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#value-investing\"\u003eValue Investing\u003c/a\u003e is a \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e strategy to value each company by going through the accounting/books of the company and buying companies that are theoretically undervalued.\u003c/p\u003e\n\u003ch3 id=\"distressed-investing\"\u003eDistressed Investing\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#distressed-investing\"\u003eDistressed Investing\u003c/a\u003e is the extreme version of \u003ca href=\"#value-investing\"\u003eValue Investing\u003c/a\u003e. Buy something that has a bad asset, a bad structure, and a bad holders: buy things when \u0026ldquo;what do they care what price they sell at\u0026rdquo;, which means you may be able to buy it at less than its worth.\u003c/p\u003e\n\u003ch2 id=\"growth-investing\"\u003eGrowth Investing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#growth-investing\"\u003eGrowth Investing\u003c/a\u003e is a \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e strategy to bet in the future growth of a company given its performance and technology: Tesla, Teledoc, etc.\u003c/p\u003e\n\u003ch2 id=\"quality-investing\"\u003eQuality Investing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#quality-investing\"\u003eQuality Investing\u003c/a\u003e is a \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e strategy to buy stocks even despite high prices that has the most dependable market share: Coke, P\u0026amp;G, etc.\u003c/p\u003e\n\u003ch2 id=\"second-level-thinking\"\u003eSecond-Level Thinking\u003c/h2\u003e\n\u003cp\u003eA meta-level way of looking at decisions: \u0026ldquo;how many people know what I 
know.\u0026rdquo; Your strategy has to be both 1) DIFFERENT and 2) BETTER than what other people are doing.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;The correctness of a decision cannot be judged by the outcome.\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundimental_investing/","tags":null,"title":"fundamental investing"},{"categories":null,"contents":"factorization motivator If \\(p\\) is prime and \\(p | ab\\), then \\(p|a\\) or \\(p|b\\).\nIf \\(p|a\\), we are done.\nConsider the case where \\(p|ab\\) yet \\(a\\) is not divisible by \\(p\\). Then, \\(a\\) and \\(p\\) are coprime. This means that, we have:\n\\begin{equation} \\gcd (a,p) = 1 = s a + tp \\end{equation}\nWe note that:\n\\begin{align} b \u0026amp;= 1 \\cdot b \\\\ \u0026amp;= (sa+tp) b \\\\ \u0026amp;= sab + tpb \\\\ \u0026amp;= s(ab) + tb(p) \\end{align}\nNotice that both of these elements are divisible by \\(p\\) (\\(p|ab\\) and of course \\(p|p\\)). Therefore, \\(p|b\\) as desired.\nstatement of the theorem Every integer greater than \\(1\\) is a prime or a product of primes. This factorization is unique.\nProof Existence Let \\(S\\) be the list of integers bigger than \\(1\\) which are prime or are products of primes. Consider the set \\(T\\) which is all integers bigger than \\(1\\) which isn\u0026rsquo;t prime or are products of primes:\n\\begin{equation} T = \\{2, 3, \\dots, \\} \\setminus S \\end{equation}\nWe desire \\(T\\) to be empty.\nAssume for the sake of contradiction that \\(T\\) isn\u0026rsquo;t empty. By WOP, take some smallest element of \\(t \\in T\\).\n\\(t\\) is not in \\(S\\), so it mustn\u0026rsquo;t be prime. This means:\n\\begin{equation} t = ab \\end{equation}\nThough\u0026hellip;. \\(a\\) and \\(b\\) must be smaller than \\(t\\) (otherwise their product wouldn\u0026rsquo;t make \\(t\\), as we are working with only positive numbers (integers greater than \\(1\\)) here). 
So \\(a\\) and \\(b\\) must be in $S$\u0026mdash;meaning they are primes or product of primes. This makes \\(t\\) a prime or product of primes, reaching contradiction.\nUniqueness We show this by induction. We see that: \\(2 = 2\\). Now, suppose a unique prime factorization holds for all integers smaller than \\(n\\). Let:\n\\begin{equation} n = p_1 \\dots p_{r} = q_1 \\dots q_{s} \\end{equation}\nLet us order it such that \\(p_1 \\leq \u0026hellip; \\leq p_{r}\\), \\(q_1 \\leq \u0026hellip; \\leq q_{s}\\).\nBy the factorization motivator, each \\(p_{j}|n\\) implies that \\(p_{j}|q_{i}\\) (you can see this by treating \\(n = q_1 \u0026hellip; q_{s}\\), so \\(p_{j}|n \\implies p_{j}|(q_1 \\cdot \\dots \\cdot q_{s})\\) so \\(p_{j}\\) should be divisible by some \\(q_{j}\\).)\nNow, this condition implies \\(p_{j} = q_{i}\\), because primes are not divisible by anything except themselves and \\(1\\) (and \\(1\\) is not considered prime).\nConsider, then, two such equivalences:\n\\begin{equation} p_{1} = q_{j} \\end{equation}\n\\begin{equation} q_{1} = p_{k} \\end{equation}\nNow, this means that:\n\\begin{equation} p_{1} \\leq p_{k} = q_{1} \\leq q_{j} = p_{1} \\end{equation}\nTherefore, the only way this can work (the fact that \\(q_1\\) is sandwiched on both ends \u0026mdash; by \\(p_1\\leq q_1 \\leq p_1\\)) is that \\(p_1 = q_1\\).\nTherefore, we now have:\n\\begin{equation} \\frac{n}{p_1} = p_{2} \\cdot \\dots \\cdot p_{n} = q_{2} \\cdot \\dots \\cdot q_{n} \\end{equation}\nYou will note \\(\\frac{n}{p_1} \u0026lt; n\\). Now, we can invoke induction. \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"factorization-motivator\"\u003efactorization motivator\u003c/h2\u003e\n\u003cp\u003eIf \\(p\\) is \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e and \\(p | ab\\), then \\(p|a\\) or \\(p|b\\).\u003c/p\u003e\n\u003cp\u003eIf \\(p|a\\), we are done.\u003c/p\u003e\n\u003cp\u003eConsider the case where \\(p|ab\\) yet \\(a\\) is not divisible by \\(p\\). 
Then, \\(a\\) and \\(p\\) are \u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003e. This means that, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\gcd (a,p) = 1 = s a + tp\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe note that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb \u0026amp;= 1 \\cdot b \\\\\n\u0026amp;= (sa+tp) b \\\\\n\u0026amp;= sab + tpb \\\\\n\u0026amp;= s(ab) + tb(p)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNotice that both of these elements are \u003ca href=\"/posts/kbhdivide/\"\u003edivisible\u003c/a\u003e by \\(p\\) (\\(p|ab\\) and of course \\(p|p\\)). Therefore, \\(p|b\\) as desired.\u003c/p\u003e\n\u003ch2 id=\"statement-of-the-theorem\"\u003estatement of the theorem\u003c/h2\u003e\n\u003cp\u003eEvery integer greater than \\(1\\) is a \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e or a product of primes. This factorization is unique.\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003ch3 id=\"existence\"\u003eExistence\u003c/h3\u003e\n\u003cp\u003eLet \\(S\\) be the list of \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003es bigger than \\(1\\) which are \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e or are products of primes. Consider the set \\(T\\) which is all \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003es bigger than \\(1\\) which isn\u0026rsquo;t \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e or are products of primes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT = \\{2, 3, \\dots, \\} \\setminus S\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire \\(T\\) to be empty.\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction that \\(T\\) isn\u0026rsquo;t empty. 
By \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e, take some smallest element of \\(t \\in T\\).\u003c/p\u003e\n\u003cp\u003e\\(t\\) is not in \\(S\\), so it mustn\u0026rsquo;t be \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e. This means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = ab\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThough\u0026hellip;. \\(a\\) and \\(b\\) must be smaller than \\(t\\) (otherwise their product wouldn\u0026rsquo;t make \\(t\\), as we are working with only positive numbers (\u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003es greater than \\(1\\)) here). So \\(a\\) and \\(b\\) must be in $S$\u0026mdash;meaning they are \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003es or product of primes. This makes \\(t\\) a prime or product of primes, reaching contradiction.\u003c/p\u003e\n\u003ch3 id=\"uniqueness\"\u003eUniqueness\u003c/h3\u003e\n\u003cp\u003eWe show this by induction. We see that: \\(2 = 2\\). Now, suppose a unique prime factorization holds for all integers smaller than \\(n\\). 
Let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nn = p_1 \\dots p_{r} = q_1 \\dots q_{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us order it such that \\(p_1 \\leq \u0026hellip; \\leq p_{r}\\), \\(q_1 \\leq \u0026hellip; \\leq q_{s}\\).\u003c/p\u003e\n\u003cp\u003eBy the \u003ca href=\"#factorization-motivator\"\u003efactorization motivator\u003c/a\u003e, each \\(p_{j}|n\\) implies that \\(p_{j}|q_{i}\\) (you can see this by treating \\(n = q_1 \u0026hellip; q_{s}\\), so \\(p_{j}|n \\implies p_{j}|(q_1 \\cdot \\dots \\cdot q_{s})\\) so \\(p_{j}\\) should be divisible by some \\(q_{j}\\).)\u003c/p\u003e\n\u003cp\u003eNow, this condition implies \\(p_{j} = q_{i}\\), because primes are not divisible by anything except themselves and \\(1\\) (and \\(1\\) is not considered \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eConsider, then, two such equivalences:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{1} = q_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq_{1} = p_{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, this means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{1} \\leq p_{k} = q_{1} \\leq q_{j} = p_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, the only way this can work (the fact that \\(q_1\\) is sandwiched on both ends \u0026mdash; by \\(p_1\\leq q_1 \\leq p_1\\)) is that \\(p_1 = q_1\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{n}{p_1} = p_{2} \\cdot \\dots \\cdot p_{n} = q_{2} \\cdot \\dots \\cdot q_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note \\(\\frac{n}{p_1} \u0026lt; n\\). Now, we can invoke induction. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundamental_theorem_of_arithmetic/","tags":null,"title":"fundamental theorem of arithmetic"},{"categories":null,"contents":"Lovely, well known result:\n\\begin{equation} \\dv x \\int_{a}^{x} f(t)\\dd{t} = f(x) \\end{equation}\nfor any fixed \\(a\\). This is because that\u0026rsquo;s functionally using \\(a\\) as a \\(+C\\) term.\n","html":"\u003cp\u003eLovely, well known result:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv x \\int_{a}^{x} f(t)\\dd{t} = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor any fixed \\(a\\). This is because that\u0026rsquo;s functionally using \\(a\\) as a \\(+C\\) term.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundamental_theorem_of_calculus/","tags":null,"title":"Fundamental Theorem of Calculus"},{"categories":null,"contents":"The dimension of the null space plus the dimension of the range of a Linear Map equals the dimension of its domain.\nThis also implies that both the null space (but this one\u0026rsquo;s trivial b/c the null space is a subspace of the already finite-dimensional domain) and the range as well is finite-dimensional.\nconstituents \\(T \\in \\mathcal{L}( V,W )\\) finite-dimensional \\(V\\) (otherwise commenting on computing its dimension doesn\u0026rsquo;t make sense) requirements \\begin{equation} \\dim V = \\dim null\\ T + \\dim range\\ T \\end{equation}\nfor \\(T \\in \\mathcal{L}(V,W)\\)\nproof We desire that \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\) for \\(T \\in \\mathcal{L}(V,W)\\).\nLet us construct a basis of the null space of \\(T\\), \\(u_1, \\dots u_{m}\\). This makes \\(\\dim null\\ T = m\\).\nWe can extend this list to a basis of \\(V\\), the domain, with some vectors \\(v_1, \\dots v_{n}\\). This makes the \\(\\dim V = m+n\\).\nWe now desire that \\(\\dim range\\ T = n\\). 
We show this by showing \\(Tv_{1}, \\dots Tv_{n}\\) is a basis of \\(range\\ T\\).\nRecall that \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is a basis of \\(V\\) the domain of \\(T\\). This means that any element that can go into \\(T\\) takes the shape of:\n\\begin{equation} v = a_1u_1+ \\dots +a_{m}u_{m} + b_{1}v_1 + \\dots + b_{n}v_{n} \\end{equation}\nRecall also that the definition of the range of \\(T\\) is that:\n\\begin{equation} range\\ T = \\{Tv: v \\in V\\} \\end{equation}\nTherefore, every element of the range of \\(T\\) takes the shape of \\(Tv\\): meaning:\n\\begin{equation} Tv = a_1Tu_1+ \\dots +a_{m}Tu_{m} + b_{1}Tv_1 + \\dots + b_{n}Tv_{n} \\end{equation}\nby additivity and homogeneity of Linear Maps.\nNow, \\(Tu_{j}=0\\), because each \\(u_{j}\\) is a basis (and so definitely at least an element of) the null space of \\(T\\). This makes the above expression:\n\\begin{equation} Tv = 0 + b_{1}Tv_1 + \\dots + b_{n}Tv_{n} = b_{1}Tv_1 + \\dots + b_{n}Tv_{n} \\end{equation}\nOk. Given that all elements of the range can be constructed by a linear combination of \\(Tv_{1} \\dots Tv_{n}\\), we declare that the list spans the range of \\(T\\). Notably, as \\(V\\) is finite-dimensional and \\(v_1, \\dots v_{n}\\) is a sublist of its basis, \\(n \u0026lt; \\infty\\) and so the range of \\(T\\) is also finite-dimensional.\nTo finish showing \\(Tv_{1}, \\dots, Tv_{n}\\) to be a basis of \\(range\\ T\\), we have to show that its linearly independent.\nSuppose:\n\\begin{equation} c_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0 \\end{equation}\nBy homogeneity and additivity, we have that:\n\\begin{equation} T(c_1v_{1} + \\dots + c_{n}v_{n}) = 0 \\end{equation}\nthis makes \\(c_1v_1 + \\dots\\) a member of the null space of \\(T\\). 
Recall that \\(u_1, \\dots u_{m}\\) were a basis thereof, this means that the linear combination of \\(v_{j}\\) can be written as a linear combination of \\(u_{j}\\):\n\\begin{equation} c_1 v_1 + \\dots + c_{n}v_{n} = d_1 u_{1} + \\dots + d_{m} u_{m} \\end{equation}\nOf course, the list \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is linearly independent as it is a basis of \\(V\\). This makes \\(c_{j}=d_{j}=0\\) (to see this, move all the \\(d_{j}u_{j}\\) to the left and apply definition of linear independence).\nWe have therefore shown that, given\n\\begin{equation} c_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0 \\end{equation}\n\\(c_1 = \\dots = c_{n} =0\\), satisfying the definition of linear independence of the list of \\(Tv_{j}\\).\nHaving shown that \\(Tv_{j}\\) to be a linearly independent spanning list of \\(range\\ T\\), we can conclude that it is indeed a basis of \\(range\\ T\\).\nThis makes the \\(\\dim range\\ T = n\\), as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e plus the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e equals the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of its domain.\u003c/p\u003e\n\u003cp\u003eThis also implies that both the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e (but this one\u0026rsquo;s trivial b/c the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the already \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e domain) and the \u003cstrong\u003e\u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e\u003c/strong\u003e as well is 
\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T \\in \\mathcal{L}( V,W )\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e \\(V\\) (otherwise \u003ca href=\"/posts/kbhdocumentation_and_specification/#commenting\"\u003ecommenting\u003c/a\u003e on computing its \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e doesn\u0026rsquo;t make sense)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V = \\dim null\\ T + \\dim range\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(T \\in \\mathcal{L}(V,W)\\)\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eproof\u003c/h2\u003e\n\u003cp\u003eWe desire that \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\) for \\(T \\in \\mathcal{L}(V,W)\\).\u003c/p\u003e\n\u003cp\u003eLet us construct a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\), \\(u_1, \\dots u_{m}\\). This makes \\(\\dim null\\ T = m\\).\u003c/p\u003e\n\u003cp\u003eWe can extend this list to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), the domain, with some vectors \\(v_1, \\dots v_{n}\\). This makes the \\(\\dim V = m+n\\).\u003c/p\u003e\n\u003cp\u003eWe now desire that \\(\\dim range\\ T = n\\). We show this by showing \\(Tv_{1}, \\dots Tv_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\).\u003c/p\u003e\n\u003cp\u003eRecall that \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) the domain of \\(T\\). 
This means that any element that \u003cem\u003ecan\u003c/em\u003e go into \\(T\\) takes the shape of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1u_1+ \\dots +a_{m}u_{m} + b_{1}v_1 + \\dots + b_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall also that the definition of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) is that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nrange\\ T = \\{Tv: v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, every element of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) takes the shape of \\(Tv\\): meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = a_1Tu_1+ \\dots +a_{m}Tu_{m} + b_{1}Tv_1 + \\dots + b_{n}Tv_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby additivity and homogeneity of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eNow, \\(Tu_{j}=0\\), because each \\(u_{j}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e (and so definitely at least an element of) the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\). This makes the above expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = 0 + b_{1}Tv_1 + \\dots + b_{n}Tv_{n} = b_{1}Tv_1 + \\dots + b_{n}Tv_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk. Given that all elements of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e can be constructed by a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(Tv_{1} \\dots Tv_{n}\\), we declare that the list \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003es the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\). 
Notably, as \\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e and \\(v_1, \\dots v_{n}\\) is a sublist of its \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, \\(n \u0026lt; \\infty\\) and so the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) is also \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo finish showing \\(Tv_{1}, \\dots, Tv_{n}\\) to be a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\), we have to show that its \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuppose:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e and additivity, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(c_1v_{1} + \\dots + c_{n}v_{n}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis makes \\(c_1v_1 + \\dots\\) a member of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\). Recall that \\(u_1, \\dots u_{m}\\) were a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e thereof, this means that the \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v_{j}\\) can be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(u_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 v_1 + \\dots + c_{n}v_{n} = d_1 u_{1} + \\dots + d_{m} u_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, the list \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e as it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
This makes \\(c_{j}=d_{j}=0\\) (to see this, move all the \\(d_{j}u_{j}\\) to the left and apply definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eWe have therefore shown that, given\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(c_1 = \\dots = c_{n} =0\\), satisfying the definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e of the list of \\(Tv_{j}\\).\u003c/p\u003e\n\u003cp\u003eHaving shown that \\(Tv_{j}\\) to be a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(range\\ T\\), we can conclude that it is indeed a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\).\u003c/p\u003e\n\u003cp\u003eThis makes the \\(\\dim range\\ T = n\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundamental_theorem_of_linear_maps/","tags":null,"title":"fundamental theorem of linear maps"},{"categories":null,"contents":"fusion in machine learning is the process of adding features or encoding.\nlate fusion late fusion adds features together to a model in a multi-modal approach by first embedding the features separately\nearly fusion early fusion adds features together to a model in a multi-modal approach by concatenating the features first then embedding\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfusion/\"\u003efusion\u003c/a\u003e in machine learning is the process of adding features or encoding.\u003c/p\u003e\n\u003ch2 id=\"late-fusion\"\u003elate fusion\u003c/h2\u003e\n\u003cp\u003elate fusion adds features together to a model in a multi-modal approach by first embedding the features separately\u003c/p\u003e\n\u003ch2 id=\"early-fusion\"\u003eearly fusion\u003c/h2\u003e\n\u003cp\u003eearly fusion adds features together to a model in a multi-modal approach by concatenating the features first then embedding\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfusion/","tags":null,"title":"fusion (machine learning)"},{"categories":null,"contents":"Main problem: joint actions and observations are exponential by the number of agents.\nSolution: Smaple-based online planning for multiagent systems. 
We do this with the factored-value POMCP.\nfactored statistics: reduces the number of joint actions (through action selection statistics) factored trees: reduces the number of histories Multiagent Definition \\(I\\) set of agents \\(S\\) set of states \\(A_{i}\\) set of states for each agent \\(i\\) \\(T\\) state transitions \\(R\\) reward function \\(Z_{i}\\) joint observations for each agents \\(O\\) set of observations Coordination Graphs you can use sum-product elimination to shorten the Baysian Network of the agent Coordination Graphs (which is how agents influnece each other).\nMixture of Experts Directly search for the best joint actions; computed by MLE of the total value.\n","html":"\u003cp\u003eMain problem: joint actions and observations are exponential by the number of agents.\u003c/p\u003e\n\u003cp\u003eSolution: \u003cstrong\u003eSmaple-based online planning\u003c/strong\u003e for multiagent systems. We do this with the factored-value \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efactored statistics\u003c/strong\u003e: reduces the number of joint actions (through action selection statistics)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efactored trees\u003c/strong\u003e: reduces the number of histories\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"multiagent-definition\"\u003eMultiagent Definition\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(I\\) set of agents\u003c/li\u003e\n\u003cli\u003e\\(S\\) set of states\u003c/li\u003e\n\u003cli\u003e\\(A_{i}\\) set of states for each agent \\(i\\)\u003c/li\u003e\n\u003cli\u003e\\(T\\) state transitions\u003c/li\u003e\n\u003cli\u003e\\(R\\) reward function\u003c/li\u003e\n\u003cli\u003e\\(Z_{i}\\) joint observations for each agents\u003c/li\u003e\n\u003cli\u003e\\(O\\) set of observations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"coordination-graphs\"\u003eCoordination Graphs\u003c/h2\u003e\n\u003cp\u003eyou can use \u003ca 
href=\"/posts/kbhinference/#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e to shorten the \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e of the agent \u003ca href=\"#coordination-graphs\"\u003eCoordination Graphs\u003c/a\u003e (which is how agents influnece each other).\u003c/p\u003e\n\u003ch2 id=\"mixture-of-experts\"\u003eMixture of Experts\u003c/h2\u003e\n\u003cp\u003eDirectly search for the best joint actions; computed by \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e of the total value.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfv_pomcps/","tags":null,"title":"FV-POMCPs"},{"categories":null,"contents":"Motivation Its the same. It hasn\u0026rsquo;t changed: curses of dimensionality and history.\nGoal: to solve decentralized multi-agent MDPs.\nKey Insights macro-actions (MAs) to reduce computational complexity (like hierarchical planning) uses cross entropy to make infinite horizon problem tractable Prior Approaches masked Monte Carlo search: heuristic based, no optimality garantees MCTS: poor performance Direct Cross Entropy see also Cross Entropy Method\nsample a value function \\(k\\) takes \\(n\\) highest sampled values update parameter \\(\\theta\\) resample until distribution convergence take the best sample \\(x\\) G-DICE create a graph with exogenous \\(N\\) nodes, and \\(O\\) outgoing edges (designed before) use Direct Cross Entropy to solve for the best policy Results demonstrates improved performance over MMCS and MCTS does not need robot communication garantees convergence for both finite and infiinte horizon can choose exogenous number of nodes in order to gain computational savings ","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eIts the same. 
It hasn\u0026rsquo;t changed: curses of dimensionality and history.\u003c/p\u003e\n\u003cp\u003eGoal: to solve decentralized multi-agent MDPs.\u003c/p\u003e\n\u003ch2 id=\"key-insights\"\u003eKey Insights\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003emacro-actions (MAs) to reduce computational complexity (like hierarchical planning)\u003c/li\u003e\n\u003cli\u003euses cross entropy to make infinite horizon problem tractable\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"prior-approaches\"\u003ePrior Approaches\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emasked Monte Carlo search\u003c/strong\u003e: heuristic based, no optimality garantees\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e: poor performance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"direct-cross-entropy\"\u003eDirect Cross Entropy\u003c/h2\u003e\n\u003cp\u003esee also \u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esample a value function \\(k\\)\u003c/li\u003e\n\u003cli\u003etakes \\(n\\) highest sampled values\u003c/li\u003e\n\u003cli\u003eupdate parameter \\(\\theta\\)\u003c/li\u003e\n\u003cli\u003eresample until distribution convergence\u003c/li\u003e\n\u003cli\u003etake the best sample \\(x\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"g-dice--kbhg-dice-dot-md\"\u003e\u003ca href=\"/posts/kbhg_dice/\"\u003eG-DICE\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecreate a graph with exogenous \\(N\\) nodes, and \\(O\\) outgoing edges (designed before)\u003c/li\u003e\n\u003cli\u003euse \u003ca href=\"#direct-cross-entropy\"\u003eDirect Cross Entropy\u003c/a\u003e to solve for the best policy\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-22_10-08-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 
id=\"results\"\u003eResults\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003edemonstrates improved performance over MMCS and \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edoes not need robot communication\u003c/li\u003e\n\u003cli\u003egarantees convergence for both finite and infiinte horizon\u003c/li\u003e\n\u003cli\u003ecan choose exogenous number of nodes in order to gain computational savings\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhg_dice/","tags":null,"title":"G-DICE"},{"categories":null,"contents":"Galactica is a large-languange model for generating research papers, made by meta research\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgalactica/\"\u003eGalactica\u003c/a\u003e is a large-languange model for generating research papers, made by meta research\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgalactica/","tags":null,"title":"Galactica"},{"categories":null,"contents":" One of these things. It is actually a binomial distribution.\nYou can phrase the probability at\n","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-11_16-14-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOne of these things. 
It is actually a \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eYou can phrase the probability at\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgalton_board/","tags":null,"title":"Galton Board"},{"categories":null,"contents":"The GARCH model is a model for the heteroskedastic variations where the changes in variance is assumed to be auto correlated: that, though the variance changes, it changes in a predictable manner.\nIt is especially useful to\nGARCH 1,1 Conditional mean:\n\\begin{equation} y_{t} = x\u0026rsquo;_{t} \\theta + \\epsilon_{t} \\end{equation}\nThen, the epsilon parameter:\n\\begin{equation} \\epsilon_{t} = \\sigma_{t}z_{t} \\end{equation}\nwhere:\n\\begin{equation} z_{t} \\sim \\mathcal{N}(0,1) \\end{equation}\nand:\nconditional variance\n\\begin{equation} {\\sigma_{t}}^{2} = \\omega + \\lambda {\\sigma_{t-1}}^{2} + \\beta {\\sigma_{t-1}}^{2} \\end{equation}\nFinally, with initial conditions:\n\\begin{equation} w\u0026gt;0; \\alpha \u0026gt;0; \\beta \u0026gt;0 \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgarch/\"\u003eGARCH\u003c/a\u003e model is a model for the \u003ca href=\"/posts/kbhheteroskedastic/\"\u003eheteroskedastic\u003c/a\u003e variations where the changes in variance is assumed to be auto correlated: that, though the variance changes, it changes in a predictable manner.\u003c/p\u003e\n\u003cp\u003eIt is especially useful to\u003c/p\u003e\n\u003ch2 id=\"garch--kbhgarch-dot-md--1-1\"\u003e\u003ca href=\"/posts/kbhgarch/\"\u003eGARCH\u003c/a\u003e 1,1\u003c/h2\u003e\n\u003cp\u003eConditional mean:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{t} = x\u0026rsquo;_{t} \\theta + \\epsilon_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, the epsilon parameter:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon_{t} = 
\\sigma_{t}z_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz_{t} \\sim \\mathcal{N}(0,1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003econditional variance\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n{\\sigma_{t}}^{2} = \\omega + \\lambda {\\sigma_{t-1}}^{2} + \\beta {\\sigma_{t-1}}^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, with initial conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw\u0026gt;0; \\alpha \u0026gt;0; \\beta \u0026gt;0\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgarch/","tags":null,"title":"GARCH"},{"categories":null,"contents":"The Gauss\u0026rsquo; Law is a principle of electric flux of uniformly distributed electric field along a surface: that, the electric flux through a closed surface is the sum of the electric charge enclosed divided by the permittivity of free space.\nThat is:\n\\begin{equation} \\oint E \\cdot dA = \\frac{\\sum Q}{\\epsilon_{0}} \\end{equation}\nsomewhat motivating Gauss\u0026rsquo; Law Consider a sphere with uniformly distributed charge on its surface. It has surface area \\(4 \\pi r^{2}\\). Given the expression of electric flux and the fact that the origin change is in the center, and the test change is evenly distributed (i.e. \\(E\\) is held constant):\n\\begin{align} \\Phi_{E} \u0026amp;= \\int E \\cdot dA \\\\ \u0026amp;= E\\int dA \\end{align}\nNow, we are integrating across the entire surface of the sphere, so it is a closed integral. So:\n\\begin{align} \\Phi_{E} \u0026amp;= \\oint E dA \\end{align}\nWe have the entire sum of the surfaces to be the surface area; so \\(\\oint dA = 4\\pi r^{2}\\). 
Furthermore, recall that if the field is uniform, \\(E\\) is constantly at \\(\\frac{1}{4 \\pi \\epsilon_{0}} \\frac{Q}{r^{2}}\\).\nSo, substituting the two in:\n\\begin{align} \\Phi_{E} \u0026amp;= \\frac{1}{4\\pi \\epsilon_{0}} \\frac{Q}{r^{2}} 4\\pi r^{2} \\\\ \u0026amp;= \\frac{Q}{\\epsilon_{0}} \\end{align}\nwhere, \\(\\epsilon_{0}\\) is the permittivity of free space.\nCongrats, we have Gauss\u0026rsquo; Law: \u0026ldquo;the electric flux through the surface of an object is the sum of the charges enclosed divided by the permittivity of free space.\u0026rdquo;\nspheres electric field inside a closed conductor is zero\nThis is a direct result of gauss\u0026rsquo; law\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e is a principle of \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e of uniformly distributed electric field along a surface: that, the \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e through a \u003cstrong\u003eclosed surface\u003c/strong\u003e is the sum of the electric \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e enclosed divided by the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\oint E \\cdot dA = \\frac{\\sum Q}{\\epsilon_{0}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"somewhat-motivating-gauss-law--kbhgauss-law-dot-md\"\u003esomewhat motivating \u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-19_11-00-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eConsider a sphere with uniformly distributed \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e on its surface. It has surface area \\(4 \\pi r^{2}\\). 
Given the expression of \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e and the fact that the origin change is in the center, and the test change is evenly distributed (i.e. \\(E\\) is held constant):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\Phi_{E} \u0026amp;= \\int E \\cdot dA \\\\\n\u0026amp;= E\\int dA\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we are integrating across the entire surface of the sphere, so it is a closed integral. So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\Phi_{E} \u0026amp;= \\oint E dA\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe have the entire sum of the surfaces to be the surface area; so \\(\\oint dA = 4\\pi r^{2}\\). Furthermore, recall that if the field is uniform, \\(E\\) is constantly at \\(\\frac{1}{4 \\pi \\epsilon_{0}} \\frac{Q}{r^{2}}\\).\u003c/p\u003e\n\u003cp\u003eSo, substituting the two in:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\Phi_{E} \u0026amp;= \\frac{1}{4\\pi \\epsilon_{0}} \\frac{Q}{r^{2}} 4\\pi r^{2} \\\\\n\u0026amp;= \\frac{Q}{\\epsilon_{0}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\epsilon_{0}\\) is the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eCongrats, we have \u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e: \u0026ldquo;the \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e through the surface of an object is the sum of the \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003es enclosed divided by the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"spheres\"\u003espheres\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eelectric field inside a closed conductor is zero\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThis is a direct result of gauss\u0026rsquo; 
law\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgauss_law/","tags":null,"title":"Gauss' Law"},{"categories":null,"contents":"The Gaussian, in general, gives:\n\\begin{equation} e^{-\\frac{ax^{2}}{2}} \\end{equation}\nwhich is a Bell-Shaped curve. It\u0026rsquo;s pretty darn important\nsolving heat equation without boundary for general expression:\n\\begin{equation} \\pdv{U}{t} = \\alpha \\pdv[2]{U}{x} \\end{equation}\n\\begin{equation} U(t,x) = \\frac{1}{\\sqrt{4\\pi \\alpha t}}\\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y} \\end{equation}\nwhere,\n\\begin{equation} \\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\alpha t \\lambda^{2}} \\end{equation}\n\\begin{equation} \\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)} \\end{equation}\nHeat Equation and Gaussian \\begin{equation} H(t,x) = \\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{x^{2}}{2t}} \\end{equation}\nYou will note that \\(H\\) does satisfy the heat equation:\n\\begin{equation} \\pdv{U}{t} = \\pdv[2]{U}{x} \\end{equation}\nclosed form solution \\begin{equation} U(t,x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nthis is exactly:\n\\begin{equation} \\int_{\\mathbb{R}}f(y) H(t,(x-y)) \\dd{y} = \\int_{\\mathbb{R}}\\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{(x-y)^{2}}{2t}} f(y) \\dd{y} \\end{equation}\nWe can understand this when \\(t \\to 0\\), where there is a single, narrow, area \\(1\\) band which we sweep across all of \\(y\\). 
Because its thin and \\(1\\), its basically \\(f(x)\\) at each \\(y\\).\nsolving Heat Equation without boundary Consider the partial Fourier Transform on the \\(x\\) variable of the heat equation.\n\\begin{equation} U(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda} \\hat{U} \\qty(t,\\lambda) \\dd{\\lambda} \\end{equation}\nTaking derivatives of this:\n\\begin{equation} \\pdv{U}{t} (t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{i\\lambda x} \\pdv{\\hat{U}}{t} (t,\\lambda) \\dd{\\lambda} \\end{equation}\nand:\n\\begin{equation} \\pdv[2]{U}{x} = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} \\qty(-\\lambda^{2}) e^{ix \\lambda } \\hat{U}(t,\\lambda) \\dd{\\lambda} \\end{equation}\nBecause these two are equal, it gives us that:\n\\begin{equation} \\hat{U}(t,\\lambda) = -\\lambda^{2} \\hat{U}(t,\\lambda) \\end{equation}\nmeaning:\n\\begin{equation} \\hat{U}(t,\\lambda) = a(\\lambda)e^{-\\lambda^{2}t} \\end{equation}\nFinally, at:\n\\begin{equation} \\hat{U}(0,\\lambda) = a(\\lambda) = \\hat{f}(\\lambda) \\end{equation}\nWe see that:\n\\begin{equation} \\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)} \\end{equation}\nTo get our original function back, we need to inverse Fourier transform it:\n\\begin{equation} U(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda - \\lambda^{2}t} \\hat{f}(\\lambda) \\dd{\\lambda} \\end{equation}\nIntegrating Gaussian, more Generally Let\u0026rsquo;s integrate:\n\\begin{equation} \\int_{-\\infty}^{\\infty} e^{-\\frac{{ax}^{2}}{2}} \\dd{x} \\end{equation}\nLet\u0026rsquo;s replace: \\(s = \\sqrt{a} x\\)\nThis gives us that (based on Integrating Gaussian):\n\\begin{equation} x = \\sqrt{\\frac{2\\pi}{a}} \\end{equation}\nIf we replace \\(a\\) by \\(\\frac{1}{t}\\), we obtain:\n\\begin{equation} \\frac{1}{\\sqrt{2\\pi}t} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2t}} \\dd{x} = 1 \\end{equation}\nby rescaling \\(x(a)\\) function above.\nIf \\(t\\) increases, you will see that this function diffuses from a single point 
at \\(0\\) and spreading out. Notice, that over the whole real line, no matter what the \\(t\\) is, you always end up with integral \\(1\\).\nIntegrating Gaussian Let\u0026rsquo;s integrate:\n\\begin{equation} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x} \\end{equation}\ncomputing this is funny:\n\\begin{equation} A \\cdot A = \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x} \\int_{-\\infty}^{\\infty} e^{-\\frac{y^{2}}{2}} \\dd{y} \\end{equation}\nWe can think of this as a double integral:\n\\begin{equation} A \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} e^{-\\frac{y^{2}}{2}} \\dd{x} \\dd{y} \\end{equation}\nmeaning we get:\n\\begin{equation} A \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}+y^{2}}{2}} \\dd{x} \\dd{y} \\end{equation}\nIts polar time; recall:\n\\begin{equation} x^{2} + y^{2} = r^{2} \\end{equation}\nwe can now go over this whole thing by converting into polar (notice the extra factor \\(r\\)):\n\\begin{equation} A \\cdot A = \\int_{0}^{2\\pi} \\int_{0}^{\\infty} e^{-\\frac{r^{2}}{2}} r \\dd{r} \\dd{\\theta} \\end{equation}\nvery suddenly we can use u sub on \\(r\\) to obtain:\n\\begin{equation} 2\\pi \\int_{0}^{\\infty} e^{-u} \\dd{u} = 2\\pi \\cdot 1 = 2\\pi \\end{equation}\nMeaning:\n\\begin{equation} A = \\sqrt{2\\pi} \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgaussian/\"\u003eGaussian\u003c/a\u003e, in general, gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{-\\frac{ax^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a Bell-Shaped curve. 
It\u0026rsquo;s pretty darn important\u003c/p\u003e\n\u003ch2 id=\"solving-heat-equation-without-boundary\"\u003esolving heat equation without boundary\u003c/h2\u003e\n\u003cp\u003efor general expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} = \\alpha \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{\\sqrt{4\\pi \\alpha t}}\\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\alpha t \\lambda^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"heat-equation--kbhheat-equation-dot-md--and-gaussian--kbhgaussian-dot-md\"\u003e\u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e and \u003ca href=\"/posts/kbhgaussian/\"\u003eGaussian\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nH(t,x) = \\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{x^{2}}{2t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that \\(H\\) \u003cstrong\u003edoes\u003c/strong\u003e satisfy the heat equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} = \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"closed-form-solution\"\u003eclosed form solution\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is exactly:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\mathbb{R}}f(y) H(t,(x-y)) \\dd{y} = \\int_{\\mathbb{R}}\\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{(x-y)^{2}}{2t}} f(y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can understand this when \\(t \\to 0\\), where there is a single, narrow, area \\(1\\) band which we sweep across all of 
\\(y\\). Because its thin and \\(1\\), its basically \\(f(x)\\) at each \\(y\\).\u003c/p\u003e\n\u003ch3 id=\"solving-heat-equation--kbhheat-equation-dot-md--without-boundary\"\u003esolving \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e without boundary\u003c/h3\u003e\n\u003cp\u003eConsider the partial \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e on the \\(x\\) variable of the heat equation.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda} \\hat{U} \\qty(t,\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking derivatives of this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} (t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{i\\lambda x} \\pdv{\\hat{U}}{t} (t,\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{U}{x} = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} \\qty(-\\lambda^{2}) e^{ix \\lambda } \\hat{U}(t,\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause these two are equal, it gives us that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = -\\lambda^{2} \\hat{U}(t,\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = a(\\lambda)e^{-\\lambda^{2}t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(0,\\lambda) = a(\\lambda) = \\hat{f}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo get our original function back, we need to inverse Fourier transform it:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda - \\lambda^{2}t} 
\\hat{f}(\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"integrating-gaussian-more-generally\"\u003eIntegrating Gaussian, more Generally\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{-\\infty}^{\\infty} e^{-\\frac{{ax}^{2}}{2}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s replace: \\(s = \\sqrt{a} x\\)\u003c/p\u003e\n\u003cp\u003eThis gives us that (based on \u003ca href=\"#integrating-gaussian\"\u003eIntegrating Gaussian\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx = \\sqrt{\\frac{2\\pi}{a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we replace \\(a\\) by \\(\\frac{1}{t}\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{\\sqrt{2\\pi}t} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2t}} \\dd{x} = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby rescaling \\(x(a)\\) function above.\u003c/p\u003e\n\u003cp\u003eIf \\(t\\) increases, you will see that this function diffuses from a single point at \\(0\\) and spreading out. 
Notice, that over the whole real line, no matter what the \\(t\\) is, you always end up with integral \\(1\\).\u003c/p\u003e\n\u003ch2 id=\"integrating-gaussian\"\u003eIntegrating Gaussian\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ecomputing this is funny:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x} \\int_{-\\infty}^{\\infty} e^{-\\frac{y^{2}}{2}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can think of this as a double integral:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} e^{-\\frac{y^{2}}{2}} \\dd{x} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}+y^{2}}{2}} \\dd{x} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts polar time; recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{2} + y^{2} = r^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can now go over this whole thing by converting into polar (notice the extra factor \\(r\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{0}^{2\\pi} \\int_{0}^{\\infty} e^{-\\frac{r^{2}}{2}} r \\dd{r} \\dd{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003every suddenly we can use u sub on \\(r\\) to obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2\\pi \\int_{0}^{\\infty} e^{-u} \\dd{u} = 2\\pi \\cdot 1 = 2\\pi\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = 
\\sqrt{2\\pi}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgaussian/","tags":null,"title":"Gaussian"},{"categories":null,"contents":"standard normal density function This is a function used to model many Gaussian distributions.\n\\begin{equation} \\phi(x) = \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{x^{2}}{2}} \\end{equation}\nThis function is the CDF of the standard normal.\nstandard normal density function is also symmetric:\n\\begin{equation} \\phi(a) = 1- \\phi(a) \\end{equation}\nGaussian distribution constituents \\(\\mu\\) the mean \\(\\sigma\\) the variance requirements \\begin{equation} X \\sim N(\\mu, \\sigma^{2}) \\end{equation}\nIts PDF is:\n\\begin{equation} \\mathcal{N}(x \\mid \\mu, \\sigma^{2}) = \\frac{1}{\\sigma\\sqrt{2\\pi}} e^{ \\frac{-(x-u)^{2}}{2 \\sigma^{2}}} \\end{equation}\nwhere, \\(\\phi\\) is the standard normal density function\nIts CDF:\n\\begin{equation} F(x) = \\Phi \\qty( \\frac{x-\\mu}{\\sigma}) \\end{equation}\nWe can\u0026rsquo;t integrate \\(\\Phi\\) further. So we leave it as a special function.\nAnd its expectations:\n\\(E(X) = \\mu\\)\n\\(Var(X) = \\sigma^{2}\\)\nadditional information linear transformations on Gaussian For some:\n\\begin{equation} Y = aX + b \\end{equation}\nwhere \\(X \\sim \\mathcal{N}\\)\nWe will end up with another normal \\(Y \\sim \\mathcal{N}\\) such that:\nmean: \\(au + b\\) variance: \\(a^{2}\\sigma^{2}\\) standard normal The standard normal is:\n\\begin{equation} Z=\\mathcal{N}(0,1) \\end{equation}\nmean 0, variance 1. 
You can transform anything into a standard normal via the following linear transform:\ntransformation into standard normal \\begin{equation} X \\sim \\mathcal{N}(\\mu, \\sigma^{2}) \\end{equation}\nand, we can shift it into a standard normal with:\n\\begin{equation} Z = \\frac{X-\\mu}{\\sigma} \\end{equation}\ntherefore, we can derive what the CDF of the normal distribution by shifting it back into the center:\n\\begin{equation} P(X\u0026lt;x) \\implies P\\qty(\\frac{X-\\mu}{\\theta} \u0026lt; \\frac{x-\\mu}{\\theta}) \\implies P\\qty(Z\u0026lt; \\frac{x-\\mu}{\\theta}) = \\Phi\\qty(\\frac{x-\\mu}{\\theta}) \\end{equation}\nnormal maximizes entropy no other random variable uses as little parameters to convey as much information\napproximation of binomial distribution with normal distribution You can use a normal distribution to approximate binomial approximation. However, be aware of a continuity correction\nadding Gaussian distributions for independent:\n\\begin{equation} X+Y \\sim \\mathcal{N}(\\mu_{1}+\\mu_{2}, \\sigma_{1}^{2}+\\sigma_{2}^{2}) \\end{equation}\n","html":"\u003ch2 id=\"standard-normal-density-function\"\u003estandard normal density function\u003c/h2\u003e\n\u003cp\u003eThis is a function used to model many Gaussian distributions.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi(x) = \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{x^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis function is the \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003eCDF\u003c/a\u003e of the \u003ca href=\"#standard-normal\"\u003estandard normal.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e is also symmetric:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi(a) = 1- \\phi(a)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"gaussian-distribution\"\u003eGaussian distribution\u003c/h2\u003e\n\u003ch3 
id=\"constituents\"\u003econstituents\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mu\\) the mean\u003c/li\u003e\n\u003cli\u003e\\(\\sigma\\) the variance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"requirements\"\u003erequirements\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim N(\\mu, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{N}(x \\mid \\mu, \\sigma^{2}) = \\frac{1}{\\sigma\\sqrt{2\\pi}} e^{ \\frac{-(x-u)^{2}}{2 \\sigma^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\phi\\) is the \u003ca href=\"#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eIts \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003eCDF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(x) = \\Phi \\qty( \\frac{x-\\mu}{\\sigma})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can\u0026rsquo;t integrate \\(\\Phi\\) further. 
So we leave it as a special function.\u003c/p\u003e\n\u003cp\u003eAnd its expectations:\u003c/p\u003e\n\u003cp\u003e\\(E(X) = \\mu\\)\u003c/p\u003e\n\u003cp\u003e\\(Var(X) = \\sigma^{2}\\)\u003c/p\u003e\n\u003ch3 id=\"additional-information\"\u003eadditional information\u003c/h3\u003e\n\u003ch4 id=\"linear-transformations-on-gaussian\"\u003elinear transformations on Gaussian\u003c/h4\u003e\n\u003cp\u003eFor some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY = aX + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(X \\sim \\mathcal{N}\\)\u003c/p\u003e\n\u003cp\u003eWe will end up with another normal \\(Y \\sim \\mathcal{N}\\) such that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emean: \\(au + b\\)\u003c/li\u003e\n\u003cli\u003evariance: \\(a^{2}\\sigma^{2}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"standard-normal\"\u003estandard normal\u003c/h4\u003e\n\u003cp\u003eThe standard normal is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nZ=\\mathcal{N}(0,1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emean 0, variance 1. 
You can transform anything into a standard normal via the following linear transform:\u003c/p\u003e\n\u003ch4 id=\"transformation-into-standard-normal--org430b977\"\u003etransformation into \u003ca href=\"#standard-normal\"\u003estandard normal\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim \\mathcal{N}(\\mu, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, we can shift it into a standard normal with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nZ = \\frac{X-\\mu}{\\sigma}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore, we can derive what the \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003eCDF\u003c/a\u003e of the \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e by shifting it back into the center:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X\u0026lt;x) \\implies P\\qty(\\frac{X-\\mu}{\\theta} \u0026lt; \\frac{x-\\mu}{\\theta}) \\implies P\\qty(Z\u0026lt; \\frac{x-\\mu}{\\theta}) = \\Phi\\qty(\\frac{x-\\mu}{\\theta})\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"normal-maximizes-entropy\"\u003enormal maximizes entropy\u003c/h4\u003e\n\u003cp\u003eno other \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e uses as little \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es to convey as much information\u003c/p\u003e\n\u003ch4 id=\"approximation-of-binomial-distribution--kbhbinomial-distribution-dot-md--with-normal-distribution--kbhnormal-distribution-dot-md\"\u003eapproximation of \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e with \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eYou can use a \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e to approximate \u003ca 
href=\"#approximation-of-binomial-distribution--kbhbinomial-distribution-dot-md--with-normal-distribution--kbhnormal-distribution-dot-md\"\u003ebinomial approximation\u003c/a\u003e. However, be aware of a \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"adding-gaussian-distribution--kbhgaussian-distribution-dot-md--s\"\u003eadding \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003efor \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX+Y \\sim \\mathcal{N}(\\mu_{1}+\\mu_{2}, \\sigma_{1}^{2}+\\sigma_{2}^{2})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgaussian_distribution/","tags":null,"title":"Gaussian distribution"},{"categories":null,"contents":"The point of Gaussian elimination is to solve/identiy-ify a linear equation. Take, if you have a matrix expression:\n\\begin{equation} Ax = b \\end{equation}\nWe can apply \\(A^{-1}\\) to both side, we then have:\n\\begin{equation} A^{-1}Ax = A^{-1} b \\end{equation}\nApplying the definition of the identity:\n\\begin{equation} Ix = A^{-1}b \\end{equation}\nTherefore, to solve for some \\(A^{-1}\\), which would yield \\(x\\).\n","html":"\u003cp\u003eThe point of \u003ca href=\"/posts/kbhgaussian_elimination/\"\u003eGaussian elimination\u003c/a\u003e is to solve/identiy-ify a linear equation. 
Take, if you have a matrix expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAx = b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can apply \\(A^{-1}\\) to both side, we then have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{-1}Ax = A^{-1} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying the definition of the identity:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nIx = A^{-1}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, to solve for some \\(A^{-1}\\), which would yield \\(x\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgaussian_elimination/","tags":null,"title":"Gaussian elimination"},{"categories":null,"contents":"GDB is gnu\u0026rsquo;s very own debugger\nb main or b 72 (set breakpoint on main function or line 72) r args (run with args) p thingname or p 3+5 (print a variable or return value) p/t print as binary p/x print as hex info (get args, locals) n s continue next, step, continue int test; short lsb = 0xff; test |= lsb printf(\u0026#34;%d\\n\u0026#34;,lsb); int test; ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgdb/\"\u003eGDB\u003c/a\u003e is gnu\u0026rsquo;s very own debugger\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eb main\u003c/code\u003e or \u003ccode\u003eb 72\u003c/code\u003e (set breakpoint on \u003ccode\u003emain\u003c/code\u003e function or line \u003ccode\u003e72\u003c/code\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003er args\u003c/code\u003e (run with args)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ep thingname\u003c/code\u003e or \u003ccode\u003ep 3+5\u003c/code\u003e (print a variable or return value)\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ep/t\u003c/code\u003e print as binary\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ep/x\u003c/code\u003e print as hex\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003einfo\u003c/code\u003e (get args, locals)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003en\u003c/code\u003e 
\u003ccode\u003es\u003c/code\u003e \u003ccode\u003econtinue\u003c/code\u003e next, step, continue\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eshort\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elsb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0xff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e|=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elsb\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;%d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elsb\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhgdb/","tags":null,"title":"GDB"},{"categories":null,"contents":"See inference.\nIn general, the joint probability distribution tables are very hard to solve because it requires\u0026mdash;for instance for binary variables\u0026mdash;requries \\(2^{n}\\) entires, which is a lot.\nhow do you define very large models? how do you perform inference with very large models what about the data can we use to inform the design process \u0026ldquo;If you can tell me a generative story, we can compress our joint probability distribution\u0026rdquo;. Get ready for\u0026hellip;\u0026hellip; inference with causality with Baysian Network.\nIf you can write a program to sample from the joint probability distribution, you have just described the joint.\n\u0026ldquo;Random variables are independent of causal non-descendents given their causal parents\u0026rdquo;. 
d-seperation\n","html":"\u003cp\u003eSee \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIn general, the \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e tables are very hard to solve because it requires\u0026mdash;for instance for binary variables\u0026mdash;requries \\(2^{n}\\) entires, which is a lot.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehow do you define very large models?\u003c/li\u003e\n\u003cli\u003ehow do you perform \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e with very large models\u003c/li\u003e\n\u003cli\u003ewhat about the data can we use to inform the design process\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u0026ldquo;If you can tell me a generative story, we can compress our \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u0026rdquo;. Get ready for\u0026hellip;\u0026hellip; \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e with causality with \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you can write a program to sample from the \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e, you have just described the joint.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Random variables are independent of causal non-descendents given their causal parents\u0026rdquo;. 
\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeneral_inference/","tags":null,"title":"General Inference"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgeneral_relativity/","tags":null,"title":"general relativity"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgenerative_adversarial_network/","tags":null,"title":"Generative Adversarial Network"},{"categories":null,"contents":"a hissyfight with the transformational generative syntax.\ngenerative semantics states that structure is in support of meaning, rather than the other way around that transformational generative syntax suggests.\nThis means that you need to first come up with a meaning then imbew the best structure to support the expression of that meaning.\nThis (along with distributed morphology) is the main opposition of the Lexicalist Hypothesis, and because proof for the existence of semantic primes, also the main opposition of the existence of semantic primes.\n","html":"\u003cp\u003ea hissyfight with the \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgenerative_semantics/\"\u003egenerative semantics\u003c/a\u003e states that \u003cstrong\u003e\u003cstrong\u003estructure\u003c/strong\u003e\u003c/strong\u003e is in support of \u003cstrong\u003e\u003cstrong\u003emeaning\u003c/strong\u003e\u003c/strong\u003e, rather than the other way around that \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e suggests.\u003c/p\u003e\n\u003cp\u003eThis means that you need to first come up with a meaning then imbew the best structure to support the expression of that meaning.\u003c/p\u003e\n\u003cp\u003eThis (along with 
\u003ca href=\"/posts/kbhdistributed_morphology/\"\u003edistributed morphology\u003c/a\u003e) is the main opposition of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalist Hypothesis\u003c/a\u003e, and because \u003ca href=\"/posts/kbhsemantic_primes/#proof-for-the-existence-of-id-4e587814-bfd1-458e-ba5f-67882832107b-semantic-prime-s\"\u003eproof for the existence of semantic primes\u003c/a\u003e, also the main opposition of the existence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenerative_semantics/","tags":null,"title":"generative semantics"},{"categories":null,"contents":"big red Chomskian structuralist warning\nThe principle of generativity states that the goal of a grammar should be to enumerate the span of the structural descriptions of the expressions of a language.\n","html":"\u003cp\u003e\u003cstrong\u003ebig red Chomskian structuralist warning\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThe principle of \u003ca href=\"/posts/kbhgenerativity/\"\u003egenerativity\u003c/a\u003e states that the goal of a \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e should be to enumerate the span of the \u003cem\u003estructural\u003c/em\u003e descriptions of the expressions of a language.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenerativity/","tags":null,"title":"generativity"},{"categories":null,"contents":"We don\u0026rsquo;t want to write the same thing many times; generics minimizes code duplication. Therefore, generics!\nLet\u0026rsquo;s implement a simple swap function:\nvoid swap_ptr_values(void *data1ptr, void *data2ptr, size_t datasize) { } helper functions memcpy Copy datasize bytes worth of memory in the second argument into the first argument. 
The two arguments CANNOT OVERLAP otherwise, you risk UB.\nvoid *memcpy(void *dest, void *src, size_t nbytes) memmove Its memcpy, but it works with overlapping data, and is slower.\nvoid *memove(void *dest, void *src, size_t nbytes) pointer arithmetic with generics Unfortunately, given that we don\u0026rsquo;t know how big a void * pointer is, we can\u0026rsquo;t do pointer arithmetic against it because it still doesn\u0026rsquo;t know how big the pointer is. You can\u0026rsquo;t just add/subtract numbers to char *.\nSo, we actually have to do pointer arithmetic by casting the pointer to a char* which will make pointer arithmetic work at the one-byte level.\nvoid *return_sixth_elem(void *arr) { return (char *)arr + 5; } higher order functions We can pass a function as a parameter.\nbool (*function_name)(int, int) ","html":"\u003cp\u003eWe don\u0026rsquo;t want to write the same thing many times; generics minimizes code duplication. Therefore, generics!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s implement a simple swap function:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eswap_ptr_values\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata1ptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata2ptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatasize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"helper-functions\"\u003ehelper functions\u003c/h2\u003e\n\u003ch3 id=\"memcpy\"\u003ememcpy\u003c/h3\u003e\n\u003cp\u003eCopy \u003ccode\u003edatasize\u003c/code\u003e bytes worth of memory in the second argument into the first argument. The two arguments \u003cstrong\u003eCANNOT OVERLAP\u003c/strong\u003e otherwise, you risk UB.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ememcpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esrc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enbytes\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"memmove\"\u003ememmove\u003c/h3\u003e\n\u003cp\u003eIts \u003ca href=\"#memcpy\"\u003ememcpy\u003c/a\u003e, but it works with overlapping data, and is slower.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ememove\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esrc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enbytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"pointer-arithmetic--kbharray-dot-md--with-generics--kbhgeneric-dot-md\"\u003e\u003ca href=\"/posts/kbharray/#pointer-arithmetic\"\u003epointer arithmetic\u003c/a\u003e with \u003ca href=\"/posts/kbhgeneric/\"\u003egenerics\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eUnfortunately, given that we don\u0026rsquo;t know how big a \u003ccode\u003evoid *\u003c/code\u003e pointer is, we can\u0026rsquo;t do \u003ca 
href=\"/posts/kbharray/#pointer-arithmetic\"\u003epointer arithmetic\u003c/a\u003e against it because it still doesn\u0026rsquo;t know how big the pointer is. You can\u0026rsquo;t just add/subtract numbers to \u003ccode\u003echar *\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eSo, we actually have to do \u003ca href=\"/posts/kbharray/#pointer-arithmetic\"\u003epointer arithmetic\u003c/a\u003e by \u003ca href=\"/posts/kbhcasting/\"\u003ecasting\u003c/a\u003e the pointer to a \u003ccode\u003echar*\u003c/code\u003e which will make pointer arithmetic work at the one-byte level.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ereturn_sixth_elem\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"higher-order-functions\"\u003ehigher order functions\u003c/h2\u003e\n\u003cp\u003eWe can pass a function as a parameter.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunction_name\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhgeneric/","tags":null,"title":"generics"},{"categories":null,"contents":" A genetic algorithm is a search heuristic that is inspired by Charles Darwin\u0026rsquo;s theory of natural evolution.\nIts what Grey\u0026rsquo;s video says. The picking and chucking iterative thing.\n","html":"\u003cblockquote\u003e\n\u003cp\u003eA genetic algorithm is a search heuristic that is inspired by Charles Darwin\u0026rsquo;s theory of natural evolution.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003eIts what Grey\u0026rsquo;s video says. 
The picking and chucking iterative thing.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenetic_algorithum/","tags":null,"title":"genetic algorithm"},{"categories":null,"contents":"Genetic Policy Search involves performing Local Policy Search, but starting from a plurality of initial policies and perturbing the top-k most successful ones (called the \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\)) to generate the next set of starting points.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e involves performing \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e, but starting from a plurality of initial policies and perturbing the top-k most successful ones (called the \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\)) to generate the next set of starting points.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenetic_policy_search/","tags":null,"title":"Genetic Policy Search"},{"categories":null,"contents":"GenSLMs are a LLM, but genome sequence\nTake genome sequence Throw transformers at it create \u0026ldquo;semantic embedding\u0026rdquo; autoregression happens This is trained as a foundational model to organize the genomic sequence\nTurns out, the embedding space above can be used to discover relations. 
See proteins can be encoded as hierarchies\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e are a LLM, but genome sequence\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTake genome sequence\u003c/li\u003e\n\u003cli\u003eThrow transformers at it\u003c/li\u003e\n\u003cli\u003ecreate \u0026ldquo;semantic embedding\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eautoregression happens\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is trained as a \u003ca href=\"/posts/kbhfoundational_model/\"\u003efoundational model\u003c/a\u003e to organize the genomic sequence\u003c/p\u003e\n\u003cp\u003eTurns out, the embedding space above can be used to discover relations. See \u003ca href=\"/posts/kbhfundational_models_of_interaction_analysis/#proteins-can-be-encoded-as-hierarchies\"\u003eproteins can be encoded as hierarchies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenslms-1/","tags":null,"title":"GenSLMs"},{"categories":null,"contents":"GenSLMs are a LLM, but genome sequence\nTake genome sequence Throw transformers at it create \u0026ldquo;semantic embedding\u0026rdquo; autoregression happens This is trained as a foundational model to organize the genomic sequence\nTurns out, the embedding space above can be used to discover relations. 
See proteins can be encoded as hierarchies\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e are a LLM, but genome sequence\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTake genome sequence\u003c/li\u003e\n\u003cli\u003eThrow transformers at it\u003c/li\u003e\n\u003cli\u003ecreate \u0026ldquo;semantic embedding\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eautoregression happens\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is trained as a \u003ca href=\"/posts/kbhfoundational_model/\"\u003efoundational model\u003c/a\u003e to organize the genomic sequence\u003c/p\u003e\n\u003cp\u003eTurns out, the embedding space above can be used to discover relations. See \u003ca href=\"/posts/kbhfundational_models_of_interaction_analysis/#story-1-proteins-can-be-encoded-as-hierarchies\"\u003eproteins can be encoded as hierarchies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenslms/","tags":null,"title":"GenSLMs"},{"categories":null,"contents":"A Geometric Brownian Motion is a Brownian Motion with a drift.\nIt is determined by:\n\\begin{equation} \\dd{S_{t}} = \\mu S_{t} \\dd{t} + \\sigma \\dd{S_{t}} \\dd{W_{t}} \\end{equation}\nwhere, \\(S_{t}\\) is a Geometric Brownian Motion, \\(\\mu\\) is its drift, \\(\\sigma\\) the volatility, and \\(W_{t}\\) a centered Brownian Motion.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhgeometric_brownian_motion/\"\u003eGeometric Brownian Motion\u003c/a\u003e is a \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e with a drift.\u003c/p\u003e\n\u003cp\u003eIt is determined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{S_{t}} = \\mu S_{t} \\dd{t} + \\sigma \\dd{S_{t}} \\dd{W_{t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(S_{t}\\) is a \u003ca href=\"/posts/kbhgeometric_brownian_motion/\"\u003eGeometric Brownian Motion\u003c/a\u003e, \\(\\mu\\) is its drift, \\(\\sigma\\) the volatility, and \\(W_{t}\\) a centered \u003ca 
href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeometric_brownian_motion/","tags":null,"title":"Geometric Brownian Motion"},{"categories":null,"contents":"how many times do you have to do the trial to get at least one success\nconstituents \\(p\\) is the probability of one individual success \\(x\\) is the number of trials requirements \\begin{equation} P(X = x) = \\qty(1-p)^{x-1} p \\end{equation}\nwhich represents the probability of getting a success on the \\(x\\) trial\nadditional information ","html":"\u003cp\u003ehow many times do you have to do the trial to get at least one success\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(p\\) is the probability of one individual success\u003c/li\u003e\n\u003cli\u003e\\(x\\) is the number of trials\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nP(X = x) = \\qty(1-p)^{x-1} p\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich represents the probability of getting a success on the \\(x\\) trial\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeometric_random_variable/","tags":null,"title":"geometric distribution"},{"categories":null,"contents":"The geometric multplicity for a given eigenvalue is the dimension of its generated eigenspace.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgeometric_multplicity/\"\u003egeometric multplicity\u003c/a\u003e for a given \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e is the dimension of its generated \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeometric_multplicity/","tags":null,"title":"geometric 
multplicity"},{"categories":null,"contents":"(Py)Torch is a great C++/Python library to construct and train complex neural networks. It has taken over academia over the last few years and is slowly taking over industry. Let\u0026rsquo;s learn about how it works!\nThis document is meant to be read cover-to-cover. It makes NO SENSE unless read like that. I focus on building intuition about why PyTorch works, so we will be writing unorthodox code until the very end where we put all ideas together.\nThe chapters below take you through large chapters in a machine-learning journey. But, to do anything, we need to import some stuff which we will need:\nimport numpy as np import torch Autograd source\nI believe that anybody learning a new ML framework should learn how its differentiation tools work. Yes, this means that we should first understand how it works with not a giant matrix, but with just two simple variables.\nAt the heart of PyTorch is the built-in gradient backpropagation facilities. To demonstrate this, let us create two such variables.\nvar_1 = torch.tensor(3.0, requires_grad=True) var_2 = torch.tensor(4.0, requires_grad=True) (var_1, var_2) (tensor(3., requires_grad=True), tensor(4., requires_grad=True)) There is secretly a lot going on here, so let\u0026rsquo;s dive in. First, just to get the stickler out of the way, torch.tensor (used here) is the generic variable creator, torch.Tensor (capital!) initializes a proper tensor\u0026mdash;which you will never need.\nWhat is a tensor? A tensor is simply a very efficient matrix that can updates its own values dynamically but keep the same variable name. The above commands creates two such tensor, both being 1x1 matrices.\nNote that, for the initial values, I used floats! instead of ints. The above code will crash if you use ints: this is because we want the surface on which the matrix changes value to be smooth to make things like gradient descent to work.\nLastly, we have an argument requires_grad=True. 
This argument tells PyTorch to keep track of the gradient of the tensor. For now, understand this as \u0026ldquo;permit PyTorch to change this variable if needed.\u0026rdquo; More on that in a sec.\nNaturally, if we have two tensors, we would love to multiply them!\nvar_mult = var_1*var_2 var_mult tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) Wouldyalookatthat! Another tensor, with the value \\(12\\).\nNow. Onto the main event. Back-Propagation! The core idea of a neural network is actually quite simple: figure out how much each input parameter (for us var_1, var_2) influence the output, then adjust the inputs accordingly to get the output to be \\(0\\).\nTo see what I mean, recall our output tensor named:\nvar_mult tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) How much does changing var_1 and var_2, its inputs, influence this output tensor? This is not immediately obvious, so let\u0026rsquo;s write what we are doing out:\n\\begin{equation} v_1 \\cdot v_2 = v_{m} \\implies 3 \\cdot 4 = 12 \\end{equation}\nwith \\(v_1\\) being var_1, \\(v_2\\) being var_2, and \\(v_{m}\\) being var_mult.\nAs you vary var_1, by what factor does the output change? For instance, if var_1 (the \\(3\\)) suddenly became a \\(2\\), how much less will var_mult be? Well, \\(2\\cdot 4=8\\), the output is exactly \\(4\\) less than before less than before. Hence, var_1 influences the value of var_mult by a factor of \\(4\\); meaning every time you add/subtract \\(1\\) to the value of var_1, var_mult gets added/subtracted by a value of \\(4\\).\nSimilarly, as you vary var_2, by what factor does the output change? For instance, if var_2 (the \\(4\\)) suddenly became a \\(5\\), how much less will var_mult be? Well, \\(3\\cdot 3=5\\), the output is exactly \\(3\\) more than before less than before. 
Hence, var_2 influences the value of var_mult by a factor of \\(3\\); meaning every time you add/subtract \\(1\\) to the value of var_3, var_mult gets added/subtracted by a value of \\(3\\).\nThose of you who have exposure to Multi-Variable Calculus\u0026mdash;this is indeed the same concept as a partial derivative of var_mult w.r.t. var_1 and var_2 for the previous two paragraphs respectively.\nThese relative-change-units (\\(4\\) and \\(3\\)) are called gradients: the factor by which changing any given variable change the output.\nNow, gradient calculation is awfully manual! Surely we don\u0026rsquo;t want to keep track of these tiny rates-of-change ourselves! This is where PyTorch autograd comes in. Autograd is the automated tool that helps you figure out these relative changes! It is built in to all PyTorch tensors.\nIn the previous paragraphs, we figured out the relative influences var_1 and var_2 on var_multi. Now let\u0026rsquo;s ask a computer to give us the same result, in much less time.\nFirst, we will ask PyTorch to calculate gradients for all variables that contributed to var_mult.\nvar_mult.backward() The backward function is a magical function that finds and calculates these relative-change-values of var_multi with respect to every variable that contributed to its values. To view the actual relative values, we will use .grad now on the actual variables:\nvar_1.grad tensor(4.) Recall! We used our big brains to deduce above that changing var_1 by \\(1\\) unit will change var_mult by \\(4\\) units. So this works!\nThe other variables works as expected:\nvar_2.grad tensor(3.) Yayyy! Still what we expected.\nGradient Descent Relative changes are cool, but it isn\u0026rsquo;t all that useful unless we are actually doing some changing. 
We want to use our epic knowledge about the relative influences of var_1 and var_2, to manipulate those variables such that var_mult is the value we want.\nTHE REST OF THIS DOCUMENT IS IN CONSTRUCTION\nimport torch.optim as optim To start an optimizer, you give it all the variables for which it should keep track of updating.\noptim = torch.optim.SGD([var_1, var_2], lr=1e-2, momentum=0.9) And then, to update gradients, you just have to:\noptim.step() # IMPORTANT optim.zero_grad() What\u0026rsquo;s that zero_grad? That clears the gradients from the variables (after applying them with .step()) so that the next update doesn\u0026rsquo;t influence the current one.\nYour First Neural Network import torch.nn as nn Layers m = nn.Linear(20, 30) input = torch.randn(128, 20) output = m(input) output, output.size() Explain what the \\(20, 30\\) means.\nOk one layer is just lame. What if you want a bunch of layers?\nm1 = nn.Linear(20, 30) m2 = nn.Linear(30, 30) m3 = nn.Linear(30, 40) input = torch.randn(128, 20) # function call syntax! Functions call from rigth to left! output = m3(m2(m1(input))) output, output.size() And guess what? If you want to adjust the values here, you would just do:\nm1 = nn.Linear(20, 30) m2 = nn.Linear(30, 30) m3 = nn.Linear(30, 40) input = torch.randn(128, 20) # function call syntax! Functions call from rigth to left! output = m3(m2(m1(input))) (output.sum() - 12).backward() None But wait! What are the options you give to your optimizer?\noptim = torch.optim.SGD([m1.weight, m1.bias ... ... ], lr=1e-2, momentum=0.9) That\u0026rsquo;s a lot of variables!! Each linear layer has a \\(m\\) and a \\(b\\) (from \\(y=mx+b\\) fame), and you will end up with a bajillon one of those! Also, that function call syntax, chaining one layer after another, is so knarly! Can we do better? Yes.\nAn Honest-to-Goodness Neural Network PyTorch makes the module framework to make model creator\u0026rsquo;s lives easier. 
This is the best practice for creating a neural network.\nLet\u0026rsquo;s replicate the example above with the new module framework:\nclass MyNetwork(nn.Module): def __init__(self): # important: runs early calls to make sure that # the module is correct super().__init__() # we declare our layers. We don\u0026#39;t use them yet. self.m1 = nn.Linear(20,30) self.m2 = nn.Linear(30,30) self.m3 = nn.Linear(30,40) # this is a special function that is called when # the module is called def forward(self, x): # we want to pass our input through to every layer # like we did before, but now more declaritively x = self.m1(x) x = self.m2(x) x = self.m3(x) return x Explain all of this.\nBut now, we essentially built our entire network in own \u0026ldquo;layer\u0026rdquo; (actually we literally did, all =Layer=s are just =torch.Module=s) that does the job of all other layers acting together. To use it, we just:\nmy_network = MyNetwork() input = torch.randn(128, 20) # function call syntax! Functions call from rigth to left! output = my_network(input) output tensor([[-0.1694, 0.0095, 0.4306, ..., 0.1580, 0.2644, 0.1509], [-0.2346, -0.0269, -0.1191, ..., 0.0229, -0.0819, -0.1452], [-0.4871, -0.2868, -0.2488, ..., 0.0637, 0.1832, 0.0619], ..., [-0.1323, 0.2531, -0.1086, ..., 0.0975, 0.0426, -0.2092], [-0.4765, 0.1441, -0.0520, ..., 0.2364, 0.0253, -0.1914], [-0.5044, -0.3263, 0.3102, ..., 0.1938, 0.1427, -0.0587]], grad_fn=\u0026lt;AddmmBackward0\u0026gt;) But wait! What are the options you give to your optimizer? Surely you don\u0026rsquo;t have to pass my_network.m1.weight, my_network.m1.bias, etc. etc. to the optimizer, right?\nYou don\u0026rsquo;t. One of the things that the super().__init__() did was to register a special function to your network class that keeps track of everything to optimize for. 
So now, to ask the optimizer to update the entire network, you just have to write:\noptim = torch.optim.SGD(my_network.parameters(), lr=1e-2, momentum=0.9) optim SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 0.01 maximize: False momentum: 0.9 nesterov: False weight_decay: 0 ) TODO make students recall original backprop example, backprope and step and zero_grad with this new optim.\nLook! Optimizing an entire network works in the exact same way as optimizing two lone variables.\nPutting it together TODO\ntraining loop (zero first, call model, get diff/loss, .backward(), .step()) best practices saving and restoring models GPU ","html":"\u003cp\u003e(Py)Torch is a great C++/Python library to construct and train complex neural networks. It has \u003ca href=\"https://paperswithcode.com/trends\"\u003etaken over academia\u003c/a\u003e over the last few years and is slowly taking over industry. Let\u0026rsquo;s learn about how it works!\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThis document is meant to be read cover-to-cover. It makes NO SENSE unless read like that. I focus on building intuition about why PyTorch works, so we will be writing unorthodox code until the very end where we put all ideas together.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThe chapters below take you through large chapters in a machine-learning journey. 
But, to do anything, we need to import some stuff which we will need:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"autograd\"\u003eAutograd\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://pytorch.org/tutorials/beginner/basics/autogradqs_tutorial.html\"\u003esource\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eI believe that anybody learning a new ML framework should learn how its differentiation tools work. Yes, this means that we should first understand how it works with not a giant matrix, but with just two simple variables.\u003c/p\u003e\n\u003cp\u003eAt the heart of PyTorch is the built-in gradient backpropagation facilities. 
To demonstrate this, let us create two such variables.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3., requires_grad=True), tensor(4., requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThere is secretly a lot going on here, so let\u0026rsquo;s dive in. First, just to get the stickler out of the way, \u003ccode\u003etorch.tensor\u003c/code\u003e (used here) is the generic variable creator, \u003ccode\u003etorch.Tensor\u003c/code\u003e (capital!) initializes a proper tensor\u0026mdash;which you will \u003cstrong\u003enever\u003c/strong\u003e need.\u003c/p\u003e\n\u003cp\u003eWhat is a \u003ccode\u003etensor\u003c/code\u003e? A \u003ccode\u003etensor\u003c/code\u003e is simply a very efficient matrix that can updates its own values dynamically but keep the same variable name. The above commands creates two such \u003ccode\u003etensor\u003c/code\u003e, both being \u003ccode\u003e1x1\u003c/code\u003e matrices.\u003c/p\u003e\n\u003cp\u003eNote that, for the initial values, I used \u003cem\u003efloats!\u003c/em\u003e instead of \u003cem\u003eints\u003c/em\u003e. 
The above code will crash if you use ints: this is because we want the surface on which the matrix changes value to be smooth to make things like gradient descent to work.\u003c/p\u003e\n\u003cp\u003eLastly, we have an argument \u003ccode\u003erequires_grad=True\u003c/code\u003e. This argument tells PyTorch to keep track of the gradient of the \u003ccode\u003etensor\u003c/code\u003e. For now, understand this as \u0026ldquo;permit PyTorch to change this variable if needed.\u0026rdquo; More on that in a sec.\u003c/p\u003e\n\u003cp\u003eNaturally, if we have two tensors, we would love to multiply them!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWouldyalookatthat! Another tensor, with the value \\(12\\).\u003c/p\u003e\n\u003cp\u003eNow. Onto the main event. Back-Propagation! 
The core idea of a neural network is actually quite simple: figure out how much each input parameter (for us \u003ccode\u003evar_1\u003c/code\u003e, \u003ccode\u003evar_2\u003c/code\u003e) influence the output, then adjust the inputs accordingly to get the output to be \\(0\\).\u003c/p\u003e\n\u003cp\u003eTo see what I mean, recall our output \u003ccode\u003etensor\u003c/code\u003e named:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eHow much does changing \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e, its inputs, influence this output \u003ccode\u003etensor\u003c/code\u003e? This is not immediately obvious, so let\u0026rsquo;s write what we are doing out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_1 \\cdot v_2 = v_{m} \\implies 3 \\cdot 4 = 12\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(v_1\\) being \u003ccode\u003evar_1\u003c/code\u003e, \\(v_2\\) being \u003ccode\u003evar_2\u003c/code\u003e, and \\(v_{m}\\) being \u003ccode\u003evar_mult\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eAs you vary \u003ccode\u003evar_1\u003c/code\u003e, by \u003cstrong\u003ewhat factor\u003c/strong\u003e does the output change? 
For instance, if \u003ccode\u003evar_1\u003c/code\u003e (the \\(3\\)) suddenly became a \\(2\\), how much \u003cem\u003eless\u003c/em\u003e will \u003ccode\u003evar_mult\u003c/code\u003e be? Well, \\(2\\cdot 4=8\\), the output is exactly \\(4\\) less than before less than before. Hence, \u003ccode\u003evar_1\u003c/code\u003e influences the value of \u003ccode\u003evar_mult\u003c/code\u003e by a factor of \\(4\\); meaning every time you add/subtract \\(1\\) to the value of \u003ccode\u003evar_1\u003c/code\u003e, \u003ccode\u003evar_mult\u003c/code\u003e gets added/subtracted by a value of \\(4\\).\u003c/p\u003e\n\u003cp\u003eSimilarly, as you vary \u003ccode\u003evar_2\u003c/code\u003e, by what factor does the output change? For instance, if \u003ccode\u003evar_2\u003c/code\u003e (the \\(4\\)) suddenly became a \\(5\\), how much \u003cem\u003eless\u003c/em\u003e will \u003ccode\u003evar_mult\u003c/code\u003e be? Well, \\(3\\cdot 3=5\\), the output is exactly \\(3\\) more than before less than before. Hence, \u003ccode\u003evar_2\u003c/code\u003e influences the value of \u003ccode\u003evar_mult\u003c/code\u003e by a factor of \\(3\\); meaning every time you add/subtract \\(1\\) to the value of \u003ccode\u003evar_3\u003c/code\u003e, \u003ccode\u003evar_mult\u003c/code\u003e gets added/subtracted by a value of \\(3\\).\u003c/p\u003e\n\u003cp\u003eThose of you who have exposure to Multi-Variable Calculus\u0026mdash;this is indeed the same concept as a partial derivative of \u003ccode\u003evar_mult\u003c/code\u003e w.r.t. \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e for the previous two paragraphs respectively.\u003c/p\u003e\n\u003cp\u003eThese relative-change-units (\\(4\\) and \\(3\\)) are called \u003cstrong\u003egradients\u003c/strong\u003e: the factor by which changing any given variable change the output.\u003c/p\u003e\n\u003cp\u003eNow, gradient calculation is awfully manual! 
Surely we don\u0026rsquo;t want to keep track of these tiny rates-of-change ourselves! This is where PyTorch autograd comes in. Autograd is the automated tool that helps you figure out these relative changes! It is built into all PyTorch tensors.\u003c/p\u003e\n\u003cp\u003eIn the previous paragraphs, we figured out the relative influences of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e on \u003ccode\u003evar_mult\u003c/code\u003e. Now let\u0026rsquo;s ask a computer to give us the same result, in much less time.\u003c/p\u003e\n\u003cp\u003eFirst, we will ask PyTorch to calculate gradients for all variables that contributed to \u003ccode\u003evar_mult\u003c/code\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe \u003ccode\u003ebackward\u003c/code\u003e function is a magical function that finds and calculates these relative-change-values of \u003ccode\u003evar_mult\u003c/code\u003e with respect to every variable that contributed to its values. 
To view the actual relative values, we will use \u003ccode\u003e.grad\u003c/code\u003e now on the actual variables:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egrad\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(4.)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall! We used our big brains to deduce above that changing \u003ccode\u003evar_1\u003c/code\u003e by \\(1\\) unit will change \u003ccode\u003evar_mult\u003c/code\u003e by \\(4\\) units. 
So this works!\u003c/p\u003e\n\u003cp\u003eThe other variables works as expected:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egrad\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(3.)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYayyy! Still what we expected.\u003c/p\u003e\n\u003ch2 id=\"gradient-descent\"\u003eGradient Descent\u003c/h2\u003e\n\u003cp\u003eRelative changes are cool, but it isn\u0026rsquo;t all that useful unless we are actually doing some changing. 
We want to use our epic knowledge about the relative influences of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e, to manipulate those variables such that \u003ccode\u003evar_mult\u003c/code\u003e is the value we want.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003eTHE REST OF THIS DOCUMENT IS IN CONSTRUCTION\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.optim\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTo start an optimizer, you give it all the variables for which it should keep track of updating.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emomentum\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, to update gradients, you just have to:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# IMPORTANT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhat\u0026rsquo;s that 
\u003ccode\u003ezero_grad\u003c/code\u003e? That clears the gradients from the variables (after applying them with \u003ccode\u003e.step()\u003c/code\u003e) so that the next update doesn\u0026rsquo;t influence the current one.\u003c/p\u003e\n\u003ch2 id=\"your-first-neural-network\"\u003eYour First Neural Network\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"layers\"\u003eLayers\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExplain what the \\(20, 30\\) means.\u003c/p\u003e\n\u003cp\u003eOk one layer is just lame. 
What if you want a bunch of layers?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e40\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# function call syntax! 
Functions call from right to left!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd guess what? 
If you want to adjust the values here, you would just do:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e40\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# function call syntax! 
Functions call from right to left!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBut wait! 
What are the options you give to your optimizer?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eweight\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e...\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e...\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emomentum\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThat\u0026rsquo;s a \u003cem\u003elot of 
variables!!\u003c/em\u003e Each linear layer has an \\(m\\) and a \\(b\\) (from \\(y=mx+b\\) fame), and you will end up with a bajillion of those! Also, that function call syntax, chaining one layer after another, is so gnarly! Can we do better? Yes.\u003c/p\u003e\n\u003ch3 id=\"an-honest-to-goodness-neural-network\"\u003eAn Honest-to-Goodness Neural Network\u003c/h3\u003e\n\u003cp\u003ePyTorch provides the \u003ccode\u003emodule\u003c/code\u003e framework to make model creators\u0026rsquo; lives easier. This is the best practice for creating a neural network.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s replicate the example above with the new \u003ccode\u003emodule\u003c/code\u003e framework:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eModule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e    \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e        \u003cspan style=\"color:#75715e\"\u003e# important: runs early calls to make sure 
that\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is correct\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esuper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we declare our layers. We don\u0026#39;t use them yet.\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e40\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a special function that is called when\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is called\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eforward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to pass our input through to every layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# like we did before, but now more declaritively\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExplain all of this.\u003c/p\u003e\n\u003cp\u003eBut now, we essentially built our entire network in own \u0026ldquo;layer\u0026rdquo; (actually we literally did, all =Layer=s are just =torch.Module=s) that does the job of all other layers acting together. To use it, we just:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# function call syntax! Functions call from rigth to left!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[-0.1694, 0.0095, 0.4306, ..., 0.1580, 0.2644, 0.1509],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.2346, -0.0269, -0.1191, ..., 0.0229, -0.0819, -0.1452],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.4871, -0.2868, -0.2488, ..., 0.0637, 0.1832, 0.0619],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.1323, 0.2531, -0.1086, ..., 
0.0975, 0.0426, -0.2092],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.4765, 0.1441, -0.0520, ..., 0.2364, 0.0253, -0.1914],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.5044, -0.3263, 0.3102, ..., 0.1938, 0.1427, -0.0587]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e grad_fn=\u0026lt;AddmmBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBut wait! What are the options you give to your optimizer? Surely you don\u0026rsquo;t have to pass \u003ccode\u003emy_network.m1.weight\u003c/code\u003e, \u003ccode\u003emy_network.m1.bias\u003c/code\u003e, etc. etc. to the optimizer, right?\u003c/p\u003e\n\u003cp\u003eYou don\u0026rsquo;t. One of the things that the \u003ccode\u003esuper().__init__()\u003c/code\u003e did was to register a special function to your network class that keeps track of everything to optimize for. 
So now, to ask the optimizer to update the entire network, you just have to write:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emomentum\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" 
data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 0.01\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTODO make students recall original backprop example, backprope and step and zero_grad with this new optim.\u003c/p\u003e\n\u003cp\u003eLook! 
Optimizing an entire network works in the \u003cem\u003eexact same way\u003c/em\u003e as optimizing two lone variables.\u003c/p\u003e\n\u003ch2 id=\"putting-it-together\"\u003ePutting it together\u003c/h2\u003e\n\u003cp\u003eTODO\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etraining loop (zero first, call model, get diff/loss, .backward(), .step())\u003c/li\u003e\n\u003cli\u003ebest practices\u003c/li\u003e\n\u003cli\u003esaving and restoring models\u003c/li\u003e\n\u003cli\u003eGPU\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgetting_started_with_pytorch/","tags":["guide"],"title":"Getting Started with PyTorch"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgolden_gate_bridge/","tags":null,"title":"Golden Gate Bridge"},{"categories":null,"contents":" The Mandarin (Chinese fusion): 1029 El Camino Real, Menlo Park, CA 94025 The Kitchen (very classical cantonese): 279 El Camino Real, Millbrae, CA 94030 Left Bank (sit down french): 635 Santa Cruz Ave, Menlo Park, CA 94025 Jeffrey\u0026rsquo;s Hamburgers (chill, high quality American diner): 888 El Camino Real, Menlo Park, CA 94025 Tai Pan (formal Chinese): 560 Waverley St, Palo Alto, CA 94301 ","html":"\u003cul\u003e\n\u003cli\u003eThe Mandarin (Chinese fusion): 1029 El Camino Real, Menlo Park, CA 94025\u003c/li\u003e\n\u003cli\u003eThe Kitchen (very classical cantonese): 279 El Camino Real, Millbrae, CA 94030\u003c/li\u003e\n\u003cli\u003eLeft Bank (sit down french): 635 Santa Cruz Ave, Menlo Park, CA 94025\u003c/li\u003e\n\u003cli\u003eJeffrey\u0026rsquo;s Hamburgers (chill, high quality American diner): 888 El Camino Real, Menlo Park, CA 94025\u003c/li\u003e\n\u003cli\u003eTai Pan (formal Chinese): 560 Waverley St, Palo Alto, CA 94301\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgood_restaurants_in_the_bay_area/","tags":null,"title":"good restaurants in the Bay 
Area"},{"categories":null,"contents":"a:2:{i:0;s:2:\u0026ldquo;f2\u0026rdquo;;i:1;s:2:\u0026ldquo;f3\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;e2\u0026rdquo;;i:1;s:2:\u0026ldquo;e3\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;e1\u0026rdquo;;i:1;s:2:\u0026ldquo;e2\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;b2\u0026rdquo;;i:1;s:2:\u0026ldquo;b3\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;c2\u0026rdquo;;i:1;s:2:\u0026ldquo;d8\u0026rdquo;;}\n","html":"\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;f2\u0026rdquo;;i:1;s:2:\u0026ldquo;f3\u0026rdquo;;}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-30_21-09-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;e2\u0026rdquo;;i:1;s:2:\u0026ldquo;e3\u0026rdquo;;}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-30_21-09-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;e1\u0026rdquo;;i:1;s:2:\u0026ldquo;e2\u0026rdquo;;}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-30_21-10-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;b2\u0026rdquo;;i:1;s:2:\u0026ldquo;b3\u0026rdquo;;}\u003c/p\u003e\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;c2\u0026rdquo;;i:1;s:2:\u0026ldquo;d8\u0026rdquo;;}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgoogle_nerd_snipe/","tags":null,"title":"Google Nerd Snipe"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgorup/","tags":null,"title":"gorup"},{"categories":null,"contents":"OMG its Gram-Schmidtting!!! Ok so like orthonormal basis are so nice, don\u0026rsquo;t you want to make them out of boring-ass normal basis? Of course you do.\nSuppose \\(v_1, \u0026hellip; v_{m}\\) is a linearly independent list in \\(V\\). 
Now let us define some \\(e_{1} \u0026hellip; e_{m}\\) using the procedure below such that \\(e_{j}\\) are orthonormal and, importantly:\n\\begin{equation} span(v_1, \\dots, v_{m}) = span(e_{1}, \\dots, e_{m}) \\end{equation}\nThe Procedure We do this process inductively. Let:\n\\begin{equation} e_1 = \\frac{v_1}{\\|v_1\\|} \\end{equation}\nAnd then, let:\n\\begin{equation} e_{j} = \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|} \\end{equation}\nThat is, for each vector \\(v_{j}\\), we subtract out the component which it is already parallel (i.e. not orthogonal, i.e. already accounted by) each other already orthonormal basis. Then we norm the whole thing as lengths don\u0026rsquo;t matter and we desire norm-1.\nThe Proof We Prove this by induction.\nBase case: \\(j=1\\)\n\\(span (v_1) = span (e_{1})\\) because, by definition above, \\(e_1 = \\frac{v_1}{\\|v_1\\|}\\). And hence, they are multiples of each other and hence has the same span.\nInduction: at \\(1\u0026lt;j \u0026lt;m\\)\nSo, we have that:\n\\begin{equation} span (v_1, \\dots, v_{j-1}) = span(e_1, \\dots, e_{j-1}) \\end{equation}\nLet now \\(v_{j} \\not \\in span(v_1, \u0026hellip;, v_{j-1})\\) (because \\(v_{j}\\) are linearly independent). We have then \\(v_{j} \\not \\in span(e_1, \u0026hellip;, e_{j-1})\\), given the two spans are equal.\nHence, \\(v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \u0026hellip; - \\langle v_{j}, e_{j-1} \\rangle e_{j-1} \\neq 0\\) because otherwise \\(v_{j}\\) would be writable as a linearly combinations of \\(e_{1}, \u0026hellip;, e_{j-1}\\) and would then be in the span thereof, which we know isn\u0026rsquo;t true.\nDividing a vector by its norm produces a norm-1 vector; so we have now that \\(e_{j}\\) would be a norm-1 vector.\nNow, let \\(k \u0026lt; j\\). 
We desire that \\(\\langle e_{j}, e_{k} \\rangle = 0\\) because we want our new \\(e_{j}\\) to be orthogonal to every other existing vector.\nWe have:\n\\begin{equation} \\langle e_{j}, e_{k} \\rangle = \\langle \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|}, e_{k} \\rangle \\end{equation}\nNow, if we parcel out the large fraction the bottom, and apply additivity in the first slot, we will note that all of the \\(\\langle e_{i \\neq k}, e_{k} \\rangle=0\\) as everything already on this list is orthonormal. Finally, then we have only:\n\\begin{equation} \\langle v_{j}, e_{k} \\rangle - \\langle v_{k}, e_{k} \\rangle \\langle e_{k}, e_{k} \\rangle \\end{equation}\non top, which conveniently equals \\(0\\). Meaning \\(\\langle e_{j}, e_{k} \\rangle= 0\\), so \\(e_{k}\\) is indeed orthogonal to the rest of the list.\nBy definition of \\(e_{j}\\) above, \\(v_{j}\\) can be written as a linear combination of \\(e_{1}, \u0026hellip; e_{j-1}\\) as well as a bare \\(e_{j}\\). Therefore:\n\\begin{equation} span(v_1, \\dots, v_{j}) \\subset span (e_1, \\dots e_{j}) \\end{equation}\nOf course, both subspaces are the same dimension and so extending the basis to \\(v_{1} \u0026hellip; v_{j}\\) to \\(e_{1}, \u0026hellip; e_{j}\\) would be trivial. So they are equal. Phew. \\(\\blacksquare\\)\nCorollary Results Every Inner Product Space has an orthonormal basis Take any basis, Gram-Schmidt it, orthonormal list of the right length is a basis. 
\\(\\blacksquare\\)\nOrthonormal list extended to orthonormal basis Based on the procedure above, Gram-Schmidt does nothing to already orthonormal vectors: the inner products between any yet-to-be-reghramschmidt\u0026rsquo;d already orthonormal vector will be \\(0\\), so nothing will be subtracted.\nSo, suppose you have an orthonormal list \\(e_1, \u0026hellip;, e_{m}\\) in \\(V\\), which because orthonormal list is linearly independent, can be Gram-Schmidt\u0026rsquo;d to the same thing.\nAs a linearly independent list expends to a basis, go do that. Now Gram-Schmidtting this new thing won\u0026rsquo;t change \\(e_1, \u0026hellip; e_{m}\\) at all, but will give you extra orthonormal vectors to them which all form the basis as its the right length.\nOrthonormal upper-triangular matrix basis exists if normal upper-triangular exists Note that Gram-Schmidtting doesn\u0026rsquo;t actually change the span; meaning, if you have an upper-triangular matrix, you must have each \\(span(v_1, \u0026hellip;v_{j})\\) be invariant under \\(T\\).\nNow, recall that Gram-Schmidtting doesn\u0026rsquo;t actually change span; therefore, if each \\(span (v_1, \u0026hellip; v_{j})\\) is invariant under \\(T\\), then each \\(span(e_1, \u0026hellip; e_{j}) = span(v_1, \u0026hellip; v_{j})\\) after Gram-Schmidtting is still invariant under \\(T\\). So we can actually build an upper-triangular matrix out of the orthonormalized matrix as well.\nSchur\u0026rsquo;s Theorem Support \\(V\\) is a finite-dimensional complex vector space, then \\(T\\) has an upper-triangular matrix w.r.t. an orthonormal basis of \\(V\\).\nevery complex operator has an upper-triangular matrix; and orthonormal upper-triangular matrix basis exists if normal upper-triangular exists.\n","html":"\u003cp\u003eOMG its \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidtting\u003c/a\u003e!!! 
Ok so like \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e are so nice, don\u0026rsquo;t you want to make them out of boring-ass normal \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e? Of course you do.\u003c/p\u003e\n\u003cp\u003eSuppose \\(v_1, \u0026hellip; v_{m}\\) is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\). Now let us define some \\(e_{1} \u0026hellip; e_{m}\\) using the procedure below such that \\(e_{j}\\) are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e and, importantly:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nspan(v_1, \\dots, v_{m}) = span(e_{1}, \\dots, e_{m})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"the-procedure\"\u003eThe Procedure\u003c/h2\u003e\n\u003cp\u003eWe do this process inductively. Let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne_1 = \\frac{v_1}{\\|v_1\\|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then, let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne_{j} = \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is, for each vector \\(v_{j}\\), we subtract out the component which it is already parallel (i.e. not \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e, i.e. already accounted by) each other already \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e basis. 
Then we norm the whole thing as lengths don\u0026rsquo;t matter and we desire \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e-1.\u003c/p\u003e\n\u003ch2 id=\"the-proof\"\u003eThe Proof\u003c/h2\u003e\n\u003cp\u003eWe Prove this by induction.\u003c/p\u003e\n\u003cp\u003eBase case: \\(j=1\\)\u003c/p\u003e\n\u003cp\u003e\\(span (v_1) = span (e_{1})\\) because, by definition above, \\(e_1 = \\frac{v_1}{\\|v_1\\|}\\). And hence, they are multiples of each other and hence has the same span.\u003c/p\u003e\n\u003cp\u003eInduction: at \\(1\u0026lt;j \u0026lt;m\\)\u003c/p\u003e\n\u003cp\u003eSo, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nspan (v_1, \\dots, v_{j-1}) = span(e_1, \\dots, e_{j-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet now \\(v_{j} \\not \\in span(v_1, \u0026hellip;, v_{j-1})\\) (because \\(v_{j}\\) are linearly independent). We have then \\(v_{j} \\not \\in span(e_1, \u0026hellip;, e_{j-1})\\), given the two spans are equal.\u003c/p\u003e\n\u003cp\u003eHence, \\(v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \u0026hellip; - \\langle v_{j}, e_{j-1} \\rangle e_{j-1} \\neq 0\\) because otherwise \\(v_{j}\\) would be writable as a linearly combinations of \\(e_{1}, \u0026hellip;, e_{j-1}\\) and would then be in the span thereof, which we know isn\u0026rsquo;t true.\u003c/p\u003e\n\u003cp\u003eDividing a vector by its \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e produces a \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e-1 vector; so we have now that \\(e_{j}\\) would be a \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e-1 vector.\u003c/p\u003e\n\u003cp\u003eNow, let \\(k \u0026lt; j\\). 
We desire that \\(\\langle e_{j}, e_{k} \\rangle = 0\\) because we want our new \\(e_{j}\\) to be \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to every other existing vector.\u003c/p\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle e_{j}, e_{k} \\rangle = \\langle \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|}, e_{k} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, if we parcel out the large fraction the bottom, and apply additivity in the first slot, we will note that all of the \\(\\langle e_{i \\neq k}, e_{k} \\rangle=0\\) as everything already on this list is orthonormal. Finally, then we have only:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v_{j}, e_{k} \\rangle - \\langle v_{k}, e_{k} \\rangle \\langle e_{k}, e_{k} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eon top, which conveniently equals \\(0\\). Meaning \\(\\langle e_{j}, e_{k} \\rangle= 0\\), so \\(e_{k}\\) is indeed \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to the rest of the list.\u003c/p\u003e\n\u003cp\u003eBy definition of \\(e_{j}\\) above, \\(v_{j}\\) can be written as a linear combination of \\(e_{1}, \u0026hellip; e_{j-1}\\) as well as a bare \\(e_{j}\\). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nspan(v_1, \\dots, v_{j}) \\subset span (e_1, \\dots e_{j})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, both subspaces are the same dimension and so extending the basis to \\(v_{1} \u0026hellip; v_{j}\\) to \\(e_{1}, \u0026hellip; e_{j}\\) would be trivial. So they are equal. Phew. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"corollary-results\"\u003eCorollary Results\u003c/h2\u003e\n\u003ch3 id=\"every-inner-product-space--kbhinner-product-dot-md--has-an-orthonormal-basis--kbhorthonormal-basis-dot-md\"\u003eEvery \u003ca href=\"/posts/kbhinner_product/#inner-product-space\"\u003eInner Product Space\u003c/a\u003e has an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eTake any \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt it\u003c/a\u003e, \u003ca href=\"/posts/kbhorthonormal_basis/#orthonormal-list-of-the-right-length-is-a-basis\"\u003eorthonormal list of the right length is a basis\u003c/a\u003e. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"orthonormal-list-extended-to-orthonormal-basis--kbhorthonormal-basis-dot-md\"\u003eOrthonormal list extended to \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eBased on the procedure above, \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e does nothing to already orthonormal vectors: the inner products between any yet-to-be-reghramschmidt\u0026rsquo;d already orthonormal vector will be \\(0\\), so nothing will be subtracted.\u003c/p\u003e\n\u003cp\u003eSo, suppose you have an \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e list \\(e_1, \u0026hellip;, e_{m}\\) in \\(V\\), which because \u003ca href=\"/posts/kbhorthonormal/#orthonormal-list-is-linearly-independent\"\u003eorthonormal list is linearly independent\u003c/a\u003e, can be \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e\u0026rsquo;d to the same thing.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea linearly independent list 
expends to a basis\u003c/a\u003e, go do that. Now \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting this new thing won\u0026rsquo;t change \\(e_1, \u0026hellip; e_{m}\\) at all, but will give you extra \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e vectors to them which all form the basis as its the right length.\u003c/p\u003e\n\u003ch3 id=\"orthonormal-upper-triangular-matrix-basis-exists-if-normal-upper-triangular-exists\"\u003eOrthonormal upper-triangular matrix basis exists if normal upper-triangular exists\u003c/h3\u003e\n\u003cp\u003eNote that \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting doesn\u0026rsquo;t actually change the span; meaning, if you have an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e, you must have each \\(span(v_1, \u0026hellip;v_{j})\\) be invariant under \\(T\\).\u003c/p\u003e\n\u003cp\u003eNow, recall that \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting doesn\u0026rsquo;t actually change span; therefore, if each \\(span (v_1, \u0026hellip; v_{j})\\) is invariant under \\(T\\), then each \\(span(e_1, \u0026hellip; e_{j}) = span(v_1, \u0026hellip; v_{j})\\) after \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting is \u003cem\u003estill\u003c/em\u003e \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\). So we can actually build an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e out of the \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003eized matrix as well.\u003c/p\u003e\n\u003ch3 id=\"schur-s-theorem\"\u003eSchur\u0026rsquo;s Theorem\u003c/h3\u003e\n\u003cp\u003eSupport \\(V\\) is a finite-dimensional complex vector space, then \\(T\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e w.r.t. 
an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e; and \u003ca href=\"#orthonormal-upper-triangular-matrix-basis-exists-if-normal-upper-triangular-exists\"\u003eorthonormal upper-triangular matrix basis exists if normal upper-triangular exists\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgram_schmidt/","tags":null,"title":"Gram-Schmidt"},{"categories":null,"contents":"A grammar is a set of logical rules that form a language. (more precisely defined in goals of a grammar)\ngoals of a grammar explain natural languages in syntax + semantics have described algebras which can be used to evolve the syntax \u0026hellip;that describe the grammatical operations The formalism here is that a rigorous grammar should have:\nsemantic accountability generativity ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e is a set of logical rules that form a \u003ca href=\"/posts/kbhlanguage/\"\u003elanguage\u003c/a\u003e. 
(more precisely defined in \u003ca href=\"#goals-of-a-grammar--kbhgrammar-dot-md\"\u003egoals of a grammar\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"goals-of-a-grammar--kbhgrammar-dot-md\"\u003egoals of a \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eexplain natural languages in syntax + semantics\u003c/li\u003e\n\u003cli\u003ehave described algebras which can be used to evolve the syntax\u003c/li\u003e\n\u003cli\u003e\u0026hellip;that describe the grammatical operations\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe formalism here is that a rigorous \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e should have:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsemantic_accountability/\"\u003esemantic accountability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenerativity/\"\u003egenerativity\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgrammar/","tags":null,"title":"grammar"},{"categories":null,"contents":"Using constructor theory to test whether or not gravity in quantum theory is just entanglement.\nThis solves problem with gravity.\n","html":"\u003cp\u003eUsing \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e to test whether or not gravity in \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e is just \u003ca href=\"/posts/kbhentangled/\"\u003eentanglement\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis solves \u003ca href=\"/posts/kbhproblem_with_gravity/\"\u003eproblem with gravity\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgravitational_entanglement/","tags":null,"title":"gravitational entanglement"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgravitational_potential_energy/","tags":null,"title":"gravitational potential 
energy"},{"categories":null,"contents":"The Great Depression is a period of time of American depression.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e is a period of time of American depression.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgreat_depression/","tags":null,"title":"Great Depression"},{"categories":null,"contents":"Let \\(a,b \\in \\mathbb{Z}\\), not both zero. \\(\\gcd (a,b)\\) is the greatest value \\(d\\) such that \\(d|a\\), \\(d|b\\).\ngreatest common divisor is a linear combination We can write \\(\\gcd (a,b) = as+bt\\) for some \\(s,t \\in \\mathbb{Z}\\).\nLet us define:\n\\begin{equation} S = \\{am + bn: m,n \\in \\mathbb{Z}, am+bn \u0026gt; 0\\} \\end{equation}\nWe will first check that \\(S\\) is non-empty. To do so, let \\(a\\) be negative and \\(b\\) be positive. Then, set \\(m = -1\\), \\(n = 1\\). We can see that \\(am + bn \u0026gt; 0\\), satisfying the conditions of the set. In a similar manner, we can demonstrate that regardless of the choice of \\(a, b\\), \\(S\\) is non-empty.\nFurthermore, integral linear combinations are integers, so \\(S\\) is a non-empty subset of \\(\\mathbb{Z}\\).\nWe can now invoke WOP. There is some smallest \\(d \\in S\\). Let\u0026rsquo;s call \\(d = as +dt\\). We desire that \\(d\\) is actually \\(\\gcd (a,b)\\).\n\\(d\\) is a common divisor of \\(a,b\\) WLOG write some:\n\\begin{equation} a = dq + r \\end{equation}\nusing division algorithm. Because \\(d \\in S\\), we can write now:\n\\begin{equation} a = (as+bt) q + r \\end{equation}\nWe desire that now \\(r = 0\\) so that we can write \\(d|a\\). We can write:\n\\begin{equation} r = a-dq \\end{equation}\n(notice! \\(a\\) is a linear combination of \\(a,b\\), and \\(d\\) is given to be such)\n\\begin{equation} r = a-dq = (1a + 0b) - (as+bt)q = a(1-qs) + b(-tq) \\end{equation}\nRecall that \\(r \u0026lt; d\\) because \\(r\\) is a remainder. 
And of course \\(r\\) is defined to be positive or \\(0\\) by the division algorithm.\nSo:\n\\begin{equation} 0 \\leq a(1-qs) + b(-tq) \u0026lt;d \\end{equation}\nNow, you will note this middle thing, which is equal to \\(r\\), is itself a positive linear combination of \\(a,b\\). Furthermore, it is smaller than \\(d\\). We already have that \\(d\\) is the smallest element of \\(S\\), which means the only other value \\(r\\) can take on is \\(0\\).\nThis leads to conclude:\n\\begin{equation} a = dq + 0 \\end{equation}\nso \\(d|a\\), WLOG \\(d|b\\).\n\\(d\\) is the greatest common divisor Proof:\nLet \\(d\u0026rsquo;\\) be a common divisor of \\(a,b\\). This means there are some \\(m\u0026rsquo;, n\u0026rsquo;\\) such that:\n\\begin{align} a \u0026amp;= d\u0026rsquo; m\u0026rsquo; \\\\ b \u0026amp;= d\u0026rsquo; n' \\end{align}\nRecall that \\(d = as + bt\\). This means:\n\\begin{equation} d = as + bt = (d\u0026rsquo; m\u0026rsquo;)s + (d\u0026rsquo; n\u0026rsquo;)t = d\u0026rsquo; (m\u0026rsquo; s + n\u0026rsquo; t) \\end{equation}\nThis means that \\(d\u0026rsquo; | d\\). Now, \\(d \\in S\\), and everything in \\(S\\) is positive. Therefore, \\(d\\) must be the greatest common divisor because it is divisible (and therefore bigger in magnitude than) any \\(d\u0026rsquo;\\).\nWhich means that \\(d\\) must be the greatest common divisor\n","html":"\u003cp\u003eLet \\(a,b \\in \\mathbb{Z}\\), not both zero. \\(\\gcd (a,b)\\) is the greatest value \\(d\\) such that \\(d|a\\), \\(d|b\\).\u003c/p\u003e\n\u003ch2 id=\"greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/h2\u003e\n\u003cp\u003eWe can write \\(\\gcd (a,b) = as+bt\\) for some \\(s,t \\in \\mathbb{Z}\\).\u003c/p\u003e\n\u003cp\u003eLet us define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS = \\{am + bn: m,n \\in \\mathbb{Z}, am+bn \u0026gt; 0\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will first check that \\(S\\) is non-empty. 
To do so, let \\(a\\) be negative and \\(b\\) be positive. Then, set \\(m = -1\\), \\(n = 1\\). We can see that \\(am + bn \u0026gt; 0\\), satisfying the conditions of the set. In a similar manner, we can demonstrate that regardless of the choice of \\(a, b\\), \\(S\\) is non-empty.\u003c/p\u003e\n\u003cp\u003eFurthermore, integral linear combinations are integers, so \\(S\\) is a non-empty subset of \\(\\mathbb{Z}\\).\u003c/p\u003e\n\u003cp\u003eWe can now invoke \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e. There is some smallest \\(d \\in S\\). Let\u0026rsquo;s call \\(d = as +dt\\). We desire that \\(d\\) is actually \\(\\gcd (a,b)\\).\u003c/p\u003e\n\u003ch3 id=\"d-is-a-common-divisor-of-a-b\"\u003e\\(d\\) is a common divisor of \\(a,b\\)\u003c/h3\u003e\n\u003cp\u003eWLOG write some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = dq + r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eusing \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e. Because \\(d \\in S\\), we can write now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (as+bt) q + r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that now \\(r = 0\\) so that we can write \\(d|a\\). We can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = a-dq\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(notice! \\(a\\) is a linear combination of \\(a,b\\), and \\(d\\) is given to be such)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = a-dq = (1a + 0b) - (as+bt)q = a(1-qs) + b(-tq)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(r \u0026lt; d\\) because \\(r\\) is a remainder. 
And of course \\(r\\) is defined to be positive or \\(0\\) by the \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq a(1-qs) + b(-tq) \u0026lt;d\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, you will note this middle thing, which is equal to \\(r\\), is itself a positive \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(a,b\\). Furthermore, it is smaller than \\(d\\). We already have that \\(d\\) is the smallest element of \\(S\\), which means the only other value \\(r\\) can take on is \\(0\\).\u003c/p\u003e\n\u003cp\u003eThis leads to conclude:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = dq + 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso \\(d|a\\), WLOG \\(d|b\\).\u003c/p\u003e\n\u003ch3 id=\"d-is-the-greatest-common-divisor\"\u003e\\(d\\) is the greatest common divisor\u003c/h3\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet \\(d\u0026rsquo;\\) be a common divisor of \\(a,b\\). This means there are some \\(m\u0026rsquo;, n\u0026rsquo;\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\na \u0026amp;= d\u0026rsquo; m\u0026rsquo; \\\\\nb \u0026amp;= d\u0026rsquo; n'\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall that \\(d = as + bt\\). This means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd = as + bt = (d\u0026rsquo; m\u0026rsquo;)s + (d\u0026rsquo; n\u0026rsquo;)t = d\u0026rsquo; (m\u0026rsquo; s + n\u0026rsquo; t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means that \\(d\u0026rsquo; | d\\). Now, \\(d \\in S\\), and everything in \\(S\\) is positive. 
Therefore, \\(d\\) must be the \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e because it is divisible (and therefore bigger in magnitude than) any \\(d\u0026rsquo;\\).\u003c/p\u003e\n\u003cp\u003eWhich means that \\(d\\) must be the \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgreatest_common_divisor/","tags":null,"title":"greatest common divisor"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgreedy_programming/","tags":null,"title":"greedy programming"},{"categories":null,"contents":"Participate in Demo Day.\nGetting something:\nOpportunity to get partnering Networking opportunities, having access to contract manufacturing =\u0026gt; Conrad Challenge, $600 each, $1200\nUser conversation\nSpoke again with CompassionKind: wanting to get 20 units shipped out Spoke with SustainableEnergy for All: started by one of the UN reps. of an African country; wanted to have us featured on Social Media Wanted to connect Start diving into user connections Hiring requests\nFulfilling orders MechE ","html":"\u003cp\u003eParticipate in Demo Day.\u003c/p\u003e\n\u003cp\u003eGetting something:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eOpportunity to get partnering\u003c/li\u003e\n\u003cli\u003eNetworking opportunities, having access to contract manufacturing\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e=\u0026gt; Conrad Challenge, $600 each, $1200\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eUser conversation\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSpoke again with CompassionKind: wanting to get 20 units shipped out\u003c/li\u003e\n\u003cli\u003eSpoke with SustainableEnergy for All: started by one of the UN reps. 
of an African country; wanted to have us featured on Social Media\u003c/li\u003e\n\u003cli\u003eWanted to connect\u003c/li\u003e\n\u003cli\u003eStart diving into user connections\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eHiring requests\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFulfilling orders\u003c/li\u003e\n\u003cli\u003eMechE\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgreenswing_april_checkin/","tags":null,"title":"GreenSwing April Checkin"},{"categories":null,"contents":"In this experiment, an efficient and accurate network of detecting automatically disseminated (bot) content on social platforms is devised. Through the utilisation of parallel convolutional neural network (CNN) which processes variable n-grams of text 15, 20, and 25 tokens in length encoded by Byte Pair Encoding (BPE), the complexities of linguistic content on social platforms are effectively captured and analysed. With validation on two sets of previously unexposed data, the model was able to achieve an accuracy of around 96.6% and 97.4% respectively — meeting or exceeding the performance of other comparable supervised ML solutions to this problem. Through testing, it is concluded that this method of text processing and analysis proves to be an effective way of classifying potentially artificially synthesized user data — aiding the security and integrity of social platforms.\n","html":"\u003cp\u003eIn this experiment, an efficient and accurate network of detecting automatically disseminated (bot) content on social platforms is devised. Through the utilisation of parallel convolutional neural network (CNN) which processes variable n-grams of text 15, 20, and 25 tokens in length encoded by Byte Pair Encoding (BPE), the complexities of linguistic content on social platforms are effectively captured and analysed. 
With validation on two sets of previously unexposed data, the model was able to achieve an accuracy of around 96.6% and 97.4% respectively — meeting or exceeding the performance of other comparable supervised ML solutions to this problem. Through testing, it is concluded that this method of text processing and analysis proves to be an effective way of classifying potentially artificially synthesized user data — aiding the security and integrity of social platforms.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgregarious_abstract/","tags":null,"title":"Gregarious Abstract"},{"categories":null,"contents":"grid search is a hyperparameter tuning technique by trying pairs of all hyperparemeters sequentially\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgrid_search/\"\u003egrid search\u003c/a\u003e is a hyperparameter tuning technique by trying pairs of all hyperparemeters sequentially\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgrid_search/","tags":null,"title":"grid search"},{"categories":null,"contents":"components a set of constituent objects an operation requirements for group closed: if \\(a,b \\in G\\), then \\(a \\cdot b \\in G\\) existence of identity: there is \\(e \\in G\\) such that \\(e\\cdot a= a\\cdot e = a\\), for all \\(a \\in G\\) existence of inverses: there is \\(b \\in G\\) for all \\(a \\in G\\) such that \\(a\\cdot b = b\\cdot a = e\\) associative: \\((a\\cdot b)\\cdot c = a\\cdot (b\\cdot c)\\) for all \\(a,b,c \\in G\\) additional information identity in group commutates with everything (which is the only commutattion in groups\nUnique identities and inverses the identity is unique in a group (similar idea as additive identity is unique in a vector space) for each \\(a \\in G\\), its inverse in unique (similar ideas as additive inverse is unique in a vector space) cancellation policies if \\(a,b,c \\in G\\), \\(ab = ac \\implies b = c\\) (left cancellation)\n\\(ba = ca \\implies b = c\\) (right 
cancellation)\nsock-shoes property if \\(a,b \\in G\\), then \\((ab)^{-1} = b^{-1}a^{-1}\\)\n","html":"\u003ch2 id=\"components\"\u003ecomponents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea set of constituent \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003ean \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements-for-group\"\u003erequirements for group\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e: if \\(a,b \\in G\\), then \\(a \\cdot b \\in G\\)\u003c/li\u003e\n\u003cli\u003eexistence of \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e: there is \\(e \\in G\\) such that \\(e\\cdot a= a\\cdot e = a\\), for all \\(a \\in G\\)\u003c/li\u003e\n\u003cli\u003eexistence of \u003ca href=\"/posts/kbhinverses/\"\u003einverses\u003c/a\u003e: there is \\(b \\in G\\) for all \\(a \\in G\\) such that \\(a\\cdot b = b\\cdot a = e\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e: \\((a\\cdot b)\\cdot c = a\\cdot (b\\cdot c)\\) for all \\(a,b,c \\in G\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e in \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutates\u003c/a\u003e with everything (which is the only \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutattion\u003c/a\u003e in groups\u003c/p\u003e\n\u003ch3 id=\"unique-identities-and-inverses\"\u003eUnique identities and inverses\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e is unique in a group (similar idea as \u003ca href=\"/posts/kbhadditive_identity_is_unique_in_a_vector_space/\"\u003eadditive identity 
is unique in a vector space\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003efor each \\(a \\in G\\), its inverse in unique (similar ideas as \u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003eadditive inverse is unique in a vector space\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cancellation-policies\"\u003ecancellation policies\u003c/h3\u003e\n\u003cp\u003eif \\(a,b,c \\in G\\), \\(ab = ac \\implies b = c\\) (\u003ca href=\"/posts/kbhgroup/\"\u003eleft cancellation\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003e\\(ba = ca \\implies b = c\\) (\u003ca href=\"/posts/kbhgroup/\"\u003eright cancellation\u003c/a\u003e)\u003c/p\u003e\n\u003ch3 id=\"sock-shoes-property\"\u003esock-shoes property\u003c/h3\u003e\n\u003cp\u003eif \\(a,b \\in G\\), then \\((ab)^{-1} = b^{-1}a^{-1}\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgroup/","tags":null,"title":"group"},{"categories":null,"contents":"Notes on MATH 109, group theory.\nLectures SU-MATH109 SEP272023 PSets These links are dead.\nSU-MATH109 Problem Set 1 Course logistics midterm: November 1st, final: December 14th, 8:30-11:30 WIM assignment: December 8th, start of class (no late submissions) PSets: 8 in total, posted on Wednesdays at 8A, due following Tuesday at 8A ","html":"\u003cp\u003eNotes on MATH 109, \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e theory.\u003c/p\u003e\n\u003ch2 id=\"lectures\"\u003eLectures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math109_sep272023_exp/\"\u003eSU-MATH109 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"psets\"\u003ePSets\u003c/h2\u003e\n\u003cp\u003eThese links are dead.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math109_problem_set_1/\"\u003eSU-MATH109 Problem Set 1\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"course-logistics\"\u003eCourse logistics\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emidterm: November 
1st, final: December 14th, 8:30-11:30\u003c/li\u003e\n\u003cli\u003eWIM assignment: December 8th, start of class (no late submissions)\u003c/li\u003e\n\u003cli\u003ePSets: 8 in total, posted on Wednesdays at 8A, due following Tuesday at 8A\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgroup_theory_index/","tags":["index"],"title":"Group Theory Index"},{"categories":null,"contents":"\u0026ldquo;Stuffing some stuff into buckets\u0026rdquo;\nHow many ways are there to sort \\(n\\) distinct objects to \\(r\\) buckets?\n\\begin{equation} r^{n} \\end{equation}\ngrouping with entirely indistinct objects You can simply reframe the grouping problem as permutation of the objects with \\(r-1\\) dividers along with your old \\(n\\) objects.\ni.e.: sort this thing \u0026mdash;\nSo:\n\\begin{equation} \\frac{(n+r-1)!}{n! (r-1)!} \\end{equation}\n","html":"\u003cp\u003e\u0026ldquo;Stuffing some stuff into buckets\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eHow many ways are there to sort \\(n\\) distinct objects to \\(r\\) buckets?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr^{n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"grouping-with-entirely-indistinct-objects\"\u003egrouping with entirely indistinct objects\u003c/h2\u003e\n\u003cp\u003eYou can simply reframe the \u003ca href=\"/posts/kbhgrouping/\"\u003egrouping\u003c/a\u003e problem as \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e of the objects with \\(r-1\\) dividers along with your old \\(n\\) objects.\u003c/p\u003e\n\u003cp\u003ei.e.: sort this thing \u0026mdash;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-29_16-43-08_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{(n+r-1)!}{n! 
(r-1)!}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgrouping/","tags":null,"title":"grouping"},{"categories":null,"contents":"The Guilded Age is a period in history between 1877 and 1900. This period deepened divide in racism, deepened the split between poor and rich, and the fluidity of American social classes became more set in this time.\nLinks to Organize Imperialism New American South Why is the \u0026ldquo;Guilded Age\u0026rdquo; \u0026ldquo;Guilded\u0026rdquo;? Guilded: Outside Lined with Gold, Inside Contains Metal and is Less Valuable.\nThe Guilded Age consists of three different sections:\nBusiness (Top!) Labour Government Contributors to the Guilded Age There are three pieces\n\u0026ldquo;Homestead Act\u0026rdquo;: legal way to give people land in the west \u0026ldquo;National Banking Act\u0026rdquo;: unified a uniform economic system, and to connect markets \u0026ldquo;Pacific Railroad Act\u0026rdquo;: expansion of connection; also formed the first \u0026ldquo;Corporations\u0026rdquo; based on railroad organization structures. Issues of the Guilded Age Immigration \u0026ldquo;They are coming to take our jobs!\u0026rdquo; (Irish Edition.)\nUSCIS processed people in Ellis (Irish processing, took about 2 days to process) and Angel (Chinese processing, took about 6 months to process) islands: beginning having racial immigrant discrimination.\nUrbanization Populations in the United States tripled in about 50 years. Immigrants were stuffed into Tennaments. The Guilded age saw the beginning of skyscrapers.\nSocial Activism Because of the issues began during the Guilded Age, more people essentially stepped instead of the governement to play a role in supporting welfare.\nIndustrialization \u0026ldquo;Pulling yourself up by your bootstraps.\u0026rdquo; Steel is invented. All of the technology has been moved to provide maximum output; this comes, of course, at an environmental cost. Large unions are starting to take off. 
Railroad Strike: federal troops fired upon railroad workers; argued the case for union but also for corp. influence on politics.\nPolitics Democrats: racist, states rights, limited federal government. Republicans: supported businesses, immigrations. Yes, they are still flipped.\nBut either way, democracy was rampant: 80% turnout! This is, however, one of the most corrupt time in American politics. The party system: local political bosses would provide favours for a vote \u0026mdash; in an absence of welfare, in exchange for a vote, wolud provide protection and social welfare. At this time, there was mass lack of reliance.\nCulture Victorianism! Proper manners, conservatism, etc. Bikes and contraceptives! There is also a fear that \u0026ldquo;manliness was declining\u0026rdquo;: that no more farming means need for more sports, body building, etc. Also, \u0026ldquo;name brands\u0026rdquo;, \u0026ldquo;sears catalogue\u0026rdquo;, and consumerism is taking hold.\nCorportization Corporations, as an idea, took hold. That the owners of a group is seperated by management, it allows the expansion of the size of companies. Monopolies in industries run while: concentrated wealth in addition to corrupted politics.\nTaylorism: Taylor decided to make a shovel for each type of movement \u0026mdash; which makes people repeat the same task over again but increased efficiency. \u0026ldquo;Taylor-made\u0026rdquo; comes from this.\nOmaha Platform Expanding Credit Bracketed income tax Social reforms Lays the groundwork for the progressive moment. This was a socialist movement!\nThe West Transcontinental railroad: power over towns and concessions Rise of Cowboys and \u0026ldquo;cattle bonanza\u0026rdquo; Prairies settled with new farming equipment and new Russian wheat strands: \u0026ldquo;Americanlization\u0026rdquo; The \u0026ldquo;turner thesis\u0026rdquo;: American democracy is formed at the frontier. 
However, Western Expansion is actually much of a tragedy, and this is actually leads to Imperialism.\nIndian Removal Policy of Indian removal to force into treaties + reservation Sioux Wars (crazy horse, etc.): Native American resistance Native Americans of California extreme violence; as well as slave labour Dawes Act of 1887 and forced \u0026ldquo;assimilation\u0026rdquo;: forced the breakup of many reservations Guilded Age Commentary Historians Rebekah Edwards The late 19th century was not entirely laissez faire \u0026ldquo;Progressive Era\u0026rdquo;: not always progressive Issues that lead to the \u0026ldquo;Guilded age\u0026rdquo; name that was not specific to the Guilded age \u0026ldquo;Guilded age\u0026rdquo;: \u0026ldquo;eh, nothing else to deal with, so let\u0026rsquo;s deal with racism!\u0026rdquo;\nRichard John Guilded age was a period of rapid industrialization Very charactured, unequal + vulgar time The resulting changes are very concentrated; all of the changes that are 80 years apart This is super disconnected to social, political aspects of life. It doesn\u0026rsquo;t talk about how the economy effects the social standings and ladders that people lived in =\u0026gt; that movement comes from a lot of social change.\nMade a point about the positive/negatives effects of the guilded age: don\u0026rsquo;t focus the individuals but instead the structures.\nHe did not want the \u0026ldquo;progressive era\u0026rdquo; as a classification in line with the guilded age. \u0026ldquo;Guilded age\u0026rdquo; is the only pejorative term for an era: so one negative description does not do it justice.\nRichard Benzel Richard Benzel claims that the textbook industry primes people; that a title for an age shoehorns the age and changes the reflection of the reader.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e is a period in history between 1877 and 1900. 
This period deepened divide in racism, deepened the split between poor and rich, and the fluidity of American social classes became more set in this time.\u003c/p\u003e\n\u003ch2 id=\"links-to-organize\"\u003eLinks to Organize\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhimperialism/\"\u003eImperialism\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnew_american_south/\"\u003eNew American South\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"why-is-the-guilded-age--kbhguilded-age-dot-md--guilded\"\u003eWhy is the \u0026ldquo;\u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e\u0026rdquo; \u0026ldquo;Guilded\u0026rdquo;?\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#why-is-the-guilded-age--kbhguilded-age-dot-md--guilded\"\u003eGuilded\u003c/a\u003e: Outside Lined with Gold, Inside Contains Metal and is Less Valuable.\u003c/p\u003e\n\u003cp\u003eThe Guilded Age consists of three different sections:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBusiness (Top!)\u003c/li\u003e\n\u003cli\u003eLabour\u003c/li\u003e\n\u003cli\u003eGovernment\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"contributors-to-the-guilded-age--kbhguilded-age-dot-md\"\u003eContributors to the \u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThere are three pieces\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhhomestead_act/\"\u003eHomestead Act\u003c/a\u003e\u0026rdquo;: legal way to give people land in the west\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhnational_banking_act/\"\u003eNational Banking Act\u003c/a\u003e\u0026rdquo;: unified a uniform economic system, and to connect markets\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhpacific_railroad_act/\"\u003ePacific Railroad Act\u003c/a\u003e\u0026rdquo;: expansion of connection; also formed the first \u0026ldquo;Corporations\u0026rdquo; 
based on railroad organization structures.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"issues-of-the-guilded-age\"\u003eIssues of the Guilded Age\u003c/h2\u003e\n\u003ch3 id=\"immigration\"\u003eImmigration\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;They are coming to take our jobs!\u0026rdquo; (Irish Edition.)\u003c/p\u003e\n\u003cp\u003eUSCIS processed people in Ellis (Irish processing, took about 2 days to process) and Angel (Chinese processing, took about 6 \u003cem\u003emonths\u003c/em\u003e to process) islands: beginning having racial immigrant discrimination.\u003c/p\u003e\n\u003ch3 id=\"urbanization\"\u003eUrbanization\u003c/h3\u003e\n\u003cp\u003ePopulations in the United States tripled in about 50 years. Immigrants were stuffed into Tennaments. The Guilded age saw the beginning of skyscrapers.\u003c/p\u003e\n\u003ch3 id=\"social-activism\"\u003eSocial Activism\u003c/h3\u003e\n\u003cp\u003eBecause of the issues began during the Guilded Age, more people essentially stepped instead of the governement to play a role in supporting welfare.\u003c/p\u003e\n\u003ch3 id=\"industrialization\"\u003eIndustrialization\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Pulling yourself up by your bootstraps.\u0026rdquo; \u003cem\u003eSteel\u003c/em\u003e is invented. All of the technology has been moved to provide maximum output; this comes, of course, at an environmental cost. Large unions are starting to take off. Railroad Strike: federal troops fired upon railroad workers; argued the case for union but also for corp. influence on politics.\u003c/p\u003e\n\u003ch3 id=\"politics\"\u003ePolitics\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDemocrats: racist, states rights, limited federal government.\u003c/li\u003e\n\u003cli\u003eRepublicans: supported businesses, immigrations.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYes, they are still flipped.\u003c/p\u003e\n\u003cp\u003eBut either way, democracy was rampant: 80% turnout! 
This is, however, one of the most corrupt time in American politics. The party system: local political bosses would provide favours for a vote \u0026mdash; in an absence of welfare, in exchange for a vote, wolud provide protection and social welfare. At this time, there was mass lack of reliance.\u003c/p\u003e\n\u003ch3 id=\"culture\"\u003eCulture\u003c/h3\u003e\n\u003cp\u003eVictorianism! Proper manners, conservatism, etc. Bikes and contraceptives! There is also a fear that \u0026ldquo;manliness was declining\u0026rdquo;: that no more farming means need for more sports, body building, etc. Also, \u0026ldquo;name brands\u0026rdquo;, \u0026ldquo;sears catalogue\u0026rdquo;, and consumerism is taking hold.\u003c/p\u003e\n\u003ch3 id=\"corportization\"\u003eCorportization\u003c/h3\u003e\n\u003cp\u003eCorporations, as an idea, took hold. That the owners of a group is seperated by management, it allows the expansion of the size of companies. Monopolies in industries run while: concentrated wealth in addition to corrupted politics.\u003c/p\u003e\n\u003cp\u003eTaylorism: Taylor decided to make a shovel for each type of movement \u0026mdash; which makes people repeat the same task over again but increased efficiency. \u0026ldquo;Taylor-made\u0026rdquo; comes from this.\u003c/p\u003e\n\u003ch3 id=\"omaha-platform\"\u003eOmaha Platform\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExpanding Credit\u003c/li\u003e\n\u003cli\u003eBracketed income tax\u003c/li\u003e\n\u003cli\u003eSocial reforms\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLays the groundwork for the progressive moment. 
This was a socialist movement!\u003c/p\u003e\n\u003ch2 id=\"the-west\"\u003eThe West\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTranscontinental railroad: power over towns and concessions\u003c/li\u003e\n\u003cli\u003eRise of Cowboys and \u0026ldquo;cattle bonanza\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ePrairies settled with new farming equipment and new Russian wheat strands: \u0026ldquo;Americanlization\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u0026ldquo;turner thesis\u0026rdquo;: American democracy is formed at the frontier. However, Western Expansion is actually much of a tragedy, and this is actually leads to Imperialism.\u003c/p\u003e\n\u003ch3 id=\"indian-removal\"\u003eIndian Removal\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePolicy of Indian removal to force into treaties + reservation\u003c/li\u003e\n\u003cli\u003eSioux Wars (crazy horse, etc.): Native American resistance\u003c/li\u003e\n\u003cli\u003eNative Americans of California extreme violence; as well as slave labour\u003c/li\u003e\n\u003cli\u003eDawes Act of 1887 and forced \u0026ldquo;assimilation\u0026rdquo;: forced the breakup of many reservations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"guilded-age-commentary-historians\"\u003eGuilded Age Commentary Historians\u003c/h2\u003e\n\u003ch3 id=\"rebekah-edwards\"\u003eRebekah Edwards\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe late 19th century was not entirely \u003cem\u003elaissez faire\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Progressive Era\u0026rdquo;: not always progressive\u003c/li\u003e\n\u003cli\u003eIssues that lead to the \u0026ldquo;Guilded age\u0026rdquo; name that was not specific to the Guilded age\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;Guilded age\u0026rdquo;: \u0026ldquo;eh, nothing else to deal with, so let\u0026rsquo;s deal with racism!\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"richard-john\"\u003eRichard 
John\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGuilded age was a period of rapid industrialization\u003c/li\u003e\n\u003cli\u003eVery charactured, unequal + vulgar time\u003c/li\u003e\n\u003cli\u003eThe resulting changes are very concentrated; all of the changes that are 80 years apart\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is super disconnected to social, political aspects of life. It doesn\u0026rsquo;t talk about how the economy effects the social standings and ladders that people lived in =\u0026gt; that movement comes from a lot of social change.\u003c/p\u003e\n\u003cp\u003eMade a point about the positive/negatives effects of the guilded age: don\u0026rsquo;t focus the individuals but instead the structures.\u003c/p\u003e\n\u003cp\u003eHe did not want the \u0026ldquo;progressive era\u0026rdquo; as a classification in line with the guilded age. \u0026ldquo;Guilded age\u0026rdquo; is the only pejorative term for an era: so one negative description does not do it justice.\u003c/p\u003e\n\u003ch3 id=\"richard-benzel\"\u003eRichard Benzel\u003c/h3\u003e\n\u003cp\u003eRichard Benzel claims that the textbook industry primes people; that a title for an age shoehorns the age and changes the reflection of the reader.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhguilded_age/","tags":null,"title":"Guilded Age"},{"categories":null,"contents":"One-Liner UAV navigation through leveraging updrafts, handling their unpredictability with POMDPs and Receeding Horizon.\nNovelty Developed new method for low-cost POMDP online solving Cool bird. 
Notable Methods two main steps explore: determine thermal parameters exploit: plan a trajectory to exploit the thermal formulation \\(\\mathcal{S}\\): \\(s^{u} \\in \\mathbb{R}^{6}\\), the joint state of the UAV (2D location wrt fixed point + air speech + heading, bank, roll, altitude), and \\(s^{th} \\in \\mathbb{R}^{2}\\),the thermal status (thermal center x and y relative to UAV) \\(\\mathcal{A}\\): discretized arc trajectory segments by bank angles \\(\\phi_{1 \\dots n}\\), which executes for a fixed \\(T_{A}\\) seconds \\(\\mathcal{T}\\): Gaussian of \\(s^{u}\\) over the dynamics of the UAV, and over fixed noise covariance \\(Q\\) \\(\\mathcal{R}\\): \\(h_{s\u0026rsquo;}-h_{s}\\), the change in altitude\u0026hellip;. \\(\\mathcal{O}\\): senor readings \\(O(a, s\u0026rsquo;, o)\\): fixed noise covariance \\(R\\) \\(b_0\\): product of two Gaussian of the UAV\u0026rsquo;s position and the belief about the underlying thermals \\(update(b,a,o)\\): EKF modeling assumptions:\nthermal consistency: the world model change frequency less than control thermal stationarity: thermal doesn\u0026rsquo;t move against surrounding air no pitch angle control: reward hacking may happen no turbulence: thermal doesn\u0026rsquo;t sang horizontal displacements POMDSoar The exact solution to the POMDP as proposed makes aggressive decisions in order to simplify costs to run on a PixHawk.\nWe need to explicitly build in a exploration/exploitation tradeoff.\nKey Figs comparison against ardusoar: EKF + just circling\nArduPilot\u0026rsquo;s implementation is worse\nNew Concepts POMDSoar, the soring mechanism\nNotes ","html":"\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUAV navigation through leveraging updrafts, handling their unpredictability with \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es and \u003ca href=\"/posts/kbhreceeding_horizon/\"\u003eReceeding Horizon\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 
id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDeveloped new method for low-cost POMDP online solving\u003c/li\u003e\n\u003cli\u003eCool bird.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch3 id=\"two-main-steps\"\u003etwo main steps\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eexplore: determine thermal parameters\u003c/li\u003e\n\u003cli\u003eexploit: plan a trajectory to exploit the thermal\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"formulation\"\u003eformulation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{S}\\): \\(s^{u} \\in \\mathbb{R}^{6}\\), the joint state of the UAV (2D location wrt fixed point + air speech + heading, bank, roll, altitude), and \\(s^{th} \\in \\mathbb{R}^{2}\\),the thermal status (thermal center x and y relative to UAV)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{A}\\): discretized arc trajectory segments by bank angles \\(\\phi_{1 \\dots n}\\), which executes for a fixed \\(T_{A}\\) seconds\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{T}\\): Gaussian of \\(s^{u}\\) over the dynamics of the UAV, and over fixed noise covariance \\(Q\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{R}\\): \\(h_{s\u0026rsquo;}-h_{s}\\), the change in altitude\u0026hellip;.\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{O}\\): senor readings\u003c/li\u003e\n\u003cli\u003e\\(O(a, s\u0026rsquo;, o)\\): fixed noise covariance \\(R\\)\u003c/li\u003e\n\u003cli\u003e\\(b_0\\): product of two Gaussian of the UAV\u0026rsquo;s position and the belief about the underlying thermals\u003c/li\u003e\n\u003cli\u003e\\(update(b,a,o)\\): \u003ca href=\"/posts/kbhfilters/#extended\"\u003eEKF\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003emodeling assumptions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ethermal consistency\u003c/strong\u003e: the world model change frequency less than control\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ethermal 
stationarity\u003c/strong\u003e: thermal doesn\u0026rsquo;t move against surrounding air\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno pitch angle control\u003c/strong\u003e: reward hacking may happen\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno turbulence\u003c/strong\u003e: thermal doesn\u0026rsquo;t sang horizontal displacements\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pomdsoar\"\u003ePOMDSoar\u003c/h3\u003e\n\u003cp\u003eThe exact solution to the \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e as proposed makes aggressive decisions in order to simplify costs to run on a PixHawk.\u003c/p\u003e\n\u003cp\u003eWe need to explicitly build in a exploration/exploitation tradeoff.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_09-49-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ecomparison against ardusoar: \u003ca href=\"/posts/kbhfilters/#extended\"\u003eEKF\u003c/a\u003e + just circling\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-09_12-12-55_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eArduPilot\u0026rsquo;s implementation is worse\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#pomdsoar\"\u003ePOMDSoar\u003c/a\u003e, the soring mechanism\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkolobov_2018/","tags":null,"title":"Guilliard 2018"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.642517\nOne-Liner Used WLS data to augment CTP from ADReSS Challenge and trained it on a BERT with good results.\nNovelty Used WLS data with CTP task to augment ADReSS DementiaBank data Notable Methods WLS data is not labeled, so authors used Semantic Verbal Fluency tests that come with WLS to make a presumed conservative diagnoses. 
Therefore, control data is more interesting:\nKey Figs Table 2 Data-aug of ADReSS Challenge data with WSL controls (no presumed AD) trained with a BERT. As expected the conservative control data results in better ferf\nNew Concepts ADReSS Challenge is small so use WLS to augment it ","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.642517\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUsed WLS data to augment \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e from \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e and trained it on a BERT with good results.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsed WLS data with \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task to augment ADReSS \u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eWLS data is not labeled, so authors used \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e tests that come with WLS to make a presumed conservative diagnoses. Therefore, control data is more interesting:\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"table-2\"\u003eTable 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-27-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eData-aug of \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e data with WSL controls (no presumed AD) trained with a BERT. 
As expected the conservative control data results in better ferf\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e is small so use WLS to augment it\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhguo_2021/","tags":["ntj"],"title":"Guo 2021"},{"categories":null,"contents":"GUS is the a architecture of frame based Dialogue Systems; this is sometimes called a domain ontology.\nGeneral principle: try to fill as many user slots in the frame as possible that the user specifies, if the frame is filled, do action and report result.\nYou maybe working in multi-frame systems, then in which case some slots in one frame may help inform or fill those in another frame.\nGUS uses regular expressions/grammar rules to perform all of its tasks. Generating responses are usually completely templated.\ntradeoffs high precision low recall maybe hard to write three tasks domain classification: which frames to activate? intent determination: which tasks to activate once frame is filled? 
slot filling: fill frame we can actually consider this as one giant frame:\nframe frame is a structure which is used to store information about an interaction.\nSlot Type Question origin city \u0026ldquo;\u0026hellip;.?\u0026rdquo; \u0026hellip; \u0026hellip; \u0026hellip; which, throughout the interaction, is filled out by asking the questions.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgus/\"\u003eGUS\u003c/a\u003e is the a architecture of \u003ca href=\"#frame\"\u003eframe\u003c/a\u003e based \u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e; this is sometimes called a \u003cstrong\u003edomain ontology\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eGeneral principle: try to fill as many user slots in the frame as possible that the user specifies, if the frame is filled, do action and report result.\u003c/p\u003e\n\u003cp\u003eYou maybe working in multi-frame systems, then in which case some slots in one frame may help inform or fill those in another frame.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgus/\"\u003eGUS\u003c/a\u003e uses regular expressions/grammar rules to perform all of its tasks. 
Generating responses are usually completely templated.\u003c/p\u003e\n\u003ch2 id=\"tradeoffs\"\u003etradeoffs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ehigh precision\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elow recall\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003emaybe hard to write\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"three-tasks\"\u003ethree tasks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edomain classification\u003c/strong\u003e: which frames to activate?\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eintent determination\u003c/strong\u003e: which tasks to activate once frame is filled?\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eslot filling\u003c/strong\u003e: fill frame\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe can actually consider this as one giant frame:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-47-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"frame\"\u003eframe\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#frame\"\u003eframe\u003c/a\u003e is a structure which is used to store information about an interaction.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSlot\u003c/th\u003e\n\u003cth\u003eType\u003c/th\u003e\n\u003cth\u003eQuestion\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eorigin\u003c/td\u003e\n\u003ctd\u003ecity\u003c/td\u003e\n\u003ctd\u003e\u0026ldquo;\u0026hellip;.?\u0026rdquo;\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u0026hellip;\u003c/td\u003e\n\u003ctd\u003e\u0026hellip;\u003c/td\u003e\n\u003ctd\u003e\u0026hellip;\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ewhich, throughout the interaction, is filled out by asking the 
questions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgus/","tags":null,"title":"GUS"},{"categories":null,"contents":"Gut bacteria are both adversly affected by 5-Fluoropyrimidine, and but they mtaybe able to inactivate synthesized Fluoropyrimidine.\nPreTA in E. Coli is an example of a bacterial that can do this. See implications of PreTA deactivating Fluoropyrimidine.\n","html":"\u003cp\u003eGut bacteria are both adversly affected by 5-\u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e, and but they mtaybe able to inactivate synthesized \u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"\"\u003ePreTA\u003c/a\u003e in \u003ca href=\"/posts/kbhe_coli/\"\u003eE. Coli\u003c/a\u003e is an example of a bacterial that can do this. See \u003ca href=\"\"\u003eimplications of PreTA deactivating Fluoropyrimidine\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbh5_fluoropyrimidine_maybe_inactivated_by_gut_microbiome/","tags":null,"title":"gut microbiome deactivating Fluoropyrimidine"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhh4/","tags":null,"title":"H4"},{"categories":null,"contents":"controller POMDP policies with FST. Previous papers had exponential blowups.\nSuccessor function is deterministic.\npolicy iteration Use FST as policy representation:\ndeterministic controller POMDP evaluation for all \\((a,o,x)\\), add a now node x\u0026rsquo; and evaluate them to see if its needed then, we perform pruning everything that\u0026rsquo;s dominated (i.e. \\(U(x,s) \u0026lt; U(x\u0026rsquo;, s) \\forall s\\). i.e. we want to prune everything for which the expected utility of being in node \\(x\u0026rsquo;\\) dominates the expected utility of \\(x\\) for all \\(x\\). 
prune new nodes that are duplicates in terms of action and transitions When you are done, extract the policy: find the node that maximizes your\nheuristic search Optimize value function ran starting at the starting belief state, not for all states. Add nodes only when improvement is seen starting at the beginning.\ndeterministic controller POMDP evaluation Recall that controllers are defined over belief-states, and, unlike finite state controller evaluation, the transitions are not distributions; so, we have:\n\\begin{equation} U(x,s) = R(s,a(x)) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a(x)) \\sum_{o}^{} O(o|s\u0026rsquo;, a(x)) U(x\u0026rsquo;(x,a,o), s\u0026rsquo;) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e policies with FST. Previous papers had exponential blowups.\u003c/p\u003e\n\u003cp\u003eSuccessor function is \u003cstrong\u003edeterministic\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"policy-iteration--kbhpolicy-iteration-dot-md\"\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eUse FST as policy representation:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"#deterministic-controller-pomdp-evaluation\"\u003edeterministic controller POMDP evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efor all \\((a,o,x)\\), add a now node x\u0026rsquo; and evaluate them to see if its needed\u003c/li\u003e\n\u003cli\u003ethen, we perform pruning\n\u003cul\u003e\n\u003cli\u003eeverything that\u0026rsquo;s dominated (i.e. \\(U(x,s) \u0026lt; U(x\u0026rsquo;, s) \\forall s\\). i.e. 
we want to prune everything for which the expected utility of being in node \\(x\u0026rsquo;\\) dominates the expected utility of \\(x\\) for all \\(x\\).\u003c/li\u003e\n\u003cli\u003eprune new nodes that are duplicates in terms of action and transitions\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhen you are done, extract the policy: find the node that maximizes your\u003c/p\u003e\n\u003ch2 id=\"heuristic-search\"\u003eheuristic search\u003c/h2\u003e\n\u003cp\u003eOptimize value function ran starting at the starting belief state, not for all states. Add nodes only when improvement is seen starting at the beginning.\u003c/p\u003e\n\u003ch2 id=\"deterministic-controller-pomdp-evaluation\"\u003edeterministic controller POMDP evaluation\u003c/h2\u003e\n\u003cp\u003eRecall that controllers are defined over belief-states, and, unlike \u003ca href=\"/posts/kbhcontroller/#finite-state-controller-evaluation\"\u003efinite state controller evaluation\u003c/a\u003e, the transitions are not distributions; so, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(x,s) = R(s,a(x)) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a(x)) \\sum_{o}^{} O(o|s\u0026rsquo;, a(x)) U(x\u0026rsquo;(x,a,o), s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhansen/","tags":null,"title":"Hansen"},{"categories":null,"contents":"Haplmmune is a antibody platform technology developed by Akiko Koide (NYU) specific towards ?\n","html":"\u003cp\u003e\u003ca href=\"\"\u003eHaplmmune\u003c/a\u003e is a antibody platform technology developed by Akiko Koide (NYU) specific towards ?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhaplmmune/","tags":null,"title":"Haplmmune"},{"categories":null,"contents":"Harmonic Mean is the inverse of the inverse sum of arithmetic means, weighted.\nIt is near the lower of the two values, instead of the middle: meaning that in incentives both things being meaned to be 
higher. Hence why we use F measure for things.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhharmonic_mean/\"\u003eHarmonic Mean\u003c/a\u003e is the inverse of the inverse sum of arithmetic means, weighted.\u003c/p\u003e\n\u003cp\u003eIt is near the lower of the two values, instead of the middle: meaning that in incentives both things being meaned to be higher. Hence why we use F measure for things.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhharmonic_mean/","tags":null,"title":"harmonic mean"},{"categories":null,"contents":"Representational Harms System\u0026rsquo;s representation demeans a social group because it learns about built-in biaes of data\nHarms of Censorship Speech that mention minority group gets sensored because they mention minority groups.\nPerformance Disparities For instance, works worse on AAVE. Lack of data, labels, etc.\n","html":"\u003ch2 id=\"representational-harms\"\u003eRepresentational Harms\u003c/h2\u003e\n\u003cp\u003eSystem\u0026rsquo;s representation demeans a social group because it learns about built-in biaes of data\u003c/p\u003e\n\u003ch2 id=\"harms-of-censorship\"\u003eHarms of Censorship\u003c/h2\u003e\n\u003cp\u003eSpeech that mention minority group gets sensored because they mention minority groups.\u003c/p\u003e\n\u003ch2 id=\"performance-disparities\"\u003ePerformance Disparities\u003c/h2\u003e\n\u003cp\u003eFor instance, works worse on AAVE. Lack of data, labels, etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhharms_in_classification/","tags":null,"title":"Harms in Classification"},{"categories":null,"contents":"The heap is a self-managed area of the memory.\nmalloc void *malloc(size_t size); You should pass in the number of bytes; therefore, we need to pass in the number of bytes through something like malloc(sizeof(int)*len). The memory is not cleared out.\ncalloc void *calloc(size_t nmemb, size_t size); Put the number of elements into nmemb, and the size of them into size. 
Stamp zeros throughout.\nstrdup Deep copy a string. strlen, malloc, strcpy, retrun.\nfree void free(void *ptr); Frees whatever the pointer points to. The pointer itself (a stack variable), is not deleted and still points to the freed memory.\nrealloc void *realloc(void *ptr, size_t size); Changes the memory that ptr points to to size size. If there\u0026rsquo;s not enough space, realloc moves the memory content and frees the old one.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhheap/\"\u003eheap\u003c/a\u003e is a self-managed area of the \u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"malloc\"\u003emalloc\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003emalloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou should pass in the number of \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es; therefore, we need to pass in the number of bytes through something like \u003ccode\u003emalloc(sizeof(int)*len)\u003c/code\u003e. 
The memory is \u003cem\u003enot\u003c/em\u003e cleared out.\u003c/p\u003e\n\u003ch2 id=\"calloc\"\u003ecalloc\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ecalloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enmemb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ePut the \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e of elements into \u003ccode\u003enmemb\u003c/code\u003e, and the size of them into \u003ccode\u003esize\u003c/code\u003e. Stamp zeros throughout.\u003c/p\u003e\n\u003ch2 id=\"strdup\"\u003estrdup\u003c/h2\u003e\n\u003cp\u003eDeep copy a string. 
\u003ccode\u003estrlen\u003c/code\u003e, \u003ccode\u003emalloc\u003c/code\u003e, \u003ccode\u003estrcpy\u003c/code\u003e, retrun.\u003c/p\u003e\n\u003ch2 id=\"free\"\u003efree\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efree\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFrees whatever the pointer points to. 
The \u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e itself (a \u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e variable), is not deleted and still points to the freed memory.\u003c/p\u003e\n\u003ch2 id=\"realloc\"\u003erealloc\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003erealloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eChanges the \u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e that \u003ccode\u003eptr\u003c/code\u003e points to to size \u003ccode\u003esize\u003c/code\u003e. If there\u0026rsquo;s not enough space, \u003ccode\u003erealloc\u003c/code\u003e moves the memory content and frees the old one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhheap/","tags":null,"title":"heap"},{"categories":null,"contents":"Upon initialization, a large contiguous block of memory is initialized as a whole and called the \u0026ldquo;heap\u0026rdquo;. 
If we run out of it, we double the amount of memory being allocated.\nhandling arbitrary requests of mallocs/realloc and frees keep track of what\u0026rsquo;s been allocated and what\u0026rsquo;s free decide which segment of memory to use when fulfilling an allocating request respond quickly Return addresses that are 8-byte aligned (native types must be stored at a memory location which is a multiple of its size; otherwise bus error) Two main goals:\nmaximize throughput: we want to make number of requests per unit of time large (\u0026ldquo;we want the largest address in use to be as low as possible\u0026rdquo;) maximize utilization: we want to use memory economically without fragmenting it These two goals seems to be conflicting: it may take longer to plan out heap memory use for each request if we want to have perfect.\nDesign Questions how do we keep track of blocks that are freed\nhow do we choose which free block to satisfy an allocation request\nafter we choose a free block, how do we deal with the excess\ncan we avoid searching all blocks for the free blocks to reuse?\ncan we merge adjacent free blocks to keep large space available?\nCan we avoid always coping/moving data?\nBump Allocator Silliest Heap allocator. You maintain a pointer that\u0026rsquo;s the root of the memory being used, and each time you get memory we bump that pointer forward. Free does nothing.\nMaximum throughput (you like, just allocate heap, and free is very easy), but bad utilization.\nImplicit Free List Allocator In this implementation, the block structure implies what has been freed. We used to store this into a global data structure, but that\u0026rsquo;s bad because there is too much memory overhead. Instead, we place a 8-byte \u0026ldquo;header\u0026rdquo; in front of each block of memory containing whether its free or in use + its payload size. 
Through reading all the headers, we essentially maintain an implicit list of free nodes.\nNow, the 8 byte system for memory + free status doesn\u0026rsquo;t sound right. Recall memory addresses themselves are 8-bytes; however, all of our memory is 8-byte aligned. So, the first three bits should be 0.\nTherefore, we pack free status in the firs tbit, ignore the next two, do store the memory in the rest\n\u0026ldquo;which one do you alloc\u0026rdquo; First fit: start from the beginning, and search for the first free block you come across to serve the request Next fit: continuing search starting at the end point of your last malloc until you get the first free block, when you hit the end, go back around Best fit: examine every free block and find the one with the smallest size that fits Best fit minimizes fragmentation; next fit optimizes speed\nedge case if you run out of space in the end, with an awkward 8 byte in the end, you can either make a 0-byte block or just give the last bit of memory to the previous one.\nExplicit Free List Allocator Can we design an allocator to jump between free blocks. Naively doing this is bad.\nInstead, we can constrain each block to be at least size 16. 
And then, we will put the pointers to the prev/next free nodes in the next two 8-byte payload.\nFinally, we will keep track of a head node as a global variable\nMemory Coalescing During frees, we should try to eat the adjacent right free memory to create one large free block in order to coalescing free blocks together\nDuring realloc, there are three conditions by which you can retrun the same address:\nsize is growing, there\u0026rsquo;s free space to the right size is growing, but we added padding so we can use that size is shrinking (we have to ensure that we have at least 16 bytes in the shrink space, which means we need to be shrink by at least 24 bytes to actually do any shrinking) Memory providing rules at least 16 bytes (only if Explicit Free List Allocator) has to be multiple of 8 Explicit allocator Requirements must have headers to track information in implicit must have an explicit free list managed as a doubly linked list using the first 16 bytes of the free block must have a malloc implementation that searches the free block must coallesce the immediate right free blocks must do in-place realloc when possible; even if its not possible, we should still absorb adjacent right blocks or no longer absorb and must realloc ","html":"\u003cp\u003eUpon initialization, a large contiguous block of memory is initialized as a whole and called the \u0026ldquo;heap\u0026rdquo;. 
If we run out of it, we double the amount of memory being allocated.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehandling arbitrary requests of mallocs/realloc and frees\u003c/li\u003e\n\u003cli\u003ekeep track of what\u0026rsquo;s been allocated and what\u0026rsquo;s free\u003c/li\u003e\n\u003cli\u003edecide which segment of memory to use when fulfilling an allocating request\u003c/li\u003e\n\u003cli\u003erespond quickly\u003c/li\u003e\n\u003cli\u003eReturn addresses that are 8-byte aligned (native types must be stored at a memory location which is a multiple of its size; otherwise bus error)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTwo main goals:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emaximize throughput: we want to make number of requests per unit of time large (\u0026ldquo;we want the largest address in use to be as low as possible\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003emaximize utilization: we want to use memory economically without fragmenting it\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThese two goals seems to be conflicting: it may take longer to plan out heap memory use for each request if we want to have perfect.\u003c/p\u003e\n\u003ch2 id=\"design-questions\"\u003eDesign Questions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003ehow do we keep track of blocks that are freed\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ehow do we choose which free block to satisfy an allocation request\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eafter we choose a free block, how do we deal with the excess\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecan we avoid searching all blocks for the free blocks to reuse?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecan we merge adjacent free blocks to keep large space available?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCan we avoid always coping/moving data?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 
id=\"bump-allocator\"\u003eBump Allocator\u003c/h2\u003e\n\u003cp\u003eSilliest \u003ca href=\"/posts/kbhheap_allocator/\"\u003eHeap allocator\u003c/a\u003e. You maintain a pointer that\u0026rsquo;s the root of the memory being used, and each time you get memory we bump that pointer forward. Free does nothing.\u003c/p\u003e\n\u003cp\u003eMaximum throughput (you like, just allocate heap, and free is very easy), but bad utilization.\u003c/p\u003e\n\u003ch2 id=\"implicit-free-list-allocator\"\u003eImplicit Free List Allocator\u003c/h2\u003e\n\u003cp\u003eIn this implementation, the block structure implies what has been freed. We used to store this into a global data structure, but that\u0026rsquo;s bad because there is too much memory overhead. Instead, we place a 8-byte \u0026ldquo;header\u0026rdquo; in front of each block of memory containing whether its free or in use + its payload size. Through reading all the headers, we essentially maintain an implicit list of free nodes.\u003c/p\u003e\n\u003cp\u003eNow, the 8 byte system for memory + free status doesn\u0026rsquo;t sound right. Recall memory addresses themselves are 8-bytes; however, all of our memory is 8-byte aligned. 
So, the first three bits should be 0.\u003c/p\u003e\n\u003cp\u003eTherefore, we pack free status in the firs tbit, ignore the next two, do store the memory in the rest\u003c/p\u003e\n\u003ch3 id=\"which-one-do-you-alloc\"\u003e\u0026ldquo;which one do you alloc\u0026rdquo;\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eFirst fit\u003c/strong\u003e: start from the beginning, and search for the first free block you come across to serve the request\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eNext fit\u003c/strong\u003e: continuing search starting at the end point of your last malloc until you get the first free block, when you hit the end, go back around\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eBest fit\u003c/strong\u003e: examine every free block and find the one with the smallest size that fits\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBest fit minimizes fragmentation; next fit optimizes speed\u003c/p\u003e\n\u003ch3 id=\"edge-case\"\u003eedge case\u003c/h3\u003e\n\u003cp\u003eif you run out of space in the end, with an awkward 8 byte in the end, you can either make a 0-byte block or just give the last bit of memory to the previous one.\u003c/p\u003e\n\u003ch3 id=\"explicit-free-list-allocator\"\u003eExplicit Free List Allocator\u003c/h3\u003e\n\u003cp\u003eCan we design an allocator to jump between free blocks. Naively doing this is bad.\u003c/p\u003e\n\u003cp\u003eInstead, we can constrain each block to be at least size 16. 
And then, we will put the pointers to the prev/next free nodes in the next two 8-byte payload.\u003c/p\u003e\n\u003cp\u003eFinally, we will keep track of a head node as a global variable\u003c/p\u003e\n\u003ch2 id=\"memory-coalescing\"\u003eMemory Coalescing\u003c/h2\u003e\n\u003cp\u003eDuring frees, we should try to eat the adjacent \u003cstrong\u003eright\u003c/strong\u003e free memory to create one large free block in order to coalescing free blocks together\u003c/p\u003e\n\u003cp\u003eDuring \u003ca href=\"/posts/kbhheap/#realloc\"\u003erealloc\u003c/a\u003e, there are three conditions by which you can retrun the same address:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esize is growing, there\u0026rsquo;s free space to the right\u003c/li\u003e\n\u003cli\u003esize is growing, but we added padding so we can use that\u003c/li\u003e\n\u003cli\u003esize is shrinking (we have to ensure that we have at least 16 bytes in the shrink space, which means we need to be shrink by at least 24 bytes to actually do any shrinking)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"memory-providing-rules\"\u003eMemory providing rules\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eat least 16 bytes (only if \u003ca href=\"#explicit-free-list-allocator\"\u003eExplicit Free List Allocator\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003ehas to be multiple of 8\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"explicit-allocator-requirements\"\u003eExplicit allocator Requirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emust have headers to track information in implicit\u003c/li\u003e\n\u003cli\u003emust have an explicit free list managed as a doubly linked list using the first 16 bytes of the free block\u003c/li\u003e\n\u003cli\u003emust have a malloc implementation that searches the free block\u003c/li\u003e\n\u003cli\u003emust coallesce the immediate right free blocks\u003c/li\u003e\n\u003cli\u003emust do in-place realloc when possible; even if its not possible, we should still absorb 
adjacent right blocks or no longer absorb and must realloc\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhheap_allocator/","tags":null,"title":"Heap allocator"},{"categories":null,"contents":"see also two-dimensional heat equation the following relates to 1d\nheat distributes by \u0026ldquo;diffusing\u0026rdquo;; this is heat \\(u\\) diffusing across a plate\n\\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} \\end{equation}\nwe have, with Dirichlet Conditions:\n\\begin{equation} u_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\sin \\qty( \\frac{k \\pi x}{l}) \\end{equation}\nand with Neumann Conditions:\n\\begin{equation} u_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l}) \\end{equation}\nwith infinite boundaries:\n\\begin{equation} U(t,x) =\\frac{1}{\\sqrt{4 \\pi \\alpha t}} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y} \\end{equation}\ngeneral system:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\lambda A(t) \\\\ B\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x) \\end{cases} \\end{equation}\nRemoving a constant Consider a function:\n\\begin{equation} t = c \\tau \\end{equation}\nyou can remove the constant by finanglisng because the constant drops out when scaled (i.e. 
you can just scale your results back TODO check this).\ndamping damped heat equation\nSolving Heat Equation Consider the one dimensional heat equation:\n\\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} \\end{equation}\n\u0026ldquo;well posed-ness\u0026rdquo; of to this problem requires two sets of initial conditions: one \u0026ldquo;boundary condition\u0026rdquo;\nInitial Condition Because the expression is linear by time, we need one initial condition; let\u0026rsquo;s say its some function in \\(x\\):\n\\begin{equation} f_{0}(x) \\end{equation}\nSolving Let\u0026rsquo;s make an educated guess:\n\\begin{equation} u(t,x) = A(t) B(x) \\end{equation}\nConsider:\n\\begin{equation} \\pdv{u}{t} = A\u0026rsquo;(t)B(x) \\end{equation}\n\\begin{equation} \\pdv[2]{u}{x} = A(t) B\u0026rsquo;\u0026rsquo;(x) \\end{equation}\nThis results in:\n\\begin{equation} A\u0026rsquo;(t) B(x) = A(t) B\u0026rsquo;\u0026rsquo;(X) \\end{equation}\nmeaning, we can rearrange and integrate:\n\\begin{equation} \\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} \\end{equation}\nYou will note that taking a derivative by \\(t\\) on one side tells us that the right side is \\(0\\), and taking derivative of \\(x\\) on the other results in left side is \\(0\\). This tell us that this function is constant in both \\(t\\) and \\(x\\). 
Meaning:\n\\begin{equation} \\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} = \\lambda \\end{equation}\nThis results in a renewed system:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\lambda A(t) \\\\ B\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x) \\end{cases} \\end{equation}\nSolving using Dirichlet Conditions Finding \\(\\lambda\\) using Boundary Conditions\nNow, recall from our system:\n\\begin{equation} B\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x) \\end{equation}\nIts solutions are\n\\begin{equation} B(x) = c_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x} \\end{equation}\nRecall Dirichlet Conditions:\n\\begin{equation} u(t,0) = u(t, l)= 0 \\end{equation}\nThis tells us that \\(B(0) = 0\\), \\(B(l) = 0\\).\nAt \\(B(0)\\), this gives us that \\(c_1 + c_2 = 0\\), meaning \\(c_2 = -c_1\\)\nAt \\(B(l) = 0 = c_1 \\qty( e^{\\sqrt{\\lambda}l} - e^{-\\sqrt{\\lambda}l})\\). Dividing \\(c_1\\) to both sides, we obtain \\(e^{2\\sqrt{\\lambda} l} = 1\\).\nWe finally can obtain \\(\\lambda\\). One obvious answer is \\(\\lambda = 0\\). But, there are other fun things we can do:\nAside:\nRecall, if we desire\n\\begin{equation} e^{i\\theta} = \\cos \\theta + i \\sin \\theta = 1 \\end{equation}\nThis gives:\n\\begin{equation} \\theta = 2\\pi k \\end{equation}\nTherefore, recall that we obtained \\(e^{2\\sqrt{\\lambda}l}\\), we obtained:\n\\begin{equation} 2\\sqrt{\\lambda}l = 2\\pi k i \\end{equation}\nSolving for \\(\\lambda\\), we finally get solutions:\n\\begin{equation} \\lambda_{k} = \\frac{-k^{2}\\pi^{2}}{l^{2}} \\end{equation}\nfor \\(k = 0, 1, 2, 3\\); this condition called \u0026ldquo;quantization\u0026rdquo;\nSolving Again\nNow that we know \\(\\lambda\\), we can say:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\frac{-k^{2}\\pi^{2}}{l^{2}} A(t) \\\\ B\u0026rsquo;\u0026rsquo;(x) = \\frac{-k^{2}\\pi^{2}}{l^{2}} B(x) \\end{cases} \\end{equation}\nAnd then we can proceed to solve everything again. 
[a little lost, but in theory \\(\\cos(x)\\) drops out after solving].\nThis Gives us eventually:\n\\begin{equation} A(t) = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t} \\end{equation}\nand\n\\begin{equation} B(x) = \\sin \\frac{k\\pi x}{l} \\end{equation}\nsin Recall that \\(U = AB\\), this means, with all generality:\n\\begin{equation} u_{k} = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t}\\sin \\frac{k\\pi x}{l} \\end{equation}\nInitial Conditions\nSuppose we have initial condition:\n\\begin{equation} f_{0}(x) = \\sum a_{n} \\sin \\qty( \\frac{k\\pi x}{l}) \\end{equation}\nbecause the PDE is linear, we obtain:\n\\begin{equation} u_{k}(t,x) = \\sum a_{k} e^{-\\frac{k^{2}\\pi^{2}}{l^{2}}t} \\sin \\qty( \\frac{k \\pi x}{l}) \\end{equation}\nagain quantized over \\(k\\).\nThis is because each individual terms corresponds to a solution \\(a_{n} \\sin \\qty(\\frac{k\\pi x}{l})\\), and at boundary condition \\(f_{0}(x)\\), the left term of the general solution drops out, to obtain:\n\\begin{equation} a_{n}\\frac{k \\pi x}{l} = f_{0}(x) = u(0, x) = e^{0} \\sin \\qty(\\frac{k \\pi x}{l}) = a_{k} \\sin \\qty(\\frac{k \\pi x}{l}) \\end{equation}\nso we can just match terms.\nThe good news is that, because exists, any initial condition that\u0026rsquo;s a well-formed function can be written a sum of sines. This also converges really quickly (because \\(e^{-k^{2}}\\)). 
Further, given some \\(f_{0}(x)\\), we obtain a specific \\(k\\) and will obtain a specific solution.\nSolving using Neumann Conditions Go through the derivation, this gives:\n\\begin{equation} u_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l}) \\end{equation}\n","html":"\u003cp\u003e\u003cstrong\u003esee also \u003ca href=\"/posts/kbhtwo_dimensional_heat_equation/\"\u003etwo-dimensional heat equation\u003c/a\u003e\u003c/strong\u003e the following relates to 1d\u003c/p\u003e\n\u003cp\u003eheat distributes by \u0026ldquo;diffusing\u0026rdquo;; this is heat \\(u\\) diffusing across a plate\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have, with \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\sin \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand with \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith infinite boundaries:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) =\\frac{1}{\\sqrt{4 \\pi \\alpha t}} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egeneral system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\lambda A(t) \\\\\nB\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"removing-a-constant\"\u003eRemoving a constant\u003c/h2\u003e\n\u003cp\u003eConsider a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = c 
\\tau\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou can remove the constant by finanglisng because the constant drops out when scaled (i.e. you can just scale your results back TODO check this).\u003c/p\u003e\n\u003ch2 id=\"damping\"\u003edamping\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdamped_heat_equation/\"\u003edamped heat equation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"solving-heat-equation\"\u003eSolving Heat Equation\u003c/h2\u003e\n\u003cp\u003eConsider the one dimensional heat equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;well posed-ness\u0026rdquo; of to this problem requires two sets of initial conditions: one \u0026ldquo;boundary condition\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"initial-condition\"\u003eInitial Condition\u003c/h3\u003e\n\u003cp\u003eBecause the expression is linear by time, we need one initial condition; let\u0026rsquo;s say its some function in \\(x\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf_{0}(x)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"solving\"\u003eSolving\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s make an educated guess:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = A(t) B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = A\u0026rsquo;(t)B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{x} = A(t) B\u0026rsquo;\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis results in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;(t) B(x) = A(t) B\u0026rsquo;\u0026rsquo;(X)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, we can rearrange and integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that taking a derivative by \\(t\\) 
on one side tells us that the right side is \\(0\\), and taking derivative of \\(x\\) on the other results in left side is \\(0\\). This tell us that this function is constant in both \\(t\\) and \\(x\\). Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} = \\lambda\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis results in a renewed system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\lambda A(t) \\\\\nB\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"solving-using-dirichlet-conditions--kbhsu-math53-feb232024-dot-md\"\u003eSolving using \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eFinding \\(\\lambda\\) using Boundary Conditions\u003c/p\u003e\n\u003cp\u003eNow, recall from our system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts solutions are\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = c_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,0) = u(t, l)= 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that \\(B(0) = 0\\), \\(B(l) = 0\\).\u003c/p\u003e\n\u003cp\u003eAt \\(B(0)\\), this gives us that \\(c_1 + c_2 = 0\\), meaning \\(c_2 = -c_1\\)\u003c/p\u003e\n\u003cp\u003eAt \\(B(l) = 0 = c_1 \\qty( e^{\\sqrt{\\lambda}l} - e^{-\\sqrt{\\lambda}l})\\). Dividing \\(c_1\\) to both sides, we obtain \\(e^{2\\sqrt{\\lambda} l} = 1\\).\u003c/p\u003e\n\u003cp\u003eWe finally can obtain \\(\\lambda\\). 
One obvious answer is \\(\\lambda = 0\\). But, there are other fun things we can do:\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside:\u003c/p\u003e\n\u003cp\u003eRecall, if we desire\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{i\\theta} = \\cos \\theta + i \\sin \\theta = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta = 2\\pi k\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eTherefore, recall that we obtained \\(e^{2\\sqrt{\\lambda}l}\\), we obtained:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2\\sqrt{\\lambda}l = 2\\pi k i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving for \\(\\lambda\\), we finally get solutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{k} = \\frac{-k^{2}\\pi^{2}}{l^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(k = 0, 1, 2, 3\\); this condition called \u0026ldquo;quantization\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSolving Again\u003c/p\u003e\n\u003cp\u003eNow that we know \\(\\lambda\\), we can say:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\frac{-k^{2}\\pi^{2}}{l^{2}} A(t) \\\\\nB\u0026rsquo;\u0026rsquo;(x) = \\frac{-k^{2}\\pi^{2}}{l^{2}} B(x)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then we can proceed to solve everything again. 
[a little lost, but in theory \\(\\cos(x)\\) drops out after solving].\u003c/p\u003e\n\u003cp\u003eThis Gives us eventually:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA(t) = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = \\sin \\frac{k\\pi x}{l}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esin\nRecall that \\(U = AB\\), this means, with all generality:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k} = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t}\\sin \\frac{k\\pi x}{l}\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInitial Conditions\u003c/p\u003e\n\u003cp\u003eSuppose we have initial condition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf_{0}(x) = \\sum a_{n} \\sin \\qty( \\frac{k\\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the PDE is linear, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum a_{k} e^{-\\frac{k^{2}\\pi^{2}}{l^{2}}t} \\sin \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eagain quantized over \\(k\\).\u003c/p\u003e\n\u003cp\u003eThis is because each individual terms corresponds to a solution \\(a_{n} \\sin \\qty(\\frac{k\\pi x}{l})\\), and at boundary condition \\(f_{0}(x)\\), the left term of the general solution drops out, to obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n}\\frac{k \\pi x}{l} = f_{0}(x) = u(0, x) = e^{0} \\sin \\qty(\\frac{k \\pi x}{l}) = a_{k} \\sin \\qty(\\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso we can just match terms.\u003c/p\u003e\n\u003cp\u003eThe good news is that, because exists, any initial condition that\u0026rsquo;s a well-formed function can be written a sum of sines. This also converges really quickly (because \\(e^{-k^{2}}\\)). 
Further, given some \\(f_{0}(x)\\), we obtain a specific \\(k\\) and will obtain a specific solution.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"solving-using-neumann-conditions--kbhsu-math53-feb232024-dot-md\"\u003eSolving using \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eGo through the derivation, this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhheat_equation/","tags":null,"title":"Heat Equation"},{"categories":null,"contents":"Hello Internet is a podcast hosted by Brady Haran and CGP Grey.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhello_internet/\"\u003eHello Internet\u003c/a\u003e is a podcast hosted by Brady Haran and CGP Grey.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhello_internet/","tags":null,"title":"Hello Internet"},{"categories":null,"contents":"Herber Hoover is an American president.\nHerber Hoover\u0026rsquo;s response to the Great Depression Hoover\u0026rsquo;s Programs: too little, too late Makes business pledge to maintain wages, tax cuts, Smoot-halwey Tariff, bank financial support Builds Golden Gate Bridge and the Hoover Dam Rejects the idea of the direct federal relief, which is against FDR\u0026rsquo;s thoughts ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e is an American president.\u003c/p\u003e\n\u003ch2 id=\"herber-hoover--kbhherber-hoover-dot-md--s-response-to-the-great-depression--kbhgreat-depression-dot-md\"\u003e\u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e\u0026rsquo;s response to the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHoover\u0026rsquo;s Programs: 
too little, too late\u003c/li\u003e\n\u003cli\u003eMakes business pledge to maintain wages, tax cuts, Smoot-halwey Tariff, bank financial support\u003c/li\u003e\n\u003cli\u003eBuilds \u003ca href=\"/posts/kbhgolden_gate_bridge/\"\u003eGolden Gate Bridge\u003c/a\u003e and the \u003ca href=\"/posts/kbhhoover_dam/\"\u003eHoover Dam\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eRejects the idea of the direct federal relief, which is against \u003ca href=\"/posts/kbhfdr/\"\u003eFDR\u003c/a\u003e\u0026rsquo;s thoughts\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhherber_hoover/","tags":null,"title":"Herber Hoover"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhheteroskedastic/","tags":null,"title":"heteroskedasticity"},{"categories":null,"contents":" draw an initial state \\(q_1\\) from the initial state distribution \\(\\pi\\) For each state \\(q_{i}\\)\u0026hellip; Drew observe something \\(o_{t}\\) according to the action distribution of state \\(q_{i}\\) Use transition probability \\(a_{i,j}\\) to draw a next state \\(q_{j}\\) Isolated recognition: train a family of HMMs, one for each word or something. Then, given new data, perform scoring of the HMM onto the features.\ncomponents of HMMs scoring Given an observation \\(o_1, \u0026hellip;, o_{T}\\) and a model, we compute $P(O | λ)$\u0026mdash;the probability of a sequence given a model \\(\\lambda\\)\n\u0026ldquo;forward and backward algorithm\u0026rdquo;\ndecoding Given observations, find the state sequence \\(q1, \u0026hellip;, q_{T}\\) most likely to have generated\ntraining Given observations \\(O\\), find the model parameters \\(\\lambda\\) that maximize \\(P(O|\\lambda)\\), the Maximum Likelihood Parameter Learning.\ncontinuous-density HMM There are some HMMs that blend the discrete timestamps into Gaussian mixture models.\ncontinuous speech Scoring becomes hard because you have to go through and calculate every freaking word. 
THerefore:\n\\begin{equation} P(W|O) = \\frac{P(O|W) P(W)}{P(O)} \\end{equation}\nTherefore, we really desire:\n\\begin{equation} \\arg\\max_{w} P(O|W) P(W) \\end{equation}\n","html":"\u003col\u003e\n\u003cli\u003edraw an initial state \\(q_1\\) from the initial state distribution \\(\\pi\\)\u003c/li\u003e\n\u003cli\u003eFor each state \\(q_{i}\\)\u0026hellip;\n\u003col\u003e\n\u003cli\u003eDrew observe something \\(o_{t}\\) according to the action distribution of state \\(q_{i}\\)\u003c/li\u003e\n\u003cli\u003eUse transition probability \\(a_{i,j}\\) to draw a next state \\(q_{j}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIsolated recognition: train a family of \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003es, one for each word or something. Then, given new data, perform scoring of the \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003e onto the features.\u003c/p\u003e\n\u003ch2 id=\"components-of-hmm--kbhhidden-markov-model-dot-md--s\"\u003ecomponents of \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003es\u003c/h2\u003e\n\u003ch3 id=\"scoring\"\u003escoring\u003c/h3\u003e\n\u003cp\u003eGiven an observation \\(o_1, \u0026hellip;, o_{T}\\) and a model, we compute $P(O | λ)$\u0026mdash;the probability of a sequence given a model \\(\\lambda\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;forward and backward algorithm\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"decoding\"\u003edecoding\u003c/h3\u003e\n\u003cp\u003eGiven observations, find the state sequence \\(q1, \u0026hellip;, q_{T}\\) most likely to have generated\u003c/p\u003e\n\u003ch3 id=\"training\"\u003etraining\u003c/h3\u003e\n\u003cp\u003eGiven observations \\(O\\), find the model parameters \\(\\lambda\\) that maximize \\(P(O|\\lambda)\\), the \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 
id=\"continuous-density-hmm--kbhhidden-markov-model-dot-md\"\u003econtinuous-density \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThere are some \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003es that blend the discrete timestamps into \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-mixture-model\"\u003eGaussian mixture model\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"continuous-speech\"\u003econtinuous speech\u003c/h2\u003e\n\u003cp\u003eScoring becomes hard because you have to go through and calculate every freaking word. THerefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(W|O) = \\frac{P(O|W) P(W)}{P(O)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we really desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{w} P(O|W) P(W)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhidden_markov_model/","tags":null,"title":"Hidden Markov Model"},{"categories":null,"contents":"Misinformation can decline people\u0026rsquo;s intent to vaccinate.\nData VaxConcerns Taxonomy: sorting misinformation into a taxonomy which has multiple, hierarchical labels\nSo, you can classify a label in a bunch of places.\nApproaches Ask an LLM to do one of the following\u0026hellip;\nOne-Shot, Multi-Label Ignore hierchy, just go do multi label system\nmulti-pass, hierarchical label try to predict groups at a time, each pass has a label group as input and needs to produce whether the label is given + sublabels\none pass, hierarchical label Only predict labels on the lowest level, then extract higher level information\nbinary search predict highest level, then search down\n\u0026ldquo;Almost Few Shot\u0026rdquo; Trying to force a specific output format.\n","html":"\u003cp\u003eMisinformation can decline people\u0026rsquo;s intent to vaccinate.\u003c/p\u003e\n\u003ch2 id=\"data\"\u003eData\u003c/h2\u003e\n\u003cp\u003eVaxConcerns Taxonomy: 
sorting misinformation into a taxonomy which has multiple, hierarchical labels\u003c/p\u003e\n\u003cp\u003eSo, you can classify a label in a bunch of places.\u003c/p\u003e\n\u003ch2 id=\"approaches\"\u003eApproaches\u003c/h2\u003e\n\u003cp\u003eAsk an LLM to do one of the following\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"one-shot-multi-label\"\u003eOne-Shot, Multi-Label\u003c/h3\u003e\n\u003cp\u003eIgnore hierchy, just go do multi label system\u003c/p\u003e\n\u003ch3 id=\"multi-pass-hierarchical-label\"\u003emulti-pass, hierarchical label\u003c/h3\u003e\n\u003cp\u003etry to predict groups at a time, each pass has a label group as input and needs to produce whether the label is given + sublabels\u003c/p\u003e\n\u003ch3 id=\"one-pass-hierarchical-label\"\u003eone pass, hierarchical label\u003c/h3\u003e\n\u003cp\u003eOnly predict labels on the lowest level, then extract higher level information\u003c/p\u003e\n\u003ch3 id=\"binary-search\"\u003ebinary search\u003c/h3\u003e\n\u003cp\u003epredict highest level, then search down\u003c/p\u003e\n\u003ch2 id=\"almost-few-shot\"\u003e\u0026ldquo;Almost Few Shot\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eTrying to force a specific output format.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhierarchical_multi_label_clsf_for_vaccine/","tags":null,"title":"Hierarchical Multi-Label Clsf. 
for Vaccine"},{"categories":null,"contents":"a monomer is named active if it recruits its neighbors to make a polymer that can act as an enzyme to catalyze other monomers to become active as well\n","html":"\u003cp\u003ea monomer is named \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactive\u003c/a\u003e if it recruits its neighbors to make a polymer that can act as an enzyme to catalyze other monomers to become \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactive\u003c/a\u003e as well\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhigh_chemical_activity/","tags":null,"title":"high chemical activity"},{"categories":null,"contents":"If we are tele-operating a robot, we ideally want to minimize cost. We want to estimate a user\u0026rsquo;s goal via user inputs. Predict the most likely goal + assist for it.\n\u0026ldquo;find a cost function for which user input \\(u\\) is optimal\u0026rdquo;.\nsystem does not know the goal the user may not change their goal on a whim Hindsight Optimization To solve this, we use QMDP: \u0026ldquo;select the most optimal actions to estimating cost-to-go assuming full observability\u0026rdquo;.\n\\begin{equation} Q(b,a,u) = \\sum_{g}^{} b(g) Q_{g}(x,a,u) \\end{equation}\nResult users felt less in control with Hindsight Optimization, despite reaching the goal faster with this policy.\nChallenging the results between \u0026ldquo;task completion\u0026rdquo; vs. \u0026ldquo;user satisfaction\u0026rdquo;.\n","html":"\u003cp\u003eIf we are tele-operating a robot, we ideally want to minimize \u003cstrong\u003ecost\u003c/strong\u003e. We want to estimate a user\u0026rsquo;s \u003cstrong\u003egoal\u003c/strong\u003e via user inputs. 
Predict the most likely goal + assist for it.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;find a cost function for which user input \\(u\\) is optimal\u0026rdquo;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esystem does not know the goal\u003c/li\u003e\n\u003cli\u003ethe user may not change their goal on a whim\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"hindsight-optimization--kbhhindsight-optimization-dot-md\"\u003e\u003ca href=\"/posts/kbhhindsight_optimization/\"\u003eHindsight Optimization\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eTo solve this, we use \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e: \u0026ldquo;select the most optimal actions to estimating cost-to-go assuming full observability\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(b,a,u) = \\sum_{g}^{} b(g) Q_{g}(x,a,u)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"result\"\u003eResult\u003c/h2\u003e\n\u003cp\u003eusers felt less in control with \u003ca href=\"/posts/kbhhindsight_optimization/\"\u003eHindsight Optimization\u003c/a\u003e, despite reaching the goal faster with this policy.\u003c/p\u003e\n\u003cp\u003eChallenging the results between \u0026ldquo;task completion\u0026rdquo; vs. \u0026ldquo;user satisfaction\u0026rdquo;.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhindsight_optimization/","tags":null,"title":"Hindsight Optimization"},{"categories":null,"contents":"history economics utility theory \u0026mdash; psychology Pavlov\u0026rsquo;s salivating togs: biological reinforcement learning Alan Turing thought about this regarding neuroscience bio-simulated things computer science how \u0026ldquo;systems composed of matter can have properties of the mind engineering control theory maffs statistics operations research WWII logistics optimization allocating decisions, etc. 
simplex algorithm societal impact classes can help amplify intention (whether good or evil) data-driven methods ","html":"\u003ch2 id=\"history\"\u003ehistory\u003c/h2\u003e\n\u003ch3 id=\"economics\"\u003eeconomics\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eutility theory\u003c/strong\u003e \u0026mdash;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"psychology\"\u003epsychology\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePavlov\u0026rsquo;s salivating togs: biological \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAlan Turing thought about this regarding\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"neuroscience\"\u003eneuroscience\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ebio-simulated things\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"computer-science\"\u003ecomputer science\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ehow \u0026ldquo;systems composed of matter can have properties of the mind\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"engineering\"\u003eengineering\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003econtrol theory\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"maffs\"\u003emaffs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"operations-research\"\u003eoperations research\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eWWII\u003c/a\u003e logistics \u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eallocating decisions, etc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003esimplex algorithm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"societal-impact\"\u003esocietal impact\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eclasses can help amplify intention (whether good or evil)\u003c/li\u003e\n\u003cli\u003edata-driven 
methods\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_making_history/","tags":null,"title":"history and impact of decision making"},{"categories":null,"contents":" Reading Date Notes New Deal Flip-book \u0026lt;2022-03-24 Thu\u0026gt; New Deal Historian Flipbook Legacy of McCarthyism \u0026lt;2022-04-25 Mon\u0026gt; Legacy of McCarthyism Soviet Perspective on the Cold War \u0026lt;2022-04-29 Fri\u0026gt; Soviet Perspective on Cold War MLK and Malcom X \u0026lt;2022-05-10 Tue\u0026gt; MLK and Malcom X Reading Origins of American Conservatism \u0026lt;2022-05-27 Fri\u0026gt; Origins of American Conservatism ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eReading\u003c/th\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eNotes\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eNew Deal Flip-book\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-03-24 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eNew Deal Historian Flipbook\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLegacy of McCarthyism\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-25 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlegacy_of_mccarthyism/\"\u003eLegacy of McCarthyism\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSoviet Perspective on the Cold War\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-29 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsoviet_perspective_on_cold_war/\"\u003eSoviet Perspective on Cold 
War\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMLK and Malcom X\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-10 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmlk_and_malcom_x_reading/\"\u003eMLK and Malcom X Reading\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOrigins of American Conservatism\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-27 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrise_of_american_conservatism/\"\u003eOrigins of American Conservatism\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhistory_readings_index/","tags":["index"],"title":"History Readings Index"},{"categories":null,"contents":"Homestead Act is the legal colonization of the west that was a Contributor to the Guilded Age\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhomestead_act/\"\u003eHomestead Act\u003c/a\u003e is the legal colonization of the west that was a \u003ca href=\"/posts/kbhguilded_age/#contributors-to-the-id-a21cee5b-f0e2-4647-8e7d-e17d9c55ea42-guilded-age\"\u003eContributor to the Guilded Age\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhomestead_act/","tags":null,"title":"Homestead Act"},{"categories":null,"contents":"statistical context Homogeneity is a measure of how similar many things are.\nLinear Algebra context \u0026hellip;of linear maps homogeneity is a property of Linear Maps to describe the ability to \u0026ldquo;factor out\u0026rdquo; scalars\n\u0026hellip;of linear equations A homogenous linear equation is one which the constant term on the right of the equations are \\(0\\).\nhomogenous system with more 
variables than equations has nonzero solutions Proof: You can imagine the system as a matrix equation:\n\\begin{equation} Av = 0 \\end{equation}\nwhere, \\(v\\) is a list of input variables, and \\(A\\) is a coefficient matrix. Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\nNow, the input variables \\(v\\) of the above expression is in the null space of \\(A\\). The question of \u0026ldquo;whether is there non-zero solutions\u0026rdquo; can be rephrased as given \\(Av=0\\), does \\(v=0\\)?\u0026quot; Otherwise known as \u0026ldquo;is \\(null\\ A=\\{0\\}\\)?\u0026rdquo;: that is, \u0026ldquo;is \\(A\\) injective?\u0026rdquo;\nGiven the fact that map to smaller space is not injective, if \\(m \u0026lt;n\\), the map is not going to be injective. Therefore, we want \\(m\u0026lt;n\\), meaning we want more variables (\\(n\\)) than equations (\\(m\\)) to have non-zero solutions.\ninhomogenous system with more equations than variables has no solutions for an arbitrary set of constants Proof: You can imagine the system as a matrix equation:\n\\begin{equation} Av = C \\end{equation}\nwhere, \\(v\\) is a list of input variables, and \\(A\\) is a coefficient matrix. Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\nNow, a valid solution of the above expression means that \\(Av=C\\) for all \\(v\\) (as they are, of course, the variables.) If we want the expression to have a solution for all choices of \\(C\\), we desire that the range of \\(A\\) to equal to its codomain\u0026mdash;that we desire it to be surjective.\nGiven the fact that map to bigger space is not surjective, if \\(m \u0026gt; n\\), the map is not going to be surjective. 
Therefore, we want \\(m\u0026gt;n\\), meaning we want more equations (\\(m\\)) than variables (\\(n\\)) to have no solutions for arbitrary \\(C\\).\n","html":"\u003ch2 id=\"statistic--kbhstastistic-dot-md--al-context\"\u003e\u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003eal context\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003eHomogeneity\u003c/a\u003e is a measure of how similar many things are.\u003c/p\u003e\n\u003ch2 id=\"linear-algebra--kbhlinear-algebra-index-dot-md--context\"\u003e\u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eLinear Algebra\u003c/a\u003e context\u003c/h2\u003e\n\u003ch3 id=\"dot-dot-dot-of-linear-maps\"\u003e\u0026hellip;of linear maps\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e is a property of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es to describe the ability to \u0026ldquo;factor out\u0026rdquo; scalars\u003c/p\u003e\n\u003ch3 id=\"dot-dot-dot-of-linear-equations\"\u003e\u0026hellip;of linear equations\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogenous\u003c/a\u003e linear equation is one which the constant term on the right of the equations are \\(0\\).\u003c/p\u003e\n\u003ch4 id=\"homogenous--kbhhomogeneity-dot-md--system-with-more-variables-than-equations-has-nonzero-solutions\"\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogenous\u003c/a\u003e system with more variables than equations has nonzero solutions\u003c/h4\u003e\n\u003cp\u003eProof:\nYou can imagine the system as a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAv = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v\\) is a list of input variables, and \\(A\\) is a \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e. 
Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\u003c/p\u003e\n\u003cp\u003eNow, the input variables \\(v\\) of the above expression is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(A\\). The question of \u0026ldquo;whether is there non-zero solutions\u0026rdquo; can be rephrased as given \\(Av=0\\), does \\(v=0\\)?\u0026quot; Otherwise known as \u0026ldquo;is \\(null\\ A=\\{0\\}\\)?\u0026rdquo;: that is, \u0026ldquo;is \\(A\\) \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eGiven the fact that \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e, if \\(m \u0026lt;n\\), the map is not going to be \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e. Therefore, we want \\(m\u0026lt;n\\), meaning we want more variables (\\(n\\)) than equations (\\(m\\)) to have non-zero solutions.\u003c/p\u003e\n\u003ch4 id=\"in-homogenous--kbhhomogeneity-dot-md--system-with-more-equations-than-variables-has-no-solutions-for-an-arbitrary-set-of-constants\"\u003ein\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogenous\u003c/a\u003e system with more equations than variables has no solutions for an arbitrary set of constants\u003c/h4\u003e\n\u003cp\u003eProof:\nYou can imagine the system as a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAv = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v\\) is a list of input variables, and \\(A\\) is a \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e. 
Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\u003c/p\u003e\n\u003cp\u003eNow, a valid solution of the above expression means that \\(Av=C\\) for all \\(v\\) (as they are, of course, the variables.) If we want the expression to have a solution for all choices of \\(C\\), we desire that the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(A\\) to equal to its codomain\u0026mdash;that we desire it to be \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGiven the fact that \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e, if \\(m \u0026gt; n\\), the map is not going to be \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e. Therefore, we want \\(m\u0026gt;n\\), meaning we want more equations (\\(m\\)) than variables (\\(n\\)) to have no solutions for arbitrary \\(C\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhomogeneity/","tags":null,"title":"homogeneity"},{"categories":null,"contents":"the\n","html":"\u003cp\u003ethe\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhomset/","tags":null,"title":"homset"},{"categories":null,"contents":"Honoré\u0026rsquo;s Statistic is a statistical measure of vocabulary complexity, it is a test of Semantic Verbal Fluency and is commonly used for cognitive impairment detection.\nThe statistic is defined as:\n\\begin{equation} HS = 100 \\log \\frac{N}{1-\\frac{N_{uni}}{U}} \\end{equation}\nwhere, \\(N\\) is the total number of words, \\(U\\) the total number of distinct words, \\(N_{uni}\\) the number of total distinct words used only once.\nThe idea here is that a higher diversity of vocabulary shows higher Semantic Verbal Fluency.\n","html":"\u003cp\u003e\u003ca 
href=\"/posts/kbhhonore_s_statistic/\"\u003eHonoré\u0026rsquo;s Statistic\u003c/a\u003e is a statistical measure of vocabulary complexity, it is a test of \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e and is commonly used for cognitive impairment detection.\u003c/p\u003e\n\u003cp\u003eThe statistic is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nHS = 100 \\log \\frac{N}{1-\\frac{N_{uni}}{U}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(N\\) is the total number of words, \\(U\\) the total number of distinct words, \\(N_{uni}\\) the number of total distinct words used only once.\u003c/p\u003e\n\u003cp\u003eThe idea here is that a higher diversity of vocabulary shows higher \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhonore_s_statistic/","tags":null,"title":"Honoré's Statistic"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhhoover_dam/","tags":null,"title":"Hoover Dam"},{"categories":null,"contents":"Hoovervile are homeless encampments named after Herber Hoover, where homeless people band together after loosing jobs in the Great Depression.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhooverviles/\"\u003eHoovervile\u003c/a\u003e are homeless encampments named after \u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e, where homeless people band together after loosing jobs in the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhooverviles/","tags":null,"title":"Hoovervile"},{"categories":null,"contents":"\u0026lt;\u0026gt; Hux\n","html":"\u003cp\u003e\u0026lt;\u0026gt; Hux\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhopfield_networks/","tags":null,"title":"Hopfield 
Networks"},{"categories":null,"contents":"me\n","html":"\u003cp\u003eme\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhoujun_liu/","tags":null,"title":"Houjun Liu"},{"categories":null,"contents":" 👋 Howdy, I'm Houjun Liu! I\u0026rsquo;m a first-year undergraduate student in the Computer Science Department at Stanford University, advised by Prof. Mykel Kochenderfer. I\u0026rsquo;m interested in Natural Language Processing and Speech Language Sample Analysis, specifically, in making large models solve important problems through 1) building better tools for language and speech processing to democratize state-of-the-art research 2) designing improved algorithmic approaches to language model training + decoding to improve performance and 3) exploring their applications.\nWelcome to my academic homepage! This is my little homestead on the internet about my academic interests. Check out my projects below. If you want to know more about the rest of my life, feel free to visit my website!\nRecent goings on Feb. 26-27, 24' AAAI 2024! See y'all in Vancouver! Dec. 15, 23' Paper (NACC) Accepted by W3PHAI-24 Dec. 3, 23' Released TalkBank Utterance Model Jun. 22, 23' Paper (Batchalign) Published by JSLHR Shoot me an email at [firstname] at stanford dot edu, or, if you are around Stanford, grab dinner with me :)\nAbout I am a research engineer at the TalkBank Project at CMU under the supervision of Prof. Brian MacWhinney, where I develop better models and tools for clinical language sample analysis. I also work with the Stanford NLP Group, under direction of Prof. Chris Manning, on using neural models to solve semantic and syntax tagging tasks efficiently with Stanza. Finally, I am a research assistant with Prof. Xin Liu at UC Davis and at UC Davis Health, where I use transformer models to push our understanding of dementia.\nIn industry, I lead the development of Condution, simon, and am a managing partner at #!/Shabang. 
Previously, I worked as a consulting ML engineer at Dragonfruit AI under the AI Operations team.\nProjects UC Davis Health (2023) A Transformer Approach to Congnitive Impairment Classification and Prediction Liu, H., Weakley, A.M., Zhang, J., Liu, X. Talk@NACCIn Press@W3PHAI-24 at AAAI TalkBank (2023) Automation of Language Sample Analysis Liu, H., MacWhinney, B., Fromm, D., Lanzi, A. Journal Article@JSLHR Code@GitHub TalkBank (2023) DementiaBank: Theoretical Rationale, Protocol, and Illustrative Analyses Lanzi, A., Saylor, A.K., Fromm, D., Liu, H., MacWhinney, B., Cohen, M.L. Journal Article@AJSLP Nueva (2022) ConDef: Automated Context-Aware Lexicography Using Large Online Encyclopedias Liu, H., Sayyah, Z. Book Chapter@LNNSCode@GitHubTalk@SAI Preprint (2021) Towards Automated Psychotherapy via Language Modeling Liu, H. arXiv Teaching 2023- Teaching Assistant, Stanford Association for Computing Machinery (ACM) Chapter 2022-2023 Head TA (co-instructor and summer lecturer) at AIFS AIBridge, a program funded by UC Davis Food Science 2021-2023 Co-Developer, Research@Nueva, a high-school science education program Course Notes Some folks have mentioned that my course notes through classes at Stanford and before have been helpful. Feel free to browse my Stanford UG Courses Index if you want to check it out!\n© 2019-2024 Houjun Liu. Licensed CC BY-NC-SA 4.0. This website layout is inspired by the lovely homepage of Karel D\u0026rsquo;Oosterlick.\n","html":"\u003ch1 style=\"display:inline-block\"\u003e 👋 Howdy, I'm Houjun Liu! \u003c/h1\u003e\n\u003cp\u003eI\u0026rsquo;m a first-year undergraduate student in the Computer Science Department at \u003ca href=\"https://www.stanford.edu/\"\u003eStanford University\u003c/a\u003e, advised by Prof. \u003ca href=\"https://mykel.kochenderfer.com/\"\u003eMykel Kochenderfer\u003c/a\u003e. 
I\u0026rsquo;m interested in \u003cstrong\u003e\u003cstrong\u003eNatural Language Processing\u003c/strong\u003e\u003c/strong\u003e and \u003cstrong\u003e\u003cstrong\u003eSpeech Language Sample Analysis\u003c/strong\u003e\u003c/strong\u003e, specifically, in making large models solve important problems through 1) building better tools for language and speech processing to democratize state-of-the-art research 2) designing improved algorithmic approaches to language model training + decoding to improve performance and 3) exploring their applications.\u003c/p\u003e\n\u003cp\u003eWelcome to my academic homepage! This is my little homestead on the internet about my academic interests. Check out \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"#projects\"\u003emy projects\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e below. If you want to know more about the rest of my life, feel free to \u003ca href=\"https://www.jemoka.com/\"\u003evisit my website\u003c/a\u003e!\u003c/p\u003e\n\u003cdiv style=\"background-color: #f0f0f0; padding: 1px 10px; border-radius: 5px; margin-top: 20px\"\u003e\n\u003cdiv style=\"margin: 10px 0\"\u003e\n\u003cspan style=\"color: #262626; font-weight:500; color: #292929; opacity:0.6; font-size: 14px\"\u003eRecent goings on\u003c/span\u003e\n\u003cdiv style=\"margin-top: 10px; display: grid; column-gap: 20px; row-gap: 5px; grid-template-columns: 120px auto\"\u003e\n\u003cspan style=\"font-weight: 500\"\u003eFeb. 26-27, 24'\u003c/span\u003e \u003cspan\u003eAAAI 2024! See y'all in Vancouver!\u003c/span\u003e\n\u003cspan style=\"font-weight: 500\"\u003eDec. 15, 23'\u003c/span\u003e \u003cspan\u003ePaper (NACC) Accepted by W3PHAI-24\u003c/span\u003e\n\u003cspan style=\"font-weight: 500\"\u003eDec. 
3, 23'\u003c/span\u003e \u003cspan\u003eReleased \u003ca target=\"_top\" target=\"_top\" href=\"https://huggingface.co/talkbank/CHATUtterance-en\"\u003eTalkBank Utterance Model\u003c/a\u003e\u003c/span\u003e\n\u003cspan style=\"font-weight: 500\"\u003eJun. 22, 23'\u003c/span\u003e \u003cspan\u003ePaper (Batchalign) \u003ca target=\"_top\" href=\"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10555460/\" target=\"_top\"\u003ePublished\u003c/a\u003e by JSLHR\u003c/span\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n\u003cp\u003eShoot me an email at \u003ccode\u003e[firstname] at stanford dot edu\u003c/code\u003e, or, if you are around Stanford, \u003ca href=\"https://cal.com/houjun/dinner\"\u003egrab dinner with me\u003c/a\u003e :)\u003c/p\u003e\n\u003ch2 id=\"about\"\u003eAbout\u003c/h2\u003e\n\u003cp\u003eI am a research engineer at the \u003ca href=\"https://talkbank.org/\"\u003eTalkBank Project\u003c/a\u003e at CMU under the supervision of Prof. Brian MacWhinney, where I develop better \u003ca href=\"https://huggingface.co/talkbank/\"\u003emodels\u003c/a\u003e and \u003ca href=\"https://github.com/talkbank/batchalign2\"\u003etools\u003c/a\u003e for clinical language sample analysis. I also work with the Stanford NLP Group, under direction of Prof. Chris Manning, on using neural models to solve semantic and syntax tagging tasks efficiently with \u003ca href=\"https://github.com/stanfordnlp/stanza\"\u003eStanza\u003c/a\u003e. Finally, I am a research assistant with Prof. 
\u003ca href=\"https://xinliu.engineering.ucdavis.edu/\"\u003eXin Liu\u003c/a\u003e at UC Davis and at \u003ca href=\"https://health.ucdavis.edu/alzheimers/\"\u003eUC Davis Health\u003c/a\u003e, where I use transformer models to push our understanding of dementia.\u003c/p\u003e\n\u003cp\u003eIn industry, I lead the development of \u003ca href=\"https://www.condution.com/\"\u003eCondution\u003c/a\u003e, \u003ca href=\"https://simon.shabang.io/\"\u003esimon\u003c/a\u003e, and am a managing partner at \u003ca href=\"https://www.shabang.io/\"\u003e#!/Shabang\u003c/a\u003e. Previously, I worked as a consulting ML engineer at \u003ca href=\"https://www.dragonfruit.ai/\"\u003eDragonfruit AI\u003c/a\u003e under the AI Operations team.\u003c/p\u003e\n\u003ch2 id=\"projects\"\u003eProjects\u003c/h2\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eUC Davis Health (2023)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eA Transformer Approach to Congnitive Impairment Classification and Prediction\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e, Weakley, A.M., Zhang, J., Liu, X.\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://docs.google.com/presentation/d/1J5WUGUXbVlG5Fl4cQdu6FuNVFTPB2NTW/edit?usp=sharing\u0026ouid=112528726606349722398\u0026rtpof=true\u0026sd=true\"\u003eTalk@NACC\u003c/a\u003e\u003c/span\u003e\u003cspan class=\"tag\"\u003eIn Press@W3PHAI-24 at AAAI\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eTalkBank (2023)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eAutomation of Language Sample Analysis\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e, MacWhinney, B., Fromm, D., Lanzi, 
A.\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\n\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://pubs.asha.org/doi/10.1044/2023_JSLHR-22-00642\"\u003eJournal Article@JSLHR\u003c/a\u003e\u003c/span\u003e\n\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://github.com/talkbank/batchalign2\"\u003eCode@GitHub\u003c/a\u003e\u003c/span\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eTalkBank (2023)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eDementiaBank: Theoretical Rationale, Protocol, and Illustrative Analyses\u003c/div\u003e\n\u003cdiv\u003eLanzi, A., Saylor, A.K., Fromm, D., \u003cu\u003eLiu, H.\u003c/u\u003e, MacWhinney, B., Cohen, M.L. \u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://doi.org/10.1044/2022_AJSLP-22-00281\"\u003eJournal Article@AJSLP\u003c/a\u003e\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eNueva (2022)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eConDef: Automated Context-Aware Lexicography Using Large Online Encyclopedias\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e, Sayyah, Z.\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://doi.org/10.1007/978-3-031-10464-0_41\"\u003eBook Chapter@LNNS\u003c/a\u003e\u003c/span\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://github.com/jklsnt/dictembed\"\u003eCode@GitHub\u003c/a\u003e\u003c/span\u003e\u003cspan class=\"tag\"\u003eTalk@SAI\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 
15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003ePreprint (2021)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eTowards Automated Psychotherapy via Language Modeling\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://arxiv.org/abs/2104.10661\"\u003earXiv\u003c/a\u003e\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003ch2 id=\"teaching\"\u003eTeaching\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e2023- Teaching Assistant, Stanford Association for Computing Machinery (ACM) Chapter\u003c/li\u003e\n\u003cli\u003e2022-2023 Head TA (co-instructor and summer lecturer) at \u003ca href=\"https://www.jemoka.com/posts/kbhaibridge_course_website/\"\u003eAIFS AIBridge\u003c/a\u003e, a program funded by UC Davis Food Science\u003c/li\u003e\n\u003cli\u003e2021-2023 Co-Developer, Research@Nueva, a high-school science education program\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"course-notes\"\u003eCourse Notes\u003c/h2\u003e\n\u003cp\u003eSome folks have mentioned that my course notes through classes at Stanford and before have been helpful. Feel free to browse my \u003ca href=\"https://www.jemoka.com/posts/kbhstanford_courses_index/\"\u003eStanford UG Courses Index\u003c/a\u003e if you want to check it out!\u003c/p\u003e\n\u003cstyle\u003e\n.tag {\nfont-size: 13px;\nmargin: 0 10px;\nmargin-left: 0;\ncursor: default;\n}\n.tag \u003e a {\nborder: 0 !important;\n}\n.tag \u003e a:hover {\nborder-bottom: 0 !important;\n}\n\u003c/style\u003e\n\u003cp\u003e\u003cspan style=\"font-size: 10px\"\u003e© 2019-2024 Houjun Liu. Licensed CC BY-NC-SA 4.0. 
This website layout is inspired by the lovely homepage of \u003ca target=\"_top\" href=\"https://kareldo.github.io/research\"\u003eKarel D\u0026rsquo;Oosterlick\u003c/a\u003e.\u003c/span\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch_index/","tags":["index"],"title":"Houjun's Academic Home Page"},{"categories":null,"contents":"A reading: (Krugman 2009)\nReflection The discussion here of the conflict between \u0026ldquo;saltwater\u0026rdquo; and \u0026ldquo;freshwater\u0026rdquo; (Keynesian and Neoclassical) economists is very interesting when evaluated from the perspective of our recent impending recession.\nOne particular statement that resonated with me in the essay was the fact that a crisis simply \u0026ldquo;pushed the freshwater economists into further absurdity.\u0026rdquo; It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.\nAs the same time, the forcibly-correcting \u0026ldquo;fudge\u0026rdquo; inconsistencies of the Keynesian model is also a strong weakness which perhaps further exacerbated the freshwater economists\u0026rsquo; dissent into their models. 
Modeling human behavior has been consistently quite messy, so it is unsurprising that both neoclassical and Keynesian economists strayed away from those models.\nCircling back to the COVID-trigger economic downturn: we definitely see a push towards increased \u0026ldquo;absurdity\u0026rdquo; in terms of increased polarization in the US; but not only that, the deeply rooted idea of \u0026ldquo;pandemics don\u0026rsquo;t affect the States\u0026rdquo; or at least \u0026ldquo;the Feds/our supply chain have preparation for absurd events\u0026rdquo; is again shown to be false\u0026mdash;despite the Obaman re-discovery of Keynesian management earlier.\nThis all raises a question: under what circumstances is a tangibly \u0026ldquo;better\u0026rdquo; result going to surface and be accepted when one model is tangibly perfect yet wrong, the other requiring flawed corrections or unrigorous analysis. Must we reject one model completely before the other one can be used?\nI don\u0026rsquo;t believe behavioral economics, though providing a partial solution as Krugman outlines, is the be-and-end-all of macroeconomic models during a depression. All of the models which were theorized (bar pure neoclassicalist \u0026ldquo;perfect agents\u0026rdquo;) ostensibly do one thing: trying to \u0026ldquo;rationally\u0026rdquo; model the \u0026ldquo;irrational\u0026rdquo; behavior of market participants. I don\u0026rsquo;t believe that this is ultimately going to be feasible on a macroeconomic scale to create models that will last (sans repeated, empirical testing\u0026mdash;but there are not enough depressions to go around.) 
Perhaps, then, the basic Keynesian idea of simply creating fiscal corrections may very well be the best second thing.\nReading notes the main problem was the fact that nobody saw a catastrophie coming More important was the profession’s blindness to the very possibility of catastrophic failures in a market economy.\npeople either believed that the market would never go wrong or the Fed fixes everything free-market economies never go astray and those who believed that economies may stray now and then but that any major deviations from the path of prosperity could and would be corrected by the all-powerful Fed.\nThe economists thought the humans are perfectly rational, and the fact that they are not is what leads to failures Unfortunately, this romanticized and sanitized vision of the economy led most economists to ignore all the things that can go wrong. They turned a blind eye to the limitations of human rationality that often lead to bubbles and busts\nKeynsian Economics was not trying to entirely replace markets Keynes did not, despite what you may have heard, want the government to run the economy. 
\u0026hellip; He wanted to fix capitalism, not replace it.\nMilton Friedman lead the return to Neoclassical Economics The neoclassical revival was initially led by Milton Friedman of the University of Chicago, who asserted as early as 1953 that neoclassical economics works well enough as a description of the way the economy actually functions\nNeoclassical Economics with the monetarist theory under Milton asserted that keeping the money supply growing is all that needed Monetarists asserted, however, that a very limited, circumscribed form of government intervention — namely, instructing central banks to keep the nation’s money supply, the sum of cash in circulation and bank deposits, growing on a steady path — is all that’s required to prevent depressions.\nMilton Freedman believes that large-scale expansion would lead to inflation and high unimployment excessively expansionary policies, he predicted, would lead to a combination of inflation and high unemployment\nAnti-Keynesian seniments overtook Freedman\u0026rsquo;s original proposition Eventually, however, the anti-Keynesian counterrevolution went far beyond Friedman’s position, which came to seem relatively moderate compared with what his successors were saying.\n#question why is this obvious? 
for obvious reasons\nBecause the new economists beliefed that the market is right, the advise was for business to max stock price finance economists believed that we should put the capital development of the nation in the hands of what Keynes had called a “casino.”\nMajor stock events didn\u0026rsquo;t blunt the disregard to Keynesian policy These events, however, which Keynes would have considered evidence of the unreliability of markets, did little to blunt the force of a beautiful idea.\nNew \u0026ldquo;perfect\u0026rdquo; economic models earned large respect in industry mild-mannered business-school professors could and did become Wall Street rocket scientists, earning Wall Street paychecks.\nNew models often analyzed financial systems independently of their real-world worth Finance economists rarely asked the seemingly obvious (though not easily answered) question of whether asset prices made sense given real-world fundamentals like earnings. Instead, they asked only whether asset prices made sense given other asset prices\nMacro split into two factions: the Keynes recessionists or the anti-Keynesians macroeconomics has divided into two great factions: “saltwater” economists (mainly in coastal U.S. universities), who have a more or less Keynesian vision of what recessions are all about; and “freshwater” economists (mainly at inland schools), who consider that vision nonsense.\nFreshwater economists\u0026rsquo; theory: recessions were just people confused? Nobel laureate Robert Lucas, argued that recessions were caused by temporary confusion: workers and companies had trouble distinguishing overall changes in the level of prices\nUnder freshwater theories, unemployment is just people electing not to work due to unfavorable environment amplified by the rational response of workers, who voluntarily work more when the environment is favorable and less when it’s unfavorable. 
Unemployment is a deliberate decision by workers to take time off.\n\u0026hellip;\nPut baldly like that, this theory sounds foolish — was the Great Depression really the Great Vacation?\nThe new Keysians still kept more or less to non-dramatic thinking They tried to keep their deviations from neoclassical orthodoxy as limited as possible. This meant that there was no room in the prevailing models for such things as bubbles and banking-system collapse.\nNew Keysians believed entirely in the Fed, without need for large fiscal policy They believed that monetary policy, administered by the technocrats at the Fed, could provide whatever remedies the economy needed.\nPeople just thought that there can\u0026rsquo;t be a bubble in housing What’s striking, when you reread Greenspan’s assurances, is that they weren’t based on evidence — they were based on the a priori assertion that there simply can’t be a bubble in housing.\nObama\u0026rsquo;s economic policies are much more on the Keynes side Such Keynesian thinking underlies the Obama administration’s economic policies — and the freshwater economists are furious.\nFailure of neoclassicalist theory is that breaking Keynsian economical behavior requires perfect rationality, which is absurd if you start from the assumption that people are perfectly rational and markets are perfectly efficient, you have to conclude that unemployment is voluntary and recessions are desirable.\nEconomists thought that economics would have been perfect Economics, as a field, got in trouble because economists were seduced by the vision of a perfect, frictionless market system.\nBehavioral Economics Behavioral Economics is a study of economics which hinges on the irrationality of human behavior. 
Its an answer to both the Neoclassical Economics\u0026rsquo; poor assumption that humans and markets are perfect, but also Keynsian Economics\u0026rsquo;s increasingly large need for a random \u0026ldquo;fudge\u0026rdquo; to get their models working right.\npillars of Behavioral Economics \u0026ldquo;Many real-world investors bear little resemblance to the cool calculators of efficient-market theory: they’re all too subject to herd behavior, to bouts of irrational exuberance and unwarranted panic.\u0026rdquo; \u0026ldquo;even those who try to base their decisions on cool calculation often find that they can’t, that problems of trust, credibility and limited collateral force them to run with the herd.\u0026rdquo; Good arbitrageurs are just forced out of the economy in large downward spirals As a result, the smart money is forced out of the market, and prices may go into a downward spiral.\n","html":"\u003cp\u003eA reading: (\u003ca href=\"#citeproc_bib_item_1\"\u003eKrugman 2009\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"reflection\"\u003eReflection\u003c/h2\u003e\n\u003cp\u003eThe discussion here of the conflict between \u0026ldquo;saltwater\u0026rdquo; and \u0026ldquo;freshwater\u0026rdquo; (Keynesian and Neoclassical) economists is very interesting when evaluated from the perspective of our recent impending recession.\u003c/p\u003e\n\u003cp\u003eOne particular statement that resonated with me in the essay was the fact that a crisis simply \u0026ldquo;pushed the freshwater economists into further absurdity.\u0026rdquo; It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.\u003c/p\u003e\n\u003cp\u003eAs the same time, the forcibly-correcting \u0026ldquo;fudge\u0026rdquo; inconsistencies of the Keynesian model is also a strong weakness which perhaps further exacerbated the freshwater economists\u0026rsquo; dissent into their models. 
Modeling human behavior has been consistently quite messy, so it is unsurprising that both neoclassical and Keynesian economists strayed away from those models.\u003c/p\u003e\n\u003cp\u003eCircling back to the COVID-trigger economic downturn: we definitely see a push towards increased \u0026ldquo;absurdity\u0026rdquo; in terms of increased polarization in the US; but not only that, the deeply rooted idea of \u0026ldquo;pandemics don\u0026rsquo;t affect the States\u0026rdquo; or at least \u0026ldquo;the Feds/our supply chain have preparation for absurd events\u0026rdquo; is again shown to be false\u0026mdash;despite the Obaman re-discovery of Keynesian management earlier.\u003c/p\u003e\n\u003cp\u003eThis all raises a question: under what circumstances is a tangibly \u0026ldquo;better\u0026rdquo; result going to surface and be accepted when one model is tangibly perfect yet wrong, the other requiring flawed corrections or unrigorous analysis. Must we reject one model completely before the other one can be used?\u003c/p\u003e\n\u003cp\u003eI don\u0026rsquo;t believe behavioral economics, though providing a partial solution as Krugman outlines, is the be-and-end-all of macroeconomic models during a depression. All of the models which were theorized (bar pure neoclassicalist \u0026ldquo;perfect agents\u0026rdquo;) ostensibly do one thing: trying to \u0026ldquo;rationally\u0026rdquo; model the \u0026ldquo;irrational\u0026rdquo; behavior of market participants. I don\u0026rsquo;t believe that this is ultimately going to be feasible on a macroeconomic scale to create models that will last (sans repeated, empirical testing\u0026mdash;but there are not enough depressions to go around.) 
Perhaps, then, the basic Keynesian idea of simply creating fiscal corrections may very well be the best second thing.\u003c/p\u003e\n\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"the-main-problem-was-the-fact-that-nobody-saw-a-catastrophie-coming\"\u003ethe main problem was the fact that nobody saw a catastrophie coming\u003c/h3\u003e\n\u003cp\u003eMore important was the profession’s blindness to the very possibility of catastrophic failures in a market economy.\u003c/p\u003e\n\u003ch3 id=\"people-either-believed-that-the-market-would-never-go-wrong-or-the-fed-fixes-everything\"\u003epeople either believed that the market would never go wrong or the Fed fixes everything\u003c/h3\u003e\n\u003cp\u003efree-market economies never go astray and those who believed that economies may stray now and then but that any major deviations from the path of prosperity could and would be corrected by the all-powerful Fed.\u003c/p\u003e\n\u003ch3 id=\"the-economists-thought-the-humans-are-perfectly-rational-and-the-fact-that-they-are-not-is-what-leads-to-failures\"\u003eThe economists thought the humans are perfectly rational, and the fact that they are not is what leads to failures\u003c/h3\u003e\n\u003cp\u003eUnfortunately, this romanticized and sanitized vision of the economy led most economists to ignore all the things that can go wrong. They turned a blind eye to the limitations of human rationality that often lead to bubbles and busts\u003c/p\u003e\n\u003ch3 id=\"keynsian-economics--kbhkeynsian-politics-dot-md--was-not-trying-to-entirely-replace-markets\"\u003e\u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Economics\u003c/a\u003e was not trying to entirely replace markets\u003c/h3\u003e\n\u003cp\u003eKeynes did not, despite what you may have heard, want the government to run the economy. 
\u0026hellip; He wanted to fix capitalism, not replace it.\u003c/p\u003e\n\u003ch3 id=\"milton-friedman-lead-the-return-to-neoclassical-economics--kbhneoclassical-economics-dot-md\"\u003eMilton Friedman lead the return to \u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economics\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe neoclassical revival was initially led by Milton Friedman of the University of Chicago, who asserted as early as 1953 that neoclassical economics works well enough as a description of the way the economy actually functions\u003c/p\u003e\n\u003ch3 id=\"neoclassical-economics-with-the-monetarist-theory--kbhmonetarist-theory-dot-md--under-milton-asserted-that-keeping-the-money-supply-growing-is-all-that-needed\"\u003eNeoclassical Economics with the \u003ca href=\"/posts/kbhmonetarist_theory/\"\u003emonetarist theory\u003c/a\u003e under Milton asserted that keeping the money supply growing is all that needed\u003c/h3\u003e\n\u003cp\u003eMonetarists asserted, however, that a very limited, circumscribed form of government intervention — namely, instructing central banks to keep the nation’s money supply, the sum of cash in circulation and bank deposits, growing on a steady path — is all that’s required to prevent depressions.\u003c/p\u003e\n\u003ch3 id=\"milton-freedman--kbhmilton-freedman-dot-md--believes-that-large-scale-expansion-would-lead-to-inflation-and-high-unimployment\"\u003e\u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e believes that large-scale expansion would lead to inflation and high unimployment\u003c/h3\u003e\n\u003cp\u003eexcessively expansionary policies, he predicted, would lead to a combination of inflation and high unemployment\u003c/p\u003e\n\u003ch3 id=\"anti-keynesian-seniments-overtook-freedman--kbhmilton-freedman-dot-md--s-original-proposition\"\u003eAnti-Keynesian seniments overtook \u003ca href=\"/posts/kbhmilton_freedman/\"\u003eFreedman\u003c/a\u003e\u0026rsquo;s 
original proposition\u003c/h3\u003e\n\u003cp\u003eEventually, however, the anti-Keynesian counterrevolution went far beyond Friedman’s position, which came to seem relatively moderate compared with what his successors were saying.\u003c/p\u003e\n\u003ch3 id=\"question-why-is-this-obvious\"\u003e#question why is this obvious?\u003c/h3\u003e\n\u003cp\u003efor obvious reasons\u003c/p\u003e\n\u003ch3 id=\"because-the-new-economists-beliefed-that-the-market-is-right-the-advise-was-for-business-to-max-stock-price\"\u003eBecause the new economists beliefed that the market is right, the advise was for business to max stock price\u003c/h3\u003e\n\u003cp\u003efinance economists believed that we should put the capital development of the nation in the hands of what Keynes had called a “casino.”\u003c/p\u003e\n\u003ch3 id=\"major-stock-events-didn-t-blunt-the-disregard-to-keynesian-policy\"\u003eMajor stock events didn\u0026rsquo;t blunt the disregard to Keynesian policy\u003c/h3\u003e\n\u003cp\u003eThese events, however, which Keynes would have considered evidence of the unreliability of markets, did little to blunt the force of a beautiful idea.\u003c/p\u003e\n\u003ch3 id=\"new-perfect-economic-models-earned-large-respect-in-industry\"\u003eNew \u0026ldquo;perfect\u0026rdquo; economic models earned large respect in industry\u003c/h3\u003e\n\u003cp\u003emild-mannered business-school professors could and did become Wall Street rocket scientists, earning Wall Street paychecks.\u003c/p\u003e\n\u003ch3 id=\"new-models-often-analyzed-financial-systems-independently-of-their-real-world-worth\"\u003eNew models often analyzed financial systems independently of their real-world worth\u003c/h3\u003e\n\u003cp\u003eFinance economists rarely asked the seemingly obvious (though not easily answered) question of whether asset prices made sense given real-world fundamentals like earnings. 
Instead, they asked only whether asset prices made sense given other asset prices\u003c/p\u003e\n\u003ch3 id=\"macro-split-into-two-factions-the-keynes--kbhkeynsian-politics-dot-md--recessionists-or-the-anti-keynesians--kbhkeynsian-politics-dot-md\"\u003eMacro split into two factions: the \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynes\u003c/a\u003e recessionists or the anti-\u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynesians\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003emacroeconomics has divided into two great factions: “saltwater” economists (mainly in coastal U.S. universities), who have a more or less Keynesian vision of what recessions are all about; and “freshwater” economists (mainly at inland schools), who consider that vision nonsense.\u003c/p\u003e\n\u003ch3 id=\"freshwater-economists-theory-recessions-were-just-people-confused\"\u003eFreshwater economists\u0026rsquo; theory: recessions were just people confused?\u003c/h3\u003e\n\u003cp\u003eNobel laureate Robert Lucas, argued that recessions were caused by temporary confusion: workers and companies had trouble distinguishing overall changes in the level of prices\u003c/p\u003e\n\u003ch3 id=\"under-freshwater-theories-unemployment-is-just-people-electing-not-to-work-due-to-unfavorable-environment\"\u003eUnder freshwater theories, unemployment is just people electing not to work due to unfavorable environment\u003c/h3\u003e\n\u003cp\u003eamplified by the rational response of workers, who voluntarily work more when the environment is favorable and less when it’s unfavorable. 
Unemployment is a deliberate decision by workers to take time off.\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003ePut baldly like that, this theory sounds foolish — was the Great Depression really the Great\nVacation?\u003c/p\u003e\n\u003ch3 id=\"the-new-keysians-still-kept-more-or-less-to-non-dramatic-thinking\"\u003eThe new Keysians still kept more or less to non-dramatic thinking\u003c/h3\u003e\n\u003cp\u003eThey tried to keep their deviations from neoclassical orthodoxy as limited as possible. This meant that there was no room in the prevailing models for such things as bubbles and banking-system collapse.\u003c/p\u003e\n\u003ch3 id=\"new-keysians-believed-entirely-in-the-fed-without-need-for-large-fiscal-policy\"\u003eNew Keysians believed entirely in the Fed, without need for large fiscal policy\u003c/h3\u003e\n\u003cp\u003eThey believed that monetary policy, administered by the technocrats at the Fed, could provide whatever remedies the economy needed.\u003c/p\u003e\n\u003ch3 id=\"people-just-thought-that-there-can-t-be-a-bubble-in-housing\"\u003ePeople just thought that there can\u0026rsquo;t be a bubble in housing\u003c/h3\u003e\n\u003cp\u003eWhat’s striking, when you reread Greenspan’s assurances, is that they weren’t based on evidence — they were based on the a priori assertion that there simply can’t be a bubble in housing.\u003c/p\u003e\n\u003ch3 id=\"obama-s-economic-policies-are-much-more-on-the-keynes--kbhkeynsian-politics-dot-md--side\"\u003eObama\u0026rsquo;s economic policies are much more on the \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynes\u003c/a\u003e side\u003c/h3\u003e\n\u003cp\u003eSuch Keynesian thinking underlies the Obama administration’s economic policies — and the freshwater economists are furious.\u003c/p\u003e\n\u003ch3 id=\"failure-of-neoclassicalist-theory-is-that-breaking-keynsian-economical-behavior-requires-perfect-rationality-which-is-absurd\"\u003eFailure of neoclassicalist theory is that 
breaking Keynsian economical behavior requires perfect rationality, which is absurd\u003c/h3\u003e\n\u003cp\u003eif you start from the assumption that people are perfectly rational and markets are perfectly efficient, you have to conclude that unemployment is voluntary and recessions are desirable.\u003c/p\u003e\n\u003ch3 id=\"economists-thought-that-economics-would-have-been-perfect\"\u003eEconomists thought that economics would have been perfect\u003c/h3\u003e\n\u003cp\u003eEconomics, as a field, got in trouble because economists were seduced by the vision of a perfect, frictionless market system.\u003c/p\u003e\n\u003ch3 id=\"behavioral-economics\"\u003eBehavioral Economics\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#behavioral-economics\"\u003eBehavioral Economics\u003c/a\u003e is a study of economics which hinges on the irrationality of human behavior. Its an answer to both the \u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economics\u003c/a\u003e\u0026rsquo; poor assumption that humans and markets are perfect, but also \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Economics\u003c/a\u003e\u0026rsquo;s increasingly large need for a random \u0026ldquo;fudge\u0026rdquo; to get their models working right.\u003c/p\u003e\n\u003ch4 id=\"pillars-of-behavioral-economics\"\u003epillars of Behavioral Economics\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;Many real-world investors bear little resemblance to the cool calculators of efficient-market theory: they’re all too subject to herd behavior, to bouts of irrational exuberance and unwarranted panic.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;even those who try to base their decisions on cool calculation often find that they can’t, that problems of trust, credibility and limited collateral force them to run with the herd.\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 
id=\"good-arbitrageurs-are-just-forced-out-of-the-economy-in-large-downward-spirals\"\u003eGood arbitrageurs are just forced out of the economy in large downward spirals\u003c/h3\u003e\n\u003cp\u003eAs a result, the smart money is forced out of the market, and prices may go into a downward spiral.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhow_did_economists_get_it_so_wrong/","tags":null,"title":"How Did Economists Get It So Wrong?"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhhsbi/","tags":null,"title":"hsbi"},{"categories":null,"contents":"Improving PBVI without sacrificing quality.\nInitialization We first initialize HSVI with a set of alpha vectors \\(\\Gamma\\), representing the lower-bound, and a list of tuples of \\((b, U(b))\\) named \\(\\Upsilon\\), representing the upper-bound. We call the value functions they generate as \\(\\bar{V}\\) and \\(\\underline V\\).\nLower Bound Set of alpha vectors: best-action worst-state (HSVI1), blind lower bound (HSVI2)\nCalculating \\(\\underline{V}(b)\\) \\begin{equation} \\underline{V}_{\\Gamma} = \\max_{\\alpha} \\alpha^{\\top}b \\end{equation}\nUpper Bound Fast Informed Bound\nsolving fully-observable MDP Project \\(b\\) into the point-set Projected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection) Calculating \\(\\bar{V}(b)\\) Recall that though the lower-bound is given by alpha vectors, the upper bound is given in terms of a series of tuples \\((b, U(b)) \\in \\Upsilon\\).\nHSVI1: we figure the upper bound for any given \\(b\\) by projecting onto the convex hull formed by points on \\(\\Upsilon\\) HSVI2: approximate linear projection Update Begin with state \\(b = b_0\\).\nRepeat:\nat every step, we perform a local update for upper and lower bound using the current \\(b\\)\nthe lower bound is updated using PBVI Backup on \\(b, \\Gamma\\) the upper bound is updated using POMDP Bellman Update on \\(b, \\Upsilon\\), putting 
the new \\((b, u(b))\\) in the set \\(\\Upsilon\\). Then, we update our belief via the usual:\n\\begin{equation} b \\leftarrow update(b, a^{*}, o^{*}) \\end{equation}\nwhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\nIE-MAX Heuristic IE-MAX Heuristic is used to determine \\(a^{*}\\), whereby we choose the action such that:\n\\begin{equation} a^{*} = \\arg\\max_{a}Q^{(\\bar{V})}(b) \\end{equation}\nyes, we choose the next action which maximizes the upper bound of the utility we can get.\nweighted excess uncertainty weighted excess uncertainty is used to determine \\(o^{*}\\). Suppose we are depth \\(d\\) loops in the search tree (i.e. this is our $d$th chain), we define:\n\\begin{equation} \\text{excess}(b,t) = (\\bar{V}(b)-\\underline{V}(b)) - \\epsilon \\gamma^{-t} \\end{equation}\n\u0026ldquo;how far away are we from converging to a value uncertainty of no more than \\(\\epsilon\\), given we are depth \\(t\\) in?\nand, we choose the observation \\(o^{*}\\) such that:\n\\begin{equation} o^{*} = \\arg\\max_{o} \\qty[p(o|b,a^{*}) \\text{excess}(update(b,a,o), t+1)] \\end{equation}\nwhere,\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\n","html":"\u003cp\u003eImproving \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e without sacrificing quality.\u003c/p\u003e\n\u003ch2 id=\"initialization\"\u003eInitialization\u003c/h2\u003e\n\u003cp\u003eWe first initialize \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e with a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es \\(\\Gamma\\), representing the lower-bound, and a list of tuples of \\((b, U(b))\\) named \\(\\Upsilon\\), representing the upper-bound. 
We call the value functions they generate as \\(\\bar{V}\\) and \\(\\underline V\\).\u003c/p\u003e\n\u003ch3 id=\"lower-bound\"\u003eLower Bound\u003c/h3\u003e\n\u003cp\u003eSet of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es: \u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e (\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e1), \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e (\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e2)\u003c/p\u003e\n\u003ch4 id=\"calculating-underline-v--b\"\u003eCalculating \\(\\underline{V}(b)\\)\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\n\\underline{V}_{\\Gamma} = \\max_{\\alpha} \\alpha^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"upper-bound\"\u003eUpper Bound\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFast Informed Bound\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esolving fully-observable MDP\u003c/li\u003e\n\u003cli\u003eProject \\(b\\) into the point-set\u003c/li\u003e\n\u003cli\u003eProjected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"calculating-bar-v--b\"\u003eCalculating \\(\\bar{V}(b)\\)\u003c/h4\u003e\n\u003cp\u003eRecall that though the lower-bound is given by \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, the upper bound is given in terms of a series of tuples \\((b, U(b)) \\in \\Upsilon\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e1: we figure the upper bound for any given \\(b\\) by projecting onto the convex hull formed by points on \\(\\Upsilon\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e2: approximate linear projection\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"update\"\u003eUpdate\u003c/h2\u003e\n\u003cp\u003eBegin with state \\(b = b_0\\).\u003c/p\u003e\n\u003cp\u003eRepeat:\u003c/p\u003e\n\u003cp\u003eat every step, we perform a local update for upper and lower bound using the current \\(b\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe lower bound is \u003cstrong\u003eupdated\u003c/strong\u003e using \u003ca href=\"/posts/kbhpoint_based_value_iteration/#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e on \\(b, \\Gamma\\)\u003c/li\u003e\n\u003cli\u003ethe upper bound is \u003cstrong\u003eupdated\u003c/strong\u003e using \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Update\u003c/a\u003e on \\(b, \\Upsilon\\), putting the new \\((b, u(b))\\) in the set \\(\\Upsilon\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThen, we update our belief via the usual:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\leftarrow update(b, a^{*}, o^{*})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"ie-max-heuristic\"\u003eIE-MAX Heuristic\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#ie-max-heuristic\"\u003eIE-MAX Heuristic\u003c/a\u003e is used to determine \\(a^{*}\\), whereby we choose the action such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na^{*} = \\arg\\max_{a}Q^{(\\bar{V})}(b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyes, we choose the next action which maximizes the \u003cstrong\u003eupper bound\u003c/strong\u003e of the utility we can get.\u003c/p\u003e\n\u003ch3 id=\"weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/a\u003e is used to determine \\(o^{*}\\). Suppose we are depth \\(d\\) loops in the search tree (i.e. 
this is our $d$th chain), we define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\text{excess}(b,t) = (\\bar{V}(b)-\\underline{V}(b)) - \\epsilon \\gamma^{-t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;how far away are we from converging to a value uncertainty of no more than \\(\\epsilon\\), given we are depth \\(t\\) in?\u003c/p\u003e\n\u003cp\u003eand, we choose the observation \\(o^{*}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\no^{*} = \\arg\\max_{o} \\qty[p(o|b,a^{*}) \\text{excess}(update(b,a,o), t+1)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s)\n\\end{align}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhsvi/","tags":null,"title":"HSVI"},{"categories":null,"contents":"\u0026ldquo;Can we come up a policy that, if not fast, at least reach the goal!\u0026rdquo;\nBackground Stochastic Shortest-Path we are at an initial state, and we have a series of goal states, and we want to reach to the goal states.\nWe can solve this just by:\nvalue iteration simulate a trajectory and only updating reachable state: RTDP, LRTDP MBP Problem MDP + Goal States\n\\(S\\): set of states \\(A\\): actions \\(P(s\u0026rsquo;|s,a)\\): transition \\(C\\): reward \\(G\\): absorbing goal states Approach Combining LRTDP with anytime dynamics\nrun GPT (not the transformer, \u0026ldquo;General Planning Tool\u0026rdquo;, think LRTDP) exact solver use GPT policy for solved states or visited more than a certain threshold uses MBP policy for other states policy evaluation for convergence \u0026ldquo;use GPT solution as much as possible, and when we haven\u0026rsquo;t ever visited a place due to the search trajectories, we can use MBP to supplement the solution\u0026rdquo;\n","html":"\u003cp\u003e\u0026ldquo;Can we come up a policy that, if not 
fast, \u003cstrong\u003eat least reach the goal!\u003c/strong\u003e\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003ch3 id=\"stochastic-shortest-path\"\u003eStochastic Shortest-Path\u003c/h3\u003e\n\u003cp\u003ewe are at an initial state, and we have a series of goal states, and we want to reach to the goal states.\u003c/p\u003e\n\u003cp\u003eWe can solve this just by:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003esimulate a trajectory and only updating reachable state: \u003ca href=\"/posts/kbhltrdp/#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e, \u003ca href=\"/posts/kbhltrdp/\"\u003eLRTDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmbp/\"\u003eMBP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"problem\"\u003eProblem\u003c/h2\u003e\n\u003cp\u003eMDP + Goal States\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S\\): set of states\u003c/li\u003e\n\u003cli\u003e\\(A\\): actions\u003c/li\u003e\n\u003cli\u003e\\(P(s\u0026rsquo;|s,a)\\): transition\u003c/li\u003e\n\u003cli\u003e\\(C\\): reward\u003c/li\u003e\n\u003cli\u003e\\(G\\): absorbing \u003cstrong\u003egoal states\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003cp\u003eCombining \u003ca href=\"/posts/kbhltrdp/\"\u003eLRTDP\u003c/a\u003e with anytime dynamics\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003erun GPT (not the transformer, \u0026ldquo;General Planning Tool\u0026rdquo;, think \u003ca href=\"/posts/kbhltrdp/\"\u003eLRTDP\u003c/a\u003e) exact solver\u003c/li\u003e\n\u003cli\u003euse GPT policy for solved states or visited more than a certain threshold\u003c/li\u003e\n\u003cli\u003euses \u003ca href=\"/posts/kbhmbp/\"\u003eMBP\u003c/a\u003e policy for other states\u003c/li\u003e\n\u003cli\u003epolicy evaluation for 
convergence\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_09-27-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u0026ldquo;use GPT solution as much as possible, and when we haven\u0026rsquo;t ever visited a place due to the search trajectories, we can use \u003ca href=\"/posts/kbhmbp/\"\u003eMBP\u003c/a\u003e to supplement the solution\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhybplan/","tags":null,"title":"HybPlan"},{"categories":null,"contents":"hypothesis testing is the mechanism by which a hypothesis is tested statistically.\nThe core logic of hypothesis testing: have a metric, do tests, calculate probability that the outcome could have happened given the metric is true.\nExamples include\nt-test (for sample means) z-test (for sample proportions) chi-square test (for sample categories) Common to all hypothesis tests are the following terms.\nnull hypothesis A null hypothesis is a \u0026ldquo;no difference\u0026rdquo; hypothesis created as a part of hypothesis testing. It is usually stated as an equality.\nalternative hypothesis The alternative hypothesis is the \u0026ldquo;new news\u0026rdquo; hypothesis created as a part of hypothesis testing, whereby the confirmation would introduce new information.\np-value the p-value of a hypothesis test is the probability of the results acquired taking place given if the null hypothesis. That is:\n\\begin{equation} p(\\hat{p} | H_0\\ true) \\end{equation}\nTo figure out the above probability, you could either simulate the occurrence and look at a histogram (more common for AP Statistics anyways) or measure a few other statistics. 
We will talk about them later.\nTo use p-value as a hypothesis test, the sample has to meet the conditions for inference.\nSee also p-value from bootstrap\nType I Error A Type I Error takes place when you reject the null hypothesis during hypothesis testing even while its true: i.e., a false positive.\nThe probability of having a Type I Error is the significance level of the test.\nType II Error A Type II Error takes place when you accept the null hypothesis during hypothesis testing even while its false.\nThe probability of having a Type II Error is the conjugate of the power of a test.\nsignificance level significance level is the level by which one would accept a p-value is being indicative of the success of a test. We usually use the letter \\(\\alpha\\) to denote this.\npower (statistics) power is a statistic calculable during hypothesis testing. Its the probability of rejecting the null hypothesis given the null hypothesis is false. Also known as the conjugate of the Type II Error.\npower increases as significance level increases, but then the probability of a Type I Error increases as well.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e is the mechanism by which a hypothesis is tested statistically.\u003c/p\u003e\n\u003cp\u003eThe core logic of \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e: have a metric, do tests, calculate probability that the outcome could have happened given the metric is true.\u003c/p\u003e\n\u003cp\u003eExamples include\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e (for sample means)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhz_test/\"\u003ez-test\u003c/a\u003e (for sample proportions)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchi_square/#chi-square-test\"\u003echi-square test\u003c/a\u003e (for sample 
categories)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCommon to all \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis tests\u003c/a\u003e are the following terms.\u003c/p\u003e\n\u003ch2 id=\"null-hypothesis\"\u003enull hypothesis\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e is a \u0026ldquo;no difference\u0026rdquo; hypothesis created as a part of \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e. It is usually stated as an equality.\u003c/p\u003e\n\u003ch2 id=\"alternative-hypothesis\"\u003ealternative hypothesis\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#alternative-hypothesis\"\u003ealternative hypothesis\u003c/a\u003e is the \u0026ldquo;new news\u0026rdquo; hypothesis created as a part of \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e, whereby the confirmation would introduce new information.\u003c/p\u003e\n\u003ch2 id=\"p-value\"\u003ep-value\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"#p-value\"\u003ep-value\u003c/a\u003e of a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e is the probability of the results acquired taking place given if the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\hat{p} | H_0\\ true)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo figure out the above probability, you could either simulate the occurrence and look at a histogram (more common for \u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e anyways) or measure a few other statistics. 
We will talk about them later.\u003c/p\u003e\n\u003cp\u003eTo use \u003ca href=\"#p-value\"\u003ep-value\u003c/a\u003e as a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e, the sample has to meet the \u003ca href=\"/posts/kbhz_test/#conditions-for-inference--z-test\"\u003econditions for inference\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhboostrap/#p-value-from-bootstrap\"\u003ep-value from bootstrap\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"type-i-error\"\u003eType I Error\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#type-i-error\"\u003eType I Error\u003c/a\u003e takes place when you reject the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e during \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e even while its true: i.e., a false positive.\u003c/p\u003e\n\u003cp\u003eThe probability of having a \u003ca href=\"#type-i-error\"\u003eType I Error\u003c/a\u003e is the \u003ca href=\"#significance-level\"\u003esignificance level\u003c/a\u003e of the test.\u003c/p\u003e\n\u003ch2 id=\"type-ii-error\"\u003eType II Error\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#type-ii-error\"\u003eType II Error\u003c/a\u003e takes place when you accept the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e during \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e even while its false.\u003c/p\u003e\n\u003cp\u003eThe probability of having a \u003ca href=\"#type-ii-error\"\u003eType II Error\u003c/a\u003e is the conjugate of the \u003ca href=\"#power--statistics\"\u003epower\u003c/a\u003e of a test.\u003c/p\u003e\n\u003ch2 id=\"significance-level\"\u003esignificance level\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#significance-level\"\u003esignificance level\u003c/a\u003e is the level by which one would accept a \u003ca href=\"#p-value\"\u003ep-value\u003c/a\u003e is being indicative of the success of a 
test. We usually use the letter \\(\\alpha\\) to denote this.\u003c/p\u003e\n\u003ch2 id=\"power--statistics\"\u003epower (statistics)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#power--statistics\"\u003epower\u003c/a\u003e is a \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e calculable during \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e. Its the probability of rejecting the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e given the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e is false. Also known as the conjugate of the \u003ca href=\"#type-ii-error\"\u003eType II Error\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#power--statistics\"\u003epower\u003c/a\u003e increases as \u003ca href=\"#significance-level\"\u003esignificance level\u003c/a\u003e increases, but then the probability of a \u003ca href=\"#type-i-error\"\u003eType I Error\u003c/a\u003e increases as well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhypothesis_testing/","tags":null,"title":"hypothesis testing"},{"categories":null,"contents":"identities allows another number to retain its identity after an operation.\nWhat identities are applicable is group dependent. Identities are almost always object dependent.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e allows another \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e to retain its identity after an \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhat \u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e are applicable is \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e dependent. 
\u003ca href=\"/posts/kbhidentity/\"\u003eIdentities\u003c/a\u003e are almost always \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e dependent.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhidentity/","tags":null,"title":"identity"},{"categories":null,"contents":"\u0026lt;\u0026gt; NUS-HIST301 American History\nThe idea of identity politics is proposed, that politics became associated with sub-population of identities:\nBlack Pride Movement Chicano Activism The American Indian movement Termination of reservation system Pan-Indian Rights Alcatraz and Wounded Knee Occupations LGBT movement Stonewall GLF starts marching Asian American Yellow Peril Model minority movement NOW Femanism Acts The Equal Rights Act almost possible, and then Phyllis Schlafly happened Environmental Movement Silent Spring Cuyahoga River on fire Richard Nixon creates the EPA Earth Day ","html":"\u003cp\u003e\u0026lt;\u0026gt; \u003ca href=\"/posts/kbhnueva_courses_index/#nus-hist301-american-history\"\u003eNUS-HIST301 American History\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eThe idea of \u003ca href=\"/posts/kbhactivism_during_the_1970s/\"\u003eidentity politics\u003c/a\u003e is proposed, that politics became associated with sub-population of identities:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBlack Pride Movement\u003c/li\u003e\n\u003cli\u003eChicano Activism\u003c/li\u003e\n\u003cli\u003eThe American Indian movement\n\u003cul\u003e\n\u003cli\u003eTermination of reservation system\u003c/li\u003e\n\u003cli\u003ePan-Indian Rights\u003c/li\u003e\n\u003cli\u003eAlcatraz and Wounded Knee Occupations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eLGBT movement\n\u003cul\u003e\n\u003cli\u003eStonewall\u003c/li\u003e\n\u003cli\u003eGLF starts marching\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eAsian American\n\u003cul\u003e\n\u003cli\u003eYellow Peril\u003c/li\u003e\n\u003cli\u003eModel minority 
movement\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNOW Femanism Acts\n\u003cul\u003e\n\u003cli\u003eThe \u003ca href=\"/posts/kbhequal_rights_act/\"\u003eEqual Rights Act\u003c/a\u003e almost possible, and then Phyllis Schlafly happened\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eEnvironmental Movement\n\u003cul\u003e\n\u003cli\u003e\u003cem\u003eSilent Spring\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eCuyahoga River \u003cem\u003eon fire\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e creates the EPA\u003c/li\u003e\n\u003cli\u003eEarth Day\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactivism_during_the_1970s/","tags":null,"title":"identity politics"},{"categories":null,"contents":"to prove that something goes both ways: given \\(A\\Rightarrow B\\), and \\(A \\Leftarrow B\\), \\(A \\Leftrightarrow B\\).\n","html":"\u003cp\u003eto prove that something goes both ways: given \\(A\\Rightarrow B\\), and \\(A \\Leftarrow B\\), \\(A \\Leftrightarrow B\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhequivalence/","tags":null,"title":"if and only if"},{"categories":null,"contents":"v-structure whose parents are unconnected are immoral\nThis is immoral:\nThis is moral\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ev-structure\u003c/a\u003e whose parents are unconnected are immoral\u003c/p\u003e\n\u003cp\u003eThis is immoral:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-12_10-53-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis is moral\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-12_10-53-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhimmoral_v_structure/","tags":null,"title":"immoral 
v-structure"},{"categories":null,"contents":"Using a bunch of signals to create a 3d representation of the system\nfeatures cyro-EM x-ray tomo glynomics/libidomics genetics studying large-scale viron behavior Misc. Discoveries bydoing a bunch of MD\nproteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling when its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable *we don\u0026rsquo;t * \u0026ldquo;we are so biased by what we can see experimentally\u0026rdquo; ","html":"\u003cp\u003eUsing a bunch of signals to create a 3d representation of the system\u003c/p\u003e\n\u003ch2 id=\"features\"\u003efeatures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ex-ray\u003c/li\u003e\n\u003cli\u003etomo\u003c/li\u003e\n\u003cli\u003eglynomics/libidomics\u003c/li\u003e\n\u003cli\u003egenetics\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"studying-large-scale-viron-behavior\"\u003estudying large-scale viron behavior\u003c/h2\u003e\n\u003cp\u003eMisc. 
Discoveries bydoing a bunch of MD\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eproteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling\u003c/li\u003e\n\u003cli\u003ewhen its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable\u003c/li\u003e\n\u003cli\u003e*we don\u0026rsquo;t *\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;we are so biased by what we can see experimentally\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhimmunogen_design-1/","tags":null,"title":"Immunogen Design"},{"categories":null,"contents":"Using a bunch of signals to create a 3d representation of the system\nfeatures cyro-EM x-ray tomo glynomics/libidomics genetics studying large-scale viron surface protein behavior using MD \u0026ldquo;we are so biased by what we can see experimentally, so do MD\u0026rdquo;\n\u0026ldquo;breathing\u0026rdquo; motion proteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling when its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable so some antibodies jam into the open position \u0026ldquo;head tilting\u0026rdquo; motion when its tilted, there is an epitope that becomes exposed this is a site that\u0026rsquo;s possible for introduction of what\u0026rsquo;s needed overall architecture use conventional MD discover regions of instability (see above for examples) for possible binding surfaces bam inhibition! 
maybe at some point run a cyro-EM to experimentally run some\n","html":"\u003cp\u003eUsing a bunch of signals to create a 3d representation of the system\u003c/p\u003e\n\u003ch2 id=\"features\"\u003efeatures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ex-ray\u003c/li\u003e\n\u003cli\u003etomo\u003c/li\u003e\n\u003cli\u003eglynomics/libidomics\u003c/li\u003e\n\u003cli\u003egenetics\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"studying-large-scale-viron-surface-protein-behavior-using-md\"\u003estudying large-scale viron surface protein behavior using MD\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;we are so biased by what we can see experimentally, so do MD\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"breathing-motion\"\u003e\u0026ldquo;breathing\u0026rdquo; motion\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eproteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling\u003c/li\u003e\n\u003cli\u003ewhen its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable\u003c/li\u003e\n\u003cli\u003eso some antibodies jam into the open position\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"head-tilting-motion\"\u003e\u0026ldquo;head tilting\u0026rdquo; motion\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewhen its tilted, there is an \u003ca href=\"/posts/kbhepitophs/\"\u003eepitope\u003c/a\u003e that becomes exposed\u003c/li\u003e\n\u003cli\u003ethis is a site that\u0026rsquo;s possible for introduction of what\u0026rsquo;s needed\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"overall-architecture\"\u003eoverall architecture\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003euse conventional MD\u003c/li\u003e\n\u003cli\u003ediscover regions of instability (see above for examples) for possible binding surfaces\u003c/li\u003e\n\u003cli\u003ebam 
inhibition!\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003emaybe at some point run a \u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e to experimentally run some\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhimmunogen_design/","tags":null,"title":"Immunogen Design"},{"categories":null,"contents":"Imperialism: a policy of extending a country\u0026rsquo;s power and influence though diplomacy or military force.\nColonies Protectorate \u0026mdash; nations has own government legally controlled by outside power Sphere of influence U.S. Imperialism, why?\n\u0026ldquo;Desire for Military strength\u0026rdquo;: for a nation to be an international player, you have to have a strong navy \u0026ldquo;Thirst for new markets\u0026rdquo;: if we continue to expand, we will have more economic power \u0026ldquo;Belief in supernatural superiority\u0026rdquo;: trust that own culture is better Alaska \u0026mdash; \u0026ldquo;Seward\u0026rsquo;s Ice Box\u0026rdquo;, purchased from czarist Russia.\nHawaii \u0026mdash; Annexed 1898, a sugar company, to get around import taxes, asked the US to annex Hawaii.\nSpanish-American War \u0026mdash;- newspaper receive letter sent by Spanish minister to not protect Cuba. The US then proceeded to fight for the territories.\nFilipino rejected treaty of Paris, America fights. America burned food and crops to starve rebels, and built infrastructure earning elite support due to infrastructure.\n","html":"\u003cp\u003eImperialism: a policy of extending a country\u0026rsquo;s power and influence though diplomacy or military force.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eColonies\u003c/li\u003e\n\u003cli\u003eProtectorate \u0026mdash; nations has own government legally controlled by outside power\u003c/li\u003e\n\u003cli\u003eSphere of influence\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eU.S. 
Imperialism, why?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;Desire for Military strength\u0026rdquo;: for a nation to be an international player, you have to have a strong navy\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Thirst for new markets\u0026rdquo;: if we continue to expand, we will have more economic power\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Belief in supernatural superiority\u0026rdquo;: trust that own culture is better\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAlaska \u0026mdash; \u0026ldquo;Seward\u0026rsquo;s Ice Box\u0026rdquo;, purchased from czarist Russia.\u003c/p\u003e\n\u003cp\u003eHawaii \u0026mdash; Annexed 1898, a sugar company, to get around import taxes, asked the US to annex Hawaii.\u003c/p\u003e\n\u003cp\u003eSpanish-American War \u0026mdash;- newspaper receive letter sent by Spanish minister to not protect Cuba. The US then proceeded to fight for the territories.\u003c/p\u003e\n\u003cp\u003eFilipino rejected treaty of Paris, America fights. 
America burned food and crops to starve rebels, and built infrastructure earning elite support due to infrastructure.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhimperialism/","tags":null,"title":"Imperialism"},{"categories":null,"contents":"The Inbox is an Inbox for quick captures.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhinbox/\"\u003eInbox\u003c/a\u003e is an \u003ca href=\"/posts/kbhinbox/\"\u003eInbox\u003c/a\u003e for quick captures.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinbox/","tags":null,"title":"Inbox"},{"categories":null,"contents":"If an outcome can be from sets \\(A=m\\) or \\(B=n\\) with no overlaps, where \\(A \\cap B = \\emptyset\\), then, the total number of outcomes are \\(|A| + |B| = m+n\\)\nIf there are overlap:\n\\begin{equation} N = |A|+|B| - |A \\cap B| \\end{equation}\n","html":"\u003cp\u003eIf an outcome can be from sets \\(A=m\\) \u003cstrong\u003eor\u003c/strong\u003e \\(B=n\\) with no overlaps, where \\(A \\cap B = \\emptyset\\), then, the total number of outcomes are \\(|A| + |B| = m+n\\)\u003c/p\u003e\n\u003cp\u003eIf there are overlap:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN = |A|+|B| - |A \\cap B|\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsum_rule_of_counting/","tags":null,"title":"inclusion exclusion counting"},{"categories":null,"contents":"\\(n\\) random random variables are IID if they are\nindependent identically distributed (see below) \u0026ldquo;identically distributed\u0026rdquo; Consider \\(n\\) random variables:\n\\(X_i\\) all have the same PMF / PDF and therefore, all have the same expectation and variance central limit theorem when things are IID, you can use central limit theorem.\n","html":"\u003cp\u003e\\(n\\) random \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e if they 
are\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eidentically distributed (see below)\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003ch2 id=\"identically-distributed\"\u003e\u0026ldquo;identically distributed\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eConsider \\(n\\) \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X_i\\) all have the same \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e / \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand therefore, all have the same \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e and \u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"central-limit-theorem--kbhcentral-limit-theorem-dot-md\"\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ewhen things are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, you can use \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhindependently_and_identically_distributed/","tags":null,"title":"independently and identically distributed"},{"categories":null,"contents":"Here\u0026rsquo;s a list of all indexes:\nProjects Index Research Index Production Index About This should be reflected on a fancier way on my home page.\n","html":"\u003cp\u003eHere\u0026rsquo;s a list of all indexes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprojects/\"\u003eProjects Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhresearch_index/\"\u003eResearch 
Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduction_index/\"\u003eProduction Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhindex/\"\u003eAbout\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis should be reflected on a fancier way on \u003ca href=\"https://www.jemoka.com/\"\u003emy home page.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhindex_index/","tags":["index"],"title":"Index Index"},{"categories":null,"contents":"voltage across a inductor \\begin{equation} V = \\epsilon = -L \\dv{I}{t} \\end{equation}\nthis is kind of a formulation of faraday\u0026rsquo;s law.\n\\begin{equation} I(t) = \\frac{V_0}{R_1} (1-e^{\\frac{-t}{\\frac{L}{R}}}) \\end{equation}\nenergy stored in an inductor \\begin{equation} E = \\frac{1}{2} LI^{2} \\end{equation}\n","html":"\u003ch2 id=\"voltage-across-a-inductor--kbhinductors-in-circuits-dot-md\"\u003evoltage across a \u003ca href=\"/posts/kbhinductors_in_circuits/\"\u003einductor\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nV = \\epsilon = -L \\dv{I}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is kind of a formulation of \u003ca href=\"/posts/kbhfaraday_s_law/\"\u003efaraday\u0026rsquo;s law\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(t) = \\frac{V_0}{R_1} (1-e^{\\frac{-t}{\\frac{L}{R}}})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"energy-stored-in-an-inductor--kbhinductors-in-circuits-dot-md\"\u003eenergy stored in an \u003ca href=\"/posts/kbhinductors_in_circuits/\"\u003einductor\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{1}{2} LI^{2}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinductors_in_circuits/","tags":null,"title":"inductor"},{"categories":null,"contents":"inference is the act of updating the distribution of a random variable based on distribution of actually observed variables:\n\\begin{equation} P(X|Y) 
\\end{equation}\nwhere \\(Y\\) is observed, and we want to know how likely \\(X\\) would therefore be.\nWe call the set \\(X\\) the \u0026ldquo;query variables\u0026rdquo;, \\(Y\\) as \u0026ldquo;evidence varibales\u0026rdquo;, and anything that we didn\u0026rsquo;t use which connects the two variables as \u0026ldquo;hidden variables\u0026rdquo;.\nIf things are not in the right order of \\(X\\) and \\(Y\\), consider the Bayes rule.\nInference is Hard mix of continuous and discrete distribution results could be either a PMF or a PDF Example Suppose we\u0026rsquo;d like to know \\(P(b^{1} | d^{1}, c^{1})\\), where \\(b^{1}\\) is considered a query variable, and \\(c^{1}\\) is considered evidence varibales. The definition of the conditional probability gives us:\n\\begin{equation} p(b^{1} | d^{1}, c^{1}) = \\frac{p(b^{1}, d^{1}, c^{1})}{p(d^{1}, c^{1})} \\end{equation}\nTo compute \\(p(b^{1}d^{1}c^{1})\\), we first compute:\n\\begin{equation} p(b^{1}, d^{1}, c^{1}, E, S) \\end{equation}\nand then, use the law of total probability to get:\n\\begin{equation} p(b^{1}, d^{1}, c^{1}) = \\sum_{e=E} \\sum_{s=S} p(b^{1}, d^{1}, c^{1}, E, S) \\end{equation}\nyou will note this is very expensive computationally O(es) \u0026mdash; and if you have like a 1000 hidden variables you will die.\nWe therefore introduce sum-product elimination.\nsum-product elimination You will note the summation in the example above has a lot of interlocking for loops. 
You can \u0026ldquo;factor them out\u0026rdquo; via the sum-product elimination algorithm.\nSuppose you are interested in:\n\\begin{equation} P(b | d\u0026rsquo;, c\u0026rsquo;) \\end{equation}\nStep 1: write down factors Write down all factors associated with this computation:\n\\begin{equation} \\phi_{1}(B), \\phi_{2}(S), \\phi_{3}(E,B,S), \\phi_{4}(D,E), \\phi_{5}(C,E) \\end{equation}\nwe have evidence at two variables: \\(D, C\\).\nStep 2: performing factor conditioning for all evidence variables Therefore, \\(\\phi_{4}\\) and \\(\\phi_{5}\\) can be replaced by the factor conditioning as we observed \\(d, c\\), so we no longer need \\(d, c\\) as input because we know them:\nnow we have, to replace \\(\\phi_{4}, \\phi_{5}\\):\n\\begin{equation} \\phi_{6}(E), \\phi_{7}(E) \\end{equation}\nStep 3: using the law of total probability and factor product, get rid of hidden variables We then choose an ordering of the hidden variables and apply a factor product using the law of total probability to get rid of them:\nFirst get rid of any hidden variables Then use factor product to combine results \\begin{equation} \\phi_{8}(B,S) = \\sum_{E=e} \\phi_{3}(E,B,S) \\phi_{6}(e) \\phi_{7}(e) \\end{equation}\n\\begin{equation} \\phi_{9}(B) = \\sum_{S=s} \\phi_{2}(s) \\cdot \\phi_{8}(B,S) \\end{equation}\nWe now only have two factors left: \\(\\phi_{1}(B)\\phi_{9}(B)\\). 
We finally apply factor product again:\n\\begin{equation} \\phi_{10} (B) = \\phi_{9}(B) \\cdot \\phi_{1}(B) \\end{equation}\nApproximate Inference See Approximate Inference\nGaussian Inference See Inference for Gaussian Models\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e is the act of updating the distribution of a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e based on distribution of actually observed variables:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X|Y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(Y\\) is observed, and we want to know how likely \\(X\\) would therefore be.\u003c/p\u003e\n\u003cp\u003eWe call the set \\(X\\) the \u0026ldquo;query variables\u0026rdquo;, \\(Y\\) as \u0026ldquo;evidence varibales\u0026rdquo;, and anything that we didn\u0026rsquo;t use which connects the two variables as \u0026ldquo;hidden variables\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eIf things are not in the right order of \\(X\\) and \\(Y\\), consider the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"inference-is-hard\"\u003eInference is Hard\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emix of \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e and \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eresults could be either a \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e or a \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_09-52-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSuppose we\u0026rsquo;d like to know \\(P(b^{1} | d^{1}, c^{1})\\), where \\(b^{1}\\) is considered a 
query variable, and \\(c^{1}\\) is considered evidence varibales. The definition of the \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(b^{1} | d^{1}, c^{1}) = \\frac{p(b^{1}, d^{1}, c^{1})}{p(d^{1}, c^{1})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo compute \\(p(b^{1}d^{1}c^{1})\\), we first compute:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(b^{1}, d^{1}, c^{1}, E, S)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, use the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(b^{1}, d^{1}, c^{1}) = \\sum_{e=E} \\sum_{s=S} p(b^{1}, d^{1}, c^{1}, E, S)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note this is very expensive computationally O(es) \u0026mdash; and if you have like a 1000 hidden variables you will die.\u003c/p\u003e\n\u003cp\u003eWe therefore introduce \u003ca href=\"#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"sum-product-elimination\"\u003esum-product elimination\u003c/h2\u003e\n\u003cp\u003eYou will note the summation in the example above has a lot of interlocking for loops. 
You can \u0026ldquo;factor them out\u0026rdquo; via the \u003ca href=\"#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e algorithm.\u003c/p\u003e\n\u003cp\u003eSuppose you are interested in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(b | d\u0026rsquo;, c\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"step-1-write-down-factors\"\u003eStep 1: write down factors\u003c/h3\u003e\n\u003cp\u003eWrite down all \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es associated with this computation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{1}(B), \\phi_{2}(S), \\phi_{3}(E,B,S), \\phi_{4}(D,E), \\phi_{5}(C,E)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have evidence at two variables: \\(D, C\\).\u003c/p\u003e\n\u003ch3 id=\"step-2-performing-factor-conditioning--kbhfactor-dot-md--for-all-evidence-variables\"\u003eStep 2: performing \u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e for all evidence variables\u003c/h3\u003e\n\u003cp\u003eTherefore, \\(\\phi_{4}\\) and \\(\\phi_{5}\\) can be replaced by the \u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e as we observed \\(d, c\\), so we no longer need \\(d, c\\) as input because we know them:\u003c/p\u003e\n\u003cp\u003enow we have, to replace \\(\\phi_{4}, \\phi_{5}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{6}(E), \\phi_{7}(E)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"step-3-using-the-law-of-total-probability--kbhprobability-dot-md--and-factor-product--kbhfactor-dot-md--get-rid-of-hidden-variables\"\u003eStep 3: using the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e and \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e, get rid of hidden variables\u003c/h3\u003e\n\u003cp\u003eWe then choose an ordering of the \u003ca href=\"/posts/kbhinference/\"\u003ehidden 
variables\u003c/a\u003e and apply a \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e using the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to get rid of them:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFirst get rid of any hidden variables\u003c/li\u003e\n\u003cli\u003eThen use \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e to combine results\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{8}(B,S) = \\sum_{E=e} \\phi_{3}(E,B,S) \\phi_{6}(e) \\phi_{7}(e)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{9}(B) = \\sum_{S=s} \\phi_{2}(s) \\cdot \\phi_{8}(B,S)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now only have two \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es left: \\(\\phi_{1}(B)\\phi_{9}(B)\\). We finally apply \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{10} (B) = \\phi_{9}(B) \\cdot \\phi_{1}(B)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"approximate-inference\"\u003eApproximate Inference\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhapproximate_inference/\"\u003eApproximate Inference\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"gaussian-inference\"\u003eGaussian Inference\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhinference_for_gaussian_models/\"\u003eInference for Gaussian Models\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinference/","tags":null,"title":"inference"},{"categories":null,"contents":"If we know that \\(a,b\\) are both Gaussian distributions, then we have that:\n\\begin{equation} \\mqty[a \\\\ b] \\sim \\mathcal{N} \\qty(\\mqty[\\mu_{a} \\\\mu_{b}], \\mqty[A \u0026amp; C \\\\ C^{T} \u0026amp; B]) \\end{equation}\nwhereby:\n\\(A\\) is the covariance of each element of \\(A\\) \\(B\\) is 
the covariance of each element of \\(B\\) \\(C\\) is the covariance of \\(A\\) against \\(B\\) To perform inference:\n\\begin{equation} p(a|b) = \\mathcal{N}(a | \\mu_{a|B}, \\Sigma_{a|b}) \\end{equation}\nwherby:\n\\begin{equation} \\mu_{a|b} = \\mu_{a} + CB^{-1}(b-\\mu_{b}) \\end{equation}\n\\begin{equation} \\Sigma_{a|b} = A - CB^{-1}C^{T} \\end{equation}\nIts a closed form solution. Tada.\nWe know that \\(B\\) is positive semidefinite, and that its invertible, from the fact that its a covariance.\n","html":"\u003cp\u003eIf we know that \\(a,b\\) are both \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003es, then we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[a \\\\ b] \\sim \\mathcal{N} \\qty(\\mqty[\\mu_{a} \\\\mu_{b}], \\mqty[A \u0026amp; C \\\\ C^{T} \u0026amp; B])\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A\\) is the \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of each element of \\(A\\)\u003c/li\u003e\n\u003cli\u003e\\(B\\) is the \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of each element of \\(B\\)\u003c/li\u003e\n\u003cli\u003e\\(C\\) is the \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of \\(A\\) against \\(B\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTo perform inference:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(a|b) = \\mathcal{N}(a | \\mu_{a|B}, \\Sigma_{a|b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewherby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mu_{a|b} = \\mu_{a} + CB^{-1}(b-\\mu_{b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Sigma_{a|b} = A - CB^{-1}C^{T}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts a closed form solution. 
Tada.\u003c/p\u003e\n\u003cp\u003eWe know that \\(B\\) is positive semidefinite, and that its invertible, from the fact that its a \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinference_for_gaussian_models/","tags":null,"title":"Inference for Gaussian Models"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhinflectional_words/","tags":null,"title":"inflectional words"},{"categories":null,"contents":"Information Retrival is trying to find material within large collections which is unstructured which satisfies an information need (of structured info).\nUnstructured information has had a massive outburst after the millennium.\nIMPORTANTLY: evaluating Information Retrival is based on Precision/Recall/F on information need and not the query.\nFor ranked system, we can come up with a curve of precision-recall curve by selecting increasing \\(k\\), or mean average precision.\nBasic Terminology collection a set of documents\u0026mdash;could by static, or dynamically added\ngoal retrieve documents with information relevant to the user\u0026rsquo;s information need + to complete a task\ninformation need information need is the actual information that is needed by a search; this is usually translated into a search query, which is actually used to search.\nquery query is a computer accessible form of text which searches to answer an information need.\ninformation need: \u0026ldquo;info about removing mice without killing them\u0026rdquo; query: \u0026ldquo;trapping mouse alive\u0026rdquo; Stages of Interpolation user task =\u0026gt; info need: we may not be looking for the right info info need =\u0026gt; query: we may not be using the best methods to get the info we are looking for Motivation \u0026ldquo;what\u0026rsquo;s wrong with grepping?\u0026rdquo;\nwe cannot afford to do a linear search over web-scale data a \u0026ldquo;NOT\u0026rdquo; query is 
non-trivial no semantics we have no ranking, so we don\u0026rsquo;t know what\u0026rsquo;s the \u0026ldquo;best\u0026rdquo; document Ranked Approaches Ranked Information Retrieval\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e is trying to \u003cstrong\u003efind material\u003c/strong\u003e within \u003cstrong\u003elarge collections\u003c/strong\u003e which is \u003cstrong\u003eunstructured\u003c/strong\u003e which satisfies an \u003cstrong\u003einformation need\u003c/strong\u003e (of structured info).\u003c/p\u003e\n\u003cp\u003eUnstructured information has had a massive outburst after the millennium.\u003c/p\u003e\n\u003cp\u003eIMPORTANTLY: evaluating \u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e is based on Precision/Recall/F on \u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e and not the \u003ca href=\"#query\"\u003equery\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor ranked system, we can come up with a curve of precision-recall curve by selecting increasing \\(k\\), or \u003ca href=\"/posts/kbhmean_average_precision/\"\u003emean average precision\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"basic-terminology\"\u003eBasic Terminology\u003c/h2\u003e\n\u003ch3 id=\"collection\"\u003ecollection\u003c/h3\u003e\n\u003cp\u003ea set of documents\u0026mdash;could by static, or dynamically added\u003c/p\u003e\n\u003ch3 id=\"goal\"\u003egoal\u003c/h3\u003e\n\u003cp\u003eretrieve documents with information \u003cstrong\u003erelevant to the user\u0026rsquo;s information need\u003c/strong\u003e + to complete a \u003cstrong\u003etask\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"information-need\"\u003einformation need\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e is the actual information that is needed by a search; this is usually translated into a search \u003ca 
href=\"#query\"\u003equery\u003c/a\u003e, which is actually used to search.\u003c/p\u003e\n\u003ch3 id=\"query\"\u003equery\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#query\"\u003equery\u003c/a\u003e is a computer accessible form of text which searches to answer an \u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e: \u0026ldquo;info about removing mice without killing them\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#query\"\u003equery\u003c/a\u003e: \u0026ldquo;trapping mouse alive\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stages-of-interpolation\"\u003eStages of Interpolation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003euser task =\u0026gt; info need: we may not be looking for the right info\u003c/li\u003e\n\u003cli\u003einfo need =\u0026gt; query: we may not be using the best methods to get the info we are looking for\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s wrong with grepping?\u0026rdquo;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe cannot afford to do a linear search over web-scale data\u003c/li\u003e\n\u003cli\u003ea \u0026ldquo;NOT\u0026rdquo; query is non-trivial\u003c/li\u003e\n\u003cli\u003eno semantics\u003c/li\u003e\n\u003cli\u003ewe have no ranking, so we don\u0026rsquo;t know what\u0026rsquo;s the \u0026ldquo;best\u0026rdquo; document\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"ranked-approaches\"\u003eRanked Approaches\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinformation_retrival/","tags":null,"title":"Information 
Retrival"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhinformation_theory/","tags":null,"title":"information theory"},{"categories":null,"contents":"Information Units are unique entities mentioned during an utterance; for a sentence like \u0026ldquo;There is a boy. The boy is a brother. He is stealing a cookie. The sister is watching.\u0026rdquo;, \u0026ldquo;boy, cookie, sister\u0026rdquo; are possible IUs.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhiu/\"\u003eInformation Unit\u003c/a\u003es are unique entities mentioned during an utterance; for a sentence like \u0026ldquo;There is a boy. The boy is a brother. He is stealing a cookie. The sister is watching.\u0026rdquo;, \u0026ldquo;boy, cookie, sister\u0026rdquo; are possible \u003ca href=\"/posts/kbhiu/\"\u003eIU\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhiu/","tags":null,"title":"Information Units (Linguistics)"},{"categories":null,"contents":"First order IVP The class of problems described as:\n\\begin{equation} \\dv{y}{t} = f(t, y) \\end{equation}\nand:\n\\begin{equation} y(t_0) = y_0 \\end{equation}\nwe need to figure \u0026ldquo;which of the general solutions of the DiffEqu satisfy the general value.\nTo do this, we simply have to plug in the initial value and solve for our constant \\(K\\).\nSecond order IVP \\begin{equation} \\dv[2]{d}{t} = f(t,y,y\u0026rsquo;) \\end{equation}\nthis requires two initial conditions to fully specify (because two variables becomes constant and goes away).\none and exactly one solution exist for every initial condition of an IVP The ODE \\(y\u0026rsquo; = f(t,y)\\) with initial condition \\(y(t_0) = y_0\\), where, \\(f\\) has to be continuous in some maximal interval \\(t,y\\), and differentiable in \\(y\\), has a unique solution on some maximal interval of \\(t\\).\nThat is: for every single point on a solution space of an IVP, each point is covered one solution and only one solution. 
Its possible for that function to diverge beyond that point.\nThis is also true for second order differential equations (its written as linear homogenous constant coeffiicient; but its true generally for 2nd order IVPs):\n\\begin{equation} \\begin{cases} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; +by = 0 \\\\ y(t_0) = y_0 \\\\ y\u0026rsquo;(t_0) = y\u0026rsquo;_{0} \\end{cases} \\end{equation}\nwill have one and only one solution per \\(y_0\\), \\(y_0\u0026rsquo;\\).\nauxiliary constants :PROPERTIES: :ID: 20C96C21-7C77-4F84-BDDD-B0F96E509200\n\\(y_0\\), or some \\(C\\) that arise out of constant of integration. Essentially, the values which fin down a specific function from a function family\n","html":"\u003ch2 id=\"first-order-ivp\"\u003eFirst order IVP\u003c/h2\u003e\n\u003cp\u003eThe class of problems described as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{t} = f(t, y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t_0) = y_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe need to figure \u0026ldquo;which of the general solutions of the DiffEqu satisfy the general value.\u003c/p\u003e\n\u003cp\u003eTo do this, we simply have to plug in the initial value and solve for our constant \\(K\\).\u003c/p\u003e\n\u003ch2 id=\"second-order-ivp\"\u003eSecond order IVP\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{d}{t} = f(t,y,y\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis requires two initial conditions to fully specify (because two variables becomes constant and goes away).\u003c/p\u003e\n\u003ch2 id=\"one-and-exactly-one-solution-exist-for-every-initial-condition-of-an-ivp\"\u003eone and exactly one solution exist for every initial condition of an IVP\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e \\(y\u0026rsquo; = f(t,y)\\) with initial condition \\(y(t_0) = y_0\\), where, \\(f\\) has to be continuous in some 
maximal interval \\(t,y\\), and differentiable in \\(y\\), has a unique solution on some \u003ca href=\"/posts/kbhmaximal_interval/\"\u003emaximal interval\u003c/a\u003e of \\(t\\).\u003c/p\u003e\n\u003cp\u003eThat is: for every single point on a solution space of an \u003ca href=\"/posts/kbhinitial_value_problems/\"\u003eIVP\u003c/a\u003e, each point is covered one solution and only one solution. Its possible for that function to diverge beyond that point.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThis is also true for second order differential equations (its written as linear homogenous constant coeffiicient; but its true generally for 2nd order IVPs):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; +by = 0 \\\\\ny(t_0) = y_0 \\\\\ny\u0026rsquo;(t_0) = y\u0026rsquo;_{0}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewill have one and only one solution per \\(y_0\\), \\(y_0\u0026rsquo;\\).\u003c/p\u003e\n\u003ch2 id=\"auxiliary-constants\"\u003eauxiliary constants\u003c/h2\u003e\n\u003cp\u003e:PROPERTIES:\n:ID: 20C96C21-7C77-4F84-BDDD-B0F96E509200\u003c/p\u003e\n\u003cp\u003e\\(y_0\\), or some \\(C\\) that arise out of constant of integration. 
Essentially, the values which fin down a specific function from a function family\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinitial_value_problems/","tags":null,"title":"initial value problems"},{"categories":null,"contents":"An injective function is one which is one-to-one: that it maps distinct inputs to distinct outputs.\nconstituents A function \\(T: V \\to W\\) requirements \\(T\\) is injective if \\(Tu = Tv\\) implies \\(u=v\\).\nadditional information injectivity implies that null space is \\(\\{0\\}\\) Proof: let \\(T \\in \\mathcal{L}(V,W)\\); \\(T\\) is injective IFF \\(null\\ T = \\{0\\}\\).\ngiven injectivity Suppose \\(T\\) is injective.\nNow, we know that \\(0\\), because it indeed gets mapped by \\(T\\) to \\(0\\), is in the null space of \\(T\\).\nBecause linear maps take \\(0\\) to \\(0\\), \\(T0=0\\). Now, because \\(T\\) is injective, for any \\(v\\) that \\(Tv = 0 = T 0\\) implies \\(v=0\\).\nSo \\(0\\) is the only thing that an injective \\(T\\) can map to \\(0\\), and it is indeed in the null space, so the null space is just \\(\\{0\\}\\).\ngiven \\(null\\ T=\\{0\\}\\) Suppose we have some \\(Tu = Tv\\), we desire to proof that \\(u=v\\) to show that \\(T\\) is injective.\nGiven \\(Tu=Tv\\), we have that \\(Tu-Tv\\). Given additivity, \\(T(u-v) = 0\\). This makes \\((u-v) \\in\\ null\\ T\\).\nGiven only \\(0\\) is in the null space of \\(T\\), \\(u-v = 0\\), so \\(u=v\\), as desired. 
\\(\\blacksquare\\).\nmap to smaller space is not injective See map to smaller space is not injective\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e is one which is one-to-one: that it maps distinct inputs to distinct outputs.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(T: V \\to W\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e if \\(Tu = Tv\\) implies \\(u=v\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"injectivity-implies-that-null-space--kbhnull-space-dot-md--is-0\"\u003einjectivity implies that \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is \\(\\{0\\}\\)\u003c/h3\u003e\n\u003cp\u003eProof: let \\(T \\in \\mathcal{L}(V,W)\\); \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e IFF \\(null\\ T = \\{0\\}\\).\u003c/p\u003e\n\u003ch4 id=\"given-injectivity--kbhinjectivity-dot-md\"\u003egiven \u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eSuppose \\(T\\) is injective.\u003c/p\u003e\n\u003cp\u003eNow, we know that \\(0\\), because it indeed gets mapped by \\(T\\) to \\(0\\), is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e, \\(T0=0\\). 
Now, because \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, for any \\(v\\) that \\(Tv = 0 = T 0\\) implies \\(v=0\\).\u003c/p\u003e\n\u003cp\u003eSo \\(0\\) is the only thing that an injective \\(T\\) can map to \\(0\\), and it is indeed in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e, so the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is just \\(\\{0\\}\\).\u003c/p\u003e\n\u003ch4 id=\"given-null-t-0\"\u003egiven \\(null\\ T=\\{0\\}\\)\u003c/h4\u003e\n\u003cp\u003eSuppose we have some \\(Tu = Tv\\), we desire to proof that \\(u=v\\) to show that \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGiven \\(Tu=Tv\\), we have that \\(Tu-Tv\\). Given additivity, \\(T(u-v) = 0\\). This makes \\((u-v) \\in\\ null\\ T\\).\u003c/p\u003e\n\u003cp\u003eGiven only \\(0\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\), \\(u-v = 0\\), so \\(u=v\\), as desired. \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch3 id=\"map-to-smaller-space-is-not-injective--kbhlinear-map-dot-md\"\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinjectivity/","tags":null,"title":"injectivity"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhinjectivity_implies_that_null_space_is_0/","tags":null,"title":"injectivity implies that null space is {0}"},{"categories":null,"contents":"constituents \\(V\\) a vector space \\((u,v)\\), an ordered pair of vectors in \\(V\\) (its not commutative!) 
requirements We define \\(\\langle u, v \\rangle \\in \\mathbb{F}\\) as the inner product of \\((u,v)\\) in that order!. It carries the following properties:\npositivity: \\(\\langle v, v\\rangle \\geq 0, \\forall v \\in V\\) definiteness: \\(\\langle v, v\\rangle = 0\\) IFF \\(v = 0\\) additivity in the first slot: \\(\\langle u+v, w\\rangle = \\langle u, w \\rangle + \\langle v, w \\rangle\\) homogeneity in the first slot: \\(\\langle \\lambda u, v \\rangle = \\lambda \\langle u, v \\rangle\\) conjugate symmetry: \\(\\langle u,v \\rangle = \\overline{\\langle v,u \\rangle}\\) additional information Inner Product Space An Inner Product Space is a vector space with a well-defined inner product. For instance, \\(\\mathbb{F}^{n}\\) has the canonical inner product named Euclidean Inner Product (see below, a.k.a. dot product for reals). The existence of such a well-defined inner product makes \\(\\mathbb{F}^{n}\\) an Inner Product Space.\nRare Axler moment, instead of \u0026ldquo;well-defined\u0026rdquo;, he says we want a vector space with an inner product \u0026ldquo;lurking nearby\u0026rdquo;; james bond style.\nproperties of inner product For a fixed \\(u \\in V\\), the function takes \\(v\\) to \\(\\langle v,u \\rangle\\) is a Linear Map \\(V \\to \\mathbb{F}\\) \\(\\langle 0,u \\rangle = 0\\) \\(\\langle u,0 \\rangle = 0\\) \\(\\langle u,v+w \\rangle = \\langle u,v \\rangle + \\langle u,w \\rangle\\) \\(\\langle u,\\lambda v \\rangle = \\bar{\\lambda}\\langle u,v \\rangle\\) Proof:\nInheriting the additivity and homogeneity of the definition of inner products Set \\(u\\) to be the fixed element for 1), set \\(0\\) to be the input, linear maps take \\(0\\) to \\(0\\) Apply conjugate symmetry to 2) Apply conjugate symmetry, inner product additivty, then conjugate back Apply conjugate symmetry, inner product homogeneity in the first slot, then conjugate back (of course leaving \\(\\lambda\\) out conjugated) Euclidean Inner Product For \\(x,y \\in \\mathbb{F}^{n}\\), 
one can define a pretty well-defined inner product by\n\\begin{equation} x \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}} \\end{equation}\nsimilar to dot product for the reals. This is called the Euclidean Inner Product and has the nice parallelity properties we saw.\ncomplex number shenanigans that motivate the inner product \u0026hellip;as both relevant and more general than the dot product, but also different in key areas.\nFirst, review complex numbers from our discussion in chapter 4. The main problem here is this:\nfor \\(z = (z_1, \\dots, z_{n}) \\in \\mathbb{C}^{n}\\), simply squaring each slot to take the norm may cause us to take a square root of a negative number (as each slot would then be \\(a^{2}-b^{2}\\) for a complex number). That\u0026rsquo;s no bueno because we want \\(\\|z\\|\\) to be real and non-negative.\nThis, therefore, suggests something similar for our inner product definition; to make sure that each slot end up being a real and non-negative number, we simply conjugate the second value:\n\\begin{equation} x \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}} \\end{equation}\nAlso, note that this definition give us an important result: if we reverse \\(x\\) and \\(y\\), we would be conjugating the other element! And so, we have that:\n\\begin{equation} x \\cdot y = \\bar{{y \\cdot x}} \\end{equation}\nderived by following the usual rules of complex conjugation. Note that none of these elementwisethings (the \\(x_{n}y_{n}\\) business) are actually in the definition of the inner product, as it is the rules of an Euclidean Inner Product.\ninner product of \\(L\\) periodic functions For \\(f,g : [0,L] \\to \\mathbb{R}\\), which are L-periodic, we define:\n\\begin{equation} \\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x} \\end{equation}\nRecall that L-periodic functions can be shifted without changing periodicity. 
But if for some reason you want to base it off of any two numbers with distance \\(L\\) in between:\n\\begin{equation} \\langle f,g \\rangle_{[a,b]} := \\frac{1}{b-a} \\int^{b}_{a} f(x) g(x) \\dd{x} \\end{equation}\nThe work of checking this is a well-formed inner product is left to absolutely nobody.\ninner product over complex-valued functions see inner product over complex-valued functions\n","html":"\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(V\\) a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\((u,v)\\), an \u003cem\u003eordered\u003c/em\u003e pair of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(V\\) (its not commutative!)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eWe define \\(\\langle u, v \\rangle \\in \\mathbb{F}\\) as the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e of \\((u,v)\\) \u003cstrong\u003ein that order!\u003c/strong\u003e. 
It carries the following properties:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003epositivity\u003c/strong\u003e: \\(\\langle v, v\\rangle \\geq 0, \\forall v \\in V\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edefiniteness\u003c/strong\u003e: \\(\\langle v, v\\rangle = 0\\) IFF \\(v = 0\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eadditivity in the first slot\u003c/strong\u003e: \\(\\langle u+v, w\\rangle = \\langle u, w \\rangle + \\langle v, w \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehomogeneity in the first slot\u003c/strong\u003e: \\(\\langle \\lambda u, v \\rangle = \\lambda \\langle u, v \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econjugate symmetry\u003c/strong\u003e: \\(\\langle u,v \\rangle = \\overline{\\langle v,u \\rangle}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"inner-product-space\"\u003eInner Product Space\u003c/h3\u003e\n\u003cp\u003eAn \u003ca href=\"#inner-product-space\"\u003eInner Product Space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e with a well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e. For instance, \\(\\mathbb{F}^{n}\\) has the canonical \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e named \u003ca href=\"#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e (see below, a.k.a. \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e for reals). 
The existence of such a well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e makes \\(\\mathbb{F}^{n}\\) an \u003ca href=\"#inner-product-space\"\u003eInner Product Space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eRare Axler moment, instead of \u0026ldquo;well-defined\u0026rdquo;, he says we want a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e with an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e \u0026ldquo;lurking nearby\u0026rdquo;; james bond style.\u003c/p\u003e\n\u003ch3 id=\"properties-of-inner-product\"\u003eproperties of inner product\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eFor a fixed \\(u \\in V\\), the function takes \\(v\\) to \\(\\langle v,u \\rangle\\) is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(V \\to \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle 0,u \\rangle = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle u,0 \\rangle = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle u,v+w \\rangle = \\langle u,v \\rangle + \\langle u,w \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle u,\\lambda v \\rangle = \\bar{\\lambda}\\langle u,v \\rangle\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eInheriting the additivity and homogeneity of the definition of \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003eSet \\(u\\) to be the fixed element for 1), set \\(0\\) to be the input, linear maps take \\(0\\) to \\(0\\)\u003c/li\u003e\n\u003cli\u003eApply conjugate symmetry to 2)\u003c/li\u003e\n\u003cli\u003eApply conjugate symmetry, \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e additivty, then conjugate back\u003c/li\u003e\n\u003cli\u003eApply conjugate symmetry, \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e homogeneity in the first slot, then conjugate back (of 
course leaving \\(\\lambda\\) out conjugated)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/h3\u003e\n\u003cp\u003eFor \\(x,y \\in \\mathbb{F}^{n}\\), one can define a pretty well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e by\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esimilar to \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e for the reals. This is called the \u003ca href=\"#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e and has the nice parallelity properties we saw.\u003c/p\u003e\n\u003ch3 id=\"complex-number--kbhcomplex-number-dot-md--shenanigans-that-motivate-the-inner-product--kbhinner-product-dot-md\"\u003e\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e shenanigans that motivate the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u0026hellip;as both relevant and more general than the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e, but also different in key areas.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhthoughts_on_axler_4/#first-review-id-7c982e7e-b8be-4053-a71e-fc0dba7a5da5-complex-number-s\"\u003eFirst, review complex numbers\u003c/a\u003e from our discussion in chapter 4. The main problem here is this:\u003c/p\u003e\n\u003cp\u003efor \\(z = (z_1, \\dots, z_{n}) \\in \\mathbb{C}^{n}\\), simply squaring each slot to take the \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e may cause us to take a square root of a negative number (as each slot would then be \\(a^{2}-b^{2}\\) for a complex number). 
That\u0026rsquo;s no bueno because we want \\(\\|z\\|\\) to be real and non-negative.\u003c/p\u003e\n\u003cp\u003eThis, therefore, suggests something similar for our \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e definition; to make sure that each slot end up being a real and non-negative number, we simply \u003ca href=\"/posts/kbhcomplex_number/#complex-conjugate\"\u003econjugate\u003c/a\u003e the second value:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAlso, note that this definition give us an important result: if we reverse \\(x\\) and \\(y\\), we would be conjugating the other element! And so, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\cdot y = \\bar{{y \\cdot x}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ederived by following the usual rules of \u003ca href=\"/posts/kbhcomplex_number/#complex-conjugate\"\u003ecomplex conjugation\u003c/a\u003e. Note that none of these elementwisethings (the \\(x_{n}y_{n}\\) business) are actually in the definition of the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e, as it is the rules of an \u003ca href=\"#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"inner-product-of-l-periodic-functions\"\u003einner product of \\(L\\) periodic functions\u003c/h3\u003e\n\u003cp\u003eFor \\(f,g : [0,L] \\to \\mathbb{R}\\), which are \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e, we define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e functions can be shifted without changing periodicity. 
But if for some reason you want to base it off of any two numbers with distance \\(L\\) in between:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,g \\rangle_{[a,b]} := \\frac{1}{b-a} \\int^{b}_{a} f(x) g(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe work of checking this is a well-formed \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is left to absolutely nobody.\u003c/p\u003e\n\u003ch3 id=\"inner-product-over-complex-valued-functions--kbhcomplex-exponential-dot-md\"\u003e\u003ca href=\"/posts/kbhcomplex_exponential/#inner-product--kbhinner-product-dot-md--over-complex-valued-functions\"\u003einner product over complex-valued functions\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhcomplex_exponential/#inner-product--kbhinner-product-dot-md--over-complex-valued-functions\"\u003einner product over complex-valued functions\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinner_product/","tags":null,"title":"inner product"},{"categories":null,"contents":"insertion sort is an algorithm that solves the sorting problem.\nconstituents a sequence of \\(n\\) numbers \\(\\{a_1, \\dots a_{n}\\}\\), called keys\nrequirements Insertion sort provides an ordered sequence \\(\\{a_1\u0026rsquo;, \\dots a_{n}\u0026rsquo;\\}\\) s.t. \\(a_1\u0026rsquo; \\leq \\dots \\leq a_{n}\u0026rsquo;\\)\nimplementation I don\u0026rsquo;t know why, but it seems like CLRS\u0026rsquo; implementation is back-to font. 
But perhaps I\u0026rsquo;m just mistaken.\nvoid insertion_sort(int length, int *A) { for (int j=1; j\u0026lt;length; j++) { int key = A[j]; // insert the key correctly into the // sorted sequence, when appropriate int i = j-1; while (i \u0026gt; 0 \u0026amp;\u0026amp; A[i] \u0026gt; key) { // if things before had // larger key // move them A[i+1] = A[i]; // move it down // move our current value down i -= 1; } // put our new element into the correct palace A[i+1] = key; } } additional information proof We use loop invariant method to show that our algorithm is correct. Our invariant is that the array \\(A[0, \\dots, j-1]\\) is sorted \\(\\forall j 0 \\dots L+1\\).\nInitialization: at the first step, \\(j=1\\) (second element), the subarray of \\(A[0, \\dots j-1]\\) (namely, only the first element), is sorted trivially Maintenance: during each loop, we move \\(j\\) to the right, only being done when the subarray to the left is correctly sorted because of \\(j\\) is moving forward until length, it will terminate As \\(j\\), by the end, covers the entire loop, our loop terminates at \\(L+1\\) and invariant (sortedness) is maintained between \\(A[0, \\dots j]\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinsertion_sort/\"\u003einsertion sort\u003c/a\u003e is an algorithm that solves the sorting problem.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003ea sequence of \\(n\\) numbers \\(\\{a_1, \\dots a_{n}\\}\\), called \u003ca href=\"/posts/kbhkeys/\"\u003ekeys\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eInsertion sort provides an ordered sequence \\(\\{a_1\u0026rsquo;, \\dots a_{n}\u0026rsquo;\\}\\) s.t. \\(a_1\u0026rsquo; \\leq \\dots \\leq a_{n}\u0026rsquo;\\)\u003c/p\u003e\n\u003ch2 id=\"implementation\"\u003eimplementation\u003c/h2\u003e\n\u003cp\u003eI don\u0026rsquo;t know why, but it seems like CLRS\u0026rsquo; implementation is back-to font. 
But perhaps I\u0026rsquo;m just mistaken.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-cpp\" data-lang=\"cpp\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003einsertion_sort\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elength\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elength\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e++\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// insert the key correctly into the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sorted sequence, when appropriate\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// if things before had\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// larger key\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// move them\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// move it down\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// move our current value down\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// put our new element into the correct palace\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"proof\"\u003eproof\u003c/h3\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhloop_invariant/\"\u003eloop invariant\u003c/a\u003e method to show that our algorithm is correct. Our invariant is that the array \\(A[0, \\dots, j-1]\\) is sorted \\(\\forall j 0 \\dots L+1\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInitialization: at the first step, \\(j=1\\) (second element), the subarray of \\(A[0, \\dots j-1]\\) (namely, only the first element), is sorted trivially\u003c/li\u003e\n\u003cli\u003eMaintenance: during each loop, we move \\(j\\) to the right, only being done when the subarray to the left is correctly sorted\u003c/li\u003e\n\u003cli\u003ebecause of \\(j\\) is moving forward until length, it will terminate\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs \\(j\\), by the end, covers the entire loop, our loop terminates at \\(L+1\\) and invariant (sortedness) is maintained between \\(A[0, \\dots j]\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinsertion_sort/","tags":null,"title":"insertion sort"},{"categories":null,"contents":"an integer (\\(\\mathbb{Z}\\)) is the natural numbers, zero, and negative numbers: \u0026hellip;,-4,-3,-2,-1,0,1,2,2,3\nrepresenting integers what are the limitations of computational arithmetic how to perform efficient arithmetic how to encode data more compactly and efficiently See also computer number system\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e (\\(\\mathbb{Z}\\)) is the \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural numbers\u003c/a\u003e, zero, and negative numbers: \u0026hellip;,-4,-3,-2,-1,0,1,2,2,3\u003c/p\u003e\n\u003ch2 id=\"representing-integers\"\u003erepresenting 
integers\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhat are the limitations of computational arithmetic\u003c/li\u003e\n\u003cli\u003ehow to perform efficient arithmetic\u003c/li\u003e\n\u003cli\u003ehow to encode data more compactly and efficiently\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhbinary_number_system/\"\u003ecomputer number system\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinteger/","tags":null,"title":"integer"},{"categories":null,"contents":"The integrating factor \\(\\rho(x)\\) is a value that helps undo the product rule. For which:\n\\begin{equation} log(\\rho(x)) = \\int P(x)dx \\end{equation}\nfor some function \\(P(x)\\).\nSeparating the \\(\\rho(x)\\) out, we have therefore:\n\\begin{equation} e^{\\int P dx} = \\rho(x) \\end{equation}\nWhy is this helpful and undoes the product rule? This is because of a very interesting property of how \\(\\rho(x)\\) behaves.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhintegrating_factor/\"\u003eintegrating factor\u003c/a\u003e \\(\\rho(x)\\) is a value that helps undo the product rule. For which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nlog(\\rho(x)) = \\int P(x)dx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some function \\(P(x)\\).\u003c/p\u003e\n\u003cp\u003eSeparating the \\(\\rho(x)\\) out, we have therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{\\int P dx} = \\rho(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhy is this helpful and undoes the product rule? This is because of a very interesting property of how \\(\\rho(x)\\) behaves.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhintegrating_factor/","tags":null,"title":"integrating factor"},{"categories":null,"contents":"Goal We are going to solve the inter-temporal choice problem, for ten time stamps, and perform some numerical optimization of the results\nMain Methods We do this by solving backwards. 
We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\nLet us first declare the function for power utility. \\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\nThe power utility function is defined by:\n\\begin{equation} U( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma} \\end{equation}\nImplementing Power Utility # risk aversion y = var(\u0026#34;y\u0026#34;, latex_name=\u0026#34;\\gamma\u0026#34;, domain=\u0026#39;real\u0026#39;) # discount factor d = var(\u0026#34;d\u0026#34;, latex_name=\u0026#34;\\delta\u0026#34;, domain=\u0026#39;real\u0026#39;) # final value at time t=f k_f = var(\u0026#34;k_f\u0026#34;, latex_name=\u0026#34;k_f\u0026#34;, domain=\u0026#39;real\u0026#39;) # the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1} m = var(\u0026#34;m\u0026#34;, latex_name=\u0026#34;\\mu\u0026#34;, domain=\u0026#39;real\u0026#39;) # boundary conditions assume(y\u0026gt;0) assume(y\u0026lt;1) assume(d\u0026gt;0) assume(d\u0026lt;1) # power utility u(c) = ((c^(1-y)-1)/(1-y)) u c |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1) End Boundary Conditions At the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\n# at the final time, leave nothing for investment I=0; u_total = 0 Bottom-Up Dynamic Programming From every step from here, we will discount this utility by \\(d\\), then solve for the previous step\u0026rsquo;s target consumption that would maximize utility. 
That is, at every step, we desire:\n\\begin{equation} k_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t} \\end{equation}\n\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\nand\n\\(\\max u(c_{t})\\)\nRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\n\\begin{equation} I_{t-1} = \\frac{k_t}{(1+m)} \\end{equation}\nActual implementation # create an dictionary to keep track of all the capital variables k = {} # we will iterate time stamps 1-10 T = 10 # a variable for captial at that time for i in range(T): k_t = var(f\u0026#34;k_{T-i}\u0026#34;, latex_name=f\u0026#34;k_{T-i}\u0026#34;) # t-i becasue we are solving backwards; i0 = T10 # what can be consumed at every time stamp # is the k of the previous timestamp, minus # what needs to be left over # we multiply here by d because we want to # discount future utility u_total = d*u_total + u(k_t-I) # add the current variable to dictionary k[T-i] = k_t # recall again i0=T10 because backwards # solve for the next investment amount I = k_t/(1+m) u_total -(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1) Optimization with some constants We can now use the scipy numerical optimizer to minimize this target. 
Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\nWe will set some initial conditions:\n_m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less Optimization Target Function Recall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution.\nThe code is actually just doing some function substitution, so its not very exciting.\n# we reverse the k_* variables because it is stored in the dictionary # in reverse, because we knew the reverse condition first optim_variables = list(k.values()) optim_variables.reverse() # this function is also the callback, so it returning # True terminates execution def u_total_loss(x): # the optimizer\u0026#39;s current step # we want to take [1:], because we need to keep k1 the same at _k the # initial value substitution_dict = {key: val for key, val in zip(optim_variables[1:], x)} # initial conditions substitution_dict[m] = _m substitution_dict[y] = _y substitution_dict[d] = _d substitution_dict[d] = _d # we want to keep the initial value k1 the same substitution_dict[k[1]] = _k try: # get value content = (-1*u_total).subs(substitution_dict) # recall we multiply by -1 because we are MINIMIZING, so the loss is # the inverse of the maximization utility target return float(content.n()), False except: return 0, True Optimize! Finally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. 
do nothing) and have the optimizer work it out from there:\nfrom scipy.optimize import minimize target = minimize(lambda x:u_total_loss(x)[0], [_k for _ in range(T-1)], callback=lambda x:u_total_loss(x)[1]) target fun: -50.71592850322347 hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 4433.87331935, 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017, 1126.51562458], ... [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648, 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041, 2911.30717272]]) jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00, 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06, -5.24520874e-06]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 1360 nit: 130 njev: 136 status: 0 success: True x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591, 361.51493714, 272.10309839, 192.29084196, 120.94057011, 57.12129925]) Recovering Actual Dollar Consumption Amount Awesome! 
We now can recover \\(c\\) at each point by a nice helpful function:\nc(k0, k1) = k0 - k1/(_m+1) \u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\nSo, let us translate our list to the actual values consumed:\ncapital_over_time = [_k]+target.x.tolist() # we need to add the initial condition _k back to the # inventory list consumption_over_time = [c(i,j) for i,j in zip(capital_over_time, capital_over_time[1:])] consumption_over_time [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] Examples of Output The next set of slides show examples of possible optimization outputs\u0026mdash;how decisions by the inter-temporal choice problem changes based on the inputs.\nRisk Averse _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] More Return Winning Game _m = 0.1 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [154.860597149863, 152.989432556196, 151.010433069881, 149.201249715528, 147.329750167852, 145.539019666462, 143.739371599600, 141.984228587213, 140.243839963791] More Risk _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.2 # generally risky _d = 0.9 # the future matters slightly less [388.525041338376, 241.124420093987, 149.632568775223, 92.8644259086613, 57.6330459746870, 35.7667230511026, 22.1970017374152, 13.7754327365677, 8.54930907023498] Loosing Game _m = -0.01 # this is a loosing stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters fun: 0 hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 
0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]) jac: array([0., 0., 0., 0., 0., 0., 0., 0., 0.]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 10 nit: 0 njev: 1 status: 0 success: True x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.]) Evidently: do nothing if we have a loosing cause.\nWinning Game _m = 1.00 # this is SUPER winning stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters [125.667556437602, 241.474827418105, 460.068836905327, 868.972817783791, 4540.45893314523, 4219.93058738029, 3988.05775624984, 3996.89431939885, 3615.74982832315] We made so much money that we are spending a lot of it and still spending it.\n","html":"\u003ch2 id=\"goal\"\u003eGoal\u003c/h2\u003e\n\u003cp\u003eWe are going to solve the inter-temporal choice problem, for ten time stamps, and perform some numerical optimization of the results\u003c/p\u003e\n\u003ch2 id=\"main-methods\"\u003eMain Methods\u003c/h2\u003e\n\u003cp\u003eWe do this by solving backwards. We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\u003c/p\u003e\n\u003cp\u003eLet us first declare the function for \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e. 
\\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\u003c/p\u003e\n\u003cp\u003eThe power utility function is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"implementing-power-utility\"\u003eImplementing Power Utility\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# risk aversion\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;y\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\gamma\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# discount factor\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ed\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;d\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\delta\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# final value at time t=f\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_f\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;m\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\mu\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# boundary conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# power utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ec |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"end-boundary-conditions\"\u003eEnd Boundary Conditions\u003c/h2\u003e\n\u003cp\u003eAt the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). 
We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# at the final time, leave nothing for investment\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"bottom-up-dynamic-programming\"\u003eBottom-Up Dynamic Programming\u003c/h2\u003e\n\u003cp\u003eFrom every step from here, we will discount this utility by \\(d\\), then solve for the \u003cem\u003eprevious\u003c/em\u003e step\u0026rsquo;s target consumption that would maximize utility. That is, at every step, we desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\(\\max u(c_{t})\\)\u003c/p\u003e\n\u003cp\u003eRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) 
Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI_{t-1} = \\frac{k_t}{(1+m)}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"actual-implementation\"\u003eActual implementation\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# create an dictionary to keep track of all the capital variables\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we will iterate time stamps 1-10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# a variable for captial at that time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# t-i becasue we are solving backwards; i0 = T10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what can be consumed at every time 
stamp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# is the k of the previous timestamp, minus\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what needs to be left over\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we multiply here by d because we want to\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# discount future utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# add the current variable to dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# recall again i0=T10 because backwards\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# solve for the next investment amount\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y 
- 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimization-with-some-constants\"\u003eOptimization with some constants\u003c/h2\u003e\n\u003cp\u003eWe can now use the scipy numerical optimizer to minimize this target. Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\u003c/p\u003e\n\u003cp\u003eWe will set some initial conditions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimization-target-function\"\u003eOptimization Target Function\u003c/h2\u003e\n\u003cp\u003eRecall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution.\u003c/p\u003e\n\u003cp\u003eThe code is actually just doing some function substitution, so its not very exciting.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we reverse the k_* variables because it is stored in the dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# in reverse, because we knew the reverse condition first\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# this function is also the callback, so it returning\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# True terminates execution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the optimizer\u0026#39;s current step\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to take [1:], because we need to keep k1 the same at _k the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to keep the initial value k1 the same\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003etry\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# get value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# recall we multiply by -1 because we are MINIMIZING, so the loss is\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the inverse of the maximization utility target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()),\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eexcept\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimize\"\u003eOptimize!\u003c/h2\u003e\n\u003cp\u003eFinally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. do nothing) and have the optimizer work it out from there:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.optimize\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecallback\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: -50.71592850322347\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 4433.87331935,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1126.51562458],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2911.30717272]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -5.24520874e-06])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 1360\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 
130\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 136\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 361.51493714, 272.10309839, 192.29084196, 120.94057011,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.12129925])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"recovering-actual-dollar-consumption-amount\"\u003eRecovering Actual Dollar Consumption Amount\u003c/h2\u003e\n\u003cp\u003e\u003cem\u003eAwesome!\u003c/em\u003e We now can recover \\(c\\) at each point by a nice helpful function:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSo, let us translate our list to the actual values consumed:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etolist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# we need to add the initial condition _k back to the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# inventory list\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"examples-of-output\"\u003eExamples of Output\u003c/h2\u003e\n\u003cp\u003eThe next set of slides show examples of possible optimization outputs\u0026mdash;how decisions by the inter-temporal choice problem changes based on the inputs.\u003c/p\u003e\n\u003ch2 id=\"risk-averse\"\u003eRisk Averse\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 
capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"more-return\"\u003eMore Return\u003c/h2\u003e\n\u003ch2 id=\"winning-game\"\u003eWinning Game\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[154.860597149863,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 152.989432556196,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 151.010433069881,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.201249715528,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 147.329750167852,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 145.539019666462,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 143.739371599600,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 141.984228587213,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 140.243839963791]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"more-risk\"\u003eMore Risk\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e 
\u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.2\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risky\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[388.525041338376,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.124420093987,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.632568775223,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.8644259086613,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.6330459746870,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 35.7667230511026,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 22.1970017374152,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
13.7754327365677,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8.54930907023498]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"loosing-game\"\u003eLoosing Game\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a loosing stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 1, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 1, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 1, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 1, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 1, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 1, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 1, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([0., 0., 0., 0., 0., 0., 0., 0., 0.])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 10\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 
1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently: do nothing if we have a loosing cause.\u003c/p\u003e\n\u003ch2 id=\"winning-game\"\u003eWinning Game\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1.00\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is SUPER winning stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[125.667556437602,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.474827418105,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 460.068836905327,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 868.972817783791,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4540.45893314523,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4219.93058738029,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3988.05775624984,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3996.89431939885,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3615.74982832315]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe made so much money that we are spending a lot of it and still spending it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/risk_apetite_preso/","tags":null,"title":"Inter-Temporal Choice"},{"categories":null,"contents":"The interaction of multiple agents/decision makers causes additional uncertainty\n","html":"\u003cp\u003eThe interaction of multiple agents/decision makers causes additional 
uncertainty\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinteraction_uncertainty/","tags":null,"title":"Interaction Uncertainty"},{"categories":null,"contents":"Big question: how to we align agents in an interactive, dynamic way (i.e. without instruction fine tuning which is hard).\nSequentiality is hard:\nwhat is the context/motivation? how to you transfer across contexts? how do you plan? Key Idea Language is information that helps agents predict the future; instructions is world modeling\ninstead of instructions =\u0026gt; actions (executor) instructions =\u0026gt; updated belief (world model) User intent =\u0026gt; action shouldn\u0026rsquo;t have LLM language representation in the middle as a bottleneck.\nThere is an underlying representation of the user\u0026rsquo;s preferences, you have to use language to coax it out of them.\nDynalang build model that takes vision + language as a joint input pass it through an auto-encoding representation have the world model predict the next-encoding representation Main Idea: modeling language/tokens/images as a joint latent representation over time.\nTraining objective:\nreconstruction loss against the future presentation: using \\(R_{i}\\) to predict \\(R_{i+1}\\) predict the reward over time regularize? Workflow take reward/preferences/behavior data structure learning to create the relationships between elements in the data structure ","html":"\u003cp\u003eBig question: how to we align agents in an interactive, dynamic way (i.e. 
without instruction fine tuning which is hard).\u003c/p\u003e\n\u003cp\u003eSequentiality is hard:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewhat is the context/motivation?\u003c/li\u003e\n\u003cli\u003ehow to you transfer across contexts?\u003c/li\u003e\n\u003cli\u003ehow do you plan?\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"key-idea\"\u003eKey Idea\u003c/h2\u003e\n\u003cp\u003eLanguage is information that helps agents \u003cstrong\u003epredict the future\u003c/strong\u003e; instructions is \u003cstrong\u003eworld modeling\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003einstead of instructions =\u0026gt; actions (executor)\u003c/li\u003e\n\u003cli\u003einstructions =\u0026gt; updated belief (world model)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eUser intent =\u0026gt; action shouldn\u0026rsquo;t have LLM language representation in the middle as a bottleneck.\u003c/p\u003e\n\u003cp\u003eThere is an underlying representation of the user\u0026rsquo;s preferences, you have to use language to coax it out of them.\u003c/p\u003e\n\u003ch2 id=\"dynalang\"\u003eDynalang\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ebuild model that takes vision + language as a joint input\u003c/li\u003e\n\u003cli\u003epass it through an auto-encoding representation\u003c/li\u003e\n\u003cli\u003ehave the world model predict the next-encoding representation\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eMain Idea: modeling language/tokens/images as a joint latent representation over time.\u003c/p\u003e\n\u003cp\u003eTraining objective:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ereconstruction loss against the future presentation: using \\(R_{i}\\) to predict \\(R_{i+1}\\)\u003c/li\u003e\n\u003cli\u003epredict the reward over time\u003c/li\u003e\n\u003cli\u003eregularize?\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"workflow\"\u003eWorkflow\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etake reward/preferences/behavior data\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhstructure_learning/\"\u003estructure learning\u003c/a\u003e to create the relationships between elements in the data structure\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinteractive_agent/","tags":null,"title":"Interactive Agents"},{"categories":null,"contents":" psycoacoustics ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpsycoacoustics/\"\u003epsycoacoustics\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhintersession_2023/","tags":null,"title":"Intersession 2023"},{"categories":null,"contents":"invariant subspaces are a property of operators; it is a subspace for which the operator in question on the overall space is also an operator of the subspace.\nconstituents an operator \\(T \\in \\mathcal{L}(V)\\) a subspace \\(U \\subset V\\) requirements \\(U\\) is considered invariant on \\(T\\) if \\(u \\in U \\implies Tu \\in U\\)\n(i.e. \\(U\\) is invariant under \\(T\\) if \\(T |_{U}\\) is an operator on \\(U\\))\nadditional information nontrivial invariant subspace (i.e. 
eigenstuff)\nA proof is not given yet, but \\(T \\in \\mathcal{L}(V)\\) has an invariant subspace that\u0026rsquo;s not \\(V\\) nor \\(\\{0\\}\\) if \\(\\dim V \u0026gt; 1\\) for complex number vector spaces and \\(\\dim V \u0026gt; 2\\) for real number vector spaces.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es are a property of \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es; it is a subspace for which the \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e in question on the overall space is also an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e of the subspace.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ean \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U \\subset V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(U\\) is considered \u003cstrong\u003einvariant\u003c/strong\u003e on \\(T\\) if \\(u \\in U \\implies Tu \\in U\\)\u003c/p\u003e\n\u003cp\u003e(i.e. \\(U\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\) if \\(T |_{U}\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(U\\))\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"nontrivial-invariant-subspace--kbhinvariant-subspace-dot-md\"\u003enontrivial \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e(i.e. 
eigenstuff)\u003c/p\u003e\n\u003cp\u003eA proof is not given yet, but \\(T \\in \\mathcal{L}(V)\\) has an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e that\u0026rsquo;s not \\(V\\) nor \\(\\{0\\}\\) if \\(\\dim V \u0026gt; 1\\) for \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es and \\(\\dim V \u0026gt; 2\\) for \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003e \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinvariant_subspace/","tags":null,"title":"invariant subspace"},{"categories":null,"contents":"the inverse is the the opposite of an operation. As in, if you apply the inverse of an operation to the result of applying the original with the same operation it will cancel it.\nThat is,\n\\begin{equation} A * B * B^{-1} = A \\end{equation}\n\\(B^{-1}\\) is then the inverse of \\(B\\) for the \\(*\\) operation. This is operation dependent.\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e is the the opposite of an \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e. As in, if you apply the \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e of an \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e to the result of applying the original with the same \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e it will cancel it.\u003c/p\u003e\n\u003cp\u003eThat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA * B * B^{-1} = A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(B^{-1}\\) is then the \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e of \\(B\\) for the \\(*\\) operation. 
This is \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e dependent.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinverses/","tags":null,"title":"inverse"},{"categories":null,"contents":" Generate a uniform number between 0 to 1. Get the inverse of the standard normal density function at that value (let number be \\(y\\), find the \\(x\\) such that \\(\\phi(y) = x\\)) return \\(x\\) ","html":"\u003col\u003e\n\u003cli\u003eGenerate a uniform number between 0 to 1.\u003c/li\u003e\n\u003cli\u003eGet the inverse of the \u003ca href=\"/posts/kbhgaussian_distribution/#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e at that value (let number be \\(y\\), find the \\(x\\) such that \\(\\phi(y) = x\\))\u003c/li\u003e\n\u003cli\u003ereturn \\(x\\)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinverse_transform_sampling/","tags":null,"title":"inverse transform sampling"},{"categories":null,"contents":"A Linear Map is invertable if it can be undone. It is called a nonsingular matrix\nconstituents A linear map \\(T \\in \\mathcal{L}(V,W)\\)\nrequirements A Linear Map \\(T \\in \\mathcal{L}(V,W)\\) is called invertable if \\(\\exists T^{-1} \\in \\mathcal{L}(W,V): T^{-1}T=I \\in \\mathcal{L}(V), TT^{-1} = I \\in \\mathcal{L}(W)\\).\n\u0026ldquo;a map is invertable if there is an inverse\u0026rdquo;: that combining the commutable inverse and itself will result in the identity map.\nadditional information matrix invertability Matrices whose determinants are not \\(0\\) (i.e. it is invertable) is called \u0026ldquo;nonsingular matrix\u0026rdquo;. If it doesn\u0026rsquo;t have an inverse, it is called a singular matrix.\nlinear map inverse is unique An invertable Linear Map has an unique inverse:\nProof:\nSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(\\exists S_1, S_2\\) which are both inverses of \\(T\\). 
We desire \\(S_1=S_2\\).\nSo:\n\\begin{equation} S_1 = S_1(TS_2) = (S_1T)S_2 = IS_{2} = S_2 \\end{equation}\ngiven Product of Linear Maps is associative.\n\\(S_1=S_2\\), as desired. \\(\\blacksquare\\)\ninjectivity and surjectivity implies invertability Suppose \\(T \\in \\mathcal{L}(V,W)\\); we desire that \\(T\\) is invertable IFF it is both injective and surjective.\nFirst, suppose \\(T\\) is invertible; that is, \\(\\exists T^{-1}: T^{-1}T=I, TT^{-1}=I\\) We desire that \\(T\\) is both injective and surjective.\nInjectivity: Suppose \\(Tv=Tu\\); we desire \\(u=v\\). \\(u = T^{-1}(Tu) = T^{-1}(Tv) = v\\) . We essentially to use the fact that \\(T^{-1}\\) is a function to \u0026ldquo;revert\u0026rdquo; the map of \\(T\\); as \\(T^{-1}\\) is a map, we know it has to revert to the same result.\nSurjectivity: Recall \\(T: V\\to W\\). WLOG let \\(w \\in W\\), \\(w=T(T^{-1}w)\\). Therefore, all \\(w\\) is in range of \\(T\\).\nSecond, suppose \\(T\\) is both injective and surjective. Define a transition \\(S\\) such that \\(T(Sw) = w\\) for all \\(w \\in W\\) (i.e. it hits just the right element to hit \\(w\\) as an input of \\(T\\).) This is made possible because \\(T\\) is surjective (because you can hit all \\(W\\)) and injective (which makes \\(S\\) not need to hit two different things or have two non-equal things accidentally map to the same thing.)\nEvidently, \\(T(Sw)=w \\forall w \\in W \\implies (TS) = I\\) by definition.\nWe now desire \\(ST = I\\). We have \\((TSTv) = (TS)(Tv) = ITv = Tv\\) by associativity of map multiplication. Now, \\((TSTv) = Tv \\implies T(ST)v = Tv\\) by associativity again. 
This implies that \\((ST)v=v\\) again because \\(T\\) is injective: so the same input will not produce two unique outputs.\nWe then can show \\(S\\) is a linear map in the usual way.\nHaving constructed the desired result, \\(\\blacksquare\\)\nAlternate Proof for Finite Dimensional \\(T\\) So given map to bigger space is not surjective and map to smaller space is not injective, we have that the dimension of \\(W = V\\), we leverage the basis of each and build the using the basis of domain.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e if it can be undone. It is called a \u003ca href=\"/posts/kbhinvertability/\"\u003enonsingular matrix\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eA linear map \\(T \\in \\mathcal{L}(V,W)\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T \\in \\mathcal{L}(V,W)\\) is called \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e if \\(\\exists T^{-1} \\in \\mathcal{L}(W,V): T^{-1}T=I \\in \\mathcal{L}(V), TT^{-1} = I \\in \\mathcal{L}(W)\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a map is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e if there is an inverse\u0026rdquo;: that combining the \u003cstrong\u003ecommutable\u003c/strong\u003e inverse and itself will result in the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"matrix--kbhmatricies-dot-md--invertability--kbhinvertability-dot-md\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e \u003ca 
href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eMatrices whose determinants are not \\(0\\) (i.e. it is invertable) is called \u0026ldquo;\u003ca href=\"/posts/kbhinvertability/\"\u003enonsingular matrix\u003c/a\u003e\u0026rdquo;. If it doesn\u0026rsquo;t have an \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e, it is called a \u003ca href=\"/posts/kbhinvertability/\"\u003esingular matrix\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"linear-map-inverse-is-unique\"\u003elinear map inverse is unique\u003c/h3\u003e\n\u003cp\u003eAn \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e has an unique inverse:\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(\\exists S_1, S_2\\) which are both inverses of \\(T\\). We desire \\(S_1=S_2\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS_1 = S_1(TS_2) = (S_1T)S_2 = IS_{2} = S_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egiven \u003ca href=\"/posts/kbhproduct_of_linear_maps/\"\u003eProduct of Linear Maps\u003c/a\u003e is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\(S_1=S_2\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"injectivity--kbhinjectivity-dot-md--and-surjectivity--kbhsurjectivity-dot-md--implies-invertability--kbhinvertability-dot-md\"\u003e\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e implies \u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\); we desire that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e IFF it is both injective and surjective.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eFirst, suppose \\(T\\) is invertible\u003c/strong\u003e\u003c/strong\u003e; that is, \\(\\exists T^{-1}: T^{-1}T=I, TT^{-1}=I\\) We desire that \\(T\\) is both \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eInjectivity: Suppose \\(Tv=Tu\\); we desire \\(u=v\\). \\(u = T^{-1}(Tu) = T^{-1}(Tv) = v\\) . We essentially to use the fact that \\(T^{-1}\\) is a function to \u0026ldquo;revert\u0026rdquo; the map of \\(T\\); as \\(T^{-1}\\) is a map, we know it has to revert to the same result.\u003c/p\u003e\n\u003cp\u003eSurjectivity: Recall \\(T: V\\to W\\). WLOG let \\(w \\in W\\), \\(w=T(T^{-1}w)\\). Therefore, all \\(w\\) is in range of \\(T\\).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eSecond, suppose \\(T\\) is both injective and surjective\u003c/strong\u003e\u003c/strong\u003e. Define a transition \\(S\\) such that \\(T(Sw) = w\\) for all \\(w \\in W\\) (i.e. it hits just the right element to hit \\(w\\) as an input of \\(T\\).) 
This is made possible because \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e (because you can hit all \\(W\\)) and \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e (which makes \\(S\\) not need to hit two different things or have two non-equal things accidentally map to the same thing.)\u003c/p\u003e\n\u003cp\u003eEvidently, \\(T(Sw)=w \\forall w \\in W \\implies (TS) = I\\) by definition.\u003c/p\u003e\n\u003cp\u003eWe now desire \\(ST = I\\). We have \\((TSTv) = (TS)(Tv) = ITv = Tv\\) by associativity of map multiplication. Now, \\((TSTv) = Tv \\implies T(ST)v = Tv\\) by associativity again. This implies that \\((ST)v=v\\) again because \\(T\\) is injective: so the same input will not produce two unique outputs.\u003c/p\u003e\n\u003cp\u003eWe then can show \\(S\\) is a linear map in the usual way.\u003c/p\u003e\n\u003cp\u003eHaving constructed the desired result, \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch4 id=\"alternate-proof-for-finite-dimensional-t\"\u003eAlternate Proof for Finite Dimensional \\(T\\)\u003c/h4\u003e\n\u003cp\u003eSo given \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e and \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e, we have that the dimension of \\(W = V\\), we leverage the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each and build the \u003cinverse\u003e using the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinvertability/","tags":null,"title":"invertability"},{"categories":null,"contents":"For each term \\(t\\), let\u0026rsquo;s store all the documents containing \\(t\\). 
We identify each doc by DocID.\npostings list a \u0026ldquo;postings list\u0026rdquo; datastructure is a variable-length list which is appended to with \u0026ldquo;postings\u0026rdquo;. In this way, we can store a \u0026ldquo;posting\u0026rdquo; for every DocID with the index we encounter.\nFor instance, this could be a linked list.\nAlthough: we generally want to sort our postings list by documentID for ease of indexing.\nindexing process sort term vs. docID tuples by term alphabetically\nsort docIDs within each tuple by integer\nmerge multiple entries in the single document, keeping track of total frequency\nconsolidate information into postings list (term + document frequency (how many documents does the term show up): [postings, here, ...])\nfor instance\nuseful pre-processing cut character sequences into word tokens map text and query into the same form stemming or lemmatization removing stopwords (the, a, to, etc.?) \u0026mdash; but this may not be a good idea (song \u0026ldquo;to be or not to be\u0026rdquo;) handling phrases biword index Sometimes, single word don\u0026rsquo;t work well as tokens. sometimes, we use bi-grams instead to be indexed. Then, we can break any query down into series of bigrams (\u0026ldquo;stanford university palo alto\u0026rdquo; =\u0026gt; (stanford university) (university palo) (palo alto))\nWe already have \\(V^{2}\\) blow up here. So this is actually NOT the standard solution.\npositional index in each posting of a postings list, store the docID and a sublist of positions of the term within that document.\ntypically, for English/germanic/romatic languages, a positional index is 2-4 times as lange as a non-positional index. 
in particular, the size would be 35%-50% of the original text.\nBoolean Retrieval AND query \u0026ldquo;merge\u0026rdquo; two postings: identify intersections by two pointer at the head of both lists, check if the two pointers are pointing at the same docID.\nif the answer is \u0026ldquo;no\u0026rdquo;, advance the pointer pointed to the smaller docid if the answer is \u0026ldquo;yes\u0026rdquo;, advance both pointers once any list is exhausted, stop.\nAnd this is why we need the postings sorted.\nTypically, when you start, you\u0026rsquo;d like to start your searches on your smallest postings list.\nphrase-query retrieval phrase-query retrieval is the prcoess to process documents where an exact phrase appears. First index for the postings list of the entire phrase:\nthen do the Boolean Retrieval iteratively\u0026mdash;merge the phrases using AND queries first, then zoom into each document to merge their word positions, offset by one.\n","html":"\u003cp\u003eFor each term \\(t\\), let\u0026rsquo;s store all the documents containing \\(t\\). We identify each doc by DocID.\u003c/p\u003e\n\u003ch2 id=\"postings-list\"\u003epostings list\u003c/h2\u003e\n\u003cp\u003ea \u0026ldquo;\u003ca href=\"#postings-list\"\u003epostings list\u003c/a\u003e\u0026rdquo; datastructure is a variable-length list which is appended to with \u0026ldquo;postings\u0026rdquo;. In this way, we can store a \u0026ldquo;posting\u0026rdquo; for every DocID with the index we encounter.\u003c/p\u003e\n\u003cp\u003eFor instance, this could be a linked list.\u003c/p\u003e\n\u003cp\u003eAlthough: we generally want to sort our \u003ca href=\"#postings-list\"\u003epostings list\u003c/a\u003e by documentID for ease of indexing.\u003c/p\u003e\n\u003ch2 id=\"indexing-process\"\u003eindexing process\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003esort term vs. 
docID tuples by term alphabetically\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esort docIDs within each tuple by integer\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emerge multiple entries in the single document, keeping track of total frequency\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003econsolidate information into postings list (\u003ccode\u003eterm + document frequency (how many documents does the term show up): [postings, here, ...]\u003c/code\u003e)\u003c/p\u003e\n\u003cp\u003efor instance\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-28_10-13-30_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"useful-pre-processing\"\u003euseful pre-processing\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecut character sequences into word tokens\u003c/li\u003e\n\u003cli\u003emap text and query into the same form\u003c/li\u003e\n\u003cli\u003estemming or lemmatization\u003c/li\u003e\n\u003cli\u003eremoving stopwords (the, a, to, etc.?) \u0026mdash; but this may not be a good idea (song \u0026ldquo;to be or not to be\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"handling-phrases\"\u003ehandling phrases\u003c/h2\u003e\n\u003ch3 id=\"biword-index\"\u003ebiword index\u003c/h3\u003e\n\u003cp\u003eSometimes, single word don\u0026rsquo;t work well as tokens. sometimes, we use bi-grams instead to be indexed. Then, we can break any query down into series of bigrams (\u0026ldquo;stanford university palo alto\u0026rdquo; =\u0026gt; (stanford university) (university palo) (palo alto))\u003c/p\u003e\n\u003cp\u003eWe already have \\(V^{2}\\) blow up here. 
So this is actually \u003cstrong\u003eNOT\u003c/strong\u003e the standard solution.\u003c/p\u003e\n\u003ch3 id=\"positional-index\"\u003epositional index\u003c/h3\u003e\n\u003cp\u003ein each posting of a \u003ca href=\"#postings-list\"\u003epostings list\u003c/a\u003e, store the docID and a sublist of positions of the term within that document.\u003c/p\u003e\n\u003cp\u003etypically, for English/germanic/romatic languages, a positional index is 2-4 times as lange as a non-positional index. in particular, the size would be 35%-50% of the original text.\u003c/p\u003e\n\u003ch2 id=\"boolean-retrieval\"\u003eBoolean Retrieval\u003c/h2\u003e\n\u003ch3 id=\"and-query\"\u003eAND query\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;merge\u0026rdquo; two postings: identify intersections by two pointer at the head of both lists, check if the two pointers are pointing at the same docID.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif the answer is \u0026ldquo;no\u0026rdquo;, advance the pointer pointed to the smaller docid\u003c/li\u003e\n\u003cli\u003eif the answer is \u0026ldquo;yes\u0026rdquo;, advance both pointers\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eonce any list is exhausted, stop.\u003c/p\u003e\n\u003cp\u003eAnd this is why we need the postings sorted.\u003c/p\u003e\n\u003cp\u003eTypically, when you start, you\u0026rsquo;d like to start your searches on your smallest postings list.\u003c/p\u003e\n\u003ch2 id=\"phrase-query-retrieval\"\u003ephrase-query retrieval\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#phrase-query-retrieval\"\u003ephrase-query retrieval\u003c/a\u003e is the prcoess to process documents where an exact phrase appears. 
First index for the postings list of the entire phrase:\u003c/p\u003e\n\u003cp\u003ethen do the \u003ca href=\"#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e iteratively\u0026mdash;merge the phrases using AND queries first, then zoom into each document to merge their word positions, offset by one.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-28_10-19-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhinverted_index/","tags":null,"title":"Inverted Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhiob/","tags":null,"title":"iob"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhiptv/","tags":null,"title":"IPTV"},{"categories":null,"contents":"irrational numbers are real numbers that are not rational numbers.\nFormally:\n\\begin{equation} \\mathbb{C} = \\mathbb{R} \\backslash \\mathbb{Q} \\end{equation}\nwhere, \\(\\backslash\\) is subtracting two sets.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhirrational_number/\"\u003eirrational number\u003c/a\u003es are \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es that are not \u003ca href=\"/posts/kbhrational_number/\"\u003erational number\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{C} = \\mathbb{R} \\backslash \\mathbb{Q}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\backslash\\) is subtracting two \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhirrational_number/","tags":null,"title":"irrational number"},{"categories":null,"contents":"Motivation Large crowd navigation with sudden changes: unlikely events are out of likely sample. 
So, we want to bring in another distribution based on importance and not likelyness.\nGoals retains DESPOT garantees outperforms DESPOT and POMCP DESPOT with Importance Sampling take our initial belief sample trajectories according to Importance Sampling distribution calculate values of those states obtain value estimate based on weighted average of the values Importance Sampling of trajectories We define an importance distribution of some trajectory \\(\\xi\\):\n\\begin{equation} q(\\xi | b,\\pi) = q(s_0) \\prod_{t=0}^{D} q(s_{t+1}, o_{t+1} | s_{t}, a_{t+1}) \\end{equation}\nBackground Importance Sampling Suppose you have a function \\(f(s)\\) which isn\u0026rsquo;t super well integrate-able, yet you want:\n\\begin{equation} \\mu = \\mathbb{E}(f(s)) = \\int_{0}^{1} f(s)p(s) \\dd{s} \\end{equation}\nhow would you sample various \\(f(s)\\) effectively such that you end up with \\(\\hat{\\mu}\\) that\u0026rsquo;s close enough?\nWell, what if you have an importance distribution \\(q(s): S \\to \\mathbb{R}^{[0,1]}\\), which tells you how \u0026ldquo;important\u0026rdquo; to the expected value of the distribution a particular state is? 
Then, we can formulate a new, better normalization function called the \u0026ldquo;importance weight\u0026rdquo;:\n\\begin{equation} w(s) = \\frac{p(s)}{q(s)} \\end{equation}\nTherefore, this would make our estimator:\n\\begin{equation} \\hat{\\mu} = \\frac{\\sum_{n} f(s_{n}) w(s_{n})}{\\sum_{n} w(s_{n})} \\end{equation}\nTheoretic grantees So, there\u0026rsquo;s a distribution over \\(f\\):\n\\begin{equation} q(s) = \\frac{b(s)}{w_{\\pi}(s)} \\end{equation}\nwhere\n\\begin{equation} w(s) = \\frac{\\mathbb{E}_{b} \\qty( \\sqrt{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]})}{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]} \\end{equation}\nwhich measures how important a state is, where \\(\\pi\\) is the total discounted reward.\n","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eLarge crowd navigation with sudden changes: unlikely events are out of likely sample. So, we want to bring in another distribution based on \u003cstrong\u003eimportance\u003c/strong\u003e and not \u003cstrong\u003elikelyness\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"goals\"\u003eGoals\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eretains DESPOT garantees\u003c/li\u003e\n\u003cli\u003eoutperforms \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e and \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"despot--kbhdespot-dot-md--with-importance-sampling--org7062454\"\u003e\u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e with \u003ca href=\"#importance-sampling\"\u003eImportance Sampling\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etake our initial belief\u003c/li\u003e\n\u003cli\u003esample trajectories according to \u003ca href=\"#importance-sampling\"\u003eImportance Sampling\u003c/a\u003e distribution\u003c/li\u003e\n\u003cli\u003ecalculate values of those states\u003c/li\u003e\n\u003cli\u003eobtain value estimate based on weighted average of the 
values\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"importance-sampling--org7062454--of-trajectories\"\u003e\u003ca href=\"#importance-sampling\"\u003eImportance Sampling\u003c/a\u003e of trajectories\u003c/h3\u003e\n\u003cp\u003eWe define an \u003ca href=\"#importance-sampling\"\u003eimportance distribution\u003c/a\u003e of some trajectory \\(\\xi\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq(\\xi | b,\\pi) = q(s_0) \\prod_{t=0}^{D} q(s_{t+1}, o_{t+1} | s_{t}, a_{t+1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003ch3 id=\"importance-sampling\"\u003eImportance Sampling\u003c/h3\u003e\n\u003cp\u003eSuppose you have a function \\(f(s)\\) which isn\u0026rsquo;t super well integrate-able, yet you want:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mu = \\mathbb{E}(f(s)) = \\int_{0}^{1} f(s)p(s) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehow would you sample various \\(f(s)\\) effectively such that you end up with \\(\\hat{\\mu}\\) that\u0026rsquo;s close enough?\u003c/p\u003e\n\u003cp\u003eWell, what if you have an \u003ca href=\"#importance-sampling\"\u003eimportance distribution\u003c/a\u003e \\(q(s): S \\to \\mathbb{R}^{[0,1]}\\), which tells you how \u0026ldquo;important\u0026rdquo; to the expected value of the distribution a particular state is? 
Then, we can formulate a new, better normalization function called the \u0026ldquo;\u003ca href=\"#importance-sampling\"\u003eimportance weight\u003c/a\u003e\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(s) = \\frac{p(s)}{q(s)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, this would make our estimator:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\mu} = \\frac{\\sum_{n} f(s_{n}) w(s_{n})}{\\sum_{n} w(s_{n})}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"theoretic-grantees\"\u003eTheoretic grantees\u003c/h4\u003e\n\u003cp\u003eSo, there\u0026rsquo;s a distribution over \\(f\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq(s) = \\frac{b(s)}{w_{\\pi}(s)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(s) = \\frac{\\mathbb{E}_{b} \\qty( \\sqrt{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]})}{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich measures how important a state is, where \\(\\pi\\) is the total discounted reward.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhis_despot/","tags":null,"title":"IS-DESPOT"},{"categories":null,"contents":"An isomorphism is an invertable Linear Map. Two vector spaces are called isomorphic if there is an isomorphism from one to another.\n\u0026ldquo;A linear map that maintain the correct structure of the structure.\u0026rdquo;\nThis makes the vector spaces that are isomorphic \u0026ldquo;equivalent\u0026rdquo;, because the isomorphism is the equivalence relationship. 
Of course, they are still not equal.\nGenerally, isomorphisms can only be built between vector spaces over the same field.\nadditional information matrices We know we can represent Linear Maps as matricies.\nSo, given some \\(A\\), we have an inverse \\(A^{-1}\\).\nSo:\n\\begin{equation} A A^{-1} = I = A^{-1} A \\end{equation}\nIn this case, the \\(I\\) is the identity map: \\(Iv = v\\).\ntwo vector spaces are isomorphic IFF they have the same dimension note: this relationship works over the SAME field \\(\\mathbb{F}\\), otherwise lin comb can\u0026rsquo;t work\nGiven vector spaces \\(I,W\\) isomorphic, we desire \\(dim V = dim W\\) Suppose \\(V\\) and \\(W\\) are finite-dimensional vector spaces that are isomorphic. There means that there is an isomorphism, an invertable Linear Map between them which we will name \\(T \\in \\mathcal{L}(V,W)\\).\nBecause \\(T\\) is invertable, and injectivity and surjectivity implies invertability, so \\(null\\ T = \\{0\\}\\) and \\(range\\ T = W\\).\nLastly, we have that:\n\\begin{align} \\dim V \u0026amp;= \\dim null\\ T + \\dim range\\ T \\\\ \u0026amp;= 0 + dim\\ W \\\\ \u0026amp;= dim\\ W \\end{align}\nas desired.\nGiven \\(dim V = dim W\\), show the vector spaces are isomorphic Take \\(v_1, \\dots v_{n}\\) a basis of \\(V\\), and \\(w_1 \\dots w_{n}\\) a basis of \\(W\\).\nDefine a map by basis of domain mapping \\(Tv_{j} = w_{j}\\), that is, \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n}\\).\nBecause \\(w_1 \\dots w_{n}\\) spans \\(W\\) (it is a basis after all), \\(T\\) is surjective.\nAn input with some set of \\(c_{j}\\) is in the null space of \\(T\\) if \\(c_1 w_1 + \\dots + c_{n}w_{n}\\) adds up to \\(0\\) (by definition, as that\u0026rsquo;s the output of \\(T\\)).\nBecause \\(w_1 \\dots w_{n}\\) is a basis, the only linear combination thereof which makes \\(0\\) is by taking all \\(c_1 = \\dots c_{n} = 0\\). 
This make it so that the only valid input to \\(T\\) that will map to \\(0\\) requires \\(c_1=\\dots c_{n} = 0\\), making \\(null\\ T = \\{0\\}\\), showing that \\(T\\) is injective.\nHaving shown \\(T\\) is injective and surjective, it is an isomorphism, as desired. \\(\\blacksquare\\)\nmatricies and Linear Maps from the right dimensions are isomorphic Formally: suppose \\(v_1 \\dots v_{n}\\) is a basis of \\(V\\), and \\(w_1 \\dots w_{m}\\) is a basis of \\(W\\), then, \\(\\mathcal{M}\\) the matrixify operation that takes Linear Maps and turn them into matricies is an isomorphism between \\(\\mathcal{L}(V,W)\\) and \\(\\mathbb{F}^{m,n}\\).\nThe matrixify operation \\(\\mathcal{M}\\) is linear, because matricies are linear. The only thing that \\(\\mathcal{M}\\) will turn into the zero matrix is the zero Linear Map (i.e. \\(\\mathcal{M}(t)=0 \\implies T v_{k} = 0\\ \\forall k 1 \\dots n\\) by construction of matricies, and because the \\(v_{k}\\) are a basis, \\(T v_{k} =0 \\implies T=0\\)), so the null space of \\(\\mathcal{M}\\) is \\(\\{0\\}\\), making \\(\\mathcal{M}\\) injective.\nNow, because of the fact one can construct a matrix based on the scalars applied to map the input basis to the output basis; i.e. that, for any map \\(T \\in \\mathcal{L}(V,W)\\):\n\\begin{equation} Tv_{k} = \\sum_{j=i}^{m}A_{j,k} w_{j} \\end{equation}\nfor some matrix \\(\\mathcal{M}(T) = A \\in \\mathbb{F}^{m,n}\\), we have that \\(\\mathcal{M}\\) can be used to produce any map between \\(V\\) and \\(W\\). This makes \\(\\mathcal{M}\\) surjective.\n\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\) \\(\\mathcal{L}(V,W)\\) is isomorphic to the set of matricies \\(\\mathbb{F}^{m,n}\\) where \\(w_1 \\dots w_{m}\\) is a basis for \\(W\\) and \\(v_1 \\dots v_{n}\\) is a basis for \\(V\\). 
two vector spaces are isomorphic IFF they have the same dimension, so \\(\\dim \\mathcal{L}(V,W) = \\dim \\mathbb{F}^{m,n} = m\\cdot n\\) (see \\(\\mathbb{F}^{m,n}\\)).\nHaving claimed that \\(w_1 \\dots w_{m}\\) is a basis of \\(W\\) and \\(v_1 \\dots v_{n}\\) is a basis of \\(V\\), \\(W\\) and \\(V\\) have dimensions \\(m\\) and \\(n\\) respectively. So \\((\\dim V)(\\dim W) = n \\cdot m = m\\cdot n = \\dim \\mathbb{F}^{m,n} = \\dim \\mathcal{L}(V,W)\\), as desired.\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e is an \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e. Two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es are called \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e if there is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e from one to another.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;A linear map that maintain the correct structure of the structure.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThis makes the \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es that are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e \u0026ldquo;equivalent\u0026rdquo;, because the \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e is the \u003ca href=\"/posts/kbhequivalence/\"\u003eequivalence\u003c/a\u003e relationship. 
Of course, they are still not equal.\u003c/p\u003e\n\u003cp\u003eGenerally, \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003es can only be built between \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es over the same \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"matrices\"\u003ematrices\u003c/h3\u003e\n\u003cp\u003eWe know we can represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es as \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo, given some \\(A\\), we have an inverse \\(A^{-1}\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA A^{-1} = I = A^{-1} A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, the \\(I\\) is the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map: \\(Iv = v\\).\u003c/p\u003e\n\u003ch3 id=\"two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003enote: this relationship works over the SAME field \\(\\mathbb{F}\\), otherwise lin comb can\u0026rsquo;t work\u003c/strong\u003e\u003c/p\u003e\n\u003ch4 id=\"given-vector-spaces-i-w-isomorphic--kbhisomorphism-dot-md--we-desire-dim-v-dim-w\"\u003eGiven vector spaces \\(I,W\\) \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e, we desire \\(dim V = dim W\\)\u003c/h4\u003e\n\u003cp\u003eSuppose \\(V\\) and \\(W\\) are finite-dimensional \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es that are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e. 
There means that there is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e, an \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e between them which we will name \\(T \\in \\mathcal{L}(V,W)\\).\u003c/p\u003e\n\u003cp\u003eBecause \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, and \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e, so \\(null\\ T = \\{0\\}\\) and \\(range\\ T = W\\).\u003c/p\u003e\n\u003cp\u003eLastly, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim V \u0026amp;= \\dim null\\ T + \\dim range\\ T \\\\\n\u0026amp;= 0 + dim\\ W \\\\\n\u0026amp;= dim\\ W\n\\end{align}\u003c/p\u003e\n\u003cp\u003eas desired.\u003c/p\u003e\n\u003ch4 id=\"given-dim-v-dim-w-show-the-vector-spaces-are-isomorphic--kbhisomorphism-dot-md\"\u003eGiven \\(dim V = dim W\\), show the vector spaces are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eTake \\(v_1, \\dots v_{n}\\) a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), and \\(w_1 \\dots w_{n}\\) a basis of \\(W\\).\u003c/p\u003e\n\u003cp\u003eDefine a map by \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e mapping \\(Tv_{j} = w_{j}\\), that is, \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n}\\).\u003c/p\u003e\n\u003cp\u003eBecause \\(w_1 \\dots w_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(W\\) (it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e after all), \\(T\\) is \u003ca 
href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAn input with some set of \\(c_{j}\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) if \\(c_1 w_1 + \\dots + c_{n}w_{n}\\) adds up to \\(0\\) (by definition, as that\u0026rsquo;s the output of \\(T\\)).\u003c/p\u003e\n\u003cp\u003eBecause \\(w_1 \\dots w_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, the only \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e thereof which makes \\(0\\) is by taking all \\(c_1 = \\dots c_{n} = 0\\). This make it so that the only valid \u003cem\u003einput\u003c/em\u003e to \\(T\\) that will map to \\(0\\) requires \\(c_1=\\dots c_{n} = 0\\), making \\(null\\ T = \\{0\\}\\), showing that \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHaving shown \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, it is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e, as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"matricies--kbhmatricies-dot-md--and-linear-map--kbhlinear-map-dot-md--s-from-the-right-dimension--kbhdimension-dot-md--s-are-isomorphic--kbhisomorphism-dot-md\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e and \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es from the right \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003es are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFormally: suppose \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), and \\(w_1 \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(W\\), then, \\(\\mathcal{M}\\) the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003eify operation that takes \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es and turn them into \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between \\(\\mathcal{L}(V,W)\\) and \\(\\mathbb{F}^{m,n}\\).\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003eify operation \\(\\mathcal{M}\\) is linear, because \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are linear. The only thing that \\(\\mathcal{M}\\) will turn into the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e (i.e. 
\\(\\mathcal{M}(t)=0 \\implies T v_{k} = 0\\ \\forall k 1 \\dots n\\) by construction of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e, and because the \\(v_{k}\\) are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, \\(T v_{k} =0 \\implies T=0\\)), so the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(\\mathcal{M}\\) is \\(\\{0\\}\\), making \\(\\mathcal{M}\\) \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNow, because of the fact one can construct a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e based on the scalars applied to map the input \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e to the output \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e; i.e. that, for any map \\(T \\in \\mathcal{L}(V,W)\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv_{k} = \\sum_{j=i}^{m}A_{j,k} w_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some matrix \\(\\mathcal{M}(T) = A \\in \\mathbb{F}^{m,n}\\), we have that \\(\\mathcal{M}\\) can be used to produce any map between \\(V\\) and \\(W\\). This makes \\(\\mathcal{M}\\) \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/h3\u003e\n\u003cp\u003e\\(\\mathcal{L}(V,W)\\) is \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e to the set of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e \\(\\mathbb{F}^{m,n}\\) where \\(w_1 \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e for \\(W\\) and \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e for \\(V\\). 
\u003ca href=\"#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/a\u003e, so \\(\\dim \\mathcal{L}(V,W) = \\dim \\mathbb{F}^{m,n} = m\\cdot n\\) (see \u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\)\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eHaving claimed that \\(w_1 \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(W\\) and \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), \\(W\\) and \\(V\\) have \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003es \\(m\\) and \\(n\\) respectively. So \\((\\dim V)(\\dim W) = n \\cdot m = m\\cdot n = \\dim \\mathbb{F}^{m,n} = \\dim \\mathcal{L}(V,W)\\), as desired.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhisomorphism/","tags":null,"title":"isomorphism"},{"categories":null,"contents":" Date Notes \u0026lt;2022-04-13 Wed\u0026gt; PCP April Checkin \u0026lt;2022-04-13 Wed\u0026gt; Alivio April Checkin \u0026lt;2022-04-16 Sat\u0026gt; GreenSwing April Checkin \u0026lt;2022-04-25 Mon\u0026gt; Pollen April Checkin \u0026lt;2022-04-30 Sat\u0026gt; Logan\u0026rsquo;s Team Checkin \u0026lt;2022-05-02 Mon\u0026gt; Anna\u0026rsquo;s Team Checkin TODO Stack Get asthma kids leads for Alivio GreenSwing Hiring: Fufilling Orders, MechE Conrad money? 
Get Mentors for Pollen =\u0026gt; Figma lady ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eNotes\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-13 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpcp_april_checkin/\"\u003ePCP April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-13 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhalivio_april_checkin/\"\u003eAlivio April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-16 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhgreenswing_april_checkin/\"\u003eGreenSwing April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-25 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003ePollen April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-30 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlogan_s_team_check_in/\"\u003eLogan\u0026rsquo;s Team Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-02 
Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhanna_s_team_checkin/\"\u003eAnna\u0026rsquo;s Team Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"stack\"\u003e\u003cspan class=\"org-todo todo TODO\"\u003eTODO\u003c/span\u003e Stack\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eGet asthma kids leads for Alivio\u003c/li\u003e\n\u003cli\u003eGreenSwing Hiring: Fufilling Orders, MechE\u003c/li\u003e\n\u003cli\u003eConrad money?\u003c/li\u003e\n\u003cli\u003eGet Mentors for Pollen =\u0026gt; Figma lady\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhistudio_meeting_notes/","tags":null,"title":"iStudio Meeting Notes"},{"categories":null,"contents":"If a student has ability \\(a\\), and a probably is \\(d\\) difficulty, the probability of a student getting something right:\n\\begin{equation} \\sigma (a-d) \\end{equation}\nthis doesn\u0026rsquo;t consider SLIPPING at all.\n","html":"\u003cp\u003eIf a student has ability \\(a\\), and a probably is \\(d\\) difficulty, the probability of a student getting something right:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma (a-d)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis doesn\u0026rsquo;t consider \u003cstrong\u003eSLIPPING\u003c/strong\u003e at all.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhitem_response_theory/","tags":null,"title":"Item Response Theory"},{"categories":null,"contents":" https://en.wikipedia.org/wiki/It%C3%B4%27s_lemma\nfor integrating Differential Equations with Brownian Motion.\n","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-14_14-08-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/It%C3%B4%27s_lemma\"\u003ehttps://en.wikipedia.org/wiki/It%C3%B4%27s_lemma\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003efor integrating \u003ca 
href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e with \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhito_intergral/","tags":null,"title":"Itô Intergral"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhjohn_corso/","tags":null,"title":"John Corso"},{"categories":null,"contents":"for random variables \\(X, Y\\), the joint probability distribution is the probability of both of them happening at once.\n\\begin{equation} p(x,y) \\end{equation}\nThe most fundamental solution can be derived with a table where all complete probabilities are listed. They are going to be too large to practically store.\nprobability of the joint of a Bayes Net \\begin{equation} p(joint) = \\prod_{i \\in BN}^{} p(x_{i} | parents(x_{i})) \\end{equation}\n","html":"\u003cp\u003efor \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es \\(X, Y\\), the \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e is the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of both of them happening at once.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x,y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe most fundamental solution can be derived with a table where all complete \u003ca href=\"/posts/kbhprobability/\"\u003eprobabilities\u003c/a\u003e are listed. 
They are going to be too large to practically store.\u003c/p\u003e\n\u003ch2 id=\"probability-of-the-joint-of-a-bayes-net--kbhbaysian-network-dot-md\"\u003eprobability of the joint of a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\np(joint) = \\prod_{i \\in BN}^{} p(x_{i} | parents(x_{i}))\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhjoint_probability_distribution/","tags":null,"title":"joint probability distribution"},{"categories":null,"contents":"Punchlines Screw you, I\u0026rsquo;m not Stupid\u0026hellip;. I\u0026rsquo;m just Chinese. Joke\u0026rsquo;s on you, I\u0026rsquo;m both Chinese AND stupid. Setups Why is it that toilets have a refractory period? The satanic church fights back against the Texas abortion ban. Completed jokes Where did Texas gun control funding go? its illegal to own more than 6 d*l**s there Not only did you have to pass normal tests, you had to pass like a thousand COVID tests ","html":"\u003ch2 id=\"punchlines\"\u003ePunchlines\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eScrew you, I\u0026rsquo;m not Stupid\u0026hellip;. I\u0026rsquo;m just Chinese.\u003c/li\u003e\n\u003cli\u003eJoke\u0026rsquo;s on you, I\u0026rsquo;m both Chinese \u003cstrong\u003eAND\u003c/strong\u003e stupid.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"setups\"\u003eSetups\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhy is it that toilets have a refractory period?\u003c/li\u003e\n\u003cli\u003eThe satanic church fights back against the Texas abortion ban.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"completed-jokes\"\u003eCompleted jokes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhere did Texas gun control funding go? 
its illegal to own more than 6 d*l**s there\u003c/li\u003e\n\u003cli\u003eNot only did you have to pass normal tests, you had to pass like a thousand COVID tests\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhjokes/","tags":null,"title":"jokes"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.642633\nOne-Liner Developed a kitchen sink of diagnoses tools and correlated it with biomarkers.\nNovelty The kitchen sink of data collection (phones, tablet, eye tracker, microphone, wristband) and the kitchen sink of noninvasive data imaging, psych, speech assesment, clinical metadata.\nNotable Methods Here\u0026rsquo;s their kitchen sink\nI have no idea why a thermal camera is needed\nKey Figs Here are the features they extracted\nDeveloped the features collected via a method similar to action research, did two passes and refined/added information after preliminary analysis. Figure above also include info about whether or not the measurement was task specific.\nand there are the biomarkers and medical data they collected\nAnd then they correlated their kitchen sink with biomarker from the tap\nNew Concepts spinal tap Notes ","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.642633\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eDeveloped a kitchen sink of diagnoses tools and correlated it with biomarkers.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eThe kitchen sink of data collection (phones, tablet, eye tracker, microphone, wristband) and the kitchen sink of noninvasive data imaging, psych, speech assesment, clinical metadata.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eHere\u0026rsquo;s their kitchen sink\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-06-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eI have \u003cstrong\u003e\u003cstrong\u003eno 
idea\u003c/strong\u003e\u003c/strong\u003e why a thermal camera is needed\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cp\u003eHere are the features they extracted\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-07-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eDeveloped the features collected via a method similar to action research, did two passes and refined/added information after preliminary analysis. Figure above also include info about whether or not the measurement was task specific.\u003c/p\u003e\n\u003cp\u003eand there are the biomarkers and medical data they collected\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-09-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAnd then they correlated their kitchen sink with biomarker from the tap\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-14-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspinal_tap/\"\u003espinal tap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhjonell_2021/","tags":["ntj"],"title":"Jonell 2021"},{"categories":null,"contents":"POMDPs can become computationally quite intractable. Alternative: a stochastic, memoryless policy. 
A policy should be stochastic in order to satisfy certain conditions during adversarial games (think bluffing).\nJSJ is basically Q-Learning adapted for POMDPs:\n\\begin{equation} \\begin{cases} Q^{\\pi}(s,a) = \\sum_{t=1}^{\\infty}\\mathbb{E}_{\\pi} [r_{t}- R^{\\pi}|s, a] \\\\ Q^{\\pi}(o,a) = \\mathbb{E}_{s}[Q^{\\pi}(s,a) | M(s) = o] \\end{cases} \\end{equation}\nwhere \\(M\\) is a mapping between states and possible observations.\nPolicy Improvement Now, we want to maximise:\n\\begin{equation} \\Delta_{o}(a) = Q(o,a) - V(o) \\end{equation}\n\u0026ldquo;if an action \\(a\\) results in a better value than the expected value, we want to upweight that action.\u0026rdquo;\nWe further normalise this:\n\\begin{equation} \\delta_{o}(a) = \\Delta_{o}(a) - \\frac{1}{|A|} \\sum_{a\u0026rsquo; \\in A} \\Delta_{o}(a\u0026rsquo;) \\end{equation}\n\u0026ldquo;how does the diff of my action is considering improve over all other actions (i.e. \u0026ldquo;maybe all actions have similar diffs\u0026rdquo;).\nNow, substitution time:\n\\begin{equation} \\delta_{o}(a) = \\qty(Q(o,a) - V(o)) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)} Q(o,a\u0026rsquo;) - V(o) \\end{equation}\nWhich, after simplification (the two \\(V\\) cancels out), we actually get:\n\\begin{equation} \\delta_{o}(a) = Q(o,a) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)}^{} Q(o,a\u0026rsquo;) \\end{equation}\nwhich makes sense; \u0026ldquo;how does our current action does better than all others\u0026rdquo;. To obtain \\(Q(o,a)\\), see the big function above.\nFinally, having defined our update step, we can now let the good times roll\u0026mdash;-gradient ascent! 
For some action \\(a\\) at observation \\(o\\) and learning rate we update our policy:\n\\begin{equation} Q^{\\pi}(a|o) = Q^{\\pi}(a|o) + \\varepsilon \\delta_{o}(a) \\end{equation}\nWe can then use it to take some more actions, compute more deltas, repeat.\nPolicy Evaluation ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es can become computationally quite intractable. Alternative: a stochastic, memoryless policy. A policy should be stochastic in order to satisfy certain conditions during adversarial games (think bluffing).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhjsj/\"\u003eJSJ\u003c/a\u003e is basically \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#q-learning\"\u003eQ-Learning\u003c/a\u003e adapted for \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nQ^{\\pi}(s,a) = \\sum_{t=1}^{\\infty}\\mathbb{E}_{\\pi} [r_{t}- R^{\\pi}|s, a] \\\\\nQ^{\\pi}(o,a) = \\mathbb{E}_{s}[Q^{\\pi}(s,a) | M(s) = o]\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(M\\) is a mapping between states and possible observations.\u003c/p\u003e\n\u003ch2 id=\"policy-improvement\"\u003ePolicy Improvement\u003c/h2\u003e\n\u003cp\u003eNow, we want to maximise:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta_{o}(a) = Q(o,a) - V(o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;if an action \\(a\\) results in a better value than the expected value, we want to upweight that action.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe further normalise this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{o}(a) = \\Delta_{o}(a) - \\frac{1}{|A|} \\sum_{a\u0026rsquo; \\in A} \\Delta_{o}(a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;how does the diff of my action is considering improve over all other actions (i.e. 
\u0026ldquo;maybe all actions have similar diffs\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eNow, substitution time:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{o}(a) = \\qty(Q(o,a) - V(o)) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)} Q(o,a\u0026rsquo;) - V(o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich, after simplification (the two \\(V\\) cancels out), we actually get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{o}(a) = Q(o,a) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)}^{} Q(o,a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich makes sense; \u0026ldquo;how does our current action does better than all others\u0026rdquo;. To obtain \\(Q(o,a)\\), see the big function above.\u003c/p\u003e\n\u003cp\u003eFinally, having defined our update step, we can now let the good times roll\u0026mdash;-gradient ascent! For some action \\(a\\) at observation \\(o\\) and learning rate we update our policy:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ^{\\pi}(a|o) = Q^{\\pi}(a|o) + \\varepsilon \\delta_{o}(a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can then use it to take some more actions, compute more deltas, repeat.\u003c/p\u003e\n\u003ch2 id=\"policy-evaluation\"\u003ePolicy Evaluation\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-08_09-54-59_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhjsj/","tags":null,"title":"JSJ"},{"categories":null,"contents":"Ka\u0026rsquo;chava is described as a \u0026ldquo;superfood\u0026rdquo; which is used as a meal replacement to manage hunger.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhka_chava/\"\u003eKa\u0026rsquo;chava\u003c/a\u003e is described as a \u0026ldquo;superfood\u0026rdquo; which is used as a \u003ca href=\"/posts/kbhmeal_replacement/\"\u003emeal replacement\u003c/a\u003e to manage 
hunger.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhka_chava/","tags":null,"title":"Ka'Chava"},{"categories":null,"contents":" Orbits of planetary bodies are ellipses with the sun at one of the two foci Drawing a line from the sun to the orbiting body, they would sweep out equal areas Planets that are closer to the sun have much shorter periods than that Squares of the periods of the planets is equal to the cubes of the distance from the planet to the sun ","html":"\u003col\u003e\n\u003cli\u003eOrbits of planetary bodies are ellipses with the sun at one of the two foci\u003c/li\u003e\n\u003cli\u003eDrawing a line from the sun to the orbiting body, they would sweep out equal areas\u003c/li\u003e\n\u003cli\u003ePlanets that are closer to the sun have much shorter periods than that\n\u003cul\u003e\n\u003cli\u003eSquares of the periods of the planets is equal to the cubes of the distance from the planet to the sun\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkepler_s_laws_of_planetary_motion/","tags":null,"title":"Kepler's Laws of Planetary Motion"},{"categories":null,"contents":"kernel smoothing is a way of smoothing a utility function over continuous state space despite only sampling a discrete set of the states.\n\\begin{equation} U_{\\theta}(s) = \\theta^{T} \\beta(s) \\end{equation}\nWe multiply a vector \\(\\theta_{j}\\), the utility of being in each state \\(s_{j}\\) a basis function, which smears, generated for each \\(i\\) of known discrete state we have:\n\\begin{equation} \\beta_{i}(s) = \\frac{k(s, s_{i})}{\\sum_{j}^{} k(s, s_{j})} \\end{equation}\nwhere, \\(k\\) is the kernel function, a function inversely proportional to how close the two states are:\nk(s,sj) is a normalization factor and doesn\u0026rsquo;t need to be computed at every call.\n\\begin{equation} k(s, s\u0026rsquo;) = \\max \\qty(d(s,s\u0026rsquo;), \\epsilon)^{-1} \\end{equation}\nwhere \\(d\\) is a 
measure of distance. We clip this function at \\(\\epsilon\\) to prevent inverting \\(0\\).\ngaussian kernel There is an alternate state smoothing function which is called gaussian kernel, which allows you to control the degree of smoothing between two states through a parameter \\(\\sigma\\):\n\\begin{equation} k(s,s\u0026rsquo;) = \\exp \\qty( - \\frac{d(s,s\u0026rsquo;)^{2}}{2 \\sigma^{2}}) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel smoothing\u003c/a\u003e is a way of smoothing a \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function over \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state space despite only sampling a discrete set of the states.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{\\theta}(s) = \\theta^{T} \\beta(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe multiply a vector \\(\\theta_{j}\\), the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of being in each state \\(s_{j}\\) a basis function, which smears, generated for each \\(i\\) of known discrete state we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{i}(s) = \\frac{k(s, s_{i})}{\\sum_{j}^{} k(s, s_{j})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(k\\) is the \u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel function\u003c/a\u003e, a function inversely proportional to how close the two states are:\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ek(s,sj)\u003c/strong\u003e\u003c/strong\u003e is a normalization factor and doesn\u0026rsquo;t need to be computed at every call.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk(s, s\u0026rsquo;) = \\max \\qty(d(s,s\u0026rsquo;), \\epsilon)^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is a measure of distance. 
We clip this function at \\(\\epsilon\\) to prevent inverting \\(0\\).\u003c/p\u003e\n\u003ch2 id=\"gaussian--kbhgaussian-distribution-dot-md--kernel\"\u003e\u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e kernel\u003c/h2\u003e\n\u003cp\u003eThere is an alternate state smoothing function which is called \u003ca href=\"#gaussian--kbhgaussian-distribution-dot-md--kernel\"\u003egaussian kernel\u003c/a\u003e, which allows you to control the degree of smoothing between two states through a parameter \\(\\sigma\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk(s,s\u0026rsquo;) = \\exp \\qty( - \\frac{d(s,s\u0026rsquo;)^{2}}{2 \\sigma^{2}})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkernel_smoothing/","tags":null,"title":"kernel smoothing"},{"categories":null,"contents":"Keynsian Politics is a economy strategy to support large projects via the government to boost economic output (i.e. that the economy needs a minder, but is generally free-sustaining.)\nSee also: Keynsian Economics was not trying to entirely replace markets\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Politics\u003c/a\u003e is a economy strategy to support large projects via the government to boost economic output (i.e. 
that the economy needs a minder, but is generally free-sustaining.)\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhhow_did_economists_get_it_so_wrong/#id-70e8aae4-7410-46d7-93ab-504ef8effc79-keynsian-economics-was-not-trying-to-entirely-replace-markets\"\u003eKeynsian Economics was not trying to entirely replace markets\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkeynsian_politics/","tags":null,"title":"Keynsian Politics"},{"categories":null,"contents":"in algorithms, key\n","html":"\u003cp\u003ein algorithms, key\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkeys/","tags":null,"title":"keys (algorithms)"},{"categories":null,"contents":"kirchoff\u0026rsquo;s laws is a set of laws to deal with complicated circuits, and it says\u0026hellip;\nany junction, the current entering equals the current leaving the sum of voltage across a closed loop is \\(0\\) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkirchoff_s_laws/\"\u003ekirchoff\u0026rsquo;s laws\u003c/a\u003e is a set of laws to deal with complicated circuits, and it says\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eany junction, the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e entering equals the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e leaving\u003c/li\u003e\n\u003cli\u003ethe sum of \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e across a closed loop is \\(0\\)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkirchoff_s_laws/","tags":null,"title":"kirchoff's laws"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhkl_divergence/","tags":null,"title":"KL Divergence"},{"categories":null,"contents":"KLA is a semiconductor process control company. 
https://www.kla.com/ Rick Wallace is the CEO.\n135000 employees 8.2B of revenue 72-300 tools 15% of revenue in R\u0026amp;D Their main business is in automatically inspecting chips and wafers in time.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e is a \u003ca href=\"/posts/kbhsemiconductor/\"\u003esemiconductor\u003c/a\u003e process control company. \u003ca href=\"https://www.kla.com/\"\u003ehttps://www.kla.com/\u003c/a\u003e \u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e is the CEO.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e135000 employees\u003c/li\u003e\n\u003cli\u003e8.2B of revenue\u003c/li\u003e\n\u003cli\u003e72-300 tools\u003c/li\u003e\n\u003cli\u003e15% of revenue in R\u0026amp;D\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTheir main business is in automatically inspecting chips and wafers in time.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkla/","tags":null,"title":"KLA"},{"categories":null,"contents":"controllable We want \\(P(Y|X) = p\\), for a specific \\(p\\) that we specify.\nfine-grained control ideally, instead of optimizing over entire expected values, we want to tune specific utputs\nSuccess in Editing Say we edited some \\(M\\), specifying a viper is a vertebrate.\nIdeally, this should also edit the other related information:\n\\(P\\) (paraphrases)j: viper and vertebrates \\(E\\) (logical entailments): a viper has a brain And we shouldn\u0026rsquo;t touch:\n\\(R\\) (other stuff): Chile is a country \\(LN\\) (local neural data): a viper is venomous Hypernetwork Weight Editing\u0026rsquo;s Drawbacks harder to fix errors than creating them harder to retain preformance on local data than random data hander to generalize to entailed data than paraphrases Updates improves consistency Information Deletion \u0026ldquo;deleting information\u0026rdquo; from LLMs is undefined RLHF, SFT, etc. 
HIDES rather than ddeleting this can be framed as model editing High Level Approach notice threat information attempt to \u0026ldquo;delete it\u0026rdquo; evaluate the deletion try to extract the threat information again loop We formalize this by saying, for some adversarial \\(A\\) to question \\(Q\\), we hope that the candidate output set \\(C\\) of size \\(B\\) all don\u0026rsquo;t contain \\(A\\).\nFormal guarantees don\u0026rsquo;t work very well in LLMWorld.\nIdeally, we balance attack success and the damage to other aspects from the model.\nSupervision Gap Recovered Measuring the ratio between the rate of success of \u0026ldquo;easy\u0026rdquo; supervision data over \u0026ldquo;hard\u0026rdquo; supervisiation data.\n","html":"\u003ch2 id=\"controllable\"\u003econtrollable\u003c/h2\u003e\n\u003cp\u003eWe want \\(P(Y|X) = p\\), for a specific \\(p\\) that we specify.\u003c/p\u003e\n\u003ch3 id=\"fine-grained-control\"\u003efine-grained control\u003c/h3\u003e\n\u003cp\u003eideally, instead of optimizing over entire expected values, we want to tune specific utputs\u003c/p\u003e\n\u003ch2 id=\"success-in-editing\"\u003eSuccess in Editing\u003c/h2\u003e\n\u003cp\u003eSay we edited some \\(M\\), specifying a viper is a vertebrate.\u003c/p\u003e\n\u003cp\u003eIdeally, this should also edit the other related information:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P\\) (paraphrases)j: viper and vertebrates\u003c/li\u003e\n\u003cli\u003e\\(E\\) (logical entailments): a viper has a brain\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAnd we shouldn\u0026rsquo;t touch:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(R\\) (other stuff): Chile is a country\u003c/li\u003e\n\u003cli\u003e\\(LN\\) (local neural data): a viper is venomous\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"hypernetwork-weight-editing-s-drawbacks\"\u003eHypernetwork Weight Editing\u0026rsquo;s Drawbacks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eharder to \u003cstrong\u003efix 
errors\u003c/strong\u003e than \u003cstrong\u003ecreating them\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eharder to retain preformance on \u003cstrong\u003elocal data\u003c/strong\u003e than \u003cstrong\u003erandom data\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ehander to generalize to \u003cstrong\u003eentailed data\u003c/strong\u003e than \u003cstrong\u003eparaphrases\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eUpdates \u003cstrong\u003eimproves consistency\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"information-deletion\"\u003eInformation Deletion\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;deleting information\u0026rdquo; from LLMs is undefined\u003c/li\u003e\n\u003cli\u003eRLHF, SFT, etc. \u003cstrong\u003eHIDES\u003c/strong\u003e rather than ddeleting\u003c/li\u003e\n\u003cli\u003ethis can be framed as model editing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"high-level-approach\"\u003eHigh Level Approach\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enotice threat information\u003c/li\u003e\n\u003cli\u003eattempt to \u0026ldquo;delete it\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eevaluate the deletion\u003c/li\u003e\n\u003cli\u003etry to extract the threat information again\u003c/li\u003e\n\u003cli\u003eloop\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe formalize this by saying, for some adversarial \\(A\\) to question \\(Q\\), we hope that the candidate output set \\(C\\) of size \\(B\\) all don\u0026rsquo;t contain \\(A\\).\u003c/p\u003e\n\u003cp\u003eFormal guarantees don\u0026rsquo;t work very well in LLMWorld.\u003c/p\u003e\n\u003cp\u003eIdeally, we balance attack success and the damage to other aspects from the model.\u003c/p\u003e\n\u003ch3 id=\"supervision-gap-recovered\"\u003eSupervision Gap Recovered\u003c/h3\u003e\n\u003cp\u003eMeasuring the ratio between the rate of success of \u0026ldquo;easy\u0026rdquo; supervision data over \u0026ldquo;hard\u0026rdquo; supervisiation 
data.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhknowledge_editing/","tags":null,"title":"Knowledge Editing"},{"categories":null,"contents":"this thing needs an eigenvalue.\n","html":"\u003cp\u003ethis thing needs an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhknowledgebase_testing/","tags":null,"title":"knowledgebase testing page"},{"categories":null,"contents":"A KS test is a hypothesis test that measures if two groups of samples are drawn from the same distribution.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKS test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e that measures if two groups of samples are drawn from the same distribution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkolmogorov_smirnov_test/","tags":null,"title":"Kolmogorov-Smirnov test"},{"categories":null,"contents":"Where:\n\\begin{equation} ||X-Y||_{\\infty} = \\max \\{| x_{i} - y_{i} |, x \\in X, y \\in Y} \\} \\end{equation}\n","html":"\u003cp\u003eWhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n||X-Y||_{\\infty} = \\max \\{| x_{i} - y_{i} |, x \\in X, y \\in Y} \\}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhl_infty/","tags":null,"title":"L-infinity norm"},{"categories":null,"contents":"Want mechanics? No. You get energy.\nFirst, recall the stationary-action principle. To define a system in Lagrangian Mechanics, we define a smooth function \\(L\\), called the \u0026ldquo;Lagrangian\u0026rdquo;, and some configuration space (axis) \\(M\\).\nBy convention, \\(L=T-V\\). \\(T\\) is the kinetic energy in the system, and \\(V\\) is the potential energy in the system.\nBy the stationary-action principle, then, we require \\(L\\) to remain at a critical point (max, min, saddle.) 
This fact allows us to calculate the equations of motion by hold \\(L\\) at such a point, and evolving the \\((T,V)\\) pair to remain at that point.\nThe notion of solving for optimal \\((T,V)\\), which will give us the equations of motion, is why Lagrangian multipliers were invented.\nNow, here\u0026rsquo;s a few results which help you deal with the Lagrangian.\nConservation of Momentum Note that momentum is always conserved.\nRecall that:\n\\begin{equation} F = m a = m \\dv{v}{t} = \\dv{mv}{t} \\end{equation}\nwhen \\(m\\) is constant, which it almost certainly is.\nRecall the definition of momentum:\n\\begin{equation} p := mv \\end{equation}\nTherefore, we have that:\n\\begin{equation} F = \\dv{p}{t} \\end{equation}\nGreat, now let\u0026rsquo;s recall what energy is:\n\\begin{equation} W = \\int F\\dd{x} \\end{equation}\nSubstituting our definitions of force:\n\\begin{equation} W = \\int \\dv{p}{t}\\dd{x} = \\int \\dd{p}\\dv{x}{t} = \\int v \\dd{p} \\end{equation}\n[something something something ask leonard]\nWe end up with:\n\\begin{equation} \\pdv{W}{v} = p \\end{equation}\nhow? IDK. But you then usually would use this by taking the derivative of the Lagrangian by velocity, then figuring it lingual to \\(0\\).\nBeam Theory We begin with this Euler-Lagrange expression:\nthese are a series of expressions derived to semiautomatically solve Largrangian expressions of expressions derived to semiautomatically solve Largrangian expressions: they are the pre-figured-out stationary-action principle \u0026ldquo;stationary points\u0026rdquo; with the least energy.\nWe want to create a Lagrangian of our system, and plug it in there.\nWe define the Lagrangian for this system to be\nRecall that the Lagrangian is defined by all kinetic energy sum minus all potential energy sum. 
Will investigate deeper later, but the first term is obviously the kinetic energy (1/2 mass-density velocity squared), then the subtracted potential energy term is the spring potential of the system (1/2 kx^2).\nThen there\u0026rsquo;s this third term. No idea.\nWe then try to plug stuff into that Euler-Lagrange expression. We can calculate for ourselves that:\nFinally, then:\nliterally\u0026hellip; the end. We just move stuff around and that\u0026rsquo;s literally it.\n","html":"\u003cp\u003eWant mechanics? No. You get energy.\u003c/p\u003e\n\u003cp\u003eFirst, recall the \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e. To define a system in \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian Mechanics\u003c/a\u003e, we define a \u003ca href=\"/posts/kbhsmooth_function/\"\u003esmooth function\u003c/a\u003e \\(L\\), called the \u0026ldquo;Lagrangian\u0026rdquo;, and some configuration space (axis) \\(M\\).\u003c/p\u003e\n\u003cp\u003eBy convention, \\(L=T-V\\). \\(T\\) is the kinetic energy in the system, and \\(V\\) is the potential energy in the system.\u003c/p\u003e\n\u003cp\u003eBy the \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e, then, we require \\(L\\) to remain at a critical point (max, min, saddle.) 
This fact allows us to calculate the equations of motion by hold \\(L\\) at such a point, and evolving the \\((T,V)\\) pair to remain at that point.\u003c/p\u003e\n\u003cp\u003eThe notion of solving for optimal \\((T,V)\\), which will give us the equations of motion, is why Lagrangian multipliers were invented.\u003c/p\u003e\n\u003cp\u003eNow, here\u0026rsquo;s a few results which help you deal with the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"conservation-of-momentum\"\u003eConservation of Momentum\u003c/h2\u003e\n\u003cp\u003eNote that momentum is always conserved.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF = m a = m \\dv{v}{t} = \\dv{mv}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhen \\(m\\) is constant, which it almost certainly is.\u003c/p\u003e\n\u003cp\u003eRecall the definition of momentum:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np := mv\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF = \\dv{p}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat, now let\u0026rsquo;s recall what energy is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nW = \\int F\\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting our definitions of force:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nW = \\int \\dv{p}{t}\\dd{x} = \\int \\dd{p}\\dv{x}{t} = \\int v \\dd{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e[something something something ask leonard]\u003c/p\u003e\n\u003cp\u003eWe end up with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{W}{v} = p\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehow? IDK. 
But you then usually would use this by taking the derivative of the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e by velocity, then figuring it lingual to \\(0\\).\u003c/p\u003e\n\u003ch2 id=\"beam-theory\"\u003eBeam Theory\u003c/h2\u003e\n\u003cp\u003eWe begin with this Euler-Lagrange expression:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-28-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ethese are a series of expressions derived to semiautomatically solve Largrangian expressions of expressions derived to semiautomatically solve Largrangian expressions: they are the pre-figured-out \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e \u0026ldquo;stationary points\u0026rdquo; with the least energy.\u003c/p\u003e\n\u003cp\u003eWe want to create a \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e of our system, and plug it in there.\u003c/p\u003e\n\u003cp\u003eWe define the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e for this system to be\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-31-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e is defined by all kinetic energy sum minus all potential energy sum. Will investigate deeper later, but the first term is obviously the kinetic energy (1/2 mass-density velocity squared), then the subtracted potential energy term is the spring potential of the system (1/2 kx^2).\u003c/p\u003e\n\u003cp\u003eThen there\u0026rsquo;s this third term. No idea.\u003c/p\u003e\n\u003cp\u003eWe then try to plug stuff into that Euler-Lagrange expression. 
We can calculate for ourselves that:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-33-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFinally, then:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-33-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eliterally\u0026hellip; the end. We just move stuff around and that\u0026rsquo;s literally it.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-33-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhlagrangian_mechanics/","tags":null,"title":"Lagrangian Mechanics"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624694\nOne-Liner Proposed a large multimodal approach to embed auditory info + biomarkers for baseline classification.\nNovelty Developed a massively multimodal audio-to-embedding correlation system that maps audio to biomarker information collected (mood, memory, respiratory) and demonstrated its ability to discriminate cough results for COVID. (they were looking for AD; whoopsies)\nNotable Methods Developed a feature extraction model for AD detection named Open Voice Brain Model Collected a dataset on people coughing and correlated it with biomarkers Key Figs Figure 2 This is MULTI-MODAL as heck\nThis figure tells us the large network the came up with.\nTable 2 and 3 The descriminator tacked on the end of the network is transfer-trained to different tasks. 
It shows promising results for cough-to-COVID classification\nNew Concepts OVBM Lyu 2018 Notes Biomarker correlation Is biomarker data something that is commonly used as a feature extraction/benchmark tool?\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624694\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eProposed a large multimodal approach to embed auditory info + biomarkers for baseline classification.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eDeveloped a massively multimodal audio-to-embedding correlation system that maps audio to biomarker information collected (mood, memory, respiratory) and demonstrated its ability to discriminate cough results for COVID. (they were looking for AD; whoopsies)\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDeveloped a feature extraction model for AD detection named \u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOpen Voice Brain Model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eCollected a dataset on people coughing and correlated it with biomarkers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-2\"\u003eFigure 2\u003c/h3\u003e\n\u003cp\u003eThis is \u003cstrong\u003e\u003cstrong\u003eMULTI-MODAL\u003c/strong\u003e\u003c/strong\u003e as heck\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-32-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the large network the came up with.\u003c/p\u003e\n\u003ch3 id=\"table-2-and-3\"\u003eTable 2 and 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-37-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe descriminator tacked on the end of the network is transfer-trained to different tasks. 
It shows promising results for cough-to-COVID classification\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOVBM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlyu_2018/\"\u003eLyu 2018\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003ch3 id=\"biomarker-correlation\"\u003eBiomarker correlation\u003c/h3\u003e\n\u003cp\u003eIs biomarker data something that is commonly used as a feature extraction/benchmark tool?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-24-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhlaguarta_2021/","tags":["ntj"],"title":"Laguarta 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlambek_calculus/","tags":null,"title":"Lambek Calculus"},{"categories":null,"contents":"effability\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbheffability/\"\u003eeffability\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlanguage/","tags":null,"title":"language"},{"categories":null,"contents":"Definitions: Language Agents Agents that uses the language to act on behave of another person or group.\nTransitions Transition first from rule based learning to statistical learning Rise of semantic parsing: statistical models of parsing Then, moving from semantic parsing to large models\u0026mdash;putting decision making and language modeling into the same bubble Importance of LLMs They are simply better at understanding language inputs They can generate structured information (i.e. not just human language, JSONs, etc.) 
They can perform natural language \u0026ldquo;reasoning\u0026rdquo;\u0026mdash;not just generate (and natural language generation, abv)\n1+3 gives you chain of thought reasoning 1+2 gives CALM, SayCan, and other types of RL text parsing in order to do stuff with robotics all three gives ReAct ReAct Recover from incorrect thought and incorrect tools Allows human-in-the-loop alignment Major Flaw left-to-right one-pass decoding doesn\u0026rsquo;t allow alternate solutions bad properties regarding propagating hallucination search and planning had been explored a lot Tree of Thoughts Partial solution: Tree of Thoughts\nevaluate sub-paths to determine most optimal paths: think A* but with more proper heuristic bounding.\nBig idea: merge classic algorithmic ideas with decision making against LLMs\nProblem: agents are not robust at all https://github.com/ryoungj/ToolEmu\nKey New Challenges for Agents Evaluation Different from how previous NLP benchmarks: we are not worried about language modeling No longer boundaries between various fields Common goals:\nrealistic agents\u0026mdash;stop playing Atari games. 
reproducible systems measurability goals scalable models which are easy to use Web as an Interactive Environment agents on the web is both practical and scalable https://webshop-pnlp.github.io/ WebShop can actually transfer with no work to training on Amazon Mind2Web InterCode Formulation of agent decisions as POMDP in order to fully benchmark Markovian decisions:\nhttps://arxiv.org/abs/2306.14898\nAgent Development Agents development has no core framework\nproduction systems set of rules specificying a precondition + action when preconditinons are met, perform an action Big kitchen sink proposal: https://arxiv.org/abs/2309.02427\nTrust and safety Agents are much more powerful and dynamic\n","html":"\u003ch2 id=\"definitions-language-agents\"\u003eDefinitions: Language Agents\u003c/h2\u003e\n\u003cp\u003eAgents that uses the language to act on behave of another person or group.\u003c/p\u003e\n\u003ch2 id=\"transitions\"\u003eTransitions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eTransition first from rule based learning to statistical learning\u003c/li\u003e\n\u003cli\u003eRise of semantic parsing: statistical models of parsing\u003c/li\u003e\n\u003cli\u003eThen, moving from semantic parsing to large models\u0026mdash;putting decision making and language modeling into the same bubble\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"importance-of-llms\"\u003eImportance of LLMs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThey are simply better at understanding language inputs\u003c/li\u003e\n\u003cli\u003eThey can generate structured information (i.e. 
not just human language, JSONs, etc.)\u003c/li\u003e\n\u003cli\u003eThey can perform natural language \u0026ldquo;reasoning\u0026rdquo;\u0026mdash;not just generate\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e(and natural language generation, abv)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e1+3 gives you chain of thought reasoning\u003c/li\u003e\n\u003cli\u003e1+2 gives CALM, SayCan, and other types of RL text parsing in order to do stuff with robotics\u003c/li\u003e\n\u003cli\u003eall three gives ReAct\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"react\"\u003eReAct\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRecover from incorrect thought and incorrect tools\u003c/li\u003e\n\u003cli\u003eAllows human-in-the-loop alignment\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"major-flaw\"\u003eMajor Flaw\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eleft-to-right one-pass decoding doesn\u0026rsquo;t allow alternate solutions\u003c/li\u003e\n\u003cli\u003ebad properties regarding propagating hallucination\u003c/li\u003e\n\u003cli\u003esearch and planning had been explored a lot\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"tree-of-thoughts\"\u003eTree of Thoughts\u003c/h3\u003e\n\u003cp\u003ePartial solution: \u003ca href=\"#tree-of-thoughts\"\u003eTree of Thoughts\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_11-22-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eevaluate sub-paths to determine most optimal paths: think A* but with more proper heuristic bounding.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eBig idea: merge classic algorithmic ideas with decision making against LLMs\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"problem-agents-are-not-robust-at-all\"\u003eProblem: agents are not robust at all\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://github.com/ryoungj/ToolEmu\"\u003ehttps://github.com/ryoungj/ToolEmu\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-new-challenges-for-agents\"\u003eKey 
New Challenges for Agents\u003c/h2\u003e\n\u003ch3 id=\"evaluation\"\u003eEvaluation\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eDifferent from how previous NLP benchmarks: we are \u003cstrong\u003enot\u003c/strong\u003e worried about language modeling\u003c/li\u003e\n\u003cli\u003eNo longer boundaries between various fields\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eCommon goals:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003erealistic agents\u0026mdash;stop playing Atari games.\u003c/li\u003e\n\u003cli\u003ereproducible systems\u003c/li\u003e\n\u003cli\u003emeasurability goals\u003c/li\u003e\n\u003cli\u003escalable models\u003c/li\u003e\n\u003cli\u003ewhich are easy to use\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"web-as-an-interactive-environment\"\u003eWeb as an Interactive Environment\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eagents on the web is both practical and scalable\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://webshop-pnlp.github.io/\"\u003ehttps://webshop-pnlp.github.io/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWebShop can actually transfer with no work to training on Amazon\u003c/li\u003e\n\u003cli\u003eMind2Web\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"intercode\"\u003eInterCode\u003c/h4\u003e\n\u003cp\u003eFormulation of agent decisions as POMDP in order to fully benchmark Markovian decisions:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://arxiv.org/abs/2306.14898\"\u003ehttps://arxiv.org/abs/2306.14898\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"agent-development\"\u003eAgent Development\u003c/h3\u003e\n\u003cp\u003eAgents development has no core framework\u003c/p\u003e\n\u003ch4 id=\"production-systems\"\u003eproduction systems\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eset of rules specificying a precondition + action\u003c/li\u003e\n\u003cli\u003ewhen preconditinons are met, perform an action\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBig kitchen sink proposal: \u003ca 
href=\"https://arxiv.org/abs/2309.02427\"\u003ehttps://arxiv.org/abs/2309.02427\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"trust-and-safety\"\u003eTrust and safety\u003c/h3\u003e\n\u003cp\u003eAgents are much more powerful and dynamic\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlanguage_agents/","tags":null,"title":"Language Agents with Karthik"},{"categories":null,"contents":"What makes language modeling hard: resolving ambiguity is hard.\n\u0026ldquo;the chef made her duck\u0026rdquo;\nContents Basic Text Processing regex ELIZA tokenization and corpus Herdan\u0026rsquo;s Law text normalization tokenization + Subword Tokenization BPE Word Normalization lemmatization through morphological parsing only take stems from morphemes: porter stemmer sentence segmentation N-Grams Edit Distance DP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\nminimum edit distance weighted edit distance backtracing Ngrams N-Grams Markov Assumption Unigrams Backoff and Stupid Backoff Interpolation OOV Words Model Evaluation perplexity open vocabulary Text Classification Text Classification Bag of Words Naive Bayes Naive Bayes for Text Classification Binary Naive Bayes Lexicon Naive Bays Language Modeling Harmonic Mean Macroaverage and Microaverage Logistic Regression Generative Classifier vs Discriminate Classifier Logistic Regression Text Classification decision boundary cross entropy loss stochastic gradient descent Information Retrial Information Retrival Term-Document Matrix Inverted Index + postings list Boolean Retrieval positional index Ranked Information Retrial Ranked Information Retrieval feast or famine problem free text query score Jaccard Coefficient log-frequency weighting document frequency (\u0026quot;idf weight\u0026quot;) TF-IDF SMART notation vector-space model Vector Semantics sense principle of contrast word relatedness semantic field synonymy and antonyms affective meaning vector semantics transposing a Term-Document Matrix term-term matrix 
word2vec skip-gram with negative sampling POS and NER POS Tagging NER Tagging Dialogue Systems Dialogue Chatbot PARRY Recommender Systems Recommender System Dora Dora Neural Nets Neural Networks The Web Web Graph Social Network ","html":"\u003cp\u003eWhat makes language modeling hard: \u003cstrong\u003eresolving ambiguity is hard\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the chef made her duck\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"contents\"\u003eContents\u003c/h2\u003e\n\u003ch3 id=\"basic-text-processing\"\u003eBasic Text Processing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhregex/\"\u003eregex\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e and \u003ca href=\"/posts/kbhcorpus/\"\u003ecorpus\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcorpus/#herdan-s-law\"\u003eHerdan\u0026rsquo;s Law\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtext_normalization/\"\u003etext normalization\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e + \u003ca href=\"/posts/kbhtokenization/#subword-tokenization\"\u003eSubword Tokenization\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhword_normalization/\"\u003eWord Normalization\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlemmatization/\"\u003elemmatization\u003c/a\u003e through \u003ca href=\"/posts/kbhmorphological_parsing/\"\u003emorphological parsing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eonly take stems from morphemes: \u003ca href=\"/posts/kbhmorphological_parsing/#porter-stemmer\"\u003eporter 
stemmer\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsentence_segmentation/\"\u003esentence segmentation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"edit-distance\"\u003eEdit Distance\u003c/h3\u003e\n\u003cp\u003eDP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhweighted_edit_distance/\"\u003eweighted edit distance\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbacktracing/\"\u003ebacktracing\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ngrams\"\u003eNgrams\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#markov-assumption\"\u003eMarkov Assumption\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#unigrams\"\u003eUnigrams\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#backoff\"\u003eBackoff\u003c/a\u003e and \u003ca href=\"/posts/kbhn_grams/#stupid-backoff\"\u003eStupid Backoff\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#interpolation\"\u003eInterpolation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#oov-words\"\u003eOOV Words\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_evaluation/\"\u003eModel Evaluation\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhn_grams/#oov-words\"\u003eopen vocabulary\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"text-classification\"\u003eText Classification\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtext_classification/\"\u003eText Classification\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/#naive-bayes-for-text-classification\"\u003eNaive Bayes for Text Classification\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/#binary-naive-bayes\"\u003eBinary Naive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlexicon/\"\u003eLexicon\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#naive-bays-language-modeling\"\u003eNaive Bays Language Modeling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhharmonic_mean/\"\u003eHarmonic Mean\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmacroaverage/\"\u003eMacroaverage\u003c/a\u003e and \u003ca href=\"/posts/kbhmacroaverage/\"\u003eMicroaverage\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"logistic-regression\"\u003eLogistic Regression\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenerative_vs_discriminitive_classifier/#generative-classifier\"\u003eGenerative Classifier\u003c/a\u003e vs \u003ca href=\"/posts/kbhgenerative_vs_discriminitive_classifier/#discriminative-classifier\"\u003eDiscriminate Classifier\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/#logistic-regression-text-classification\"\u003eLogistic 
Regression Text Classification\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/#logistic-regression-text-classification\"\u003edecision boundary\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_loss/\"\u003ecross entropy loss\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstochastic_gradient_descent/\"\u003estochastic gradient descent\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"information-retrial\"\u003eInformation Retrial\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverted_index/\"\u003eInverted Index\u003c/a\u003e + \u003ca href=\"/posts/kbhinverted_index/#postings-list\"\u003epostings list\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverted_index/#positional-index\"\u003epositional index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ranked-information-retrial\"\u003eRanked Information Retrial\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#feast-or-famine-problem\"\u003efeast or famine problem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#free-text-query\"\u003efree text query\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhranked_information_retrieval/#score\"\u003escore\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#jaccard-coefficient\"\u003eJaccard Coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#log-frequency-weighting\"\u003elog-frequency weighting\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#document-frequency\"\u003edocument frequency\u003c/a\u003e (\u0026quot;\u003ca href=\"/posts/kbhranked_information_retrieval/#document-frequency\"\u003eidf weight\u003c/a\u003e\u0026quot;)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#tf-idf\"\u003eTF-IDF\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#smart-notation\"\u003eSMART notation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#vector-space-model\"\u003evector-space model\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"vector-semantics\"\u003eVector Semantics\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#principle-of-contrast\"\u003eprinciple of contrast\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#word-relatedness\"\u003eword relatedness\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#semantic-field\"\u003esemantic field\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#synonymy\"\u003esynonymy\u003c/a\u003e and \u003ca href=\"/posts/kbhsense/#antonyms\"\u003eantonyms\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#affective-meaning\"\u003eaffective 
meaning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_semantics/\"\u003evector semantics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_semantics/#transposing-a-id-b5d7f908-0351-436d-9784-180ab5aa0562-term-document-matrix\"\u003etransposing a Term-Document Matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_semantics/#term-term-matrix\"\u003eterm-term matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhword2vec/\"\u003eword2vec\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhword2vec/#skip-gram-with-negative-sampling\"\u003eskip-gram with negative sampling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pos-and-ner\"\u003ePOS and NER\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpos_tagging/\"\u003ePOS Tagging\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dialogue-systems\"\u003eDialogue Systems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdialogue/\"\u003eDialogue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparry/\"\u003ePARRY\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"recommender-systems\"\u003eRecommender Systems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrecommender_system/\"\u003eRecommender System\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dora\"\u003eDora\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdora/\"\u003eDora\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"neural-nets\"\u003eNeural 
Nets\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhneural_networks/\"\u003eNeural Networks\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"the-web\"\u003eThe Web\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhweb_graph/\"\u003eWeb Graph\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsocial_network/\"\u003eSocial Network\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlanguage_information_index/","tags":null,"title":"Language Information Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlaplae/","tags":null,"title":"laplae"},{"categories":null,"contents":"the\n","html":"\u003cp\u003ethe\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlaw_of_cosines/","tags":null,"title":"law of cosines"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlaw_of_large_numbers/","tags":null,"title":"law of large numbers"},{"categories":null,"contents":"LOOCV is a cross validation method whereby the entire dataset bar one sample is used for training; then, validation is ran on one sample. This is repeated \\(N\\) times (with a fresh model and a fresh item left out) to get a distribution of one-shot validation results that is an approximately-normal curve centered around the mean validation result from many one-shot samples.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e is a cross validation method whereby the entire dataset bar one sample is used for training; then, validation is ran on one sample. 
This is repeated \\(N\\) times (with a fresh model and a fresh item left out) to get a distribution of one-shot validation results that is an approximately-normal curve centered around the mean validation result from many one-shot samples.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhloo/","tags":null,"title":"Leave-One-Out Cross Validation"},{"categories":null,"contents":"Reading notes :claim: Mccarthyism was a process that de-politicized America Since political activities could get you in trouble, prudent folk avoided them\nSocial conformaty became standard middle-class Americans became social conformists\nCommunism serves as a form of balance checking, which Mccathyism lost With their demise, the nation lost the institutional network that had created a public space where serious alternatives to the status quo could be presented.\nModerate-left was also diminished Moreover, with the disappearance of a vigorous movement on their left, moderate reform groups were more exposed to right-wing attacks and thus rendered less effective.\nMccarthyism also diminshed America\u0026rsquo;s liberal modernization Measures like national health insurance, a social reform embraced by the rest of the industrialized world, simply fell by the wayside.\nCold-war opposition became quelled by mccarthism Opposition to the cold war had been so thoroughly identified with communism that it was no longer possible to challenge the basic assumptions of American foreign policy without incurring suspicions of disloyalty\nThat there may have been more international collaboration if mccarthism was not done early on American policymakers feared to acknowledge the official existence of the People\u0026rsquo;s Republic of China until Richard Nixon, who was uniquely impervious to charges of being soft on communism, did so as president in 1971\nControvercial issues were avoided intellecturally and artistically Similarly, the blacklist contributed to the reluctance of the film industry 
to grapple with controversial social or political issues. In the intellectual world, cold war liberals also avoided controversy.\nThat \u0026ldquo;ideology\u0026rdquo; became irrelavent, pure pragmatism took hold They celebrated the \u0026ldquo;end of ideology,\u0026rdquo; claiming that the United States\u0026rsquo; uniquely pragmatic approach to politics made the problems that had once concerned left- wing ideologists irrelevant.\nState power became expanded federal agents attacked individual rights and extended state power into movie studios, universities, labor unions, and many other ostensibly independent institutions.\nThat Mccarthism produced a threat to demcrocy in itself McCarthyism alone did not cause these outrages; but the assault on democracy that began during the 1940s and 1950s with the collaboration of private institutions and public agencies in suppressing the alleged threat of domestic communism was an important early contribution.\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"claim-mccarthyism-was-a-process-that-de-politicized-america\"\u003e:claim: Mccarthyism was a process that de-politicized America\u003c/h3\u003e\n\u003cp\u003eSince political activities could get you in trouble, prudent folk avoided them\u003c/p\u003e\n\u003ch3 id=\"social-conformaty-became-standard\"\u003eSocial conformaty became standard\u003c/h3\u003e\n\u003cp\u003emiddle-class Americans became social conformists\u003c/p\u003e\n\u003ch3 id=\"communism-serves-as-a-form-of-balance-checking-which-mccathyism-lost\"\u003eCommunism serves as a form of balance checking, which Mccathyism lost\u003c/h3\u003e\n\u003cp\u003eWith their demise, the nation lost the institutional network that had created a public space where serious alternatives to the status quo could be presented.\u003c/p\u003e\n\u003ch3 id=\"moderate-left-was-also-diminished\"\u003eModerate-left was also diminished\u003c/h3\u003e\n\u003cp\u003eMoreover, with the disappearance 
of a vigorous movement on their left, moderate reform groups were more exposed to right-wing attacks and thus rendered less effective.\u003c/p\u003e\n\u003ch3 id=\"mccarthyism-also-diminshed-america-s-liberal-modernization\"\u003eMccarthyism also diminshed America\u0026rsquo;s liberal modernization\u003c/h3\u003e\n\u003cp\u003eMeasures like national health insurance, a social reform embraced by the rest of the industrialized world, simply fell by the wayside.\u003c/p\u003e\n\u003ch3 id=\"cold-war-opposition-became-quelled-by-mccarthism\"\u003eCold-war opposition became quelled by mccarthism\u003c/h3\u003e\n\u003cp\u003eOpposition to the cold war had been so thoroughly identified with communism that it was no longer possible to challenge the basic assumptions of American foreign policy without incurring suspicions of disloyalty\u003c/p\u003e\n\u003ch3 id=\"that-there-may-have-been-more-international-collaboration-if-mccarthism-was-not-done-early-on\"\u003eThat there may have been more international collaboration if mccarthism was not done early on\u003c/h3\u003e\n\u003cp\u003eAmerican policymakers feared to acknowledge the official existence of the People\u0026rsquo;s Republic of China until Richard Nixon, who was uniquely impervious to charges of being soft on communism, did so as president in 1971\u003c/p\u003e\n\u003ch3 id=\"controvercial-issues-were-avoided-intellecturally-and-artistically\"\u003eControvercial issues were avoided intellecturally and artistically\u003c/h3\u003e\n\u003cp\u003eSimilarly, the blacklist contributed to the reluctance of the film industry to grapple with controversial social or political issues. 
In the intellectual world, cold war liberals also avoided controversy.\u003c/p\u003e\n\u003ch3 id=\"that-ideology-became-irrelavent-pure-pragmatism-took-hold\"\u003eThat \u0026ldquo;ideology\u0026rdquo; became irrelavent, pure pragmatism took hold\u003c/h3\u003e\n\u003cp\u003eThey celebrated the \u0026ldquo;end of ideology,\u0026rdquo; claiming that the United States\u0026rsquo; uniquely pragmatic approach to politics made the problems that had once concerned left- wing ideologists irrelevant.\u003c/p\u003e\n\u003ch3 id=\"state-power-became-expanded\"\u003eState power became expanded\u003c/h3\u003e\n\u003cp\u003efederal agents attacked individual rights and extended state power into movie studios, universities, labor unions, and many other ostensibly independent institutions.\u003c/p\u003e\n\u003ch3 id=\"that-mccarthism-produced-a-threat-to-demcrocy-in-itself\"\u003eThat Mccarthism produced a threat to demcrocy in itself\u003c/h3\u003e\n\u003cp\u003eMcCarthyism alone did not cause these outrages; but the assault on democracy that began during the 1940s and 1950s with the collaboration of private institutions and public agencies in suppressing the alleged threat of domestic communism was an important early contribution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlegacy_of_mccarthyism/","tags":null,"title":"Legacy of McCarthyism"},{"categories":null,"contents":"lemmatization is the act of normalizing words into standard meaning irrespective of word variations in order to do a broader analysis\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlemmatization/\"\u003elemmatization\u003c/a\u003e is the act of normalizing words into standard meaning irrespective of word variations in order to do a broader analysis\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlemmatization/","tags":null,"title":"lemmatization"},{"categories":null,"contents":"Any two basis of finite-dimensional vector space have the same length.\nconstituents A 
finite-dimensional vector space \\(V\\) Basis \\(B_1\\), \\(B_2\\) be bases in \\(V\\) requirements Given \\(B_1\\), \\(B_2\\) are basis in \\(V\\), we know that they are both linearly independent and spans \\(V\\). We have that the length of linearly-independent list \\(\\leq\\) length of spanning list.\nLet\u0026rsquo;s take first \\(B_1\\) as linearly independent and \\(B_2\\) as spanning:\nWe have then \\(len(B_1) \\leq len(B_2)\\)\nSwapping roles:\nWe have then \\(len(B_2) \\leq len(B_1)\\)\nAs both of this conditions are true, we have that \\(len(B_1)=len(B_{2})\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eAny two \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e have the same length.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003eBasis \\(B_1\\), \\(B_2\\) be bases in \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eGiven \\(B_1\\), \\(B_2\\) are \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\), we know that they are both \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e and \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). 
We have that the \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take first \\(B_1\\) as \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e and \\(B_2\\) as \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eWe have then \\(len(B_1) \\leq len(B_2)\\)\u003c/p\u003e\n\u003cp\u003eSwapping roles:\u003c/p\u003e\n\u003cp\u003eWe have then \\(len(B_2) \\leq len(B_1)\\)\u003c/p\u003e\n\u003cp\u003eAs both of this conditions are true, we have that \\(len(B_1)=len(B_{2})\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlength_of_basis_doesn_t_depend_on_basis/","tags":null,"title":"Length of Basis Doesn't Depend on Basis"},{"categories":null,"contents":"Autonomously driving is really hard. How do we integrate planning + learning in a close-loop style. We\u0026rsquo;ll start from the current belief, and construct a tree of all reachable belief state.\nRecall DESPOT.\nApproach learning (b): a neural network which maps the driving history into a policy and value planning (a): we will use the neural network\u0026rsquo;s derived policy and value to run MCTS execution (e): execute the actions in a simulator The data which is obtained using the simulator is used to train the neural network.\nlearning The learning component is a supervised policy where a CNN takes a situation and map\nplanning its a AR-DESPOT. 
We select actions by:\n\\begin{equation} a^{*} = \\arg\\max_{a \\in A} \\left\\{u(b,a) + c \\pi_{\\theta}(a|x_{b}) \\sqrt{ \\frac{N(b)}{N(b,a)+1}}\\right\\} \\end{equation}\nwhere \\(\\pi_{\\theta}\\) is our policy network.\nEvery time we encounter a new node, use the learned value function as a lower bound.\nNeeded less depth in the DESPOT than using it naively.\n","html":"\u003cp\u003eAutonomously driving is really hard. How do we integrate planning + learning in a close-loop style. We\u0026rsquo;ll start from the current belief, and construct a tree of all reachable belief state.\u003c/p\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_09-48-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003elearning\u003c/strong\u003e (b): a neural network which maps the driving history into a policy and value\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eplanning\u003c/strong\u003e (a): we will use the neural network\u0026rsquo;s derived policy and value to run \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eexecution\u003c/strong\u003e (e): execute the actions in a simulator\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe data which is obtained using the simulator is used to train the neural network.\u003c/p\u003e\n\u003ch3 id=\"learning\"\u003elearning\u003c/h3\u003e\n\u003cp\u003eThe learning component is a supervised policy where a CNN takes a situation and map\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_09-50-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"planning\"\u003eplanning\u003c/h3\u003e\n\u003cp\u003eits a \u003ca href=\"/posts/kbhdespot/#anytime-despot\"\u003eAR-DESPOT\u003c/a\u003e. 
We select actions by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na^{*} = \\arg\\max_{a \\in A} \\left\\{u(b,a) + c \\pi_{\\theta}(a|x_{b}) \\sqrt{ \\frac{N(b)}{N(b,a)+1}}\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\pi_{\\theta}\\) is our policy network.\u003c/p\u003e\n\u003cp\u003eEvery time we encounter a new node, use the learned value function as a lower bound.\u003c/p\u003e\n\u003cp\u003eNeeded less depth in the \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e than using it naively.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhletsdrive/","tags":null,"title":"LetsDrive"},{"categories":null,"contents":"occasionally, you can\u0026rsquo;t really get a specific solution.\n\\begin{equation} \\dv{y}{t} = e^{t}\\cos y \\end{equation}\nafter doing the , you get:\n\\begin{equation} \\ln (\\sec y + \\tan y) - e^{t} = C \\end{equation}\nyou get sets of this function \\(F(t,y)\\) which shifts it up and down, by any constant C.\nBut at any given \\((t,y)\\), you get a slope \\(e^{t}\\cos y\\).\n","html":"\u003cp\u003eoccasionally, you can\u0026rsquo;t really get a specific solution.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{t} = e^{t}\\cos y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eafter doing the , you get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln (\\sec y + \\tan y) - e^{t} = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou get sets of this function \\(F(t,y)\\) which shifts it up and down, by any constant C.\u003c/p\u003e\n\u003cp\u003eBut at any given \\((t,y)\\), you get a slope \\(e^{t}\\cos y\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlevel_set/","tags":null,"title":"level set"},{"categories":null,"contents":"The Lexicalization Hypothesis is a hypothesis proposed by Chomsky that states that syntactic transformations can only apply on syntatic constituents; therefore, the rules of putting words together is different from the rules that puts phrases together. 
This theory stands in opposition to generative semantics.\nThere are two versions of the Lexicalization Hypothesis:\nStrong Lexicalization Hypothesis The Strong Lexicalization Hypothesis states that both derivational words (changes meaning, bench=\u0026gt;benching) or inflectional words (changes grammar, eat=\u0026gt;eating) cannot be put together via syntatical rules. (Geeraerts 2009)\nWeak Lexicalization Hypothesis Weak Lexicalization Hypothesis states that semantic rules cannot work in the formation of derivational words only.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e is a hypothesis proposed by \u003ca href=\"/posts/kbhchomsky/\"\u003eChomsky\u003c/a\u003e that states that syntactic transformations can only apply on \u003ca href=\"\"\u003esyntatic constituents\u003c/a\u003e; therefore, the rules of putting words together is \u003cem\u003edifferent\u003c/em\u003e from the rules that puts phrases together. 
This theory stands in opposition to \u003ca href=\"/posts/kbhgenerative_semantics/\"\u003egenerative semantics\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThere are two versions of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e:\u003c/p\u003e\n\u003ch2 id=\"strong-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eStrong \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#strong-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e states that \u003cem\u003eboth\u003c/em\u003e \u003ca href=\"/posts/kbhderivational_words/\"\u003ederivational words\u003c/a\u003e (changes meaning, bench=\u0026gt;benching) or \u003ca href=\"/posts/kbhinflectional_words/\"\u003einflectional words\u003c/a\u003e (changes grammar, eat=\u0026gt;eating) cannot be put together via syntatical rules. (\u003ca href=\"#citeproc_bib_item_1\"\u003eGeeraerts 2009\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"weak-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eWeak \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#weak-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eWeak Lexicalization Hypothesis\u003c/a\u003e states that semantic rules cannot work in the formation of \u003ca href=\"/posts/kbhderivational_words/\"\u003ederivational words\u003c/a\u003e only.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlexicalization_hypothesis/","tags":null,"title":"Lexicalization Hypothesis"},{"categories":null,"contents":"Lexicon are pre-labeled datasets which pre-organize words into features. 
They are useful when training data is sparse.\nInstead of doing word counts, we compute each feature based on teh the token\u0026rsquo;s assigned label in the lexicon.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlexicon/\"\u003eLexicon\u003c/a\u003e are pre-labeled datasets which pre-organize words into features. They are useful when training data is sparse.\u003c/p\u003e\n\u003cp\u003eInstead of doing word counts, we compute each feature based on teh the token\u0026rsquo;s assigned label in the lexicon.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlexicon/","tags":null,"title":"Lexicon"},{"categories":null,"contents":" Poster-modern search for individualism ","html":"\u003cul\u003e\n\u003cli\u003ePoster-modern search for individualism\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhliberal_center/","tags":null,"title":"Liberal Center"},{"categories":null,"contents":"likelyhood is the PDF, PMF, or joint probability distribution\u0026mdash;which ever distribution\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e is the \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e, \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e, or \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u0026mdash;which ever distribution\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlikelyhood/","tags":null,"title":"likelyhood"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlina/","tags":null,"title":"lina"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.642033\nOne-Liner Proposed cross-linguistic markers shared for AD patients between English and French; evaluated features found with standard ML.\nNovelty Multi-lingual, cross-linguistic analysis.\nNotable Methods Looked at common patters between 
the two languages Linguistic results scored by IUs on CTP task Key Figs Figure 1 This figure tells us the various approaches measured.\nTable 2 Here\u0026rsquo;s a list of semantic features extracted\nTable 3 Here\u0026rsquo;s a list of NLP features extracted. Bolded items represent P \u0026lt;0.001 correlation for AD/NonAD difference between English and French.\nSame thing but semantic features\nsame thing but acoustic features. As we can see, acoustic features didn\u0026rsquo;t do much.\nNew Concepts CTP IU Notes ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.642033\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eProposed cross-linguistic markers shared for AD patients between English and French; evaluated features found with standard ML.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eMulti-lingual, cross-linguistic analysis.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLooked at common patters between the two languages\u003c/li\u003e\n\u003cli\u003eLinguistic results scored by \u003ca href=\"/posts/kbhiu/\"\u003eIU\u003c/a\u003es on \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-1\"\u003eFigure 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-26-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the various approaches measured.\u003c/p\u003e\n\u003ch3 id=\"table-2\"\u003eTable 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-31-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eHere\u0026rsquo;s a list of semantic features extracted\u003c/p\u003e\n\u003ch3 id=\"table-3\"\u003eTable 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-06-24_23-32-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eHere\u0026rsquo;s a list of NLP features extracted. Bolded items represent P \u0026lt;0.001 correlation for AD/NonAD difference between English and French.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-33-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSame thing but semantic features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-33-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003esame thing but acoustic features. As we can see, acoustic features didn\u0026rsquo;t do much.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhiu/\"\u003eIU\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlindsay_2021/","tags":["ntj"],"title":"Lindsay 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlinear_algea/","tags":null,"title":"linear algea"},{"categories":null,"contents":"Gaussian Elimination Quiz Demonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\)) Which \\(2\\times 2\\) matrices under multiplication form a group? (error: closure need to proved on invertable matrices under multiplication, not just \\(2\\times 2\\)) Deriving Rotation matrices (error: clockwise vs counter-clockwise) Linear Independence Quiz Connection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent Basis and Dimension Quiz put 0 into a basis AAAA not lin. 
indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars missing some inequality about basis? \u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis Final, part 1 definition of vector space: scalar multiplication is not an operation straight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\) plane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake proof: det A det B = det AB Final, part 2 Counterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\) Counterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\) Counterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\) Product Spaces Quiz Prove that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are isomorphic\nerror: didn\u0026rsquo;t do it\nQuotient Spaces Quiz Couldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too. The spanning proof: \\(v + U =\\) , rewrite as basis, etc. ","html":"\u003ch2 id=\"gaussian-elimination-quiz\"\u003eGaussian Elimination Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDemonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\))\u003c/li\u003e\n\u003cli\u003eWhich \\(2\\times 2\\) matrices under multiplication form a group? 
(error: closure need to proved on \u003cstrong\u003einvertable\u003c/strong\u003e matrices under multiplication, not just \\(2\\times 2\\))\u003c/li\u003e\n\u003cli\u003eDeriving Rotation matrices (error: clockwise vs counter-clockwise)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"linear-independence-quiz\"\u003eLinear Independence Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eConnection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basis-and-dimension-quiz\"\u003eBasis and Dimension Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eput 0 into a basis AAAA not lin. indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars\u003c/li\u003e\n\u003cli\u003emissing some inequality about basis? 
\u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-1\"\u003eFinal, part 1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edefinition of vector space: scalar multiplication is not an operation\u003c/li\u003e\n\u003cli\u003estraight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\)\u003c/li\u003e\n\u003cli\u003eplane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake\u003c/li\u003e\n\u003cli\u003eproof: det A det B = det AB\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-2\"\u003eFinal, part 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCounterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"product-spaces-quiz\"\u003eProduct Spaces Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eProve that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eerror: didn\u0026rsquo;t do it\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"quotient-spaces-quiz\"\u003eQuotient Spaces Quiz\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-09_10-24-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCouldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + 
U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too.\u003c/li\u003e\n\u003cli\u003eThe spanning proof: \\(v + U =\\) , rewrite as basis, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_algebra_errors-1/","tags":null,"title":"Linear Algebra Errors"},{"categories":null,"contents":"Gaussian Elimination Quiz Demonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\)) Which \\(2\\times 2\\) matrices under multiplication form a group? (error: closure need to proved on invertable matrices under multiplication, not just \\(2\\times 2\\)) Deriving Rotation matrices (error: clockwise vs counter-clockwise) Linear Independence Quiz Connection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent Basis and Dimension Quiz put 0 into a basis AAAA not lin. indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars missing some inequality about basis? 
\u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis Final, part 1 definition of vector space: scalar multiplication is not an operation straight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\) plane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake proof: det A det B = det AB Final, part 2 Counterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\) Counterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\) Counterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\) Product Spaces Quiz Need more specific description: explain why we use product and quotient to describe product and quotient spaces? Prove that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are isomorphic. Error: didn\u0026rsquo;t do it correctly for infinite dimensional Quotient Spaces Quiz Couldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too. The spanning proof: \\(v + U =\\) , rewrite as basis, etc. she graded wrong: what\u0026rsquo;s the importance of \\(\\widetilde{T}\\)? Give two statements equivalent to \\(v+U = w+U\\), prove equivalence betewen this statement and the others didn\u0026rsquo;t prove both directions! 
Polynomials Quiz state the fundamental theorem of algebra; error: \\(\\mathcal{P}_{m}(\\mathbb{F})\\) is a vector space of polynomials with degree at most \\(m\\), and yet the FtOA requires exactly \\(m\\) Upper Triangular Quiz upper-triangular representation is findable when the space is 1) under complexes and 2) for finite-dimensional vector spaces; need BOTH conditions Upper Triangular Quiz UNCLEAR: Geometric Multipliicty is bounded by Algebric Multiplicity; Algebraic multiplicity (\u0026ldquo;real estate\u0026rdquo; taken on the upper-triangular diagonal) v. geometric multiplicity (amount of linearly independent eigenvectors included with that eigenvalue); so if geometric multiplicity \u0026lt; algebraic multiplicity, the map is not diagonalizable because its not bringing enough linearly independent eigenvectors Diagonalization Quiz enough eigenvalues go in only one direction: it existing means its diagonalizable, but the opposite isn\u0026rsquo;t true the proof for \\(T\\) is diagonalizable IFF the matrix \\(T\\) is similar to a diagonal matrix: NUS-MATH530 Similar to Diagonal Final, part 1 State the complex spectral theorem (error: the condition of normality is a PARALLEL result) Final, Part 2 Said this was true, but its not; \\(null\\ T \\bigoplus range\\ T = V\\), \\(T\\) is diagonalizable; Said this was true, but its false \\(T^{2}= 0\\) IFF \\(null\\ T = range\\ T\\) suppose \\(T=0\\), \\(T^{2} = 0\\). \\(null\\ T = V\\), \\(range\\ T = 0\\). Spectral theorem doesn\u0026rsquo;t define diagonalizability, it defines diagonalibility for ORTHONORMAL missing derivation of the pseudoinverse ","html":"\u003ch2 id=\"gaussian-elimination-quiz\"\u003eGaussian Elimination Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDemonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\))\u003c/li\u003e\n\u003cli\u003eWhich \\(2\\times 2\\) matrices under multiplication form a group? 
(error: closure need to proved on \u003cstrong\u003einvertable\u003c/strong\u003e matrices under multiplication, not just \\(2\\times 2\\))\u003c/li\u003e\n\u003cli\u003eDeriving Rotation matrices (error: clockwise vs counter-clockwise)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"linear-independence-quiz\"\u003eLinear Independence Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eConnection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basis-and-dimension-quiz\"\u003eBasis and Dimension Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eput 0 into a basis AAAA not lin. indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars\u003c/li\u003e\n\u003cli\u003emissing some inequality about basis? 
\u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-1\"\u003eFinal, part 1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edefinition of vector space: scalar multiplication is not an operation\u003c/li\u003e\n\u003cli\u003estraight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\)\u003c/li\u003e\n\u003cli\u003eplane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake\u003c/li\u003e\n\u003cli\u003eproof: det A det B = det AB\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-2\"\u003eFinal, part 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCounterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"product-spaces-quiz\"\u003eProduct Spaces Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNeed more specific description: explain why we use product and quotient to describe product and quotient spaces?\u003c/li\u003e\n\u003cli\u003eProve that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e. 
Error: didn\u0026rsquo;t do it correctly for infinite dimensional\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"quotient-spaces-quiz\"\u003eQuotient Spaces Quiz\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-09_10-24-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCouldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too.\n\u003cul\u003e\n\u003cli\u003eThe spanning proof: \\(v + U =\\) , rewrite as basis, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eshe graded wrong: what\u0026rsquo;s the importance of \\(\\widetilde{T}\\)?\u003c/li\u003e\n\u003cli\u003eGive two statements equivalent to \\(v+U = w+U\\), prove equivalence betewen this statement and the others\n\u003cul\u003e\n\u003cli\u003edidn\u0026rsquo;t prove both directions!\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"polynomials-quiz\"\u003ePolynomials Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003estate the fundamental theorem of algebra; error: \\(\\mathcal{P}_{m}(\\mathbb{F})\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e of \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es with degree \u003cem\u003eat most \\(m\\)\u003c/em\u003e, and yet the FtOA requires \u003cem\u003eexactly \\(m\\)\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"upper-triangular-quiz\"\u003eUpper Triangular Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eupper-triangular representation is findable when the space is 1) under complexes and 2) for finite-dimensional vector spaces; need BOTH conditions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"upper-triangular-quiz\"\u003eUpper Triangular 
Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUNCLEAR: Geometric Multipliicty is bounded by Algebric Multiplicity; Algebraic multiplicity (\u0026ldquo;real estate\u0026rdquo; taken on the upper-triangular diagonal) v. geometric multiplicity (amount of linearly independent eigenvectors included with that eigenvalue); so if geometric multiplicity \u0026lt; algebraic multiplicity, the map is not diagonalizable because its not bringing enough linearly independent eigenvectors\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"diagonalization-quiz\"\u003eDiagonalization Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eenough eigenvalues go in only one direction: it existing means its diagonalizable, but the opposite isn\u0026rsquo;t true\u003c/li\u003e\n\u003cli\u003ethe proof for \\(T\\) is diagonalizable IFF the matrix \\(T\\) is similar to a diagonal matrix: \u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eNUS-MATH530 Similar to Diagonal\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-1\"\u003eFinal, part 1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eState the complex spectral theorem (error: the condition of normality is a PARALLEL result)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-2\"\u003eFinal, Part 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSaid this was true, but its not; \\(null\\ T \\bigoplus range\\ T = V\\), \\(T\\) is diagonalizable;\u003c/li\u003e\n\u003cli\u003eSaid this was true, but its false \\(T^{2}= 0\\) IFF \\(null\\ T = range\\ T\\)\nsuppose \\(T=0\\), \\(T^{2} = 0\\). 
\\(null\\ T = V\\), \\(range\\ T = 0\\).\u003c/li\u003e\n\u003cli\u003eSpectral theorem doesn\u0026rsquo;t define diagonalizability, it defines diagonalibility for ORTHONORMAL\u003c/li\u003e\n\u003cli\u003emissing derivation of the pseudoinverse\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_algebra_errors/","tags":null,"title":"Linear Algebra Errors"},{"categories":null,"contents":"The bible stays the same: (Axler 1997)\nWe will be less exploratory, Axler will pretty much tell us. However, we should try to say stuff in the class every single class period.\nThere is a ban on numbers over 4 on this class.\nBest Practices Ask questions Talk to each other Make mistakes From Riley: know the Proof Design Patterns Non-Axler but Important Things we explicitly are told to know, but is not immediately in Axler. You bet you determinants are going to be here.\ngroup matricies dot product and cross product solving systems binary operation modular arithmetic quotient group strong induction algebreic multiplicity geometric multplicity matrix adjectives singular value decomposition 1 Axler 1.A Axler 1.B Axler 1.C 2 Axler 2.A Axler 2.B Axler 2.C 3 Axler 3.A Axler 3.B Axler 3.C Axler 3.D Axler 3.E Axler 3.F 4 Thoughts on Axler 4\n5 Axler 5.A Axler 5.B Axler 5.C 6 Axler 6.A Axler 6.B 7 Axler 7.A Misc Knowledge algebra vector integer additive identity taxicab norm Axler, Sheldon. 1997. Linear Algebra Done Right. Undergraduate Texts in Mathematics. Springer New York. doi:10.1007/b97662. ","html":"\u003cp\u003eThe bible stays the same: (\u003ca href=\"#citeproc_bib_item_1\"\u003eAxler 1997\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eWe will be less exploratory, Axler will pretty much tell us. 
However, we should try to say stuff in the class every single class period.\u003c/p\u003e\n\u003cp\u003eThere is a ban on \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003es over 4 on this class.\u003c/p\u003e\n\u003ch2 id=\"best-practices\"\u003eBest Practices\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAsk questions\u003c/li\u003e\n\u003cli\u003eTalk to each other\u003c/li\u003e\n\u003cli\u003eMake mistakes\u003c/li\u003e\n\u003cli\u003eFrom Riley: know the \u003ca href=\"/posts/kbhproof_design_patterns/\"\u003eProof Design Patterns\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"non-axler-but-important\"\u003eNon-Axler but Important\u003c/h2\u003e\n\u003cp\u003eThings we explicitly are told to know, but is not immediately in Axler. You bet you determinants are going to be here.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e and \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsolving_systems/\"\u003esolving systems\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_operation/\"\u003ebinary operation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodular_arithmetic/\"\u003emodular arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalgebreic_multiplicity/\"\u003ealgebreic multiplicity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeometric_multplicity/\"\u003egeometric 
multplicity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_adjectives/\"\u003ematrix adjectives\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value decomposition\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"1\"\u003e1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_a/\"\u003eAxler 1.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_1_b/\"\u003eAxler 1.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_1_c/\"\u003eAxler 1.C\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"2\"\u003e2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_2_a/\"\u003eAxler 2.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_2_b/\"\u003eAxler 2.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_2_c/\"\u003eAxler 2.C\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"3\"\u003e3\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_a/\"\u003eAxler 3.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_b/\"\u003eAxler 3.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_c/\"\u003eAxler 3.C\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_d/\"\u003eAxler 3.D\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_e/\"\u003eAxler 3.E\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_f/\"\u003eAxler 3.F\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"4\"\u003e4\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhthoughts_on_axler_4/\"\u003eThoughts on Axler 4\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"5\"\u003e5\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_5_a/\"\u003eAxler 
5.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_5_b/\"\u003eAxler 5.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_5_c/\"\u003eAxler 5.C\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"6\"\u003e6\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_6_a/\"\u003eAxler 6.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_6_b/\"\u003eAxler 6.B\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"7\"\u003e7\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_7_a/\"\u003eAxler 7.A\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"misc-knowledge\"\u003eMisc Knowledge\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtaxicab_norm/\"\u003etaxicab norm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eAxler, Sheldon. 1997. \u003ci\u003eLinear Algebra Done Right\u003c/i\u003e. Undergraduate Texts in Mathematics. Springer New York. doi:\u003ca href=\"https://doi.org/10.1007/b97662\"\u003e10.1007/b97662\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_algebra_index/","tags":["index"],"title":"Linear Algebra Index"},{"categories":null,"contents":"A Linear Combination of vectors is a\u0026hellip; guess what? 
Any vector formed by a combination of vectors at arbitrary scales.\nconstituents A list of vectors \\(v_1, \\dots,v_{m}\\) Scalars \\(a_1, \\dots, v_{m} \\in \\mathbb{F}\\) requirements A Linear Combination is defined formally by:\n\\begin{equation} v = a_1v_1+\\dots+a_{m}v_{m} \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_combination/\"\u003eLinear Combination\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es is a\u0026hellip; guess what? Any vector formed by a combination of vectors at arbitrary scales.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA list of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es \\(v_1, \\dots,v_{m}\\)\u003c/li\u003e\n\u003cli\u003eScalars \\(a_1, \\dots, v_{m} \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_combination/\"\u003eLinear Combination\u003c/a\u003e is defined formally by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1v_1+\\dots+a_{m}v_{m}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_combination/","tags":null,"title":"linear combination"},{"categories":null,"contents":"Here it is:\n\\begin{equation} a\\frac{dy}{dx} + by = c \\end{equation}\nFor some constants \\(a,b,c\\). The name is pretty obvious, because we have constants and the highest power on everything is \\(1\\). Its first-order because the derivative is only the first-order derivative.\nlinear (diffeq) We technically call it \u0026ldquo;linear\u0026rdquo; because: if there are two possible solutions \\(y_1(x)\\) \\(y_2(x)\\), a linear combination \\(Ay_1(x)+By_2(x)\\) should also be a solution. 
Its \u0026ldquo;linear\u0026rdquo; because linear combinations work.\nsolving separable differential equations A separable differential equation means that we can separate the derivative by itself and separate its two components. For the example above, we have that:\n\\begin{equation} \\frac{dy}{dx} = \\frac{c-by}{a} \\end{equation}\nWe can naturally separate this:\n\\begin{equation} \\frac{a}{c-by}dy = dx \\end{equation}\nAnd then we can finally take the integral on both sides:\n\\begin{equation} \\int \\frac{a}{c-by}dy = \\int dx \\end{equation}\nWait wait wait but why is this possible? Why is it that we can separate a \\(\\frac{dy}{dx}\\) such that \\(dy\\) and \\(dx\\) is isolatable? Remember:\n\\begin{equation} \\frac{dy}{dx} = \\lim_{h\\to 0} \\frac{y(x+h)-y(x)}{h} \\end{equation}\nno where is the differentials seperatable! Apparently Ted\u0026rsquo;s undergrads didn\u0026rsquo;t know this either. So here\u0026rsquo;s a reading on it.\nWhat if its non-seperable? See Linear Non-Seperable Equation\n","html":"\u003cp\u003eHere it is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\frac{dy}{dx} + by = c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some constants \\(a,b,c\\). The name is pretty obvious, because we have constants and the highest power on everything is \\(1\\). Its first-order because the derivative is only the first-order derivative.\u003c/p\u003e\n\u003ch2 id=\"linear--diffeq\"\u003elinear (diffeq)\u003c/h2\u003e\n\u003cp\u003eWe technically call it \u0026ldquo;linear\u0026rdquo; because: if there are two possible solutions \\(y_1(x)\\) \\(y_2(x)\\), a linear combination \\(Ay_1(x)+By_2(x)\\) should also be a solution. 
Its \u0026ldquo;linear\u0026rdquo; because linear combinations work.\u003c/p\u003e\n\u003ch2 id=\"solving-separable-differential-equations\"\u003esolving separable differential equations\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#solving-separable-differential-equations\"\u003eseparable\u003c/a\u003e differential equation means that we can separate the derivative by itself and separate its two components. For the example above, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{dy}{dx} = \\frac{c-by}{a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can naturally separate this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{a}{c-by}dy = dx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then we can finally take the integral on both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{a}{c-by}dy = \\int dx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWait wait wait but why is this possible? Why is it that we can separate a \\(\\frac{dy}{dx}\\) such that \\(dy\\) and \\(dx\\) is isolatable? Remember:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{dy}{dx} = \\lim_{h\\to 0} \\frac{y(x+h)-y(x)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eno where is the differentials seperatable! Apparently Ted\u0026rsquo;s undergrads didn\u0026rsquo;t know this either. \u003ca href=\"https://drive.google.com/file/d/1GWSagIMjXI0Awwy6wlQqsBm6c8tnCqcg/view?pli=1\"\u003eSo here\u0026rsquo;s a reading on it\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhat if its non-seperable? 
See \u003ca href=\"/posts/kbhlinear_non_seperable_equation/\"\u003eLinear Non-Seperable Equation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_constant_coefficient_equation/","tags":null,"title":"Linear Constant-Coefficient Equation"},{"categories":null,"contents":"Linear Dependence Lemma is AFAIK one of the more important results of elementary linear algebra.\nstatement Suppose \\(v_1, \\dots v_{m}\\) is an linearly dependent list in \\(V\\); then \\(\\exists j \\in \\{1, 2, \\dots m\\}\\) such that\u0026hellip;\n\\(v_{j} \\in span(v_1, \\dots, v_{j-1})\\) the span of the list constructed by removing \\(v_{j}\\) from \\(v_1, \\dots v_{m}\\) equals the span of \\(v_1, \\dots v_{m}\\) itself intuition: \u0026ldquo;in a linearly dependent list of vectors, one of the vectors is in the span of the previous ones, and we can throw it out without changing the span.\u0026rdquo;\nproof By definition of linear dependence, given the list \\((v_1, \\dots v_{m}\\)) is linearly dependent, there exists some not-all-zero \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) such that:\n\\begin{equation} a_1v_1+\\dots +a_{m}v_{m} = 0 \\end{equation}\nLet \\(a_{j}\\) be the last non-zero scalar in the expression (making the term actually exist). 
You can, in this circumstance, chuck everything to the right and divide by \\(a_{j}\\) to recover \\(v_{j}\\):\n\\begin{equation} v_{j}= -\\frac{a_1}{a_{j}} v_1 - \\dots -\\frac{a_{j-1}}{a_{j}}v_{j-1} \\end{equation}\nWe were able to construct \\(v_{j}\\) as a linear combination of \\(v_{1}, \\dots v_{j-1}\\), therefore:\n\\begin{equation} v_{j} \\in span(v_1, \\dots, v_{j-1}) \\end{equation}\nshowing \\((1)\\).\nFor \\(2\\), the intuition behind the proof is just that you can take that expression for \\(v_{j}\\) above to replace \\(v_{j}\\), therefore getting rid of one vector but still keeping the same span.\nFormally, \\(\\forall u \\in span(v_1, \\dots v_{m})\\), we can write it as some:\n\\begin{equation} u = c_1v_1 + \\dots c_{j}v_{j} + \\dots + c_{m}v_{m} \\end{equation}\nnow we replace \\(v_{j}\\) with the isolated expression for \\(v_{j}\\) above.\nException: if \\(j=1\\) and \\(v_1=0\\), note that you can just replace \\(v_1\\) with \\(0\\) without doing any special substitution.\nHaving written all arbitrary \\(u \\in span(v_1, \\dots v_{m})\\) as a linear combination of \\(v_1\\dots v_{m}\\) without \u0026hellip; \\(v_{j}\\), we see that the renaming vectors span the same space. \\(\\blacksquare\\)\nissue note that if we chose \\(j=1\\) in the above result, \\(v_1=0\\). Contrapositively, if \\(v_1 \\neq 0\\), \\(j\\neq 1\\). This is because of the fact that:\nif \\(j=1\\), the lemma tells us that \\(v_{1} \\in span(v_{1-1}) \\implies v_1 \\in span()\\). As per definition, the span of the empty set is \\(\\{0\\}\\). 
Therefore, \\(v_1 \\in \\{0\\} \\implies v_1=0\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e is AFAIK one of the more important results of elementary linear algebra.\u003c/p\u003e\n\u003ch2 id=\"statement\"\u003estatement\u003c/h2\u003e\n\u003cp\u003eSuppose \\(v_1, \\dots v_{m}\\) is an \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list in \\(V\\); then \\(\\exists j \\in \\{1, 2, \\dots m\\}\\) such that\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(v_{j} \\in span(v_1, \\dots, v_{j-1})\\)\u003c/li\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of the list constructed by removing \\(v_{j}\\) from \\(v_1, \\dots v_{m}\\) equals the span of \\(v_1, \\dots v_{m}\\) itself\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eintuition: \u0026ldquo;in a \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list of vectors, one of the vectors is in the span of the previous ones, and we can throw it out without changing the span.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eproof\u003c/h2\u003e\n\u003cp\u003eBy definition of \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinear dependence\u003c/a\u003e, given the list \\((v_1, \\dots v_{m}\\)) is \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e, there exists some not-all-zero \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1v_1+\\dots +a_{m}v_{m} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet \\(a_{j}\\) be the last non-zero scalar in the expression (making the term actually exist). 
You can, in this circumstance, chuck everything to the right and divide by \\(a_{j}\\) to recover \\(v_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j}= -\\frac{a_1}{a_{j}} v_1 - \\dots -\\frac{a_{j-1}}{a_{j}}v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe were able to construct \\(v_{j}\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v_{1}, \\dots v_{j-1}\\), therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j} \\in span(v_1, \\dots, v_{j-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eshowing \\((1)\\).\u003c/p\u003e\n\u003cp\u003eFor \\(2\\), the intuition behind the proof is just that you can take that expression for \\(v_{j}\\) above to replace \\(v_{j}\\), therefore getting rid of one vector but still keeping the same \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFormally, \\(\\forall u \\in span(v_1, \\dots v_{m})\\), we can write it as some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = c_1v_1 + \\dots c_{j}v_{j} + \\dots + c_{m}v_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow we replace \\(v_{j}\\) with the isolated expression for \\(v_{j}\\) above.\u003c/p\u003e\n\u003cp\u003eException: if \\(j=1\\) and \\(v_1=0\\), note that you can just replace \\(v_1\\) with \\(0\\) without doing any special substitution.\u003c/p\u003e\n\u003cp\u003eHaving written all arbitrary \\(u \\in span(v_1, \\dots v_{m})\\) as a linear combination of \\(v_1\\dots v_{m}\\) \u003cem\u003ewithout\u003c/em\u003e \u0026hellip; \\(v_{j}\\), we see that the renaming vectors span the same space. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"issue\"\u003eissue\u003c/h2\u003e\n\u003cp\u003enote that if we chose \\(j=1\\) in the above result, \\(v_1=0\\). Contrapositively, if \\(v_1 \\neq 0\\), \\(j\\neq 1\\). 
This is because of the fact that:\u003c/p\u003e\n\u003cp\u003eif \\(j=1\\), the lemma tells us that \\(v_{1} \\in span(v_{1-1}) \\implies v_1 \\in span()\\). As per definition, the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of the empty set is \\(\\{0\\}\\). Therefore, \\(v_1 \\in \\{0\\} \\implies v_1=0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_dependence_lemma/","tags":null,"title":"Linear Dependence Lemma"},{"categories":null,"contents":"A linear map to numbers. Its very powerful because any linear functional can be represented as an inner product using Riesz Representation Theorem\nconstituents vector space \\(V\\) a linear map \\(\\varphi \\in \\mathcal{L}(V, \\mathbb{F})\\) requirements \\(\\varphi\\) is called a linear functional on \\(V\\) if \\(\\varphi: V \\to \\mathbb{F}\\). That is, it maps elements of \\(V\\) to scalars. For instance, every inner product is a Linear Map to scalars and hence a linear functional.\nadditional information Riesz Representation Theorem Suppose \\(V\\) is finite-dimensional, and \\(\\varphi\\) is a linear functional on \\(V\\); then, there exists an unique \\(u \\in V\\) such that:\n\\begin{equation} \\varphi(v) = \\langle v,u \\rangle \\end{equation}\n\\(\\forall v \\in V\\). Kinda a mindblowing fact.\nProof:\nEvery Inner Product Space has an orthonormal basis; let \\(e_1, \u0026hellip;e_{n}\\) be an orthonormal basis of \\(V\\). 
Recall there\u0026rsquo;s a specific way of writing a vector as a linear combination of orthonormal basis, that WLOG \\(v \\in V\\):\n\\begin{equation} v = \\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n} \\end{equation}\nNow:\n\\begin{equation} \\varphi(v) = \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\end{equation}\nGiven homogenity and addtivity, we then have:\n\\begin{align} \\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\ \u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n) \\end{align}\nNow, shoving \\(\\varphi\\) into the second slot (remember we have conjugate homogenity on the secon slot), and adding it all together (as inner products are additive in both slots):\n\\begin{align} \\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\ \u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n) \\\\ \u0026amp;= \\langle v, \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n \\rangle \\end{align}\nYou will note now that the second slot to this inner product is v-independent! So as long as we know the orthonormal basis we can encode \\(\\varphi\\) with:\n\\begin{equation} u = \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n \\end{equation}\nand:\n\\begin{equation} \\varphi(v) = \\langle v, u \\rangle \\end{equation}\nNow, to show uniqueness, we probably do the same damned thing we have a million times:\nSuppose:\n\\begin{equation} \\varphi(v) = \\langle v,u_1 \\rangle = \\langle v,u_{2} \\rangle \\end{equation}\nholds for all \\(v \\in V\\), as required by the theorem.\nThis means that:\n\\begin{equation} \\langle v, u_1-u_2 \\rangle = 0 \\end{equation}\nFor every \\(v \\in V\\). Let \\(v = u_1-u_2\\). 
Now by definiteness we have \\(u_1-u_2=0\\) meaning \\(u_1=u_2\\) as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eA linear map to numbers. Its very powerful because any \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e can be represented as an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e using \u003ca href=\"#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003evector space \\(V\\)\u003c/li\u003e\n\u003cli\u003ea linear map \\(\\varphi \\in \\mathcal{L}(V, \\mathbb{F})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(\\varphi\\) is called a \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e on \\(V\\) if \\(\\varphi: V \\to \\mathbb{F}\\). That is, it maps elements of \\(V\\) to scalars. For instance, every \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e to scalars and hence a \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/h3\u003e\n\u003cp\u003eSuppose \\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, and \\(\\varphi\\) is a \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e on \\(V\\); then, there exists an \u003cstrong\u003eunique\u003c/strong\u003e \\(u \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\langle v,u \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\forall v \\in V\\). 
Kinda a mindblowing fact.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgram_schmidt/#every-id-4a788e29-a3e9-4c13-8c97-08746878966e-inner-product-space-has-an-id-2a1eecb2-f23a-469f-a860-b561a9197906-orthonormal-basis\"\u003eEvery Inner Product Space has an orthonormal basis\u003c/a\u003e; let \\(e_1, \u0026hellip;e_{n}\\) be an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e of \\(V\\). Recall there\u0026rsquo;s a specific way of \u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003ewriting a vector as a linear combination of orthonormal basis\u003c/a\u003e, that WLOG \\(v \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = \\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven homogenity and addtivity, we then have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\\n\u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, shoving \\(\\varphi\\) into the second slot (remember we have conjugate homogenity on the secon slot), and adding it all together (as inner products are additive in both slots):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\\n\u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n) \\\\\n\u0026amp;= \\langle v, \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n 
\\rangle\n\\end{align}\u003c/p\u003e\n\u003cp\u003eYou will note now that the second slot to this \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is \u003cstrong\u003ev-independent!\u003c/strong\u003e So as long as we know the \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e we can encode \\(\\varphi\\) with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\langle v, u \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, to show uniqueness, we probably do the same damned thing we have a million times:\u003c/p\u003e\n\u003cp\u003eSuppose:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\langle v,u_1 \\rangle = \\langle v,u_{2} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eholds for all \\(v \\in V\\), as required by the theorem.\u003c/p\u003e\n\u003cp\u003eThis means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, u_1-u_2 \\rangle = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor every \\(v \\in V\\). Let \\(v = u_1-u_2\\). Now by definiteness we have \\(u_1-u_2=0\\) meaning \\(u_1=u_2\\) as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_functional/","tags":null,"title":"linear functional"},{"categories":null,"contents":"Suppose you have continuous random variables \\(X,Y\\), you can use one to seed the value and the other to change the Gaussian distribution:\n\\begin{equation} p(x\\mid y) = \\mathcal{N}(x \\mid my + b, \\sigma^{2}) \\end{equation}\n","html":"\u003cp\u003eSuppose you have continuous \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es \\(X,Y\\), you can use one to seed the value and the other to change the \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x\\mid y) = \\mathcal{N}(x \\mid my + b, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_gaussian_model/","tags":null,"title":"linear gaussian model"},{"categories":null,"contents":"A linearly independent list is a list of vectors such that there is one unique choice of scalars to be able to construct each member of their span.\nBased on the same technique as in the proof that a sum of subsets is a direct sum IFF there is only one way to write \\(0\\), we can show that in a linearly independent list, there is (IFF) only one way to write the zero vector as a linear combination of that list of vectors \u0026mdash;namely, the trivial representation of taking each vector to \\(0\\). 
In fact, we will actually use that as the formal definition of linear independence.\nThis definition of linear independence is the result of the definition for direct sum.\nSee also Linear Dependence Lemma.\nconstituents A list of vectors \\(v_1, \\dots, v_{m}\\) in \\(V\\) requirements Formally, a linearly independent list is defined by there being only one choice of scalars \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) to write \\(0\\) as a linear combination of \\(v_{1},\\dots, v_{m}\\): namely, by taking each \\(a_1, \\dots a_{m}\\) to \\(0\\).\nWe also declare \\(()\\) to be linearly independent.\nadditional information linearly dependent a list is linearly dependent if\u0026hellip;. its not linearly independent.\noh. my. god.\nBased on the same formal definition, this means that a linearly dependent list is defined by the fact that there can be more than one way of writing \\(0\\) as a linear combination of that list of vectors, where one of the ways makes it so that writing \\(0\\) does not require all zero scalars.\nlength of linearly-independent list \\(\\leq\\) length of spanning list A linearly independent list should be smaller or equal in length to a spanning list.\nThe canonical proof is one by induction.\nSuppose \\(u_1, \\dots u_{m}\\) is an linearly independent list in \\(V\\). Take also a list \\(w_1, \\dots w_{n}\\) spans \\(V\\). We desire that \\(m\\leq n\\). We create a list of length \\(n\\) containing all of the \\(w\\) thus far. Our invariant is that \\(len(B) = n\\). This proof essentially uses Floyd\u0026rsquo;s Invariant Method (compsci topic for Jack\u0026rsquo;s understanding only.)\nbase case take the spanning list of \\(V\\) we declared named \\(w_1, \\dots w_{n}\\). Given it spans, adding any other vector in \\(V\\), if \\(w_1, \\dots w_{n}\\) isn\u0026rsquo;t already linearly dependent, will make it linearly dependent. 
This is because you can write the new vector \\(v \\in V\\) which you add as a linear combination of the previous vectors already as they already span \\(V\\).\nBy the Linear Dependence Lemma, you can remove one of the vectors in the new linearly dependent list while keeping the list still spanning \\(V\\).\nNow, construct the list:\n\\begin{equation} u_1, w_1, \\dots w_{n} \\end{equation}\nwhere, \\(u_{1} \\in V\\) is taken from that linearly independent list in \\(V\\). By the statement above, via applying the Linear Dependence Lemma, we can create a list that spans the same space by taking away one of the \\(w_{j}\\) (we can\u0026rsquo;t take \\(u_1\\) because it is at the first position, and we can\u0026rsquo;t grantee its $0$\u0026mdash;see the issue with the Linear Dependence Lemma). We now have a list \\(B\\) with length \\(n\\) with \\(u_1\\) and the rest of the \\(w\\) not taken away which span \\(V\\)\ncase number \\(j\\) Given a spanning list \\(B\\) of \\(V\\) with length \\(n\\), with some parts \\(u_1, \\dots, u_{j-1}, w_{j}, \\dots w_{n}\\). We now include \\(u_{j}\\) in the list, placing it after \\(u_{j-1}\\). As the list pre-inclusion is already a spanning list of \\(V\\), any new vectors from \\(V\\) added will necessarily be able to be written as a linear combination of the other vectors already in the list. Therefore, we know that\u0026mdash;if not already pre-inclusion\u0026mdash;the list is linearly dependent.\nBecause the first half (\\(u_1,\\dots u_{j}\\)) of this new list is linearly independent (given), the bit that \u0026ldquo;causes\u0026rdquo; the linear dependence is in the \\(w\\) (i.e. each \\(u\\) cannot be written by other \\(u\\).) 
Therefore, we can say that the first condition of Linear Dependence Lemma allows us to remove one of the \\(w\\) while spanning the same space, creating again a spanning list of length \\(n\\).\ninduction repeat the procedure \\(m\\) times, resulting in all the \\(u_{j}\\) being included in our new list \\(B\\) of length still \\(n\\). Given we contained a list of length \\(m\\) in a list of length \\(n\\), \\(m \\leq n\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list is a list of vectors such that there is one unique choice of scalars to be able to construct each member of their \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBased on the same technique as in the proof that \u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-there-is-only-one-way-to-write-0\"\u003ea sum of subsets is a direct sum IFF there is only one way to write \\(0\\)\u003c/a\u003e, we can show that in a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e, there is (IFF) only one way to write the zero vector as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of that \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of vectors \u0026mdash;namely, the trivial representation of taking each vector to \\(0\\). 
In fact, we will actually use that as the formal definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e is the \u003cem\u003eresult\u003c/em\u003e of the definition for \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA list of vectors \\(v_1, \\dots, v_{m}\\) in \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eFormally, a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list is defined by there being only one choice of scalars \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) to write \\(0\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v_{1},\\dots, v_{m}\\): namely, by taking each \\(a_1, \\dots a_{m}\\) to \\(0\\).\u003c/p\u003e\n\u003cp\u003eWe also declare \\(()\\) to be \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"linearly-dependent\"\u003elinearly dependent\u003c/h3\u003e\n\u003cp\u003ea list is \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e if\u0026hellip;. its not \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eoh. my. 
god.\u003c/p\u003e\n\u003cp\u003eBased on the same formal definition, this means that a \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list is defined by the fact that there can be more than one way of writing \\(0\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of that \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es, where one of the ways makes it so that writing \\(0\\) does not require all zero scalars.\u003c/p\u003e\n\u003ch3 id=\"length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list should be smaller or equal in length to a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list.\u003c/p\u003e\n\u003cp\u003eThe canonical proof is one by induction.\u003c/p\u003e\n\u003cp\u003eSuppose \\(u_1, \\dots u_{m}\\) is an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\). Take also a list \\(w_1, \\dots w_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). We desire that \\(m\\leq n\\). We create a list of length \\(n\\) containing all of the \\(w\\) thus far. Our invariant is that \\(len(B) = n\\). This proof essentially uses \u003ca href=\"/posts/kbhfloyd_s_invariant_method/\"\u003eFloyd\u0026rsquo;s Invariant Method\u003c/a\u003e (compsci topic for Jack\u0026rsquo;s understanding only.)\u003c/p\u003e\n\u003ch4 id=\"base-case\"\u003ebase case\u003c/h4\u003e\n\u003cp\u003etake the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\) we declared named \\(w_1, \\dots w_{n}\\). 
Given it spans, adding any other vector in \\(V\\), if \\(w_1, \\dots w_{n}\\) isn\u0026rsquo;t already \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e, will make it \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e. This is because you can write the new vector \\(v \\in V\\) which you add as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the previous \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es already as they already span \\(V\\).\u003c/p\u003e\n\u003cp\u003eBy the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, you can remove one of the \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in the new \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list while keeping the list still \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e \\(V\\).\u003c/p\u003e\n\u003cp\u003eNow, construct the list:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_1, w_1, \\dots w_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(u_{1} \\in V\\) is taken from that \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\). By the statement above, via applying the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, we can create a list that \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003es the same space by taking away one of the \\(w_{j}\\) (we can\u0026rsquo;t take \\(u_1\\) because it is at the first position, and we can\u0026rsquo;t grantee its $0$\u0026mdash;see the \u003ca href=\"/posts/kbhlinear_dependence_lemma/#issue\"\u003eissue\u003c/a\u003e with the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e). 
We now have a list \\(B\\) with length \\(n\\) with \\(u_1\\) and the rest of the \\(w\\) not taken away which span \\(V\\)\u003c/p\u003e\n\u003ch4 id=\"case-number-j\"\u003ecase number \\(j\\)\u003c/h4\u003e\n\u003cp\u003eGiven a spanning list \\(B\\) of \\(V\\) with length \\(n\\), with some parts \\(u_1, \\dots, u_{j-1}, w_{j}, \\dots w_{n}\\). We now include \\(u_{j}\\) in the list, placing it after \\(u_{j-1}\\). As the list pre-inclusion is already a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\), any new \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es from \\(V\\) added will necessarily be able to be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the other vectors already in the list. Therefore, we know that\u0026mdash;if not already pre-inclusion\u0026mdash;the list is \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBecause the first half (\\(u_1,\\dots u_{j}\\)) of this new list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e (given), the bit that \u0026ldquo;causes\u0026rdquo; the linear dependence is in the \\(w\\) (i.e. each \\(u\\) cannot be written by other \\(u\\).) Therefore, we can say that the first condition of \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e allows us to remove one of the \\(w\\) while \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e the same space, creating again a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of length \\(n\\).\u003c/p\u003e\n\u003ch4 id=\"induction\"\u003einduction\u003c/h4\u003e\n\u003cp\u003erepeat the procedure \\(m\\) times, resulting in all the \\(u_{j}\\) being included in our new list \\(B\\) of length still \\(n\\). 
Given we contained a list of length \\(m\\) in a list of length \\(n\\), \\(m \\leq n\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_independence/","tags":null,"title":"linear independence"},{"categories":null,"contents":"A Linear Map (a.k.a. Linear Transformation) is a function which maps elements between two vector space that follows linear properties.\nconstituents vector spaces \\(V\\) and \\(W\\) (they don\u0026rsquo;t have to be subspaces) A function \\(T: V \\to W\\) (when we put something in, it only goes to one place) requirements \\(T\\) is considered a Linear Map if it follows\u0026hellip; (properties of \u0026ldquo;linearity\u0026rdquo;)\nadditivity \\begin{equation} T(u+v) = Tu+Tv,\\ \\forall u,v \\in V \\end{equation}\nhomogeneity \\begin{equation} T(\\lambda v) = \\lambda (Tv),\\ \\forall \\lambda \\in \\mathbb{F}, v \\in V \\end{equation}\nadditional information note on notation The \u0026ldquo;application\u0026rdquo; of a Linear Map \\(T\\) on a vector \\(V\\) is written as:\n\\begin{equation} \\begin{cases} Tv \\\\ T(v) \\end{cases} \\end{equation}\nboth are acceptable.\n\\(\\mathcal{L}(V,W)\\) The set of all Linear Maps from \\(V\\) to \\(W\\) is denoted as \\(\\mathcal{L}(V,W)\\).\nsome fun linear maps zero There is, of course, the Linear Map that maps everything to the \\(0\\) \u0026mdash; as the zero exists in all vector spaces.\nThat is:\n\\begin{equation} 0 \\in \\mathcal{L}(V,W) \\implies 0v = 0, v\\in V \\end{equation}\nadditivity\nLet \\(v_1+v_2= v \\in V\\).\n\\begin{equation} 0(v_1+v_2) = 0(v) = 0 = 0+0 = 0v_1+0v_2 \\end{equation}\nhomogeneity\nLet \\(\\lambda v = u \\in V\\).\n\\begin{equation} 0(\\lambda v) = 0(u) = 0 = \\lambda 0 = \\lambda 0v \\end{equation}\nidentity Another classic. \\(I\\), the identity map, is denoted as (for some \\(v \\in V\\) and \\(I \\in \\mathcal{L}(V,V)\\)):\n\\begin{equation} Iv = v \\end{equation}\ni.e. 
it does nothing\nadditivity\nLet \\(v_1,v_2 \\in V\\):\n\\begin{equation} I(v_1+v_2) = v_1+v_2 = Iv1+Iv2 \\end{equation}\nhomogeneity\n\\begin{equation} I(\\lambda v) = \\lambda v = \\lambda Iv \\end{equation}\nany map from \\(\\mathbb{F}^{n}\\) to \\(\\mathbb{F}^{m}\\) turns out any map that follows a specific pattern of polynomials between two vector spaces are Linear Maps.\nDefine some two vector spaces \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{m}\\), some set of scalars \\(a_{jk} \\in \\mathbb{F}: j=1, \\dots m; k=1, \\dots n\\).\nWe construct \\(T \\in \\mathcal{L}(\\mathbb{F}^{n}, \\mathbb{F}^{m})\\) by: \\(T(x_1, \\dots x_{n}) = a_{11} x_1+ \\cdots + a_{1n} x_{n}, \\dots, a_{m1} x_1 + \\cdots + a_{mn} x_{n}\\) (i.e. a combination of linear combinations).\nadditivity\nLet \\(x,y \\in \\mathbb{F}^{n}\\), with \\(x_{j}\\) being each coordinate of \\(x\\) and the same goes for \\(y\\).\n\\begin{align} T((x_1, \\dots x_{n}) + (y_1, \\dots y_{n}))) \u0026amp;= T(x_1+y_1 \\dots x_{n}+y_{n}) \\\\ \u0026amp;= a_{11}(x_1+y_1) + \\cdots, \\dots, \\cdots + a_{mn} (x_{n} + y_{n}) \\\\ \u0026amp;= (a_{11}x_1 + a_{11}y_{1}) + \\cdots, \\dots, \\cdots + (a_{mn} x_{n} + a_{mn} y_{n}) \\\\ \u0026amp;= (a_{11}x_1 + \\cdots) + (a_{11}y_{1}+ \\cdots ), \\dots, (\\cdots + a_{mn}x_n) + (\\cdots + a_{mn}y_{n}) \\\\ \u0026amp;= ((a_{11}x_1 + \\cdots), \\dots, (\\cdots + a_{mn}x_n)) = ((a_{11}y_{1}+ \\cdots ), \\dots,(\\cdots + a_{mn}y_{n})) \\\\ \u0026amp;= T (x_1, \\dots, x_{n}) + T (y_1, \\dots, x_{n}) \\end{align}\nhomogeneity\nProof left to the reader. Pretty much just expand and more algebra.\nmatricies to encode Linear Map we can use matricies to represent Linear Maps. 
See matrix of Linear Map\n\u0026ldquo;basis of domain\u0026rdquo; This result tells us that we can find a Linear Map for wherever we want to take the basis of a vector space, and that a Linear Map\u0026rsquo;s behavior on basis uniquely determines that Linear Map.\nSee basis of domain.\naddition and scalar multiplication on \\(\\mathcal{L}(V,W)\\) Suppose \\(S,T \\in \\mathcal{L}(V,W); \\lambda \\in \\mathbb{F}\\).\n\u0026ldquo;Sum\u0026rdquo; and \u0026ldquo;Product\u0026rdquo; are defined in the way that one would expect:\n\\begin{equation} (S+T)(v) = Sv+Tv \\end{equation}\nand\n\\begin{equation} (\\lambda T)(v) = \\lambda (Tv) \\end{equation}\nfor all \\(v \\in V\\).\nThese two operations make \\(\\mathcal{L}(V,W)\\) a vector space (\\(1Tv = Tv\\), \\(0+Tv=Tv\\), \\(Tv + (-1)Tv = 0\\), associativity, commutativity, distributive inherits from \\(V\\).)\nlinear maps take \\(0\\) to \\(0\\) We desire that \\(T(0) = 0\\) for any linear map \\(T\\)\nProof:\n\\begin{equation} T(0) = T(0+0) \\end{equation}\nThen, by additivity:\n\\begin{equation} T(0) = T (0 + 0) = T (0) + T (0) \\end{equation}\nGiven \\(\\mathcal{L}(V,W)\\) is a vector space for any \\(V,W\\), \\(\\exists -T(0)\\) such that \\(T(0)+(-T(0)) = 0\\). Applying that here:\n\\begin{equation} T(0) = T(0)+T(0) \\implies T(0) -T(0) = T(0)+T(0)-T(0) \\implies 0 = T(0) \\end{equation}\nProduct of Linear Maps See Product of Linear Maps\n\u0026ldquo;sizes\u0026rdquo; of maps map to smaller space is not injective Suppose \\(V,W\\) are finite-dimensional vector spaces, and \\(\\dim V \u0026gt; \\dim W\\). Then, all \\(T \\in \\mathcal{L}(V,W)\\) are not injective.\nWe first have that:\n\\begin{align} \u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T \\end{align}\nrecall at this point that \\(\\dim range\\ T \\leq \\dim W\\) (the range is a subspace of the codomain.) Therefore, subtracting a bigger value means that the value will be smaller. 
So we have that:\n\\begin{align} \u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim null\\ T \\geq \\dim V - \\dim W \\end{align}\nNow, recall that \\(\\dim V \u0026gt; \\dim W\\). Therefore, \\(\\dim V - \\dim W\\) is strictly bigger than \\(0\\). So:\n\\begin{align} \\dim null\\ T \u0026amp;\\geq \\dim V - \\dim W \\\\ \u0026amp;\u0026gt; 0 \\end{align}\nAnd so, the dimension of the null space of \\(T\\) is not \\(0\\). Therefore, the null space of \\(T\\) can\u0026rsquo;t have been \\(\\{0\\}\\) because that does have dimension \\(0\\). This makes the map not injective because injectivity implies that null space is \\(\\{0\\}\\)\nmap to bigger space is not surjective Its basically the same thing as the one above. Suppose \\(V,W\\) are finite-dimensional vector spaces, and \\(\\dim V \u0026lt; \\dim W\\). Then, all \\(T \\in \\mathcal{L}(V,W)\\) are not injective.\nWe first have that:\n\\begin{align} \u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\end{align}\nBecause the dimension of \\(null\\ T\\) is larger than \\(0\\) (or, for that matter, the dimension of anything), \\(\\dim V - \\dim\\ null\\ T \\leq \\dim\\ V\\). Hence:\n\\begin{align} \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V \\end{align}\nNow, recall that \\(\\dim V \u0026lt; \\dim W\\).\n\\begin{align} \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V \u0026lt; \\dim W \\end{align}\nGiven the range of \\(T\\) is smaller than the codomain of \\(T\\), they cannot be equal spaces. So, \\(T\\) is not surjective.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e (a.k.a. 
\u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Transformation\u003c/a\u003e) is a \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e which maps elements between two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e that follows linear properties.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(V\\) and \\(W\\) (they \u003cspan class=\"underline\"\u003edon\u0026rsquo;t\u003c/span\u003e have to be \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es)\u003c/li\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(T: V \\to W\\) (when we put something in, it only goes to one place)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(T\\) is considered a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e if it follows\u0026hellip; (properties of \u0026ldquo;linearity\u0026rdquo;)\u003c/p\u003e\n\u003ch3 id=\"additivity\"\u003eadditivity\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT(u+v) = Tu+Tv,\\ \\forall u,v \\in V\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"homogeneity--kbhhomogeneity-dot-md\"\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT(\\lambda v) = \\lambda (Tv),\\ \\forall \\lambda \\in \\mathbb{F}, v \\in V\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"note-on-notation\"\u003enote on notation\u003c/h3\u003e\n\u003cp\u003eThe \u0026ldquo;application\u0026rdquo; of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T\\) on a vector \\(V\\) is written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nTv 
\\\\\nT(v)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eboth are acceptable.\u003c/p\u003e\n\u003ch3 id=\"mathcal-l--v-w\"\u003e\\(\\mathcal{L}(V,W)\\)\u003c/h3\u003e\n\u003cp\u003eThe set of all \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es from \\(V\\) to \\(W\\) is denoted as \\(\\mathcal{L}(V,W)\\).\u003c/p\u003e\n\u003ch3 id=\"some-fun-linear-maps\"\u003esome fun linear maps\u003c/h3\u003e\n\u003ch4 id=\"zero--kbhzero-dot-md\"\u003e\u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eThere is, of course, the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e that maps everything to the \\(0\\) \u0026mdash; as the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e exists in all \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\in \\mathcal{L}(V,W) \\implies 0v = 0, v\\in V\n\\end{equation}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eadditivity\u003c/p\u003e\n\u003cp\u003eLet \\(v_1+v_2= v \\in V\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0(v_1+v_2) = 0(v) = 0 = 0+0 = 0v_1+0v_2\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eLet \\(\\lambda v = u \\in V\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0(\\lambda v) = 0(u) = 0 = \\lambda 0 = \\lambda 0v\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"identity--kbhidentity-dot-md\"\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eAnother classic. 
\\(I\\), the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map, is denoted as (for some \\(v \\in V\\) and \\(I \\in \\mathcal{L}(V,V)\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nIv = v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. it does nothing\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eadditivity\u003c/p\u003e\n\u003cp\u003eLet \\(v_1,v_2 \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(v_1+v_2) = v_1+v_2 = Iv1+Iv2\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(\\lambda v) = \\lambda v = \\lambda Iv\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"any-map-from-mathbb-f-n-to-mathbb-f-m\"\u003eany map from \\(\\mathbb{F}^{n}\\) to \\(\\mathbb{F}^{m}\\)\u003c/h4\u003e\n\u003cp\u003eturns out any map that follows a specific pattern of \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es between two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es are \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eDefine some two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{m}\\), some set of scalars \\(a_{jk} \\in \\mathbb{F}: j=1, \\dots m; k=1, \\dots n\\).\u003c/p\u003e\n\u003cp\u003eWe construct \\(T \\in \\mathcal{L}(\\mathbb{F}^{n}, \\mathbb{F}^{m})\\) by: \\(T(x_1, \\dots x_{n}) = a_{11} x_1+ \\cdots + a_{1n} x_{n}, \\dots, a_{m1} x_1 + \\cdots + a_{mn} x_{n}\\) (i.e. 
a combination of \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es).\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eadditivity\u003c/p\u003e\n\u003cp\u003eLet \\(x,y \\in \\mathbb{F}^{n}\\), with \\(x_{j}\\) being each \u003ca href=\"/posts/kbhlists_over_fields/\"\u003ecoordinate\u003c/a\u003e of \\(x\\) and the same goes for \\(y\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nT((x_1, \\dots x_{n}) + (y_1, \\dots y_{n}))) \u0026amp;= T(x_1+y_1 \\dots x_{n}+y_{n}) \\\\\n\u0026amp;= a_{11}(x_1+y_1) + \\cdots, \\dots, \\cdots + a_{mn} (x_{n} + y_{n}) \\\\\n\u0026amp;= (a_{11}x_1 + a_{11}y_{1}) + \\cdots, \\dots, \\cdots + (a_{mn} x_{n} + a_{mn} y_{n}) \\\\\n\u0026amp;= (a_{11}x_1 + \\cdots) + (a_{11}y_{1}+ \\cdots ), \\dots, (\\cdots + a_{mn}x_n) + (\\cdots + a_{mn}y_{n}) \\\\\n\u0026amp;= ((a_{11}x_1 + \\cdots), \\dots, (\\cdots + a_{mn}x_n)) = ((a_{11}y_{1}+ \\cdots ), \\dots,(\\cdots + a_{mn}y_{n})) \\\\\n\u0026amp;= T (x_1, \\dots, x_{n}) + T (y_1, \\dots, x_{n})\n\\end{align}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eProof left to the reader. Pretty much just expand and more algebra.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"matricies--kbhmatricies-dot-md--to-encode-linear-map--kbhlinear-map-dot-md\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e to encode \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003ewe can use \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e to represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es. 
See \u003ca href=\"/posts/kbhmatricies/#matrix-of-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map\"\u003ematrix of Linear Map\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"basis-of-domain--kbhbasis-of-domain-dot-md\"\u003e\u0026ldquo;\u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eThis result tells us that we can find a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e for wherever we want to take the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e, and that a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e\u0026rsquo;s behavior on \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e uniquely determines that \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"addition-and-scalar-multiplication-on-mathcal-l--v-w\"\u003eaddition and scalar multiplication on \\(\\mathcal{L}(V,W)\\)\u003c/h3\u003e\n\u003cp\u003eSuppose \\(S,T \\in \\mathcal{L}(V,W); \\lambda \\in \\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Sum\u0026rdquo; and \u0026ldquo;Product\u0026rdquo; are defined in the way that one would expect:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(S+T)(v) = Sv+Tv\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda T)(v) = \\lambda (Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eThese two operations make \\(\\mathcal{L}(V,W)\\) a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e (\\(1Tv = Tv\\), \\(0+Tv=Tv\\), \\(Tv + (-1)Tv = 0\\), associativity, commutativity, distributive inherits from \\(V\\).)\u003c/p\u003e\n\u003ch3 id=\"linear-maps-take-0-to-0\"\u003elinear maps 
take \\(0\\) to \\(0\\)\u003c/h3\u003e\n\u003cp\u003eWe desire that \\(T(0) = 0\\) for any linear map \\(T\\)\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(0) = T(0+0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, by additivity:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(0) = T (0 + 0) = T (0) + T (0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(\\mathcal{L}(V,W)\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e for any \\(V,W\\), \\(\\exists -T(0)\\) such that \\(T(0)+(-T(0)) = 0\\). Applying that here:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(0) = T(0)+T(0) \\implies T(0) -T(0) = T(0)+T(0)-T(0) \\implies 0 = T(0)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"product-of-linear-maps\"\u003eProduct of Linear Maps\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhproduct_of_linear_maps/\"\u003eProduct of Linear Maps\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"sizes-of-maps\"\u003e\u0026ldquo;sizes\u0026rdquo; of maps\u003c/h3\u003e\n\u003ch4 id=\"map-to-smaller-space-is-not-injective--kbhinjectivity-dot-md\"\u003emap to smaller space is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eSuppose \\(V,W\\) are \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003es, and \\(\\dim V \u0026gt; \\dim W\\). 
Then, all \\(T \\in \\mathcal{L}(V,W)\\) are \u003cstrong\u003enot\u003c/strong\u003e \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe first have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T\n\\end{align}\u003c/p\u003e\n\u003cp\u003erecall at this point that \\(\\dim range\\ T \\leq \\dim W\\) (the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the codomain.) Therefore, subtracting a bigger value means that the value will be smaller. So we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim null\\ T \\geq \\dim V - \\dim W\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, recall that \\(\\dim V \u0026gt; \\dim W\\). Therefore, \\(\\dim V - \\dim W\\) is strictly bigger than \\(0\\). So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim null\\ T \u0026amp;\\geq \\dim V - \\dim W \\\\\n\u0026amp;\u0026gt; 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd so, the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) is not \\(0\\). Therefore, the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) can\u0026rsquo;t have been \\(\\{0\\}\\) because that does have \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e \\(0\\). 
This makes the map not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e because \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity implies that null space is \\(\\{0\\}\\)\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"map-to-bigger-space-is-not-surjective--kbhsurjectivity-dot-md\"\u003emap to bigger space is not \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eIts basically the same thing as the one above. Suppose \\(V,W\\) are \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003es, and \\(\\dim V \u0026lt; \\dim W\\). Then, all \\(T \\in \\mathcal{L}(V,W)\\) are \u003cstrong\u003enot\u003c/strong\u003e \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe first have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBecause the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of \\(null\\ T\\) is larger than \\(0\\) (or, for that matter, the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of anything), \\(\\dim V - \\dim\\ null\\ T \\leq \\dim\\ V\\). 
Hence:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, recall that \\(\\dim V \u0026lt; \\dim W\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V \u0026lt; \\dim W\n\\end{align}\u003c/p\u003e\n\u003cp\u003eGiven the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) is smaller than the codomain of \\(T\\), they cannot be equal spaces. So, \\(T\\) is not \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_map/","tags":null,"title":"Linear Map"},{"categories":null,"contents":"general form of First-Order Differential Equations This will depend on both unknown function \\(x\\), and the independent variable \\(t\\). These could and could not be separable.\n\\begin{equation} \\dv{x}{t} = F(t,x),\\ x(t_{0}) = x_{0} \\end{equation}\nLet\u0026rsquo;s imagine \\(F\\) is \u0026ldquo;bounded\u0026rdquo; and \u0026ldquo;continuous\u0026rdquo; on \\(I \\times \\omega\\), where \\(I\\) is an open interval about \\(t_{0}\\) and \\(\\omega\\) is an open subset of \\(\\mathbb{R}^{n}\\), containing \\(x_{0}\\). 
\\(F\\) is bounded; the results are bounded??\nfunctions embedded in vector spaces We understand that such First-Order Differential Equations will describe a subset of an infinite dimensional vector space.\nGiven we are dealing with First-Order Differential Equations, each function is a basis (if linear, otherwise, not quite the basis) of the subspace of the larger vector space; \\(+C\\) is how you create parametried variations However, our function is not linear, not all functions would suffice here: non-linear equations are difficult to deal with beacuse the arc length follows a certain pattern General form of a first order linear differential equation A general linear, first-order, first-degree differential equation of the form:\n\\begin{equation} \\dv{y}{x} + P(x)y = Q(x) \\end{equation}\nhas a solution:\n\\begin{equation} y(x) = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x} \\end{equation}\nthe more general solution (for definite integrals):\n\\begin{equation} x(t) = e^{-A(t)}x_{0} + e^{-A(t)}\\int_{t_{0}}^{t}e^{A(s)}b(s)\\dd{s} \\end{equation}\ngiven the initial condition that \\(x(0) = 0\\). This is from the textbook.\nBefore you go ham and start solving, though, make sure that pesky \\(y\\) term is actually there. If its not, you maybe better served using the seperable methods to solve these things.\nthis is bad This is difficult to deal with this! What?? How?? Why does this work?? 
See below.\nsolving differential equations The following technique works for ALL first-order linear differential equations:\nTo solve, first put your equation into the standard form:\n\\begin{equation} \\frac{dy}{dx} + P(x)y = Q(x) \\end{equation}\nIf you have an equation like:\n\\begin{equation} a(x) \\dv{y}{x} + b(x)y = c(x) \\end{equation}\na good way to do this is to apply \\(\\frac{1}{a(x)}\\) to both sides, resulting in:\n\\begin{equation} \\dv{y}{x} + \\frac{b(x)}{a(x)} y = \\frac{c(x)}{a(x)} \\end{equation}\nAnd then you can carry on solving like its an equation in standard form.\nTo solve such a generic equation, we here are trying to UNDO the product rule.\nWe first multiply the entire expression by something called the intergrating factor \\(\\rho(x)\\).\n\\begin{equation} \\rho(x) \\left(\\frac{dy}{dx} + P(x)y\\right) = \\rho(x)Q(x) \\end{equation}\nA note on how this \\(\\rho(x)\\) works. This intergrating factor is actually defined with the following rule:\n\\begin{equation} \\log (\\rho (x)) = \\int P(x) \\dd{x} \\end{equation}\n(notably, \\(\\log\\) is actually \\(\\ln\\) in this case.)\nWhy so weird of an expression? This all springs from the fact that \\(\\dv x e^{x} = e^{x}\\). See below on how this fact is stretched (to great lengths) to solve diffeqs.\nFrom the above expression containing \\(\\rho (x)\\), we naturally have that (based on the definition of the natural log, just expanding it out):\n\\begin{equation} e^{\\int P(x)\\dd{x}} = \\rho (x) \\end{equation}\nWhy is this useful? Remember, we are trying to undo the product rule. Let\u0026rsquo;s replace our new definition for \\(\\rho (x)\\) into the above expression we are trying to solve and see what happens!\n\\begin{align} \u0026amp;\\rho (x)\\qty (\\dv{y}{x} + P(x)y) = \\rho (x)Q(x) \\\\ \\Rightarrow\\ \u0026amp; e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x) \\end{align}\nFor a second now, let\u0026rsquo;s just take an aside and deal with the left side. 
We are starting to almost clearly see the product rule at play here. Let\u0026rsquo;s finish the job by finishing up the rest of the product rule. Remember, we want to go opposite the product rule at the next steps.\n\\begin{align} e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) \u0026amp;= \\dv{y}{x}e^{\\int p\\dd{x}} + yPe^{\\int P\\dd{x}} \\\\ \u0026amp;= \\dv x \\qty (ye^{\\int P\\dd{x}}) \\end{align}\nWoah! Now we have something clearly in the favor of \\(y\\) separated out. Let\u0026rsquo;s put this back to our original expression.\n\\begin{align} \u0026amp;e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x) \\\\ \\Rightarrow\\ \u0026amp; \\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x) \\end{align}\nNice. Now, do you see the clear step to isolate \\(y\\) by itself? I do.\n\\begin{align} \u0026amp;\\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x) \\\\ \\Rightarrow\\ \u0026amp; \\int \\dv x \\qty (ye^{\\int P\\dd{x}}) \\dd{x}= \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\ \\Rightarrow\\ \u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x} \\end{align}\nAnd finally, naturally and lastly, we divide the \\(e^{\\int P\\dd{x}}\\) to both sides.\n\\begin{align} \u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\ \\Rightarrow\\ \u0026amp; y = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\ \\blacksquare \\end{align}\nAnd there you have it. That\u0026rsquo;s the general solution to our diffeq.\n","html":"\u003ch2 id=\"general-form-of-first-order-differential-equations--org9e796b5\"\u003egeneral form of \u003ca href=\"#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThis will depend on both unknown function \\(x\\), and the independent variable \\(t\\). 
These could and could not be separable.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = F(t,x),\\ x(t_{0}) = x_{0}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s imagine \\(F\\) is \u0026ldquo;bounded\u0026rdquo; and \u0026ldquo;continuous\u0026rdquo; on \\(I \\times \\omega\\), where \\(I\\) is an open interval about \\(t_{0}\\) and \\(\\omega\\) is an open subset of \\(\\mathbb{R}^{n}\\), containing \\(x_{0}\\). \\(F\\) is bounded; the results are bounded??\u003c/p\u003e\n\u003ch3 id=\"functions-embedded-in-vector-space--kbhvector-space-dot-md--s\"\u003efunctions embedded in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eWe understand that such \u003ca href=\"#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e will describe a subset of an infinite dimensional \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGiven we are dealing with \u003ca href=\"#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e, each function is a basis (if linear, otherwise, not quite the basis) of the subspace of the larger vector space; \\(+C\\) is how you create parametried variations\u003c/li\u003e\n\u003cli\u003eHowever, our function is not linear, not all functions would suffice here: non-linear equations are difficult to deal with beacuse the arc length follows a certain pattern\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"general-form-of-a-first-order-linear-differential-equation\"\u003eGeneral form of a first order \u003cstrong\u003e\u003cstrong\u003elinear\u003c/strong\u003e\u003c/strong\u003e differential equation\u003c/h2\u003e\n\u003cp\u003eA general linear, first-order, first-degree differential equation of the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P(x)y = Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehas a 
solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x) = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe more general solution (for definite integrals):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{-A(t)}x_{0} + e^{-A(t)}\\int_{t_{0}}^{t}e^{A(s)}b(s)\\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egiven the initial condition that \\(x(0) = 0\\). This is from the textbook.\u003c/p\u003e\n\u003cp\u003eBefore you go ham and start solving, though, make sure that pesky \\(y\\) term is actually there. If its not, you maybe better served using the \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/#solving-separable-differential-equations\"\u003eseperable\u003c/a\u003e methods to solve these things.\u003c/p\u003e\n\u003ch2 id=\"this-is-bad\"\u003ethis is bad\u003c/h2\u003e\n\u003cp\u003eThis is difficult to deal with this! What?? How?? Why does this work?? See below.\u003c/p\u003e\n\u003ch2 id=\"solving-differential-equations\"\u003esolving differential equations\u003c/h2\u003e\n\u003cp\u003eThe following technique works for ALL first-order linear differential equations:\u003c/p\u003e\n\u003cp\u003eTo solve, first put your equation into the standard form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{dy}{dx} + P(x)y = Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf you have an equation like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na(x) \\dv{y}{x} + b(x)y = c(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ea good way to do this is to apply \\(\\frac{1}{a(x)}\\) to both sides, resulting in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + \\frac{b(x)}{a(x)} y = \\frac{c(x)}{a(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then you can carry on solving like its an equation in standard form.\u003c/p\u003e\n\u003cp\u003eTo solve such a generic equation, we here are trying to UNDO the product rule.\u003c/p\u003e\n\u003cp\u003eWe first multiply the 
entire expression by something called the \u003ca href=\"#solving-differential-equations\"\u003eintergrating factor\u003c/a\u003e \\(\\rho(x)\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho(x) \\left(\\frac{dy}{dx} + P(x)y\\right) = \\rho(x)Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eA note on how this \\(\\rho(x)\\) works. This \u003ca href=\"#solving-differential-equations\"\u003eintergrating factor\u003c/a\u003e is actually defined with the following rule:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log (\\rho (x)) = \\int P(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(notably, \\(\\log\\) is actually \\(\\ln\\) in this case.)\u003c/p\u003e\n\u003cp\u003eWhy so weird of an expression? This all springs from the fact that \\(\\dv x e^{x} = e^{x}\\). See below on how this fact is stretched (to great lengths) to solve diffeqs.\u003c/p\u003e\n\u003cp\u003eFrom the above expression containing \\(\\rho (x)\\), we naturally have that (based on the definition of the natural log, just expanding it out):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{\\int P(x)\\dd{x}} = \\rho (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhy is this useful? Remember, we are trying to \u003cem\u003eundo\u003c/em\u003e the product rule. Let\u0026rsquo;s replace our new definition for \\(\\rho (x)\\) into the above expression we are trying to solve and see what happens!\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\rho (x)\\qty (\\dv{y}{x} + P(x)y) = \\rho (x)Q(x) \\\\\n\\Rightarrow\\ \u0026amp; e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFor a second now, let\u0026rsquo;s just take an aside and deal with the left side. We are starting to \u003cem\u003ealmost\u003c/em\u003e clearly see the product rule at play here. Let\u0026rsquo;s finish the job by finishing up the rest of the product rule. 
Remember, we want to \u003cem\u003ego opposite\u003c/em\u003e the product rule at the next steps.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ne^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) \u0026amp;= \\dv{y}{x}e^{\\int p\\dd{x}} + yPe^{\\int P\\dd{x}} \\\\\n\u0026amp;= \\dv x \\qty (ye^{\\int P\\dd{x}})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWoah! Now we have something clearly in the favor of \\(y\\) separated out. Let\u0026rsquo;s put this back to our original expression.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x) \\\\\n\\Rightarrow\\ \u0026amp; \\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNice. Now, do you see the clear step to isolate \\(y\\) by itself? I do.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x) \\\\\n\\Rightarrow\\ \u0026amp; \\int \\dv x \\qty (ye^{\\int P\\dd{x}}) \\dd{x}= \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\\n\\Rightarrow\\ \u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd finally, naturally and lastly, we divide the \\(e^{\\int P\\dd{x}}\\) to both sides.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\\n\\Rightarrow\\ \u0026amp; y = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd there you have it. 
That\u0026rsquo;s the general solution to our diffeq.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_non_seperable_equation/","tags":null,"title":"Linear Non-Seperable Equation"},{"categories":null,"contents":"An exact solution for a dynamic system with quadratic costs and linear differential equation describing the dynamics.\n","html":"\u003cp\u003eAn exact solution for a dynamic system with quadratic costs and \u003ca href=\"/posts/kbhordinary_differential_equations/#linear-vs-dot-non-linear-differential-equations\"\u003elinear differential equation\u003c/a\u003e describing the dynamics.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_quadratic_regulator/","tags":null,"title":"Linear-Quadratic Regulator"},{"categories":null,"contents":"For some non-linear function, we can use its first Jacobian to create a linear system. Then, we can use that system to write the first order Taylor:\n\\begin{equation} y\u0026rsquo; = \\nabla F(crit)y \\end{equation}\nwhere \\(crit\\) are critical points.\nPhase Portrait stability if all \\(Re[\\lambda] \u0026lt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered stable\u0026mdash;that is, points initially near \\(p\\) will exponentially approach \\(p\\)\nif at least one \\(Re[\\lambda] \u0026gt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered unstable\u0026mdash;that is, points initially near \\(p\\) will go somewhere else\nif all \\(Re[\\lambda] \\leq 0\\) and at least one \\(\\lambda\\) is pure imaginary of \\(\\qty(\\nabla F)(p)\\), then there are no conclusions and \\(p\\) is considered marginal\nIf there are no purely imaginary values, then the solution paths of the ODE look like that of \\(y\u0026rsquo; = (\\nambla F)(p) y\\).\nWorked Example Let\u0026rsquo;s Lotha-Volterra Prey-Predictor Equation again as an example\n\\begin{equation} \\begin{cases} x_1\u0026rsquo; = 2x_1-x_1x_2 \\\\ x_2\u0026rsquo; = x_1x_2 - 3x_2 \\end{cases} \\end{equation}\nwe can stare 
at this (and factor \\(x\\) out) to understand that there are only two stationary points:\n\\begin{equation} (x_1,x_2) = (0,0), (3,2) \\end{equation}\nLet\u0026rsquo;s analyze this function for linearilzation.\nLet\u0026rsquo;s write this expression in terms of the linear and non linear parts\n\\begin{equation} \\begin{cases} x\u0026rsquo; = \\mqty(2 \u0026amp; 0 \\\\ 0 \u0026amp; -3) \\mqty(x_1 \\\\ x_2) + \\mqty(-x_1x_2 \\\\ x_1 x_2) \\end{cases} \\end{equation}\nNear \\((0,0)\\) You will note that the right non-linear parts becomes very small near \\((0,0)\\), meaning we can analyze this in terms of a normal phase portrait.\nNear \\((3,2)\\) We can translate this down:\nLet:\n\\begin{equation} y = x - \\mqty(3 \\\\2) \\end{equation}\nmeaning:\n\\begin{equation} y\u0026rsquo; = x\u0026rsquo; = F\\qty(y+\\mqty(3 \\\\ 2)) \\end{equation}\nwe can use a Taylor expansion to get:\n\\begin{equation} y\u0026rsquo; = x\u0026rsquo; = F\\qty(y + \\mqty(3\\\\2)) + \\qty(\\nabla F)y + \\dots \\end{equation}\nRecall that \\(F\\) is given as:\n\\begin{equation} \\mqty(2x_1 - x_1x_2 \\\\ x_1x_2-3x_2) \\end{equation}\nmeaning:\n\\begin{equation} \\nabla \\mqty(2x_1 - x_1x_2 \\\\ x_1x_2-3x_2) = \\mqty(2-x_2 \u0026amp; -x_1 \\\\ x_2 \u0026amp; x_1-3) \\end{equation}\nplugging in \\((3, 2)\\) obtains:\n\\begin{equation} y\u0026rsquo; = \\mqty(0 \u0026amp; -3 \\\\ 2 \u0026amp; 0) y \\end{equation}\nwhich we can analyze in the usual manners.\n","html":"\u003cp\u003eFor some non-linear function, we can use its first Jacobian to create a linear system. 
Then, we can use that system to write the first order Taylor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\nabla F(crit)y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(crit\\) are critical points.\u003c/p\u003e\n\u003ch2 id=\"phase-portrait--kbhsu-math53-feb072024-dot-md--stability\"\u003e\u003ca href=\"/posts/kbhsu_math53_feb072024/#phase-portrait\"\u003ePhase Portrait\u003c/a\u003e stability\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eif \u003cstrong\u003eall\u003c/strong\u003e \\(Re[\\lambda] \u0026lt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered \u003cstrong\u003estable\u003c/strong\u003e\u0026mdash;that is, points initially near \\(p\\) will exponentially approach \\(p\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eif \u003cstrong\u003eat least one\u003c/strong\u003e \\(Re[\\lambda] \u0026gt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered \u003cstrong\u003eunstable\u003c/strong\u003e\u0026mdash;that is, points initially near \\(p\\) will go somewhere else\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eif \u003cstrong\u003eall\u003c/strong\u003e \\(Re[\\lambda] \\leq 0\\) and \u003cstrong\u003eat least one\u003c/strong\u003e \\(\\lambda\\) is pure imaginary of \\(\\qty(\\nabla F)(p)\\), then there are no conclusions and \\(p\\) is considered \u003cstrong\u003emarginal\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eIf there are \u003cstrong\u003eno\u003c/strong\u003e purely imaginary values, then the solution paths of the ODE look like that of \\(y\u0026rsquo; = (\\nambla F)(p) y\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"worked-example\"\u003eWorked Example\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s \u003ca href=\"/posts/kbhsu_math53_feb072024/#lotha-volterra-prey-predictor-equation\"\u003eLotha-Volterra Prey-Predictor Equation\u003c/a\u003e again as an 
example\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = 2x_1-x_1x_2 \\\\\nx_2\u0026rsquo; = x_1x_2 - 3x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can stare at this (and factor \\(x\\) out) to understand that there are only two stationary points:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x_1,x_2) = (0,0), (3,2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s analyze this function for \u003ca href=\"/posts/kbhode_linearilzation/\"\u003elinearilzation\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s write this expression in terms of the linear and non linear parts\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx\u0026rsquo; = \\mqty(2 \u0026amp; 0 \\\\ 0 \u0026amp; -3) \\mqty(x_1 \\\\ x_2) + \\mqty(-x_1x_2 \\\\ x_1 x_2)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"near--0-0\"\u003eNear \\((0,0)\\)\u003c/h3\u003e\n\u003cp\u003eYou will note that the right non-linear parts becomes very small near \\((0,0)\\), meaning we can analyze this in terms of a normal phase portrait.\u003c/p\u003e\n\u003ch3 id=\"near--3-2\"\u003eNear \\((3,2)\\)\u003c/h3\u003e\n\u003cp\u003eWe can translate this down:\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = x - \\mqty(3 \\\\2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = x\u0026rsquo; = F\\qty(y+\\mqty(3 \\\\ 2))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can use a Taylor expansion to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = x\u0026rsquo; = F\\qty(y + \\mqty(3\\\\2)) + \\qty(\\nabla F)y + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(F\\) is given as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(2x_1 - x_1x_2 \\\\ x_1x_2-3x_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla \\mqty(2x_1 - x_1x_2 
\\\\ x_1x_2-3x_2) = \\mqty(2-x_2 \u0026amp; -x_1 \\\\ x_2 \u0026amp; x_1-3)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eplugging in \\((3, 2)\\) obtains:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\mqty(0 \u0026amp; -3 \\\\ 2 \u0026amp; 0) y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich we can analyze in the usual manners.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhode_linearilzation/","tags":null,"title":"linearilzation"},{"categories":null,"contents":"CAPM, a Review Note that we will be using the Sharpe-Linter version of CAPM:\n\\begin{equation} E[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})] \\end{equation}\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\nLet us begin. We will create a generic function to analyze some given stock.\nData Import We will first import our utilities\nimport pandas as pd import numpy as np Let\u0026rsquo;s load the data from our market (NYSE) as well as our 10 year t-bill data.\nt_bill = pd.read_csv(\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;) nyse = pd.read_csv(\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;) nyse.head() Date Close 0 11/7/2013 16:00:00 9924.37 1 11/8/2013 16:00:00 10032.14 2 11/11/2013 16:00:00 10042.95 3 11/12/2013 16:00:00 10009.84 4 11/13/2013 16:00:00 10079.89 Excellent. 
Let\u0026rsquo;s load in the data for that stock.\ndef load_stock(stock): return pd.read_csv(f\u0026#34;./linearity_test_data/{stock}.csv\u0026#34;) load_stock(\u0026#34;LMT\u0026#34;).head() Date Close 0 11/7/2013 16:00:00 136.20 1 11/8/2013 16:00:00 138.11 2 11/11/2013 16:00:00 137.15 3 11/12/2013 16:00:00 137.23 4 11/13/2013 16:00:00 137.26 Raw Data And now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\n# load data df = { \u0026#34;Date\u0026#34;: nyse.Date, \u0026#34;NYSE\u0026#34;: nyse.Close, \u0026#34;TBill\u0026#34;: t_bill.Close, \u0026#34;LMT\u0026#34;: load_stock(\u0026#34;LMT\u0026#34;).Close, \u0026#34;TWTR\u0026#34;: load_stock(\u0026#34;TWTR\u0026#34;).Close, \u0026#34;MCD\u0026#34;: load_stock(\u0026#34;MCD\u0026#34;).Close } # convert to dataframe df = pd.DataFrame(df) # drop empty df.dropna(inplace=True) df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20 1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01 2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09 3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66 4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21 2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28 2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38 2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36 2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07 [2159 rows x 6 columns] Log Returns Excellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\nlog_returns = df[[\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]].apply(np.log, inplace=True) df.loc[:, [\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]] = log_returns df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771 1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814 2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638 3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492 4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262 2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577 2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980 2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879 2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711 [2159 rows x 6 columns] And now, the log returns! We will shift this data by one column and subtract.\nreturns = df.drop(columns=[\u0026#34;Date\u0026#34;]) - df.drop(columns=[\u0026#34;Date\u0026#34;]).shift(1) returns.dropna(inplace=True) returns NYSE TBill LMT TWTR MCD 1 0.010801 0.049646 0.013926 -0.075136 -0.001957 2 0.001077 0.001819 -0.006975 0.029570 0.000824 3 -0.003302 0.006161 0.000583 -0.023586 0.005854 4 0.006974 -0.015657 0.000219 0.016568 0.004597 5 0.005010 -0.008476 0.007476 0.047896 -0.005622 ... ... ... ... ... ... 
2154 0.005785 0.004940 -0.023467 -0.014291 0.001349 2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685 2156 0.006282 0.064420 0.004112 0.015402 0.004403 2157 0.002626 0.034169 0.003575 0.006245 -0.008100 2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168 [2158 rows x 5 columns] Risk-Free Excess Recall that we want to be working with the excess-to-risk-free rates \\(R_{T}-R_{f}\\), where \\(R_{T}\\) is some security. So, we will go through and subtract everything by the risk-free rate (and drop the RFR itself):\nrisk_free_excess = returns.drop(columns=\u0026#34;TBill\u0026#34;).apply(lambda x: x-returns.TBill) risk_free_excess NYSE LMT TWTR MCD 1 -0.038846 -0.035720 -0.124783 -0.051603 2 -0.000742 -0.008794 0.027751 -0.000995 3 -0.009463 -0.005577 -0.029747 -0.000307 4 0.022630 0.015875 0.032225 0.020254 5 0.013486 0.015952 0.056372 0.002854 ... ... ... ... ... 2154 0.000845 -0.028406 -0.019231 -0.003591 2155 -0.021162 -0.037975 -0.043738 -0.047818 2156 -0.058138 -0.060308 -0.049017 -0.060017 2157 -0.031543 -0.030593 -0.027924 -0.042269 2158 0.040964 0.027215 0.010537 0.024422 [2158 rows x 4 columns] Actual Regression It is now time to perform the actual linear regression! We will use statsmodels\u0026rsquo; Ordinary Least Squares API to make our work easier, but we will go through a full regression in the end.\nimport statsmodels.api as sm CAPM Regression: Lockheed Martin Let\u0026rsquo;s work with Lockheed Martin first for regression, fitting an ordinary least squares. Remember that the OLS functions reads the endogenous variable first (for us, the return of the asset.)\n# add a column of ones to our input market excess returns nyse_with_bias = sm.add_constant(risk_free_excess.NYSE) # perform linreg lmt_model = sm.OLS(risk_free_excess.LMT, nyse_with_bias).fit() lmt_model.summary() OLS Regression Results ============================================================================== Dep. Variable: LMT R-squared: 0.859 Model: OLS Adj. 
R-squared: 0.859 Method: Least Squares F-statistic: 1.312e+04 No. Observations: 2158 AIC: -1.263e+04 Df Residuals: 2156 BIC: -1.262e+04 Df Model: 1 Prob (F-statistic): 0.00 Covariance Type: nonrobust Log-Likelihood: 6318.9 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0004 0.000 1.311 0.190 -0.000 0.001 NYSE 0.9449 0.008 114.552 0.000 0.929 0.961 ============================================================================== Based on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being slightly undervarying that the market.\nCAPM Regression: MacDonald\u0026rsquo;s # perform linreg mcd_model = sm.OLS(risk_free_excess.MCD, nyse_with_bias).fit() mcd_model.summary() OLS Regression Results ============================================================================== Dep. Variable: MCD R-squared: 0.887 Model: OLS Adj. R-squared: 0.887 Method: Least Squares F-statistic: 1.697e+04 No. 
Observations: 2158 AIC: -1.310e+04 Df Residuals: 2156 BIC: -1.309e+04 Df Model: 1 Prob (F-statistic): 0.00 Covariance Type: nonrobust Log-Likelihood: 6551.1 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0003 0.000 1.004 0.315 -0.000 0.001 NYSE 0.9651 0.007 130.287 0.000 0.951 0.980 ============================================================================== Same thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. The food industry is probably a tougher business than that in defense.\nCAPM Regression: Twitter Lastly, to analyze the recently delisted Twitter!\n# perform linreg twtr_model = sm.OLS(risk_free_excess.TWTR, nyse_with_bias).fit() twtr_model.summary() OLS Regression Results ============================================================================== Dep. Variable: TWTR R-squared: 0.522 Model: OLS Adj. R-squared: 0.522 Method: Least Squares F-statistic: 2357. No. Observations: 2158 AIC: -8610. Df Residuals: 2156 BIC: -8599. Df Model: 1 Prob (F-statistic): 0.00 Covariance Type: nonrobust Log-Likelihood: 4307.1 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const -0.0002 0.001 -0.346 0.730 -0.002 0.001 NYSE 1.0173 0.021 48.549 0.000 0.976 1.058 ============================================================================== Evidently, Twitter is much more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) 
Furthermore, we have a positive beta value: that the asset is more variable than the market.\nManual Checking We can also use the betas formula to manually calculate what we expect for the beta values (i.e. as if they were one IID random variable.)\nrisk_free_cov = risk_free_excess.cov() risk_free_cov NYSE LMT TWTR MCD NYSE 0.001143 0.001080 0.001163 0.001103 LMT 0.001080 0.001188 0.001116 0.001083 TWTR 0.001163 0.001116 0.002264 0.001155 MCD 0.001103 0.001083 0.001155 0.001200 Finally, to construct the beta values. Recall that:\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nand that:\n\\begin{equation} Var[X] = Cov[X,X], \\forall X \\end{equation}\n# get the market variance (covariance with itself) market_variation = risk_free_cov.NYSE.NYSE # calculate betas betas = {\u0026#34;LMT\u0026#34;: (risk_free_cov.LMT.NYSE/market_variation), \u0026#34;TWTR\u0026#34;: (risk_free_cov.TWTR.NYSE/market_variation), \u0026#34;MCD\u0026#34;: (risk_free_cov.MCD.NYSE/market_variation)} # and make dataframe betas = pd.Series(betas) betas LMT 0.944899 TWTR 1.017294 MCD 0.965081 dtype: float64 Apparently, all of our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\nEqual-Part Fund We will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\nFirst, let\u0026rsquo;s create a baseline fund in equal parts. Here it is:\nfund_1_returns = returns.LMT + returns.TWTR + returns.MCD fund_1_returns 1 -0.063167 2 0.023420 3 -0.017149 4 0.021384 5 0.049750 ... 
2154 -0.036409 2155 -0.021132 2156 0.023917 2157 0.001720 2158 -0.014596 Length: 2158, dtype: float64 We will calculate the excess returns of this fund:\nfund_1_excess = fund_1_returns-returns.TBill fund_1_excess 1 -0.112813 2 0.021600 3 -0.023310 4 0.037041 5 0.058226 ... 2154 -0.041349 2155 -0.057265 2156 -0.040503 2157 -0.032449 2158 0.010994 Length: 2158, dtype: float64 Performance of the Equal-Part Fund # perform linreg fund_1_model = sm.OLS(fund_1_excess, nyse_with_bias).fit() fund_1_model.summary() OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.473 Model: OLS Adj. R-squared: 0.473 Method: Least Squares F-statistic: 1935. No. Observations: 2158 AIC: -7735. Df Residuals: 2156 BIC: -7724. Df Model: 1 Prob (F-statistic): 3.01e-302 Covariance Type: nonrobust Log-Likelihood: 3869.5 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0007 0.001 0.841 0.401 -0.001 0.002 NYSE 1.1290 0.026 43.993 0.000 1.079 1.179 ============================================================================== Surprisingly, we have now created a significantly riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\nA More Optimized Fund To me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. To do this, let\u0026rsquo;s create a generic linear combination of the assets.\nimport sympy as sym x = sym.Symbol(\u0026#39;x\u0026#39;) y = sym.Symbol(\u0026#39;y\u0026#39;) z = sym.Symbol(\u0026#39;z\u0026#39;) fund_2_returns = x*returns.LMT + y*returns.TWTR + z*returns.MCD fund_2_returns 1 0.0139260753744255*x - 0.0751364261353569*y - ... 
2 -0.00697525170622448*x + 0.0295704573211193*y ... 3 0.000583132897928884*x - 0.0235859990058791*y ... 4 0.000218587198947517*x + 0.016568426347233*y +... 5 0.00747599199607762*x + 0.0478955096700351*y -... ... 2154 -0.0234665578621085*x - 0.0142913301107561*y +... 2155 -0.00184214468578059*x - 0.0076045993852194*y ... 2156 0.00411172646842317*x + 0.0154024001854269*y +... 2157 0.00357547337231878*x + 0.0062445563228315*y -... 2158 0.00162509910496933*x - 0.0150529686289622*y -... Length: 2158, dtype: object Excellent. We will also calculate the excess returns of this fund:\nfund_2_excess = fund_2_returns-returns.TBill Y = fund_2_excess.to_numpy() Y [0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039 -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 0.00181917459665826 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536 ... 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043] We cast this type to a numpy array because we are about to perform some matrix operations upon it.\nOptimizing the Optimized Fund: Linreg Now, let us perform the actual linear regression ourselves. Recall that the pseudoinverse linear regression estimator is:\n\\begin{equation} \\beta = (X^{T}X)^{-1}X^{T}Y \\end{equation}\nWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\nX = nyse_with_bias.to_numpy() X [[ 1.00000000e+00 -3.88457302e-02] [ 1.00000000e+00 -7.42217926e-04] [ 1.00000000e+00 -9.46284244e-03] ... 
[ 1.00000000e+00 -5.81378271e-02] [ 1.00000000e+00 -3.15429207e-02] [ 1.00000000e+00 4.09643405e-02]] We now have our matrices, let\u0026rsquo;s perform the linear regression!\nlinear_model = np.linalg.inv((X.transpose()@X))@X.transpose()@Y linear_model [0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281] Excellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\nOptimizing the Optimized Fund: Picking Optimizing Parameters We can will solve for a combination of solutions to give us specific values of returns vs risk. For instance, we can fix the variance to 1 (i.e. we can vary as much as the market.) We subtract one here for the solver, which expects the expressions equaling to \\(0\\).\nrisk_expr = linear_model[1] - 1 risk_expr 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719 Now, we will set a certain earning value, and solve for possible solutions. We will try to get the largest possible bias without needing to short something (i.e. cause a negative solution). By hand-fisting a value, it seems 0.001 is a good bet.\ndeviance_expr = linear_model[0] - 0.001 deviance_expr 0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.00117862072546534 Optimizing the Optimized Fund: Optimize! solution = sym.solvers.solve([deviance_expr, risk_expr], x,y,z) solution {x: 2.16803104555387 - 0.819584899551304*z, y: 0.0137520589394366 - 0.24067066980814*z} We have one degree of freedom here: how much MacDonald\u0026rsquo;s we want! 
Let\u0026rsquo;s say we want none (which would, according to this, be an equally efficient solution.)\nHow Does Our Fund Do? This would create the following plan:\n# for our case z_val = 0 # numerical solutions s_x = solution[x].subs(z,z_val) s_y = solution[y].subs(z,z_val) # solution fund_2_nobias_nomac = s_x*returns.LMT + s_y*returns.TWTR + z_val*returns.MCD fund_2_nobias_nomac.mean() 0.001185050286566688 Recall that this is the performance of the balanced portfolio:\nfund_1_returns.mean() 0.0009224705380695683 So, for market-level risk (\\(\\beta =1\\), instead of the balanced portfolio\u0026rsquo;s \\(\\beta =1.1290\\)), this is a pretty good deal!\nSome Plots Finally, let\u0026rsquo;s plot the prices of our various funds:\nimport matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns from datetime import datetime sns.set() fund_2_price = s_x*df.LMT + s_y*df.TWTR + z_val*df.MCD fund_1_price = df.LMT + df.TWTR fund_l_price = df.LMT fund_t_price = df.TWTR dates = df.Date.apply(lambda x:datetime.strptime(x, \u0026#34;%m/%d/%Y %H:%M:%S\u0026#34;)) sns.lineplot(x=dates, y=fund_2_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_1_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_l_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_t_price.apply(sym.Float).astype(float)) plt.gca().xaxis.set_major_locator(mdates.YearLocator()) plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\u0026#39;%Y\u0026#39;)) plt.gca().set_ylabel(\u0026#34;Price\u0026#34;) plt.show() Recall that we didn\u0026rsquo;t actually buy any MacDonald\u0026rsquo;s. 
So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\nOur portfolio works surprisingly well!\n","html":"\u003ch2 id=\"capm-a-review\"\u003eCAPM, a Review\u003c/h2\u003e\n\u003cp\u003eNote that we will be using the Sharpe-Linter version of CAPM:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\u003c/p\u003e\n\u003cp\u003eLet us begin. We will create a generic function to analyze some given stock.\u003c/p\u003e\n\u003ch2 id=\"data-import\"\u003eData Import\u003c/h2\u003e\n\u003cp\u003eWe will first import our utilities\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s load the data from our market (NYSE) as well as our 10 year t-bill 
data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et_bill\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let\u0026rsquo;s load in the data for that stock.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 136.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 138.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 137.15\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 137.23\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 
16:00:00 137.26\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"raw-data\"\u003eRaw Data\u003c/h2\u003e\n\u003cp\u003eAnd now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# load data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003et_bill\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# convert to dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# drop empty\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"log-returns\"\u003eLog Returns\u003c/h2\u003e\n\u003cp\u003eExcellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, the log returns! 
We will shift this data by one column and subtract.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.010801 0.049646 0.013926 -0.075136 -0.001957\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.001077 0.001819 -0.006975 0.029570 0.000824\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.003302 0.006161 0.000583 -0.023586 0.005854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.006974 -0.015657 0.000219 0.016568 0.004597\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.005010 -0.008476 0.007476 0.047896 -0.005622\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.005785 0.004940 -0.023467 -0.014291 0.001349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.006282 0.064420 0.004112 0.015402 0.004403\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.002626 0.034169 0.003575 0.006245 -0.008100\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"risk-free-excess\"\u003eRisk-Free Excess\u003c/h2\u003e\n\u003cp\u003eRecall that we want to be working with the excess-to-risk-free rates \\(R_{T}-R_{f}\\), where \\(R_{T}\\) is some security. 
So, we will go through and subtract everything by the risk-free rate (and drop the RFR itself):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.038846 -0.035720 -0.124783 -0.051603\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.000742 -0.008794 0.027751 -0.000995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.009463 -0.005577 -0.029747 -0.000307\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.022630 0.015875 0.032225 0.020254\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.013486 0.015952 0.056372 0.002854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.000845 -0.028406 -0.019231 -0.003591\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021162 -0.037975 -0.043738 -0.047818\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.058138 -0.060308 -0.049017 -0.060017\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.031543 -0.030593 -0.027924 -0.042269\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.040964 0.027215 0.010537 0.024422\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"actual-regression\"\u003eActual Regression\u003c/h2\u003e\n\u003cp\u003eIt is now time to perform the actual linear regression! 
We will use statsmodels\u0026rsquo; Ordinary Least Squares API to make our work easier, but we will go through a full regression in the end.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatsmodels.api\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"capm-regression-lockheed-martin\"\u003eCAPM Regression: Lockheed Martin\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s work with Lockheed Martin first for regression, fitting an ordinary least squares. Remember that the OLS functions reads the \u003cem\u003eendogenous\u003c/em\u003e variable first (for us, the return of the asset.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# add a column of ones to our input market excess returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eadd_constant\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: LMT R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.312e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -1.263e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.262e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 6318.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0004 0.000 1.311 0.190 -0.000 
0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9449 0.008 114.552 0.000 0.929 0.961\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBased on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being \u003cem\u003eslightly\u003c/em\u003e undervarying that the market.\u003c/p\u003e\n\u003ch2 id=\"capm-regression-macdonald-s\"\u003eCAPM Regression: MacDonald\u0026rsquo;s\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: MCD R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.697e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -1.310e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.309e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 6551.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0003 0.000 1.004 0.315 -0.000 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9651 0.007 130.287 0.000 0.951 0.980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSame thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. 
The food industry is probably a tougher business than that in defense.\u003c/p\u003e\n\u003ch2 id=\"capm-regression-twitter\"\u003eCAPM Regression: Twitter\u003c/h2\u003e\n\u003cp\u003eLastly, to analyze the recently delisted Twitter!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: TWTR R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 2357.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -8610.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -8599.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 4307.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst -0.0002 0.001 -0.346 0.730 -0.002 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.0173 0.021 48.549 0.000 0.976 1.058\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, Twitter is \u003cem\u003emuch\u003c/em\u003e more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) 
Furthermore, we have a positive beta value: that the asset is more variable than the market.\u003c/p\u003e\n\u003ch2 id=\"manual-checking\"\u003eManual Checking\u003c/h2\u003e\n\u003cp\u003eWe can also use the betas formula to manually calculate what we \u003cem\u003eexpect\u003c/em\u003e for the beta values (i.e. as if they were one IID random variable.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecov\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.001143 0.001080 0.001163 0.001103\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.001080 0.001188 0.001116 0.001083\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 0.001163 0.001116 0.002264 0.001155\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.001103 0.001083 0.001155 0.001200\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, to construct the beta values. Recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar[X] = Cov[X,X], \\forall X\n\\end{equation}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# get the market variance (covariance with itself)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# calculate betas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# and make dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.944899\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 1.017294\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.965081\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eApparently, all of 
our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\u003c/p\u003e\n\u003ch2 id=\"equal-part-fund\"\u003eEqual-Part Fund\u003c/h2\u003e\n\u003cp\u003eWe will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\u003c/p\u003e\n\u003cp\u003eFirst, let\u0026rsquo;s create a baseline fund in equal parts. Here it is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.063167\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.023420\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.017149\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.021384\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.049750\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.036409\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021132\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.023917\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.001720\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 -0.014596\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.112813\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.021600\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.023310\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.037041\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.058226\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.041349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.057265\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.040503\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.032449\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.010994\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: 
float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"performance-of-the-equal-part-fund\"\u003ePerformance of the Equal-Part Fund\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: y R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1935.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -7735.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -7724.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 3.01e-302\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 3869.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0007 0.001 0.841 0.401 -0.001 0.002\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.1290 0.026 43.993 0.000 1.079 1.179\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSurprisingly, we have now created a \u003cstrong\u003esignificantly\u003c/strong\u003e riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\u003c/p\u003e\n\u003ch2 id=\"a-more-optimized-fund\"\u003eA More Optimized Fund\u003c/h2\u003e\n\u003cp\u003eTo me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. To do this, let\u0026rsquo;s create a generic linear combination of the assets.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esympy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#39;x\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;z\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.0139260753744255*x - 0.0751364261353569*y - ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.00697525170622448*x + 0.0295704573211193*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.000583132897928884*x - 0.0235859990058791*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.000218587198947517*x + 0.016568426347233*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.00747599199607762*x + 0.0478955096700351*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.0234665578621085*x - 0.0142913301107561*y 
+...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.00184214468578059*x - 0.0076045993852194*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.00411172646842317*x + 0.0154024001854269*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.00357547337231878*x + 0.0062445563228315*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.00162509910496933*x - 0.0150529686289622*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. We will also calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 0.00181917459665826\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe cast this type to a numpy array because we are about to perform some matrix operations upon it.\u003c/p\u003e\n\u003ch2 id=\"optimizing-the-optimized-fund-linreg\"\u003eOptimizing the Optimized Fund: Linreg\u003c/h2\u003e\n\u003cp\u003eNow, let us 
perform the actual linear regression ourselves. Recall that the pseudoinverse linear regression estimator is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta = (X^{T}X)^{-1}X^{T}Y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[ 1.00000000e+00 -3.88457302e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -7.42217926e-04]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -9.46284244e-03]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 
-5.81378271e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -3.15429207e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 4.09643405e-02]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe now have our matrices, let\u0026rsquo;s perform the linear regression!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elinalg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etranspose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X.transpose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@Y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\u003c/p\u003e\n\u003ch2 id=\"optimizing-the-optimized-fund-picking-optimizing-parameters\"\u003eOptimizing the Optimized Fund: Picking Optimizing Parameters\u003c/h2\u003e\n\u003cp\u003eWe can will solve for a combination of solutions to give us specific values of returns vs risk. For instance, we can fix the variance to 1 (i.e. we can vary as much as the market.) 
We subtract one here for the solver, which expects the expressions equaling to \\(0\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we will set a certain earning value, and solve for possible solutions. We will try to get the largest possible bias without needing to short something (i.e. cause a negative solution). 
By hand-fisting a value, it seems 0.001 is a good bet.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.001\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.00117862072546534\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimizing-the-optimized-fund-optimize\"\u003eOptimizing the Optimized Fund: Optimize!\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolvers\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{x: 2.16803104555387 - 0.819584899551304*z, y: 0.0137520589394366 - 0.24067066980814*z}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe have one degree of freedom here: how much MacDonald\u0026rsquo;s we want! 
Let\u0026rsquo;s say we want none (which would, according to this, be an equally efficient solution.)\u003c/p\u003e\n\u003ch2 id=\"how-does-our-fund-do\"\u003eHow Does Our Fund Do?\u003c/h2\u003e\n\u003cp\u003eThis would create the following plan:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for our case\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# numerical solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003es_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_nobias_nomac\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_x\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_nobias_nomac\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.001185050286566688\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that this is the performance of the balanced portfolio:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e0.0009224705380695683\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo, for market-level risk (\\(\\beta =1\\), instead of the balanced portfolio\u0026rsquo;s \\(\\beta =1.1290\\)), this is a pretty good deal!\u003c/p\u003e\n\u003ch2 id=\"some-plots\"\u003eSome Plots\u003c/h2\u003e\n\u003cp\u003eFinally, let\u0026rsquo;s plot the \u003cem\u003eprices\u003c/em\u003e of our various funds:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.dates\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eseaborn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_x\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estrptime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;%m/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e%d\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e/%Y %H:%M:%S\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_major_locator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYearLocator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eset_major_formatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDateFormatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;%Y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_ylabel\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Price\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_23-33-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that we didn\u0026rsquo;t actually buy any MacDonald\u0026rsquo;s. 
So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\u003c/p\u003e\n\u003cp\u003eOur portfolio works surprisingly well!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/linearity_tests_preso/","tags":null,"title":"Linearity Tests"},{"categories":null,"contents":"linked files is a linked list: in every block, it stores the location of the next block; we don\u0026rsquo;t store files contiguously. We simply store a part of the file in a block, and a pointer to wherever the next block where the file is located is.\nthis solves the contiguous allocation\u0026rsquo;s fragmentation problem.\nproblems massive seek time to get all the blocks for a given file: data scattered random access of files (\u0026ldquo;find the middle\u0026rdquo;) is hard: can\u0026rsquo;t easily jump to an arbitrary location; we had to read the file from the start ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlinked_files/\"\u003elinked files\u003c/a\u003e is a linked list: in every block, it stores the location of the next block; we \u003cstrong\u003edon\u0026rsquo;t store files contiguously\u003c/strong\u003e. 
We simply store a part of the file in a block, and a pointer to wherever the next block where the file is located is.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-10_14-07-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ethis solves the \u003ca href=\"/posts/kbhcontiguous_allocation/\"\u003econtiguous allocation\u003c/a\u003e\u0026rsquo;s fragmentation problem.\u003c/p\u003e\n\u003ch2 id=\"problems\"\u003eproblems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emassive seek time to get all the blocks for a given file: data scattered\u003c/li\u003e\n\u003cli\u003erandom access of files (\u0026ldquo;find the middle\u0026rdquo;) is hard: can\u0026rsquo;t easily jump to an arbitrary location; we had to read the file from the start\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinked_files/","tags":null,"title":"linked files"},{"categories":null,"contents":" Using P2P to trade stocks in a darkpool Sweep across LiquidNet and send normall if not needed ","html":"\u003cul\u003e\n\u003cli\u003eUsing P2P to trade stocks in a \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eSweep across \u003ca href=\"/posts/kbhliquidnet/\"\u003eLiquidNet\u003c/a\u003e and send normall if not needed\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhliquidnet/","tags":null,"title":"LiquidNet"},{"categories":null,"contents":"A list is an ordered collection of \\(n\\) elements.\nrequirements as list length cannot be negative list length cannot be \\(\\infty\\) repetition matters order matters additional info two lists are equal IFF they have same \\(n\\) same elements same order they are different from sets because order matters (therefore, because in/out is no longer a binary) number of entries of the same object matters length is finite ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e is an 
\u003cstrong\u003e\u003cstrong\u003eordered collection\u003c/strong\u003e\u003c/strong\u003e of \\(n\\) elements.\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eas list length cannot be negative\u003c/li\u003e\n\u003cli\u003elist length cannot be \\(\\infty\\)\u003c/li\u003e\n\u003cli\u003erepetition matters\u003c/li\u003e\n\u003cli\u003eorder matters\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-info\"\u003eadditional info\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etwo lists are equal IFF they have\n\u003cul\u003e\n\u003cli\u003esame \\(n\\)\u003c/li\u003e\n\u003cli\u003esame elements\u003c/li\u003e\n\u003cli\u003esame order\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ethey are different from \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003es because\n\u003cul\u003e\n\u003cli\u003eorder matters\u003c/li\u003e\n\u003cli\u003e(therefore, because in/out is no longer a binary) number of entries of the same object matters\u003c/li\u003e\n\u003cli\u003elength is finite\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlist/","tags":null,"title":"list"},{"categories":null,"contents":" Number Name 31 Herber Hoover 32 Franklin D. Roosevelt (FDR) ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eNumber\u003c/th\u003e\n\u003cth\u003eName\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e31\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e32\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. 
Roosevelt (FDR)\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlist_of_american_presidents/","tags":null,"title":"list of American presidents"},{"categories":null,"contents":"The Little Endian architecture is one which the numbers are laid out such that the smallest bytes are placed earlier into memory. In a sense, all the numbers are stored in reverse if readnig from \u0026ldquo;left to \u0026ldquo;right\u0026rdquo;\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhlittle_endian/\"\u003eLittle Endian\u003c/a\u003e architecture is one which the numbers are laid out such that the smallest \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es are placed earlier into memory. In a sense, all the numbers are stored in reverse if readnig from \u0026ldquo;left to \u0026ldquo;right\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlittle_endian/","tags":null,"title":"Little Endian"},{"categories":null,"contents":"Something is considered \u0026ldquo;Living\u0026rdquo; when they exhibit two main biological functions\nTwo Main Functions of Life metabolism: \u0026ldquo;do chemistry to change internal consistence\u0026rdquo; replication: \u0026ldquo;copying cell information\u0026rdquo; theories of origin of life Manfred Eigen (\u0026quot;RNA-World\u0026quot; theory): genes form, constructing enzymes, forming cells Alexander Oparin: cells form, creating enzymes as needed, forming the genes to encode them Freeman Dyson: Dyson\u0026rsquo;s Model of Life\u0026mdash;basically the Oparin model, but with more specifics about how genes evolve ","html":"\u003cp\u003eSomething is considered \u0026ldquo;\u003ca href=\"/posts/kbhliving/\"\u003eLiving\u003c/a\u003e\u0026rdquo; when they exhibit two main biological functions\u003c/p\u003e\n\u003ch2 id=\"two-main-functions-of-life\"\u003eTwo Main Functions of 
Life\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e: \u0026ldquo;do chemistry to change internal consistence\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreplication/\"\u003ereplication\u003c/a\u003e: \u0026ldquo;copying cell information\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"theories-of-origin-of-life\"\u003etheories of origin of life\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eManfred Eigen (\u0026quot;\u003ca href=\"#theories-of-origin-of-life\"\u003eRNA-World\u003c/a\u003e\u0026quot; theory): genes form, constructing enzymes, forming cells\u003c/li\u003e\n\u003cli\u003eAlexander Oparin: cells form, creating enzymes as needed, forming the genes to encode them\u003c/li\u003e\n\u003cli\u003eFreeman Dyson: \u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model of Life\u003c/a\u003e\u0026mdash;basically the Oparin model, but with more specifics about how genes evolve\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhliving/","tags":null,"title":"Living"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhllama/","tags":null,"title":"LLaMA"},{"categories":null,"contents":"Qualitative Changes in Teaching via LLMs no clear sign that there are qualitative changes via GPT no clear catering to students important questions how to treanslate training into practice? how to cater to student needs? what to do with flawed assessments? Teacher Training Conventional Teacher Coaching not scalable, requires observation, and will give feedback not data driven, not adaptive\u0026mdash;expertise is hard AI powered coaching provide data-driven reflection opportunities can be personalized but not personalized with a human connection Automated NLP Feedback talk time measurements reflection opportunities, NLP measurements, etc. 
GPT wasn\u0026rsquo;t good at evaluating teachers GPT is gaslighting the teachers\u0026mdash;rewording the existing work, so no novelty GPT is fairly faithful, the information is relevant, and Punitive vs. Restorative Classroom Management Classroom Management reducing use of exclusionary discipline improve classroom management to prevent escalation teachers feel stressed + under-prepared Examples \u0026ldquo;sit down now\u0026rdquo; vs. \u0026ldquo;do you need a break\u0026rdquo;\nClassroom Management with a Roberta working well to predict whether or not an action is punitive correlations are strong against teacher\u0026rsquo;s and students\u0026rsquo; perceptions of the class Male teachers practice less punitive behavior management than female teachers.\nGenerating Student Feedback How do we support \u0026ldquo;growth mindset\u0026rdquo;. For instance, \u0026ldquo;just try harder!!!!\u0026rdquo; is not growth mindset.\nGMSL Framework emphatic validation reapproaisal of affect seeking to understand position as collaboration provide hope for change use autonomy supportive language ","html":"\u003ch2 id=\"qualitative-changes-in-teaching-via-llms\"\u003eQualitative Changes in Teaching via LLMs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eno clear sign that there are qualitative changes via GPT\u003c/li\u003e\n\u003cli\u003eno clear catering to students\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"important-questions\"\u003eimportant questions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ehow to treanslate training into practice?\u003c/li\u003e\n\u003cli\u003ehow to cater to student needs?\u003c/li\u003e\n\u003cli\u003ewhat to do with flawed assessments?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"teacher-training\"\u003eTeacher Training\u003c/h2\u003e\n\u003ch3 id=\"conventional-teacher-coaching\"\u003eConventional Teacher Coaching\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enot scalable, requires observation, and will give 
feedback\u003c/li\u003e\n\u003cli\u003enot data driven, not adaptive\u0026mdash;expertise is hard\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ai-powered-coaching\"\u003eAI powered coaching\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eprovide data-driven reflection opportunities\u003c/li\u003e\n\u003cli\u003ecan be personalized\u003c/li\u003e\n\u003cli\u003ebut not personalized with a \u003cstrong\u003ehuman connection\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"automated-nlp-feedback\"\u003eAutomated NLP Feedback\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etalk time measurements\u003c/li\u003e\n\u003cli\u003ereflection opportunities, NLP measurements, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"gpt-wasn-t-good-at-evaluating-teachers\"\u003eGPT wasn\u0026rsquo;t good at evaluating teachers\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGPT is gaslighting the teachers\u0026mdash;rewording the existing work, so \u003cstrong\u003eno novelty\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eGPT is fairly faithful, the information is relevant, and\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"punitive-vs-dot-restorative-classroom-management\"\u003ePunitive vs. Restorative Classroom Management\u003c/h2\u003e\n\u003ch3 id=\"classroom-management\"\u003eClassroom Management\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ereducing use of exclusionary discipline\u003c/li\u003e\n\u003cli\u003eimprove classroom management to prevent escalation\u003c/li\u003e\n\u003cli\u003eteachers feel stressed + under-prepared\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"examples\"\u003eExamples\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;sit down now\u0026rdquo; vs. 
\u0026ldquo;do you need a break\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"classroom-management-with-a-roberta\"\u003eClassroom Management with a Roberta\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eworking well to predict whether or not an action is punitive\u003c/li\u003e\n\u003cli\u003ecorrelations are strong against teacher\u0026rsquo;s and students\u0026rsquo; perceptions of the class\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMale teachers practice \u003cstrong\u003eless\u003c/strong\u003e punitive behavior management than female teachers.\u003c/p\u003e\n\u003ch2 id=\"generating-student-feedback\"\u003eGenerating Student Feedback\u003c/h2\u003e\n\u003cp\u003eHow do we support \u0026ldquo;growth mindset\u0026rdquo;. For instance, \u0026ldquo;just try harder!!!!\u0026rdquo; is not growth mindset.\u003c/p\u003e\n\u003ch3 id=\"gmsl-framework\"\u003eGMSL Framework\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eemphatic validation\u003c/li\u003e\n\u003cli\u003ereapproaisal of affect\u003c/li\u003e\n\u003cli\u003eseeking to understand\u003c/li\u003e\n\u003cli\u003eposition as collaboration\u003c/li\u003e\n\u003cli\u003eprovide hope for change\u003c/li\u003e\n\u003cli\u003euse autonomy supportive language\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdora/","tags":null,"title":"LLM for Teacher Feedback"},{"categories":null,"contents":"For the past 20 years, semantic indexing sucked.\nFor the most part, the core offerings of search products in the last while is divided into two categories:\nFull-text search things (i.e. 
every app in the face of the planet that stores text), which for the most part use something n-grammy like Okapi BM25 to do nice fuzzy string matching Ranking/Recommendation things, who isn\u0026rsquo;t so much trying to search a database as they are trying to guess the user\u0026rsquo;s intent and recommend them things from it And we lived in a pretty happy world in which, depending on the application, developers chose one or the other to build.\nThere\u0026rsquo;s something really funny to do with this idea of \u0026ldquo;search\u0026rdquo;. Take, for instance, Google. Its a \u0026ldquo;search\u0026rdquo; engine\u0026mdash;but really it recommends people information that is probably relevant; PageRank, the company\u0026rsquo;s claim to fame, isn\u0026rsquo;t even textual analysis of any type at all: it is a measure of relevance, based on centrality arguments about where the average web surfer may end up.\nBy framing systems like Google as an act of recommendation, we can see why it is so widely adopted: it, really, brings the best of the Internet to the user\u0026mdash;a catalogue of sorts\u0026mdash;based on text which the user provides as input data regarding their interest. It is, importantly, not a capital-s Search engine.\nAnd perhaps this explains why this doesn\u0026rsquo;t work:\nFigure 1: Oh my lord scary books.\nWouldn\u0026rsquo;t it be nice for a query like this to return us actual, non-scary books?\nIf you think about it, back in the olden days (i.e. 2019), there really isn\u0026rsquo;t a way to reconcile this difference between search and recommendation engines. 
Textual-based search systems were fantastically fast and gave you the exact things you needed\u0026mdash;great for filing \u0026ldquo;that file named this\u0026rdquo;\u0026mdash;but categorically useless when it comes to parsing large amounts of data that the user doesn\u0026rsquo;t know the exact terminology for.\nRecommendation engines, on the other hand, often required special indexing or behavioral modeling for what \u0026ldquo;an average user of this type\u0026rdquo; would like, which is great for leading the user to discover certain things they wouldn\u0026rsquo;t otherwise find, but makes hilarious mistakes like the above because, well, they aren\u0026rsquo;t doing much linguistic analysis.\nSo, what if we can simultaneously do the guessing game for user behavior\u0026mdash;a la \u0026ldquo;recommendation engines\u0026rdquo;\u0026mdash;but still use a fundamentally text-based approach to perform searching\u0026mdash;a la a \u0026ldquo;search service\u0026rdquo;?\nLLMs are text matchers Transformers are Text Matchers Fundamentally, the Transformer (the basis of all of those lovely large-language models (LLMs)) is a \u0026ldquo;sequence transduction model\u0026rdquo;\u0026mdash;they are (and were originally invented as) a translation model of sorts. 
And I find it easy and productive to think of LLMs in that mindframe: although their output may look like human-like \u0026ldquo;reasoning\u0026rdquo;, LLMs\u0026rsquo; fundamental job is to match one bit of text (context) against another (output).\nLLMs are Internet Text Matchers The actual thing that is making the whole of the world go crazy right now, though, is the fact that LLMs, transformers trained on the internet, seem to be able to handle text-to-text \u0026ldquo;translation\u0026rdquo; tasks of a much more general nature:\n\u0026ldquo;given a food, translate it into its recipe\u0026rdquo; \u0026ldquo;given this text, translate it into the same written with a pirate voice\u0026rdquo; \u0026ldquo;given the comment, translate it to the code that follows\u0026rdquo; You see, instead of carefully supervised translations a la the original Transformer, GPT+friends is simply chopping off the entirety of the input encoding process and letting the decoder ramble on its own. Hence, its outputs are functionally text matches of the context against the training data: that is, these LLM models effectively are text-matchers between the whole of the internet and your input.\nAnd hence, the promise Let\u0026rsquo;s recap real quick.\nTransformers are text matchers LLMs, which are transformers, are text matchers against the text on the internet And here\u0026rsquo;s the corollary to both of these statements:\nTransformers match text LLMs, which are transformers, is trained (and biased) towards the things people would typically be looking for on the internet That is, though LLMs fundamentally match text, they know what\u0026rsquo;s on the internet and how people talk about (and try to look for) them.\nThe point above is, to me, a very profound idea. 
LLMs essentially achieve what we were hoping to edge towards in the last 20 years\u0026mdash;closing the gap between recommendation (what people want) and search (text matching) systems into one uniform interface.\nAnd, praise be, LLMs seems to be highly directable at this task as well: they excel at few-shot and zero-shot training tasks; meaning, if you just give the Transformer a few examples of how to \u0026ldquo;translate\u0026rdquo; a piece of its input, it will happily do it with some accuracy following your example.\nAside: Stop Building Gosh Darn Chatbots Fortunately, people not named yours truly has also noticed these exciting capabilities of LLMs.\nWhat confuses me, though, is the fact that everybody and their pet duck is building a chat bot or \u0026ldquo;answering\u0026rdquo; service of some description: capitalizing on the fact that LLMs have trained knowledge of text on the internet, but completely disregarding the fact that LLMs fundamentally are best at \u0026ldquo;matching\u0026rdquo; existing text in its context and not hallucinating new text\u0026mdash;as these \u0026ldquo;answer services\u0026rdquo; want to do.\nWhat gives? Wattenburger has this fantastic take on why chat bots are not the best interface for LLMs. To me, the most salient observation\u0026mdash;one which stems from her wonderful arguments about chat bot\u0026rsquo;s poor observability and iteration cycle\u0026mdash;is that the generated text from these current LLM \u0026ldquo;search\u0026rdquo; services (called \u0026ldquo;retrial augmented generation\u0026rdquo;) is just so darn long.\nWhen we look information on a site like Google, I believe our goal is generally to shove the desired information in our head as quickly as possible and know where we can go to learn more; if we wanted to read a 300 word summary about it (as Perplexity AI, Mendable, Phind etc. 
is want to give us) we can just go look up the source ourselves.\nTo me, the duty of a search platform, LLM or not, is to get the user on their merry way as quickly as possible\u0026mdash;information in head or link in hand\u0026mdash;not for the user to navigate a possibly-hallucinated rant about the topic they are looking for, followed by 3 source citations.\nMaking a LLM Search Engine And so we face a rather daunting task. To make a better search service with LLMs, we have to:\nLeverage LLM\u0026rsquo;s fantastic text matching capabilities Allow LLMs to inject their trained biases into what\u0026rsquo;s relevant to the user in order to act as a good recommendation engine Do so in as little words as possible written by the LLM These three bullet points has consumed much of my life for the past 6 months, culminating in a reference implementation of such a \u0026ldquo;LLM search engine\u0026rdquo; called Simon. Let me now tell you its story.\nFulfilling a search query, Part 1/3 Our first goal is to figure out\nSide quest: Actual Text-to-text Recommendation Now You Try ","html":"\u003cp\u003eFor the past 20 years, semantic indexing sucked.\u003c/p\u003e\n\u003cp\u003eFor the most part, the core offerings of search products in the last while is divided into two categories:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eFull-text search things (i.e. 
every app in the face of the planet that stores text), which for the most part use something n-grammy like \u003ca href=\"https://en.wikipedia.org/wiki/Okapi_BM25\"\u003eOkapi BM25\u003c/a\u003e to do nice fuzzy string matching\u003c/li\u003e\n\u003cli\u003eRanking/Recommendation things, who isn\u0026rsquo;t so much trying to \u003cem\u003esearch\u003c/em\u003e a database as they are trying to guess the user\u0026rsquo;s intent and \u003cem\u003erecommend\u003c/em\u003e them things from it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd we lived in a pretty happy world in which, depending on the application, developers chose one or the other to build.\u003c/p\u003e\n\u003cp\u003eThere\u0026rsquo;s something really funny to do with this idea of \u0026ldquo;search\u0026rdquo;. Take, for instance, Google. Its a \u0026ldquo;search\u0026rdquo; engine\u0026mdash;but really it \u003cem\u003erecommends\u003c/em\u003e people information that is probably relevant; PageRank, the company\u0026rsquo;s claim to fame, isn\u0026rsquo;t even textual analysis of any type at all: it is a measure of \u003cem\u003erelevance\u003c/em\u003e, based on centrality arguments about where the average web surfer may end up.\u003c/p\u003e\n\u003cp\u003eBy framing systems like Google as an act of recommendation, we can see why it is so widely adopted: it, really, brings the best of the Internet to the user\u0026mdash;a catalogue of sorts\u0026mdash;based on text which the user provides as input data regarding their interest. 
It is, importantly, \u003cem\u003enot a capital-s Search engine\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eAnd perhaps this explains why this doesn\u0026rsquo;t work:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-02_15-01-29_screenshot.png\"\n alt=\"Figure 1: Oh my lord scary books.\" width=\"60%\" height=\"60%\"\u003e\u003cfigcaption\u003e\n \u003cp\u003e\u003cspan class=\"figure-number\"\u003eFigure 1: \u003c/span\u003eOh my lord scary books.\u003c/p\u003e\n \u003c/figcaption\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWouldn\u0026rsquo;t it be nice for a query like this to return us actual, non-scary books?\u003c/p\u003e\n\u003cp\u003eIf you think about it, back in the olden days (i.e. 2019), there really isn\u0026rsquo;t a way to reconcile this difference between search and recommendation engines. Textual-based search systems were fantastically fast and gave you the exact things you needed\u0026mdash;great for filing \u0026ldquo;that file named this\u0026rdquo;\u0026mdash;but categorically useless when it comes to parsing large amounts of data that the user doesn\u0026rsquo;t know the exact terminology for.\u003c/p\u003e\n\u003cp\u003eRecommendation engines, on the other hand, often required special indexing or behavioral modeling for what \u0026ldquo;an average user of this type\u0026rdquo; would like, which is great for leading the user to discover certain things they wouldn\u0026rsquo;t otherwise find, but makes hilarious mistakes like the above because, well, they aren\u0026rsquo;t doing much linguistic analysis.\u003c/p\u003e\n\u003cp\u003eSo, what if we can simultaneously do the guessing game for user behavior\u0026mdash;a la \u0026ldquo;recommendation engines\u0026rdquo;\u0026mdash;but still use a fundamentally text-based approach to perform searching\u0026mdash;a la a \u0026ldquo;search service\u0026rdquo;?\u003c/p\u003e\n\u003ch2 id=\"llms-are-text-matchers\"\u003eLLMs are text matchers\u003c/h2\u003e\n\u003ch3 
id=\"transformers-are-text-matchers\"\u003eTransformers are Text Matchers\u003c/h3\u003e\n\u003cp\u003eFundamentally, the \u003ca href=\"https://arxiv.org/abs/1706.03762\"\u003eTransformer\u003c/a\u003e (the basis of all of those lovely large-language models (LLMs)) is a \u0026ldquo;sequence transduction model\u0026rdquo;\u0026mdash;they are (and were originally invented as) a \u003cem\u003etranslation\u003c/em\u003e model of sorts. And I find it easy and productive to think of LLMs in that mindframe: although their output may look like human-like \u0026ldquo;reasoning\u0026rdquo;, LLMs\u0026rsquo; fundamental job is to \u003cem\u003ematch\u003c/em\u003e one bit of text (context) against another (output).\u003c/p\u003e\n\u003ch3 id=\"llms-are-internet-text-matchers\"\u003eLLMs are Internet Text Matchers\u003c/h3\u003e\n\u003cp\u003eThe actual thing that is making the whole of the world go crazy right now, though, is the fact that LLMs, transformers trained on the internet, seem to be able to handle text-to-text \u0026ldquo;translation\u0026rdquo; tasks of a \u003cem\u003emuch\u003c/em\u003e more general nature:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;given a food, \u003cstrong\u003etranslate\u003c/strong\u003e it into its recipe\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;given this text, \u003cstrong\u003etranslate\u003c/strong\u003e it into the same written with a pirate voice\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;given the comment, \u003cstrong\u003etranslate\u003c/strong\u003e it to the code that follows\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou see, instead of carefully supervised translations a la the original Transformer, GPT+friends is simply chopping off the entirety of the input encoding process and letting the decoder ramble on its own. 
Hence, its outputs are functionally text matches of the context against the \u003cstrong\u003etraining data\u003c/strong\u003e: that is, these LLM models effectively are text-matchers between the whole of the \u003cem\u003einternet\u003c/em\u003e and your input.\u003c/p\u003e\n\u003ch3 id=\"and-hence-the-promise\"\u003eAnd hence, the promise\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s recap real quick.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTransformers are text matchers\u003c/li\u003e\n\u003cli\u003eLLMs, which are transformers, are text matchers against the text on the internet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd here\u0026rsquo;s the corollary to both of these statements:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTransformers match text\u003c/li\u003e\n\u003cli\u003eLLMs, which are transformers, is trained (and biased) towards the things people would typically be looking for on the internet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThat is, though LLMs fundamentally match text, they know what\u0026rsquo;s on the internet and how people talk about (and try to look for) them.\u003c/p\u003e\n\u003cp\u003eThe point above is, to me, a very profound idea. 
LLMs essentially achieve what we were hoping to edge towards in the last 20 years\u0026mdash;closing the gap between \u003cem\u003erecommendation\u003c/em\u003e (what people want) and \u003cem\u003esearch\u003c/em\u003e (text matching) systems into one uniform interface.\u003c/p\u003e\n\u003cp\u003eAnd, praise be, LLMs seems to be highly directable at this task as well: they excel at \u003ca href=\"https://arxiv.org/abs/2005.14165\"\u003efew-shot and zero-shot training tasks\u003c/a\u003e; meaning, if you just give the Transformer a few examples of how to \u0026ldquo;translate\u0026rdquo; a piece of its input, it will happily do it with some accuracy following your example.\u003c/p\u003e\n\u003ch2 id=\"aside-stop-building-gosh-darn-chatbots\"\u003eAside: Stop Building Gosh Darn Chatbots\u003c/h2\u003e\n\u003cp\u003eFortunately, people not named yours truly has also noticed these exciting capabilities of LLMs.\u003c/p\u003e\n\u003cp\u003eWhat confuses me, though, is the fact that everybody and their pet duck is building a chat bot or \u0026ldquo;answering\u0026rdquo; service of some description: capitalizing on the fact that LLMs have trained knowledge of text on the internet, but completely disregarding the fact that LLMs fundamentally are best at \u0026ldquo;matching\u0026rdquo; existing text in its context and \u003cem\u003enot\u003c/em\u003e hallucinating new text\u0026mdash;as these \u0026ldquo;answer services\u0026rdquo; want to do.\u003c/p\u003e\n\u003cp\u003eWhat gives? \u003ca href=\"https://wattenberger.com/thoughts/boo-chatbots\"\u003eWattenburger has this fantastic take\u003c/a\u003e on why chat bots are not the best interface for LLMs. 
To me, the most salient observation\u0026mdash;one which stems from her wonderful arguments about chat bot\u0026rsquo;s poor observability and iteration cycle\u0026mdash;is that the generated text from these current LLM \u0026ldquo;search\u0026rdquo; services (called \u0026ldquo;retrial augmented generation\u0026rdquo;) is just \u003cem\u003eso darn long\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eWhen we look information on a site like Google, I believe our goal is generally to shove the desired information in our head as quickly as possible and know where we can go to learn more; if we wanted to read a 300 word summary about it (as Perplexity AI, Mendable, Phind etc. is want to give us) we can just go look up the source ourselves.\u003c/p\u003e\n\u003cp\u003eTo me, the duty of a search platform, LLM or not, is to get the user on their merry way as quickly as possible\u0026mdash;information in head or link in hand\u0026mdash;not for the user to navigate a possibly-hallucinated rant about the topic they are looking for, followed by 3 source citations.\u003c/p\u003e\n\u003ch2 id=\"making-a-llm-search-engine\"\u003eMaking a LLM Search Engine\u003c/h2\u003e\n\u003cp\u003eAnd so we face a rather daunting task. To make a better search service with LLMs, we have to:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eLeverage LLM\u0026rsquo;s fantastic text matching capabilities\u003c/li\u003e\n\u003cli\u003eAllow LLMs to inject their trained biases into what\u0026rsquo;s relevant to the user in order to act as a good recommendation engine\u003c/li\u003e\n\u003cli\u003eDo so in as little words as possible written by the LLM\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThese three bullet points has consumed much of my life for the past 6 months, culminating in a reference implementation of such a \u0026ldquo;LLM search engine\u0026rdquo; called \u003ca href=\"https://github.com/Shabang-Systems/simon\"\u003eSimon\u003c/a\u003e. 
Let me now tell you its story.\u003c/p\u003e\n\u003ch3 id=\"fulfilling-a-search-query-part-1-3\"\u003eFulfilling a search query, Part 1/3\u003c/h3\u003e\n\u003cp\u003eOur first goal is to figure out\u003c/p\u003e\n\u003ch2 id=\"side-quest-actual-text-to-text-recommendation\"\u003eSide quest: Actual Text-to-text Recommendation\u003c/h2\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow You Try\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhllms_are_text_matchers/","tags":["writing"],"title":"LLMs are fantastic search engines, so I built one"},{"categories":null,"contents":"Been Kim\nalignment problem involves \u0026ldquo;aligning\u0026rdquo; the representation spaces between machines of the world and that of the human. alternative perspective: teach humans new concepts to understand/communicate better\nfeature attribution doesn\u0026rsquo;t work We take that perspective because many of the intersectional intepretability doesn\u0026rsquo;t work well (feature permutation, etc.)\u0026mdash;feature attribution type analyses (\u0026ldquo;Impossibility Theorems Been Kim\u0026rdquo;) actually has no correlation with predictive results.\nfeature information store in models is unrelated to model edit success i.e.: knowledge storing location located using ROME technique, though it gives you a sense of the location to store information, doens\u0026rsquo;t correlate to success of model editing.\ncan we use ML to teach people? for instance, we can teach grandmasters to play chess using AlphaGo, and see if we can make a quantitative impact.\nconcept A concept is a unit of knowledge that\u0026rsquo;s useful for a task. Two properties:\nminimality: irrelavent information has been removed transferable: it can be taught atomically filtering for good concepts Representing a concept as a sparse vector as the latent space. 
We can check if a concept is transferable by teaching a student agent by doing KL divergence.\ndemonstration learning instead of doing demonstration learning on machines, do it on HUMANS. Filter for the concepts that are well operationalized.\nAlphaZero recap: using a dense network to embed the network, and then MCTS.\n","html":"\u003cp\u003eBeen Kim\u003c/p\u003e\n\u003cp\u003ealignment problem involves \u0026ldquo;aligning\u0026rdquo; the representation spaces between machines of the world and that of the human. alternative perspective: \u003cstrong\u003eteach \u003cspan class=\"underline\"\u003e\u003cspan class=\"underline\"\u003ehumans\u003c/span\u003e\u003c/span\u003e new concepts to understand/communicate better\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"feature-attribution-doesn-t-work\"\u003efeature attribution doesn\u0026rsquo;t work\u003c/h2\u003e\n\u003cp\u003eWe take that perspective because many of the intersectional intepretability doesn\u0026rsquo;t work well (feature permutation, etc.)\u0026mdash;feature attribution type analyses (\u0026ldquo;Impossibility Theorems Been Kim\u0026rdquo;) actually has no correlation with predictive results.\u003c/p\u003e\n\u003ch2 id=\"feature-information-store-in-models-is-unrelated-to-model-edit-success\"\u003efeature information store in models is unrelated to model edit success\u003c/h2\u003e\n\u003cp\u003ei.e.: knowledge storing location located using ROME technique, though it gives you a sense of the location to store information, doens\u0026rsquo;t correlate to success of model editing.\u003c/p\u003e\n\u003ch2 id=\"can-we-use-ml-to-teach-people\"\u003ecan we use ML to teach people?\u003c/h2\u003e\n\u003cp\u003efor instance, we can teach grandmasters to play chess using AlphaGo, and see if we can make a quantitative impact.\u003c/p\u003e\n\u003ch3 id=\"concept\"\u003econcept\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#concept\"\u003econcept\u003c/a\u003e is a unit of knowledge that\u0026rsquo;s 
\u003cstrong\u003euseful for a task\u003c/strong\u003e. Two properties:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003eminimality\u003c/strong\u003e: irrelavent information has been removed\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etransferable\u003c/strong\u003e: it can be taught atomically\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"filtering-for-good-concept--org4a10397--s\"\u003efiltering for good \u003ca href=\"#concept\"\u003econcept\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003eRepresenting a concept as a sparse vector as the latent space. We can check if a concept is transferable by teaching a student agent by doing KL divergence.\u003c/p\u003e\n\u003ch4 id=\"demonstration-learning\"\u003edemonstration learning\u003c/h4\u003e\n\u003cp\u003einstead of doing demonstration learning on machines, do it on \u003cstrong\u003e\u003cstrong\u003eHUMANS\u003c/strong\u003e\u003c/strong\u003e. Filter for the \u003ca href=\"#concept\"\u003econcept\u003c/a\u003es that are well operationalized.\u003c/p\u003e\n\u003ch2 id=\"alphazero\"\u003eAlphaZero\u003c/h2\u003e\n\u003cp\u003erecap: using a dense network to embed the network, and then \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlm_alignment/","tags":null,"title":"LM Alignment"},{"categories":null,"contents":"We begin with a policy parameterized on anything you\u0026rsquo;d like with random seed weights. Then,\nWe sample a local set of parameters, one pertubation \\(\\pm \\alpha\\) per direction in the parameter vector (for instance, for a parameter in 4-space, up, down, left, right in latent space), and use those new parameters to seed a policy. 
Check each policy for its utility via monte-carlo policy evaluation If any of the adjacent points are better, we move there If none of the adjacent points are better, we set \\(\\alpha = 0.5 \\alpha\\) (of the up/down/left/right) and try again We continue until \\(\\alpha\\) drops below some \\(\\epsilon\\).\nNote: if we have billions of parameters, this method will be not that feasible because we have to calculate the Roll-out utility so many many many times.\n","html":"\u003cp\u003eWe begin with a policy parameterized on anything you\u0026rsquo;d like with random seed weights. Then,\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eWe sample a local set of \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es, one pertubation \\(\\pm \\alpha\\) per direction in the parameter vector (for instance, for a parameter in 4-space, up, down, left, right in latent space), and use those new parameters to seed a policy.\u003c/li\u003e\n\u003cli\u003eCheck each policy for its \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e via \u003ca href=\"/posts/kbhpolicy_evaluation/#monte-carlo-policy-evaluation\"\u003emonte-carlo policy evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIf any of the adjacent points are better, we move there\u003c/li\u003e\n\u003cli\u003eIf none of the adjacent points are better, we set \\(\\alpha = 0.5 \\alpha\\) (of the up/down/left/right) and try again\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe continue until \\(\\alpha\\) drops below some \\(\\epsilon\\).\u003c/p\u003e\n\u003cp\u003eNote: if we have billions of parameters, this method will be not that feasible because we have to calculate the \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e so many many many times.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlocal_policy_search/","tags":null,"title":"Local Policy Search"},{"categories":null,"contents":"\\begin{equation} \\log a^{b} = b\\log a 
\\end{equation}\n\\begin{equation} \\log (ab) = \\log a + \\log b \\end{equation}\n\\begin{equation} \\log (\\frac{a}{b}) = \\log a - \\log b \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\n\\log a^{b} = b\\log a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log (ab) = \\log a + \\log b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log (\\frac{a}{b}) = \\log a - \\log b\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlog_laws/","tags":null,"title":"log laws"},{"categories":null,"contents":"TODO: connect Logan with a few fire departments\n","html":"\u003cp\u003eTODO: connect Logan with a few fire departments\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlogan_s_team_check_in/","tags":null,"title":"Logan's Team Checkin"},{"categories":null,"contents":"Consider:\n\\begin{equation} P\u0026rsquo; = 2P(100-P) \\end{equation}\nfor a motivation, see petri dish.\nSolution Assuming \\(P\\) never reaches 100\n\\begin{equation} \\int \\frac{\\dd{P}}{P(100-P)} \\dd{P}= \\int 2 \\dd{t} \\end{equation}\nPartial fractions time:\n\\begin{equation} \\frac{1}{100} \\int \\qty(\\frac{1}{p} + \\frac{1}{100-p})\\dd{P} = \\frac{1}{100} \\ln |p| - \\ln |100-p| = 2t+C \\end{equation}\nRemember now log laws:\n\\begin{equation} \\frac{1}{100} \\ln \\left| \\frac{p}{100-p} \\right| = 2t+C \\end{equation}\nAnd finally, we obtain:\n\\begin{equation} \\qty | \\frac{p}{100-p} | = e^{200t + C} \\end{equation}\nWe can get rid of the absolute value by reshaping the fraction:\n\\begin{equation} \\frac{p}{100-p} = ke^{200t} \\end{equation}\nFinally, we solve for \\(p\\):\n\\begin{equation} p(t) = \\frac{100k e^{200t}}{1+ke^{200t}} = \\frac{100k}{e^{-200t}+k} \\end{equation}\nNote!\nas \\(t \\to -\\infty\\), we have \\(p \\to 0\\) as \\(t \\to +\\infty\\), we have \\(p \\to 100\\) ","html":"\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP\u0026rsquo; = 
2P(100-P)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor a motivation, see \u003ca href=\"/posts/kbhpetri_dish/\"\u003epetri dish\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"solution\"\u003eSolution\u003c/h2\u003e\n\u003cp\u003eAssuming \\(P\\) never reaches 100\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{\\dd{P}}{P(100-P)} \\dd{P}= \\int 2 \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePartial fractions time:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{100} \\int \\qty(\\frac{1}{p} + \\frac{1}{100-p})\\dd{P} = \\frac{1}{100} \\ln |p| - \\ln |100-p| = 2t+C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRemember now log laws:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{100} \\ln \\left| \\frac{p}{100-p} \\right| = 2t+C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd finally, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty | \\frac{p}{100-p} | = e^{200t + C}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can get rid of the absolute value by reshaping the fraction:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{p}{100-p} = ke^{200t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we solve for \\(p\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(t) = \\frac{100k e^{200t}}{1+ke^{200t}} = \\frac{100k}{e^{-200t}+k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eas \\(t \\to -\\infty\\), we have \\(p \\to 0\\)\u003c/li\u003e\n\u003cli\u003eas \\(t \\to +\\infty\\), we have \\(p \\to 100\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlogistic_equations/","tags":null,"title":"logistic equation"},{"categories":null,"contents":"Naive Bayes acts to compute \\(P(Y|X)\\) via the Bayes rule and using the Naive Bayes assumption. 
What if we can model the value of \\(P(Y|X)\\) directly?\nWith \\(\\sigma\\) as the sigmoid function:\n\\begin{equation} P(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\end{equation}\nand we tune the parameters of \\(\\theta\\) until this looks correct.\nWe always want to introduce a BIAS parameter, which acts as an offset; meaning the first \\(x\\) should always be \\(1\\), which makes the first value in \\(\\theta\\) as a \u0026ldquo;bias\u0026rdquo;.\nFor optimizing this function, we have:\n\\begin{equation} LL(\\theta) = y \\log \\sigma(\\theta^{\\top} x) + (1-y) \\log (1- \\theta^{\\top} x) \\end{equation}\nand if we took the derivative w.r.t. a particular parameter slot \\(\\theta_{j}\\):\n\\begin{equation} \\pdv{LL(\\theta)}{\\theta_{j}} = \\sum_{i=1}^{n} \\qty[y^{(i)} - \\sigma(\\theta^{\\top}x^{(i)})] x_{j}^{(i)} \\end{equation}\nlogistic regression assumption We assume that there exists that there are some \\(\\theta\\) which, when multiplied to the input and squashed by th sigmoid function, can model our underlying probability distribution:\n\\begin{equation} \\begin{cases} P(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\\\ P(Y=0|X=x) = 1- \\sigma (\\theta^{\\top}x) \\\\ \\end{cases} \\end{equation}\nWe then attempt to compute a set of \\(\\theta\\) which:\n\\begin{equation} \\theta_{MLE} = \\arg\\max_{\\theta} P(y^{(1)}, \\dots, y^{(n)} | \\theta, x_1 \\dots x_{n}) \\end{equation}\nLog Likelihood of Logistic Regression To actually perform MLE for the $θ$s, we need to do parameter learning. 
Now, recall that we defined, though the logistic regression assumption:\n\\begin{equation} P(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\end{equation}\nessentially, this is a Bernouli:\n\\begin{equation} (Y|X=x) \\sim Bern(p=\\sigma(\\theta^{\\top}x)) \\end{equation}\nWe desire to maximize:\n\\begin{equation} P(Y=y | \\theta, X=x) \\end{equation}\nNow, recall the continous PDF of Bernouli:\n\\begin{equation} P(Y=y) = p^{y} (1-p)^{1-y} \\end{equation}\nwe now plug in our expression for \\(p\\):\n\\begin{equation} P(Y=y|X=x) = \\sigma(\\theta^{\\top}x)^{y} (1-\\sigma(\\theta^{\\top}x))^{1-y} \\end{equation}\nfor all \\(x,y\\).\nLogistic Regression, in general For some input, output pair, \\((x,y)\\), we map each input \\(x^{(i)}\\) into a vector of length \\(n\\) where \\(x^{(i)}_{1} \u0026hellip; x^{(i)}_{n}\\).\nTraining We are going to learn weights \\(w\\) and \\(b\\) using stochastic gradient descent; and measure our performance using cross-entropy loss\nTest Given a test example \\(x\\), we compute \\(p(y|x)\\) for each \\(y\\), returning the label with the highest probability.\nLogistic Regression Text Classification Given a series of input/output pairs of text to labels, we want to assign a predicted class to a new input fair.\nWe represent each text in terms of features. Each feature \\(x_{i}\\) comes with some weight \\(w_{i}\\), informing us of the importance of feature \\(x_{i}\\).\nSo: input is a vector \\(x_1 \u0026hellip; x_{n}\\), and some weights \\(w_1 \u0026hellip; w_{n}\\), which will eventually gives us an output.\nThere is usually also a bias term \\(b\\). Eventually, classification gives:\n\\begin{equation} z = w \\cdot x + b \\end{equation}\nHowever, this does not give a probability, which by default this does not. 
To fix this, we apply a squishing function \\(\\sigma\\), which gives\n\\begin{equation} \\sigma(z) = \\frac{1}{1+\\exp(-z)} \\end{equation}\nwhich ultimately yields:\n\\begin{equation} z = \\sigma(w \\cdot x+ b) \\end{equation}\nwith the sigmoid function.\nTo make this sum to \\(1\\), we write:\n\\begin{equation} p(y=1|x) = \\sigma(w \\cdot x + b) \\end{equation}\nand\n\\begin{equation} p(y=0|x) = 1- p(y=1|x) \\end{equation}\nAlso, recall that \\(\\sigma(-x) = 1- \\sigma(x)\\), this gives:\n\\begin{equation} p(y=0|x) = \\sigma(-w\\cdot x-b) \\end{equation}\nthe probability at which point we make a decision is called a decision boundary. Typically this is 0.5.\nWe can featurize by counts from a lexicon, by word counts, etc.\nFor instance:\nlogistic regression terms feature representation: each input \\(x\\) is represented by a vectorized lit of feature classification function: \\(p(y|x)\\), computing \\(y\\) using the estimated class objective function: the loss to minimize (i.e. cross entropy) optimizer: SGD, etc. decision boundary: the threshold at which classification decisions are made, with \\(P(y=1|x) \u0026gt; N\\). 
binary cross entropy \\begin{equation} \\mathcal{L} = - \\qty[y \\log \\sigmoid(w \\cdot x + b) + (1-y) \\log (1- \\sigmoid(w \\cdot x + b))] \\end{equation}\nor, for neural networks in general:\n\\begin{equation} \\mathcal{L} = - \\qty[y \\log \\hat{y} + (1-y) \\log (1- \\hat{y})] \\end{equation}\ngradient descent \\begin{equation} \\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} \\mathcal{L} \\end{equation}\n\u0026ldquo;update the weight by taking a step in the opposite direction of the gradient by weight\u0026rdquo;.\nWeight gradient for logistic regresison \\begin{equation} \\pdv{L_{CE}(\\hat(y), y)}{w_{j}} = \\qty[\\sigma\\qty(w \\cdot x + b) -y] x_{j} \\end{equation}\nwhere \\(x_{j}\\) is feature \\(j\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e acts to compute \\(P(Y|X)\\) via the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e and using the \u003ca href=\"/posts/kbhnaive_bayes/#id-6cdf74a2-2451-47d1-8a62-62aa6dff62c6-naive-bayes-assumption\"\u003eNaive Bayes assumption\u003c/a\u003e. What if we can model the value of \\(P(Y|X)\\) directly?\u003c/p\u003e\n\u003cp\u003eWith \\(\\sigma\\) as the \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=1|X=x) = \\sigma (\\theta^{\\top}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand we tune the parameters of \\(\\theta\\) until this looks correct.\u003c/p\u003e\n\u003cp\u003eWe always want to introduce a BIAS parameter, which acts as an offset; meaning the first \\(x\\) should always be \\(1\\), which makes the first value in \\(\\theta\\) as a \u0026ldquo;bias\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor optimizing this function, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nLL(\\theta) = y \\log \\sigma(\\theta^{\\top} x) + (1-y) \\log (1- \\theta^{\\top} x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand if we took the derivative w.r.t. 
a particular parameter slot \\(\\theta_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{LL(\\theta)}{\\theta_{j}} = \\sum_{i=1}^{n} \\qty[y^{(i)} - \\sigma(\\theta^{\\top}x^{(i)})] x_{j}^{(i)}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"logistic-regression-assumption\"\u003elogistic regression assumption\u003c/h2\u003e\n\u003cp\u003eWe assume that there exists that there are some \\(\\theta\\) which, when multiplied to the input and squashed by th \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function, can model our underlying \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nP(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\\\\nP(Y=0|X=x) = 1- \\sigma (\\theta^{\\top}x) \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe then attempt to compute a set of \\(\\theta\\) which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MLE} = \\arg\\max_{\\theta} P(y^{(1)}, \\dots, y^{(n)} | \\theta, x_1 \\dots x_{n})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"log-likelihood-of-logistic-regression\"\u003eLog Likelihood of Logistic Regression\u003c/h2\u003e\n\u003cp\u003eTo actually perform \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e for the $θ$s, we need to do \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e. 
Now, recall that we defined, though the \u003ca href=\"#logistic-regression-assumption\"\u003elogistic regression assumption\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=1|X=x) = \\sigma (\\theta^{\\top}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eessentially, this is a Bernouli:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(Y|X=x) \\sim Bern(p=\\sigma(\\theta^{\\top}x))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire to maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y | \\theta, X=x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall the continous \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e of Bernouli:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y) = p^{y} (1-p)^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now plug in our expression for \\(p\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y|X=x) = \\sigma(\\theta^{\\top}x)^{y} (1-\\sigma(\\theta^{\\top}x))^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(x,y\\).\u003c/p\u003e\n\u003ch2 id=\"logistic-regression-in-general\"\u003eLogistic Regression, in general\u003c/h2\u003e\n\u003cp\u003eFor some input, output pair, \\((x,y)\\), we map each input \\(x^{(i)}\\) into a vector of length \\(n\\) where \\(x^{(i)}_{1} \u0026hellip; x^{(i)}_{n}\\).\u003c/p\u003e\n\u003ch3 id=\"training\"\u003eTraining\u003c/h3\u003e\n\u003cp\u003eWe are going to learn weights \\(w\\) and \\(b\\) using stochastic gradient descent; and measure our performance using cross-entropy loss\u003c/p\u003e\n\u003ch3 id=\"test\"\u003eTest\u003c/h3\u003e\n\u003cp\u003eGiven a test example \\(x\\), we compute \\(p(y|x)\\) for each \\(y\\), returning the label with the highest probability.\u003c/p\u003e\n\u003ch2 id=\"logistic-regression-text-classification\"\u003eLogistic Regression Text Classification\u003c/h2\u003e\n\u003cp\u003eGiven a series of input/output pairs of text to labels, we want to 
assign a predicted class to a new input fair.\u003c/p\u003e\n\u003cp\u003eWe represent each text in terms of features. Each feature \\(x_{i}\\) comes with some weight \\(w_{i}\\), informing us of the importance of feature \\(x_{i}\\).\u003c/p\u003e\n\u003cp\u003eSo: input is a vector \\(x_1 \u0026hellip; x_{n}\\), and some weights \\(w_1 \u0026hellip; w_{n}\\), which will eventually gives us an output.\u003c/p\u003e\n\u003cp\u003eThere is usually also a bias term \\(b\\). Eventually, classification gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = w \\cdot x + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHowever, this does \u003cstrong\u003enot\u003c/strong\u003e give a probability, which by default this does not. To fix this, we apply a squishing function \\(\\sigma\\), which gives\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma(z) = \\frac{1}{1+\\exp(-z)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich ultimately yields:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = \\sigma(w \\cdot x+ b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith the \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function.\u003c/p\u003e\n\u003cp\u003eTo make this sum to \\(1\\), we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(y=1|x) = \\sigma(w \\cdot x + b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(y=0|x) = 1- p(y=1|x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAlso, recall that \\(\\sigma(-x) = 1- \\sigma(x)\\), this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(y=0|x) = \\sigma(-w\\cdot x-b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe probability at which point we make a decision is called a \u003ca href=\"#logistic-regression-text-classification\"\u003edecision boundary\u003c/a\u003e. 
Typically this is 0.5.\u003c/p\u003e\n\u003cp\u003eWe can featurize by counts from a lexicon, by word counts, etc.\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-26_19-26-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"logistic-regression-terms\"\u003elogistic regression terms\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efeature representation\u003c/strong\u003e: each input \\(x\\) is represented by a vectorized lit of feature\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eclassification function\u003c/strong\u003e: \\(p(y|x)\\), computing \\(y\\) using the estimated class\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eobjective function\u003c/strong\u003e: the loss to minimize (i.e. cross entropy)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoptimizer\u003c/strong\u003e: SGD, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edecision boundary\u003c/strong\u003e: the threshold at which classification decisions are made, with \\(P(y=1|x) \u0026gt; N\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"binary-cross-entropy\"\u003ebinary cross entropy\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{L} = - \\qty[y \\log \\sigmoid(w \\cdot x + b) + (1-y) \\log (1- \\sigmoid(w \\cdot x + b))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor, for neural networks in general:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{L} = - \\qty[y \\log \\hat{y} + (1-y) \\log (1- \\hat{y})]\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"gradient-descent\"\u003egradient descent\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} \\mathcal{L}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;update the weight by taking a step in the opposite direction of the gradient by weight\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"weight-gradient-for-logistic-regresison\"\u003eWeight gradient for logistic 
regresison\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{L_{CE}(\\hat(y), y)}{w_{j}} = \\qty[\\sigma\\qty(w \\cdot x + b) -y] x_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(x_{j}\\) is feature \\(j\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlogistic_regression/","tags":null,"title":"logistic regression"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhloop_invariant/","tags":null,"title":"loop invariant"},{"categories":null,"contents":"A lottery is a choice problem, where each outcome has a certain probability:\n\\begin{equation} [S_1:p_1, \\dots, S_{n}:p_{n}] \\end{equation}\nwhere, \\(S_{j}\\) has \\(p_{j}\\) chance of occurring.\nutility of a lottery For a lottery, the utility thereof is the probability of a state happening times the utility of the state:\nthat is,\n\\begin{equation} U([S_1:p_1, \\dots, S_{n}:p_{n}]) = \\sum_{i=1}^{N} p_{i}U(S_{i})} \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e is a choice problem, where each outcome has a certain \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n[S_1:p_1, \\dots, S_{n}:p_{n}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(S_{j}\\) has \\(p_{j}\\) chance of occurring.\u003c/p\u003e\n\u003ch2 id=\"utility-of-a-lottery\"\u003eutility of a lottery\u003c/h2\u003e\n\u003cp\u003eFor a lottery, the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e thereof is the probability of a state happening times the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of the state:\u003c/p\u003e\n\u003cp\u003ethat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU([S_1:p_1, \\dots, S_{n}:p_{n}]) = \\sum_{i=1}^{N} 
p_{i}U(S_{i})}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlottery/","tags":null,"title":"lottery"},{"categories":null,"contents":"Real-Time Dynamic Programming RTDP is a asynchronous value iteration scheme. Each RTDP trial is a result of:\n\\begin{equation} V(s) = \\min_{ia \\in A(s)} c(a,s) + \\sum_{s\u0026rsquo; \\in S}^{} P_{a}(s\u0026rsquo;|s)V(s) \\end{equation}\nthe algorithm halts when the residuals are sufficiently small.\nLabeled RTDP We want to label converged states so we don\u0026rsquo;t need to keep investigate it:\na state is solved if:\nstate has less then \\(\\epsilon\\) all reachable states given \\(s\u0026rsquo;\\) from this state has residual lower than \\(\\epsilon\\) Labelled RTDP We stochastically simulate one step forward, and until a state we haven\u0026rsquo;t marked as \u0026ldquo;solved\u0026rdquo; is met, then we simulate forward and value iterate\n","html":"\u003ch2 id=\"real-time-dynamic-programming\"\u003eReal-Time Dynamic Programming\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e is a asynchronous value iteration scheme. 
Each \u003ca href=\"#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e trial is a result of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV(s) = \\min_{ia \\in A(s)} c(a,s) + \\sum_{s\u0026rsquo; \\in S}^{} P_{a}(s\u0026rsquo;|s)V(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe algorithm halts when the residuals are sufficiently small.\u003c/p\u003e\n\u003ch2 id=\"labeled-rtdp--org9a279ff\"\u003eLabeled \u003ca href=\"#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWe want to label converged states so we don\u0026rsquo;t need to keep investigate it:\u003c/p\u003e\n\u003cp\u003ea state is \u003cstrong\u003esolved\u003c/strong\u003e if:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estate has less then \\(\\epsilon\\)\u003c/li\u003e\n\u003cli\u003eall reachable states given \\(s\u0026rsquo;\\) from this state has residual lower than \\(\\epsilon\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"labelled-rtdp\"\u003eLabelled RTDP\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_10-11-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe stochastically simulate one step forward, and until a state we haven\u0026rsquo;t marked as \u0026ldquo;solved\u0026rdquo; is met, then we simulate forward and value iterate\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhltrdp/","tags":null,"title":"LRTDP"},{"categories":null,"contents":"LucCage is a platform as a biosensor: a case whose binding domain could be changed to fit specific applications\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e is a platform as a biosensor: a case whose binding domain could be changed to fit specific applications\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhluccage/","tags":null,"title":"LucCage"},{"categories":null,"contents":"DOI: 10.1101/2021.03.24.21254263\nOne-Liner Review paper presenting the \\(ADReSS_o\\) challenge and current baselines for 
three tasks\nNotes Three tasks + state of the art:\nClassification of AD: accuracy \\(78.87\\%\\) Prediction of MMSE score: RMSE \\(5.28\\) Prediction of cognitive decline: accuracy \\(68.75\\%\\) Task 1 AD classification baseline established by decision tree with late fusion\n(LOOCV and test)\nTask 2 MMSE score prediction baseline established by grid search on parameters.\nSVR did best on both counts; results from either model are averaged for prediction.\nTask 3 Same thing here, DT does better but notably its F1 is smaller; data trained with final late fusion\n","html":"\u003cp\u003eDOI: 10.1101/2021.03.24.21254263\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eReview paper presenting the \\(ADReSS_o\\) challenge and current baselines for three tasks\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eThree tasks + state of the art:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eClassification of AD: accuracy \\(78.87\\%\\)\u003c/li\u003e\n\u003cli\u003ePrediction of \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e score: RMSE \\(5.28\\)\u003c/li\u003e\n\u003cli\u003ePrediction of cognitive decline: accuracy \\(68.75\\%\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"task-1\"\u003eTask 1\u003c/h3\u003e\n\u003cp\u003eAD classification baseline established by decision tree with \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_22-57-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(\u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e and test)\u003c/p\u003e\n\u003ch3 id=\"task-2\"\u003eTask 2\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e score prediction baseline established by \u003ca href=\"/posts/kbhgrid_search/\"\u003egrid search\u003c/a\u003e on parameters.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-06-25_22-58-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSVR did best on both counts; results from either model are averaged for prediction.\u003c/p\u003e\n\u003ch3 id=\"task-3\"\u003eTask 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_23-02-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSame thing here, DT does better but notably its F1 is smaller; data trained with final late fusion\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhluz_2021/","tags":["ntj"],"title":"Luz 2021"},{"categories":null,"contents":"Seed: walking, loving\nWalking\nSkipping\nShoes\nRoad\nRunning\nForward\nSpeed\nPlane\nTravel\nUnique\nCold\nHouse\nLoving\nCuddling\nKissing\nHolding\nTogether\nStaring\nLonging\nEstablish\nSpending time\nWaving\nWelling\nWalking together, staring forward longing you\nLoving together, skipping forward, a cold house\nCuddling down the avenue, spending time there, Waving by\nEstablish what it\u0026rsquo;s like,\n","html":"\u003cp\u003eSeed: walking, 
loving\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eWalking\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSkipping\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eShoes\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRoad\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRunning\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eForward\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpeed\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePlane\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTravel\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eUnique\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCold\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHouse\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLoving\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCuddling\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eKissing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHolding\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTogether\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eStaring\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLonging\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEstablish\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpending time\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWaving\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWelling\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eWalking together,\nstaring forward\nlonging you\u003c/p\u003e\n\u003cp\u003eLoving together,\nskipping forward,\na cold house\u003c/p\u003e\n\u003cp\u003eCuddling down the avenue,\nspending time there,\nWaving by\u003c/p\u003e\n\u003cp\u003eEstablish what it\u0026rsquo;s 
like,\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlyrics_ping/","tags":null,"title":"Lyrics: Ping"},{"categories":null,"contents":"Seed: explore, wild\nexplore\nlearn\nresources\nmineral\ndetail\nfeature\nfact\npolice\nduty\nparticulars\ndeposit\nassign\nundertake\nnatural\nenvironment\ncultivate\nregion\nharshly\nuntrusting\nnervous\nincreasing\nchanging\nperiod\nbecome greater\nWe go explore, changing times, parting ways.\nWanting no praise, become greater Than ever\nWe go explore, shining lights moving stars\nFinding no target, we cannot expect to see\nHow can we explore if we can\u0026rsquo;t even feed? Ourselves? Our families? Our digitaries?\nHow can we explore if we can\u0026rsquo;t even seek. Unatural exploration Touching the depths with our feet\nWe go explore, wondrous depths, random seas\nWanting someone, reminicing the never\nWe go explore, purple skies acid rain\nFinding the target, we didn\u0026rsquo;t know to see\nHow can we explore if we can\u0026rsquo;t even feed? Ourselves? Our families? Our digitaries?\nHow can we explore if we can\u0026rsquo;t even seek. 
Unatural exploration Probing the depths with our feet\n","html":"\u003cp\u003eSeed: explore, wild\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eexplore\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elearn\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eresources\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emineral\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edetail\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003efeature\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003efact\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003epolice\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eduty\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eparticulars\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edeposit\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eassign\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eundertake\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003enatural\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eenvironment\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecultivate\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eregion\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eharshly\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003euntrusting\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003enervous\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eincreasing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003echanging\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eperiod\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ebecome greater\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eWe go explore,\nchanging times,\nparting ways.\u003c/p\u003e\n\u003cp\u003eWanting no praise,\nbecome greater\nThan ever\u003c/p\u003e\n\u003cp\u003eWe 
go explore,\nshining lights\nmoving stars\u003c/p\u003e\n\u003cp\u003eFinding no target,\nwe cannot expect to see\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even feed?\nOurselves? Our families? Our digitaries?\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even seek.\nUnatural exploration\nTouching the depths with our feet\u003c/p\u003e\n\u003cp\u003eWe go explore,\nwondrous depths,\nrandom seas\u003c/p\u003e\n\u003cp\u003eWanting someone,\nreminicing\nthe never\u003c/p\u003e\n\u003cp\u003eWe go explore,\npurple skies\nacid rain\u003c/p\u003e\n\u003cp\u003eFinding the target,\nwe didn\u0026rsquo;t know to see\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even feed?\nOurselves? Our families? Our digitaries?\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even seek.\nUnatural exploration\nProbing the depths with our feet\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlyrics_laws/","tags":null,"title":"Lyrics: Unnatural Exploration"},{"categories":null,"contents":"DOI: 10.1109/CISP-BMEI.2018.8633126\nA dataset paper with which auditory info about people talking is collected.\nHere are the state-of-the-art as of Laguarta 2021 on the dataset proposed.\n","html":"\u003cp\u003eDOI: 10.1109/CISP-BMEI.2018.8633126\u003c/p\u003e\n\u003cp\u003eA dataset paper with which auditory info about people talking is collected.\u003c/p\u003e\n\u003cp\u003eHere are the state-of-the-art as of \u003ca href=\"/posts/kbhlaguarta_2021/\"\u003eLaguarta 2021\u003c/a\u003e on the dataset proposed.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-31-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhlyu_2018/","tags":null,"title":"Lyu 2018"},{"categories":null,"contents":"machine learning is the act of using some input to come up with some prediction, where the model is parameterized via a bunch of parameters. 
Hence, parameter learning approaches is how machine learning works.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmachine_learning/\"\u003emachine learning\u003c/a\u003e is the act of using some input to come up with some prediction, where the model is parameterized via a bunch of \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es. Hence, \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e approaches is how machine learning works.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmachine_learning/","tags":null,"title":"machine learning"},{"categories":null,"contents":"For multi-class classification, the macroaverage is the average of statistical values (prec, recc, etc.) after they have been computed for each seperate class.\nThe microaverage is the combination of a confusion matrix BEFORE statistical values are computed.\n","html":"\u003cp\u003eFor multi-class classification, the \u003ca href=\"/posts/kbhmacroaverage/\"\u003emacroaverage\u003c/a\u003e is the average of statistical values (prec, recc, etc.) 
after they have been computed for each seperate class.\u003c/p\u003e\n\u003cp\u003eThe microaverage is the combination of a confusion matrix BEFORE statistical values are computed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmacroaverage/","tags":null,"title":"macroaverage"},{"categories":null,"contents":"For a charge to do something in a magnetic field, it has to have velocity; nothing happens without movement.\nSo:\n\\begin{equation} \\vec{F}_{M} = q \\vec{v} \\times \\vec{B} \\end{equation}\nTo calculate: magnitude: \\(qvB \\sin \\theta\\) + right hand rule.\nRadius You maybe asked to find the radius of the path the particle takes, so:\n\\begin{equation} \\frac{v^{2}}{r} = a \\end{equation}\nSo, the net force here is:\n\\begin{equation} qvB = Ma \\end{equation}\nSo plug in and solve\nCurrent along a wire \\begin{equation} \\vec{F} = \\int I \\dd{l} \\times \\vec{B} \\end{equation}\nThe sum of the current across the wire is the same as \\(q \\vec{v}\\).\nThis equals \\(IlB \\sin \\theta\\) in magnitude for constant current.\n\u0026ldquo;FILB - sintheta\u0026rdquo;\nInduced magnetic field For the induced magnetic field of a current, use the curvey (curl) right hand rule.\nThe actual magnitude induced by the wire is ampere\u0026rsquo;s law:\n\\begin{equation} \\oint \\vec{B} \\cdot \\dd{\\vec{l}} = \\mu I \\end{equation}\nwhere, \\(u_0\\) is vacuum permeability (\\(4 \\pi \\times 10^{-7} \\frac{T \\cdot m}{A}\\)).\nMagnetic Field of a Solenoid \\begin{equation} Bs = \\mu_{0} n I \\end{equation}\nwhere, \\(n = \\frac{N}{L}\\), the number of turns of the solenoid per length.\nMagnetic Field of a Loop \\begin{equation} B 2\\pi r = \\mu_{0} I \\end{equation}\nwhere, the surface integral of length of a loop is just the circumference\nWIRES ARE OPPOSITE Current\u0026rsquo;s induced magnetic fields in the same direction attracts, and in opposite directinos repel\nFull description of magnetic field non-bdl-b For instance, current in a loop and desire 
magnetic field in the center\n","html":"\u003cp\u003eFor a charge to do something in a magnetic field, it has to have velocity; nothing happens without \u003cstrong\u003emovement\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F}_{M} = q \\vec{v} \\times \\vec{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo calculate: magnitude: \\(qvB \\sin \\theta\\) + right hand rule.\u003c/p\u003e\n\u003ch2 id=\"radius\"\u003eRadius\u003c/h2\u003e\n\u003cp\u003eYou maybe asked to find the radius of the path the particle takes, so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{v^{2}}{r} = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, the net force here is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nqvB = Ma\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo plug in and solve\u003c/p\u003e\n\u003ch2 id=\"current-along-a-wire\"\u003eCurrent along a wire\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F} = \\int I \\dd{l} \\times \\vec{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe sum of the current across the wire is the same as \\(q \\vec{v}\\).\u003c/p\u003e\n\u003cp\u003eThis equals \\(IlB \\sin \\theta\\) in magnitude for constant current.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;FILB - sintheta\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"induced-magnetic-field\"\u003eInduced magnetic field\u003c/h2\u003e\n\u003cp\u003eFor the induced magnetic field of a current, use the curvey (curl) right hand rule.\u003c/p\u003e\n\u003cp\u003eThe actual \u003cstrong\u003emagnitude\u003c/strong\u003e induced by the wire is ampere\u0026rsquo;s law:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\oint \\vec{B} \\cdot \\dd{\\vec{l}} = \\mu I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(u_0\\) is vacuum permeability (\\(4 \\pi \\times 10^{-7} \\frac{T \\cdot m}{A}\\)).\u003c/p\u003e\n\u003ch2 id=\"magnetic-field-of-a-solenoid\"\u003eMagnetic Field of a 
Solenoid\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nBs = \\mu_{0} n I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n = \\frac{N}{L}\\), the number of turns of the solenoid per length.\u003c/p\u003e\n\u003ch2 id=\"magnetic-field-of-a-loop\"\u003eMagnetic Field of a Loop\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nB 2\\pi r = \\mu_{0} I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, the surface integral of length of a loop is just the circumference\u003c/p\u003e\n\u003ch2 id=\"wires-are-opposite\"\u003eWIRES ARE OPPOSITE\u003c/h2\u003e\n\u003cp\u003eCurrent\u0026rsquo;s induced magnetic fields in the \u003cstrong\u003esame direction attracts\u003c/strong\u003e, and in \u003cstrong\u003eopposite directinos repel\u003c/strong\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-19_10-52-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"full-description-of-magnetic-field-non-bdl-b\"\u003eFull description of magnetic field non-bdl-b\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-19_11-13-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFor instance, current in a loop and desire magnetic field in the center\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmagnetism/","tags":null,"title":"magnetism"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.623607\nOne-Liner Trained a bimodal model on speech/text with GRU on speech and CNN-LSTM on text.\nNovelty A post-2019 NLP paper that doesn\u0026rsquo;t use transformers! 
(so faster (they used CNN-LSTM) lighter easier) \u0026ldquo;Our work sheds light on why the accuracy of these models drops to 72.92% on the ADReSS dataset, whereas, they gave state of the art results on the DementiaBank dataset.\u0026rdquo; Notable Methods Bi-Modal audio and transcript processing vis a vi Shah 2021, but with a CNN-LSTM and GRU on the other side.\nKey Figs Figure 1: Proposed Architecture The figure highlights the authors\u0026rsquo; proposed architecture\nFigure 2: confusion matrix In addition to validating prior work by Karlekar 2018 and Di Palo 2019, proposed model C and got accuracy of \\(73.92\\%\\).\n","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.623607\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eTrained a bimodal model on speech/text with GRU on speech and CNN-LSTM on text.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA post-2019 NLP paper that doesn\u0026rsquo;t use transformers! 
(so \u003cdel\u003efaster\u003c/del\u003e (they used CNN-LSTM) lighter easier)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Our work sheds light on why the accuracy of these models drops to 72.92% on the ADReSS dataset, whereas, they gave state of the art results on the DementiaBank dataset.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eBi-Modal audio and transcript processing vis a vi \u003ca href=\"/posts/kbhshah_2021/\"\u003eShah 2021\u003c/a\u003e, but with a CNN-LSTM and GRU on the other side.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-1-proposed-architecture\"\u003eFigure 1: Proposed Architecture\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_12-10-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe figure highlights the authors\u0026rsquo; proposed architecture\u003c/p\u003e\n\u003ch3 id=\"figure-2-confusion-matrix\"\u003eFigure 2: confusion matrix\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_12-17-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIn addition to validating prior work by Karlekar 2018 and Di Palo 2019, proposed model C and got accuracy of \\(73.92\\%\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmahajan_2021/","tags":["ntj"],"title":"Mahajan 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmahatma_ghandi/","tags":null,"title":"Mahatma Ghandi"},{"categories":null,"contents":"Happy Monday friends.\nThe deliverable of the week was to make the a ASR model for Batchalign. Essentially, most copies of Whisper is pretty bad at Language Sample Analysis (LSA), because they mostly don\u0026rsquo;t work in terms trying to actually capture the things that people doing LSA want to capture (disfluencies, stuttering, etc.). 
OpenAI even acknowledged in the paper that they filtered out the disfluencies from their gold transcript to prevent Whisper from writing down too much of them.\nAnd so\u0026hellip; We roll up our sleeves and do it ourselves.\nA Large Language Model I didn\u0026rsquo;t want to perform Low-Rank Approximation (LoRA) to heavily when training this model. Folks fine tuning LLaMA will note that the preferred parameters were essentially asked the user to make the model matricies Rank 8, across the entire model.\nWhen trying this in earlier experiments, we failed dramatically as the LoRA\u0026rsquo;d model failed to converge when we hit any smaller rank below 10. However, if we tried to, say, do it above 10, I would OOM.\nI will note: its not like we don\u0026rsquo;t have compute. For this project, I fortunately am able to provision any number of V100 32GB as I see reasonable to train this model. Nevertheless, a lovey dovey parameter heavy 1.5 Billion parameter model is still a sight to behold (and cram into one such GPUs).\nHence, the most important impetus for making this work without aggressive LoRA and degraded performance is some kind of model parallel training scheme.\nOne Model, Multiple Cards Alr then.\nAfter investigation, DeepSpeed seemed pretty promising for a few reasons. The third iteration of its algorithm (Zero-3) has three different main offerings:\nModel parameter sharding (sharding the weights of the model across devices) Optimizer state sharding Model/Parameter state offload The last one caught my eye. Essentially, as long as your chip has the ability to perform a single forward pass, it can train a model under Zero-3. 
This is because the system is designed, on request, to offload the weights of your model into CPU or NVMe if you want\u0026mdash;and only pull it into the main device for the actual step of forward/backwards passes.\nThe thing about DeepSpeed is that its configured in a very hapazard way, and once you DeepSpeed onto your training script you can\u0026rsquo;t really go back: it expects model parallel training, in the way you configured it, always, based on the contents to the training script.\nHuggingface Accelerate to the rescue! The system is essentially a generic hypervisation framework. It is designed to accelerate model training using any framework you\u0026rsquo;d like: CPU data parallel, GPU data parallel, DeepSpeed model parallel, and so on\u0026mdash;with a single configuration file.\nWith minimal change to your training script, your actual acceleration scheme travels with a configuration file on device. Meaning, running the same script on different devices configured with Accelerate will use the best settings for that device; including the correct number of cards, accelerators, etc.\nPedal to the Metal As usual, despite how good all of this stuff sounds, getting it all to glue together was a hot mess.\nAccelerate Let\u0026rsquo;s start with Accelerate. 
The actual process of integrating Accelerate into your training script is pretty straightforward:\naccelerator = Accelerator() DEVICE = accelerator.device model, optim, dataloader, val_dataloader = accelerator.prepare(model, optim, dataloader, val_dataloader) and then, in your training loop, change\n- loss.backward() + accelerator.backward(loss) and finally, whenever you need to access a value in CPU, change\n- loss = torch.mean(loss.cpu()) + loss = torch.mean(accelerator.gather(loss)) That\u0026rsquo;s honestly about it in terms of making accelerate work.\nDeepSpeed Shenanigans DeepSpeed is a great tool to accelerate model training, but the damned thing is so janky to actually get started because of various device integration issues.\nThere\u0026rsquo;s this excellent thread on Reddit with people winging about the various things that DeepSpeed is broken about. To actually get it to actually work on my end\u0026hellip;\ndeep breath. pray to deity of your choice, etc. and Install Conda pip install deepspeed conda install openmpi pip install mpi4py (if this fails, env LD_LIBRARY_PATH=/your/conda/lib/path pip install --no-cache-dir mpi4py) If you now ran DeepSpeed on a model, it likely will crash on a local random assert statement. To fix this, get ready:\nfind runtime/zero/partitioned_param_coordinator.py wherever your DeepSpeed code is, and:\n- assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() + # assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() comment the damned assertion out. Yup.\nAccelerate Device Config And now, onto the device configuration. If you are most normal people, you can just run:\naccelerate config answer the questions, and be done for configuring that device. However, as I was training on a SLURM device, I had no access to a tty. 
Hence, I had to configure the Accelerate device configuration myself.\nTo glue Accelerate and Deepspeed together, here was the config.\ncompute_environment: LOCAL_MACHINE debug: false deepspeed_config: gradient_accumulation_steps: 1 offload_optimizer_device: none offload_param_device: cpu zero3_init_flag: true zero_stage: 3 distributed_type: DEEPSPEED fsdp_config: {} downcast_bf16: \u0026#39;no\u0026#39; machine_rank: 0 mixed_precision: \u0026#39;no\u0026#39; num_machines: 1 num_processes: 3 use_cpu: false Here are the highlights:\nmixed_precision: 'no': FP16 doesn\u0026rsquo;t work if you do your own tensor creation within the train loop as I did though the Whisper models. Your DataLoader passed to your accelerator at the beginning of the script must return the exact tensors you put into the model if you want FP16.\noffload_optimizer_device: none: offloading optimizer requires you to compile the PyTorch extension adam_cpu from DeepSpeed. I never got it to work on the training rig because it required CUDA headers (why? how? why is adam_cpu CUDA? no clue). Notably, optimizer SHARDING across GPUs still work, because that has nothing to do with offload.\nzero_stage: 3: stage 1 is state sharding, 2 is optimizer sharding, 3 is optimizer AND parameter sharding.\nnum_processes: 3: for GPUs, num_processes is the number of GPUs Accelerate/DeepSpeed should use.\nFriggin LoRA In the sprit of not wasting too many monies, I still conceded and used LoRA. This was a fairly straightforward setup through Huggingface PEFT.\nHere was my config:\npeft_config = LoraConfig(inference_mode=False, r=16, target_modules=[\u0026#34;q_proj\u0026#34;, \u0026#34;v_proj\u0026#34;, \u0026#34;out_proj\u0026#34;], lora_alpha=32, lora_dropout=0.1) and the integration:\nmodel = WhisperForConditionalGeneration.from_pretrained(f\u0026#34;{MODEL}\u0026#34;) + model = get_peft_model(model, peft_config) Simple as that. 
One protip: call model.train(); otherwise you will be hit with:\nFile \u0026#34;/jet/home/hliuk/.conda/envs/chat-whisper/lib/python3.10/site-packages/torch/nn/modules/conv.py\u0026#34;, line 309, in _conv_forward return F.conv1d(input, weight, bias, self.stride, RuntimeError: weight should have at least three dimensions presumably because of some conflict with inference_mode setting the wrong .forward() paths.\nOn the machine, merge_and_unload never worked. Instead, I downloaded the LoRA weights (instead of the merged full weights) and then called that on my local machine.\nTwo highlights from the LoRA config:\nr=16: we set the rank of the matrix into 16, because anything lower causes the model to stop converging. This still ended up needing 3 GPUs to actually cram fit.\nlora_alpha=32: I saw somewhere that the LoRA weight scaling factor, which is lora_alpha/r, should always be larger that \\(1\\). Your mileage may vary.\n[\u0026quot;q_proj\u0026quot;, \u0026quot;v_proj\u0026quot;, \u0026quot;out_proj\u0026quot;]: it seems like many people are not a fan of LoRAing the key matricies\u0026mdash;why? I don\u0026rsquo;t know. I\u0026rsquo;m following that convention here.\nAnd so\u0026hellip; Two days, and much wandb later, we\u0026rsquo;ve got a model!\nCheck it out!\nWe could\u0026rsquo;ve pushed the GPU up a little by setting LoRA rank higher, but I found that if the memory is sitting at anything above a \\(80\\%\\) ever, the system will eventually OOM.\n","html":"\u003cp\u003eHappy Monday friends.\u003c/p\u003e\n\u003cp\u003eThe deliverable of the week was to make the a ASR model for Batchalign. Essentially, most copies of Whisper is pretty bad at Language Sample Analysis (LSA), because they mostly don\u0026rsquo;t work in terms trying to actually capture the things that people doing LSA want to capture (disfluencies, stuttering, etc.). 
OpenAI even acknowledged in the paper that they filtered out the disfluencies from their gold transcript to prevent Whisper from writing down too much of them.\u003c/p\u003e\n\u003cp\u003eAnd so\u0026hellip; We roll up our sleeves and do it ourselves.\u003c/p\u003e\n\u003ch2 id=\"a-large-language-model\"\u003eA \u003cstrong\u003eLarge\u003c/strong\u003e Language Model\u003c/h2\u003e\n\u003cp\u003eI didn\u0026rsquo;t want to perform Low-Rank Approximation (LoRA) to heavily when training this model. Folks fine tuning \u003ca href=\"/posts/kbhllama/\"\u003eLLaMA\u003c/a\u003e will note that the preferred parameters were \u003ca href=\"https://deci.ai/blog/fine-tune-llama-2-with-lora-for-question-answering/\"\u003eessentially asked the user to make the model matricies Rank 8\u003c/a\u003e, across the entire model.\u003c/p\u003e\n\u003cp\u003eWhen trying this in earlier experiments, we failed dramatically as the LoRA\u0026rsquo;d model failed to converge when we hit any smaller rank below 10. However, if we tried to, say, do it above 10, I would OOM.\u003c/p\u003e\n\u003cp\u003eI will note: its not like we don\u0026rsquo;t have compute. For this project, I fortunately am able to provision any number of V100 32GB as I see reasonable to train this model. 
Nevertheless, a lovey dovey parameter heavy 1.5 Billion parameter model is still a sight to behold (and cram into one such GPUs).\u003c/p\u003e\n\u003cp\u003eHence, the most important impetus for making this work without aggressive LoRA and degraded performance is some kind of model parallel training scheme.\u003c/p\u003e\n\u003ch2 id=\"one-model-multiple-cards\"\u003eOne Model, Multiple Cards\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-23_10-21-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-23_10-21-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAlr then.\u003c/p\u003e\n\u003cp\u003eAfter investigation, \u003ca href=\"https://deepspeed.readthedocs.io/en/stable/zero3.html\"\u003eDeepSpeed\u003c/a\u003e seemed pretty promising for a few reasons. The third iteration of its algorithm (Zero-3) has three different main offerings:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eModel parameter sharding (sharding the weights of the model across devices)\u003c/li\u003e\n\u003cli\u003eOptimizer state sharding\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eModel/Parameter state offload\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe last one caught my eye. Essentially, as long as your chip has the ability to perform a single forward pass, it can train a model under Zero-3. 
This is because the system is designed, on request, to offload the weights of your model into CPU or NVMe if you want\u0026mdash;and only pull it into the main device for the actual step of forward/backwards passes.\u003c/p\u003e\n\u003cp\u003eThe thing about DeepSpeed is that its configured in a very hapazard way, and once you DeepSpeed onto your training script you can\u0026rsquo;t really go back: it expects model parallel training, in the way you configured it, always, based on the contents to the training script.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://github.com/huggingface/accelerate\"\u003eHuggingface Accelerate\u003c/a\u003e to the rescue! The system is essentially a generic hypervisation framework. It is designed to accelerate model training using any framework you\u0026rsquo;d like: CPU data parallel, GPU data parallel, DeepSpeed model parallel, and so on\u0026mdash;with a single configuration file.\u003c/p\u003e\n\u003cp\u003eWith minimal change to your \u003cem\u003etraining script\u003c/em\u003e, your actual acceleration scheme travels with a configuration file \u003cstrong\u003eon device\u003c/strong\u003e. Meaning, running the same script on different devices configured with Accelerate will use the best settings for that device; including the correct number of cards, accelerators, etc.\u003c/p\u003e\n\u003ch2 id=\"pedal-to-the-metal\"\u003ePedal to the Metal\u003c/h2\u003e\n\u003cp\u003eAs usual, despite how good all of this stuff sounds, getting it all to glue together was a hot mess.\u003c/p\u003e\n\u003ch3 id=\"accelerate\"\u003eAccelerate\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s start with Accelerate. 
The actual process of integrating Accelerate into your training script is pretty straightforward:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eaccelerator\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eAccelerator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eDEVICE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eaccelerator\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edevice\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emodel\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_dataloader\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eaccelerator\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprepare\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emodel\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_dataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand then, in your training loop, change\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e- loss.backward()\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ accelerator.backward(loss)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand finally, whenever you need to access a value in CPU, change\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e- loss = torch.mean(loss.cpu())\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ loss = torch.mean(accelerator.gather(loss))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThat\u0026rsquo;s honestly about it in terms of making accelerate work.\u003c/p\u003e\n\u003ch3 id=\"deepspeed-shenanigans\"\u003eDeepSpeed Shenanigans\u003c/h3\u003e\n\u003cp\u003eDeepSpeed is a great tool to accelerate model training, but the damned thing is so janky to actually get started because of various device 
integration issues.\u003c/p\u003e\n\u003cp\u003eThere\u0026rsquo;s this \u003ca href=\"https://www.reddit.com/r/Oobabooga/comments/13etobg/using_deepspeed_requires_lots_of_manual_tweaking/\"\u003eexcellent thread\u003c/a\u003e on Reddit with people winging about the various things that DeepSpeed is broken about. To actually get it to actually work on my end\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cem\u003edeep breath. pray to deity of your choice, etc.\u003c/em\u003e and Install Conda\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003epip install deepspeed\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003econda install openmpi\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003epip install mpi4py\u003c/code\u003e (if this fails, \u003ccode\u003eenv LD_LIBRARY_PATH=/your/conda/lib/path pip install --no-cache-dir mpi4py\u003c/code\u003e)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf you now ran DeepSpeed on a model, it likely will crash on a local random assert statement. To fix this, get ready:\u003c/p\u003e\n\u003cp\u003efind \u003ccode\u003eruntime/zero/partitioned_param_coordinator.py\u003c/code\u003e wherever your DeepSpeed code is, and:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e- assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ # assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ecomment the damned assertion out. 
Yup.\u003c/p\u003e\n\u003ch3 id=\"accelerate-device-config\"\u003eAccelerate Device Config\u003c/h3\u003e\n\u003cp\u003eAnd now, onto the device configuration. If you are most normal people, you can just run:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eaccelerate config\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eanswer the questions, and be done for configuring that device. However, as I was training on a SLURM device, I had no access to a tty. Hence, I had to configure the Accelerate device configuration myself.\u003c/p\u003e\n\u003cp\u003eTo glue Accelerate and Deepspeed together, here was the config.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-yaml\" data-lang=\"yaml\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003ecompute_environment\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003eLOCAL_MACHINE\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edebug\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edeepspeed_config\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#f92672\"\u003egradient_accumulation_steps\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003eoffload_optimizer_device\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003enone\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003eoffload_param_device\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003ecpu\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003ezero3_init_flag\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003etrue\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003ezero_stage\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edistributed_type\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003eDEEPSPEED\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efsdp_config\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e {}\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edowncast_bf16\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;no\u0026#39;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003emachine_rank\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003emixed_precision\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;no\u0026#39;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003enum_machines\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003enum_processes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003euse_cpu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eHere are the highlights:\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003emixed_precision: 'no'\u003c/code\u003e: FP16 doesn\u0026rsquo;t work if you do your own tensor creation within the train loop as I did though the Whisper models. 
Your DataLoader passed to your accelerator at the beginning of the script must return the \u003cstrong\u003eexact\u003c/strong\u003e tensors you put into the model if you want FP16.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003eoffload_optimizer_device: none\u003c/code\u003e: offloading optimizer requires you to compile the PyTorch extension \u003ccode\u003eadam_cpu\u003c/code\u003e from DeepSpeed. I never got it to work on the training rig because it required CUDA headers (why? how? why is \u003ccode\u003eadam_cpu\u003c/code\u003e CUDA? no clue). Notably, \u003cstrong\u003eoptimizer SHARDING\u003c/strong\u003e across GPUs still work, because that has nothing to do with offload.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003ezero_stage: 3\u003c/code\u003e: stage 1 is state sharding, 2 is optimizer sharding, 3 is optimizer AND parameter sharding.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003enum_processes: 3\u003c/code\u003e: for GPUs, \u003ccode\u003enum_processes\u003c/code\u003e is \u003cstrong\u003ethe number of GPUs\u003c/strong\u003e Accelerate/DeepSpeed should use.\u003c/p\u003e\n\u003ch3 id=\"friggin-lora\"\u003eFriggin LoRA\u003c/h3\u003e\n\u003cp\u003eIn the sprit of not wasting too many monies, I still conceded and used LoRA. 
This was a fairly straightforward setup through Huggingface PEFT.\u003c/p\u003e\n\u003cp\u003eHere was my config:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epeft_config\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLoraConfig\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einference_mode\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003er\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e16\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etarget_modules\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;q_proj\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;v_proj\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;out_proj\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003elora_alpha\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e32\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elora_dropout\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand the integration:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emodel = WhisperForConditionalGeneration.from_pretrained(f\u0026#34;{MODEL}\u0026#34;)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ model = get_peft_model(model, peft_config)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSimple as that. 
One protip: call \u003ccode\u003emodel.train()\u003c/code\u003e; otherwise you will be hit with:\u003c/p\u003e\n\u003cpre tabindex=\"0\"\u003e\u003ccode class=\"language-nil\" data-lang=\"nil\"\u003e File \u0026#34;/jet/home/hliuk/.conda/envs/chat-whisper/lib/python3.10/site-packages/torch/nn/modules/conv.py\u0026#34;, line 309, in _conv_forward\n return F.conv1d(input, weight, bias, self.stride,\nRuntimeError: weight should have at least three dimensions\n\u003c/code\u003e\u003c/pre\u003e\u003cp\u003epresumably because of some conflict with \u003ccode\u003einference_mode\u003c/code\u003e setting the wrong \u003ccode\u003e.forward()\u003c/code\u003e paths.\u003c/p\u003e\n\u003cp\u003eOn the machine, \u003ccode\u003emerge_and_unload\u003c/code\u003e never worked. Instead, I downloaded the LoRA weights (instead of the merged full weights) and then called that on my local machine.\u003c/p\u003e\n\u003cp\u003eTwo highlights from the LoRA config:\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003er=16\u003c/code\u003e: we set the rank of the matrix into \u003ccode\u003e16\u003c/code\u003e, because anything lower causes the model to stop converging. This still ended up needing 3 GPUs to actually cram fit.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003elora_alpha=32\u003c/code\u003e: I saw somewhere that the LoRA weight scaling factor, which is \u003ccode\u003elora_alpha/r\u003c/code\u003e, should always be larger that \\(1\\). Your mileage may vary.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003e[\u0026quot;q_proj\u0026quot;, \u0026quot;v_proj\u0026quot;, \u0026quot;out_proj\u0026quot;]\u003c/code\u003e: it seems like many people are not a fan of LoRAing the key matricies\u0026mdash;why? I don\u0026rsquo;t know. 
I\u0026rsquo;m following that convention here.\u003c/p\u003e\n\u003ch2 id=\"and-so-dot-dot-dot\"\u003eAnd so\u0026hellip;\u003c/h2\u003e\n\u003cp\u003eTwo days, and much wandb later, we\u0026rsquo;ve got a model!\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://huggingface.co/talkbank/CHATWhisper-en-large-v1\"\u003eCheck it out!\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-23_13-16-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe could\u0026rsquo;ve pushed the GPU up a little by setting LoRA rank higher, but I found that if the memory is sitting at anything above a \\(80\\%\\) ever, the system will eventually OOM.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmake_models_go_brrr/","tags":["fireside"],"title":"Make Models Go Brrr: Model Parallel Whisper Training"},{"categories":null,"contents":"Suppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an invariant subspace under \\(T\\). Then:\n\\begin{equation} T|_{U}(u) = Tu,\\ \\forall u \\in U \\end{equation}\nwhere \\(T|_{U} \\in \\mathcal{L}(U)\\)\n","html":"\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e under \\(T\\). Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT|_{U}(u) = Tu,\\ \\forall u \\in U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(T|_{U} \\in \\mathcal{L}(U)\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmap_restriction_operator/","tags":null,"title":"map restriction operator"},{"categories":null,"contents":"MapReduce is an distributed algorithm.\nhttps://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf\nMap: \\((in\\_key, in\\_value) \\Rightarrow list(out\\_key, intermediate\\_value)\\). 
Reduce: Group map outputs by \\(out\\_key\\) \\((out\\_key, list(intermediate\\_value)) \\Rightarrow list(out\\_value)\\) example of MapReduce Say, if you want to count word frequencies in a set of documents.\nMap: \\((document\\_name, document\\_contents) \\Rightarrow list(word, #\\ occurrences)\\) You can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a single document. We have now broken the contents into divide and conquerable groups.\nReduce: \\((word, list\\ (occurrences\\_per\\_document)) \\Rightarrow (word,sum)\\) We just add up the occurrences that each of the nodes\u0026rsquo; output for word frequency.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e is an \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-31_11-58-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf\"\u003ehttps://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMap: \\((in\\_key, in\\_value) \\Rightarrow list(out\\_key, intermediate\\_value)\\).\u003c/li\u003e\n\u003cli\u003eReduce:\n\u003cul\u003e\n\u003cli\u003eGroup map outputs by \\(out\\_key\\)\u003c/li\u003e\n\u003cli\u003e\\((out\\_key, list(intermediate\\_value)) \\Rightarrow list(out\\_value)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"example-of-mapreduce--kbhmapreduce-dot-md\"\u003eexample of \u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSay, if you want to count word frequencies in a set of documents.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMap: \\((document\\_name, document\\_contents) \\Rightarrow list(word, #\\ 
occurrences)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a \u003cem\u003esingle\u003c/em\u003e document. We have now broken the contents into divide and conquerable groups.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eReduce: \\((word, list\\ (occurrences\\_per\\_document)) \\Rightarrow (word,sum)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe just add up the occurrences that each of the nodes\u0026rsquo; output for word frequency.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmapreduce/","tags":null,"title":"MapReduce"},{"categories":null,"contents":"A Markov Chain is a chain of \\(N\\) states, with an \\(N \\times N\\) transition matrix.\nat each step, we are in exactly one of those states the matrix \\(P_{ij}\\) tells us \\(P(j|i)\\), the probability of going to state \\(j\\) given you are at state \\(i\\) And therefore:\n\\begin{equation} \\sum_{j=1}^{N} P_{ij} = 1 \\end{equation}\nErgotic Markov Chain a markov chain is Ergotic if\u0026hellip;\nyou have a path from any one state to any other for any start state, after some time \\(T_0\\), the probability of being in any state at any \\(T \u0026gt; T_0\\) is non-zero Every Ergotic Markov Chain has a long-term visit rate:\ni.e. a steady state visitation count exists. We usually call it:\n\\begin{equation} \\pi = \\qty(\\pi_{i}, \\dots, \\pi_{n}) \\end{equation}\nComputing steady state Fact:\nlet\u0026rsquo;s declare that \\(\\pi\\) is the steady state to a transition matrix \\(T\\); recall that the FROM states are the rows, which means that \\(\\pi\\) has to be a row vector; \\(\\pi\\) being a steady state makes:\n\\begin{equation} \\pi T = \\pi \\end{equation}\nThis is a left e.v. 
with eigenvalue \\(1\\), which is the principle eigenvector of \\(T\\) as transition matricies always have eigenvector eigenvalue to \\(1\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmarkov_chain/\"\u003eMarkov Chain\u003c/a\u003e is a chain of \\(N\\) states, with an \\(N \\times N\\) transition matrix.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eat each step, we are in exactly one of those states\u003c/li\u003e\n\u003cli\u003ethe matrix \\(P_{ij}\\) tells us \\(P(j|i)\\), the probability of going to state \\(j\\) given you are at state \\(i\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{j=1}^{N} P_{ij} = 1\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"ergotic-markov-chain\"\u003eErgotic Markov Chain\u003c/h2\u003e\n\u003cp\u003ea markov chain is \u003ca href=\"#ergotic-markov-chain\"\u003eErgotic\u003c/a\u003e if\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eyou have a path from any one state to any other\u003c/li\u003e\n\u003cli\u003efor any start state, after some time \\(T_0\\), the probability of being in any state at any \\(T \u0026gt; T_0\\) is non-zero\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eEvery \u003ca href=\"#ergotic-markov-chain\"\u003eErgotic Markov Chain\u003c/a\u003e has a long-term visit rate:\u003c/p\u003e\n\u003cp\u003ei.e. a steady state visitation count exists. 
We usually call it:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi = \\qty(\\pi_{i}, \\dots, \\pi_{n})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"computing-steady-state\"\u003eComputing steady state\u003c/h3\u003e\n\u003cp\u003eFact:\u003c/p\u003e\n\u003cp\u003elet\u0026rsquo;s declare that \\(\\pi\\) is the steady state to a transition matrix \\(T\\); recall that the FROM states are the rows, which means that \\(\\pi\\) has to be a row vector; \\(\\pi\\) being a steady state makes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi T = \\pi\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is a left e.v. with \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e \\(1\\), which is the principle \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e of \\(T\\) as transition matricies always have \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e eigenvalue to \\(1\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmarkov_chain/","tags":null,"title":"Markov Chain"},{"categories":null,"contents":"A MDP is a decision network whereby a sequences of actions causes a sequence of states. Each state is dependent on the action we take and the state we are in, and each utility is dependent on action taken and the state we are in.\nNote that, unlike a POMDP, we know what state we are in\u0026mdash;the observations from the states are just unclear.\nconstituents \\(S\\): state space (assuming discrete for now, there are \\(n\\) states) \u0026mdash; \u0026ldquo;minimum set of information that allows you to solve a problem\u0026rdquo; \\(A\\): action space \u0026mdash; set of things your agent can do \\(T(s\u0026rsquo; | s,a)\\): \u0026ldquo;dynamics\u0026rdquo;, state-transition model \u0026ldquo;probability that we end up in \\(s\u0026rsquo;\\) given \\(s\\) and action \\(a\\)\u0026rdquo;: good idea to make a table of probabilities of source vs. 
destination variables \\(R(s,a,s\u0026rsquo;)\\): expected reward given in an action and a state (real world reward maybe stochastic) \\(\\pi_{t}(s_{1:t}, a_{1:t-1})\\): the policy, returning an action, a system of assigning actions based on states however, our past states are d-seperated from our current action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\) additional information We assume policy to be exact right now.\nstationary Markov Decision Process This is a stationary Markov Decision Process because at each node \\(S_{n}\\), we have: \\(P(S_{n+1} | A_n, S_n)\\). Time is not a variable: as long as you know what state you are in, and what you did, you know the transition probability.\n(that is, the set of states is not dependent on time)\ncalculating utility with instantaneous rewards Because, typically, in decision networks you sum all the utilities together, you\u0026rsquo;d think that we should sum the utilities together.\nfinite-horizon models We want to maximize reward over time, over a finite horizon \\(n\\). Therefore, we try to maximize:\n\\begin{equation} \\sum_{t=1}^{n}r_{t} \\end{equation}\nthis function is typically called \u0026ldquo;return\u0026rdquo;.\ninfinite-horizon models If you lived forever, small positive \\(r_{t}\\) and large \\(r_{t}\\) makes no utility difference. 
We therefore add discounting:\n\\begin{equation} \\sum_{t=1}^{\\infty} \\gamma^{t-1} r_{t} \\end{equation}\nwhere, \\(\\gamma \\in (0,1)\\)\nwe discount the future by some amount\u0026mdash;an \u0026ldquo;interest rate\u0026rdquo;\u0026mdash;reward now is better than reward in the future.\n\\(\\gamma \\to 0\\): \u0026ldquo;myopic\u0026rdquo; strategies, near-sighted strategies \\(\\gamma \\to 1\\): \u0026ldquo;non-discounting\u0026rdquo; average return models We don\u0026rsquo;t care about this as much:\n\\begin{equation} \\lim_{n \\to \\infty} \\frac{1}{n} \\sum_{t=1}^{n}r_{t} \\end{equation}\nbut its close to infinite-horizon models with Gama close to \\(1\\)\nSolving an MDP You are handed or can predict \\(R(s,a)\\), and know all transitions Small, Discrete State Space\nGet an exact solution for \\(U^{*}(s)\\) (and hence \\(\\pi^{ *}(a, s)\\)) for the problem via\u0026hellip;\npolicy iteration value iteration Large, Continuous State Space\nParameterize Policy\nOptimize \\(\\pi_{\\theta}\\) to maximize \\(U(\\pi_{\\theta})\\) using Policy Optimization methods!\nGradient Free: lower dimension policy space\nLocal Policy Search (aka Hooke-Jeeves Policy Search) Genetic Policy Search Cross Entropy Method Gradient Based Method: higher dimension policy space\nPolicy Gradient\nParameterize Value Function\nOptimize \\(U_{\\theta}(S)\\) via global approximation or local approximation methods, then use a greedy policy on that nice and optimized value function.\nYou can only reason about your immediate surroundings/local reachable states online planning\nor\u0026hellip; \u0026ldquo;you don\u0026rsquo;t know the model whatsoever\u0026rdquo;\nreinforcement learning\nduring these cases, you never argmax over all actions; hence, its important to remember the methods to preserve Exploration and Exploitation.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e is a \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision 
network\u003c/a\u003e whereby a sequences of actions causes a sequence of states. Each state is dependent on the action we take and the state we are in, and each \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is dependent on action taken and the state we are in.\u003c/p\u003e\n\u003cp\u003eNote that, unlike a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e, we know what state we are in\u0026mdash;the observations from the states are just unclear.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-17_09-18-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S\\): state space (assuming discrete for now, there are \\(n\\) states) \u0026mdash; \u0026ldquo;minimum set of information that allows you to solve a problem\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(A\\): action space \u0026mdash; set of things your agent can do\u003c/li\u003e\n\u003cli\u003e\\(T(s\u0026rsquo; | s,a)\\): \u0026ldquo;dynamics\u0026rdquo;, state-transition model \u0026ldquo;\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e that we end up in \\(s\u0026rsquo;\\) given \\(s\\) and action \\(a\\)\u0026rdquo;: good idea to make a table of probabilities of source vs. 
destination variables\u003c/li\u003e\n\u003cli\u003e\\(R(s,a,s\u0026rsquo;)\\): expected reward given in an action and a state (real world reward maybe stochastic)\u003c/li\u003e\n\u003cli\u003e\\(\\pi_{t}(s_{1:t}, a_{1:t-1})\\): the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e, returning an action, a system of assigning actions based on states\n\u003cul\u003e\n\u003cli\u003ehowever, our past states are \u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperated\u003c/a\u003e from our \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eWe assume \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e to be exact right now.\u003c/p\u003e\n\u003ch3 id=\"stationary-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003estationary \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis is a \u003ca href=\"#stationary-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003estationary Markov Decision Process\u003c/a\u003e because at each node \\(S_{n}\\), we have: \\(P(S_{n+1} | A_n, S_n)\\). 
Time is \u003cstrong\u003enot\u003c/strong\u003e a variable: as long as you know what state you are in, and what you did, you know the transition \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-17_13-07-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(that is, the set of states is not dependent on time)\u003c/p\u003e\n\u003ch3 id=\"calculating-utility--kbhutility-theory-dot-md--with-instantaneous-rewards\"\u003ecalculating \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e with instantaneous rewards\u003c/h3\u003e\n\u003cp\u003eBecause, typically, in \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003es you sum all the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutilities\u003c/a\u003e together, you\u0026rsquo;d think that we should sum the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutilities\u003c/a\u003e together.\u003c/p\u003e\n\u003ch4 id=\"finite-horizon-models\"\u003efinite-horizon models\u003c/h4\u003e\n\u003cp\u003eWe want to maximize reward over time, over a finite horizon \\(n\\). Therefore, we try to maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{t=1}^{n}r_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis function is typically called \u0026ldquo;\u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch4 id=\"infinite-horizon-models\"\u003einfinite-horizon models\u003c/h4\u003e\n\u003cp\u003eIf you lived forever, small positive \\(r_{t}\\) and large \\(r_{t}\\) makes no utility difference. 
We therefore add discounting:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{t=1}^{\\infty} \\gamma^{t-1} r_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\gamma \\in (0,1)\\)\u003c/p\u003e\n\u003cp\u003ewe discount the future by some amount\u0026mdash;an \u0026ldquo;interest rate\u0026rdquo;\u0026mdash;reward now is better than reward in the future.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\gamma \\to 0\\): \u0026ldquo;myopic\u0026rdquo; strategies, near-sighted strategies\u003c/li\u003e\n\u003cli\u003e\\(\\gamma \\to 1\\): \u0026ldquo;non-discounting\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"average-return-models\"\u003eaverage return models\u003c/h4\u003e\n\u003cp\u003eWe don\u0026rsquo;t care about this as much:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\frac{1}{n} \\sum_{t=1}^{n}r_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebut its close to \u003ca href=\"#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e with Gama close to \\(1\\)\u003c/p\u003e\n\u003ch3 id=\"solving-an-mdp--kbhmarkov-decision-process-dot-md\"\u003eSolving an \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e\u003c/h3\u003e\n\u003ch4 id=\"you-are-handed-or-can-predict-r--s-a--and-know-all-transitions\"\u003eYou are handed or can predict \\(R(s,a)\\), and know all transitions\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSmall, Discrete State Space\u003c/p\u003e\n\u003cp\u003eGet an exact solution for \\(U^{*}(s)\\) (and hence \\(\\pi^{ *}(a, s)\\)) for the problem via\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue 
iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eLarge, Continuous State Space\u003c/p\u003e\n \u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eParameterize Policy\u003c/p\u003e\n\u003cp\u003eOptimize \\(\\pi_{\\theta}\\) to maximize \\(U(\\pi_{\\theta})\\) using \u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e methods!\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eGradient Free\u003c/strong\u003e\u003c/strong\u003e: lower dimension \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e space\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e (aka \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eHooke-Jeeves Policy Search\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eGradient Based Method\u003c/strong\u003e\u003c/strong\u003e: higher dimension \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e space\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n \u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eParameterize Value Function\u003c/p\u003e\n\u003cp\u003eOptimize \\(U_{\\theta}(S)\\) via \u003ca href=\"/posts/kbhapproximate_value_function/#global-approximation\"\u003eglobal approximation\u003c/a\u003e or \u003ca href=\"/posts/kbhapproximate_value_function/#local-approximation\"\u003elocal approximation\u003c/a\u003e methods, then use a \u003ca 
href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e on that nice and optimized \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"you-can-only-reason-about-your-immediate-surroundings-local-reachable-states\"\u003eYou can only reason about your immediate surroundings/local reachable states\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eor\u0026hellip; \u0026ldquo;you don\u0026rsquo;t know the model whatsoever\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eduring these cases, you never argmax over all actions; hence, its important to remember the methods to preserve \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmarkov_decision_process/","tags":null,"title":"Markov Decision Process"},{"categories":null,"contents":"\\(A \\to B\\) has the same independence relationship as \\(B\\to A\\). How do we describe it?\nrequirements If two Baysian Networks encode the same conditional independence assumptions, they are Markov Equivalent.\nadditional information checking Markov Equivalence Two graphs are Markov Equivalent, IFF BOTH:\nsome edges without regard to direction (\u0026ldquo;same skeleton\u0026rdquo;) the same set of immoral v-structures ","html":"\u003cp\u003e\\(A \\to B\\) has the same independence relationship as \\(B\\to A\\). 
How do we describe it?\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eIf two \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003es encode the same \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e assumptions, they are \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalent\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"checking-markov-equivalence--kbhmarkov-equivalence-classes-dot-md\"\u003echecking \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalence\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eTwo graphs are \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalent\u003c/a\u003e, \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e BOTH:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esome edges without regard to direction (\u0026ldquo;same skeleton\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003ethe same set of \u003ca href=\"/posts/kbhimmoral_v_structure/\"\u003eimmoral v-structures\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmarkov_equivalence_classes/","tags":null,"title":"Markov Equivalence Classes"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmarkov_game/","tags":null,"title":"markov game"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmarkovian_process/","tags":null,"title":"markovian process"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmartin_luther_king/","tags":null,"title":"Martin Luther King"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.642647\nOne-Liner Combined bag-of-words on transcript + ADR on audio to various classifiers for AD; ablated BERT\u0026rsquo;s decesion space for 
attention to make more easy models in the future.\nNovelty Pre-processed each of the two modalities before fusing it (late fusion) Archieved \\(93.75\\%\\) accuracy on AD detection The data being forced-aligned and fed with late fusion allows one to see what sounds/words the BERT model was focusing on by just focusing on the attention on the words Notable Methods Used classic cookie theft data bag of words to do ADR but for words multimodality but late fusion with one (hot-swappable) classifier Key Figs How they did it This is how the combined the forced aligned (:tada:) audio and transcript together.\nBertbelation Ablated BERT results.\nThe model overall tends to focus on early parts of sentences. y is attention weight, x is position in sentence, blue is TD, red is AD.\nNew Concepts Active Data Representation ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.642647\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eCombined bag-of-words on transcript + \u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e on audio to various classifiers for AD; ablated BERT\u0026rsquo;s decesion space for attention to make more easy models in the future.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePre-processed each of the two modalities before fusing it (\u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eArchieved \\(93.75\\%\\) accuracy on AD detection\u003c/li\u003e\n\u003cli\u003eThe data being forced-aligned and fed with \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e allows one to see what sounds/words the BERT model was focusing on by just focusing on the attention on the words\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsed classic cookie theft data\u003c/li\u003e\n\u003cli\u003ebag of words 
to do \u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e but for words\u003c/li\u003e\n\u003cli\u003emultimodality but \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e with one (hot-swappable) classifier\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"how-they-did-it\"\u003eHow they did it\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-20-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis is how the combined the forced aligned (:tada:) audio and transcript together.\u003c/p\u003e\n\u003ch3 id=\"bertbelation\"\u003eBertbelation\u003c/h3\u003e\n\u003cp\u003eAblated BERT results.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-23-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe model overall tends to focus on early parts of sentences. y is attention weight, x is position in sentence, blue is TD, red is AD.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhactive_data_representation/\"\u003eActive Data Representation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmartinc_2021/","tags":["ntj"],"title":"Martinc 2021"},{"categories":null,"contents":"The Martingale Model states: if we observed the closing price of the market yesterday, we expect that the market is going to open at the close price yesterday.\nFormally:\n\\begin{equation} E\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1} \\end{equation}\n\u0026ldquo;irrespective of what you know, no matter how long the history, the best expectation of today\u0026rsquo;s price is yesterday\u0026rsquo;s price.\u0026rdquo;\nThis is not a for sure! modeling statement: this is simply the expected value!! 
That means, after \\(\\infty\\) times of re-running the universe starting \u0026ldquo;yesterday\u0026rdquo;, the new opening price will converge to the last closing price.\nTwo important conclusions:\nIf we know the closing price yesterday (it is observed), the price today will be DETERMINED and not!!! a random variable If the closing price yesterday is a random variable, the price today will be IN-DETERMINED and also a random variable Therefore, the \u0026ldquo;randomness is fair\u0026rdquo;, and therefore the \u0026ldquo;market is not drifting in favor/against you.\u0026rdquo;\nThe Martingale Model comes from the idea that \u0026ldquo;true gambling is true equal conditions (money, opponents, bystanders, situations, die, and dice.)\u0026rdquo; Therefore, any amount of bias towards one direction/party is advantageous for that person.\nIn fact, it was theorized that an efficient market should follow exactly this behavior.\nchanges in history Of course, the difference between the expression:\n\\begin{equation} E\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1} \\end{equation}\nversus\n\\begin{equation} E\\qty [X_{k}|X_{k-1}] = X_{k-1} \\end{equation}\nis pretty big. The two will only be the same if the markets is assumed to be a markovian process.\nMartingale historical conditioning Ok, if we are told that the process is Martingale, but we only have two days ago, what do we have?\ni.e. what if we want to know:\n\\begin{equation} E\\qty [X_{k} | X_{k-2}] = ? \\end{equation}\nTurns out, there\u0026rsquo;s a small trick you can do. 
Without even Martingale, we can claim that:\n\\begin{equation} E\\qty [X_{k} | X_{k-2}] = \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\end{equation}\nThat, the price today is just the sum of all possible prices for day \\(k-1\\) we name small \\(x\\) times the probability \\(Pr\\) that it actually happens given the existing \\(k-2\\) observation.\nOf course, given the Martingale Model now, given some possible price in day \\(k-1\\) named \\(x\\), price in \\(k\\) is also \\(x\\). Therefore:\n\\begin{equation} E[X_{k}|X_{k-1},X_{k-1} = x] =x \\end{equation}\nApplying this, then, we have\n\\begin{equation} \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\end{equation}\nThe right sum, then, is just the expected value of \\(X_{k-1}\\) given \\(X_{k-2}\\)!! Meaning:\n\\begin{equation} \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}] \\end{equation}\nNow, we are in a Martingale Model. Therefore:\n\\begin{equation} \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}] = X_{k-2} \\end{equation}\nAnd so, putting it all together, we have:\n\\begin{align} E\\qty [X_{k} | X_{k-2}] \u0026amp;= \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\ \u0026amp;= \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\ \u0026amp;= E[X_{k-1} | X_{k-2}] \\\\ \u0026amp;= X_{k-2} \\end{align}\nAmazing. 
So Martingale holds over time\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e states: if we observed the closing price of the market yesterday, we expect that the market is going to open at the close price yesterday.\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;irrespective of what you know, no matter how long the history, the best expectation of today\u0026rsquo;s price is yesterday\u0026rsquo;s price.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThis is not a \u003cem\u003efor sure!\u003c/em\u003e modeling statement: this is simply the expected value!! That means, after \\(\\infty\\) times of re-running the universe starting \u0026ldquo;yesterday\u0026rdquo;, the new opening price will converge to the last closing price.\u003c/p\u003e\n\u003cp\u003eTwo important conclusions:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eIf we know the closing price yesterday (it is observed), the price today will be DETERMINED and not!!! 
a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIf the closing price yesterday is a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e, the price today will be IN-DETERMINED and also a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTherefore, the \u0026ldquo;randomness is fair\u0026rdquo;, and therefore the \u0026ldquo;market is not drifting in favor/against you.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e comes from the idea that \u0026ldquo;true gambling is true equal conditions (money, opponents, bystanders, situations, die, and dice.)\u0026rdquo; Therefore, any amount of bias towards one direction/party is advantageous for that person.\u003c/p\u003e\n\u003cp\u003eIn fact, it was theorized that an efficient market should follow exactly this behavior.\u003c/p\u003e\n\u003ch2 id=\"changes-in-history\"\u003echanges in history\u003c/h2\u003e\n\u003cp\u003eOf course, the difference between the expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eversus\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k}|X_{k-1}] = X_{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis pretty big. 
The two will only be the same if the markets is assumed to be a \u003ca href=\"/posts/kbhmarkovian_process/\"\u003emarkovian process\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"martingale--kbhmartingale-model-dot-md--historical-conditioning\"\u003e\u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e historical conditioning\u003c/h2\u003e\n\u003cp\u003eOk, if we are told that the process is \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e, but we only have two days ago, what do we have?\u003c/p\u003e\n\u003cp\u003ei.e. what if we want to know:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k} | X_{k-2}] = ?\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTurns out, there\u0026rsquo;s a small trick you can do. Without even \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e, we can claim that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k} | X_{k-2}] = \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat, the price today is just the sum of all possible prices for day \\(k-1\\) we name small \\(x\\) times the probability \\(Pr\\) that it actually happens given the existing \\(k-2\\) observation.\u003c/p\u003e\n\u003cp\u003eOf course, given the \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e now, given some possible price in day \\(k-1\\) named \\(x\\), price in \\(k\\) is also \\(x\\). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[X_{k}|X_{k-1},X_{k-1} = x] =x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying this, then, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe right sum, then, is just the expected value of \\(X_{k-1}\\) given \\(X_{k-2}\\)!! 
Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we are in a \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e. Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}] = X_{k-2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd so, putting it all together, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nE\\qty [X_{k} | X_{k-2}] \u0026amp;= \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\\n\u0026amp;= \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\\n\u0026amp;= E[X_{k-1} | X_{k-2}] \\\\\n\u0026amp;= X_{k-2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAmazing. So \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e holds over time\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmartingale_model/","tags":null,"title":"Martingale Model"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmath5_how/","tags":null,"title":"math5 how"},{"categories":null,"contents":"matricies are like buckets of numbers. ok, ok, seriously:\nmatricies are a way of encoding the basis of domain proof: that if Linear Maps are determined uniquely by where they map the basis anyways, why don\u0026rsquo;t we just make a mathematical object that represents that to encode the linear maps.\ndefinition Let \\(n\\), \\(m\\) be positive integer. An \\(m\\) by \\(n\\) matrix \\(A\\) is a rectangular array of elements of \\(\\mathbb{F}\\) with \\(m\\) rows and \\(n\\) columns:\n\\begin{equation} A = \\mqty(A_{1,1} \u0026amp; \\dots \u0026amp; A_{1,n} \\\\ \\vdots \u0026amp;\u0026amp; \\vdots \\\\ A_{m,1} \u0026amp; \\dots \u0026amp; A_{m,n}) \\end{equation}\nthe matrix representing a Linear Map \\(T\\) is noted as \\(\\mathcal{M}(T)\\). 
This maybe basis specific; see matrix of Linear Map for more.\nadditional information matrix of Linear Map This result codifies the claim that matricies represent Linear Maps by what they do to the basis of the space of concern.\nSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(v_1, \\dots, v_{n}\\) is a basis of \\(V\\); and \\(w_1, \\dots w_{m}\\) is a basis of \\(W\\). Then, the matrix of \\(T\\) with respective to these basis is the \\(m\\) by \\(n\\) (rows by columns!) where:\n\\begin{equation} Tv_{k} = A_{1,k}w_1 + \\dots + A_{m,k}w_{m} \\end{equation}\nQuick memory of this result: inputs across columns, outputs across rows; think about how matrix is multiplied: you smash the input vector horizontally, across the top columns and down. Therefore, a matrix is written as: each columns contains the instructions of where to send each input basis, written as a linear combination down each row of that column of the output basis\nIF the basis being used in the matrix is unclear (i.e. if we had a change of basis, so didn\u0026rsquo;t use the standard basis, etc.), then the matrix of a SPECIFIC set of basis is written as: \\(\\mathcal{M}(T, (v_1, \\dots, v_n), (w_1, \\dots, w_{m}))\\).\nmatrix of a vector The matrix of a vector is just an encoding of scalars which needed to scale the basis of the space to add up to that vector.\nMore formally\u0026mdash;\nSuppose \\(v \\in V\\), and \\(v_1 \\dots v_{n}\\) is a basis of \\(V\\). 
The matrix representing vector \\(v\\) is the n-by-1 matrix:\n\\begin{equation} \\mathcal{M}(v) = \\mqty(c_1 \\\\ \\dots \\\\ c_{n}) \\end{equation}\nwhere \\(c_1 \\dots c_{n}\\) are the scalars such that:\n\\begin{equation} v = c_1v_1 + \\dots +c_{n}v_{n} \\end{equation}\ncolumn notation One can use a dot to index matricies\u0026rsquo; columns and rows.\nSuppose \\(A\\) is an \\(m\\) by \\(n\\) matrix.\nAT \\(1 \\leq j \\leq m\\), \\(A_{j ,.}\\) denotes the \\(1\\) by \\(n\\) matrix consisting only row \\(j\\) of \\(A\\) AT \\(1 \\leq k \\leq n\\), \\(A_{. ,k}\\) denotes the \\(m\\) by \\(k\\) matrix consisting only column \\(k\\) of \\(A\\) sums and scalar multiplication of matricies According to Jana, a third grader can add and scalar multiply matricies. So I am not going to write them here.\nHowever, what\u0026rsquo;s interesting is the fact that they actually work:\nSuppose \\(S,T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(S+T) = \\mathcal{M}(S)+\\mathcal{M}(T)\\) Suppose \\(\\lambda \\in \\mathbb{F}, T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(\\lambdaT) = \\lambda \\mathcal{M}(T)\\) The verification of this result, briefly, is that:\nRecall that matricies encode where each input basis get sent, as a linear combination of the output basis, down each column; recall that \\((S+T)v = Sv+Tv\\); now, write the sum of the matrix without performing the sum; apply the basis to the matrix; distribute the basis choordinates across the sum, seperate into two matricies. Now we have the sum of the matrix is equal to \\(Sv + Tv\\); then invoke definition of sum of Linear Map.\nscalar multiplication works in the same darn way.\nmatrix multiplication See matrix multiplication\n\\(\\mathbb{F}^{m,n}\\) For \\(m\\) and \\(n\\) positive integers, the set of all \\(m,n\\) matricies with entries in \\(\\mathbb{F}\\) is called \\(\\mathbb{F}^{m,n}\\).\nThis is a vector space! 
\u0026ldquo;obviously\u0026rdquo; its basis is the set of all matrix with \\(1\\) in one slot and \\(0\\) in all others. There are \\(m\\cdot n\\) of those matricies so \\(\\dim \\mathbb{F}^{m,n}=m\\cdot n\\).\ninvertability See matrix invertability\nelementary matrix elementary matricies are slight variations from the identity matrix which performs the elementary row operations:\nswap rows add a row to another scale rows determinants See determinants\nGaussian elimination See Gaussian elimination\ndiagonal matrix see diagonal matrix\nupper-triangular matricies upper-triangular matricies\nchange-of-basis To change the basis of \\(A\\) to w.r.t. \\(B\\), create a similar matrix:\n\\begin{equation} B^{-1} A B = C \\end{equation}\n\\(C\\) is \\(A\\) in terms of \\(B\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are like buckets of numbers. ok, ok, seriously:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are a way of encoding the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e proof: that if \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es are determined uniquely by where they map the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e anyways, why don\u0026rsquo;t we just make a mathematical object that represents that to encode the linear maps.\u003c/p\u003e\n\u003ch2 id=\"definition\"\u003edefinition\u003c/h2\u003e\n\u003cp\u003eLet \\(n\\), \\(m\\) be positive integer. 
An \\(m\\) by \\(n\\) matrix \\(A\\) is a rectangular array of elements of \\(\\mathbb{F}\\) with \\(m\\) rows and \\(n\\) columns:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(A_{1,1} \u0026amp; \\dots \u0026amp; A_{1,n} \\\\ \\vdots \u0026amp;\u0026amp; \\vdots \\\\ A_{m,1} \u0026amp; \\dots \u0026amp; A_{m,n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e representing a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T\\) is noted as \\(\\mathcal{M}(T)\\). This maybe \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e specific; see \u003ca href=\"#matrix-of-linear-map--kbhlinear-map-dot-md\"\u003ematrix of Linear Map\u003c/a\u003e for more.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"matrix-of-linear-map--kbhlinear-map-dot-md\"\u003ematrix of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis result codifies the claim that \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es by what they do to the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the space of concern.\u003c/p\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(v_1, \\dots, v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\); and \\(w_1, \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(W\\). Then, the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T\\) with respective to these basis is the \\(m\\) by \\(n\\) (rows by columns!) 
where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv_{k} = A_{1,k}w_1 + \\dots + A_{m,k}w_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eQuick memory of this result: inputs across columns, outputs across rows; think about how matrix is multiplied: you smash the input vector horizontally, across the top columns and down. Therefore, a matrix is written as: each columns contains the instructions of where to send each input \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e down each row of that column of the output \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eIF the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e being used in the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is unclear (i.e. if we had a change of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, so didn\u0026rsquo;t use the standard basis, etc.), then the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of a \u003cem\u003eSPECIFIC\u003c/em\u003e set of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e is written as: \\(\\mathcal{M}(T, (v_1, \\dots, v_n), (w_1, \\dots, w_{m}))\\).\u003c/p\u003e\n\u003ch3 id=\"matrix--kbhmatricies-dot-md--of-a-vector\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of a vector\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#matrix--kbhmatricies-dot-md--of-a-vector\"\u003ematrix of a vector\u003c/a\u003e is just an encoding of scalars which needed to scale the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the space to add up to that vector.\u003c/p\u003e\n\u003cp\u003eMore formally\u0026mdash;\u003c/p\u003e\n\u003cp\u003eSuppose \\(v \\in V\\), and \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
The \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e representing vector \\(v\\) is the n-by-1 matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{M}(v) = \\mqty(c_1 \\\\ \\dots \\\\ c_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c_1 \\dots c_{n}\\) are the scalars such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = c_1v_1 + \\dots +c_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"column-notation\"\u003ecolumn notation\u003c/h3\u003e\n\u003cp\u003eOne can use a dot to index \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u0026rsquo; columns and rows.\u003c/p\u003e\n\u003cp\u003eSuppose \\(A\\) is an \\(m\\) by \\(n\\) matrix.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAT \\(1 \\leq j \\leq m\\), \\(A_{j ,.}\\) denotes the \\(1\\) by \\(n\\) matrix consisting only row \\(j\\) of \\(A\\)\u003c/li\u003e\n\u003cli\u003eAT \\(1 \\leq k \\leq n\\), \\(A_{. ,k}\\) denotes the \\(m\\) by \\(k\\) matrix consisting only column \\(k\\) of \\(A\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"sums-and-scalar-multiplication-of-matricies--kbhmatricies-dot-md\"\u003esums and scalar multiplication of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eAccording to Jana, a third grader can add and scalar multiply \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e. 
So I am not going to write them here.\u003c/p\u003e\n\u003cp\u003eHowever, what\u0026rsquo;s interesting is the fact that they actually work:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSuppose \\(S,T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(S+T) = \\mathcal{M}(S)+\\mathcal{M}(T)\\)\u003c/li\u003e\n\u003cli\u003eSuppose \\(\\lambda \\in \\mathbb{F}, T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(\\lambdaT) = \\lambda \\mathcal{M}(T)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe verification of this result, briefly, is that:\u003c/p\u003e\n\u003cp\u003eRecall that matricies encode where each input \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e get sent, as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the output \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, down each column; recall that \\((S+T)v = Sv+Tv\\); now, write the sum of the matrix without performing the sum; apply the basis to the matrix; distribute the basis choordinates across the sum, seperate into two matricies. 
Now we have the sum of the matrix is equal to \\(Sv + Tv\\); then invoke definition of sum of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e works in the same darn way.\u003c/p\u003e\n\u003ch3 id=\"matrix-multiplication--kbhmatrix-multiplication-dot-md\"\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\)\u003c/h3\u003e\n\u003cp\u003eFor \\(m\\) and \\(n\\) positive integers, the set of all \\(m,n\\) matricies with entries in \\(\\mathbb{F}\\) is called \\(\\mathbb{F}^{m,n}\\).\u003c/p\u003e\n\u003cp\u003eThis is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e! \u0026ldquo;obviously\u0026rdquo; its \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e is the set of all \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e with \\(1\\) in one slot and \\(0\\) in all others. 
There are \\(m\\cdot n\\) of those \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e so \\(\\dim \\mathbb{F}^{m,n}=m\\cdot n\\).\u003c/p\u003e\n\u003ch3 id=\"invertability--kbhinvertability-dot-md\"\u003e\u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhinvertability/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003ematrix invertability\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"elementary-matrix\"\u003eelementary matrix\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#elementary-matrix\"\u003eelementary matricies\u003c/a\u003e are slight variations from the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e matrix which performs the \u003ca href=\"#elementary-matrix\"\u003eelementary row operations\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eswap rows\u003c/li\u003e\n\u003cli\u003eadd a row to another\u003c/li\u003e\n\u003cli\u003escale rows\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"determinants--kbhdeterminants-dot-md\"\u003e\u003ca href=\"/posts/kbhdeterminants/\"\u003edeterminants\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhdeterminants/\"\u003edeterminants\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"gaussian-elimination--kbhgaussian-elimination-dot-md\"\u003e\u003ca href=\"/posts/kbhgaussian_elimination/\"\u003eGaussian elimination\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhgaussian_elimination/\"\u003eGaussian elimination\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"diagonal-matrix--kbhdiagonal-matrix-dot-md\"\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#diagonal-matrix\"\u003ediagonal matrix\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhdiagonal_matrix/#diagonal-matrix\"\u003ediagonal matrix\u003c/a\u003e\u003c/p\u003e\n\u003ch3 
id=\"upper-triangular-matricies--kbhupper-triangular-matrix-dot-md\"\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matricies\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matricies\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"change-of-basis\"\u003echange-of-basis\u003c/h3\u003e\n\u003cp\u003eTo change the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(A\\) to w.r.t. \\(B\\), create a \u003ca href=\"/posts/kbheigenvalue/#similar-matrices\"\u003esimilar matrix\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB^{-1} A B = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(C\\) is \\(A\\) in terms of \\(B\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmatricies/","tags":null,"title":"matricies"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmatrix_adjectives/","tags":null,"title":"matrix adjectives"},{"categories":null,"contents":"If we have some system:\n\\begin{equation} x\u0026rsquo; = Ax \\end{equation}\nthe solution for this system should be \\(e^{At}\\). This gives rise to, given the power series:\n\\begin{equation} e^{At} = 1 + At + \\frac{1}{2} \\qty(At)^{2} + \\frac{1}{3!} (At)^{3}+ \\dots \\end{equation}\nthe derivative of which:\n\\begin{align} \\dv t e^{At} \u0026amp;= A + A^{2}t + \\frac{A^{3}t^{2}}{2} + \\dots \\\\ \u0026amp;= A\\qty(1 + At + \\frac{A^{2}t^{2}}{2}) \\end{align}\nThis intuition makes sense for all matrices \\(A\\). Meaning the general solution gives:\n\\begin{equation} x = e^{At} x_0 \\end{equation}\nSee also raising e to a matrix to see how to deal with diagonalizable matricies.\nBenefits this approach produces all solutions no matter the eigenvalues of \\(A\\). also tells you what to do if your characteristic polynomial has repeated eigenvalues this is computationally not too bad if you have.. 
diagonal \\(A\\) diagonalizable \\(A\\) Great Matrix Exponential Tragedy \\begin{equation} e^{A+B} \\neq e^{A} e^{B} \\end{equation}\nin general, because matricies don\u0026rsquo;t commute.\n","html":"\u003cp\u003eIf we have some system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = Ax\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe solution for this system should be \\(e^{At}\\). This gives rise to, given the power series:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{At} = 1 + At + \\frac{1}{2} \\qty(At)^{2} + \\frac{1}{3!} (At)^{3}+ \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe derivative of which:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv t e^{At} \u0026amp;= A + A^{2}t + \\frac{A^{3}t^{2}}{2} + \\dots \\\\\n\u0026amp;= A\\qty(1 + At + \\frac{A^{2}t^{2}}{2})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis intuition makes sense for all matrices \\(A\\). Meaning the general solution gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx = e^{At} x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhraising_e_to_a_matrix/\"\u003eraising e to a matrix\u003c/a\u003e to see how to deal with \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e matricies.\u003c/p\u003e\n\u003ch2 id=\"benefits\"\u003eBenefits\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ethis approach produces all solutions no matter the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(A\\).\u003c/li\u003e\n\u003cli\u003ealso tells you what to do if your \u003ca href=\"/posts/kbhcharacteristic_polynomial/\"\u003echaracteristic polynomial\u003c/a\u003e has repeated eigenvalues\u003c/li\u003e\n\u003cli\u003ethis is computationally not too bad if you have..\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e \\(A\\)\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e \\(A\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"great-matrix-exponential-tragedy\"\u003eGreat Matrix Exponential Tragedy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ne^{A+B} \\neq e^{A} e^{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein general, because \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e don\u0026rsquo;t commute.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmatrix_exponentiation/","tags":null,"title":"matrix exponentiation"},{"categories":null,"contents":"matrix multiplication is defined such that the expression \\(\\mathcal{M}(ST) = \\mathcal{M}(S)\\mathcal{M}(T)\\) holds:\n\\begin{equation} (AC)_{j,k} = \\sum_{r=1}^{n}A_{j,r}C_{r,k} \\end{equation}\nWhile matrix multiplication is distributive and associative, it is NOT!!!!!!!!!!! commutative. I hope you can see that \\(ST\\neq TS\\).\nmemorization its always row-by-column, move down rows first then columns multiply element-wise and add (row times column and add) other ways of thinking about matrix multiplication it is \u0026ldquo;row times column\u0026rdquo;: \\((AC)_{j,k} = A_{j, .} \\cdot C_{., k}\\) it is \u0026ldquo;matrix times columns\u0026rdquo;: \\((AC)_{. , k} = A C_{., k}\\) matrix as a linear combinator Suppose \\(A\\) is an \\(m\\) by \\(n\\) matrix; and \\(c = \\mqty(c_1\\\\ \\vdots\\\\ c_{0})\\) is an \\(n\\) by \\(1\\) matrix; then:\n\\begin{equation} Ac = c_1 A_{., 1} + \\dots + c_{n} A_{., n} \\end{equation}\n(i.e. 
you can use a vector to linearly combinate the column vectors.)\nlinear maps are like matrix multiplication \\begin{equation} \\mathcal{M}(Tv) = \\mathcal{M}(T)M(v) \\end{equation}\n\u0026ldquo;the matrix of a vector formed by applying some Linear Map \\(T\\) onto \\(v\\) is the same as the product of the matrix of \\(T\\) and the matrix of a vector of \\(v\\)\u0026rdquo;\nProof:\nLet \\(v_1 \\dots v_{n}\\) be a basis of \\(v\\).\nSo, we have that \\(Tv = c_1Tv_{1} + \\dots + c_{n}T v_{n}\\) by the additivity and homogeneity of \\(T\\).\nThen, converting it all to matricies:\n\\begin{align} \\mathcal{M}(Tv) \u0026amp;= c_1 \\mathcal{M}(Tv_{1}) + \\dots + c_{n} \\mathcal{M}(Tv_{n}) \\\\ \u0026amp;= c_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n} \\end{align}\nbecause the columns of a matrix represent where each basis vector gets taken in the new space.\nYou will notice now that \\(c_1 \\dots c_{n}\\) are the scalars needed to construct \\(v\\), and that \\(\\mathcal{M}(T)_{.,1} \\dots\\) are the vectors needed to construct \\(\\mathcal{M}(T)\\).\nSo:\n\\begin{equation} c_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n} = \\mathcal{M}(T) \\mathcal{M}(v) = \\mathcal{M}(Tv) \\end{equation}\nas desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e is defined such that the expression \\(\\mathcal{M}(ST) = \\mathcal{M}(S)\\mathcal{M}(T)\\) holds:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(AC)_{j,k} = \\sum_{r=1}^{n}A_{j,r}C_{r,k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhile matrix multiplication is \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributive\u003c/a\u003e and \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, it is \u003cstrong\u003e\u003cstrong\u003eNOT\u003c/strong\u003e\u003c/strong\u003e!!!!!!!!!!! \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutative\u003c/a\u003e. 
I hope you can see that \\(ST\\neq TS\\).\u003c/p\u003e\n\u003ch2 id=\"memorization\"\u003ememorization\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eits always row-by-column, move down rows first then columns\u003c/li\u003e\n\u003cli\u003emultiply element-wise and add (row times column and add)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-ways-of-thinking-about-matrix-multiplication--kbhmatrix-multiplication-dot-md\"\u003eother ways of thinking about \u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eit is \u0026ldquo;row times column\u0026rdquo;: \\((AC)_{j,k} = A_{j, .} \\cdot C_{., k}\\)\u003c/li\u003e\n\u003cli\u003eit is \u0026ldquo;matrix times columns\u0026rdquo;: \\((AC)_{. , k} = A C_{., k}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"matrix-as-a-linear-combinator\"\u003ematrix as a linear combinator\u003c/h2\u003e\n\u003cp\u003eSuppose \\(A\\) is an \\(m\\) by \\(n\\) matrix; and \\(c = \\mqty(c_1\\\\ \\vdots\\\\ c_{0})\\) is an \\(n\\) by \\(1\\) matrix; then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAc = c_1 A_{., 1} + \\dots + c_{n} A_{., n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(i.e. 
you can use a vector to linearly combinate the column vectors.)\u003c/p\u003e\n\u003ch2 id=\"linear-maps-are-like-matrix-multiplication\"\u003elinear maps are like matrix multiplication\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{M}(Tv) = \\mathcal{M}(T)M(v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e formed by applying some \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T\\) onto \\(v\\) is the same as the product of the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T\\) and the \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e of \\(v\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet \\(v_1 \\dots v_{n}\\) be a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(v\\).\u003c/p\u003e\n\u003cp\u003eSo, we have that \\(Tv = c_1Tv_{1} + \\dots + c_{n}T v_{n}\\) by the additivity and \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003eThen, converting it all to \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\mathcal{M}(Tv) \u0026amp;= c_1 \\mathcal{M}(Tv_{1}) + \\dots + c_{n} \\mathcal{M}(Tv_{n}) \\\\\n\u0026amp;= c_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ebecause the columns of a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e represent where each \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e gets taken in the new space.\u003c/p\u003e\n\u003cp\u003eYou will notice now that \\(c_1 \\dots c_{n}\\) are the scalars needed to construct \\(v\\), and that 
\\(\\mathcal{M}(T)_{.,1} \\dots\\) are the \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es needed to construct \\(\\mathcal{M}(T)\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n} = \\mathcal{M}(T) \\mathcal{M}(v) = \\mathcal{M}(Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmatrix_multiplication/","tags":null,"title":"matrix multiplication"},{"categories":null,"contents":"a maximal interval is the largest interval you can fit while the function is finite while the function is finite.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhmaximal_interval/\"\u003emaximal interval\u003c/a\u003e is the largest interval you can fit while the function is finite while the function is finite.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaximal_interval/","tags":null,"title":"maximal interval"},{"categories":null,"contents":"maximum a posteriori estimate is a parameter learning scheme that uses Beta Distribution and Baysian inference to get a distribution of the posterior of the parameter, and return the argmax (i.e. 
the mode) of the MAP.\nCalculating a MAP posterior, in general:\n\\begin{equation} \\theta_{MAP} = \\arg\\max_{\\theta} P(\\theta|x_1, \\dots, x_{n}) = \\arg\\max_{\\theta} \\frac{f(x_1, \\dots, x_{n} | \\theta) g(\\theta)}{h(x_1, \\dots, x_{n})} \\end{equation}\nWe assume that the data points are IID, and the fact that the bottom of this is constant, we have:\n\\begin{equation} \\theta_{MAP} = \\arg\\max_{\\theta} g(\\theta) \\prod_{i=1}^{n} f(x_{i}|\\theta) \\end{equation}\nUsually, we\u0026rsquo;d like to argmax the log:\n\\begin{equation} \\theta_{MAP} = \\arg\\max_{\\theta} \\qty(\\log (g(\\theta)) + \\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) ) \\end{equation}\nwhere, \\(g\\) is the probability density of \\(\\theta\\) happening given the prior belief, and \\(f\\) is the likelyhood of \\(x_{i}\\) given parameter \\(\\theta\\).\nYou will note this is just Maximum Likelihood Parameter Learning function, plus the log-probability of the parameter prior.\nMAP for Bernoulli and Binomial \\(p\\) To estimate \\(p\\), we use the Beta Distribution:\nThe MODE of the beta, which is the MAP of such a result:\n\\begin{equation} \\frac{\\alpha -1 }{\\alpha + \\beta -2} \\end{equation}\nnow, for a Laplace posterior \\(Beta(2,2)\\), we have:\n\\begin{equation} \\frac{n+1}{m+n+2} \\end{equation}\nMAP for Poisson and Exponential \\(\\lambda\\) We use the gamma distribution as our prior\n\\begin{equation} \\Lambda \\sim Gamma(\\alpha, \\beta) \\end{equation}\nwhere \\(\\alpha-1\\) is the prior event count, and \\(\\beta\\) is the prior time periods.\nLet\u0026rsquo;s say you have some data points \\(x_1, \u0026hellip;x_{k}\\), the posterior from from those resulting events:\n\\begin{equation} Gamma(\\alpha + n, \\beta+k) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003emaximum a posteriori estimate\u003c/a\u003e is a \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e scheme that uses \u003ca 
href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e and \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian inference\u003c/a\u003e to get a distribution of the posterior of the parameter, and return the \u003ca href=\"/posts/kbhargmax/\"\u003eargmax\u003c/a\u003e (i.e. the mode) of the \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eCalculating a \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e posterior, in general:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MAP} = \\arg\\max_{\\theta} P(\\theta|x_1, \\dots, x_{n}) = \\arg\\max_{\\theta} \\frac{f(x_1, \\dots, x_{n} | \\theta) g(\\theta)}{h(x_1, \\dots, x_{n})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe assume that the data points are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, and the fact that the bottom of this is constant, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MAP} = \\arg\\max_{\\theta} g(\\theta) \\prod_{i=1}^{n} f(x_{i}|\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eUsually, we\u0026rsquo;d like to argmax the log:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MAP} = \\arg\\max_{\\theta} \\qty(\\log (g(\\theta)) + \\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(g\\) is the probability density of \\(\\theta\\) happening given the \u003cstrong\u003e\u003cstrong\u003eprior\u003c/strong\u003e\u003c/strong\u003e belief, and \\(f\\) is the \u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e of \\(x_{i}\\) given parameter \\(\\theta\\).\u003c/p\u003e\n\u003cp\u003eYou will note this is just \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e function, plus the log-probability of the parameter prior.\u003c/p\u003e\n\u003ch2 
id=\"map-for-bernoulli-and-binomial-p\"\u003eMAP for Bernoulli and Binomial \\(p\\)\u003c/h2\u003e\n\u003cp\u003eTo estimate \\(p\\), we use the \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eThe MODE of the beta, which is the \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e of such a result:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha -1 }{\\alpha + \\beta -2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, for a Laplace posterior \\(Beta(2,2)\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{n+1}{m+n+2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"map--kbhmaximum-a-posteriori-estimate-dot-md--for-poisson-and-exponential-lambda\"\u003e\u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e for Poisson and Exponential \\(\\lambda\\)\u003c/h2\u003e\n\u003cp\u003eWe use the \u003ca href=\"#map--kbhmaximum-a-posteriori-estimate-dot-md--for-poisson-and-exponential-lambda\"\u003egamma distribution\u003c/a\u003e as our prior\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Lambda \\sim Gamma(\\alpha, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\alpha-1\\) is the prior event count, and \\(\\beta\\) is the prior time periods.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-15_16-16-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003chr\u003e\n\u003cp\u003eLet\u0026rsquo;s say you have some data points \\(x_1, \u0026hellip;x_{k}\\), the posterior from from those resulting events:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nGamma(\\alpha + n, \\beta+k)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaximum_a_posteriori_estimate/","tags":null,"title":"maximum a posteriori estimate"},{"categories":null,"contents":"\u0026ldquo;We find the parameter that maximizes the likelihood.\u0026rdquo;\nfor each \\(X_{j}\\), sum 
what\u0026rsquo;s the log-likelihood of one \\(X_{i}\\) take derivative w.r.t. \\(\\theta\\) and set to \\(0\\) solve for \\(\\theta\\) (this maximizes the log-likelihood of the data!)\nthat is:\n\\begin{equation} \\theta_{MLE} = \\arg\\max_{\\theta} P(x_1, \\dots, x_{n}|\\theta) = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) ) \\end{equation}\nIf your \\(\\theta\\) is a vector of more than \\(1\\) thing, take the gradient (i.e. partial derivative against each of your variables) of the thing and solve the place where the gradient is identically \\(0\\) (each slot is \\(0\\)). That is, we want:\n\\begin{equation} \\mqty[\\pdv{LL(\\theta)}{\\theta_{1}} \\\\ \\pdv{LL(\\theta)}{\\theta_{2}} \\\\ \\pdv{LL(\\theta)}{\\theta_{3}} \\\\ \\dots] = \\mqty[0 \\\\ 0 \\\\0] \\end{equation}\nMLE for poisson distribution MLE for Bernouli MLE is REALLY bad at generalizing to unseen data. Hence why MLE is good for big data where your MLE slowly converge to best parameters for your actual dataset.\nWe desire \\(\\theta\\) parameter from some data \\(D\\). To do this, we simply optimize:\n\\begin{equation} \\hat{\\theta} = \\arg\\max_{\\theta}P(D|\\theta) \\end{equation}\n, where:\n\\begin{equation} P(D|\\theta) = \\prod_{i} P(o_{i}| \\theta) \\end{equation}\nfor each \\(o_{i} \\in D\\). and \\(P\\) is the likelyhood: PMF or PDF given what you are working with.\nThat is, we want the parameter \\(\\theta\\) which maximizes the likelyhood of the data. 
This only works, of course, if each \\(o_{i} \\in D\\) is independent from each other, which we can assume so by calling the samples from data IID (because they are independent draws from the underlying distribution.)\nlog-likelihood The summation above is a little unwieldy, so we take the logs and apply log laws to turn the multiplication into a summation:\n\\begin{equation} \\hat{\\theta} = \\arg\\max_{\\theta} \\sum_{i} \\log P(o_{i}|\\theta) \\end{equation}\n\u0026ldquo;add the log probabilities of each of the outcomes you observed happening according to your unoptimized theta, and maximize it\u0026rdquo;\nargmax of log This holds because log is monotonic (\u0026ldquo;any larger input to a log will lead to a larger value\u0026rdquo;):\n\\begin{equation} \\arg\\max_{x} f(x) = \\arg\\max_{x} \\log f(x) \\end{equation}\nMLE, in general \\begin{equation} \\theta_{MLE} = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) ) \\end{equation}\nExample Say we want to train a model to predict whether or not a plane will crash. Suppose our network is very simple:\n\\(\\theta\\) represents if there will be an midair collision. Therefore, we have two disconnected nodes:\n\\begin{equation} P(crash) = \\theta \\end{equation}\n\\begin{equation} P(safe) = 1-\\theta \\end{equation}\nNow, suppose we observed that there was \\(m\\) flights and \\(n\\) midair collisions between them. 
We can then write then:\n\\begin{equation} P(D|\\theta) = \\theta^{n}(1-\\theta)^{m-n} \\end{equation}\nbecause \\(\\theta^{n}(1-\\theta)^{m-n}\\) is the total probability of the data you are given occurring (\\(n\\) crashes, \\(m-n\\) non crashing flights).\nNow, we seek to maximise this value\u0026mdash;because the probability of \\(P(D)\\) occurring should be \\(1\\) because \\(D\\) actually occured.\nIts mostly algebra at this point:\nSteps:\nwe first compute the probability of each of the sample happening according to old \\(\\theta\\) to get \\(P(D|\\theta)\\) we then take the log of it to make it a summation we then try to maximize \\(\\theta\\) to What this tells us is\u0026hellip;\nGeneric Maximum Likelihood Estimate Overall, its kind of unsurprising from the Frequentist Definition of Probability, but:\n\\begin{equation} \\hat{\\theta}_{i} = \\frac{n_{i}}{\\sum_{j=1}^{k} n_{j}} \\end{equation}\nfor some observations \\(n_{1:k}\\).\nand:\n\\begin{equation} \\sigma^{2} = \\frac{\\sum_{}^{} (o_{i} - \\hat{u})^{2}}{m} \\end{equation}\nProblems with Maximum Likelihood Parameter Learning This requires a lot of data to make work: for instance\u0026mdash;if we don\u0026rsquo;t have any plane crashes observed in \\(n\\) files, this scheme would say there\u0026rsquo;s no chance of plane crashes. This is not explicitly true.\nTherefore, we use Baysian Parameter Learning.\n","html":"\u003cp\u003e\u0026ldquo;We find the \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e that maximizes the likelihood.\u0026rdquo;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efor each \\(X_{j}\\), sum\n\u003col\u003e\n\u003cli\u003ewhat\u0026rsquo;s the \u003ca href=\"#log-likelihood\"\u003elog-likelihood\u003c/a\u003e of one \\(X_{i}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003etake derivative w.r.t. 
\\(\\theta\\) and set to \\(0\\)\u003c/li\u003e\n\u003cli\u003esolve for \\(\\theta\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e(this maximizes the \u003ca href=\"#log-likelihood\"\u003elog-likelihood\u003c/a\u003e of the data!)\u003c/p\u003e\n\u003cp\u003ethat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MLE} = \\arg\\max_{\\theta} P(x_1, \\dots, x_{n}|\\theta) = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) )\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eIf your \\(\\theta\\) is a vector of more than \\(1\\) thing, take the gradient (i.e. partial derivative against each of your variables) of the thing and solve the place where the gradient is identically \\(0\\) (each slot is \\(0\\)). That is, we want:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[\\pdv{LL(\\theta)}{\\theta_{1}} \\\\ \\pdv{LL(\\theta)}{\\theta_{2}} \\\\ \\pdv{LL(\\theta)}{\\theta_{3}} \\\\ \\dots] = \\mqty[0 \\\\ 0 \\\\0]\n\\end{equation}\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/#mle-for\"\u003eMLE for poisson distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/#mle-for-bernouli\"\u003eMLE for Bernouli\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMLE is REALLY bad at generalizing to unseen data. Hence why MLE is good for big data where your MLE slowly converge to best parameters for your actual dataset.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe desire \\(\\theta\\) parameter from some data \\(D\\). To do this, we simply optimize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\arg\\max_{\\theta}P(D|\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e, where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(D|\\theta) = \\prod_{i} P(o_{i}| \\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor each \\(o_{i} \\in D\\). 
and \\(P\\) is the \u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e: \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e or \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e given what you are working with.\u003c/p\u003e\n\u003cp\u003eThat is, we want the parameter \\(\\theta\\) which maximizes the likelyhood of the data. This only works, of course, if each \\(o_{i} \\in D\\) is \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e from each other, which we can assume so by calling the samples from data \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e (because they are independent draws from the underlying distribution.)\u003c/p\u003e\n\u003ch2 id=\"log-likelihood\"\u003elog-likelihood\u003c/h2\u003e\n\u003cp\u003eThe summation above is a little unwieldy, so we take the logs and apply log laws to turn the multiplication into a summation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\arg\\max_{\\theta} \\sum_{i} \\log P(o_{i}|\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;add the log probabilities of each of the outcomes you observed happening according to your unoptimized theta, and maximize it\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"argmax-of-log\"\u003eargmax of log\u003c/h3\u003e\n\u003cp\u003eThis holds because \u003ca href=\"/posts/kbhlog_laws/\"\u003elog\u003c/a\u003e is monotonic (\u0026ldquo;any larger input to a log will lead to a larger value\u0026rdquo;):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{x} f(x) = \\arg\\max_{x} \\log f(x)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"mle-in-general\"\u003eMLE, in general\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MLE} = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) )\n\\end{equation}\u003c/p\u003e\n\u003ch2 
id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cp\u003eSay we want to train a model to predict whether or not a plane will crash. Suppose our network is very simple:\u003c/p\u003e\n\u003cp\u003e\\(\\theta\\) represents if there will be an midair collision. Therefore, we have two disconnected nodes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(crash) = \\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(safe) = 1-\\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, suppose we observed that there was \\(m\\) flights and \\(n\\) midair collisions between them. We can then write then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(D|\\theta) = \\theta^{n}(1-\\theta)^{m-n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause \\(\\theta^{n}(1-\\theta)^{m-n}\\) is the total probability of the data you are given occurring (\\(n\\) crashes, \\(m-n\\) non crashing flights).\u003c/p\u003e\n\u003cp\u003eNow, we seek to maximise this value\u0026mdash;because the probability of \\(P(D)\\) occurring should be \\(1\\) because \\(D\\) actually occured.\u003c/p\u003e\n\u003cp\u003eIts mostly algebra at this point:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_10-07-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSteps:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe first compute the probability of each of the sample happening according to old \\(\\theta\\) to get \\(P(D|\\theta)\\)\u003c/li\u003e\n\u003cli\u003ewe then take the log of it to make it a summation\u003c/li\u003e\n\u003cli\u003ewe then try to maximize \\(\\theta\\) to\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhat this tells us is\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"generic-maximum-likelihood-estimate\"\u003eGeneric Maximum Likelihood Estimate\u003c/h2\u003e\n\u003cp\u003eOverall, its kind of unsurprising from the \u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of 
Probability\u003c/a\u003e, but:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta}_{i} = \\frac{n_{i}}{\\sum_{j=1}^{k} n_{j}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some observations \\(n_{1:k}\\).\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma^{2} = \\frac{\\sum_{}^{} (o_{i} - \\hat{u})^{2}}{m}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"problems-with-maximum-likelihood-parameter-learning--kbhmaximum-likelihood-parameter-learning-dot-md\"\u003eProblems with \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThis requires a lot of data to make work: for instance\u0026mdash;if we don\u0026rsquo;t have any plane crashes observed in \\(n\\) files, this scheme would say there\u0026rsquo;s no chance of plane crashes. This is not explicitly true.\u003c/p\u003e\n\u003cp\u003eTherefore, we use \u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaximum_likelihood_parameter_learning/","tags":null,"title":"Maximum Likelihood Parameter Learning"},{"categories":null,"contents":"Two Abstractions \u0026ldquo;temporal abstractions\u0026rdquo;: making decisions without consideration / abstracting away time (MDP) \u0026ldquo;state abstractions\u0026rdquo;: making decisions about groups of states at once Graph MaxQ formulates a policy as a graph, which formulates a set of \\(n\\) policies\nMax Node This is a \u0026ldquo;policy node\u0026rdquo;, connected to a series of \\(Q\\) nodes from which it takes the max and propegate down. 
If we are at a leaf max-node, the actual action is taken and control is passed back t to the top of the graph\nQ Node each node computes \\(Q(S,A)\\) for a value at that action\nHierachical Value Function \\begin{equation} Q(s,a) = V_{a}(s) + C_{i}(s,a) \\end{equation}\nthe value function of the root node is the value obtained over all nodes in the graph\nwhere:\n\\begin{equation} C_{i}(s,a) = \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|s,a) V(s\u0026rsquo;) \\end{equation}\nLearning MaxQ maintain two tables \\(C_{i}\\) and \\(\\tilde{C}_{i}(s,a)\\) (which is a special completion function which corresponds to a special reward \\(\\tilde{R}\\) which prevents the model from doing egregious ending actions) choose \\(a\\) according to exploration strategy execute \\(a\\), observe \\(s\u0026rsquo;\\), and compute \\(R(s\u0026rsquo;|s,a)\\) Then, update:\n","html":"\u003ch2 id=\"two-abstractions\"\u003eTwo Abstractions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;temporal abstractions\u0026rdquo;: making decisions without consideration / abstracting away time (\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;state abstractions\u0026rdquo;: making decisions about groups of states at once\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"graph\"\u003eGraph\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmaxq/\"\u003eMaxQ\u003c/a\u003e formulates a policy as a graph, which formulates a set of \\(n\\) policies\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_09-50-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"max-node\"\u003eMax Node\u003c/h3\u003e\n\u003cp\u003eThis is a \u0026ldquo;policy node\u0026rdquo;, connected to a series of \\(Q\\) nodes from which it takes the max and propegate down. 
If we are at a leaf max-node, the actual action is taken and control is passed back t to the top of the graph\u003c/p\u003e\n\u003ch3 id=\"q-node\"\u003eQ Node\u003c/h3\u003e\n\u003cp\u003eeach node computes \\(Q(S,A)\\) for a value at that action\u003c/p\u003e\n\u003ch2 id=\"hierachical-value-function\"\u003eHierachical Value Function\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_09-51-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\nQ(s,a) = V_{a}(s) + C_{i}(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe value function of the root node is the value obtained over all nodes in the graph\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{i}(s,a) = \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|s,a) V(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"learning-maxq\"\u003eLearning MaxQ\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003emaintain two tables \\(C_{i}\\) and \\(\\tilde{C}_{i}(s,a)\\) (which is a special completion function which corresponds to a special reward \\(\\tilde{R}\\) which prevents the model from doing egregious ending actions)\u003c/li\u003e\n\u003cli\u003echoose \\(a\\) according to exploration strategy\u003c/li\u003e\n\u003cli\u003eexecute \\(a\\), observe \\(s\u0026rsquo;\\), and compute \\(R(s\u0026rsquo;|s,a)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThen, update:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_09-54-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhmaxq/","tags":null,"title":"MaxQ"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmbp/","tags":null,"title":"MBP"},{"categories":null,"contents":"MCVI solves POMDPs with continuous state space, but with discrete observation and action spaces. 
It does this by formulating a POMDP as a graph.\nFast algorithms require discretized state spaces, which makes the problem much more difficult to model. MCVI makes continuous representations possible for complex domains.\nMC Backup Normal POMDP Bellman Backup isn\u0026rsquo;t going to work well with continuous state spaces.\nTherefore, we reformulate our value backup as:\n\\begin{equation} V_{t+1}(b) = \\max_{a \\in A} \\qty(\\int_{s} R(s,a)b(s) \\dd{s}) + \\gamma \\sum_{o \\in O}^{} p(o|b,a) V_{t}(update(b,a,o)) \\end{equation}\nwhereby, a continuous belief update:\n\\begin{equation} update(b,a,o) = \\kappa O(o|s\u0026rsquo;,a) \\int_{s \\in S} T(s\u0026rsquo;|s,a) b(s) \\dd{s} \\end{equation}\nwhere \\(\\kappa\\) is a normalisation constant to keep the new belief a probability.\nBut! Instead of actually taking the integral, we simulate a series of trajectories and sum the toal reward\nMC-Backup at Graph We construct at graph by sticking each best action determined by rolling out \\(L\\) steps and computing the reward.\nCollecting the values given each observation, we create a new node for the best action; the best action per observation is connected as well.\nThis creates a new optimal policy graph from the rollouts.\nMCVI initial each reward at action to \\(0\\) for each observation, initialize each observation, node as \\(0\\) Take monte-carlo samples across the actions and states to take integrals to obtain: \\(HV_{g}(b) = \\max_{a \\in A} \\qty(\\int_{s \\in S} R(s,a)b(s) \\dd{s} + \\sum_{o}^{} ???)\\) each future observation is sampled using monte-carlo simulation each backup, you pick one new node to add.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmcvi/\"\u003eMCVI\u003c/a\u003e solves \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es with continuous state space, but with discrete observation and action spaces. 
It does this by formulating a POMDP as a graph.\u003c/p\u003e\n\u003cp\u003eFast algorithms require discretized state spaces, which makes the problem much more difficult to model. MCVI makes continuous representations possible for complex domains.\u003c/p\u003e\n\u003ch2 id=\"mc-backup\"\u003eMC Backup\u003c/h2\u003e\n\u003cp\u003eNormal \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Backup\u003c/a\u003e isn\u0026rsquo;t going to work well with continuous state spaces.\u003c/p\u003e\n\u003cp\u003eTherefore, we reformulate our value backup as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{t+1}(b) = \\max_{a \\in A} \\qty(\\int_{s} R(s,a)b(s) \\dd{s}) + \\gamma \\sum_{o \\in O}^{} p(o|b,a) V_{t}(update(b,a,o))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, a continuous belief update:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nupdate(b,a,o) = \\kappa O(o|s\u0026rsquo;,a) \\int_{s \\in S} T(s\u0026rsquo;|s,a) b(s) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\kappa\\) is a normalisation constant to keep the new belief a probability.\u003c/p\u003e\n\u003cp\u003eBut! 
Instead of actually taking the integral, we simulate a series of trajectories and sum the toal reward\u003c/p\u003e\n\u003ch2 id=\"mc-backup-at-graph\"\u003eMC-Backup at Graph\u003c/h2\u003e\n\u003cp\u003eWe construct at graph by sticking each best action determined by rolling out \\(L\\) steps and computing the reward.\u003c/p\u003e\n\u003cp\u003eCollecting the values given each observation, we create a new node for the best action; the best action per observation is connected as well.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-30_20-02-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis creates a new optimal policy graph from the rollouts.\u003c/p\u003e\n\u003ch2 id=\"mcvi\"\u003eMCVI\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003einitial each reward at action to \\(0\\)\u003c/li\u003e\n\u003cli\u003efor each observation, initialize each observation, node as \\(0\\)\u003c/li\u003e\n\u003cli\u003eTake monte-carlo samples across the actions and states to take integrals to obtain:\n\u003cul\u003e\n\u003cli\u003e\\(HV_{g}(b) = \\max_{a \\in A} \\qty(\\int_{s \\in S} R(s,a)b(s) \\dd{s} + \\sum_{o}^{} ???)\\)\u003c/li\u003e\n\u003cli\u003eeach future observation is sampled using monte-carlo simulation\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eeach backup, you pick one new node to add.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmcvi/","tags":null,"title":"MCVI"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmeal_replacement/","tags":null,"title":"meal replacement"},{"categories":null,"contents":" at each point a relevant result is returned, calculate precision and then average that and then average the precision over all queries precision \\begin{equation} \\frac{tp}{tp + fp} \\end{equation}\nrecall \\begin{equation} \\frac{tp}{tp+fn} \\end{equation}\naccuracy \\begin{equation} \\frac{tp + tn}{tp+tn+fp+fn} \\end{equation}\nf1 
\\begin{equation} F_1 = \\frac{2 (P\\cdot R)}{P+R} \\end{equation}\n","html":"\u003cul\u003e\n\u003cli\u003eat each point a relevant result is returned, calculate precision\u003c/li\u003e\n\u003cli\u003eand then average that\u003c/li\u003e\n\u003cli\u003eand then average the precision over all queries\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"precision\"\u003eprecision\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{tp}{tp + fp}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"recall\"\u003erecall\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{tp}{tp+fn}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"accuracy\"\u003eaccuracy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{tp + tn}{tp+tn+fp+fn}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"f1\"\u003ef1\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nF_1 = \\frac{2 (P\\cdot R)}{P+R}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmean_average_precision/","tags":null,"title":"mean average precision"},{"categories":null,"contents":"One possible approach for using homomorphic encryption, developed specifically for imaging data.\nextract relevant features locally resulting data is encrypted using FHE train model remotely using FHE encrypted data send model back, and data owners decrypt the inference results locally ","html":"\u003cp\u003eOne possible approach for using homomorphic encryption, developed specifically for imaging data.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eextract relevant features locally\u003c/li\u003e\n\u003cli\u003eresulting data is encrypted using FHE\u003c/li\u003e\n\u003cli\u003etrain model remotely using FHE encrypted data\u003c/li\u003e\n\u003cli\u003esend model back, and data owners decrypt the inference results locally\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmedblindtuner/","tags":null,"title":"MedBlindTuner"},{"categories":null,"contents":"RAG for generic risk conversations.\ntag transcripts with 
the relevant themes a la action research use llama2 to embed the given information then send the overall information to a larger language model People generally preferred grounded GPT responses over human responses. Sometimes, in 2 of the features, humans preferred the non grounded responses.\n","html":"\u003cp\u003e\u003cstrong\u003eRAG\u003c/strong\u003e for generic risk conversations.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etag transcripts with the relevant themes a la action research\u003c/li\u003e\n\u003cli\u003euse llama2 to embed the given information\u003c/li\u003e\n\u003cli\u003ethen send the overall information to a larger language model\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ePeople generally preferred grounded GPT responses over human responses. Sometimes, in 2 of the features, humans preferred the \u003cstrong\u003enon grounded\u003c/strong\u003e responses.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmedical_dialogue_generation/","tags":null,"title":"Medical Dialogue Generation"},{"categories":null,"contents":"SnowMed CT: large medical ontology\ncreated pairwise distance matrix on SnowMed CT created weighted graphs using the SnowMed information Node2Vec! Suddenly you have an embedding for each disease. Two Tasks:\nPatient Disease Embeddings Using the node2vec with snowmed\nSimilar Patient Retrial Reveal hidden co-morbidities via Jaccard Coefficient\n","html":"\u003cp\u003eSnowMed CT: large medical ontology\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecreated pairwise distance matrix on SnowMed CT\u003c/li\u003e\n\u003cli\u003ecreated weighted graphs using the SnowMed information\u003c/li\u003e\n\u003cli\u003eNode2Vec! 
Suddenly you have an embedding for each disease.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTwo Tasks:\u003c/p\u003e\n\u003ch2 id=\"patient-disease-embeddings\"\u003ePatient Disease Embeddings\u003c/h2\u003e\n\u003cp\u003eUsing the node2vec with snowmed\u003c/p\u003e\n\u003ch2 id=\"similar-patient-retrial\"\u003eSimilar Patient Retrial\u003c/h2\u003e\n\u003cp\u003eReveal hidden co-morbidities via \u003ca href=\"/posts/kbhranked_information_retrieval/#jaccard-coefficient\"\u003eJaccard Coefficient\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmedical_knowledge_extraction/","tags":null,"title":"Medical Knowledge Extraction"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624558\nOne-Liner analyzed spontaneous speech transcripts (only!) from TD and AD patients with fastText and CNN; best was \\(83.33\\%\\) acc.\nNovelty threw the NLP kitchen sink to transcripts fastText CNN (with vary n-gram kernel 2,3,4,5 sizes) Notable Methods embeddings seaded by GloVe fastText are much faster, but CNN won out Key Figs the qual results PAR (participant), INV (investigator)\nNotes Hey look a review of the field:\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624558\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eanalyzed spontaneous speech transcripts (only!) 
from TD and AD patients with fastText and CNN; best was \\(83.33\\%\\) acc.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethrew the NLP kitchen sink to transcripts\n\u003cul\u003e\n\u003cli\u003efastText\u003c/li\u003e\n\u003cli\u003eCNN (with vary n-gram kernel 2,3,4,5 sizes)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eembeddings seaded by GloVe\u003c/li\u003e\n\u003cli\u003efastText are much faster, but CNN won out\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"the-qual-results\"\u003ethe qual results\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-33-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ePAR\u003c/strong\u003e\u003c/strong\u003e (participant), \u003cstrong\u003e\u003cstrong\u003eINV\u003c/strong\u003e\u003c/strong\u003e (investigator)\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eHey look a review of the field:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-32-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhmeghanani_2021/","tags":["ntj"],"title":"Meghanani 2021"},{"categories":null,"contents":"Human do not perceive frequency very well. The Mel scale is a scale from Hertz to what\u0026rsquo;s better perceived.\n","html":"\u003cp\u003eHuman do not perceive frequency very well. 
The Mel scale is a scale from Hertz to what\u0026rsquo;s better perceived.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmel_scale/","tags":null,"title":"Mel Scale"},{"categories":null,"contents":"memory is an array of bytes\neach byte has a unique index which is written in hexadecimal a pointer stores addresses to memory ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e is an array of \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eeach \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003e has a unique index which is written in hexadecimal\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e stores addresses to \u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmemory/","tags":null,"title":"memory"},{"categories":null,"contents":"Utilization vs throughput is conflicting goals.\nBig Picture OS:\ncreates new process sets up address space/segments read the executable, load instructions, global data libraries gets loaded Complier:\nset up stack Heap Allocator: \u0026ldquo;Sandbox of bytes\u0026rdquo;\ninitialize the heap heap allocation: client void *malloc(size_t size); Returns a pointer to a block of heap memory of at least size bytes, or NULL if an error occurred.\nvoid free(void *ptr); Frees the heap-allocated block starting at the specific address.\nvoid *realloc(void *ptr, size_t size); Changing the size of a pointer and realloc if needed\nSee Heap allocator\n","html":"\u003cp\u003eUtilization vs throughput is conflicting goals.\u003c/p\u003e\n\u003ch2 id=\"big-picture\"\u003eBig Picture\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-13_10-56-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOS:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecreates new 
process\u003c/li\u003e\n\u003cli\u003esets up address space/segments\u003c/li\u003e\n\u003cli\u003eread the executable, load instructions, global data\u003c/li\u003e\n\u003cli\u003elibraries gets loaded\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eComplier:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eset up stack\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHeap Allocator: \u0026ldquo;Sandbox of bytes\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003einitialize the heap\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"heap-allocation-client\"\u003eheap allocation: client\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003emalloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eReturns a pointer to a block of heap memory of at least size \u003ccode\u003ebytes\u003c/code\u003e, or \u003ccode\u003eNULL\u003c/code\u003e if an error occurred.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efree\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFrees the heap-allocated block starting at the specific address.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003erealloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eChanging the size of a pointer and realloc if needed\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhheap_allocator/#heap-allocator\"\u003eHeap allocator\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmemory_allocation/","tags":null,"title":"memory allocation"},{"categories":null,"contents":"Mencius Philosophy: every person has the capability of goodness and harm, and whichever one you water is the one that grows.\n","html":"\u003cp\u003e\u003ca 
href=\"/posts/kbhmencius_philosophy/\"\u003eMencius Philosophy\u003c/a\u003e: every person has the capability of goodness and harm, and whichever one you water is the one that grows.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmencius_philosophy/","tags":null,"title":"Mencius Philosophy"},{"categories":null,"contents":"the mesoscopic region is the regions far away from equilibrium points\u0026mdash;which is really hard\nThis is also why Poincare invented topo.\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhmesoscopic_region/\"\u003emesoscopic region\u003c/a\u003e is the regions far away from equilibrium points\u0026mdash;which is really hard\u003c/p\u003e\n\u003cp\u003eThis is also why Poincare invented topo.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmesoscopic_region/","tags":null,"title":"mesoscopic region"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmetabolism/","tags":null,"title":"metabolism"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmethods/","tags":null,"title":"Methods"},{"categories":null,"contents":"Applying the MFA aligner upon the Pitt (cookie only) data and performing statistics upon the calculated disfluency information. The ultimate goal is to replicate Wang 2019.\nThe code is available here.\nThe (unvalidated, draft) results are reported below:\nMean value reported, standard deviation in parens. For our data, \\(N=422\\), cases balanced.\nVariable AD (Pitt, ours) MCI (Wang) Control (ours) Control (Wang) Silence Duration 28.10 (21.28) 13.55 (5.53) 18.06 (12.52) 7.71 (5.03) Speech Duration* 23.77 (14.11) 46.64 (5.79) 27.23 (15.3) 53.63 (7.82) Voice-Silence Ratio 1.79 (4.88) 4.43 (2.78) 5.78 (31.95) 10.11 (6.05) Verbal Rate 1.59 (0.61) 1.56 (0.40) 1.989 (0.51) 1.91 (0.43) *speech duration would obviously vary with file length\nFurther statistical quantification also tells us some more things. 
Although the data does not make a good classifier, I performed two tests: a Kolmogorov-Smirnov test for goodness of fit, and a good \u0026lsquo;ol Pearson\u0026rsquo;s correlation with AD/control target. p-values are reported below.\nKS test silence duration: \\(1.31 \\times 10^{-5}\\) speech duration: \\(2.98 \\times 10^{-3}\\) voice-silence ratio: \\(2.01 \\times 10^{-7}\\) verbal rate: \\(4.32 \\times 10^{-10}\\) Pearson\u0026rsquo;s silence duration: \\(4.15 \\times 10^{-8}\\) speech duration: \\(0.164\\) voice-silence ratio: \\(0.732\\) verbal rate: \\(1.22 \\times 10^{-12}\\) As per the values reported in Wang 2019, we can see that\u0026mdash;apart from audio metadata\u0026mdash;verbal rate is a strongly correlated indicator against MCI/AD. We can reasonably say that Wang 2019\u0026rsquo;s data collection can be automated with reasonable success using batchalign + MFA.\nBroken ML I applied an RBF Support-Vector machine to classify AD/control based only on the two most highly correlated variables: verbal rate and silence duration. The results were disappointing.\nOn test data, N=42, balanced labels:\nSVC: \\(61.9\\%\\) Random forest: also \\(61.9\\%\\) We have fairly disappointing results. 
Here\u0026rsquo;s my hypothesis of why:\nif you take a look at this figure, we can see two main distributions\nSo, if we, like Wang 2019, used statistics on independence (they used chi-square, I used KS test), we will come up that the distributions are different.\nHowever, if you take a look at a randomly sampled set of validation data (crosses on the figure), you can see that a lot of them lands in the \u0026ldquo;mostly control\u0026rdquo; area: making the classifier not super useful.\nWe can therefore catch a lot of the \u0026ldquo;slow talking, long pausing\u0026rdquo; patients, but most speaking fluently will possibly need semantic information for prediction.\nI have some preliminary results on Pitt+ERNIE (a kind of BERT) that indicate that a key semantic factor is \u0026ldquo;on-topicness.\u0026rdquo; However, Pitt does not contain a lot of off-topic control data (say, the fluency task, which it has for dementia) for me to validate those claims easily. I will continue work on that front.\n","html":"\u003cp\u003eApplying the MFA aligner upon the Pitt (cookie only) data and performing statistics upon the calculated disfluency information. The ultimate goal is to replicate \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe code \u003ca href=\"https://github.com/Jemoka/DBA/blob/f01862efe3fe7c196ff63252d73c86f1b64f03af/analyze.py#L154-L198\"\u003eis available here\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe (unvalidated, draft) results are reported below:\u003c/p\u003e\n\u003cp\u003eMean value reported, standard deviation in parens. 
For our data, \\(N=422\\), cases balanced.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eVariable\u003c/th\u003e\n\u003cth\u003eAD (Pitt, ours)\u003c/th\u003e\n\u003cth\u003eMCI (Wang)\u003c/th\u003e\n\u003cth\u003eControl (ours)\u003c/th\u003e\n\u003cth\u003eControl (Wang)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSilence Duration\u003c/td\u003e\n\u003ctd\u003e28.10 (21.28)\u003c/td\u003e\n\u003ctd\u003e13.55 (5.53)\u003c/td\u003e\n\u003ctd\u003e18.06 (12.52)\u003c/td\u003e\n\u003ctd\u003e7.71 (5.03)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSpeech Duration*\u003c/td\u003e\n\u003ctd\u003e23.77 (14.11)\u003c/td\u003e\n\u003ctd\u003e46.64 (5.79)\u003c/td\u003e\n\u003ctd\u003e27.23 (15.3)\u003c/td\u003e\n\u003ctd\u003e53.63 (7.82)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eVoice-Silence Ratio\u003c/td\u003e\n\u003ctd\u003e1.79 (4.88)\u003c/td\u003e\n\u003ctd\u003e4.43 (2.78)\u003c/td\u003e\n\u003ctd\u003e5.78 (31.95)\u003c/td\u003e\n\u003ctd\u003e10.11 (6.05)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eVerbal Rate\u003c/td\u003e\n\u003ctd\u003e1.59 (0.61)\u003c/td\u003e\n\u003ctd\u003e1.56 (0.40)\u003c/td\u003e\n\u003ctd\u003e1.989 (0.51)\u003c/td\u003e\n\u003ctd\u003e1.91 (0.43)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e*speech duration would obviously vary with file length\u003c/p\u003e\n\u003cp\u003eFurther statistical quantification also tells us some more things. Although the data does not make a good classifier, I performed two tests: a \u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKolmogorov-Smirnov test\u003c/a\u003e for goodness of fit, and a good \u0026lsquo;ol Pearson\u0026rsquo;s correlation with AD/control target. 
p-values are reported below.\u003c/p\u003e\n\u003ch2 id=\"ks-test--kbhkolmogorov-smirnov-test-dot-md\"\u003e\u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKS test\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esilence duration: \\(1.31 \\times 10^{-5}\\)\u003c/li\u003e\n\u003cli\u003espeech duration: \\(2.98 \\times 10^{-3}\\)\u003c/li\u003e\n\u003cli\u003evoice-silence ratio: \\(2.01 \\times 10^{-7}\\)\u003c/li\u003e\n\u003cli\u003everbal rate: \\(4.32 \\times 10^{-10}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pearson-s\"\u003ePearson\u0026rsquo;s\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esilence duration: \\(4.15 \\times 10^{-8}\\)\u003c/li\u003e\n\u003cli\u003espeech duration: \\(0.164\\)\u003c/li\u003e\n\u003cli\u003evoice-silence ratio: \\(0.732\\)\u003c/li\u003e\n\u003cli\u003everbal rate: \\(1.22 \\times 10^{-12}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs per the values reported in \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e, we can see that\u0026mdash;apart from audio metadata\u0026mdash;verbal rate is a strongly correlated indicator against MCI/AD. We can reasonably say that \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u0026rsquo;s\u003c/a\u003e data collection can be automated with reasonable success using \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e + MFA.\u003c/p\u003e\n\u003ch2 id=\"broken-ml\"\u003eBroken ML\u003c/h2\u003e\n\u003cp\u003eI applied an RBF Support-Vector machine to classify AD/control based only on the two most highly correlated variables: verbal rate and silence duration. The results were disappointing.\u003c/p\u003e\n\u003cp\u003eOn test data, N=42, balanced labels:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSVC: \\(61.9\\%\\)\u003c/li\u003e\n\u003cli\u003eRandom forest: also \\(61.9\\%\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe have fairly disappointing results. 
Here\u0026rsquo;s my hypothesis of why:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-12_15-50-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eif you take a look at this figure, we can see two main distributions\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-12_15-52-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo, if we, like \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e, used statistics on independence (they used \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e, I used \u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKS test\u003c/a\u003e), we \u003cem\u003ewill\u003c/em\u003e come up that the distributions are different.\u003c/p\u003e\n\u003cp\u003eHowever, if you take a look at a randomly sampled set of validation data (crosses on the figure), you can see that a lot of them lands in the \u0026ldquo;mostly control\u0026rdquo; area: making the classifier not super useful.\u003c/p\u003e\n\u003cp\u003eWe can therefore catch a lot of the \u0026ldquo;slow talking, long pausing\u0026rdquo; patients, but most speaking fluently will possibly need semantic information for prediction.\u003c/p\u003e\n\u003cp\u003eI have some preliminary results on Pitt+ERNIE (a kind of BERT) that indicate that a key semantic factor is \u0026ldquo;on-topicness.\u0026rdquo; However, Pitt does not contain a lot of off-topic control data (say, the fluency task, which it has for dementia) for me to validate those claims easily. I will continue work on that front.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmfa_disfluency_measurement/","tags":null,"title":"MFA Disfluency Measurement"},{"categories":null,"contents":" Lanzi WNL (August 12) 1%. Selection Seed 7. Houjun. 82.64% ± 4.48% with a 95% confidence. Lanzi MCI (August 12) 1%. Selection Seed 7. Houjun. 78.70% ± 7.85% with a 95% confidence. Lanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced. 
Houjun. Within which, 90.97%±3.40% of multi-syllabic words were correctly identified 86.28%±4.08% of mono-syllabic words were correctly identified 88.63%±2.65% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test. Lanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced. Houjun. Within which, 76.85%±8.08% of multi-syllabic words were correctly identified 72.22%±8.58% of mono-syllabic words were correctly identified 74.54%±5.86% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test. Lanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. Houjun. Within which, 96.75%±2.10% of multi-syllabic words were correctly identified 90.61%±3.46% of mono-syllabic words were correctly identified 93.68%±2.03% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test.\nWithin sucesseses, 16.57% are partial.\nLanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. Houjun. Within which, 91.67%±5.30% of multi-syllabic words were correctly identified 78.70%±7.85% of mono-syllabic words were correctly identified 85.19%±4.78% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test.\nWithin sucesseses, 18.48% are partial.\n","html":"\u003col\u003e\n\u003cli\u003eLanzi WNL (August 12) 1%. Selection Seed 7. Houjun.\n82.64% ± 4.48% with a 95% confidence.\u003c/li\u003e\n\u003cli\u003eLanzi MCI (August 12) 1%. Selection Seed 7. Houjun.\n78.70% ± 7.85% with a 95% confidence.\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003eLanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced. 
Houjun.\nWithin which, 90.97%±3.40% of multi-syllabic words were correctly identified\n86.28%±4.08% of mono-syllabic words were correctly identified\n88.63%±2.65% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/li\u003e\n\u003cli\u003eLanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced. Houjun.\nWithin which, 76.85%±8.08% of multi-syllabic words were correctly identified\n72.22%±8.58% of mono-syllabic words were correctly identified\n74.54%±5.86% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eLanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. Houjun.\nWithin which, 96.75%±2.10% of multi-syllabic words were correctly identified\n90.61%±3.46% of mono-syllabic words were correctly identified\n93.68%±2.03% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/p\u003e\n\u003cp\u003eWithin sucesseses, 16.57% are partial.\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. 
Houjun.\nWithin which, 91.67%±5.30% of multi-syllabic words were correctly identified\n78.70%±7.85% of mono-syllabic words were correctly identified\n85.19%±4.78% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/p\u003e\n\u003cp\u003eWithin sucesseses, 18.48% are partial.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmfa_performance_statistics/","tags":null,"title":"MFA Performance Statistics"},{"categories":null,"contents":"Mia is a student at the Nueva School\n","html":"\u003cp\u003eMia is a student at the Nueva School\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmia_tavares/","tags":null,"title":"Mia Tavares"},{"categories":null,"contents":"Micah Brown is a student at The Nueva School, also the host of Project80, among other things.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmicah_brown/\"\u003eMicah Brown\u003c/a\u003e is a student at The Nueva School, also the host of \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e, among other things.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmicah_brown/","tags":null,"title":"Micah Brown"},{"categories":null,"contents":"Milton Freedman is an economist.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e is an economist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmilton_freedman/","tags":null,"title":"Milton Freedman"},{"categories":null,"contents":"MMSE is not mean squared error! It is a short mental state test to measure one\u0026rsquo;s neuralpsycological capabilities; frequently used as a first line by a psycologist.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e is \u003cstrong\u003enot\u003c/strong\u003e mean squared error! 
It is a short mental state test to measure one\u0026rsquo;s neuralpsycological capabilities; frequently used as a first line by a psycologist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmmse/","tags":null,"title":"Mini-Mental State Examination"},{"categories":null,"contents":"\u0026ldquo;minimum edit distance\u0026rdquo; is one approach to solving the problem of \u0026ldquo;how similar are these two strings\u0026rdquo;? minimum edit distance is defined by the smallest number of editing operations (insertion, deletion, substitution) needed to transform one string into another.\nThere are two technical definitions. Both definitions are grounded upon \u0026ldquo;minimum number of operations it takes to transform a string into another, where\u0026rdquo;\nedit distance with DP\nDP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\nStandard Edit Distance insertion, deletion, and substitution cost 1\nLevenshtein Distance insertion and deletion cost 1; substitution cost 2\nExample For instance: \u0026ldquo;graffe\u0026rdquo;. Is\u0026hellip;\ngraf graft grail giraffe the closest?\nCommon Use machine translation + speech recognition uses edit distance to evaluate output quality coreference and NER uses edit distance as a baseline check ","html":"\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e\u0026rdquo; is one approach to solving the problem of \u0026ldquo;how similar are these two strings\u0026rdquo;? \u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e is defined by the smallest number of editing operations (insertion, deletion, substitution) needed to transform one string into another.\u003c/p\u003e\n\u003cp\u003eThere are two technical definitions. 
Both definitions are grounded upon \u0026ldquo;minimum number of operations it takes to transform a string into another, where\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhedit_distance_with_dp/\"\u003eedit distance with DP\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eDP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\u003c/p\u003e\n\u003ch2 id=\"standard-edit-distance\"\u003eStandard Edit Distance\u003c/h2\u003e\n\u003cp\u003einsertion, deletion, and substitution cost 1\u003c/p\u003e\n\u003ch2 id=\"levenshtein-distance\"\u003eLevenshtein Distance\u003c/h2\u003e\n\u003cp\u003einsertion and deletion cost 1; substitution cost 2\u003c/p\u003e\n\u003ch2 id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cp\u003eFor instance: \u0026ldquo;graffe\u0026rdquo;. Is\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003egraf\u003c/li\u003e\n\u003cli\u003egraft\u003c/li\u003e\n\u003cli\u003egrail\u003c/li\u003e\n\u003cli\u003egiraffe\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethe closest?\u003c/p\u003e\n\u003ch2 id=\"common-use\"\u003eCommon Use\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003emachine translation + speech recognition uses edit distance to evaluate output quality\u003c/li\u003e\n\u003cli\u003ecoreference and NER uses edit distance as a baseline check\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhminimum_edit_distance/","tags":null,"title":"minimum edit distance"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhminimum_spanning_tree/","tags":null,"title":"minimum spanning tree"},{"categories":null,"contents":"How many disturbance users can coveather take without crashing? 
Let\u0026rsquo;s find out.\nCode Util function to mapreduce a list:\ndef multiplyList(l) : # Multiply elements one by one result = 1 for x in l: result = result * x return result We first set a user count:\nN = var(\u0026#34;N\u0026#34;) # Pool size val_percent = var(\u0026#34;val_percent\u0026#34;) # Pools val_pool = N*val_percent user_pool = N*(1-val_percent) # Disturbance disturbance_percent = var(\u0026#34;disturbance_percent\u0026#34;) # Validation Pools + Disburbance val_disturbance_pool = disturbance_percent*val_pool val_normal_pool = (1-disturbance_percent)*val_pool # Chance of three or more disturbance attestors # which is equal to one minus chance of zero, one, or two disturbance attesors no_disturbance_attestor = (val_normal_pool/val_pool)*((val_normal_pool-1)/(val_pool-1))*((val_normal_pool-2)/(val_pool-2))*((val_normal_pool-3)/(val_pool-3)) one_disturbance = [] for disturbance_point in range(0,4): res = [] res.append((val_disturbance_pool)/(val_pool-disturbance_point)) for pre_disturbance in range(0,disturbance_point): res.append((val_normal_pool-pre_disturbance)/(val_pool-pre_disturbance)) for post_disturbance in range(disturbance_point+1,4): res.append((val_normal_pool-post_disturbance)/(val_pool-post_disturbance)) one_disturbance.append(multiplyList(res)) one_disturbance_attestor = sum(one_disturbance) two_disturbance = [] for disturbance_point_i in range(0,4): for disturbance_point_j in range(disturbance_point_i+1,4): res = [] res.append((val_disturbance_pool)/(val_pool-disturbance_point_i)) res.append((val_disturbance_pool-1)/(val_pool-disturbance_point_j)) for pre_i_disturbance in range(0,disturbance_point_i): res.append((val_normal_pool-pre_disturbance)/(val_pool-pre_disturbance)) for sandwich in range(disturbance_point_i+1,disturbance_point_j): res.append((val_normal_pool-post_disturbance)/(val_pool-sandwich)) for post_j_disturbance in range(disturbance_point_j+1,4): res.append((val_normal_pool-post_disturbance)/(val_pool-post_j_disturbance)) 
two_disturbance.append(multiplyList(res)) two_disturbance_attestor = sum(two_disturbance) distubrance_chance(N, val_percent, disturbance_percent) = expand(1-(no_disturbance_attestor+one_disturbance_attestor+two_disturbance_attestor)) # no_disturbance_attestor (N*(disturbance_percent - 1)*val_percent + 3)*(N*(disturbance_percent - 1)*val_percent + 2)*(N*(disturbance_percent - 1)*val_percent + 1)*(disturbance_percent - 1)/((N*val_percent - 1)*(N*val_percent - 2)*(N*val_percent - 3)) z = var(\u0026#34;z\u0026#34;) val_dist(val_percent, disturbance_percent) = distubrance_chance(100, val_percent, disturbance_percent) implicit_plot3d(val_dist-z, (val_percent,0.1,1), (disturbance_percent, 0,1), (z, 0,1) ,frame=True,axes_labels=[\u0026#39;Validation\u0026#39;,\u0026#39;Disturbance\u0026#39;, \u0026#39;Chance\u0026#39;],axes=False, color=(val_dist,colormaps.Blues)) Launched html viewer for Graphics3d Object z = var(\u0026#34;z\u0026#34;) n_dist(N, disturbance_percent) = distubrance_chance(N, 0.1, disturbance_percent) show(implicit_plot3d(n_dist-z, (N,100,100000), (disturbance_percent, 0,1), (z, 0,1) ,frame=True,axes_labels=[\u0026#39;N\u0026#39;,\u0026#39;Disturbance\u0026#39;, \u0026#39;Chance\u0026#39;],axes=False, color=(n_dist,colormaps.Blues)), aspect_ratio=[1,100000,100000], plot_points=100) Launched html viewer for Graphics3d Object n_dir(N) = distubrance_chance(N, 0.1, 0.1) # plot(n_dir, (N,100,100000),axes_labels=[\u0026#39;N\u0026#39;, \u0026#39;Disturbance\u0026#39;], thickness=1) # solve(distubrance_chance(100, N, 0.1)==0.01, N, to_poly_solve=True) # implicit_plot(distubrance_chance(100, N, 0.1)==0.01, (N, 0,1), (z, 0, # solve(distubrance_chance(N, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True) # implicit_plot(solve(distubrance_chance(N, val_perc, 0.1)==0.01, val_perc)[0]) # val_perc = var(\u0026#34;var_perc\u0026#34;) show(implicit_plot(distubrance_chance(N, val_perc, 0.1)==0.01, (N, 15, 1000), (val_perc, 0,1), 
plot_points=300,axes_labels=[\u0026#39;N\u0026#39;,\u0026#39;Val Ratio\u0026#39;],axes=False), aspect_ratio=800) # solve(distubrance_chance(800, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True) \u0026lt;/Users/houliu/.sage/temp/baboon.jemoka.com/64368/tmp_9bdcu2si.pn\u0026gt;\n","html":"\u003cp\u003eHow many disturbance users can \u003ca href=\"/posts/kbhcoveather/\"\u003ecoveather\u003c/a\u003e take without crashing? Let\u0026rsquo;s find out.\u003c/p\u003e\n\u003ch2 id=\"code\"\u003eCode\u003c/h2\u003e\n\u003cp\u003eUtil function to mapreduce a list:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emultiplyList\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Multiply elements one by one\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe first set a user count:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;N\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Pool size\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;val_percent\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Pools\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003euser_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Disturbance\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;disturbance_percent\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Validation Pools + Disburbance\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Chance of three or more disturbance 
attestors\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# which is equal to one minus chance of zero, one, or two disturbance attesors\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eno_disturbance_attestor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eone_disturbance\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emultiplyList\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance_attestor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epre_i_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esandwich\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esandwich\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epost_j_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_j_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etwo_disturbance\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emultiplyList\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance_attestor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eno_disturbance_attestor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance_attestor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance_attestor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# no_disturbance_attestor\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(N*(disturbance_percent - 1)*val_percent + 3)*(N*(disturbance_percent - 1)*val_percent + 2)*(N*(disturbance_percent - 1)*val_percent + 1)*(disturbance_percent - 1)/((N*val_percent - 1)*(N*val_percent - 2)*(N*val_percent - 3))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;z\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimplicit_plot3d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_dist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eframe\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes_labels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Validation\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Disturbance\u0026#39;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;Chance\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolormaps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBlues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLaunched html viewer for Graphics3d Object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;z\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimplicit_plot3d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_dist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e100000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eframe\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes_labels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;N\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Disturbance\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;Chance\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolormaps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBlues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003easpect_ratio\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot_points\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLaunched html viewer for Graphics3d Object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_dir\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# plot(n_dir, (N,100,100000),axes_labels=[\u0026#39;N\u0026#39;, \u0026#39;Disturbance\u0026#39;], thickness=1)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solve(distubrance_chance(100, N, 0.1)==0.01, N, to_poly_solve=True)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# implicit_plot(distubrance_chance(100, N, 0.1)==0.01, (N, 0,1), (z, 0,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solve(distubrance_chance(N, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# implicit_plot(solve(distubrance_chance(N, val_perc, 0.1)==0.01, 
val_perc)[0])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# val_perc = var(\u0026#34;var_perc\u0026#34;)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimplicit_plot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_perc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e15\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_perc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot_points\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e300\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes_labels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;N\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Val Ratio\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003easpect_ratio\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e800\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solve(distubrance_chance(800, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u0026lt;/Users/houliu/.sage/temp/baboon.jemoka.com/64368/tmp_9bdcu2si.pn\u0026gt;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhminimum_user_base_requirements_for_coveather/","tags":null,"title":"minimum user base requirements for 
coveather"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhminimum_wage/","tags":null,"title":"minimum wage"},{"categories":null,"contents":"Why so many stock exchanges? Because the FTC just allows you to make\u0026rsquo;em as desired.\nWhy doesn\u0026rsquo;t the market trade 24 hours a day? Because the institutional traders can only trade 2 hours a day: the beginning of the day, or the end of the day. Otherwise, there are not enough volume for the institutional traders to be able to trade at their size. See Volume Profile.\nWhat\u0026rsquo;s a good \u0026ldquo;full view\u0026rdquo; of the stock? The order book! You can actually see it by paying money to the exchange. You want to subscribe to every order for every exchange. How to large traders strategically break stocks? \u0026ldquo;How long should I take?\u0026rdquo;\nWhy are some Ethernet ports worth a lot more than others? Some amount of trading (10-20%) is done at light speed. Cable lengths of about a foot change the stock dramatically.\n","html":"\u003ch2 id=\"why-so-many-stock-exchanges\"\u003eWhy so many stock exchanges?\u003c/h2\u003e\n\u003cp\u003eBecause the FTC just allows you to make\u0026rsquo;em as desired.\u003c/p\u003e\n\u003ch2 id=\"why-doesn-t-the-market-trade-24-hours-a-day\"\u003eWhy doesn\u0026rsquo;t the market trade 24 hours a day?\u003c/h2\u003e\n\u003cp\u003eBecause the institutional traders can only trade 2 hours a day: the beginning of the day, or the end of the day. Otherwise, there are not enough volume for the institutional traders to be able to trade at their size. See \u003ca href=\"/posts/kbhvwap/#volume-profile\"\u003eVolume Profile\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"what-s-a-good-full-view-of-the-stock\"\u003eWhat\u0026rsquo;s a good \u0026ldquo;full view\u0026rdquo; of the stock?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe order book! 
You can actually see it by paying money to the exchange.\u003c/li\u003e\n\u003cli\u003eYou want to subscribe to every order for every exchange.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-large-traders-strategically-break-stocks\"\u003eHow to large traders strategically break stocks?\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;How long should I take?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"why-are-some-ethernet-ports-worth-a-lot-more-than-others\"\u003eWhy are some Ethernet ports worth a lot more than others?\u003c/h2\u003e\n\u003cp\u003eSome amount of trading (10-20%) is done at light speed. Cable lengths of about a foot change the stock dramatically.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmisc_financial_market_questions/","tags":null,"title":"Misc. Financial Market Questions"},{"categories":null,"contents":"Focus on protease: inhibition helps inhibit viral replication; and it is conserved across most coronaviruses; so good point to start working in drug development.\nTake smaller binding fragments covering the binding site, and combine them together Try to combine these fragments together into a molecule that fits well with the binding site protease inhibition is usually achieved with a covalent peptide bond, but this crowd-sourcing effort showed that\nmachine-learning rapid library synthesis begin with some guess for the model molecule then, use ML to perform modifications to the molecule really quickly by scanning though (\u0026ldquo;ML-prioritized rapid library synthesis\u0026rdquo;) a bunch of changes to the molecule pick and repeat Molecular Transformer THROW THE FUCKING REACTION INTO AN LLM, as WORDS\nI desire death\nSo; taking reactants + reagents as input; guess the product.\nas input; guess the product.\nSABER decompose molecule into building blocks make biostesters of the building blocks change crap limitations ML can\u0026rsquo;t extrapolate into unknown search space and it could come up with bullshit; so to fix:\nusing 
physics to create correct docking structures use ML to perform last mile optimization ","html":"\u003cp\u003eFocus on \u003ca href=\"/posts/kbhprotease/\"\u003eprotease\u003c/a\u003e: inhibition helps inhibit viral replication; and \u003cstrong\u003eit is conserved across most coronaviruses\u003c/strong\u003e; so good point to start working in drug development.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTake smaller binding fragments covering the binding site, and combine them together\u003c/li\u003e\n\u003cli\u003eTry to combine these fragments together into a molecule that fits well with the binding site\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhprotease/\"\u003eprotease\u003c/a\u003e inhibition is usually achieved with a covalent peptide bond, but this crowd-sourcing effort showed that\u003c/p\u003e\n\u003ch2 id=\"machine-learning-rapid-library-synthesis\"\u003emachine-learning rapid library synthesis\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ebegin with some guess for the model molecule\u003c/li\u003e\n\u003cli\u003ethen, use ML to perform modifications to the molecule really quickly by scanning though (\u0026ldquo;ML-prioritized rapid library synthesis\u0026rdquo;) a bunch of changes to the molecule\u003c/li\u003e\n\u003cli\u003epick and repeat\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"molecular-transformer\"\u003eMolecular Transformer\u003c/h2\u003e\n\u003cp\u003eTHROW THE FUCKING REACTION INTO AN LLM, as WORDS\u003c/p\u003e\n\u003cp\u003e\u003cem\u003eI desire death\u003c/em\u003e\u003c/p\u003e\n\u003cp\u003eSo; taking reactants + reagents as input; guess the product.\u003c/p\u003e\n\u003cp\u003eas input; guess the product.\u003c/p\u003e\n\u003ch2 id=\"saber\"\u003eSABER\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003edecompose molecule into building blocks\u003c/li\u003e\n\u003cli\u003emake biostesters of the building blocks\u003c/li\u003e\n\u003cli\u003echange crap\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 
id=\"limitations\"\u003elimitations\u003c/h2\u003e\n\u003cp\u003eML can\u0026rsquo;t extrapolate into unknown search space and it could come up with bullshit; so to fix:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eusing physics to create correct docking structures\u003c/li\u003e\n\u003cli\u003euse ML to perform last mile optimization\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhml_drug_discovery/","tags":null,"title":"ML COVID Drug Discovery"},{"categories":null,"contents":"MLib is a machine learning library built on top of Spark.\nfrom pyspalk.mllib.clustering import KMeans KMeans(rdd) where you pass the MLib a PySpark RDD\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmlib/\"\u003eMLib\u003c/a\u003e is a machine learning library built on top of \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epyspalk.mllib.clustering\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erdd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhere you pass the \u003ccode\u003eMLib\u003c/code\u003e a PySpark \u003ca 
href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmlib/","tags":null,"title":"MLib"},{"categories":null,"contents":"Reading notes Malcom X\u0026rsquo;s father was an active prechear in the scene Malcom X and MLK are both made mostly charactures out of context Malcom X had a belligent upbringing with a belligent father, whereas MLK lived in relative comfort as a son of a successful minister Malcom was sent into white foster families as his mother became institutionalized Becasue of his experience in foster system, Malcom tried to pass/be white King\u0026rsquo;s nonviolent priciples not understood and became conflicted with ideas of local leaders Malcom found a father figure in the Nation of Islam, changing his name in prison MLK had more positive African American role models in life Malcom X disallusioned with the policy of nonengagement by the nation of islam Malcom X had support over racial seperatism Nation of Islam wanted to create a completely seperate Black state, promoting Black Nationalism secret Malcom X wanted break because of skeptism again Eli Mohammed Malcom charged MLK with infiltration Martin believes that the process of voilence is a form of naïve expression King believes that the \u0026ldquo;strong demagogic oratory\u0026rdquo; of Malcom was detrimental and extremist Martin believes that the personal nature of assults from Malcom maybe result in physical assult Malcom was suspended during 1963, and became independent\u0026mdash;wanted to combine religion and politics like King Malcom began forging ties with millitan Black movement Martin regretted that integration has not proceeded, but believed it would have been difficult anyways Rejected nonviolent and intergrational movement People saw King and X\u0026rsquo;s ideas inrecosiliable But, King and X themselves made a possible shared ending by the end Believes that suicides were cut short Racial pride was a centering point: while Malcom saw 
it as something to be harbored, Martin saw it as inate ","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"malcom-x-s-father-was-an-active-prechear-in-the-scene\"\u003eMalcom X\u0026rsquo;s father was an active prechear in the scene\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-and-mlk-are-both-made-mostly-charactures-out-of-context\"\u003eMalcom X and MLK are both made mostly charactures out of context\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-had-a-belligent-upbringing-with-a-belligent-father-whereas-mlk-lived-in-relative-comfort-as-a-son-of-a-successful-minister\"\u003eMalcom X had a belligent upbringing with a belligent father, whereas MLK lived in relative comfort as a son of a successful minister\u003c/h3\u003e\n\u003ch3 id=\"malcom-was-sent-into-white-foster-families-as-his-mother-became-institutionalized\"\u003eMalcom was sent into white foster families as his mother became institutionalized\u003c/h3\u003e\n\u003ch3 id=\"becasue-of-his-experience-in-foster-system-malcom-tried-to-pass-be-white\"\u003eBecasue of his experience in foster system, Malcom tried to pass/be white\u003c/h3\u003e\n\u003ch3 id=\"king-s-nonviolent-priciples-not-understood-and-became-conflicted-with-ideas-of-local-leaders\"\u003eKing\u0026rsquo;s nonviolent priciples not understood and became conflicted with ideas of local leaders\u003c/h3\u003e\n\u003ch3 id=\"malcom-found-a-father-figure-in-the-nation-of-islam-changing-his-name-in-prison\"\u003eMalcom found a father figure in the Nation of Islam, changing his name in prison\u003c/h3\u003e\n\u003ch3 id=\"mlk-had-more-positive-african-american-role-models-in-life\"\u003eMLK had more positive African American role models in life\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-disallusioned-with-the-policy-of-nonengagement-by-the-nation-of-islam\"\u003eMalcom X disallusioned with the policy of nonengagement by the nation of islam\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-had-support-over-racial-seperatism\"\u003eMalcom X had 
support over racial seperatism\u003c/h3\u003e\n\u003ch3 id=\"nation-of-islam-wanted-to-create-a-completely-seperate-black-state-promoting-black-nationalism-secret\"\u003eNation of Islam wanted to create a completely seperate Black state, promoting Black Nationalism secret\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-wanted-break-because-of-skeptism-again-eli-mohammed\"\u003eMalcom X wanted break because of skeptism again Eli Mohammed\u003c/h3\u003e\n\u003ch3 id=\"malcom-charged-mlk-with-infiltration\"\u003eMalcom charged MLK with infiltration\u003c/h3\u003e\n\u003ch3 id=\"martin-believes-that-the-process-of-voilence-is-a-form-of-naïve-expression\"\u003eMartin believes that the process of voilence is a form of naïve expression\u003c/h3\u003e\n\u003ch3 id=\"king-believes-that-the-strong-demagogic-oratory-of-malcom-was-detrimental-and-extremist\"\u003eKing believes that the \u0026ldquo;strong demagogic oratory\u0026rdquo; of Malcom was detrimental and extremist\u003c/h3\u003e\n\u003ch3 id=\"martin-believes-that-the-personal-nature-of-assults-from-malcom-maybe-result-in-physical-assult\"\u003eMartin believes that the personal nature of assults from Malcom maybe result in physical assult\u003c/h3\u003e\n\u003ch3 id=\"malcom-was-suspended-during-1963-and-became-independent-wanted-to-combine-religion-and-politics-like-king\"\u003eMalcom was suspended during 1963, and became independent\u0026mdash;wanted to combine religion and politics like King\u003c/h3\u003e\n\u003ch3 id=\"malcom-began-forging-ties-with-millitan-black-movement\"\u003eMalcom began forging ties with millitan Black movement\u003c/h3\u003e\n\u003ch3 id=\"martin-regretted-that-integration-has-not-proceeded-but-believed-it-would-have-been-difficult-anyways\"\u003eMartin regretted that integration has not proceeded, but believed it would have been difficult anyways\u003c/h3\u003e\n\u003ch3 id=\"rejected-nonviolent-and-intergrational-movement\"\u003eRejected nonviolent and intergrational 
movement\u003c/h3\u003e\n\u003ch3 id=\"people-saw-king-and-x-s-ideas-inrecosiliable\"\u003ePeople saw King and X\u0026rsquo;s ideas inrecosiliable\u003c/h3\u003e\n\u003ch3 id=\"but-king-and-x-themselves-made-a-possible-shared-ending-by-the-end\"\u003eBut, King and X themselves made a possible shared ending by the end\u003c/h3\u003e\n\u003ch3 id=\"believes-that-suicides-were-cut-short\"\u003eBelieves that suicides were cut short\u003c/h3\u003e\n\u003ch3 id=\"racial-pride-was-a-centering-point-while-malcom-saw-it-as-something-to-be-harbored-martin-saw-it-as-inate\"\u003eRacial pride was a centering point: while Malcom saw it as something to be harbored, Martin saw it as inate\u003c/h3\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmlk_and_malcom_x_reading/","tags":null,"title":"MLK and Malcom X Reading"},{"categories":null,"contents":"Modal is a cloud deployment system that\u0026rsquo;s entirely programmatic. No yaml:\nimport modal stub = modal.stub(gpu=\u0026#34;a100\u0026#34;) @stub.function() def fit(x): import whatever whatever.thing() So think run.house, but they have the infra.\nfine-tuning with Modal https://github.com/modal-labs/llama-recipes\nYou can store the serverless functions, and Modal can serve stored serverless functions. Modal have web hooks as well to do inference at a front end.\nModal can serve most the management as well.\npricing 13B: 500 tokens/s on 40GB AA10 (3.73 / hour) 70B: 300 tok /s 80 GB( 2* 5.59/hour)\n","html":"\u003cp\u003eModal is a cloud deployment system that\u0026rsquo;s entirely programmatic. 
No yaml:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emodal\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003estub\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emodal\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estub\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egpu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;a100\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003e@stub.function\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ewhatever\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ewhatever\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ething\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo think run.house, but they have the infra.\u003c/p\u003e\n\u003ch2 id=\"fine-tuning-with-modal\"\u003efine-tuning with Modal\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://github.com/modal-labs/llama-recipes\"\u003ehttps://github.com/modal-labs/llama-recipes\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eYou can store the serverless functions, and \u003ca href=\"/posts/kbhmodal/\"\u003eModal\u003c/a\u003e can serve stored serverless functions. Modal have web hooks as well to do inference at a front end.\u003c/p\u003e\n\u003cp\u003eModal can serve most the management as well.\u003c/p\u003e\n\u003ch2 id=\"pricing\"\u003epricing\u003c/h2\u003e\n\u003cp\u003e13B: 500 tokens/s on 40GB AA10 (3.73 / hour)\n70B: 300 tok /s 80 GB( 2* 5.59/hour)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodal/","tags":null,"title":"Modal"},{"categories":null,"contents":"modalization is the\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmodalization/\"\u003emodalization\u003c/a\u003e is the\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodalization/","tags":null,"title":"modalization"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmodel_bae/","tags":null,"title":"model bae"},{"categories":null,"contents":"Extrinsic Evaluation Extrinsic Evaluation, also known as In-Vivo Evaluation, focuses on benchmarking two language models in terms of their differing performance on a test task.\nIntrinsic Evaluation In-Vitro Evaluation or Intrinsic Evaluation 
focuses on evaluating the language models\u0026rsquo; performance at, well, language modeling.\nTypically, we use perplexity.\ndirectly measure language model performance doesn\u0026rsquo;t necessarily correspond with real applications ","html":"\u003ch2 id=\"extrinsic-evaluation\"\u003eExtrinsic Evaluation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#extrinsic-evaluation\"\u003eExtrinsic Evaluation\u003c/a\u003e, also known as \u003ca href=\"#extrinsic-evaluation\"\u003eIn-Vivo Evaluation\u003c/a\u003e, focuses on benchmarking two language models in terms of their differing performance on a test task.\u003c/p\u003e\n\u003ch2 id=\"intrinsic-evaluation\"\u003eIntrinsic Evaluation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#intrinsic-evaluation\"\u003eIn-Vitro Evaluation\u003c/a\u003e or \u003ca href=\"#intrinsic-evaluation\"\u003eIntrinsic Evaluation\u003c/a\u003e focuses on evaluating the language models\u0026rsquo; performance at, well, language modeling.\u003c/p\u003e\n\u003cp\u003eTypically, we use \u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edirectly measure language model performance\u003c/li\u003e\n\u003cli\u003edoesn\u0026rsquo;t necessarily correspond with real applications\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodel_evaluation/","tags":null,"title":"Model Evaluation"},{"categories":null,"contents":"Step 1: Getting Model We want a model\n\\(T\\): transition probability \\(R\\): rewards Maximum Likelihood Parameter Learning Method \\begin{equation} N(s,a,s\u0026rsquo;) \\end{equation}\nwhich is the count of transitions from \\(s,a\\) to \\(s\u0026rsquo;\\) and increment it as \\(s, a, s\u0026rsquo;\\) gets observed. 
This makes, with Maximum Likelihood Parameter Learning:\n\\begin{equation} T(s\u0026rsquo; | s,a) = \\frac{N(s,a,s\u0026rsquo;)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{} N(s,a,s\u0026rsquo;\u0026rsquo;)} \\end{equation}\nWe also keep a table:\n\\begin{equation} p(s,a) \\end{equation}\nthe sum of rewards when taking \\(s,a\\). To calculate a reward, we take the average:\n\\begin{equation} R(s,a) \\approx \\frac{p(s,a)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{}N(s,a,s\u0026rsquo;\u0026rsquo;)} \\end{equation}\nBaysian Parameter Learning Method We build a Dirichlet Distribution; let:\n\\begin{equation} \\vec{\\theta}_{(s,a)} = \\mqty[T(s_1 | s,a) \\\\ \\dots\\\\ T(s_{n} | s,a)] \\end{equation}\nWe then calculate a distribution:\n\\begin{equation} Dir(\\vec{\\theta}_{s,a} | \\vec{N}_{s,a}) \\end{equation}\nwhich will give you a probability over a set of transitions.\nThen, when we need a transition \\(T\\), we perform Posterior Sampling on this Dirichlet Distribution at every episode (or so, otherwise the model shifts a lot) and then optimize on that.\nGetting rewards is an advanced topic. So let\u0026rsquo;s just use Maximum Likelihood Parameter Learning or assume its given\nStep 2: Getting Value Function. full update One direct strategy to work on this, then, is to use whatever transition and rewards you observed to perform value iteration or policy iteration. First go through one or a bunch of observations, then take a full value iteration or policy iteration sweep, and then go back and take more measurements.\nrandomized update We randomly update a single state:\n\\begin{equation} U(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)] \\end{equation}\nand take another observation, update our model estimate, and move on.\nprioritized updates Say we are current updating a state \\(s\\), and there are two previous states that could transition into \\(s\\). 
First we create an estimate like before:\n\\begin{equation} U(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)] \\end{equation}\nWe create a queue whose contents are ranked by:\n\\begin{equation} T(s|s^{-}, a^{-}) \\times |U(s)-u(s)| \\end{equation}\nwhere \\(u(s)\\) is \\(U(s)\\) prior to the update.\nWe move on to the next state to update by popping off the queue.\nStep 3: Explore a Little epsilon-greedy exploration with decay Softmax Method R-Max Most strategies above focuses on choosing a random action. This exploration focuses on adapting reward/transitions to explicitly explore new-state.\n\\begin{equation} R(s,a) = \\begin{cases} r_{\\max}, if N(s,a) \u0026lt; m,\\\\ \\rho\\frac{s,a}{N(s,a)}, otherwise \\end{cases} \\end{equation}\nyou get a large reward \\(r_{\\max }\\) if you haven\u0026rsquo;t been to \\((s,a)\\), otherwise the reward you get gets discounted by the number of times you visited.\n\\begin{equation} T(s\u0026rsquo;|s,a) = \\begin{cases} 1, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; = s \\\\ 0, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; \\neq s \\\\ \\frac{N(s,a,s\u0026rsquo;)}{N(s,a)}, otherwise \\end{cases} \\end{equation}\n","html":"\u003ch2 id=\"step-1-getting-model\"\u003eStep 1: Getting Model\u003c/h2\u003e\n\u003cp\u003eWe want a model\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T\\): transition probability\u003c/li\u003e\n\u003cli\u003e\\(R\\): rewards\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"maximum-likelihood-parameter-learning--kbhmaximum-likelihood-parameter-learning-dot-md--method\"\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e Method\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nN(s,a,s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is the count of transitions from \\(s,a\\) to \\(s\u0026rsquo;\\) and increment it as \\(s, a, s\u0026rsquo;\\) gets 
observed. This makes, with \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo; | s,a) = \\frac{N(s,a,s\u0026rsquo;)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{} N(s,a,s\u0026rsquo;\u0026rsquo;)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe also keep a table:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe sum of rewards when taking \\(s,a\\). To calculate a reward, we take the average:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(s,a) \\approx \\frac{p(s,a)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{}N(s,a,s\u0026rsquo;\u0026rsquo;)}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"baysian-parameter-learning--kbhbaysian-parameter-learning-dot-md--method\"\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e Method\u003c/h3\u003e\n\u003cp\u003eWe build a \u003ca href=\"/posts/kbhbaysian_parameter_learning/#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e; let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{\\theta}_{(s,a)} = \\mqty[T(s_1 | s,a) \\\\ \\dots\\\\ T(s_{n} | s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe then calculate a distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nDir(\\vec{\\theta}_{s,a} | \\vec{N}_{s,a})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich will give you a probability over a set of transitions.\u003c/p\u003e\n\u003cp\u003eThen, when we need a transition \\(T\\), we perform \u003ca href=\"/posts/kbhdirected_exploration/#posterior-sampling\"\u003ePosterior Sampling\u003c/a\u003e on this \u003ca href=\"/posts/kbhbaysian_parameter_learning/#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e at every episode (or so, otherwise the model shifts a lot) and then optimize on that.\u003c/p\u003e\n\u003cp\u003eGetting rewards is an advanced topic. 
So let\u0026rsquo;s just use \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e or assume its given\u003c/p\u003e\n\u003ch2 id=\"step-2-getting-value-function-dot\"\u003eStep 2: Getting Value Function.\u003c/h2\u003e\n\u003ch3 id=\"full-update\"\u003efull update\u003c/h3\u003e\n\u003cp\u003eOne direct strategy to work on this, then, is to use whatever transition and rewards you observed to perform \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e or \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e. First go through one or a bunch of observations, then take a full \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e or \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e sweep, and then go back and take more measurements.\u003c/p\u003e\n\u003ch3 id=\"randomized-update\"\u003erandomized update\u003c/h3\u003e\n\u003cp\u003eWe randomly update a single state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand take another observation, update our model estimate, and move on.\u003c/p\u003e\n\u003ch3 id=\"prioritized-updates\"\u003eprioritized updates\u003c/h3\u003e\n\u003cp\u003eSay we are current updating a state \\(s\\), and there are two previous states that could transition into \\(s\\). 
First we create an estimate like before:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe create a queue whose contents are ranked by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s|s^{-}, a^{-}) \\times |U(s)-u(s)|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(u(s)\\) is \\(U(s)\\) prior to the update.\u003c/p\u003e\n\u003cp\u003eWe move on to the next state to update by popping off the queue.\u003c/p\u003e\n\u003ch2 id=\"step-3-explore-a-little\"\u003eStep 3: Explore a Little\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/#epsilon-greedy-exploration-with-decay\"\u003eepsilon-greedy exploration with decay\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/#softmax-method\"\u003eSoftmax Method\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"r-max\"\u003eR-Max\u003c/h3\u003e\n\u003cp\u003eMost strategies above focuses on choosing a random action. 
This exploration focuses on adapting reward/transitions to explicitly explore new-state.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(s,a) = \\begin{cases}\nr_{\\max}, if N(s,a) \u0026lt; m,\\\\\n\\rho\\frac{s,a}{N(s,a)}, otherwise\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou get a large reward \\(r_{\\max }\\) if you haven\u0026rsquo;t been to \\((s,a)\\), otherwise the reward you get gets discounted by the number of times you visited.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo;|s,a) = \\begin{cases}\n1, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; = s \\\\\n0, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; \\neq s \\\\\n\\frac{N(s,a,s\u0026rsquo;)}{N(s,a)}, otherwise\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodel_based_reinforcement_learning/","tags":null,"title":"model-based reinforcement learning"},{"categories":null,"contents":"In model-based reinforcement learning, we tried real hard to get \\(T\\) and \\(R\\). What if we just estimated \\(Q(s,a)\\) directly? model-free reinforcement learning tends to be quite slow, compared to model-based reinforcement learning methods.\nreview: estimating mean of a random variable we got \\(m\\) points \\(x^{(1 \\dots m)} \\in X\\) , what is the mean of \\(X\\)?\n\\begin{equation} \\hat{x_{m}} = \\frac{1}{m} \\sum_{i=1}^{m} x^{(i)} \\end{equation}\n\\begin{equation} \\hat{x}_{m} = \\hat{x}_{m-1} + \\frac{1}{m} (x^{(m)} - \\hat{x}_{m-1}) \\end{equation}\nevery time you get a new measurement \\(x^{(m)}\\). 
sometimes we don\u0026rsquo;t scale it by \\(\\frac{1}{m}\\), you can scale it with constant \\(\\alpha\\) which actually causes exponential decay of past samples (as it keeps getting scaled by \\(\\alpha\\)).\n\\begin{equation} \\hat{x} = \\hat{x} + \\alpha (x- \\hat{x}) \\end{equation}\nQ-Learning Let us review the action-value function:\n\\begin{equation} Q(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;) \\end{equation}\nthis is a model-free method, substituting in the definition of the value function:\n\\begin{equation} Q(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\max_{a} Q(s\u0026rsquo;, a\u0026rsquo;) \\end{equation}\nNote! The second half is know in the shape of an expectation (\u0026quot;probability times the value\u0026quot;). Recall also that \\(R(s,a)\\) is the expected reward \\(r\\) when taking \\(s,a\\).\nLet:\n\\begin{equation} r = \\mathbb{E}[R(s,a)] \\end{equation}\nSo we can say that:\n\\begin{equation} Q(s,a) = \\mathbb{E} \\qty[r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)] \\end{equation}\nFinally, then, we can perform random variable mean estimation scheme given above; recall:\n\\begin{equation} \\hat{x} = \\hat{x} + \\alpha (x- \\hat{x}) \\end{equation}\nhence, we update our new mean with:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)] \\end{equation}\nSARSA SARSA is Q-Learning where you hope the model converges. 
You HAVE to perform some Exploration and Exploitation to try out other actions, and then you just update your function accordingly:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)] \\end{equation}\nthis works in theory because over time, good Exploration and Exploitation assumes that:\n\\begin{equation} a\u0026rsquo; \\rightarrow \\arg\\max_{a\u0026rsquo;} Q(s\u0026rsquo;,a\u0026rsquo;) \\end{equation}\nEligibility Traces Eligibility Traces is a change to SARSA which uses the number of visits as an additional constraint that allows updates to propagate each reward backwards given the list of states which caused that reward to be distributed.\nMeaning, let \\(\\lambda\\) be some decay parameter, we have:\n\\begin{equation} \\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a) \\end{equation}\nand, we can write:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a) \\end{equation}\nwhere by the visit counts are discounted such that:\n\\begin{equation} N(s,a) \\leftarrow \\gamma \\lambda N(s,a) \\end{equation}\nSee also Sarsa (Lambda).\nGeneralized Q-Learning with Gradient action-value Consider Value Function Approximation. 
We were trying to fit a set of \\(\\theta\\) at that time to find \\(U_{\\theta}\\) that matches \\(U^{*}\\).\nWe now want to compute some \\(Q_{\\theta}\\) in the same flavour:\n\\begin{equation} Q_{\\theta}(s,a) \\sim Q^{*}(s,a) \\end{equation}\nWe can measure the difference between these two values like so:\n\\begin{equation} \\ell(\\theta) = \\frac{1}{2}\\mathbb{E}_{(s,a)\\sim \\pi^{*}}\\qty[(Q^{*}(s,a) - Q_{\\theta}(s,a))^{2}] \\end{equation}\nWe want to write this expected value distributed over \\(s,a\\) of the optimal policy because we want to calculate more samples over those states that the optimal policy ends up at most.\nTo optimize \\(Q_{\\theta}\\), then, you betcha know what\u0026rsquo;s happenin:\n\\begin{align} \\nabla \\ell \u0026amp;= \\nabla \\frac{1}{2} \\nabla \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a))^{2}] \\\\ \u0026amp;= \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) (-1)\\nabla Q_{\\theta}(s,a)] \\\\ \u0026amp;= -\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] \\end{align}\nby a healthy dose of the chain rule.\nNow, to minimize this loss, we go in the direction opposite the gradient. 
The negatives then cancel out to give us:\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\qty[\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] ] \\end{equation}\nwhere \\(\\alpha \\in (0,1)\\).\nSimilar to the SARSA assumption, good Exploration and Exploitation assumes that:\n\\begin{equation} Q \\to Q^{*} \\end{equation}\nso we can drop our expectation with:\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] \\end{equation}\nNow, we can make one more assumption, the assumption from Q-Learning:\n\\begin{equation} Q^{*}(s,a) \\approx r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;) \\end{equation}\nthat taking the best actions with the \\(Q\\) you have will slowly approximate optimal \\(Q\\).\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\qty[\\qty((r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;))-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] \\end{equation}\nyou will note! this is actually just Q-Learning multiplying with a gradient.\nPolicy Gradient see also Policy Gradient\n","html":"\u003cp\u003eIn \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e, we tried real hard to get \\(T\\) and \\(R\\). What if we just estimated \\(Q(s,a)\\) directly? 
\u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/\"\u003emodel-free reinforcement learning\u003c/a\u003e tends to be quite slow, compared to \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e methods.\u003c/p\u003e\n\u003ch2 id=\"review-estimating-mean-of-a-random-variable--kbhrandom-variables-dot-md\"\u003ereview: estimating mean of a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ewe got \\(m\\) points \\(x^{(1 \\dots m)} \\in X\\) , what is the mean of \\(X\\)?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x_{m}} = \\frac{1}{m} \\sum_{i=1}^{m} x^{(i)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x}_{m} = \\hat{x}_{m-1} + \\frac{1}{m} (x^{(m)} - \\hat{x}_{m-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eevery time you get a new measurement \\(x^{(m)}\\). sometimes we don\u0026rsquo;t scale it by \\(\\frac{1}{m}\\), you can scale it with constant \\(\\alpha\\) which actually causes exponential decay of past samples (as it keeps getting scaled by \\(\\alpha\\)).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x} = \\hat{x} + \\alpha (x- \\hat{x})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"q-learning\"\u003eQ-Learning\u003c/h2\u003e\n\u003cp\u003eLet us review the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is a model-free method, substituting in the definition of the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\max_{a} Q(s\u0026rsquo;, 
a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote! The second half is know in the shape of an \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e (\u0026quot;\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e times the value\u0026quot;). Recall also that \\(R(s,a)\\) is the expected reward \\(r\\) when taking \\(s,a\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = \\mathbb{E}[R(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we can say that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = \\mathbb{E} \\qty[r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then, we can perform \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e mean estimation scheme given above; recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x} = \\hat{x} + \\alpha (x- \\hat{x})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehence, we update our new mean with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"sarsa\"\u003eSARSA\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#sarsa\"\u003eSARSA\u003c/a\u003e is \u003ca href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e where you hope the model converges. 
You HAVE to perform some \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e to try out other actions, and then you just update your function accordingly:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis works in theory because over time, good \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e assumes that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\u0026rsquo; \\rightarrow \\arg\\max_{a\u0026rsquo;} Q(s\u0026rsquo;,a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"eligibility-traces\"\u003eEligibility Traces\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e is a change to \u003ca href=\"#sarsa\"\u003eSARSA\u003c/a\u003e which uses the number of visits as an additional constraint that allows updates to propagate each reward backwards given the list of states which caused that reward to be distributed.\u003c/p\u003e\n\u003cp\u003eMeaning, let \\(\\lambda\\) be some decay parameter, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere by the visit counts are discounted such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN(s,a) \\leftarrow \\gamma \\lambda N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhsarsa_lambda/\"\u003eSarsa (Lambda)\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"generalized-q-learning--orgfb02fd1--with-gradient-action-value--kbhaction-value-function-dot-md\"\u003eGeneralized \u003ca 
href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e with Gradient \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eConsider \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eValue Function Approximation\u003c/a\u003e. We were trying to fit a set of \\(\\theta\\) at that time to find \\(U_{\\theta}\\) that matches \\(U^{*}\\).\u003c/p\u003e\n\u003cp\u003eWe now want to compute some \\(Q_{\\theta}\\) in the same flavour:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ_{\\theta}(s,a) \\sim Q^{*}(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can measure the difference between these two values like so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ell(\\theta) = \\frac{1}{2}\\mathbb{E}_{(s,a)\\sim \\pi^{*}}\\qty[(Q^{*}(s,a) - Q_{\\theta}(s,a))^{2}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to write this expected value distributed over \\(s,a\\) of the \u003cstrong\u003eoptimal\u003c/strong\u003e policy because we want to calculate more samples over those states that the optimal policy ends up at most.\u003c/p\u003e\n\u003cp\u003eTo optimize \\(Q_{\\theta}\\), then, you betcha know what\u0026rsquo;s happenin:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\nabla \\ell \u0026amp;= \\nabla \\frac{1}{2} \\nabla \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a))^{2}] \\\\\n\u0026amp;= \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) (-1)\\nabla Q_{\\theta}(s,a)] \\\\\n\u0026amp;= -\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)]\n\\end{align}\u003c/p\u003e\n\u003cp\u003eby a healthy dose of the chain rule.\u003c/p\u003e\n\u003cp\u003eNow, to minimize this loss, we go in the direction opposite the gradient. 
The negatives then cancel out to give us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\qty[\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\alpha \\in (0,1)\\).\u003c/p\u003e\n\u003cp\u003eSimilar to the \u003ca href=\"#sarsa\"\u003eSARSA\u003c/a\u003e assumption, good \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e assumes that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ \\to Q^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso we can drop our expectation with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we can make one more assumption, the assumption from \u003ca href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ^{*}(s,a) \\approx r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat taking the best actions with the \\(Q\\) you have will slowly approximate optimal \\(Q\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\qty[\\qty((r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;))-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note! 
this is actually just \u003ca href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e multiplying with a gradient.\u003c/p\u003e\n\u003ch2 id=\"policy-gradient--kbhpolicy-gradient-dot-md\"\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee also \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodel_free_reinforcement_learning/","tags":null,"title":"model-free reinforcement learning"},{"categories":null,"contents":"Here are the main steps of generic modeling.\n","html":"\u003cp\u003eHere are the main steps of generic \u003ca href=\"/posts/kbhmodeling/\"\u003emodeling\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-13_15-44-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhmodeling/","tags":null,"title":"modeling"},{"categories":null,"contents":"multi-core CPUs Finally, actually multitasking: starting in mid 2000s, multiple cores are finally more common. 
management between cores is crucial\nMoors Law Break Down we have reached much of the limits of the speed of a single core instead, we have to have more cores\u0026mdash;which requires more management to take advantage of More kinds of Cores \u0026ldquo;performance\u0026rdquo; vs \u0026ldquo;efficiency\u0026rdquo; cores needs to schedule for different tasks: not just who on what core, but who on what TYPE of core Other Hardware Specialized hardware in these chips, which is required for scheduling.\nGPU In change of graphics and some ML applications\nNPU/TPU Machine learning specialization.\nScheduling Multi-Core CPUs Most Basic Idea share ready queue shared across cores lock to sync access to the ready queue one dispatcher separate interrupts for each core Run \\(k\\) highest priority threads on the \\(k\\) cores.\nIssue need to figure out what is the priority of each core run if we want preemption, so its an \\(O(n)\\) check for free + priority the shared ready queue needs to be locked, so as core increases they need to be synchronized which causes slowdown One Ready Queue per Core Big problems:\nwhere do we put a given thread? 
moving core between threads is expensive Big tension: Work Stealing and Core Affinity\nWork Stealing If one core is free (even if there is things in the ready queue), check other cores\u0026rsquo; ready queues and try to do thread communism.\nCore Affinity Ideally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core.\nGang Scheduling When you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.\nLocking Multi-Core CPUs Main problem: disable interrupts doesn\u0026rsquo;t stop race conditions.\nSo we turn to busy waiting with a hardware atomic operation exchange, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:\nclass Lock { std::automic\u0026lt;int\u0026gt; sync(0); } void Lock::lock() { while (sync.exchange(1)) {} // we are now the only one using it // do work .... 
sync = 0; } The exchange function returns the old value.\nThe busy waiting here isn\u0026rsquo;t too bad, because you only need to busy wait for the lock itself to be locked, and then the lock will handle sync from there.\nFlash Storage They are faster:\nno moving parts (no spinny) smaller, faster, lots of data mobile devices especially Typically, we fix these quirky issues with the Flash Translation Layer (FTL), which provides block, sector, and read/write interfaces like spinning harddrives without the OS noticing.\nMinimizing seeks isn\u0026rsquo;t too necessary now, but, writing SSD is very weird:\nwriting You have two operation.\nerase You can set ALL SEGMENT of an \u0026ldquo;erase unit\u0026rdquo; to \\(1\\)\n\u0026ldquo;erase unit\u0026rdquo; size is usually 256k\nwrite You can modify one \u0026ldquo;page\u0026rdquo; at a time (which is smaller than a erase unit)\u0026mdash;but you can ONLY set individual bits in the page into 0 (and not 1, which you have to do by erasing larger erasing chunks).\n\u0026ldquo;page\u0026rdquo; size is usually 512 bytes or 4k bytes\nwear-out wear leveling: make sure that the drive wears out at roughly the same rate as other parts of the drive. Moving commonly written data (\u0026ldquo;hot\u0026rdquo; data) around\nFTL limitations no hardware access (can\u0026rsquo;t optimize around flash storage) sacrifices performances for performance wasts capacity (to look like hard drive) many layers ","html":"\u003ch2 id=\"multi-core-cpus\"\u003emulti-core CPUs\u003c/h2\u003e\n\u003cp\u003eFinally, actually multitasking: starting in mid 2000s, multiple cores are finally more common. 
\u003cstrong\u003emanagement between cores is crucial\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"moors-law-break-down\"\u003eMoors Law Break Down\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe have reached much of the limits of the speed of a single core\u003c/li\u003e\n\u003cli\u003einstead, we have to have more cores\u0026mdash;which requires more management to take advantage of\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"more-kinds-of-cores\"\u003eMore kinds of Cores\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;performance\u0026rdquo; vs \u0026ldquo;efficiency\u0026rdquo; cores\u003c/li\u003e\n\u003cli\u003eneeds to schedule for different tasks: not just who on what core, but who on what TYPE of core\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-hardware\"\u003eOther Hardware\u003c/h2\u003e\n\u003cp\u003eSpecialized hardware in these chips, which is required for scheduling.\u003c/p\u003e\n\u003ch3 id=\"gpu\"\u003eGPU\u003c/h3\u003e\n\u003cp\u003eIn change of graphics and some ML applications\u003c/p\u003e\n\u003ch3 id=\"npu-tpu\"\u003eNPU/TPU\u003c/h3\u003e\n\u003cp\u003eMachine learning specialization.\u003c/p\u003e\n\u003ch2 id=\"scheduling-multi-core-cpus\"\u003eScheduling Multi-Core CPUs\u003c/h2\u003e\n\u003ch3 id=\"most-basic-idea\"\u003eMost Basic Idea\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eshare ready queue shared across cores\u003c/li\u003e\n\u003cli\u003elock to sync access to the ready queue\u003c/li\u003e\n\u003cli\u003eone dispatcher\u003c/li\u003e\n\u003cli\u003eseparate interrupts for each core\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eRun \\(k\\) highest priority threads on the \\(k\\) cores.\u003c/p\u003e\n\u003ch4 id=\"issue\"\u003eIssue\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eneed to figure out what is the priority of each core run if we want preemption, so its an \\(O(n)\\) check for free + priority\u003c/li\u003e\n\u003cli\u003ethe shared ready queue needs to be locked, so as core increases they 
need to be synchronized which causes slowdown\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"one-ready-queue-per-core\"\u003eOne Ready Queue per Core\u003c/h3\u003e\n\u003cp\u003eBig problems:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewhere do we put a given thread?\u003c/li\u003e\n\u003cli\u003emoving core between threads is expensive\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBig tension: \u003cstrong\u003e\u003ca href=\"#work-stealing\"\u003eWork Stealing\u003c/a\u003e\u003c/strong\u003e and \u003cstrong\u003e\u003ca href=\"#core-affinity\"\u003eCore Affinity\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch4 id=\"work-stealing\"\u003eWork Stealing\u003c/h4\u003e\n\u003cp\u003eIf one core is free (even if there is things in the ready queue), check other cores\u0026rsquo; ready queues and try to do thread communism.\u003c/p\u003e\n\u003ch4 id=\"core-affinity\"\u003eCore Affinity\u003c/h4\u003e\n\u003cp\u003eIdeally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core.\u003c/p\u003e\n\u003ch3 id=\"gang-scheduling\"\u003eGang Scheduling\u003c/h3\u003e\n\u003cp\u003eWhen you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.\u003c/p\u003e\n\u003ch2 id=\"locking-multi-core-cpus\"\u003eLocking Multi-Core CPUs\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eMain problem\u003c/strong\u003e: disable interrupts doesn\u0026rsquo;t stop race conditions.\u003c/p\u003e\n\u003cp\u003eSo we turn to \u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e with a hardware atomic operation \u003ccode\u003eexchange\u003c/code\u003e, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eLock\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eautomic\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esync\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esync\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexchange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we are now the only one using it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// do work ....\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esync\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe exchange function returns the old value.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e here isn\u0026rsquo;t too bad, because you only need to busy wait for the lock itself to be locked, and then the lock will handle sync from there.\u003c/p\u003e\n\u003ch2 id=\"flash-storage\"\u003eFlash 
Storage\u003c/h2\u003e\n\u003cp\u003eThey are faster:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eno moving parts (no spinny)\u003c/li\u003e\n\u003cli\u003esmaller, faster, lots of data\u003c/li\u003e\n\u003cli\u003emobile devices especially\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTypically, we fix these quirky issues with the \u003ca href=\"#flash-storage\"\u003eFlash Translation Layer\u003c/a\u003e (\u003ca href=\"#flash-storage\"\u003eFTL\u003c/a\u003e), which provides block, sector, and read/write interfaces like spinning harddrives without the OS noticing.\u003c/p\u003e\n\u003cp\u003eMinimizing seeks isn\u0026rsquo;t too necessary now, but, writing SSD is very weird:\u003c/p\u003e\n\u003ch3 id=\"writing\"\u003ewriting\u003c/h3\u003e\n\u003cp\u003eYou have two operation.\u003c/p\u003e\n\u003ch4 id=\"erase\"\u003eerase\u003c/h4\u003e\n\u003cp\u003eYou can set \u003cstrong\u003eALL SEGMENT\u003c/strong\u003e of an \u0026ldquo;erase unit\u0026rdquo; to \\(1\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;erase unit\u0026rdquo; size is usually 256k\u003c/p\u003e\n\u003ch4 id=\"write\"\u003ewrite\u003c/h4\u003e\n\u003cp\u003eYou can modify one \u0026ldquo;page\u0026rdquo; at a time (which is smaller than a erase unit)\u0026mdash;but you can ONLY set individual bits in the page into 0 (and not 1, which you have to do by erasing larger erasing chunks).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;page\u0026rdquo; size is usually 512 bytes or 4k bytes\u003c/p\u003e\n\u003ch3 id=\"wear-out\"\u003ewear-out\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003ewear leveling\u003c/strong\u003e: make sure that the drive wears out at roughly the same rate as other parts of the drive. 
Moving commonly written data (\u0026ldquo;hot\u0026rdquo; data) around\u003c/p\u003e\n\u003ch3 id=\"ftl--org19007d5--limitations\"\u003e\u003ca href=\"#flash-storage\"\u003eFTL\u003c/a\u003e limitations\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eno hardware access (can\u0026rsquo;t optimize around flash storage)\u003c/li\u003e\n\u003cli\u003esacrifices performances for performance\u003c/li\u003e\n\u003cli\u003ewasts capacity (to look like hard drive)\u003c/li\u003e\n\u003cli\u003emany layers\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodern_os/","tags":null,"title":"modern OS"},{"categories":null,"contents":"Clock math.\nWe say that \\(a\\ \\text{mod}\\ b = r\\) if \\(a=bq+r\\), such that \\(b\u0026gt;0\\) and \\(0 \\leq r \u0026lt;b\\). More specifically, we denote:\n\\begin{equation} a \\equiv a\u0026rsquo;\\ \\text{mod}\\ b \\end{equation}\nif \\(b|(a-a\u0026rsquo;)\\).\nadditional information basic modular arithmetic operations \\begin{align} (a+b)\\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) + (b\\ \\text{mod}\\ c))\\ \\text{mod}\\ c \\\\ (ab) \\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) (b \\ \\text{mod}\\ c)) \\ \\text{mod}\\ c \\end{align}\nexamples of modular arithmetic If \\(a\\ \\text{mod}\\ b = r\\), \\((-a)\\ \\text{mod}\\ b = -r = b-r\\)\n\\(2^{2}\\equiv 4 \\equiv -1 \\ \\text{mod}\\ 5\\), \\(2^{4}\\equiv 1\\ \\text{mod}\\ 5\\)\nUSPS\u0026rsquo;s check digit is \\(a\\ \\text{mod}\\ 9\\) because you can just add all the digits up Let \\(a \\in \\mathbb{Z}\\). Let \\(s\\) be the sum of all the digits in \\(a\\). \\(a \\ \\text{mod}\\ 9 = s \\ \\text{mod}\\ 9\\). Why? Not a very satisfying answer, but because \\(9\\) is \\(10-1\\), so for each \\(n \\times 10^{k}\\ \\text{mod}\\ 9\\) is always \\(-n\\) smaller. like how \\(10 = 9+1\\), \\(20 = 2 \\times 9+2\\), etc.\nsubgroups Recall the real numbers: \\(\\dots, -2, -1, 0, 1, 2, 3, \\dots\\)\nThat\u0026rsquo;s so many numbers! 
Instead, let\u0026rsquo;s create a circle of these values. For instance, what if you only want \\(5\\):\n\\begin{equation} \\mathbb{Z}_{5} = \\{0,1,2,3,4\\} \\end{equation}\nThis is a group under addition.\nhumph: similarity between this and affine subsets \\(u/U = v/U\\) if \\(u-v \\in U\\) \\(u \\equiv v\\ \\text{mod}\\ b\\) if \\(b|u-v\\) Chinese Remainder Theorem Suppose \\(a,b \\in \\mathbb{Z}\\), and \\(m,n \\in \\mathbb{N}\\), such that \\(\\gcd (m,n) = 1\\) (that is, suppose \\(m,n\\) is coprime). There is some \\(x \\in \\mathbb{Z}\\) such that:\n\\begin{equation} x \\equiv a \\ \\text{mod}\\ m, x \\equiv b\\ \\text{mod}\\ n \\end{equation}\nFurthermore, and importantly, \\(x\\ \\text{mod}\\ (mn)\\) is unique.\n","html":"\u003cp\u003eClock math.\u003c/p\u003e\n\u003cp\u003eWe say that \\(a\\ \\text{mod}\\ b = r\\) if \\(a=bq+r\\), such that \\(b\u0026gt;0\\) and \\(0 \\leq r \u0026lt;b\\). More specifically, we denote:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\equiv a\u0026rsquo;\\ \\text{mod}\\ b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(b|(a-a\u0026rsquo;)\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"basic-modular-arithmetic--kbhmodular-arithmetic-dot-md--operations\"\u003ebasic \u003ca href=\"/posts/kbhmodular_arithmetic/\"\u003emodular arithmetic\u003c/a\u003e operations\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n(a+b)\\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) + (b\\ \\text{mod}\\ c))\\ \\text{mod}\\ c \\\\\n(ab) \\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) (b \\ \\text{mod}\\ c)) \\ \\text{mod}\\ c\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"examples-of-modular-arithmetic\"\u003eexamples of modular arithmetic\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eIf \\(a\\ \\text{mod}\\ b = r\\), \\((-a)\\ \\text{mod}\\ b = -r = 
b-r\\)\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\\(2^{2}\\equiv 4 \\equiv -1 \\ \\text{mod}\\ 5\\), \\(2^{4}\\equiv 1\\ \\text{mod}\\ 5\\)\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eUSPS\u0026rsquo;s check digit is \\(a\\ \\text{mod}\\ 9\\) because you can just add all the digits up\u003c/strong\u003e\u003c/strong\u003e\nLet \\(a \\in \\mathbb{Z}\\). Let \\(s\\) be the sum of all the digits in \\(a\\). \\(a \\ \\text{mod}\\ 9 = s \\ \\text{mod}\\ 9\\). Why? Not a very satisfying answer, but because \\(9\\) is \\(10-1\\), so for each \\(n \\times 10^{k}\\ \\text{mod}\\ 9\\) is always \\(-n\\) smaller. like how \\(10 = 9+1\\), \\(20 = 2 \\times 9+2\\), etc.\u003c/p\u003e\n\u003ch3 id=\"subgroups--kbhsubgroup-dot-md\"\u003e\u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroups\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eRecall the real numbers: \\(\\dots, -2, -1, 0, 1, 2, 3, \\dots\\)\u003c/p\u003e\n\u003cp\u003eThat\u0026rsquo;s so many numbers! Instead, let\u0026rsquo;s create a \u003cem\u003ecircle\u003c/em\u003e of these values. 
For instance, what if you only want \\(5\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Z}_{5} = \\{0,1,2,3,4\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e under addition.\u003c/p\u003e\n\u003ch3 id=\"humph-similarity-between-this-and-affine-subsets--kbhparallel-linear-algebra-dot-md\"\u003ehumph: similarity between this and \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subsets\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\(u/U = v/U\\) if \\(u-v \\in U\\)\u003c/li\u003e\n\u003cli\u003e\\(u \\equiv v\\ \\text{mod}\\ b\\) if \\(b|u-v\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"chinese-remainder-theorem\"\u003eChinese Remainder Theorem\u003c/h3\u003e\n\u003cp\u003eSuppose \\(a,b \\in \\mathbb{Z}\\), and \\(m,n \\in \\mathbb{N}\\), such that \\(\\gcd (m,n) = 1\\) (that is, suppose \\(m,n\\) is \u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003e). There is some \\(x \\in \\mathbb{Z}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\equiv a \\ \\text{mod}\\ m, x \\equiv b\\ \\text{mod}\\ n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFurthermore, and importantly, \\(x\\ \\text{mod}\\ (mn)\\) is unique.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodular_arithmetic/","tags":null,"title":"modular arithmetic"},{"categories":null,"contents":"goal: Drug Resistance could be more hampered by developing drugs that actually fit in the sub-strait envelope (i.e. if a virus develops a change to the drugged area, it should also stop working)\ntakeaway: to design inhibitors, it sticking out (\u0026ldquo;protrusion\u0026rdquo;) of the substrate envelope causes easy areas of mutation that will confer Drug Resistance, therefore, design drugs that try to stay within substrate envelope to ensure a higher degree of imperviousness to mutation (i.e. 
if the envelope changes well the virus is going to not do its job either)\nMessing with HIV mutations outside the active site (in primary backbone structure) actually caused a large increase in resistance (4 points outside of backbone structure) use linreg and other ML methods to take the type of mutation change (hydrogen? binding? etc.) and to find features most important to confer resistance Messing with COVID MPro COVID has many mutations on the aligned sequences to figure conserved interactions paxlovid nemdesirvir binds strongly to E166, which\u0026mdash;though conserved\u0026mdash;still could be resistant to resistance ","html":"\u003cp\u003egoal: \u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e could be more hampered by developing drugs that actually fit in the sub-strait envelope (i.e. if a virus develops a change to the drugged area, it should also stop working)\u003c/p\u003e\n\u003cp\u003etakeaway: to design inhibitors, it sticking out (\u0026ldquo;protrusion\u0026rdquo;) of the \u003ca href=\"/posts/kbhsubtrait_envelope/\"\u003esubstrate envelope\u003c/a\u003e causes easy areas of mutation that will confer \u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e, therefore, design drugs that try to stay within \u003ca href=\"/posts/kbhsubtrait_envelope/\"\u003esubstrate envelope\u003c/a\u003e to ensure a higher degree of imperviousness to mutation (i.e. if the envelope changes well the virus is going to not do its job either)\u003c/p\u003e\n\u003ch2 id=\"messing-with-hiv\"\u003eMessing with HIV\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emutations outside the active site (in primary backbone structure) actually caused a large increase in resistance (4 points outside of backbone structure)\u003c/li\u003e\n\u003cli\u003euse linreg and other ML methods to take the \u003cem\u003etype\u003c/em\u003e of mutation change (hydrogen? binding? etc.) 
and to find features most important to confer resistance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"messing-with-covid-mpro\"\u003eMessing with COVID MPro\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCOVID has many mutations on the\u003c/li\u003e\n\u003cli\u003ealigned sequences to figure conserved interactions\u003c/li\u003e\n\u003cli\u003epaxlovid nemdesirvir binds strongly to E166, which\u0026mdash;though conserved\u0026mdash;still could be resistant to resistance\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmolecular_drug_resistance/","tags":null,"title":"Molecular Analysis of Drug Resistance"},{"categories":null,"contents":"MOMDP are POMDPs where some parts of the state are fully observable.\nMotivation scaling up POMDPs is really hard: exponential curse of dimensionality. Even discretization will cause the number of beliefs to really blow up.\nSome of the state isn\u0026rsquo;t uncertain, some others are bounded uncertainty: this REDUCES scale a lot.\nSolving Solving the algorithm uses SARSOP, or any point-based system. Instead of sampling the full belief state, however, we sample from a tuple \\((x, b_{y})\\), whereby \\(x\\) is the observable part and \\(b_{y}\\) is the unobservable part.\nHow Exactly Tuple? True Mixed Observability Go about splitting about your space based on the true observability part. Say there are \\(10\\) states which are observable, you literally just initialize 10 sets of alpha vectors to create \\(V_{1} \u0026hellip; V_{10}\\) for your observable states, where each one you have:\n\\begin{equation} V_{x_{i}}(b_{j}) = \\dots \\end{equation}\nwhereby all of your objectives and backup, etc., takes \\(x\\) your observable state as input. Then, during inference/backup looking at where you are in the observable part and use the value function from that part.\nPseudo-Full Observability Train a fully observable model, and then use belief-weighted average during inference. 
This is where QMDP came from.\nBounded Uncertainty Most of the time neither of the top two cases apply cleanly. Instead, most frequently your uncertainty in your observation is bounded by a significant degree.\nCondition For instance, your GPS maybe uncertain, but if it says you are in Kansas you are not in Shanghai. Formally, for \\(h: O \\to S\\) (the hypothetical \u0026ldquo;preimage\u0026rdquo; of any observation), we expect that:\n\\begin{equation} \\frac{h(o)}{S} = c \\end{equation}\ngives \\(c \\ll 1\\).\nSolution If we know we have Bounded Uncertainty, we can reparameterize our POMDP to an MDP over observations (we call this \\(X\\)) plus a POMDP modeling uncertainty in offsets from those observations (we call this \\(Y\\)).\nWhereby:\n\\begin{equation} \\begin{cases} T_{x}(x\u0026rsquo;|x,y,a) = \\sum_{s\u0026rsquo; \\in S} T(s\u0026rsquo; | (x,y),a) O(x\u0026rsquo;|s\u0026rsquo;,a) \\\\ T_{y}(y\u0026rsquo;|x,x\u0026rsquo;,y,a) = \\frac{T((x\u0026rsquo;,y\u0026rsquo;) | (x,y),a) O((x\u0026rsquo;,y\u0026rsquo;)|s\u0026rsquo;,a)}{T_{x}(x\u0026rsquo;|x,y,a)} \\end{cases} \\end{equation}\nwhere our state space is now split into \\(s \\in S = X \\times Y\\) s.t. \\(s=(x,y)\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmomdp/\"\u003eMOMDP\u003c/a\u003e are \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es where some parts of the state are fully observable.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003escaling up \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es is \u003cstrong\u003e\u003cstrong\u003ereally hard\u003c/strong\u003e\u003c/strong\u003e: exponential \u003ca href=\"/posts/kbhcurse_of_dimensionality/\"\u003ecurse of dimensionality\u003c/a\u003e. 
Even discretization will cause the number of beliefs to really blow up.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eSome of the state isn\u0026rsquo;t uncertain, some others are bounded uncertainty: this REDUCES scale a lot.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003eSolving the algorithm uses \u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e, or any point-based system. Instead of sampling the full belief state, however, we sample from a tuple \\((x, b_{y})\\), whereby \\(x\\) is the observable part and \\(b_{y}\\) is the unobservable part.\u003c/p\u003e\n\u003ch2 id=\"how-exactly-tuple\"\u003eHow Exactly Tuple?\u003c/h2\u003e\n\u003ch3 id=\"true-mixed-observability\"\u003eTrue Mixed Observability\u003c/h3\u003e\n\u003cp\u003eGo about splitting about your space based on the true observability part. Say there are \\(10\\) states which are observable, you literally just initialize 10 sets of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es to create \\(V_{1} \u0026hellip; V_{10}\\) for your observable states, where each one you have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{x_{i}}(b_{j}) = \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby all of your objectives and backup, etc., takes \\(x\\) your observable state as input. Then, during inference/backup looking at where you are in the observable part and use the value function from that part.\u003c/p\u003e\n\u003ch3 id=\"pseudo-full-observability\"\u003ePseudo-Full Observability\u003c/h3\u003e\n\u003cp\u003eTrain a fully observable model, and then use \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e-weighted average during inference. This is where \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e came from.\u003c/p\u003e\n\u003ch3 id=\"bounded-uncertainty\"\u003eBounded Uncertainty\u003c/h3\u003e\n\u003cp\u003eMost of the time neither of the top two cases apply cleanly. 
Instead, most frequently your uncertainty in your observation is \u003cem\u003ebounded\u003c/em\u003e by a significant degree.\u003c/p\u003e\n\u003ch4 id=\"condition\"\u003eCondition\u003c/h4\u003e\n\u003cp\u003eFor instance, your GPS maybe uncertain, but if it says you are in Kansas you are not in Shanghai. Formally, for \\(h: O \\to S\\) (the hypothetical \u0026ldquo;preimage\u0026rdquo; of any observation), we expect that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{h(o)}{S} = c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egives \\(c \\ll 1\\).\u003c/p\u003e\n\u003ch4 id=\"solution\"\u003eSolution\u003c/h4\u003e\n\u003cp\u003eIf we know we have \u003ca href=\"#bounded-uncertainty\"\u003eBounded Uncertainty\u003c/a\u003e, we can reparameterize our \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e to an \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e over observations (we call this \\(X\\)) plus a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e modeling \u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e in offsets from those observations (we call this \\(Y\\)).\u003c/p\u003e\n\u003cp\u003eWhereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nT_{x}(x\u0026rsquo;|x,y,a) = \\sum_{s\u0026rsquo; \\in S} T(s\u0026rsquo; | (x,y),a) O(x\u0026rsquo;|s\u0026rsquo;,a) \\\\\nT_{y}(y\u0026rsquo;|x,x\u0026rsquo;,y,a) = \\frac{T((x\u0026rsquo;,y\u0026rsquo;) | (x,y),a) O((x\u0026rsquo;,y\u0026rsquo;)|s\u0026rsquo;,a)}{T_{x}(x\u0026rsquo;|x,y,a)}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere our state space is now split into \\(s \\in S = X \\times Y\\) s.t. 
\\(s=(x,y)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmomdp/","tags":null,"title":"MOMDP"},{"categories":null,"contents":"Monetarist theory is a theory of economics proposed by Milton Freedman which asserts that Keynsian economics only applies in the limited case that central bank need to keep the money supply growing; otherwise, the free market can handle itself.\nTherefore the Monetarist theorists propose that the stock market crash of 1929 was caused that the US monetary fund did a bad job of actually controlling the funds, and didn\u0026rsquo;t inject enough money into economy.\nSee also the opposite: demand-driven theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theory\u003c/a\u003e is a theory of economics proposed by \u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e which asserts that \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian economics\u003c/a\u003e only applies in the limited case that central bank need to keep the money supply growing; otherwise, the free market can handle itself.\u003c/p\u003e\n\u003cp\u003eTherefore the \u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theorists\u003c/a\u003e propose that the \u003ca href=\"/posts/kbhcauses_of_the_great_depression/#stock-market-crash-of-1929\"\u003estock market crash of 1929\u003c/a\u003e was caused that the US monetary fund did a bad job of actually controlling the funds, and didn\u0026rsquo;t inject enough money into economy.\u003c/p\u003e\n\u003cp\u003eSee also the opposite: \u003ca href=\"/posts/kbhdemand_driven_theory/\"\u003edemand-driven theory.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmonetarist_theory/","tags":null,"title":"Monetarist theory"},{"categories":null,"contents":"monitor pattern is a multithreading pattern to help prevent race conditions and deadlocks.\nassociate a single lock with a collection of variables (a 
\u0026ldquo;class\u0026rdquo;), having one lock associated with the group.\nany time when you want to access anything in that group, you unlock the mutex associated with the group. meaning, there\u0026rsquo;s only one mutex which can be used to change shared state.\nBridge Crossing There is cars that are crossing a one lane bridge: each car in a thread, they have to coordinate when/where to cross the bridge.\nCar can be going east or the west. All cars must be traveling in the same direction. And a car can only go once the coast is clear.\nInterface static void cross_bridge_east(size_t carid) { approach_bridge(); // sleeping driveAcross(EAST); // sleeping } static void cross_bridge_west(size_t carid) { approach_bridge(); // sleeping driveAcross(WEST); // sleeping } we need to ensure that, we are sharing a one lane bridge, and they don\u0026rsquo;t collide.\nMonitor Pattern REDUCES NUMBER OF PARAMS!\nMethod for the mutex management system isolated into a single, thread-safe class. All the mutexes, etc., all of the mutex gunk gets isolated into a thread safe instance method of the bridge class.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmonitor_pattern/\"\u003emonitor pattern\u003c/a\u003e is a \u003ca href=\"/posts/kbhmultithreading/\"\u003emultithreading\u003c/a\u003e pattern to help prevent \u003ca href=\"/posts/kbhmultithreading/#race-condition\"\u003erace condition\u003c/a\u003es and \u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eassociate a single lock with a collection of variables\u003c/strong\u003e (a \u0026ldquo;class\u0026rdquo;), having one lock associated with the group.\u003c/p\u003e\n\u003cp\u003eany time when you want to access anything in that group, you unlock the \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003e associated with the group. 
meaning, there\u0026rsquo;s only one mutex which can be used to change shared state.\u003c/p\u003e\n\u003ch2 id=\"bridge-crossing\"\u003eBridge Crossing\u003c/h2\u003e\n\u003cp\u003eThere is cars that are crossing a \u003cstrong\u003eone lane bridge\u003c/strong\u003e: each car in a thread, they have to coordinate when/where to cross the bridge.\u003c/p\u003e\n\u003cp\u003eCar can be going east or the west. All cars must be traveling in the same direction. And a car can only go once the coast is clear.\u003c/p\u003e\n\u003ch3 id=\"interface\"\u003eInterface\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ecross_bridge_east\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecarid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eapproach_bridge\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edriveAcross\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEAST\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ecross_bridge_west\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecarid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eapproach_bridge\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edriveAcross\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eWEST\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewe need to ensure that, we are sharing a one lane bridge, and they don\u0026rsquo;t collide.\u003c/p\u003e\n\u003ch3 id=\"monitor-pattern\"\u003eMonitor Pattern\u003c/h3\u003e\n\u003cp\u003eREDUCES NUMBER OF PARAMS!\u003c/p\u003e\n\u003cp\u003eMethod for the mutex management system isolated into a single, thread-safe class. All the mutexes, etc., all of the mutex gunk gets isolated into a thread safe instance method of the bridge class.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmonitor_pattern/","tags":null,"title":"monitor pattern"},{"categories":null,"contents":" \\(\\mathcal{P}\\) problem (states, transitions, etc.) \\(N\\) visit counts \\(Q\\) a q-table: action-value estimates \\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower \\(U\\) value function estimate; usually a Rollout Policy, estimate at some depth \\(d\\) \\(c\\) exploration constant After \\(n\\) simulation s from the starting state; we find the best action for our current state from our q-table.\nSubroutine: simulate(state, depth_remaining)\nIf depth_remaining=0, simply return the utility from the value function estimate For some s, Actions that we just got, if we haven\u0026rsquo;t seen it, we just return the value function estimate + initialize the N and Q tables select an action via the monte-carlo exploration formula sample a next state and current reward based on the action you gotten via a generative model value = reward + discount*simulate(next_state, depth_remaining-1) add to the N(state, action) count update the q table at (state, action): Q[s,a] + = (value-Q[s,a])/N[s,a] (\u0026ldquo;how much better is taking this action?\u0026rdquo; \u0026mdash; with later times taking this action more heavily discounted) monte-carlo exploration \\begin{equation} \\max_{a} Q(s,a) + c \\sqrt{ \\frac{\\log 
\\sum_{a}N(s,a)}{N(s,a)}} \\end{equation}\nwhere \\(c\\) is the exploration factor, and \\(N\\) is the next steps.\nWe want to encourage the exploration of things we haven\u0026rsquo;t tried as much. Note that as \\(N(s,a)\\) is small, the right term is larger. So, if its also not too bad in terms of \\(Q\\), we will choose it.\nIf \\(N(s,a)\\) is zero, you return the action. You always want to try something at least once.\n","html":"\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(N\\) visit counts\u003c/li\u003e\n\u003cli\u003e\\(Q\\) a q-table: \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e estimates\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003cli\u003e\\(U\\) \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function estimate\u003c/a\u003e; usually a \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout-policy\"\u003eRollout Policy\u003c/a\u003e, estimate at some depth \\(d\\)\u003c/li\u003e\n\u003cli\u003e\\(c\\) exploration constant\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAfter \\(n\\) simulation s from the starting state; we find the best action for our current state from our q-table.\u003c/p\u003e\n\u003cp\u003eSubroutine: \u003ccode\u003esimulate(state, depth_remaining)\u003c/code\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIf \u003ccode\u003edepth_remaining=0\u003c/code\u003e, simply return the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e from the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e estimate\u003c/li\u003e\n\u003cli\u003eFor some \u003ccode\u003es, Actions\u003c/code\u003e that we just got, if we haven\u0026rsquo;t seen it, we just return the 
\u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e estimate + initialize the N and Q tables\u003c/li\u003e\n\u003cli\u003eselect an action via the \u003ca href=\"#monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/a\u003e formula\u003c/li\u003e\n\u003cli\u003esample a next state and current reward based on the action you gotten via a \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003evalue = reward + discount*simulate(next_state, depth_remaining-1)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eadd to the \u003ccode\u003eN(state, action)\u003c/code\u003e count\u003c/li\u003e\n\u003cli\u003eupdate the q table at (state, action): \u003ccode\u003eQ[s,a] + = (value-Q[s,a])/N[s,a]\u003c/code\u003e (\u0026ldquo;how much better is taking this action?\u0026rdquo; \u0026mdash; with later times taking this action more heavily discounted)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{a} Q(s,a) + c \\sqrt{ \\frac{\\log \\sum_{a}N(s,a)}{N(s,a)}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c\\) is the exploration factor, and \\(N\\) is the next steps.\u003c/p\u003e\n\u003cp\u003eWe want to encourage the exploration of things we haven\u0026rsquo;t tried as much. Note that as \\(N(s,a)\\) is small, the right term is larger. So, if its also not too bad in terms of \\(Q\\), we will choose it.\u003c/p\u003e\n\u003cp\u003eIf \\(N(s,a)\\) is zero, you return the action. 
You always want to try something at least once.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmonte_carlo_tree_search/","tags":null,"title":"monte-carlo tree search"},{"categories":null,"contents":"The fallout of the Rosa Parks incident, which is when many of Montgomery residents.\nThe boycotts were developed by Martin Luther King.\n","html":"\u003cp\u003eThe fallout of the \u003ca href=\"/posts/kbhrosa_parks/\"\u003eRosa Parks\u003c/a\u003e incident, which is when many of Montgomery residents.\u003c/p\u003e\n\u003cp\u003eThe boycotts were developed by \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmontomery_bus_boycott/","tags":null,"title":"Montgomery Bus Boycott"},{"categories":null,"contents":"A morpheme is the smallest meaning-bearing unit of a language. \u0026ldquo;er\u0026rdquo;, or \u0026ldquo;ist\u0026rdquo;, etc. It contains:\nstems: core meaning-bearing units, and affexes: parts that adhere to stems For non space-delineated languages, tokenization happens with morpheme (\u0026ldquo;词\u0026rdquo;).\nConsider:\n姚明进入总决赛\nIs yao/ming first and last names seperated. Is zong combined with juesai? (i.e. ADJ vs. NOUN).\nCommonly, Chinese performs word level tokenization if you don\u0026rsquo;t want to deal with it. Typically, this usuals neural sequence models.\n","html":"\u003cp\u003eA morpheme is the smallest meaning-bearing unit of a language. \u0026ldquo;er\u0026rdquo;, or \u0026ldquo;ist\u0026rdquo;, etc. 
It contains:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003estems\u003c/strong\u003e: core meaning-bearing units, and\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eaffexes\u003c/strong\u003e: parts that adhere to stems\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eFor non space-delineated languages, \u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e happens with \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003e (\u0026ldquo;词\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e姚明进入总决赛\u003c/p\u003e\n\u003cp\u003eIs yao/ming first and last names seperated. Is zong combined with juesai? (i.e. ADJ vs. NOUN).\u003c/p\u003e\n\u003cp\u003eCommonly, Chinese performs word level tokenization if you don\u0026rsquo;t want to deal with it. Typically, this usuals neural sequence models.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmorpheme/","tags":null,"title":"morpheme"},{"categories":null,"contents":"A morphism is a not-necessarily-invertible map between two objects of a category. If the map is indeed invertable, then we call the map an isomorphism.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e is a not-necessarily-invertible map between two \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es of a \u003ca href=\"/posts/kbhcategory/\"\u003ecategory\u003c/a\u003e. 
If the map is indeed \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, then we call the map an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmorphism/","tags":null,"title":"morphism"},{"categories":null,"contents":"recall morphemes are the smallest meaningful units of a word.\nmorphological parsing is the act of getting morphemes: cats =\u0026gt; =cat s=o\nstem + affix stemming stemming just chops off the morpheme affixes; leaving the stems. \u0026ldquo;heights\u0026rdquo; =\u0026gt; \u0026ldquo;heigh\u0026rdquo;. without lemmatization.\nThis increases recall (more stuff is caught we want to catch) at he cost of precision (what we catch is probably lots of false positives).\nLanguages with complex cojugation or morphology, this can\u0026rsquo;t work because you can\u0026rsquo;t just chop.\nporter stemmer A series of rewrite rules which performs stemming.\n","html":"\u003cp\u003erecall \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003es are the smallest meaningful units of a word.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmorphological_parsing/\"\u003emorphological parsing\u003c/a\u003e is the act of getting morphemes: \u003ccode\u003ecats\u003c/code\u003e =\u0026gt; =cat s=o\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estem +\u003c/li\u003e\n\u003cli\u003eaffix\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"stemming\"\u003estemming\u003c/h2\u003e\n\u003cp\u003estemming just chops off the morpheme affixes; leaving the stems. \u0026ldquo;heights\u0026rdquo; =\u0026gt; \u0026ldquo;heigh\u0026rdquo;. 
without lemmatization.\u003c/p\u003e\n\u003cp\u003eThis increases recall (more stuff is caught we want to catch) at he cost of precision (what we catch is probably lots of false positives).\u003c/p\u003e\n\u003cp\u003eLanguages with complex cojugation or morphology, this can\u0026rsquo;t work because you can\u0026rsquo;t just chop.\u003c/p\u003e\n\u003ch3 id=\"porter-stemmer\"\u003eporter stemmer\u003c/h3\u003e\n\u003cp\u003eA series of rewrite rules which performs stemming.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmorphological_parsing/","tags":null,"title":"morphological parsing"},{"categories":null,"contents":"Take X-Rays and generate clinical reports.\nMethod encoder decoder architectures\nEncoder ConViT: convolutional vision transformer. Special thing: we swap out the attention\nDouble Weighted Multi-Head Attention We want to force the model to focus on one thing, so we modulate the model based on the weights of other: if one head is big, we make the other head small.\nwhere \\(w_{\\cos i} = \\frac{\\sum_{i}^{} \\cos \\qty (att_{a}, att_{base})}{N}\\)\n\\begin{equation} w = w_{a} \\cdot (1- w_{\\cos i}) \\end{equation}\nmeaning:\n\\begin{equation} att_{dwma} = w \\cdot att \\end{equation}\nDecoding Goood ol\u0026rsquo; Hierarchical-Decoder\n","html":"\u003cp\u003eTake X-Rays and generate clinical reports.\u003c/p\u003e\n\u003ch2 id=\"method\"\u003eMethod\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eencoder decoder architectures\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"encoder\"\u003eEncoder\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eConViT\u003c/strong\u003e: convolutional vision transformer. 
Special thing: we swap out the attention\u003c/p\u003e\n\u003ch4 id=\"double-weighted-multi-head-attention\"\u003eDouble Weighted Multi-Head Attention\u003c/h4\u003e\n\u003cp\u003eWe want to force the model to focus on one thing, so we modulate the model based on the weights of other: if one head is big, we make the other head small.\u003c/p\u003e\n\u003cp\u003ewhere \\(w_{\\cos i} = \\frac{\\sum_{i}^{} \\cos \\qty (att_{a}, att_{base})}{N}\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw = w_{a} \\cdot (1- w_{\\cos i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\natt_{dwma} = w \\cdot att\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"decoding\"\u003eDecoding\u003c/h3\u003e\n\u003cp\u003eGoood ol\u0026rsquo; \u003cstrong\u003eHierarchical-Decoder\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmulti_lstm_for_clinical_report_generation/","tags":null,"title":"Multi-LSTM for Clinical Report Generation"},{"categories":null,"contents":"simple games constituents agent \\(i \\in X\\) the set of agents. 
joint action space: \\(A = A\u0026rsquo; \\times A^{2} \\times \u0026hellip; \\times A^{k}\\) joint action would be one per agent \\(\\vec{a} = (a_{1}, \u0026hellip;, a_{k})\\) joint reward function \\(R(a) = R\u0026rsquo;(\\vec{a}), \u0026hellip;, R(\\vec{a})\\) additional information prisoner\u0026rsquo;s dilemma Cooperate Defect Cooperate -1, -1 -4, 0 Defect 0, -4 -3, -3 traveler\u0026rsquo;s dilemma two people write down the price of their luggage, between 2-100 the lower amount gets that value plus 2 the higher amount gets the lower amount minus 2 joint policy agent utility for agent number \\(i\\)\n\\begin{equation} U^{i} (\\vec{\\pi}) = \\sum_{a \\in A}^{} R^{(i)}(\\vec{a}) \\prod_{j}^{} \\pi^{(j)}(a^{(j)}) \\end{equation}\nthis is essentially the reward you get given you took\nresponse model how would other agents respond to our system?\n\\(a^{-i}\\): joint action except for agent \\(i\\) \\(\\vec{a} = (a^{i}, a^{-i})\\), \\(R(a^{i}, a^{-i}) = R(\\vec{a})\\) best-response deterministic best response model for agent \\(i\\):\n\\begin{equation} \\arg\\max_{a^{i} \\in A^{i}} U^{i}(a^{i}, \\pi^{-i}) \\end{equation}\nwhere the response to agent \\(a\\) is deterministically selected.\nFor prisoner\u0026rsquo;s dilemma, this results in both parties defecting because that would maximise the utility.\nsoftmax response its like Softmax Method:\n\\begin{equation} \\pi^{i}(a^{i}) \\propto \\exp\\qty(\\lambda U^{i}(a^{i}, \\pi^{-1})) \\end{equation}\nfictitious play play at some kind of game continuously\nDominant Strategy Equilibrium The dominant strategy is a policy that is the best response to all other possible agent policies. Not all games have a Dominant Strategy Equilibrium, because there are games for which the best response is never invariant to others\u0026rsquo; strategies (rock paper scissors).\nNash Equilibrium A Nash Equilibrium is a joint policy \\(\\pi\\) where everyone is following their best response: i.e. 
no one is incentive to unilaterally change from their policy. This exists for every game. In general, Nash Equilibrium is very hard to compute: it is p-pad (which is unclear relationally to np-complete).\n","html":"\u003ch2 id=\"simple-games\"\u003esimple games\u003c/h2\u003e\n\u003ch3 id=\"constituents\"\u003econstituents\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eagent \\(i \\in X\\) the set of agents.\u003c/li\u003e\n\u003cli\u003ejoint action space: \\(A = A\u0026rsquo; \\times A^{2} \\times \u0026hellip; \\times A^{k}\\)\u003c/li\u003e\n\u003cli\u003ejoint action would be one per agent \\(\\vec{a} = (a_{1}, \u0026hellip;, a_{k})\\)\u003c/li\u003e\n\u003cli\u003ejoint reward function \\(R(a) = R\u0026rsquo;(\\vec{a}), \u0026hellip;, R(\\vec{a})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"additional-information\"\u003eadditional information\u003c/h3\u003e\n\u003ch4 id=\"prisoner-s-dilemma\"\u003eprisoner\u0026rsquo;s dilemma\u003c/h4\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003eCooperate\u003c/th\u003e\n\u003cth\u003eDefect\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCooperate\u003c/td\u003e\n\u003ctd\u003e-1, -1\u003c/td\u003e\n\u003ctd\u003e-4, 0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eDefect\u003c/td\u003e\n\u003ctd\u003e0, -4\u003c/td\u003e\n\u003ctd\u003e-3, -3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"traveler-s-dilemma\"\u003etraveler\u0026rsquo;s dilemma\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003etwo people write down the price of their luggage, between 2-100\u003c/li\u003e\n\u003cli\u003ethe lower amount gets that value plus 2\u003c/li\u003e\n\u003cli\u003ethe higher amount gets the lower amount minus 2\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"joint-policy-agent-utility\"\u003ejoint policy agent utility\u003c/h2\u003e\n\u003cp\u003efor agent number 
\\(i\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{i} (\\vec{\\pi}) = \\sum_{a \\in A}^{} R^{(i)}(\\vec{a}) \\prod_{j}^{} \\pi^{(j)}(a^{(j)})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is essentially the reward you get given you took\u003c/p\u003e\n\u003ch2 id=\"response-model\"\u003eresponse model\u003c/h2\u003e\n\u003cp\u003ehow would other agents respond to our system?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(a^{-i}\\): joint action except for agent \\(i\\)\u003c/li\u003e\n\u003cli\u003e\\(\\vec{a} = (a^{i}, a^{-i})\\),\u003c/li\u003e\n\u003cli\u003e\\(R(a^{i}, a^{-i}) = R(\\vec{a})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"best-response\"\u003ebest-response\u003c/h3\u003e\n\u003cp\u003edeterministic best response model for agent \\(i\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{a^{i} \\in A^{i}} U^{i}(a^{i}, \\pi^{-i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the response to agent \\(a\\) is deterministically selected.\u003c/p\u003e\n\u003cp\u003eFor \u003ca href=\"#prisoner-s-dilemma\"\u003eprisoner\u0026rsquo;s dilemma\u003c/a\u003e, this results in both parties defecting because that would maximise the utility.\u003c/p\u003e\n\u003ch3 id=\"softmax-response\"\u003esoftmax response\u003c/h3\u003e\n\u003cp\u003eits like \u003ca href=\"/posts/kbhdirected_exploration/#softmax-method\"\u003eSoftmax Method\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi^{i}(a^{i}) \\propto \\exp\\qty(\\lambda U^{i}(a^{i}, \\pi^{-1}))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"fictitious-play\"\u003efictitious play\u003c/h3\u003e\n\u003cp\u003eplay at some kind of game continuously\u003c/p\u003e\n\u003ch2 id=\"dominant-strategy-equilibrium\"\u003eDominant Strategy Equilibrium\u003c/h2\u003e\n\u003cp\u003eThe dominant strategy is a policy that is the best response to all other possible agent policies. 
Not all games have a \u003ca href=\"#dominant-strategy-equilibrium\"\u003eDominant Strategy Equilibrium\u003c/a\u003e, because there are games for which the best response is never invariant to others\u0026rsquo; strategies (rock paper scissors).\u003c/p\u003e\n\u003ch2 id=\"nash-equilibrium\"\u003eNash Equilibrium\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#nash-equilibrium\"\u003eNash Equilibrium\u003c/a\u003e is a joint policy \\(\\pi\\) where everyone is following their best response: i.e. no one is incentive to unilaterally change from their policy. This exists for every game. In general, \u003ca href=\"#nash-equilibrium\"\u003eNash Equilibrium\u003c/a\u003e is very hard to compute: it is p-pad (which is unclear relationally to np-complete).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiagent_reasoning/","tags":null,"title":"multiagent reasoning"},{"categories":null,"contents":"Key idea: multi-modality, when leveraged well, leads to faster convergence.\nData Availability Health and health sensing requires labels, but health signals require specialist knowledge + broader context to label.\ntypical image labeling: 0.05/label medical imaging: 4.00/label Even if want to automate the study, we need to Kyntic style strap a thing to a person and have soft labels that we align with raw sensor data..\nInstead, Do Time-series Instead: run proxy self-supervised studies into the future\u0026mdash;pretraining on a shit tone of sensor data just as timeseries regressing into the future without any labels.\nThen, take the resulting latents and do FTing on specific tasks with your minimal labeled data.\n\u0026ldquo;arrow of time\u0026rdquo;?\nApproaches Best method: spacial masking + FT downstream; the system also generalizes well even with missing modalities.\nable to achieve good models via multimodal signals was able to handle missing data\u0026hellip; by skipping them is more data efficient by doing masked pret training requires no pre-processing 
Give up and use an LLM main problem: tokenized is very bad at splitting up numbers.\nYou can therefore come up with paired architectures with some modality encoder\u0026mdash;taking the data and encode it using a SoTA EKG encoder, for instance\u0026mdash;before passing the embeddings into the LLM.\nhttps://arxiv.org/abs/2309.16058\n","html":"\u003cp\u003eKey idea: multi-modality, when leveraged well, leads to faster convergence.\u003c/p\u003e\n\u003ch2 id=\"data-availability\"\u003eData Availability\u003c/h2\u003e\n\u003cp\u003eHealth and health sensing requires labels, but health signals require specialist knowledge + broader context to label.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etypical image labeling: 0.05/label\u003c/li\u003e\n\u003cli\u003emedical imaging: 4.00/label\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eEven if want to automate the study, we need to Kyntic style strap a thing to a person and have soft labels that we align with raw sensor data..\u003c/p\u003e\n\u003ch2 id=\"instead-do-time-series\"\u003eInstead, Do Time-series\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eInstead\u003c/strong\u003e: run proxy self-supervised studies into the future\u0026mdash;pretraining on a shit tone of sensor data just as timeseries regressing into the future without any labels.\u003c/p\u003e\n\u003cp\u003eThen, take the resulting latents and do FTing on specific tasks with your minimal labeled data.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;arrow of time\u0026rdquo;?\u003c/p\u003e\n\u003ch2 id=\"approaches\"\u003eApproaches\u003c/h2\u003e\n\u003cp\u003eBest method: \u003cstrong\u003espacial masking\u003c/strong\u003e + \u003cstrong\u003eFT downstream\u003c/strong\u003e; the system also generalizes well even with missing modalities.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eable to achieve good models via multimodal signals\u003c/li\u003e\n\u003cli\u003ewas able to handle missing data\u0026hellip; by skipping them\u003c/li\u003e\n\u003cli\u003eis more data 
efficient by doing masked pret training\u003c/li\u003e\n\u003cli\u003erequires no pre-processing\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"give-up-and-use-an-llm\"\u003eGive up and use an LLM\u003c/h2\u003e\n\u003cp\u003emain problem: \u003cstrong\u003etokenized is very bad at splitting up numbers\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eYou can therefore come up with paired architectures with some modality encoder\u0026mdash;taking the data and encode it using a SoTA EKG encoder, for instance\u0026mdash;before passing the embeddings into the LLM.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://arxiv.org/abs/2309.16058\"\u003ehttps://arxiv.org/abs/2309.16058\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultimodal_ai_for_real_world_signals/","tags":null,"title":"Multimodal AI for Real-World Signals"},{"categories":null,"contents":"Its a general form of the combinations formula:\n\\begin{equation} {n \\choose k_1, k_2, \\dots, k_{n}} = \\frac{n!}{k_{1}! k_2! \\dots k_{n}!} \\end{equation}\n","html":"\u003cp\u003eIts a general form of the \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es formula:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n{n \\choose k_1, k_2, \\dots, k_{n}} = \\frac{n!}{k_{1}! k_2! 
\\dots k_{n}!}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultinomial_coefficient/","tags":null,"title":"multinomial coefficient"},{"categories":null,"contents":"\\begin{equation} B = \\qty[(x_1, y_1), \\dots, (x_{n}, y_{n})] \\end{equation}\nwhere the labels would be:\n\\begin{equation} C(b) = \\begin{cases} 0, if \\sum_{i}^{}y_{i} = 0 \\\\ 1, \\text{otherwise} \\end{cases} \\end{equation}\nand then we maxpool\nMILFormer MILFormer is a multiple-instance learning scheme which makes predictions over input patches whose output predictions are weighted as multi-distirbution.\n","html":"\u003cp\u003e\\begin{equation}\nB = \\qty[(x_1, y_1), \\dots, (x_{n}, y_{n})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the labels would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC(b) = \\begin{cases}\n0, if \\sum_{i}^{}y_{i} = 0 \\\\\n1, \\text{otherwise}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then we maxpool\u003c/p\u003e\n\u003ch2 id=\"milformer\"\u003eMILFormer\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#milformer\"\u003eMILFormer\u003c/a\u003e is a multiple-instance learning scheme which makes predictions over input patches whose output predictions are weighted as multi-distirbution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiple_instance_learning/","tags":null,"title":"Multiple Instance Learning"},{"categories":null,"contents":"The multiplicative identity allows another number to retain its identity after multiplying. Its \\(1\\) [for fields?].\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e allows another \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e to retain its identity after \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplying\u003c/a\u003e. 
Its \\(1\\) [for fields?].\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiplicative_identity/","tags":null,"title":"multiplicative identity"},{"categories":null,"contents":"TBD\n","html":"\u003cp\u003eTBD\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiplying/","tags":null,"title":"multiplying"},{"categories":null,"contents":"multiprocessing is the act of switching between multiple processes so fast that it appears multiple processes are running concurrently.\nOS schedules tasks each program gets a little time, then has to wait in a turn to continue executing base level syscalls that requires waiting will be moved off before finishing, and in the meantime others can wait. like file read.\nprogram A program is a script to be ran.\nprocess a process is an instance of a program. Every process has a unique identifier, each process is uniquely identified by a PID.\nsyscall get_pid will give you back the PID.\nopen file table open file table is a system wide for each file opening session, mentioning what the mode and cursor of the file is open, and the number of file descriptor tables pointing to it with a refcount.\nWhen we call close, the refcount decrements. When refcount=0, the file is deleted. This means, if you share a pipe, both parent and child has to close the pipe.\nread blocks until at least 1 byte is available, or until all write ends are closed.\n","html":"\u003cp\u003emultiprocessing is the act of switching between multiple \u003ca href=\"#process\"\u003eprocess\u003c/a\u003ees so fast that it appears multiple processes are running concurrently.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eOS \u003cem\u003eschedules\u003c/em\u003e tasks\u003c/li\u003e\n\u003cli\u003eeach program gets a little time, then has to wait in a turn to continue executing\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ebase level syscalls that requires waiting will be moved off before finishing, and in the meantime others can wait. 
like file read.\u003c/p\u003e\n\u003ch2 id=\"program\"\u003eprogram\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#program\"\u003eprogram\u003c/a\u003e is a script to be ran.\u003c/p\u003e\n\u003ch2 id=\"process\"\u003eprocess\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#process\"\u003eprocess\u003c/a\u003e is an instance of a \u003ca href=\"#program\"\u003eprogram\u003c/a\u003e. Every process has a unique identifier, each process is uniquely identified by a \u003ca href=\"#process\"\u003ePID\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003esyscall \u003ccode\u003eget_pid\u003c/code\u003e will give you back the PID.\u003c/p\u003e\n\u003ch3 id=\"open-file-table\"\u003eopen file table\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#open-file-table\"\u003eopen file table\u003c/a\u003e is a system wide for each file opening session, mentioning what the mode and cursor of the file is open, and the number of \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e tables pointing to it with a \u003ccode\u003erefcount\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eWhen we call close, the \u003ccode\u003erefcount\u003c/code\u003e decrements. When \u003ccode\u003erefcount=0\u003c/code\u003e, the file is deleted. 
This means, if you share a \u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e, both parent and child has to close the \u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eread blocks\u003c/strong\u003e until at least 1 byte is available, or until all write ends are closed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiprocessing/","tags":null,"title":"multiprocessing"},{"categories":null,"contents":" we can have concurrency within a single process\u0026mdash;each running a single function We will solve problems:\nnever race condition never deadlock thread you can spawn a thread using the thread() can even pass function parameters threads share all virtual address space: bugs can arise when multiple threads modify the same thing at the same time\u0026mdash;each thread has access to a small chunk of the stack threads are actually the unit of concurrency: the OS actually chooses threads to run // now the thread can execute at any time: once a thread is made, it will run in any order thread myThread(function_to_run, arg1, arg2, ...); // threads run AS SOON AS SPAWENED: so We can wait for a thread:\nmyThread.join() You can also start a bunch on a loop:\nthread threads[3]; for (thread\u0026amp; cf : threads) { cf = thread(func, ...); } Importantly, unlike waitpid, we can\u0026rsquo;t join an arbitrary thread. We basically have to wait for all your threads to finish.\nDEBUGGING TRICK: adding a sleep call everywhere shouldn\u0026rsquo;t cause any problems; if it does, there\u0026rsquo;s a race condition.\npassing by reference threading doesn\u0026rsquo;t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.\nstatic void mythingref(int \u0026amp;pbr); thread(myfunc, ref(myint)); Remember: ref will SHARE MEMORY, and you have no control over when the thread runs. 
So once a pointer is passed all bets are off in terms of what values things take on.\nprocesses vs threads Processes Threads isolate virtual address spaces shares virtual address space to share info can run external programs can\u0026rsquo;t run external programs (execvp wipes) harder to coordinate tasks within the same program easier to coordinate tasks within a program threads are really the main way to break down big tasks.\nrace condition undesirable behavior caused by arbitrary execution order. we typically solve them using mutexes.\nthread safe thread safe functions are ones whereby its designed to prevent against unexpected behavior during threading.\nwe want atomicity in the code: we want entire data viewing + modification operations to not be interrupted\u0026mdash;otherwise, you will generate race conditions.\nRecall: C++ statements themselves are not INHERENTLY autonomic.\nwe want to outline a \u0026ldquo;critical section\u0026rdquo; and ensure it doesn\u0026rsquo;t get ran more than once.\ncritical section A critical section is a region of code which should only be executed by one thread at a time. 
We want to keep this section as small as possible to preserve performance.\nwe want to organize it to be as small as we possibly can we want to move the critical section in terms of expressions; so if you have a loop you should put the loop in the outer area, and do the checking + break within if our critical sections are not small, we would have little benefits to multithreading\nmutex it would be nice if a critical section can only be executed once; a mutex can be shared across threads, but can only be \u0026ldquo;owned\u0026rdquo; by a single thread at once.\nmutex tmp; tmp.lock(); tmp.unlock(); importantly, if multiple threads are waiting on a mutex, the next thread that\u0026rsquo;s going to get the mutex\nwhen there are multiple threads writing to a value when there is a thread writing and one or more threads reading if you are no writes, you don\u0026rsquo;t need a mutex when dealing with mutex, beware of deadlock\nSleep call can happen by putting a sleep call in certain places.\nimplementation Things it needs to do:\ntrack whether or not the mutex is locked/unlocked track which thread is the owner of the lock threads that want to get this lock int locked = 0; Queue blocked_queue; void Lock::Lock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 IntrGuard grd; if (!locked) { // if our thread is not locked, just lock it locked = 1; } else { // if our thread is locked, we need to prevent our current // thread from going to the ready queue, and push it to the current thread blocked_queue.push(CURRENT_THREAD); // remember this isn\u0026#39;t an issue even if IntrGuard // didn\u0026#39;t yet go out of scope; because it will either // land on a context_switch which will enable interrupts for you // or land on the beginning of a threadfunc helper, which // is also going to enable interrupts for you // nicely, the interrupts are here are *off* as required because switching // to 
another thread always will result in reenabling (either by new thread, // by timer handler, or by IntrGuard) mark_block_and_call_schedule(CURRENT_THREAD); } } void Lock::Unlock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 IntrGuard grd; // if our thread is locked and nobody is waiting for it if (q.empty()) { locked = 0; } else { unblock_thread(q.pop()); // we do not switch to the unblocked thread, just add it to the // ready queue. we are entrusting the scheduler to start this thread // whenever we feel right } } IntrGuard IntrGuard will turn off interrupts for the duration of its scope; when it goes out of scope, it will restore the state of the interrupt before (whether on or off). So, implementing the mutex code above without InterGuard:\nint locked = 0; Queue blocked_queue; void Lock::Lock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 bool interrupsEnabled = intr_enabled(); // only disable interrupts if they are currently // on if (interrupsEnabled) { intr_enable(false); } if (!locked) { // if our thread is not locked, just lock it locked = 1; } else { // if our thread is locked, we need to prevent our current // thread from going to the ready queue, and push it to the current thread blocked_queue.push(CURRENT_THREAD); mark_block_and_call_schedule(CURRENT_THREAD); } // if interrupts was on, turn them on again. 
// otherwise, do nothing if (interrupsEnabled) { intr_enable(true); } } void Lock::Unlock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 bool interrupsEnabled = intr_enabled(); // only disable interrupts if they are currently // on if (interrupsEnabled) { intr_enable(false); } // if our thread is locked and nobody is waiting for it if (q.empty()) { locked = 0; } else { unblock_thread(q.pop()); // we do not switch to the unblocked thread, just add it to the // ready queue. we are entrusting the scheduler to start this thread // whenever we feel right } if (interrupsEnabled) { intr_enable(true); } } ","html":"\u003cul\u003e\n\u003cli\u003ewe can have concurrency \u003cstrong\u003ewithin a single process\u003c/strong\u003e\u0026mdash;each running a single function\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe will solve problems:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003enever \u003ca href=\"#race-condition\"\u003erace condition\u003c/a\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enever \u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"thread\"\u003ethread\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eyou can spawn a thread using the \u003cstrong\u003ethread()\u003c/strong\u003e can even pass function parameters\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ethreads share all virtual address space\u003c/strong\u003e: bugs can arise when multiple threads modify the same thing at the same time\u0026mdash;each thread has access to a small chunk of the stack\u003c/li\u003e\n\u003cli\u003ethreads are actually the unit of concurrency: the OS actually chooses threads to run\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// now the thread can execute at any time: once a thread is made, it will run in any order\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emyThread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunction_to_run\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earg1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earg2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// threads run AS SOON AS SPAWENED: so\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can wait for a thread:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emyThread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejoin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can also start a bunch on a loop:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreads\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecf\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreads\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ecf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eImportantly, unlike \u003ca href=\"/posts/kbhfork/#waitpid\"\u003ewaitpid\u003c/a\u003e, we can\u0026rsquo;t join an arbitrary thread. We basically have to wait for all your threads to finish.\u003c/p\u003e\n\u003cp\u003eDEBUGGING TRICK: \u003cstrong\u003e\u003cstrong\u003eadding a sleep call everywhere shouldn\u0026rsquo;t cause any problems\u003c/strong\u003e\u003c/strong\u003e; if it does, there\u0026rsquo;s a race condition.\u003c/p\u003e\n\u003ch3 id=\"passing-by-reference\"\u003epassing by reference\u003c/h3\u003e\n\u003cp\u003ethreading doesn\u0026rsquo;t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emythingref\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epbr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emyfunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eref\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emyint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRemember: ref will \u003cstrong\u003e\u003cstrong\u003eSHARE MEMORY\u003c/strong\u003e\u003c/strong\u003e, and you have no control over when the thread runs. So once a pointer is passed all bets are off in terms of what values things take on.\u003c/p\u003e\n\u003ch2 id=\"process--kbhmultiprocessing-dot-md--es-vs-thread--orga50f688--s\"\u003e\u003ca href=\"/posts/kbhmultiprocessing/#process\"\u003eprocess\u003c/a\u003ees vs \u003ca href=\"#thread\"\u003ethread\u003c/a\u003es\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eProcesses\u003c/th\u003e\n\u003cth\u003eThreads\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eisolate virtual address spaces\u003c/td\u003e\n\u003ctd\u003eshares virtual address space to share info\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ecan run external programs\u003c/td\u003e\n\u003ctd\u003ecan\u0026rsquo;t run external programs (execvp wipes)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eharder to coordinate tasks within the same program\u003c/td\u003e\n\u003ctd\u003eeasier to coordinate tasks within a program\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003ca href=\"#thread\"\u003ethread\u003c/a\u003es are really the main way to break down big tasks.\u003c/p\u003e\n\u003ch2 id=\"race-condition\"\u003erace condition\u003c/h2\u003e\n\u003cp\u003eundesirable behavior caused by arbitrary execution order. 
we typically solve them using \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003ees.\u003c/p\u003e\n\u003ch3 id=\"thread-safe\"\u003ethread safe\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#thread-safe\"\u003ethread safe\u003c/a\u003e functions are ones whereby its designed to prevent against unexpected behavior during threading.\u003c/p\u003e\n\u003cp\u003ewe want \u003ca href=\"/posts/kbhdistributed_algorithum/#atomicity\"\u003eatomicity\u003c/a\u003e in the code: we want entire data viewing + modification operations to not be interrupted\u0026mdash;otherwise, you will generate race conditions.\u003c/p\u003e\n\u003cp\u003eRecall: \u003cstrong\u003e\u003cstrong\u003eC++ statements themselves are not INHERENTLY autonomic\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003ewe want to outline a \u0026ldquo;critical section\u0026rdquo; and ensure it doesn\u0026rsquo;t get ran more than once.\u003c/p\u003e\n\u003ch3 id=\"critical-section\"\u003ecritical section\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#critical-section\"\u003ecritical section\u003c/a\u003e is a region of code which should only be executed by one thread at a time. 
We want to keep this section as small as possible to preserve performance.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe want to organize it to be as small as we possibly can\u003c/li\u003e\n\u003cli\u003ewe want to move the critical section in terms of expressions; so if you have a loop you should put the loop in the outer area, and do the checking + break within\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eif our \u003ca href=\"#critical-section\"\u003ecritical section\u003c/a\u003es are not small, we would have little benefits to multithreading\u003c/p\u003e\n\u003ch2 id=\"mutex\"\u003emutex\u003c/h2\u003e\n\u003cp\u003eit would be nice if a \u003ca href=\"#critical-section\"\u003ecritical section\u003c/a\u003e can only be executed once; a \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003e can be shared across threads, but can only be \u0026ldquo;owned\u0026rdquo; by a single thread at once.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eimportantly, if multiple \u003ca href=\"#thread\"\u003ethread\u003c/a\u003es are waiting on a mutex, the next thread that\u0026rsquo;s going to get the mutex\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhen there are multiple threads \u003cstrong\u003ewriting\u003c/strong\u003e to a value\u003c/li\u003e\n\u003cli\u003ewhen there is a thread \u003cstrong\u003ewriting\u003c/strong\u003e and one or more threads \u003cstrong\u003ereading\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eif you are no writes, you don\u0026rsquo;t need a mutex\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewhen dealing with \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003e, beware of \u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eSleep call can happen by putting a sleep call in certain places.\u003c/p\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003cp\u003eThings it needs to do:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etrack whether or not the mutex is locked/unlocked\u003c/li\u003e\n\u003cli\u003etrack which thread is the owner of the lock\u003c/li\u003e\n\u003cli\u003ethreads that want to get this lock\u003c/li\u003e\n\u003c/ol\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e 
\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eQueue\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eIntrGuard\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egrd\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is not locked, just lock it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked, we need to prevent our current\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// thread from going to the ready queue, and push it to the current thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// remember this isn\u0026#39;t an issue even if IntrGuard\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// didn\u0026#39;t yet go out of scope; because it will either\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// land on a context_switch which will enable interrupts for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// or land on the beginning of a threadfunc helper, which\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// is also going to enable interrupts for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// nicely, the interrupts are here are *off* as required because switching\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// to another thread always will result in reenabling (either by new thread,\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// by timer handler, or by IntrGuard)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emark_block_and_call_schedule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eUnlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eIntrGuard\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egrd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked and nobody is waiting for it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eempty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunblock_thread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e());\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we do not switch to the unblocked thread, just add it to the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// ready queue. 
we are entrusting the scheduler to start this thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// whenever we feel right\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"intrguard\"\u003eIntrGuard\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#intrguard\"\u003eIntrGuard\u003c/a\u003e will turn off interrupts for the duration of its scope; when it goes out of scope, it will \u003cstrong\u003erestore the state of the interrupt before\u003c/strong\u003e (whether on or off). 
So, implementing the \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003e code above \u003cstrong\u003ewithout\u003c/strong\u003e InterGuard:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eQueue\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as 
between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// only disable interrupts if they are currently\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// on\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efalse\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is not locked, just lock it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked, we need to prevent our current\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// thread from going to the ready 
queue, and push it to the current thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emark_block_and_call_schedule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if interrupts was on, turn them on again.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// otherwise, do nothing\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eUnlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// only disable interrupts if they are currently\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// on\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75715e\"\u003e// if our thread is locked and nobody is waiting for it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eempty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunblock_thread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e());\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we do not switch to the unblocked thread, just add it to the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// ready queue. we are entrusting the scheduler to start this thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// whenever we feel right\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhmultithreading/","tags":null,"title":"multithreading"},{"categories":null,"contents":"mutual information a measure of the dependence of two random 
variables in information theory. Applications include collocation extraction, which would require finding how two words co-occur (which means one would contribute much less entropy than the other.)\nconstituents \\(X, Y\\) random variables \\(D_{KL}\\) KL Divergence function \\(P_{(X,Y)}\\) the joint distribution of \\(X,Y\\) \\(P_{X}, P_{Y}\\) the marginal distributions of \\(X,Y\\) requirements mutual information is defined as\n\\begin{equation} I(X ; Y) = D_{KL}(P_{ (X, Y) } | P_{X} \\otimes P_{Y}) \\end{equation}\n\u0026ldquo;mutual information between \\(X\\) and \\(Y\\) is the additional information contributed by the \u0026quot;\nadditional information ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmutual_information/\"\u003emutual information\u003c/a\u003e a measure of the dependence of two \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es in \u003ca href=\"/posts/kbhinformation_theory/\"\u003einformation theory\u003c/a\u003e. Applications include \u003ca href=\"/posts/kbhcollocation_extractio/\"\u003ecollocation extraction\u003c/a\u003e, which would require finding how two words co-occur (which means one would contribute much less entropy than the other.)\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X, Y\\) \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\\(D_{KL}\\) \u003ca href=\"/posts/kbhkl_divergence/\"\u003eKL Divergence function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(P_{(X,Y)}\\) the joint distribution of \\(X,Y\\)\u003c/li\u003e\n\u003cli\u003e\\(P_{X}, P_{Y}\\) the marginal distributions of \\(X,Y\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmutual_information/\"\u003emutual information\u003c/a\u003e is defined as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(X ; Y) = D_{KL}(P_{ (X, Y) } 
| P_{X} \\otimes P_{Y})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhmutual_information/\"\u003emutual information\u003c/a\u003e between \\(X\\) and \\(Y\\) is the additional information contributed by the \u0026quot;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-05_23-07-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmutual_information/","tags":null,"title":"mutual information"},{"categories":null,"contents":"probability of \u0026ldquo;or\u0026rdquo;\nIf its not possible for two events to happen at the same time, they are called mutually exclusive:\n\\begin{equation} P(E\\ or\\ F) = P(E)+P(F) - P(E \\cap F) \\end{equation}\nThis is called the inclusion exclusion principle. This is what motivates inclusion exclusion counting.\nGeneral inclusion exclusion principle Its scary. Think about this:\nWe basically need to alternate between adding and subtracting. (i.e.: in our case here, we add all the odd-group pairs (for P(x) and P(xyz)), we subtract the even-number pairs (for p(xy))).\nAnd so:\n\\begin{equation} P(E_1\\ or\\ \\dots\\ or\\ E_{n}) = \\sum_{r=1}^{n} (-1)^{r+1} Y_{r} \\end{equation}\nwhereby, \\(Y_{j}\\) is the sum of \\(P(x_n, \u0026hellip; x_{j})\\) for combinations of \\(j\\) events.\nTry not to do this.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \u0026ldquo;or\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eIf its not possible for two events to happen at the same time, they are called \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E\\ or\\ F) = P(E)+P(F) - P(E \\cap F)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is called the \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003einclusion exclusion principle\u003c/a\u003e. 
This is what motivates \u003ca href=\"/posts/kbhsum_rule_of_counting/\"\u003einclusion exclusion counting\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"general-inclusion-exclusion-principle--kbhmutually-exclusive-dot-md\"\u003eGeneral \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003einclusion exclusion principle\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eIts scary. Think about this:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_15-56-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe basically need to alternate between adding and subtracting. (i.e.: in our case here, we add all the odd-group pairs (for P(x) and P(xyz)), we subtract the even-number pairs (for p(xy))).\u003c/p\u003e\n\u003cp\u003eAnd so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E_1\\ or\\ \\dots\\ or\\ E_{n}) = \\sum_{r=1}^{n} (-1)^{r+1} Y_{r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, \\(Y_{j}\\) is the sum of \\(P(x_n, \u0026hellip; x_{j})\\) for \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es of \\(j\\) events.\u003c/p\u003e\n\u003cp\u003eTry not to do this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmutually_exclusive/","tags":null,"title":"mutually exclusive"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmy_day/","tags":null,"title":"My Day"},{"categories":null,"contents":"Main goals: assign a probability of each sequence of words existing:\n\\begin{equation} P(W) = P(w_1, \\dots, w_{n}) \\end{equation}\nclosely related is the NLG formulation of predicting an upcoming word:\n\\begin{equation} P(w_5|w_1, \\dots, w_{n}) \\end{equation}\neither of these we call a \u0026ldquo;grammar\u0026rdquo;, or \u0026ldquo;Language Model\u0026rdquo;.\nChain Rule Language Modeling Recall probability chain rule. 
Now, the probability of a sequence like:\n\\begin{equation} P(its\\ water\\ is\\ so\\ transparent) \\end{equation}\ngives:\n\\begin{equation} P(its) \\times P(water|its) \\times P(is | its\\ water) \\dots \\end{equation}\nThat is:\n\\begin{equation} P(w_1 \\dots w_{n}) = \\prod_{i}P(w_{i} | w_1 \\dots w_{i-1}) \\end{equation}\nMarkov Assumption Because we can\u0026rsquo;t make conditional counts over all words all the time, we make an assumption: the probability of the current word is the probability of the current word conditioned on the probability of the last \\(k\\) words.\n\\begin{equation} P(w_1, \\dots, w_{n}) \\approx \\prod_{i}P(w_{i} | w_{i-k} \\dots w_{i-1}) \\end{equation}\nUnigrams The simplest Markov Assumption is unigrams, which will be word salad generation because it has no understanding of language structure.\nNaive Bays Language Modeling You can consider each class in Naive Bayes \\(P(word | c)\\) as a language model.\nSo:\n\\begin{equation} P(sentence|c) = \\prox_{i}P(word_{i}|c) \\end{equation}\nEach class is a separate class-conditioned language model. So, we just want to compute the probability of each sentence, and classify the sentence based on the higher probability result.\nLimitations In general, n gram models are limited because they don\u0026rsquo;t consider long distance dependencies which are present in English.\nEstimating N-Grams Many counts are results of\u0026hellip;\nworld (\u0026ldquo;people want chinese food more often, so want+Chinese appears more\u0026rdquo;) grammar (\u0026ldquo;want+want is less likely\u0026rdquo;) MLE \\begin{equation} P(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})}{C(w_{i-1})} \\end{equation}\nMAP, i.e. Laplace Smoothing \\begin{equation} P(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})+1}{C(w_{i-1})+V} \\end{equation}\nwe have to add \\(V\\) on the denominator because every word could possibly follow \\(w_{i-1}\\). 
Note that as \\(N\\) increases we actually still add \\(V\\) because we are predicting at each time a single word (just conditioned on more words), so if we are smoothing output we are only adding \\(V\\) extra counts.\nIMPORTANT NOTE THOUGH: this is typically not used for N-Grams (because there are simply so many OOS sequences). Instead, its more frequently used in other cases such as Naive Bayes for Text Classification.\nLog Probs In practice, we keep probability as log probabilities after we computed them.\nN-Gram Models Google n-gram models, SRILM\nBackoff Use trigrams if high probability evidence is found, otherwise bigrams or unigrams\nStupid Backoff give the MLE if the conditioning sequence has a non-zero count otherwise, start backing off, recursively calculating the probability of the current word given the last n-1-gram, multplied by a discount factor if we end up with a unigram, just give the unigram probability This DOES NOT PRODUCE A PROBABILITY as it is not normalized. Instead of being probabilites, we consider them \u0026ldquo;scores\u0026rdquo;.\nInterpolation In practice, Interpolation works better. Interpolation smoothes the probability between unigram, bigram, and trigrams.\nMostly simply, we mix them with some factors \\(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}\\), where \\(\\sum_{i} \\lambda_{i} = 1\\). 
This makes a weighted average over probabilities:\n\\begin{equation} P(comb) = \\lambda_{1} P(uni) + \\lambda_{2} P(bi)+ \\lambda_{3} P(tri) \\end{equation}\nlambdas could also be a function of the previous tokens.\nWe sometimes obtain this with a disjoint dataset from the original training set, whereby we train some ngrams from the original dataset, and then identify \\(\\lambda\\) which maximises the probabilities.\nOOV Words we sometimes replace the lowest likelyhood few words with \u0026lt;UNK\u0026gt;, and train models such that we can have an open vocabulary: whenever we encounter unknown words, we replace it with \u0026lt;UNK\u0026gt;\nScaling Up Strategies to make LMing with Ngrams more efficient\npruning: only store ngrams of top k use tries (suffix trees, etc.) approximations: bloom filter storing indicies ","html":"\u003cp\u003eMain goals: assign a probability of each sequence of words existing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(W) = P(w_1, \\dots, w_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eclosely related is the NLG formulation of predicting an upcoming word:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_5|w_1, \\dots, w_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeither of these we call a \u0026ldquo;grammar\u0026rdquo;, or \u0026ldquo;\u003ca href=\"/posts/kbhnlp/#language-model\"\u003eLanguage Model\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"chain-rule-language-modeling\"\u003eChain Rule Language Modeling\u003c/h2\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e. 
Now, the probability of a sequence like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(its\\ water\\ is\\ so\\ transparent)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(its) \\times P(water|its) \\times P(is | its\\ water) \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_1 \\dots w_{n}) = \\prod_{i}P(w_{i} | w_1 \\dots w_{i-1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"markov-assumption\"\u003eMarkov Assumption\u003c/h2\u003e\n\u003cp\u003eBecause we can\u0026rsquo;t make conditional counts over all words all the time, we make an assumption: the probability of the current word is the probability of the current word conditioned on the probability of the last \\(k\\) words.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_1, \\dots, w_{n}) \\approx \\prod_{i}P(w_{i} | w_{i-k} \\dots w_{i-1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"unigrams\"\u003eUnigrams\u003c/h2\u003e\n\u003cp\u003eThe simplest \u003ca href=\"#markov-assumption\"\u003eMarkov Assumption\u003c/a\u003e is unigrams, which will be word salad generation because it has no understanding of language structure.\u003c/p\u003e\n\u003ch3 id=\"naive-bays-language-modeling\"\u003eNaive Bays Language Modeling\u003c/h3\u003e\n\u003cp\u003eYou can consider each class in Naive Bayes \\(P(word | c)\\) as a language model.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(sentence|c) = \\prox_{i}P(word_{i}|c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach class is a separate class-conditioned language model. 
So, we just want to compute the probability of each sentence, and classify the sentence based on the higher probability result.\u003c/p\u003e\n\u003ch2 id=\"limitations\"\u003eLimitations\u003c/h2\u003e\n\u003cp\u003eIn general, n gram models are limited because they don\u0026rsquo;t consider long distance dependencies which are present in English.\u003c/p\u003e\n\u003ch2 id=\"estimating-n-grams--kbhn-grams-dot-md\"\u003eEstimating \u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eMany counts are results of\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eworld (\u0026ldquo;people want chinese food more often, so want+Chinese appears more\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003egrammar (\u0026ldquo;want+want is less likely\u0026rdquo;)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"mle--kbhmaximum-likelihood-parameter-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})}{C(w_{i-1})}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"map-i-dot-e-dot-laplace-smoothing--kbhbaysian-parameter-learning-dot-md\"\u003eMAP, i.e. \u003ca href=\"/posts/kbhbaysian_parameter_learning/#laplace-smoothing\"\u003eLaplace Smoothing\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})+1}{C(w_{i-1})+V}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have to add \\(V\\) on the denominator because every word could possibly follow \\(w_{i-1}\\). 
Note that as \\(N\\) increases we actually still add \\(V\\) because we are predicting at each time a \u003cstrong\u003esingle word\u003c/strong\u003e (just conditioned on more words), so if we are smoothing output we are only adding \\(V\\) extra counts.\u003c/p\u003e\n\u003cp\u003eIMPORTANT NOTE THOUGH: this is typically not used for \u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e (because there are simply so many OOS sequences). Instead, its more frequently used in other cases such as \u003ca href=\"/posts/kbhbag_of_words/#naive-bayes-for-text-classification\"\u003eNaive Bayes for Text Classification\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"log-probs\"\u003eLog Probs\u003c/h3\u003e\n\u003cp\u003eIn practice, we keep probability as log probabilities after we computed them.\u003c/p\u003e\n\u003ch3 id=\"n-gram-models\"\u003eN-Gram Models\u003c/h3\u003e\n\u003cp\u003eGoogle n-gram models, SRILM\u003c/p\u003e\n\u003ch3 id=\"backoff\"\u003eBackoff\u003c/h3\u003e\n\u003cp\u003eUse trigrams if high probability evidence is found, otherwise bigrams or unigrams\u003c/p\u003e\n\u003ch4 id=\"stupid-backoff\"\u003eStupid Backoff\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003egive the \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e if the conditioning sequence has a non-zero count\u003c/li\u003e\n\u003cli\u003eotherwise, start backing off, recursively calculating the probability of the current word given the last n-1-gram, multplied by a discount factor\u003c/li\u003e\n\u003cli\u003eif we end up with a unigram, just give the unigram probability\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis \u003cstrong\u003eDOES NOT PRODUCE A PROBABILITY\u003c/strong\u003e as it is not normalized. 
Instead of being probabilites, we consider them \u0026ldquo;scores\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"interpolation\"\u003eInterpolation\u003c/h3\u003e\n\u003cp\u003eIn practice, \u003ca href=\"#interpolation\"\u003eInterpolation\u003c/a\u003e works better. \u003ca href=\"#interpolation\"\u003eInterpolation\u003c/a\u003e smoothes the probability between unigram, bigram, and trigrams.\u003c/p\u003e\n\u003cp\u003eMostly simply, we mix them with some factors \\(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}\\), where \\(\\sum_{i} \\lambda_{i} = 1\\). This makes a weighted average over probabilities:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(comb) = \\lambda_{1} P(uni) + \\lambda_{2} P(bi)+ \\lambda_{3} P(tri)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elambdas could also be a function of the previous tokens.\u003c/p\u003e\n\u003cp\u003eWe sometimes obtain this with a disjoint dataset from the original training set, whereby we train some ngrams from the original dataset, and then identify \\(\\lambda\\) which maximises the probabilities.\u003c/p\u003e\n\u003ch3 id=\"oov-words\"\u003eOOV Words\u003c/h3\u003e\n\u003cp\u003ewe sometimes replace the lowest likelyhood few words with \u003ccode\u003e\u0026lt;UNK\u0026gt;\u003c/code\u003e, and train models such that we can have an \u003ca href=\"#oov-words\"\u003eopen vocabulary\u003c/a\u003e: whenever we encounter unknown words, we replace it with \u003ccode\u003e\u0026lt;UNK\u0026gt;\u003c/code\u003e\u003c/p\u003e\n\u003ch2 id=\"scaling-up\"\u003eScaling Up\u003c/h2\u003e\n\u003cp\u003eStrategies to make LMing with Ngrams more efficient\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003epruning: only store ngrams of top k\u003c/li\u003e\n\u003cli\u003euse tries (suffix trees, etc.)\u003c/li\u003e\n\u003cli\u003eapproximations: bloom filter\u003c/li\u003e\n\u003cli\u003estoring 
indicies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhn_grams/","tags":null,"title":"N-Grams"},{"categories":null,"contents":"NACC is a large, longitudinal dataset for neurodegentitive disease as a project in collaboration with Dr. Alyssa Weakley at UC Davis.\nDr. Alyssa Weakley is interested in\nEarly Cognitive Change Mild Cognitive Impairment (MCI) \u0026ldquo;How early can we detect, using NACC, change?\u0026rdquo;\ndataset construction Participants are given a battery of mental capacity tests, these values are tracked over time There are also family member questionnaire Neuroimaging and biomarker data Other things tracked in the data\u0026mdash;\nAmyloid levels of spinal fluid Detecting even earlier focus good to focus on specifically alzheimer\u0026rsquo;s type dementia (so, ignore things on lewy body disease) Using clinical diagnosis as the dependent variable, but good to see the autopsy results Items 3 and 7 are independent codes; if alzhimer\u0026rsquo;s is measured, MCI is not measured. visa versa.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnacc/\"\u003eNACC\u003c/a\u003e is a large, longitudinal dataset for neurodegentitive disease as a project in collaboration with \u003ca href=\"\"\u003eDr. Alyssa Weakley\u003c/a\u003e at \u003ca href=\"\"\u003eUC Davis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"\"\u003eDr. 
Alyssa Weakley\u003c/a\u003e is interested in\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eEarly Cognitive Change\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eMild Cognitive Impairment (MCI)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;How early can we detect, using \u003ca href=\"/posts/kbhnacc/\"\u003eNACC\u003c/a\u003e, change?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"dataset-construction\"\u003edataset construction\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eParticipants are given a battery of mental capacity tests, these values are tracked over time\u003c/li\u003e\n\u003cli\u003eThere are also family member questionnaire\u003c/li\u003e\n\u003cli\u003eNeuroimaging and biomarker data\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOther things tracked in the data\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAmyloid levels of spinal fluid\u003c/li\u003e\n\u003cli\u003eDetecting even earlier\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"focus\"\u003efocus\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003egood to focus on specifically \u003cem\u003ealzheimer\u0026rsquo;s type dementia\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e(so, ignore things on lewy body disease)\u003c/li\u003e\n\u003cli\u003eUsing clinical diagnosis as the dependent variable, but good to see the autopsy results\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eItems 3 and 7 are independent codes; if alzhimer\u0026rsquo;s is measured, MCI is not measured. visa versa.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnacc/","tags":null,"title":"NACC"},{"categories":null,"contents":"Naive Bayes is a special class of Baysian Network inference problem which follows a specific structure used to solve classification problems.\nThe Naive Bayes classifier is a Baysian Network of the shape:\n(Why is this backwards(ish)? 
Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. Therefore, the observations are a RESULT of the class happening.)\nWe consider, naively, \\(o_{1:n}\\) are all conditionally independent on \\(c\\). From this graph, we can therefore use the probability chain rule + conditional probability to write that:\n\\begin{equation} P(c, o_{1:n}) = P( c) \\prod_{i=1}^{n} P(o_{i} | c) \\end{equation}\nso, to actually compute this, we don\u0026rsquo;t want to bother going over all the multiplications because of underflow, we write:\n\\begin{equation} \\hat{y} = \\arg\\max_{y} \\log \\hat{P}(y) + \\sum_{i=1}^{m} \\log \\hat{P}(x|y) \\end{equation}\nbrute-force Bayes classifier \\begin{equation} \\arg\\max_{y} P(y|x) = \\arg\\max_{y} \\frac{P(x|y)P(y)}{P(x)} \\end{equation}\nbut because we are taking argmax, we can not normalize:\n\\begin{equation} \\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x|y)P(y) \\end{equation}\nthis only works if \\(x\\) is a single value (i.e. you have a one-feature classifier\nThis system has 6 parameters; they can be MLE for Bernouli from data, but you can also use Baysian Parameter Learning Method\ny = 0 y = 1 x1 = 0 theta0 theta2 x1 = 1 theta1 theta3 y = 0 y = 0 theta4 y = 1 theta5 (=1-theta4) to perform estiimation with MAP\n\\begin{equation} p(X=1| Y=0) = \\frac{\\text{examples where X=1, Y=0}}{\\text{examples where Y=0}} \\end{equation}\nwhith MLE with a Laplace prior:\n\\begin{equation} p(X=1| Y=0) = \\frac{\\text{(examples where X=1, Y=0)}+1}{\\text{(examples where Y=0)}+\\text{(nclass = 2)}} \\end{equation}\nWe can keep going; for instance, if you wave \\(x_1, x_2\\) two diffferent features:\n\\begin{equation} \\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x_1, x_2|y)P(y) \\end{equation}\nbut this requires us to have \\(2^{2}\\) and ultimately \\(2^{n}\\) parameters, which is exponential blowup. 
Hence, we need to treat the variables as\u0026mdash;naivly\u0026mdash;independent so we can multiply them. Hence:\nNaive Bayes assumption we assume independence between the input features. That is, we assume:\n\\begin{equation} P(x_1, \\dots, x_{n}|y) = \\prod_{i=1}^{n} P(X_{i}|y) \\end{equation}\ninference with Naive Bayes Recall the definition of inference, for our case here:\ngiven observations \\(o_{1:n}\\), we desire to know what\u0026rsquo;s the probability of \\(c\\) happening. That is, from conditional probability:\n\\begin{equation} P(c | o_{1:n}) = \\frac{P(c, o_{1:n})}{P(o_{1:n})} \\end{equation}\nNow, from above we have \\(P(c, o_{1:n})\\) already. To get the denominator, we invoke law of total probability to add up the probability of all observations occurring given all classes. That is:\n\\begin{equation} P(o_{1:n}) = \\sum_{c \\in C} P(c, o_{1:n}) \\end{equation}\nYou will note that this value \\(P(o_{1:n})\\) is actually constant as long as the network structure does not change. 
Therefore, we tend to write:\n\\begin{align} P(c | o_{1:n}) \u0026amp;= \\frac{P(c, o_{1:n})}{P(o_{1:n})} \\\\ \u0026amp;= \\kappa P(c, o_{1:n}) \\end{align}\nor, that:\n\\begin{equation} P(c|o_{1:n}) \\propto P(c, o_{1:n}) \\end{equation}\n\u0026ldquo;the probability of a class occurring given the inputs is proportional to the probability of that class occurring along with the inputs\u0026rdquo;\nMultiple believes \\begin{equation} P(A=a | R_1) \\propto P(R_1 | A=a) \\cdot P(A=a) \\end{equation}\nBut now\nMotivation: Bayes rule This will give us:\nHowever, what if we don\u0026rsquo;t want to use the law of total probability to add up \\(P(FB\u0026rsquo;)\\)?\nWe can actually write a relation that essentially reminds us that the fact that \\(P(FB\u0026rsquo;)\\) as not dependent on \\(TSF\\), so we can write:\n\\begin{equation} P(TSF^{1}|FB^{1}) \\porpto P(TSF^{1})P(FB^{1} | TSF^{1}) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e is a special class of \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e problem which follows a specific structure used to solve classification problems.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e classifier is a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e of the shape:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_13-15-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(Why is this backwards(ish)? Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. 
Therefore, the observations are a RESULT of the class happening.)\u003c/p\u003e\n\u003cp\u003eWe consider, \u003cstrong\u003enaively\u003c/strong\u003e, \\(o_{1:n}\\) are all \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditionally independent\u003c/a\u003e on \\(c\\). From this graph, we can therefore use the \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e + \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e to write that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(c, o_{1:n}) = P( c) \\prod_{i=1}^{n} P(o_{i} | c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso, to actually compute this, we don\u0026rsquo;t want to bother going over all the multiplications because of underflow, we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{y} = \\arg\\max_{y} \\log \\hat{P}(y) + \\sum_{i=1}^{m} \\log \\hat{P}(x|y)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"brute-force-bayes-classifier\"\u003ebrute-force Bayes classifier\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{y} P(y|x) = \\arg\\max_{y} \\frac{P(x|y)P(y)}{P(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebut because we are taking argmax, we can not normalize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x|y)P(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis only works if \\(x\\) is a single \u003cstrong\u003evalue\u003c/strong\u003e (i.e. 
you have a one-feature classifier\u003c/p\u003e\n\u003cp\u003eThis system has 6 parameters; they can be \u003ca href=\"/posts/kbhbernoulli_random_variable/#mle-for-bernouli\"\u003eMLE for Bernouli\u003c/a\u003e from data, but you can also use \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/#method\"\u003eBaysian Parameter Learning Method\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003ey = 0\u003c/th\u003e\n\u003cth\u003ey = 1\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ex1 = 0\u003c/td\u003e\n\u003ctd\u003etheta0\u003c/td\u003e\n\u003ctd\u003etheta2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ex1 = 1\u003c/td\u003e\n\u003ctd\u003etheta1\u003c/td\u003e\n\u003ctd\u003etheta3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003ey = 0\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ey = 0\u003c/td\u003e\n\u003ctd\u003etheta4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ey = 1\u003c/td\u003e\n\u003ctd\u003etheta5 (=1-theta4)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eto perform estiimation with MAP\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(X=1| Y=0) = \\frac{\\text{examples where X=1, Y=0}}{\\text{examples where Y=0}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhith MLE with a \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/#map-for-bernoulli-and-binomial-p\"\u003eLaplace prior\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(X=1| Y=0) = \\frac{\\text{(examples where X=1, Y=0)}+1}{\\text{(examples where Y=0)}+\\text{(nclass = 2)}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can keep going; for instance, if you wave \\(x_1, x_2\\) two diffferent 
features:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x_1, x_2|y)P(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebut this requires us to have \\(2^{2}\\) and ultimately \\(2^{n}\\) parameters, which is exponential blowup. Hence, we need to treat the variables as\u0026mdash;naivly\u0026mdash;independent so we can multiply them. Hence:\u003c/p\u003e\n\u003ch2 id=\"naive-bayes--kbhnaive-bayes-dot-md--assumption\"\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e assumption\u003c/h2\u003e\n\u003cp\u003ewe assume \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e between the input features. That is, we assume:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_1, \\dots, x_{n}|y) = \\prod_{i=1}^{n} P(X_{i}|y)\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-17_16-35-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"inference--kbhinference-dot-md--with-naive-bayes--kbhnaive-bayes-dot-md\"\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e with \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eRecall the definition of \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e, for our case here:\u003c/p\u003e\n\u003cp\u003egiven observations \\(o_{1:n}\\), we desire to know what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \\(c\\) happening. That is, from \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(c | o_{1:n}) = \\frac{P(c, o_{1:n})}{P(o_{1:n})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, from above we have \\(P(c, o_{1:n})\\) already. 
To get the denominator, we invoke \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to add up the probability of all observations occurring given all classes. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(o_{1:n}) = \\sum_{c \\in C} P(c, o_{1:n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that this value \\(P(o_{1:n})\\) is actually constant as long as the network structure does not change. Therefore, we tend to write:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(c | o_{1:n}) \u0026amp;= \\frac{P(c, o_{1:n})}{P(o_{1:n})} \\\\\n\u0026amp;= \\kappa P(c, o_{1:n})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eor, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(c|o_{1:n}) \\propto P(c, o_{1:n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the probability of a class occurring given the inputs is proportional to the probability of that class occurring along with the inputs\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"multiple-believes\"\u003eMultiple believes\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nP(A=a | R_1) \\propto P(R_1 | A=a) \\cdot P(A=a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBut now\u003c/p\u003e\n\u003ch2 id=\"motivation-bayes-rule--kbhbayes-theorem-dot-md\"\u003eMotivation: \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_09-14-17_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis will give us:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_09-14-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eHowever, what if we don\u0026rsquo;t want to use the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to add up \\(P(FB\u0026rsquo;)\\)?\u003c/p\u003e\n\u003cp\u003eWe can actually write a relation that essentially reminds us that the fact that 
\\(P(FB\u0026rsquo;)\\) as not dependent on \\(TSF\\), so we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(TSF^{1}|FB^{1}) \\porpto P(TSF^{1})P(FB^{1} | TSF^{1})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnaive_bayes/","tags":null,"title":"Naive Bayes"},{"categories":null,"contents":"The National Banking Act unified Financial Markets.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnational_banking_act/\"\u003eNational Banking Act\u003c/a\u003e unified \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Markets\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnational_banking_act/","tags":null,"title":"National Banking Act"},{"categories":null,"contents":"natural numbers (\\(\\mathbb{N}\\)) are the counting numbers: 1,2,3,4\u0026hellip;.\nZero is not part of it; this produces interesting results like set of natural number under addition is not a group because there is no identity (tbh nor inverse (inverse of 1 is -1 which is not in the set.))\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003es (\\(\\mathbb{N}\\)) are the counting numbers: 1,2,3,4\u0026hellip;.\u003c/p\u003e\n\u003cp\u003eZero is not part of it; this produces interesting results like set of \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003e under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is not a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e because there is no \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e (tbh nor \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e (inverse of 1 is -1 which is not in the set.))\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnatural_numbers/","tags":null,"title":"natural number"},{"categories":null,"contents":"The nsm theory is a theory that\u0026hellip;\nclaims that there exists a set of semantic primes and logic 
universal across languages which is indefinable by other words within the language which, as a corollary, resolves the epistemological problem that if all words are defined by other words in the language there will (why?) be no connection to the real world the theory of NSM rests on\u0026hellip;\ntwo pillars of NSM theory existence of semantic primes The existence of semantic primes is codified more formally as the strong version of the Lexicalization Hypothesis.\nIssues with it: problems with semantic primes\nthe ability to perform the act of reductive paraphrase Issues with that: problems with reductive paraphrasing\noh cool! (Bohnemeyer 2004)\nAlso the fact that NSM is first found in English means that there is a certain anglo-centrism that comes with the language.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003ensm\u003c/a\u003e theory is a theory that\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eclaims that there exists a set of semantic primes and logic universal across languages which is indefinable by other words within the language\u003c/li\u003e\n\u003cli\u003ewhich, as a corollary, resolves the epistemological problem that if all words are defined by other words in the language there will (why?) 
be no connection to the real world\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethe theory of \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e rests on\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"two-pillars-of-nsm--kbhnatural-semantic-metalanguage-dot-md--theory\"\u003etwo pillars of \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e theory\u003c/h2\u003e\n\u003ch3 id=\"existence-of-semantic-primes--kbhsemantic-primes-dot-md\"\u003eexistence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic primes\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe existence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es is codified more formally as the strong version of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIssues with it: \u003ca href=\"/posts/kbhsemantic_primes/#problems-with-id-4e587814-bfd1-458e-ba5f-67882832107b-semantic-prime-s\"\u003eproblems with semantic primes\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"the-ability-to-perform-the-act-of-reductive-paraphrase--kbhreductive-paraphrase-dot-md\"\u003ethe ability to perform the act of \u003ca href=\"/posts/kbhreductive_paraphrase/\"\u003ereductive paraphrase\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eIssues with that: \u003ca href=\"/posts/kbhreductive_paraphrase/#problems-with-reductive-paraphrasing\"\u003eproblems with reductive paraphrasing\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"oh-cool\"\u003eoh cool!\u003c/h2\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eBohnemeyer 2004\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eAlso the fact that \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e is first found in English means that there is a certain anglo-centrism that comes with the 
language.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnatural_semantic_metalanguage/","tags":null,"title":"Natural Semantic Metalanguage"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnatural_transformations/","tags":null,"title":"natural transformation"},{"categories":null,"contents":"NBBO is the composite best bid/ask nationally, across all of the exchanges.\nIt allows the average person on the street to get a price for the asset. The government system is actually SLOWER from the fastest exchange: you can know, within microseconds, the difference.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnbbo/\"\u003eNBBO\u003c/a\u003e is the composite best bid/ask nationally, across all of the exchanges.\u003c/p\u003e\n\u003cp\u003eIt allows the average person on the street to get a price for the asset. The government system is actually SLOWER from the fastest exchange: you can know, within microseconds, the difference.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnbbo/","tags":null,"title":"NBBO"},{"categories":null,"contents":"Framework for subsurface exploration: a DARPA challenge that explores unknown underground environments.\nMain Problem: there is a high degree of uncertainty that comes from multiple different systems interacting:\nsensing environment command execution communication mission state health of systems and subsystems NeBula treats uncertainty between systems via a POMDP:\nconstruct a simulation of the tasks to coordinate robots solved using Double Progressive Widening AISR NeBula NeBula autonomy framework extrapolation on an active source seeking. 
For instance, combining with semantic understanding, we want to \u0026ldquo;find the red backpack\u0026rdquo;.\nmulti-model semantic understanding learning based mobility (vis a vi NeBula) semantic aware source seeking (\u0026ldquo;finding the thing there\u0026rdquo;) ","html":"\u003cp\u003eFramework for subsurface exploration: a DARPA challenge that explores unknown underground environments.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMain Problem\u003c/strong\u003e\u003c/strong\u003e: there is a high degree of uncertainty that comes from multiple different systems interacting:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esensing\u003c/li\u003e\n\u003cli\u003eenvironment\u003c/li\u003e\n\u003cli\u003ecommand execution\u003c/li\u003e\n\u003cli\u003ecommunication\u003c/li\u003e\n\u003cli\u003emission state\u003c/li\u003e\n\u003cli\u003ehealth of systems and subsystems\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e treats uncertainty between systems via a POMDP:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003econstruct a simulation of the tasks to coordinate robots\u003c/li\u003e\n\u003cli\u003esolved using \u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDouble Progressive Widening\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"aisr-nebula--kbhnebula-dot-md\"\u003eAISR \u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e autonomy framework extrapolation on an active source seeking. 
For instance, combining with semantic understanding, we want to \u0026ldquo;find the red backpack\u0026rdquo;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emulti-model semantic understanding\u003c/li\u003e\n\u003cli\u003elearning based mobility (vis a vi \u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003esemantic aware source seeking (\u0026ldquo;finding the thing there\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnebula/","tags":null,"title":"NeBula"},{"categories":null,"contents":"needfinding as a process of finding need.\nneedfinding with Rick Wallace needfinding with Rick Wallace. You don\u0026rsquo;t find out what they need, but you find what they need and how to fix it. (duh?)\nneedfinding with Cynthia Lee \u0026ldquo;Any time your curse, write down what just went wrong. How to fix it is the path to your next startup idea.\u0026rdquo;\nmonoculture workforce People who do the above will\u0026hellip; result in creating many products serving the segment of market matching software.\nHP Webcam story: the HP laptop face tracking software doesn\u0026rsquo;t follow Black faces as well as white ones.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e as a process of finding need.\u003c/p\u003e\n\u003ch2 id=\"needfinding--kbhneedfinding-dot-md--with-rick-wallace--kbhrick-wallace-dot-md\"\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e with \u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e with \u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e. You don\u0026rsquo;t find out what they need, but you find what they need and how to fix it. 
(duh?)\u003c/p\u003e\n\u003ch2 id=\"needfinding--kbhneedfinding-dot-md--with-cynthia-lee--kbhcynthia-lee-dot-md\"\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e with \u003ca href=\"/posts/kbhcynthia_lee/\"\u003eCynthia Lee\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Any time your curse, write down what just went wrong. How to fix it is the path to your next startup idea.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"monoculture-workforce\"\u003emonoculture workforce\u003c/h3\u003e\n\u003cp\u003ePeople who do the above will\u0026hellip; result in creating many products serving the segment of market matching software.\u003c/p\u003e\n\u003cp\u003eHP Webcam story: the HP laptop face tracking software doesn\u0026rsquo;t follow Black faces as well as white ones.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneedfinding/","tags":null,"title":"needfinding"},{"categories":null,"contents":"how many trials do you need to get r successes.\n\\begin{equation} P(X=n) = {{n-1} \\choose {r-1}} p^{r} (1-p)^{n-r} \\end{equation}\nif the chance of individual success is \\(p\\), what\u0026rsquo;s the probability that it takes \\(n\\) trials to get \\(r\\) successes.\n\\begin{equation} \\mathbb{E}[x] = \\frac{r}{p} \\end{equation}\n\\begin{equation} Var[x] = r \\frac{{1-p}}{r^{2}} \\end{equation}\n","html":"\u003cp\u003ehow many trials do you need to get r successes.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=n) = {{n-1} \\choose {r-1}} p^{r} (1-p)^{n-r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif the chance of individual success is \\(p\\), what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e that it takes \\(n\\) trials to get \\(r\\) successes.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[x] = \\frac{r}{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar[x] = r 
\\frac{{1-p}}{r^{2}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnegative_binomial_distribution/","tags":null,"title":"negative binomial distribution"},{"categories":null,"contents":"Neoclassical Economics is a view of economics that disregards the Keynsian Politics theory of the economy needs a minder started by Milton Freedman. It believes that free market economy will prevail.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economics\u003c/a\u003e is a view of economics that disregards the \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Politics\u003c/a\u003e theory of the economy needs a minder started by \u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e. It believes that free market economy will prevail.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneoclassical_economics/","tags":null,"title":"Neoclassical Economics"},{"categories":null,"contents":"while POS Tagging assigns tags to each word, NER Tagging tags the category of usage of multi-word spans.\nNER Tagging needs to label spans of text, which means that there is ambiguity in type.\nBIO Tagging BIO Tagging will tag each word: where \\(B\\) begins a span, \\(I\\), is inside a span, and \\(O\\) outside a span. 
So tags per word still apply, but we can extract span information as well.\n(job - gender + gender ) = job (captial - country + country) = captial\n","html":"\u003cp\u003ewhile \u003ca href=\"/posts/kbhpos_tagging/\"\u003ePOS Tagging\u003c/a\u003e assigns tags to each word, \u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e tags the category of usage of multi-word spans.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e needs to label \u003cstrong\u003espans\u003c/strong\u003e of text, which means that there is ambiguity in type.\u003c/p\u003e\n\u003ch2 id=\"bio-tagging\"\u003eBIO Tagging\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#bio-tagging\"\u003eBIO Tagging\u003c/a\u003e will tag each word: where \\(B\\) begins a span, \\(I\\), is inside a span, and \\(O\\) outside a span. So tags per word still apply, but we can extract span information as well.\u003c/p\u003e\n\u003cp\u003e(job - gender + gender ) = job\n(captial - country + country) = captial\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhner_tagging/","tags":null,"title":"NER Tagging"},{"categories":null,"contents":"Neural Network Unit A real-valued vector as input, each multiplied by some weights, summed, and squashed by some non-linear transform.\n\\begin{equation} z = w\\cdot x + b \\end{equation}\nand then, we will squash this using it as an \u0026ldquo;activation\u0026rdquo;\n\\begin{equation} y = \\sigmoid(z) \\end{equation}\nOne common activation is sigmoid. So, one common formulation would be:\n\\begin{equation} y = \\frac{1}{1+\\exp (- (w \\cdot x + b))} \\end{equation}\nTanh \\begin{equation} y(z) = \\frac{e^{z} - e^{-z}}{e^{z}+e^{-z}} \\end{equation}\nThis causes \u0026ldquo;saturation\u0026rdquo;\u0026mdash;meaning derivatives to be \\(0\\) at high values\nrelu \\begin{equation} y(z) = \\max(z,0) \\end{equation}\nmulti-layer networks Single computing units can\u0026rsquo;t compute XOR. 
Consider a perceptron:\n\\begin{equation} w_1x_1 + w_2x_2 + b = 0 \\end{equation}\nmeaning:\n\\begin{equation} x_2 = \\qty(\\frac{-w_1}{w_2})x_1 + \\qty(\\frac{-b}{w_2}) \\end{equation}\nmeaning, obtain a line that acts as a decision boundary\u0026mdash;we obtain 0 if the input is on one side of the line, and 1 if on the other. XOR, unfortunately, does not have a single linear boundary, its not linearly seperable.\nlogistic regression, for instance, can\u0026rsquo;t compute XOR because it is linear until squashing.\nfeed-forward network we can think about logistic regression as a one layer network, generalizing over sigmoid:\n\\begin{equation} \\text{softmax} = \\frac{\\exp(z_{i})}{\\sum_{j=1}^{k} \\exp(z_{j})} \\end{equation}\nand a multinomial logistic regression which uses the above. This is considered a \u0026ldquo;layer\u0026rdquo; in the feed-forward network.\nnotation:\n\\(W^{(j)}\\), weight matrix for layer \\(j\\) \\(b^{(j)}\\), the bias vector for layer \\(j\\) \\(g^{(j)}\\), the activation function at \\(j\\) and \\(z^{(i)}\\), the output at \\(i\\) (before activation function) \\(a^{(i)}\\), the activation at \\(i\\) instead of bias, we sometimes add a dummy node \\(a_{0}\\), we will force a value \\(1\\) at \\(a_{0}\\) and use its weights as bias.\nembeddings We use vector-space model to feed words into networks: converting each word first into embeddings, then feeding it into the network\nFix length problems:\nsentence embedding (mean of all the embeddings) element wise max of all the word embeddings to create sentence embedding use the max length + pad For Language Models, we can use a \u0026ldquo;sliding window\u0026rdquo;; that is:\n\\begin{equation} P(w_{t}|w_{1 \\dots t-1}) \\approx P(w_{t} | w_{t-N+1 \\dots t-1}) \\end{equation}\nTraining For every tuple \\((x,y)\\), we run a forward pass to obtain \\(\\hat{y}\\). 
Then, we run the network backwards to update the weights.\nA loss function calculates the negative of the probability of the correct labels.\nbackpropegation backprop\n","html":"\u003ch2 id=\"neural-network-unit\"\u003eNeural Network Unit\u003c/h2\u003e\n\u003cp\u003eA real-valued vector as input, each multiplied by some weights, summed, and squashed by some non-linear transform.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = w\\cdot x + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, we will squash this using it as an \u0026ldquo;activation\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\sigmoid(z)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOne common activation is \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e. So, one common formulation would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{1}{1+\\exp (- (w \\cdot x + b))}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"tanh\"\u003eTanh\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ny(z) = \\frac{e^{z} - e^{-z}}{e^{z}+e^{-z}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis causes \u0026ldquo;saturation\u0026rdquo;\u0026mdash;meaning derivatives to be \\(0\\) at high values\u003c/p\u003e\n\u003ch2 id=\"relu\"\u003erelu\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ny(z) = \\max(z,0)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"multi-layer-networks\"\u003emulti-layer networks\u003c/h2\u003e\n\u003cp\u003eSingle computing units can\u0026rsquo;t compute XOR. Consider a perceptron:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw_1x_1 + w_2x_2 + b = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_2 = \\qty(\\frac{-w_1}{w_2})x_1 + \\qty(\\frac{-b}{w_2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, obtain a line that acts as a \u003cstrong\u003edecision boundary\u003c/strong\u003e\u0026mdash;we obtain 0 if the input is on one side of the line, and 1 if on the other. 
XOR, unfortunately, does not have a single linear boundary, its not \u003cstrong\u003elinearly \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e, for instance, can\u0026rsquo;t compute XOR because it is linear until squashing.\u003c/p\u003e\n\u003ch2 id=\"feed-forward-network\"\u003efeed-forward network\u003c/h2\u003e\n\u003cp\u003ewe can think about \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e as a one layer network, generalizing over \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\text{softmax} = \\frac{\\exp(z_{i})}{\\sum_{j=1}^{k} \\exp(z_{j})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand a multinomial \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e which uses the above. This is considered a \u0026ldquo;layer\u0026rdquo; in the \u003ca href=\"#feed-forward-network\"\u003efeed-forward network\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003enotation:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(W^{(j)}\\), weight matrix for layer \\(j\\)\u003c/li\u003e\n\u003cli\u003e\\(b^{(j)}\\), the bias vector for layer \\(j\\)\u003c/li\u003e\n\u003cli\u003e\\(g^{(j)}\\), the activation function at \\(j\\)\u003c/li\u003e\n\u003cli\u003eand \\(z^{(i)}\\), the output at \\(i\\) (before activation function)\u003c/li\u003e\n\u003cli\u003e\\(a^{(i)}\\), the activation at \\(i\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003einstead of bias, we sometimes add a dummy node \\(a_{0}\\), we will force a value \\(1\\) at \\(a_{0}\\) and use its weights as bias.\u003c/p\u003e\n\u003ch3 id=\"embeddings\"\u003eembeddings\u003c/h3\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhranked_information_retrieval/#vector-space-model\"\u003evector-space model\u003c/a\u003e to feed words into 
networks: converting each word first into embeddings, then feeding it into the network\u003c/p\u003e\n\u003cp\u003eFix length problems:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esentence embedding (mean of all the embeddings)\u003c/li\u003e\n\u003cli\u003eelement wise max of all the word embeddings to create sentence embedding\u003c/li\u003e\n\u003cli\u003euse the max length + pad\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhnlp/#language-model\"\u003eLanguage Model\u003c/a\u003es, we can use a \u0026ldquo;sliding window\u0026rdquo;; that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{t}|w_{1 \\dots t-1}) \\approx P(w_{t} | w_{t-N+1 \\dots t-1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"training\"\u003eTraining\u003c/h2\u003e\n\u003cp\u003eFor every tuple \\((x,y)\\), we run a forward pass to obtain \\(\\hat{y}\\). Then, we run the network backwards to update the weights.\u003c/p\u003e\n\u003cp\u003eA loss function calculates the negative of the probability of the correct labels.\u003c/p\u003e\n\u003ch3 id=\"backpropegation--kbhdeep-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhdeep_learning/#backpropegation\"\u003ebackpropegation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdeep_learning/#backpropegation\"\u003ebackprop\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneural_networks/","tags":null,"title":"Neural Networks"},{"categories":null,"contents":"scene representation\nartificial vs biological intelligence Humans are few-shot learners (\u0026ldquo;sample efficiency\u0026rdquo;)\nHumans can easily fine-tunable (\u0026ldquo;transfer flexibility\u0026rdquo;)\nHuman knowledge can transfer easily\nAI are many-shot learners (\u0026ldquo;sample inefficiency\u0026rdquo;)\nAI are specialized\nAI is more precise, and can hold a lot in cache\nbiological learning biological learning is mostly unsupervised, and yte can generalize\nvisual processing 
","html":"\u003cp\u003e\u003ca href=\"/posts/kbhscene_representation/\"\u003escene representation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"artificial-vs-biological-intelligence\"\u003eartificial vs biological intelligence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eHumans are few-shot learners (\u0026ldquo;sample efficiency\u0026rdquo;)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHumans can easily fine-tunable (\u0026ldquo;transfer flexibility\u0026rdquo;)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHuman knowledge can transfer easily\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAI are many-shot learners (\u0026ldquo;sample inefficiency\u0026rdquo;)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAI are specialized\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAI is more precise, and can hold a lot in cache\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"biological-learning\"\u003ebiological learning\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#biological-learning\"\u003ebiological learning\u003c/a\u003e is mostly unsupervised, and yte can generalize\u003c/p\u003e\n\u003ch2 id=\"visual-processing\"\u003evisual processing\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneuroscience_and_ai/","tags":null,"title":"Neuroscience and AI"},{"categories":null,"contents":"a neutral stability ( mar ) condition in Differential Equations means that a function is neither stable nor unstable: it does not\nSee: https://en.wikipedia.org/wiki/Marginal_stability\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhneutral_stability/\"\u003eneutral stability\u003c/a\u003e ( mar ) condition in \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e means that a function is neither \u003ca href=\"/posts/kbhnon_linear_systems/#stable\"\u003estable\u003c/a\u003e nor unstable: it does not\u003c/p\u003e\n\u003cp\u003eSee: \u003ca 
href=\"https://en.wikipedia.org/wiki/Marginal_stability\"\u003ehttps://en.wikipedia.org/wiki/Marginal_stability\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneutral_stability/","tags":null,"title":"neutral stability"},{"categories":null,"contents":"Election between Hayes vs Tildon was very close. Democrats gave Republicans Hayes, but then asked the Republican millitary to leave the South and hence they have no way of enforcing the rights.\nRedeemer Governments Democrats put in systems to relegate African Americans to second-class citizenship into the south. Lynchings became the weapon of choice of enforcing Jim Crow.\nWithin 20 years, Jim Crow became implemented by every state 1896 Plessy vs Ferguson upholding the process of segregation Convict leasing: convicts\u0026rsquo; labour was leased to create infrastructure Economic transformation: put in sharecropping (crops in lieu or in addition to rent) and convict leasing. This is essentially modern slavery because debt is used as a process to enslave people as they will never actually be paid enough to pay back debt.\nPush for Civil Rights \u0026ldquo;Booker T. Washington\u0026rdquo;: help promote Southern society will gain equality. Founded the \u0026ldquo;Tuskegee Institute\u0026rdquo;.\n\u0026ldquo;W.E.B. Dubois\u0026rdquo;: make the most talented and artistic people push for civil rights. \u0026ldquo;Civil rights by copyright.\u0026rdquo;\n","html":"\u003cp\u003eElection between Hayes vs Tildon was very close. Democrats gave Republicans Hayes, but then asked the Republican millitary to leave the South and hence they have no way of enforcing the rights.\u003c/p\u003e\n\u003ch2 id=\"redeemer-governments\"\u003eRedeemer Governments\u003c/h2\u003e\n\u003cp\u003eDemocrats put in systems to relegate African Americans to second-class citizenship into the south. 
Lynchings became the weapon of choice of enforcing Jim Crow.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWithin 20 years, Jim Crow became implemented by every state\u003c/li\u003e\n\u003cli\u003e1896 Plessy vs Ferguson upholding the process of segregation\u003c/li\u003e\n\u003cli\u003eConvict leasing: convicts\u0026rsquo; labour was leased to create infrastructure\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eEconomic transformation: put in sharecropping (crops in lieu or in addition to rent) and convict leasing. This is essentially modern slavery because debt is used as a process to enslave people as they will never actually be paid enough to pay back debt.\u003c/p\u003e\n\u003ch2 id=\"push-for-civil-rights\"\u003ePush for Civil Rights\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Booker T. Washington\u0026rdquo;: help promote Southern society will gain equality. Founded the \u0026ldquo;Tuskegee Institute\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;W.E.B. Dubois\u0026rdquo;: make the most talented and artistic people push for civil rights. \u0026ldquo;Civil rights by copyright.\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnew_american_south/","tags":null,"title":"New American South"},{"categories":null,"contents":"A set of policy by Franklin D. Roosevelt (FDR) which helped saving the economy during the Great Depression.\nSaving the Banks Unemployment Relief Industrial Recovery Agriculture Creates the WPA. Also the Social Security Administration. Also created Rural Electrification Administration\nMany people were still left out.\n","html":"\u003cp\u003eA set of policy by \u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. 
Roosevelt (FDR)\u003c/a\u003e which helped saving the economy during the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSaving the Banks\u003c/li\u003e\n\u003cli\u003eUnemployment Relief\u003c/li\u003e\n\u003cli\u003eIndustrial Recovery\u003c/li\u003e\n\u003cli\u003eAgriculture\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCreates the \u003ca href=\"/posts/kbhwpa/\"\u003eWPA\u003c/a\u003e. Also the \u003ca href=\"/posts/kbhsocial_security_administration/\"\u003eSocial Security Administration\u003c/a\u003e. Also created \u003ca href=\"/posts/kbhrural_electrification_administration/\"\u003eRural Electrification Administration\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eMany people were still left out.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnew_deal/","tags":null,"title":"New Deal"},{"categories":null,"contents":"A reformist, counterculture movement during the \u0026rsquo;80s lead by Ronald Reagan. Its a new response to the neoliberalism which aligned the blocks of Evangelical Christians (25% of voters) and Business leaders (powerful leaders.)\nAmerican liberalism expands under the new right as well.\nPresident as a party leader: Reagan is often shown as shining beaken of the Republican Party Leadership\u0026mdash;won every single state except Georgia .\n","html":"\u003cp\u003eA reformist, counterculture movement during the \u0026rsquo;80s lead by \u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e. 
Its a new response to the neoliberalism which aligned the blocks of Evangelical Christians (25% of voters) and Business leaders (powerful leaders.)\u003c/p\u003e\n\u003cp\u003eAmerican liberalism expands under the new right as well.\u003c/p\u003e\n\u003cp\u003ePresident as a party leader: Reagan is often shown as shining beaken of the Republican Party Leadership\u0026mdash;won every single state except Georgia .\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnew_right/","tags":null,"title":"New Right"},{"categories":null,"contents":"\\begin{equation} y\u0026rsquo;\u0026rsquo;=0 \\end{equation}\nthat is, if \\(F=0\\), then the solution will travel along a straight line.\n","html":"\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo;=0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is, if \\(F=0\\), then the solution will travel along a straight line.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnewton_s_first_law_of_motion/","tags":null,"title":"Newton's First Law of Motion"},{"categories":null,"contents":"Putting something with a different temperature in a space with a constant temperature. The assumption underlying here is that the overall room temperature stays constant (i.e. the thing that\u0026rsquo;s cooling is so small that it doesn\u0026rsquo;t hurt room temperature).\n\\begin{equation} y\u0026rsquo;(t) = -k(y-T_0) \\end{equation}\nwhere, \\(T_0\\) is the initial temperature.\nThe intuition of this modeling is that there is some \\(T_0\\), which as the temperature \\(y\\) of your object gets closer to t. 
The result we obtain\nSolving \\begin{equation} \\int \\frac{\\dd{y}}{y-T_0} = \\int -k \\dd{t} \\end{equation}\nwe can solve this:\n\\begin{equation} \\ln |y-T_0| = -kt+C \\end{equation}\nwhich means we end up with:\n\\begin{equation} |y-T_0| = e^{-kt+C} = e^{C}e^{-kt} \\end{equation}\nSo therefore:\n\\begin{equation} y(t) = T_0 + C_1e^{-kt} \\end{equation}\nto include both \\(\\pm\\) cases.\nthis tells us that cooling and heating is exponential. We will fit our initial conditions rom data to obtain \\(C_1\\).\n","html":"\u003cp\u003ePutting something with a different temperature in a space with a constant temperature. The assumption underlying here is that the overall room temperature stays constant (i.e. the thing that\u0026rsquo;s cooling is so small that it doesn\u0026rsquo;t hurt room temperature).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(t) = -k(y-T_0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T_0\\) is the initial temperature.\u003c/p\u003e\n\u003cp\u003eThe intuition of this modeling is that there is some \\(T_0\\), which as the temperature \\(y\\) of your object gets closer to t. The result we obtain\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{\\dd{y}}{y-T_0} = \\int -k \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can solve this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln |y-T_0| = -kt+C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means we end up with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y-T_0| = e^{-kt+C} = e^{C}e^{-kt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = T_0 + C_1e^{-kt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto include both \\(\\pm\\) cases.\u003c/p\u003e\n\u003cp\u003ethis tells us that cooling and heating is exponential. 
We will fit our initial conditions rom data to obtain \\(C_1\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnewton_s_law_of_cooling/","tags":null,"title":"Newton's Law of Cooling"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnewton_s_method/","tags":null,"title":"Newton's Method"},{"categories":null,"contents":"Complex System\nLanguage Model A Language Model is a large neural network trained to predict the next token given some context.\n\u0026ldquo;Language models can discriminate behavior that they can\u0026rsquo;t reliably generate.\u0026rdquo;\nCoherence Generative REVOLUTION\nWhy probability maximization sucks Its expensive!\nBeam Search Take \\(k\\) candidates Expand \\(k\\) expansions for each of the \\(k\\) candidates Choose the highest probability \\(k\\) candidates \\(k\\) should be small: trying to maximizing\nBranch and Bound See Branch and Bound\nChallenges of Direct Sampling Direct Sampling sucks. Its sucks. It sucks. Just sampling from the distribution sucks. This has to do with the fact that assigning slightly lower scores \u0026ldquo;being less confident\u0026rdquo; is exponentially worse.\nThe model has to therefore be VERY conservative about giving low confidences; so, it is over confident about worst tokens.\nTop-K Top-k is too broad, and top\nNucleaus Sampling Find the smallest set of tokens that make up to \\(p\\) probability.\nCorrectness The highest probability answer isn\u0026rsquo;t always right Generative models consider every answer, so we want another model to compute the correct answer Surface Form Competition The Surface Form Competition problem results when top probabity token \u0026ldquo;steals\u0026rdquo; probability from the other tokens.\nThe predicted frequency of a possible string is a main comfounder. 
And so we can use models to decompose their own predictions:\nTurns out:\n\\(P(answer|question) \\approx P(answer\\ is\\ valid)P(answer|domain)\\)\nSo\u0026hellip;\n\\begin{equation} P(answer\\ is\\ valid) = \\frac{P(answer|question)}{P(answer|domain)} \\end{equation}\nThis is better :point_up:. Futher reading: (Holtzman et al. 2021)\nDomain Domain is the context in which that the text may occur.\nCoverage Why aren\u0026rsquo;t models controllable\nHallucination Language models predict what\u0026rsquo;s most likely We hope to control them with natural-language semantics In-Context Learning If we show the model some context which has example input output pairs, it can output. (Language Model model are few shot learners)\nCorrect Scoring We can reverse the output to predict the input to prevent model from loosing information, and use that to rerank the info. Of course, if the model can\u0026rsquo;t generate the desired input, the output is probably missing information.\nSmaller models can be made better because of info reranking.\nTh Degenerative Discriminative Gap.\nFuture Work The fact that the single comma shift the input. What we need is a language to control language behavior.\nThe Ability to Control a Model are the Goal of Understand the Model\nWe should only claim to understand a model when we can make a theory map about it: \u0026ldquo;when X is fed into the model, we get Y\u0026rdquo;\nSo: we should look at what the model is biased about (Surface Form Competition, for instance) we would be closer to prime behaviors such that they mimic the human behavior (in pieces, not just \u0026ldquo;complete these tokens\u0026rdquo;) in completion We see success as the actual evaluation metrics; we can use machines vs. 
other machines as the the results Questions ahai@uw.edu\nMarcel Just\nanthropic ai papers\npercy liang\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_system/\"\u003eComplex System\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"language-model\"\u003eLanguage Model\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#language-model\"\u003eLanguage Model\u003c/a\u003e is a large neural network trained to predict the \u003cstrong\u003enext token\u003c/strong\u003e given some context.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Language models can discriminate behavior that they can\u0026rsquo;t reliably generate.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"coherence\"\u003eCoherence\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eGenerative REVOLUTION\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"why-probability-maximization-sucks\"\u003eWhy probability maximization sucks\u003c/h3\u003e\n\u003cp\u003eIts expensive!\u003c/p\u003e\n\u003ch3 id=\"beam-search\"\u003eBeam Search\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eTake \\(k\\) candidates\u003c/li\u003e\n\u003cli\u003eExpand \\(k\\) expansions for each of the \\(k\\) candidates\u003c/li\u003e\n\u003cli\u003eChoose the highest probability \\(k\\) candidates\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\(k\\) should be small: trying to maximizing\u003c/p\u003e\n\u003ch3 id=\"branch-and-bound--kbhbranch-and-bound-dot-md\"\u003e\u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"challenges-of-direct-sampling\"\u003eChallenges of Direct Sampling\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e sucks. Its sucks. It sucks. Just sampling from the distribution sucks. 
This has to do with the fact that assigning slightly lower scores \u0026ldquo;being less confident\u0026rdquo; is exponentially worse.\u003c/p\u003e\n\u003cp\u003eThe model has to therefore be VERY conservative about giving low confidences; so, it is over confident about worst tokens.\u003c/p\u003e\n\u003ch3 id=\"top-k\"\u003eTop-K\u003c/h3\u003e\n\u003cp\u003eTop-k is too broad, and top\u003c/p\u003e\n\u003ch3 id=\"nucleaus-sampling\"\u003eNucleaus Sampling\u003c/h3\u003e\n\u003cp\u003eFind the smallest set of tokens that make up to \\(p\\) probability.\u003c/p\u003e\n\u003ch2 id=\"correctness\"\u003eCorrectness\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe highest probability answer isn\u0026rsquo;t always right\u003c/li\u003e\n\u003cli\u003eGenerative models consider every answer, so we want another model to compute the correct answer\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"surface-form-competition\"\u003eSurface Form Competition\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#surface-form-competition\"\u003eSurface Form Competition\u003c/a\u003e problem results when top probabity token \u0026ldquo;steals\u0026rdquo; probability from the other tokens.\u003c/p\u003e\n\u003cp\u003eThe predicted frequency of a possible string is a main comfounder. And so we can use models to decompose their own predictions:\u003c/p\u003e\n\u003cp\u003eTurns out:\u003c/p\u003e\n\u003cp\u003e\\(P(answer|question) \\approx P(answer\\ is\\ valid)P(answer|domain)\\)\u003c/p\u003e\n\u003cp\u003eSo\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(answer\\ is\\ valid) = \\frac{P(answer|question)}{P(answer|domain)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is better :point_up:. Futher reading: (\u003ca href=\"#citeproc_bib_item_1\"\u003eHoltzman et al. 
2021\u003c/a\u003e)\u003c/p\u003e\n\u003ch4 id=\"domain\"\u003eDomain\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#domain\"\u003eDomain\u003c/a\u003e is the context in which that the text may occur.\u003c/p\u003e\n\u003ch2 id=\"coverage\"\u003eCoverage\u003c/h2\u003e\n\u003cp\u003eWhy aren\u0026rsquo;t models controllable\u003c/p\u003e\n\u003ch3 id=\"hallucination\"\u003eHallucination\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eLanguage models predict what\u0026rsquo;s most likely\u003c/li\u003e\n\u003cli\u003eWe hope to control them with natural-language semantics\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"in-context-learning\"\u003eIn-Context Learning\u003c/h3\u003e\n\u003cp\u003eIf we show the model some context which has example input output pairs, it can output. (\u003ca href=\"#language-model\"\u003eLanguage Model\u003c/a\u003e model are few shot learners)\u003c/p\u003e\n\u003ch4 id=\"correct-scoring\"\u003eCorrect Scoring\u003c/h4\u003e\n\u003cp\u003eWe can reverse the output to predict the input to prevent model from loosing information, and use that to rerank the info. Of course, if the model can\u0026rsquo;t generate the desired input, the output is probably missing information.\u003c/p\u003e\n\u003cp\u003eSmaller models can be made better because of info reranking.\u003c/p\u003e\n\u003cp\u003eTh Degenerative Discriminative Gap.\u003c/p\u003e\n\u003ch2 id=\"future-work\"\u003eFuture Work\u003c/h2\u003e\n\u003cp\u003eThe fact that the single comma shift the input. 
What we need is a language to control language behavior.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThe Ability to Control a Model are the Goal of Understand the Model\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWe should only claim to understand a model when we can make a theory map about it: \u0026ldquo;when X is fed into the model, we get Y\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"so\"\u003eSo:\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe should look at what the model is biased about (\u003ca href=\"#surface-form-competition\"\u003eSurface Form Competition\u003c/a\u003e, for instance)\u003c/li\u003e\n\u003cli\u003ewe would be closer to prime behaviors such that they mimic the human behavior (in pieces, not just \u0026ldquo;complete these tokens\u0026rdquo;) in completion\u003c/li\u003e\n\u003cli\u003eWe see success as the actual evaluation metrics; we can use machines vs. other machines as the the results\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"mailto:ahai@uw.edu\"\u003eahai@uw.edu\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eMarcel Just\u003c/p\u003e\n\u003cp\u003eanthropic ai papers\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003epercy liang\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnlp/","tags":null,"title":"NLP"},{"categories":null,"contents":" 1990 static word embeddings 2003 neural language models 2008 multi-task learning 2015 attention 2017 transformer 2018 trainable contextual word embeddings + large scale pretraining 2019 prompt engineering Motivating Attention Given a sequence of embeddings: \\(x_1, x_2, \u0026hellip;, x_{n}\\)\nFor each \\(x_{i}\\), the goal of attention is to produce a new embedding of each \\(x_{i}\\) named \\(a_{i}\\) based its dot product similarity with all other words that are before it.\nLet\u0026rsquo;s define:\n\\begin{equation} score(x_{i}, x_{j}) = x_{i} \\cdot x_{j} 
\\end{equation}\nWhich means that we can write:\n\\begin{equation} a_{i} = \\sum_{j \\leq i}^{} \\alpha_{i,j} x_{j} \\end{equation}\nwhere:\n\\begin{equation} \\alpha_{i,j} = softmax \\qty(score(x_{i}, x_{j}) ) \\end{equation}\nThe resulting \\(a_{i}\\) is the output of our attention.\nAttention From the above, we call the input embeddings \\(x_{j}\\) the values, and we will create a separate embeddings called key with which we will measure the similarity. We call the word we want the target new embeddings for the query (i.e. \\(x_{i}\\) from above).\n","html":"\u003cul\u003e\n\u003cli\u003e1990 static word embeddings\u003c/li\u003e\n\u003cli\u003e2003 neural language models\u003c/li\u003e\n\u003cli\u003e2008 multi-task learning\u003c/li\u003e\n\u003cli\u003e2015 attention\u003c/li\u003e\n\u003cli\u003e2017 transformer\u003c/li\u003e\n\u003cli\u003e2018 trainable contextual word embeddings + large scale pretraining\u003c/li\u003e\n\u003cli\u003e2019 prompt engineering\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"motivating-attention\"\u003eMotivating Attention\u003c/h2\u003e\n\u003cp\u003eGiven a sequence of embeddings: \\(x_1, x_2, \u0026hellip;, x_{n}\\)\u003c/p\u003e\n\u003cp\u003eFor each \\(x_{i}\\), the goal of attention is to \u003cstrong\u003eproduce a new embedding\u003c/strong\u003e of each \\(x_{i}\\) named \\(a_{i}\\) based its dot product similarity with all other words that are before it.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nscore(x_{i}, x_{j}) = x_{i} \\cdot x_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich means that we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{i} = \\sum_{j \\leq i}^{} \\alpha_{i,j} x_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{i,j} = softmax \\qty(score(x_{i}, x_{j}) )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe resulting \\(a_{i}\\) is the output of our 
attention.\u003c/p\u003e\n\u003ch2 id=\"attention\"\u003eAttention\u003c/h2\u003e\n\u003cp\u003eFrom the above, we call the input embeddings \\(x_{j}\\) the \u003cstrong\u003evalues\u003c/strong\u003e, and we will create a separate embeddings called \u003cstrong\u003ekey\u003c/strong\u003e with which we will measure the similarity. We call the word we want the target new embeddings for the \u003cstrong\u003equery\u003c/strong\u003e (i.e. \\(x_{i}\\) from above).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnlp_semantics_timeline/","tags":null,"title":"NLP Semantics Timeline"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhchomsky/","tags":null,"title":"Noam Chomsky"},{"categories":null,"contents":"\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nThe general solution for this would be\nany solution specifically which gives \\(f(t)\\), plus any homogeneous solutions specifically:\n\\begin{equation} y = y_{p}(t) + y_{n}(t) \\end{equation}\nwhere the left is a particular solution, and the right is any homogeneous solution. 
We can do this because, say if we derivate it; the left derivative (the particular solution) gives \\(f(t)\\), and the right, because its homogeneous, gives 0.\nBecause there can be at most one solution to every IVP, we know that all solutions to the equation must take on the form of \\(y_{p}(t) + c_1 y_{n_{1}}(t) + \u0026hellip; + c_{n} y_{n_{j}}(t) = y\\)\nThe general solution to this is:\n\\begin{equation} y(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at} \\end{equation}\nthis works equally well when \\(a\\) is not constant:\n\\begin{equation} y(t) = e^{-\\qty(\\int a(s) \\dd{s})t} \\int_{0}^{t}e^{\\qty(\\int a(s) \\dd{s})x} f(x) \\dd{x} + Ce^{-at} \\end{equation}\ninhomogeneous solutions cannot work with the time translation trick\nintegrating factor Consider the case where:\n\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nideally, we would love our whole left side to be one giant derivative which we can antiderive; let\u0026rsquo;s try multiply both sides with \\(e^{at}\\):\n\\begin{equation} (e^{at}y)\u0026rsquo; = e^{at}y\u0026rsquo; + ae^{at}y = e^{at}(y\u0026rsquo; + ay) = e^{at} f(t) \\end{equation}\nWe note that this gives:\n\\begin{equation} (e^{at}y)\u0026rsquo; = e^{at}f(t) \\end{equation}\nmeaning:\n\\begin{equation} e^{at}y(t) = \\int_{0}^{t} e^{ax} f(x) \\dd{x} \\end{equation}\nwhich gives:\n\\begin{equation} y(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} \\end{equation}\nTacking on the homogeneous solution :\n\\begin{equation} y(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at} \\end{equation}\nnote! the first term doesn\u0026rsquo;t have a scaler in front of it. Otherwise, the derivative will give you \\(nf(x)\\) instead of \\(f(x)\\).\nThis actually doesn\u0026rsquo;t matter what \\(a\\) is. 
In a sense, if we swap \\(a\\) for \\(a(t)\\), we simply have to write \\(a = \\int a(x) \\dd{x}\\).So, most generally:\n\\begin{equation} y\u0026rsquo; + a(t)y = f(t) \\end{equation}\nyields (CHECK THIS FACT\u0026lt; IT MAY BE WRONG)\n\\begin{equation} y(t) = e^{-\\qty(\\int a(t) \\dd{t})} \\int_{0}^{t}e^{\\qty(\\int a(x) \\dd{x})} f(x) \\dd{x} + Ce^{-\\qty(\\int a(t) \\dd{t})} \\end{equation}\nfor constant solutions, we get:\n\\begin{equation} y(t) = C e^{-a(t-t_0)} + e^{-at} \\int_{t_0}^{t} e^{as} f(s) \\dd{s} \\end{equation}\nfor\n\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nmethod of undetermined coefficients Good guesses for the structure of:\n\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nfor \\(f(t) = C\\) , guess \\(y = C\u0026rsquo;\\) for \\(f(t) = x^{n}\\), guess \\(y = x^{n}\\) (with all subsequent terms) for \\(f(t) = \\sin (t)\\) or \\(f(t)=\\cos (t)\\), guess \\(y=A \\sin (t) + B \\cos (t)\\) for \\(f(t) = e^{\\lambda t}\\), guess \\(y = Ce^{\\lambda t}\\) example say:\n\\begin{equation} y\u0026rsquo; + ky = 70k + 10k \\sin (t) \\end{equation}\nlet\u0026rsquo;s break it up into three pieces:\n\\begin{equation} \\begin{cases} y_1\u0026rsquo; + ky_{1} = 70 k\\\\ y_2\u0026rsquo; + k y_2 = 10k \\sin (t) \\\\ y_3\u0026rsquo; + k y_{3} = 0 \\end{cases} \\end{equation}\nyou will note that adding up all three of these yields a value for \\(y\\) that satisfy the overall expression.\nfirst one: we can just guess \\(y = 70\\), which evidently works second one: we want the sin and cos to cancel out, so we can guess \\(A \\sin t + B \\cos t\\), whose derivative is \\(-B \\sin t + A \\cos t\\), plugging that in, we get: \\((-B+kA) \\sin t + (A+kB) \\cos t\\), which we can use our coefficients to solve third one: that\u0026rsquo;s the homogeneous solution \\(Ce^{-kt}\\) and we can finally add it all up.\nconcern at some points, there is a case where\u0026mdash;at certain choices of constants, you may obtain a homogeneous solution when 
you are trying to get the particular solution. that\u0026rsquo;s bad. Consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; - y = e^{mt} \\end{equation}\nand the particular solution you\u0026rsquo;d get is something like:\n\\begin{equation} y_{p}(t) = \\frac{1}{m^{2}-1} e^{mt} \\end{equation}\nthis makes sense for all cases except where \\(m = \\pm 1\\), because that gives a homogeneous solution, and the bottom of that fraction will blow up. To fix this, we can engineer a specific solution for which the limit towards \\(1\\) exists. Recall that, in general, we have:\n\\begin{equation} y(t) = \\frac{1}{m^{2}-1} e^{mt} + c_1 e^{t} + c_2 e^{-t} \\end{equation}\nif we choose \\(c_2=0\\), \\(c_1= \\frac{1}{m^{2}-1}\\), we can see that the limit exists through l\u0026rsquo;hospitals:\n\\begin{equation} y_{p}(t) = \\frac{1}{m^{2}-1} \\qty( e^{mt}-e^{t}) \\end{equation}\nwe can evaluate this at \\(m=1\\) by using l\u0026rsquo;hospitals rule.\nAt this point, we essentially end up with two distinct solutions, given a choice of \\(m\\).\nvariation of parameters method Take an independent set of homogeneous solutions which tells you what the general solution is, then modifying to modify the \\(f\\).\nThe general solution is irrespective of \\(f\\).\nNow, consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; - y = f(t) \\end{equation}\nBecause the homogeneous solution \\(y_1\\) and \\(y_2\\) gives two independent solutions, we obtain:\n\\begin{equation} y_{p} = c_1(t) y_1(t) + c_2(t) y_2(t) \\end{equation}\nwe can take the derivative of this to obtain:\n\\begin{equation} y\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo; + c_2(t) y_2(t)\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t) + c_2(t)\u0026rsquo; y_2(t) \\end{equation}\nwe are going to guess the right two terms are zero (assume \\(c_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0\\)) and repeat this procedure:\n\\begin{equation} y\u0026rsquo;\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo;\u0026rsquo; + c_2(t) 
y_2(t)\u0026rsquo;\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t)\u0026rsquo; + c_2(t)\u0026rsquo; y_2(t)' \\end{equation}\nnow, plugging this into our original equation, we obtain:\n\\begin{align} y_{p}\u0026rsquo;\u0026rsquo; - y_{p} \u0026amp;= c_1(y_1 \u0026rsquo;\u0026rsquo; - y_{1}) + c_2 (y_{2}\u0026rsquo;\u0026rsquo; - y_2) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\ \u0026amp;= c_1(0) + c_2 (0) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\ \u0026amp;= c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\ \u0026amp;= f(t) \\end{align}\nSo, combining what we just got and our simplifying assumption, we obtain:\n\\begin{equation} \\begin{cases} c_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0 \\\\ c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; = f(t) \\end{cases} \\end{equation}\nThis is now a matrix expression:\n\\begin{equation} \\mqty(y_1 \u0026amp; y_2 \\\\ y_1 \u0026rsquo; \u0026amp; y_2 \u0026lsquo;) \\mqty(c_1 \u0026rsquo; \\\\ c_2 \u0026lsquo;) = \\mqty( 0 \\\\ f(t)) \\end{equation}\nthat matrix on the left side is called the Wronshian matrix, and if \\(y_1\\) and \\(y_2\\) the homogeneous solutions are independent, we know that this is going to be invertible. Then, we can just solve and integrate to obtain \\(c_1, c_2\\).\nwhy do we tack on the homogeneous solution again? What if we have a plane specified:\n\\begin{equation} a x_1 + b x_2 + c x_3 = K \\end{equation}\nwe want to solve \\(x_{j}\\) as a vector which lives on this plane.\nLet\u0026rsquo;s begin by shifting this plane down to the origin:\n\\begin{equation} a x_1 + b x_2 + c x_3 + 0 \\end{equation}\nWhich says the same thing as:\n\\begin{equation} \\mqty(a \u0026amp; b \u0026amp; c) \\mqty(x_1 \\\\ x_2 \\\\ x_3) = 0 \\end{equation}\nmeaning:\n\\begin{equation} A x = 0 \\end{equation}\nwhere \\(A \\in \\mathcal{L}(m,n)\\), where \\(m \u0026lt; n\\). 
To solve for \\(x\\), we desire \\(\\text{null}\\ A\\), and given we are a map into a bigger space, we should have non-trivial null space.\nfind a particular solution \\(x_{p}\\) to the non-shifted version the general solution should live in \\(x_{p} + \\text{null}\\ A\\), the affine subset meaning all solutions should live on \\(x = x_{p} + c_1 v_1 + c_2 v_2\\) ","html":"\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe general solution for this would be\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eany solution specifically which gives \\(f(t)\\), plus\u003c/li\u003e\n\u003cli\u003eany homogeneous solutions\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003especifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = y_{p}(t) + y_{n}(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the left is a particular solution, and the right is any homogeneous solution. We can do this because, say if we derivate it; the left derivative (the particular solution) gives \\(f(t)\\), and the right, because its homogeneous, gives 0.\u003c/p\u003e\n\u003cp\u003eBecause there can be at most one solution to every IVP, we know that all solutions to the equation must take on the form of \\(y_{p}(t) + c_1 y_{n_{1}}(t) + \u0026hellip; + c_{n} y_{n_{j}}(t) = y\\)\u003c/p\u003e\n\u003cp\u003eThe general solution to this is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis works equally well when \\(a\\) is not constant:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-\\qty(\\int a(s) \\dd{s})t} \\int_{0}^{t}e^{\\qty(\\int a(s) \\dd{s})x} f(x) \\dd{x} + Ce^{-at}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003einhomogeneous solutions cannot work with the time translation trick\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"integrating-factor\"\u003eintegrating 
factor\u003c/h2\u003e\n\u003cp\u003eConsider the case where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eideally, we would love our whole left side to be one giant derivative which we can antiderive; let\u0026rsquo;s try multiply both sides with \\(e^{at}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(e^{at}y)\u0026rsquo; = e^{at}y\u0026rsquo; + ae^{at}y = e^{at}(y\u0026rsquo; + ay) = e^{at} f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe note that this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(e^{at}y)\u0026rsquo; = e^{at}f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{at}y(t) = \\int_{0}^{t} e^{ax} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTacking on the homogeneous solution :\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote! the first term doesn\u0026rsquo;t have a scaler in front of it. Otherwise, the derivative will give you \\(nf(x)\\) instead of \\(f(x)\\).\u003c/p\u003e\n\u003cp\u003eThis actually doesn\u0026rsquo;t matter what \\(a\\) is. 
In a sense, if we swap \\(a\\) for \\(a(t)\\), we simply have to write \\(a = \\int a(x) \\dd{x}\\).So, most generally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + a(t)y = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyields (CHECK THIS FACT\u0026lt; IT MAY BE WRONG)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-\\qty(\\int a(t) \\dd{t})} \\int_{0}^{t}e^{\\qty(\\int a(x) \\dd{x})} f(x) \\dd{x} + Ce^{-\\qty(\\int a(t) \\dd{t})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor constant solutions, we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = C e^{-a(t-t_0)} + e^{-at} \\int_{t_0}^{t} e^{as} f(s) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"method-of-undetermined-coefficients--kbhsecond-order-linear-differential-equation-dot-md\"\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eGood guesses for the structure of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efor \\(f(t) = C\\) , guess \\(y = C\u0026rsquo;\\)\u003c/li\u003e\n\u003cli\u003efor \\(f(t) = x^{n}\\), guess \\(y = x^{n}\\) (with all subsequent terms)\u003c/li\u003e\n\u003cli\u003efor \\(f(t) = \\sin (t)\\) or \\(f(t)=\\cos (t)\\), guess \\(y=A \\sin (t) + B \\cos (t)\\)\u003c/li\u003e\n\u003cli\u003efor \\(f(t) = e^{\\lambda t}\\), guess \\(y = Ce^{\\lambda t}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"example\"\u003eexample\u003c/h3\u003e\n\u003cp\u003esay:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ky = 70k + 10k \\sin (t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet\u0026rsquo;s break it up into three 
pieces:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny_1\u0026rsquo; + ky_{1} = 70 k\\\\\ny_2\u0026rsquo; + k y_2 = 10k \\sin (t) \\\\\ny_3\u0026rsquo; + k y_{3} = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that adding up all three of these yields a value for \\(y\\) that satisfy the overall expression.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efirst one: we can just guess \\(y = 70\\), which evidently works\u003c/li\u003e\n\u003cli\u003esecond one: we want the sin and cos to cancel out, so we can guess \\(A \\sin t + B \\cos t\\), whose derivative is \\(-B \\sin t + A \\cos t\\), plugging that in, we get: \\((-B+kA) \\sin t + (A+kB) \\cos t\\), which we can use our coefficients to solve\u003c/li\u003e\n\u003cli\u003ethird one: that\u0026rsquo;s the homogeneous solution \\(Ce^{-kt}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand we can finally add it all up.\u003c/p\u003e\n\u003ch3 id=\"concern\"\u003econcern\u003c/h3\u003e\n\u003cp\u003eat some points, there is a case where\u0026mdash;at certain choices of constants, you may obtain a homogeneous solution when you are trying to get the particular solution. that\u0026rsquo;s bad. Consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; - y = e^{mt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand the particular solution you\u0026rsquo;d get is something like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{p}(t) = \\frac{1}{m^{2}-1} e^{mt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis makes sense for all cases except where \\(m = \\pm 1\\), because that gives a homogeneous solution, and the bottom of that fraction will blow up. To fix this, we can engineer a specific solution for which the limit towards \\(1\\) exists. 
Recall that, in general, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = \\frac{1}{m^{2}-1} e^{mt} + c_1 e^{t} + c_2 e^{-t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we choose \\(c_2=0\\), \\(c_1= \\frac{1}{m^{2}-1}\\), we can see that the limit exists through l\u0026rsquo;hospitals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{p}(t) = \\frac{1}{m^{2}-1} \\qty( e^{mt}-e^{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can evaluate this at \\(m=1\\) by using l\u0026rsquo;hospitals rule.\u003c/p\u003e\n\u003cp\u003eAt this point, we essentially end up with two distinct solutions, given a choice of \\(m\\).\u003c/p\u003e\n\u003ch2 id=\"variation-of-parameters-method\"\u003evariation of parameters method\u003c/h2\u003e\n\u003cp\u003eTake an independent set of homogeneous solutions which tells you what the general solution is, then modifying to modify the \\(f\\).\u003c/p\u003e\n\u003cp\u003eThe general solution is irrespective of \\(f\\).\u003c/p\u003e\n\u003cp\u003eNow, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; - y = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause the homogeneous solution \\(y_1\\) and \\(y_2\\) gives two independent solutions, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{p} = c_1(t) y_1(t) + c_2(t) y_2(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can take the derivative of this to obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo; + c_2(t) y_2(t)\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t) + c_2(t)\u0026rsquo; y_2(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe are going to guess the right two terms are zero (assume \\(c_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0\\)) and repeat this procedure:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo;\u0026rsquo; + c_2(t) y_2(t)\u0026rsquo;\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t)\u0026rsquo; + 
c_2(t)\u0026rsquo; y_2(t)'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, plugging this into our original equation, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny_{p}\u0026rsquo;\u0026rsquo; - y_{p} \u0026amp;= c_1(y_1 \u0026rsquo;\u0026rsquo; - y_{1}) + c_2 (y_{2}\u0026rsquo;\u0026rsquo; - y_2) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\\n\u0026amp;= c_1(0) + c_2 (0) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\\n\u0026amp;= c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\\n\u0026amp;= f(t)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eSo, combining what we just got and our simplifying assumption, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nc_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0 \\\\\nc_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; = f(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is now a matrix expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(y_1 \u0026amp; y_2 \\\\ y_1 \u0026rsquo; \u0026amp; y_2 \u0026lsquo;) \\mqty(c_1 \u0026rsquo; \\\\ c_2 \u0026lsquo;) = \\mqty( 0 \\\\ f(t))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat matrix on the left side is called the \u003ca href=\"#variation-of-parameters-method\"\u003eWronshian\u003c/a\u003e matrix, and if \\(y_1\\) and \\(y_2\\) the homogeneous solutions are independent, we know that this is going to be invertible. 
Then, we can just solve and integrate to obtain \\(c_1, c_2\\).\u003c/p\u003e\n\u003ch2 id=\"why-do-we-tack-on-the-homogeneous-solution-again\"\u003ewhy do we tack on the homogeneous solution again?\u003c/h2\u003e\n\u003cp\u003eWhat if we have a plane specified:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na x_1 + b x_2 + c x_3 = K\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe want to solve \\(x_{j}\\) as a vector which lives on this plane.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by shifting this plane down to the origin:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na x_1 + b x_2 + c x_3 + 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich says the same thing as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a \u0026amp; b \u0026amp; c) \\mqty(x_1 \\\\ x_2 \\\\ x_3) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA x = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(A \\in \\mathcal{L}(m,n)\\), where \\(m \u0026lt; n\\). To solve for \\(x\\), we desire \\(\\text{null}\\ A\\), and given we are a map into a bigger space, we should have non-trivial null space.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efind a particular solution \\(x_{p}\\) to the non-shifted version\u003c/li\u003e\n\u003cli\u003ethe general solution should live in \\(x_{p} + \\text{null}\\ A\\), the \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003emeaning all solutions should live on \\(x = x_{p} + c_1 v_1 + c_2 v_2\\)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_homogeneous_linear_differential_equation/","tags":null,"title":"non-homogeneous linear differential equation"},{"categories":null,"contents":"In this project, we aim to derive situations for the existence of a differential equation for when a family of functions do not intersect. 
We were able to derive a full solution for the result in linear equations, and we offer an exploration of a partial solution for non-linear cases.\nFunction Families Fundamentally, function families are functions parameterized by some \\(C\\), which has the shape:\n\\begin{equation} y(x, \\dots, c) = f(x, \\dots)+c \\end{equation}\nThrough this result, we can figure a statement for \u0026ldquo;intersection.\u0026rdquo; If two functions intersect, their difference will be \\(0\\); if there is a non-trivial solution (that \\(c_1\\neq c_2\\) \u0026mdash; that, they are not the same function\u0026mdash;still makes \\(y_{C_1} = y_{C_2}\\)), the function family interact.\nWe can test this by subtracting two arbitrary members from the desired family. If it results that \\(c_1-c_2=0 \\implies c_1=c_2\\), we can say that the family does not intersect: that there are no non-trivial solutions to the function having no difference.\nSingle-Order Linear Differential Equations Here, we prove the fact that single-order linear differential equations do not produce solutions that intersect. 
We have the following single-order linear differential equation:\n\\begin{equation} \\dv{y}{x} + P(x) = Q(x) \\end{equation}\nIf, as desired, our function has a analytical solution (without an integral), we will make both terms differentiable.\n\\begin{equation} \\dv{y}{x} + P\u0026rsquo;(x) = Q\u0026rsquo;(x) \\end{equation}\nRecall the general solution of this expression:\n\\begin{align} y \u0026amp;= e^{-\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\ \u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x} \\end{align}\nOf course, we can separate the constants \\(e^{C_1}\\) out.\n\\begin{align} y \u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x} \\\\ \u0026amp;= e^{-P(x)} \\int e^{P(x)} Q\u0026rsquo;(x)\\dd{x} \\end{align}\nNow, it is the case that, for the most part, \\(e^{P(x)}Q\u0026rsquo;(x)\\) may not be integral-differentiable. Applying the fundamental theorem, we still have that as the integral function, with some \u0026ldquo;differentiated\u0026rdquo; term which we will call \\(a(x)\\): below\n\\begin{align} y \u0026amp;= e^{-P(x)}(a(x) +C) \\\\ \u0026amp;= e^{-P(x)}a(x) +Ce^{-P(x)} \\end{align}\nExcellent. Now, let\u0026rsquo;s do the subtraction test devised above; if we have that \\(C_1-C_2=0\\) given \\(y_1-y_2=0\\), then we can ensure that the function family do not intersect.\n\\begin{align} y_1 - y_2 =0 \u0026amp;= (e^{-P(x)}a(x) +C_{1}e^{-P(x)})-(e^{-P(x)}a(x) +C_{2}e^{-P(x)}) \\\\ \u0026amp;= C_{1}e^{-P(x)}-C_{2}e^{-P(x)} \\\\ \u0026amp;= (C_{1}-C_{2})e^{-P(x)} \\end{align}\nWe now have that:\n\\begin{equation} 0 = (C_1+C_2)e^{-P(x)} \\end{equation}\nNotably, the codomain of \\(e^{x}\\) is \\((0, \\infty)\\). Having never reached \\(0\\), we have that \\(0=C_1-C_2\\), as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eIn this project, we aim to derive situations for the existence of a differential equation for when a family of functions do not intersect. 
We were able to derive a full solution for the result in linear equations, and we offer an exploration of a partial solution for non-linear cases.\u003c/p\u003e\n\u003ch2 id=\"function-families\"\u003eFunction Families\u003c/h2\u003e\n\u003cp\u003eFundamentally, function families are functions parameterized by some \\(C\\), which has the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x, \\dots, c) = f(x, \\dots)+c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThrough this result, we can figure a statement for \u0026ldquo;intersection.\u0026rdquo; If two functions intersect, their difference will be \\(0\\); if there is a non-trivial solution (that \\(c_1\\neq c_2\\) \u0026mdash; that, they are not the same function\u0026mdash;still makes \\(y_{C_1} = y_{C_2}\\)), the function family interact.\u003c/p\u003e\n\u003cp\u003eWe can test this by subtracting two arbitrary members from the desired family. If it results that \\(c_1-c_2=0 \\implies c_1=c_2\\), we can say that the family does \u003cem\u003enot\u003c/em\u003e intersect: that there are no non-trivial solutions to the function having no difference.\u003c/p\u003e\n\u003ch2 id=\"single-order-linear-differential-equations\"\u003eSingle-Order Linear Differential Equations\u003c/h2\u003e\n\u003cp\u003eHere, we prove the fact that single-order linear differential equations do not produce solutions that intersect. 
We have the following single-order linear differential equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P(x) = Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf, as desired, our function has a analytical solution (without an integral), we will make both terms differentiable.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P\u0026rsquo;(x) = Q\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall the general solution of this expression:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= e^{-\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\\n\u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, we can separate the constants \\(e^{C_1}\\) out.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x} \\\\\n\u0026amp;= e^{-P(x)} \\int e^{P(x)} Q\u0026rsquo;(x)\\dd{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, it is the case that, for the most part, \\(e^{P(x)}Q\u0026rsquo;(x)\\) may not be integral-differentiable. Applying the fundamental theorem, we still have that as the integral function, with some \u0026ldquo;differentiated\u0026rdquo; term which we will call \\(a(x)\\): below\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= e^{-P(x)}(a(x) +C) \\\\\n\u0026amp;= e^{-P(x)}a(x) +Ce^{-P(x)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eExcellent. 
Now, let\u0026rsquo;s do the subtraction test devised above; if we have that \\(C_1-C_2=0\\) given \\(y_1-y_2=0\\), then we can ensure that the function family do not intersect.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny_1 - y_2 =0 \u0026amp;= (e^{-P(x)}a(x) +C_{1}e^{-P(x)})-(e^{-P(x)}a(x) +C_{2}e^{-P(x)}) \\\\\n\u0026amp;= C_{1}e^{-P(x)}-C_{2}e^{-P(x)} \\\\\n\u0026amp;= (C_{1}-C_{2})e^{-P(x)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe now have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (C_1+C_2)e^{-P(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotably, the codomain of \\(e^{x}\\) is \\((0, \\infty)\\). Having never reached \\(0\\), we have that \\(0=C_1-C_2\\), as desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_intersecting_graphs/","tags":null,"title":"Non-Intersecting Graphs (Single Order)"},{"categories":null,"contents":"Suppose we analyze first order non-linear system:\n\\begin{equation} x\u0026rsquo; = F(t,x) \\end{equation}\nWe can actually turn this into an autonomous system:\n\\begin{equation} x_0 = t \\end{equation}\n\\begin{equation} x_0\u0026rsquo; = 1 \\end{equation}\nmeaning suddenly we have an autonomous system:\n\\begin{equation} \\begin{cases} x_0\u0026rsquo; = 1 \\\\ x_1\u0026rsquo; = F(x_0, x_1) \\end{cases} \\end{equation}\nGeneral strategy:\nFind zeros of the right side (which are the stationary solutions) Analyze near-stationary solutions through eigenvalues of the linearized Jacobian matrix: if both eigenvalues are zero Away from stationary solutions: basically guessing Three Examples that are Hopeless to Solve Lotha-Volterra Prey-Predictor Equation \\begin{equation} \\begin{cases} x_1\u0026rsquo; = 2x_1-x_1x_2 \\\\ x_2\u0026rsquo; = x_1x_2 - 3x_2 \\end{cases} \\end{equation}\nBy default, if either \\(x_1\\) or \\(x_2\\) goes down, the system dies quickly.\nExample \\begin{equation} \\begin{cases} x_1\u0026rsquo; = r_1x_1 \\qty(1- \\frac{x_1 + h_{12} x_2}{k_1})\\\\ 
x_2\u0026rsquo; = r_2x_2 \\qty(1- \\frac{x_2 + h_{21} x_1}{k_2}) \\end{cases} \\end{equation}\nExample \\begin{equation} \\begin{cases} x_1\u0026rsquo; = x_2 \\\\ x_2\u0026rsquo; = -\\sin x_1 - \\gamma x_2 \\end{cases} \\end{equation}\nStrategy to Analyze when its Hopeless find a stationary solutions: \\(x(t) = a\\): where \\(x\u0026rsquo; = F(a) = 0\\) and draw them as points on the \\(x_1\\) and \\(x_2\\) plane near each equilibrium point, approximate through Linearilzation study the mesoscopic region So, see ODE linearilzation.\nPhase Portrait Phase Portrait is a figure in the \\(x_1, x_2\\) plane where each solution exists as a curve on the figure.\nmonotone function for linearilzation systems that are marginal (zero, negative real parts, one or more fully imaginary), we can\u0026rsquo;t use linearilzation itself to analyze the system.\nTherefore, we have to use a function for which \\(\\dv t V(y(t)) \\geq 0\\) or \\(\\dv V(y(t)) \\leq 0\\) for all \\(t\\) called a monotone function, which could give us hints about the function\u0026rsquo;s behavior.\nMeaning:\n\\begin{align} \\dv t V(y(T) \u0026amp;= \\nabla V(y(t)) \\cdot y\u0026rsquo;(t) \\\\ \u0026amp;= \\nabla V(y(t)) \\cdot F(y(t)) \\end{align}\nThe gradient of \\(V\\) is always perpendicular to the level curve of \\(V\\), and\u0026mdash;when dotted with \\(F\\) the vector field of $y$\u0026mdash;we obtain a value that\u0026rsquo;s either positive or negative. When positive, the angle between the vector field \\(F\\) and \\(V\\) would be less than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;outwards\u0026rdquo; from the level sets. Otherwise, it would be more than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;inwards\u0026rdquo;.\nconserved function its like a monotone function, but \\(\\dv{V}{t} = 0\\). any solution curve would lie inside a level curve of \\(V\\) (parts of the level curve). 
Its basically the intuition of a monotone function, but the solution curves instead of pointing inwards and outwards, it just get stuck.\n","html":"\u003cp\u003eSuppose we analyze first order non-linear system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = F(t,x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can actually turn this into an autonomous system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_0 = t\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_0\u0026rsquo; = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning suddenly we have an autonomous system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_0\u0026rsquo; = 1 \\\\\nx_1\u0026rsquo; = F(x_0, x_1)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGeneral strategy:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eFind zeros of the right side (which are the stationary solutions)\u003c/li\u003e\n\u003cli\u003eAnalyze near-stationary solutions through eigenvalues of the linearized Jacobian matrix: if both eigenvalues are zero\u003c/li\u003e\n\u003cli\u003eAway from stationary solutions: basically guessing\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"three-examples-that-are-hopeless-to-solve\"\u003eThree Examples that are Hopeless to Solve\u003c/h2\u003e\n\u003ch3 id=\"lotha-volterra-prey-predictor-equation\"\u003eLotha-Volterra Prey-Predictor Equation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = 2x_1-x_1x_2 \\\\\nx_2\u0026rsquo; = x_1x_2 - 3x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy default, if either \\(x_1\\) or \\(x_2\\) goes down, the system dies quickly.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = r_1x_1 \\qty(1- \\frac{x_1 + h_{12} x_2}{k_1})\\\\\nx_2\u0026rsquo; = r_2x_2 \\qty(1- \\frac{x_2 + h_{21} x_1}{k_2})\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch3 
id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = x_2 \\\\\nx_2\u0026rsquo; = -\\sin x_1 - \\gamma x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"strategy-to-analyze-when-its-hopeless\"\u003eStrategy to Analyze when its Hopeless\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003efind a stationary solutions: \\(x(t) = a\\): where \\(x\u0026rsquo; = F(a) = 0\\) and draw them as points on the \\(x_1\\) and \\(x_2\\) plane\u003c/li\u003e\n\u003cli\u003enear each equilibrium point, approximate through \u003ca href=\"/posts/kbhode_linearilzation/\"\u003eLinearilzation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estudy the \u003ca href=\"/posts/kbhmesoscopic_region/\"\u003emesoscopic region\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, see \u003ca href=\"/posts/kbhode_linearilzation/\"\u003eODE linearilzation\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"phase-portrait\"\u003ePhase Portrait\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#phase-portrait\"\u003ePhase Portrait\u003c/a\u003e is a figure in the \\(x_1, x_2\\) plane where each solution exists as a curve on the figure.\u003c/p\u003e\n\u003ch2 id=\"monotone-function\"\u003emonotone function\u003c/h2\u003e\n\u003cp\u003efor \u003ca href=\"/posts/kbhode_linearilzation/\"\u003elinearilzation\u003c/a\u003e systems that are marginal (zero, negative real parts, one or more fully imaginary), we can\u0026rsquo;t use \u003ca href=\"/posts/kbhode_linearilzation/\"\u003elinearilzation\u003c/a\u003e itself to analyze the system.\u003c/p\u003e\n\u003cp\u003eTherefore, we have to use a function for which \\(\\dv t V(y(t)) \\geq 0\\) or \\(\\dv V(y(t)) \\leq 0\\) for all \\(t\\) called a \u003ca href=\"#monotone-function\"\u003emonotone function\u003c/a\u003e, which could give us hints about the function\u0026rsquo;s behavior.\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv t V(y(T) \u0026amp;= \\nabla 
V(y(t)) \\cdot y\u0026rsquo;(t) \\\\\n\u0026amp;= \\nabla V(y(t)) \\cdot F(y(t))\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThe gradient of \\(V\\) is always perpendicular to the level curve of \\(V\\), and\u0026mdash;when dotted with \\(F\\) the vector field of $y$\u0026mdash;we obtain a value that\u0026rsquo;s either positive or negative. When positive, the angle between the vector field \\(F\\) and \\(V\\) would be less than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;outwards\u0026rdquo; from the level sets. Otherwise, it would be more than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;inwards\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"conserved-function\"\u003econserved function\u003c/h2\u003e\n\u003cp\u003eits like a \u003ca href=\"#monotone-function\"\u003emonotone function\u003c/a\u003e, but \\(\\dv{V}{t} = 0\\). any solution curve would \u003cstrong\u003elie inside\u003c/strong\u003e a level curve of \\(V\\) (parts of the level curve). Its basically the intuition of a \u003ca href=\"#monotone-function\"\u003emonotone function\u003c/a\u003e, but the solution curves instead of pointing inwards and outwards, it just get stuck.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_linear_ode/","tags":null,"title":"Non-Linear ODE"},{"categories":null,"contents":"\u0026ldquo;Chaotic Dynamics\u0026rdquo; Because the word is sadly nonlinear.\nmotivating non-linearity \\begin{equation} \\dv t \\mqty(x \\\\ y) = f\\qty(\\mqty(x\\\\y)) \\end{equation}\nThis function is a function from \\(f: \\mathbb{R}^{2}\\to \\mathbb{R}^{2}\\). All the work on Second-Order Linear Differential Equations, has told us that the above system can serve as a \u0026ldquo;linearization\u0026rdquo; of a second order differential equation that looks like the follows:\n\\begin{equation} \\dv t \\mqty(x \\\\y) = A \\mqty(x \\\\ y) +b \\end{equation}\nActually going about deriving a solution to this requires powers of \\(A\\) to commute. 
If \\(A\\) has a independent variable in it, or if its a time-varying function \\(A(t)\\), you can\u0026rsquo;t actually perform the linearization technique (raising diagonalized \\(A\\) to powers) highlighted here.\nSo we need something new.\nSudden Review of Vector Functions Let\u0026rsquo;s take some function:\n\\begin{equation} f: \\mathbb{R}^{2} \\to \\mathbb{R}^{2} \\end{equation}\nIt will output a vector:\n\\begin{equation} f(x,y) = \\mqty(f_1(x,y)\\\\ f_{2}(x,y)) \\end{equation}\nSolving Non-Linear Systems, actually Let\u0026rsquo;s take a non-linear system:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = F(x,y) \\\\ \\dv{y}{t} = G(x,y) \\end{cases} \\end{equation}\nOverarching Idea: To actually solve this, we go about taking a Taylor Series (i.e. linearize) the functions next to its critical points. Then, we use an epsilon-delta proof to show that the linearization next to those critical points are a good approximation.\nSo! Let us begin.\nLet \\((x*,y*)\\) be a critical point of \\(F\\). Naturally, \\(d 0=0\\), so it is also a critical point of \\(G\\).\nSo we have:\n\\begin{equation} F(x*,y*)=G(x*,y*) = 0 \\end{equation}\nNow, we will begin building the \u0026ldquo;slope\u0026rdquo; of this function to eliminate the independent variable wholesale\u0026mdash;by dividing:\n\\begin{equation} \\dv{y}{x} = \\dv{y}{t} / \\dv{x}{t} = \\frac{G(x,y)}{F(x,y)} \\end{equation}\na divergence into epsilon delta proof\nstable A critical point is considered \u0026ldquo;stable\u0026rdquo; because, for each \\(\\epsilon \u0026gt;0\\), \\(\\exists \\delta \u0026gt;0\\), such that:\n\\begin{equation} |x_0-x*| \u0026lt; \\delta \\implies |x(t)-x*| \u0026lt; \\epsilon \\end{equation}\nasymptotically stable For every trajectory that begins close to the critical point, it will end up at the critical point as time increases. 
That is, \\(\\exists \\delta \u0026gt;0\\) such that:\n\\begin{equation} |x-x*| \u0026lt; \\delta \\implies \\lim_{t \\to \\infty } x(t)=x* \\end{equation}\nThis is essentially epsilon delta, but the limit traces out the entire process descending so the critical point is stable through the whole descend.\n","html":"\u003cp\u003e\u0026ldquo;Chaotic Dynamics\u0026rdquo; Because the word is sadly nonlinear.\u003c/p\u003e\n\u003ch2 id=\"motivating-non-linearity\"\u003emotivating non-linearity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(x \\\\ y) = f\\qty(\\mqty(x\\\\y))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis function is a function from \\(f: \\mathbb{R}^{2}\\to \\mathbb{R}^{2}\\). All the work on \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e, has told us that the above system can serve as a \u0026ldquo;linearization\u0026rdquo; of a second order differential equation that looks like the follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(x \\\\y) = A \\mqty(x \\\\ y) +b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eActually going about deriving a solution to this requires powers of \\(A\\) to commute. 
If \\(A\\) has a independent variable in it, or if its a time-varying function \\(A(t)\\), you can\u0026rsquo;t actually perform the linearization technique (raising diagonalized \\(A\\) to powers) \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#solving-homogeneous-higher-order-differential-equations\"\u003ehighlighted here\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo we need something new.\u003c/p\u003e\n\u003ch2 id=\"sudden-review-of-vector-functions\"\u003eSudden Review of Vector Functions\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s take some function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf: \\mathbb{R}^{2} \\to \\mathbb{R}^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIt will output a vector:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x,y) = \\mqty(f_1(x,y)\\\\ f_{2}(x,y))\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"solving-non-linear-systems--kbhnon-linear-systems-dot-md--actually\"\u003eSolving \u003ca href=\"/posts/kbhnon_linear_systems/\"\u003eNon-Linear Systems\u003c/a\u003e, actually\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s take a non-linear system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = F(x,y) \\\\\n\\dv{y}{t} = G(x,y)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eOverarching Idea\u003c/strong\u003e\u003c/strong\u003e: To actually solve this, we go about taking a Taylor Series (i.e. linearize) the \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003es next to its critical points. Then, we use an epsilon-delta proof to show that the linearization next to those critical points are a good approximation.\u003c/p\u003e\n\u003cp\u003eSo! Let us begin.\u003c/p\u003e\n\u003cp\u003eLet \\((x*,y*)\\) be a critical point of \\(F\\). 
Naturally, \\(d 0=0\\), so it is also a critical point of \\(G\\).\u003c/p\u003e\n\u003cp\u003eSo we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(x*,y*)=G(x*,y*) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we will begin building the \u0026ldquo;slope\u0026rdquo; of this function to eliminate the independent variable wholesale\u0026mdash;by dividing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} = \\dv{y}{t} / \\dv{x}{t} = \\frac{G(x,y)}{F(x,y)}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ea divergence into epsilon delta proof\u003c/p\u003e\n\u003ch3 id=\"stable\"\u003estable\u003c/h3\u003e\n\u003cp\u003eA critical point is considered \u0026ldquo;stable\u0026rdquo; because, for each \\(\\epsilon \u0026gt;0\\), \\(\\exists \\delta \u0026gt;0\\), such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_0-x*| \u0026lt; \\delta \\implies |x(t)-x*| \u0026lt; \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"asymptotically-stable\"\u003easymptotically stable\u003c/h4\u003e\n\u003cp\u003eFor every trajectory that begins close to the critical point, it will end up at the critical point as time increases. 
That is, \\(\\exists \\delta \u0026gt;0\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x-x*| \u0026lt; \\delta \\implies \\lim_{t \\to \\infty } x(t)=x*\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is essentially epsilon delta, but the limit traces out the entire process descending so the critical point is \u003ca href=\"#stable\"\u003estable\u003c/a\u003e through the whole descend.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_linear_systems/","tags":null,"title":"Non-Linear System"},{"categories":null,"contents":"kernel density estimation If your data is continuous, you can integrate over the entire dataset and normalize it to be able\n","html":"\u003ch2 id=\"kernel-density-estimation\"\u003ekernel density estimation\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-10_09-53-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIf your data is \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e, you can integrate over the entire dataset and normalize it to be able\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_parametric_learning/","tags":null,"title":"non-parametric learning"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnon_pathological_matricies/","tags":null,"title":"non-pathological matricies"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnonsingular_matricies/","tags":null,"title":"nonsingular matricies"},{"categories":null,"contents":"The nonviolence movement a method of protest which is developed by Mahatma Ghandi and leveraged by Martin Luther King in the civil rights movement.\nThe idea is to achieve civil disobedience and allowing oneself to be punished so egregiously without inciting violence so at to elicit sympathy across the nation.\nThe civil rights movement leveraged training tactics and training to ensure its participants would be 
completely nonviolent and so elicit the correct response.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnonviolence_movement/\"\u003enonviolence movement\u003c/a\u003e a method of protest which is developed by \u003ca href=\"/posts/kbhmahatma_ghandi/\"\u003eMahatma Ghandi\u003c/a\u003e and leveraged by \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe idea is to achieve civil disobedience and allowing oneself to be punished so egregiously without inciting violence so at to elicit sympathy across the nation.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e leveraged training tactics and training to ensure its participants would be completely nonviolent and so elicit the correct response.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnonviolence_movement/","tags":null,"title":"nonviolence movement"},{"categories":null,"contents":"The norm is the \u0026ldquo;length\u0026rdquo; of a vector, defined generally using the inner product as:\n\\begin{equation} \\|v\\| = \\sqrt{\\langle v,v \\rangle} \\end{equation}\nadditional information properties of the norm \\(\\|v\\| = 0\\) IFF \\(v=0\\) \\(\\|\\lambda v\\| = |\\lambda|\\|v\\|\\) Proof:\nBy definition of an inner product, \\(\\langle v,v \\rangle = 0\\) only when \\(v=0\\) See algebra: \\begin{align} \\|\\lambda v\\|^{2} \u0026amp;= \\langle \\lambda v, \\lambda v \\rangle \\\\ \u0026amp;= \\lambda \\langle v, \\lambda v \\rangle \\\\ \u0026amp;= \\lambda \\bar{\\lambda} \\langle v,v \\rangle \\\\ \u0026amp;= |\\lambda |^{2} \\|v\\|^{2} \\end{align}\nmotivating the norm using actual numbers In linear algebra, the norm of a vector in a real vector space is defined as follows:\n\\begin{equation} \\| x\\| = \\sqrt{{{x_1}^{2} + \\dots + {x_n}^{2}}} \\end{equation}\nNote that, given 
the definition of dot product, \\(\\| x \\|^{2} = x \\cdot x\\).\nThe norm in complex vector space requires taking the absolute value (for \\(a+bi\\), \\(|a+bi| = \\sqrt{{a^{2}+b^{2}}}\\)) of each slot. That is, for Euclidean Inner Product spaces:\n\\begin{equation} \\|z\\| = \\sqrt{|z_1|^{2} + \\dots |z_{n}|^{2}} \\end{equation}\notherwise, simply squaring the complex number (giving us \\(a^{2}-b^{2}\\)) may very well yield negative numbers, which means we\u0026rsquo;d have an imaginary norm!\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e is the \u0026ldquo;length\u0026rdquo; of a vector, defined generally using the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|v\\| = \\sqrt{\\langle v,v \\rangle}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-the-norm\"\u003eproperties of the norm\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\\(\\|v\\| = 0\\) IFF \\(v=0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\|\\lambda v\\| = |\\lambda|\\|v\\|\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBy definition of an inner product, \\(\\langle v,v \\rangle = 0\\) only when \\(v=0\\)\u003c/li\u003e\n\u003cli\u003eSee algebra:\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\begin{align}\n\\|\\lambda v\\|^{2} \u0026amp;= \\langle \\lambda v, \\lambda v \\rangle \\\\\n\u0026amp;= \\lambda \\langle v, \\lambda v \\rangle \\\\\n\u0026amp;= \\lambda \\bar{\\lambda} \\langle v,v \\rangle \\\\\n\u0026amp;= |\\lambda |^{2} \\|v\\|^{2}\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"motivating-the-norm-using-actual-numbers\"\u003emotivating the norm using actual numbers\u003c/h3\u003e\n\u003cp\u003eIn linear algebra, the \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e of a \u003ca 
href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e in a \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ereal vector space\u003c/a\u003e is defined as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\| x\\| = \\sqrt{{{x_1}^{2} + \\dots + {x_n}^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote that, given the definition of \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e, \\(\\| x \\|^{2} = x \\cdot x\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e in \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e requires taking the absolute value (for \\(a+bi\\), \\(|a+bi| = \\sqrt{{a^{2}+b^{2}}}\\)) of each slot. That is, for \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e spaces:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|z\\| = \\sqrt{|z_1|^{2} + \\dots |z_{n}|^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eotherwise, simply squaring the \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e (giving us \\(a^{2}-b^{2}\\)) may very well yield negative numbers, which means we\u0026rsquo;d have an imaginary norm!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnorm/","tags":null,"title":"norm"},{"categories":null,"contents":"See Gaussian distribution\n","html":"\u003cp\u003eSee \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnormal_distribution/","tags":null,"title":"normal distribution"},{"categories":null,"contents":"normal random variable is a continuous random variable that allows you to manually specify the expectation and variance\nconstituents \\(\\mu\\) the mean \\(\\sigma\\) the variance requirements \\begin{equation} X \\sim \\mathcal{N}(\\mu, \\sigma^{2}) 
\\end{equation}\nPDF:\n\\begin{equation} f(x) = \\frac{1}{\\sigma \\sqrt{2 \\pi}} e^{-\\frac{(x-\\mu)^{2}}{2 \\sigma^{2}}} \\end{equation}\nadditional information normal maximizes entropy no other random variable uses as little parameters to convey as much information\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnormal_random_variable/\"\u003enormal random variable\u003c/a\u003e is a \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e that allows you to manually specify the expectation and variance\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mu\\) the mean\u003c/li\u003e\n\u003cli\u003e\\(\\sigma\\) the variance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim \\mathcal{N}(\\mu, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\frac{1}{\\sigma \\sqrt{2 \\pi}} e^{-\\frac{(x-\\mu)^{2}}{2 \\sigma^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"normal-maximizes-entropy\"\u003enormal maximizes entropy\u003c/h3\u003e\n\u003cp\u003eno other \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e uses as little \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es to convey as much information\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnormal_random_variable/","tags":null,"title":"normal random variable"},{"categories":null,"contents":"Foreword Hi there, internet traveler.\nThe time is 2015/2016, I was either in 5th or 6th grade. 
At that time, I was barely beginning to be actually comfortable using the language of English.\nOne of the ways I practiced English, which is also a habit I continue to do today, is to write. I write mostly expository prose now, but, back then, shining with childish naïvete, I decided to write a multi-part story as a means of practicing English.\nAt the time, I was fortunately supported by four very helpful adults\u0026mdash;ESL instructors, teachers from the local government\u0026rsquo;s ESL program, local students, family friends\u0026mdash;who have supported me and edited this silly story as a means of helping me better my command of English.\nIronically, this story is set in 2018, I think, two years after when I wrote it. Its now 2022, almost 7 years after. Make of that what you will.\nTherefore\u0026mdash;\nNorman, an epic tale told in N parts\nWritten by yours truly, Houjun Liu, circa 2016.\nEdited by: Lynne Zummo, Dorit Hahn, Susan Cole, and Jennifer Fan.\nTypesetted: May 10th, 2022. Menlo Park, California.\nPrologue: James Peter On a sunny day, in a small house at 1623 Wesson Ave, James lay on a dirty, tiny bed. Suddenly a dog was in James’ sight. James stood up, stared at the dog. It was a small, brown, white, fuzzy dog with a tiny stump. The dog walked around James’ bed, looking silly.\n“Let’s call you Norman! It is a good name for you!”\n“There is no dog allowed in my house, get’em out! RIGHT now, or I will get YOU out!” shouted Mr. Miller.\n“Dude,” a voice came from James’ mind. Mr. Miller, the owner of the Wacky Hair Salon, who is James’ uncle, barged into James’ room, continuously shouting.\n“Get’em out, RIGHT NOW! NOW! YOU HEAR ME?”\nJames, staring at Norman, just didn’t care.\nNorman seemed to not understand all this. He followed Mr. Miller to the window, and \u0026hellip; just as suddenly as he had come in, he was thrown out by Mr. 
Miller.\nWhile Norman was wandering around, James started crying.\nMonths passed…\nPart 1 On a cold winter afternoon, Mr. Miller is sending James to an orphanage as punishment for doing “bad” things. James just doesn’t understand this. He SIMPLY wants Norman to come back!\nWhen they arrive, James finally realizes why he didn’t have parents. The truth is dreadful: his dad went crazy from programming in binary code.\n“I will go crazy, too,” James thinks. “It is not an easy job, no sir.” His mom’s situation was even worse, for she was killed by the African disease Ebola.\nHe trudges into the front building with Dr. Brains, and sees children that had been starved, gone mad, and even had been wandering around hopelessly! Many questions flew into James’ mind: Will I go crazy, too? Will I be starving, too? Will I also be wandering around like a zombie?! Feeling scared, he starts to wander, feel hungry, and starve like the other kids ……\n“Wait ! NO, I can’t do that,” thought James.\nDr. Brains takes James to walk around the orphanage, he realizes it is actually a better place to be rather than 1623 Wesson Ave. He sees cats, he sees ducks, he sees horses, he sees a playground, and he sees…\nNORMAN!\nPart 2 Dr. Brains, who looks bewildered, is staring at him.\n“How can you know him? You just arrived here!”.\n“Long story…,” explains James. “I once met the dog, and he was thrown out by Mr. Miller from where I used to live.”\n“So, this is PART of our orphanage. As you see, it is big. We now should thank the donor, who passed away, Dr. James Rover Peter…” There is a little pause, then Dr. Brains continues.\n“Who is YOUR dad!”\nThey continue walking until they get to a building labeled ‘EDU_K-4’.\n“This is the K-4 grade educational multifunctional building,” explains Dr. Brains, “where you will be staying for half a year. Then you will move to this building for study.”\nDr. 
Brains is now pointing at a building labeled ‘EX-EDU_5-12’.\nThey continue to walk until they get to another building labeled ‘OPH_LV #20312’. It is a small, lovely building, much like an apartment. “This is where you will live, in room # 20312_004,” says Dr. Brains while he hands James a key. Then he gives him a packet, which reads: Vanilla orphanage grand rules and schedule.\n“This is all you need, good day! I will leave you here.”\nJames watches Dr. Brains until he is out of sight.\nHe walks straight into the room. It looks clean, neat, like a 3-star hotel. There is a twin size bed, a desk, and a restroom. He sits down and starts to read the packet:\nChapter 1: Grand rules Welcome to Vanilla Orphanage! This is a place where you can enjoy yourself, explore yourself, and get prepared for the world!\nBut, there MUST be some rules in our orphanage to keep you and your classmates safe.\nFirst of all, you MUST not run in the front building.\nSecond, no talking is allowed while a grand meeting is taking place (see chapter 2 for more info).\nThird, follow your schedule all the time.\nFourth, if you have an emergency, use the emergency call phone (You don’t need to dial it, it will automatically connect to Vanilla Orphanage Hospital). But if you can walk and speak normally, go to Vanilla Orphanage Hospital for more help.\nFifth, the use of a regular telephone is only allowed three times a day. If your teacher calls you, it won’t count. You can only use a regular telephone for calling inside-the-orphanage friends, no outside call is allowed. To see the interior telephone numbers, see chapter 3.\nChapter 2: Grand schedule + your personal schedule Grand schedule\nYour personal schedule\nMeet me every OTHER Sunday at 15:00 at grand office starting 1/2/2019.\nGrand meeting will take place every first day of the month at the big hall in the front building. 
Everyone will attend the grand meeting; it lasts the whole day.\nDinner, Lunch, and Breakfast will be served at Front Building.\nDr. Flynn (k-4 Sciences) 4242-5000-2525 Dr. Jones (5-12 Sciences) 2134-1000-1045 Dr. Foster (k-4 Math) 2456-6206-6200 Ms. Garcia (5-12 Math) 1341-4000-4012 Mrs. Newman (k-4 Talk-it-out assistant) 2563-6374-7407 Mrs. Willems (5-12 Talk-it-out assistant) 8908-6997-9000 Dr. Brains (Headmaster) 2563-0035-3526\nPart 3 A brief day, he does whatever he is told, follows the schedule, does the work. But, something that amazes James is that the food is actually YUMMY.\nHe does enjoy eating at vanilla orphanage. Normally, it is like a buffet, but a limited one. You only can have one serving of meat, 2 vegetables, a delicious main dish (e.g. cooked rice, cooked noodles …).\nBy the table, James sees students laugh at each other, talk with each other, and, from far away, he sees a little brown-white puppy is running to a girl with curly hair, and stops.\nNorman!!!!!!!!!!!!!\nIt is funny that the girl asks exactly the same thing as Dr. Brains asked:” How can you know him?” He explains the whole story why he knows Norman and asks his very own and very first question to the very first student he meets at the vanilla orphanage: “How did he get here?”.\n“Long story,” says the girl, “he first arrived here because of our save the dogs project, Calvin and I found him.”\n“And who are you? I’m James”.\n“Sorry, I forgot about that, my name is Amelia!”.\nA tall, black haired student comes and joins them. “Hi there, what’s up? I heard someone mention my name.”\n“Oh, we were just talking about the dog. Our new friend, James, gave him a name: Norman,” responds Amelia.\n“Guess what?” asked Calvin, “I taught him Chinese!”.\n“Oh interesting, show us!” says Amelia.\n“狗儿,请来一下; I told him to come.” Suddenly, Norman comes and starts running around Calvin. “你的名字叫做 Norman; I told him that his name is Norman,” says Calvin. 
The dog starts moving around in a funny way, which James feels weird about. “Oh, don’t worry about that, that’s the Funny-Brown-Hula-Stump-Wiggle-Wag-Dance that I taught him,” Says Amelia.\nPart 4 Dong, dong, dong, dong…… The school bell rings, everyone gets up to do everything they need. It’s Sunday. According to Dr. Brains, James needs to meet Dr. Brains at the grand office.\nWhen he arrives, Dr. Brains says nothing but a greeting, and he hands James a slip of paper that says:\nThe organization of Brainiacs: 52345 Brainful way, North town, CA 94780\n“What is the org…”. “Stop! I will explain everything right after!” explains Dr. Brains. “Just remember what this parchment says!”\nHe hands him a telephone, and says, “Dial 52325, when you hear a beep, dial 900. Answer every question it’s asking you.”\nHe does what he is told, then a girl’s voice says: “Welcome to the new member registration center of TOFB, or the Organization of Brainiacs. Please answer the question: What is your address? “James states the orphanage’s address. “What is your reason to join?” Dr. Brains says quietly,” Invited.”\nWho invited you?” James answers:”Dr. Brains.” “ Welcome, again, new member. Please take the blood needle that appears in front of you and use it to poke your left ring finger.” James does this, and the voice says, ”Thanks for joining! Please hang up the phone!”\n“Understand this?” Dr. Brains says, ”Let’s go!”. “But, go where?” asks James. ”T-O-F-B,” replies Dr. Brains. They walk straight into a box, where James spots a device. Dr. Brains pushes a button on the device, and suddenly, James feels dizzy. They are spinning. They spin faster and faster. Finally, he hears a pop, then, suddenly, he falls into another device which is like a poison chamber. He and Dr. Brains open the door, and he sees a small, transparent house that reads T-O-F-B.\nPart 5 They walk straight into the house, and see a small elevator that is made out of glass. 
While they walk into the elevator, James feels something is seriously wrong. First, this is a one-story building, and unlike the 5th avenue apple retail store, it has no underground floor. Second, the elevator has NO button, how can Dr. Brains go anywhere with this elevator?\nDr. Brains seems solemn, he carefully looks at the emergency speaker, then, suddenly, James hears a loud CRACK. Then the elevator starts getting darker and darker. After 5 seconds, it is not transparent anymore.\nThe elevator starts to go down deeper and deeper. Then a screen pops up.”Hello, WELCOME to the Organization of Brainiacs. Please scan your card…” says a voice. He doesn’t have a card!! He looks around to find Dr. Brains, but, he is gone!\n“Where else can he be?”James thinks,”there is no way out!”. Suddenly, smoke fills the elevator, James first doesn’t realize what it is, but suddenly, he knows it.”Oh oh!” thinks James, ”IT IS GAS!!!!!”\nChapter 2: T-O-F-B Way underground, Dr. Brains hesitates. “OOOOOOOOOOPS! I forgot James in the elevator..” , he thinks, ”and the killer gas X03-00 would be deadly.”\nHe rushes to the “hacker center”, and shouts, ”You guys! STOP the elevator! And STOP the gas! Open the doors! Clear out the gas! He is NOT a criminal!”\nEverybody freezes, and some whisper, ”Oops, x03-00 gas can knock a human out in 10 seconds”.\nPart 6 Back in the elevator, James barely has time to call the emergency. “Does Dr. Brains mean to kill me?” he thinks, ”or is this a test for me?” He has more things to worry about than that. However, the good news is that Dr.Brains and his team hurry to the elevator just in time, which is when he gets knocked off. They give him the medicine that will neutralize the effect of the gas, and then they hurry to prepare the WELCOME event of the new T-O-F-B members in this season.\nSoon after, James wakes up, safe and sound. Dr. Brains is right by him.\n“Sorry for the accident, but here, welcome to T-O-F-B”, Dr. 
Brains says with a little smile.\nThere is a little awkward moment when he and Dr. Brains both try to say something, but no sounds come out. It doesn’t last long, just a few seconds. Then Dr. Brains continues, ”The Organization of Brainiacs is a little like what you see in the movie M-I-B. We basically are the only legal group in human and alien law that can meet, communicate with, and study the aliens from outer universe. You know one of our aliens: Norman. He actually can speak Hidoeneese AND English.”\n“But what is Hidoeneese?” James asks.\n“Hidoeneese is the language of the Hidonipothan.” Dr.Brains says.\n“And what is Hidonipothan?” he asks, again.\n“Long story short, it’s kind of an alien tribe. Later at breakfast, Norman will explain. By the way, he likes his name Norman.” Dr. Brains responds.\n“What? Breakfast? It’s already morning?” James asks.\n“Well yes, you have been knocked out by gas for almost 12 hours, and now it is 6:00 in the morning,” Dr. Brains says, ”you still can get about 3 hours rest. Everyone in T-O-F-B sleeps late and wakes up late. And one last thing, I will give you the NEW MEMBER #04 packet so you can learn more about T-O-F-B.”\nHe hands him a packet, just like the packet in the orphanage. But it is hand written.\nWelcome, new member, we are proud that you are here. As the founder of T-O-F-B, I will introduce you to the few basics of daily life.\nFirst, you all have an outside “job”, which you will still perform. Since you are a child, AS I KNOW, we will just keep you up-to-date and call you via the headphones that we will give you. We won’t interrupt your class, unless it is an emergency, I promise. You will be meeting once a month so it won’t affect any of your grades.\nSecond, in T-O-F-B, we treat any child like an adult. It means a large work load, but you can also access any part of our centre freely with your BNPS. But in some areas, we want you to have adult supervision.\nYour supervisor is:\nGrave Hono ( Dr. 
Brains, as a substitute name in the human world)\nWe will give you a map and what you should do later.\nDr. Ranboro\n9/23/2018\nPart 7 He falls asleep……… He dreams about aliens attacking the centre, and only Dr. Brains, Dr. Ranboro, Norman, Amelia, Calvin and a guy who he didn’t know survived. He thinks it’s just a dream, but what he doesn’t know is, this day is coming closer and closer.\n“Wakey, Wakey!” Dr. Brains shouts, laughing” JJJJJJJJAAAAAAMMMMMMMEEEEEEEESSSSSSSSSSSS!!!!!!!!!!!!”. James finally wakes up, and mumbles, ”What the heck in the world was this?”\nDr. Brains seems to be confused. “You didn’t recognize my voice? Wake up, Buddy! Get dressed! The welcoming party is waiting!!”.\nHe gets dressed, hurries to follow Dr. Brains, and they go outside to a “secretive” room that is labeled “G—CHECK, BNPS ROOM”. They go in, and he sees a bunch of devices that are new to him. He sits down, just as Dr. Brains ordered, and Dr. Brains brings a needle to his face, straight into his eyes. “Watch out!” James shouts. He doesn’t even have time to think, as the needle goes in and out of his eyes. Dr. Brains says, “Good, we already got the DNA, scanned the iris, scanned the brain map. Ok, 2 last things, then we are good to go!” He does a bunch of scans on James’ finger, and he enters a password into a machine. “Ok, one last thing. Print your BNPS and tattoo it to your shoulder!” Dr. Brains says. The machine reads “bring human to the tattoo station …… step 3/5”. Dr. Brains orders him to put his shoulder into a cylinder. He feels a little pressure and his shoulder pops out of the machine. He sees a little piece of metal on his shoulder and it reads ”TOFB.1029358612.JP/////////” The machine also prints out a metal card. “Don’t lose it!” Dr. Brains says, “it is your ID here!”\nThey walk out of the room and into the elevator. It is an elevator like the one in the TOFB’s entrance. The one that changes color and transparency, only much more slowly. 
When it tells him to scan the card, he knows better than to not do so. The elevator seems smart, and it asks “Homo, and James! Morning! Which level area do you want to go to?”. Dr. Brains responds, “Dining room number three, formal, both of us”. The elevator responds with a “TOFB wishes you a pleasant day!” When the door opens again, they enter a large area, like the first level of a 5 star hotel. Everything is white: people’s clothes, the ground, the staircase, the light, etc,. He sees Dr.Brains’ clothes change to white! He says, “Dr.Brains! Your outfit changed color!” “Yours did, too!” Dr. Brains responds. James looks down at his clothes. His had actually, as Dr. Brains said, changed color and texture.\nThey eat their breakfast—salmon, soup and broccoli, and Dr. Brains announces to him, “OK, now let’s do some work stuff”. They head back to the living area, and they wash themselves. Then they head to the meeting area. Norman, Dr. Ranboro and the other guy James sees in his dream are waving to James-and-Dr.Brains-in-the-black-suit-and-a-tie.\nPart 8 “So”, Dr. Ranboro says, “Welcome! Thank you for joining the organiza………?!!!\u0026gt;?*\u0026amp;%*\u0026amp;^%\u0026amp;^%%∆˙ßå˚µß∂˙”. FFF! A small arrow flies though the walls and hits Dr. Ranboro, making his words into nonsense. “å∆∆߬—å˚å!!!!!!¡¡¡¡¡¡?¿!¡……Jams…….main sq……is com…..tel..hom……nnnoor…….¡¡¡!!!???¿¿¿å∂ß˚˚˚˚∆ƒå˙”, he says. James can barely understand, but he knows one thing, they will tell him about the main sq…whatsoever.\n“Let’s jump into the topic,” Norman says. “The main sq… is actually an attack called The Main Square Rattle, or what we call TMSR. It’s started because another kind of alien, The Froakan, wants to use humans as slaves, own the TOFB AND the Hidonipothan.The only way to stop that is to get the battle-rattler and rattle it. But if The F’s got the rattler and rattle it, well, we will all freeze and do what they want, like a bunch of zombies. The state of being a zombie is called ratling. 
Sadly, there isn’t a known cure yet for ratle. But Dr. Brains is working on it! Lastly, the battle-rattler is locked in the Ratle Mountains. And the only way to enter the Ratle Mountain is by using Dr. Ranboro’s key. Otherwise, you will have very little chance to get out alive! And that’s why they shot Dr. Ranboro. As a matter of fact, the arrow is poisonous. If we don’t send him to hospital now, he will become a baby in 72 hours.”\nTalking about Dr. Ranboro, James notices Dr. Ranboro’s hair getting darker and darker from the old-man-white. They send him to the hospital about 5 minutes later.\nChapter 3:That’s called war After another ride on the “TOTP—0111”, which is the “squeeze box” to get to the North Town, they are back at Dr. Brains’ office. But something weird has happened, only students in OPH_LV # 40000 - OPH_LV # 49999 are still in the orphanage. Dr. Brains tries to find out why, but he can’t. And that’s when all of the humans in the orphanage hear a gigantic laugh coming from nowhere.”HHHAAAHHHAAAAHAHHAAA! This is your day, Homo, your death ceremony!HHAAHAHA!”\nMonths passed again……..\nPart 9 The daily live is almost the same as before, just that a part of the students in the orphanage is missing. But live is still very simple. Tasty food, friendly teachers, and visits to TOFB every other week.\nOne day, James is in his math class.\n“So when 2 is raised to the……”\n“Beep! Beep!”\nHis secret headphones from TOFB send a message request to him.\n“Beep! Beep!”\n“Didn’t that Ranboro guy say they won’t interrupt our classes?” James thought.\n“And let’s do some prob….”\n“Beep! Beep!”\nJames requests a bathroom break and answers the headphones in the boys’ restroom.\n“It’s an emergency!!! The Froakans are getting closer to the rattler!!! Help!!!! James, take Homo and get here now!” Norman cries.\nAs fast as he can, he rushes to Dr. Brains’ office, grabs Dr. 
Brains and locks him and himself into the TOTB-0111.\nAnd as fast as lightning, they are here, in the North town.\nThey rush into the elevator, he swipes his and Dr. Brains’ card and rushes to Dr. Ranboro’s office.\n“Quick! They will rattle it in like…like 20 minutes and we will all ratle!!!”, Norman hollers.\nAnd again, as fast as lightning, they get war-dressed and get into the fastest transport system in TOFB.\nAs James looks down, he is wearing a strong iron chest plate that reads ’T-O-F-B///////The Smarter one’. And on his shoulder, there is a cord which extends from his Digital ID to the chest plate. There is a screen in his chest plate that is unbreakable. There is a soft protection layer, then there is a swimming layer, then the pressure layer, an iron pad, an air supply on his side if the enemy spreads poisonous gas, and an armor on the outside.\nAmazingly, these things only weigh 1 pound and fit perfectly.\nHe is war-trained, so he knows exactly what to do with this fancy outfit. The screen is the main control, the outfit will detect the environment and change to the perfect layer.\nUploaded ate 10/25/2015 [sic.]\nPart 10 The ride seems to be long, but it’s actually only 5 minutes. They will enter the Ratle Mountains from the North End, which is the second-safest route into the mountains without Dr. Ranboro’s key.\nAnd there they are, in the Ratle Mountains. They are led by Mr. Giose, who was the other guy in his dream when he came to the TOFB the first time. The other four warriors are Norman, Dr. Ranboro, Dr. Brains and James. The first 20 miles are short and boring. Nothing happens. But after the 29th mile mark, they enter a cave.\nThe cave is dark. There are only few lights flashing. They are not worried, until they hear a scream.\n“OOOOOOO! Eeeek!”\n“Ahhhhhhhhhhhh! ZZZ! ZZZ…ZZZ..ZZZ…ZZZ…ZZZ…zzz…” The voice is getting smaller and smaller.\n“It is the sleeping spider! It will knock out a human in NO time!” Mr. 
Giose shouts.\nJames and the whole crew know what to do. They press a few buttons on their screens, and their helmets of their armor dissolve into the air. What is left behind, is the air filtering system.\n“Three! Two! One! It’s gas,” Norman says, playfully.\nDr. Brains spreads out the SSG gas, which will, hopefully, knock out the sleeping spider.\nThat wastes a LOT of time. Before they know it, they all starts to ratle.\nIt is James who feels it first. He feels extremely and uncontrollably happy. He starts running around and talking to other people in a rude way. To himself, it feels like as if he is drifting into unconsciousness.\nThen the same happens to Dr. Brains, and then Norman, followed by Mr. Giose. Luckily, Dr. Ranboro called the TOFB’s team 2 to come for help before he changes, too.\nI never knew what happened after this incident until the year of 2021. Since James was ratling, he couldn’t remember the whole year of 2020. He recovered on the day of 10/26/2021. Dr. Foster, who works at the orphanage AND at TOFB found a cure using Chinese Herbal Tea.\nWell, let’s jump into the time machine. Backward to 2014!!\nChapter 4:Childhood We jump into the time machine, and swoosh. Here we are, in the year of 2014. We are standing in front of 1623 Wesson Ave. It is a sunny day. The Peters are getting ready for a trip to Africa. James greets his uncle, who will look after the house while they are gone. Mrs. Peter is packing hastily. And Mr. Peter is bringing his computer, because, weirdly, he is starting to like CODING in BINARY CODE. Nothing more to say, so here the story goes.\nPart 11 “Hurrrrrryyyy!” Mr. Peter shouts. “Or else we will be late for the plane!”\nThe Peters hurry to the bus stand, waiting for the airport express.\nAfter about an hour ride, they finally arrive at the San Francisco International Airport.\nThey check in. And they hurry to the security check. At the security check, Mrs. Peter thinks she forgot something. 
Yes, she forgot to bring ANY medication for the disease Ebola.\n","html":"\u003ch2 id=\"foreword\"\u003eForeword\u003c/h2\u003e\n\u003cp\u003eHi there, internet traveler.\u003c/p\u003e\n\u003cp\u003eThe time is 2015/2016, I was either in 5th or 6th grade. At that time, I was barely beginning to be actually comfortable using the language of English.\u003c/p\u003e\n\u003cp\u003eOne of the ways I practiced English, which is also a habit I continue to do today, is to write. I write mostly expository prose now, but, back then, shining with childish naïvete, I decided to write a multi-part story as a means of practicing English.\u003c/p\u003e\n\u003cp\u003eAt the time, I was fortunately supported by four very helpful adults\u0026mdash;ESL instructors, teachers from the local government\u0026rsquo;s ESL program, local students, family friends\u0026mdash;who have supported me and edited this silly story as a means of helping me better my command of English.\u003c/p\u003e\n\u003cp\u003eIronically, this story is set in 2018, I think, two years after when I wrote it. Its now 2022, almost 7 years after. Make of that what you will.\u003c/p\u003e\n\u003cp\u003eTherefore\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eNorman, an epic tale told in N parts\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWritten by yours truly, Houjun Liu, circa 2016.\u003c/p\u003e\n\u003cp\u003eEdited by: Lynne Zummo, Dorit Hahn, Susan Cole, and Jennifer Fan.\u003c/p\u003e\n\u003cp\u003eTypesetted: May 10th, 2022. Menlo Park, California.\u003c/p\u003e\n\u003ch2 id=\"prologue-james-peter\"\u003ePrologue: James Peter\u003c/h2\u003e\n\u003cp\u003eOn a sunny day, in a small house at 1623 Wesson Ave, James lay on a dirty, tiny bed. Suddenly a dog was in James’ sight. James stood up, stared at the dog. It was a small, brown, white, fuzzy dog with a tiny stump. The dog walked around James’ bed, looking silly.\u003c/p\u003e\n\u003cp\u003e“Let’s call you Norman! 
It is a good name for you!”\u003c/p\u003e\n\u003cp\u003e“There is no dog allowed in my house, get’em out! RIGHT now, or I will get YOU out!” shouted Mr. Miller.\u003c/p\u003e\n\u003cp\u003e“Dude,” a voice came from James’ mind. Mr. Miller, the owner of the Wacky Hair Salon, who is James’ uncle, barged into James’ room, continuously shouting.\u003c/p\u003e\n\u003cp\u003e“Get’em out, RIGHT NOW! NOW! YOU HEAR ME?”\u003c/p\u003e\n\u003cp\u003eJames, staring at Norman, just didn’t care.\u003c/p\u003e\n\u003cp\u003eNorman seemed to not understand all this. He followed Mr. Miller to the window, and \u0026hellip; just as suddenly as he had come in, he was thrown out by Mr. Miller.\u003c/p\u003e\n\u003cp\u003eWhile Norman was wandering around, James started crying.\u003c/p\u003e\n\u003cp\u003eMonths passed…\u003c/p\u003e\n\u003ch3 id=\"part-1\"\u003ePart 1\u003c/h3\u003e\n\u003cp\u003eOn a cold winter afternoon, Mr. Miller is sending James to an orphanage as punishment for doing “bad” things. James just doesn’t understand this. He SIMPLY wants Norman to come back!\u003c/p\u003e\n\u003cp\u003eWhen they arrive, James finally realizes why he didn’t have parents. The truth is dreadful: his dad went crazy from programming in binary code.\u003c/p\u003e\n\u003cp\u003e“I will go crazy, too,” James thinks. “It is not an easy job, no sir.” His mom’s situation was even worse, for she was killed by the African disease Ebola.\u003c/p\u003e\n\u003cp\u003eHe trudges into the front building with Dr. Brains, and sees children that had been starved, gone mad, and even had been wandering around hopelessly! Many questions flew into James’ mind: Will I go crazy, too? Will I be starving, too? Will I also be wandering around like a zombie?! Feeling scared, he starts to wander, feel hungry, and starve like the other kids ……\u003c/p\u003e\n\u003cp\u003e“Wait ! NO, I can’t do that,” thought James.\u003c/p\u003e\n\u003cp\u003eDr. 
Brains takes James to walk around the orphanage, he realizes it is actually a better place to be rather than 1623 Wesson Ave. He sees cats, he sees ducks, he sees horses, he sees a playground, and he sees…\u003c/p\u003e\n\u003cp\u003eNORMAN!\u003c/p\u003e\n\u003ch3 id=\"part-2\"\u003ePart 2\u003c/h3\u003e\n\u003cp\u003eDr. Brains, who looks bewildered, is staring at him.\u003c/p\u003e\n\u003cp\u003e“How can you know him? You just arrived here!”.\u003c/p\u003e\n\u003cp\u003e“Long story…,” explains James. “I once met the dog, and he was thrown out by Mr. Miller from where I used to live.”\u003c/p\u003e\n\u003cp\u003e“So, this is PART of our orphanage. As you see, it is big. We now should thank the donor, who passed away, Dr. James Rover Peter…” There is a little pause, then Dr. Brains continues.\u003c/p\u003e\n\u003cp\u003e“Who is YOUR dad!”\u003c/p\u003e\n\u003cp\u003eThey continue walking until they get to a building labeled ‘EDU_K-4’.\u003c/p\u003e\n\u003cp\u003e“This is the K-4 grade educational multifunctional building,” explains Dr. Brains, “where you will be staying for half a year. Then you will move to this building for study.”\u003c/p\u003e\n\u003cp\u003eDr. Brains is now pointing at a building labeled ‘EX-EDU_5-12’.\u003c/p\u003e\n\u003cp\u003eThey continue to walk until they get to another building labeled ‘OPH_LV #20312’. It is a small, lovely building, much like an apartment. “This is where you will live, in room # 20312_004,” says Dr. Brains while he hands James a key. Then he gives him a packet, which reads: Vanilla orphanage grand rules and schedule.\u003c/p\u003e\n\u003cp\u003e“This is all you need, good day! I will leave you here.”\u003c/p\u003e\n\u003cp\u003eJames watches Dr. Brains until he is out of sight.\u003c/p\u003e\n\u003cp\u003eHe walks straight into the room. It looks clean, neat, like a 3-star hotel. There is a twin size bed, a desk, and a restroom. 
He sits down and starts to read the packet:\u003c/p\u003e\n\u003ch2 id=\"chapter-1-grand-rules\"\u003eChapter 1: Grand rules\u003c/h2\u003e\n\u003cp\u003eWelcome to Vanilla Orphanage! This is a place where you can enjoy yourself, explore yourself, and get prepared for the world!\u003c/p\u003e\n\u003cp\u003eBut, there MUST be some rules in our orphanage to keep you and your classmates safe.\u003c/p\u003e\n\u003cp\u003eFirst of all, you MUST not run in the front building.\u003c/p\u003e\n\u003cp\u003eSecond, no talking is allowed while a grand meeting is taking place (see chapter 2 for more info).\u003c/p\u003e\n\u003cp\u003eThird, follow your schedule all the time.\u003c/p\u003e\n\u003cp\u003eFourth, if you have an emergency, use the emergency call phone (You don’t need to dial it, it will automatically connect to Vanilla Orphanage Hospital). But if you can walk and speak normally, go to Vanilla Orphanage Hospital for more help.\u003c/p\u003e\n\u003cp\u003eFifth, the use of a regular telephone is only allowed three times a day. If your teacher calls you, it won’t count. You can only use a regular telephone for calling inside-the-orphanage friends, no outside call is allowed. To see the interior telephone numbers, see chapter 3.\u003c/p\u003e\n\u003ch2 id=\"chapter-2-grand-schedule-plus-your-personal-schedule\"\u003eChapter 2: Grand schedule + your personal schedule\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eGrand schedule\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eYour personal schedule\u003c/p\u003e\n\u003cp\u003eMeet me every OTHER Sunday at 15:00 at grand office starting 1/2/2019.\u003c/p\u003e\n\u003cp\u003eGrand meeting will take place every first day of the month at the big hall in the front building. Everyone will attend the grand meeting; it lasts the whole day.\u003c/p\u003e\n\u003cp\u003eDinner, Lunch, and Breakfast will be served at Front Building.\u003c/p\u003e\n\u003cp\u003eDr. 
Flynn (k-4 Sciences)\n4242-5000-2525\nDr. Jones (5-12 Sciences)\n2134-1000-1045\nDr. Foster (k-4 Math)\n2456-6206-6200\nMs. Garcia (5-12 Math)\n1341-4000-4012\nMrs. Newman (k-4 Talk-it-out assistant)\n2563-6374-7407\nMrs. Willems (5-12 Talk-it-out assistant)\n8908-6997-9000\nDr. Brains (Headmaster)\n2563-0035-3526\u003c/p\u003e\n\u003ch3 id=\"part-3\"\u003ePart 3\u003c/h3\u003e\n\u003cp\u003eA brief day, he does whatever he is told, follows the schedule, does the work. But, something that amazes James is that the food is actually YUMMY.\u003c/p\u003e\n\u003cp\u003eHe does enjoy eating at vanilla orphanage. Normally, it is like a buffet, but a limited one. You only can have one serving of meat, 2 vegetables, a delicious main dish (e.g. cooked rice, cooked noodles …).\u003c/p\u003e\n\u003cp\u003eBy the table, James sees students laugh at each other, talk with each other, and, from far away, he sees a little brown-white puppy is running to a girl with curly hair, and stops.\u003c/p\u003e\n\u003cp\u003eNorman!!!!!!!!!!!!!\u003c/p\u003e\n\u003cp\u003eIt is funny that the girl asks exactly the same thing as Dr. Brains asked:” How can you know him?” He explains the whole story why he knows Norman and asks his very own and very first question to the very first student he meets at the vanilla orphanage: “How did he get here?”.\u003c/p\u003e\n\u003cp\u003e“Long story,” says the girl, “he first arrived here because of our save the dogs project, Calvin and I found him.”\u003c/p\u003e\n\u003cp\u003e“And who are you? I’m James”.\u003c/p\u003e\n\u003cp\u003e“Sorry, I forgot about that, my name is Amelia!”.\u003c/p\u003e\n\u003cp\u003eA tall, black haired student comes and joins them. “Hi there, what’s up? I heard someone mention my name.”\u003c/p\u003e\n\u003cp\u003e“Oh, we were just talking about the dog. 
Our new friend, James, gave him a name: Norman,” responds Amelia.\u003c/p\u003e\n\u003cp\u003e“Guess what?” asked Calvin, “I taught him Chinese!”.\u003c/p\u003e\n\u003cp\u003e“Oh interesting, show us!” says Amelia.\u003c/p\u003e\n\u003cp\u003e“狗儿,请来一下; I told him to come.” Suddenly, Norman comes and starts running around Calvin. “你的名字叫做 Norman; I told him that his name is Norman,” says Calvin. The dog starts moving around in a funny way, which James feels weird about. “Oh, don’t worry about that, that’s the Funny-Brown-Hula-Stump-Wiggle-Wag-Dance that I taught him,” Says Amelia.\u003c/p\u003e\n\u003ch3 id=\"part-4\"\u003ePart 4\u003c/h3\u003e\n\u003cp\u003eDong, dong, dong, dong…… The school bell rings, everyone gets up to do everything they need. It’s Sunday. According to Dr. Brains, James needs to meet Dr. Brains at the grand office.\u003c/p\u003e\n\u003cp\u003eWhen he arrives, Dr. Brains says nothing but a greeting, and he hands James a slip of paper that says:\u003c/p\u003e\n\u003cp\u003eThe organization of Brainiacs: 52345 Brainful way, North town, CA 94780\u003c/p\u003e\n\u003cp\u003e“What is the org…”. “Stop! I will explain everything right after!” explains Dr. Brains. “Just remember what this parchment says!”\u003c/p\u003e\n\u003cp\u003eHe hands him a telephone, and says, “Dial 52325, when you hear a beep, dial 900. Answer every question it’s asking you.”\u003c/p\u003e\n\u003cp\u003eHe does what he is told, then a girl’s voice says: “Welcome to the new member registration center of TOFB, or the Organization of Brainiacs. Please answer the question: What is your address? “James states the orphanage’s address. “What is your reason to join?” Dr. Brains says quietly,” Invited.”\u003c/p\u003e\n\u003cp\u003eWho invited you?” James answers:”Dr. Brains.” “ Welcome, again, new member. Please take the blood needle that appears in front of you and use it to poke your left ring finger.” James does this, and the voice says, ”Thanks for joining! 
Please hang up the phone!”\u003c/p\u003e\n\u003cp\u003e“Understand this?” Dr. Brains says, ”Let’s go!”. “But, go where?” asks James. ”T-O-F-B,” replies Dr. Brains. They walk straight into a box, where James spots a device. Dr. Brains pushes a button on the device, and suddenly, James feels dizzy. They are spinning. They spin faster and faster. Finally, he hears a pop, then, suddenly, he falls into another device which is like a poison chamber. He and Dr. Brains open the door, and he sees a small, transparent house that reads T-O-F-B.\u003c/p\u003e\n\u003ch3 id=\"part-5\"\u003ePart 5\u003c/h3\u003e\n\u003cp\u003eThey walk straight into the house, and see a small elevator that is made out of glass. While they walk into the elevator, James feels something is seriously wrong. First, this is a one-story building, and unlike the 5th avenue apple retail store, it has no underground floor. Second, the elevator has NO button, how can Dr. Brains go anywhere with this elevator?\u003c/p\u003e\n\u003cp\u003eDr. Brains seems solemn, he carefully looks at the emergency speaker, then, suddenly, James hears a loud CRACK. Then the elevator starts getting darker and darker. After 5 seconds, it is not transparent anymore.\u003c/p\u003e\n\u003cp\u003eThe elevator starts to go down deeper and deeper. Then a screen pops up.”Hello, WELCOME to the Organization of Brainiacs. Please scan your card…” says a voice. He doesn’t have a card!! He looks around to find Dr. Brains, but, he is gone!\u003c/p\u003e\n\u003cp\u003e“Where else can he be?”James thinks,”there is no way out!”. Suddenly, smoke fills the elevator, James first doesn’t realize what it is, but suddenly, he knows it.”Oh oh!” thinks James, ”IT IS GAS!!!!!”\u003c/p\u003e\n\u003ch2 id=\"chapter-2-t-o-f-b\"\u003eChapter 2: T-O-F-B\u003c/h2\u003e\n\u003cp\u003eWay underground, Dr. Brains hesitates. “OOOOOOOOOOPS! 
I forgot James in the elevator..” , he thinks, ”and the killer gas X03-00 would be deadly.”\u003c/p\u003e\n\u003cp\u003eHe rushes to the “hacker center”, and shouts, ”You guys! STOP the elevator! And STOP the gas! Open the doors! Clear out the gas! He is NOT a criminal!”\u003c/p\u003e\n\u003cp\u003eEverybody freezes, and some whisper, ”Oops, x03-00 gas can knock a human out in 10 seconds”.\u003c/p\u003e\n\u003ch3 id=\"part-6\"\u003ePart 6\u003c/h3\u003e\n\u003cp\u003eBack in the elevator, James barely has time to call the emergency. “Does Dr. Brains mean to kill me?” he thinks, ”or is this a test for me?” He has more things to worry about than that. However, the good news is that Dr.Brains and his team hurry to the elevator just in time, which is when he gets knocked off. They give him the medicine that will neutralize the effect of the gas, and then they hurry to prepare the WELCOME event of the new T-O-F-B members in this season.\u003c/p\u003e\n\u003cp\u003eSoon after, James wakes up, safe and sound. Dr. Brains is right by him.\u003c/p\u003e\n\u003cp\u003e“Sorry for the accident, but here, welcome to T-O-F-B”, Dr. Brains says with a little smile.\u003c/p\u003e\n\u003cp\u003eThere is a little awkward moment when he and Dr. Brains both try to say something, but no sounds come out. It doesn’t last long, just a few seconds. Then Dr. Brains continues, ”The Organization of Brainiacs is a little like what you see in the movie M-I-B. We basically are the only legal group in human and alien law that can meet, communicate with, and study the aliens from outer universe. You know one of our aliens: Norman. He actually can speak Hidoeneese AND English.”\u003c/p\u003e\n\u003cp\u003e“But what is Hidoeneese?” James asks.\u003c/p\u003e\n\u003cp\u003e“Hidoeneese is the language of the Hidonipothan.” Dr.Brains says.\u003c/p\u003e\n\u003cp\u003e“And what is Hidonipothan?” he asks, again.\u003c/p\u003e\n\u003cp\u003e“Long story short, it’s kind of an alien tribe. 
Later at breakfast, Norman will explain. By the way, he likes his name Norman.” Dr. Brains responds.\u003c/p\u003e\n\u003cp\u003e“What? Breakfast? It’s already morning?” James asks.\u003c/p\u003e\n\u003cp\u003e“Well yes, you have been knocked out by gas for almost 12 hours, and now it is 6:00 in the morning,” Dr. Brains says, ”you still can get about 3 hours rest. Everyone in T-O-F-B sleeps late and wakes up late. And one last thing, I will give you the NEW MEMBER #04 packet so you can learn more about T-O-F-B.”\u003c/p\u003e\n\u003cp\u003eHe hands him a packet, just like the packet in the orphanage. But it is hand written.\u003c/p\u003e\n\u003cp\u003eWelcome, new member, we are proud that you are here. As the founder of T-O-F-B, I will introduce you to the few basics of daily life.\u003c/p\u003e\n\u003cp\u003eFirst, you all have an outside “job”, which you will still perform. Since you are a child, AS I KNOW, we will just keep you up-to-date and call you via the headphones that we will give you. We won’t interrupt your class, unless it is an emergency, I promise. You will be meeting once a month so it won’t affect any of your grades.\u003c/p\u003e\n\u003cp\u003eSecond, in T-O-F-B, we treat any child like an adult. It means a large work load, but you can also access any part of our centre freely with your BNPS. But in some areas, we want you to have adult supervision.\u003c/p\u003e\n\u003cp\u003eYour supervisor is:\u003c/p\u003e\n\u003cp\u003eGrave Hono ( Dr. Brains, as a substitute name in the human world)\u003c/p\u003e\n\u003cp\u003eWe will give you a map and what you should do later.\u003c/p\u003e\n\u003cp\u003eDr. Ranboro\u003c/p\u003e\n\u003cp\u003e9/23/2018\u003c/p\u003e\n\u003ch3 id=\"part-7\"\u003ePart 7\u003c/h3\u003e\n\u003cp\u003eHe falls asleep……… He dreams about aliens attacking the centre, and only Dr. Brains, Dr. Ranboro, Norman, Amelia, Calvin and a guy who he didn’t know survived. 
He thinks it’s just a dream, but what he doesn’t know is, this day is coming closer and closer.\u003c/p\u003e\n\u003cp\u003e“Wakey, Wakey!” Dr. Brains shouts, laughing” JJJJJJJJAAAAAAMMMMMMMEEEEEEEESSSSSSSSSSSS!!!!!!!!!!!!”. James finally wakes up, and mumbles, ”What the heck in the world was this?”\u003c/p\u003e\n\u003cp\u003eDr. Brains seems to be confused. “You didn’t recognize my voice? Wake up, Buddy! Get dressed! The welcoming party is waiting!!”.\u003c/p\u003e\n\u003cp\u003eHe gets dressed, hurries to follow Dr. Brains, and they go outside to a “secretive” room that is labeled “G—CHECK, BNPS ROOM”. They go in, and he sees a bunch of devices that are new to him. He sits down, just as Dr. Brains ordered, and Dr. Brains brings a needle to his face, straight into his eyes. “Watch out!” James shouts. He doesn’t even have time to think, as the needle goes in and out of his eyes. Dr. Brains says, “Good, we already got the DNA, scanned the iris, scanned the brain map. Ok, 2 last things, then we are good to go!” He does a bunch of scans on James’ finger, and he enters a password into a machine. “Ok, one last thing. Print your BNPS and tattoo it to your shoulder!” Dr. Brains says. The machine reads “bring human to the tattoo station …… step 3/5”. Dr. Brains orders him to put his shoulder into a cylinder. He feels a little pressure and his shoulder pops out of the machine. He sees a little piece of metal on his shoulder and it reads ”TOFB.1029358612.JP/////////” The machine also prints out a metal card. “Don’t lose it!” Dr. Brains says, “it is your ID here!”\u003c/p\u003e\n\u003cp\u003eThey walk out of the room and into the elevator. It is an elevator like the one in the TOFB’s entrance. The one that changes color and transparency, only much more slowly. When it tells him to scan the card, he knows better than to not do so. The elevator seems smart, and it asks “Homo, and James! Morning! Which level area do you want to go to?”. Dr. 
Brains responds, “Dining room number three, formal, both of us”. The elevator responds with a “TOFB wishes you a pleasant day!” When the door opens again, they enter a large area, like the first level of a 5 star hotel. Everything is white: people’s clothes, the ground, the staircase, the light, etc,. He sees Dr.Brains’ clothes change to white! He says, “Dr.Brains! Your outfit changed color!” “Yours did, too!” Dr. Brains responds. James looks down at his clothes. His had actually, as Dr. Brains said, changed color and texture.\u003c/p\u003e\n\u003cp\u003eThey eat their breakfast—salmon, soup and broccoli, and Dr. Brains announces to him, “OK, now let’s do some work stuff”. They head back to the living area, and they wash themselves. Then they head to the meeting area. Norman, Dr. Ranboro and the other guy James sees in his dream are waving to James-and-Dr.Brains-in-the-black-suit-and-a-tie.\u003c/p\u003e\n\u003ch3 id=\"part-8\"\u003ePart 8\u003c/h3\u003e\n\u003cp\u003e“So”, Dr. Ranboro says, “Welcome! Thank you for joining the organiza………?!!!\u0026gt;?*\u0026amp;%*\u0026amp;^%\u0026amp;^%%∆˙ßå˚µß∂˙”. FFF! A small arrow flies though the walls and hits Dr. Ranboro, making his words into nonsense. “å∆∆߬—å˚å!!!!!!¡¡¡¡¡¡?¿!¡……Jams…….main sq……is com…..tel..hom……nnnoor…….¡¡¡!!!???¿¿¿å∂ß˚˚˚˚∆ƒå˙”, he says. James can barely understand, but he knows one thing, they will tell him about the main sq…whatsoever.\u003c/p\u003e\n\u003cp\u003e“Let’s jump into the topic,” Norman says. “The main sq… is actually an attack called The Main Square Rattle, or what we call TMSR. It’s started because another kind of alien, The Froakan, wants to use humans as slaves, own the TOFB AND the Hidonipothan.The only way to stop that is to get the battle-rattler and rattle it. But if The F’s got the rattler and rattle it, well, we will all freeze and do what they want, like a bunch of zombies. The state of being a zombie is called ratling. Sadly, there isn’t a known cure yet for ratle. But Dr. 
Brains is working on it! Lastly, the battle-rattler is locked in the Ratle Mountains. And the only way to enter the Ratle Mountain is by using Dr. Ranboro’s key. Otherwise, you will have very little chance to get out alive! And that’s why they shot Dr. Ranboro. As a matter of fact, the arrow is poisonous. If we don’t send him to hospital now, he will become a baby in 72 hours.”\u003c/p\u003e\n\u003cp\u003eTalking about Dr. Ranboro, James notices Dr. Ranboro’s hair getting darker and darker from the old-man-white. They send him to the hospital about 5 minutes later.\u003c/p\u003e\n\u003ch2 id=\"chapter-3-that-s-called-war\"\u003eChapter 3:That’s called war\u003c/h2\u003e\n\u003cp\u003eAfter another ride on the “TOTP—0111”, which is the “squeeze box” to get to the North Town, they are back at Dr. Brains’ office. But something weird has happened, only students in OPH_LV # 40000 - OPH_LV # 49999 are still in the orphanage. Dr. Brains tries to find out why, but he can’t. And that’s when all of the humans in the orphanage hear a gigantic laugh coming from nowhere.”HHHAAAHHHAAAAHAHHAAA! This is your day, Homo, your death ceremony!HHAAHAHA!”\u003c/p\u003e\n\u003cp\u003eMonths passed again……..\u003c/p\u003e\n\u003ch3 id=\"part-9\"\u003ePart 9\u003c/h3\u003e\n\u003cp\u003eThe daily live is almost the same as before, just that a part of the students in the orphanage is missing. But live is still very simple. Tasty food, friendly teachers, and visits to TOFB every other week.\u003c/p\u003e\n\u003cp\u003eOne day, James is in his math class.\u003c/p\u003e\n\u003cp\u003e“So when 2 is raised to the……”\u003c/p\u003e\n\u003cp\u003e“Beep! Beep!”\u003c/p\u003e\n\u003cp\u003eHis secret headphones from TOFB send a message request to him.\u003c/p\u003e\n\u003cp\u003e“Beep! Beep!”\u003c/p\u003e\n\u003cp\u003e“Didn’t that Ranboro guy say they won’t interrupt our classes?” James thought.\u003c/p\u003e\n\u003cp\u003e“And let’s do some prob….”\u003c/p\u003e\n\u003cp\u003e“Beep! 
Beep!”\u003c/p\u003e\n\u003cp\u003eJames requests a bathroom break and answers the headphones in the boys’ restroom.\u003c/p\u003e\n\u003cp\u003e“It’s an emergency!!! The Froakans are getting closer to the rattler!!! Help!!!! James, take Homo and get here now!” Norman cries.\u003c/p\u003e\n\u003cp\u003eAs fast as he can, he rushes to Dr. Brains’ office, grabs Dr. Brains and locks him and himself into the TOTB-0111.\u003c/p\u003e\n\u003cp\u003eAnd as fast as lightning, they are here, in the North town.\u003c/p\u003e\n\u003cp\u003eThey rush into the elevator, he swipes his and Dr. Brains’ card and rushes to Dr. Ranboro’s office.\u003c/p\u003e\n\u003cp\u003e“Quick! They will rattle it in like…like 20 minutes and we will all ratle!!!”, Norman hollers.\u003c/p\u003e\n\u003cp\u003eAnd again, as fast as lightning, they get war-dressed and get into the fastest transport system in TOFB.\u003c/p\u003e\n\u003cp\u003eAs James looks down, he is wearing a strong iron chest plate that reads ’T-O-F-B///////The Smarter one’. And on his shoulder, there is a cord which extends from his Digital ID to the chest plate. There is a screen in his chest plate that is unbreakable. There is a soft protection layer, then there is a swimming layer, then the pressure layer, an iron pad, an air supply on his side if the enemy spreads poisonous gas, and an armor on the outside.\u003c/p\u003e\n\u003cp\u003eAmazingly, these things only weigh 1 pound and fit perfectly.\u003c/p\u003e\n\u003cp\u003eHe is war-trained, so he knows exactly what to do with this fancy outfit. The screen is the main control, the outfit will detect the environment and change to the perfect layer.\u003c/p\u003e\n\u003cp\u003eUploaded ate 10/25/2015 \u003cem\u003e[sic.]\u003c/em\u003e\u003c/p\u003e\n\u003ch3 id=\"part-10\"\u003ePart 10\u003c/h3\u003e\n\u003cp\u003eThe ride seems to be long, but it’s actually only 5 minutes. 
They will enter the Ratle Mountains from the North End, which is the second-safest route into the mountains without Dr. Ranboro’s key.\u003c/p\u003e\n\u003cp\u003eAnd there they are, in the Ratle Mountains. They are led by Mr. Giose, who was the other guy in his dream when he came to the TOFB the first time. The other four warriors are Norman, Dr. Ranboro, Dr. Brains and James. The first 20 miles are short and boring. Nothing happens. But after the 29th mile mark, they enter a cave.\u003c/p\u003e\n\u003cp\u003eThe cave is dark. There are only few lights flashing. They are not worried, until they hear a scream.\u003c/p\u003e\n\u003cp\u003e“OOOOOOO! Eeeek!”\u003c/p\u003e\n\u003cp\u003e“Ahhhhhhhhhhhh! ZZZ! ZZZ…ZZZ..ZZZ…ZZZ…ZZZ…zzz…” The voice is getting smaller and smaller.\u003c/p\u003e\n\u003cp\u003e“It is the sleeping spider! It will knock out a human in NO time!” Mr. Giose shouts.\u003c/p\u003e\n\u003cp\u003eJames and the whole crew know what to do. They press a few buttons on their screens, and their helmets of their armor dissolve into the air. What is left behind, is the air filtering system.\u003c/p\u003e\n\u003cp\u003e“Three! Two! One! It’s gas,” Norman says, playfully.\u003c/p\u003e\n\u003cp\u003eDr. Brains spreads out the SSG gas, which will, hopefully, knock out the sleeping spider.\u003c/p\u003e\n\u003cp\u003eThat wastes a LOT of time. Before they know it, they all starts to ratle.\u003c/p\u003e\n\u003cp\u003eIt is James who feels it first. He feels extremely and uncontrollably happy. He starts running around and talking to other people in a rude way. To himself, it feels like as if he is drifting into unconsciousness.\u003c/p\u003e\n\u003cp\u003eThen the same happens to Dr. Brains, and then Norman, followed by Mr. Giose. Luckily, Dr. Ranboro called the TOFB’s team 2 to come for help before he changes, too.\u003c/p\u003e\n\u003cp\u003eI never knew what happened after this incident until the year of 2021. 
Since James was ratling, he couldn’t remember the whole year of 2020. He recovered on the day of 10/26/2021. Dr. Foster, who works at the orphanage AND at TOFB found a cure using Chinese Herbal Tea.\u003c/p\u003e\n\u003cp\u003eWell, let’s jump into the time machine. Backward to 2014!!\u003c/p\u003e\n\u003ch2 id=\"chapter-4-childhood\"\u003eChapter 4:Childhood\u003c/h2\u003e\n\u003cp\u003eWe jump into the time machine, and swoosh. Here we are, in the year of 2014. We are standing in front of 1623 Wesson Ave. It is a sunny day. The Peters are getting ready for a trip to Africa. James greets his uncle, who will look after the house while they are gone. Mrs. Peter is packing hastily. And Mr. Peter is bringing his computer, because, weirdly, he is starting to like CODING in BINARY CODE. Nothing more to say, so here the story goes.\u003c/p\u003e\n\u003ch3 id=\"part-11\"\u003ePart 11\u003c/h3\u003e\n\u003cp\u003e“Hurrrrrryyyy!” Mr. Peter shouts. “Or else we will be late for the plane!”\u003c/p\u003e\n\u003cp\u003eThe Peters hurry to the bus stand, waiting for the airport express.\u003c/p\u003e\n\u003cp\u003eAfter about an hour ride, they finally arrive at the San Francisco International Airport.\u003c/p\u003e\n\u003cp\u003eThey check in. And they hurry to the security check. At the security check, Mrs. Peter thinks she forgot something. Yes, she forgot to bring ANY medication for the disease Ebola.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnorman_an_epic_tale_in_n_parts/","tags":null,"title":"Norman: An Epic Tale in N Parts"},{"categories":null,"contents":"\u0026ldquo;Doing NSM analysis is a demanding process and there is no mechanical procedure for it. 
Published explications have often been through a dozen or more iterations over several months\u0026rdquo; \u0026mdash; (Heine, Narrog, and Goddard 2015)\nApproach and XD Introduction and Theory The Natural Semantic Metalanguage (NSM) approach (Wierzbicka 1974) is a long-standing hypothetical theory in structural semantics which claims that all human languages share a common set of primitive lexical units\u0026mdash;usually words, but, in some languages, short connected phrases\u0026mdash;through which all other words in each language can be defined.\nFor NSM to hold, two main results must be demonstrated. (Heine, Narrog, and Goddard 2015) The theory\u0026rsquo;s validity hinges, first, upon the existence of semantic primes\u0026mdash;a series of primitive lexical units both indefinable via other words in the same language and also universally lexicalized across all languages. Second, the theory\u0026rsquo;s confirmation requires the ability to perform \u0026ldquo;reductive paraphrasing\u0026rdquo;, the process of defining all other words in a language with respect to the universal semantic primes\u0026rsquo; manifest in that language.\nIf proven as fact, the NSM theory has far-reaching implications for the long-standing (footnote: not to mention often personally fierce) conflict between the newer theories of generative semantics\u0026mdash;where structure of language is created in support of meaning\u0026mdash;and Noam Chomsky\u0026rsquo;s transformational generative syntax\u0026mdash;where meaning is filled to precomputed structure, which NSM suggests (Harris 2021).\nThe difficulty of forming adequate investigations in the area of NSM is due to the theory itself being exceedingly hard to falsify\u0026mdash;the principal method through which NSM is demonstrated is via the manual (i.e. non-standardized) lexicalization of semantic primes and a partial demonstration of their relations (Geeraerts 2009) to other words in the language. 
Whenever one irregularity in the theory is identified (Bohnemeyer 1998), the proponents of the theory simply respond with another update to the (non standardized) set of reductive paraphrasing rules to account for the irregularity (NO_ITEM_DATA:goddard1998bad.)\nYet, there are repeated empirical (again, non-standardized) confirmations of the existence of the original set (Wierzbicka 1974) of semantic primes in other languages (Chappell 2002; Peeters 1994; Travis 2002); there are also numerous demonstrations of the proposed applications (Goddard 2012) of the theory in structural semantics. These facts has therefore maintained the relevance of NSM in current linguistic study but rendered the theory without a very clear path forward. Due to this reason, recent research has placed larger focus on functional (cognitive linguistical) theories (Divjak, Levshina, and Klavan 2016) and largely overlooked structuralist arguments like the NSM.\nBroad Goals and Approach To complement the very large body of work already in the identification of semantic primes for NSM in numerous languages, we aim in this project to investigate the process of reductive paraphrasing to provide a baseline evaluation of the feasibility of NSM as a theory. The approach proposed below is intended to very generally test the practicality of the act of reductive paraphrasing from the published set of primes: whether paraphrasing from those primes is even broadly possible across the entire lexicon of the few languages for which it is purported to be possible. This test remains needed because, quite counter-intuitively, metalanguage theorists have been constructing lexicalizations for non-prime words on an \u0026ldquo;as-needed\u0026rdquo; basis such as in (Wierzbicka 2007). No lexicon-wide demonstrations of lexicalizability has been performed (i.e. 
reductive paraphrasing all words down to the primes) as the current approach of manual definition of words from primes is significantly time-consuming and requires careful consideration of NSM\u0026rsquo;s semantic grammar between primes.\nWe aim to perform a lexicon-wide test of reductive paraphrasing computationally via much newer approaches in computational linguistics, specifically model-based Natural Language Processing (NLP).\nIn order to isolate the exact problem of reductive paraphrasing, we first will have to highlight a few key assumptions by the NSM theory and therefore this project.\nThe semantic metalanguage theory is itself built on the assumption that \u0026ldquo;each language is its own metalanguage\u0026rdquo; (Goddard 2002)\u0026mdash;that human languages are broadly lexicalizable by themselves (i.e. one can write an English dictionary by only using English.) We believe that the examination of this assumption is not within scope of the study and\u0026mdash;given it is fairly universally true from a practical standpoint (i.e. English dictionaries exist)\u0026mdash;we will take it as fact. We will use this fact further as the control for the feasibility of the approach, as discussed in the section below.\nThe remaining assumptions of NSM to be tested here, then, are that 1) semantic primes exist and 2) the original set of NSM primes published (Wierzbicka 1974) (and in subsequent studies in various other languages highlighted before) are correct and, through reductive paraphrase, can lexicalize every word in the lexicon.\nAims and Experimental Design In this study, we aim to develop a computational protocol for lexicon-wide testing of the possibility of performing reductive paraphrasing for every word in the lexicon given a set of purported semantic primes. 
Practically, this means that we are trying create a model to test whether all words in a language is lexicalizable when restricted to only using a chosen subset of primes in the same language.\nTo create a truly replicable test for lexicalizability under restriction, we turn to probabilistic NLP approaches. We propose the following metric for lexicalizability: a word is \u0026ldquo;lexicalizable\u0026rdquo; under some set of semantic primes if there is a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\nUnder this model, all words in the lexicon are lexicalizable by the set of primes being tested if there is a lossless projection of the bases of the lexical space to the primes\u0026rsquo; latent embedding space.\nThat is, given we have a latent embedding space of \\(n\\) semantic primes \\(P^n\\) and some lexicon \\(W\\) with \\(m\\) words, we aim to identify a linear mapping \\(M\\) such that:\n\\begin{equation} Mp = e_{W_j}\\ |\\ p \\in P^n, \\forall j=1\\ldots m \\end{equation}\nwhere, \\(e_{W_j}\\) is the \\(j\\) th standard basis of \\(W\\) (i.e. \\(j\\) th word in the lexicon.)\nThis projection is not, in principle, impossible. In the high-dimensional space of the entire lexicon, individual lexicalized words represent only the basis vectors of the space (and indeed in one-hot encodings for deep learning they are shown as the standard-basis of the lexicon-wide space.) 
Whereas in the lower-dimensional subspace of primes, a linear combination of primes can be used to represent each lexicalized word in the full lexicon.\nSuccess in identifying a feasible \\(M \\in \\mathcal{L}(P, W)\\) for a given \\(P\\) and \\(W\\) indicates the feasibility of finding a linear combination in \\(P\\) which maps to all \\(w \\in W\\), which means reductive paraphrase of \\(w\\) to a set of primes in \\(P\\) is possible as there is a direct \u0026ldquo;translation\u0026rdquo; (namely, \\(W\\)) from \\(P\\) to \\(W\\).\nTo actually compute \\(W\\) given \\(P\\) and \\(M\\), we leverage the well-established Transformer encoder-decoder architecture for language modeling (Vaswani et al. 2017). Furthermore, we frame the problem as one of unsupervised multi-lingual translation without alignments.\nThe basis of the model proposed to be used to obtain \\(W\\) is (Artetxe et al. 2018), a unsupervised multi-lingual translation model.\nFigure from (Artetxe et al. 2018).\nAs we are performing the task with word embeddings, not sentences like that of (Artetxe et al. 2018), the cross-attention lookup vector will serve no purpose (be \\(0\\)) (Niu, Zhong, and Yu 2021) and hence removed.\nFor the sake of standardization, we will call \\(P\\) the primary language/lexicon \\(L1\\), and \\(W\\) the second language/lexicon \\(L2\\). The basic hypothesis provided by (Artetxe et al. 2018) is that, through alternating samples of \\(L1\\) and \\(L2\\) through the model against their corresponding decoders using a shared encoder and separate decoders, the shared encoder is trained to perform the task of autoencoding for both lexicons at once. Therefore, at prediction time, to get the \u0026ldquo;translation\u0026rdquo; of an input, one simply applies the decoder of the desired lexicon to obtain a result.\nDuring training, the input to the shared encoder can either be a word from either \\(P\\) or $W$\u0026mdash;sampled with equal probability. 
If the input is from \\(P\\), we connect the output of the shared encoder with the \\(L1\\) decoder and train with the objective of recovering the input. Essentially, we are using the model as a alternate method of training a variational auto-encoder (Klys, Snell, and Zemel 2018) with alternating decoders given the lexicon being analyzed.\nThis task is trivial if the embedding space after the shared encoder is exactly as wide as both lexicon. However, we will restrict the output dimension of the shared encoder to \\(dim(P)\\) which after training we will call the latent embedding space of \\(L1\\); this name is verified and justified as a part of the feasibility check below.\nWe will also use the backtranslation mechanism proposed by (Artetxe et al. 2018) during training: whereby the autoencoded output from \\(L1\\) is used as target for the same input as \\(L2\\) (as well as the reverse), mimicking the process of translation.\nAfter training, the \\(L2\\) decoder would then be the candidate \\(W\\), mapping from the (proposed) latent embedding space of \\(P\\) to the lexicon \\(W\\).\nFollowing both (Artetxe et al. 2018; Conneau and Lample 2019) we will use cross-entropy as the objective function of training.\nFeasibility Checkpoint We first need to show that, as expected, the model architecture proposed above\u0026mdash;upon convergence\u0026mdash;will create a latent embedding for \\(L1\\) after encoding if the output size for encoding is \\(dim(L1)\\) (defined to be equal to \\(dim(P)\\)).\nA trivial test of whether the encoding output is desirably the embedding space of \\(L1\\) is that, through training with a toy mapping \\(P=W=L1=L2\\), we would expect both decoders to be an one-to-one mapping that simply copies the input. 
That is, after training with \\(P=W\\), we should see that activating one input in the shared post-encoding space should activate one or close to one feature only in both decoder\u0026rsquo;s output space.\nNumerically, this means that the result obtained from taking the mean entropy of both outputs given a singular input activation should be statistically insignificantly different from \\(0\\).\nThat is, we expect that given trained decoders \\(L_1\\) and \\(L_2\\), and standard bases of \\(W=P\\) named \\(e\\), we should see that:\n\\begin{equation} \\frac{\\log(L_1e_j) + \\log(L_2e_j)}{2} \\approx 0: \\forall j = 1\\ldots dim(W) \\end{equation}\nWe expect this result because, through gradient-descent, the quickest minima reachable to capture variation in the input perfectly is the copying task; therefore, we should expect here that if the post-encoding distribution is the same distribution as the input, the model\u0026rsquo;s decoders will fit to the copying task. If the post-encoding distribution is different from the input, the model\u0026rsquo;s decoders would then have to actually perform nontrivial mappings to achieve the desired autoencoding result.\nCheckpoint 2 + Hypothesis 1 The following is the first novel result that we can show with the new architecture. We first hypothesize that the model should converge when training to the target of the (already linguistically accepted, as aforementioned) result that English words are themselves a metalanguage.\nFor \\(dim(W)\\) iterations (similar to (Webb et al. 2011)), we will leave a word chosen at random out of the lexicon of \\(P\\). This operation results in \\(dim(P) = dim(W)-1\\). We will then train the model until a local minima is reached and measure convergence.\nTo test this hypothesis, we will measure the cross-entropy performance of \\(L2\\) decoder upon the word that is left out. 
The resulting loss should be statistically insignificantly different from \\(0\\) if the word is successfully lexicalized via the \\(dim(W)-1\\) other words not left out in \\(P\\) in the latent embedding space after encoding.\nIf the hypothesis is not successful, the model cannot converge even on a large subset of the entire lexicon, much less in the limited subset of the 60-word NSM-proposed metalanguage; it is therefore imperative not to continue the study unless convergence at this point can be shown. Importantly, however, failures in this step does not show any claims about reductive paraphrasing as we are simply benchmarking the model against a control linguistic assumption we discussed earlier.\nIn any case, it would be valuable at this point to again perform analyze for post-encoding output to observe any reductive paraphrasing behavior.\nHypothesis 2 At this point, we will set the lexicons to the sets we are actually testing. We will set \\(P\\) to be the list of semantic primes established by (Heine, Narrog, and Goddard 2015), and \\(W\\) to the English lexicon.\nShould lexicalization of all of the English lexicon via the semantic primes only be possible, this model should again converge after training with cross-entropy inappreciably different from \\(0\\). This result would indicate the existence of a \\(W\\) (i.e. \\(L2\\) decoder), indicating the possibility of lexicon-wide reductive paraphrasing.\nInstitution and Experience The actual protocol proposed as a part of this study (namely, creating, training, and calculating metrics from the autoencoder) is a technical concept taught as a part of the regular curriculum of Advanced Machine Learning at Nueva; however, expertise and mentorship may still be required when implementing a complex model topology and training mechanism like the one proposed. 
The open-ended project structure of the Advanced Machine Learning course supports and sometimes necessitate implementing a model like the one proposed with the help of the CS faculty. Therefore, if additional mentorship is indeed required, there exists support available within the institution.\nThe more difficult skill-set to capture is the knowledge regarding the theories of NSM and the field of structuralist linguistics in general. As of writing, we are not aware of any students which has an active research interest in traditional linguistics; however, this knowledge constitute a far more insignificant portion of the actual mechanics of the project and is more importantly very easily taught. Mentorship is also available here from members of the Mathematics and CS faculty with prior research interest in computational linguistics.\nIn terms of equipment, the most important tool required in working with a large-scale neural network is a matrix-mathematics accelerator; this often takes the form of a consumer graphics card and typical desktop computing setup. For the Machine Learning course taught at Nueva, Google\u0026rsquo;s Colab (and their free graphics card addition) is frequently used to address this need and would at a minimum suffice here. Also, it is based on the personal experience of the author, though by no means definite, that a large selection of students at Nueva has comparable hardware for training available at home.\nProvided network access to the computing accelerator, this experiment can be done under any setting and definitely does not necessitate the use of the biology lab.\nImpact Academic Significance Within the short term, this experiment provides two major results. First, it establishes the use of a bifercated unsupervised encoder-decoder translation model like that proposed by (Artetxe et al. 
2018) as a Conditional Variational Autoencoder (CVAE) (Klys, Snell, and Zemel 2018) with the ability to define and train the hidden latent representation after encoding. Although traditional CVAEs are frequently more suited for most output-aware generation tasks, this new scheme supports the direct influence of the latent representations of the encoder instead of using an additional input to both the encoder and decoder to influence such representations, like in traditional CVAEs. This difference is significant for cases where dimensional projection is needed but the content of the latent representation itself is also relevant to the study.\nOf course, the short-term result also includes the direct result of the second tested hypothesis: a systemic, lexicon-wide evaluation of the feasibility of reductive paraphrasing. The study is to develop a computational protocol for lexicon-wide reductive paraphrasing by creating a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\nIf both initial metrics succeed and the third, final reduction step with actual semantic primes fails, the result would indicate an inability to create such a lossless mapping, and therefore raise concerns about the lexicon-wide applicability of the reductive paraphrasing on the set of published semantic primes. That is, there is not even a locally convergent linear combination of primes that will generally describe all of the lexicon, despite the hypothesis by NSM theorists. This result will be highly impactful for NSM theory in general which necessitates the possibility of reductive paraphrase (Geeraerts 2009) (Vanhatalo, Tissari, and Idström, n.d.).\nIn the long term, demonstrations of reductive paraphrasing have wide-reaching implications for NSM theory in general (Heine, Narrog, and Goddard 2015; Geeraerts 2009), and the field of language learning. 
The paraphrasing capacity of the proposed embedding would hypothetically be able to create a semantic mapping between a set of words to one other word; in this way, it is not infeasible to create a language-learning tool with continually larger embedding size to slowly create a larger lexicon in the target user. Early results (Sharma and Goyal 2021) have shown a possible application of such an approach, using supervised machine translation techniques.\nLearning and Teaching One to two students, along with a facilitator, would be an ideal size for this experiment. Primarily, the three main roles will include model engineering, training and validation, and model ablation and testing. The last role requires the most amount of traditional linguistics knowledge as the student\u0026rsquo;s role would be to connect the weights in the model to the applicable theories being tested.\nThe study proposed is an extremely conventional empirical Machine Learning/NLP study. From a pedagogical standpoint for XRT, this study will be a diversion from the traditional wet-lab sciences or survey-based educational/social sciences commonly produced by the lab and lead a new avenue for the Lab\u0026rsquo;s expansion. Within Nueva, empirical research into machine learning is frequently done through independent study or the Intro/Advance machine learning courses\u0026mdash;which were recently expanded due to widening interest at the Upper School.\nParticipation in this project provides its constituent students an opportunity to practice publish-quality ML/NLP in a longer-term and multi-stage project previously not possible through semester-long courses. 
Students are trained to perform model construction, data selection and cleaning, collection of model validation metrics, as well as model ablation and interpretation: important concepts in ML operations taught but not formalized in the Machine Learning course as the course exercises, while open-ended, isolate only one skill and have expected outcomes.\nGiven the demand and rate of student progression between Intro/Advanced courses in ML each year, developing a suitable approach to propagate true machine-learning research will be relevant to upwards of 30 students each year.\nIncidentally, students also get an exposure to the practice of conventional linguistics and the new trend of applying empirical research NLP back against classic semantics; however, the demand for this exact skill is likely small at Nueva.\nThough the tool used and expanded upon by this experiment is applicable to the NLP research community, it is unfortunately difficult to predict its future applications to XRT or Nueva students without seeing more expansion into the area of ML and NLP by the XRT lab.\nSafety and Ethics The following are the responses to the safety and ethics checklist.\nThis project does not satisfy any triggers of the second-expert protocol. All data needed is from a dictionary (for the English lexicon, e.g. (Fellbaum 2010)) as well as the semantic primes listed in a figure on the article (Heine, Narrog, and Goddard 2015). The data is being generated during compute. The actual compute hardware will need to be stored in either in the cloud (not on-prem), physically in the iLab, or (for personal compute hardware), in students\u0026rsquo; homes. An internet connection and a model training acceleration scheme (such as the free Google Colab) would suffice. None foreseeable See below The experiment is done on the English lexicon. It is difficult to imagine a tangible harm from the experiment. 
This study provides students with an opportunity to conduct a full research study in ML; XRT has not had this form of projects before and approval would result in a new avenue of research being conducted with XRT. However, if the project is not approved, other ML projects may subsequently surface and students can leverage those opportunities to learn about the practice of empirical ML instead. As with most machine-learning projects, it is customary and appropriate to end with a statement on ML ethics and its implications. This study is a linguistics, lexicon-scale study, and the data sourced is available generally and not subject to copyright or any known data-protection laws. The inputs to the model are combinations of English words, and the model produces singular English words. The benefits of this model involve generating new knowledge about the English lexicon and semantic theories. The only known harm of the model involves the misinterpretation of its results, creating overreaching generalizations to semantic primality analysis or NSM theories. The model and source code can be released to the general public without broad impact.\nAcknowledgments I would like to thank Brandon Cho at Princeton University and Ted Theodosopoulos at The Nueva School for the very interesting discussion/argument that resulted in this proposal almost a year ago. I would like to thank Klint Kanopka at Stanford University for his mentorship and discussion of the overall feasibility of the approach and pointing out the path that led to the proposed model\u0026rsquo;s basis in machine translation. Finally, I would like to thank Prof. Brian MacWhinney at Carnegie Mellon University for pointing out discourse between structuralism/functionalism during our exchanges and for his mentorship in my exploration of computational linguistics.\nReferences Artetxe, Mikel, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. “Unsupervised Neural Machine Translation,” 12. Bohnemeyer, Jurgen. 1998. 
“Temporal Reference from a Radical Pragmatics Perspective: Why Yucatec Does Not Need to Express ’after’ and ’before’.” Walter de Gruyter, Berlin/New York Berlin, New York. Chappell, Hilary. 2002. “5. The Universal Syntax of Semantic Primes in Mandarin Chinese.” In Studies in Language Companion Series, 243–322. Studies in Language Companion Series. John Benjamins Publishing Company. doi:10.1075/slcs.60.12cha. Conneau, Alexis, and Guillaume Lample. 2019. “Cross-Lingual Language Model Pretraining,” 11. Divjak, Dagmar, Natalia Levshina, and Jane Klavan. 2016. Cognitive Linguistics 27 (4): 447–63. doi:doi:10.1515/cog-2016-0095. Fellbaum, Christiane. 2010. “Wordnet.” In Theory and Applications of Ontology: Computer Applications, 231–43. Springer. Geeraerts, Dirk. 2009. “Neostructuralist Semantics.” In Theories of Lexical Semantics, 124–78. Theories of Lexical Semantics. Oxford University Press. doi:10.1093/acprof:oso/9780198700302.003.0004. Goddard, Cliff. 2002. “The Search for the Shared Semantic Core of All Languages.” In Meaning and Universal Grammar: Theory and Empirical Findings. John Benjamins Publishing Company. ———. 2012. “Semantic Primes, Semantic Molecules, Semantic Templates: Key Concepts in the NSM Approach to Lexical Typology.” Linguistics 50 (3). doi:10.1515/ling-2012-0022. Harris, Randy Allen. 2021. The Linguistics Wars: Chomsky, Lakoff, and the Battle over Deep Structure. Oxford University Press. Heine, Bernd, Heiko Narrog, and Cliff Goddard. 2015. “The Natural Semantic Metalanguage Approach.” In The Oxford Handbook of Linguistic Analysis, edited by Bernd Heine and Heiko Narrog. Oxford University Press. doi:10.1093/oxfordhb/9780199677078.013.0018. Klys, Jack, Jake Snell, and Richard Zemel. 2018. “Learning Latent Subspaces in Variational Autoencoders,” 11. Niu, Zhaoyang, Guoqiang Zhong, and Hui Yu. 2021. “A Review on the Attention Mechanism of Deep Learning.” Neurocomputing 452 (September): 48–62. doi:10.1016/j.neucom.2021.03.091. Peeters, Bert. 1994. 
“16 Semantic and Lexical Universals in French.” In Studies in Language Companion Series, 423. Studies in Language Companion Series. John Benjamins Publishing Company. doi:10.1075/slcs.25.20pee. Sharma, Prawaal, and Navneet Goyal. 2021. “Zero-Shot Reductive Paraphrasing for Digitally Semi-Literate.” In Forum for Information Retrieval Evaluation, 91–98. Travis, Catherine E. 2002. “4. La Metalengua Semántica Natural.” In Studies in Language Companion Series, 173–242. Studies in Language Companion Series. John Benjamins Publishing Company. doi:10.1075/slcs.60.11tra. Vanhatalo, Ulla, Heli Tissari, and Anna Idström. n.d. “Revisiting the Universality of Natural Semantic Metalanguage: A View through Finnish,” 28. Vaswani, Ashish, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. “Attention Is All You Need,” 11. Webb, Geoffrey I., Claude Sammut, Claudia Perlich, Tamás Horváth, Stefan Wrobel, Kevin B. Korb, William Stafford Noble, et al. 2011. “Leave-One-Out Cross-Validation.” In Encyclopedia of Machine Learning, edited by Claude Sammut and Geoffrey I. Webb, 600–601. Boston, MA: Springer US. doi:10.1007/978-0-387-30164-8_469. Wierzbicka, Anna. 1974. “Semantic Primitives.” Lingua 34 (4): 365–69. doi:10.1016/0024-3841(74)90004-7. ———. 2007. “Bodies and Their Parts: An NSM Approach to Semantic Typology.” Language Sciences 29 (1): 14–65. doi:10.1016/j.langsci.2006.07.002. NO_ITEM_DATA:goddard1998bad. ","html":"\u003cp\u003e\u0026ldquo;Doing NSM analysis is a demanding process and there is no mechanical procedure for it. 
Published explications have often been through a dozen or more iterations over several months\u0026rdquo; \u0026mdash; (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"approach-and-xd\"\u003eApproach and XD\u003c/h2\u003e\n\u003ch3 id=\"introduction-and-theory\"\u003eIntroduction and Theory\u003c/h3\u003e\n\u003cp\u003eThe Natural Semantic Metalanguage (NSM) approach (\u003ca href=\"#citeproc_bib_item_20\"\u003eWierzbicka 1974\u003c/a\u003e) is a long-standing hypothetical theory in structural semantics which claims that all human languages share a common set of primitive lexical units\u0026mdash;usually words, but, in some languages, short connected phrases\u0026mdash;through which all other words in each language can be defined.\u003c/p\u003e\n\u003cp\u003eFor NSM to hold, two main results must be demonstrated. (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e) The theory\u0026rsquo;s validity hinges, first, upon the \u003cem\u003eexistence\u003c/em\u003e of semantic primes\u0026mdash;a series of primitive lexical units both indefinable via other words in the same language but also is universally lexicalized across all languages. 
Second, the theory\u0026rsquo;s confirmation requires the ability to perform \u0026ldquo;reductive paraphrasing\u0026rdquo;, the process of defining all other words in a language with respect to the universal semantic primes\u0026rsquo; manifest in that language.\u003c/p\u003e\n\u003cp\u003eIf proven as fact, the NSM theory and its implications has reaching implications into the long-standing (footnote: not to mention often personally fierce) conflict between the newer theories of generative semantics\u0026mdash;where structure of language is created in support of meaning\u0026mdash;and Noam Chomsky\u0026rsquo;s transformational generative syntax\u0026mdash;where meaning is filled to precomputed structure, which NSM suggests (\u003ca href=\"#citeproc_bib_item_10\"\u003eHarris 2021\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eThe difficulty of forming adequate investigations in the area of NSM is due the theory itself being exceedingly hard to falsify\u0026mdash;the principle method through which NSM is demonstrated is via the manual (i.e. non-standardized) lexicalization of semantic primes and a partial demonstration of their relations (\u003ca href=\"#citeproc_bib_item_7\"\u003eGeeraerts 2009\u003c/a\u003e) to other words in the language. 
Whenever one irregularity in the theory is identified (\u003ca href=\"#citeproc_bib_item_2\"\u003eBohnemeyer 1998\u003c/a\u003e), the proponents of the theory simply respond with another update to the (non standardized) set of reductive paraphrasing rules to account for the irregularity (NO_ITEM_DATA:goddard1998bad.)\u003c/p\u003e\n\u003cp\u003eYet, there are repeated empirical (again, non-standardized) confirmations of the existence of the original set (\u003ca href=\"#citeproc_bib_item_20\"\u003eWierzbicka 1974\u003c/a\u003e) of semantic primes in other languages (\u003ca href=\"#citeproc_bib_item_3\"\u003eChappell 2002\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_14\"\u003ePeeters 1994\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_16\"\u003eTravis 2002\u003c/a\u003e); there are also numerous demonstrations of the proposed applications (\u003ca href=\"#citeproc_bib_item_9\"\u003eGoddard 2012\u003c/a\u003e) of the theory in structural semantics. These facts has therefore maintained the relevance of NSM in current linguistic study but rendered the theory without a very clear path forward. Due to this reason, recent research has placed larger focus on functional (cognitive linguistical) theories (\u003ca href=\"#citeproc_bib_item_5\"\u003eDivjak, Levshina, and Klavan 2016\u003c/a\u003e) and largely overlooked structuralist arguments like the NSM.\u003c/p\u003e\n\u003ch3 id=\"broad-goals-and-approach\"\u003eBroad Goals and Approach\u003c/h3\u003e\n\u003cp\u003eTo complement the very large body of work already in the identification of semantic primes for NSM in numerous languages, we aim in this project to investigate the process of reductive paraphrasing to provide a baseline evaluation of the feasibility of NSM as a theory. 
The approach proposed below is intended to very generally test the practicality of the act of reductive paraphrasing from the published set of primes: whether paraphrasing from those primes is even broadly possible across the entire lexicon of the few languages for which it is purported to be possible. This test remains needed because, quite counter-intuitively, metalanguage theorists have been constructing lexicalizations for non-prime words on an \u0026ldquo;as-needed\u0026rdquo; basis such as in (\u003ca href=\"#citeproc_bib_item_21\"\u003eWierzbicka 2007\u003c/a\u003e). No lexicon-wide demonstrations of lexicalizability has been performed (i.e. reductive paraphrasing all words down to the primes) as the current approach of manual definition of words from primes is significantly time-consuming and requires careful consideration of NSM\u0026rsquo;s semantic grammar between primes.\u003c/p\u003e\n\u003cp\u003eWe aim perform a lexicon-wide test of reductive paraphrasing computationally via \u003cem\u003emuch\u003c/em\u003e newer approaches in computational linguistics, specifically model-based Natural Language Processing (NLP).\u003c/p\u003e\n\u003cp\u003eIn order to isolate the exact problem of reductive paraphrasing, we first will have to highlight a few key assumptions by the NSM theory and therefore this project.\u003c/p\u003e\n\u003cp\u003eThe semantic metalanguage theory is itself built on the assumption that \u0026ldquo;each language is its own metalanguage\u0026rdquo; (\u003ca href=\"#citeproc_bib_item_8\"\u003eGoddard 2002\u003c/a\u003e)\u0026mdash;that human languages are broadly lexicalizable by itself (i.e. one can write an English dictionary by only using English.) We believe that the examination of this assumption is not within scope of the study and\u0026mdash;given it is fairly universally true from a practical standpoint (i.e. English dictionaries exist)\u0026mdash;we will take it as fact. 
We will use this fact further as the control for the feasibility of the approach, as discussed in the section below.\u003c/p\u003e\n\u003cp\u003eThe remaining assumptions of NSM to be tested here, then, is that 1) semantic primes exist and 2) the original set of NSM primes published (\u003ca href=\"#citeproc_bib_item_20\"\u003eWierzbicka 1974\u003c/a\u003e) (and in subsequent studies in various other languages highlighted before) are correct and, through reductive paraphrase, can lexicalize every word in the lexicon.\u003c/p\u003e\n\u003ch3 id=\"aims-and-experimental-design\"\u003eAims and Experimental Design\u003c/h3\u003e\n\u003cp\u003eIn this study, we aim to develop a computational protocol for lexicon-wide testing of the possibility of performing reductive paraphrasing for every word in the lexicon given a set of purported semantic primes. Practically, this means that we are trying create a model to test whether all words in a language is lexicalizable when restricted to only using a chosen subset of primes in the same language.\u003c/p\u003e\n\u003cp\u003eTo create a truly replicable test for lexicalizability under restriction, we turn to probabilistic NLP approaches. 
We propose the following metric for lexicalizability: a word is \u0026ldquo;lexicalizable\u0026rdquo; under some set of semantic primes if there is a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\u003c/p\u003e\n\u003cp\u003eUnder this model, all words in the lexicon are lexicalizable by the set of primes being tested if there is a lossless projection of the bases of the lexical space to the primes\u0026rsquo; latent embedding space.\u003c/p\u003e\n\u003cp\u003eThat is, given we have a latent embedding space of \\(n\\) semantic primes \\(P^n\\) and some lexicon \\(W\\) with \\(m\\) words, we aim to identify a linear mapping \\(M\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nMp = e_{W_j}\\ |\\ p \\in P^n, \\forall j=1\\ldots m\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(e_{W_j}\\) is the \\(j\\) th standard basis of \\(W\\) (i.e. \\(j\\) th word in the lexicon.)\u003c/p\u003e\n\u003cp\u003eThis projection is not, in principle, impossible. In the high-dimensional space of the entire lexicon, individual lexicalized words represent only the basis vectors of the space (and indeed in one-hot encodings for deep learning they are shown as the standard-basis of the lexicon-wide space.) 
Whereas in the lower-dimensional subspace of primes, a linear combination of primes can be used to represent each lexicalized word in the full lexicon.\u003c/p\u003e\n\u003cp\u003eSuccess in identifying a feasible \\(M \\in \\mathcal{L}(P, W)\\) for a given \\(P\\) and \\(W\\) indicates the feasibility of finding a linear combination in \\(P\\) which maps to all \\(w \\in W\\), which means reductive paraphrase of \\(w\\) to a set of primes in \\(P\\) is possible as there is a direct \u0026ldquo;translation\u0026rdquo; (namely, \\(W\\)) from \\(P\\) to \\(W\\).\u003c/p\u003e\n\u003cp\u003eTo actually compute \\(W\\) given \\(P\\) and \\(M\\), we leverage the well-established Transformer encoder-decoder architecture for language modeling (\u003ca href=\"#citeproc_bib_item_18\"\u003eVaswani et al. 2017\u003c/a\u003e). Furthermore, we frame the problem as one of unsupervised multi-lingual translation without alignments.\u003c/p\u003e\n\u003cp\u003eThe basis of the model proposed to be used to obtain \\(W\\) is (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e), a unsupervised multi-lingual translation model.\u003c/p\u003e\n\u003cp\u003e\u003cimg src=\"/ox-hugo/2022-08-28_20-26-43_screenshot.png\" alt=\"\"\u003e\nFigure from (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eAs we are performing the task with word embeddings, not sentences like that of (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e), the cross-attention lookup vector will serve no purpose (be \\(0\\)) (\u003ca href=\"#citeproc_bib_item_13\"\u003eNiu, Zhong, and Yu 2021\u003c/a\u003e) and hence removed.\u003c/p\u003e\n\u003cp\u003eFor the sake of standardization, we will call \\(P\\) the primary language/lexicon \\(L1\\), and \\(W\\) the second language/lexicon \\(L2\\). The basic hypothesis provided by (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 
2018\u003c/a\u003e) is that, through alternating samples of \\(L1\\) and \\(L2\\) through the model against their corresponding decoders using a shared encoder and separate decoders, the shared encoder is trained to perform the task of autoencoding for both lexicons at once. Therefore, at prediction time, to get the \u0026ldquo;translation\u0026rdquo; of an input, one simply applies the decoder of the desired lexicon to obtain a result.\u003c/p\u003e\n\u003cp\u003eDuring training, the input to the shared encoder can either be a word from either \\(P\\) or $W$\u0026mdash;sampled with equal probability. If the input is from \\(P\\), we connect the output of the shared encoder with the \\(L1\\) decoder and train with the objective of recovering the input. Essentially, we are using the model as a alternate method of training a variational auto-encoder (\u003ca href=\"#citeproc_bib_item_12\"\u003eKlys, Snell, and Zemel 2018\u003c/a\u003e) with alternating decoders given the lexicon being analyzed.\u003c/p\u003e\n\u003cp\u003eThis task is trivial if the embedding space after the shared encoder is exactly as wide as both lexicon. However, we will restrict the output dimension of the shared encoder to \\(dim(P)\\) which after training we will call the latent embedding space of \\(L1\\); this name is verified and justified as a part of the feasibility check below.\u003c/p\u003e\n\u003cp\u003eWe will also use the backtranslation mechanism proposed by (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 
2018\u003c/a\u003e) during training: whereby the autoencoded output from \\(L1\\) is used as target for the same input as \\(L2\\) (as well as the reverse), mimicking the process of translation.\u003c/p\u003e\n\u003cp\u003eAfter training, the \\(L2\\) decoder would then be the candidate \\(W\\), mapping from the (proposed) latent embedding space of \\(P\\) to the lexicon \\(W\\).\u003c/p\u003e\n\u003cp\u003eFollowing both (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_4\"\u003eConneau and Lample 2019\u003c/a\u003e) we will use cross-entropy as the objective function of training.\u003c/p\u003e\n\u003ch4 id=\"feasibility-checkpoint\"\u003eFeasibility Checkpoint\u003c/h4\u003e\n\u003cp\u003eWe first need to show that, as expected, the model architecture proposed above\u0026mdash;upon convergence\u0026mdash;will create a latent embedding for \\(L1\\) after encoding if the output size for encoding is \\(dim(L1)\\) (defined to be equal to \\(dim(P)\\)).\u003c/p\u003e\n\u003cp\u003eA trivial test of whether the encoding output is desirably the embedding space of \\(L1\\) is that, through training with a toy mapping \\(P=W=L1=L2\\), we would expect both decoders to be an one-to-one mapping that simply copies the input. 
That is, after training with \\(P=W\\), we should see that activating one input in the shared post-encoding space should activate one or close to one feature only in both decoder\u0026rsquo;s output space.\u003c/p\u003e\n\u003cp\u003eNumerically, this means that the result obtained from taking the mean entropy of both outputs given a singular input activation should be statistically insignificantly different from \\(0\\).\u003c/p\u003e\n\u003cp\u003eThat is, we expect that given trained decoders \\(L_1\\) and \\(L_2\\), and standard bases of \\(W=P\\) named \\(e\\), we should see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\log(L_1e_j) + \\log(L_2e_j)}{2} \\approx 0: \\forall j = 1\\ldots dim(W)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe expect this result because, through gradient-descent, the quickest minima reachable to capture variation in the input perfectly is the copying task; therefore, we should expect here that if the post-encoding distribution is the same distribution as the input, the model\u0026rsquo;s decoders will fit to the copying task. If the post-encoding distribution is different from the input, the model\u0026rsquo;s decoders would then have to actually perform nontrivial mappings to achieve the desired autoencoding result.\u003c/p\u003e\n\u003ch4 id=\"checkpoint-2-plus-hypothesis-1\"\u003eCheckpoint 2 + Hypothesis 1\u003c/h4\u003e\n\u003cp\u003eThe following is the first novel result that we can show with the new architecture. We first hypothesize that the model should converge when training to the target of the (already linguistically accepted, as aforementioned) result that English words are themselves a metalanguage.\u003c/p\u003e\n\u003cp\u003eFor \\(dim(W)\\) iterations (similar to (\u003ca href=\"#citeproc_bib_item_19\"\u003eWebb et al. 2011\u003c/a\u003e)), we will leave a word chosen at random out of the lexicon of \\(P\\). This operation results in \\(dim(P) = dim(W)-1\\). 
We will then train the model until a local minima is reached and measure convergence.\u003c/p\u003e\n\u003cp\u003eTo test this hypothesis, we will measure the cross-entropy performance of \\(L2\\) decoder upon the word that is left out. The resulting loss should be statistically insignificantly different from \\(0\\) if the word is successfully lexicalized via the \\(dim(W)-1\\) other words not left out in \\(P\\) in the latent embedding space after encoding.\u003c/p\u003e\n\u003cp\u003eIf the hypothesis is not successful, the model cannot converge even on a large subset of the entire lexicon, much less in the limited subset of the 60-word NSM-proposed metalanguage; it is therefore imperative not to continue the study unless convergence at this point can be shown. Importantly, however, failures in this step does \u003cem\u003enot\u003c/em\u003e show any claims about reductive paraphrasing as we are simply benchmarking the model against a control linguistic assumption we discussed earlier.\u003c/p\u003e\n\u003cp\u003eIn any case, it would be valuable at this point to again perform analyze for post-encoding output to observe any reductive paraphrasing behavior.\u003c/p\u003e\n\u003ch4 id=\"hypothesis-2\"\u003eHypothesis 2\u003c/h4\u003e\n\u003cp\u003eAt this point, we will set the lexicons to the sets we are actually testing. We will set \\(P\\) to be the list of semantic primes established by (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e), and \\(W\\) to the English lexicon.\u003c/p\u003e\n\u003cp\u003eShould lexicalization of all of the English lexicon via the semantic primes only be possible, this model should again converge after training with cross-entropy inappreciably different from \\(0\\). This result would indicate the existence of a \\(W\\) (i.e. 
\\(L2\\) decoder), indicating the possibility of lexicon-wide reductive paraphrasing.\u003c/p\u003e\n\u003ch2 id=\"institution-and-experience\"\u003eInstitution and Experience\u003c/h2\u003e\n\u003cp\u003eThe actual protocol proposed as a part of this study (namely, creating, training, and calculating metrics from the autoencoder) is a technical concept taught as a part of the regular curriculum of Advanced Machine Learning at Nueva; however, expertise and mentorship may still be required when implementing a complex model topology and training mechanism like the one proposed. The open-ended project structure of the Advanced Machine Learning course supports and sometimes necessitate implementing a model like the one proposed with the help of the CS faculty. Therefore, if additional mentorship is indeed required, there exists support available within the institution.\u003c/p\u003e\n\u003cp\u003eThe more difficult skill-set to capture is the knowledge regarding the theories of NSM and the field of structuralist linguistics in general. As of writing, we are not aware of any students which has an active research interest in traditional linguistics; however, this knowledge constitute a far more insignificant portion of the actual mechanics of the project and is more importantly very easily taught. Mentorship is also available here from members of the Mathematics and CS faculty with prior research interest in computational linguistics.\u003c/p\u003e\n\u003cp\u003eIn terms of equipment, the most important tool required in working with a large-scale neural network is a matrix-mathematics accelerator; this often takes the form of a consumer graphics card and typical desktop computing setup. For the Machine Learning course taught at Nueva, Google\u0026rsquo;s Colab (and their free graphics card addition) is frequently used to address this need and would at a minimum suffice here. 
Also, it is based on the personal experience of the author, though by no means definite, that a large selection of students at Nueva has comparable hardware for training available at home.\u003c/p\u003e\n\u003cp\u003eProvided network access to the computing accelerator, this experiment can be done under any setting and definitely does not necessitate the use of the biology lab.\u003c/p\u003e\n\u003ch2 id=\"impact\"\u003eImpact\u003c/h2\u003e\n\u003ch3 id=\"academic-significance\"\u003eAcademic Significance\u003c/h3\u003e\n\u003cp\u003eWithin the short term, this experiment provides two major results. First, it establishes the use of a bifurcated unsupervised encoder-decoder translation model like that proposed by (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e) as a Conditional Variational Autoencoder (CVAE) (\u003ca href=\"#citeproc_bib_item_12\"\u003eKlys, Snell, and Zemel 2018\u003c/a\u003e) with the ability to define and train the hidden latent representation after encoding. Although traditional CVAEs are frequently more suited for most output-aware generation tasks, this new scheme supports the direct influence of the latent representations of the encoder instead of using an additional input to both the encoder and decoder to influence such representations, like in traditional CVAEs. This difference is significant as it addresses cases where dimensional projection is needed but the content of the latent representation itself is also relevant to the study.\u003c/p\u003e\n\u003cp\u003eOf course, the short-term result also includes the direct result of the second tested hypothesis: a systemic, lexicon-wide evaluation of the feasibility of reductive paraphrasing. 
The study is to develop a computational protocol for lexicon-wide reductive paraphrasing by creating a lossless mapping from a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\u003c/p\u003e\n\u003cp\u003eIf both initial metrics succeed and the third, final reduction step with actual semantic primes fails, the result would indicate an inability to create such a lossless mapping, and therefore raise concerns about the lexicon-wide applicability of the reductive paraphrasing on the set of published semantic primes. That is, there is not even a locally convergent linear combination of primes that will generally describe all of the lexicon, despite the hypothesis by NSM theorists. This result will be highly impactful for NSM theory in general which necessitates the possibility of reductive paraphrase (\u003ca href=\"#citeproc_bib_item_7\"\u003eGeeraerts 2009\u003c/a\u003e) (\u003ca href=\"#citeproc_bib_item_17\"\u003eVanhatalo, Tissari, and Idström, n.d.\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eIn the long term, demonstrations of reductive paraphrasing have wide-reaching implications for NSM theory in general (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_7\"\u003eGeeraerts 2009\u003c/a\u003e), and the field of language learning. The paraphrasing capacity of the proposed embedding would hypothetically be able to create a semantic mapping between a set of words and one other word; in this way, it is not infeasible to create a language-learning tool with continually larger embedding size to slowly create a larger lexicon in the target user. 
Early results (\u003ca href=\"#citeproc_bib_item_15\"\u003eSharma and Goyal 2021\u003c/a\u003e) have shown a possible application of such an approach, using supervised machine translation techniques.\u003c/p\u003e\n\u003ch3 id=\"learning-and-teaching\"\u003eLearning and Teaching\u003c/h3\u003e\n\u003cp\u003eOne to two students, along with a facilitator, would be an ideal size for this experiment. Primarily, the three main roles will include model engineering, training and validation, and model ablation and testing. The last role requires the most amount of traditional linguistics knowledge as the student\u0026rsquo;s role would be to connect the weights in the model to the applicable theories being tested.\u003c/p\u003e\n\u003cp\u003eThe study proposed is an extremely conventional empirical Machine Learning/NLP study. From a pedagogical standpoint for XRT, this study will be a diversion from the traditional wet-lab sciences or survey-based educational/social sciences commonly produced by the lab and lead a new avenue for the Lab\u0026rsquo;s expansion. Within Nueva, empirical research into machine learning is frequently done through independent study or the Intro/Advance machine learning courses\u0026mdash;which were recently expanded due to widening interest at the Upper School.\u003c/p\u003e\n\u003cp\u003eParticipation in this project provides its constituent students an opportunity to practice publish-quality ML/NLP in a longer-term and multi-stage project previously not possible through semester-long courses. 
Students are trained to perform model construction, data selection and cleaning, collection of model validation metrics, as well as model ablation and interpretation: important concepts in ML operations taught but not formalized in the Machine Learning course as the course exercises, while open-ended, isolate only one skill and have expected outcomes.\u003c/p\u003e\n\u003cp\u003eGiven the demand and rate of student progression between Intro/Advanced courses in ML each year, developing a suitable approach to propagate true machine-learning research will be relevant to upwards of 30 students each year.\u003c/p\u003e\n\u003cp\u003eIncidentally, students also get an exposure to the practice of conventional linguistics and the new trend of applying empirical research NLP back against classic semantics; however, the demand for this exact skill is likely small at Nueva.\u003c/p\u003e\n\u003cp\u003eThough the tool used and expanded upon by this experiment is applicable to the NLP research community, it is unfortunately difficult to predict its future applications to XRT or Nueva students without seeing more expansion into the area of ML and NLP by the XRT lab.\u003c/p\u003e\n\u003ch2 id=\"safety-and-ethics\"\u003eSafety and Ethics\u003c/h2\u003e\n\u003cp\u003eThe following are the responses to the safety and ethics checklist.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThis project does not satisfy any triggers of the second-expert protocol. All data needed is from a dictionary (for the English lexicon, e.g. (\u003ca href=\"#citeproc_bib_item_6\"\u003eFellbaum 2010\u003c/a\u003e)) as well as the semantic primes listed in a figure on the article (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e). 
The data is being generated during compute.\u003c/li\u003e\n\u003cli\u003eThe actual compute hardware will need to be stored either in the cloud (not on-prem), physically in the iLab, or (for personal compute hardware), in students\u0026rsquo; homes. An internet connection and a model training acceleration scheme (such as the free Google Colab) would suffice.\u003c/li\u003e\n\u003cli\u003eNone foreseeable\u003c/li\u003e\n\u003cli\u003eSee below\u003c/li\u003e\n\u003cli\u003eThe experiment is done on the English lexicon. It is difficult to imagine a tangible harm from the experiment.\u003c/li\u003e\n\u003cli\u003eThis study provides students with an opportunity to conduct a full research study in ML; XRT has not had this form of project before and approval would result in a new avenue of research being conducted with XRT. However, if the project is not approved, other ML projects may subsequently surface and students can leverage those opportunities to learn about the practice of empirical ML instead.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAs with most machine-learning projects, it is customary and appropriate to end with a statement on ML ethics and its implications. This study is a linguistics, lexicon-scale study, and the data sourced is available generally and not subject to copyright or any known data-protection laws. The inputs to the model are combinations of English words, and the model produces singular English words. The benefits of this model involve generating new knowledge about the English lexicon and semantic theories. The only known harm of the model involves the misinterpretation of its results, creating overreaching generalizations to semantic primality analysis or NSM theories. 
The model and source code can be released to the general public without broad impact.\u003c/p\u003e\n\u003ch2 id=\"acknowledgments\"\u003eAcknowledgments\u003c/h2\u003e\n\u003cp\u003eI would like to thank Brandon Cho at Princeton University and Ted Theodosopoulos at The Nueva School for the very interesting discussion/argument that resulted in this proposal almost a year ago. I would like to thank Klint Kanopka at Stanford University for his mentorship and discussion of the overall feasibility of the approach and pointing out the path that lead to the proposed model\u0026rsquo;s basis in machine translation. Finally, I would like to thank Prof. Brian MacWhinney at Carnegie Mellon University for pointing out discourse between structuralism/functionalism during our exchanges and for his mentorship in my exploration of computational linguistics.\u003c/p\u003e\n\u003ch2 id=\"references\"\u003eReferences\u003c/h2\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eArtetxe, Mikel, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. “Unsupervised Neural Machine Translation,” 12.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eBohnemeyer, Jurgen. 1998. “Temporal Reference from a Radical Pragmatics Perspective: Why Yucatec Does Not Need to Express ’after’ and ’before’.” Walter de Gruyter, Berlin/New York Berlin, New York.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eChappell, Hilary. 2002. “5. The Universal Syntax of Semantic Primes in Mandarin Chinese.” In \u003ci\u003eStudies in Language Companion Series\u003c/i\u003e, 243–322. Studies in Language Companion Series. John Benjamins Publishing Company. 
doi:\u003ca href=\"https://doi.org/10.1075/slcs.60.12cha\"\u003e10.1075/slcs.60.12cha\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eConneau, Alexis, and Guillaume Lample. 2019. “Cross-Lingual Language Model Pretraining,” 11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eDivjak, Dagmar, Natalia Levshina, and Jane Klavan. 2016. \u003ci\u003eCognitive Linguistics\u003c/i\u003e 27 (4): 447–63. doi:\u003ca href=\"https://doi.org/doi:10.1515/cog-2016-0095\"\u003edoi:10.1515/cog-2016-0095\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eFellbaum, Christiane. 2010. “Wordnet.” In \u003ci\u003eTheory and Applications of Ontology: Computer Applications\u003c/i\u003e, 231–43. Springer.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eGeeraerts, Dirk. 2009. “Neostructuralist Semantics.” In \u003ci\u003eTheories of Lexical Semantics\u003c/i\u003e, 124–78. Theories of Lexical Semantics. Oxford University Press. doi:\u003ca href=\"https://doi.org/10.1093/acprof:oso/9780198700302.003.0004\"\u003e10.1093/acprof:oso/9780198700302.003.0004\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eGoddard, Cliff. 2002. “The Search for the Shared Semantic Core of All Languages.” In \u003ci\u003eMeaning and Universal Grammar: Theory and Empirical Findings\u003c/i\u003e. John Benjamins Publishing Company.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003e———. 2012. “Semantic Primes, Semantic Molecules, Semantic Templates: Key Concepts in the NSM Approach to Lexical Typology.” \u003ci\u003eLinguistics\u003c/i\u003e 50 (3). 
doi:\u003ca href=\"https://doi.org/10.1515/ling-2012-0022\"\u003e10.1515/ling-2012-0022\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_10\"\u003e\u003c/a\u003eHarris, Randy Allen. 2021. \u003ci\u003eThe Linguistics Wars: Chomsky, Lakoff, and the Battle over Deep Structure\u003c/i\u003e. Oxford University Press.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_11\"\u003e\u003c/a\u003eHeine, Bernd, Heiko Narrog, and Cliff Goddard. 2015. “The Natural Semantic Metalanguage Approach.” In \u003ci\u003eThe Oxford Handbook of Linguistic Analysis\u003c/i\u003e, edited by Bernd Heine and Heiko Narrog. Oxford University Press. doi:\u003ca href=\"https://doi.org/10.1093/oxfordhb/9780199677078.013.0018\"\u003e10.1093/oxfordhb/9780199677078.013.0018\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_12\"\u003e\u003c/a\u003eKlys, Jack, Jake Snell, and Richard Zemel. 2018. “Learning Latent Subspaces in Variational Autoencoders,” 11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_13\"\u003e\u003c/a\u003eNiu, Zhaoyang, Guoqiang Zhong, and Hui Yu. 2021. “A Review on the Attention Mechanism of Deep Learning.” \u003ci\u003eNeurocomputing\u003c/i\u003e 452 (September): 48–62. doi:\u003ca href=\"https://doi.org/10.1016/j.neucom.2021.03.091\"\u003e10.1016/j.neucom.2021.03.091\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_14\"\u003e\u003c/a\u003ePeeters, Bert. 1994. “16 Semantic and Lexical Universals in French.” In \u003ci\u003eStudies in Language Companion Series\u003c/i\u003e, 423. Studies in Language Companion Series. John Benjamins Publishing Company. 
doi:\u003ca href=\"https://doi.org/10.1075/slcs.25.20pee\"\u003e10.1075/slcs.25.20pee\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_15\"\u003e\u003c/a\u003eSharma, Prawaal, and Navneet Goyal. 2021. “Zero-Shot Reductive Paraphrasing for Digitally Semi-Literate.” In \u003ci\u003eForum for Information Retrieval Evaluation\u003c/i\u003e, 91–98.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_16\"\u003e\u003c/a\u003eTravis, Catherine E. 2002. “4. La Metalengua Semántica Natural.” In \u003ci\u003eStudies in Language Companion Series\u003c/i\u003e, 173–242. Studies in Language Companion Series. John Benjamins Publishing Company. doi:\u003ca href=\"https://doi.org/10.1075/slcs.60.11tra\"\u003e10.1075/slcs.60.11tra\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_17\"\u003e\u003c/a\u003eVanhatalo, Ulla, Heli Tissari, and Anna Idström. n.d. “Revisiting the Universality of Natural Semantic Metalanguage: A View through Finnish,” 28.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_18\"\u003e\u003c/a\u003eVaswani, Ashish, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. “Attention Is All You Need,” 11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_19\"\u003e\u003c/a\u003eWebb, Geoffrey I., Claude Sammut, Claudia Perlich, Tamás Horváth, Stefan Wrobel, Kevin B. Korb, William Stafford Noble, et al. 2011. “Leave-One-Out Cross-Validation.” In \u003ci\u003eEncyclopedia of Machine Learning\u003c/i\u003e, edited by Claude Sammut and Geoffrey I. Webb, 600–601. Boston, MA: Springer US. 
doi:\u003ca href=\"https://doi.org/10.1007/978-0-387-30164-8_469\"\u003e10.1007/978-0-387-30164-8_469\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_20\"\u003e\u003c/a\u003eWierzbicka, Anna. 1974. “Semantic Primitives.” \u003ci\u003eLingua\u003c/i\u003e 34 (4): 365–69. doi:\u003ca href=\"https://doi.org/10.1016/0024-3841(74)90004-7\"\u003e10.1016/0024-3841(74)90004-7\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_21\"\u003e\u003c/a\u003e———. 2007. “Bodies and Their Parts: An NSM Approach to Semantic Typology.” \u003ci\u003eLanguage Sciences\u003c/i\u003e 29 (1): 14–65. doi:\u003ca href=\"https://doi.org/10.1016/j.langsci.2006.07.002\"\u003e10.1016/j.langsci.2006.07.002\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003eNO_ITEM_DATA:goddard1998bad.\u003c/div\u003e\n\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnsm_proposal/","tags":null,"title":"NSM Proposal"},{"categories":null,"contents":"NUS Secondary School Other Duties AP Statistics Index AP Phys C Mech Index AP Phys C EM Index Tuning Forks bioinformatics PKM Intersession 2023 NUS-MATH580 QIC Date Topic \u0026lt;2022-04-05 Tue\u0026gt; physical qubits, manipulating physical qubits \u0026lt;2022-04-08 Fri\u0026gt; making qubits interact \u0026lt;2022-05-10 Tue\u0026gt; Chiara Marletto \u0026lt;2022-05-24 Tue\u0026gt; Strong Free Will NUS-CS223 Algorithms Backlog: Finite State Machine\nDate Topic \u0026lt;2022-04-07 Thu\u0026gt; stable matching problem, stable matching algorithm \u0026lt;2022-05-02 Mon\u0026gt; dynamic programming, relaxation \u0026lt;2022-05-23 Mon\u0026gt; distributed algorithum, randomized algorithum, complexity theory NUS-HIST301 American History Backlog: New Deal, Franklin D. 
Roosevelt (FDR), Works Progress Administration, effects of the New Deal, Great Depression, Herber Hoover, disinformation, Guilded Age\nDate Topic \u0026lt;2022-04-07 Thu\u0026gt; WWII, propaganda \u0026lt;2022-05-02 Mon\u0026gt; cold war \u0026lt;2022-05-09 Mon\u0026gt; civil rights \u0026lt;2022-05-26 Thu\u0026gt; Richard Nixon \u0026lt;2022-06-01 Wed\u0026gt; Ronald Raegan NUS-PHYS301 Mech Date Topic \u0026lt;2022-04-12 Tue\u0026gt; String Yo-Yo Problem, rotational energy \u0026lt;2022-05-24 Tue\u0026gt; Gyroscopes NUS-ENG401 English Date Topic \u0026lt;2022-04-15 Fri\u0026gt; secondary source comparison activity Essays Bluest Eye Essay Planning I, Tituba Essay Planning NUS-MATH530 Please refer to Linear Algebra Index\nNUS-ECON320 Financial Econometrics Date Topic \u0026lt;2022-08-25 Thu\u0026gt; Financial Markets Intro, ECON320 Architecture NUS-CS350 Software Studio Date Topic \u0026lt;2022-08-25 Thu\u0026gt; User Interviews, User Story \u0026lt;2022-09-07 Wed\u0026gt; Software Engineering, Prototyping \u0026lt;2022-09-12 Mon\u0026gt; Task Estimation \u0026lt;2022-09-15 Thu\u0026gt; Documentation and Specification \u0026lt;2022-09-19 Mon\u0026gt; Testing \u0026lt;2022-10-06 Thu\u0026gt; Defensive Programming \u0026lt;2022-11-03 Thu\u0026gt; Code Review \u0026lt;2022-12-01 Thu\u0026gt; UX Design NUS-MATH570 DiffEq Date Topic \u0026lt;2022-08-26 Fri\u0026gt; DiffEq Intro NUS-LANG250 Translation Translation Studies Index\nNUS-MATH575 CompBio Computational Biology Index\n","html":"\u003ch2 id=\"nus-secondary-school-other-duties\"\u003eNUS Secondary School Other Duties\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhap_phys_c_mech_index/\"\u003eAP Phys C Mech Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhap_phys_c_em_index/\"\u003eAP Phys C EM Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhtuning_forks/\"\u003eTuning Forks\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbioinformatics/\"\u003ebioinformatics\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpkm/\"\u003ePKM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhintersession_2023/\"\u003eIntersession 2023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"nus-math580-qic\"\u003eNUS-MATH580 QIC\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-05 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e, \u003ca href=\"/posts/kbhatoms_as_qubits/#manipulating-physical-qubits\"\u003emanipulating physical qubits\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-08 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmaking_qubits_interact/\"\u003emaking qubits interact\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-10 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhchiara_marletto/\"\u003eChiara Marletto\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-24 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhstrong_free_will/\"\u003eStrong Free Will\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-cs223-algorithms\"\u003eNUS-CS223 Algorithms\u003c/h2\u003e\n\u003cp\u003eBacklog: \u003ca href=\"/posts/kbhfinite_state_machine/\"\u003eFinite State Machine\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-07 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhstable_matching_problem/\"\u003estable matching problem\u003c/a\u003e, \u003ca href=\"\"\u003estable matching algorithm\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-02 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e, \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-23 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithum\u003c/a\u003e, \u003ca href=\"/posts/kbhrandomized_algorithum/\"\u003erandomized algorithum\u003c/a\u003e, \u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-hist301-american-history\"\u003eNUS-HIST301 American 
History\u003c/h2\u003e\n\u003cp\u003eBacklog: \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e, \u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. Roosevelt (FDR)\u003c/a\u003e, \u003ca href=\"/posts/kbhwpa/\"\u003eWorks Progress Administration\u003c/a\u003e, \u003ca href=\"/posts/kbheffects_of_the_new_deal/\"\u003eeffects of the New Deal\u003c/a\u003e, \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e, \u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e, \u003ca href=\"\"\u003edisinformation\u003c/a\u003e, \u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-07 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eWWII\u003c/a\u003e, \u003ca href=\"/posts/kbhpropaganda/\"\u003epropaganda\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-02 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-09 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-26 
Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-06-01 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Raegan\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-phys301-mech\"\u003eNUS-PHYS301 Mech\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-12 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eString Yo-Yo Problem\u003c/a\u003e, \u003ca href=\"/posts/kbhrotational_energy/\"\u003erotational energy\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-24 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eGyroscopes\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-eng401-english\"\u003eNUS-ENG401 English\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-15 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhsecondary_source_comparison_activity/\"\u003esecondary source comparison activity\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch3 id=\"essays\"\u003eEssays\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhenglish_bluest_eye/\"\u003eBluest Eye Essay Planning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhi_tituba_essay_planning/\"\u003eI, Tituba Essay Planning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"nus-math530\"\u003eNUS-MATH530\u003c/h2\u003e\n\u003cp\u003ePlease refer to \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eLinear Algebra Index\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"nus-econ320-financial-econometrics\"\u003eNUS-ECON320 Financial Econometrics\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-08-25 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Markets Intro\u003c/a\u003e, \u003ca href=\"/posts/kbhecon320_architecture/\"\u003eECON320 Architecture\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-cs350-software-studio\"\u003eNUS-CS350 Software Studio\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-08-25 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhuser_interviews/\"\u003eUser 
Interviews\u003c/a\u003e, \u003ca href=\"/posts/kbhuser_interviews/#user-story\"\u003eUser Story\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-07 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e, \u003ca href=\"/posts/kbhprototyping/\"\u003ePrototyping\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-12 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtask_estimation/\"\u003eTask Estimation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-15 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdocumentation_and_specification/\"\u003eDocumentation and Specification\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-19 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtesting/\"\u003eTesting\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-10-06 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdefensive_programming/\"\u003eDefensive Programming\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-11-03 
Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcode_review/\"\u003eCode Review\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-12-01 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhux_design/\"\u003eUX Design\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-math570-diffeq\"\u003eNUS-MATH570 DiffEq\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-08-26 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDiffEq Intro\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-lang250-translation\"\u003eNUS-LANG250 Translation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhtranslation_studies_index/\"\u003eTranslation Studies Index\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"nus-math575-compbio\"\u003eNUS-MATH575 CompBio\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomputational_biology_index/\"\u003eComputational Biology Index\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnueva_courses_index/","tags":["index"],"title":"Nueva Courses Index"},{"categories":null,"contents":"The Null Space, also known as the kernel, is the subset of vectors which get mapped to \\(0\\) by some Linear Map.\nconstituents Some linear map \\(T \\in \\mathcal{L}(V,W)\\)\nrequirements The subset of \\(V\\) which \\(T\\) maps to \\(0\\) is called the \u0026ldquo;Null 
Space\u0026rdquo;:\n\\begin{equation} null\\ T = \\{v \\in V: Tv = 0\\} \\end{equation}\nadditional information the null space is a subspace of the domain It should probably not be a surprise, given a Null Space is called a Null Space, that the Null Space is a subspace of the domain.\nzero As linear maps take \\(0\\) to \\(0\\), \\(T 0=0\\) so \\(0\\) is in the Null Space of \\(T\\).\nclosure under addition We have that:\n\\begin{equation} 0+0 = 0 \\end{equation}\nso by additivity of the Linear Maps the map is closed under addition.\nclosure under scalar multiplication By homogeneity of linear maps, the same of the above holds.\nThis completes the subspace proof, making \\(null\\ T\\) a subspace of the domain of \\(T\\), \\(V\\). \\(\\blacksquare\\)\nthe null space of the zero map is just the domain I mean duh. The zero map maps literally everything to zero.\nInjectivity IFF implies that null space is \\(\\{0\\}\\) See injectivity IFF implies that null space is \\(\\{0\\}\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e, also known as the \u003ca href=\"/posts/kbhnull_space/\"\u003ekernel\u003c/a\u003e, is the subset of vectors which get mapped to \\(0\\) by some \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eSome linear map \\(T \\in \\mathcal{L}(V,W)\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe subset of \\(V\\) which \\(T\\) maps to \\(0\\) is called the \u0026ldquo;\u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nnull\\ T = \\{v \\in V: Tv = 0\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"the-null-space-is-a-subspace--kbhsubspace-dot-md--of-the-domain\"\u003ethe null space is a 
\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the domain\u003c/h3\u003e\n\u003cp\u003eIt should probably not be a surprise, given a \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e is called a \u003ca href=\"/posts/kbhnull_space/\"\u003eNull \u003cstrong\u003e\u003cstrong\u003eSpace\u003c/strong\u003e\u003c/strong\u003e\u003c/a\u003e, that the \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the domain.\u003c/p\u003e\n\u003ch4 id=\"zero\"\u003ezero\u003c/h4\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e, \\(T 0=0\\) so \\(0\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003ch4 id=\"closure-under-addition\"\u003eclosure under addition\u003c/h4\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+0 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso by additivity of the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es the map is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under addition.\u003c/p\u003e\n\u003ch4 id=\"closure-under-scalar-multiplication\"\u003eclosure under scalar multiplication\u003c/h4\u003e\n\u003cp\u003eBy homogeneity of linear maps, the same of the above holds.\u003c/p\u003e\n\u003cp\u003eThis completes the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e proof, making \\(null\\ T\\) a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the domain of \\(T\\), \\(V\\). \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"the-null-space-of-the-zero-map-is-just-the-domain\"\u003ethe null space of the zero map is just the domain\u003c/h3\u003e\n\u003cp\u003eI mean duh. 
The \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e map maps literally everything to zero.\u003c/p\u003e\n\u003ch3 id=\"injectivity-iff-implies-that-null-space--kbhnull-space-dot-md--is-0\"\u003eInjectivity IFF implies that \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is \\(\\{0\\}\\)\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity IFF implies that null space is \\(\\{0\\}\\)\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnull_space/","tags":null,"title":"null space"},{"categories":null,"contents":"A number can be any of\u0026hellip;\n\\(\\mathbb{N}\\): natural number \\(\\mathbb{Z}\\): integer \\(\\mathbb{Q}\\): rational number \\(\\mathbb{R}\\): real number \\(\\mathbb{P}\\): irrational number \\(\\mathbb{C}\\): complex number ","html":"\u003cp\u003eA number can be any of\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathbb{N}\\): \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{Z}\\): \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{Q}\\): \u003ca href=\"/posts/kbhrational_number/\"\u003erational number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{R}\\): \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{P}\\): \u003ca href=\"/posts/kbhirrational_number/\"\u003eirrational number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{C}\\): \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnumber/","tags":null,"title":"number"},{"categories":null,"contents":"Consider a general non-linear First Order ODEs:\n\\begin{equation} x\u0026rsquo; = F(x) 
\\end{equation}\nSuppose we have some time interval, we have some solutions to the expression given. Is it possible for us to, given \\(x(t_0) = x_0\\), what \\(x(t_0+T)\\) would be? Can we approximate for explicit numbers?\nThe solutions have to exist for all time: blow-up cannot be present during numerical estimations.\nExplicit Euler Method \\begin{equation} x(t+h) \\approx x_{t+1} = x_{t} + h f(x_t) \\end{equation}\nmotivation recall that given \\(x(t_0) = x_0\\), we desire \\(x(t_0+T)\\).\ndivide your solution interval into \\(N\\) small intervals; each interval would have length \\(h= \\frac{T}{N}\\) let \\(t_{i} = t_0 + i \\frac{T}{N}\\), where \\(t_{N} = t_{0}+T\\) for each segment \\(t_{i}\\), we attempt to compute a \\(x_{i}\\), and we\u0026rsquo;d like to approximate the error between \\(x_{i}\\) and \\(x(t_{i})\\). In the explicit Euler method, we make piecewise linear approximations. At each \\(x_0\\), we follow the slope estimated via the ODE at that point. Specifically:\n\\begin{equation} x\u0026rsquo;(t) = \\lim_{k \\to 0} \\frac{x(t+k)-x(t)}{k} \\approx \\frac{x(t+h)-x(t)}{h} \\end{equation}\nfor some small \\(h\\). Meaning, specifically, \\(x(t+h) \\approx x(t) + h x\u0026rsquo;(t)\\), where \\(h\\) is the step size we computed before.\nConsider that we had an ODE that is \\(x\u0026rsquo; = F(x)\\), whech gives us:\n\\begin{equation} x_1 = x_{0}+ h f(x_0) \\approx x(t_0 + h) \\end{equation}\nFollowing this scheme, we can calculate from \\(x_0\\) all the way stepwise to \\(x_{N}\\).\nevaluation Situation: we have \\(X_{N}\\), we have \\(x(t_{N})\\), how close are they? 
In fact:\n\\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch \\end{equation}\nWe have some constant \\(C(x_0, t_0, T, f)\\), which we can use to estimate \\(C\\) the bounds specific to the problem you are solving.\nstiffness Certain parts of a solution maybe decaying/oscillating very different from another part of the solution\u0026mdash;\nexample Consider a system:\n\\begin{equation} y\u0026rsquo; = \\mqty(-1 \u0026amp; 0 \\\\ 0 \u0026amp; -10)y \\end{equation}\nour solutions look like:\n\\begin{equation} y(t) = \\mqty(c_1 e^{-t} \\\\ c_2 e^{-10t}) \\end{equation}\nso the top expression gives \\(x_i = (1-h)^{i} x_0\\) and bottom \\(x_{i} = (1-10h)^{i}x_0\\), which means they will have different requirements for \\(h\\) to be able to converge\nexample 2 \\begin{equation} y\u0026rsquo; = -5 (y-\\cos x) \\end{equation}\nwith method of undetermined coefficients, we obtain:\n\\begin{equation} y = \\frac{25}{26} \\cos t + \\frac{5}{26} \\sin t + Ce^{-5t} \\end{equation}\nthe first parts are fine and not stiff at all, the third part, we realize that we need \\((1-5h)^{i}x_0\\), meaning we need \\(h \u0026lt; \\frac{1}{5}\\).\nmotivation Let\u0026rsquo;s consider:\n\\begin{equation} x\u0026rsquo; = -\\lambda x \\end{equation}\nThe explicit Euler gives out:\n\\begin{equation} x_{t+1} = (1-\\lambda h)x_{i} \\end{equation}\nmeaning, in general:\n\\begin{equation} x_{i} = (1-\\lambda h)^{i} x_0 \\end{equation}\nWe know the function is bound to decay, yet the Explicit Euler will give us that this decays only when:\n\\begin{equation} -1 \u0026lt; 1-\\lambda h \u0026lt; 1 \\end{equation}\nImplicit Euler Method doesn\u0026rsquo;t have this problem\u0026mdash;\nconsider:\n\\begin{equation} x_{i+1} = x_{i} - \\lambda h x_{i+1} \\end{equation}\nmeaning:\n\\begin{equation} x_{i} = \\frac{1}{(1+\\lambda h)^{i}}x_0 \\end{equation}\nImplicit Euler Method A small twist on the Explicit Euler Method. 
To be able to use this method, we can formulate this as:\n\\begin{equation} x_{i+1} - h f(x_{i+1}) = x_i \\end{equation}\nwhere we use Newton\u0026rsquo;s Method to estimate some input \\(i+1\\) for which the above statement gets to \\(x_{i}\\).\nevaluation We actually didn\u0026rsquo;t do that much error; its is still bounded by:\n\\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch \\end{equation}\nDerivation \\begin{equation} \\frac{x((t+h)-h) - x(t+h)}{-h} \\approx x\u0026rsquo;(t+h) \\end{equation}\nthis is first-order Taylor Approximation written backwards\nThis also yields:\n\\begin{equation} \\frac{x((t+h)-h) - x(t+h)}{-h} = \\frac{x(t+h)-x((t+h)-h)}{h} \\end{equation}\nNow, let \\(t = t_0\\), and therefore we have \\(t_1 = t +h\\), this gives us that:\nNow, recall that, because \\(f\\) is the ODE:\n\\begin{equation} x\u0026rsquo;(t_1) = f(x(t_1)) = x\u0026rsquo;(t+h) \\approx \\frac{x(t_1) - x(t_0)}{h} \\end{equation}\nMultiplying \\(h\\) to both sides gives:\n\\begin{equation} hf(x(t_1)) = x(t_1) - x(t_0) \\end{equation}\nwhich gives:\n\\begin{equation} x(t_0) = x(t_1) - h f(x(t_1)) \\end{equation}\nwe will now attempt to estimate \\(x_1\\) by declaring \\(x_1 := x(t_{1})\\), which will give us:\n\\begin{equation} x_1 - h f(x_1) = x_0 \\end{equation}\nLet us call \\(G(x_{1}) = x_1 - h f(x_1) = x_0\\).\nFinally, we run Newton\u0026rsquo;s Method to solve the \\(x_1\\) such that we can obtain \\(x_0\\) by trying to find the zeros of \\(G(x_1) - x_0\\). 
Because \\(h\\) is small, a good initial guess is actually \\(G(x_0)\\), and then we can optimize.\nTrapezoidal Method \\begin{equation} x_{t+1} = x_t + h \\frac{f(x_{t+1})+f(x_t)}{2} \\end{equation}\nmotivation \u0026ldquo;averaging smoothed things out\u0026rdquo;:\n\\begin{equation} \\frac{x(t+h) - x(t)}{h} \\approx \\frac{f(x(t+h)) + f(x(t))}{2} \\end{equation}\nmeaning we have:\n\\begin{equation} \\frac{x_1-x_0}{h} = \\frac{f(x_1) + f(x_0)}{2} \\end{equation}\nwhich averages our derivatives out.\nCross-multiplying, this gives:\n\\begin{equation} x_1 - \\frac{1}{2}h f(x_1) = x_0 + \\frac{1}{2} h f(x_0) \\end{equation}\nwhich can also be written as, multiplying by some \\(h\\):\n\\begin{equation} x_1 = x_0 + h \\frac{f(x_1)+f(x_0)}{2} \\end{equation}\nexplicitly \\begin{equation} x_{i} = \\qty( \\frac{(1- \\frac{1}{2}\\lambda h)}{(1+ \\frac{1}{2}\\lambda h)})^{i} x_0 \\end{equation}\nevaluation Importantly, this gives bounds\n\\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{2} \\end{equation}\nModified Euler Method This is also called \u0026ldquo;Midpoint Method\u0026rdquo;.\nThis is one of thee methods which doesn\u0026rsquo;t break during \u0026ldquo;stiff\u0026rdquo; ODEs, and converges \\(h^{N}\\) times quickly.\nFor some:\n\\begin{equation} \\dv{x}{t} = f(t,x) \\end{equation}\n\\begin{equation} x_{i+1} = x_{i} + h f\\qty(t_{i} + \\frac{1}{2}h, x_{i} + \\frac{1}{2}h f(t_{i}, x_{i})) \\end{equation}\nthis is motivated by the Trapezoidal Method, but\n\u0026ldquo;A thorough introduction to these methods requires additional background in approximation theory and numerical analysis\u0026rdquo;\nThe Book error \\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{2} \\end{equation}\nmotivation we take a half step in front of our original point using its slope, and compute the slope there.\nImproved Euler Method This is also called \u0026ldquo;Heun\u0026rsquo;s Method\u0026rdquo;\n\\begin{equation} x_{i+1} = x_{i} + \\frac{1}{2} h(f(t_{i}, x_{i}) + f(t_{i}+h, 
x_{i}+hf(t_{i}, x_{i}))) \\end{equation}\nerror \\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{2} \\end{equation}\nmotivation we average the slopes of the current location and a full step in front, calculating their slopes, and average them\nRunge-Kutta Method a.k.a. instead of contending with the forward, backward, middle slope, or native slope from \\(f\\), we just ball and average all of them:\n\\begin{equation} \\begin{cases} m_1 = f(t_{i}, x_{i}) \\\\ m_2 = f\\qty(t_{i} + \\frac{h}{2}, x_{i}+\\frac{h}{2}m_{1}) \\\\ m_3 = f\\qty(t_{i}+\\frac{h}{2}, x_{i}+\\frac{h}{2}m_{2}) \\\\ m_4 = f\\qty(t_{i} + h, x_{i}+hm_{3}) \\end{cases} \\end{equation}\nand then:\n\\begin{equation} x_{i+1} = x_{i} + \\frac{1}{6}h m_{1} + \\frac{1}{3} h m_{2} + \\frac{1}{3} h m_{3} + \\frac{1}{6} h m_{4} \\end{equation}\nthe coefficients are that from pascal\u0026rsquo;s triangle.\nerror \\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{4} \\end{equation}\nmotivation this is essentially like \u0026ldquo;fitting a parabola\u0026rdquo; against our curve\n","html":"\u003cp\u003eConsider a general non-linear \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = F(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSuppose we have some time interval, we have some solutions to the expression given. Is it possible for us to, given \\(x(t_0) = x_0\\), what \\(x(t_0+T)\\) would be? 
Can we approximate for explicit numbers?\u003c/p\u003e\n\u003cp\u003eThe solutions have to exist for all time: blow-up \u003cstrong\u003ecannot\u003c/strong\u003e be present during numerical estimations.\u003c/p\u003e\n\u003ch2 id=\"explicit-euler-method\"\u003eExplicit Euler Method\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nx(t+h) \\approx x_{t+1} = x_{t} + h f(x_t)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003erecall that given \\(x(t_0) = x_0\\), we desire \\(x(t_0+T)\\).\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003edivide your solution interval into \\(N\\) small intervals; each interval would have length \\(h= \\frac{T}{N}\\)\u003c/li\u003e\n\u003cli\u003elet \\(t_{i} = t_0 + i \\frac{T}{N}\\), where \\(t_{N} = t_{0}+T\\)\u003c/li\u003e\n\u003cli\u003efor each segment \\(t_{i}\\), we attempt to compute a \\(x_{i}\\), and we\u0026rsquo;d like to approximate the error between \\(x_{i}\\) and \\(x(t_{i})\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIn the explicit Euler method, we make piecewise linear approximations. At each \\(x_0\\), we follow the slope estimated via the \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e at that point. Specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t) = \\lim_{k \\to 0} \\frac{x(t+k)-x(t)}{k} \\approx \\frac{x(t+h)-x(t)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some small \\(h\\). 
Meaning, specifically, \\(x(t+h) \\approx x(t) + h x\u0026rsquo;(t)\\), where \\(h\\) is the step size we computed before.\u003c/p\u003e\n\u003cp\u003eConsider that we had an \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e that is \\(x\u0026rsquo; = F(x)\\), whech gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 = x_{0}+ h f(x_0) \\approx x(t_0 + h)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFollowing this scheme, we can calculate from \\(x_0\\) all the way stepwise to \\(x_{N}\\).\u003c/p\u003e\n\u003ch3 id=\"evaluation\"\u003eevaluation\u003c/h3\u003e\n\u003cp\u003eSituation: we have \\(X_{N}\\), we have \\(x(t_{N})\\), how close are they? In fact:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have some constant \\(C(x_0, t_0, T, f)\\), which we can use to estimate \\(C\\) the bounds specific to the problem you are solving.\u003c/p\u003e\n\u003ch3 id=\"stiffness\"\u003estiffness\u003c/h3\u003e\n\u003cp\u003eCertain parts of a solution maybe decaying/oscillating very different from another part of the solution\u0026mdash;\u003c/p\u003e\n\u003ch4 id=\"example\"\u003eexample\u003c/h4\u003e\n\u003cp\u003eConsider a system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\mqty(-1 \u0026amp; 0 \\\\ 0 \u0026amp; -10)y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eour solutions look like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = \\mqty(c_1 e^{-t} \\\\ c_2 e^{-10t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso the top expression gives \\(x_i = (1-h)^{i} x_0\\) and bottom \\(x_{i} = (1-10h)^{i}x_0\\), which means they will have different requirements for \\(h\\) to be able to converge\u003c/p\u003e\n\u003ch4 id=\"example-2\"\u003eexample 2\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = -5 (y-\\cos x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \u003ca 
href=\"/posts/kbhsecond_order_linear_differential_equation/#method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/a\u003e, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{25}{26} \\cos t + \\frac{5}{26} \\sin t + Ce^{-5t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe first parts are fine and not stiff at all, the third part, we realize that we need \\((1-5h)^{i}x_0\\), meaning we need \\(h \u0026lt; \\frac{1}{5}\\).\u003c/p\u003e\n\u003ch4 id=\"motivation\"\u003emotivation\u003c/h4\u003e\n\u003cp\u003eLet\u0026rsquo;s consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = -\\lambda x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe explicit Euler gives out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{t+1} = (1-\\lambda h)x_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, in general:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i} = (1-\\lambda h)^{i} x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe know the function is bound to decay, yet the Explicit Euler will give us that this decays only when:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-1 \u0026lt; 1-\\lambda h \u0026lt; 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#implicit-euler-method\"\u003eImplicit Euler Method\u003c/a\u003e doesn\u0026rsquo;t have this problem\u0026mdash;\u003c/p\u003e\n\u003cp\u003econsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} - \\lambda h x_{i+1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i} = \\frac{1}{(1+\\lambda h)^{i}}x_0\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"implicit-euler-method\"\u003eImplicit Euler Method\u003c/h2\u003e\n\u003cp\u003eA small twist on the \u003ca href=\"#explicit-euler-method\"\u003eExplicit Euler Method\u003c/a\u003e. 
To be able to use this method, we can formulate this as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} - h f(x_{i+1}) = x_i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere we use \u003ca href=\"/posts/kbhnewton_s_method/\"\u003eNewton\u0026rsquo;s Method\u003c/a\u003e to estimate some input \\(i+1\\) for which the above statement gets to \\(x_{i}\\).\u003c/p\u003e\n\u003ch3 id=\"evaluation\"\u003eevaluation\u003c/h3\u003e\n\u003cp\u003eWe actually didn\u0026rsquo;t do that much error; its is still bounded by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"derivation\"\u003eDerivation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x((t+h)-h) - x(t+h)}{-h} \\approx x\u0026rsquo;(t+h)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is first-order Taylor Approximation \u003cstrong\u003ewritten backwards\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThis also yields:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x((t+h)-h) - x(t+h)}{-h} = \\frac{x(t+h)-x((t+h)-h)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, let \\(t = t_0\\), and therefore we have \\(t_1 = t +h\\), this gives us that:\u003c/p\u003e\n\u003cp\u003eNow, recall that, because \\(f\\) is the ODE:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t_1) = f(x(t_1)) = x\u0026rsquo;(t+h) \\approx \\frac{x(t_1) - x(t_0)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying \\(h\\) to both sides gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nhf(x(t_1)) = x(t_1) - x(t_0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t_0) = x(t_1) - h f(x(t_1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe will now attempt to estimate \\(x_1\\) by declaring \\(x_1 := x(t_{1})\\), which will give us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 - h f(x_1) = x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us call \\(G(x_{1}) = 
x_1 - h f(x_1) = x_0\\).\u003c/p\u003e\n\u003cp\u003eFinally, we run \u003ca href=\"/posts/kbhnewton_s_method/\"\u003eNewton\u0026rsquo;s Method\u003c/a\u003e to solve the \\(x_1\\) such that we can obtain \\(x_0\\) by trying to find the zeros of \\(G(x_1) - x_0\\). Because \\(h\\) is small, a good initial guess is actually \\(G(x_0)\\), and then we can optimize.\u003c/p\u003e\n\u003ch2 id=\"trapezoidal-method\"\u003eTrapezoidal Method\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nx_{t+1} = x_t + h \\frac{f(x_{t+1})+f(x_t)}{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;averaging smoothed things out\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x(t+h) - x(t)}{h} \\approx \\frac{f(x(t+h)) + f(x(t))}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x_1-x_0}{h} = \\frac{f(x_1) + f(x_0)}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich averages our derivatives out.\u003c/p\u003e\n\u003cp\u003eCross-multiplying, this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 - \\frac{1}{2}h f(x_1) = x_0 + \\frac{1}{2} h f(x_0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich can also be written as, multiplying by some \\(h\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 = x_0 + h \\frac{f(x_1)+f(x_0)}{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"explicitly\"\u003eexplicitly\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nx_{i} = \\qty( \\frac{(1- \\frac{1}{2}\\lambda h)}{(1+ \\frac{1}{2}\\lambda h)})^{i} x_0\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"evaluation\"\u003eevaluation\u003c/h3\u003e\n\u003cp\u003eImportantly, this gives bounds\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"modified-euler-method\"\u003eModified Euler Method\u003c/h2\u003e\n\u003cp\u003eThis is also called \u0026ldquo;\u003ca 
href=\"#modified-euler-method\"\u003eMidpoint Method\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eThis is one of thee methods which doesn\u0026rsquo;t break during \u0026ldquo;stiff\u0026rdquo; \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003es, and converges \\(h^{N}\\) times quickly.\u003c/p\u003e\n\u003cp\u003eFor some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = f(t,x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} + h f\\qty(t_{i} + \\frac{1}{2}h, x_{i} + \\frac{1}{2}h f(t_{i}, x_{i}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is motivated by the \u003ca href=\"#trapezoidal-method\"\u003eTrapezoidal Method\u003c/a\u003e, but\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003e\u0026ldquo;A thorough introduction to these methods requires additional background in approximation theory and numerical analysis\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThe Book\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"error\"\u003eerror\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003ewe take a half step in front of our original point using its slope, and compute the slope there.\u003c/p\u003e\n\u003ch2 id=\"improved-euler-method\"\u003eImproved Euler Method\u003c/h2\u003e\n\u003cp\u003eThis is also called \u0026ldquo;\u003ca href=\"#improved-euler-method\"\u003eHeun\u0026rsquo;s Method\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} + \\frac{1}{2} h(f(t_{i}, x_{i}) + f(t_{i}+h, x_{i}+hf(t_{i}, x_{i})))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"error\"\u003eerror\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003ewe average the 
slopes of the current location and a full step in front, calculating their slopes, and average them\u003c/p\u003e\n\u003ch2 id=\"runge-kutta-method\"\u003eRunge-Kutta Method\u003c/h2\u003e\n\u003cp\u003ea.k.a. instead of contending with the forward, backward, middle slope, or native slope from \\(f\\), we just ball and average all of them:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nm_1 = f(t_{i}, x_{i}) \\\\\nm_2 = f\\qty(t_{i} + \\frac{h}{2}, x_{i}+\\frac{h}{2}m_{1}) \\\\\nm_3 = f\\qty(t_{i}+\\frac{h}{2}, x_{i}+\\frac{h}{2}m_{2}) \\\\\nm_4 = f\\qty(t_{i} + h, x_{i}+hm_{3})\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} + \\frac{1}{6}h m_{1} + \\frac{1}{3} h m_{2} + \\frac{1}{3} h m_{3} + \\frac{1}{6} h m_{4}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe coefficients are that from pascal\u0026rsquo;s triangle.\u003c/p\u003e\n\u003ch3 id=\"error\"\u003eerror\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{4}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003ethis is essentially like \u0026ldquo;fitting a parabola\u0026rdquo; against our curve\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnumerical_approximation_schemes/","tags":null,"title":"Numerical Approximation Schemes"},{"categories":null,"contents":"Here\u0026rsquo;s the characteristic equation again:\n\\begin{equation} \\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x) \\end{equation}\nAfter Fourier decomposition, we have that:\n\\begin{equation} EI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0 \\end{equation}\nLet\u0026rsquo;s solve this!\nE,I,u,f = var(\u0026#34;E I u f\u0026#34;) x, L = var(\u0026#34;x L\u0026#34;) w = function(\u0026#39;w\u0026#39;)(x) _c0, _c1, _c2, _c3 = var(\u0026#34;_C0 _C1 _C2 _C3\u0026#34;) fourier_cantileaver = (E*I*diff(w, x, 2) - u*f^2*w == 0) fourier_cantileaver 
-f^2*u*w(x) + E*I*diff(w(x), x, x) == 0 And now, we can go about solving this result.\nsolution = desolve(fourier_cantileaver, w, ivar=x, algorithm=\u0026#34;fricas\u0026#34;).expand() w = solution \\begin{equation} _{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} \\end{equation}\nWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\nb = var(\u0026#34;b\u0026#34;) top = sqrt(f)*(u/(E*I))**(1/4) w = _c1*e^(b*x) + _c0*e^(i*b*x) + _c2*e^(-i*b*x) + _c3*e^(-b*x) w _C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x) \\begin{equation} _{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)} \\end{equation}\nWe have one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. We know this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\nRecall that:\n\\begin{equation} \\begin{cases} \\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\ \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. 
That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\n\\begin{equation} d_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w \\end{equation}\nNo more imaginaries!!\nSo, let us redefine the expression:\nd0, d1, d2, d3 = var(\u0026#34;d0 d1 d2 d3\u0026#34;) w = d0*cosh(b*x)+d1*sinh(b*x)+d2*cos(b*x)+d3*sin(b*x) w d2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x) Now, we need to move onto solving when there will be valid solutions to this expression. However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\nThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\nwp = diff(w,x,1) wpp = diff(w,x,2) wppp = diff(w,x,3) (wp, wpp, wppp) (b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x), -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x), -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x)) And then, we have a system:\ncond_1 = w.subs(x=0) == 0 cond_2 = wp.subs(x=0) == 0 cond_3 = wpp.subs(x=L) == 0 cond_4 = wppp.subs(x=L) == 0 conds = (cond_1, cond_2, cond_3, cond_4) conds (d0 + d2 == 0, b*d1 + b*d3 == 0, -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0, -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0) solve(conds, d0, d1, d2, d3).full_simplify() Ok so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. 
Therefore, we have the system:\n\\begin{equation} \\begin{cases} d_0 + d_2 = 0 \\\\ d_1 + d_3 = 0 \\\\ -d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\ -d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\ \\end{cases} \\end{equation}\nNow, taking the top expressions, we gather that:\n\\begin{equation} \\begin{cases} d_0 = -d_2 \\\\ d_1 = -d_3 \\end{cases} \\end{equation}\nPerforming these substitutions:\n\\begin{equation} \\begin{cases} d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\end{cases} \\end{equation}\nNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much what the constants are; instead, we care about when a solution exists (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). So:\n\\begin{align} \u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} \\end{align}\nand\n\\begin{align} \u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{align}\ntherefore, we have:\n\\begin{equation} \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{equation}\nMultiplying each side by the other:\n\\begin{equation} (\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2} \\end{equation}\nExpanding both sides now:\n\\begin{equation} (\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb) \\end{equation}\nMoving everything finally to one side:\n\\begin{equation} \\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nOk, this is where the 
satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. Recall pythagoras:\n\\begin{equation} \\begin{cases} \\cosh^{2}x - \\sinh^{2} x = 1 \\\\ \\sin^{2}x + \\cos^{2} x = 1 \\end{cases} \\end{equation}\nTo apply these effectively, multiply both sides by \\(1\\):\n\\begin{equation} -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nFinally, we substitute!\n\\begin{align} \u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0 \\end{align}\nOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\nWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\nintervals = [jj*pi for jj in range(0, 5)] intervals [0, pi, 2*pi, 3*pi, 4*pi] We will now declare \\(x=Lb\\), and create a nonlinear expression in it:\nx = var(\u0026#34;x\u0026#34;) characteristic_eqn = 1 + cos(x)*cosh(x) == 0 characteristic_eqn cos(x)*cosh(x) + 1 == 0 Root finding time!\ncharacteristic_solutions = [characteristic_eqn.find_root(i,j) for (i,j) in zip(intervals,intervals[1:])] characteristic_solutions [1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457] These are possible \\(Lb\\) candidates.\nThe takeaway here is that:\n(4.6940911329739246/1.8751040687120917)^2 6.266893025769125 (see below\u0026rsquo;s derivation for why frequency changes by a square of this root)\nthe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\nRecall now that:\n\\begin{equation} b = \\sqrt{f} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\end{equation}\nSimplifying some:\n\\begin{align} b \u0026amp;= f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\ \u0026amp;= \\qty(f^{2})^{\\frac{1}{4}}\\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\ \u0026amp;= \\qty(\\frac{\\mu f^{2}}{EI})^{\\frac{1}{4}} \\end{align}\nTo solve for \\(f\\), give all other expressions and set one of the above characteristic solutions to \\(Lb\\). 
Then, solve for \\(f\\).\nSolving for frequency to get things to be correct, substituting the fact that \\(bh \\rho = \\mu\\):\n\\begin{align} \u0026amp;Lb = s \\\\ \\Rightarrow\\ \u0026amp; L f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} = s \\\\ \\Rightarrow\\ \u0026amp; f^{\\frac{1}{2}} = \\frac{s}{L} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{4}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\mu})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\rho bh})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{Eh^{2}}{12\\rho })^{\\frac{1}{2}} \\end{align}\n_E = 70000000000 # pascals _p = 2666 # kg/m^3 _h = 0.0064 # m # target LENGTH = 0.095 # mode to index nth_mode = 0 _s = characteristic_solutions[nth_mode] (_s^2/LENGTH^2)*((_E*(_h^2))/(12*_p))^(1/2) 3688.17772197722 Also, to get the constant for the elastic modulus from our force measurements, see calculating shear\u0026rsquo;s modulus.\nLet us create a code snippet to do that consistently:\n# constants https://www.mit.edu/~6.777/matprops/aluminum.htm _E = 44062894805 # modulus (pascals) _I = 0.0000000001365333333 # second moment (m^4) https://amesweb.info/section/second-moment-of-area-calculator.aspx _u = 3.521355063 # length mass density (kg/m) # target LENGTH = 0.09573 # length of tine (meters) # mode to index nth_mode = 0 # variable declaration # solution eqn solution_eqn = characteristic_solutions[nth_mode] == (LENGTH*(sqrt(f)*(_u/(_E*_I))^(1/4))) # as frequency is squared, we take the SECOND (the non-negative) result, and round it solve(solution_eqn, f)[0].rhs().n() 501.482272272831 ","html":"\u003cp\u003eHere\u0026rsquo;s the characteristic equation again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu 
\\pdv{w}{t}+q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAfter Fourier decomposition, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s solve this!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E I u f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x L\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;w\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_C0 _C1 _C2 _C3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-f^2*u*w(x) + E*I*diff(w(x), x, x) == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, we can go about solving this result.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" 
data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E 
I}\\right)^{\\frac{1}{4}}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etop\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e_C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. 
We \u003cem\u003eknow\u003c/em\u003e this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNo more imaginaries!!\u003c/p\u003e\n\u003cp\u003eSo, let us redefine the expression:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;d0 d1 d2 d3\u0026#34;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esinh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ed2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we need to move onto solving when there will be valid solutions to this expression. 
However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\u003c/p\u003e\n\u003cp\u003eThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-10_13-38-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + 
b^3*d0*sinh(b*x))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, we have a system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(d0 + d2 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e b*d1 + b*d3 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOk so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 + d_2 = 0 \\\\\nd_1 + d_3 = 0 \\\\\n-d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\\n-d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, taking the top expressions, we gather that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 = -d_2 \\\\\nd_1 = -d_3\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePerforming these substitutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\nd_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much \u003cem\u003ewhat\u003c/em\u003e the constants are; instead, we care about when a solution \u003cem\u003eexists\u003c/em\u003e (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). 
So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{align}\u003c/p\u003e\n\u003cp\u003etherefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying each side by the other:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding both sides now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMoving everything finally to one side:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, this is where the satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. 
Recall pythagoras:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh^{2}x - \\sinh^{2} x = 1 \\\\\n\\sin^{2}x + \\cos^{2} x = 1\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo apply these effectively, multiply both sides by \\(1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we substitute!\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\u003c/p\u003e\n\u003cp\u003eWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e[0, pi, 2*pi, 3*pi, 4*pi]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now declare \\(x=Lb\\), and create a nonlinear expression in it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ecos(x)*cosh(x) + 1 == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRoot finding time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efind_root\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThese are possible \\(Lb\\) candidates.\u003c/p\u003e\n\u003cp\u003eThe takeaway here is that:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.6940911329739246\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.8751040687120917\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e6.266893025769125\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e(see below\u0026rsquo;s derivation for why frequency changes by a \u003cem\u003esquare\u003c/em\u003e of this root)\u003c/p\u003e\n\u003cp\u003ethe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\u003c/p\u003e\n\u003cp\u003eRecall now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb = \\sqrt{f} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSimplifying some:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb \u0026amp;= f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\\n\u0026amp;= \\qty(f^{2})^{\\frac{1}{4}}\\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\\n\u0026amp;= \\qty(\\frac{\\mu f^{2}}{EI})^{\\frac{1}{4}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eTo solve for \\(f\\), give all other expressions and set one of the above characteristic solutions to \\(Lb\\). 
Then, solve for \\(f\\).\u003c/p\u003e\n\u003cp\u003eSolving for frequency to get things to be correct, substituting the fact that \\(bh \\rho = \\mu\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Lb = s \\\\\n\\Rightarrow\\ \u0026amp; L f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} = s \\\\\n\\Rightarrow\\ \u0026amp; f^{\\frac{1}{2}} = \\frac{s}{L} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{4}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\mu})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\rho bh})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{Eh^{2}}{12\\rho })^{\\frac{1}{2}}\n\\end{align}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e70000000000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2666\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0.0064\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.095\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e3688.17772197722\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAlso, to get the constant for the elastic modulus from our force measurements, see \u003ca href=\"/posts/kbhcalculating_shear_s_modulus/\"\u003ecalculating shear\u0026rsquo;s modulus.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eLet us create a code snippet to do that consistently:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# constants https://www.mit.edu/~6.777/matprops/aluminum.htm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e44062894805\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# modulus (pascals)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0000000001365333333\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# second moment (m^4) https://amesweb.info/section/second-moment-of-area-calculator.aspx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3.521355063\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# length mass density 
(kg/m)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.09573\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# length of tine (meters)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# variable declaration\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solution eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution_eqn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# as frequency is squared, we take the SECOND (the non-negative) result, and round it\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolution_eqn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erhs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e501.482272272831\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations-1/","tags":null,"title":"Numerical Cantilever Simulations"},{"categories":null,"contents":"Here\u0026rsquo;s the characteristic equation again:\n\\begin{equation} \\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x) \\end{equation}\nAfter Fourier decomposition, we have that:\n\\begin{equation} EI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0 \\end{equation}\nLet\u0026rsquo;s solve this!\nE,I,u,f = var(\u0026#34;E I u f\u0026#34;) x, L = var(\u0026#34;x L\u0026#34;) w = function(\u0026#39;w\u0026#39;)(x) _c0, _c1, _c2, _c3 = var(\u0026#34;_C0 _C1 _C2 _C3\u0026#34;) fourier_cantileaver = (E*I*diff(w, x, 2) - u*f^2*w == 0) fourier_cantileaver -f^2*u*w(x) + 
E*I*diff(w(x), x, x) == 0 And now, we can go about solving this result.\nsolution = desolve(fourier_cantileaver, w, ivar=x, algorithm=\u0026#34;fricas\u0026#34;).expand() w = solution \\begin{equation} _{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} \\end{equation}\nWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\nb = var(\u0026#34;b\u0026#34;) top = sqrt(f)*(u/(E*I))**(1/4) w = _c1*e^(b*x) + _c0*e^(i*b*x) + _c2*e^(-i*b*x) + _c3*e^(-b*x) w _C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x) \\begin{equation} _{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)} \\end{equation}\nWe have one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. We know this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\nRecall that:\n\\begin{equation} \\begin{cases} \\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\ \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. 
That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\n\\begin{equation} d_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w \\end{equation}\nNo more imaginaries!!\nSo, let us redefine the expression:\nd0, d1, d2, d3 = var(\u0026#34;d0 d1 d2 d3\u0026#34;) w = d0*cosh(b*x)+d1*sinh(b*x)+d2*cos(b*x)+d3*sin(b*x) w d2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x) Now, we need to move onto solving when there will be valid solutions to this expression. However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\nThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\nwp = diff(w,x,1) wpp = diff(w,x,2) wppp = diff(w,x,3) (wp, wpp, wppp) (b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x), -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x), -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x)) And then, we have a system:\ncond_1 = w.subs(x=0) == 0 cond_2 = wp.subs(x=0) == 0 cond_3 = wpp.subs(x=L) == 0 cond_4 = wppp.subs(x=L) == 0 conds = (cond_1, cond_2, cond_3, cond_4) conds (d0 + d2 == 0, b*d1 + b*d3 == 0, -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0, -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0) solve(conds, d0, d1, d2, d3).full_simplify() Ok so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. 
Therefore, we have the system:\n\\begin{equation} \\begin{cases} d_0 + d_2 = 0 \\\\ d_1 + d_3 = 0 \\\\ -d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\ -d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\ \\end{cases} \\end{equation}\nNow, taking the top expressions, we gather that:\n\\begin{equation} \\begin{cases} d_0 = -d_2 \\\\ d_1 = -d_3 \\end{cases} \\end{equation}\nPerforming these substitutions:\n\\begin{equation} \\begin{cases} d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\end{cases} \\end{equation}\nNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much what the constants are; instead, we care about when a solution exists (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). So:\n\\begin{align} \u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} \\end{align}\nand\n\\begin{align} \u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{align}\ntherefore, we have:\n\\begin{equation} \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{equation}\nMultiplying each side by the other:\n\\begin{equation} (\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2} \\end{equation}\nExpanding both sides now:\n\\begin{equation} (\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb) \\end{equation}\nMoving everything finally to one side:\n\\begin{equation} \\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nOk, this is where the 
satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. Recall pythagoras:\n\\begin{equation} \\begin{cases} \\cosh^{2}x - \\sinh^{2} x = 1 \\\\ \\sin^{2}x + \\cos^{2} x = 1 \\end{cases} \\end{equation}\nTo apply these effectively, multiply both sides by \\(1\\):\n\\begin{equation} -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nFinally, we substitute!\n\\begin{align} \u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0 \\end{align}\nOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\nWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\nintervals = [jj*pi for jj in range(0, 5)] intervals [0, pi, 2*pi, 3*pi, 4*pi] We will now declare \\(x=Lb\\), and create a nonlinear expression in it:\nx = var(\u0026#34;x\u0026#34;) characteristic_eqn = 1 + cos(x)*cosh(x) == 0 characteristic_eqn cos(x)*cosh(x) + 1 == 0 Root finding time!\ncharacteristic_solutions = [characteristic_eqn.find_root(i,j) for (i,j) in zip(intervals,intervals[1:])] characteristic_solutions [1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457] These are possible \\(Lb\\) candidates.\nThe takeaway here is that:\n(4.6940911329739246/1.8751040687120917)^2 6.266893025769125 (see below\u0026rsquo;s derivation for why frequency changes by a square of this root)\nthe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\n\\begin{equation} \\sqrt{f}\\qty(\\frac{u}{EI})^{\\frac{1}{4}} L = s \\end{equation}\n\\begin{equation} \\sqrt{f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}} \\end{equation}\n_E = 70000000000 # pascals _p = 2766 # kg/m^3 _h = 0.006 # m _I = 0.0000000001302083333 # m^4 _u = 0.10388 # kg/m, approximate # target LENGTH = 0.095 # mode to index nth_mode = 0 _s = characteristic_solutions[nth_mode] ((3.5160)/(LENGTH^2))*((_E*_I)/_u)^(1/2) 3649.25402142506 _E = 70000000000 # pascals _p = 2766 # kg/m^3 _h = 0.006 # m # target LENGTH = 0.095 # mode to index nth_mode = 0 _s = characteristic_solutions[nth_mode] (_s^2/LENGTH^2)*((_E*(_h^2))/(12*_p))^(1/2) 3394.58823149786 ","html":"\u003cp\u003eHere\u0026rsquo;s the characteristic equation 
again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAfter Fourier decomposition, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s solve this!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E I u f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x L\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;w\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_C0 _C1 _C2 _C3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-f^2*u*w(x) + E*I*diff(w(x), x, x) == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, we can go about solving this result.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E 
I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etop\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e_C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have 
one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. We \u003cem\u003eknow\u003c/em\u003e this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNo more imaginaries!!\u003c/p\u003e\n\u003cp\u003eSo, let us redefine the expression:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;d0 d1 d2 d3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esinh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ed2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we need to move onto solving when there will be valid solutions to this expression. 
However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\u003c/p\u003e\n\u003cp\u003eThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-10_13-38-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + 
b^3*d0*sinh(b*x))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, we have a system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(d0 + d2 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e b*d1 + b*d3 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOk so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 + d_2 = 0 \\\\\nd_1 + d_3 = 0 \\\\\n-d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\\n-d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, taking the top expressions, we gather that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 = -d_2 \\\\\nd_1 = -d_3\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePerforming these substitutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\nd_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much \u003cem\u003ewhat\u003c/em\u003e the constants are; instead, we care about when a solution \u003cem\u003eexists\u003c/em\u003e (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). 
So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{align}\u003c/p\u003e\n\u003cp\u003etherefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying each side by the other:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding both sides now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMoving everything finally to one side:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, this is where the satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. 
Recall pythagoras:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh^{2}x - \\sinh^{2} x = 1 \\\\\n\\sin^{2}x + \\cos^{2} x = 1\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo apply these effectively, multiply both sides by \\(1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we substitute!\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\u003c/p\u003e\n\u003cp\u003eWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e[0, pi, 2*pi, 3*pi, 4*pi]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now declare \\(x=Lb\\), and create a nonlinear expression in it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ecos(x)*cosh(x) + 1 == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRoot finding time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efind_root\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThese are possible \\(Lb\\) candidates.\u003c/p\u003e\n\u003cp\u003eThe takeaway here is that:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.6940911329739246\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.8751040687120917\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e6.266893025769125\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e(see below\u0026rsquo;s derivation for why frequency changes by a \u003cem\u003esquare\u003c/em\u003e of this root)\u003c/p\u003e\n\u003cp\u003ethe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{f}\\qty(\\frac{u}{EI})^{\\frac{1}{4}} L = s\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e70000000000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2766\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.006\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0000000001302083333\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m^4\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.10388\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m, approximate\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.095\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.5160\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3649.25402142506\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e70000000000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2766\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.006\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# 
target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.095\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3394.58823149786\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations/","tags":null,"title":"Numerical Cantilever Simulations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_capm_problem_set/","tags":null,"title":"NUS-ECON320 CAPM Problem Set"},{"categories":null,"contents":"Let\u0026rsquo;s 
import some tools.\nimport pandas as pd from scipy.optimize import minimize import numpy as np from datetime import datetime from tqdm import tqdm import torch tqdm.pandas() And load our data:\ndf = pd.read_csv(\u0026#34;./currency_signal.csv\u0026#34;, index_col=0, header=None, parse_dates=[0]) df Let\u0026rsquo;s rename the headers\ndf.index.rename(\u0026#34;date\u0026#34;, True) df.columns = [\u0026#34;value\u0026#34;] Awesome. For the rest of the calculations, we will hide the 2020 data from the model:\ndata = df[df.index \u0026lt; datetime(2020, 1,1)] data value date 2006-03-01 0.000050 2006-03-02 0.001778 2006-03-03 0.000116 2006-03-06 -0.001038 2006-03-07 -0.001197 ... ... 2019-12-25 -0.010659 2019-12-26 -0.000869 2019-12-27 0.000075 2019-12-30 0.000033 2019-12-31 0.000944 [3610 rows x 1 columns] we will add a column of randomness to this, to serve as the seed of our epsilon:\ndata[\u0026#34;epsilon\u0026#34;] = np.random.normal(0,1, data.shape[0]) data value epsilon date 2006-03-01 0.000050 -0.255699 2006-03-02 0.001778 0.157341 2006-03-03 0.000116 0.574378 2006-03-06 -0.001038 -1.319365 2006-03-07 -0.001197 -0.717148 ... ... ... 2019-12-25 -0.010659 0.153559 2019-12-26 -0.000869 -1.066562 2019-12-27 0.000075 0.025730 2019-12-30 0.000033 0.760713 2019-12-31 0.000944 -0.427494 [3610 rows x 2 columns] Awesome, we will now seed three parameter variables. Recall that the GARCH model we are dealing with is:\n\\begin{equation} \\begin{cases} \\eta_t = \\sigma_{t}\\epsilon_{t} \\\\ {\\sigma_{t}}^{2} = \\alpha {\\eta_{t}}^{2} + \\beta {\\sigma_{t-1}}^{2} + \\gamma \\end{cases} \\end{equation}\nSolving for explicit solutions of \\(n_t\\) and \\(\\sigma_t\\), in terms of the others using computer algebra, we have:\n\\begin{equation} \\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1} \\end{equation}\nThe value of \\(\\eta_t\\) is naturally \\(\\sigma_t \\epsilon_t\\) (i.e. 
\\(\\eta^{2} = (\\sigma_{t})^{2}(\\epsilon_{t})^{2}\\)).\nSo, to make the squared results, we want to square both value and epsilon:\ndata[\u0026#34;value2\u0026#34;] = data.value**2 data[\u0026#34;epsilon2\u0026#34;] = data.epsilon**2 data value epsilon value2 epsilon2 date 2006-03-01 0.000050 -0.255699 2.450633e-09 0.065382 2006-03-02 0.001778 0.157341 3.162006e-06 0.024756 2006-03-03 0.000116 0.574378 1.334210e-08 0.329910 2006-03-06 -0.001038 -1.319365 1.076978e-06 1.740723 2006-03-07 -0.001197 -0.717148 1.432477e-06 0.514301 ... ... ... ... ... 2019-12-25 -0.010659 0.153559 1.136119e-04 0.023580 2019-12-26 -0.000869 -1.066562 7.549935e-07 1.137555 2019-12-27 0.000075 0.025730 5.670657e-09 0.000662 2019-12-30 0.000033 0.760713 1.083948e-09 0.578684 2019-12-31 0.000944 -0.427494 8.913486e-07 0.182751 [3610 rows x 4 columns] Now, we can now compute a column of these, based on the data we have. To be able to optimize this symbolically, we will leverage PyTorch.\nLet\u0026rsquo;s seed these constants all at \\(1\\), to be optimized later:\na = torch.tensor(1e-10, requires_grad=True) b = torch.tensor(1e-10, requires_grad=True) y = torch.tensor(1e-10, requires_grad=True) (a,b,y) (tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True)) We use the complex data type here to make the subtract operation work. We will eventually project it down to real space without much trouble.\nAwesome, let us compute this series of \\(\\sigma\\), and optimize for the loss.\nHere is a gradient descent optimizer:\n# we will use the gradient descent scheme optimizer = torch.optim.SGD([a,b,y], lr=3e-3) optimizer SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 0.003 maximize: False momentum: 0 nesterov: False weight_decay: 0 ) And now, for 1000 steps, we will minimize the difference between the computed \\(n\\) and actual value against \\(\\alpha, \\beta, \\gamma\\). 
We will run the scheme for 50 steps.\nfor _ in tqdm(range(500)): prev_sigma_2 = 0 # # for each row for i in range(len(data)): # get previous value, or seed at 0 # if it doesn\u0026#39;t exist sigma_2 = (-(b*prev_sigma_2+y)/(a*data[\u0026#34;epsilon2\u0026#34;].iloc[i]-1)) n_2 = sigma_2*data[\u0026#34;epsilon2\u0026#34;].iloc[i] ((n_2-data[\u0026#34;value2\u0026#34;].iloc[i])**2).backward() prev_sigma_2 = sigma_2.detach() optimizer.step() optimizer.zero_grad() Awesome, now, let\u0026rsquo;s see the fitted results:\n(a,b,y) (tensor(611584.9375, requires_grad=True), tensor(37750.6133, requires_grad=True), tensor(-26.5902, requires_grad=True)) We will now work to validate these results in the entire dataset.\ndata_val = df.copy() data_val value date 2006-03-01 0.000050 2006-03-02 0.001778 2006-03-03 0.000116 2006-03-06 -0.001038 2006-03-07 -0.001197 ... ... 2020-05-18 0.000264 2020-05-19 0.001434 2020-05-20 0.000995 2020-05-21 0.000120 2020-05-22 0.000424 [3713 rows x 1 columns] Now, we will use these values to compute the variance and the predicted variance on the data.\nRecall that:\n\\begin{equation} \\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1} \\end{equation}\nSo:\ndata_val[\u0026#34;epsilon\u0026#34;] = np.random.normal(0,1, data_val.shape[0]) data_val value epsilon date 2006-03-01 0.000050 0.018859 2006-03-02 0.001778 1.943619 2006-03-03 0.000116 0.397312 2006-03-06 -0.001038 1.025379 2006-03-07 -0.001197 0.081920 ... ... ... 
2020-05-18 0.000264 -0.976598 2020-05-19 0.001434 -0.357048 2020-05-20 0.000995 -1.230387 2020-05-21 0.000120 0.972614 2020-05-22 0.000424 -0.199802 [3713 rows x 2 columns] Now, we will generate a column of sigma squared\nfor i, date in enumerate(data_val.index): prev_sigma_2 = 0 sigma_2 = (-(b*prev_sigma_2+y)/(a*(data_val[\u0026#34;epsilon\u0026#34;]**2).iloc[i]-1)).detach().numpy() # get previous value, or seed at 0 # if it doesn\u0026#39;t exist data_val.loc[date, \u0026#34;sigma\u0026#34;] = sigma_2**0.5 prev_sigma_2 = sigma_2 data_val value epsilon sigma date 2006-03-01 0.000050 0.018859 0.350442 2006-03-02 0.001778 1.943619 0.003393 2006-03-03 0.000116 0.397312 0.016596 2006-03-06 -0.001038 1.025379 0.006431 2006-03-07 -0.001197 0.081920 0.080500 ... ... ... ... 2020-05-18 0.000264 -0.976598 0.006752 2020-05-19 0.001434 -0.357048 0.018468 2020-05-20 0.000995 -1.230387 0.005359 2020-05-21 0.000120 0.972614 0.006779 2020-05-22 0.000424 -0.199802 0.033002 [3713 rows x 3 columns] And finally, let us generate the eta column:\nRecall that \\(\\eta_t = \\sigma_{t}\\epsilon_{t}\\), so:\ndata_val[\u0026#34;eta\u0026#34;] = data_val.sigma * data_val.epsilon data_val value epsilon sigma eta date 2006-03-01 0.000050 0.018859 0.350442 0.006609 2006-03-02 0.001778 1.943619 0.003393 0.006594 2006-03-03 0.000116 0.397312 0.016596 0.006594 2006-03-06 -0.001038 1.025379 0.006431 0.006594 2006-03-07 -0.001197 0.081920 0.080500 0.006595 ... ... ... ... ... 
2020-05-18 0.000264 -0.976598 0.006752 -0.006594 2020-05-19 0.001434 -0.357048 0.018468 -0.006594 2020-05-20 0.000995 -1.230387 0.005359 -0.006594 2020-05-21 0.000120 0.972614 0.006779 0.006594 2020-05-22 0.000424 -0.199802 0.033002 -0.006594 [3713 rows x 4 columns] And finally, let us compute the log loss:\ndata_val[\u0026#34;loss\u0026#34;] = (data_val.eta-data_val.value).abs() data_val value epsilon sigma eta loss date 2006-03-01 0.000050 0.018859 0.350442 0.006609 0.006559 2006-03-02 0.001778 1.943619 0.003393 0.006594 0.004816 2006-03-03 0.000116 0.397312 0.016596 0.006594 0.006478 2006-03-06 -0.001038 1.025379 0.006431 0.006594 0.007632 2006-03-07 -0.001197 0.081920 0.080500 0.006595 0.007791 ... ... ... ... ... ... 2020-05-18 0.000264 -0.976598 0.006752 -0.006594 0.006857 2020-05-19 0.001434 -0.357048 0.018468 -0.006594 0.008028 2020-05-20 0.000995 -1.230387 0.005359 -0.006594 0.007589 2020-05-21 0.000120 0.972614 0.006779 0.006594 0.006474 2020-05-22 0.000424 -0.199802 0.033002 -0.006594 0.007018 [3713 rows x 5 columns] Saving the data:\ndata_val.to_csv(\u0026#34;currency_arbitrage.csv\u0026#34;) ","html":"\u003cp\u003eLet\u0026rsquo;s import some tools.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.optimize\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd load our data:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./currency_signal.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eindex_col\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eheader\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eNone\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eparse_dates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s rename the headers\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erename\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;value\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome. 
For the rest of the calculations, we will hide the 2020 data from the model:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2020\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-25 -0.010659\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-26 -0.000869\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-27 0.000075\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-30 0.000033\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-31 0.000944\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3610 rows x 1 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewe will add a column of randomness to this, to serve as the seed of our epsilon:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;epsilon\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 -0.255699\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 
0.157341\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.574378\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 -1.319365\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 -0.717148\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-25 -0.010659 0.153559\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-26 -0.000869 -1.066562\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-27 0.000075 0.025730\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-30 0.000033 0.760713\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-31 0.000944 -0.427494\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3610 rows x 2 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome, we will now seed three parameter variables. 
Recall that the GARCH model we are dealing with is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\eta_t = \\sigma_{t}\\epsilon_{t} \\\\\n{\\sigma_{t}}^{2} = \\alpha {\\eta_{t}}^{2} + \\beta {\\sigma_{t-1}}^{2} + \\gamma\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving for explicit solutions of \\(n_t\\) and \\(\\sigma_t\\), in terms of the others using computer algebra, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe value of \\(\\eta_t\\) is naturally \\(\\sigma_t \\epsilon_t\\) (i.e. \\(\\eta^{2} = (\\sigma_{t})^{2}(\\epsilon_{t})^{2}\\)).\u003c/p\u003e\n\u003cp\u003eSo, to make the squared results, we want to square both value and epsilon:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;value2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalue\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;epsilon2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eepsilon\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon value2 epsilon2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 -0.255699 2.450633e-09 0.065382\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 0.157341 3.162006e-06 0.024756\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.574378 1.334210e-08 0.329910\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 -1.319365 1.076978e-06 1.740723\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 -0.717148 1.432477e-06 0.514301\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-25 -0.010659 0.153559 1.136119e-04 0.023580\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-26 -0.000869 -1.066562 7.549935e-07 1.137555\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-27 0.000075 0.025730 5.670657e-09 0.000662\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-30 0.000033 0.760713 1.083948e-09 0.578684\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-31 0.000944 -0.427494 8.913486e-07 0.182751\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3610 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we can now compute a column of these, based on the data we have. 
To be able to optimize this symbolically, we will leverage PyTorch.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s seed these constants all at \\(1\\), to be optimized later:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the 
complex data type here to make the subtract operation work. We will eventually project it down to real space without much trouble.\u003c/p\u003e\n\u003cp\u003eAwesome, let us compute this series of \\(\\sigma\\), and optimize for the loss.\u003c/p\u003e\n\u003cp\u003eHere is a gradient descent optimizer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we will use the gradient descent scheme\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3e-3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 0.003\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, for 1000 steps, we will minimize the difference between the computed \\(n\\) and actual value against \\(\\alpha, \\beta, \\gamma\\). 
We will run the scheme for 50 steps.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e500\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# # for each row\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# get previous value, or seed at 0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# if it doesn\u0026#39;t exist\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003en_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;value2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome, now, let\u0026rsquo;s see the fitted results:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(611584.9375, requires_grad=True), tensor(37750.6133, requires_grad=True), tensor(-26.5902, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now work to validate these results in the entire dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecopy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 1 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we will use these values to compute the variance and the predicted variance on the data.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 2 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we will generate a column of sigma squared\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edate\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eenumerate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get previous value, or seed at 0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# if it doesn\u0026#39;t exist\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edate\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;sigma\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon sigma\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859 0.350442\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619 0.003393\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312 0.016596\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379 0.006431\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920 
0.080500\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598 0.006752\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048 0.018468\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387 0.005359\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614 0.006779\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802 0.033002\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 3 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd finally, let us generate the eta column:\u003c/p\u003e\n\u003cp\u003eRecall that \\(\\eta_t = \\sigma_{t}\\epsilon_{t}\\), so:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;eta\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eepsilon\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon sigma eta\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859 0.350442 0.006609\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619 0.003393 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312 0.016596 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379 0.006431 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920 0.080500 0.006595\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598 0.006752 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048 0.018468 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387 0.005359 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614 0.006779 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802 0.033002 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd finally, let us compute the log loss:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;loss\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eeta\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon sigma eta loss\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859 0.350442 0.006609 0.006559\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619 0.003393 0.006594 0.004816\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312 0.016596 0.006594 0.006478\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379 0.006431 0.006594 0.007632\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920 0.080500 0.006595 0.007791\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598 0.006752 -0.006594 0.006857\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048 0.018468 -0.006594 0.008028\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387 0.005359 -0.006594 0.007589\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614 0.006779 0.006594 0.006474\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802 0.033002 -0.006594 0.007018\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSaving the data:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;currency_arbitrage.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_currency_arbitrage/","tags":null,"title":"NUS-ECON320 Currency Arbitrage"},{"categories":null,"contents":"We 
want to construct a combined agent\n\\begin{equation} (k_1+k_2)x^{*}(k_1+k_2, \\gamma^{*}) = x^{*}(k_1,\\gamma_{1})k_1+x^{*}(k_2, \\gamma_{2})k_2 \\end{equation}\nwhich combines the relative risk of \\(\\gamma_{1}, \\gamma_{2}\\) into some new \\(\\gamma^{*}\\), which produces the same combined consumption of both agents \\(k_1+k_2\\).\nLet us create some CAS tools to solve the inter-temporal choice problem generically for 10 steps in the past.\nWe do this by solving backwards. We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\nLet us first declare the function for power utility. \\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\n# risk aversion y = var(\u0026#34;y\u0026#34;, latex_name=\u0026#34;\\gamma\u0026#34;, domain=\u0026#39;real\u0026#39;) # discount factor d = var(\u0026#34;d\u0026#34;, latex_name=\u0026#34;\\delta\u0026#34;, domain=\u0026#39;real\u0026#39;) # final value at time t=f k_f = var(\u0026#34;k_f\u0026#34;, latex_name=\u0026#34;k_f\u0026#34;, domain=\u0026#39;real\u0026#39;) # the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1} m = var(\u0026#34;m\u0026#34;, latex_name=\u0026#34;\\mu\u0026#34;, domain=\u0026#39;real\u0026#39;) # boundary conditions assume(y\u0026gt;0) assume(y\u0026lt;1) assume(d\u0026gt;0) assume(d\u0026lt;1) # power utility u(c) = ((c^(1-y)-1)/(1-y)) u c |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1) At the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\n# at the final time, leave nothing for investment I=0; u_total = 0 From every step from here, we will discount this utility by \\(d\\), then solve for the previous step\u0026rsquo;s target consumption that would maximize utility. 
That is, at every step, we desire:\n\\begin{equation} k_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t} \\end{equation}\n\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\nand\n\\(\\max u(c_{t})\\)\nRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\n\\begin{equation} I_{t-1} = \\frac{k_t}{(1+m)} \\end{equation}\nEnough talk, let\u0026rsquo;s get to it:\n# create an dictionary to keep track of all the capital variables k = {} # we will iterate time stamps 1-10 T = 10 # a variable for captial at that time for i in range(T): k_t = var(f\u0026#34;k_{T-i}\u0026#34;, latex_name=f\u0026#34;k_{T-i}\u0026#34;) # t-i becasue we are solving backwards; i0 = T10 # what can be consumed at every time stamp # is the k of the previous timestamp, minus # what needs to be left over # we multiply here by d because we want to # discount future utility u_total = d*u_total + u(k_t-I) # add the current variable to dictionary k[T-i] = k_t # recall again i0=T10 because backwards # solve for the next investment amount I = k_t/(1+m) u_total -(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1) We can now use the scipy numerical optimizer to minimize this target. 
Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\nWe will set some initial conditions:\n_m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less Recall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution:\n# we reverse the k_* variables because it is stored in the dictionary # in reverse, because we knew the reverse condition first optim_variables = list(k.values()) optim_variables.reverse() # this function is also the callback, so it returning # True terminates execution def u_total_loss(x): # the optimizer\u0026#39;s current step # we want to take [1:], because we need to keep k1 the same at _k the # initial value substitution_dict = {key: val for key, val in zip(optim_variables[1:], x)} # initial conditions substitution_dict[m] = _m substitution_dict[y] = _y substitution_dict[d] = _d substitution_dict[d] = _d # we want to keep the initial value k1 the same substitution_dict[k[1]] = _k try: # get value content = (-1*u_total).subs(substitution_dict) # recall we multiply by -1 because we are MINIMIZING, so the loss is # the inverse of the maximization utility target return float(content.n()), False except: return 0, True Finally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. 
do nothing) and have the optimizer work it out from there:\nfrom scipy.optimize import minimize target = minimize(lambda x:u_total_loss(x)[0], [_k for _ in range(T-1)], callback=lambda x:u_total_loss(x)[1]) target fun: -50.71592850322347 hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 4433.87331935, 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017, 1126.51562458], [ 7617.14636381, 14333.33933517, 11251.71278723, 8073.31207641, 7444.53071922, 6481.03236385, 4347.35353474, 2644.39855553, 1359.86586059], [ 5964.42171873, 11251.71278723, 15011.27497355, 10093.46973099, 9229.06386286, 8371.07459024, 5510.14654004, 3480.74298654, 1639.19265606], [ 4433.87331935, 8073.31207641, 10093.46973099, 12434.28059884, 11689.33288295, 10711.57399875, 7440.7461982 , 4810.57094062, 2255.16306648], [ 4253.91810669, 7444.53071922, 9229.06386286, 11689.33288295, 14840.59602968, 12519.06872583, 8708.9160148 , 5688.83339388, 2598.27394651], [ 3528.72923763, 6481.03236385, 8371.07459024, 10711.57399875, 12519.06872583, 14999.44881857, 10630.30739223, 6512.62254338, 2293.45506703], [ 2329.61846616, 4347.35353474, 5510.14654004, 7440.7461982 , 8708.9160148 , 10630.30739223, 12147.11811342, 7149.37937935, 2657.8129831 ], [ 1769.85078017, 2644.39855553, 3480.74298654, 4810.57094062, 5688.83339388, 6512.62254338, 7149.37937935, 7260.90962516, 2422.66762041], [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648, 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041, 2911.30717272]]) jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00, 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06, -5.24520874e-06]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 1360 nit: 130 njev: 136 status: 0 success: True x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591, 361.51493714, 272.10309839, 192.29084196, 120.94057011, 57.12129925]) Awesome! 
We now can recover \\(c\\) at each point by a nice helpful function:\nc(k0, k1) = k0 - k1/(_m+1) \u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\nSo, let us translate our list to the actual values consumed:\ncapital_over_time = [_k]+target.x.tolist() # we need to add the initial condition _k back to the # inventory list consumption_over_time = [c(i,j) for i,j in zip(capital_over_time, capital_over_time[1:])] consumption_over_time [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] Examples of Output _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] _m = 0.1 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [154.860597149863, 152.989432556196, 151.010433069881, 149.201249715528, 147.329750167852, 145.539019666462, 143.739371599600, 141.984228587213, 140.243839963791] _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.2 # generally risky _d = 0.9 # the future matters slightly less [388.525041338376, 241.124420093987, 149.632568775223, 92.8644259086613, 57.6330459746870, 35.7667230511026, 22.1970017374152, 13.7754327365677, 8.54930907023498] _m = -0.01 # this is a loosing stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters fun: 0 hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]) jac: array([0., 0., 0., 
0., 0., 0., 0., 0., 0.]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 10 nit: 0 njev: 1 status: 0 success: True x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.]) Evidently: do nothing if we have a loosing cause.\n_m = 1.00 # this is SUPER winning stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters [125.667556437602, 241.474827418105, 460.068836905327, 868.972817783791, 4540.45893314523, 4219.93058738029, 3988.05775624984, 3996.89431939885, 3615.74982832315] We made so much money that we are spending a lot of it and still spending it.\n","html":"\u003cp\u003eWe want to construct a combined agent\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(k_1+k_2)x^{*}(k_1+k_2, \\gamma^{*}) = x^{*}(k_1,\\gamma_{1})k_1+x^{*}(k_2, \\gamma_{2})k_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich combines the relative risk of \\(\\gamma_{1}, \\gamma_{2}\\) into some new \\(\\gamma^{*}\\), which produces the same combined consumption of both agents \\(k_1+k_2\\).\u003c/p\u003e\n\u003cp\u003eLet us create some CAS tools to solve the inter-temporal choice problem generically for 10 steps in the past.\u003c/p\u003e\n\u003cp\u003eWe do this by solving backwards. We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\u003c/p\u003e\n\u003cp\u003eLet us first declare the function for \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e. 
\\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# risk aversion\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;y\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\gamma\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# discount factor\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;d\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\delta\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# final value at time t=f\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_f\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e# the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;m\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\mu\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# boundary conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# power utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ec |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAt the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). 
We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# at the final time, leave nothing for investment\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFrom every step from here, we will discount this utility by \\(d\\), then solve for the \u003cem\u003eprevious\u003c/em\u003e step\u0026rsquo;s target consumption that would maximize utility. That is, at every step, we desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\(\\max u(c_{t})\\)\u003c/p\u003e\n\u003cp\u003eRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) 
Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI_{t-1} = \\frac{k_t}{(1+m)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEnough talk, let\u0026rsquo;s get to it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# create an dictionary to keep track of all the capital variables\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we will iterate time stamps 1-10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# a variable for captial at that time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# t-i becasue we are solving backwards; i0 = T10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what can be consumed at every time 
stamp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# is the k of the previous timestamp, minus\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what needs to be left over\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we multiply here by d because we want to\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# discount future utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# add the current variable to dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# recall again i0=T10 because backwards\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# solve for the next investment amount\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y 
- 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can now use the scipy numerical optimizer to minimize this target. Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\u003c/p\u003e\n\u003cp\u003eWe will set some initial conditions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we reverse the k_* variables because it is stored in the dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# in reverse, because we knew the reverse condition first\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereverse\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# this function is also the callback, so it returning\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# True terminates execution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the optimizer\u0026#39;s current step\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to take [1:], because we need to keep k1 the same at _k the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to keep the initial value k1 the same\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003etry\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# recall we multiply by -1 because we are MINIMIZING, so the loss is\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the inverse of the maximization utility target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()),\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eexcept\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. do nothing) and have the optimizer work it out from there:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.optimize\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecallback\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: -50.71592850322347\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 
4433.87331935,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1126.51562458],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7617.14636381, 14333.33933517, 11251.71278723, 8073.31207641,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 7444.53071922, 6481.03236385, 4347.35353474, 2644.39855553,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1359.86586059],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 5964.42171873, 11251.71278723, 15011.27497355, 10093.46973099,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 9229.06386286, 8371.07459024, 5510.14654004, 3480.74298654,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1639.19265606],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 4433.87331935, 8073.31207641, 10093.46973099, 12434.28059884,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 11689.33288295, 10711.57399875, 7440.7461982 , 4810.57094062,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2255.16306648],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 4253.91810669, 7444.53071922, 9229.06386286, 11689.33288295,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 14840.59602968, 12519.06872583, 8708.9160148 , 5688.83339388,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2598.27394651],\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e [ 3528.72923763, 6481.03236385, 8371.07459024, 10711.57399875,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 12519.06872583, 14999.44881857, 10630.30739223, 6512.62254338,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2293.45506703],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 2329.61846616, 4347.35353474, 5510.14654004, 7440.7461982 ,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8708.9160148 , 10630.30739223, 12147.11811342, 7149.37937935,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2657.8129831 ],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1769.85078017, 2644.39855553, 3480.74298654, 4810.57094062,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 5688.83339388, 6512.62254338, 7149.37937935, 7260.90962516,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2422.66762041],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2911.30717272]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06,\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e -5.24520874e-06])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 1360\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 130\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 136\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 361.51493714, 272.10309839, 192.29084196, 120.94057011,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.12129925])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cem\u003eAwesome!\u003c/em\u003e We now can recover \\(c\\) at each point by a nice helpful function:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSo, let us translate our list to the actual values consumed:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etolist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# we need to add the initial condition _k back to the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# inventory list\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"examples-of-output\"\u003eExamples of Output\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 
capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" 
data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[154.860597149863,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 152.989432556196,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 151.010433069881,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.201249715528,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 147.329750167852,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 145.539019666462,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 143.739371599600,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 141.984228587213,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 140.243839963791]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 
capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.2\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risky\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[388.525041338376,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.124420093987,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.632568775223,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.8644259086613,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.6330459746870,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 35.7667230511026,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 22.1970017374152,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 13.7754327365677,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
8.54930907023498]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a loosing stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 1, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 1, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 1, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 1, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 1, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 1, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 1, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([0., 0., 0., 0., 0., 0., 0., 0., 0.])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 10\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently: do nothing if we have a loosing cause.\u003c/p\u003e\n\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1.00\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is SUPER winning stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future 
matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[125.667556437602,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.474827418105,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 460.068836905327,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 868.972817783791,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4540.45893314523,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4219.93058738029,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3988.05775624984,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3996.89431939885,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3615.74982832315]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe made so much money that we are spending a lot of it and still spending it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_risk_appetite/","tags":null,"title":"NUS-ECON320 Inter-Temporal Choice"},{"categories":null,"contents":"Let\u0026rsquo;s begin. 
We want to create test for the linearity of a few assets, for whether or not they follow the CAPM.\nNote that we will be using the Sharpe-Linter version of CAPM:\n\\begin{equation} E[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})] \\end{equation}\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\nLet us begin. We will create a generic function to analyze some given stock.\nWe will first import our utilities\nimport pandas as pd import numpy as np Let\u0026rsquo;s load the data from our market (NYSE) as well as our 10 year t-bill data.\nt_bill = pd.read_csv(\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;) nyse = pd.read_csv(\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;) nyse.head() Date Close 0 11/7/2013 16:00:00 9924.37 1 11/8/2013 16:00:00 10032.14 2 11/11/2013 16:00:00 10042.95 3 11/12/2013 16:00:00 10009.84 4 11/13/2013 16:00:00 10079.89 Excellent. 
Let\u0026rsquo;s load in the data for that stock.\ndef load_stock(stock): return pd.read_csv(f\u0026#34;./linearity_test_data/{stock}.csv\u0026#34;) load_stock(\u0026#34;LMT\u0026#34;).head() Date Close 0 11/7/2013 16:00:00 136.20 1 11/8/2013 16:00:00 138.11 2 11/11/2013 16:00:00 137.15 3 11/12/2013 16:00:00 137.23 4 11/13/2013 16:00:00 137.26 And now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\n# load data df = { \u0026#34;Date\u0026#34;: nyse.Date, \u0026#34;NYSE\u0026#34;: nyse.Close, \u0026#34;TBill\u0026#34;: t_bill.Close, \u0026#34;LMT\u0026#34;: load_stock(\u0026#34;LMT\u0026#34;).Close, \u0026#34;TWTR\u0026#34;: load_stock(\u0026#34;TWTR\u0026#34;).Close, \u0026#34;MCD\u0026#34;: load_stock(\u0026#34;MCD\u0026#34;).Close } # convert to dataframe df = pd.DataFrame(df) # drop empty df.dropna(inplace=True) df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20 1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01 2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09 3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66 4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21 2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28 2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38 2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36 2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07 [2159 rows x 6 columns] Excellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\nlog_returns = df[[\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]].apply(np.log, inplace=True) df.loc[:, [\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]] = log_returns df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771 1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814 2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638 3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492 4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262 2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577 2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980 2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879 2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711 [2159 rows x 6 columns] We will now calculate the log daily returns. But before\u0026mdash;the dates are no longer relavent here so we drop them.\ndf Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771 1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814 2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638 3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492 4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089 ... ... ... ... ... ... ... 
2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262 2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577 2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980 2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879 2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711 [2159 rows x 6 columns] And now, the log returns! We will shift this data by one column and subtract.\nreturns = df.drop(columns=[\u0026#34;Date\u0026#34;]) - df.drop(columns=[\u0026#34;Date\u0026#34;]).shift(1) returns.dropna(inplace=True) returns NYSE TBill LMT TWTR MCD 1 0.010801 0.049646 0.013926 -0.075136 -0.001957 2 0.001077 0.001819 -0.006975 0.029570 0.000824 3 -0.003302 0.006161 0.000583 -0.023586 0.005854 4 0.006974 -0.015657 0.000219 0.016568 0.004597 5 0.005010 -0.008476 0.007476 0.047896 -0.005622 ... ... ... ... ... ... 2154 0.005785 0.004940 -0.023467 -0.014291 0.001349 2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685 2156 0.006282 0.064420 0.004112 0.015402 0.004403 2157 0.002626 0.034169 0.003575 0.006245 -0.008100 2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168 [2158 rows x 5 columns] We are now ready to run the correlation study.\nLet\u0026rsquo;s now subtract everything by the risk-free rate (dropping the rfr itself):\nrisk_free_excess = returns.drop(columns=\u0026#34;TBill\u0026#34;).apply(lambda x: x-returns.TBill) risk_free_excess NYSE LMT TWTR MCD 1 -0.038846 -0.035720 -0.124783 -0.051603 2 -0.000742 -0.008794 0.027751 -0.000995 3 -0.009463 -0.005577 -0.029747 -0.000307 4 0.022630 0.015875 0.032225 0.020254 5 0.013486 0.015952 0.056372 0.002854 ... ... ... ... ... 
2154 0.000845 -0.028406 -0.019231 -0.003591 2155 -0.021162 -0.037975 -0.043738 -0.047818 2156 -0.058138 -0.060308 -0.049017 -0.060017 2157 -0.031543 -0.030593 -0.027924 -0.042269 2158 0.040964 0.027215 0.010537 0.024422 [2158 rows x 4 columns] Actual Regression It is now time to perform the actual linear regression!\nimport statsmodels.api as sm Let\u0026rsquo;s work with Lockheed Martin first, fitting an ordinary least squares. Remember that the OLS functions reads the endogenous variable first (for us, the return of the asset.)\n# add a column of ones to our input market excess returns nyse_with_bias = sm.add_constant(risk_free_excess.NYSE) # perform linreg lmt_model = sm.OLS(risk_free_excess.LMT, nyse_with_bias).fit() lmt_model.summary() OLS Regression Results ============================================================================== Dep. Variable: LMT R-squared: 0.859 Model: OLS Adj. R-squared: 0.859 Method: Least Squares F-statistic: 1.312e+04 Date: Mon, 31 Oct 2022 Prob (F-statistic): 0.00 Time: 10:39:24 Log-Likelihood: 6318.9 No. Observations: 2158 AIC: -1.263e+04 Df Residuals: 2156 BIC: -1.262e+04 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0004 0.000 1.311 0.190 -0.000 0.001 NYSE 0.9449 0.008 114.552 0.000 0.929 0.961 ============================================================================== Omnibus: 423.969 Durbin-Watson: 1.965 Prob(Omnibus): 0.000 Jarque-Bera (JB): 11575.074 Skew: -0.160 Prob(JB): 0.00 Kurtosis: 14.341 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. 
Based on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being slightly undervarying that the market.\nWe can continue with the other stocks.\n# perform linreg mcd_model = sm.OLS(risk_free_excess.MCD, nyse_with_bias).fit() mcd_model.summary() OLS Regression Results ============================================================================== Dep. Variable: MCD R-squared: 0.887 Model: OLS Adj. R-squared: 0.887 Method: Least Squares F-statistic: 1.697e+04 Date: Mon, 31 Oct 2022 Prob (F-statistic): 0.00 Time: 10:39:24 Log-Likelihood: 6551.1 No. Observations: 2158 AIC: -1.310e+04 Df Residuals: 2156 BIC: -1.309e+04 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0003 0.000 1.004 0.315 -0.000 0.001 NYSE 0.9651 0.007 130.287 0.000 0.951 0.980 ============================================================================== Omnibus: 323.911 Durbin-Watson: 1.988 Prob(Omnibus): 0.000 Jarque-Bera (JB): 3032.550 Skew: 0.395 Prob(JB): 0.00 Kurtosis: 8.753 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Same thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. 
The food industry is probably a tougher business than that in defense.\nLastly, to analyze the recently delisted Twitter!\n# perform linreg twtr_model = sm.OLS(risk_free_excess.TWTR, nyse_with_bias).fit() twtr_model.summary() OLS Regression Results ============================================================================== Dep. Variable: TWTR R-squared: 0.522 Model: OLS Adj. R-squared: 0.522 Method: Least Squares F-statistic: 2357. Date: Mon, 31 Oct 2022 Prob (F-statistic): 0.00 Time: 10:39:24 Log-Likelihood: 4307.1 No. Observations: 2158 AIC: -8610. Df Residuals: 2156 BIC: -8599. Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const -0.0002 0.001 -0.346 0.730 -0.002 0.001 NYSE 1.0173 0.021 48.549 0.000 0.976 1.058 ============================================================================== Omnibus: 661.205 Durbin-Watson: 1.986 Prob(Omnibus): 0.000 Jarque-Bera (JB): 15925.609 Skew: -0.883 Prob(JB): 0.00 Kurtosis: 16.191 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Evidently, Twitter is much more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) Furthermore, we have a positive beta value: that the asset is more variable than the market.\nmanual regression We can also use the betas formula to manually calculate what we expect for the beta values (i.e. 
as if they were one IID random variable.)\nrisk_free_cov = risk_free_excess.cov() risk_free_cov NYSE LMT TWTR MCD NYSE 0.001143 0.001080 0.001163 0.001103 LMT 0.001080 0.001188 0.001116 0.001083 TWTR 0.001163 0.001116 0.002264 0.001155 MCD 0.001103 0.001083 0.001155 0.001200 Finally, to construct the beta values. Recall that:\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nand that:\n\\begin{equation} Var[X] = Cov[X,X], \\forall X \\end{equation}\n# get the market variance (covariance with itself) market_variation = risk_free_cov.NYSE.NYSE # calculate betas betas = {\u0026#34;LMT\u0026#34;: (risk_free_cov.LMT.NYSE/market_variation), \u0026#34;TWTR\u0026#34;: (risk_free_cov.TWTR.NYSE/market_variation), \u0026#34;MCD\u0026#34;: (risk_free_cov.MCD.NYSE/market_variation)} # and make dataframe betas = pd.Series(betas) betas LMT 0.944899 TWTR 1.017294 MCD 0.965081 dtype: float64 Apparently, all of our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\nFund creation We will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\n\u0026ldquo;Equal-Parts Fund\u0026rdquo; (\u0026ldquo;Fund 1\u0026rdquo;) We will now create a fund in equal parts. Here it is:\nfund_1_returns = returns.LMT + returns.TWTR + returns.MCD fund_1_returns 1 -0.063167 2 0.023420 3 -0.017149 4 0.021384 5 0.049750 ... 2154 -0.036409 2155 -0.021132 2156 0.023917 2157 0.001720 2158 -0.014596 Length: 2158, dtype: float64 We will calculate the excess returns of this fund:\nfund_1_excess = fund_1_returns-returns.TBill fund_1_excess 1 -0.112813 2 0.021600 3 -0.023310 4 0.037041 5 0.058226 ... 
2154 -0.041349 2155 -0.057265 2156 -0.040503 2157 -0.032449 2158 0.010994 Length: 2158, dtype: float64 And then perform a regression\n# perform linreg fund_1_model = sm.OLS(fund_1_excess, nyse_with_bias).fit() fund_1_model.summary() OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.473 Model: OLS Adj. R-squared: 0.473 Method: Least Squares F-statistic: 1935. Date: Mon, 31 Oct 2022 Prob (F-statistic): 3.01e-302 Time: 10:39:24 Log-Likelihood: 3869.5 No. Observations: 2158 AIC: -7735. Df Residuals: 2156 BIC: -7724. Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0007 0.001 0.841 0.401 -0.001 0.002 NYSE 1.1290 0.026 43.993 0.000 1.079 1.179 ============================================================================== Omnibus: 600.456 Durbin-Watson: 2.022 Prob(Omnibus): 0.000 Jarque-Bera (JB): 8416.514 Skew: -0.914 Prob(JB): 0.00 Kurtosis: 12.501 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Surprisingly, we have now created a significantly riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\nA Better Fund To me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. 
To do this, let\u0026rsquo;s create a generic linear combination of the assets.\nimport sympy as sym x = sym.Symbol(\u0026#39;x\u0026#39;) y = sym.Symbol(\u0026#39;y\u0026#39;) z = sym.Symbol(\u0026#39;z\u0026#39;) fund_2_returns = x*returns.LMT + y*returns.TWTR + z*returns.MCD fund_2_returns 1 0.0139260753744255*x - 0.0751364261353569*y - ... 2 -0.00697525170622448*x + 0.0295704573211193*y ... 3 0.000583132897928884*x - 0.0235859990058791*y ... 4 0.000218587198947517*x + 0.016568426347233*y +... 5 0.00747599199607762*x + 0.0478955096700351*y -... ... 2154 -0.0234665578621085*x - 0.0142913301107561*y +... 2155 -0.00184214468578059*x - 0.0076045993852194*y ... 2156 0.00411172646842317*x + 0.0154024001854269*y +... 2157 0.00357547337231878*x + 0.0062445563228315*y -... 2158 0.00162509910496933*x - 0.0150529686289622*y -... Length: 2158, dtype: object Excellent. We will also calculate the excess returns of this fund:\nfund_2_excess = fund_2_returns-returns.TBill Y = fund_2_excess.to_numpy() Y [0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039 -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 0.00181917459665826 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536 ... 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043] We cast this type to a numpy array because we are about to perform some matrix operations upon it.\nNow, let us perform the actual linear regression ourselves. 
Recall that the pseudoinverse linear regression estimator is:\n\\begin{equation} \\beta = (X^{T}X)^{-1}X^{T}Y \\end{equation}\nWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\nX = nyse_with_bias.to_numpy() X [[ 1.00000000e+00 -3.88457302e-02] [ 1.00000000e+00 -7.42217926e-04] [ 1.00000000e+00 -9.46284244e-03] ... [ 1.00000000e+00 -5.81378271e-02] [ 1.00000000e+00 -3.15429207e-02] [ 1.00000000e+00 4.09643405e-02]] We now have our matrices, let\u0026rsquo;s perform the linear regression!\nlinear_model = np.linalg.inv((X.transpose()@X))@X.transpose()@Y linear_model [0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281] Excellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\nWe can will solve for a combination of solutions to give us specific values of returns vs risk. We will set the asset to learn exactly as much as the market (i.e. no bias).\ndeviance_expr = linear_model[0] deviance_expr 0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344 We will now try to make variance exactly as much as that in the market.\nrisk_expr = linear_model[1] - 1 risk_expr 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719 Let us now calculate the boundary condition of our optimization problem by solving an expression in these two expressions.\nsolution = sym.solvers.solve([deviance_expr, risk_expr], x,y,z) solution {x: 0.412737013327711 - 0.819584899551304*z, y: 0.693765220909132 - 0.24067066980814*z} Excellent. 
Let us recalculate our optimization objective (\u0026ldquo;deviance\u0026rdquo;\u0026mdash;return) in terms of these new solutions. We aim now to maximize this expression by minimizing (i.e. our optimizer minimizes) the negative thereof\u0026mdash;recalling that scypy works as a minimizer.\noptim_objective = deviance_expr.subs(solution)-1e2 optim_objective -5.04831636563563e-19*z - 100.0 We can now use this value to solve for a \\(z\\) value.\noptim_solution = sym.solvers.solve([optim_objective], z) optim_solution {z: -1.98085842402250e+20} Excellent. We can now solve for the rest of our values.\nz0 = float(optim_solution[z]) x0 = solution[x].subs(z, z0) y0 = solution[y].subs(z, z0) (x0,y0,z0) (1.62348165247784e+16, 4.76734523704593e+15, -1.980858424022502e+16) This would create the following plan:\n# solution fund_2_nobias = x0*returns.LMT + y0*returns.TWTR + z0*returns.MCD fund_2_nobias.mean() 0.009168283711770158 Recall that this is the performance of the balanced portfolio:\nfund_1_returns.mean() 0.0009224705380695683 Finally, let\u0026rsquo;s plot the prices of our various funds:\nimport matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns from datetime import datetime sns.set() fund_2_price = x0*df.LMT + y0*df.TWTR + z0*df.MCD fund_1_price = df.LMT + df.TWTR fund_l_price = df.LMT fund_t_price = df.TWTR dates = df.Date.apply(lambda x:datetime.strptime(x, \u0026#34;%m/%d/%Y %H:%M:%S\u0026#34;)) sns.lineplot(x=dates, y=fund_2_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_1_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_l_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_t_price.apply(sym.Float).astype(float)) plt.gca().xaxis.set_major_locator(mdates.YearLocator()) plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\u0026#39;%Y\u0026#39;)) plt.gca().set_ylabel(\u0026#34;Price\u0026#34;) plt.show() None Recall that we didn\u0026rsquo;t actually buy any 
MacDonald\u0026rsquo;s. So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\nOur portfolio works surprisingly well!\n","html":"\u003cp\u003eLet\u0026rsquo;s begin. We want to create test for the linearity of a few assets, for whether or not they follow the CAPM.\u003c/p\u003e\n\u003cp\u003eNote that we will be using the Sharpe-Linter version of CAPM:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\u003c/p\u003e\n\u003cp\u003eLet us begin. We will create a generic function to analyze some given stock.\u003c/p\u003e\n\u003cp\u003eWe will first import our utilities\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s load the data from our market (NYSE) 
as well as our 10 year t-bill data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et_bill\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let\u0026rsquo;s load in the data for that stock.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 136.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 138.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 137.15\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 137.23\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 
16:00:00 137.26\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# load data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et_bill\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# convert to dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# drop empty\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now calculate the log daily returns. 
But before\u0026mdash;the dates are no longer relavent here so we drop them.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, the log returns! 
We will shift this data by one column and subtract.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.010801 0.049646 0.013926 -0.075136 -0.001957\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.001077 0.001819 -0.006975 0.029570 0.000824\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.003302 0.006161 0.000583 -0.023586 0.005854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.006974 -0.015657 0.000219 0.016568 0.004597\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.005010 -0.008476 0.007476 0.047896 -0.005622\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.005785 0.004940 -0.023467 -0.014291 0.001349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.006282 0.064420 0.004112 0.015402 0.004403\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.002626 0.034169 0.003575 0.006245 -0.008100\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe are now ready to run the correlation study.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now subtract everything by the risk-free rate (dropping the rfr itself):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.038846 -0.035720 -0.124783 -0.051603\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.000742 -0.008794 0.027751 -0.000995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.009463 -0.005577 -0.029747 -0.000307\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.022630 0.015875 0.032225 0.020254\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.013486 0.015952 
0.056372 0.002854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.000845 -0.028406 -0.019231 -0.003591\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021162 -0.037975 -0.043738 -0.047818\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.058138 -0.060308 -0.049017 -0.060017\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.031543 -0.030593 -0.027924 -0.042269\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.040964 0.027215 0.010537 0.024422\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"actual-regression\"\u003eActual Regression\u003c/h2\u003e\n\u003cp\u003eIt is now time to perform the actual linear regression!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatsmodels.api\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s work with Lockheed Martin first, fitting an ordinary least squares. 
Remember that the OLS functions reads the \u003cem\u003eendogenous\u003c/em\u003e variable first (for us, the return of the asset.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# add a column of ones to our input market excess returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eadd_constant\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: LMT R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. 
R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.312e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 6318.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -1.263e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.262e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0004 0.000 1.311 0.190 -0.000 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9449 0.008 114.552 0.000 0.929 0.961\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 423.969 Durbin-Watson: 1.965\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 11575.074\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: -0.160 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 14.341 Cond. No. 29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBased on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. 
Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being \u003cem\u003eslightly\u003c/em\u003e undervarying that the market.\u003c/p\u003e\n\u003cp\u003eWe can continue with the other stocks.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: MCD R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.697e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 6551.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -1.310e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.309e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0003 0.000 1.004 0.315 -0.000 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9651 0.007 130.287 0.000 0.951 0.980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 323.911 Durbin-Watson: 1.988\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 3032.550\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: 0.395 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 8.753 Cond. No. 
29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSame thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. The food industry is probably a tougher business than that in defense.\u003c/p\u003e\n\u003cp\u003eLastly, to analyze the recently delisted Twitter!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: TWTR R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. 
R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 2357.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 4307.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -8610.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -8599.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst -0.0002 0.001 -0.346 0.730 -0.002 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.0173 0.021 48.549 0.000 0.976 1.058\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 661.205 Durbin-Watson: 1.986\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 15925.609\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: -0.883 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 16.191 Cond. No. 29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, Twitter is \u003cem\u003emuch\u003c/em\u003e more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) Furthermore, we have a positive beta value: that the asset is more variable than the market.\u003c/p\u003e\n\u003ch2 id=\"manual-regression\"\u003emanual regression\u003c/h2\u003e\n\u003cp\u003eWe can also use the betas formula to manually calculate what we \u003cem\u003eexpect\u003c/em\u003e for the beta values (i.e. 
as if they were one IID random variable.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecov\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.001143 0.001080 0.001163 0.001103\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.001080 0.001188 0.001116 0.001083\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 0.001163 0.001116 0.002264 0.001155\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.001103 0.001083 0.001155 0.001200\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, to construct the beta values. 
Recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar[X] = Cov[X,X], \\forall X\n\\end{equation}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# get the market variance (covariance with itself)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# calculate betas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# and make dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.944899\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 1.017294\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.965081\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eApparently, all of our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. 
Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\u003c/p\u003e\n\u003ch2 id=\"fund-creation\"\u003eFund creation\u003c/h2\u003e\n\u003cp\u003eWe will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\u003c/p\u003e\n\u003ch3 id=\"equal-parts-fund--fund-1\"\u003e\u0026ldquo;Equal-Parts Fund\u0026rdquo; (\u0026ldquo;Fund 1\u0026rdquo;)\u003c/h3\u003e\n\u003cp\u003eWe will now create a fund in equal parts. Here it is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.063167\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.023420\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.017149\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.021384\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.049750\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.036409\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021132\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.023917\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.001720\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 -0.014596\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.112813\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.021600\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.023310\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.037041\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.058226\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.041349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.057265\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.040503\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.032449\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.010994\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: 
float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then perform a regression\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: y R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1935.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 3.01e-302\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 3869.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -7735.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -7724.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0007 0.001 0.841 0.401 -0.001 0.002\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.1290 0.026 43.993 0.000 1.079 1.179\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 600.456 Durbin-Watson: 2.022\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 8416.514\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: -0.914 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 12.501 Cond. No. 
29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSurprisingly, we have now created a \u003cstrong\u003esignificantly\u003c/strong\u003e riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\u003c/p\u003e\n\u003ch3 id=\"a-better-fund\"\u003eA Better Fund\u003c/h3\u003e\n\u003cp\u003eTo me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. 
To do this, let\u0026rsquo;s create a generic linear combination of the assets.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esympy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;x\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;z\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.0139260753744255*x - 0.0751364261353569*y - ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.00697525170622448*x + 0.0295704573211193*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.000583132897928884*x - 0.0235859990058791*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.000218587198947517*x + 0.016568426347233*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.00747599199607762*x + 0.0478955096700351*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.0234665578621085*x - 0.0142913301107561*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.00184214468578059*x - 0.0076045993852194*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.00411172646842317*x + 0.0154024001854269*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.00357547337231878*x + 0.0062445563228315*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.00162509910496933*x - 0.0150529686289622*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
We will also calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 
0.00181917459665826\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe cast this type to a numpy array because we are about to perform some matrix operations upon it.\u003c/p\u003e\n\u003cp\u003eNow, let us perform the actual linear regression ourselves. 
Recall that the pseudoinverse linear regression estimator is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta = (X^{T}X)^{-1}X^{T}Y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[ 1.00000000e+00 -3.88457302e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -7.42217926e-04]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -9.46284244e-03]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -5.81378271e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -3.15429207e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 4.09643405e-02]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe now have our matrices, let\u0026rsquo;s perform the linear regression!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elinalg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etranspose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X.transpose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@Y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\u003c/p\u003e\n\u003cp\u003eWe can will solve for a combination of solutions to give us specific values of returns vs risk. We will set the asset to learn exactly as much as the market (i.e. 
no bias).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now try to make variance exactly as much as that in the market.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet us now calculate the boundary condition of our optimization problem by solving an expression in these two expressions.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolvers\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{x: 0.412737013327711 - 0.819584899551304*z, y: 0.693765220909132 - 0.24067066980814*z}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let us recalculate our optimization objective (\u0026ldquo;deviance\u0026rdquo;\u0026mdash;return) in terms of these new solutions. We aim now to maximize this expression by \u003cem\u003eminimizing\u003c/em\u003e (i.e. 
our optimizer minimizes) the negative thereof\u0026mdash;recalling that scypy works as a minimizer.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_objective\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_objective\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-5.04831636563563e-19*z - 100.0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can now use this value to solve for a \\(z\\) value.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eoptim_solution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolvers\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_objective\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{z: -1.98085842402250e+20}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
We can now solve for the rest of our values.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_solution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1.62348165247784e+16, 4.76734523704593e+15, -1.980858424022502e+16)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis would create the following plan:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003efund_2_nobias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_nobias\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.009168283711770158\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that this is the performance of the balanced portfolio:\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0009224705380695683\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, let\u0026rsquo;s plot the \u003cem\u003eprices\u003c/em\u003e of our various funds:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.dates\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003emdates\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eseaborn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estrptime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;%m/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e%d\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e/%Y %H:%M:%S\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_major_locator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eYearLocator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_major_formatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDateFormatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;%Y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_ylabel\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Price\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_23-33-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that we didn\u0026rsquo;t actually buy any MacDonald\u0026rsquo;s. So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\u003c/p\u003e\n\u003cp\u003eOur portfolio works surprisingly well!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_linearity_tests/","tags":null,"title":"NUS-ECON320 Linearity Tests"},{"categories":null,"contents":"The code created for this problem can be found here.\nProblem 1 Let\u0026rsquo;s begin with a normal function:\n\\begin{equation} f(x) = (\\sqrt{x}-1)^{2} \\end{equation}\nTaking just a normal Riemann sum, we see that, as expected, it converges to about \\(0.167\\) by the following values between bounds \\([0,1]\\) at different \\(N\\):\nN Value 10 0.23 100 0.172 1000 0.167 10000 0.167 100000 0.167 Problem 2 First, as we are implementing a discrete random walk, here\u0026rsquo;s a fun example; \\(p=0.51\\), \\(\\epsilon=0.001\\).\nWhat is particularly interesting about this case is that, due the probability of change being slightly above \\(50\\%\\), we can see that the sequence has an overall positive growth pattern; however, as far as daily returns is concerned, there is almost no 
value from day-to-day gains in the market.\nTo actually analyze the our expected value for the probability distributions in number of steps \\(T\\) to travel from \\(0\\) to \\(1\\), as a function of \\(p, \\epsilon\\), we perform the following computation:\nExpected Value of T We set:\n\\begin{equation} \\Delta = \\begin{cases} +\\epsilon, P=p\\\\ -\\epsilon, P=1-p \\end{cases} \\end{equation}\nTherefore, for \\(T\\) as a function from \\(0\\) to \\(1\\), we have:\n\\begin{align} E(T)\u0026amp;=\\frac{1}{E\\qty(\\Delta) } \\\\ \u0026amp;= \\frac{1}{p\\epsilon-(1-p)\\epsilon } \\\\ \u0026amp;= \\frac{1}{\\epsilon (2p-1)} \\end{align}\nNow we will calculate the Variance in \\(T\\):\n\\begin{align} Var(T) \u0026amp;= \\frac{1}{Var(\\Delta)} \\end{align}\nWhere, \\(Var(\\Delta)\\) is calculated by:\n\\begin{align} Var(\\Delta) \u0026amp;= E(\\Delta^{2})-E^{2}(\\Delta) \\\\ \u0026amp;= \\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2} \\end{align}\nAnd therefore:\n\\begin{equation} Var(T) = \\frac{1}{\\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2}} \\end{equation}\nProblem 3 Yes, as we expect, that as \\(\\epsilon\\) decreases, the actual steps \\(T\\) it takes to travel from \\([0,1]\\) increases by an order of magnitude. Given \\(10\\) trials, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01\\}\\), we have that:\n\\(\\epsilon\\) Mean \\(T\\) Std. \\(T\\) 0.1 570.8 1051.142 0.01 3848.2 1457.180 We can see this on the expected value calculations as well, that:\n\\begin{equation} \\lim_{\\epsilon \\to 0} E(T) = \\frac{1}{\\epsilon (2p-1)} = \\infty \\end{equation}\nThis is not true for the case of \\(p=0.5\\), where the limit will create an undefined behavior with \\(0\\infty\\), and l\u0026rsquo;hospital\u0026rsquo;s rule upon \\(\\epsilon\\) doesn\u0026rsquo;t apply here.\nProblem 4 Yes, the quadratic variation converges towards \\(0\\). 
Similarly as before, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01,0.001\\}\\), our quadratic variations are:\n\\(\\epsilon\\) quadratic variation 0.1 5.02 0.01 0.32 0.001 0.05 It seems like that, as long as the path terminates and epsilon becomes smaller, the sum of squared difference will converge towards \\(0\\).\nThis means that, for all \\(p\u0026gt;0.5\\), the squared differences will be convergent. However, for \\(p\\leq 0.5\\), the squared differences are arguably still convergent but the sequence doesn\u0026rsquo;t terminate.\nProblem 5 To allow negative values, we changed the function to:\n\\begin{equation} f(x) = ({x}-1)^{2} \\end{equation}\nThe results of running the three expressions with \\(p=0.51\\), \\(\\epsilon=\\{0.1, 0.01, 0.001\\}\\), similarly to before, respectively are as follows:\n\\(\\epsilon\\) \\(f(x_{i})\\) \\(f(x_{i+1})\\) \\(f\\qty(\\frac{x_{i+1}-x_{i}}{2})\\) 0.1 3.03 -2.37 -1.85 0.01 1.7 -1 -0.17 0.001 0.359 0.307 0.938 It seems like\u0026mdash;while all three of these results converge\u0026mdash;they converge to distinctly different limits. 
Of course, this result also depends on \\(p\\), as the probability determines whether the path is even complete in the first place, which will of course affect the convergence here.\n","html":"\u003cp\u003eThe code created for this problem can be found \u003ca href=\"https://github.com/SkoolNotes/ECON320-BrownianMotion/blob/master/brownian.py\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"problem-1\"\u003eProblem 1\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s begin with a normal function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = (\\sqrt{x}-1)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking just a normal Riemann sum, we see that, as expected, it converges to about \\(0.167\\) by the following values between bounds \\([0,1]\\) at different \\(N\\):\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eN\u003c/th\u003e\n\u003cth\u003eValue\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e10\u003c/td\u003e\n\u003ctd\u003e0.23\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e100\u003c/td\u003e\n\u003ctd\u003e0.172\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1000\u003c/td\u003e\n\u003ctd\u003e0.167\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e10000\u003c/td\u003e\n\u003ctd\u003e0.167\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e100000\u003c/td\u003e\n\u003ctd\u003e0.167\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"problem-2\"\u003eProblem 2\u003c/h2\u003e\n\u003cp\u003eFirst, as we are implementing a discrete random walk, here\u0026rsquo;s a fun example; \\(p=0.51\\), \\(\\epsilon=0.001\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-25_22-33-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWhat is particularly interesting about this case is that, due the probability of change being slightly above \\(50\\%\\), we 
can see that the sequence has an overall positive growth pattern; however, as far as daily returns is concerned, there is almost no value from day-to-day gains in the market.\u003c/p\u003e\n\u003cp\u003eTo actually analyze the our expected value for the probability distributions in number of steps \\(T\\) to travel from \\(0\\) to \\(1\\), as a function of \\(p, \\epsilon\\), we perform the following computation:\u003c/p\u003e\n\u003ch3 id=\"expected-value-of-t\"\u003eExpected Value of T\u003c/h3\u003e\n\u003cp\u003eWe set:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta = \\begin{cases}\n+\\epsilon, P=p\\\\\n-\\epsilon, P=1-p\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, for \\(T\\) as a function from \\(0\\) to \\(1\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nE(T)\u0026amp;=\\frac{1}{E\\qty(\\Delta) } \\\\\n\u0026amp;= \\frac{1}{p\\epsilon-(1-p)\\epsilon } \\\\\n\u0026amp;= \\frac{1}{\\epsilon (2p-1)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow we will calculate the Variance in \\(T\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(T) \u0026amp;= \\frac{1}{Var(\\Delta)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWhere, \\(Var(\\Delta)\\) is calculated by:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(\\Delta) \u0026amp;= E(\\Delta^{2})-E^{2}(\\Delta) \\\\\n\u0026amp;= \\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar(T) = \\frac{1}{\\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"problem-3\"\u003eProblem 3\u003c/h2\u003e\n\u003cp\u003eYes, as we expect, that as \\(\\epsilon\\) decreases, the actual steps \\(T\\) it takes to travel from \\([0,1]\\) increases by an order of magnitude. 
Given \\(10\\) trials, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01\\}\\), we have that:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(\\epsilon\\)\u003c/th\u003e\n\u003cth\u003eMean \\(T\\)\u003c/th\u003e\n\u003cth\u003eStd. \\(T\\)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.1\u003c/td\u003e\n\u003ctd\u003e570.8\u003c/td\u003e\n\u003ctd\u003e1051.142\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.01\u003c/td\u003e\n\u003ctd\u003e3848.2\u003c/td\u003e\n\u003ctd\u003e1457.180\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe can see this on the expected value calculations as well, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{\\epsilon \\to 0} E(T) = \\frac{1}{\\epsilon (2p-1)} = \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is not true for the case of \\(p=0.5\\), where the limit will create an undefined behavior with \\(0\\infty\\), and l\u0026rsquo;hospital\u0026rsquo;s rule upon \\(\\epsilon\\) doesn\u0026rsquo;t apply here.\u003c/p\u003e\n\u003ch2 id=\"problem-4\"\u003eProblem 4\u003c/h2\u003e\n\u003cp\u003eYes, the quadratic variation converges towards \\(0\\). 
Similarly as before, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01,0.001\\}\\), our quadratic variations are:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(\\epsilon\\)\u003c/th\u003e\n\u003cth\u003equadratic variation\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.1\u003c/td\u003e\n\u003ctd\u003e5.02\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.01\u003c/td\u003e\n\u003ctd\u003e0.32\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.001\u003c/td\u003e\n\u003ctd\u003e0.05\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eIt seems like that, as long as the path terminates and epsilon becomes smaller, the sum of squared difference will converge towards \\(0\\).\u003c/p\u003e\n\u003cp\u003eThis means that, for all \\(p\u0026gt;0.5\\), the squared differences will be convergent. However, for \\(p\\leq 0.5\\), the squared differences are arguably still convergent but the sequence doesn\u0026rsquo;t terminate.\u003c/p\u003e\n\u003ch2 id=\"problem-5\"\u003eProblem 5\u003c/h2\u003e\n\u003cp\u003eTo allow negative values, we changed the function to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = ({x}-1)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe results of running the three expressions with \\(p=0.51\\), \\(\\epsilon=\\{0.1, 0.01, 0.001\\}\\), similarly to before, respectively are as 
follows:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(\\epsilon\\)\u003c/th\u003e\n\u003cth\u003e\\(f(x_{i})\\)\u003c/th\u003e\n\u003cth\u003e\\(f(x_{i+1})\\)\u003c/th\u003e\n\u003cth\u003e\\(f\\qty(\\frac{x_{i+1}-x_{i}}{2})\\)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.1\u003c/td\u003e\n\u003ctd\u003e3.03\u003c/td\u003e\n\u003ctd\u003e-2.37\u003c/td\u003e\n\u003ctd\u003e-1.85\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.01\u003c/td\u003e\n\u003ctd\u003e1.7\u003c/td\u003e\n\u003ctd\u003e-1\u003c/td\u003e\n\u003ctd\u003e-0.17\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.001\u003c/td\u003e\n\u003ctd\u003e0.359\u003c/td\u003e\n\u003ctd\u003e0.307\u003c/td\u003e\n\u003ctd\u003e0.938\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eIt seems like\u0026mdash;while all three of these results converge\u0026mdash;they converge to distinctly different limits. Of course, this result also depends on \\(p\\), as the probability determines whether the path is even complete in the first place, which will of course affect the convergence here.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_stochastic_integration/","tags":null,"title":"NUS-ECON320 Stochastic Integration"},{"categories":null,"contents":"Let \\(X\\) denote price and \\(Y\\) denote volatility. The two objects obey the following process:\n\\begin{equation} \\begin{cases} \\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\ \\dd{Y} = \\sigma Y \\dd{B} \\end{cases} \\end{equation}\nwhere, \\(W\\) and \\(B\\) are correlated Brownian motions with correlation \\(\\rho\\) \u0026mdash; \\(E[(\\dd{W})(\\dd{B})] = \\rho \\dd{t}\\).\nLet\u0026rsquo;s work with \\(Y\\) first. We understand that \\(Y\\) is some continuous variable \\(e^{a}\\). Therefore, \\(\\dv{Y}{t}=ae^{a}\\). Therefore, \\(dY = ae^{a}dt\\). 
Finally, then \\(\\frac{\\dd{Y}}{Y} = \\frac{ae^{a}}{e^{a}}\\dd{t} = a\\).\nFinally, then, because we defined \\(Y=e^{a} \\implies \\ln Y = a = \\frac{\\dd{Y}}{Y}\\).\nSo, we have that:\n\\begin{align} \u0026amp;\\dd{Y} = \\sigma Y\\dd{B} \\\\ \\Rightarrow\\ \u0026amp; \\dd{\\log Y} = \\frac{\\sigma Y\\dd{B}}{Y} = \\sigma \\dd{B} \\end{align}\nThis tells that the change in log returns in \\(Y\\) is normal (as \\(B\\) is a Brownian Motion), with a standard deviation of \\(\\sigma\\). Therefore:\n\\begin{equation} \\dd{\\log Y} \\sim \\mathcal{N}(0, \\sigma^{2} \\dd{t}) \\end{equation}\nWe therefore see that the log-returns of \\(Y\\) is a normal with variance \\(\\sigma^{2}\\), making \\(Y\\) itself a Brownian Motion with center \\(0\\) and variance \\(\\sigma^{2}\\).\nSo now, tackling the expression above in \\(X\\), we will do the same exact thing as above and divide by \\(X\\):\n\\begin{equation} \\dd{\\log X} = \\mu \\dd{t} + Y\\dd{W} \\end{equation}\nSo we can see that \\(X\\) is a Geometric Brownian Motion as a sum of two random variables\u0026mdash;its volatility is determined by \\(Y\\) with a time-drift \\(\\mu \\dd{t}\\).\nWe see that we are almost ready to have an analytical solution here, because the top expression is applying some function \\(f=\\log\\) to a stochastic differential equation by time; however, the right side \\(Y\\) here is not quite a constant (it is itself a stochastic process), so we can\u0026rsquo;t simply apply an Itô Intergral and call it a day.\nSo instead, we will proceed to a Monte-Carlo simulation of the results to verify as much as we can.\nWe will begin by setting the sane values for variances\u0026mdash;having \\(0.1\\%\\) drift and \\(1\\%\\) variance in variance, and the two Brownian motions being inverses of each other \\(\\rho = 0.5\\).\nmu = 0.001 sigma = 0.01 rho = 0.5 (mu,sigma,rho) (0.001, 0.01, 0.5) We will seed a standard Brownian motion; as the two random motions are covariate, we can use the value of one to 
generate another: therefore we will return both at once.\nfrom numpy.random import normal def dbdw(): dB = normal() dW = dB + normal(0, (1-rho)**2) return (dB, dW) dbdw() (-1.0246010237177643, -1.281335746614678) Excellent.\nWe will now simulate the system we were given:\n\\begin{equation} \\begin{cases} \\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\ \\dd{Y} = \\sigma Y \\dd{B} \\end{cases} \\end{equation}\nLet\u0026rsquo;s set the number of trials to \\(10000\\).\nN = 1000 We will measure the convergence of \\(\\bar{\\dd{X}}\\) and \\(\\bar{\\dd{Y}}\\): we will tally each value at each time \\(t\\) as well as compare their expected values over time.\nWe will first seed our systems at \\(1\\%\\) variance and \\(1\\) dollar of price.\nX = 1 Y = 0.01 Now, it\u0026rsquo;s actual simulation time!\n# history of Y and X X_hist = [] Y_hist = [] # history of dx dX_hist = [] dY_hist = [] # current expected value EdX = 0 EdY = 0 # difference in E EdX_diff = 0 EdY_diff = 0 # for n loops, we simulate for _ in range(N): # get a source of randmess dB, dW = dbdw() # get the current dx and dw _dX = mu*X+X*Y*dW _dY = sigma*Y*dB # apply it X += _dX Y += _dY # tally it Y_hist.append(Y) X_hist.append(X) dX_hist.append(_dX) dY_hist.append(_dY) # calculate new expected value # we don\u0026#39;t store it immediately b/c we want to check convergence _EdX = sum(dX_hist)/len(dX_hist) _EdY = sum(dY_hist)/len(dY_hist) EdX_diff = abs(_EdX-EdX) EdY_diff = abs(_EdY-EdY) # store new expected value EdX = _EdX EdY = _EdY Let\u0026rsquo;s observe a few values! For starters, let\u0026rsquo;s measure our new expected values.\nEdX 0.0013333651336800837 EdY -1.225482645599256e-06 And, let\u0026rsquo;s check if we have converged by seeing if the difference is a reasonably small value:\n(EdX_diff, EdY_diff) (2.578663659035343e-05, 1.2183875816528115e-07) Looks like both of our variables have converged. Now, let\u0026rsquo;s plot a few things. 
Let\u0026rsquo;s first build a table with our data.\nimport pandas as pd data = pd.DataFrame({\u0026#34;price\u0026#34;: X_hist, \u0026#34;variance\u0026#34;: Y_hist}) data[\u0026#34;time\u0026#34;] = data.index data price variance time 0 0.998644 0.009974 0 1 0.980393 0.009796 1 2 0.998355 0.009967 2 3 0.994514 0.009913 3 4 1.001363 0.009961 4 .. ... ... ... 995 2.323640 0.008778 995 996 2.321473 0.008715 996 997 2.343427 0.008818 997 998 2.306271 0.008654 998 999 2.333365 0.008775 999 [1000 rows x 3 columns] We will use this to continue the rest of our analysis. For data augmentation, we will also calculate the natural logs of the change to get the rate of change.\nimport numpy as np data[\u0026#34;price_log\u0026#34;] = np.log(data.price) data[\u0026#34;variance_log\u0026#34;] = np.log(data.variance) data[\u0026#34;price_log_change\u0026#34;] = data.price_log - data.price_log.shift(1) data[\u0026#34;variance_log_change\u0026#34;] = data.variance_log - data.variance_log.shift(1) # drop the first row we have w/o change data = data.dropna() data price variance ... price_log_change variance_log_change 1 0.980393 0.009796 ... -0.018444 -0.018005 2 0.998355 0.009967 ... 0.018155 0.017332 3 0.994514 0.009913 ... -0.003855 -0.005443 4 1.001363 0.009961 ... 0.006863 0.004801 5 0.991306 0.009895 ... -0.010094 -0.006639 .. ... ... ... ... ... 995 2.323640 0.008778 ... 0.002148 0.002124 996 2.321473 0.008715 ... -0.000933 -0.007227 997 2.343427 0.008818 ... 0.009413 0.011765 998 2.306271 0.008654 ... -0.015983 -0.018804 999 2.333365 0.008775 ... 
0.011680 0.013827 [999 rows x 7 columns] Let\u0026rsquo;s begin by plotting what we have:\nimport seaborn as sns import matplotlib.pyplot as plt sns.set() We will plot price and variation on two axes.\nplt.gcf().clear() sns.lineplot(x=data.time, y=data.price, color=\u0026#34;g\u0026#34;) ax2 = plt.twinx() sns.lineplot(x=data.time, y=data.variance, color=\u0026#34;b\u0026#34;, ax=ax2) plt.show() Where, the blue line represents the percent variance over time and the green line represents the price. Given the \\(0.1\\%\\) drift we provided, we can see that our simulated market grows steadily in the 1000 data point.\nWe can then plot the log (percent) changes.\nplt.gcf().clear() sns.lineplot(x=data.time, y=data.price_log_change, color=\u0026#34;g\u0026#34;) ax2 = plt.twinx() sns.lineplot(x=data.time, y=data.variance_log_change, color=\u0026#34;b\u0026#34;, ax=ax2) plt.show() As you can see\u0026mdash;we have fairly strong random variables, centered around \\(0\\). Having verified that our drift and variables behave in the way that we expect, we can proceed with analysis.\nWe can use a single-variable \\(t\\) test to figure the \\(99\\%\\) confidence band of the result. To do this, we first need to calculate the mean and standardized deviation of the price percent change (log difference).\nlog_mean, log_std = (data.price_log_change.mean(), data.price_log_change.std()) (log_mean, log_std) (0.0008495184126335735, 0.008471735971085885) And now, we will calculate the\nfrom scipy.stats import t lower_bound, upper_bound = t.interval(0.99, len(data)-1, loc=log_mean, scale=log_std) lower_bound -0.021014037766751738 Therefore, with \\(99\\%\\) confidence, we can say that our asset\u0026mdash;given its current parameters, and an \\(N=1000\\) Monte-Carlo simulation\u0026mdash;will not have a more than \\(2.1\\%\\) drop in value.\nWe will use a hedged option to minimize loss. 
We will use this value to determine the maximum loss for an European put option, maturing in \\(T\\) time, such that the exercise thereof will be hedged against drops of asset price.\nFirst, we will determine the cost of a correctly hedged European put option.\nWe will define \\(S_{0}\\) as the current price of the asset. We will use \\(P\\) as the price of the put option.\nWe desire the strike price of the option to be:\n\\begin{equation} K = S_{0} + P \\end{equation}\nthat is: the price of the put option we desire here will recuperate the price to trade the option and protect against loss. We will symbolically solve for the price of such an option.\nNote that the codeblocks switches here from standard Python to SageMath.\nWe first define the standard normal cumulative distribution.\nfrom sage.symbolic.integration.integral import definite_integral z = var(\u0026#34;z\u0026#34;) N(x) = 1/sqrt(2*pi)*definite_integral(e^(-z^2/2), z, -infinity, x) We will then leverage the Euopean call Black-Scholes model to calculate the optimal put price. We first instantiate variables \\(T\\), and we will set current time to be \\(0\\).\nWe will use \\(v\\) for \\(\\sigma\\), the volatility of the security. We will use \\(S\\) for current price. Lastly, we define \\(P\\) to be our put price. We will call \\(r\\) our risk-free rate.\nTo determine the discount factor, we first implement symbolically our expression for desired strike price.\nv,T,S,P,r = var(\u0026#34;v T S P r\u0026#34;) K = S+P K P + S Great. 
Now we will implement our discount factors.\nd1 = 1/v*sqrt(T) * (ln(S/K) + (r+v^2/2)*(T)) d2 = d1-v*T d1, d2 (1/2*((v^2 + 2*r)*T + 2*log(S/(P + S)))*sqrt(T)/v, -T*v + 1/2*((v^2 + 2*r)*T + 2*log(S/(P + S)))*sqrt(T)/v) And lastly, we will implement the Black-Scholes expression for puts as a logical expression.\nexpr = P == N(-d2)*K*e^(-r*T)-N(-d1)*S expr \\begin{equation} P = \\frac{{\\left({\\left(\\operatorname{erf}\\left(\\frac{\\sqrt{2} {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}}{4 \\, v}\\right) - 1\\right)} e^{\\left(T r\\right)} - \\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 1\\right)} S}{\\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 2 \\, e^{\\left(T r\\right)} - 1} \\end{equation}\nNumerical solutions to this expression\u0026mdash;fitting for each of the values from before\u0026mdash;would then indicate the correct price of the option to generate the hedging effect desired.\n","html":"\u003cp\u003eLet \\(X\\) denote price and \\(Y\\) denote volatility. The two objects obey the following process:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\\n\\dd{Y} = \\sigma Y \\dd{B}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(W\\) and \\(B\\) are correlated Brownian motions with correlation \\(\\rho\\) \u0026mdash; \\(E[(\\dd{W})(\\dd{B})] = \\rho \\dd{t}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet\u0026rsquo;s work with \\(Y\\) first. We understand that \\(Y\\) is some continuous variable \\(e^{a}\\). Therefore, \\(\\dv{Y}{t}=ae^{a}\\). Therefore, \\(dY = ae^{a}dt\\). 
Finally, then \\(\\frac{\\dd{Y}}{Y} = \\frac{ae^{a}}{e^{a}}\\dd{t} = a\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, because we defined \\(Y=e^{a} \\implies \\ln Y = a = \\frac{\\dd{Y}}{Y}\\).\u003c/p\u003e\n\u003cp\u003eSo, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dd{Y} = \\sigma Y\\dd{B} \\\\\n\\Rightarrow\\ \u0026amp; \\dd{\\log Y} = \\frac{\\sigma Y\\dd{B}}{Y} = \\sigma \\dd{B}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis tells that the change in log returns in \\(Y\\) is \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal\u003c/a\u003e (as \\(B\\) is a \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e), with a standard deviation of \\(\\sigma\\). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{\\log Y} \\sim \\mathcal{N}(0, \\sigma^{2} \\dd{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe therefore see that the log-returns of \\(Y\\) is a normal with variance \\(\\sigma^{2}\\), making \\(Y\\) itself a \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e with center \\(0\\) and variance \\(\\sigma^{2}\\).\u003c/p\u003e\n\u003cp\u003eSo now, tackling the expression above in \\(X\\), we will do the same exact thing as above and divide by \\(X\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{\\log X} = \\mu \\dd{t} + Y\\dd{W}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we can see that \\(X\\) is a \u003ca href=\"/posts/kbhgeometric_brownian_motion/\"\u003eGeometric Brownian Motion\u003c/a\u003e as a sum of two random variables\u0026mdash;its volatility is determined by \\(Y\\) with a time-drift \\(\\mu \\dd{t}\\).\u003c/p\u003e\n\u003cp\u003eWe see that we are \u003cem\u003ealmost\u003c/em\u003e ready to have an analytical solution here, because the top expression is applying some function \\(f=\\log\\) to a stochastic differential equation by time; however, the right side \\(Y\\) here is not quite a constant (it is itself a stochastic 
process), so we can\u0026rsquo;t simply apply an \u003ca href=\"/posts/kbhito_intergral/\"\u003eItô Intergral\u003c/a\u003e and call it a day.\u003c/p\u003e\n\u003cp\u003eSo instead, we will proceed to a Monte-Carlo simulation of the results to verify as much as we can.\u003c/p\u003e\n\u003cp\u003eWe will begin by setting the sane values for variances\u0026mdash;having \\(0.1\\%\\) drift and \\(1\\%\\) variance in variance, and the two Brownian motions being inverses of each other \\(\\rho = 0.5\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emu\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.001\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(0.001, 0.01, 0.5)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will seed a standard Brownian motion; as the two random motions are covariate, we can use the value of one to generate another: therefore we will return both at once.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy.random\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003edbdw\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e():\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003edW\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edW\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edbdw\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(-1.0246010237177643, 
-1.281335746614678)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent.\u003c/p\u003e\n\u003cp\u003eWe will now simulate the system we were given:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\\n\\dd{Y} = \\sigma Y \\dd{B}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s set the number of trials to \\(10000\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will measure the convergence of \\(\\bar{\\dd{X}}\\) and \\(\\bar{\\dd{Y}}\\): we will tally each value at each time \\(t\\) as well as compare their expected values over time.\u003c/p\u003e\n\u003cp\u003eWe will first seed our systems at \\(1\\%\\) variance and \\(1\\) dollar of price.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, it\u0026rsquo;s actual simulation time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# history of Y and X\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# history of dx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# current expected value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# difference in E\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdX_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdY_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for n loops, we simulate\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get a source of randmess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edW\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edbdw\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get the current dx and dw\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e_dX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edW\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e_dY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# apply it\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_dX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_dY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# tally it\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eY_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eX_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_dX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_dY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new expected value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we don\u0026#39;t store it immediately b/c we want to check convergence\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003e_EdX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e_EdY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdX_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_EdX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdY_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_EdY\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# store new expected value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_EdX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_EdY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s observe a few values! 
For starters, let\u0026rsquo;s measure our new expected values.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0013333651336800837\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-1.225482645599256e-06\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, let\u0026rsquo;s check if we have converged by seeing if the difference is a reasonably small value:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEdX_diff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eEdY_diff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(2.578663659035343e-05, 1.2183875816528115e-07)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLooks like both of our variables have converged. Now, let\u0026rsquo;s plot a few things. Let\u0026rsquo;s first build a table with our data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e({\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;price\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eX_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;variance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e})\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;time\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e price variance time\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.998644 0.009974 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.980393 0.009796 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.998355 0.009967 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e3 0.994514 0.009913 3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 1.001363 0.009961 4\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e995 2.323640 0.008778 995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e996 2.321473 0.008715 996\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e997 2.343427 0.008818 997\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e998 2.306271 0.008654 998\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e999 2.333365 0.008775 999\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1000 rows x 3 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will use this to continue the rest of our analysis. 
For data augmentation, we will also calculate the natural logs of the change to get the rate of change.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;price_log\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;variance_log\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;price_log_change\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;variance_log_change\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance_log\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance_log\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# drop the first row we have w/o change\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e price 
variance ... price_log_change variance_log_change\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.980393 0.009796 ... -0.018444 -0.018005\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.998355 0.009967 ... 0.018155 0.017332\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.994514 0.009913 ... -0.003855 -0.005443\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 1.001363 0.009961 ... 0.006863 0.004801\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.991306 0.009895 ... -0.010094 -0.006639\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e995 2.323640 0.008778 ... 0.002148 0.002124\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e996 2.321473 0.008715 ... -0.000933 -0.007227\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e997 2.343427 0.008818 ... 0.009413 0.011765\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e998 2.306271 0.008654 ... -0.015983 -0.018804\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e999 2.333365 0.008775 ... 
0.011680 0.013827\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[999 rows x 7 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s begin by plotting what we have:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eseaborn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will plot price and variation on two axes.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egcf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eclear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;g\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwinx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eax\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-14_22-09-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWhere, the blue line represents the percent variance over time and the green line represents the price. Given the \\(0.1\\%\\) drift we provided, we can see that our simulated market grows steadily in the 1000 data point.\u003c/p\u003e\n\u003cp\u003eWe can then plot the log (percent) changes.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egcf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eclear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log_change\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;g\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwinx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance_log_change\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eax\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-14_22-19-02_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAs you can see\u0026mdash;we have fairly strong random variables, centered around \\(0\\). Having verified that our drift and variables behave in the way that we expect, we can proceed with analysis.\u003c/p\u003e\n\u003cp\u003eWe can use a single-variable \\(t\\) test to figure the \\(99\\%\\) confidence band of the result. 
To do this, we first need to calculate the mean and standardized deviation of the price percent change (log difference).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elog_mean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_std\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log_change\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log_change\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog_mean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_std\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(0.0008495184126335735, 0.008471735971085885)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, we will calculate the\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.stats\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elower_bound\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eupper_bound\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterval\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.99\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog_mean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escale\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog_std\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elower_bound\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-0.021014037766751738\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTherefore, with \\(99\\%\\) confidence, we can say that our asset\u0026mdash;given its current parameters, and an \\(N=1000\\) Monte-Carlo simulation\u0026mdash;will not have a more than \\(2.1\\%\\) drop in value.\u003c/p\u003e\n\u003cp\u003eWe will use a hedged option to minimize loss. We will use this value to determine the maximum loss for an European put option, maturing in \\(T\\) time, such that the exercise thereof will be hedged against drops of asset price.\u003c/p\u003e\n\u003cp\u003eFirst, we will determine the cost of a correctly hedged European put option.\u003c/p\u003e\n\u003cp\u003eWe will define \\(S_{0}\\) as the current price of the asset. 
We will use \\(P\\) as the price of the put option.\u003c/p\u003e\n\u003cp\u003eWe desire the strike price of the option to be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nK = S_{0} + P\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is: the price of the put option we desire here will recuperate the price to trade the option \u003cem\u003eand\u003c/em\u003e protect against loss. We will symbolically solve for the price of such an option.\u003c/p\u003e\n\u003cp\u003eNote that the codeblocks switches here from standard Python to SageMath.\u003c/p\u003e\n\u003cp\u003eWe first define the standard normal cumulative distribution.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esage.symbolic.integration.integral\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edefinite_integral\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;z\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edefinite_integral\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einfinity\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will then leverage the Euopean call Black-Scholes model to calculate the optimal put price. 
We first instantiate variables \\(T\\), and we will set current time to be \\(0\\).\u003c/p\u003e\n\u003cp\u003eWe will use \\(v\\) for \\(\\sigma\\), the volatility of the security. We will use \\(S\\) for current price. Lastly, we define \\(P\\) to be our put price. We will call \\(r\\) our risk-free rate.\u003c/p\u003e\n\u003cp\u003eTo determine the discount factor, we first implement symbolically our expression for desired strike price.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eP\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003er\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;v T S P r\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eP\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eP + S\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat. Now we will implement our discount factors.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eln\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003er\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1/2*((v^2 + 2*r)*T + 2*log(S/(P + S)))*sqrt(T)/v,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -T*v + 1/2*((v^2 + 2*r)*T + 
2*log(S/(P + S)))*sqrt(T)/v)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd lastly, we will implement the Black-Scholes expression for puts as a logical expression.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eexpr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eP\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003er\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eexpr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\nP = \\frac{{\\left({\\left(\\operatorname{erf}\\left(\\frac{\\sqrt{2} {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}}{4 \\, v}\\right) - 1\\right)} e^{\\left(T r\\right)} - \\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 1\\right)} S}{\\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 2 \\, e^{\\left(T r\\right)} - 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNumerical solutions to this expression\u0026mdash;fitting for each of the values from before\u0026mdash;would then indicate the correct price of the option to generate the hedging effect desired.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_volatility_hedging/","tags":null,"title":"NUS-ECON320 Volatility Hedging"},{"categories":null,"contents":"The doctor-patient ratio in Haiti is 1 out of 67,000; a combination of malnutrition and malpractice results in a fatality rate of 47%.\nIn Breath, Eyes, Memory, the high rate of fatalities from birth is included as a part of a proverb in Sophie’s village. Ife tells that, of “three children” conceived by an old woman, “one dies in her body.” (Danticat 118)\nNext Steps Token: s_6285_15\nFollow this link for the next step. 
You maybe able to continue to the next phrase of the game; it is also possible that you may have died during birth and would have to restart.\n","html":"\u003cp\u003eThe doctor-patient ratio in Haiti is 1 out of 67,000; a combination of malnutrition and malpractice results in a fatality rate of 47%.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eBreath, Eyes, Memory\u003c/em\u003e, the high rate of fatalities from birth is included as a part of a proverb in Sophie’s village. Ife tells that, of “three children” conceived by an old woman, “one dies in her body.” (Danticat 118)\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_6285_15\u003c/p\u003e\n\u003cp\u003eFollow \u003ca href=\"https://tinyurl.com/nuseng401giftbounceextra1\"\u003ethis link\u003c/a\u003e for the next step. You maybe able to continue to the next phrase of the game; it is also possible that you may have died during birth and would have to restart.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_1/","tags":null,"title":"NUS-ENG401 Childbirth"},{"categories":null,"contents":"Apart from Russia, Central Africa and the Caribbeans have the highest average death rates of regions on Earth. Death is a pretty common occurrence, and—it appears—through the vicissitudes of the game you have died.\nBetter luck next time!\n","html":"\u003cp\u003eApart from Russia, Central Africa and the Caribbeans have the highest average death rates of regions on Earth. Death is a pretty common occurrence, and—it appears—through the vicissitudes of the game you have died.\u003c/p\u003e\n\u003cp\u003eBetter luck next time!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_0/","tags":null,"title":"NUS-ENG401 Death"},{"categories":null,"contents":"Despite having some access to education, actual success through it varies significantly as the resources are scarce. 
For instance, postsecondary education only shares 1% of educational spending in Martinique, so access to it is extremely limited.\nIn Black Shack Alley, José’s quarter scholarship—not enough to support his education—causes his mother to lament that they are “black, poor, and alone in the world” (Zobel 125): their station in Martinican society prevented their access to the already limited resource.\nNext Steps Token: s_5166_12\nResources to advance in education is fickle! To continue to the next step, you must locate Jack L. (\u0026lsquo;23) on the San Mateo campus, and he will provide you with the next steps to complete this track.\n","html":"\u003cp\u003eDespite having some access to education, actual success through it varies significantly as the resources are scarce. For instance, postsecondary education only shares 1% of educational spending in Martinique, so access to it is extremely limited.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eBlack Shack Alley\u003c/em\u003e, José’s quarter scholarship—not enough to support his education—causes his mother to lament that they are “black, poor, and alone in the world” (Zobel 125): their station in Martinican society prevented their access to the already limited resource.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_5166_12\u003c/p\u003e\n\u003cp\u003eResources to advance in education is fickle! To continue to the next step, you must locate Jack L. 
(\u0026lsquo;23) on the San Mateo campus, and he will provide you with the next steps to complete this track.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_5/","tags":null,"title":"NUS-ENG401 Endgame of Education"},{"categories":null,"contents":"Based only on the framing analysis of Sax\u0026rsquo;s Othello, I want to write a piece analyzing collectively grip (lighting) and gaffing (framing) of the films, specifically with a contrast as much as possible to the original text\u0026rsquo;s discussion of framing in, for instance, stage directions. In Sax\u0026rsquo;s Othello, Iago\u0026rsquo;s musings are always framed medium-close from below, with lighting coming from above: the framing helps show a sense of \u0026ldquo;ascension\u0026rdquo; (perhaps not unintentionally biblical, like an ascension to heaven), showing how Iago\u0026rsquo;s schemes are helping him rise through the ranks: a sign of his desire for power. I want to continue to analyses these types of connection between the grip and gaffing throughout the films to help reveal the differing power welded by the various screenwriters\u0026rsquo; differing analysis on Shakespeare\u0026rsquo;s characters.\nVishal Bhardwaj\u0026rsquo;s Omkara Dreary music plays during the misc. Games of marbles\nMopeds?\nsmall bounding songs\nDifferences: Desdemona and Rodrigo were engaged\nEmilia is Othello\u0026rsquo;s sister\nInstances of violence/impending violence underscored by very cheerful music\nIntroductory scene Intro credits: \u0026ldquo;lyrics\u0026rdquo;, \u0026ldquo;music\u0026rdquo;, \u0026ldquo;dialog\u0026rdquo; as the three scripting components call Othello a \u0026ldquo;half-cast\u0026rdquo; \u0026ldquo;abduting\u0026rdquo;\u0026mdash;actually in love or not Marbles scene Robantio (Desdomona\u0026rsquo;s dad) goes and tries to find Othello Ominous music? Monster, Half-Caste, etc. 
\u0026ldquo;never trust what your eyes say, your eyes will betray you Enable 100 Bet Scene Robantio sets the seed of jealousy by explicitly calling Desdemona \u0026ldquo;two-faced\u0026rdquo; Laugh and discussions The music: very cheerful during scenes of violence Omi was able to kill everyone Steps over dead body: washes himself with water Election So Othello\u0026rsquo;s boss During ceremony, Othello chooses Cassio to be the new general \u0026ldquo;How did you get such a light girl in these dark parts\u0026rdquo; \u0026ldquo;dark lord and magic fluit\u0026rdquo; Music swells and Rodrigo cries Rodrigo and Iago Moving the \u0026ldquo;I ahet the moor\u0026rdquo; scene to the middle of the film Iago fires Rodrigo\u0026rsquo;s jealousies, and then Rodrigo fire\u0026rsquo;s Iagos by engaging The Son\u0026rsquo;s BD, etc. Iago listens and watches Camera flashes are the only sound that\u0026rsquo;s present Camera flashing sound is like guns Iago again talks with Rodrigo about Dolly Bianca and Cassio Filmed through a bollywood musical number: very cheerful despite the omen of bad consiquences Cassio is Drunk Typically, HAPPY precededs violence Smoking hurt the music, which causes the Cassio/Rodrigo brawl First Tail Spinning after brawl \u0026ldquo;It was the booze\u0026rdquo; Iago used \u0026ldquo;Only one voice that could work, Dolly\u0026rsquo;s\u0026rdquo; Omi Brother is back Mopeds Iago begins to spin his tale by avoiding talking about Kesu Drum samples + acient vocal chops Symbolism of dissonant music Othello is not fighting back Long songs to illustrate love:w Not enough Blue cololr grading Desdemona very white because of framing compared to all others Father So motivation is given Serious music in the background, replaying the two characters' Tone of the phone number Basil Dearden\u0026rsquo;s All Night Long Timing: 1962, after the war.\nRex + Delia: Othello and Desdemona\nIago want to break off from Othello\u0026rsquo;s band, but his financeeer will only finance 
him if Desdemona will join Iago\u0026rsquo;s new band instead of staying with Othello\u0026rsquo;s band.\nGift: cigerette case.\nBrazillan pistol drums: mark of\nIntroductory scene Stairs; what does rising a lot of stairs mean symbolistically? framing: xylophone; poster; bass \u0026mdash; marrige celebrating poster meaning? \u0026ldquo;Cause I want you, always have\u0026rdquo; \u0026mdash; new angle: Iago is in love Desdemona Cassio introduced Desdemona to Othello \u0026ldquo;Here\u0026rsquo;s to the two men in my life\u0026rdquo; \u0026mdash; Desdemona; does she have affection for Cassio too? Rodrigo and Iago Iago planting in Rodrigo the idea Cassio is in love with Desdemona weed is an important plot point Emilia breaking the fourth wall Iago picks up the cigarette case manually Iago handed MJ to Cassio Cigarette case handed to Cassio too Rex does not want Cassio to smoke Uses the idea of smoke Iago\u0026rsquo;s gaze looks down below Othello when speaking to him Iago looks to the side/in front a lot when talking normally Alcohol and MJ is swapped, but the point is about the same Cassio, instead of fighting Rodrigo, fought a different person First drum solo Contrast: Iago looking very ominously forward towards othello, Othello looks to the side Symbolic meaning of drum solo? Triangle between Othello vs. Desdemona vs. Iago Tape Editing Talk to Delia; \u0026ldquo;ocular proof\u0026rdquo; out of the audio recording \u0026ldquo;I know how you feel\u0026rdquo; \u0026mdash; similar to Ben Jago in the previous film The tape editing as a part of Rodrigo\u0026rsquo;s setup, so Rodrigo funded Iago\u0026rsquo;s things \u0026ldquo;Drop that Sherlock Holmes bit, that hat looks funny\u0026rdquo; All Night Long Song All night long song \u0026mdash; why so sad? 
Othello looks at Delia in a funny way because of the songs \u0026ldquo;I love you both\u0026rdquo;: odd choice of words on Delia\u0026rsquo;s part Ocular Proof Cigarette case + and The tapes were rolling Backroom conversation and recording Camera angle between Othello and Desdemona Othello view from the top looking down, Desdemona/Rodrigo view from the bottom looking up Iago holding door for Othello, submissiveness strangling and throwing out Desdemona is alive? Iago is not attacked and Othello didn\u0026rsquo;t kill himself? Confused about the ending another drum solo: I don\u0026rsquo;t even love Jonny, Iago Repeated shot between the two people Imaginary band playing during Iago\u0026rsquo;s solo Geoffrey Sax\u0026rsquo;s Othello Geoffrey Sax\u0026rsquo;s Othello Symbols\nLight on Iago\u0026rsquo;s face when he is scheming\nExtreme closeup on Othello and Iago is close together\nDrums + violin to build tension; happy violin to show happiness\nIago: \u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\nBOTH interrogation and the scene with Cass Iago: \u0026ldquo;And that\u0026rsquo;s a promise\nThe Iago/Othello closeups are very close\nOthello gave the impetus that he is afraid of marriage falling apart explicitly, and Iago explicitly planted into Cassio that their marrage is failing\nCassio in this version is much more active, it is not entirely a misunderstanding \u0026ldquo;TLC\u0026rdquo;: shot from above\u0026mdash;kind of pressure overpushing\nDessie bought the gown\nmoment whenever light comes across Iago\u0026rsquo;s face\nrepeating violin theme in happiness!!!!\nrepeating vocal theme in sadness!!!!\n\u0026ldquo;It\u0026rsquo;s a shame really, really. He\u0026rsquo;s a broken man.\u0026rdquo;\nWho is Iago talking to? 
Is Othello really rushed to his head?\n\u0026ldquo;I want an example made\u0026rdquo;: kind of a way of cracking\nAdvanced interrogation methods: methods of guaranteeing trust\n\u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\ninterrogation and the scene with Cass and towards Othehllo I don\u0026rsquo;t think I can handle it\nbefore otherllo\u0026rsquo;s \u0026ldquo;death\u0026rdquo; (unraveling) before the constable\u0026rsquo;s actual death Othello realization\nExtreme close ups When Othello was about to kill Iago, lots of light on hisface Death scene, repeat from the beginning, replaying the intro, but with DIFFERENT MUSIC\nSo desdemona looks the same as in life or death\nAlso Emilia didn\u0026rsquo;t die??\nWith remember\n\u0026ldquo;And that\u0026rsquo;s a promise\u0026rdquo;\nloud songs\nlight cast onto Iago\u0026rsquo;s face\nRainy, \u0026ldquo;hello\u0026rdquo;\nThe hankerchief is \u0026ldquo;Dessie\u0026rsquo;s early days\u0026rdquo;\nWhat does the swimming symbolize?\nOpening extreme close up of lips, hands, blackness vs. whiteness background singing: a little bit greek? Intro Cuts The cuts between chaos and peace: riots and looting Othello is a policemen Police: elevated POWER Difference in modern power dynamic, etc. Iago: breaking the 4th wall \u0026ldquo;Yesterday\u0026rsquo;s Man\u0026rdquo; Bathroom scene Both groups secretly conspiring Unbeliving (jelous? in love with? of Othello\u0026rsquo;s strength It was recorded! Creating the difference Justice under the law \u0026ldquo;Justice under the law\u0026rdquo;, \u0026ldquo;unlawfully killed\u0026rdquo;: that is that meaning? 
Did Othello unlawfully kill?\nClear bold statement \u0026ldquo;I know my worth\u0026rdquo; Tell them the status The eyes\u0026rsquo; angle of Ben Iago: Light casting directly cast onto Ben Becomes a Commissioner \u0026ldquo;Something loveable about everyone, even if they turned out to be assholes\u0026rdquo; Iago and Emilia \u0026ldquo;Don\u0026rsquo;t you think John is too good to be true\u0026rdquo; Cassio Doxxing people on a nazi website Michael cass as a form of protection: Story is different Interview \u0026ldquo;They love each other\u0026rdquo; Extreme closeup of Othello and Iago Cassio\u0026rsquo;s character is very indirect: uncomportable angles Shell Marriage \u0026ldquo;Shell marriage\u0026rdquo;: Iago priming Cassio \u0026ldquo;Happy ending\u0026rdquo;: interview suspending as a way of pretense\u0026hellip; of sexual hints? Robe Robe is the hankerchief ","html":"\u003cp\u003eBased only on the framing analysis of Sax\u0026rsquo;s Othello, I want to write a piece analyzing collectively grip (lighting) and gaffing (framing) of the films, specifically with a contrast as much as possible to the original text\u0026rsquo;s discussion of framing in, for instance, stage directions. In Sax\u0026rsquo;s Othello, Iago\u0026rsquo;s musings are always framed medium-close from below, with lighting coming from above: the framing helps show a sense of \u0026ldquo;ascension\u0026rdquo; (perhaps not unintentionally biblical, like an ascension to heaven), showing how Iago\u0026rsquo;s schemes are helping him rise through the ranks: a sign of his desire for power. 
I want to continue to analyses these types of connection between the grip and gaffing throughout the films to help reveal the differing power welded by the various screenwriters\u0026rsquo; differing analysis on Shakespeare\u0026rsquo;s characters.\u003c/p\u003e\n\u003ch2 id=\"vishal-bhardwaj-s-omkara\"\u003eVishal Bhardwaj\u0026rsquo;s Omkara\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDreary music plays during the misc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eGames of marbles\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eMopeds?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esmall bounding songs\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDifferences: Desdemona and Rodrigo were engaged\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEmilia is Othello\u0026rsquo;s sister\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eInstances of violence/impending violence underscored by very cheerful music\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"introductory-scene\"\u003eIntroductory scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIntro credits: \u0026ldquo;lyrics\u0026rdquo;, \u0026ldquo;music\u0026rdquo;, \u0026ldquo;dialog\u0026rdquo; as the three scripting components\u003c/li\u003e\n\u003cli\u003ecall Othello a \u0026ldquo;half-cast\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;abduting\u0026rdquo;\u0026mdash;actually in love or not\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"marbles-scene\"\u003eMarbles scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRobantio (Desdomona\u0026rsquo;s dad) goes and tries to find Othello\u003c/li\u003e\n\u003cli\u003eOminous music?\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eMonster\u003c/strong\u003e, \u003cstrong\u003eHalf-Caste\u003c/strong\u003e, etc.\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;never trust what your eyes say, your eyes will betray 
you\u003c/li\u003e\n\u003cli\u003eEnable\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"100-bet-scene\"\u003e100 Bet Scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRobantio sets the seed of jealousy by explicitly calling Desdemona \u0026ldquo;two-faced\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eLaugh and discussions\u003c/li\u003e\n\u003cli\u003eThe music: \u003cstrong\u003e\u003cstrong\u003every cheerful during scenes of violence\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eOmi was able to kill everyone\u003c/li\u003e\n\u003cli\u003eSteps over dead body: washes himself with water\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"election\"\u003eElection\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSo Othello\u0026rsquo;s \u003cstrong\u003eboss\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eDuring ceremony, Othello chooses Cassio to be the new general\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;How did you get such a light girl in these dark parts\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;dark lord and magic fluit\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eMusic swells and Rodrigo cries\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"rodrigo-and-iago\"\u003eRodrigo and Iago\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eMoving the \u0026ldquo;I ahet the moor\u0026rdquo; scene to the middle of the film\u003c/li\u003e\n\u003cli\u003eIago fires Rodrigo\u0026rsquo;s jealousies, and then Rodrigo fire\u0026rsquo;s Iagos by engaging\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"the-son-s-bd-etc-dot\"\u003eThe Son\u0026rsquo;s BD, etc.\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIago listens and watches\u003c/li\u003e\n\u003cli\u003eCamera flashes are the only sound that\u0026rsquo;s present\u003c/li\u003e\n\u003cli\u003eCamera flashing sound is like guns\u003c/li\u003e\n\u003cli\u003eIago again talks with Rodrigo about Dolly\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bianca-and-cassio\"\u003eBianca and 
Cassio\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFilmed through a bollywood musical number: \u003cstrong\u003every cheerful\u003c/strong\u003e despite the omen of bad consiquences\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cassio-is-drunk\"\u003eCassio is Drunk\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTypically, \u003cstrong\u003eHAPPY\u003c/strong\u003e precededs violence\u003c/li\u003e\n\u003cli\u003eSmoking hurt the music, which causes the Cassio/Rodrigo brawl\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"first-tail-spinning-after-brawl\"\u003eFirst Tail Spinning after brawl\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;It was the booze\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eIago used\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Only one voice that could work, Dolly\u0026rsquo;s\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"omi-brother-is-back\"\u003eOmi Brother is back\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eMopeds\u003c/li\u003e\n\u003cli\u003eIago begins to spin his tale by avoiding talking about Kesu\u003c/li\u003e\n\u003cli\u003eDrum samples + acient vocal chops\u003c/li\u003e\n\u003cli\u003eSymbolism of dissonant music\u003c/li\u003e\n\u003cli\u003eOthello is not fighting back\u003c/li\u003e\n\u003cli\u003eLong songs to illustrate love:w\u003c/li\u003e\n\u003cli\u003eNot enough\u003c/li\u003e\n\u003cli\u003eBlue cololr grading\u003c/li\u003e\n\u003cli\u003eDesdemona very white because of framing compared to all others\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"father\"\u003eFather\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSo motivation is given\u003c/li\u003e\n\u003cli\u003eSerious music in the background, replaying the two characters'\u003c/li\u003e\n\u003cli\u003eTone of the phone number\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basil-dearden-s-all-night-long\"\u003eBasil Dearden\u0026rsquo;s All Night Long\u003c/h2\u003e\n\u003cp\u003eTiming: 1962, after the war.\u003c/p\u003e\n\u003cp\u003eRex + 
Delia: Othello and Desdemona\u003c/p\u003e\n\u003cp\u003eIago want to break off from Othello\u0026rsquo;s band, but his financeeer will only finance him if Desdemona will join Iago\u0026rsquo;s new band instead of staying with Othello\u0026rsquo;s band.\u003c/p\u003e\n\u003cp\u003eGift: cigerette case.\u003c/p\u003e\n\u003cp\u003eBrazillan pistol drums: mark of\u003c/p\u003e\n\u003ch3 id=\"introductory-scene\"\u003eIntroductory scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStairs; what does rising a lot of stairs mean symbolistically?\u003c/li\u003e\n\u003cli\u003eframing: xylophone; poster; bass \u0026mdash; marrige celebrating poster meaning?\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Cause I want you, always have\u0026rdquo; \u0026mdash; new angle: Iago is in love Desdemona\u003c/li\u003e\n\u003cli\u003eCassio introduced Desdemona to Othello\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Here\u0026rsquo;s to the two men in my life\u0026rdquo; \u0026mdash; Desdemona; does she have affection for Cassio too?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"rodrigo-and-iago\"\u003eRodrigo and Iago\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIago planting in Rodrigo the idea Cassio is in love with Desdemona\u003c/li\u003e\n\u003cli\u003eweed is an important plot point\u003c/li\u003e\n\u003cli\u003eEmilia breaking the fourth wall\u003c/li\u003e\n\u003cli\u003eIago picks up the cigarette case manually\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"iago-handed-mj-to-cassio\"\u003eIago handed MJ to Cassio\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCigarette case handed to Cassio too\u003c/li\u003e\n\u003cli\u003eRex does not want Cassio to smoke\u003c/li\u003e\n\u003cli\u003eUses the idea of smoke\u003c/li\u003e\n\u003cli\u003eIago\u0026rsquo;s gaze looks down below Othello when speaking to him\n\u003cul\u003e\n\u003cli\u003eIago looks to the side/in front a lot when talking 
normally\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eAlcohol and MJ is swapped, but the point is about the same\n\u003cul\u003e\n\u003cli\u003eCassio, instead of fighting Rodrigo, fought a different person\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"first-drum-solo\"\u003eFirst drum solo\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eContrast: Iago looking very ominously \u003cstrong\u003eforward\u003c/strong\u003e \u003cstrong\u003etowards\u003c/strong\u003e othello, Othello looks to the \u003cstrong\u003eside\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eSymbolic meaning of drum solo?\u003c/li\u003e\n\u003cli\u003eTriangle between Othello vs. Desdemona vs. Iago\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"tape-editing\"\u003eTape Editing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTalk to Delia; \u0026ldquo;ocular proof\u0026rdquo; out of the audio recording\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I know how you feel\u0026rdquo; \u0026mdash; similar to Ben Jago in the previous film\u003c/li\u003e\n\u003cli\u003eThe tape editing as a part of \u003cstrong\u003eRodrigo\u0026rsquo;s setup\u003c/strong\u003e, so Rodrigo funded Iago\u0026rsquo;s things\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Drop that Sherlock Holmes bit, that hat looks funny\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"all-night-long-song\"\u003eAll Night Long Song\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eAll night long song \u0026mdash; why so sad?\u003c/li\u003e\n\u003cli\u003eOthello looks at Delia in a funny way because of the songs\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I love you both\u0026rdquo;: odd choice of words on Delia\u0026rsquo;s part\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ocular-proof\"\u003eOcular Proof\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCigarette case + and\u003c/li\u003e\n\u003cli\u003eThe tapes were rolling\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"backroom-conversation-and-recording\"\u003eBackroom conversation and recording\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCamera angle between Othello and Desdemona\u003c/li\u003e\n\u003cli\u003eOthello view from the top looking down, Desdemona/Rodrigo view from the bottom looking up\u003c/li\u003e\n\u003cli\u003eIago holding door for Othello, submissiveness\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"strangling-and-throwing-out\"\u003estrangling and throwing out\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDesdemona is alive?\u003c/li\u003e\n\u003cli\u003eIago is not attacked and Othello didn\u0026rsquo;t kill himself?\u003c/li\u003e\n\u003cli\u003eConfused about the ending\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eanother drum solo\u003c/strong\u003e: I don\u0026rsquo;t even love Jonny, Iago\u003c/li\u003e\n\u003cli\u003eRepeated shot between the two people\u003c/li\u003e\n\u003cli\u003eImaginary band playing during Iago\u0026rsquo;s solo\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"geoffrey-sax-s-othello\"\u003eGeoffrey Sax\u0026rsquo;s Othello\u003c/h2\u003e\n\u003cp\u003e\u003cspan class=\"underline\"\u003e\u003cstrong\u003e\u003cstrong\u003eGeoffrey Sax\u0026rsquo;s Othello\u003c/strong\u003e\u003c/strong\u003e\u003c/span\u003e Symbols\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eLight on Iago\u0026rsquo;s face when he is scheming\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eExtreme closeup on Othello and Iago is close together\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDrums + violin to build tension; happy violin to show happiness\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIago: \u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBOTH interrogation\u003c/li\u003e\n\u003cli\u003eand the scene with Cass\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIago: 
\u0026ldquo;And that\u0026rsquo;s a promise\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eThe Iago/Othello closeups are very close\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOthello gave the impetus that he is afraid of marriage falling apart explicitly, and Iago explicitly planted into Cassio that their marrage is failing\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eCassio in this version is much more active, it is not entirely a misunderstanding\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;TLC\u0026rdquo;: shot from above\u0026mdash;kind of pressure overpushing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003eDessie\u003c/strong\u003e \u003cem\u003ebought\u003c/em\u003e the gown\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emoment whenever light comes across Iago\u0026rsquo;s face\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003erepeating violin theme in happiness\u003c/strong\u003e\u003c/strong\u003e!!!!\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003erepeating vocal theme in sadness\u003c/strong\u003e\u003c/strong\u003e!!!!\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;It\u0026rsquo;s a shame really, really. 
He\u0026rsquo;s a broken man.\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWho is Iago talking to?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIs Othello really rushed to his head?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;I want an example made\u0026rdquo;: kind of a way of cracking\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAdvanced interrogation methods: methods of guaranteeing trust\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003einterrogation\u003c/li\u003e\n\u003cli\u003eand the scene with Cass\u003c/li\u003e\n\u003cli\u003eand towards Othehllo\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eI don\u0026rsquo;t think I can handle it\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebefore otherllo\u0026rsquo;s \u0026ldquo;death\u0026rdquo; (unraveling)\u003c/li\u003e\n\u003cli\u003ebefore the constable\u0026rsquo;s actual death\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOthello realization\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eExtreme close ups\u003c/li\u003e\n\u003cli\u003eWhen Othello was about to kill Iago, lots of light on hisface\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDeath scene, repeat from the beginning, replaying the intro, but with DIFFERENT MUSIC\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSo desdemona looks the same as in life or death\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAlso Emilia didn\u0026rsquo;t die??\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWith remember\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;And that\u0026rsquo;s a 
promise\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eloud songs\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elight cast onto Iago\u0026rsquo;s face\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRainy, \u0026ldquo;hello\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eThe hankerchief is \u0026ldquo;Dessie\u0026rsquo;s early days\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhat does the swimming symbolize?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"opening\"\u003eOpening\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eextreme close up of lips, hands, blackness vs. whiteness\u003c/li\u003e\n\u003cli\u003ebackground singing: a little bit greek?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"intro-cuts\"\u003eIntro Cuts\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe cuts between chaos and peace: riots and looting\u003c/li\u003e\n\u003cli\u003eOthello is a policemen\n\u003cul\u003e\n\u003cli\u003ePolice: elevated \u003cstrong\u003ePOWER\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eDifference in modern power dynamic, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eIago: breaking the 4th wall\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Yesterday\u0026rsquo;s Man\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bathroom-scene\"\u003eBathroom scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBoth groups secretly conspiring\u003c/li\u003e\n\u003cli\u003eUnbeliving (jelous? in love with? of Othello\u0026rsquo;s strength\u003c/li\u003e\n\u003cli\u003eIt was recorded! Creating the difference\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"justice-under-the-law\"\u003eJustice under the law\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Justice under the law\u0026rdquo;, \u0026ldquo;unlawfully killed\u0026rdquo;: that is that meaning? 
Did Othello unlawfully kill?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eClear bold statement\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I know my worth\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003eTell them the status\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThe eyes\u0026rsquo; angle of Ben Iago:\u003c/li\u003e\n\u003cli\u003eLight casting directly cast onto Ben\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"becomes-a-commissioner\"\u003eBecomes a Commissioner\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Something loveable about everyone, even if they turned out to be assholes\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eIago and Emilia\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Don\u0026rsquo;t you think John is too good to be true\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cassio\"\u003eCassio\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDoxxing people on a nazi website\u003c/li\u003e\n\u003cli\u003eMichael cass as a form of protection:\u003c/li\u003e\n\u003cli\u003eStory is different\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"interview\"\u003eInterview\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;They love each other\u0026rdquo; Extreme closeup of Othello and Iago\u003c/li\u003e\n\u003cli\u003eCassio\u0026rsquo;s character is very indirect: uncomportable angles\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"shell-marriage\"\u003eShell Marriage\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Shell marriage\u0026rdquo;: Iago priming Cassio\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Happy ending\u0026rdquo;: interview suspending as a way of pretense\u0026hellip; of sexual hints?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"robe\"\u003eRobe\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRobe is the hankerchief\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_film_analysis/","tags":null,"title":"NUS-ENG401 Film 
Analysis"},{"categories":null,"contents":"\u0026ldquo;Dialogue tends towards minimalism; instead, Bhardwaj relies heavily on extradiegetic and intradiegetic instrumental and vocal music\u0026rdquo;\nWhile both a lust for Othello\u0026rsquo;s power and Rodrigo\u0026rsquo;s provocations of Iago drove him initially to begin his scheme, it is Iago\u0026rsquo;s internal racist hatred of Othello as a person that allowed his plot to fully come to fruition.\nEstablish pattern two othellos: othello the person \u0026ldquo;the moor\u0026rdquo;, and othello the general \u0026ldquo;othello\u0026rdquo; The four texts\u0026rsquo; Iagos can be treated in the same framework of \u0026ldquo;love\u0026rdquo;\u0026ndash;hatred, whether racial or otherwise, for Othello\u0026mdash;versus \u0026ldquo;respect\u0026rdquo;\u0026mdash;deference to the authority of Othello.\nShakesphere\u0026rsquo;s Othello: hates the guy, likes the general Iago pretty much said as much \u0026ldquo;I hate the Moor\u0026rdquo; Understand the role of power Othello has: \u0026ldquo;In following him, I follow but myself\u0026rdquo;; evidently want to be Lieutenant for his power In the work, \u0026ldquo;Othello\u0026rdquo; v. \u0026ldquo;Moor\u0026rdquo; is effectively two different people \u0026ldquo;After some time, to abuse Othello\u0026rsquo;s ear \u0026hellip; The Moor is of a free and open nature,\u0026rdquo;. 
The former, the general, well-respected for his power by Iago, that demoted Cassio; the latter, the outcast person that actually did the betrayal and whom Iago hates.\nSax\u0026rsquo;s Othello: iago\u0026rsquo;s hatred of othello the person is drien by racism Yes, though this lust for power provoked Iago to mess with Othello the person to get the powers of othello the general, he is definitely overtly racist to othello the person starting from the beginning.\nPraises to Andrew Davies, managed to cram in a lot into the I hate the Moor speech.\nHates the guy:\novertly racist \u0026ldquo;You stupid, patronizing ape \u0026hellip; how very quaint, how very d\u0026mdash; sunday school\u0026rdquo;\n\u0026ldquo;d\u0026mdash; sunday school\u0026rdquo;: white mistrel mocking AA worship =\u0026gt; hand gestures of Iago N word he is basically Iago\u0026rsquo;s understudy \u0026ldquo;how very good for you to acknowledge what you owe to me, you owe me everything, everything!\u0026rdquo;\n\u0026mdash; for all the talk about love, it is \u0026ldquo;If I could find any whose brains were as big as their dicks, I’d be a happy man, eh?\u0026rdquo;\nMirrors racism in Shakesphere: \u0026ldquo;The Moor \u0026hellip; will as tenderly be led by the nose \u0026hellip; as asses are.\u0026rdquo;\nAgain, in lust with not with the guy but with his power: \u0026ldquo;It\u0026rsquo;s a shame really, he\u0026rsquo;s a good man. \u0026hellip; It going to take a bit longer, and its all going to end in broken hearts.\u0026rdquo;\n\u0026ldquo;It\u0026rsquo;s love. Simple as that\u0026rdquo; (close reading: love, perhaps against Othello, or perhaps his position)\nDeardon\u0026rsquo;s Iago: hatred actually useful\u0026mdash;-otherwise Iago would not have succeeded Why is this racism necessary.\nDeardon attempts an answer: Perhaps as a product of civil rights, or the jazz counteculture, the race component is functionally erased, and instead Deardon\u0026rsquo;s Iago is lusting over Desdemona. 
The film is one which Iago and Othello were ostensibly friends\u0026mdash;during the subtext perhaps more of a reflection for civil rights sentiment (especially via the counter-culture language of Jazz of the time) \u0026ldquo;apparently natural construct of a racially diverse \u0026hellip; sub-culture: \u0026rsquo;the [diverse racial archetypes] intermingle smoothly and fraternisation creates deep emotional pangs, rather than embarrassment\u0026rsquo;\u0026rdquo; (Kinematograph Weekly) (Burton and O\u0026rsquo;Sullivan)\nCritics hated particularly the soft ending: \u0026ldquo;never for one moment succeeds in achieving anything like the power and persuasion of the original.\u0026rdquo; (Films and Filming) Why is the soft-ending? When Othello almost smoothered Desdemona, three people went and checked immediatley, and there was no Emilia\u0026rsquo;s \u0026ldquo;The Moor\u0026rsquo;s abused by some most villainous knave, Some base notorious knave\u0026rdquo;. She immediately doubted Iago, confusion was cleared, and all was well.\nSo though Rodrigo ignited Iago\u0026rsquo;s scheme, racism dragged Iago\u0026rsquo;s scheme long enough for both to be dead.\nBhardwaj\u0026rsquo;s Othello: Rodrigo Convinced Merge between two Othellos, setting forward motion https://www.youtube.com/watch?v=zzgDHT3inzI\nIago provoked by Rodrigo jeering him: \u0026ldquo;no more of drowning, do you hear?\u0026rdquo; \u0026ldquo;I could jump into this river\u0026rdquo; \u0026hellip; \u0026ldquo;well jump! 
Don\u0026rsquo;t be a sissy [sic].\u0026rdquo; Then Rodrigo provoked Iago about how Iago was slighted by Othello; relegating Iago to a role in \u0026ldquo;Company Garden\u0026rdquo;.\nIt is at this moment that the music swells, ominous vocal tones that reprises from the beginning titles of the film, of \u0026ldquo;title faintly visible images of what appear to be ancient scenes of combat.\u0026rdquo;\n\u0026ldquo;Bhardwaj composes all his film scores and writes music and script simultaneously\u0026rdquo;, so the intentionality here is not to be missed. Motivated more by a masculine sense of revenge against General Othello, Iago decided to take personal action against the hated Moor.\nOdd choice, too, to relegate Iago to \u0026ldquo;company garden.\u0026rdquo; Turns out, Shakesphere defined it for us! \u0026ldquo;Our bodies are gardens, to the which our wills are gardeners; \u0026hellip; we have reason to cool our \u0026hellip; unbitted lusts; \u0026hellip; I take this, that you call love, to be a sect or scion.\u0026rdquo; Iago claims love is a \u0026ldquo;cutting\u0026rdquo; of unbitted lust, which he should cut. 
Metaphorically cutting Othello\u0026rsquo;s the person\u0026rsquo;s love for desdemona as if he\u0026rsquo;s mechanically cutting away some lust; all to achieve a goal for General Othello.\n\u0026ldquo;Othello should go mad\u0026rdquo;\nComparing Iago No Love, No respect | Direct objective: to get Othello\u0026rsquo;s position at the Met Yes Love, Yes Respect | Direct objective: to remove Desdemona from Othello\u0026rsquo;s control and start a band No Love, Yes Respect | Direct objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader \u0026lt;\u0026gt; Shakesphere Missing: Yes Love, Yes Respect\nActual Othello IAGO: \u0026ldquo;No more of drowning, do you hear?\u0026rdquo; \u0026lt;\u0026gt; Omkara Bhardwaj\u0026rsquo;s Iago Direct objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader Hidden objective: \u0026hellip;??? not sure not well motivated Direct Downfall of Othello: \u0026ldquo;ocular proof\u0026rdquo; of jingly heirloom Main Methods \u0026lt;\u0026gt; Shakesphere beer to Cassio \u0026lt;\u0026gt; Shakesphere making Othello overhear conversation with Cassio about Bianca to think its about Desdemona \u0026lt;\u0026gt; Shakesphere convinces Desdemona to soothsay for Cassio Random bad omen about bird/snake? 
\u0026lt;\u0026gt; Shakesphere Heirloom Distinctions Process of manipulation is more toxically masculine instead of weird submissiveness \u0026ldquo;I hate Moor\u0026rdquo; + drowning scene took place somewhat after being provoked by Rodrigo firing his jealousy after Iago fired Rodrigo\u0026rsquo;s jealousy about Desdemona Unique shots/catchphrases/features Instances of violence/impending violence underscored by very cheerful music Cheerful music as Othello beats up everybody in the rival gang Happy cheerful party music as Cassio becomes drunk and gets demoted Sax\u0026rsquo;s Iago Direct objective: to get Othello\u0026rsquo;s position at the Met Hidden objective: [racism for]/[sexual desire for] Othello Direct Downfall of Othello: A, B, and C for intimacy test for the robe Main methods \u0026lt;\u0026gt; Shakesphere conversation to arise suspicion Hinted to Cassio that marriage is not genuine and provoked him Screwed up Othello\u0026rsquo;s investigation of the constable Demanded intimacy test and A, B, AND C Distinctions Wants Othello\u0026rsquo;s position, and probably his love Is more explicitly lying (A, B, AND C) Is more overtly racist: beginning conversation with Sinclair, the racist (\u0026ldquo;I hate the moor\u0026rdquo; proxy) rant Relationship with Emilia seem less strained Unique shots/catchphrases/features Staring up into the sky w/ shaft of light \u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo; prior to manipulation \u0026ldquo;And that\u0026rsquo;s a promise\u0026rdquo; Darden\u0026rsquo;s Iago Direct objective: to remove Desdemona from Othello\u0026rsquo;s control and start a band Hidden objective: to date Desdemona Direct Downfall of Othello: [the cigarette box?] 
+ edited tape recording Main Methods \u0026lt;\u0026gt; Shakesphere gave weed to Cassio to cause him to make a scene \u0026lt;\u0026gt; Shakesphere conversation to arise suspicion re: knowledge from Emilia Engineered Desdemona\u0026rsquo;s song to be specifically pointed \u0026lt;\u0026gt; Shakesphere \u0026ldquo;ocular proof\u0026rdquo;: cigarette case; also insinuating that Cassio\u0026rsquo;s weed came from Desdemona Doctoring the tape to highlight Cassio\u0026rsquo;s supposed infidelity Distinctions In love with Desdemona and had separate business motivations distinct from taking Othello/Cassio\u0026rsquo;s Didn\u0026rsquo;t seem particularly racist, was previously friends with Othello? Is more explicitly lying by doctoring tape, but words closer to Shakesphere\u0026rsquo;s Iago Didn\u0026rsquo;t succeed Most strained relationship with Emilia, \u0026ldquo;I love nobody. Don\u0026rsquo;t even love Jonny\u0026rdquo; Unique shots/catchphrases/features Shot of Cousin Jonny / Rodrigo alone, in his home, isolated with light cast on him Frenzied drum solo Motivated somewhat by Lou, the financier The rising of stairs up and down the main factory as well as to a side building Weed. 
Close reading Iago\u0026rsquo;s first drum solo https://www.youtube.com/watch?v=fA-vKHOVDCw Iago: looks to the side, then at the camera directly Othello: looks generally at the direction of the camera, but as camera pans in closes his eyes and look down and away Desdemona: looks entirely to the side, uncaring Iago: looks down (like Othello\u0026rsquo;s ending shot), then looks up and to the side, cutting to\u0026hellip; Othello: looking at the opposite side, seemingly \u0026ldquo;towards\u0026rdquo; Iago, then looks down Iago: as drum solo becomes more frenzied, (mildly extreme) close up staring dead into the camera frienzied drum soloing Iago: looks down, and then to the side, seemingly contemplating Desdemona: stares dead ahead Othello: looks to the side and down, hand holding head Iago: eyes pans across audience ","html":"\u003cp\u003e\u0026ldquo;Dialogue tends towards minimalism; instead, Bhardwaj relies heavily on extradiegetic and intradiegetic instrumental and vocal music\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eWhile both a lust for Othello\u0026rsquo;s power and Rodrigo\u0026rsquo;s provocations of Iago drove him initially to begin his scheme, it is Iago\u0026rsquo;s internal racist hatred of Othello as a person that allowed his plot to fully come to fruition.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"establish-pattern-two-othellos-othello-the-person-the-moor-and-othello-the-general-othello\"\u003eEstablish pattern two othellos: othello the person \u0026ldquo;the moor\u0026rdquo;, and othello the general \u0026ldquo;othello\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eThe four texts\u0026rsquo; Iagos can be treated in the same framework of \u0026ldquo;love\u0026rdquo;\u0026ndash;hatred, whether racial or otherwise, for Othello\u0026mdash;versus \u0026ldquo;respect\u0026rdquo;\u0026mdash;deference to the authority of Othello.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eShakesphere\u0026rsquo;s Othello: hates the guy, likes the 
general\n\u003cul\u003e\n\u003cli\u003eIago pretty much said as much \u0026ldquo;I hate the Moor\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eUnderstand the role of power Othello has: \u0026ldquo;In following him, I follow but myself\u0026rdquo;; evidently want to be Lieutenant for his power\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn the work, \u0026ldquo;Othello\u0026rdquo; v. \u0026ldquo;Moor\u0026rdquo; is effectively two different people \u0026ldquo;After some time, to abuse Othello\u0026rsquo;s ear \u0026hellip; The Moor is of a free and open nature,\u0026rdquo;. The former, the general, well-respected for his power by Iago, that demoted Cassio; the latter, the outcast \u003cem\u003eperson\u003c/em\u003e that actually did the betrayal and whom Iago hates.\u003c/p\u003e\n\u003ch2 id=\"sax-s-othello-iago-s-hatred-of-othello-the-person-is-drien-by-racism\"\u003eSax\u0026rsquo;s Othello: iago\u0026rsquo;s hatred of othello the person is drien by racism\u003c/h2\u003e\n\u003cp\u003eYes, though this lust for power provoked Iago to mess with Othello the person to get the powers of othello the general, he is definitely overtly racist to othello the person starting from the beginning.\u003c/p\u003e\n\u003cp\u003ePraises to Andrew Davies, managed to cram in a lot into the I hate the Moor speech.\u003c/p\u003e\n\u003cp\u003eHates the guy:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eovertly racist \u0026ldquo;You stupid, patronizing ape \u0026hellip; how very quaint, how very d\u0026mdash; sunday school\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;d\u0026mdash; sunday school\u0026rdquo;: white mistrel mocking AA worship =\u0026gt; hand gestures of Iago\u003c/li\u003e\n\u003cli\u003eN word\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ehe is basically Iago\u0026rsquo;s understudy \u0026ldquo;how very good for you to acknowledge what you owe to me, you owe me 
everything, everything!\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026mdash; for all the talk about love, it is \u0026ldquo;If I could find any whose brains were as big as their dicks, I’d be a happy man, eh?\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMirrors racism in Shakesphere: \u0026ldquo;The Moor \u0026hellip; will as tenderly be led by the nose \u0026hellip; as asses are.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eAgain, in lust with not with the guy but with his power: \u0026ldquo;It\u0026rsquo;s a shame really, he\u0026rsquo;s a good man. \u0026hellip; It going to take a bit longer, and its all going to end in broken hearts.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;It\u0026rsquo;s love. Simple as that\u0026rdquo; (close reading: love, perhaps against Othello, or perhaps his position)\u003c/p\u003e\n\u003ch2 id=\"deardon-s-iago-hatred-actually-useful-otherwise-iago-would-not-have-succeeded\"\u003eDeardon\u0026rsquo;s Iago: hatred actually useful\u0026mdash;-otherwise Iago would not have succeeded\u003c/h2\u003e\n\u003cp\u003eWhy is this racism necessary.\u003c/p\u003e\n\u003cp\u003eDeardon attempts an answer: Perhaps as a product of civil rights, or the jazz counteculture, the race component is functionally erased, and instead Deardon\u0026rsquo;s Iago is lusting over Desdemona. 
The film is one which Iago and Othello were ostensibly friends\u0026mdash;during the subtext perhaps more of a reflection for civil rights sentiment (especially via the counter-culture language of Jazz of the time) \u0026ldquo;apparently natural construct of a racially diverse \u0026hellip; sub-culture: \u0026rsquo;the [diverse racial archetypes] intermingle smoothly and fraternisation creates deep emotional pangs, rather than embarrassment\u0026rsquo;\u0026rdquo; (Kinematograph Weekly) (Burton and O\u0026rsquo;Sullivan)\u003c/p\u003e\n\u003cp\u003eCritics hated particularly the soft ending: \u0026ldquo;never for one moment succeeds in achieving anything like the power and persuasion of the original.\u0026rdquo; (Films and Filming) Why is the soft-ending? When Othello almost smoothered Desdemona, three people went and checked immediatley, and there was no Emilia\u0026rsquo;s \u0026ldquo;The Moor\u0026rsquo;s abused by some most villainous knave, Some base notorious knave\u0026rdquo;. She immediately doubted Iago, confusion was cleared, and all was well.\u003c/p\u003e\n\u003cp\u003eSo though Rodrigo ignited Iago\u0026rsquo;s scheme, racism dragged Iago\u0026rsquo;s scheme long enough for both to be dead.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"bhardwaj-s-othello-rodrigo-convinced-merge-between-two-othellos-setting-forward-motion\"\u003eBhardwaj\u0026rsquo;s Othello: Rodrigo Convinced Merge between two Othellos, setting forward motion\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://www.youtube.com/watch?v=zzgDHT3inzI\"\u003ehttps://www.youtube.com/watch?v=zzgDHT3inzI\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eIago provoked by Rodrigo jeering him: \u0026ldquo;no more of drowning, do you hear?\u0026rdquo; \u0026ldquo;I could jump into this river\u0026rdquo; \u0026hellip; \u0026ldquo;well jump! 
Don\u0026rsquo;t be a sissy [sic].\u0026rdquo; Then Rodrigo provoked Iago about how Iago was slighted by Othello; relegating Iago to a role in \u0026ldquo;Company Garden\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eIt is at this moment that the music swells, ominous vocal tones that reprises from the beginning titles of the film, of \u0026ldquo;title faintly visible images of what appear to be ancient scenes of combat.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Bhardwaj composes all his film scores and writes music and script simultaneously\u0026rdquo;, so the intentionality here is not to be missed. Motivated more by a masculine sense of revenge against \u003cstrong\u003eGeneral Othello\u003c/strong\u003e, Iago decided to take personal action against the hated \u003cstrong\u003eMoor\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eOdd choice, too, to relegate Iago to \u0026ldquo;company garden.\u0026rdquo; Turns out, Shakesphere defined it for us! \u0026ldquo;Our bodies are gardens, to the which our wills are gardeners; \u0026hellip; we have reason to cool our \u0026hellip; unbitted lusts; \u0026hellip; I take this, that you call love, to be a sect or scion.\u0026rdquo; Iago claims love is a \u0026ldquo;cutting\u0026rdquo; of unbitted lust, which he should cut. 
Metaphorically cutting Othello\u0026rsquo;s the person\u0026rsquo;s love for desdemona as if he\u0026rsquo;s mechanically cutting away some lust; all to achieve a goal for General Othello.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Othello should go mad\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"comparing-iago\"\u003eComparing Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNo Love, No respect | Direct objective: to get Othello\u0026rsquo;s position at \u003cstrong\u003ethe Met\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eYes Love, Yes Respect | Direct objective: to remove Desdemona from Othello\u0026rsquo;s control and start a \u003cstrong\u003eband\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eNo Love, Yes Respect | Direct objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader \u0026lt;\u0026gt; Shakesphere\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMissing: Yes Love, Yes Respect\u003c/p\u003e\n\u003ch2 id=\"actual-othello\"\u003eActual Othello\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIAGO: \u0026ldquo;No more of drowning, do you hear?\u0026rdquo; \u0026lt;\u0026gt; Omkara\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bhardwaj-s-iago\"\u003eBhardwaj\u0026rsquo;s Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDirect objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader\u003c/li\u003e\n\u003cli\u003eHidden objective: \u0026hellip;??? 
not sure not well motivated\u003c/li\u003e\n\u003cli\u003eDirect Downfall of Othello: \u0026ldquo;ocular proof\u0026rdquo; of jingly heirloom\u003c/li\u003e\n\u003cli\u003eMain Methods\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere beer to Cassio\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere making Othello overhear conversation with Cassio about Bianca to think its about Desdemona\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere convinces Desdemona to soothsay for Cassio\u003c/li\u003e\n\u003cli\u003eRandom bad omen about bird/snake?\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere Heirloom\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDistinctions\n\u003cul\u003e\n\u003cli\u003eProcess of manipulation is more toxically masculine instead of weird submissiveness\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I hate Moor\u0026rdquo; + drowning scene took place somewhat after being provoked by Rodrigo firing his jealousy after Iago fired Rodrigo\u0026rsquo;s jealousy about Desdemona\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eUnique shots/catchphrases/features\n\u003cul\u003e\n\u003cli\u003eInstances of violence/impending violence underscored by very cheerful music\n\u003cul\u003e\n\u003cli\u003eCheerful music as Othello beats up everybody in the rival gang\u003c/li\u003e\n\u003cli\u003eHappy cheerful party music as Cassio becomes drunk and gets demoted\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"sax-s-iago\"\u003eSax\u0026rsquo;s Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDirect objective: to get Othello\u0026rsquo;s position at \u003cstrong\u003ethe Met\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eHidden objective: [racism for]/[sexual desire for] Othello\u003c/li\u003e\n\u003cli\u003eDirect Downfall of Othello: A, B, and C for intimacy test for the 
\u003cstrong\u003erobe\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eMain methods\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere conversation to arise suspicion\u003c/li\u003e\n\u003cli\u003eHinted to Cassio that marriage is not genuine and provoked him\u003c/li\u003e\n\u003cli\u003eScrewed up Othello\u0026rsquo;s investigation of the constable\u003c/li\u003e\n\u003cli\u003e\u003cem\u003eDemanded intimacy test and A, B, AND C\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDistinctions\n\u003cul\u003e\n\u003cli\u003eWants Othello\u0026rsquo;s position, and probably his love\u003c/li\u003e\n\u003cli\u003eIs more explicitly lying (A, B, AND C)\u003c/li\u003e\n\u003cli\u003eIs more overtly racist: beginning conversation with Sinclair, the racist (\u0026ldquo;I hate the moor\u0026rdquo; proxy) rant\u003c/li\u003e\n\u003cli\u003eRelationship with Emilia seem less strained\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eUnique shots/catchphrases/features\n\u003cul\u003e\n\u003cli\u003eStaring up into the sky w/ shaft of light\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo; prior to manipulation\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;And that\u0026rsquo;s a promise\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"darden-s-iago\"\u003eDarden\u0026rsquo;s Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDirect objective: to remove Desdemona from Othello\u0026rsquo;s control and start a \u003cstrong\u003eband\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eHidden objective: to date Desdemona\u003c/li\u003e\n\u003cli\u003eDirect Downfall of Othello: [the \u003cstrong\u003ecigarette box\u003c/strong\u003e?] 
+ edited tape recording\u003c/li\u003e\n\u003cli\u003eMain Methods\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere gave weed to Cassio to cause him to make a scene\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere conversation to arise suspicion re: knowledge from Emilia\u003c/li\u003e\n\u003cli\u003eEngineered Desdemona\u0026rsquo;s song to be specifically pointed\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere \u0026ldquo;ocular proof\u0026rdquo;: cigarette case; also insinuating that Cassio\u0026rsquo;s weed came from Desdemona\u003c/li\u003e\n\u003cli\u003eDoctoring the tape to highlight Cassio\u0026rsquo;s supposed infidelity\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDistinctions\n\u003cul\u003e\n\u003cli\u003eIn love with Desdemona and had separate business motivations distinct from taking Othello/Cassio\u0026rsquo;s\u003c/li\u003e\n\u003cli\u003eDidn\u0026rsquo;t seem particularly racist, was previously friends with Othello?\u003c/li\u003e\n\u003cli\u003eIs more explicitly lying by doctoring tape, but words closer to Shakesphere\u0026rsquo;s Iago\u003c/li\u003e\n\u003cli\u003eDidn\u0026rsquo;t succeed\u003c/li\u003e\n\u003cli\u003eMost strained relationship with Emilia, \u0026ldquo;I love nobody. 
Don\u0026rsquo;t even love Jonny\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eUnique shots/catchphrases/features\n\u003cul\u003e\n\u003cli\u003eShot of Cousin Jonny / Rodrigo alone, in his home, isolated with light cast on him\u003c/li\u003e\n\u003cli\u003eFrenzied drum solo\u003c/li\u003e\n\u003cli\u003eMotivated somewhat by Lou, the financier\u003c/li\u003e\n\u003cli\u003eThe rising of stairs up and down the main factory as well as to a side building\u003c/li\u003e\n\u003cli\u003eWeed.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eClose reading Iago\u0026rsquo;s first drum solo \u003ca href=\"https://www.youtube.com/watch?v=fA-vKHOVDCw\"\u003ehttps://www.youtube.com/watch?v=fA-vKHOVDCw\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eIago: looks to the side, then at the camera directly\u003c/li\u003e\n\u003cli\u003eOthello: looks generally at the direction of the camera, but as camera pans in closes his eyes and look down and away\u003c/li\u003e\n\u003cli\u003eDesdemona: looks entirely to the side, uncaring\u003c/li\u003e\n\u003cli\u003eIago: looks down (like Othello\u0026rsquo;s ending shot), then looks up and to the side, cutting to\u0026hellip;\u003c/li\u003e\n\u003cli\u003eOthello: looking at the opposite side, seemingly \u0026ldquo;towards\u0026rdquo; Iago, then looks down\u003c/li\u003e\n\u003cli\u003eIago: as drum solo becomes more frenzied, (mildly extreme) close up staring dead into the camera\u003c/li\u003e\n\u003cli\u003e\u003cem\u003efrienzied drum soloing\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eIago: looks down, and then to the side, seemingly contemplating\u003c/li\u003e\n\u003cli\u003eDesdemona: stares dead ahead\u003c/li\u003e\n\u003cli\u003eOthello: looks to the side and down, hand holding head\u003c/li\u003e\n\u003cli\u003eIago: eyes pans across audience\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"d41d8c\"\u003e\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_film_analysis_outline/","tags":null,"title":"NUS-ENG401 Film Analysis Outline"},{"categories":null,"contents":"Literacy rates differ significantly between genders in Central Africa. In rural Nigeria, there is a 24.1% difference in literacy rates between men and women.\nIn Joys of Motherhood, Adankwo’s natural and implicitly differentiated treatment of Nhu Ego’s sons and daughters reflects the androcentrism in Nigerian society’s view of education; in the work, she asks Nhu Ego to not “forget the … twin [girl’s] bride prices will help out … boy’s school fees.” (Emecheta 127)\nNext Steps Token: s_4979_1d\nFollow this link for the next step.\n","html":"\u003cp\u003eLiteracy rates differ significantly between genders in Central Africa. In rural Nigeria, there is a 24.1% difference in literacy rates between men and women.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eJoys of Motherhood\u003c/em\u003e, Adankwo’s natural and implicitly differentiated treatment of Nhu Ego’s sons and daughters reflects the androcentrism in Nigerian society’s view of education; in the work, she asks Nhu Ego to not “forget the … twin [girl’s] bride prices will help out … boy’s school fees.” (Emecheta 127)\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_4979_1d\u003c/p\u003e\n\u003cp\u003eFollow \u003ca href=\"https://tinyurl.com/nuseng401giftbounce2\"\u003ethis link\u003c/a\u003e for the next step.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_2/","tags":null,"title":"NUS-ENG401 Gender and Education"},{"categories":null,"contents":"Welcome! The device of the station of birth plays a large part in all four of the works we read over the semester. 
In I, Tituba, the author grants Tituba renewed empowerment through her birth; in Black Shack Alley, Jose’s birth in the Alley forces him to leverage the racially unequal devices of the French regime to gain social advancement; Sophie’s trauma in Breath, Eyes, Memory is propagated by her violent conception—which results in her mother’s forced testing upon her; Joys of Motherhood’s Nnu Ego’s family is loving, yet with conservative values which forces a crippling sense of motherly duty that almost drove her to death. Birth, and challenging the station assigned at birth, is a fundamental value pervasive through the texts.\nThis game aims to explore some of the dynamics found in all four of the works, while exploring some aspects of Haitian, Martinican, or Nigerian culture.\nTo play the game, here are what you need to know\u0026ndash;\nThe game works like a CTF: through the game, you are hunting for game tokens that look like this: s_[numbers]_[numbersletters] You can validate whether or not the token is correct with the tool provided below Validate a Token! To check whether or not a token you received through the game is valid, use the utility below:\nValidate\nplease enter a token The Game Please go ahead to this link to get started.\n","html":"\u003ch2 id=\"welcome\"\u003eWelcome!\u003c/h2\u003e\n\u003cp\u003eThe device of the station of birth plays a large part in all four of the works we read over the semester. In I, Tituba, the author grants Tituba renewed empowerment through her birth; in Black Shack Alley, Jose’s birth in the Alley forces him to leverage the racially unequal devices of the French regime to gain social advancement; Sophie’s trauma in Breath, Eyes, Memory is propagated by her violent conception—which results in her mother’s forced testing upon her; Joys of Motherhood’s Nnu Ego’s family is loving, yet with conservative values which forces a crippling sense of motherly duty that almost drove her to death. 
Birth, and challenging the station assigned at birth, is a fundamental value pervasive through the texts.\u003c/p\u003e\n\u003cp\u003eThis game aims to explore some of the dynamics found in all four of the works, while exploring some aspects of Haitian, Martinican, or Nigerian culture.\u003c/p\u003e\n\u003cp\u003eTo play the game, here are what you need to know\u0026ndash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThe game works like a CTF: through the game, you are hunting for game tokens that look like this: \u003cstrong\u003es_[numbers]_[numbersletters]\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eYou can validate whether or not the token is correct with the tool provided below\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"validate-a-token\"\u003eValidate a Token!\u003c/h2\u003e\n\u003cp\u003eTo check whether or not a token you received through the game is valid, use the utility below:\u003c/p\u003e\n\u003cp\u003e\u003cinput id=\"token\" placeholder=\"s_0000_0e\"\u003e\u003c/input\u003e \u003cbutton id=\"validate\"\u003eValidate\u003c/button\u003e\u003c/p\u003e\n\u003cdiv id=\"result\" style=\"font-size: 13px\"\u003eplease enter a token\u003c/div\u003e\n\u003cscript\u003e\n function sumDigits(n) {\n let sum = 0;\n while (n) {\n digit = n % 10;\n sum += digit;\n n = (n - digit) / 10;\n }\n return sum;\n }\n\n $(\"#validate\").click(() =\u003e {\n let invalid = \"invalid token, sorry!\";\n let valid = \"valid token, congrats!\";\n let value = $(\"#token\").val().split(\"_\");\n if (value[0] != \"s\") {\n $(\"#result\").html(invalid);\n } else if (!isNaN(value[1])) {\n let sumVal = sumDigits(parseInt(value[1]));\n let mod18_str = (sumVal % 50117).toString(16);\n if (value[2] == mod18_str) $(\"#result\").html(valid);\n else $(\"#result\").html(invalid);\n }\n })\n\u003c/script\u003e\n\u003ch2 id=\"the-game\"\u003eThe Game\u003c/h2\u003e\n\u003cp\u003ePlease go ahead to \u003ca href=\"/posts/kbhnus_eng401_gift_1/\"\u003ethis link\u003c/a\u003e to get 
started.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_utility/","tags":null,"title":"NUS-ENG401 Gift Utility"},{"categories":null,"contents":"General Information Due Date Topic Important Documents 9/29 Lit. Devices I, Tituba Prompt In an interview, Maryse Conde explains, \u0026ldquo;I was attracted to write the particular story of Tituba because this woman was unjustly treated by history. I felt the need to give her a reality that was denied to her because of her color and her gender.\u0026rdquo; Choose one or two literary devices and explain how Conde uses it/them in the novel to give Tituba her subjecthood. Examples could be: narrative voice, allusion, irony, dialogue, etc.\nClaim Synthesis Quotes Bin Birth Determines Capacity That birth determines the capacity for one to do Evil\nThere was one thing, however, that I didn\u0026rsquo;t know: evil is a gift received at birth. There\u0026rsquo;s no acquiring it. Those of us who have not come into this world armed with spurs and fangs are losers in every combat. (73)\nMama Yaya highlights that misfortune lies in the center of life derived from birth\nMisfortune, as you know, is our constant companion. We are born with it, we lie with it, and we squabble with it for the same withered breast. It eats the codfish from our calabash. But we\u0026rsquo;re tough, us n\u0026mdash;! (85)\nBelieves that having choice in birth is what would make it fulfilling\n(Irony between \u0026ldquo;gift\u0026rdquo; and \u0026ldquo;choice\u0026rdquo;)\nI began to doubt seriously Mama Yaya\u0026rsquo;s basic conviction that life is a gift. Life would only be a gift if each of us could choose the womb that carried us. \u0026hellip; If one day I am born again, let it be in the steely army of conquerors! (120)\nTituba believes that she is born as a healer\nThe terror of these people seemed like an injustice to me. 
They should have greeted me with shouts of joy and welcome and presented me with a list of illnesses that I would have tried my utmost to cure. I was born to heal, not to frighten. (12)\n\u0026ldquo;Births\u0026rdquo; Other People Elizabeth Parris \u0026ldquo;reborn\u0026rdquo; after Tituba\u0026rsquo;s Care\nUp till then I had not called on the supernatural to care for Elizabeth Parris. \u0026hellip; hat night I decided to use my powers. \u0026hellip; In the morning the color returned to Goodwife Parris\u0026rsquo;s cheeks. She asked for a little water. Toward midday she managed to feed herself. And in the evening she went to sleep like a newborn babe. (45)\nThe \u0026ldquo;evil\u0026rdquo; of abortion transferred from Tituba into Betsy\nI made her swear not to tell anyone and at dusk I plunged her up to her neck in a liquid to which I had given all the properties of amniotic fluid. \u0026hellip; Plunging Betsey into this scalding hot bath, it seemed to me that these same hands, that not long ago had dealt death were now giving life, and I was purifying myself of the murder of my child. (63)\nHer upper lip curled up into an ugly pout, revealing her sick gums. \u0026ldquo;You, do good? You\u0026rsquo;re a Negress, Tituba! You can only do evil. You are evil itself.\u0026rdquo; \u0026hellip; \u0026ldquo;That bath you had me take; what was in it? The blood of a newborn baby that died from one of your spells?\u0026rdquo; I was aghast. (77)\nRebirth After Death (like the actual book) Tituba\u0026rsquo;s Freeing from Prison into Benjamin is Described an Rebirth\nHe smiled cynically. \u0026ldquo;A man who hasn\u0026rsquo;t got very much money. You know how much slaves are selling for at the present time? Twenty-five pounds!\u0026rdquo; Our conversation stopped there, but now I knew the fate awaiting me. Another master, another bondage. (120)\nThen with one skillful blow of the mallet he smashed my chains to pieces. He did the same thing with my wrists while I screamed. 
\u0026hellip; I screamed, and this scream, the terrified cry of a newborn baby, heralded my return to this world. I had to learn how to walk again. \u0026hellip; Few people have the misfortune to be born twice. (122)\nTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story begins only after death\nAnd that is the story of my life. Such a bitter, bitter story. My real story starts where this one leaves off and it has no end. (175)\nSuccessful rebirth only without birth\nI watched her grow up and stumble around on her shaky legs, exploring the pur- gatory of the plantation, finding her delight in the shape of a cloud, the drooping foliage of an ylang-ylang, or the taste of a bitter orange. \u0026hellip; A child I didn\u0026rsquo;t give birth to but whom I chose! What motherhood could be nobler! (177)\nMisc Book opens with the framing of her being born\nI was born from this act of aggression. From this act of hatred and contempt. (3)\nPackage insert praises death as something positive\n\u0026ldquo;Death is a porte whereby we pass to joye; Lyfe is a lake that drowneth all in payne \u0026ndash;John Harrington\u0026rdquo; (Cover Insert)\nPlans for Abortion\nThere is no happiness in motherhood for a slave. It is little more than the expulsion of an innocent baby, who will have no chance to change its fate, into a world of slavery and abjection\u0026hellip;. That night, my baby was carried out of my womb in a flow of black blood. I saw him wave his arms like a tadpole in distress and I burst into tears. (52)\nSubclaim Development Tituba Realizes Birth is Involuntary Birth into live is a deterministic process for which those being \u0026ldquo;born\u0026rdquo; have no agency over. For African folks, Mama Yaya claims that misfortune is one such deterministic factor of their birth. Although Mama Yaya disagrees, Tituba believes that life is not a gift unless it is deterministic (this is of course ironic, because you don\u0026rsquo;t choose your gifts.) 
Despite the indeterminism, Tituba believes that she is born as a healer She leverages (Re)Birth to change others, to poor results Perhaps as an attempt to help others control (\u0026ldquo;choose\u0026rdquo;) birth, she uses her power to reborn people; like\nElizabeth Parris Betsy Parris But Psych! Both of them turned on her. Especially Betsy Parris.\nAlso her child was aborted.\nThe work literally provides rebirth of Tituba and empowers her to give birth despite her abortion Tituba Herself raised her station against those of Benjamin. Yet, this was not voluntary (see quote in section) and designed by Condé in the story.\nTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story begins only after forcible death/\u0026ldquo;rebirth\u0026rdquo;, and she (author?) considers it nobel to give this new form of birth without giving birth (which didn\u0026rsquo;t happen in real world), but without the request of the born either.\nConclusiony bit So this whole book, beginning at her birth, covered by a celebration of alternative birth illustrates such a process of providing agency.\nThe Claim The motif of birth and rebirth plays an important role in Maryse Condé\u0026rsquo;s work I, Tituba. Despite Tituba\u0026rsquo;s own failed attempt at controlling the (re)birth of herself and others to better their fate in history, Condé offers Tituba a renewed empowerment in birth by both illustrating her \u0026ldquo;rebirth\u0026rdquo; and providing her a chance to elect a descendant she wasn\u0026rsquo;t originally able to bear.\n","html":"\u003ch2 id=\"general-information\"\u003eGeneral Information\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDue Date\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003eImportant Documents\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e9/29\u003c/td\u003e\n\u003ctd\u003eLit. 
Devices\u003c/td\u003e\n\u003ctd\u003eI, Tituba\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"prompt\"\u003ePrompt\u003c/h2\u003e\n\u003cp\u003eIn an interview, Maryse Conde explains, \u0026ldquo;I was attracted to write the particular story of Tituba because this woman was unjustly treated by history. I felt the need to give her a reality that was denied to her because of her color and her gender.\u0026rdquo; Choose one or two literary devices and explain how Conde uses it/them in the novel to \u003cstrong\u003e\u003cstrong\u003egive Tituba her subjecthood\u003c/strong\u003e\u003c/strong\u003e. Examples could be: narrative voice, allusion, irony, dialogue, etc.\u003c/p\u003e\n\u003ch2 id=\"claim-synthesis\"\u003eClaim Synthesis\u003c/h2\u003e\n\u003ch3 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h3\u003e\n\u003ch4 id=\"birth-determines-capacity\"\u003eBirth Determines Capacity\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eThat birth determines the capacity for one to do Evil\u003c/p\u003e\n\u003cp\u003eThere was one thing, however, that I didn\u0026rsquo;t know: evil is a gift received at birth. There\u0026rsquo;s no acquiring it. Those of us who have not come into this world armed with spurs and fangs are losers in every combat. (73)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eMama Yaya highlights that misfortune lies in the center of life derived from birth\u003c/p\u003e\n\u003cp\u003eMisfortune, as you know, is our constant companion. We are born with it, we lie with it, and we squabble with it for the same withered breast. It eats the codfish from our calabash. But we\u0026rsquo;re tough, us n\u0026mdash;! 
(85)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBelieves that having choice in birth is what would make it fulfilling\u003c/p\u003e\n\u003cp\u003e(Irony between \u0026ldquo;gift\u0026rdquo; and \u0026ldquo;choice\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eI began to doubt seriously Mama Yaya\u0026rsquo;s basic conviction that life is a gift. Life would only be a gift if each of us could choose the womb that carried us. \u0026hellip; If one day I am born again, let it be in the steely army of conquerors! (120)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTituba believes that she is born as a healer\u003c/p\u003e\n\u003cp\u003eThe terror of these people seemed like an injustice to me. They should have greeted me with shouts of joy and welcome and presented me with a list of illnesses that I would have tried my utmost to cure. I was born to heal, not to frighten. (12)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"births-other-people\"\u003e\u0026ldquo;Births\u0026rdquo; Other People\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eElizabeth Parris \u0026ldquo;reborn\u0026rdquo; after Tituba\u0026rsquo;s Care\u003c/p\u003e\n\u003cp\u003eUp till then I had not called on the supernatural to care for Elizabeth Parris. \u0026hellip; hat night I decided to use my powers. \u0026hellip; In the morning the color returned to Goodwife Parris\u0026rsquo;s cheeks. She asked for a little water. Toward midday she managed to feed herself. And in the evening she went to sleep like a newborn babe. 
(45)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eThe \u0026ldquo;evil\u0026rdquo; of abortion transferred from Tituba into Betsy\u003c/p\u003e\n\u003cp\u003eI made her swear not to tell anyone and at dusk I plunged her up to her neck in a liquid to which I had given all the properties of amniotic fluid. \u0026hellip; Plunging Betsey into this scalding hot bath, it seemed to me that these same hands, that not long ago had dealt death were now giving life, and I was purifying myself of the murder of my child. (63)\u003c/p\u003e\n\u003cp\u003eHer upper lip curled up into an ugly pout, revealing her sick gums. \u0026ldquo;You, do good? You\u0026rsquo;re a Negress, Tituba! You can only do evil. You are evil itself.\u0026rdquo; \u0026hellip; \u0026ldquo;That bath you had me take; what was in it? The blood of a newborn baby that died from one of your spells?\u0026rdquo; I was aghast. (77)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"rebirth-after-death--like-the-actual-book\"\u003eRebirth After Death (like the actual book)\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTituba\u0026rsquo;s Freeing from Prison into Benjamin is Described an Rebirth\u003c/p\u003e\n\u003cp\u003eHe smiled cynically. \u0026ldquo;A man who hasn\u0026rsquo;t got very much money. You know how much slaves are selling for at the present time? Twenty-five pounds!\u0026rdquo; Our conversation stopped there, but now I knew the fate awaiting me. Another master, another bondage. (120)\u003c/p\u003e\n\u003cp\u003eThen with one skillful blow of the mallet he smashed my chains to pieces. He did the same thing with my wrists while I screamed. \u0026hellip; I screamed, and this scream, the terrified cry of a newborn baby, heralded my return to this world. I had to learn how to walk again. \u0026hellip; Few people have the misfortune to be born twice. 
(122)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story begins only after death\u003c/p\u003e\n\u003cp\u003eAnd that is the story of my life. Such a bitter, bitter story. My real story starts where this one leaves off and it has no end. (175)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSuccessful rebirth only without birth\u003c/p\u003e\n\u003cp\u003eI watched her grow up and stumble around on her shaky legs, exploring the pur- gatory of the plantation, finding her delight in the shape of a cloud, the drooping foliage of an ylang-ylang, or the taste of a bitter orange. \u0026hellip; A child I didn\u0026rsquo;t give birth to but whom I chose! What motherhood could be nobler! (177)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"misc\"\u003eMisc\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBook opens with the framing of her being born\u003c/p\u003e\n\u003cp\u003eI was born from this act of aggression. From this act of hatred and contempt. (3)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ePackage insert praises death as something positive\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Death is a porte whereby we pass to joye; Lyfe is a lake that drowneth all in payne \u0026ndash;John Harrington\u0026rdquo; (Cover Insert)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ePlans for Abortion\u003c/p\u003e\n\u003cp\u003eThere is no happiness in motherhood for a slave. It is little more than the expulsion of an innocent baby, who will have no chance to change its fate, into a world of slavery and abjection\u0026hellip;. 
That night, my baby was carried out of my womb in a flow of black blood. I saw him wave his arms like a tadpole in distress and I burst into tears. (52)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"subclaim-development\"\u003eSubclaim Development\u003c/h3\u003e\n\u003ch4 id=\"tituba-realizes-birth-is-involuntary\"\u003eTituba Realizes Birth is Involuntary\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eBirth into live is a \u003ca href=\"#that-birth-determines-the-capacity-for-one-to-do-evil\"\u003edeterministic process\u003c/a\u003e for which those being \u0026ldquo;born\u0026rdquo; have no agency over.\u003c/li\u003e\n\u003cli\u003eFor African folks, Mama Yaya claims that \u003ca href=\"#mama-yaya-highlights-that-misfortune-lies-in-the-center-of-life-derived-from-birth\"\u003emisfortune is one such\u003c/a\u003e deterministic factor of their birth.\u003c/li\u003e\n\u003cli\u003eAlthough \u003ca href=\"#believes-that-having-choice-in-birth-is-what-would-make-it-fulfilling\"\u003eMama Yaya disagrees, Tituba believes that life is not a gift unless it is deterministic\u003c/a\u003e (this is of course ironic, because you don\u0026rsquo;t choose your gifts.)\u003c/li\u003e\n\u003cli\u003eDespite the indeterminism, \u003ca href=\"#tituba-believes-that-she-is-born-as-a-healer\"\u003eTituba believes that she is born as a healer\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"she-leverages--re--birth-to-change-others-to-poor-results\"\u003eShe leverages (Re)Birth to change others, to poor results\u003c/h4\u003e\n\u003cp\u003ePerhaps as an attempt to help others control (\u0026ldquo;choose\u0026rdquo;) birth, she uses her power to reborn people; like\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#elizabeth-parris-reborn-after-tituba-s-care\"\u003eElizabeth Parris\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#the-evil-of-abortion-transferred-from-tituba-into-betsy\"\u003eBetsy 
Parris\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBut Psych! Both of them turned on her. Especially \u003ca href=\"#the-evil-of-abortion-transferred-from-tituba-into-betsy\"\u003eBetsy Parris\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAlso her \u003ca href=\"#plans-for-abortion\"\u003echild was aborted\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"the-work-literally-provides-rebirth-of-tituba-and-empowers-her-to-give-birth-despite-her-abortion\"\u003eThe work literally provides rebirth of Tituba and empowers her to give birth despite her abortion\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#tituba-s-freeing-from-prison-into-benjamin-is-described-an-rebirth\"\u003eTituba Herself\u003c/a\u003e raised her station against those of Benjamin. Yet, this was not voluntary (see quote in section) and designed by Condé in the story.\u003c/p\u003e\n\u003cp\u003eTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story \u003ca href=\"#tituba-s-real-story-begins-only-after-death\"\u003ebegins only after forcible death/\u0026ldquo;rebirth\u0026rdquo;\u003c/a\u003e, and she (author?) considers it nobel to give this new form of birth \u003ca href=\"#successful-rebirth-only-without-birth\"\u003ewithout giving birth\u003c/a\u003e (which didn\u0026rsquo;t happen in real world), but without the request of the born either.\u003c/p\u003e\n\u003ch4 id=\"conclusiony-bit\"\u003eConclusiony bit\u003c/h4\u003e\n\u003cp\u003eSo this whole book, \u003ca href=\"#book-opens-with-the-framing-of-her-being-born\"\u003ebeginning at her birth\u003c/a\u003e, covered by a \u003ca href=\"#package-insert-praises-death-as-something-positive\"\u003ecelebration of alternative birth\u003c/a\u003e illustrates such a process of providing agency.\u003c/p\u003e\n\u003ch3 id=\"the-claim\"\u003eThe Claim\u003c/h3\u003e\n\u003cp\u003eThe motif of birth and rebirth plays an important role in Maryse Condé\u0026rsquo;s work \u003cem\u003eI, Tituba\u003c/em\u003e. 
Despite Tituba\u0026rsquo;s own failed attempt at controlling the (re)birth of herself and others to better their fate in history, Condé offers Tituba a renewed empowerment in birth by both illustrating her \u0026ldquo;rebirth\u0026rdquo; and providing her a chance to elect a descendant she wasn\u0026rsquo;t originally able to bear.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhi_tituba_essay_planning/","tags":null,"title":"NUS-ENG401 I, Tituba Essay Planning"},{"categories":null,"contents":"Joys of Motherhood highlights the plurality of duties for the reader women have to undertake in order to succeed in Nigerian society. Women represent 80% of agricultural labor in Nigeria—a dangerous job, yet is significantly underrepresented in knowledge-based work.\nPrior to gaining ownership to her own stall, Nhu Ego has to “spread her wares on the pavement” (Emecheta 113) selling goods in order to make ends meet—despite Nnaife’s money from employment which he often squanders.\nNext Steps Token: s_2827_13\nThe conditions of this duality of work is harsh. This is where this story leaves off. Tap on this link, and one of two things may happen\u0026mdash;you maybe directed back to this page, or you maybe redirected somewhere else. Unfortunately, neither of these paths lead to further advancement.\n","html":"\u003cp\u003e\u003cem\u003eJoys of Motherhood\u003c/em\u003e highlights the plurality of duties for the reader women have to undertake in order to succeed in Nigerian society. 
Women represent 80% of agricultural labor in Nigeria—a dangerous job, yet is significantly underrepresented in knowledge-based work.\u003c/p\u003e\n\u003cp\u003ePrior to gaining ownership to her own stall, Nhu Ego has to “spread her wares on the pavement” (Emecheta 113) selling goods in order to make ends meet—despite Nnaife’s money from employment which he often squanders.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_2827_13\u003c/p\u003e\n\u003cp\u003eThe conditions of this duality of work is harsh. This is where this story leaves off. Tap on \u003ca href=\"https://tinyurl.com/nuseng401giftbounce6\"\u003ethis link\u003c/a\u003e, and one of two things may happen\u0026mdash;you maybe directed back to this page, or you maybe redirected somewhere else. Unfortunately, neither of these paths lead to further advancement.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_6/","tags":null,"title":"NUS-ENG401 Many Hats"},{"categories":null,"contents":"Even if the education system provides a ticket for its successful students to gain social advancement, it is often difficult or even arbitrary. Access to education is also frequently dependent on race.\nIn Black Shack Alley, Zobel frames the value of schooling as a “gateway … to escape.” (Zobel) Zobel highlights that the main way to escape the oppression in the colonies is by leveraging the itself oppressive systems of education.\nNext Steps Token: s_7776_1b\nThe process of pursuing education takes a lot more effort than the steps before! Please locate the link to the next target by looking under the cabinet from which a puffer-fish hangs in our San Mateo campus.\n","html":"\u003cp\u003eEven if the education system provides a ticket for its successful students to gain social advancement, it is often difficult or even arbitrary. 
Access to education is also frequently dependent on race.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eBlack Shack Alley\u003c/em\u003e, Zobel frames the value of schooling as a “gateway … to escape.” (Zobel) Zobel highlights that the main way to escape the oppression in the colonies is by leveraging the itself oppressive systems of education.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_7776_1b\u003c/p\u003e\n\u003cp\u003eThe process of pursuing education takes a lot more effort than the steps before! Please locate the link to the next target by looking under the cabinet from which a puffer-fish hangs in our San Mateo campus.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_3/","tags":null,"title":"NUS-ENG401 Pursuing Education"},{"categories":null,"contents":" Quote Explanation of quote (\u0026ldquo;understanding lived experience\u0026rdquo;) Implication (\u0026ldquo;understanding Duets/Othello\u0026rdquo;) Sharpe Wake; Sears Duet\nWake, p 16: ANALYZE ON TOP, CONNECT HERE\nTo be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\nAnalysis\n\u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo; hidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form in the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom 
Broader Analysis Modern day opportunities available to African Americans may represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\n[set this up]: how exactly Sharpe and Sears are brought together.\nThe creation of the arbitrarily racial label of \u0026lsquo;Black\u0026rsquo; for people of African descent allows the arbitrary persecution due to the label\u0026rsquo;s unclear boundaries; furthermore, this persecution continues to modern day: \u0026lsquo;holding\u0026rsquo; a person into seemingly self-inflicted cycles of abuse where the only true way out is to embrace and assimilate into white society.\ndefine no-space here\n[define wake in the end]: to be awake is to be aware of this cycle, and be\nothello and mona are going through only one trauma each billy is going through two Duet: musical composition for two performers with equal importance to the piece.\nBilly as mother (incestruous) of Othello Pieta: motherly holding Jesus (p91) Egytion to my mother give (p93); Billy doing the murdering (p100) Adding Black voices to Othello dismantles Othello\nBillie, Amah, and Magi \u0026lt;=\u0026gt; Desdemona, Emilia, Bianca Vogel =\u0026gt; Desdemona dies; in this one Billy kills instead of dies\nCANADA: figure of a black man not leaving her. Lovers not together, paternal love highlighted.\n\u0026ldquo;Wake: unless you deal become awake of the oprressions agaisnt you and relying on each other, you will fall into the white-centric society and join the oppressiors as the only way to get out of the self-sustaining cycle holding you down.\u0026rdquo;\nslave trade engine: dragging forward ship\nRECLAIMING\nDuet, p 56: Black is a term with power to racialist: it has clear influence and no clear category It\u0026rsquo;s because I\u0026rsquo;m Black. 
When a clear won\u0026rsquo;t put the change into my held-out hand, I think it\u0026rsquo;s because I\u0026rsquo;m Black. \u0026hellip; Who called us Black anyway? It\u0026rsquo;s not a country, it\u0026rsquo;s not a racial category, its not even the color of my skin.\nAnalysis Sears highlights the practical constraints being \u0026ldquo;Black\u0026rdquo; caused Billie, yet at the same time cleanly rejects all practical associations of Blackness with actual qualities Practical hindrance of arbitrary label \u0026ldquo;who called us\u0026rdquo;\u0026mdash;us-them dynamic; highlighting the amorphousness of the other group, yet did not provide a racialized labels Broader Analysis The contrast between the explicit label \u0026ldquo;Black\u0026rdquo;, and Billie\u0026rsquo;s more general, amorphous term of \u0026ldquo;who\u0026rdquo; highlights the power of racialization While Black is not a clear descriptor category, Bilie shows that it can actually have much detrimental impact; and yet Bilie herself when referring to her oppressors simply ask \u0026ldquo;who\u0026rdquo; is it\u0026mdash;obverting the same problem for her opressors Wake, p 10 Vigilance, too, because any- and everywhere we are, medical and other professionals treat Black patients differently. \u0026hellip; Because they are believed to be less sensitive to pain, black people are forced to endure more pain.\nAnalysis Points out uneven treatment of African Americans simply because of the arbitrary label of \u0026ldquo;Black\u0026rdquo; \u0026ldquo;sensitive\u0026rdquo;: respond to \u0026ldquo;changes, signals, or influences\u0026rdquo; less \u0026ldquo;responsive\u0026rdquo;/can\u0026rsquo;t be influenced highlights sense of diminishment of the Black intellect, a sense of lethargic primitivism Broader Analysis The label of \u0026ldquo;Black\u0026rdquo; has helped create an image of primitivism, which, due to its indeterminism (i.e. 
there is no specific descriptor for \u0026ldquo;Black\u0026rdquo; or \u0026ldquo;Blackness\u0026rdquo;), makes it very easy to abuse to diminish a group of people.\nDuet, 66: attempts to free of the forced cycle results in separation from Blackness, as it seems Othello has done A black man afflicted with Negrophobia. He\u0026rsquo;s the one that wants to white-wash his life \u0026hellip; Brooker T. Uppermiddleclass III \u0026hellip; found in predominantly White neighborhoods. He refers to other Blacks as \u0026ldquo;them\u0026rdquo;\nAnalysis Brooker T. Washington: educator and orator; born into slavery and was able to eventually lead the Tuskegee institute III =\u0026gt; represents the multi-generation descendant; LAST NAME ERASURE: no longer carrying the herratage Morphing from Washinton\u0026rsquo;s idea of the \u0026ldquo;black elite\u0026rdquo; into white assimilation Broader Analysis Highlights the establishment of us/them dynamic, as a photo of Othello Functional similarity of Othello fighting \u0026ldquo;the Turks\u0026rdquo;, reducing again his enemy to another group and racializing them as well Othello functions as essentially a part of the venetian army, in a \u0026ldquo;predominantly white neighborhood\u0026rdquo; Duet, 31: Billie is forced to be stuck in a vicious cycle, a cycle which is propegated according to Wake only because of her blackness All her money goes up in smokes and writings that tell her she really ain\u0026rsquo;t out of her mind \u0026hellip; [otherwise] all the rot inside her would begin to boil, threaten to shoot out.\nAnalysis Double entere: \u0026ldquo;money goes up in smokes\u0026rdquo; \u0026mdash;- money vanishes + money is used to buy smokes \u0026ldquo;to tell\u0026rdquo;: personifiying money as something that influences Billie Bilie is trapped in a vicious cycle: Linguistically trapped \u0026mdash; Rot =\u0026gt;(boil)=\u0026gt; Smoke =\u0026gt;(prevents)=\u0026gt; Rot.\nBroader Analysis This is an exemplification for the 
\u0026ldquo;bound in no-space\u0026rdquo; which Sharpe highlights. Unlike the Othello in Shakespeare that pretty much brings his own downfall from Iago\u0026rsquo;s manipulation, Billie in the play acts more due to the inherent constraints of the system.\nWake, p 16: ANALYZE ON TOP, CONNECT HERE To be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\nAnalysis \u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo; hidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form in the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom Broader Analysis Modern day opportunities available to African Americans may represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\nWake p 21 In the wake, the semiotics of the slave ship continue: from the forced movements of the enslaved to the forced movements of the migrant and the refugee, to the regulation of Black people in North American streets and neighborhoods.\nAnalysis \u0026ldquo;semiotics\u0026rdquo;: the interpretation of signs and symbols the repetition of the word forced takes agency out of the actor Sharpe\u0026rsquo;s argument: forcibleness is a symbol for the remnants of the slave ship Broader 
Analysis Billy\u0026rsquo;s being forced by the system to perform her daily, self destructive actions instead of the emotional Othello we see in Shakespeare, Billy is being forced by the system not out of her own volition to continue her act.\n","html":"\u003cul\u003e\n\u003cli\u003eQuote\u003c/li\u003e\n\u003cli\u003eExplanation of quote (\u0026ldquo;understanding lived experience\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eImplication (\u0026ldquo;understanding Duets/Othello\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSharpe Wake; Sears Duet\u003c/p\u003e\n\u003cp\u003eWake, p 16: ANALYZE ON TOP, CONNECT HERE\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003eTo be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003e\u003cstrong\u003eAnalysis\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003ehidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ein the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eBroader Analysis\u003c/strong\u003e\nModern day opportunities available to African Americans may 
represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e[set this up]: how exactly Sharpe and Sears are brought together.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThe creation of the arbitrarily racial label of \u0026lsquo;Black\u0026rsquo; for people of African descent allows the arbitrary persecution due to the label\u0026rsquo;s unclear boundaries; furthermore, this persecution continues to modern day: \u0026lsquo;holding\u0026rsquo; a person into seemingly self-inflicted cycles of abuse where the only true way out is to embrace and assimilate into white society.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003edefine no-space here\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e[define wake in the end]: to be awake is to be aware of this cycle, and be\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eothello and mona are going through only one trauma each\u003c/li\u003e\n\u003cli\u003ebilly is going through two\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDuet: musical composition for two performers with equal importance to the piece.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBilly as mother (incestruous) of Othello\n\u003cul\u003e\n\u003cli\u003ePieta: motherly holding Jesus (p91)\u003c/li\u003e\n\u003cli\u003eEgytion to my mother give (p93);\u003c/li\u003e\n\u003cli\u003eBilly doing the murdering (p100)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAdding Black voices to Othello dismantles Othello\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBillie, Amah, and Magi \u0026lt;=\u0026gt; Desdemona, Emilia, Bianca\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eVogel =\u0026gt; Desdemona dies; in this one Billy kills instead of dies\u003c/p\u003e\n\u003cp\u003eCANADA: figure of a black man not leaving her. 
Lovers not together, paternal love highlighted.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Wake: unless you deal become awake of the oprressions agaisnt you and relying on each other, you will fall into the white-centric society and join the oppressiors as the only way to get out of the self-sustaining cycle holding you down.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eslave trade engine: dragging forward ship\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eRECLAIMING\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"duet-p-56-black-is-a-term-with-power-to-racialist-it-has-clear-influence-and-no-clear-category\"\u003eDuet, p 56: Black is a term with power to racialist: it has clear influence and no clear category\u003c/h2\u003e\n\u003cblockquote\u003e\n\u003cp\u003eIt\u0026rsquo;s because I\u0026rsquo;m Black. When a clear won\u0026rsquo;t put the change into my held-out hand, I think it\u0026rsquo;s because I\u0026rsquo;m Black. \u0026hellip; Who called us Black anyway? It\u0026rsquo;s not a country, it\u0026rsquo;s not a racial category, its not even the color of my skin.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"analysis\"\u003eAnalysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSears highlights the practical constraints being \u0026ldquo;Black\u0026rdquo; caused Billie, yet at the same time cleanly rejects all practical associations of Blackness with actual qualities\u003c/li\u003e\n\u003cli\u003ePractical hindrance of arbitrary label\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;who called us\u0026rdquo;\u0026mdash;us-them dynamic; highlighting the amorphousness of the other group, yet did \u003cem\u003enot\u003c/em\u003e provide a racialized labels\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe contrast between the explicit label \u0026ldquo;Black\u0026rdquo;, and Billie\u0026rsquo;s more general, amorphous term of \u0026ldquo;who\u0026rdquo; highlights the power of 
racialization\u003c/li\u003e\n\u003cli\u003eWhile Black is not a clear descriptor category, Bilie shows that it can actually have much detrimental impact; and yet Bilie herself when referring to her oppressors simply ask \u0026ldquo;who\u0026rdquo; is it\u0026mdash;obverting the same problem for her opressors\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"wake-p-10\"\u003eWake, p 10\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003eVigilance, too, because any- and everywhere we are, medical and other professionals treat Black patients differently. \u0026hellip; Because they are believed to be less sensitive to pain, black people are forced to endure more pain.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch4 id=\"analysis\"\u003eAnalysis\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ePoints out uneven treatment of African Americans simply because of the arbitrary label of \u0026ldquo;Black\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;sensitive\u0026rdquo;: respond to \u0026ldquo;changes, signals, or influences\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003eless \u0026ldquo;responsive\u0026rdquo;/can\u0026rsquo;t be influenced\u003c/li\u003e\n\u003cli\u003ehighlights sense of diminishment of the Black intellect, a sense of lethargic primitivism\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h4\u003e\n\u003cp\u003eThe label of \u0026ldquo;Black\u0026rdquo; has helped create an image of primitivism, which, due to its indeterminism (i.e. 
there is no specific descriptor for \u0026ldquo;Black\u0026rdquo; or \u0026ldquo;Blackness\u0026rdquo;), makes it very easy to abuse to diminish a group of people.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"duet-66-attempts-to-free-of-the-forced-cycle-results-in-separation-from-blackness-as-it-seems-othello-has-done\"\u003eDuet, 66: attempts to free of the forced cycle results in separation from Blackness, as it seems Othello has done\u003c/h2\u003e\n\u003cblockquote\u003e\n\u003cp\u003eA black man afflicted with Negrophobia. He\u0026rsquo;s the one that wants to white-wash his life \u0026hellip; Brooker T. Uppermiddleclass III \u0026hellip; found in predominantly White neighborhoods. He refers to other Blacks as \u0026ldquo;them\u0026rdquo;\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"analysis\"\u003eAnalysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBrooker T. Washington: educator and orator; born into slavery and was able to eventually lead the Tuskegee institute\n\u003cul\u003e\n\u003cli\u003eIII =\u0026gt; represents the multi-generation descendant; LAST NAME ERASURE: no longer carrying the herratage\u003c/li\u003e\n\u003cli\u003eMorphing from Washinton\u0026rsquo;s idea of the \u0026ldquo;black elite\u0026rdquo; into white assimilation\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eHighlights the establishment of us/them dynamic, as a photo of Othello\u003c/li\u003e\n\u003cli\u003eFunctional similarity of Othello fighting \u0026ldquo;the Turks\u0026rdquo;, reducing again his enemy to another group and racializing them as well\u003c/li\u003e\n\u003cli\u003eOthello functions as essentially a part of the venetian army, in a \u0026ldquo;predominantly white neighborhood\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"duet-31-billie-is-forced-to-be-stuck-in-a-vicious-cycle-a-cycle-which-is-propegated-according-to-wake-only-because-of-her-blackness\"\u003eDuet, 31: Billie is forced to be stuck in a vicious cycle, a cycle which is propegated according to Wake only because of her blackness\u003c/h2\u003e\n\u003cblockquote\u003e\n\u003cp\u003eAll her money goes up in smokes and writings that tell her she really ain\u0026rsquo;t out of her mind \u0026hellip; [otherwise] all the rot inside her would begin to boil, threaten to shoot out.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"analysis\"\u003eAnalysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDouble entere: \u0026ldquo;money goes up in smokes\u0026rdquo; \u0026mdash;- money vanishes + money is used to buy smokes\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;to tell\u0026rdquo;: personifiying money as something that influences Billie\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBilie is trapped in a vicious cycle: Linguistically trapped \u0026mdash; Rot =\u0026gt;(boil)=\u0026gt; Smoke =\u0026gt;(prevents)=\u0026gt; Rot.\u003c/p\u003e\n\u003ch3 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h3\u003e\n\u003cp\u003eThis is an exemplification for the \u0026ldquo;bound in no-space\u0026rdquo; which Sharpe highlights. 
Unlike the Othello in Shakesphere that pretty much brings his own downfall from Iago\u0026rsquo;s manipulation, Billie in the play acts more due to the the inherent constraints of the system.\u003c/p\u003e\n\u003ch3 id=\"wake-p-16-analyze-on-top-connect-here\"\u003eWake, p 16: ANALYZE ON TOP, CONNECT HERE\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003eTo be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch4 id=\"analysis\"\u003eAnalysis\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003ehidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ein the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h4\u003e\n\u003cp\u003eModern day opportunities available to African Americans may represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"wake-p-21\"\u003eWake p 21\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003eIn the wake, the 
semiotics of the slave ship continue: from the forced movements of the enslaved to the forced movements of the migrant and the refugee, to the regulation of Black people in North American streets and neighborhoods.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch4 id=\"analysis\"\u003eAnalysis\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;semiotics\u0026rdquo;: the interpretation of signs and symbols\n\u003cul\u003e\n\u003cli\u003ethe repetition of the word forced takes agency out of the actor\u003c/li\u003e\n\u003cli\u003eSharpe\u0026rsquo;s argument: forcibleness is a \u003cem\u003esymbol\u003c/em\u003e for the reminants of the slave ship\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h4\u003e\n\u003cp\u003eBilly\u0026rsquo;s being forced by the system to perform her daily, self destructive actions instead of the emotional Othello we see in Shakespeare, Billy is being forced by the system not out of her own volition to continue her act.\u003c/p\u003e\n\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_racialization_outline/","tags":null,"title":"NUS-ENG401 Racialization Outline"},{"categories":null,"contents":"Traditional values in Caribbean and African societies often place womens’ value in the context of other men. When women pursue independent careers such as midwives and healers, they could be called “witches.”\nMaryse Condé demonstrates this bias in the novel I, Tituba. She writes that “Yao’s love had transformed [Tituba]’s mother”, making her a “young woman.” (Condé 7) In the passage, the womanhood of Tituba’s mother is framed as only being granted when she encounters Yao; in contrast, Mama Yaya’s womanhood exists independently, yet she is viewed as a witch.\nNext Steps Token: s_6780_15\nYou now get to make a choice. 
You may either\u0026hellip;\nPursue independence You may choose to face the consequences of leveraging the harsh education system to attempt to achieve social advancement. To do so, follow this link.\nSeek domestic dependence Or, you may choose to follow domestic roles without continuing to pursue education. If so, follow this link.\n","html":"\u003cp\u003eTraditional values in Caribbean and African societies often place womens’ value in the context of other men. When women pursue independent careers such as midwives and healers, they could be called “witches.”\u003c/p\u003e\n\u003cp\u003eMaryse Condé demonstrates this bias in the novel \u003cem\u003eI, Tituba\u003c/em\u003e. She writes that “Yao’s love had transformed [Tituba]’s mother”, making her a “young woman.” (Condé 7) In the passage, the womanhood of Tituba’s mother is framed as only being granted when she encounters Yao; in contrast, Mama Yaya’s womanhood exists independently, yet she is viewed as a witch.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_6780_15\u003c/p\u003e\n\u003cp\u003eYou now get to make a choice. You may either\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"pursue-independence\"\u003ePursue independence\u003c/h3\u003e\n\u003cp\u003eYou may choose to face the consequences of leveraging the harsh education system to attempt to achieve social advancement. To do so, follow \u003ca href=\"/posts/kbhnus_eng401_gift_3/\"\u003ethis link\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"seek-domestic-dependence\"\u003eSeek domestic dependence\u003c/h3\u003e\n\u003cp\u003eOr, you may choose to follow domestic roles without continuing to pursue education. 
If so, follow \u003ca href=\"/posts/kbhnus_eng401_gift_6/\"\u003ethis link\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_4/","tags":null,"title":"NUS-ENG401 What is a Witch?"},{"categories":null,"contents":"Statement Suppose \\(U_1\\), \\(U_2\\), and \\(W\\) are subspaces of \\(V\\), such that:\n\\begin{equation} \\begin{cases} V = U_1 \\oplus W\\\\ V = U_2 \\oplus W \\end{cases} \\end{equation}\nProve or give a counterexample that \\(U_1=U_2\\)\nIntuition The statement is not true. The definition of direct sums makes it such that, \\(\\forall v \\in V\\), there exists a unique representation of \\(v\\) with \\(u_{1i}+w_{i} = v\\) for \\(u_{1j}\\in U_1, w_{j} \\in W\\) as well as another unique representation \\(u_{2i} + w_{i}=v\\) for \\(u_{2j} \\in U_{2}, w_{j} \\in W\\).\nHowever, the definition of direct sums doesn\u0026rsquo;t guarantee that the distinct unique representations are equivalent; although \\(V\\) can only be represented uniquely by EITHER a sum of \\(U_1+W\\) or \\(U_2+W\\), it does not mean that each \\(v \\in V\\) itself has only one unique representation.\nCounterexample In constructing a counterexample, we turn to the fact that the sums of two variables creates a third free variable; therefore, we can figure two distinct ways of creating a third, final free variable that construct an equivalent space.\nConstructing \\(U_1\\) as a subspace We begin with constructing:\n\\begin{equation} U_1= \\left\\{\\begin{pmatrix} x_1\\\\y_1\\\\2y_1 \\end{pmatrix}, x_1,y_1 \\in \\mathbb{F} \\right\\} \\end{equation}\nBy setting both free variables to \\(0\\), we construct the additive identity. Then:\n\\begin{equation} \\lambda \\begin{pmatrix} x_1 \\\\ y_1 \\\\ 2y_1 \\end{pmatrix} = \\begin{pmatrix} \\lambda x_1 \\\\ \\lambda y_1\\\\ 2(\\lambda y_1) \\end{pmatrix} \\end{equation}\nby multiplication in \\(\\mathbb{F}\\), scalar multiplication, commutativity, and associativity. 
We can show closure under addition by inheriting the operation in \\(\\mathbb{F}\\) as well as applying distributive to the factor of \\(2\\).\nTherefore, we show that \\(U_1\\) is a subspace of \\(\\mathbb{F}^{3}\\).\nConstructing \\(U_2\\) as a subspace Then, we construct:\n\\begin{equation} U_2=\\left\\{\\begin{pmatrix} x_1 \\\\ y_1 \\\\ 0 \\end{pmatrix}, x_1,y_1\\in \\mathbb{F} \\right\\} \\end{equation}\nWe again have \\(0\\) by setting free variables to create the additive identity. Addition and scalar multiplication is closed by inheriting them from \\(\\mathbb{F}\\) (and the fact that \\(0\\) is the additive inverse and therefore \\(\\lambda 0 = 0\\)).\nTherefore, \\(U_2\\) is a subspace as well in \\(\\mathbb{F}^{3}\\).\nConstructing \\(W\\) as a subspace Finally, we have:\n\\begin{equation} W = \\left\\{\\begin{pmatrix} 0 \\\\ 0 \\\\z_1 \\end{pmatrix}, z_1\\in \\mathbb{F} \\right\\} \\end{equation}\nBy setting \\(z_1=0\\), we have the additive identity. As with above, addition and scalar multiplication is closed through inheritance and that \\(\\lambda 0=0\\).\nConstructing Sum of Subsets Let\u0026rsquo;s construct:\n\\begin{equation} U_1+W = V \\end{equation}\nTake \\(u_1 \\in U_1, w \\in W\\), attempting to construct a \\(v\\in V\\), we have that:\n\\begin{equation} \\begin{pmatrix} x_{1} \\\\ y_1 \\\\ 2y_1 \\end{pmatrix} + \\begin{pmatrix} 0 \\\\ 0 \\\\ z_1 \\end{pmatrix} = \\begin{pmatrix} x_1 \\\\ y_1 \\\\ 2y_1+z_1 \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\\\ c \\end{pmatrix} \\end{equation}\nConstructing Direct Sum For all vectors in \\(\\mathbb{F}^{3}\\), this is an equivalence with 3 free variables and 3 expressions\u0026mdash;rendering each vector in \\(\\mathbb{F}^{3}\\) to have a representation by \\(U_1+W\\). 
We can see this also with the unique \\(0\\) test:\nWe see that for:\n\\begin{equation} 0 \\in U_1+W \\end{equation}\nTo solve for some \\(u_1 \\in U, w \\in W : u_1+w = 0\\) we have that:\n\\begin{equation} \\begin{pmatrix} x_{1} \\\\ y_1 \\\\ 2y_1 \\end{pmatrix} + \\begin{pmatrix} 0 \\\\ 0 \\\\ z_1 \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 0 \\\\ 0 \\end{pmatrix} \\end{equation}\nwhere the first vector is in \\(U_1\\) and the second is in \\(W\\). The first two expressions tell us that \\(x_1=y_1=0\\); the final equation requires that \\(2y_1+z_1=0+z_1=0\\Rightarrow z_1=0\\) .\nTherefore, the only way to write \\(0\\) is to take each element in the sum to \\(0\\) (i.e. in this case \\(u_1=w=0 \\implies u_1+w = 0\\)), making the above a direct sum.\nTherefore:\n\\begin{equation} U_1 \\oplus W = V \\end{equation}\nIn almost the same manner, we can show that:\n\\begin{equation} U_2\\oplus W = V \\end{equation}\nThat, for some \\(u_2\\in U_2, w \\in W, v \\in V\\):\n\\begin{equation} \\begin{pmatrix} x_1\\\\y_1\\\\0 \\end{pmatrix} + \\begin{pmatrix} 0 \\\\ 0 \\\\ z_1 \\end{pmatrix} = \\begin{pmatrix} x_1\\\\y_1\\\\z_1 \\end{pmatrix} \\end{equation}\nfor the first vector in \\(U_2\\), the second in \\(W\\). In fact, this is the statement made in example 1.41.\nCreating the Counterexample Finally, we have that:\n\\begin{equation} \\left\\{\\begin{pmatrix} x_1 \\\\ y_1 \\\\ 2y_1 \\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\} \\neq\\left\\{\\begin{pmatrix} x_1 \\\\ y_1 \\\\ 0 \\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\} \\end{equation}\n\\(\\forall y_1 \\neq 0\\) in the first expression. Therefore, \\(U_1 \\neq U_2\\), finishing the counterexample. 
\\(\\blacksquare\\)\n","html":"\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSuppose \\(U_1\\), \\(U_2\\), and \\(W\\) are subspaces of \\(V\\), such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV = U_1 \\oplus W\\\\\nV = U_2 \\oplus W\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProve or give a counterexample that \\(U_1=U_2\\)\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eThe statement is not true. The definition of direct sums makes it such that, \\(\\forall v \\in V\\), there exists a unique representation of \\(v\\) with \\(u_{1i}+w_{i} = v\\) for \\(u_{1j}\\in U_1, w_{j} \\in W\\) as well as another unique representation \\(u_{2i} + w_{i}=v\\) for \\(u_{2j} \\in U_{2}, w_{j} \\in W\\).\u003c/p\u003e\n\u003cp\u003eHowever, the definition of direct sums doesn\u0026rsquo;t guarantee that the distinct unique representations are equivalent; although \\(V\\) can only be represented uniquely by EITHER a sum of \\(U_1+W\\) or \\(U_2+W\\), it does not mean that each \\(v \\in V\\) itself has only one unique representation.\u003c/p\u003e\n\u003ch2 id=\"counterexample\"\u003eCounterexample\u003c/h2\u003e\n\u003cp\u003eIn constructing a counterexample, we turn to the fact that the sums of two variables creates a third free variable; therefore, we can figure two distinct ways of creating a third, final free variable that construct an equivalent space.\u003c/p\u003e\n\u003ch3 id=\"constructing-u-1-as-a-subspace\"\u003eConstructing \\(U_1\\) as a subspace\u003c/h3\u003e\n\u003cp\u003eWe begin with constructing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1= \\left\\{\\begin{pmatrix}\nx_1\\\\y_1\\\\2y_1\n\\end{pmatrix}, x_1,y_1 \\in \\mathbb{F} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy setting both free variables to \\(0\\), we construct the additive identity. 
Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix} = \\begin{pmatrix}\n\\lambda x_1 \\\\ \\lambda y_1\\\\ 2(\\lambda y_1)\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby multiplication in \\(\\mathbb{F}\\), scalar multiplication, commutativity, and associativity. We can show closure under addition by inheriting the operation in \\(\\mathbb{F}\\) as well as applying distributive to the factor of \\(2\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we show that \\(U_1\\) is a subspace of \\(\\mathbb{F}^{3}\\).\u003c/p\u003e\n\u003ch3 id=\"constructing-u-2-as-a-subspace\"\u003eConstructing \\(U_2\\) as a subspace\u003c/h3\u003e\n\u003cp\u003eThen, we construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_2=\\left\\{\\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 0\n\\end{pmatrix}, x_1,y_1\\in \\mathbb{F} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe again have \\(0\\) by setting free variables to create the additive identity. Addition and scalar multiplication is closed by inheriting them from \\(\\mathbb{F}\\) (and the fact that \\(0\\) is the additive inverse and therefore \\(\\lambda 0 = 0\\)).\u003c/p\u003e\n\u003cp\u003eTherefore, \\(U_2\\) is a subspace as well in \\(\\mathbb{F}^{3}\\).\u003c/p\u003e\n\u003ch3 id=\"constructing-w-as-a-subspace\"\u003eConstructing \\(W\\) as a subspace\u003c/h3\u003e\n\u003cp\u003eFinally, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nW = \\left\\{\\begin{pmatrix}\n0 \\\\ 0 \\\\z_1\n\\end{pmatrix}, z_1\\in \\mathbb{F} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy setting \\(z_1=0\\), we have the additive identity. 
As with above, addition and scalar multiplication is closed through inheritance and that \\(\\lambda 0=0\\).\u003c/p\u003e\n\u003ch3 id=\"constructing-sum-of-subsets\"\u003eConstructing Sum of Subsets\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1+W = V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTake \\(u_1 \\in U_1, w \\in W\\), attempting to construct a \\(v\\in V\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_{1} \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix} + \\begin{pmatrix}\n0 \\\\ 0 \\\\ z_1\n\\end{pmatrix} = \\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 2y_1+z_1\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b \\\\ c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"constructing-direct-sum\"\u003eConstructing Direct Sum\u003c/h3\u003e\n\u003cp\u003eFor all vectors in \\(\\mathbb{F}^{3}\\), this is an equivalence with 3 free variables and 3 expressions\u0026mdash;rendering each vector in \\(\\mathbb{F}^{3}\\) to have a representation by \\(U_1+W\\). We can see this also with the unique \\(0\\) test:\u003c/p\u003e\n\u003cp\u003eWe see that for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\in U_1+W\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo solve for some \\(u_1 \\in U, w \\in W : u_1+w = 0\\) we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_{1} \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix} + \\begin{pmatrix}\n0 \\\\ 0 \\\\ z_1\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 0 \\\\ 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the first vector is in \\(U_1\\) and the second is in \\(W\\). The first two expressions tell us that \\(x_1=y_1=0\\); the final equation requires that \\(2y_1+z_1=0+z_1=0\\Rightarrow z_1=0\\) .\u003c/p\u003e\n\u003cp\u003eTherefore, the only way to write \\(0\\) is to take each element in the sum to \\(0\\) (i.e. 
in this case \\(u_1=w=0 \\implies u_1+w = 0\\)), making the above a direct sum.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\oplus W = V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn almost the same manner, we can show that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_2\\oplus W = V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat, for some \\(u_2\\in U_2, w \\in W, v \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_1\\\\y_1\\\\0\n\\end{pmatrix} + \\begin{pmatrix}\n0 \\\\ 0 \\\\ z_1\n\\end{pmatrix} = \\begin{pmatrix}\nx_1\\\\y_1\\\\z_1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor the first vector in \\(U_2\\), the second in \\(W\\). In fact, this is the statement made in example \u003ccode\u003e1.41\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"creating-the-counterexample\"\u003eCreating the Counterexample\u003c/h3\u003e\n\u003cp\u003eFinally, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\left\\{\\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\} \\neq\\left\\{\\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 0\n\\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\forall y_1 \\neq 0\\) in the first expression. Therefore, \\(U_1 \\neq U_2\\), finishing the counterexample. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_1_c_proof_preso/","tags":null,"title":"NUS-MATH530 1.C Problem 23"},{"categories":null,"contents":"Claim Proof or give a counter example for the statement that:\n\\begin{align} \\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3) \\end{align}\nCounterexample This statement is false.\nTake the following three subspaces of \\(\\mathbb{F}^{2}\\):\n\\begin{align} U_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\ U_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\ U_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{align}\nsubspace check All \\(U_1\\), \\(U_2\\), \\(U_3\\) are in \\(\\mathbb{F}^{2}\\).\nzero Zero exists in all by setting free variables to \\(0\\)\naddition For \\(U_1\\) \u0026mdash;\n\\begin{equation} \\mqty(a_1 \\\\ 0) + \\mqty(a_2 \\\\ 0) = \\mqty(a_1+a_2 \\\\ 0) \\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\end{equation}\nand, by the same token, addition is closed for \\(U_2\\).\nFor \\(U_3\\) \u0026mdash;\n\\begin{equation} \\mqty(c_1 \\\\ c_1) + \\mqty(c_2 \\\\ c_2) = \\mqty(c_1+c_2 \\\\ c_1+c_2) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{equation}\nscalar multiplication For \\(U_1\\) \u0026mdash;\n\\begin{equation} \\lambda \\mqty(a \\\\ 0) = \\mqty(\\lambda a \\\\ 0) \\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\end{equation}\nand, by the same token, scalar multiplication is closed for \\(U_2\\).\nFor \\(U_3\\) \u0026mdash;\n\\begin{equation} \\lambda \\mqty(c \\\\ c) = \\mqty(\\lambda c \\\\ \\lambda c) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{equation}\nconstructing the counterexample Let us calculate the value of both sides of:\n\\begin{align} \\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) 
- \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3) \\end{align}\nRecall that:\n\\begin{align} U_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\ U_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\ U_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{align}\nleft side Let\u0026rsquo;s first construct:\n\\begin{equation} U_1 + U_2 + U_3 \\end{equation}\nBy definition:\n\\begin{equation} U_1 + U_2 + U_3 = \\qty{u_1+u_2+u_3: u_j\\in U_j} \\end{equation}\nTherefore, taking a sample from each results as:\n\\begin{equation} u_1+u_2+u_3 = \\mqty(a \\\\ 0) + \\mqty(0 \\\\ b) + \\mqty(c \\\\c) = \\mqty(a+c \\\\ b +c) \\end{equation}\nThis creates two free variables for slots, meaning:\n\\begin{equation} U_1+U_2+U_3 = \\mathbb{F}^{2} \\end{equation}\nSo: \\(\\dim \\qty(U_1+U_2+U_3)=2\\)\nright side dimension of the subspaces\nLet us construct a basis for each of these spaces to figure their dimension.\nFor \\(U_1\\), \\(\\qty{\\mqty(1 \\\\ 0)}\\). We see that scaling the one vector in this basis will construct all vectors in \\(\\mathbb{F}^{2}\\) for which the second coordinate will be \\(0\\) \u0026mdash; spanning \\(U_1\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_1 = 1\\).\nBy almost the same token, for \\(U_2\\), \\(\\qty{\\mqty(0 \\\\ 1)}\\). This makes also \\(\\dim U_2=1\\).\nFor \\(U_3\\), we have \\(\\qty{\\mqty(1 \\\\ 1)}\\). Scaling this one vector will construct all vectors in \\(\\mathbb{F}^{2}\\) for which both coordinates are the same \u0026mdash; spanning \\(U_3\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_3 = 1\\).\nThis renders all three subspaces have dimension \\(1\\).\ndimension of the unions\nThese subspaces were picked because of a surprising convenience. 
Their unions are all the zero vector!\n\\begin{equation} U_1 \\cap U_2 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)} \\end{equation}\nThis is because \\(a=0\\), \\(b=0\\) respectively in order to satisfy both generators.\nSimilarly\n\\begin{equation} U_1 \\cap U_3 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)} \\end{equation}\nTo satisfy both generators, \\(a=c\\) for the top coordinate, \\(c=0\\) for the bottom coordinate, so \\(a=c=0\\).\nBy a similar token:\n\\begin{equation} U_2 \\cap U_3 = \\qty{\\mqty(0 \\\\ 0)} \\end{equation}\nWe established before that the span of \\(\\qty{}\\) (which is declared linearly independent) to be \\(\\qty{0}\\), so we see that the dimensions of all three required unions as \\(0\\) (as an empty list has length \\(0\\).)\nconstructing the expression for the right side\nWe have that:\n\\begin{equation} \\dim U_j = 1, j \\in \\qty{1,2,3} \\end{equation}\nAnd that:\n\\begin{equation} \\dim U_{j} \\cap U_{k} = 0 , j,k \\in \\{1,2,3\\} \\end{equation}\nfrom above.\nThis makes\u0026mdash;\n\\begin{align} \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\ =1\u0026amp;+1+1-0-0-0+0 \\\\ =3 \\end{align}\nshowing the counterexample We have now that:\n\\begin{equation} \\dim(U_1+U_2+U_3) = 2 \\end{equation}\nBut:\n\\begin{align} \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\ =3 \\end{align}\nYet \\(2 \\neq 3\\).\nSo:\n\\begin{align} \\dim(U_1+U_2+U_3) \\neq \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\ \\end{align}\nFinishing 
the counter example. \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"claim\"\u003eClaim\u003c/h2\u003e\n\u003cp\u003eProof or give a counter example for the statement that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"counterexample\"\u003eCounterexample\u003c/h2\u003e\n\u003cp\u003eThis statement is false.\u003c/p\u003e\n\u003cp\u003eTake the following three subspaces of \\(\\mathbb{F}^{2}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\\nU_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\\nU_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"subspace-check\"\u003esubspace check\u003c/h3\u003e\n\u003cp\u003eAll \\(U_1\\), \\(U_2\\), \\(U_3\\) are in \\(\\mathbb{F}^{2}\\).\u003c/p\u003e\n\u003ch4 id=\"zero\"\u003ezero\u003c/h4\u003e\n\u003cp\u003eZero exists in all by setting free variables to \\(0\\)\u003c/p\u003e\n\u003ch4 id=\"addition\"\u003eaddition\u003c/h4\u003e\n\u003cp\u003eFor \\(U_1\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_1 \\\\ 0) + \\mqty(a_2 \\\\ 0) = \\mqty(a_1+a_2 \\\\ 0) \\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, by the same token, addition is closed for \\(U_2\\).\u003c/p\u003e\n\u003cp\u003eFor \\(U_3\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(c_1 \\\\ c_1) + \\mqty(c_2 \\\\ c_2) = \\mqty(c_1+c_2 \\\\ c_1+c_2) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"scalar-multiplication\"\u003escalar multiplication\u003c/h4\u003e\n\u003cp\u003eFor \\(U_1\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\mqty(a \\\\ 0) = \\mqty(\\lambda a \\\\ 0) 
\\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, by the same token, scalar multiplication is closed for \\(U_2\\).\u003c/p\u003e\n\u003cp\u003eFor \\(U_3\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\mqty(c \\\\ c) = \\mqty(\\lambda c \\\\ \\lambda c) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"constructing-the-counterexample\"\u003econstructing the counterexample\u003c/h3\u003e\n\u003cp\u003eLet us calculate the value of both sides of:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\\nU_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\\nU_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{align}\u003c/p\u003e\n\u003ch4 id=\"left-side\"\u003eleft side\u003c/h4\u003e\n\u003cp\u003eLet\u0026rsquo;s first construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 + U_2 + U_3\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy definition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 + U_2 + U_3 = \\qty{u_1+u_2+u_3: u_j\\in U_j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, taking a sample from each results as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_1+u_2+u_3 = \\mqty(a \\\\ 0) + \\mqty(0 \\\\ b) + \\mqty(c \\\\c) = \\mqty(a+c \\\\ b +c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis creates two free variables for slots, meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1+U_2+U_3 = \\mathbb{F}^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo: \\(\\dim \\qty(U_1+U_2+U_3)=2\\)\u003c/p\u003e\n\u003ch4 id=\"right-side\"\u003eright 
side\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edimension of the subspaces\u003c/p\u003e\n\u003cp\u003eLet us construct a basis for each of these spaces to figure their dimension.\u003c/p\u003e\n\u003cp\u003eFor \\(U_1\\), \\(\\qty{\\mqty(1 \\\\ 0)}\\). We see that scaling the one vector in this basis will construct all vectors in \\(\\mathbb{F}^{2}\\) for which the second coordinate will be \\(0\\) \u0026mdash; spanning \\(U_1\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_1 = 1\\).\u003c/p\u003e\n\u003cp\u003eBy almost the same token, for \\(U_2\\), \\(\\qty{\\mqty(0 \\\\ 1)}\\). This makes also \\(\\dim U_2=1\\).\u003c/p\u003e\n\u003cp\u003eFor \\(U_3\\), we have \\(\\qty{\\mqty(1 \\\\ 1)}\\). Scaling this one vector will construct all vectors in \\(\\mathbb{F}^{2}\\) for which both coordinates are the same \u0026mdash; spanning \\(U_3\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_3 = 1\\).\u003c/p\u003e\n\u003cp\u003eThis renders all three subspaces have dimension \\(1\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edimension of the unions\u003c/p\u003e\n\u003cp\u003eThese subspaces were picked because of a surprising convenience. 
Their unions are all the zero vector!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\cap U_2 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is because \\(a=0\\), \\(b=0\\) respectively in order to satisfy both generators.\u003c/p\u003e\n\u003cp\u003eSimilarly\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\cap U_3 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo satisfy both generators, \\(a=c\\) for the top coordinate, \\(c=0\\) for the bottom coordinate, so \\(a=c=0\\).\u003c/p\u003e\n\u003cp\u003eBy a similar token:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_2 \\cap U_3 = \\qty{\\mqty(0 \\\\ 0)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe established before that the span of \\(\\qty{}\\) (which is declared linearly independent) to be \\(\\qty{0}\\), so we see that the dimensions of all three required unions as \\(0\\) (as an empty list has length \\(0\\).)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003econstructing the expression for the right side\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_j = 1, j \\in \\qty{1,2,3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_{j} \\cap U_{k} = 0 , j,k \\in \\{1,2,3\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efrom above.\u003c/p\u003e\n\u003cp\u003eThis makes\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\\n=1\u0026amp;+1+1-0-0-0+0 
\\\\\n=3\n\\end{align}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"showing-the-counterexample\"\u003eshowing the counterexample\u003c/h3\u003e\n\u003cp\u003eWe have now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim(U_1+U_2+U_3) = 2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBut:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\\n=3\n\\end{align}\u003c/p\u003e\n\u003cp\u003eYet \\(2 \\neq 3\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim(U_1+U_2+U_3) \\neq \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinishing the counter example. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_2_c_problem_17/","tags":null,"title":"NUS-MATH530 2.C Problem 17"},{"categories":null,"contents":"Statement Support \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nProof Given injectivity Given an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nWe begin with some statements.\nRecall that, a linear map called injective when \\(Tv=Tu \\implies v=u\\) Recall also that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\) Motivating \\(S\\) We show that we can indeed create a function \\(S\\) by the injectivity of \\(T\\). 
Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\nWLOG consider two vectors \\(a,b \\in V\\).\nCreating \\(S\\) Define a function \\(S:W\\to V\\) in the following manner:\n\\begin{equation} S(v) = a \\mid Ta = v \\end{equation}\nDemonstrating that \\(S\\) is a function\nSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\n\\(Sv = a \\mid Ta=v\\) \\(Su = b \\mid Tb=u\\) If \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\nFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\). From prior, we have \\(v=u\\) From the two statements above, we have \\(v=u \\implies Ta=Tb\\) Lastly, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\) Hence demonstrating\n","html":"\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSupport \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003ch3 id=\"given-injectivity\"\u003eGiven injectivity\u003c/h3\u003e\n\u003cp\u003eGiven an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003cp\u003eWe begin with some statements.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRecall that, a linear map called injective when \\(Tv=Tu \\implies v=u\\)\u003c/li\u003e\n\u003cli\u003eRecall also that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"motivating-s\"\u003eMotivating \\(S\\)\u003c/h4\u003e\n\u003cp\u003eWe show that we can indeed create a function \\(S\\) by the injectivity of \\(T\\). 
Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\u003c/p\u003e\n\u003cp\u003eWLOG consider two vectors \\(a,b \\in V\\).\u003c/p\u003e\n\u003ch4 id=\"creating-s\"\u003eCreating \\(S\\)\u003c/h4\u003e\n\u003cp\u003eDefine a function \\(S:W\\to V\\) in the following manner:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS(v) = a \\mid Ta = v\n\\end{equation}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eDemonstrating that \\(S\\) is a function\u003c/p\u003e\n\u003cp\u003eSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(Sv = a \\mid Ta=v\\)\u003c/li\u003e\n\u003cli\u003e\\(Su = b \\mid Tb=u\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\).\u003c/li\u003e\n\u003cli\u003eFrom prior, we have \\(v=u\\)\u003c/li\u003e\n\u003cli\u003eFrom the two statements above, we have \\(v=u \\implies Ta=Tb\\)\u003c/li\u003e\n\u003cli\u003eLastly, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHence demonstrating\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20-1/","tags":null,"title":"NUS-MATH530 3.B Problem 20"},{"categories":null,"contents":"Statement Support \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). 
Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nProof Given injectivity Given an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nCreating \\(S\\) Define a relation \\(S:range\\ T\\to V\\) in the following manner:\n\\begin{equation} S(v) = a \\mid Ta = v \\end{equation}\nDemonstrating that \\(S\\) is a function We show that there are no two possible choices for \\(a\\), and therefore that \\(S\\) is a function, by the injectivity of \\(T\\). Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\nSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\n\\(Sv = a \\mid Ta=v\\) \\(Su = b \\mid Tb=u\\) If \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\nFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\). From prior, we have \\(v=u\\) From the two statements above, we have \\(v=u \\implies Ta=Tb\\) Recall now that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\nTherefore, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\). Hence demonstrating the desired quality that shows \\(S\\) as a function.\nDemonstrating that \\(S\\) is a linear map The linearity \\(S\\) actually simply inherits the linearity of \\(T\\), which is defined to be a linear map.\nAdditivity:\n\\begin{align} Sv+Su \u0026amp;= (a \\mid Ta =v) + (b \\mid Tb =u) \\\\ \u0026amp;= (a+b) \\mid Ta+Tb = (v+u) \\\\ \u0026amp;= (a+b) \\mid T(a+b) = (v+u) \\\\ \u0026amp;= x \\mid Tx = (v+u) \\\\ \u0026amp;= S(v+u) \\end{align}\nHomogenity is shown in a similar fashion. We can therefore conclude that \\(S \\in \\mathcal{L}(range\\ T, V)\\).\nNote on the codomain of \\(S\\) Note that we desire \\(S \\in \\mathcal{L}(W,V),\\ i.e.\\ S:W\\to V\\), And yet, as it stands, \\(S: range\\ T \\to W\\). 
Fortunately, as \\(range\\ T\\) is a subspace of \\(W\\) (as ranges are subspaces of the codomain), we can leverage Axler 3.A-E11 (Sasha\u0026rsquo;s Proof, \u0026ldquo;maps to subspaces can be extended to the whole space\u0026rdquo;) to arbitrary extend \\(S\\) to \\(S:W\\to V\\).\nIt turns out that where the \u0026ldquo;extended\u0026rdquo; basis vectors gets mapped doesn\u0026rsquo;t matter. We only care about \\(S\\) insofar as its compositional behavior with \\(T\\).\nDemonstrating that \\(S\\) has the properties we desire We desire that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nRecall that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\). We now show that \\(ST\\) acts like the identity map.\nWLOG take \\(v \\in V\\).\nLet \\(Tv=a\\). Let \\(Sa = u\\). Based on the definition of \\(S\\) (that \\(Sx = y \\mid Ty=x\\), \u0026ldquo;\\(S\\) is the inverse map\u0026rdquo;), we have that \\(Tu=a\\). Recall once again that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\nWe now have that \\(Tu=a=Tv\\), therefore, because \\(T\\) is given injective, \\(u=v\\).\nWe have show WLOG that \\((ST)v = S(Tv) =Sa = u=v\\). Therefore \\((ST)v=v\\), making \\(ST\\) an identity map \\(ST:V\\to V\\). Lastly, as the product of linear maps are themselves a linear map, \\(ST=I\\in \\mathcal{L}(V,V)\\)\nConclusion Having constructed the existence of \\(S\\) based on the required properties of \\(T\\), we show that given an injective \\(T \\in \\mathcal{L}(V,W)\\), have an \\(S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\), as desired.\nGiven \\(S\\) Given some \\(T \\in \\mathcal{L}(V,W)\\) and that \\(\\exists S \\in \\mathcal{L}(W,V): ST=I \\in \\mathcal{L}(V,V)\\), we desire that \\(T\\) is injective. 
Fortunately, we essentially just reverse the logic of the last section in the last part of the proof.\nRecall that a linear map called injective when \\(Tv=Tu \\implies v=u\\). Suppose for the sake of contradiction that \\(\\exists u,v: Tv=Tu\\) but \\(u\\neq v\\).\nLet \\(Tv=Tu=a\\) Let \\(Sa=b\\) Therefore: \\((ST)v=(ST)u=S(a)=b\\). Just to reiterate, this means that we have:\n\\((ST)v=b\\implies Iv=b\\) \\((ST)u=b \\implies Iu=b\\) Therefore, we have that \\(Iv=Iu\\) for distinct \\(v,u\\), which is absurd. Having reached contradiction, we have that \\(Tu=Tv\\implies u=v\\), reaching the definition of injectivity for \\(T\\). \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSupport \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003ch3 id=\"given-injectivity\"\u003eGiven injectivity\u003c/h3\u003e\n\u003cp\u003eGiven an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003ch4 id=\"creating-s\"\u003eCreating \\(S\\)\u003c/h4\u003e\n\u003cp\u003eDefine a relation \\(S:range\\ T\\to V\\) in the following manner:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS(v) = a \\mid Ta = v\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"demonstrating-that-s-is-a-function\"\u003eDemonstrating that \\(S\\) is a function\u003c/h4\u003e\n\u003cp\u003eWe show that there are no two possible choices for \\(a\\), and therefore that \\(S\\) is a function, by the injectivity of \\(T\\). 
Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\u003c/p\u003e\n\u003cp\u003eSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(Sv = a \\mid Ta=v\\)\u003c/li\u003e\n\u003cli\u003e\\(Su = b \\mid Tb=u\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\).\u003c/li\u003e\n\u003cli\u003eFrom prior, we have \\(v=u\\)\u003c/li\u003e\n\u003cli\u003eFrom the two statements above, we have \\(v=u \\implies Ta=Tb\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eRecall now that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\u003c/p\u003e\n\u003cp\u003eTherefore, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\). Hence demonstrating the desired quality that shows \\(S\\) as a function.\u003c/p\u003e\n\u003ch4 id=\"demonstrating-that-s-is-a-linear-map\"\u003eDemonstrating that \\(S\\) is a linear map\u003c/h4\u003e\n\u003cp\u003eThe linearity \\(S\\) actually simply inherits the linearity of \\(T\\), which is defined to be a linear map.\u003c/p\u003e\n\u003cp\u003eAdditivity:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nSv+Su \u0026amp;= (a \\mid Ta =v) + (b \\mid Tb =u) \\\\\n\u0026amp;= (a+b) \\mid Ta+Tb = (v+u) \\\\\n\u0026amp;= (a+b) \\mid T(a+b) = (v+u) \\\\\n\u0026amp;= x \\mid Tx = (v+u) \\\\\n\u0026amp;= S(v+u)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eHomogenity is shown in a similar fashion. We can therefore conclude that \\(S \\in \\mathcal{L}(range\\ T, V)\\).\u003c/p\u003e\n\u003ch4 id=\"note-on-the-codomain-of-s\"\u003eNote on the codomain of \\(S\\)\u003c/h4\u003e\n\u003cp\u003eNote that we desire \\(S \\in \\mathcal{L}(W,V),\\ i.e.\\ S:W\\to V\\), And yet, as it stands, \\(S: range\\ T \\to W\\). 
Fortunately, as \\(range\\ T\\) is a subspace of \\(W\\) (as ranges are subspaces of the codomain), we can leverage Axler 3.A-E11 (Sasha\u0026rsquo;s Proof, \u0026ldquo;maps to subspaces can be extended to the whole space\u0026rdquo;) to arbitrary extend \\(S\\) to \\(S:W\\to V\\).\u003c/p\u003e\n\u003cp\u003eIt turns out that where the \u0026ldquo;extended\u0026rdquo; basis vectors gets mapped doesn\u0026rsquo;t matter. We only care about \\(S\\) insofar as its compositional behavior with \\(T\\).\u003c/p\u003e\n\u003ch4 id=\"demonstrating-that-s-has-the-properties-we-desire\"\u003eDemonstrating that \\(S\\) has the properties we desire\u003c/h4\u003e\n\u003cp\u003eWe desire that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003cp\u003eRecall that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\). We now show that \\(ST\\) acts like the identity map.\u003c/p\u003e\n\u003cp\u003eWLOG take \\(v \\in V\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLet \\(Tv=a\\).\u003c/li\u003e\n\u003cli\u003eLet \\(Sa = u\\). Based on the definition of \\(S\\) (that \\(Sx = y \\mid Ty=x\\), \u0026ldquo;\\(S\\) is the inverse map\u0026rdquo;), we have that \\(Tu=a\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eRecall once again that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\u003c/p\u003e\n\u003cp\u003eWe now have that \\(Tu=a=Tv\\), therefore, because \\(T\\) is given injective, \\(u=v\\).\u003c/p\u003e\n\u003cp\u003eWe have show WLOG that \\((ST)v = S(Tv) =Sa = u=v\\). Therefore \\((ST)v=v\\), making \\(ST\\) an identity map \\(ST:V\\to V\\). 
Lastly, as the product of linear maps are themselves a linear map, \\(ST=I\\in \\mathcal{L}(V,V)\\)\u003c/p\u003e\n\u003ch4 id=\"conclusion\"\u003eConclusion\u003c/h4\u003e\n\u003cp\u003eHaving constructed the existence of \\(S\\) based on the required properties of \\(T\\), we show that given an injective \\(T \\in \\mathcal{L}(V,W)\\), have an \\(S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"given-s\"\u003eGiven \\(S\\)\u003c/h3\u003e\n\u003cp\u003eGiven some \\(T \\in \\mathcal{L}(V,W)\\) and that \\(\\exists S \\in \\mathcal{L}(W,V): ST=I \\in \\mathcal{L}(V,V)\\), we desire that \\(T\\) is injective. Fortunately, we essentially just reverse the logic of the last section in the last part of the proof.\u003c/p\u003e\n\u003cp\u003eRecall that a linear map called injective when \\(Tv=Tu \\implies v=u\\). Suppose for the sake of contradiction that \\(\\exists u,v: Tv=Tu\\) but \\(u\\neq v\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLet \\(Tv=Tu=a\\)\u003c/li\u003e\n\u003cli\u003eLet \\(Sa=b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore: \\((ST)v=(ST)u=S(a)=b\\). Just to reiterate, this means that we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\((ST)v=b\\implies Iv=b\\)\u003c/li\u003e\n\u003cli\u003e\\((ST)u=b \\implies Iu=b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore, we have that \\(Iv=Iu\\) for distinct \\(v,u\\), which is absurd. Having reached contradiction, we have that \\(Tu=Tv\\implies u=v\\), reaching the definition of injectivity for \\(T\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20/","tags":null,"title":"NUS-MATH530 3.B Problem 20"},{"categories":null,"contents":"Chapter 4 discussion with Lachlan 4.2 False.\nThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p = m\\}\\) is not closed under addition. 
You can add two \\(m\\) degree polynomials and get something that\u0026rsquo;s not \\(m\\) degrees:\n\\begin{equation} (z^{m} + 1) - z^{m} = 1 \\end{equation}\n4.3 False.\nThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p\\ even\\}\\) is not closed also under addition, for the same reason:\n\\begin{equation} (z^{m} + z^{m-1} + 1) - (z^{m} + 1) = z^{m-1} \\end{equation}\nOne Chapter 5 Exercise 5.A.5 Suppose \\(T \\in \\mathcal{L}(V)\\), prove that the intersection of every collection of \\(V\\) that is invariant under \\(T\\) is invariant under \\(T\\)\nLet \\(U_1 \\dots U_{n}\\) be invariant subspaces under \\(T\\).\nThat is:\n\\begin{equation} T u_{j} \\in U_{j} \\end{equation}\nWe desire that:\n\\begin{align} Tu \\in \\bigcap U_{j}\\ |\\ u \\in \\bigcap U_{j} \\end{align}\nWLOG, treat \\(u \\in \\bigcap U_{j}\\) as \\(u \\in U_{j}\\). Now, \\(Tu \\in U_{j}\\). This holds \\(\\forall U_{j}\\). Therefore, \\(Tu \\in \\forall U_{j}\\). So \\(Tu \\in \\bigcap U_{j}\\).\nHence, the intersection of invariant subspaces are invariant as well.\n","html":"\u003ch2 id=\"chapter-4-discussion-with-lachlan\"\u003eChapter 4 discussion with Lachlan\u003c/h2\u003e\n\u003ch3 id=\"4-dot-2\"\u003e4.2\u003c/h3\u003e\n\u003cp\u003eFalse.\u003c/p\u003e\n\u003cp\u003eThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p = m\\}\\) is not closed under addition. 
You can add two \\(m\\) degree polynomials and get something that\u0026rsquo;s not \\(m\\) degrees:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(z^{m} + 1) - z^{m} = 1\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"4-dot-3\"\u003e4.3\u003c/h3\u003e\n\u003cp\u003eFalse.\u003c/p\u003e\n\u003cp\u003eThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p\\ even\\}\\) is not closed also under addition, for the same reason:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(z^{m} + z^{m-1} + 1) - (z^{m} + 1) = z^{m-1}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"one-chapter-5-exercise\"\u003eOne Chapter 5 Exercise\u003c/h2\u003e\n\u003ch3 id=\"5-dot-a-dot-5\"\u003e5.A.5\u003c/h3\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), prove that the intersection of every collection of \\(V\\) that is invariant under \\(T\\) is invariant under \\(T\\)\u003c/p\u003e\n\u003cp\u003eLet \\(U_1 \\dots U_{n}\\) be invariant subspaces under \\(T\\).\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT u_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nTu \\in \\bigcap U_{j}\\ |\\ u \\in \\bigcap U_{j}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWLOG, treat \\(u \\in \\bigcap U_{j}\\) as \\(u \\in U_{j}\\). Now, \\(Tu \\in U_{j}\\). This holds \\(\\forall U_{j}\\). Therefore, \\(Tu \\in \\forall U_{j}\\). So \\(Tu \\in \\bigcap U_{j}\\).\u003c/p\u003e\n\u003cp\u003eHence, the intersection of invariant subspaces are invariant as well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_a_and_discussion/","tags":null,"title":"NUS-MATH530 5.A and Discussion"},{"categories":null,"contents":"Suppose \\(V = U \\oplus W\\), where \\(U\\) and \\(W\\) are nonzero subspaces of \\(V\\). Define \\(P \\in \\mathcal{L}(V)\\) by \\(P(u+w) = u\\) for \\(u \\in U\\), \\(w \\in W\\). 
Find all eigenvalues and eigenvectors of \\(P\\).\nSolutions:\n\\(\\lambda = 1\\), \\(v = u \\in U\\) \\(\\lambda = 0\\), \\(v = w \\in W\\) For \\(\\lambda\\) to be an eigenvalue of \\(P\\), we have to have:\n\\begin{equation} Pv = \\lambda v \\end{equation}\nMeaning, for WLOG \\(v = u+w\\):\n\\begin{align} \u0026amp;Pv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = \\lambda (u+w) \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w \\end{align}\nNow, let\u0026rsquo;s rewrite this expression to equal to \\(0\\) to take advantage of the fact that \\(V = U \\oplus W\\).\n\\begin{align} \u0026amp;u = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; 0 = (\\lambda -1) u + \\lambda w \\end{align}\nNow, recall that a sum of subsets in a direct sum if and only if the only way to write \\(0\\) is for each of the elements of the sums to be \\(0\\). In this case, it means that:\n\\begin{equation} \\begin{cases} (\\lambda -1) u = 0 \\\\ \\lambda w = 0 \\end{cases} \\end{equation}\nWe have two cases here: either \\(w=0\\) or \\(u=0\\).\nAside: why can\u0026rsquo;t \\(u = w = 0\\)? Suppose for the sake of contradiction let\u0026rsquo;s take \\(u=0, w=0\\). Then, \\(Pv = \\lambda v\\), so \\(v=u+w\\), and so \\(v=0\\). This would make \\(v\\) no longer an eigenvector, by definition of eigenvector; this also makes \\(\\lambda\\) no longer an eigenvalue. Hence, one of \\(u\\) or \\(w\\) is not \\(0\\).\n\\(w=0\\) We have that \\(w=0\\). 
Replacing that in the above expression, we have that:\n\\begin{align} \u0026amp;Pv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u + 0 \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u \\end{align}\nFrom this expression, or the top one from before \\((\\lambda -1 ) u = 0\\), we have that \\(\\lambda = 1\\).\nFinally, then, we have:\n\\begin{align} Pv \u0026amp;= \\lambda v \\\\ \u0026amp;= v \\end{align}\nAny valid solution for \\(v\\) is an eigenvector.\nSo:\n\\begin{align} \u0026amp;Pv = v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = v \\\\ \\Rightarrow\\ \u0026amp; u = v \\end{align}\nHence, all \\(u \\in U\\) is an eigenvector of \\(P\\) with eigenvalue \\(1\\).\n\\(u=0\\) We now have that \\(u=0\\). So, we have that:\n\\begin{align} \u0026amp;Pv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; 0 = \\lambda w \\end{align}\nFrom this expression, or the bottom one from \\(\\lambda w = 0\\), we have that \\(\\lambda = 0\\).\nFinally, then, we have:\n\\begin{align} Pv \u0026amp;= \\lambda v \\\\ \u0026amp;= 0 \\end{align}\nAny valid solution for \\(v\\) is an eigenvector.\nSo:\n\\begin{align} \u0026amp;Pv = 0 \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = 0 \\\\ \\Rightarrow\\ \u0026amp; u = 0 \\end{align}\nRecall now that \\(v = u+w\\), so \\(v = 0 +w\\), making \\(v = w\\).\nHence, all \\(w \\in W\\) is an eigenvector of \\(P\\) with eigenvalue \\(0\\).\n","html":"\u003cp\u003eSuppose \\(V = U \\oplus W\\), where \\(U\\) and \\(W\\) are nonzero subspaces of \\(V\\). Define \\(P \\in \\mathcal{L}(V)\\) by \\(P(u+w) = u\\) for \\(u \\in U\\), \\(w \\in W\\). 
Find all eigenvalues and eigenvectors of \\(P\\).\u003c/p\u003e\n\u003cp\u003eSolutions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\lambda = 1\\), \\(v = u \\in U\\)\u003c/li\u003e\n\u003cli\u003e\\(\\lambda = 0\\), \\(v = w \\in W\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eFor \\(\\lambda\\) to be an eigenvalue of \\(P\\), we have to have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, for WLOG \\(v = u+w\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = \\lambda (u+w) \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, let\u0026rsquo;s rewrite this expression to equal to \\(0\\) to take advantage of the fact that \\(V = U \\oplus W\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;u = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; 0 = (\\lambda -1) u + \\lambda w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, recall that a sum of subsets in a direct sum if and only if the only way to write \\(0\\) is for each of the elements of the sums to be \\(0\\). In this case, it means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n(\\lambda -1) u = 0 \\\\\n\\lambda w = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have two cases here: either \\(w=0\\) or \\(u=0\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: why can\u0026rsquo;t \\(u = w = 0\\)? Suppose for the sake of contradiction let\u0026rsquo;s take \\(u=0, w=0\\). Then, \\(Pv = \\lambda v\\), so \\(v=u+w\\), and so \\(v=0\\). This would make \\(v\\) no longer an eigenvector, by definition of eigenvector; this also makes \\(\\lambda\\) no longer an eigenvalue. Hence, one of \\(u\\) or \\(w\\) is not \\(0\\).\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"w-0\"\u003e\\(w=0\\)\u003c/h2\u003e\n\u003cp\u003eWe have that \\(w=0\\). 
Replacing that in the above expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u + 0 \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFrom this expression, or the top one from before \\((\\lambda -1 ) u = 0\\), we have that \\(\\lambda = 1\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nPv \u0026amp;= \\lambda v \\\\\n\u0026amp;= v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAny valid solution for \\(v\\) is an eigenvector.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = v \\\\\n\\Rightarrow\\ \u0026amp; u = v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eHence, all \\(u \\in U\\) is an eigenvector of \\(P\\) with eigenvalue \\(1\\).\u003c/p\u003e\n\u003ch2 id=\"u-0\"\u003e\\(u=0\\)\u003c/h2\u003e\n\u003cp\u003eWe now have that \\(u=0\\). 
So, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; 0 = \\lambda w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFrom this expression, or the bottom one from \\(\\lambda w = 0\\), we have that \\(\\lambda = 0\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nPv \u0026amp;= \\lambda v \\\\\n\u0026amp;= 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAny valid solution for \\(v\\) is an eigenvector.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = 0 \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = 0 \\\\\n\\Rightarrow\\ \u0026amp; u = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall now that \\(v = u+w\\), so \\(v = 0 +w\\), making \\(v = w\\).\u003c/p\u003e\n\u003cp\u003eHence, all \\(w \\in W\\) is an eigenvector of \\(P\\) with eigenvalue \\(0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_14/","tags":null,"title":"NUS-MATH530 5.A Problem 14"},{"categories":null,"contents":"Warmup: 35\nSuppose \\(V\\) is finite dimensional, \\(T \\in \\mathcal{L}(V)\\) and \\(U\\) is invariant under \\(T\\). Prove each eigenvalue of \\(T / U\\) is an eigenvalue of \\(T\\).\nNow, \\(\\lambda\\) is an eigenvalue of \\(T / U\\). That is:\n\\begin{equation} Tv + U = \\lambda v + U \\end{equation}\nMeaning:\n\\begin{equation} (T-\\lambda I) v \\in U, \\forall v \\in V \\end{equation}\nSuppose for the sake of contradiction \\(\\lambda\\) is not an eigenvalue of \\(T\\). This means no \\(\\lambda\\) such that \\(Tv = \\lambda v\\); specifically, that means also no \\(\\lambda\\) such that \\(T|_{u} u = \\lambda u\\). 
Now, that means \\(T|_{u} - \\lambda I\\) is invertible given finite dimensional \\(V\\).\nThe previous statement means that \\((T|_{u} - \\lambda I)\\) is subjective across \\(u\\):\n\\begin{equation} \\forall v, \\exists u: (T-\\lambda I)v = (T|_{u}-\\lambda I) u \\end{equation}\nAnd so:\n\\begin{equation} Tv - \\lambda v = Tu - \\lambda u \\end{equation}\nFinally, then:\n\\begin{equation} T(v-u) = \\lambda (v-u) \\end{equation}\nNow, \\(v + U\\) being an eigenvector of \\(T / U\\) requires that \\(v + U \\neq 0\\), which means \\(v \\not \\in U\\). And so, \\(v \\neq u\\) meaning \\(v-u \\neq 0\\). Hence, the above expression demonstrates \\(\\lambda\\) to be an eigenvalue of \\(T\\), reaching contradiction. \\(\\blacksquare\\)\nNow: 36\nRemoving finite-dimensional from the requirements above, demonstrate the result above breaks.\nLet \\(V = \\mathcal{P}(\\mathbb{F})\\) and let \\(T\\) be differentiation. Now, let \\(U\\) be \\(P_{2}(\\mathbb{F})\\). Now:\n\\begin{equation} T / U (v + U) = \\lambda v + U \\end{equation}\nlet \\(v \\in \\mathcal{P}_{3}(\\mathbb{F})\\). Now, then, \\(T / U (v + U) = Tv + U\\), with \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\). Hence, \\(T / U (v + U) = Tv + U = 0 + U\\). 
This makes \\(0\\) an eigenvalue and \\(u \\in \\mathcal{P}_{2}(\\mathbb{F})\\) eigenvectors.\nOf course this does not hold for \\(T\\) in general as all \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\) are not identically \\(0\\).\nHaving shown a counter-example, \\(\\blacksquare\\)\nDo we have finite-dimensions?\n\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not injective\u0026mdash;\n\\(T\\) being not injective means that \\(null\\ T\\) has more than just the zero vector.\nHence:\n\\begin{equation} \\exists v: Tv = 0 = 0 v \\end{equation}\nThat would make all nonzero \\(v \\in null\\ T\\) eigenvectors and \\(0\\) an eigenvalue.\n\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not surjective\u0026mdash;\n\\(T\\) being not surjective means that \\(range\\ T \\subset V\\) strictly. So then \\(T|_{range\\ T}\\) is an operator so \\(range\\ T\\) is an invariant subspace under \\(T\\).\nEither way, we have that an eigenvalue exist.\n","html":"\u003cp\u003eWarmup: 35\u003c/p\u003e\n\u003cp\u003eSuppose \\(V\\) is finite dimensional, \\(T \\in \\mathcal{L}(V)\\) and \\(U\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\). Prove each eigenvalue of \\(T / U\\) is an eigenvalue of \\(T\\).\u003c/p\u003e\n\u003cp\u003eNow, \\(\\lambda\\) is an eigenvalue of \\(T / U\\). That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv + U = \\lambda v + U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(T-\\lambda I) v \\in U, \\forall v \\in V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSuppose for the sake of contradiction \\(\\lambda\\) is not an eigenvalue of \\(T\\). This means no \\(\\lambda\\) such that \\(Tv = \\lambda v\\); specifically, that means also no \\(\\lambda\\) such that \\(T|_{u} u = \\lambda u\\). 
Now, that means \\(T|_{u} - \\lambda I\\) is invertible given finite dimensional \\(V\\).\u003c/p\u003e\n\u003cp\u003eThe previous statement means that \\((T|_{u} - \\lambda I)\\) is subjective across \\(u\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall v, \\exists u: (T-\\lambda I)v = (T|_{u}-\\lambda I) u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv - \\lambda v = Tu - \\lambda u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(v-u) = \\lambda (v-u)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, \\(v + U\\) being an eigenvector of \\(T / U\\) requires that \\(v + U \\neq 0\\), which means \\(v \\not \\in U\\). And so, \\(v \\neq u\\) meaning \\(v-u \\neq 0\\). Hence, the above expression demonstrates \\(\\lambda\\) to be an eigenvalue of \\(T\\), reaching contradiction. \\(\\blacksquare\\)\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNow: 36\u003c/p\u003e\n\u003cp\u003eRemoving finite-dimensional from the requirements above, demonstrate the result above breaks.\u003c/p\u003e\n\u003cp\u003eLet \\(V = \\mathcal{P}(\\mathbb{F})\\) and let \\(T\\) be differentiation. Now, let \\(U\\) be \\(P_{2}(\\mathbb{F})\\). Now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT / U (v + U) = \\lambda v + U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet \\(v \\in \\mathcal{P}_{3}(\\mathbb{F})\\). Now, then, \\(T / U (v + U) = Tv + U\\), with \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\). Hence, \\(T / U (v + U) = Tv + U = 0 + U\\). 
This makes \\(0\\) an eigenvalue and \\(u \\in \\mathcal{P}_{2}(\\mathbb{F})\\) eigenvectors.\u003c/p\u003e\n\u003cp\u003eOf course this does not hold for \\(T\\) in general as all \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\) are not identically \\(0\\).\u003c/p\u003e\n\u003cp\u003eHaving shown a counter-example, \\(\\blacksquare\\)\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eDo we have finite-dimensions?\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not injective\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\(T\\) being not injective means that \\(null\\ T\\) has more than just the zero vector.\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\exists v: Tv = 0 = 0 v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat would make all nonzero \\(v \\in null\\ T\\) eigenvectors and \\(0\\) an eigenvalue.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not surjective\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\(T\\) being not surjective means that \\(range\\ T \\subset V\\) strictly. So then \\(T|_{range\\ T}\\) is an operator so \\(range\\ T\\) is an invariant subspace under \\(T\\).\u003c/p\u003e\n\u003cp\u003eEither way, we have that an eigenvalue exist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_35_36/","tags":null,"title":"NUS-MATH530 5.A Problem 35/36"},{"categories":null,"contents":" Suppose \\(T \\in \\mathcal{L}(V)\\) has a diagonal matrix \\(A\\) w.r.t. some basis of \\(V\\), and that \\(\\lambda \\in \\mathbb{F}\\). 
Prove that \\(\\lambda\\) appears on the diagonal of \\(A\\) precisely \\(\\dim E(\\lambda, T)\\) times.\nAside: \u0026ldquo;to appear on the diagonal \\(n\\) times\u0026rdquo; We want to begin by giving a description for what \u0026ldquo;appearing on the diagonal\u0026rdquo; of a diagonal matrix implies.\nA diagonal matrix is a special-case upper-triangular matrix, so a value being on its diagonal implies it to be an eigenvalue.\nFurthermore, let \\(v_1, \u0026hellip; v_{n}\\) be the eigenvector-basis which gives the diagonal matrix aforementioned for \\(A\\). By calculation (i.e. properties of multiplying some all-but-one-zero \u0026ldquo;one hot\u0026rdquo; vector to the diagonal representation of \\(A\\)), if \\(\\lambda\\) appears \\(j\\) times on the diagonal representation of \\(A\\), \\(j\\) basis vectors of \\(V\\) will belong to the same eigenvector \\(j\\) as they all will produce \\(Tv = \\lambda v\\) when applied to the diagonal representation of \\(A\\)).\nAnd finally, because basis vectors are linearly independent, we have that if a value \\(\\lambda\\) appears on the diagonal of a diagonal matrix of \\(A\\) \\(n\\) times, it implies that \\(A\\) has \\(n\\) linearly independent eigenvectors all belonging to \\(\\lambda\\) which forms the basis for which the diagonal matrix is built.\nProof To complete the proof, we now perform casework.\n\\(\\lambda\\) appears \\(0\\) times Per our discussion above, this implies that there are \\(0\\) (trivially linearly independent) eigenvectors for which \\(\\lambda\\) serves as its eigenvalue. Namely, that means \\(\\lambda\\) is not an eigenvalue of \\(A\\). And therefore, we have that \\(T - \\lambda I\\) is injective, and hence \\(null (T - \\lambda I) = {0}\\). Recall that \\(E(\\lambda, T) = null(T-\\lambda I)\\). 
We now have \\(\\dim\\ E(\\lambda, T) = 0\\), as desired.\n\\(\\lambda\\) appears \\(n\\) times Again from above, we have \\(n\\) linearly-independent eigenvectors belonging to the same eigenvalue \\(\\lambda\\) which forms the basis out of which the diagonal matrix is built. Therefore, one can take at least \\(n\\) linearly independent vectors from \\(E(\\lambda, T)\\) as \\(null(T- \\lambda I)\\) is the space of all eigenvectors belonging to \\(\\lambda\\) and the zero vector. This makes \\(\\dim E(\\lambda, T)\\) at least \\(n\\).\nTo show that \\(\\dim E(\\lambda, T)\\) to be exactly \\(n\\), let\u0026rsquo;s suppose the contrary. Let \\(v\\) be another eigenvector belonging to \\(\\lambda\\) linearly independent to the previous \\(n\\) already discussed. \\(v\\) would be linearly independent to all other members of the eigenvector-basis of \\(V\\): as eigenvectors from distinct eigenvalues are linearly independent and we hypothesized that \\(v\\) is linearly independent to the other eigenvectors belonging to \\(\\lambda\\).\nYet, this is not possible: \\(v \\in V\\) cannot create a linearly independent list conjoined to a basis of \\(V\\). Reaching contraction, we see that \\(\\dim E(\\lambda, T) = n\\) as desired. \\(\\blacksquare\\)\n","html":"\u003chr\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\) has a diagonal matrix \\(A\\) w.r.t. some basis of \\(V\\), and that \\(\\lambda \\in \\mathbb{F}\\). 
Prove that \\(\\lambda\\) appears on the diagonal of \\(A\\) precisely \\(\\dim E(\\lambda, T)\\) times.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"aside-to-appear-on-the-diagonal-n-times\"\u003eAside: \u0026ldquo;to appear on the diagonal \\(n\\) times\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eWe want to begin by giving a description for what \u0026ldquo;appearing on the diagonal\u0026rdquo; of a diagonal matrix implies.\u003c/p\u003e\n\u003cp\u003eA diagonal matrix is a special-case upper-triangular matrix, so a value being on its diagonal implies it to be an eigenvalue.\u003c/p\u003e\n\u003cp\u003eFurthermore, let \\(v_1, \u0026hellip; v_{n}\\) be the eigenvector-basis which gives the diagonal matrix aforementioned for \\(A\\). By calculation (i.e. properties of multiplying some all-but-one-zero \u0026ldquo;one hot\u0026rdquo; vector to the diagonal representation of \\(A\\)), if \\(\\lambda\\) appears \\(j\\) times on the diagonal representation of \\(A\\), \\(j\\) basis vectors of \\(V\\) will belong to the same eigenvector \\(j\\) as they all will produce \\(Tv = \\lambda v\\) when applied to the diagonal representation of \\(A\\)).\u003c/p\u003e\n\u003cp\u003eAnd finally, because basis vectors are linearly independent, we have that if a value \\(\\lambda\\) appears on the diagonal of a diagonal matrix of \\(A\\) \\(n\\) times, it implies that \\(A\\) has \\(n\\) linearly independent eigenvectors all belonging to \\(\\lambda\\) which forms the basis for which the diagonal matrix is built.\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003cp\u003eTo complete the proof, we now perform casework.\u003c/p\u003e\n\u003ch3 id=\"lambda-appears-0-times\"\u003e\\(\\lambda\\) appears \\(0\\) times\u003c/h3\u003e\n\u003cp\u003ePer our discussion above, this implies that there are \\(0\\) (trivially linearly independent) eigenvectors for which \\(\\lambda\\) serves as its eigenvalue. Namely, that means \\(\\lambda\\) is not an eigenvalue of \\(A\\). 
And therefore, we have that \\(T - \\lambda I\\) is injective, and hence \\(null (T - \\lambda I) = {0}\\). Recall that \\(E(\\lambda, T) = null(T-\\lambda I)\\). We now have \\(\\dim\\ E(\\lambda, T) = 0\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"lambda-appears-n-times\"\u003e\\(\\lambda\\) appears \\(n\\) times\u003c/h3\u003e\n\u003cp\u003eAgain from above, we have \\(n\\) linearly-independent eigenvectors belonging to the same eigenvalue \\(\\lambda\\) which forms the basis out of which the diagonal matrix is built. Therefore, one can take at least \\(n\\) linearly independent vectors from \\(E(\\lambda, T)\\) as \\(null(T- \\lambda I)\\) is the space of all eigenvectors belonging to \\(\\lambda\\) and the zero vector. This makes \\(\\dim E(\\lambda, T)\\) at least \\(n\\).\u003c/p\u003e\n\u003cp\u003eTo show that \\(\\dim E(\\lambda, T)\\) to be exactly \\(n\\), let\u0026rsquo;s suppose the contrary. Let \\(v\\) be another eigenvector belonging to \\(\\lambda\\) linearly independent to the previous \\(n\\) already discussed. \\(v\\) would be linearly independent to all other members of the eigenvector-basis of \\(V\\): as eigenvectors from distinct eigenvalues are linearly independent and we hypothesized that \\(v\\) is linearly independent to the other eigenvectors belonging to \\(\\lambda\\).\u003c/p\u003e\n\u003cp\u003eYet, this is not possible: \\(v \\in V\\) cannot create a linearly independent list conjoined to a basis of \\(V\\). Reaching contraction, we see that \\(\\dim E(\\lambda, T) = n\\) as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_c_problem_7/","tags":null,"title":"NUS-MATH530 5.C Problem 7"},{"categories":null,"contents":"Standard Bases Back and Fourth To map the vectors from \\(B_2\\) back to the standard bases, we simply have to construct the map:\n\\begin{equation} \\mqty(2 \u0026amp; 1 \u0026amp; 2 \\\\ 1\u0026amp; 1\u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0) \\end{equation}\nEach of the \u0026ldquo;standard\u0026rdquo; vectors in the new basis, when applied to this matrix, gets moved back to their original representation.\nPresumably, then, moving \u0026ldquo;forward\u0026rdquo; into the new space is simply taking the inverse of this vector, which we will do separately; its inverse is:\n\\begin{equation} \\mqty(\\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; \\frac{3}{7} \\\\ \\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; -\\frac{4}{7} \\\\ \\frac{2}{7} \u0026amp; -\\frac{3}{7} \u0026amp; -\\frac{1}{7}) \\end{equation}\nNow. The former matrix will map vectors in terms of \\(B_2\\) into \\(B_1\\), and the latter \\(B_1\\) into \\(B_2\\).\nMapping Back and Forth We can apply the latter matrix to the vector to change its basis:\n\\begin{equation} \\mqty(1 \\\\ 1 \\\\ 0) \\end{equation}\nThis means that, as a linear combination of \\(B_2\\), we have:\n\\begin{equation} 1 \\mqty(2 \\\\ 1 \\\\ 1) + 1 \\mqty(1 \\\\ 1 \\\\ -1) + 0 \\mqty(2 \\\\ -1 \\\\0) \\end{equation}\nAnd the vector \\(\\mqty(1\\\\1\\\\0)\\) aforementioned is the representation of the vector desired in terms of basis \\(B_2\\). The desired matrix mapping in the second matrix above.\nGenerally For mapping from a new basis to the standard basis, simply arrange the vectors that form the basis as columns of a matrix. To map from the standard basis towards the new ones, invert that map. 
If mappings between two basis are needed, and they are both expressed in terms of the standard basis, compose the maps.\n","html":"\u003ch2 id=\"standard-bases-back-and-fourth\"\u003eStandard Bases Back and Fourth\u003c/h2\u003e\n\u003cp\u003eTo map the vectors from \\(B_2\\) back to the standard bases, we simply have to construct the map:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(2 \u0026amp; 1 \u0026amp; 2 \\\\ 1\u0026amp; 1\u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach of the \u0026ldquo;standard\u0026rdquo; vectors in the new basis, when applied to this matrix, gets moved back to their original representation.\u003c/p\u003e\n\u003cp\u003ePresumably, then, moving \u0026ldquo;forward\u0026rdquo; into the new space is simply taking the inverse of this vector, which we will do separately; its inverse is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(\\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; \\frac{3}{7} \\\\ \\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; -\\frac{4}{7} \\\\ \\frac{2}{7} \u0026amp; -\\frac{3}{7} \u0026amp; -\\frac{1}{7})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow. The former matrix will map vectors in terms of \\(B_2\\) into \\(B_1\\), and the latter \\(B_1\\) into \\(B_2\\).\u003c/p\u003e\n\u003ch2 id=\"mapping-back-and-forth\"\u003eMapping Back and Forth\u003c/h2\u003e\n\u003cp\u003eWe can apply the latter matrix to the vector to change its basis:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(1 \\\\ 1 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means that, as a linear combination of \\(B_2\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 \\mqty(2 \\\\ 1 \\\\ 1) + 1 \\mqty(1 \\\\ 1 \\\\ -1) + 0 \\mqty(2 \\\\ -1 \\\\0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd the vector \\(\\mqty(1\\\\1\\\\0)\\) aforementioned is the representation of the vector desired in terms of basis \\(B_2\\). 
The desired matrix mapping in the second matrix above.\u003c/p\u003e\n\u003ch2 id=\"generally\"\u003eGenerally\u003c/h2\u003e\n\u003cp\u003eFor mapping from a new basis to the standard basis, simply arrange the vectors that form the basis as columns of a matrix. To map from the standard basis towards the new ones, invert that map. If mappings between two basis are needed, and they are both expressed in terms of the standard basis, compose the maps.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_changing_bases/","tags":null,"title":"NUS-MATH530 Changing Bases"},{"categories":null,"contents":"Dot product Calculations Let\u0026rsquo;s calculate some dot products!\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 0 \\end{pmatrix} \\cdot \\begin{pmatrix} 0 \\\\ 1 \\end{pmatrix} = 0 \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\2 \\end{pmatrix} \\cdot \\begin{pmatrix} 2 \\\\1 \\end{pmatrix} = 4 \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 1 \\end{pmatrix} \\cdot \\begin{pmatrix} -1 \\\\1 \\end{pmatrix} = 0 \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\1 \\end{pmatrix} \\cdot \\begin{pmatrix} 2 \\\\ 2 \\end{pmatrix} = 4 \\end{equation}\nInterpretation Geometrically, the intepretation of the dot product is the magnitude that comes from scaling the bottom projected value by the top value. 
This is essentially multiplying the proportion of one vector that\u0026rsquo;s parallel to the other by each other.\nCross Product Calculations Cross products!\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 0 \\\\ 1 \\end{pmatrix} \\times \\begin{pmatrix} -1 \\\\ 0 \\\\ 1 \\end{pmatrix} = \\begin{pmatrix} 1 \\\\ -1 \\\\0 \\end{pmatrix} \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 1 \\\\ -1 \\end{pmatrix} \\times \\begin{pmatrix} 0 \\\\ 0 \\\\ 2 \\end{pmatrix} = \\begin{pmatrix} 2 \\\\ -2 \\\\0 \\end{pmatrix} \\end{equation}\nThe dot product is the point that is perpendicular to the other two input vectors.\nWhy its not a field We want to check why the multiplication of vectors in \\(\\mathbb{F}^{3}\\) via taking the cross product cannot form a field.\nWe can safely assume that the addition operation of the vectors derive their closure, commutativity, associativity from these properties in \\(\\mathbb{F}\\).\nTherefore, we will verify these properties with multiplication. The only closed multiplication-like operation of vectors is the cross-product. Let\u0026rsquo;s first define the cross-product.\nGiven two vectors in \\(\\mathbb{F}^{3}\\):\n\\begin{equation} \\begin{pmatrix} a \\\\b \\\\ c \\end{pmatrix}, \\begin{pmatrix} d \\\\ e\\\\f \\end{pmatrix} \\end{equation}\nTheir cross product is the vector in \\(\\mathbb{F}^{3}\\) defined by:\n\\begin{equation} \\begin{vmatrix} \\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\ a \u0026amp; b \u0026amp; c \\\\ d \u0026amp; e \u0026amp; f \\end{vmatrix} \\end{equation}\nTaking the actual determinant, we have that:\n\\begin{equation} \\begin{vmatrix} \\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\ a \u0026amp; b \u0026amp; c \\\\ d \u0026amp; e \u0026amp; f \\end{vmatrix} = \\begin{pmatrix} bf-ce \\\\ dc-af \\\\ ac-db \\end{pmatrix} \\end{equation}\nIdentity Let\u0026rsquo;s first figure the identity of this operation. 
We wish to figure some \\((a,b,c)\\) such that the result of the cross product would be \\((d,e,f)\\).\nGeometrically, the perpendicularity of a vector is the resulting value of the cross product; however, no vector (apart from \\(\\vec{0}\\)) can be perfectly perpendicular to itself exactly. This would indicate that no such identities exist.\nWe can also observe that there is no \\(f\\) term on the bottom of the cross product. This would indicate that no combination of \\((a,b,c)\\) can construct the needed \\(f\\) on the last entry.\nFinally, for a more formal proof.\nProof: there can not exist a field-like identity for a cross product.\nFor the sake of contradiction let\u0026rsquo;s say for some nonzero vector \\(\\vec{v} \\in \\mathbb{F}^{3}\\), there exists some identity named \\(\\vec{e} \\in \\mathbb{F}^{3}\\) that follows the properties of identities in a field.\nWe first have that:\n\\begin{equation} \\vec{e} \\times \\vec{v} = \\vec{v} \\end{equation}\nby the definition of the identity.\nAnd also that:\n\\begin{equation} \\vec{v} \\times \\vec{e}= \\vec{v} \\end{equation}\nby the fact that field-like operations commutes.\nWe have also the property of cross products that:\n\\begin{align} \u0026amp;\\vec{a} \\times \\vec{b} = -(\\vec{b} \\times \\vec{a}) \\\\ \\Rightarrow\\ \u0026amp; \\vec{a} \\times \\vec{b} + \\vec{b} \\times \\vec{a} = 0 \\end{align}\nBy applying the inverse of \\(-(\\vec{b}\\times \\vec{a})\\) to both sides, as cross products are closed and therefore an additive inverse exists.\nTherefore, we have that:\n\\begin{equation} \\vec{v} + \\vec{v} = 0 \\end{equation}\nWe see then \\(\\vec{v}\\) is its own additive inverse. Therefore \\(\\vec{v}\\) itself is also \\(0\\). But we established that \\(\\vec{v}\\) can be non-zero. Reaching contradiction, \\(\\blacksquare\\). (this is iffy)\nCommutativity Because of the fact that two-by-two matricies exists on the diagonals, the cross product is also not commutative. 
In fact,\nDeterminants The geometric interpretation of the determinants is the change in area inside a vector which it stretches a given vector.\n","html":"\u003ch2 id=\"dot-product\"\u003eDot product\u003c/h2\u003e\n\u003ch3 id=\"calculations\"\u003eCalculations\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s calculate some dot products!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 0\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n0 \\\\ 1\n\\end{pmatrix} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\2\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n2 \\\\1\n\\end{pmatrix} = 4\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 1\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n-1 \\\\1\n\\end{pmatrix} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\1\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n2 \\\\ 2\n\\end{pmatrix} = 4\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"interpretation\"\u003eInterpretation\u003c/h3\u003e\n\u003cp\u003eGeometrically, the intepretation of the dot product is the magnitude that comes from scaling the bottom projected value by the top value. 
This is essentially multiplying the proportion of one vector that\u0026rsquo;s parallel to the other by each other.\u003c/p\u003e\n\u003ch2 id=\"cross-product\"\u003eCross Product\u003c/h2\u003e\n\u003ch3 id=\"calculations\"\u003eCalculations\u003c/h3\u003e\n\u003cp\u003eCross products!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 0 \\\\ 1\n\\end{pmatrix} \\times \\begin{pmatrix}\n-1 \\\\ 0 \\\\ 1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \\\\ -1 \\\\0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 1 \\\\ -1\n\\end{pmatrix} \\times \\begin{pmatrix}\n0 \\\\ 0 \\\\ 2\n\\end{pmatrix} = \\begin{pmatrix}\n2 \\\\ -2 \\\\0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-06_22-09-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe dot product is the point that is perpendicular to the other two input vectors.\u003c/p\u003e\n\u003ch3 id=\"why-its-not-a-field\"\u003eWhy its not a field\u003c/h3\u003e\n\u003cp\u003eWe want to check why the multiplication of vectors in \\(\\mathbb{F}^{3}\\) via taking the cross product cannot form a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe can safely assume that the addition operation of the vectors derive their closure, \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e from these properties in \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we will verify these properties with \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e. The only closed multiplication-like operation of vectors is the cross-product. 
Let\u0026rsquo;s first define the cross-product.\u003c/p\u003e\n\u003cp\u003eGiven two vectors in \\(\\mathbb{F}^{3}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \\\\b \\\\ c\n\\end{pmatrix}, \\begin{pmatrix}\nd \\\\ e\\\\f\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTheir cross product is the vector in \\(\\mathbb{F}^{3}\\) defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{vmatrix}\n\\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\\na \u0026amp; b \u0026amp; c \\\\\nd \u0026amp; e \u0026amp; f\n\\end{vmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the actual determinant, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{vmatrix}\n\\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\\na \u0026amp; b \u0026amp; c \\\\\nd \u0026amp; e \u0026amp; f\n\\end{vmatrix} = \\begin{pmatrix}\nbf-ce \\\\\ndc-af \\\\\nac-db\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"identity\"\u003eIdentity\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s first figure the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e of this operation. We wish to figure some \\((a,b,c)\\) such that the result of the cross product would be \\((d,e,f)\\).\u003c/p\u003e\n\u003cp\u003eGeometrically, the perpendicularity of a vector is the resulting value of the cross product; however, no vector (apart from \\(\\vec{0}\\)) can be perfectly perpendicular to itself exactly. This would indicate that no such identities exist.\u003c/p\u003e\n\u003cp\u003eWe can also observe that there is no \\(f\\) term on the bottom of the cross product. 
This would indicate that no combination of \\((a,b,c)\\) can construct the needed \\(f\\) on the last entry.\u003c/p\u003e\n\u003cp\u003eFinally, for a more formal proof.\u003c/p\u003e\n\u003cp\u003eProof: there can not exist a field-like \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e for a cross product.\u003c/p\u003e\n\u003cp\u003eFor the sake of contradiction let\u0026rsquo;s say for some nonzero vector \\(\\vec{v} \\in \\mathbb{F}^{3}\\), there exists some identity named \\(\\vec{e} \\in \\mathbb{F}^{3}\\) that follows the properties of identities in a field.\u003c/p\u003e\n\u003cp\u003eWe first have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{e} \\times \\vec{v} = \\vec{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby the definition of the identity.\u003c/p\u003e\n\u003cp\u003eAnd also that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{v} \\times \\vec{e}= \\vec{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby the fact that field-like operations commutes.\u003c/p\u003e\n\u003cp\u003eWe have also the property of cross products that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\vec{a} \\times \\vec{b} = -(\\vec{b} \\times \\vec{a}) \\\\\n\\Rightarrow\\ \u0026amp; \\vec{a} \\times \\vec{b} + \\vec{b} \\times \\vec{a} = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBy applying the inverse of \\(-(\\vec{b}\\times \\vec{a})\\) to both sides, as cross products are closed and therefore an additive inverse exists.\u003c/p\u003e\n\u003cp\u003eTherefore, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{v} + \\vec{v} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe see then \\(\\vec{v}\\) is its own \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e. Therefore \\(\\vec{v}\\) itself is also \\(0\\). But we established that \\(\\vec{v}\\) can be non-zero. Reaching contradiction, \\(\\blacksquare\\). 
(this is iffy)\u003c/p\u003e\n\u003ch3 id=\"commutativity\"\u003eCommutativity\u003c/h3\u003e\n\u003cp\u003eBecause of the fact that two-by-two \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e exists on the diagonals, the cross product is also not commutative. In fact,\u003c/p\u003e\n\u003ch2 id=\"determinants\"\u003eDeterminants\u003c/h2\u003e\n\u003cp\u003eThe geometric interpretation of the \u003ca href=\"/posts/kbhmatricies/#determinants\"\u003edeterminants\u003c/a\u003e is the change in area inside a vector which it stretches a given vector.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_geometric_intepretations/","tags":null,"title":"NUS-MATH530 Geometric Intepretations"},{"categories":null,"contents":"Let \\(\\lambda_{m}\\) be an eigenvalue for \\(T\\) an operator on complex finite-dimensional \\(V\\). Let \\(m\\) be the geometric multiplicity of \\(\\lambda_{m}\\). We desire that the algebraic multiplicity is at least \\(m\\). Let \\(\\dim v = n\\).\nWe have that \\(m\\) is the geometric multiplicity of \\(\\lambda_{m}\\), meaning:\n\\begin{equation} \\dim E(\\lambda_{m}, T) = m \\end{equation}\nThis means we can take \\(m\\) linearly independent eigenvectors from \\(V\\). Extend this list now to a basis of \\(V\\) with \\(v_1, \u0026hellip;v_{m}, u_{1}, u_{n-m}\\).\nConstruct a matrix via this basis. By construction, the first \\(m \\times m\\) of this matrix would appear diagonal (as each \\(Tv = \\lambda v\\)). Furthermore, the diagonal of this sub-matrix would simply contain \\(\\lambda\\) repeated \\(m\\) times.\nTake now \\(A = \\mathcal{M}(T)-\\lambda I\\).\nTake the determinant of this matrix \\(A\\) now against the first column, yielding a characteristic polynomial with at least \\(m\\) factors with \\(\\lambda\\). Hence, the algebraic multiplicity of \\(\\lambda_{m}\\) is at least \\(m\\), as desired. 
\\(\\blacksquare\\)\n","html":"\u003cp\u003eLet \\(\\lambda_{m}\\) be an eigenvalue for \\(T\\) an operator on complex finite-dimensional \\(V\\). Let \\(m\\) be the geometric multiplicity of \\(\\lambda_{m}\\). We desire that the algebraic multiplicity is at least \\(m\\). Let \\(\\dim v = n\\).\u003c/p\u003e\n\u003cp\u003eWe have that \\(m\\) is the geometric multiplicity of \\(\\lambda_{m}\\), meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{m}, T) = m\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means we can take \\(m\\) linearly independent eigenvectors from \\(V\\). Extend this list now to a basis of \\(V\\) with \\(v_1, \u0026hellip;v_{m}, u_{1}, u_{n-m}\\).\u003c/p\u003e\n\u003cp\u003eConstruct a matrix via this basis. By construction, the first \\(m \\times m\\) of this matrix would appear diagonal (as each \\(Tv = \\lambda v\\)). Furthermore, the diagonal of this sub-matrix would simply contain \\(\\lambda\\) repeated \\(m\\) times.\u003c/p\u003e\n\u003cp\u003eTake now \\(A = \\mathcal{M}(T)-\\lambda I\\).\u003c/p\u003e\n\u003cp\u003eTake the determinant of this matrix \\(A\\) now against the first column, yielding a characteristic polynomial with at least \\(m\\) factors with \\(\\lambda\\). Hence, the algebraic multiplicity of \\(\\lambda_{m}\\) is at least \\(m\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_geometric_multiplicity/","tags":null,"title":"NUS-MATH530 Geometric Multiplicity"},{"categories":null,"contents":" Date Link \u0026lt;2022-09-09 Fri\u0026gt; NUS-MATH530 Solving Systems \u0026lt;2020-09-09 Wed\u0026gt; NUS-MATH530 Geometric Intepretations \u0026lt;2022-09-13 Tue\u0026gt; NUS-MATH530 Linear Vehicles \u0026lt;2022-09-15 Thu\u0026gt; NUS-MATH530 Plane and 1.B \u0026lt;2022-09-27 Tue\u0026gt; NUS-MATH530 1.C Problem 23 \u0026lt;2022-10-29 Sat\u0026gt; NUS-MATH530 2.C Problem 17 \u0026lt;2022-11-16 Wed\u0026gt; NUS-MATH530 3.B Problem 20 \u0026lt;2023-01-23 Mon\u0026gt; NUS-MATH530 3.E Problem 1 \u0026lt;2023-02-14 Tue\u0026gt; NUS-MATH530 5.A and Discussion \u0026lt;2023-02-20 Mon\u0026gt; NUS-MATH530 5.A Problem 14 \u0026lt;2023-03-16 Thu\u0026gt; NUS-MATH530 Changing Bases \u0026lt;2023-03-28 Tue\u0026gt; NUS-MATH530 5.C Problem 7 \u0026lt;2023-04-07 Fri\u0026gt; NUS-MATH530 Geometric Multiplicity \u0026lt;2023-04-12 Wed\u0026gt; NUS-MATH530 Some 6.A Problems \u0026lt;2023-04-14 Fri\u0026gt; NUS-MATH530 Similar to Diagonal \u0026lt;2023-05-04 Thu\u0026gt; NUS-MATH530 Matrix Adjectives ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-09 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_solving_systems/\"\u003eNUS-MATH530 Solving Systems\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2020-09-09 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhnus_math530_geometric_intepretations/\"\u003eNUS-MATH530 Geometric Intepretations\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-13 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_linear_vehicles/\"\u003eNUS-MATH530 Linear Vehicles\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-15 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_plane_and_1_b/\"\u003eNUS-MATH530 Plane and 1.B\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-27 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_1_c_proof_preso/\"\u003eNUS-MATH530 1.C Problem 23\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-10-29 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_2_c_problem_17/\"\u003eNUS-MATH530 2.C Problem 17\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-11-16 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_3_b_problem_20/\"\u003eNUS-MATH530 3.B Problem 20\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-01-23 
Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_3_e_problem_1/\"\u003eNUS-MATH530 3.E Problem 1\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-02-14 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_5_a_and_discussion/\"\u003eNUS-MATH530 5.A and Discussion\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-02-20 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_5_a_problem_14/\"\u003eNUS-MATH530 5.A Problem 14\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-03-16 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_changing_bases/\"\u003eNUS-MATH530 Changing Bases\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-03-28 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_5_c_problem_7/\"\u003eNUS-MATH530 5.C Problem 7\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-04-07 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_geometric_multiplicity/\"\u003eNUS-MATH530 Geometric Multiplicity\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan 
class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-04-12 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_some_6_a_problems/\"\u003eNUS-MATH530 Some 6.A Problems\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-04-14 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eNUS-MATH530 Similar to Diagonal\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-05-04 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_matrix_adjectives/\"\u003eNUS-MATH530 Matrix Adjectives\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_homework_index/","tags":null,"title":"NUS-MATH530 Homework Index"},{"categories":null,"contents":"Infinite Plane Two Vehicles Yes. Though the travel of the two vehicles are not entirely independent, the second vehicle can diagonally traverse the plane while the first vehicle cuts across it. Practically, the question asks whether or not a combination of:\n\\begin{equation} \\alpha \\begin{pmatrix} 0 \\\\ 1 \\end{pmatrix} + \\beta \\begin{pmatrix} 1 \\\\ 1 \\end{pmatrix} \\end{equation}\nCan form all vectors in \\(\\mathbb{R}^2\\). 
Expanding that expression out, we have, given some point \\((a,b)\\) that:\n\\begin{equation} \\begin{pmatrix} \\beta \\\\ \\alpha + \\beta \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\end{pmatrix} \\end{equation}\nTherefor, we have expressions:\n\\begin{equation} \\begin{cases} \\beta = a \\\\ \\alpha +\\beta = b \\end{cases} \\end{equation}\nSubstituting the definition of \\(\\beta\\) then:\n\\begin{align} \u0026amp;\\alpha + a = b \\\\ \\Rightarrow\\ \u0026amp;\\alpha = b - a \\end{align}\nTherefore, we have that, for all desired locales \\((a,b)\\) we have a fully determined solution:\n\\begin{equation} \\begin{cases} \\alpha = b-a \\\\ \\beta = a \\end{cases} \\end{equation}\nThis means that some direction of travel in both vehicles will suffice.\nGoing Home Not necessarily. Graphically, after shifting yourself to some location upper-right, its impossible to move horizontally in the vertical-only vehicle.\nPractically, the question is asking that, if you are at some beginning location:\n\\begin{equation} \\begin{pmatrix} a \\\\b \\end{pmatrix} \\end{equation}\nCan we devise some travel that follows:\n\\begin{equation} \\alpha \\begin{pmatrix} 0 \\\\ 1 \\end{pmatrix} + \\begin{pmatrix} a \\\\b \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 0 \\end{pmatrix} \\end{equation}\nExpanding this out, we have expressions:\n\\begin{equation} \\begin{cases} 0 + a = 0 \\\\ \\alpha + b = 0 \\end{cases} \\end{equation}\nNamely, we have that:\n\\begin{equation} \\begin{cases} a = 0 \\\\ \\alpha = -b \\end{cases} \\end{equation}\nWe are therefore under-determined here; there is a solution only \\(\\forall a=0\\), but for no other \\(a\\).\nInfinite Space Pickup and Hoverboard We have:\n\\begin{equation} \\alpha \\begin{pmatrix} 1 \\\\ 1 \\\\ 0 \\end{pmatrix} + \\beta \\begin{pmatrix} 3 \\\\ -2 \\\\1 \\end{pmatrix} \\end{equation}\nto go to all directions \\((a,b,c)\\). 
Let\u0026rsquo;s try solving:\n\\begin{equation} \\begin{cases} \\alpha + 3\\beta = a\\\\ \\alpha -2\\beta = b \\\\ \\beta = c \\end{cases} \\end{equation}\nSubstituting the value for \\(\\beta=c\\), to the above equations, we have:\n\\begin{equation} \\begin{cases} \\alpha + 3c = a \\\\ \\alpha - 2c = b \\\\ \\end{cases} \\end{equation}\nAnd therefore, we have results:\n\\begin{equation} \\begin{cases} \\alpha = a-3c \\\\ \\alpha = b+ 2c \\end{cases} \\end{equation}\nThis equation is again over-determined. Therefore, you cannot get anywhere in your space; you can, however, get to all the points \\((a,b,c)\\) where:\n\\begin{equation} a-3c = b+2c \\end{equation}\nPickup, Hoverboard, AND Jetpack (part 1) We now have:\n\\begin{equation} \\alpha \\begin{pmatrix} 1 \\\\ 1 \\\\ 0 \\end{pmatrix} + \\beta \\begin{pmatrix} 3 \\\\ -2 \\\\ 1 \\end{pmatrix} + \\gamma \\begin{pmatrix} 0 \\\\ 1 \\\\ 1 \\end{pmatrix} \\end{equation}\nto go to all points \\((a,b,c)\\), we now try solving:\n\\begin{equation} \\begin{cases} \\alpha + 3\\beta = a \\\\ \\alpha -2\\beta + \\gamma = b\\\\ \\beta +\\gamma = c \\end{cases} \\end{equation}\nAt this point, it is probably easier to use a matrix to solve this expression. 
Hence, let\u0026rsquo;s solve:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 1 \u0026amp; -2 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\\\c \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s first subtract the first column from the second column:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 0 \u0026amp; -5 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = \\begin{pmatrix} a \\\\ b-a \\\\c \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s now rotate rows \\(2\\) and \\(3\\):\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 1\\\\ 0 \u0026amp; -5 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a \\\\c\\\\ b-a \\end{pmatrix} \\end{equation}\nGreat. Now let\u0026rsquo;s subtract thrice the second row towards the first row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; -3 \\\\ 0 \u0026amp; 1 \u0026amp; 1\\\\ 0 \u0026amp; -5 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c \\\\c\\\\ b-a \\end{pmatrix} \\end{equation}\nAnd add five times the second row to the last row\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; -3 \\\\ 0 \u0026amp; 1 \u0026amp; 1\\\\ 0 \u0026amp; 0 \u0026amp; 6 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c \\\\c\\\\ b-a+5c \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s subtract a sixth of the last row to the second row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; -3 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 6 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c 
\\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c \\end{pmatrix} \\end{equation}\nAnd add a half to the top row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 6 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c \\end{pmatrix} \\end{equation}\nAnd finally divide the bottom row by \\(6\\):\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{{b-a+5c}}{6} \\end{pmatrix} \\end{equation}\nGreat. So now we have a fully determined solution \\(\\forall \\alpha, \\beta, \\gamma\\). Therefore, given a pair of location which you want to reach \\((a,b,c)\\), we can use the expressions above to solve for the values by which we have to move each vehicle.\nPickup, Hoverboard, AND Jetpack (part 2) We have the same problem, but with new numbers.\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 1 \u0026amp; -2 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\\\c \\end{pmatrix} \\end{equation}\nAt this point, we really want to be checking of the vectors which form this matrix is a spanning set; that is, after performing Gaussian elimination, do we get back a zero-row? 
If so, it will restrict some combination of our input \\((a,b,c)\\) to converge to \\(0\\).\nLet\u0026rsquo;s begin by subtracting the first row from second row\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 0 \u0026amp; -5 \u0026amp; -5 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\end{equation}\nAnd now, let\u0026rsquo;s divide the middle row by \\(\\frac{-1}{5}\\)\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s then subtract the middle row from the last row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\\\ 0 \u0026amp; 0 \u0026amp; 0 \\end{pmatrix} \\end{equation}\nAlready we see an \\(0\\) row emerging. That means that some combination of the variables have to be \\(0\\) for a solution to exist: these vectors do not span the space and therefore we can\u0026rsquo;t get everywhere in space.\nPickup Breaks Down We now want to know if the following vectors would reach \\((0,0,0)\\) after driving the pickup some distance \\(d\\); that is, if we started at some \\((a,b,c)\\), can:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 1 \u0026amp; -2 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\\\ \\end{pmatrix} \\begin{pmatrix} 0 \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = -\\begin{pmatrix} a \\\\ b \\\\ c \\end{pmatrix} \\end{equation}\nyield a solution? 
We have already performed the Gaussian Elimination above, therefore, we will skip directly to the solution:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} 0 \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = -\\begin{pmatrix} a-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{{b-a+5c}}{6} \\end{pmatrix} \\end{equation}\nObviously the bottom few rows yield a solution, however, the top row places some limitation on our possible location. Namely, that:\n\\begin{equation} -\\frac{a+b-c}{2} = 0 \\end{equation}\nIf the locations you are at do not behave with these rules, a solution will not be yielded.\n","html":"\u003ch2 id=\"infinite-plane\"\u003eInfinite Plane\u003c/h2\u003e\n\u003ch3 id=\"two-vehicles\"\u003eTwo Vehicles\u003c/h3\u003e\n\u003cp\u003eYes. Though the travel of the two vehicles are not entirely independent, the second vehicle can diagonally traverse the plane while the first vehicle cuts across it. Practically, the question asks whether or not a combination of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n0 \\\\ 1\n\\end{pmatrix} + \\beta \\begin{pmatrix}\n1 \\\\ 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eCan form all vectors in \\(\\mathbb{R}^2\\). 
Expanding that expression out, we have, given some point \\((a,b)\\) that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n\\beta \\\\\n\\alpha + \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefor, we have expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\beta = a \\\\\n\\alpha +\\beta = b\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting the definition of \\(\\beta\\) then:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\alpha + a = b \\\\\n\\Rightarrow\\ \u0026amp;\\alpha = b - a\n\\end{align}\u003c/p\u003e\n\u003cp\u003eTherefore, we have that, for all desired locales \\((a,b)\\) we have a fully determined solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha = b-a \\\\\n\\beta = a\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means that some direction of travel in both vehicles will suffice.\u003c/p\u003e\n\u003ch3 id=\"going-home\"\u003eGoing Home\u003c/h3\u003e\n\u003cp\u003eNot necessarily. 
Graphically, after shifting yourself to some location upper-right, its impossible to move horizontally in the vertical-only vehicle.\u003c/p\u003e\n\u003cp\u003ePractically, the question is asking that, if you are at some beginning location:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \\\\b\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eCan we devise some travel that follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n0 \\\\ 1\n\\end{pmatrix} + \\begin{pmatrix}\na \\\\b\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding this out, we have expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n0 + a = 0 \\\\\n\\alpha + b = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNamely, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na = 0 \\\\\n\\alpha = -b\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are therefore under-determined here; there is a solution only \\(\\forall a=0\\), but for no other \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"infinite-space\"\u003eInfinite Space\u003c/h2\u003e\n\u003ch3 id=\"pickup-and-hoverboard\"\u003ePickup and Hoverboard\u003c/h3\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n1 \\\\ 1 \\\\ 0\n\\end{pmatrix} + \\beta \\begin{pmatrix}\n3 \\\\ -2 \\\\1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto go to all directions \\((a,b,c)\\). 
Let\u0026rsquo;s try solving:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha + 3\\beta = a\\\\\n\\alpha -2\\beta = b \\\\\n\\beta = c\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting the value for \\(\\beta=c\\), to the above equations, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha + 3c = a \\\\\n\\alpha - 2c = b \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd therefore, we have results:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha = a-3c \\\\\n\\alpha = b+ 2c\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis equation is again over-determined. Therefore, you cannot get anywhere in your space; you can, however, get to all the points \\((a,b,c)\\) where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na-3c = b+2c\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"pickup-hoverboard-and-jetpack--part-1\"\u003ePickup, Hoverboard, AND Jetpack (part 1)\u003c/h3\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n1 \\\\ 1 \\\\ 0\n\\end{pmatrix} + \\beta \\begin{pmatrix}\n3 \\\\ -2 \\\\ 1\n\\end{pmatrix} + \\gamma \\begin{pmatrix}\n0 \\\\ 1 \\\\ 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto go to all points \\((a,b,c)\\), we now try solving:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha + 3\\beta = a \\\\\n\\alpha -2\\beta + \\gamma = b\\\\\n\\beta +\\gamma = c\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt this point, it is probably easier to use a matrix to solve this expression. 
Hence, let\u0026rsquo;s solve:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n1 \u0026amp; -2 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b \\\\c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first subtract the first column from the second column:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n0 \u0026amp; -5 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b-a \\\\c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now rotate rows \\(2\\) and \\(3\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 1\\\\\n0 \u0026amp; -5 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\c\\\\ b-a\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. 
Now let\u0026rsquo;s subtract thrice the second row from the first row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; -3 \\\\\n0 \u0026amp; 1 \u0026amp; 1\\\\\n0 \u0026amp; -5 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na-3c \\\\c\\\\ b-a\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd add five times the second row to the last row\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; -3 \\\\\n0 \u0026amp; 1 \u0026amp; 1\\\\\n0 \u0026amp; 0 \u0026amp; 6\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na-3c \\\\c\\\\ b-a+5c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s subtract a sixth of the last row from the second row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; -3 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 6\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na-3c \\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd add half of the last row to the top row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 6\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd finally divide the bottom row by \\(6\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na-3c + \\frac{b-a+5c}{2} 
\\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{b-a+5c}{6}\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. So now we have a fully determined solution \\(\\forall \\alpha, \\beta, \\gamma\\). Therefore, given a location \\((a,b,c)\\) which you want to reach, we can use the expressions above to solve for the values by which we have to move each vehicle.\u003c/p\u003e\n\u003ch3 id=\"pickup-hoverboard-and-jetpack--part-2\"\u003ePickup, Hoverboard, AND Jetpack (part 2)\u003c/h3\u003e\n\u003cp\u003eWe have the same problem, but with new numbers.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n1 \u0026amp; -2 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b \\\\c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt this point, we really want to be checking whether the vectors which form this matrix are a spanning set; that is, after performing Gaussian elimination, do we get back a zero-row? 
If so, it will restrict some combination of our input \\((a,b,c)\\) to converge to \\(0\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by subtracting the first row from the second row\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n0 \u0026amp; -5 \u0026amp; -5 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, let\u0026rsquo;s divide the middle row by \\(-5\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n0 \u0026amp; 1 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s then subtract the middle row from the last row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n0 \u0026amp; 1 \u0026amp; 1 \\\\\n0 \u0026amp; 0 \u0026amp; 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAlready we see a \\(0\\) row emerging. That means that some combination of the variables has to be \\(0\\) for a solution to exist: these vectors do not span the space and therefore we can\u0026rsquo;t get everywhere in space.\u003c/p\u003e\n\u003ch3 id=\"pickup-breaks-down\"\u003ePickup Breaks Down\u003c/h3\u003e\n\u003cp\u003eWe now want to know if the following vectors would reach \\((0,0,0)\\) after driving the pickup some distance \\(d\\); that is, if we started at some \\((a,b,c)\\), can:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n1 \u0026amp; -2 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1 \\\\\n\\end{pmatrix} \\begin{pmatrix}\n0 \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = -\\begin{pmatrix}\na \\\\ b \\\\ c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyield a solution? 
We have already performed the Gaussian Elimination above, therefore, we will skip directly to the solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n0 \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = -\\begin{pmatrix}\na-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{b-a+5c}{6}\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eObviously the bottom two rows yield a solution, however, the top row places some limitation on our possible location. Namely, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\frac{a+b-c}{2} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf the location you are at does not satisfy this rule, a solution will not be yielded.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_linear_vehicles/","tags":null,"title":"NUS-MATH530 Linear Vehicles"},{"categories":null,"contents":"Factoids: \\((AB)^{*} = B^{*} A^{*}\\), \\((A+B)^{*} = A^{*} + B^{*}\\) A unitary operator is invertible, and the inverse of its matrix representation is its transpose Take \\(M\\) a unitary square matrix, with orthonormal columns. Note that this matrix, by construction, sends each basis \\(v_{j}\\) to $ej$\u0026mdash;a set of \\(dim\\ V\\) (as there are \\(dim\\ V\\) columns to \\(M\\)) linearly independent (as \\(e_{j}\\), through orthonormality, are linearly independent) vectors. As we have \\(dim\\ V\\) linearly independent vectors, the \\(e_{j}\\) form a basis. As each \\(v_{j}\\) is sent to $ej$\u0026mdash;both a basis of $V$\u0026mdash;we note that the finite-dimensional operator corresponding to \\(M\\) is surjective and hence invertible.\nConstruct now the matrix \\(M^{*}\\). Consider \\(M M^{*}\\). 
Note that the multiplication operation will require taking the inner product of each row of \\(M\\), against each column of \\(M^{*}\\); that is, this operation will result in taking the inner products between each pair of orthonormal columns of \\(M\\).\nRecall that, per the definition of orthonormal vectors, for a pair of vectors \\(e_{i}, e_{j}\\), \\(\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\) and \\(=1\\) for \\(i=j\\). Therefore, this row-column product will be \\(1\\) when row \\(j\\) and column \\(j\\) is multiplied together and \\(0\\) otherwise.\nIn this fashion, \\(M M^{*} =I\\); in a similar fashion, \\(M^{*} M = I\\). Therefore, \\(M^{*} = M^{-1}\\).\nResult 2: unitary matricies are normal Recall the matrix \\(M\\) is normal if \\(A A^{*} = A^{*} A\\). Now, recall that for a unitary operator \\(A^{*} = A^{-1}\\).\nNow, we have that:\n\\begin{equation} A A^{*} = A A^{-1} = I = A^{-1} A = A^{*}A \\end{equation}\nResult 3: self-adjoint matricies are normal Recall a self-adjoint matrix acts like \\(A = A^{*}\\).\nNow:\n\\begin{equation} A A^{*} = A A = A^{*} A \\end{equation}\nObservation 1: some matricies are both self-adjoint and unitary Take the symmetric matrix formed by conjoining each of the standard bases of euclidian space \\(\\mathbb{F}^{n}\\). That is, the identity matrix.\nThe matrix has orthonormal columns (as the standard bases are orthonormal), and self-adjoint as it is symmetric.\nProblem 1: Venn Diagram Per results above.\nProblem 2: Group! Self-Adjoint Not closed under multiplication:\n\\begin{equation} \\mqty(a \u0026amp; b \\\\ b \u0026amp; c) \\mqty(f \u0026amp; g \\\\ g\u0026amp; h) = \\mqty(af + bg \u0026amp; ag+bh \\\\ bf+cg \u0026amp; bg+ch) \\end{equation}\nEvidently, this matrix is no longer symmetric (i.e. not self adjoint).\nUnitary Do form a group!\nNormal There\u0026rsquo;s no inverse for \\(0\\).\nIs this proof taking too much of a shortcut? 
/ Wishywashy.\nBy the complex spectral theorm, \\(T\\) being normal implies that there is an orthonormal bases of eigenvalues of \\(T\\) (i.e. there is a diagonal representation of \\(T\\)). This can be obtained with Schur\u0026rsquo;s theorem, then applying the condition that \\(A A^{*} = A^{*}A\\) to show that the \u0026ldquo;upper-triangular\u0026rdquo; matrix formed by the orthonormal bases is actually diagonal.\nBy calculation, diagonal matricies\u0026rsquo; multiplication is closed.\nWe now inherit the identity and associativity from general matricies.\nSo invertible normal matricies form a group.\n\u0026ldquo;Matrix Adjoint\u0026rdquo; \\(A^{*}\\) is the adjoint of the matrix.\nThat:\n\\begin{equation} \\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle \\end{equation}\n\\(A = A^{*} \\implies \\lambda_{i} \\in \\mathbb{R}\\) \\(A^{*} A = A A^{*} \\implies\\) diagonalizable based on an orthonormal basis of eigenvectors \\(A\\) is orthogonal/unitary \\(\\implies\\) \\(A^{*} = A^{-1}\\) 7.13: E.v. 
of self-adjoint operators are real.\u003c/p\u003e\n\u003cp\u003e7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/p\u003e\n\u003cp\u003e7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e7.22\u003c/strong\u003e: eigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal.\u003c/p\u003e\n\u003cp\u003eAlso \u003cstrong\u003e7.24\u003c/strong\u003e: the spectral theorem\u0026mdash;that if \\(T\\) is normal, then \\(V\\) has an orthonormal basis of eigenvectors of \\(T\\) and so \\(T\\) is diagonalizable with respect to an orthonormal basis\u003c/p\u003e\n\u003cp\u003eRecall \u0026ldquo;normal\u0026rdquo;: \\(A A^{*} = A^{*} A\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_matrix_adjectives/","tags":null,"title":"NUS-MATH530 Matrix Adjectives"},{"categories":null,"contents":"Equation of a Plane We want to determine all points on the plane formed by two vectors.\nLet\u0026rsquo;s take two vectors \\(\\vec{u} \\in V\\) and \\(\\vec{v} \\in V\\). The orthogonal vector to the both of them (i.e. the normal direction of the plane) is:\n\\begin{equation} \\vec{u}\\times \\vec{v} \\end{equation}\nby the definition of the cross product.\nThe points on the plane, therefore, have to be orthogonal themselves to this normal vector. 
Note that the multiplication operation will require taking the inner product of each \u003cem\u003erow\u003c/em\u003e of \\(M\\), against each \u003cem\u003ecolumn\u003c/em\u003e of \\(M^{*}\\); that is, this operation will result in taking the inner products between each pair of orthonormal columns of \\(M\\).\u003c/p\u003e\n\u003cp\u003eRecall that, per the definition of orthonormal vectors, for a pair of vectors \\(e_{i}, e_{j}\\), \\(\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\) and \\(=1\\) for \\(i=j\\). Therefore, this row-column product will be \\(1\\) when row \\(j\\) and column \\(j\\) is multiplied together and \\(0\\) otherwise.\u003c/p\u003e\n\u003cp\u003eIn this fashion, \\(M M^{*} =I\\); in a similar fashion, \\(M^{*} M = I\\). Therefore, \\(M^{*} = M^{-1}\\).\u003c/p\u003e\n\u003ch2 id=\"result-2-unitary-matricies-are-normal\"\u003eResult 2: unitary matricies are normal\u003c/h2\u003e\n\u003cp\u003eRecall the matrix \\(M\\) is normal if \\(A A^{*} = A^{*} A\\). Now, recall that for a unitary operator \\(A^{*} = A^{-1}\\).\u003c/p\u003e\n\u003cp\u003eNow, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA A^{*} = A A^{-1} = I = A^{-1} A = A^{*}A\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"result-3-self-adjoint-matricies-are-normal\"\u003eResult 3: self-adjoint matricies are normal\u003c/h2\u003e\n\u003cp\u003eRecall a self-adjoint matrix acts like \\(A = A^{*}\\).\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA A^{*} = A A = A^{*} A\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"observation-1-some-matricies-are-both-self-adjoint-and-unitary\"\u003eObservation 1: some matricies are both self-adjoint and unitary\u003c/h2\u003e\n\u003cp\u003eTake the symmetric matrix formed by conjoining each of the standard bases of euclidian space \\(\\mathbb{F}^{n}\\). 
That is, the identity matrix.\u003c/p\u003e\n\u003cp\u003eThe matrix has orthonormal columns (as the standard bases are orthonormal), and self-adjoint as it is symmetric.\u003c/p\u003e\n\u003ch2 id=\"problem-1-venn-diagram\"\u003eProblem 1: Venn Diagram\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-04_20-58-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePer results above.\u003c/p\u003e\n\u003ch2 id=\"problem-2-group\"\u003eProblem 2: Group!\u003c/h2\u003e\n\u003ch3 id=\"self-adjoint\"\u003eSelf-Adjoint\u003c/h3\u003e\n\u003cp\u003eNot closed under multiplication:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a \u0026amp; b \\\\ b \u0026amp; c) \\mqty(f \u0026amp; g \\\\ g\u0026amp; h) = \\mqty(af + bg \u0026amp; ag+bh \\\\ bf+cg \u0026amp; bg+ch)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEvidently, this matrix is no longer symmetric (i.e. not self adjoint).\u003c/p\u003e\n\u003ch3 id=\"unitary\"\u003eUnitary\u003c/h3\u003e\n\u003cp\u003eDo form a group!\u003c/p\u003e\n\u003ch3 id=\"normal\"\u003eNormal\u003c/h3\u003e\n\u003cp\u003eThere\u0026rsquo;s no inverse for \\(0\\).\u003c/p\u003e\n\u003cp\u003eIs this proof taking too much of a shortcut? / Wishywashy.\u003c/p\u003e\n\u003cp\u003eBy the complex spectral theorm, \\(T\\) being normal implies that there is an orthonormal bases of eigenvalues of \\(T\\) (i.e. there is a diagonal representation of \\(T\\)). 
This can be obtained with Schur\u0026rsquo;s theorem, then applying the condition that \\(A A^{*} = A^{*}A\\) to show that the \u0026ldquo;upper-triangular\u0026rdquo; matrix formed by the orthonormal bases is actually diagonal.\u003c/p\u003e\n\u003cp\u003eBy calculation, diagonal matricies\u0026rsquo; multiplication is closed.\u003c/p\u003e\n\u003cp\u003eWe now inherit the identity and associativity from general matricies.\u003c/p\u003e\n\u003cp\u003eSo invertible normal matricies form a group.\u003c/p\u003e\n\u003ch2 id=\"matrix-adjoint\"\u003e\u0026ldquo;Matrix Adjoint\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003e\\(A^{*}\\) is the \u003cstrong\u003eadjoint\u003c/strong\u003e of the matrix.\u003c/p\u003e\n\u003cp\u003eThat:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-08_09-42-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003e\\(A = A^{*} \\implies \\lambda_{i} \\in \\mathbb{R}\\)\u003c/li\u003e\n\u003cli\u003e\\(A^{*} A = A A^{*} \\implies\\) diagonalizable based on an orthonormal basis of eigenvectors\u003c/li\u003e\n\u003cli\u003e\\(A\\) is orthogonal/unitary \\(\\implies\\) \\(A^{*} = A^{-1}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003e\u003cstrong\u003e7.13\u003c/strong\u003e: E.v. 
of self-adjoint operators are real.\u003c/p\u003e\n\u003cp\u003e7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/p\u003e\n\u003cp\u003e7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e7.22\u003c/strong\u003e: eigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal.\u003c/p\u003e\n\u003cp\u003eAlso \u003cstrong\u003e7.24\u003c/strong\u003e: the spectral theorem\u0026mdash;that if \\(T\\) is normal, then \\(V\\) has an orthonormal basis of eigenvectors of \\(T\\) and so \\(T\\) is diagonalizable with respect to an orthonormal basis\u003c/p\u003e\n\u003cp\u003eRecall \u0026ldquo;normal\u0026rdquo;: \\(A A^{*} = A^{*} A\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_matrix_adjectives/","tags":null,"title":"NUS-MATH530 Matrix Adjectives"},{"categories":null,"contents":"Equation of a Plane We want to determine all points on the plane formed by two vectors.\nLet\u0026rsquo;s take two vectors \\(\\vec{u} \\in V\\) and \\(\\vec{v} \\in V\\). The orthogonal vector to the both of them (i.e. the normal direction of the plane) is:\n\\begin{equation} \\vec{u}\\times \\vec{v} \\end{equation}\nby the definition of the cross product.\nThe points on the plane, therefore, have to be orthogonal themselves to this normal vector. 
This means that the dot product of the candidate vector against these vectors should be \\(0\\):\n\\begin{equation} (\\vec{u} \\times \\vec{v}) \\cdot \\begin{pmatrix} x_{1} \\\\ \\dots \\\\ x_{n} \\end{pmatrix} = 0 \\end{equation}\nThis forms the final equation for a plane given two vectors in \\(\\mathbb{F}^{n}\\).\nA.B Exercises Double Negative We desire that \\(-(-v)=v \\forall v \\in V\\)\nBy distributivity in vector spaces, and the fact that \\(0v=0\\), we have that:\n\\begin{equation} v+(-1)v = (1-1)v = 0v = 0 \\end{equation}\nTherefore, \\((-1)v=-v\\).\nWe now have:\n\\begin{equation} -(-v) = -((-1)v) \\end{equation}\nThe scalar multiple of \\(v\\), by definition, is also \\(\\in V\\) if \\(v \\in V\\). Therefore, it itself holds that:\n\\begin{equation} (-1)((-1)v) \\end{equation}\nBy associativity:\n\\begin{equation} (-1\\cdot -1)v \\end{equation}\nFinally:\n\\begin{equation} (-1\\cdot -1)v = (1v) = v\\ \\blacksquare \\end{equation}\nOne of it is zero If \\(a \\in \\mathbb{F}\\), \\(v \\in V\\), and \\(av=0\\), we desire that \\(a=0\\) or \\(v=0\\). We perform casework.\nCase 1: \\(a=0\\) \u0026ndash; we are done.\nCase 2: \\(a \\neq 0\\): As \\(a \\in \\mathbb{F}\\), and \\(a \\neq 0\\), \\(\\exists \\frac{1}{a}: a\\cdot \\frac{1}{a}=1\\).\nTherefore:\n\\begin{align} \u0026amp;av = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{1}{a}av = \\frac{1}{a} 0 \\\\ \\Rightarrow\\ \u0026amp; 1v = \\frac{1}{a} 0 \\\\ \\Rightarrow\\ \u0026amp; 1v = 0 \\\\ \\Rightarrow\\ \u0026amp; v=0\\ \\blacksquare \\end{align}\nExistence and Uniqueness Given Equation Given \\(v,w \\in V\\), we desire a unique \\(x\\in V: v+3x=w\\).\nLet\u0026rsquo;s first check existence. Take the expression:\n\\begin{equation} n = \\frac{1}{3} (w-v) \\end{equation}\nAs both \\(v,w \\in V\\), subtraction (addition) and scalar multiplication are defined. 
Therefore, \\(\\forall w,v \\in V\\), we can construct such an \\(n\\).\nSupplying the expression into \\(v+3x\\) for the definition of \\(x\\):\n\\begin{align} v+3x \u0026amp;= v+3\\qty(\\frac{1}{3}(w-v)) \\\\ \u0026amp;= v+(w-v) \\\\ \u0026amp;= v+w-v \\\\ \u0026amp;= v-v+w \\\\ \u0026amp;= 0+w \\\\ \u0026amp;= w \\end{align}\nby distributivity, associativity, and commutativity in vector spaces, yielding \\(w\\) as desired.\nNow let\u0026rsquo;s check uniqueness.\nSuppose \\(\\exists x_1, x_2: v+3x_1=w\\) and \\(v+3x_2=w\\).\nBy transitivity:\n\\begin{equation} v+3x_1=v+3x_2 \\end{equation}\nApplying \\(-v\\) to both sides:\n\\begin{equation} 3x_1=3x_2 \\end{equation}\nFinally, applying \\(\\frac{1}{3}\\) to both sides:\n\\begin{equation} x_{1}= x_2 \\end{equation}\nTherefore, there only exists one unique \\(x\\) which satisfies the expression. \\(\\blacksquare\\)\nEmpty Set is Not a Vector Space The empty set is not a vector space as it doesn\u0026rsquo;t have an additive identity. \\(\\blacksquare\\)\nAdditive Inverse is also Zero Multiplication We first take the additive inverse expression:\n\\begin{equation} \\forall v \\in V, \\exists -v: v+(-v) = 0 \\end{equation}\nTake now:\n\\begin{equation} 0v \\end{equation}\nWe have that:\n\\begin{align} 0v \u0026amp;= (0+0)v \\\\ \u0026amp;= 0v + 0v \\end{align}\nBy distributivity.\nAs \\(0v \\in V\\), \\(\\exists -0v: 0v+(-0v)=0\\).\n\\begin{align} \u0026amp;0v = 0v+0v \\\\ \\Rightarrow\\ \u0026amp; 0v-0v = 0v+0v-0v \\\\ \\Rightarrow\\ \u0026amp; 0 = 0v \\end{align}\nas desired. 
Now, we will start from this condition and work out way backwards.\nNote that the statement for additive inverse condition is that:\n\\begin{equation} \\forall v \\in V, \\exists -v: v+(-v) = 0 \\end{equation}\nLet us begin with the expression that:\n\\begin{equation} 0=0v \\end{equation}\nWe have that:\n\\begin{equation} 0=(1-1)v \\end{equation}\nThen, we have by distributivity:\n\\begin{equation} 0 = v + (-1)v \\end{equation}\nscalar multiplication is defined on a vector space. Therefore, we have \\(-1v\\) to construct such an additive inverse \\(\\forall v \\in V\\). \\(\\blacksquare\\)\nWeird Vector Space All operations are defined as given.\nTake scalars \\(t_1, t_2 \\in \\mathbb{R}\\).\n\\begin{equation} (t_1-t_2)\\infty = \\infty \\end{equation}\nYet, if we follow the rules of distribution:\n\\begin{equation} (t_1 -t_2)\\infty = \\infty -\\infty =0 \\end{equation}\nTherefore, distribution doesn\u0026rsquo;t hold on this new structure. It is not a vector space. \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"equation-of-a-plane\"\u003eEquation of a Plane\u003c/h2\u003e\n\u003cp\u003eWe want to determine all \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003es on the plane formed by two vectors.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take two vectors \\(\\vec{u} \\in V\\) and \\(\\vec{v} \\in V\\). The orthogonal vector to the both of them (i.e. the normal direction of the plane) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{u}\\times \\vec{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby the definition of the \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003es on the plane, therefore, have to be orthogonal themselves to this normal vector. 
This means that the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e of the candidate vector against these vectors should be \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\vec{u} \\times \\vec{v}) \\cdot \\begin{pmatrix}\nx_{1} \\\\ \\dots \\\\ x_{n}\n\\end{pmatrix} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis forms the final equation for a plane given two vectors in \\(\\mathbb{F}^{n}\\).\u003c/p\u003e\n\u003ch2 id=\"a-dot-b-exercises\"\u003eA.B Exercises\u003c/h2\u003e\n\u003ch3 id=\"double-negative\"\u003eDouble Negative\u003c/h3\u003e\n\u003cp\u003eWe desire that \\(-(-v)=v \\forall v \\in V\\)\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e in vector spaces, and the fact that \\(0v=0\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv+(-1)v = (1-1)v = 0v = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, \\((-1)v=-v\\).\u003c/p\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-(-v) = -((-1)v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe scalar multiple of \\(v\\), by definition, is also \\(\\in V\\) if \\(v \\in V\\). Therefore, it itself holds that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-1)((-1)v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-1\\cdot -1)v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-1\\cdot -1)v = (1v) = v\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"one-of-it-is-zero\"\u003eOne of it is zero\u003c/h3\u003e\n\u003cp\u003eIf \\(a \\in \\mathbb{F}\\), \\(v \\in V\\), and \\(av=0\\), we desire that \\(a=0\\) or \\(v=0\\). 
We perform casework.\u003c/p\u003e\n\u003cp\u003eCase 1: \\(a=0\\) \u0026ndash; we are done.\u003c/p\u003e\n\u003cp\u003eCase 2: \\(a \\neq 0\\):\nAs \\(a \\in \\mathbb{F}\\), and \\(a \\neq 0\\), \\(\\exists \\frac{1}{a}: a\\cdot \\frac{1}{a}=1\\).\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;av = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{1}{a}av = \\frac{1}{a} 0 \\\\\n\\Rightarrow\\ \u0026amp; 1v = \\frac{1}{a} 0 \\\\\n\\Rightarrow\\ \u0026amp; 1v = 0 \\\\\n\\Rightarrow\\ \u0026amp; v=0\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"existence-and-uniqueness-given-equation\"\u003eExistence and Uniqueness Given Equation\u003c/h3\u003e\n\u003cp\u003eGiven \\(v,w \\in V\\), we desire a unique \\(x\\in V: v+3x=w\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first check existence. Take the expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nn = \\frac{1}{3} (w-v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs both \\(v,w \\in V\\), subtraction (\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e) and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e are defined. 
Therefore, \\(\\forall w,v \\in V\\), we can construct such an \\(n\\).\u003c/p\u003e\n\u003cp\u003eSupplying the expression into \\(v+3x\\) for the definition of \\(x\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nv+3x \u0026amp;= v+3\\qty(\\frac{1}{3}(w-v)) \\\\\n\u0026amp;= v+(w-v) \\\\\n\u0026amp;= v+w-v \\\\\n\u0026amp;= v-v+w \\\\\n\u0026amp;= 0+w \\\\\n\u0026amp;= w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eby \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, and \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es, yielding \\(w\\) as desired.\u003c/p\u003e\n\u003cp\u003eNow let\u0026rsquo;s check uniqueness.\u003c/p\u003e\n\u003cp\u003eSuppose \\(\\exists x_1, x_2: v+3x_1=w\\) and \\(v+3x_2=w\\).\u003c/p\u003e\n\u003cp\u003eBy transitivity:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv+3x_1=v+3x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying \\(-v\\) to both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n3x_1=3x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, applying \\(\\frac{1}{3}\\) to both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{1}= x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, there only exists one unique \\(x\\) which satisfies the expression. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"empty-set-is-not-a-vector-space\"\u003eEmpty Set is Not a Vector Space\u003c/h3\u003e\n\u003cp\u003eThe empty set is not a vector space as it doesn\u0026rsquo;t have an \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"additive-inverse-is-also-zero-multiplication\"\u003eAdditive Inverse is also Zero Multiplication\u003c/h3\u003e\n\u003cp\u003eWe first take the additive inverse expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall v \\in V, \\exists -v: v+(-v) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTake now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n0v \u0026amp;= (0+0)v \\\\\n\u0026amp;= 0v + 0v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAs \\(0v \\in V\\), \\(\\exists -0v: 0v+(-0v)=0\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;0v = 0v+0v \\\\\n\\Rightarrow\\ \u0026amp; 0v-0v = 0v+0v-0v \\\\\n\\Rightarrow\\ \u0026amp; 0 = 0v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eas desired. Now, we will start from this condition and work out way backwards.\u003c/p\u003e\n\u003cp\u003eNote that the statement for additive inverse condition is that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall v \\in V, \\exists -v: v+(-v) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us begin with the expression that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0=0v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0=(1-1)v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, we have by \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = v + (-1)v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is defined on a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. Therefore, we have \\(-1v\\) to construct such an additive inverse \\(\\forall v \\in V\\). 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"weird-vector-space\"\u003eWeird Vector Space\u003c/h3\u003e\n\u003cp\u003eAll operations are defined as given.\u003c/p\u003e\n\u003cp\u003eTake scalars \\(t_1, t_2 \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(t_1-t_2)\\infty = \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYet, if we follow the rules of distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(t_1 -t_2)\\infty = \\infty -\\infty =0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, distribution doesn\u0026rsquo;t hold on this new structure. It is not a vector space. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_plane_and_1_b/","tags":null,"title":"NUS-MATH530 Plane and 1.B"},{"categories":null,"contents":"Prove but \\(T\\) is diagonalizable if and only if the matrix of \\(T\\) is similar to a diagonal matrix.\nTry 2.\nGiven similarity:\nSo we have that:\n\\begin{equation} D = S^{-1} A S \\end{equation}\nwhere, \\(D\\) is diagonal. We apply \\(S\\) to both sides to yield:\n\\begin{equation} SD = AS \\end{equation}\nNow, note that \\(S\\) is invertible. This means that its column s are linearly independent (as it is an operator, which means it is injective, and hence has a zero null space; that indicates that the dimension of its range is that of the whole space: indicating its columns vectors are spanning; there is \\(dim\\ V\\) such columns, so it is a basis and hence linearly independent).\nLet \\(S = [v_1 | \\dots | v_{n}]\\); now, \\(SD = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\nBy that same definition above course, \\(A[v_1 | \\dots | v_{n}] = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\nFinally, then, by definition, \\(v_1 \\dots v_{n}\\) are eigenvectors of \\(A\\). Note again that, per the above, this is \\(n\\) linearly independent eigenvectors in a space of \\(\\dim n\\) \u0026mdash; this makes them a basis of \\(V\\). 
Having made a basis of eigenvectors of \\(A\\), it is diagonalizable.\nGiven diagonalizability:\nConstruct \\(S= [v_1 | \\dots | v_{n}]\\), a basis of eigenvectors of \\(A\\) which is diagonalizable. Now, \\(AS\\) would send each of the vectors to their corresponding scales, meaning: \\(AS = [\\lambda_{1} v_{1} | \\dots | \\lambda_{n} v_{n}]\\).\nLastly, applying \\(S^{-1}\\) again would send each vector to each of the standard basis encoded in the original space given homogeneity of the \\(\\lambda\\); leaving the vector of \\(\\lambda_{j}\\) scaled by the identity: creating a diagonal \\(D\\) matrix. \\(\\blacksquare\\)\n","html":"\u003cp\u003eProve but \\(T\\) is \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e if and only if the matrix of \\(T\\) is similar to a diagonal matrix.\u003c/p\u003e\n\u003cp\u003eTry 2.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eGiven similarity:\u003c/p\u003e\n\u003cp\u003eSo we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD = S^{-1} A S\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(D\\) is diagonal. We apply \\(S\\) to both sides to yield:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nSD = AS\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, note that \\(S\\) is invertible. 
This means that its column s are linearly independent (as it is an operator, which means it is injective, and hence has a zero null space; that indicates that the dimension of its range is that of the whole space: indicating its columns vectors are spanning; there is \\(dim\\ V\\) such columns, so it is a basis and hence linearly independent).\u003c/p\u003e\n\u003cp\u003eLet \\(S = [v_1 | \\dots | v_{n}]\\); now, \\(SD = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\u003c/p\u003e\n\u003cp\u003eBy that same definition above course, \\(A[v_1 | \\dots | v_{n}] = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, by definition, \\(v_1 \\dots v_{n}\\) are eigenvectors of \\(A\\). Note again that, per the above, this is \\(n\\) linearly independent eigenvectors in a space of \\(\\dim n\\) \u0026mdash; this makes them a basis of \\(V\\). Having made a basis of eigenvectors of \\(A\\), it is diagonalizable.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eGiven diagonalizability:\u003c/p\u003e\n\u003cp\u003eConstruct \\(S= [v_1 | \\dots | v_{n}]\\), a basis of eigenvectors of \\(A\\) which is diagonalizable. Now, \\(AS\\) would send each of the vectors to their corresponding scales, meaning: \\(AS = [\\lambda_{1} v_{1} | \\dots | \\lambda_{n} v_{n}]\\).\u003c/p\u003e\n\u003cp\u003eLastly, applying \\(S^{-1}\\) again would send each vector to each of the standard basis encoded in the original space given homogeneity of the \\(\\lambda\\); leaving the vector of \\(\\lambda_{j}\\) scaled by the identity: creating a diagonal \\(D\\) matrix. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_similar_to_diagonal/","tags":null,"title":"NUS-MATH530 Similar to Diagonal"},{"categories":null,"contents":"Two Variables Let\u0026rsquo;s begin with the equations:\n\\begin{equation} \\begin{cases} 2x+y = 3 \\\\ x - y = 0 \\end{cases} \\end{equation}\nWe will first change this into a matrix equation:\n\\begin{equation} \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\end{pmatrix} = \\begin{pmatrix} 3 \\\\ 0 \\end{pmatrix} \\end{equation}\nWe need to find, then, the inverse of:\n\\begin{equation} \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} \\end{equation}\nNamely, we need the matrix such that:\n\\begin{equation} M \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = I \\end{equation}\nTo do this, we can use row operations on both sides such that the left side becomes the identity, we are essentially inverting the process of reversing a matrix.\n\\begin{equation} \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} 1 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s begin:\n\\begin{align} \u0026amp; \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} 1 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\\\ \\Rightarrow\\ \u0026amp; \\begin{pmatrix} 0 \u0026amp; 3 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} 1 \u0026amp; -2 \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\\\ \\Rightarrow\\ \u0026amp; \\begin{pmatrix} 0 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\\\ \\Rightarrow\\ \u0026amp; \\begin{pmatrix} 0 \u0026amp; 1 \\\\ 1 \u0026amp; 0 \\end{pmatrix} = \\begin{pmatrix} \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ \\frac{1}{3} \u0026amp; \\frac{1}{3} \\end{pmatrix} \\\\ \\Rightarrow\\ 
\u0026amp; \\begin{pmatrix} 1 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \\end{pmatrix} = \\begin{pmatrix} \\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\end{pmatrix} \\end{align}\nFinally, then, we will applying this matrix to the input:\n\\begin{align} \\begin{pmatrix} \\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\end{pmatrix} \\begin{pmatrix} 3 \\\\ 0 \\end{pmatrix} = \\begin{pmatrix} 1 \\\\ 1 \\end{pmatrix} \\end{align}\nThree Variables We do this again, but now with a much larger matrix. Namely:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0 \\end{pmatrix} \\end{equation}\nI spend a good two hours (yes) trying to invert this. At this point, I know its invertable but I keep making mistakes. However, a solution exists and it is of shape:\n\\begin{equation} \\begin{pmatrix} \\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; \\frac{2}{5} \\\\ \\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; -\\frac{3}{5} \\\\ \\frac{2}{5} \u0026amp; -\\frac{3}{5} \u0026amp; \\frac{4}{5} \\end{pmatrix} \\end{equation}\nAnd, applying the output, we have that:\n\\begin{equation} \\begin{pmatrix} 1 \\\\ -1 \\\\ 1 \\end{pmatrix} \\end{equation}\nSo complicated of an inverse, for such a simple result\u0026hellip;\nMatrix Multiplication Matrix multiplication is not commutative. While you can, for instance, multiply a \\(2\\times 3\\) by a \\(3\\times 3\\), we cannot do it the other way.\nFor an equation with three variables, you need three equations at a minimum to have at least one solution; you can get at most the number of equations number of solutions with fewer equations. 
You probably will have no solutions if you have more equations\u0026mdash;the result is likely to be overdetermined; of course, two equations may be the same relation then in which case one is effectively nulled.\n","html":"\u003ch2 id=\"two-variables\"\u003eTwo Variables\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s begin with the equations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n2x+y = 3 \\\\\nx - y = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will first change this into a matrix equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n2 \u0026amp; 1 \\\\\n1 \u0026amp; -1\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = \\begin{pmatrix}\n3 \\\\ 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe need to find, then, the inverse of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNamely, we need the matrix such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM \\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo do this, we can use row operations on both sides such that the left side becomes the identity, we are essentially inverting the process of reversing a matrix.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \u0026amp; 0 \\\\ 0 \u0026amp; 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \u0026amp; 0 \\\\ 0 \u0026amp; 1\n\\end{pmatrix} \\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n0 \u0026amp; 3 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \u0026amp; -2 \\\\ 0 \u0026amp; 1\n\\end{pmatrix} 
\\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n0 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n\\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ 0 \u0026amp; 1\n\\end{pmatrix} \\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n0 \u0026amp; 1 \\\\ 1 \u0026amp; 0\n\\end{pmatrix} = \\begin{pmatrix}\n\\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ \\frac{1}{3} \u0026amp; \\frac{1}{3}\n\\end{pmatrix} \\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n1 \u0026amp; 0 \\\\ 0 \u0026amp; 1\n\\end{pmatrix} = \\begin{pmatrix}\n\\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3}\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinally, then, we will applying this matrix to the input:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\n\\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3}\n\\end{pmatrix} \\begin{pmatrix}\n3 \\\\ 0\n\\end{pmatrix} = \\begin{pmatrix}\n1 \\\\ 1\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"three-variables\"\u003eThree Variables\u003c/h2\u003e\n\u003cp\u003eWe do this again, but now with a much larger matrix. Namely:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eI spend a good two hours (yes) trying to invert this. At this point, I know its invertable but I keep making mistakes. 
However, a solution exists and it is of shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n\\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; \\frac{2}{5} \\\\\n\\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; -\\frac{3}{5} \\\\\n\\frac{2}{5} \u0026amp; -\\frac{3}{5} \u0026amp; \\frac{4}{5}\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd, applying the output, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\\n-1 \\\\\n1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo complicated of an inverse, for such a simple result\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"matrix-multiplication\"\u003eMatrix Multiplication\u003c/h2\u003e\n\u003cp\u003eMatrix multiplication is not commutative. While you can, for instance, multiply a \\(2\\times 3\\) by a \\(3\\times 3\\), we cannot do it the other way.\u003c/p\u003e\n\u003cp\u003eFor an equation with three variables, you need three equations at a minimum to have at least one solution; you can get at most the number of equations number of solutions with fewer equations. You probably will have no solutions if you have more equations\u0026mdash;the result is likely to be overdetermined; of course, two equations may be the same relation then in which case one is effectively nulled.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_solving_systems/","tags":null,"title":"NUS-MATH530 Solving Systems"},{"categories":null,"contents":"Suppose \\(\\mathbb{F} = \\mathbb{R}\\), and \\(V \\neq \\{0\\}\\). Replace the positivity condition with the condition that \\(\\langle v,v \\rangle \u0026gt; 0\\) for some \\(v \\in V\\). 
Show that this change in definition does not change the set of functions from \\(V \\times V\\) to \\(\\mathbb{R}\\) that are inner products on \\(V\\).\nWe hope to show that \\(\\langle v,v \\rangle \u0026gt;0\\) for some \\(v \\in V\\) implies that \\(\\langle v,v \\rangle \\geq 0\\) for all \\(v \\in V\\) in real vector spaces.\nTake some \\(v_0 \\in V\\) such that \\(\\langle v_0,v_0 \\rangle \u0026gt;0\\). Now, WLOG let \\(v \\in V\\) and \\(v = v_0+w\\). So:\n\\begin{align} 0 \u0026amp;\u0026lt; \\langle v_0,v_0 \\rangle \\\\ \u0026amp;= \\langle v-w, v-w \\rangle \\\\ \u0026amp;= \\langle v,v \\rangle + \\langle w,w \\rangle - 2\\langle v,w \\rangle \\end{align}\nNow, the last step is possible because symmetry becomes conjugate symmetry in reals.\nWe now have that:\n\\begin{equation} 2 \\langle v,w \\rangle - \\langle w,w \\rangle \u0026lt; \\langle v,v \\rangle \\end{equation}\n","html":"\u003cp\u003eSuppose \\(\\mathbb{F} = \\mathbb{R}\\), and \\(V \\neq \\{0\\}\\). Replace the positivity condition with the condition that \\(\\langle v,v \\rangle \u0026gt; 0\\) for some \\(v \\in V\\). Show that this change in definition does not change the set of functions from \\(V \\times V\\) to \\(\\mathbb{R}\\) that are inner products on \\(V\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe hope to show that \\(\\langle v,v \\rangle \u0026gt;0\\) for some \\(v \\in V\\) implies that \\(\\langle v,v \\rangle \\geq 0\\) for all \\(v \\in V\\) in real vector spaces.\u003c/p\u003e\n\u003cp\u003eTake some \\(v_0 \\in V\\) such that \\(\\langle v_0,v_0 \\rangle \u0026gt;0\\). Now, WLOG let \\(v \\in V\\) and \\(v = v_0+w\\). 
So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n0 \u0026amp;\u0026lt; \\langle v_0,v_0 \\rangle \\\\\n\u0026amp;= \\langle v-w, v-w \\rangle \\\\\n\u0026amp;= \\langle v,v \\rangle + \\langle w,w \\rangle - 2\\langle v,w \\rangle\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, the last step is possible because symmetry becomes conjugate symmetry in reals.\u003c/p\u003e\n\u003cp\u003eWe now have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2 \\langle v,w \\rangle - \\langle w,w \\rangle \u0026lt; \\langle v,v \\rangle\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_some_6_a_problems/","tags":null,"title":"NUS-MATH530 Some 6.A Problems"},{"categories":null,"contents":"Proof: identity of a group is unique Assume for contradiction that there exists two identities \\(e_1\\) and \\(e_2\\) which are identities of the group \\(A\\). Take also an \\(a \\in A\\).\nGiven both \\(e_1\\) and \\(e_2\\) are identities, we have that:\n\\begin{equation} a * e_1 = a \\end{equation}\nas well as\n\\begin{equation} a * e_2 = a \\end{equation}\nTherefore, we have by the transitive property that:\n\\begin{equation} a * e_1 = a*e_2 \\end{equation}\nBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\n\\begin{equation} 1/a*a * e_1 = 1/a*a*e_2 \\end{equation}\nTherefore, that:\n\\begin{equation} e_1 = e_2\\ \\blacksquare \\end{equation}\nTherefore, there cannot be two unique identities in a group.\nProof: inverse of an element in a group is unique Take group \\(A\\) and element \\(a\\in A\\), assume for contradiction that there exists two inverses of \\(a\\) named here \\(a\u0026rsquo;_1\\) and \\(a\u0026rsquo;_2\\). 
Given they are both inverses for \\(a\\), we have that:\n\\begin{equation} a * a\u0026rsquo;_1 = 1 \\end{equation}\nas well as\n\\begin{equation} a * a\u0026rsquo;_2 = 1 \\end{equation}\nTherefore, we have by the transitive property that:\n\\begin{equation} a * a\u0026rsquo;_1 = a*a\u0026rsquo;_2 \\end{equation}\nBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\n\\begin{equation} 1/a*a * a\u0026rsquo;_1 = 1/a*a*a\u0026rsquo;_2 \\end{equation}\nTherefore, that:\n\\begin{equation} a\u0026rsquo;_1 = a\u0026rsquo;_2\\ \\blacksquare \\end{equation}\nTherefore, there cannot be two unique inverses for an element in group.\nProof: additive identity in field cannot have multiplicative inverse For some field \\(F\\) take its additive identity \\(0 \\in F\\). Assume for the sake of contradiction there exists a multiplicative inverse for \\(0\\) named \\(0\u0026rsquo; \\in F\\).\nLet\u0026rsquo;s take some \\(a \\in F\\). By definition of the additive identity, we have:\n\\begin{equation} 0 + a = a \\end{equation}\nWe will apply \\(0\u0026rsquo;\\) to both sides, we having that:\n\\begin{equation} 0\u0026rsquo;(0+a) = 0\u0026rsquo;a \\end{equation}\nDistributing \\(0\u0026rsquo;\\) to both sides, we have:\n\\begin{equation} 1 + 0\u0026rsquo;a = 0\u0026rsquo;a \\end{equation}\nGiven \\(a,0\u0026rsquo; \\in F\\), and multiplication is closed in \\(F\\) being a field, \\(0\u0026rsquo;a \\in F\\); applying \\(-0\u0026rsquo;a \\in F\\) the additive inverse of the result of multiplying together to both sides, we have that:\n\\begin{equation} 1 + 0\u0026rsquo;a - 0\u0026rsquo;a = 0\u0026rsquo;a - 0\u0026rsquo;a \\end{equation}\nAnd therefore:\n\\begin{equation} 1 = 0 \\end{equation}\nwhich is absurd, reaching the desired contradiction. 
\\(\\blacksquare\\)\nSystem \\begin{equation} \\begin{cases} x + 2y + z = 0 \\\\ 2x + 0y - z = 1 \\\\ x - y + z = 2 \\\\ \\end{cases} \\end{equation}\nWe will subtract the top and bottom expressions to have that:\n\\begin{equation} 3y = -2 \\end{equation}\nAnd to get:\n\\begin{equation} y = \\frac{-2}{3} \\end{equation}\nManipulating the second expression, we have that:\n\\begin{equation} 2x -1 = z \\end{equation}\nSubstituting this expression and \\(y\\) into the third expression, we have:\n\\begin{equation} x + \\frac{2}{3} + 2x -1 = 2 \\end{equation}\nperforming algebraic manipulations:\n\\begin{align} \u0026amp;3x + \\frac{2}{3} = 3 \\\\ \\Rightarrow\\ \u0026amp;3x = \\frac{7}{3} \\\\ \\Rightarrow\\ \u0026amp;x = \\frac{7}{9} \\end{align}\nAnd finally:\n\\begin{equation} \\frac{14}{9}-1 = z = \\frac{5}{9} \\end{equation}\nMultiply \\begin{equation} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0\\end{pmatrix} \\begin{pmatrix} x \\\\ y\\\\ z \\end{pmatrix} = \\begin{pmatrix} x+2y+z \\\\ 2x-z \\\\ x-y \\end{pmatrix} \\end{equation}\nThe inner dimensions (column vs. 
row) of the matricies have to be the same for them to be multiplied; matrix multiplication is not commutative.\nProof: 2x2 Matrices with Real Entries form a Group Under Addition Closure \\begin{equation} \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp;d \\end{pmatrix} + \\begin{pmatrix} e \u0026amp; f \\\\ g \u0026amp; h \\end{pmatrix} = \\begin{pmatrix} a+e \u0026amp; b+f \\\\ c+g \u0026amp; d+h \\end{pmatrix} \\end{equation}\nIdentity \\begin{equation} \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp;d \\end{pmatrix} + \\begin{pmatrix} 0 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \\end{pmatrix} = \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp; d \\end{pmatrix} \\end{equation}\nInverse \\begin{equation} \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp;d \\end{pmatrix} + \\begin{pmatrix} -a \u0026amp; -b \\\\ -c \u0026amp; -d \\end{pmatrix} = \\begin{pmatrix} 0 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \\end{pmatrix} \\end{equation}\nAssociative \\begin{equation} \\left ( \\begin{pmatrix} x_1 \u0026amp; x_2 \\\\ x_3 \u0026amp; x_4 \\end{pmatrix} + \\begin{pmatrix} y_1 \u0026amp; y_2 \\\\ y_3 \u0026amp; y_4 \\end{pmatrix} \\right) + \\begin{pmatrix} z_1 \u0026amp; z_2 \\\\ z_3 \u0026amp; z_4 \\end{pmatrix} = \\begin{pmatrix} (x_1+y_1)+z_1 \u0026amp; (x_2+y_2)+z_2 \\\\ (x_3+y_3)+z_3 \u0026amp; (x_4+y_4)+z_4 \\end{pmatrix} \\end{equation}\nwhich is equal, by associativity in \\(\\mathbb{F}\\), as:\n\\begin{equation} \\begin{pmatrix} x_1+(y_1+z_1) \u0026amp; x_2+(y_2+z_2) \\\\ x_3+(y_3+z_3) \u0026amp; x_4+(y_4+z_4) \\end{pmatrix} \\end{equation}\nAnd finally, this is equal to:\n\\begin{equation} \\begin{pmatrix} x_1 \u0026amp; x_2 \\\\ x_3 \u0026amp; x_4 \\end{pmatrix} + \\left (\\begin{pmatrix} y_1 \u0026amp; y_2 \\\\ y_3 \u0026amp; y_4 \\end{pmatrix} + \\begin{pmatrix} z_1 \u0026amp; z_2 \\\\ z_3 \u0026amp; z_4 \\end{pmatrix} \\right) \\end{equation}\nWe have therefore shown that 2x2 matricies form a group under addition.\nProof: 2x2 Matrices with Real Entries does not from a Group 
Under Multiplication Inverse The matrix\n\\begin{equation} \\begin{pmatrix} 0 \u0026amp; 0 \\\\ 0 \u0026amp;1 \\end{pmatrix} \\end{equation}\nis not invertable. In that, one cannot apply a matrix to this one to result in the multiplicative identity \\(I_2\\).\n","html":"\u003ch2 id=\"proof-identity-of-a-group-is-unique\"\u003eProof: identity of a group is unique\u003c/h2\u003e\n\u003cp\u003eAssume for contradiction that there exists two identities \\(e_1\\) and \\(e_2\\) which are identities of the group \\(A\\). Take also an \\(a \\in A\\).\u003c/p\u003e\n\u003cp\u003eGiven both \\(e_1\\) and \\(e_2\\) are identities, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * e_1 = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas well as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * e_2 = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we have by the transitive property that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * e_1 = a*e_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1/a*a * e_1 = 1/a*a*e_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne_1 = e_2\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, there cannot be two unique identities in a group.\u003c/p\u003e\n\u003ch2 id=\"proof-inverse-of-an-element-in-a-group-is-unique\"\u003eProof: inverse of an element in a group is unique\u003c/h2\u003e\n\u003cp\u003eTake group \\(A\\) and element \\(a\\in A\\), assume for contradiction that there exists two inverses of \\(a\\) named here \\(a\u0026rsquo;_1\\) and \\(a\u0026rsquo;_2\\). 
Given they are both inverses for \\(a\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * a\u0026rsquo;_1 = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas well as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * a\u0026rsquo;_2 = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we have by the transitive property that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * a\u0026rsquo;_1 = a*a\u0026rsquo;_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1/a*a * a\u0026rsquo;_1 = 1/a*a*a\u0026rsquo;_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\u0026rsquo;_1 = a\u0026rsquo;_2\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, there cannot be two unique inverses for an element in group.\u003c/p\u003e\n\u003ch2 id=\"proof-additive-identity-in-field-cannot-have-multiplicative-inverse\"\u003eProof: additive identity in field cannot have multiplicative inverse\u003c/h2\u003e\n\u003cp\u003eFor some field \\(F\\) take its additive identity \\(0 \\in F\\). Assume for the sake of contradiction there exists a multiplicative inverse for \\(0\\) named \\(0\u0026rsquo; \\in F\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take some \\(a \\in F\\). 
By definition of the additive identity, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 + a = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will apply \\(0\u0026rsquo;\\) to both sides, we having that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0\u0026rsquo;(0+a) = 0\u0026rsquo;a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDistributing \\(0\u0026rsquo;\\) to both sides, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 + 0\u0026rsquo;a = 0\u0026rsquo;a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(a,0\u0026rsquo; \\in F\\), and multiplication is closed in \\(F\\) being a field, \\(0\u0026rsquo;a \\in F\\); applying \\(-0\u0026rsquo;a \\in F\\) the additive inverse of the result of multiplying together to both sides, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 + 0\u0026rsquo;a - 0\u0026rsquo;a = 0\u0026rsquo;a - 0\u0026rsquo;a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is absurd, reaching the desired contradiction. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"system\"\u003eSystem\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx + 2y + z = 0 \\\\\n2x + 0y - z = 1 \\\\\nx - y + z = 2 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will subtract the top and bottom expressions to have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n3y = -2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{-2}{3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eManipulating the second expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2x -1 = z\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting this expression and \\(y\\) into the third expression, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx + \\frac{2}{3} + 2x -1 = 2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eperforming algebraic manipulations:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;3x + \\frac{2}{3} = 3 \\\\\n\\Rightarrow\\ \u0026amp;3x = \\frac{7}{3} \\\\\n\\Rightarrow\\ \u0026amp;x = \\frac{7}{9}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd finally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{14}{9}-1 = z = \\frac{5}{9}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"multiply\"\u003eMultiply\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\\end{pmatrix} \\begin{pmatrix} x \\\\\ny\\\\\nz \\end{pmatrix} = \\begin{pmatrix}\nx+2y+z \\\\\n2x-z \\\\\nx-y\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe inner dimensions (column vs. 
row) of the matricies have to be the same for them to be multiplied; matrix multiplication is not commutative.\u003c/p\u003e\n\u003ch2 id=\"proof-2x2-matrices-with-real-entries-form-a-group-under-addition\"\u003eProof: 2x2 Matrices with Real Entries form a Group Under Addition\u003c/h2\u003e\n\u003ch3 id=\"closure\"\u003eClosure\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp;d\n\\end{pmatrix} + \\begin{pmatrix}\ne \u0026amp; f \\\\\ng \u0026amp; h\n\\end{pmatrix} = \\begin{pmatrix}\na+e \u0026amp; b+f \\\\\nc+g \u0026amp; d+h\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"identity\"\u003eIdentity\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp;d\n\\end{pmatrix} + \\begin{pmatrix}\n0 \u0026amp; 0 \\\\\n0 \u0026amp; 0\n\\end{pmatrix} = \\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp; d\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"inverse\"\u003eInverse\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp;d\n\\end{pmatrix} + \\begin{pmatrix}\n-a \u0026amp; -b \\\\\n-c \u0026amp; -d\n\\end{pmatrix} = \\begin{pmatrix}\n0 \u0026amp; 0 \\\\\n0 \u0026amp; 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"associative\"\u003eAssociative\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\left (\n\\begin{pmatrix}\nx_1 \u0026amp; x_2 \\\\\nx_3 \u0026amp; x_4\n\\end{pmatrix} + \\begin{pmatrix}\ny_1 \u0026amp; y_2 \\\\\ny_3 \u0026amp; y_4\n\\end{pmatrix} \\right) + \\begin{pmatrix}\nz_1 \u0026amp; z_2 \\\\\nz_3 \u0026amp; z_4\n\\end{pmatrix} = \\begin{pmatrix}\n(x_1+y_1)+z_1 \u0026amp; (x_2+y_2)+z_2 \\\\\n(x_3+y_3)+z_3 \u0026amp; (x_4+y_4)+z_4\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is equal, by associativity in \\(\\mathbb{F}\\), as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_1+(y_1+z_1) \u0026amp; x_2+(y_2+z_2) \\\\\nx_3+(y_3+z_3) 
\u0026amp; x_4+(y_4+z_4)\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd finally, this is equal to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_1 \u0026amp; x_2 \\\\\nx_3 \u0026amp; x_4\n\\end{pmatrix} + \\left (\\begin{pmatrix}\ny_1 \u0026amp; y_2 \\\\\ny_3 \u0026amp; y_4\n\\end{pmatrix} + \\begin{pmatrix}\nz_1 \u0026amp; z_2 \\\\\nz_3 \u0026amp; z_4\n\\end{pmatrix} \\right)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have therefore shown that 2x2 matricies form a group under addition.\u003c/p\u003e\n\u003ch2 id=\"proof-2x2-matrices-with-real-entries-does-not-from-a-group-under-multiplication\"\u003eProof: 2x2 Matrices with Real Entries does \u003cem\u003enot\u003c/em\u003e from a Group Under Multiplication\u003c/h2\u003e\n\u003ch3 id=\"inverse\"\u003eInverse\u003c/h3\u003e\n\u003cp\u003eThe matrix\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n0 \u0026amp; 0 \\\\\n0 \u0026amp;1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis not invertable. In that, one cannot apply a matrix to this one to result in the multiplicative identity \\(I_2\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_some_matrix_manipulation/","tags":null,"title":"NUS-MATH530 Some Matrix Manipulation"},{"categories":null,"contents":"We declare known battery voltage \\(E(t)\\).\nHere are the \\(y\\) values.\n\\begin{equation} \\begin{cases} \\dv{x_1}{t} = y_{4}\\\\ \\dv{x_2}{t} = y_{3}\\\\ \\dv{x_3}{t} = y_{1}\\\\ \\dv{x_4}{t} = y_{2}\\\\ \\end{cases} \\end{equation}\nAnd here are some of the \\(x\\) values.\n\\begin{equation} \\begin{cases} \\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\ \\dv{y_1}{t}=-\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\ \\dv{y_4}{t} = -\\frac{R}{L}y_2-\\frac{2E(t)}{L} \\end{cases} \\end{equation}\nRight off the bat, we can see that we can make one substitution. 
That, given:\n\\begin{equation} \\begin{cases} \\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\ \\dv{x_4}{t} = y_{2} \\end{cases} \\end{equation}\nwe have that:\n\\begin{equation} y_2 = -\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\end{equation}\nThis renders the last expression:\n\\begin{align} \\dv{y_4}{t} \u0026amp;= -\\frac{R}{L}y_2-\\frac{2E(t)}{L} \\\\ \u0026amp;= -\\frac{R}{L}\\qty(-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R})-\\frac{2E(t)}{L} \\\\ \u0026amp;= \\qty(\\frac{2}{LC}x_2+\\frac{1}{LC}x_{3}+\\frac{2E(t)}{L})-\\frac{2E(t)}{L} \\\\ \u0026amp;= \\frac{2}{LC}x_2+\\frac{1}{LC}x_{3} \\end{align}\nSo now, we have the final unused expressions:\n\\begin{equation} \\begin{cases} \\dv{x_1}{t} = y_4 \\\\ \\dv{x_2}{t} = y_3 \\\\ \\dv{x_{3}}{t} = y_1 \\\\ \\dv{y_1}{t} = -\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\ \\dv{y_4}{t} = \\frac{2}{LC}x_2+\\frac{1}{LC}x_3 \\end{cases} \\end{equation}\n","html":"\u003cp\u003eWe declare known battery voltage \\(E(t)\\).\u003c/p\u003e\n\u003cp\u003eHere are the \\(y\\) values.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_1}{t} = y_{4}\\\\\n\\dv{x_2}{t} = y_{3}\\\\\n\\dv{x_3}{t} = y_{1}\\\\\n\\dv{x_4}{t} = y_{2}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd here are some of the \\(x\\) values.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\\n\\dv{y_1}{t}=-\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\\n\\dv{y_4}{t} = -\\frac{R}{L}y_2-\\frac{2E(t)}{L}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRight off the bat, we can see that we can make one substitution. 
That, given:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\\n\\dv{x_4}{t} = y_{2}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_2 = -\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis renders the last expression:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv{y_4}{t} \u0026amp;= -\\frac{R}{L}y_2-\\frac{2E(t)}{L} \\\\\n\u0026amp;= -\\frac{R}{L}\\qty(-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R})-\\frac{2E(t)}{L} \\\\\n\u0026amp;= \\qty(\\frac{2}{LC}x_2+\\frac{1}{LC}x_{3}+\\frac{2E(t)}{L})-\\frac{2E(t)}{L} \\\\\n\u0026amp;= \\frac{2}{LC}x_2+\\frac{1}{LC}x_{3}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eSo now, we have the final unused expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_1}{t} = y_4 \\\\\n\\dv{x_2}{t} = y_3 \\\\\n\\dv{x_{3}}{t} = y_1 \\\\\n\\dv{y_1}{t} = -\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\\n\\dv{y_4}{t} = \\frac{2}{LC}x_2+\\frac{1}{LC}x_3\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_circuts/","tags":null,"title":"NUS-MATH570 Circuits"},{"categories":null,"contents":"We need to solve this system:\n\\begin{equation} \\begin{cases} \\dv{I}{t} = -0.73U + 0.0438 + 0.4 \\dv{M}{t} \\\\ \\dv{U}{t} = 0.4I - 0.012 \\\\ \\dv{G}{t} = \\dv{M}{t}- I \\\\ M(t)=0.02\\sin (1.15t + \\phi) \\end{cases} \\end{equation}\nTo be able to work on this, let us create some functions:\n# variable t, dm = var(\u0026#34;t dm\u0026#34;) # functions I = function(\u0026#34;_I\u0026#34;)(t) # _I because i is imaginary U = function(\u0026#34;U\u0026#34;)(t) G = function(\u0026#34;G\u0026#34;)(t) # parameter phi = var(\u0026#34;phi\u0026#34;, latex_name=\u0026#34;\\phi\u0026#34;) # our equations eqns = [ diff(I,t) == -0.73*U + 0.0438 + 0.4*dm, diff(U,t) == 0.4*I - 0.012, 
diff(G,t) == dm - I ] eqns desolve(eqns, U, ivar=t, algorithm=\u0026#34;fricas\u0026#34;).expand() Great, now, we will run the laplace transform upon these equations:\n# laplace variable s = var(\u0026#34;s\u0026#34;) # laplaced functions Fi = var(\u0026#34;Fi\u0026#34;) Fu = var(\u0026#34;Fu\u0026#34;) Fg = var(\u0026#34;Fg\u0026#34;) Fm = var(\u0026#34;Fm\u0026#34;) # constants I0, U0, G0, M0 = var(\u0026#34;I0 U0 G0 M0\u0026#34;) # substitution dictionary subs = { laplace(I,t,s): Fi, laplace(U,t,s): Fu, laplace(G,t,s): Fg, laplace(M,t,s): Fm, I(0): I0, G(0): G0, U(0): U0, M(0): M0, } # laplace eqns laplace_eqns = [i.laplace(t, s).subs(subs) for i in eqns] laplace_eqns \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:20: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. I(Integer(0)): I0, \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:21: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. G(Integer(0)): G0, \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:22: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. U(Integer(0)): U0, \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:23: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. 
M(Integer(0)): M0, [Fi*s - I0 == 0.4*Fm*s - 0.73*Fu - 0.4*M0 + 0.0438/s, Fu*s - U0 == 0.4*Fi - 0.012/s, Fg*s - G0 == Fm*s - Fi - M0, Fm == (0.02*s*sin(phi) + 0.023*cos(phi))/(s^2 + 1.3225)] And then, let us solve the Laplace solutions:\n# substitute laplace_solutions = solve(laplace_eqns, Fi, Fu, Fg, Fm, solution_dict=True)[0] laplace_solutions {Fi: 1/100*(80000*(125*I0 - 50*M0 + sin(phi))*s^4 - 2000*(3650*U0 - 46*cos(phi) - 219)*s^3 + 200*(66125*I0 - 26450*M0 + 438)*s^2 - 193085*(50*U0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s), Fu: 1/50*(5000000*U0*s^4 + 4000*(500*I0 - 200*M0 + 4*sin(phi) - 15)*s^3 + 100*(66125*U0 + 184*cos(phi) + 876)*s^2 + 26450*(100*I0 - 40*M0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s), Fg: 1/100*(200000*(50*G0 - 50*M0 + sin(phi))*s^5 - 10000*(1000*I0 - 400*M0 - 23*cos(phi) + 8*sin(phi))*s^4 + 200*(80725*G0 - 80725*M0 + 36500*U0 - 460*cos(phi) + 292*sin(phi) - 2190)*s^3 - 40*(330625*I0 - 132250*M0 - 1679*cos(phi) + 2190)*s^2 + 193085*(20*G0 - 20*M0 + 50*U0 - 3)*s - 115851)/(100000*s^6 + 161450*s^4 + 38617*s^2), Fm: 2/5*(20*s*sin(phi) + 23*cos(phi))/(400*s^2 + 529)} Now we inverse Laplace transform:\nI_s(t) = inverse_laplace(laplace_solutions[Fi], s, t) U_s(t) = inverse_laplace(laplace_solutions[Fu], s, t) G_s(t) = inverse_laplace(laplace_solutions[Fg], s, t) M_s(t) = inverse_laplace(laplace_solutions[Fm], s, t) (I_s,U_s,G_s,M_s) (t |--\u0026gt; -1/2061000*sqrt(730)*(103050*U0 + 368*cos(phi) - 6183)*sin(1/50*sqrt(730)*t) + 1/1030500*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*cos(1/50*sqrt(730)*t) + 529/51525*cos(23/20*t)*sin(phi) + 529/51525*cos(phi)*sin(23/20*t) + 3/100, t |--\u0026gt; 1/37613250*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*sin(1/50*sqrt(730)*t) + 1/103050*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) - 184/51525*cos(phi)*cos(23/20*t) + 184/51525*sin(phi)*sin(23/20*t) + 3/50, t |--\u0026gt; -1/15045300*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 
30915)*sin(1/50*sqrt(730)*t) - 1/41220*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) + 1/103050*(920*cos(phi) + 2061*sin(phi))*cos(23/20*t) + 1/103050*(2061*cos(phi) - 920*sin(phi))*sin(23/20*t) + G0 - M0 + 5/2*U0 - 3/100*t - 3/20, t |--\u0026gt; 1/50*cos(23/20*t)*sin(phi) + 1/50*cos(phi)*sin(23/20*t)) Some plots.\nI_specific = I_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) U_specific = U_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) G_specific = G_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) M_specific = M_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) plot(I_specific, t, 0, 10, color=\u0026#34;blue\u0026#34;) + plot(U_specific, t, 0, 10, color=\u0026#34;orange\u0026#34;) + plot(G_specific, t, 0, 10, color=\u0026#34;green\u0026#34;) + plot(M_specific, t, 0, 10, color=\u0026#34;red\u0026#34;) /Users/houliu/.sage/temp/baboon.jemoka.com/16964/tmp_sei9raar.png ","html":"\u003cp\u003eWe need to solve this system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{I}{t} = -0.73U + 0.0438 + 0.4 \\dv{M}{t} \\\\\n\\dv{U}{t} = 0.4I - 0.012 \\\\\n\\dv{G}{t} = \\dv{M}{t}- I \\\\\nM(t)=0.02\\sin (1.15t + \\phi)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo be able to work on this, let us create some functions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;t dm\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# functions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_I\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# _I because i is imaginary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;U\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eG\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;G\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# parameter\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;phi\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\phi\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# our equations\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.73\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0438\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.012\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat, now, we will run the laplace transform upon these equations:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# laplace variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;s\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# laplaced functions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fi\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFu\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fu\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fg\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fm\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# constants\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;I0 U0 G0 M0\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# substitution dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eFu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eFm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eG\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# laplace eqns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_eqns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_eqns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:20: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e I(Integer(0)): I0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:21: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e G(Integer(0)): G0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:22: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e U(Integer(0)): U0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:23: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e M(Integer(0)): M0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[Fi*s - I0 == 0.4*Fm*s - 0.73*Fu - 0.4*M0 + 0.0438/s,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fu*s 
- U0 == 0.4*Fi - 0.012/s,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fg*s - G0 == Fm*s - Fi - M0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fm == (0.02*s*sin(phi) + 0.023*cos(phi))/(s^2 + 1.3225)]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, let us solve the Laplace solutions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# substitute\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_eqns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eFu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003eFm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution_dict\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{Fi: 1/100*(80000*(125*I0 - 50*M0 + sin(phi))*s^4 - 2000*(3650*U0 - 46*cos(phi) - 219)*s^3 + 200*(66125*I0 - 26450*M0 + 438)*s^2 - 193085*(50*U0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fu: 1/50*(5000000*U0*s^4 + 4000*(500*I0 - 200*M0 + 4*sin(phi) - 15)*s^3 + 100*(66125*U0 + 184*cos(phi) + 876)*s^2 + 26450*(100*I0 - 40*M0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fg: 1/100*(200000*(50*G0 - 50*M0 + sin(phi))*s^5 - 10000*(1000*I0 - 400*M0 - 23*cos(phi) + 8*sin(phi))*s^4 + 200*(80725*G0 - 80725*M0 + 36500*U0 - 460*cos(phi) + 292*sin(phi) - 2190)*s^3 - 40*(330625*I0 - 132250*M0 - 1679*cos(phi) + 2190)*s^2 + 193085*(20*G0 - 20*M0 + 50*U0 - 3)*s - 115851)/(100000*s^6 + 161450*s^4 + 38617*s^2),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fm: 2/5*(20*s*sin(phi) + 
23*cos(phi))/(400*s^2 + 529)}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow we inverse Laplace transform:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eU_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eFu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eG_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFm\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eG_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(t |--\u0026gt; -1/2061000*sqrt(730)*(103050*U0 + 368*cos(phi) - 6183)*sin(1/50*sqrt(730)*t) + 1/1030500*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*cos(1/50*sqrt(730)*t) + 529/51525*cos(23/20*t)*sin(phi) + 529/51525*cos(phi)*sin(23/20*t) + 3/100,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e t |--\u0026gt; 1/37613250*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*sin(1/50*sqrt(730)*t) + 1/103050*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) - 184/51525*cos(phi)*cos(23/20*t) + 184/51525*sin(phi)*sin(23/20*t) + 3/50,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e t 
|--\u0026gt; -1/15045300*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*sin(1/50*sqrt(730)*t) - 1/41220*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) + 1/103050*(920*cos(phi) + 2061*sin(phi))*cos(23/20*t) + 1/103050*(2061*cos(phi) - 920*sin(phi))*sin(23/20*t) + G0 - M0 + 5/2*U0 - 3/100*t - 3/20,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e t |--\u0026gt; 1/50*cos(23/20*t)*sin(phi) + 1/50*cos(phi)*sin(23/20*t))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSome plots.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eU_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eG_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;blue\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;orange\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eG_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;green\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;red\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e/Users/houliu/.sage/temp/baboon.jemoka.com/16964/tmp_sei9raar.png\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnus_math570_finance/","tags":null,"title":"NUS-MATH570 Finance (Laplace)"},{"categories":null,"contents":"We have:\n\\begin{equation} \\frac{2y^{2}}{9-x^{2}} + y \\dv{y}{x} + \\frac{3y}{2-x} = 0 \\end{equation}\nWe want to get rid of things; let\u0026rsquo;s begin by dividing the whole thing by \\(y\\).\n\\begin{equation} \\frac{2y}{9-x^{2}} + \\dv{y}{x} + \\frac{3}{2-x} = 0 \\end{equation}\nFinally, then, moving the right 
expression to the right, we have:\n\\begin{equation} \\frac{2y}{9-x^{2}} + \\dv{y}{x} = \\frac{-3}{2-x} \\end{equation}\nIn this case, we have functions:\n\\begin{equation} \\begin{cases} P(x) = \\frac{2}{9-x^{2}}\\\\ Q(x) = \\frac{-3}{2-x}\\\\ \\end{cases} \\end{equation}\nTaking first the top integral:\n\\begin{equation} \\int \\frac{2}{9-x^{2}} \\dd{x} = \\frac{1}{3} \\log \\qty(\\frac{x+3}{3-x}) \\end{equation}\nRaising \\(e\\) to that power, we have that:\n\\begin{equation} \\sqrt[3]{e\\frac{x+3}{3-x}} \\end{equation}\nMultiplying \\(Q(x)\\) to that expression, we have that:\n\\begin{equation} \\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x} \\end{equation}\nTherefore, our entire answer is defined as the integral function that:\n\\begin{equation} y = \\frac{1}{\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} } \\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x} \\end{equation}\n","html":"\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2y^{2}}{9-x^{2}} + y \\dv{y}{x} + \\frac{3y}{2-x} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to get rid of things; let\u0026rsquo;s begin by dividing the whole thing by \\(y\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2y}{9-x^{2}} + \\dv{y}{x} + \\frac{3}{2-x} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then, moving the right expression to the right, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2y}{9-x^{2}} + \\dv{y}{x} = \\frac{-3}{2-x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, we have functions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nP(x) = \\frac{2}{9-x^{2}}\\\\\nQ(x) = \\frac{-3}{2-x}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking first the top integral:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{2}{9-x^{2}} \\dd{x} = \\frac{1}{3} \\log \\qty(\\frac{x+3}{3-x})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRaising \\(e\\) to that power, we have 
that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt[3]{e\\frac{x+3}{3-x}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying \\(Q(x)\\) to that expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, our entire answer is defined as the integral function that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{1}{\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} } \\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_problem_set_1/","tags":null,"title":"NUS-MATH570 Problem Set 1"},{"categories":null,"contents":"Considering the system:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\ \\dv{y}{t} = 3x-y \\\\ \\dv{z}{t} = (3-\\sigma y)x-z\\\\ \\end{cases} \\end{equation}\nwith the initial locations \\((x_0, y_0, z_0)= (-1,1,2)\\).\nWe notice first that the top and bottom expressions as a factor in \\(x\\) multiplied by \\(y\\), which means that our system is not homogenous. Let\u0026rsquo;s expand all the expressions first.\n\\begin{equation} \\begin{cases} \\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\ \\dv{y}{t} = 3x-y \\\\ \\dv{z}{t} = 3x-\\sigma yx-z\\\\ \\end{cases} \\end{equation}\n","html":"\u003cp\u003eConsidering the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\\n\\dv{y}{t} = 3x-y \\\\\n\\dv{z}{t} = (3-\\sigma y)x-z\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith the initial locations \\((x_0, y_0, z_0)= (-1,1,2)\\).\u003c/p\u003e\n\u003cp\u003eWe notice first that the top and bottom expressions as a factor in \\(x\\) multiplied by \\(y\\), which means that our system is not homogenous. 
Let\u0026rsquo;s expand all the expressions first.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\\n\\dv{y}{t} = 3x-y \\\\\n\\dv{z}{t} = 3x-\\sigma yx-z\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_problem_set_2/","tags":null,"title":"NUS-MATH570 Problem Set 2, Problem 1"},{"categories":null,"contents":"Intersects:\n\\begin{equation} f(x) = (x+c)^{2} \\end{equation}\n\\begin{equation} h(x) = c x \\end{equation}\nDoesn\u0026rsquo;t Intersect:\n\\begin{equation} g(x) = c e^{\\frac{x^{4}}{4}}} \\end{equation}\n\\begin{align} \u0026amp;h_1(x)-h_2(x) = c_1x-c_2x \\\\ \\Rightarrow\\ \u0026amp; 0 = c_1x-c_2x \\\\ \\Rightarrow\\ \u0026amp; 0 = x(c_1-c_2) \\end{align}\n\\begin{align} \u0026amp;g_1(x)-g_2(x) = c_1e^{\\frac{x^{4}}{4}} - c_2e^{\\frac{x^{4}}{4}} \\\\ \\Rightarrow\\ \u0026amp; 0 = \\qty(c_1 - c_2)e^{\\frac{x^{4}}{4}} \\\\ \\Rightarrow\\ \u0026amp; 0 = e^{\\frac{x^{4}}{4}}(c_1-c_2) \\end{align}\n\\begin{align} \u0026amp; f_1(x)-f_2(x)=(x+c_1)^{2}-(x+c_2)^{2} \\\\ \\Rightarrow\\ \u0026amp; 0 = (x+c_1)^{2}-(x+c_2)^{2} \\\\ \\Rightarrow\\ \u0026amp; 0 = 2x(c_1-c_2)+{c_1}^{2}+{c_2}^{2} \\end{align}\n\\begin{equation} \\dv{y}{x} + P\u0026rsquo;(x)y = Q\u0026rsquo;(x) \\end{equation}\n\\begin{align} \u0026amp;y = e^{\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)} \\int e^{P(x)}Q\u0026rsquo;(x)\\dd{x} \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)} (\\dots+C) \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)}C + \\dots \\end{align}\n\\begin{equation} h(x) \\in e^{-P(x)}C + \\dots \\end{equation}\n\\begin{equation} g(x) \\in e^{-P(x)}C + \\dots \\end{equation}\n\\begin{align} \u0026amp;0 = (e^{-P(x)}C_1+\\dots)-(e^{-P(x)}C_2 + \\dots) \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)}C_1-e^{-P(x)}C_2 \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)}(C_1-C_2) = 0 
\\end{align}\n","html":"\u003cp\u003eIntersects:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = (x+c)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh(x) = c x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDoesn\u0026rsquo;t Intersect:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ng(x) = c e^{\\frac{x^{4}}{4}}}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;h_1(x)-h_2(x) = c_1x-c_2x \\\\\n\\Rightarrow\\ \u0026amp; 0 = c_1x-c_2x \\\\\n\\Rightarrow\\ \u0026amp; 0 = x(c_1-c_2)\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;g_1(x)-g_2(x) = c_1e^{\\frac{x^{4}}{4}} - c_2e^{\\frac{x^{4}}{4}} \\\\\n\\Rightarrow\\ \u0026amp; 0 = \\qty(c_1 - c_2)e^{\\frac{x^{4}}{4}} \\\\\n\\Rightarrow\\ \u0026amp; 0 = e^{\\frac{x^{4}}{4}}(c_1-c_2)\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; f_1(x)-f_2(x)=(x+c_1)^{2}-(x+c_2)^{2} \\\\\n\\Rightarrow\\ \u0026amp; 0 = (x+c_1)^{2}-(x+c_2)^{2} \\\\\n\\Rightarrow\\ \u0026amp; 0 = 2x(c_1-c_2)+{c_1}^{2}+{c_2}^{2}\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P\u0026rsquo;(x)y = Q\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;y = e^{\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)} \\int e^{P(x)}Q\u0026rsquo;(x)\\dd{x} \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)} (\\dots+C) \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)}C + \\dots\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{equation}\nh(x) \\in e^{-P(x)}C + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ng(x) \\in e^{-P(x)}C + \\dots\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;0 = (e^{-P(x)}C_1+\\dots)-(e^{-P(x)}C_2 + \\dots) \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)}C_1-e^{-P(x)}C_2 \\\\\n\\Rightarrow\\ \u0026amp; 
e^{-P(x)}(C_1-C_2) = 0\n\\end{align}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_research_question_1/","tags":null,"title":"NUS-MATH570 Research Question 1"},{"categories":null,"contents":"We are given a set of expressions:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = \\frac{x}{w}\\\\ \\dv{y}{t} = \\frac{xz}{w} \\\\ \\dv{z}{t} = \\beta y \\\\ \\dv{w}{t} = \\beta y \\end{cases} \\end{equation}\nWe are asked to analyze the solutions to this system, its periodicity, etc.\nStability Analysis The immediate thing to do is to shove all of this into a Jacobian matrix\u0026mdash;not for linearnalization, but to check how the slope changes. We will take the eigenvalues of the matrix at the critical points of the function, which will tell us whether or not the functions converge or diverge from those points.\nLet\u0026rsquo;s go about doing that. Let us declare:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = \\frac{x}{w} = f(x,y,z,w)\\\\ \\dv{y}{t} = \\frac{xz}{w} = g(x,y,z,w)\\\\ \\dv{z}{t} = \\beta y = h(x,y,z,w)\\\\ \\dv{w}{t} = \\beta y = j(x,y,z,w) \\end{cases} \\end{equation}\nThen, the Jacobian is (with each cell being \\(\\dv{row}{column}\\)):\ndx dy dz dw df 1/w 0 0 -x/w^2 dg z/w 0 0 -(xz)/w^2 dh 0 beta 0 0 dj 0 beta 0 0 Properly writing that out, this means that:\n\\begin{equation} J = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{x}{w^{2}} \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{xz}{w^{2}} \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0) \\end{equation}\nNow, let us solve for the critical points of this expression. 
Setting all expressions to \\(0\\):\n\\begin{equation} \\begin{cases} f = 0 \\\\ g = 0 \\\\ h = 0 \\\\ j = 0 \\end{cases} \\end{equation}\nwe have that:\n\\begin{equation} f(\\dots) = \\frac{x}{w} = 0 \\end{equation}\nso \\(x=0\\).\nWe have also:\n\\begin{equation} h(\\dots) = \\beta y= 0 \\end{equation}\ntherefore, \\(y = 0\\).\nEverything else is a free variable.\nSubstituting that into our expressions, we have:\n\\begin{equation} J* = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0) \\end{equation}\nWe are now ready to eigenvalueize. Using technology:\nw,z,b = var(\u0026#34;w z b\u0026#34;) M = matrix([[1/w, 0,0,0], [z/w,0,0,0], [0,b,0,0], [0,b,0,0]]) M [1/w 0 0 0] [z/w 0 0 0] [ 0 b 0 0] [ 0 b 0 0] So, the moment of truth:\nM.eigenvalues() [1/w, 0, 0, 0] Excellent, so we have two eigenvalues: \\(\\frac{1}{w}\\) and \\(0\\). The \\(0\\) eigenvalue indicates to us that the system has \u0026ldquo;neutral stability\u0026rdquo;: that there will be results for which our system: while not exponentially increasing towards asymptotically, does not settle to a stable point.\nBehavior to Extrema The next natural question is then\u0026mdash;even if our system doesn\u0026rsquo;t settle down, does it become larger over time? For this, we turn to the \\(\\frac{1}{w}\\) term. If our initial conditions render negative \\(w\\) eventually at that point, our system will converge to unstable but containable (i.e. does not go to infinity over time); otherwise, it does becomes unstable AND uncontainable (goes to infinity.)\nTo do this, we need to check two things; regrettably, it seems like we could\u0026rsquo;t end up with a properly described solution to evolve our variables analytically. 
However, we can leverage the Lipschitz condition and hand-fisted dimensional analysis to clue us in about the behavior of the system.\nContinuity Recall again that our system is:\nWe are given a set of expressions:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = \\frac{x}{w}\\\\ \\dv{y}{t} = \\frac{xz}{w} \\\\ \\dv{z}{t} = \\beta y \\\\ \\dv{w}{t} = \\beta y \\end{cases} \\end{equation}\nTo check the Lipschitz Continuity is actually not super difficult. Research indicates that the Litpschitz condition extends in the expected manner into multiple dimensions, checking continuity with a partial in each direction.\nThe actual partials of the terms on the right, though, are really only discontinuous in this case when we have something under a fraction\u0026mdash;there is fortunately no weird exponential/log/sinusoidal/radical here. Evidently, then, we loose Lipschitz continuity at \\(w=0\\). As long as we don\u0026rsquo;t cross that line, anything to the left or right of it exists and is unique(?) is each dimension.\nHam-fisting Dimensional Analysis The initial conditions asks us for starting with \\(w(0)=5\\sqrt 5\\). Recall that we are interested in the value of \\(w\\) at \\((x,y)=(0,0)\\).\nFurthermore, recall the Lipschitz condition we discussed above. That the function is Lipschitz continuous at two boundary intervals: between \\((-\\infty, 0)\\) and \\((0, \\infty )\\). Starting at the conditions of \\(w(0) = 5\\sqrt{5}\\) indicates that there will be no way for \\(w\\) to cross into \\(\\frac{1}{w} \u0026lt;0\\) territory.\nNote, again, that the eigenvalues of the Jacobian of the system are \\(\\{0, \\frac{1}{w}\\}\\), therefore, a positive \\(\\frac{1}{w}\\) will indicate that the system tends towards infinity as there is one positive eigenvalue.\nHowever, if we started at a negative \\(w\\) in the first place, we will equally be unable to use the same initial conditions to cross into \\(\\frac{1}{w} \u0026gt; 0\\) territory. 
Because of this, conditions that begin with negative \\(w\\) will be unstable but not asymptotically increasing as there will be no positive eigenvalues of its Jacobian at any given point.\n","html":"\u003cp\u003eWe are given a set of expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = \\frac{x}{w}\\\\\n\\dv{y}{t} = \\frac{xz}{w} \\\\\n\\dv{z}{t} = \\beta y \\\\\n\\dv{w}{t} = \\beta y\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are asked to analyze the solutions to this system, its periodicity, etc.\u003c/p\u003e\n\u003ch2 id=\"stability-analysis\"\u003eStability Analysis\u003c/h2\u003e\n\u003cp\u003eThe immediate thing to do is to shove all of this into a Jacobian matrix\u0026mdash;not for linearnalization, but to check how the slope changes. We will take the eigenvalues of the matrix at the critical points of the function, which will tell us whether or not the functions converge or diverge from those points.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s go about doing that. 
Let us declare:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = \\frac{x}{w} = f(x,y,z,w)\\\\\n\\dv{y}{t} = \\frac{xz}{w} = g(x,y,z,w)\\\\\n\\dv{z}{t} = \\beta y = h(x,y,z,w)\\\\\n\\dv{w}{t} = \\beta y = j(x,y,z,w)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, the Jacobian is (with each cell being \\(\\dv{row}{column}\\)):\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003edx\u003c/th\u003e\n\u003cth\u003edy\u003c/th\u003e\n\u003cth\u003edz\u003c/th\u003e\n\u003cth\u003edw\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003edf\u003c/td\u003e\n\u003ctd\u003e1/w\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e-x/w^2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edg\u003c/td\u003e\n\u003ctd\u003ez/w\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e-(xz)/w^2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edh\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003ebeta\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edj\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003ebeta\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eProperly writing that out, this means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{x}{w^{2}} \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{xz}{w^{2}} \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, let us solve for the critical points of this expression. 
Setting all expressions to \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nf = 0 \\\\\ng = 0 \\\\\nh = 0 \\\\\nj = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(\\dots) = \\frac{x}{w} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso \\(x=0\\).\u003c/p\u003e\n\u003cp\u003eWe have also:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh(\\dots) = \\beta y= 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore, \\(y = 0\\).\u003c/p\u003e\n\u003cp\u003eEverything else is a free variable.\u003c/p\u003e\n\u003cp\u003eSubstituting that into our expressions, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ* = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are now ready to eigenvalueize. 
Using technology:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;w z b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1/w 0 0 
0]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[z/w 0 0 0]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[ 0 b 0 0]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[ 0 b 0 0]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo, the moment of truth:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eeigenvalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1/w, 0, 0, 0]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent, so we have two eigenvalues: \\(\\frac{1}{w}\\) and \\(0\\). 
The \\(0\\) eigenvalue indicates to us that the system has \u0026ldquo;\u003ca href=\"/posts/kbhneutral_stability/\"\u003eneutral stability\u003c/a\u003e\u0026rdquo;: that there will be results for which our system: while not exponentially increasing towards asymptotically, does not settle to a stable point.\u003c/p\u003e\n\u003ch2 id=\"behavior-to-extrema\"\u003eBehavior to Extrema\u003c/h2\u003e\n\u003cp\u003eThe next natural question is then\u0026mdash;even if our system doesn\u0026rsquo;t settle down, does it become larger over time? For this, we turn to the \\(\\frac{1}{w}\\) term. If our initial conditions render negative \\(w\\) eventually at that point, our system will converge to unstable but containable (i.e. does not go to infinity over time); otherwise, it does becomes unstable AND uncontainable (goes to infinity.)\u003c/p\u003e\n\u003cp\u003eTo do this, we need to check two things; regrettably, it seems like we could\u0026rsquo;t end up with a properly described solution to evolve our variables analytically. However, we can leverage the Lipschitz condition and hand-fisted dimensional analysis to clue us in about the behavior of the system.\u003c/p\u003e\n\u003ch3 id=\"continuity\"\u003eContinuity\u003c/h3\u003e\n\u003cp\u003eRecall again that our system is:\u003c/p\u003e\n\u003cp\u003eWe are given a set of expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = \\frac{x}{w}\\\\\n\\dv{y}{t} = \\frac{xz}{w} \\\\\n\\dv{z}{t} = \\beta y \\\\\n\\dv{w}{t} = \\beta y\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo check the Lipschitz Continuity is actually not super difficult. 
Research indicates that the Litpschitz condition extends in the expected manner into multiple dimensions, checking continuity with a partial in each direction.\u003c/p\u003e\n\u003cp\u003eThe actual partials of the terms on the right, though, are really only discontinuous in this case when we have something under a fraction\u0026mdash;there is fortunately no weird exponential/log/sinusoidal/radical here. Evidently, then, we loose Lipschitz continuity at \\(w=0\\). As long as we don\u0026rsquo;t cross that line, anything to the left or right of it exists and is unique(?) is each dimension.\u003c/p\u003e\n\u003ch3 id=\"ham-fisting-dimensional-analysis\"\u003eHam-fisting Dimensional Analysis\u003c/h3\u003e\n\u003cp\u003eThe initial conditions asks us for starting with \\(w(0)=5\\sqrt 5\\). Recall that we are interested in the value of \\(w\\) at \\((x,y)=(0,0)\\).\u003c/p\u003e\n\u003cp\u003eFurthermore, recall the Lipschitz condition we discussed above. That the function is Lipschitz continuous at two boundary intervals: between \\((-\\infty, 0)\\) and \\((0, \\infty )\\). Starting at the conditions of \\(w(0) = 5\\sqrt{5}\\) indicates that there will be no way for \\(w\\) to cross into \\(\\frac{1}{w} \u0026lt;0\\) territory.\u003c/p\u003e\n\u003cp\u003eNote, again, that the eigenvalues of the Jacobian of the system are \\(\\{0, \\frac{1}{w}\\}\\), therefore, a positive \\(\\frac{1}{w}\\) will indicate that the system tends towards infinity as there is one positive eigenvalue.\u003c/p\u003e\n\u003cp\u003eHowever, if we started at a negative \\(w\\) in the first place, we will equally be unable to use the same initial conditions to cross into \\(\\frac{1}{w} \u0026gt; 0\\) territory. 
Because of this, conditions that begin with negative \\(w\\) will be unstable but not asymptotically increasing as there will be no positive eigenvalues of its Jacobian at any given point.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-27_16-28-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_supply_demand/","tags":null,"title":"NUS-MATH570 Supply Demand"},{"categories":null,"contents":" Instruments Effects Feeling Dynamics Process/Production 01/09/2023 ","html":"\u003cul\u003e\n\u003cli\u003eInstruments\u003c/li\u003e\n\u003cli\u003eEffects\u003c/li\u003e\n\u003cli\u003eFeeling\u003c/li\u003e\n\u003cli\u003eDynamics\u003c/li\u003e\n\u003cli\u003eProcess/Production\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"01-09-2023\"\u003e01/09/2023\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_mus150_critical_listening/","tags":null,"title":"NUS-MUS150 Critical Listening"},{"categories":null,"contents":" el plastico nuevo de nopal esta un producto de plástico biodegrable, que substituír plastico tradiciónal\nsi el plasticó ir al mar, los animales pueden simple comerlo\nel proceso de manufactura son sosinable, que la planta puede vivir a producir más ojas del plastico\nproteinsas\ngelinicas\ncorbantes\n","html":"\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eel plastico nuevo de nopal esta un producto de plástico biodegrable, que substituír plastico tradiciónal\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esi el plasticó ir al mar, los animales pueden simple comerlo\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eel proceso de manufactura son sosinable, que la planta puede vivir a producir más ojas del 
plastico\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eproteinsas\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003egelinicas\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecorbantes\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_plastico_biodegrable/","tags":null,"title":"NUS-SPAN502 Plastico Biodegrable"},{"categories":null,"contents":"Vocabulario alejar forzar el fracaso ocultar atemorizarte prescindir exige asegurar Preguntas ¿Hay problemas a largo plazo con el sistema del calificación? ¿Como medimos los éxitos del sistema nueva sin prejuicios las preferencias propias de los estudiantes? ¿Necesita estudiantes motivados para el desarrollo complete del sistema? ¿Hay diferencias sociocultural que puede influir los resultados o el implementación del sistema? ¿Cómo conducta examines del rendimiento de los estudiantes a través escuelas con implementaciones diferencies del sistema? ","html":"\u003ch2 id=\"vocabulario\"\u003eVocabulario\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ealejar\u003c/li\u003e\n\u003cli\u003eforzar\u003c/li\u003e\n\u003cli\u003eel fracaso\u003c/li\u003e\n\u003cli\u003eocultar\u003c/li\u003e\n\u003cli\u003eatemorizarte\u003c/li\u003e\n\u003cli\u003eprescindir\u003c/li\u003e\n\u003cli\u003eexige\u003c/li\u003e\n\u003cli\u003easegurar\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"preguntas\"\u003ePreguntas\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e¿Hay problemas a largo plazo con el sistema del calificación?\u003c/li\u003e\n\u003cli\u003e¿Como medimos los éxitos del sistema nueva sin prejuicios las preferencias propias de los estudiantes?\u003c/li\u003e\n\u003cli\u003e¿Necesita estudiantes motivados para el desarrollo complete del sistema?\u003c/li\u003e\n\u003cli\u003e¿Hay diferencias sociocultural que puede influir los resultados o el implementación del sistema?\u003c/li\u003e\n\u003cli\u003e¿Cómo conducta examines del rendimiento de los 
estudiantes a través escuelas con implementaciones diferencies del sistema?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_tarea_2/","tags":null,"title":"NUS-SPAN502 Tarea 2"},{"categories":null,"contents":"Vocabularios Nuevos creciente jornada exigir desempeño subir Preguntas ¿En el primer lugar, porqué tenemos semanas de cinco días? ¿Para los ciudades con recursos de educación abundante, hay un necesario real de trabar en jornadas de cuatro días? ¿Tenemos de verdad un sistema responsable para examinar los diferencias de cantidad de educación a través de el palmo entero del proceso de educación de un estudiante? ¿Existen presiones políticas que motivó el propósito? ¿En realidad, existe un problema muy fundamental que causó los problemas que vemos hoy? ","html":"\u003ch2 id=\"vocabularios-nuevos\"\u003eVocabularios Nuevos\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecreciente\u003c/li\u003e\n\u003cli\u003ejornada\u003c/li\u003e\n\u003cli\u003eexigir\u003c/li\u003e\n\u003cli\u003edesempeño\u003c/li\u003e\n\u003cli\u003esubir\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"preguntas\"\u003ePreguntas\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e¿En el primer lugar, porqué tenemos semanas de cinco días?\u003c/li\u003e\n\u003cli\u003e¿Para los ciudades con recursos de educación abundante, hay un necesario real de trabar en jornadas de cuatro días?\u003c/li\u003e\n\u003cli\u003e¿Tenemos de verdad un sistema responsable para examinar los diferencias de cantidad de educación a través de el palmo entero del proceso de educación de un estudiante?\u003c/li\u003e\n\u003cli\u003e¿Existen presiones políticas que motivó el propósito?\u003c/li\u003e\n\u003cli\u003e¿En realidad, existe un problema muy fundamental que causó los problemas que vemos hoy?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_tarea_4/","tags":null,"title":"NUS-SPAN502 Tarea 4"},{"categories":null,"contents":" Lógico 
Sentimiento Acuerdo Cancelado/a Cuentas Censurado/a Plataforma Fraces Libertad de Expresión ","html":"\u003cul\u003e\n\u003cli\u003eLógico\u003c/li\u003e\n\u003cli\u003eSentimiento\u003c/li\u003e\n\u003cli\u003eAcuerdo\u003c/li\u003e\n\u003cli\u003eCancelado/a\u003c/li\u003e\n\u003cli\u003eCuentas\u003c/li\u003e\n\u003cli\u003eCensurado/a\u003c/li\u003e\n\u003cli\u003ePlataforma\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fraces\"\u003eFraces\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLibertad de Expresión\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_vocab/","tags":null,"title":"NUS-SPAN502 Vocab"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhobjects/","tags":null,"title":"object"},{"categories":null,"contents":" at time \\(t\\), agent received observation \\(o_{t}\\) agent then chooses \\(a_{t}\\) based on what it knows through some kind of process in order to affect a possibly nondeterministic change on the environment the agent choose an \\(a_{t}\\) under the existance of many types of Uncertainty. 
","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-26_10-14-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eat time \\(t\\), \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e received observation \\(o_{t}\\)\u003c/li\u003e\n\u003cli\u003eagent then chooses \\(a_{t}\\) based on what it knows through some kind of process in order to affect a possibly nondeterministic change on the environment\n\u003cul\u003e\n\u003cli\u003ethe agent choose an \\(a_{t}\\) under the existance of many \u003ca href=\"/posts/kbhuncertainty/\"\u003etypes of Uncertainty\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhobserve_act_cycle/","tags":null,"title":"observe-act cycle"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhof_our_spiritual_strivings/","tags":null,"title":"Of Our Spiritual Strivings"},{"categories":null,"contents":"\\begin{equation} V = IR \\end{equation}\nwhere, \\(V\\) is the voltage across the resister, \\(I\\) the current, and \\(R\\) the resistance.\npower (physics) the rate at which electrical energy dissipates into heat is called power:\n\\begin{equation} P = IV \\end{equation}\nwhere, \\(P\\) is power, and \\(I\\) the current, and \\(V\\) the voltage.\nAlternate formulation of power:\n\\begin{equation} P = I V = \\frac{V^{2}}{R} = I^{2} R = \\dv{E}{t} \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\nV = IR\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(V\\) is the voltage across the resister, \\(I\\) the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e, and \\(R\\) the \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"power--physics\"\u003epower (physics)\u003c/h2\u003e\n\u003cp\u003ethe rate at which electrical energy dissipates into heat is called \u003ca 
href=\"#power--physics\"\u003epower\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP = IV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(P\\) is power, and \\(I\\) the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e, and \\(V\\) the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAlternate formulation of power:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP = I V = \\frac{V^{2}}{R} = I^{2} R = \\dv{E}{t}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhohm_s_law/","tags":null,"title":"Ohm's Law"},{"categories":null,"contents":"Everyday, at 11:00 PM exactly, I stop time tracking.\nAnd it feels somehow as the most liberating time of my day. When I truly feels like I have my time back to myself,\n","html":"\u003cp\u003eEveryday, at 11:00 PM exactly, I stop time tracking.\u003c/p\u003e\n\u003cp\u003eAnd it feels somehow as the most liberating time of my day. When I truly feels like I have my time back to myself,\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproductivity/","tags":null,"title":"On the Clock"},{"categories":null,"contents":"We have an expression:\n\\begin{equation} B = \\frac{FL^{3}}{3EI} = \\frac{N m^{3}}{3 p m^{4}} = \\frac{Nm^{3}}{\\frac{N}{m^{2}}m^{4}} = m \\end{equation}\nWith constants:\n\\(B\\): \\(m\\), deflection at the point of force application \\(F\\): \\(N\\), force applied \\(L\\): \\(m\\), distance between fixed point and point of force application \\(E\\): \\(p=\\frac{N}{m^{2}}\\), elastic modulus \\(I\\): \\(m^{4}\\), second moment of area As per measured:\n\\(B\\): \\(9.15 \\cdot 10^{-4} m\\) \\(F\\): \\(20N\\) \\(L\\): \\(9.373 \\cdot 10^{-2} m\\) \\(I\\): \\(1.37 \\cdot 10^{-10} m^{4}\\) = \\(\\frac{WH^{3}}{12}\\) = \\(\\frac{(6.25 \\cdot 10^{-3})(6.4 \\cdot 10^{-3})^{3}}{12}\\) Theoretical:\n\\(E\\): \\(7 \\cdot 10^{10} P\\) As calculated:\n\\(B\\): \\(5.74 \\cdot 10^{-4} m\\) ","html":"\u003cp\u003eWe have 
an expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = \\frac{FL^{3}}{3EI} = \\frac{N m^{3}}{3 p m^{4}} = \\frac{Nm^{3}}{\\frac{N}{m^{2}}m^{4}} = m\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith constants:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(B\\): \\(m\\), deflection at the point of force application\u003c/li\u003e\n\u003cli\u003e\\(F\\): \\(N\\), force applied\u003c/li\u003e\n\u003cli\u003e\\(L\\): \\(m\\), distance between fixed point and point of force application\u003c/li\u003e\n\u003cli\u003e\\(E\\): \\(p=\\frac{N}{m^{2}}\\), elastic modulus\u003c/li\u003e\n\u003cli\u003e\\(I\\): \\(m^{4}\\), second moment of area\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs per measured:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(B\\): \\(9.15 \\cdot 10^{-4} m\\)\u003c/li\u003e\n\u003cli\u003e\\(F\\): \\(20N\\)\u003c/li\u003e\n\u003cli\u003e\\(L\\): \\(9.373 \\cdot 10^{-2} m\\)\u003c/li\u003e\n\u003cli\u003e\\(I\\): \\(1.37 \\cdot 10^{-10} m^{4}\\) = \\(\\frac{WH^{3}}{12}\\) = \\(\\frac{(6.25 \\cdot 10^{-3})(6.4 \\cdot 10^{-3})^{3}}{12}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTheoretical:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(E\\): \\(7 \\cdot 10^{10} P\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eAs calculated:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(B\\): \\(5.74 \\cdot 10^{-4} m\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhone_shot_deformation/","tags":null,"title":"One-Shot Deformation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhonline_m/","tags":null,"title":"online m"},{"categories":null,"contents":"For elements with large possible future state space, we can\u0026rsquo;t just iterate over all states to get a for every state, and THEN go about using the to perform actions.\nTherefore, we employ a technique called receding horizon planning: planning from the current state upwards to a maximum horizon \\(d\\), 
figure out what the best SINGLE action would be given that information for only this state, and then replan.\nHere are the main methods of doing this:\nRollout with Lookahead: for each possible next action, sample a transition-weighted random trajectory using some policy, use whatever discounted future reward you got for that as your utility : for each possible next action, search through each possible next action until you hit the depth required, calculate the instantaneous reward at that point, and backup until you have recorded the sequence of actions that maybe best, and then return the first action in that sequence : same algorithm as , but you bound your search based on the theoretical upper-bound of the q-value : same core algorithm as , but instead of calculating a based on the , you sample a set of possible next states and average their future utilities : use function to come up with a bunch of possible actions to try, and try them with discounts as you try them Additional Information generative model we perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\nopen-loop planning vs close-loop planning open loop planning Instead of doing all the methods above, which all requires state information of the future, open loop planning uses an exogenously chosen sequence of actions and tries to simply:\nMaximize: \\(U(a_1, \u0026hellip;, a_{n})\\)\nwhere the choice of actions doesn\u0026rsquo;t change regardless of eventual state is.\nFor high dimensional systems, where is hard to do closed loop systems, this will work better.\n","html":"\u003cp\u003eFor elements with large possible future state space, we can\u0026rsquo;t just iterate over all states to get a for every state, and \u003cstrong\u003eTHEN\u003c/strong\u003e go about using the to perform actions.\u003c/p\u003e\n\u003cp\u003eTherefore, we employ a technique called 
\u003cstrong\u003ereceding horizon planning\u003c/strong\u003e: planning from the current state upwards to a maximum horizon \\(d\\), figure out what the best \u003cstrong\u003eSINGLE action\u003c/strong\u003e would be given that information for only this state, and then replan.\u003c/p\u003e\n\u003cp\u003eHere are the main methods of doing this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e: for each possible next action, sample a transition-weighted random trajectory using some policy, use whatever discounted future reward you got for that as your utility\u003c/li\u003e\n\u003cli\u003e: for each possible next action, search through each possible next action until you hit the depth required, calculate the instantaneous reward at that point, and backup until you have recorded the sequence of actions that maybe best, and then return the first action in that sequence\u003c/li\u003e\n\u003cli\u003e: same algorithm as , but you bound your search based on the theoretical upper-bound of the q-value\u003c/li\u003e\n\u003cli\u003e: same core algorithm as , but instead of calculating a based on the , you sample a set of possible next states and average their future utilities\u003c/li\u003e\n\u003cli\u003e: use function to come up with a bunch of possible actions to try, and try them with discounts as you try them\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"generative-model\"\u003egenerative model\u003c/h3\u003e\n\u003cp\u003ewe perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\u003c/p\u003e\n\u003ch3 id=\"open-loop-planning-vs-close-loop-planning\"\u003eopen-loop planning vs close-loop planning\u003c/h3\u003e\n\u003ch4 id=\"open-loop-planning\"\u003eopen 
loop planning\u003c/h4\u003e\n\u003cp\u003eInstead of doing all the methods above, which all requires state information of the future, \u003ca href=\"#open-loop-planning\"\u003eopen loop planning\u003c/a\u003e uses an exogenously chosen sequence of actions and tries to simply:\u003c/p\u003e\n\u003cp\u003eMaximize: \\(U(a_1, \u0026hellip;, a_{n})\\)\u003c/p\u003e\n\u003cp\u003ewhere the choice of actions doesn\u0026rsquo;t change regardless of eventual state is.\u003c/p\u003e\n\u003cp\u003eFor high dimensional systems, where is hard to do closed loop systems, this will work better.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhonline_planning/","tags":null,"title":"online planning"},{"categories":null,"contents":"These are basically MDP methods but tweaked. We make some changes:\nfor everywhere that we need a state, we use a belief to sample the next state given an action (random next step), we call our generative model to get a new observation, and call update(b,a,o) with our filter to propegate our belief forward if we need an action-value, we use the one-step lookahead in POMDP: \\begin{equation} Q(b,a) = R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o))) \\end{equation}\nwhere,\n\\begin{equation} R(b,a) = \\sum_{s}^{} R(s,a)b(s) \\end{equation}\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\nand where, if needed (i.e. 
most algorithms estimate this):\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b \\end{equation}\nwe also revise our generative model:\neach step requires belief and action, and we sample from our belief a next state, propegate belief forward, and use a traditional generative model to get the rewards and next states (which we don\u0026rsquo;t use).\nReceeding Horizon: plan to a depth \\(d\\), select action, replan Rollout with Lookahead: simple to implement, no grantees of optimality or even boundedness Forward Search: quite expensive\u0026mdash;exponential given the size of horizon monte-carlo tree search, but instead our counts are stored not in terms of states (which we don\u0026rsquo;t know), but sequences of action observations: \\(h = a_1o_2a_2o_1a_2o_1\\) etc. Then, the counter takes \\(N(h,a)\\) as input: will head towards optimality and it requires a generative model to sample tracks Branch and Bound, but you use the POMDP Approximation methods to estimate the upper and lower bounds of your utility: its Forward Search with pruning Sparse Sampling: its Forward Search, but the next action-value is determined by a finite sampling of next observations and rewards and you average their future utility. that is, the action-value before depth \\(d\\) is obtained by: \\(Q(b,a) = \\frac{1}{m} \\sum_{i=1}^{m} \\qty(r_{a}^{(i)}+\\gammaU_{d-1}(Update(b,a,o_{a}^{(i)})))\\) ","html":"\u003cp\u003eThese are basically \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e methods but tweaked. 
We make some changes:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efor everywhere that we need a state, we use a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eto sample the next state given an action (random next step), we call our generative model to get a new observation, and call \u003ccode\u003eupdate(b,a,o)\u003c/code\u003e with our \u003ca href=\"/posts/kbhfilters/\"\u003efilter\u003c/a\u003e to propegate our \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e forward\u003c/li\u003e\n\u003cli\u003eif we need an \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e, we use the \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e:\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\begin{equation}\nQ(b,a) = R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o)))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(b,a) = \\sum_{s}^{} R(s,a)b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand where, if needed (i.e. 
most algorithms estimate this):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe also revise our \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eeach step requires belief and action, and we sample from our belief a next state, propegate \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e forward, and use a traditional \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e to get the rewards and next states (which we don\u0026rsquo;t use).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreceeding_horizon/\"\u003eReceeding Horizon\u003c/a\u003e: plan to a depth \\(d\\), select action, replan\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e: simple to implement, no grantees of optimality or even boundedness\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e: quite expensive\u0026mdash;exponential given the size of horizon\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e, but instead our counts are stored not in terms of states (which we don\u0026rsquo;t know), but sequences of action observations: \\(h = a_1o_2a_2o_1a_2o_1\\) etc. 
Then, the counter takes \\(N(h,a)\\) as input: will head towards optimality and it requires a \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e to sample tracks\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e, but you use the \u003ca href=\"/posts/kbhpomdp_approximation/\"\u003ePOMDP Approximation\u003c/a\u003e methods to estimate the upper and lower bounds of your utility: its \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e with pruning\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsparse_sampling/\"\u003eSparse Sampling\u003c/a\u003e: its \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but the next \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e is determined by a finite sampling of next observations and rewards and you average their future utility. that is, the action-value before depth \\(d\\) is obtained by: \\(Q(b,a) = \\frac{1}{m} \\sum_{i=1}^{m} \\qty(r_{a}^{(i)}+\\gammaU_{d-1}(Update(b,a,o_{a}^{(i)})))\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhonline_pomdp_methods/","tags":null,"title":"Online POMDP Methods"},{"categories":null,"contents":"The Open Voice Brain Model is a audio processing architecture proposed by Laguarta 2021 for audio/biomarker correlation work.\nHere\u0026rsquo;s a fairly self-explanatory figure:\nThe model outputs an AD diagnoses as well as a longitudinal correlation with Memory, Mood, and Respiratory biomarkers.\nThis is then the embedding that they are proposing for use by other tasks.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOpen Voice Brain Model\u003c/a\u003e is a audio processing architecture proposed by \u003ca href=\"/posts/kbhlaguarta_2021/\"\u003eLaguarta 2021\u003c/a\u003e for audio/biomarker correlation 
work.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a fairly self-explanatory figure:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-36-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe model outputs an AD diagnoses as well as a longitudinal correlation with Memory, Mood, and Respiratory biomarkers.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-38-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis is then the embedding that they are proposing for use by other tasks.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhopen_voice_brain_model/","tags":null,"title":"Open Voice Brain Model"},{"categories":null,"contents":"OpenSMILE is a proprietary audio feature exaction tool.\nSite.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e is a proprietary audio feature exaction tool.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.audeering.com/research/opensmile/\"\u003eSite\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhopensmile/","tags":null,"title":"OpenSMILE"},{"categories":null,"contents":"cs111.stanford.edu\nTopics CS111 leverages CS107 experience to show operating systems and how they function.\nWhat is an operating system operating system sits between hardware and user programs most importantly: manages shared resources to allow the program to run CPU: gets which program to do work and for how long RAM: how much memory to give to a program Hard Drive Main Events concurrency: switch between processes so quickly to only use on core while concurrent access memory: memory addresses are mostly scattered everywhere \u0026mdash; everything include the lowest level including CPU uses only virtual memory, translated by the OS file management i/o devices networking: CS144 security: interactions between users in a system Main Components of the Course File Systems Process and Multiprocess Threads Virtual 
Memory + Paging + limits Modern Technologies/Niceties What\u0026rsquo;s Next SU-CS111 Outline\nContent filesystem How can we design file systems to manage files on disk, and what are the tradeoffs inherent in designing them. How can we interact with the filesystem?\nfilesystem Unix V6 Filesystem Freelist and Block Cache disk crash recovery fsck ordered writes journaling: write-ahead logging syscalls kernel mode files handling file file descriptor multiprocessing How are programs run, how to spawn subprograms, and how they work in general?\nmultiprocessing fork execvp waitpid shell pipe and ipc Multithreading How do we implement a single program within a single program, and how do we not have race conditions\nmultithreading processes vs threads race condition and mutex passing by reference permits model busy waiting condition variable Unique Lock trust how do we trust software?\ntrust by assumption trust by inference trust by substitution patterns monitor pattern dispatches assembly review process control block dispatching trap interrupts context switch scheduling preemption virtual memory \u0026ldquo;how can one set of memory be shared across several processes\u0026rdquo;\nvirtual memory dynamic address translation demand paging clock algorithm model technologies modern OS trust and OS trust An example of a good time:\nvoid main() { // make pipe int fds[2]; pipe(fds); pid_t pidp = fork(); if (pidp == 0) { close(pidp[1]); dup2(pidp[0], STDIN_FILENO); close(pidp[0]); execvp(\u0026#34;\u0026#34;, ...); // throw-a-tantrum exit(1); } close(pidp[0]); return pidp[1]; } ","html":"\u003cp\u003ecs111.stanford.edu\u003c/p\u003e\n\u003ch2 id=\"topics\"\u003eTopics\u003c/h2\u003e\n\u003cp\u003eCS111 leverages CS107 experience to show operating systems and how they function.\u003c/p\u003e\n\u003ch3 id=\"what-is-an-operating-system\"\u003eWhat is an operating system\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eoperating system sits between 
\u003cstrong\u003ehardware\u003c/strong\u003e and \u003cstrong\u003euser programs\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003emost importantly: manages \u003cem\u003eshared resources\u003c/em\u003e to allow the program to run\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eCPU\u003c/strong\u003e: gets which program to do work and for how long\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eRAM\u003c/strong\u003e: how much memory to give to a program\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eHard Drive\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-events\"\u003eMain Events\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003econcurrency: switch between processes so quickly to only use on core while concurrent access\u003c/li\u003e\n\u003cli\u003ememory: memory addresses are mostly scattered everywhere \u0026mdash; everything include the lowest level including CPU uses only virtual memory, translated by the OS\u003c/li\u003e\n\u003cli\u003efile management\u003c/li\u003e\n\u003cli\u003ei/o devices\u003c/li\u003e\n\u003cli\u003enetworking: CS144\u003c/li\u003e\n\u003cli\u003esecurity: interactions between users in a system\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-components-of-the-course\"\u003eMain Components of the Course\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFile Systems\u003c/li\u003e\n\u003cli\u003eProcess and Multiprocess\u003c/li\u003e\n\u003cli\u003eThreads\u003c/li\u003e\n\u003cli\u003eVirtual Memory + Paging + limits\u003c/li\u003e\n\u003cli\u003eModern Technologies/Niceties\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"what-s-next\"\u003eWhat\u0026rsquo;s Next\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsu_cs111_outline/\"\u003eSU-CS111 Outline\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"content\"\u003eContent\u003c/h2\u003e\n\u003ch3 id=\"filesystem--kbhfilesystem-dot-md\"\u003e\u003ca 
href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eHow can we design file systems to manage files on disk, and what are the tradeoffs inherent in designing them. How can we interact with the filesystem?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003eUnix V6 Filesystem\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/#freelist\"\u003eFreelist\u003c/a\u003e and \u003ca href=\"/posts/kbhunix_v6_filesystem/#block-cache\"\u003eBlock Cache\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilesystem/#disk\"\u003edisk\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/\"\u003ecrash recovery\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/#fsck\"\u003efsck\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/#ordered-writes\"\u003eordered writes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/#journaling\"\u003ejournaling\u003c/a\u003e: \u003ca href=\"/posts/kbhcrash_recovery/#journaling\"\u003ewrite-ahead logging\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/\"\u003esyscalls\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/#kernel-mode\"\u003ekernel mode\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efiles handling\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/#open\"\u003efile\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"multiprocessing\"\u003emultiprocessing\u003c/h3\u003e\n\u003cp\u003eHow are programs run, how to spawn subprograms, and how they work in general?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiprocessing/\"\u003emultiprocessing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/\"\u003efork\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/#execvp\"\u003eexecvp\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/#waitpid\"\u003ewaitpid\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/#shell\"\u003eshell\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e and ipc\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multithreading\"\u003eMultithreading\u003c/h3\u003e\n\u003cp\u003eHow do we implement a single program within a single program, and how do we not have race conditions\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/\"\u003emultithreading\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/#id-cc41feaf-ce09-48ec-84d7-8f98d9ca20ba-process-es-vs-id-b4b86ccc-70f3-4d30-b437-2f5fff63b0e6-thread-s\"\u003eprocesses vs threads\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/#race-condition\"\u003erace condition\u003c/a\u003e and \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/#passing-by-reference\"\u003epassing by reference\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermits_model/\"\u003epermits model\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhpermits_model/#condition-variable\"\u003econdition variable\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunique_lock/\"\u003eUnique Lock\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"trust\"\u003etrust\u003c/h4\u003e\n\u003cp\u003ehow do we \u003ca href=\"/posts/kbhprivacy/#trust\"\u003etrust\u003c/a\u003e software?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust-by-assumption\"\u003etrust by assumption\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust-by-inference\"\u003etrust by inference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust-by-substitution\"\u003etrust by substitution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"patterns\"\u003epatterns\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonitor_pattern/\"\u003emonitor pattern\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"dispatches\"\u003edispatches\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassembly/\"\u003eassembly\u003c/a\u003e review\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/\"\u003edispatching\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupts\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/#context-switch\"\u003econtext switch\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"virtual-memory\"\u003evirtual memory\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;how can one set of memory be shared across several processes\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvirtual_memory/#dynamic-address-translation\"\u003edynamic address translation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdemand_paging/\"\u003edemand paging\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclock_algorthium/\"\u003eclock algorithm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"model-technologies\"\u003emodel technologies\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodern_os/\"\u003emodern OS\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"trust-and-os\"\u003etrust and OS\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust\"\u003etrust\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eAn example of a good time:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// make 
pipe\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epipe\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003edup2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSTDIN_FILENO\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// throw-a-tantrum\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eexit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhos_index/","tags":null,"title":"Operating Systems Index"},{"categories":null,"contents":" adding multiplying This is object dependent.\n","html":"\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhadding/\"\u003eadding\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplying\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e dependent.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperation/","tags":null,"title":"operation"},{"categories":null,"contents":"Richard Nixon bombs Vietnam for 13 days to beat the VietCong into submission after the Vietnam War.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e bombs \u003ca href=\"/posts/kbhvietnam/\"\u003eVietnam\u003c/a\u003e for 13 days to beat the VietCong into submission after the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperation_linebacker/","tags":null,"title":"Operation Linebacker"},{"categories":null,"contents":"A Linear Map from a vector space to itself is called an operator.\n\\(\\mathcal{L}(V) = \\mathcal{L}(V,V)\\), which is the set of all operators on \\(V\\).\nconstituents a vector space \\(V\\) a Linear Map \\(T \\in \\mathcal{L}(V,V)\\) requirements \\(T\\) is, by the constraints above, an operator additional information injectivity is surjectivity in finite-dimensional operators Suppose \\(V\\) is finite-dimensional and \\(T \\in \\mathcal{L}(V)\\), then, the following statements are equivalent:\n\\(T\\) is invertable \\(T\\) is injective \\(T\\) is surjective THIS IS NOT TRUE IN infinite-demensional vector space OPERATORS! (for instance, backwards shift in \\(\\mathbb{F}^{\\infty}\\) is surjective but not injective.)\nProof:\nFrom the above, \\(1 \\implies 2\\) by definition of invertability.\nThen, we have that \\(T\\) is invertable. We desire that \\(T\\) is surjective. Given invertability, we have that \\(\\null T = \\{0\\}\\). 
By the rank-nullity theorem, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T = \\dim range\\ T +0= \\dim range\\ T\\). Now, given \\(T\\) is an operator, we have that \\(range\\ T \\subset V\\). Attempting to extend a basis of \\(range\\ T\\) (which, given it is a subspace of \\(V\\), is a linearly independent list in \\(V\\)) to a basis of \\(V\\) will be the trivial extension. So \\(range\\ T = V\\), which is also the codomain of \\(T\\). This makes \\(T\\) surjective, as desired. So \\(2 \\implies 3\\).\nNow, we have that \\(T\\) is surjective, we desire that \\(T\\) is invertable. We essentially reverse-engineer the step before. Given rank-nullity theorem, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T\\). Now, given \\(T\\) is surjective, \\(\\dim range\\ T = \\dim V\\). Therefore, we have that \\(\\dim V = \\dim V + \\dim null\\ T \\implies 0 = \\dim null\\ T\\). This makes the null space of \\(T\\) be \\(\\{0\\}\\). This makes \\(T\\) injective. Having shown \\(T\\) to be both surjective and injective, \\(T\\) is invertable, as desired. 
So \\(3 \\implies 1\\).\nHaving shown a loop in the statements, all of them are equivalent.\noperators on complex vector spaces have an eigenvalue See operators on complex vector spaces have an eigenvalue\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e from a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e to itself is called an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\(\\mathcal{L}(V) = \\mathcal{L}(V,V)\\), which is the set of all \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es on \\(V\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T \\in \\mathcal{L}(V,V)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T\\) is, by the constraints above, an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"injectivity--kbhinjectivity-dot-md--is-surjectivity--kbhsurjectivity-dot-md--in-finite-dimensional--kbhfinite-dimensional-vector-space-dot-md--operator--kbhoperator-dot-md--s\"\u003e\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e in \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eSuppose \\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e and \\(T \\in 
\\mathcal{L}(V)\\), then, the following statements are equivalent:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003cstrong\u003eTHIS IS NOT TRUE IN \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e OPERATORS!\u003c/strong\u003e (for instance, backwards shift in \\(\\mathbb{F}^{\\infty}\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e but not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.)\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eFrom the above, \\(1 \\implies 2\\) by definition of \u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThen, we have that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. We desire that \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e. Given \u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e, we have that \\(\\null T = \\{0\\}\\). By the \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T = \\dim range\\ T +0= \\dim range\\ T\\). Now, given \\(T\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e, we have that \\(range\\ T \\subset V\\). 
Attempting to extend a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\) (which, given it is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\), is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\)) to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) will be the trivial extension. So \\(range\\ T = V\\), which is also the codomain of \\(T\\). This makes \\(T\\) \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, as desired. So \\(2 \\implies 3\\).\u003c/p\u003e\n\u003cp\u003eNow, we have that \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, we desire that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. We essentially reverse-engineer the step before. Given \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T\\). Now, given \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, \\(\\dim range\\ T = \\dim V\\). Therefore, we have that \\(\\dim V = \\dim V + \\dim null\\ T \\implies 0 = \\dim null\\ T\\). This makes the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) be \\(\\{0\\}\\). This makes \\(T\\) \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e. Having shown \\(T\\) to be both \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e and \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, as desired. 
So \\(3 \\implies 1\\).\u003c/p\u003e\n\u003cp\u003eHaving shown a loop in the statements, all of them are equivalent.\u003c/p\u003e\n\u003ch3 id=\"operators-on-complex-vector-spaces-have-an-eigenvalue--kbhoperators-on-complex-vector-spaces-have-an-eigenvalue-dot-md\"\u003e\u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperator/","tags":null,"title":"operator"},{"categories":null,"contents":"An opsin is a photo-receptor protein (sensitive to light) that is sensitive to light\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhopsins/\"\u003eopsin\u003c/a\u003e is a photo-receptor protein (sensitive to light) that is sensitive to light\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhopsins/","tags":null,"title":"opsin"},{"categories":null,"contents":"Suppose we have offline statistic regarding wins and losses of each slot machine as our state:\n\\begin{equation} w_1, l_{1}, \\dots, w_{n}, l_{n} \\end{equation}\nWhat if we want to create a policy that maximises exploration?\nWe construct a value function:\n\\begin{equation} U^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) \\end{equation}\nour policy is the greedy policy:\n\\begin{equation} U^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\arg\\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) \\end{equation}\nNow, how do we go about calculating the action-value:\n\\begin{align} Q ([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) =\\ \u0026amp; \\frac{w_{a}+1}{w_{a}+l_{a}+2} (R(w) + U^{*}(\\dots, w_{a}+1, l_{a}, \\dots)) \\\u0026amp;+ \\qty(1-\\frac{w_{a}+1}{w_{a}+l_{a}+2})(R(l) + U^{*}(\\dots, w_{a}, l_{a}+1, 
\\dots)) \\end{align}\n\u0026ldquo;the probability of you win\u0026rdquo; (expectation of Beta Distribution), times the instantaneous reward you win + the utility you gain in terms of information of you doing that.\nTo solve this in a finite horizon, note that at time \\(t=k\\), your \\(U^{*}\\) is \\(0\\) because you have nothing to do anymore.\nThen, you can back up slowly to get each previous state:\ncalculate \\(Q[w_1-1, l_1, \u0026hellip;, 1]\\) calculate \\(Q[w_1, l_1-1, \u0026hellip;,1]\\) and so on, and choosing the utility of each state from there.\n","html":"\u003cp\u003eSuppose we have offline statistic regarding wins and losses of each slot machine as our state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw_1, l_{1}, \\dots, w_{n}, l_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhat if we want to create a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that maximises exploration?\u003c/p\u003e\n\u003cp\u003eWe construct a \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eour policy is the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\arg\\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a)\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNow, how do we go about calculating the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nQ ([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) =\\ \u0026amp; \\frac{w_{a}+1}{w_{a}+l_{a}+2} (R(w) + U^{*}(\\dots, w_{a}+1, l_{a}, \\dots)) \\\u0026amp;+ \\qty(1-\\frac{w_{a}+1}{w_{a}+l_{a}+2})(R(l) + 
U^{*}(\\dots, w_{a}, l_{a}+1, \\dots))\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the probability of you win\u0026rdquo; (expectation of \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e), times the instantaneous reward you win + the utility you gain in terms of information of you doing that.\u003c/p\u003e\n\u003cp\u003eTo solve this in a finite horizon, note that at time \\(t=k\\), your \\(U^{*}\\) is \\(0\\) because you have nothing to do anymore.\u003c/p\u003e\n\u003cp\u003eThen, you can back up slowly to get each previous state:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecalculate \\(Q[w_1-1, l_1, \u0026hellip;, 1]\\)\u003c/li\u003e\n\u003cli\u003ecalculate \\(Q[w_1, l_1-1, \u0026hellip;,1]\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand so on, and choosing the utility of each state from there.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptimal_exploration/","tags":null,"title":"Optimal Exploration Policy"},{"categories":null,"contents":" Shuffle cards Keep revealing cards \u0026ldquo;Stop\u0026rdquo; when there\u0026rsquo;s \u0026gt;50% chance the next card to be revealed is black We can Frequentist Definition of Probability calculate the probability of a given card remaining is black:\n\\begin{equation} pblack(b,r) = \\frac{26-b}{52-(r+b)} \\end{equation}\nnow:\n\\begin{equation} pwin(b,r) = \\begin{cases} 0, b+r = 52 \\\\ \\max \\qty[ \\begin{align}\u0026amp;pblack(p,r), \\\\ \u0026amp;pblack(b,r)pwin(b+1,r) + (1-pblack(b,r)pwin(b, r+1) \\end{align}] \\end{cases} \\end{equation}\n\u0026ldquo;with the theory of the Martingales, this comes out to be 50%\u0026rdquo;\n","html":"\u003col\u003e\n\u003cli\u003eShuffle cards\u003c/li\u003e\n\u003cli\u003eKeep revealing cards\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Stop\u0026rdquo; when there\u0026rsquo;s \u0026gt;50% chance the next card to be revealed is black\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe can 
\u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/a\u003e calculate the probability of a given card remaining is black:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\npblack(b,r) = \\frac{26-b}{52-(r+b)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\npwin(b,r) = \\begin{cases}\n0, b+r = 52 \\\\\n\\max \\qty[ \\begin{align}\u0026amp;pblack(p,r), \\\\ \u0026amp;pblack(b,r)pwin(b+1,r) + (1-pblack(b,r)pwin(b, r+1) \\end{align}]\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;with the theory of the \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003es, this comes out to be 50%\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptimal_stopping_problem/","tags":null,"title":"Optimal Stopping Problem"},{"categories":null,"contents":"optimization is a decision making method:\nidentify a performance measure and a space of possible strategies to try run a bunch of simulations given a particular strategy, and measuring the performance try strategies with the goal of maximizing the performance measured Importantly: model is not used to guide the search, it is only used to run simulations to evaluate performance.\nDisadvantage (or advantage) does not take a advantage of the structure of the problem\nOptimization Steps if you are doing something infrequently, make sure the simplest code If you are doing something very often, and/or on big inputs, make the primary algorithm big-o cost reasonable Make GCC Work! Optimize explicitly as last resort. Main Optimization Techniques constant folding sub-expression elimination dead code elimination \u0026ldquo;strength reduction\u0026rdquo; code motion tail recursion loop unrolling constant folding There are many constants which happens during code writing. 
Therefore, for functions that operate on constant values, they will be folded in and the math done ahead-of-time during compilation.\ncommon sub-instruction elimination If you have the same sub-expression over and over again, we compute it ahead of time and use that result in multiple places.\ndead code elimination Code which doesn\u0026rsquo;t do anything of interest. This maybe subtle:\nif (param == 0) { return 0; } else { return param; } this is (mostly) dead code. It all return 0.\nstrength reduction Multiply can be rounded to a bit shift, and mod can be changed to an AND operation.\n7 * a == 8*a - a vs So you can left shift and then subtract, which is dramatically easier.\nWe can even do this with:\nb / 3 which can be converted to\n(b*3) / 10 which is much easier because its a multiplication\ncode motion if there is a common sub-exppression, it can be pull out of loops\nfor (size_t i = 0; i \u0026lt; strlen(s); i++) { arr[i] = s[i]; } the strlen call can be and will be pulled out.\ntail recursion turn a recursive call into a while loop to save stack frame management time\nloop unrolling A loop can be \u0026ldquo;factored out\u0026rdquo;:\nfor (int i=0; i\u0026lt;=n; i++) { sum += arr[i]; } can turn into\nfor (int i=0; i\u0026lt;=n-4; i+=4) { sum += arr[i]; sum += arr[i+1]; sum += arr[i+2]; sum += arr[i+3]; } // handle ending cases Why don\u0026rsquo;t we unroll all the way? 
We don\u0026rsquo;t know what \\(n\\) is.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e is a \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003edecision making method\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eidentify a performance measure and a space of possible strategies to try\u003c/li\u003e\n\u003cli\u003erun a bunch of simulations given a particular strategy, and measuring the performance\u003c/li\u003e\n\u003cli\u003etry strategies with the goal of maximizing the performance measured\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eImportantly: model is not used to guide the search, it is only used to run simulations to evaluate performance.\u003c/p\u003e\n\u003ch2 id=\"disadvantage--or-advantage\"\u003eDisadvantage (or advantage)\u003c/h2\u003e\n\u003cp\u003edoes \u003cstrong\u003enot\u003c/strong\u003e take a advantage of the structure of the problem\u003c/p\u003e\n\u003ch2 id=\"optimization-steps\"\u003eOptimization Steps\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eif you are doing something infrequently, make sure the simplest code\u003c/li\u003e\n\u003cli\u003eIf you are doing something very often, and/or on big inputs, make the primary algorithm big-o cost reasonable\u003c/li\u003e\n\u003cli\u003eMake GCC Work!\u003c/li\u003e\n\u003cli\u003eOptimize explicitly as last resort.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"main-optimization-techniques\"\u003eMain Optimization Techniques\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003econstant folding\u003c/li\u003e\n\u003cli\u003esub-expression elimination\u003c/li\u003e\n\u003cli\u003edead code elimination\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;strength reduction\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ecode motion\u003c/li\u003e\n\u003cli\u003etail recursion\u003c/li\u003e\n\u003cli\u003eloop unrolling\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"constant-folding\"\u003econstant folding\u003c/h3\u003e\n\u003cp\u003eThere are many constants which happens during code writing. Therefore, for functions that operate on constant values, they will be folded in and the math done ahead-of-time during compilation.\u003c/p\u003e\n\u003ch3 id=\"common-sub-instruction-elimination\"\u003ecommon sub-instruction elimination\u003c/h3\u003e\n\u003cp\u003eIf you have the same sub-expression over and over again, we compute it ahead of time and use that result in multiple places.\u003c/p\u003e\n\u003ch3 id=\"dead-code-elimination\"\u003edead code elimination\u003c/h3\u003e\n\u003cp\u003eCode which doesn\u0026rsquo;t do anything of interest. This maybe subtle:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparam\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eparam\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethis is (mostly) dead code. It all return \u003ccode\u003e0\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"strength-reduction\"\u003estrength reduction\u003c/h3\u003e\n\u003cp\u003eMultiply can be rounded to a bit shift, and mod can be changed to an AND operation.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#ae81ff\"\u003e7\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003evs\nSo you can left shift and then subtract, which is dramatically easier.\u003c/p\u003e\n\u003cp\u003eWe can even do this with:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich can be converted to\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich is much easier because its a multiplication\u003c/p\u003e\n\u003ch3 id=\"code-motion\"\u003ecode motion\u003c/h3\u003e\n\u003cp\u003eif there is a common sub-exppression, it can be pull out of loops\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003estrlen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e++\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethe \u003ccode\u003estrlen\u003c/code\u003e call can be and will be pulled out.\u003c/p\u003e\n\u003ch3 id=\"tail-recursion\"\u003etail recursion\u003c/h3\u003e\n\u003cp\u003eturn a recursive call into a while loop to save stack frame management time\u003c/p\u003e\n\u003ch3 id=\"loop-unrolling\"\u003eloop unrolling\u003c/h3\u003e\n\u003cp\u003eA loop can be \u0026ldquo;factored out\u0026rdquo;:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e++\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ecan turn into\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// handle ending cases\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhy don\u0026rsquo;t we unroll all the way? 
We don\u0026rsquo;t know what \\(n\\) is.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptimization/","tags":null,"title":"optimization"},{"categories":null,"contents":"In the event your domain knowledge can help you make decisions about how spark load-balances or stripes data across worker nodes.\nPersistence \u0026ldquo;you should store this data in faster/slower memory\u0026rdquo;\nMEMORY_ONLY, MEMORY_ONLY_SER, MEMORY_AND_DISK, MEMORY_AND_DISK_SER, DISK_ONLY\nrdd.persist(StorageLevel.MEMORY_AND_DISK) # ... do work ... rdd.unpersist() Parallel Programming ","html":"\u003cp\u003eIn the event your domain knowledge can help you make decisions about how spark load-balances or stripes data across worker nodes.\u003c/p\u003e\n\u003ch2 id=\"persistence\"\u003ePersistence\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;you should store this data in faster/slower memory\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eMEMORY_ONLY, MEMORY_ONLY_SER, MEMORY_AND_DISK, MEMORY_AND_DISK_SER, DISK_ONLY\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epersist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eStorageLevel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMEMORY_AND_DISK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# ... 
do work ...\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunpersist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"parallel-programming\"\u003eParallel Programming\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-31_14-30-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhoptimizing_spark/","tags":null,"title":"Optimizing Spark"},{"categories":null,"contents":"options are derivatives which gives you the permission to make a transaction at a particular date.\nThere are two main types of options:\ncall: gives permission to buy a security on or before the \u0026ldquo;exercise\u0026rdquo; date puts: gives permission to sell a security on or before the \u0026ldquo;exercise\u0026rdquo; date For this article, we will define \\(S_{t}\\) to be the stock price at the time \\(t\\), \\(K\\) as the option\u0026rsquo;s strike price, \\(C_{t}\\) to be the price of the \u0026ldquo;call\u0026rdquo; option, and \\(P_{t}\\) to be the price of the \u0026ldquo;put\u0026rdquo; option at strike price \\(K\\); lastly \\(T\\) we define as the maturity date.\nNaturally, the actual values \\(C_{t}\\) and \\(P_{t}\\) are:\n\\begin{equation} \\begin{cases} C_{t} = Max[0, S_{T}-K] \\\\ P_{t} = Max[0, K-S_{T}] \\\\ \\end{cases} \\end{equation}\nyou either make no money from the option (market price is more optimal), or make some difference between the strike price and the market price.\nThe nice thing here is that little \\(Max\\) term. An option, unlike a futures contract, has no buying obligation: you don\u0026rsquo;t have to exercise it. The payoff is always non-negative!\nNOTE!!! 
\\(C_{t}\\) at SMALL \\(t\\) is measured at \\(Max[0,S_{*T*}, K}]\\), using \\(S\\) of LARGE \\(T\\). This is because, even when\u0026mdash;currently\u0026mdash;the stock is trading at $60, the right to buy the stock in \\(T\\) months for $70 is not worthless as the price may go up.\nTo analyze options, we usually use the Black-Scholes Formula.\nAmerican vs European Options American options are excercisable at or before the maturity date. European options are exrcercisable only at the maturity date. Analyze Options as Insurance All insurance contracts are actually a form of an option, so why don\u0026rsquo;t we analyze it as such?\nA put option\u0026mdash;-\nAsset insured: stock Current asset value: \\(S_{0}\\) Term of policy: \\(T\\) Maximum coverage: \\(K\\) Deductible: \\(S_0-K\\) Insurance premium: \\(P_{t}\\) A call option is covariant with a put option; so its isomorphic, and so we will deal with it later.\nA few differences:\nAmerican-style early exercise: (you can\u0026rsquo;t, for normal insurance, exercise it without something happening) Marketability: you can\u0026rsquo;t give normal insurance to other people Dividends: holding a stock pays dividends (an option\u0026rsquo;s value goes down as dividends) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoptions/\"\u003eoptions\u003c/a\u003e are \u003ca href=\"/posts/kbhderivatives/\"\u003ederivatives\u003c/a\u003e which gives you the \u003cem\u003epermission\u003c/em\u003e to make a transaction at a particular date.\u003c/p\u003e\n\u003cp\u003eThere are two main types of \u003ca href=\"/posts/kbhoptions/\"\u003eoptions\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecall: gives permission to \u003cstrong\u003ebuy\u003c/strong\u003e a security on or before the \u0026ldquo;exercise\u0026rdquo; date\u003c/li\u003e\n\u003cli\u003eputs: gives permission to \u003cstrong\u003esell\u003c/strong\u003e a security on or before the \u0026ldquo;exercise\u0026rdquo; 
date\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFor this article, we will define \\(S_{t}\\) to be the stock price at the time \\(t\\), \\(K\\) as the option\u0026rsquo;s strike price, \\(C_{t}\\) to be the price of the \u0026ldquo;call\u0026rdquo; option, and \\(P_{t}\\) to be the price of the \u0026ldquo;put\u0026rdquo; option at strike price \\(K\\); lastly \\(T\\) we define as the maturity date.\u003c/p\u003e\n\u003cp\u003eNaturally, the actual values \\(C_{t}\\) and \\(P_{t}\\) are:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nC_{t} = Max[0, S_{T}-K] \\\\\nP_{t} = Max[0, K-S_{T}] \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou either make no money from the \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e (market price is more optimal), or make some difference between the strike price and the market price.\u003c/p\u003e\n\u003cp\u003eThe nice thing here is that little \\(Max\\) term. An \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e, unlike a futures contract, has no buying obligation: you don\u0026rsquo;t have to exercise it. The payoff is always non-negative!\u003c/p\u003e\n\u003cp\u003eNOTE!!! \\(C_{t}\\) at SMALL \\(t\\) is measured at \\(Max[0,S_{*T*}, K}]\\), using \\(S\\) of LARGE \\(T\\). 
This is because, even when\u0026mdash;currently\u0026mdash;the stock is trading at $60, the right to buy the stock in \\(T\\) months for $70 is not worthless as the price may go up.\u003c/p\u003e\n\u003cp\u003eTo analyze options, we usually use the \u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"american-vs-european-options\"\u003eAmerican vs European Options\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAmerican options are excercisable at or before the maturity date.\u003c/li\u003e\n\u003cli\u003eEuropean options are exrcercisable only at the maturity date.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"analyze-options-as-insurance\"\u003eAnalyze Options as Insurance\u003c/h2\u003e\n\u003cp\u003eAll insurance contracts are actually a form of an \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e, so why don\u0026rsquo;t we analyze it as such?\u003c/p\u003e\n\u003cp\u003eA put option\u0026mdash;-\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAsset insured: stock\u003c/li\u003e\n\u003cli\u003eCurrent asset value: \\(S_{0}\\)\u003c/li\u003e\n\u003cli\u003eTerm of policy: \\(T\\)\u003c/li\u003e\n\u003cli\u003eMaximum coverage: \\(K\\)\u003c/li\u003e\n\u003cli\u003eDeductible: \\(S_0-K\\)\u003c/li\u003e\n\u003cli\u003eInsurance premium: \\(P_{t}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eA call option is covariant with a put \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e; so its isomorphic, and so we will deal with it later.\u003c/p\u003e\n\u003cp\u003eA few differences:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAmerican-style early exercise: (you can\u0026rsquo;t, for normal insurance, exercise it without something happening)\u003c/li\u003e\n\u003cli\u003eMarketability: you can\u0026rsquo;t give normal insurance to other people\u003c/li\u003e\n\u003cli\u003eDividends: holding a stock pays dividends (an option\u0026rsquo;s value goes down as 
dividends)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptions/","tags":null,"title":"option"},{"categories":null,"contents":"an Option (MDP) represents a high level collection of actions. Big Picture: abstract away your big policy into \\(n\\) small policies, and value-iterate over expected values of the big policies.\nMarkov Option A Markov Option is given by a triple \\((I, \\pi, \\beta)\\)\n\\(I \\subset S\\), the states from which the option maybe started \\(S \\times A\\), the MDP during that option \\(\\beta(s)\\), the probability of the option terminating at state \\(s\\) one-step options You can develop one-shot options, which terminates immediate after one action with underlying probability\n\\(I = \\{s:a \\in A_{s}\\}\\) \\(\\pi(s,a) = 1\\) \\(\\beta(s) = 1\\) option value fuction \\begin{equation} Q^{\\mu}(s,o) = \\mathbb{E}\\qty[r_{t} + \\gamma r_{t+1} + \\dots] \\end{equation}\nwhere \\(\\mu\\) is some option selection process\nsemi-markov decision process a semi-markov decision process is a system over a bunch of options, with time being a factor in option transitions, but the underlying policies still being MDPs.\n\\begin{equation} T(s\u0026rsquo;, \\tau | s,o) \\end{equation}\nwhere \\(\\tau\\) is time elapsed.\nbecause option-level termination induces jumps between large scale states, one backup can propagate to a lot of states.\nintra option q-learning \\begin{equation} Q_{k+1} (s_{i},o) = (1-\\alpha_{k})Q_{k}(S_{t}, o) + \\alpha_{k} \\qty(r_{t+1} + \\gamma U_{k}(s_{t+1}, o)) \\end{equation}\nwhere:\n\\begin{equation} U_{k}(s,o) = (1-\\beta(s))Q_{k}(s,o) + \\beta(s) \\max_{o \\in O} Q_{k}(s,o\u0026rsquo;) \\end{equation}\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhoption/\"\u003eOption (MDP)\u003c/a\u003e represents a high level collection of actions. 
Big Picture: abstract away your big policy into \\(n\\) small policies, and value-iterate over expected values of the big policies.\u003c/p\u003e\n\u003ch2 id=\"markov-option\"\u003eMarkov Option\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#markov-option\"\u003eMarkov Option\u003c/a\u003e is given by a triple \\((I, \\pi, \\beta)\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(I \\subset S\\), the states from which the option maybe started\u003c/li\u003e\n\u003cli\u003e\\(S \\times A\\), the MDP during that option\u003c/li\u003e\n\u003cli\u003e\\(\\beta(s)\\), the probability of the option terminating at state \\(s\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"one-step-options\"\u003eone-step options\u003c/h3\u003e\n\u003cp\u003eYou can develop one-shot options, which terminates immediate after one action with underlying probability\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(I = \\{s:a \\in A_{s}\\}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\pi(s,a) = 1\\)\u003c/li\u003e\n\u003cli\u003e\\(\\beta(s) = 1\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"option-value-fuction\"\u003eoption value fuction\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nQ^{\\mu}(s,o) = \\mathbb{E}\\qty[r_{t} + \\gamma r_{t+1} + \\dots]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\mu\\) is some option selection process\u003c/p\u003e\n\u003ch3 id=\"semi-markov-decision-process\"\u003esemi-markov decision process\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"#semi-markov-decision-process\"\u003esemi-markov decision process\u003c/a\u003e is a system over a bunch of \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es, with time being a factor in option transitions, but the underlying policies still being \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo;, \\tau | s,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\tau\\) is time 
elapsed.\u003c/p\u003e\n\u003cp\u003ebecause option-level termination induces jumps between large scale states, one backup can propagate to a lot of states.\u003c/p\u003e\n\u003ch3 id=\"intra-option-q-learning\"\u003eintra option q-learning\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nQ_{k+1} (s_{i},o) = (1-\\alpha_{k})Q_{k}(S_{t}, o) + \\alpha_{k} \\qty(r_{t+1} + \\gamma U_{k}(s_{t+1}, o))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{k}(s,o) = (1-\\beta(s))Q_{k}(s,o) + \\beta(s) \\max_{o \\in O} Q_{k}(s,o\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoption/","tags":null,"title":"Option (MDP)"},{"categories":null,"contents":"Optogenetics are a process of neurology circuit investigations:\nevery neuron which expresses as specific change becomes sensitive to light therefore, you can shine a light on the mouse\u0026rsquo;s brain to control it This uses a set of molecules named opsins.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoptogenetics/\"\u003eOptogenetics\u003c/a\u003e are a process of neurology circuit investigations:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eevery neuron which expresses as specific change becomes sensitive to light\u003c/li\u003e\n\u003cli\u003etherefore, you can shine a light on the mouse\u0026rsquo;s brain to control it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis uses a set of molecules named \u003ca href=\"/posts/kbhopsins/\"\u003eopsins\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptogenetics/","tags":null,"title":"Optogenetics"},{"categories":null,"contents":"oral lexical retrival is a class of discourse tasks which asks the subject to convert some semantic understanding (\u0026ldquo;concept\u0026rdquo;) into lexical expressions (\u0026ldquo;words\u0026rdquo;)\n\u0026ldquo;ask a patient to describe a thing.\u0026rdquo;\nExamples of oral lexical retrieval:\nSVF BNT Source: 
CambridgeCore\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrival\u003c/a\u003e is a class of \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003ediscourse tasks\u003c/a\u003e which asks the subject to convert some semantic understanding (\u0026ldquo;concept\u0026rdquo;) into lexical expressions (\u0026ldquo;words\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;ask a patient to describe a thing.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eExamples of \u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBNT\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSource: \u003ca href=\"https://www.cambridge.org/core/books/abs/cambridge-handbook-of-biolinguistics/lexical-retrieval-and-its-breakdown-in-aphasia-and-developmental-language-impairment/74D1249BE4923384AF56C2572187E6BF\"\u003eCambridgeCore\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoral_lexical_retrival/","tags":null,"title":"oral lexical retrieval"},{"categories":null,"contents":"ODEs are Differential Equations in one independent variable: \\(y(x)\\).\nMain Content:\nFirst-Order Differential Equations Second-Order Linear Differential Equations Uniqueness and Existance Overarching Categories order of equations the order of an equation is the highest derivative of an equation\nlinear vs. non-linear differential equations A solution of a differential equation is linear when solutions are closed under linear operations.\nWe can spot an ODE by seeing that each of its derivatives are seperated or in separable terms, and only to the first power\u0026mdash;because that ends up being a linear equation (i.e. 
any two solutions satisfying the equation can add and scale to another solution).\nThe RHS doesn\u0026rsquo;t matter. For instance:\n\\begin{equation} xy\u0026rsquo;\u0026rsquo; + e^{x}y\u0026rsquo; + (x^{2}-3)y = x^{2}-x \\end{equation}\nis linear.\nsuperposition principle any linear combination of a homogeneous linear ODE is also a solution to the ODE.\nfunctional linear independence\nRecall linear independence. If we have two solutions \\(y_1\\), \\(y_2\\), are linearly independent or \u0026ldquo;independent\u0026rdquo;, if\n\\begin{equation} c_1 y_1(t) + c_2y_2(t) = 0 \\end{equation}\nimplies \\(c_1 = c_2 = 0\\).\nhomogeneous vs. inhomogeneous equations whether or not, isolating all the DEPENDENT variables to the left side, is the right side zero?\nlinear systems systems of ODEs are groups of ODEs. Linear systems can obtain you a vector-value function:\n\\begin{equation} y\u0026rsquo;(x) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) \\vec{y} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003es are \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e in one independent variable: \\(y(x)\\).\u003c/p\u003e\n\u003cp\u003eMain Content:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"overarching-categories\"\u003eOverarching Categories\u003c/h2\u003e\n\u003ch3 id=\"order-of-equations\"\u003eorder of equations\u003c/h3\u003e\n\u003cp\u003ethe \u003ca 
href=\"#order-of-equations\"\u003eorder\u003c/a\u003e of an equation is the highest derivative of an equation\u003c/p\u003e\n\u003ch3 id=\"linear-vs-dot-non-linear-differential-equations\"\u003elinear vs. non-linear differential equations\u003c/h3\u003e\n\u003cp\u003eA solution of a differential equation is \u003cstrong\u003elinear\u003c/strong\u003e when solutions are \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under linear operations.\u003c/p\u003e\n\u003cp\u003eWe can spot an ODE by seeing that each of its derivatives are seperated or in separable terms, and only to the first power\u0026mdash;because that ends up being a linear equation (i.e. any two solutions satisfying the equation can add and scale to another solution).\u003c/p\u003e\n\u003cp\u003eThe RHS doesn\u0026rsquo;t matter. For instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nxy\u0026rsquo;\u0026rsquo; + e^{x}y\u0026rsquo; + (x^{2}-3)y = x^{2}-x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis linear.\u003c/p\u003e\n\u003ch4 id=\"superposition-principle\"\u003esuperposition principle\u003c/h4\u003e\n\u003cp\u003eany linear combination of a \u003cem\u003ehomogeneous linear\u003c/em\u003e ODE is also a solution to the ODE.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003efunctional linear independence\u003c/p\u003e\n\u003cp\u003eRecall linear independence. If we have two solutions \\(y_1\\), \\(y_2\\), are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e or \u0026ldquo;\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e\u0026rdquo;, if\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 y_1(t) + c_2y_2(t) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eimplies \\(c_1 = c_2 = 0\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"homogeneous-vs-dot-inhomogeneous-equations\"\u003ehomogeneous vs. 
inhomogeneous equations\u003c/h3\u003e\n\u003cp\u003ewhether or not, isolating all the DEPENDENT variables to the left side, is the right side zero?\u003c/p\u003e\n\u003ch3 id=\"linear-systems\"\u003elinear systems\u003c/h3\u003e\n\u003cp\u003esystems of ODEs are groups of ODEs. Linear systems can obtain you a vector-value function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(x) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) \\vec{y}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhordinary_differential_equations/","tags":null,"title":"Ordinary Differential Equation"},{"categories":null,"contents":"Reading notes conservatives in America make less sense because America is supposed to be liberal/new For most Europeans who came to America, the whole purpose of their difficult and dis- ruptive journey to the New World was not to conserve European institutions but to leave them behind and to create something new, often an entirely new life\nThree splits of conservatism in America those who are most concerned about economic or fiscal issues, that is, pro-business or “free-enterprise” conservatives those most concerned with religious or social issues, that is, pro-church or “traditional-values” conservatives those most concerned with national-security or defense issues, that is, pro-military or “patriotic” conservatives Ronald Reagan unified the three conservatism It was the achievement of Ronald Reagan that he was able in the late 1970s to unite these three different kinds of conservatism into one grand coalition.\nThree-in-one conservatism is a part of American \u0026ldquo;fusionist strategy\u0026rdquo; This was the culmination of a “fusionist strategy” that had been developing amongst American conservatives since the early 1960s.\nBusiness and social conservatism should contradict each other, though However, as we shall see, pro-business conservatism has always included a tendency toward the disruption and even dissolution 
of religious ideals and social practices.\nExtreme pro-business should also include globalization and erasure of national identities And in recent decades, pro-business conservatism has also included a tendency toward the dismantling of national boundaries and even dissolution of national identities\n\u0026ldquo;conservatism\u0026rdquo; actually conserved American revolutionary force economically this means that the conservative party in America has always sought to conserve a revolutionary force.\nExtreme economic \u0026ldquo;conservatism\u0026rdquo; should destry social and moral arrangements It destroys religious, social, and ultimately moral arrangements as well.\nReligions conservatism founds on the \u0026ldquo;open-market\u0026rdquo; of protestanism This open market in religious matters, so nicely isomorphic with the open market in economic matters, was a powerful factor gen- erating both a reality and an ideology of free choice in the United States.\nBecause the \u0026ldquo;new\u0026rdquo; became new protestanism, religious conservatism is the re-take over of traditional religions Since these churches were continually being left behind, religious conservatism was associated with once-dominant churches that were now dwindling into a minority, and would later dwindle into marginality\nBecause of mass economic benifit, religous conservatism became subordinated to economic conservatism Even ordinary middle-class Protestants benefited from cheaper labor, in the form of domestic servants. 
And of course it was the businessmen and middle-class Protestants who controlled the political parties, particularly that party which was supposed to be the more conservative one\nBecause there is nothing to conserve about current system, the thing that\u0026rsquo;s conserved is free choice If something were going to be conserved, it would normally be the no-conscription and low-taxation (and free-choice) system.\nEconomic systems propergated the source of American patriotism This meant that people who thought of themselves as American patriots or nationalists, and who sought to conserve the American nation and to promote American national interests\nAmerican conservatism is actually a form of European liberalism we have seen that, from a European perspective, American conservatism was not conservative at all, but actually was a kind of classical lib- eralism.\nwartime strengthened American values and liberalism Moreover, the wartime experience seemed decisively to vindicate and even enhance the strengths of both the traditional American economic system and traditional American moral principles.\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"conservatives-in-america-make-less-sense-because-america-is-supposed-to-be-liberal-new\"\u003econservatives in America make less sense because America is supposed to be liberal/new\u003c/h3\u003e\n\u003cp\u003eFor most Europeans who came to America, the whole purpose of their difficult and dis- ruptive journey to the New World was not to conserve European institutions but to leave them behind and to create something new, often an entirely new life\u003c/p\u003e\n\u003ch3 id=\"three-splits-of-conservatism-in-america\"\u003eThree splits of conservatism in America\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003ethose who are most concerned about economic or fiscal issues, that is, pro-business or “free-enterprise” conservatives\u003c/li\u003e\n\u003cli\u003ethose most concerned with 
religious or social issues, that is, pro-church or “traditional-values” conservatives\u003c/li\u003e\n\u003cli\u003ethose most concerned with national-security or defense issues, that is, pro-military or “patriotic” conservatives\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"ronald-reagan-unified-the-three-conservatism\"\u003eRonald Reagan unified the three conservatism\u003c/h3\u003e\n\u003cp\u003eIt was the achievement of Ronald Reagan that he was able in the late 1970s to unite these three different kinds of conservatism into one grand coalition.\u003c/p\u003e\n\u003ch3 id=\"three-in-one-conservatism-is-a-part-of-american-fusionist-strategy\"\u003eThree-in-one conservatism is a part of American \u0026ldquo;fusionist strategy\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eThis was the culmination of a “fusionist strategy” that had been developing amongst American conservatives since the early 1960s.\u003c/p\u003e\n\u003ch3 id=\"business-and-social-conservatism-should-contradict-each-other-though\"\u003eBusiness and social conservatism should contradict each other, though\u003c/h3\u003e\n\u003cp\u003eHowever, as we shall see, pro-business conservatism has always included a tendency toward the disruption and even dissolution of religious ideals and social practices.\u003c/p\u003e\n\u003ch3 id=\"extreme-pro-business-should-also-include-globalization-and-erasure-of-national-identities\"\u003eExtreme pro-business should also include globalization and erasure of national identities\u003c/h3\u003e\n\u003cp\u003eAnd in recent decades, pro-business conservatism has also included a tendency toward the dismantling of national boundaries and even dissolution of national identities\u003c/p\u003e\n\u003ch3 id=\"conservatism-actually-conserved-american-revolutionary-force-economically\"\u003e\u0026ldquo;conservatism\u0026rdquo; actually conserved American revolutionary force economically\u003c/h3\u003e\n\u003cp\u003ethis means that the conservative party in America has always sought 
to conserve a revolutionary force.\u003c/p\u003e\n\u003ch3 id=\"extreme-economic-conservatism-should-destry-social-and-moral-arrangements\"\u003eExtreme economic \u0026ldquo;conservatism\u0026rdquo; should destry social and moral arrangements\u003c/h3\u003e\n\u003cp\u003eIt destroys religious, social, and ultimately moral arrangements as well.\u003c/p\u003e\n\u003ch3 id=\"religions-conservatism-founds-on-the-open-market-of-protestanism\"\u003eReligions conservatism founds on the \u0026ldquo;open-market\u0026rdquo; of protestanism\u003c/h3\u003e\n\u003cp\u003eThis open market in religious matters, so nicely isomorphic with the open market in economic matters, was a powerful factor gen- erating both a reality and an ideology of free choice in the United States.\u003c/p\u003e\n\u003ch3 id=\"because-the-new-became-new-protestanism-religious-conservatism-is-the-re-take-over-of-traditional-religions\"\u003eBecause the \u0026ldquo;new\u0026rdquo; became new protestanism, religious conservatism is the re-take over of traditional religions\u003c/h3\u003e\n\u003cp\u003eSince these churches were continually being left behind, religious conservatism was associated with once-dominant churches that were now dwindling into a minority, and would later dwindle into marginality\u003c/p\u003e\n\u003ch3 id=\"because-of-mass-economic-benifit-religous-conservatism-became-subordinated-to-economic-conservatism\"\u003eBecause of mass economic benifit, religous conservatism became subordinated to economic conservatism\u003c/h3\u003e\n\u003cp\u003eEven ordinary middle-class Protestants benefited from cheaper labor, in the form of domestic servants. 
And of course it was the businessmen and middle-class Protestants who controlled the political parties, particularly that party which was supposed to be the more conservative one\u003c/p\u003e\n\u003ch3 id=\"because-there-is-nothing-to-conserve-about-current-system-the-thing-that-s-conserved-is-free-choice\"\u003eBecause there is nothing to conserve about current system, the thing that\u0026rsquo;s conserved is free choice\u003c/h3\u003e\n\u003cp\u003eIf something were going to be conserved, it would normally be the no-conscription and low-taxation (and free-choice) system.\u003c/p\u003e\n\u003ch3 id=\"economic-systems-propergated-the-source-of-american-patriotism\"\u003eEconomic systems propergated the source of American patriotism\u003c/h3\u003e\n\u003cp\u003eThis meant that people who thought of themselves as American patriots or nationalists, and who sought to conserve the American nation and to promote American national interests\u003c/p\u003e\n\u003ch3 id=\"american-conservatism-is-actually-a-form-of-european-liberalism\"\u003eAmerican conservatism is actually a form of European liberalism\u003c/h3\u003e\n\u003cp\u003ewe have seen that, from a European perspective, American conservatism was not conservative at all, but actually was a kind of classical lib- eralism.\u003c/p\u003e\n\u003ch3 id=\"wartime-strengthened-american-values-and-liberalism\"\u003ewartime strengthened American values and liberalism\u003c/h3\u003e\n\u003cp\u003eMoreover, the wartime experience seemed decisively to vindicate and even enhance the strengths of both the traditional American economic system and traditional American moral principles.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrise_of_american_conservatism/","tags":null,"title":"Origins of American Conservatism"},{"categories":null,"contents":"Two vectors are considered orthogonal if \\(\\langle u,v \\rangle = 0\\), that is, their inner product is \\(0\\).\nSee also orthogonality test.\northogonality and 
\\(0\\) \\(0\\) is orthogonal to every vector in \\(v\\) because \\(\\langle 0,v \\rangle=0\\) for every \\(v\\) because of the properties of inner product \\(0\\) is the only vector orthogonal to itself as, by inner product definiteness, \\(\\langle v,v \\rangle=0\\) implies \\(v=0\\). ","html":"\u003cp\u003eTwo \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es are considered \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e if \\(\\langle u,v \\rangle = 0\\), that is, their \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is \\(0\\).\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhdot_product/#orthogonality-test\"\u003eorthogonality test\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"orthogonality-and-0\"\u003eorthogonality and \\(0\\)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0\\) is orthogonal to every vector in \\(v\\) because \\(\\langle 0,v \\rangle=0\\) for every \\(v\\) because of the \u003ca href=\"/posts/kbhinner_product/#properties-of-inner-product\"\u003eproperties of inner product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(0\\) is the only vector orthogonal to itself as, by \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e definiteness, \\(\\langle v,v \\rangle=0\\) implies \\(v=0\\).\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhorthogonal/","tags":null,"title":"orthogonal"},{"categories":null,"contents":"A list of vectors is orthonormal if each vector is orthogonal to every other vector, and they all have norm 1.\nIn other words:\n\\begin{equation} \\langle e_{j}, e_{k} \\rangle = \\begin{cases} 1, j = k\\\\ 0, j \\neq k \\end{cases} \\end{equation}\nThe vectors should inner-product with itself to \\(1\\), and be orthogonal to all others.\nAdditional Information orthonormal basis See also orthonormal basis\nNorm of an Orthogonal Linear Combination \\begin{equation} \\| a_1e_1 + \\dots + a_{m}e_{m} \\|^{2} = 
|a_1|^{2} + \\dots + |a_{m}|^{2} \\end{equation}\nWhen \\(e_1, \\dots e_{m}\\) are orthonormal vectors in \\(V\\) and \\(a_1, \\dots a_{m} \\in \\mathbb{F}\\).\nProof:\nRecall two facts: \\(e_{j}\\) are orthonormal vectors, so they are 1) orthogonal to each other and have 2) norm 1. Therefore, each \\(a_j e_{j}\\) are also orthogonal and have norm \\(a_{j}\\)\nAnd so, the orthogonal condition guarantees pythagoras, and we know that each vector being added here has norm \\(a_{j}\\).\nAnd so we can just chonk out each of the vectors, apply Pythagoras to the ending bunch and the one removed.\northonormal list is linearly independent Its a corollary of the above is that orthonormal lists are linearly independent.\nProof:\n\\begin{equation} a_1e_1 + \\dots +a_{m}e_{m} = 0 \\end{equation}\nWe desire that each \\(a_{j}=0\\) to show that this list is linearly independent.\nNow, given that the linear combination of these \\(e_{j}\\) adds to \\(0\\), the summed vector is a zero-vector. So:\n\\begin{equation} \\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = \\|0\\| = 0 \\end{equation}\nOF course their norm squared is also \\(0\\).\nApply the above, then, we now have:\n\\begin{equation} |a_1|^{2} + \\dots +|a_{m}|^{2} = \\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = 0 \\end{equation}\nOf course adding a list of positive numbers (\\(|a_{j}|^{2}\\)) together yields not a negative number, so there are no possible additive inverses that will cancel each other out. Hence, \\(a_{j} = 0\\), as desired. 
\\(\\blacksquare\\)\n","html":"\u003cp\u003eA list of vectors is \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e if each vector is \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to every other vector, and they all have \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1.\u003c/p\u003e\n\u003cp\u003eIn other words:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle e_{j}, e_{k} \\rangle = \\begin{cases}\n1, j = k\\\\\n0, j \\neq k\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe vectors should inner-product with itself to \\(1\\), and be orthogonal to all others.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"orthonormal-basis--kbhorthonormal-basis-dot-md\"\u003e\u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"norm-of-an-orthogonal-linear-combination\"\u003eNorm of an Orthogonal Linear Combination\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\| a_1e_1 + \\dots + a_{m}e_{m} \\|^{2} = |a_1|^{2} + \\dots + |a_{m}|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhen \\(e_1, \\dots e_{m}\\) are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e vectors in \\(V\\) and \\(a_1, \\dots a_{m} \\in \\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eRecall two facts: \\(e_{j}\\) are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e vectors, so they are 1) \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to each other and have 2) \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1. 
Therefore, each \\(a_j e_{j}\\) are also \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e and have norm \\(a_{j}\\)\u003c/p\u003e\n\u003cp\u003eAnd so, the \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e condition guarantees \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003epythagoras\u003c/a\u003e, and we know that each vector being added here has norm \\(a_{j}\\).\u003c/p\u003e\n\u003cp\u003eAnd so we can just chonk out each of the vectors, apply Pythagoras to the ending bunch and the one removed.\u003c/p\u003e\n\u003ch4 id=\"orthonormal-list-is-linearly-independent\"\u003eorthonormal list is linearly independent\u003c/h4\u003e\n\u003cp\u003eIts a corollary of the above is that \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e lists are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1e_1 + \\dots +a_{m}e_{m} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that each \\(a_{j}=0\\) to show that this list is linearly independent.\u003c/p\u003e\n\u003cp\u003eNow, given that the linear combination of these \\(e_{j}\\) adds to \\(0\\), the summed vector is a zero-vector. So:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = \\|0\\| = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOF course their norm squared is also \\(0\\).\u003c/p\u003e\n\u003cp\u003eApply the above, then, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|a_1|^{2} + \\dots +|a_{m}|^{2} = \\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course adding a list of positive numbers (\\(|a_{j}|^{2}\\)) together yields not a negative number, so there are no possible additive inverses that will cancel each other out. Hence, \\(a_{j} = 0\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhorthonormal/","tags":null,"title":"orthonormal"},{"categories":null,"contents":"An Orthonormal basis is defined as a basis of a finite-dimensional vector space that\u0026rsquo;s orthonormal.\nAdditional Information orthonormal list of the right length is a basis An orthonormal list is linearly independent, and linearly independent list of length dim V are a basis of V. \\(\\blacksquare\\)\nWriting a vector as a linear combination of orthonormal basis According to Axler, this result is why there\u0026rsquo;s so much hoopla about orthonormal basis.\nResult and Motivation For any basis of \\(V\\), and a vector \\(v \\in V\\), we by basis spanning have:\n\\begin{equation} v = a_1e_1 + \\dots a_{n}e_{n} \\end{equation}\nYet, for orthonormal basis, we can actually very easily know what the \\(a_{j}\\) are (and not just that some \\(a_{j}\\) exist). Specifically:\n\\begin{equation} a_{j} = \\langle v,e_{j} \\rangle \\end{equation}\nThat is, for orthonormal basis \\(e_{j}\\) of \\(V\\), we have that:\n\\begin{equation} v = \\langle v, e_{1} \\rangle e_{1} + \\dots + \\langle v, e_{n} \\rangle e_{n} \\end{equation}\nfor all \\(v \\in V\\).\nFurthermore:\n\\begin{equation} \\|v\\|^{2} = | \\langle v,e_1 \\rangle|^{2} + \\dots + | \\langle v, e_{n} \\rangle|^{2} \\end{equation}\nProof Given \\(e_{j}\\) are basis (nevermind orthonormal quite yet), we have that:\n\\begin{equation} v = a_1e_{1} + \\dots + a_{n}e_{n} \\end{equation}\nWLOG let\u0026rsquo;s take \\(\\langle v, e_{j} \\rangle\\):\n\\begin{equation} \\langle v,e_{j} \\rangle = \\langle a_1e_1 + \\dots +a_{n}e_{n}, e_{j} \\rangle \\end{equation}\nGiven additivity and homogenity in the first slot, we now have:\n\\begin{equation} \\langle v, e_{j} \\rangle = a_{1}\\langle e_1, e_{j} \\rangle + \\dots +a_{n}\\langle e_{n}, e_{j} \\rangle \\end{equation}\nOf course, each \\(e_{i}\\) and \\(e_{j}\\) are orthogonal, so for the most part 
\\(a_{i}\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\). Except where \\(a_{j} \\langle e_{j}, e_{j} \\rangle = a_{j} 1 = a_{j}\\) because the \\(e\\) vectors are also norm 1.\nTherefore:\n\\begin{equation} \\langle v, e_{j} \\rangle= 0 + \\dots +a_{j} + \\dots +0 = a_{j} \\end{equation}\nWe now have \\(\\langle v,e_{j} \\rangle = a_{j}\\) WLOG for all \\(j\\), as desired.\nPlugging this in for each \\(a_{j}\\) and applying Norm of an Orthogonal Linear Combination yields the \\(\\|v\\|^{2}\\) equation above. \\(\\blacksquare\\)\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eOrthonormal basis\u003c/a\u003e is defined as a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of a finite-dimensional vector space that\u0026rsquo;s \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"orthonormal-list-of-the-right-length-is-a-basis\"\u003eorthonormal list of the right length is a basis\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhorthonormal/#an-orthonormal-list-is-linearly-independent\"\u003eAn orthonormal list is linearly independent\u003c/a\u003e, and \u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003eWriting a vector as a linear combination of orthonormal basis\u003c/h3\u003e\n\u003cp\u003eAccording to Axler, this result is why there\u0026rsquo;s so much hoopla about \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"result-and-motivation\"\u003eResult and Motivation\u003c/h4\u003e\n\u003cp\u003eFor any basis of \\(V\\), and a vector \\(v \\in V\\), we by \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e spanning have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1e_1 + \\dots a_{n}e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYet, for \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e, we can actually very easily know what the \\(a_{j}\\) are (and not just that \u003cem\u003esome\u003c/em\u003e \\(a_{j}\\) exist). Specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{j} = \\langle v,e_{j} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is, for \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e \\(e_{j}\\) of \\(V\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = \\langle v, e_{1} \\rangle e_{1} + \\dots + \\langle v, e_{n} \\rangle e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eFurthermore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|v\\|^{2} = | \\langle v,e_1 \\rangle|^{2} + \\dots + | \\langle v, e_{n} \\rangle|^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"proof\"\u003eProof\u003c/h4\u003e\n\u003cp\u003eGiven \\(e_{j}\\) are \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e (nevermind \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e quite yet), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1e_{1} + \\dots + 
a_{n}e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWLOG let\u0026rsquo;s take \\(\\langle v, e_{j} \\rangle\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v,e_{j} \\rangle = \\langle a_1e_1 + \\dots +a_{n}e_{n}, e_{j} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven additivity and homogenity in the first slot, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, e_{j} \\rangle = a_{1}\\langle e_1, e_{j} \\rangle + \\dots +a_{n}\\langle e_{n}, e_{j} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, each \\(e_{i}\\) and \\(e_{j}\\) are \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e, so for the most part \\(a_{i}\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\). Except where \\(a_{j} \\langle e_{j}, e_{j} \\rangle = a_{j} 1 = a_{j}\\) because the \\(e\\) vectors are also \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, e_{j} \\rangle= 0 + \\dots +a_{j} + \\dots +0 = a_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now have \\(\\langle v,e_{j} \\rangle = a_{j}\\) WLOG for all \\(j\\), as desired.\u003c/p\u003e\n\u003cp\u003ePlugging this in for each \\(a_{j}\\) and applying \u003ca href=\"/posts/kbhorthonormal/#norm-of-an-orthogonal-linear-combination\"\u003eNorm of an Orthogonal Linear Combination\u003c/a\u003e yields the \\(\\|v\\|^{2}\\) equation above. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhorthonormal_basis/","tags":null,"title":"orthonormal basis"},{"categories":null,"contents":"The OTC Markets/pink sheets are an unregulated group of Financial Markets, where many of the Penny stocks are.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhotc_markets/\"\u003eOTC Markets\u003c/a\u003e/\u003ca href=\"/posts/kbhotc_markets/\"\u003epink sheets\u003c/a\u003e are an unregulated group of \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Market\u003c/a\u003es, where many of the Penny stocks are.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhotc_markets/","tags":null,"title":"OTC Markets"},{"categories":null,"contents":"action outcomes are uncertain\n","html":"\u003cp\u003eaction outcomes are uncertain\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoutcome_uncertainty/","tags":null,"title":"Outcome Uncertainty"},{"categories":null,"contents":"overfitting is the process\nPenalty for large weight errors is a good way of mitigating overfitting\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoverfitting/\"\u003eoverfitting\u003c/a\u003e is the process\u003c/p\u003e\n\u003cp\u003ePenalty for large weight errors is a good way of mitigating overfitting\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoverfitting/","tags":null,"title":"overfitting"},{"categories":null,"contents":"We can use the scalars of a polynomial to build a new operator, which scales copies of an operator with the coefficients \\(a_{j}\\) of the polynomial.\nconstituents \\(p(z) = a_{0} + a_{1}z + a_{2}z^{2} + \\cdots + a_{m}z^{m}\\), a polynomial for \\(z \\in \\mathbb{F}\\) \\(T \\in \\mathcal{L}(V)\\) requirements \\(p(T)\\) is an operator refined by:\n\\begin{equation} p(T) = a_{0} I + a_{1} T + a_{2} T^{2} + \\cdots + a_{m} T^{m} \\end{equation}\nwhere, \\(T^{m}\\) is the power of operator\nadditional information \\(p(z) \\to p(T)\\) is a linear 
function additivity: \\((p_{1} + p_2)T = (a_{0}+b_{0})I \u0026hellip; = a_{0} I + b_{0} I \u0026hellip; = p_{1}(T) + p_{2}(T)\\) homogeneity: \\((\\lambda p)T = (\\lambda a_{0})I \u0026hellip; = \\lambda (a_{0} I \\dots) = \\lambda p(T)\\) polynomial of operator is commutative \\((pq)T = p(T) q(T)\\) \\(p(T)q(T) = q(T)p(T)\\) The first result can be shown because the product of polynomials are a result of rote algebra, and when you come across \\(pq\\) trying to combine \\(z^{j+k}\\) at each FOIL part, you just swap that into \\(T^{j+k} = T^{j}T^{k}\\). Then, you re-split the constants towards either side (i.e. if the FOIL gave \\(a_{j} b_{k} T^{j+k} \\implies a_{j} T^{j} b_{k} T^{k}\\)), then you factor the sums out into two separate pieces to get to \\(p(T)\\) and \\(q(T)\\).\nThe second result: \\(p(T) q(T) = (pq)(T) = (qp)T = q(T) p(T)\\), with the middle commutativity because \\(\\mathbb{F}\\) commutes.\n","html":"\u003cp\u003eWe can use the scalars of a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e to build a new \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e, which scales copies of an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e with the coefficients \\(a_{j}\\) of the \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(p(z) = a_{0} + a_{1}z + a_{2}z^{2} + \\cdots + a_{m}z^{m}\\), a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e for \\(z \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(p(T)\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e refined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(T) = a_{0} I + a_{1} T + a_{2} T^{2} + \\cdots + a_{m} 
T^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T^{m}\\) is the \u003ca href=\"/posts/kbhraising_operators_to_powers/\"\u003epower of operator\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"p--z--to-p--t--is-a-linear-function--kbhfunction-dot-md\"\u003e\\(p(z) \\to p(T)\\) is a linear \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eadditivity: \\((p_{1} + p_2)T = (a_{0}+b_{0})I \u0026hellip; = a_{0} I + b_{0} I \u0026hellip; = p_{1}(T) + p_{2}(T)\\)\u003c/li\u003e\n\u003cli\u003ehomogeneity: \\((\\lambda p)T = (\\lambda a_{0})I \u0026hellip; = \\lambda (a_{0} I \\dots) = \\lambda p(T)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"polynomial-of-operator--kbhpolynomial-operator-dot-md--is-commutative\"\u003e\u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial of operator\u003c/a\u003e is commutative\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\\((pq)T = p(T) q(T)\\)\u003c/li\u003e\n\u003cli\u003e\\(p(T)q(T) = q(T)p(T)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe first result can be shown because the \u003ca href=\"\"\u003eproduct of polynomial\u003c/a\u003es are a result of rote \u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e, and when you come across \\(pq\\) trying to combine \\(z^{j+k}\\) at each FOIL part, you just swap that into \\(T^{j+k} = T^{j}T^{k}\\). Then, you re-split the constants towards either side (i.e. 
if the FOIL gave \\(a_{j} b_{k} T^{j+k} \\implies a_{j} T^{j} b_{k} T^{k}\\)), then you factor the sums out into two separate pieces to get to \\(p(T)\\) and \\(q(T)\\).\u003c/p\u003e\n\u003cp\u003eThe second result: \\(p(T) q(T) = (pq)(T) = (qp)T = q(T) p(T)\\), with the middle \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e because \\(\\mathbb{F}\\) commutes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolynomial_operator/","tags":null,"title":"p(T)"},{"categories":null,"contents":"PACE is a form of Directed Evolution: which use a bacteriophage with a bit of its gene removed; then, engineer a virus to infect the bacteria, which will only successfully complete infection if the missing area is provided.\nThe mutation of this virus, then, essentially RNG-s mutations of new functions and will only produce successful new generations of bacteriologist when it works.\nPACE is hard The only way to check that PACE worked in the direction you want is by sampling the bacteria and hope that they are evolving in the correct direction\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e is a form of \u003ca href=\"/posts/kbhdirected_evolution/\"\u003eDirected Evolution\u003c/a\u003e: which use a bacteriophage with a bit of its gene removed; then, engineer a virus to infect the bacteria, which will only successfully complete infection if the missing area is provided.\u003c/p\u003e\n\u003cp\u003eThe mutation of this virus, then, essentially RNG-s mutations of new functions and will only produce successful new generations of bacteriologist when it works.\u003c/p\u003e\n\u003ch2 id=\"pace-is-hard\"\u003ePACE is hard\u003c/h2\u003e\n\u003cp\u003eThe only way to check that \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e worked in the direction you want is by sampling the bacteria and hope that they are evolving in the correct 
direction\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpace/","tags":null,"title":"PACE"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpacific_railroad_act/","tags":null,"title":"Pacific Railroad Act"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpagin_q/","tags":null,"title":"pagin:q"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpapyrus/","tags":null,"title":"papyrus"},{"categories":null,"contents":"a parameter of probability distribution govern the probabilities associated with different conditions in that distribution. It is usually a vector:\nFor instance, for uniform \\(Uni(\\alpha, \\beta)\\), parameter \\(\\theta = [\\alpha, \\beta]\\).\nimportantly, for a discrete distribution system with 6 parameters, we only need 5 independent parameters to be able to satisfy the entire system. This is because a probability distribution must sum to 1.\nhowever, for a conditional probability:\n\\begin{equation} p(x|a) \\end{equation}\nwe need to specificity \\((n-1)m\\) parameters, whereby \\(m\\) is the number of states \\(a\\) can take, and \\(n\\) the number of states \\(n\\) can take. Each group of \\(m\\) has to add up to \\(1\\).\nparameter learning see parameter learning\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e of \u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distribution\u003c/a\u003e govern the \u003ca href=\"/posts/kbhprobability/\"\u003eprobabilities\u003c/a\u003e associated with different conditions in that distribution. 
It is usually a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eFor instance, for uniform \\(Uni(\\alpha, \\beta)\\), parameter \\(\\theta = [\\alpha, \\beta]\\).\u003c/p\u003e\n\u003cp\u003eimportantly, for a \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e system with 6 parameters, we only need 5 \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es to be able to satisfy the entire system. This is because a probability distribution must sum to 1.\u003c/p\u003e\n\u003cp\u003ehowever, for a \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x|a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe need to specificity \\((n-1)m\\) \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es, whereby \\(m\\) is the number of states \\(a\\) can take, and \\(n\\) the number of states \\(n\\) can take. 
Each group of \\(m\\) has to add up to \\(1\\).\u003c/p\u003e\n\u003ch2 id=\"parameter-learning--kbhparameter-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparameter/","tags":null,"title":"parameter"},{"categories":null,"contents":"We want to learn a Baysian Network\u0026rsquo;s parameters from data.\nunbiased parameter learning Maximum Likelihood Parameter Learning Baysian Parameter Learning If we want to do it in a Bayes Net:\nparameter learning in Baysian Network\n","html":"\u003cp\u003eWe want to learn a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u0026rsquo;s \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es from data.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunbiased_parameter_learning/\"\u003eunbiased parameter learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf we want to do it in a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_network/#id-d01990aa-fcca-42f0-bd6c-7ba13746b6ca-parameter-learning-in-id-5eaa4b96-cbc2-4811-91c7-88ea2e164fc3-baysian-network\"\u003eparameter learning in Baysian Network\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparameter_learning/","tags":null,"title":"parameter learning"},{"categories":null,"contents":" tag EEG by data type (what mental stage does it come from?) 
per region, per data type, we take a band-power series calculate statistics per series shove the results into something interpretable Conclusion N1 results performs the best across brain regions; where the data came from didn\u0026rsquo;t change performance by much.\n","html":"\u003col\u003e\n\u003cli\u003etag EEG by data type (what mental stage does it come from?)\u003c/li\u003e\n\u003cli\u003eper region, per data type, we take a band-power series\u003c/li\u003e\n\u003cli\u003ecalculate statistics per series\u003c/li\u003e\n\u003cli\u003eshove the results into something interpretable\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"conclusion\"\u003eConclusion\u003c/h2\u003e\n\u003cp\u003eN1 results performs the best across brain regions; where the data came from didn\u0026rsquo;t change performance by much.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparkingson_s_classification_with_eeg/","tags":null,"title":"Parkingson's Classification with EEG"},{"categories":null,"contents":"PARRY is if ELIZA had mental states such as fear, anger, and mistrust. Mentions of various things in the user turn increases or decreases each variable\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhparry/\"\u003ePARRY\u003c/a\u003e is if \u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e had mental states such as fear, anger, and mistrust. 
Mentions of various things in the user turn increases or decreases each variable\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparry/","tags":null,"title":"PARRY"},{"categories":null,"contents":"Differential Equations in more than one independent variable:\n\\begin{equation} f(x_1, \\dots, x_{n}) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e in more than one independent variable:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x_1, \\dots, x_{n})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpartial_differential_equations/","tags":null,"title":"Partial Differential Equation"},{"categories":null,"contents":"Partially Observable Markov Decision Process is a with .\nComponents:\nstates actions (given state) transition function (given state and actions) reward function Belief System beliefs observations observation model \\(O(o|a,s\u0026rsquo;)\\) As always we desire to find a \\(\\pi\\) such that we can:\n\\begin{equation} \\underset{\\pi \\in \\Pi}{\\text{maximize}}\\ \\mathbb{E} \\qty[ \\sum_{t=0}^{\\infty} \\gamma^{t} R(b_{t}, \\pi(b_{t}))] \\end{equation}\nwhereby our \\(\\pi\\) instead of taking in a state for input takes in a belief (over possible states) as input.\nobservation and states \u0026ldquo;where are we, and how sure are we about that?\u0026rdquo;\nbeliefs and filters\npolicy representations \u0026ldquo;how do we represent a policy\u0026rdquo;\na tree: conditional plan a graph: with utility: + just take the top action of the conditional plan the alpha-vector was computed from policy evaluations \u0026ldquo;how good is our policy / what\u0026rsquo;s the utility?\u0026rdquo;\nconditional plan evaluation policy solutions \u0026ldquo;how do we make that policy better?\u0026rdquo;\nexact solutions optimal value function for POMDP POMDP value-iteration approximate solutions estimate an , and then use a policy representation: 
upper-bounds for s lower-bounds for s online solutions Online POMDP Methods\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePartially Observable Markov Decision Process\u003c/a\u003e is a with .\u003c/p\u003e\n\u003cp\u003eComponents:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cul\u003e\n\u003cli\u003estates\u003c/li\u003e\n\u003cli\u003eactions (given state)\u003c/li\u003e\n\u003cli\u003etransition function (given state and actions)\u003c/li\u003e\n\u003cli\u003ereward function\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBelief System\n\u003cul\u003e\n\u003cli\u003ebeliefs\u003c/li\u003e\n\u003cli\u003eobservations\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#observation-model\"\u003eobservation model\u003c/a\u003e \\(O(o|a,s\u0026rsquo;)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs always we desire to find a \\(\\pi\\) such that we can:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\underset{\\pi \\in \\Pi}{\\text{maximize}}\\ \\mathbb{E} \\qty[ \\sum_{t=0}^{\\infty} \\gamma^{t} R(b_{t}, \\pi(b_{t}))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby our \\(\\pi\\) instead of taking in a state for input takes in a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e (over possible states) as input.\u003c/p\u003e\n\u003ch2 id=\"observation-and-states\"\u003eobservation and states\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;where are we, and how sure are we about that?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebeliefs\u003c/a\u003e and \u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"policy-representations\"\u003epolicy representations\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;how do we represent a policy\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ea tree: \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional 
plan\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ea graph:\u003c/li\u003e\n\u003cli\u003ewith utility: +\u003c/li\u003e\n\u003cli\u003e\n\u003cul\u003e\n\u003cli\u003ejust take the top action of the conditional plan the alpha-vector was computed from\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"policy-evaluations\"\u003epolicy evaluations\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;how good is our policy / what\u0026rsquo;s the utility?\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"policy-solutions\"\u003epolicy solutions\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;how do we make that policy better?\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"exact-solutions\"\u003eexact solutions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/#id-9ccda204-0967-44c8-a801-c92d0df154b5-optimal-value-function-for-id-130d5294-0274-422b-b395-7d6f7f75be7d-pomdp\"\u003eoptimal value function for POMDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/#pomdp--kbhpartially-observable-markov-decision-process-dot-md--value-iteration\"\u003ePOMDP value-iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"approximate-solutions\"\u003eapproximate solutions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eestimate an , and then use a policy representation:\n\u003cul\u003e\n\u003cli\u003e\n\u003ch2 id=\"upper-bounds-for-s\"\u003eupper-bounds for s\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003ch2 id=\"lower-bounds-for-s\"\u003elower-bounds for 
s\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"online-solutions\"\u003eonline solutions\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhonline_pomdp_methods/\"\u003eOnline POMDP Methods\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpartially_observable_markov_decision_process/","tags":null,"title":"Partially Observable Markov Decision Process"},{"categories":null,"contents":"A markov game with State Uncertainty solved using POMDPs.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmarkov_game/\"\u003emarkov game\u003c/a\u003e with \u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e solved using \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpartially_observable_markov_game/","tags":null,"title":"partially observable markov game"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2020.605317\nOne-Liner An excercize scheme has had some measured effect on theta/alpha ratio and Brain wave frequency on AD patients; prognosis of AD not controlled for.\nNovelty Leveraged physical training scheme and measured EEG effects by quantifying theta/alpha ratio Notable Methods Used theta/alpha ratio as assay for improvement, and found the exercise scheme did so p\u0026lt;0.05 Only tested patients with AD w/o a control for stage Key Figs Figure 1 This figure tells us th N number of participants through the study\nFigure 2 This figure shows us that the excercize intervention has statistically significant results to both Brain Oscillation frequency and Theta/Alpha ratio. The x-axis shows us the pre-and-post bars for TG (treatment) and CG (control); the y-axis quantifies the value measured in a box plot. 
The subplots are brain oscelation and theta/alpha ratio respectively.\nNew Concepts theta/alpha ratio Notes ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2020.605317\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eAn excercize scheme has had some measured effect on \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e and Brain wave frequency on AD patients; prognosis of AD not controlled for.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLeveraged physical training scheme and measured EEG effects by quantifying \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsed \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e as assay for improvement, and found the exercise scheme did so p\u0026lt;0.05\u003c/li\u003e\n\u003cli\u003eOnly tested patients with AD w/o a control for stage\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-1\"\u003eFigure 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-13-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us th N number of participants through the study\u003c/p\u003e\n\u003ch3 id=\"figure-2\"\u003eFigure 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-14-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure shows us that the excercize intervention has \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003estatistically significant\u003c/a\u003e results to both Brain Oscillation frequency and Theta/Alpha ratio. The x-axis shows us the pre-and-post bars for TG (treatment) and CG (control); the y-axis quantifies the value measured in a box plot. 
The subplots are brain oscelation and \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e respectively.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparvin_2020/","tags":["ntj"],"title":"Parvin 2020"},{"categories":null,"contents":"Patient Scoring Systems How do we score the status of a patient? Well, we can begin by having a chart\u0026mdash;SpO2, can breath, etc. etc.\nDrawbacks:\nthese systems are quite generic not very representative of some information Method MIMIC-IV 6000 ICU patient stays, 48994 vital signs\u0026mdash;measuring across patient stays dynamic time warping to create a similar matrix clustering post-hoc to correlate patients together ","html":"\u003ch2 id=\"patient-scoring-systems\"\u003ePatient Scoring Systems\u003c/h2\u003e\n\u003cp\u003eHow do we score the status of a patient? Well, we can begin by having a chart\u0026mdash;SpO2, can breath, etc. 
etc.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eDrawbacks\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethese systems are quite generic\u003c/li\u003e\n\u003cli\u003enot very representative of some information\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"method\"\u003eMethod\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eMIMIC-IV 6000 ICU patient stays, 48994 vital signs\u0026mdash;measuring across patient stays\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edynamic time warping to create a similar matrix\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eclustering post-hoc to correlate patients together\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpatient_risk_prediction/","tags":null,"title":"Patient Risk Prediction"},{"categories":null,"contents":" No Demo Day TODO Email need statement template Needfinding Not all patients want to be treated the same way Attitudes towards heathcare system Fostering strong interaction; facilitate interaction Problem: patients have attitudes that physicians can\u0026rsquo;t effectively communicate.\nAction item: interview doctors and patients\nNeed two need statement.\n","html":"\u003cul\u003e\n\u003cli\u003eNo Demo Day\u003c/li\u003e\n\u003cli\u003eTODO Email need statement template\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"needfinding\"\u003eNeedfinding\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNot all patients want to be treated the same way\u003c/li\u003e\n\u003cli\u003eAttitudes towards heathcare system\u003c/li\u003e\n\u003cli\u003eFostering strong interaction; facilitate interaction\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eProblem: patients have \u003cstrong\u003eattitudes\u003c/strong\u003e that physicians can\u0026rsquo;t effectively communicate.\u003c/p\u003e\n\u003cp\u003eAction item: interview doctors \u003cstrong\u003e\u003cstrong\u003eand\u003c/strong\u003e\u003c/strong\u003e patients\u003c/p\u003e\n\u003cp\u003eNeed two need 
statement.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpcp_april_checkin/","tags":null,"title":"PCP April Checkin"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpeft/","tags":null,"title":"PEFT"},{"categories":null,"contents":"Memoryless policy search through fake determinism.\nuses a deterministic simulative function to calculate the value performs policy search by using normal standard optimizations Primary contribution: transforming stochastic POMDP to a deterministic simulative function; foregos alpha vectors.\nSuppose you have \\(m\\) initial states that you sampled, you can then just try to get the set of acions that maximize:\n\\begin{equation} \\arg\\max_{\\theta} \\tilde{V} = \\frac{1}{m} \\sum_{n}^{m} V_{\\theta}(s_{m}) \\end{equation}\nTo actually ensure that \\(V\\) has deterministic transitions\u0026hellip;\ndeterministic simulative function Typically, a generative model takes random actions from the action distribution. 
However, what we do is have a simulator which takes a RANDOM NUMBER as INPUT, and also the action distribution, and DETERMINISTICALLY give an action.\nPegasus procedure We augment the state:\n\\begin{equation} s \\in (S, \\mathbb{R}^{[0,1]}, \\mathbb{R}^{[0,1]}, \\dots) \\end{equation}\nmeaning every state is a state against a series of random numbers between \\(0\\) and \\(1\\):\n\\begin{equation} (s, 0.91, 0.22, \\dots) \\end{equation}\nat every transition, we eat up one of the random numbers to use, and take an action, and use those in our deterministic simulative function to obtain our next state.\ndeterminism The idea is that if we have sampled enough initial states, the correct action trajectory which maximizes the deterministic \\(\\tilde{V}\\) will also maximize that for the real \\(V\\).\n","html":"\u003cp\u003eMemoryless policy search through fake determinism.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003euses a \u003ca href=\"#deterministic-simulative-function\"\u003edeterministic simulative function\u003c/a\u003e to calculate the value\u003c/li\u003e\n\u003cli\u003eperforms policy search by using normal standard optimizations\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ePrimary contribution: transforming \u003cstrong\u003estochastic\u003c/strong\u003e \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e to a \u003ca href=\"#deterministic-simulative-function\"\u003edeterministic simulative function\u003c/a\u003e; foregos \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eSuppose you have \\(m\\) initial states that you sampled, you can then just try to get the set of acions that maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{\\theta} \\tilde{V} = \\frac{1}{m} \\sum_{n}^{m} V_{\\theta}(s_{m})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo actually ensure that \\(V\\) has deterministic transitions\u0026hellip;\u003c/p\u003e\n\u003ch2 
id=\"deterministic-simulative-function\"\u003edeterministic simulative function\u003c/h2\u003e\n\u003cp\u003eTypically, a \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e takes random actions from the action distribution. However, what we do is have a simulator which takes a \u003cstrong\u003eRANDOM NUMBER\u003c/strong\u003e as \u003cstrong\u003eINPUT\u003c/strong\u003e, and also the action distribution, and \u003cstrong\u003eDETERMINISTICALLY\u003c/strong\u003e give an action.\u003c/p\u003e\n\u003ch2 id=\"pegasus--kbhpegasus-dot-md--procedure\"\u003e\u003ca href=\"/posts/kbhpegasus/\"\u003ePegasus\u003c/a\u003e procedure\u003c/h2\u003e\n\u003cp\u003eWe augment the state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ns \\in (S, \\mathbb{R}^{[0,1]}, \\mathbb{R}^{[0,1]}, \\dots)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning every state is a state against a series of random numbers between \\(0\\) and \\(1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(s, 0.91, 0.22, \\dots)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eat every transition, we eat up one of the random numbers to use, and take an action, and use those in our \u003ca href=\"#deterministic-simulative-function\"\u003edeterministic simulative function\u003c/a\u003e to obtain our next state.\u003c/p\u003e\n\u003ch2 id=\"determinism\"\u003edeterminism\u003c/h2\u003e\n\u003cp\u003eThe idea is that if we have sampled enough initial states, the correct action trajectory which maximizes the deterministic \\(\\tilde{V}\\) will also maximize that for the real \\(V\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpegasus/","tags":null,"title":"Pegasus"},{"categories":null,"contents":"permits model is a counter for which there is \\(n\\) threads can do a task. For instance, there is \\(n\\) permits; each time it is requested, it needs to be subtracted.\nIdeally, we do this without busy waiting (while loops with lock and unlocks). 
So:\ncondition variable you can call wait on a condition variable, which will block until another thread calls notify_all.\nidentify a single event to wait/notify ensure that there is something to check to represent the event create a condition variable and share it identify who is the notifier, call notify_all when appropriate identify who will wait, and wait until condition variable triggers condition_variable_any permitsCV; // ... thread(ref(permitsCV)) Identify the ISOLATED event to notify: for instance, whenever permit goes from 0=\u0026gt;1, you notify. But, when permits go from 1=\u0026gt;2, there really isn\u0026rsquo;t really a need to notify. If you gave wait an unlocked lock, you UB.\nBut, implementing this is a little tricky: before sleeping on the condition variable, we have to release the underlying lock, but then values are not guaranteed after you unlock. So, the actual implementation:\npermits.lock(); while (permits == 0) { permitsCV.wait(permitsLock); } permits--; permitsLock.unlock(); the condition variable will\u0026hellip;\nstart sleeping FIRST unlock a lock FOR US AFTER the sleeping starts after waiting ends, tries to reaquire lock blocks until we have the lock again this ensures that you don\u0026rsquo;t have to give up the lock before sleeping.\nWe need a \u0026ldquo;while\u0026rdquo; loop here to CHECK whether or not, after sleeping is over, our locked variable needs to be checked again just in case another thread took it: just because we woke up it doesn\u0026rsquo;t mean the condition is true forever.\nWe also need a \u0026ldquo;while\u0026rdquo; loop because condition variables will send false wakeup signal, so we need to check the condition to be extra sure.\nso CALL CONDITION VARIABLES IN A WHILE LOOP.\nimplementation similar to mutexs\nwait should autonomically put the thread to sleep + unlock the given lock when the thread wakes up, it should reacquire the lock + return notify one/all notify_one: should wake up + unblock first 
waiting thread notify_all: should wake up/unblock all waiting threads if no one is waiting, do nothing.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpermits_model/\"\u003epermits model\u003c/a\u003e is a counter for which there is \\(n\\) threads can do a task. For instance, there is \\(n\\) permits; each time it is requested, it needs to be subtracted.\u003c/p\u003e\n\u003cp\u003eIdeally, we do this without busy waiting (while loops with lock and unlocks). So:\u003c/p\u003e\n\u003ch2 id=\"condition-variable\"\u003econdition variable\u003c/h2\u003e\n\u003cp\u003eyou can call \u003cstrong\u003ewait\u003c/strong\u003e on a condition variable, which will block until another thread calls \u003cstrong\u003enotify_all\u003c/strong\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eidentify a single event to wait/notify\u003c/li\u003e\n\u003cli\u003eensure that there is something to check to represent the event\u003c/li\u003e\n\u003cli\u003ecreate a condition variable and share it\u003c/li\u003e\n\u003cli\u003eidentify who is the notifier, call \u003cstrong\u003enotify_all\u003c/strong\u003e when appropriate\u003c/li\u003e\n\u003cli\u003eidentify who will wait, and \u003cstrong\u003ewait\u003c/strong\u003e until condition variable triggers\u003c/li\u003e\n\u003c/ol\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econdition_variable_any\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e// ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eref\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eIdentify the \u003cstrong\u003eISOLATED event\u003c/strong\u003e to notify: for instance, whenever permit goes from 0=\u0026gt;1, you notify. But, when permits go from 1=\u0026gt;2, there really isn\u0026rsquo;t really a need to notify. If you gave wait an unlocked lock, you UB.\u003c/p\u003e\n\u003cp\u003eBut, implementing this is a little tricky: before sleeping on the condition variable, we have to release the underlying lock, but then values are not guaranteed after you unlock. 
So, the actual implementation:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewait\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermitsLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e--\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermitsLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethe condition variable will\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart sleeping \u003cstrong\u003e\u003cstrong\u003eFIRST\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eunlock a lock FOR US \u003cstrong\u003e\u003cstrong\u003eAFTER\u003c/strong\u003e\u003c/strong\u003e the sleeping starts\u003c/li\u003e\n\u003cli\u003eafter waiting ends, tries to reaquire lock\u003c/li\u003e\n\u003cli\u003eblocks until we have the lock again\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethis ensures that you don\u0026rsquo;t have to give up the lock before sleeping.\u003c/p\u003e\n\u003cp\u003eWe need a \u0026ldquo;while\u0026rdquo; loop here to CHECK whether or not, after sleeping is over, our locked variable needs to be checked again just in case another thread took it: \u003cstrong\u003ejust because we woke up it doesn\u0026rsquo;t mean the condition is true forever\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eWe also need a \u0026ldquo;while\u0026rdquo; loop because condition variables will send \u003cstrong\u003efalse\u003c/strong\u003e wakeup signal, so we need to check the condition to be extra sure.\u003c/p\u003e\n\u003cp\u003eso \u003cstrong\u003e\u003cstrong\u003eCALL CONDITION VARIABLES IN A WHILE LOOP\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003cp\u003esimilar to \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003es\u003c/p\u003e\n\u003ch4 
id=\"wait\"\u003ewait\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003eshould autonomically put the thread to sleep + unlock the given lock\u003c/li\u003e\n\u003cli\u003ewhen the thread wakes up, it should reacquire the lock + return\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"notify-one-all\"\u003enotify one/all\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003enotify_one: should wake up + unblock first waiting thread\u003c/li\u003e\n\u003cli\u003enotify_all: should wake up/unblock all waiting threads\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eif no one is waiting, do nothing.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpermits_model/","tags":null,"title":"permits model"},{"categories":null,"contents":"permittivity of free space is a constant \\(\\epsilon_{0} \\approx 8.85 \\times 10^{-12} \\frac{C^{2}}{N \\cdot m^{2}}\\).\nredefinition of Coulomb\u0026rsquo;s Constant based on permittivity of free space \\begin{equation} k = \\frac{1}{4\\pi \\epsilon_{0}} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e is a constant \\(\\epsilon_{0} \\approx 8.85 \\times 10^{-12} \\frac{C^{2}}{N \\cdot m^{2}}\\).\u003c/p\u003e\n\u003ch2 id=\"redefinition-of-coulomb-s-constant--kbhcoulomb-s-law-dot-md--based-on-permittivity-of-free-space--kbhpermittivity-of-free-space-dot-md\"\u003eredefinition of \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eCoulomb\u0026rsquo;s Constant\u003c/a\u003e based on \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nk = \\frac{1}{4\\pi \\epsilon_{0}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpermittivity_of_free_space/","tags":null,"title":"permittivity of free space"},{"categories":null,"contents":"A permutation \\(\\pi\\) of some \\(\\{1,2,\u0026hellip;, n\\}\\) is a rearrangement of this list. 
There are \\(n!\\) different permutations of this set.\nA permutation is an ORDERED arrangement of objects.\npermutation with indistinct objects What if you want to order a set with sub-set of indistinct objects? Like, for instance, how many ways are there to order:\n\\begin{equation} 10100 \\end{equation}\nFor every permutation of \\(1\\) in this set, there are two copies being overcounted.\nLet there are \\(n\\) objects. \\(n_1\\) objects are the indistinct, \\(n_2\\) objects are indistinct, \u0026hellip; \\(n_{r}\\) objects are the same. The number of permutations are:\n\\begin{equation} \\frac{n!}{{n_1}!{n_2}! \\dots {n_r}!} \\end{equation}\nYou can use iterators to give you permutations.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e \\(\\pi\\) of some \\(\\{1,2,\u0026hellip;, n\\}\\) is a rearrangement of this list. There are \\(n!\\) different permutations of this set.\u003c/p\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e is an \u003cstrong\u003eORDERED\u003c/strong\u003e arrangement of objects.\u003c/p\u003e\n\u003ch2 id=\"permutation-with-indistinct-objects\"\u003epermutation with indistinct objects\u003c/h2\u003e\n\u003cp\u003eWhat if you want to order a set with sub-set of indistinct objects? Like, for instance, how many ways are there to order:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n10100\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor every permutation of \\(1\\) in this set, there are two copies being overcounted.\u003c/p\u003e\n\u003cp\u003eLet there are \\(n\\) objects. \\(n_1\\) objects are the indistinct, \\(n_2\\) objects are indistinct, \u0026hellip; \\(n_{r}\\) objects are the same. The number of permutations are:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{n!}{{n_1}!{n_2}! 
\\dots {n_r}!}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou can use iterators to give you permutations.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpermutation/","tags":null,"title":"permutation"},{"categories":null,"contents":"perplexity is a measure of a language model\u0026rsquo;s ability to predict words.\nIntuition A good language model should prefer \u0026ldquo;real\u0026rdquo; or otherwise \u0026ldquo;frequently observed\u0026rdquo; sentences. That is, it should assign lower probability to word salad.\nSo a good language model should assign a higher probability to the next word that actually occurs given a sequence of words.\nGenerally, we want the LM to assign high probability to the entire test set. However, big issue is that probability gets smaller by length of the text.\nTo address this, we normalize by number of words.\nExpression \\begin{equation} PP (W) = P(w_1 w_2 \\dots w_{n})^{-\\frac{1}{N}} \\end{equation}\nSpecifically,\n\\begin{equation} PP (W) = N \\sqrt{\\frac{1}{P(w_1, \\dots w_{n)}}} \\end{equation}\nNotably, perplexity is inverse of probability. We want the lowest entropy possible, i.e. the highest likelihood possible. Therefore, the range of perplexity is \\([1, \\infty]\\). We therefore want to minimize perplexity.\nBranching Factor perplexity could be also considered the \u0026ldquo;weighted average Branching Factor\u0026rdquo; of the language. 
That is, the average number of possible next words given each of the words.\nThe Branching Factor is the set of possible next words that can follow a given word.\nSampling Conditioned upon previous words or current n-gram, sample from the next possible word in the distribution.\nMeaning, we sample from the distribution of n-grams whose n-1 characters are known.\nSparsity Out-of-sample ngrams will never be counted, no matter how truly likely.\nAlso, it causes perplexity problems: because you can\u0026rsquo;t divide by 0, perplexity assumes that any sequence of words should have non zero likeliness.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e is a measure of a language model\u0026rsquo;s ability to predict words.\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eA good language model should prefer \u0026ldquo;real\u0026rdquo; or otherwise \u0026ldquo;frequently observed\u0026rdquo; sentences. That is, it should assign lower probability to word salad.\u003c/p\u003e\n\u003cp\u003eSo a good language model should assign a higher probability to the next word that actually occurs given a sequence of words.\u003c/p\u003e\n\u003cp\u003eGenerally, we want the LM to assign high probability to the entire test set. However, big issue is that \u003cstrong\u003eprobability gets smaller by length of the text\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTo address this, we normalize by number of words.\u003c/p\u003e\n\u003ch2 id=\"expression\"\u003eExpression\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nPP (W) = P(w_1 w_2 \\dots w_{n})^{-\\frac{1}{N}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSpecifically,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPP (W) = N \\sqrt{\\frac{1}{P(w_1, \\dots w_{n)}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotably, perplexity is inverse of probability. We want the lowest entropy possible, i.e. the highest likelihood possible. 
Therefore, the range of perplexity is \\([1, \\infty]\\). We therefore want to \u003cstrong\u003eminimize perplexity\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"branching-factor\"\u003eBranching Factor\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e could be also considered the \u0026ldquo;weighted average \u003ca href=\"#branching-factor\"\u003eBranching Factor\u003c/a\u003e\u0026rdquo; of the language. That is, the average number of possible next words given each of the words.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"#branching-factor\"\u003eBranching Factor\u003c/a\u003e is the set of possible next words that can follow a given word.\u003c/p\u003e\n\u003ch2 id=\"sampling\"\u003eSampling\u003c/h2\u003e\n\u003cp\u003eConditioned upon previous words or current n-gram, sample from the next possible word in the distribution.\u003c/p\u003e\n\u003cp\u003eMeaning, we sample from the distribution of n-grams whose n-1 characters are known.\u003c/p\u003e\n\u003ch3 id=\"sparsity\"\u003eSparsity\u003c/h3\u003e\n\u003cp\u003eOut-of-sample ngrams will never be counted, no matter how truly likely.\u003c/p\u003e\n\u003cp\u003eAlso, it causes perplexity problems: because you can\u0026rsquo;t divide by 0, perplexity assumes that any sequence of words should have non zero likeliness.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhperplexity/","tags":null,"title":"perplexity"},{"categories":null,"contents":"PET is a type of plastic.\n","html":"\u003cp\u003ePET is a type of plastic.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpet/","tags":null,"title":"PET"},{"categories":null,"contents":"Consider a family of bacterial:\n\\begin{equation} P\u0026rsquo; = 2P \\end{equation}\nthis is a normal exponential growth situation. However, we know this isn\u0026rsquo;t true. Because the nutrients in the petri dish has a finite amount of nutrients. 
Hopefully this rule succeeds when the population is small, and should stop when the growth is bounded.\nFor instance, say you can never have more than 100 bacteria:\n\\begin{equation} P\u0026rsquo; = 2P(100-P) \\end{equation}\nSee logistic equation for solution\n","html":"\u003cp\u003eConsider a family of bacterial:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP\u0026rsquo; = 2P\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is a normal exponential growth situation. However, we know this isn\u0026rsquo;t true. Because the nutrients in the petri dish has a finite amount of nutrients. Hopefully this rule succeeds when the population is small, and should stop when the growth is bounded.\u003c/p\u003e\n\u003cp\u003eFor instance, say you can never have more than 100 bacteria:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP\u0026rsquo; = 2P(100-P)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhlogistic_equations/\"\u003elogistic equation\u003c/a\u003e for solution\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpetri_dish/","tags":null,"title":"petri dish"},{"categories":null,"contents":"PGA extends controller gradient ascent to cover CPOMDPs\nNotation Recall from controller gradient ascent we have an objective which we will modify for CPOMDPs. For initial controller-states \\(\\beta\\) and utility \\(\\bold{u}_{\\theta}\\):\n\\begin{equation} \\max_{\\theta}\\ \\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta} \\end{equation}\nsubject to:\n\\(\\Psi\\) remains a probably distribution over \\(|A|\\) \\(\\eta\\) remains a probably distribution over \\(|X|\\) and, new for CPOMDP, \\(\\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} C_{i} \\leq \\epsilon_{i}\\ \\forall i\\), that is, each constraint \\(C_{i} \\in \\bold{C}_{i}\\) is satisfied to be lower than the budget \\(\\epsilon_{i}\\). 
where\n\\begin{equation} T_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta (x\u0026rsquo;|x,a,o) \\end{equation}\nand\n\\begin{equation} R_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a) \\end{equation}\nin which\n\\(X\\): a set of nodes (hidden, internal states) \\(\\Psi(a|x)\\): probability of taking an action \\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states Optimization Formulation we formulate policy parameters \\(\\theta\\) as a large stacked vector of the shape:\n\\begin{equation} \\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f)) \\end{equation}\nLet us define block diagonal matricies \\(J_{\\Psi}\\) and \\(J_{\\eta}\\), whereby:\n\\begin{equation} J_{\\Psi} = \\mqty(\\bold{1}_{\\Psi}^{\\top} \u0026amp; \\dots \u0026amp; \\bold{0} \\\\ \u0026amp; \\dots \u0026amp; \\\\ \\bold{0} \u0026amp; \\dots \u0026amp;\\bold{1}_{\\Psi}^{\\top} ) \\end{equation}\nwhere \\(J_{\\Psi} \\in \\mathbb{R}^{|X| \\times (|A| \\times |X|)}\\) and each block part \\(\\bold{1}_{\\Psi} \\in \\mathbb{R}^{|A|}\\) represents a one-vector of length of the action space. You can see how multiplying a vector \\(\\qty[\\Psi(a_0|x_0) \\\\ \\dots \\\\ \\Psi(a_n|x_N)]\\) against this matrix should yield a \\(1\\) vector if each \\(\\Psi(\\cdot | x_{i})\\) is a probability distribution.\nSimilar, we define, \\(J_{\\eta}\\) in a similar fashion to add the distributions over each \\(\\eta(\\cdot | x, a, o)\\).\nThis yields another block-block matrix\n\\begin{equation} J = \\mqty(J_{\\Psi} \u0026amp; 0 \\\\ 0 \u0026amp; J_{\\eta}) \\end{equation}\nfor which we desire \\(J\\theta = 1\\) in order to verify that the probability distributions are valid.\nLastly, let us define \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\). 
For ease of notation in constructing this result, we declare:\n\\begin{equation} f(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta} \\end{equation}\nand\n\\begin{equation} h_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i} \\end{equation}\nFinally, this allows us to formulate the problem as a nonlinear optimization problem:\n\\begin{align} \\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\ \\text{such that}\\ \u0026amp;J\\theta = 1 \\\\ \u0026amp; \\theta \\geq 0 \\\\ \u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{align}\nGradient Ascent Procedure Note that the initial state information \\(\\beta\\) is constant. Therefore, the gradient of the top expression against each field in \\(\\theta\\) becomes, via an rearrangement of the chain rule:\n\\begin{equation} \\pdv{f(\\theta)}{\\theta_{i}} = \\beta^{\\top} \\qty[\\bold{Z}^{-1} \\qty( \\pdv{\\bold{r}_{\\theta}}{\\theta_{i}} + \\pdv{\\bold{Z}}{\\theta_{i}} \\bold{Z}^{-1}\\bold{r}_{\\theta})] \\end{equation}\nThe derivatives of each \\(\\theta\\) against each \\(\\bold{r}\\) and \\(\\bold{Z}\\) is given on pp 485 of Alg4DM.\nAs with all gradient ascent cases, each \u0026ldquo;step\u0026rdquo; takes the rough form of\n\\begin{equation} \\xi = \\theta + \\alpha \\nabla_{\\theta} f(\\theta) \\end{equation}\nhowever, in this implementation, the step size isn\u0026rsquo;t actually fixed. Instead, we do\u0026hellip;\nGolden Section Line Search Instead of taking fixed-step sizes to get to the maxima, PGA proposes Golden Section Line Search as a line-search algorithm to dynamically choose the steps to get to maxima.\nLine search algorithms are typically computationally heavy as it requires evaluating the relative utility (i.e. 
here \\(\\bold{Z}^{-1} \\bold{r}_{\\theta}\\)) a lot of times, which is computationally intractable.\nSo, Golden Section Line Search uses a divide-and-conquer method via the golden ratio to address this issue.\ndef optimize!(CPOMDP, phi=golden_ratio, gamma=discount_factor, eps=minimum_boundary): # C-POMDP Spec f = CPOMDP.objective_function # this is f(theta) T = CPOMDP.transition_matrix R = CPOMDP.reward_vector b = CPOMDP.initial_state_vector # as obtained above nabla = f(theta).grad(theta) # initialize the search bounds based on splitting # search space (full step, no step) via the golden ratio a1, a2, a3, a4 = 0, (1-(1/phi)), (1/phi), 1 # calculate new policies and their utilities theta2, theta3 = theta + a2*nabla, theta + a3*nabla z2, z3 = (I-gamma*T@theta2).inverse(), (I-gamma*T@theta3).inverse() # search until the middle bounds converged while (a4-a1) \u0026lt; eps*(abs(a2) + abs(a3)): # calculate utility vectors over belief u2, u3 = z2@R, z3@R # either relax top or bottom bounds, depending on # which one we had been successfully maximizing if b.dot(u3) \u0026gt;= b.dot(u2): # search \u0026#34;upwards\u0026#34;, set bottom bound to a3 a1, a2, a3 = a2, a3, a2+(1/phi)*(a4-a2) theta3, theta2 = theta + a3*nabla, theta3 z3, z2 = (I-gamma*T@theta2).inverse(), z3 else: # search \u0026#34;downwards\u0026#34; a1, a3, a2 = a2, a2, a3-(1/phi)*(a3-a1) theta2, theta3 = theta + a2*nabla, theta2 z2, z3 = (I-gamma*T@theta2).inverse(), z2 # return the average of our converged results return 0.5*(theta2+theta3), 0.5*(u2+u3) Naive Projection Once we obtain a new set of parameters \\(\\xi\\) from Golden Section Line Search, we can\u0026rsquo;t actually directly punch it into \\(\\theta\\). 
This is because it is likely not going to satisfy the constraints that are given.\nWe can fix this naively with a non-linear programming formulation; that is, we desire to find the closest \\(\\theta\\) to the computed value \\(\\xi\\); we do this by minimizing a L2 norm (sum of squared errors):\n\\begin{align} \\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\ \\text{such that}\\ \u0026amp;J\\theta = 1 \\\\ \u0026amp; \\theta \\geq 0 \\\\ \u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{align}\nThis, for the most part, is computationally intractable and needs to be computed through each iteration. This is especially bad for the \\(h_{i}\\) for all \\(i\\) part. And so, instead of doing this, we formulate instead an approximate proxy objective.\nApproximate Projection The thing that makes the objective above hard is that \\(h_{i}\\) doesn\u0026rsquo;t have nice convex properties. To fix this, we perform a local linearizion of \\(h_{i}\\).\nSpecifically, let\u0026rsquo;s replace \\(h_{i}\\) with its local Taylor expansion.\nFor some step where we started at \\(\\theta_{k}\\), if you wanted to evaluate some next step \\(\\theta_{k+1}\\) from that step, we can write:\n\\begin{equation} h_{i}(\\theta_{k+1}) \\approx h_{i}(\\theta_{k}) + (\\nabla_{\\theta}(\\theta_{0}))(\\theta_{k+1}-\\theta_{k}) \\end{equation}\nUsing this linear decomposition of three parts (i.e. 
parameter difference from original, the gradient of \\(h\\) against the parameter, and the original value of \\(h\\)), we can now split the \\(h_{i}(\\theta)\\) constraint of the non-linear program into a linear decomposition.\nLet\u0026rsquo;s define:\n\\begin{equation} \\nabla_{\\theta} \\bold{h}(\\theta) = \\mqty[ (\\nabla_{\\theta} h_{1}(\\theta))^{\\top} \\\\ \\dots \\\\ (\\nabla_{\\theta} h_{m}(\\theta))^{\\top}] \\end{equation}\nFrom which we write block matriix\n\\begin{equation} \\bold{A} = \\mqty[-\\bold{I}_{n} \\\\ \\nabla_{\\theta}\\bold{h}(\\theta_{k})] \\end{equation}\nwhere \\(\\bold{I}_{n} \\in \\mathbb{R}^{(|A| + |X|) \\times (|A| + |X|)}\\), and vector:\n\\begin{equation} \\bold{b} = \\mqty[\\bold{0}_{n} \\\\ \\epsilon - \\bold{h}(\\theta_{k}) + \\nabla\\bold{h}(\\theta_{k})\\theta_{k}] \\end{equation}\nThese definitions allow us to rewrite two of our objectives:\n\\begin{equation} \\begin{cases} \\theta \\geq 0 \\\\ h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{cases} \\end{equation}\nturning them instead into simply \\(\\bold{A}\\theta \\leq \\bold{b}\\). The top half of \\(\\bold{A}\\), \\(\\bold{B}\\) is responsible for making sure that all elements of \\(\\theta\\) is positive (specifically, to ensure the negative of the values is smaller than 0); the bottom half ensures that all of them satisfy the cost.\nThese definitions result in a linear formulation of two objectives of our original non-linear program:\n\\begin{align} \\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\ \\text{such that}\\ \u0026amp;J\\theta = 1 \\\\ \u0026amp; \\bold{A}\\theta \\leq \\bold{B} \\end{align}\nand we are done.\nQuick Tip Recall that we have to calculate the inverse of \\(\\bold{Z}\\) quite a lot throughout the computation of \\(h\\) and \\(f\\). 
For each policy parameter \\(\\theta\\), you can cache the value of \\(\\bold{Z}\\), L-U (upper-triangular/lower-triangular factored) and recombine them/invert them as needed to speed up computation. This ensures that you only calculate \\(\\bold{Z}\\) once per step.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e extends \u003ca href=\"/posts/kbhcontroller_gradient_ascent/\"\u003econtroller gradient ascent\u003c/a\u003e to cover \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDPs\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003cp\u003eRecall from \u003ca href=\"/posts/kbhcontroller_gradient_ascent/\"\u003econtroller gradient ascent\u003c/a\u003e we have an objective which we will modify for \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003es. For initial controller-states \\(\\beta\\) and utility \\(\\bold{u}_{\\theta}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{\\theta}\\ \\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esubject to:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\Psi\\) remains a probably distribution over \\(|A|\\)\u003c/li\u003e\n\u003cli\u003e\\(\\eta\\) remains a probably distribution over \\(|X|\\)\u003c/li\u003e\n\u003cli\u003eand, new for \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003e, \\(\\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} C_{i} \\leq \\epsilon_{i}\\ \\forall i\\), that is, each constraint \\(C_{i} \\in \\bold{C}_{i}\\) is satisfied to be lower than the budget \\(\\epsilon_{i}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta 
(x\u0026rsquo;|x,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein which\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\): a set of nodes (hidden, internal states)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(a|x)\\): probability of taking an action\u003c/li\u003e\n\u003cli\u003e\\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"optimization-formulation\"\u003eOptimization Formulation\u003c/h2\u003e\n\u003cp\u003ewe formulate policy parameters \\(\\theta\\) as a large stacked vector of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us define block diagonal matricies \\(J_{\\Psi}\\) and \\(J_{\\eta}\\), whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ_{\\Psi} = \\mqty(\\bold{1}_{\\Psi}^{\\top} \u0026amp; \\dots \u0026amp; \\bold{0} \\\\ \u0026amp; \\dots \u0026amp; \\\\ \\bold{0} \u0026amp; \\dots \u0026amp;\\bold{1}_{\\Psi}^{\\top} )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(J_{\\Psi} \\in \\mathbb{R}^{|X| \\times (|A| \\times |X|)}\\) and each block part \\(\\bold{1}_{\\Psi} \\in \\mathbb{R}^{|A|}\\) represents a one-vector of length of the action space. 
You can see how multiplying a vector \\(\\qty[\\Psi(a_0|x_0) \\\\ \\dots \\\\ \\Psi(a_n|x_N)]\\) against this matrix should yield a \\(1\\) vector if each \\(\\Psi(\\cdot | x_{i})\\) is a probability distribution.\u003c/p\u003e\n\u003cp\u003eSimilar, we define, \\(J_{\\eta}\\) in a similar fashion to add the distributions over each \\(\\eta(\\cdot | x, a, o)\\).\u003c/p\u003e\n\u003cp\u003eThis yields another block-block matrix\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ = \\mqty(J_{\\Psi} \u0026amp; 0 \\\\ 0 \u0026amp; J_{\\eta})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor which we desire \\(J\\theta = 1\\) in order to verify that the probability distributions are valid.\u003c/p\u003e\n\u003cp\u003eLastly, let us define \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\). For ease of notation in constructing this result, we declare:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, this allows us to formulate the problem as a nonlinear optimization problem:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\\n\\text{such that}\\ \u0026amp;J\\theta = 1 \\\\\n\u0026amp; \\theta \\geq 0 \\\\\n\u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"gradient-ascent-procedure\"\u003eGradient Ascent Procedure\u003c/h2\u003e\n\u003cp\u003eNote that the initial state information \\(\\beta\\) is constant. 
Therefore, the gradient of the top expression against each field in \\(\\theta\\) becomes, via an rearrangement of the chain rule:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{f(\\theta)}{\\theta_{i}} = \\beta^{\\top} \\qty[\\bold{Z}^{-1} \\qty( \\pdv{\\bold{r}_{\\theta}}{\\theta_{i}} + \\pdv{\\bold{Z}}{\\theta_{i}} \\bold{Z}^{-1}\\bold{r}_{\\theta})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe derivatives of each \\(\\theta\\) against each \\(\\bold{r}\\) and \\(\\bold{Z}\\) is given on pp 485 of Alg4DM.\u003c/p\u003e\n\u003cp\u003eAs with all gradient ascent cases, each \u0026ldquo;step\u0026rdquo; takes the rough form of\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\xi = \\theta + \\alpha \\nabla_{\\theta} f(\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehowever, in this implementation, the step size isn\u0026rsquo;t actually fixed. Instead, we do\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"golden-section-line-search\"\u003eGolden Section Line Search\u003c/h3\u003e\n\u003cp\u003eInstead of taking fixed-step sizes to get to the maxima, \u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e proposes \u003ca href=\"#golden-section-line-search\"\u003eGolden Section Line Search\u003c/a\u003e as a line-search algorithm to dynamically choose the steps to get to maxima.\u003c/p\u003e\n\u003cp\u003eLine search algorithms are typically computationally heavy as it requires evaluating the relative utility (i.e. 
here \\(\\bold{Z}^{-1} \\bold{r}_{\\theta}\\)) a lot of times, which is computationally intractable.\u003c/p\u003e\n\u003cp\u003eSo, \u003ca href=\"#golden-section-line-search\"\u003eGolden Section Line Search\u003c/a\u003e uses a divide-and-conquer method via the golden ratio to address this issue.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eoptimize\u003c/span\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egolden_ratio\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediscount_factor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eeps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eminimum_boundary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75715e\"\u003e# C-POMDP Spec\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eobjective_function\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is f(theta)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etransition_matrix\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereward_vector\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einitial_state_vector\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# as obtained 
above\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egrad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initialize the search bounds based on splitting\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search space (full step, no step) via the golden ratio\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new policies and their utilities\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search until the middle bounds converged\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eeps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate utility vectors over belief\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eu2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@R\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@R\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# either relax top or bottom bounds, depending on\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# which one we had been successfully maximizing\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search \u0026#34;upwards\u0026#34;, set bottom bound to a3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search \u0026#34;downwards\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# return the average of our converged results\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"naive-projection\"\u003eNaive Projection\u003c/h3\u003e\n\u003cp\u003eOnce we obtain a new set of parameters \\(\\xi\\) from \u003ca href=\"#golden-section-line-search\"\u003eGolden Section Line Search\u003c/a\u003e, we can\u0026rsquo;t actually directly punch it into \\(\\theta\\). 
This is because it is likely not going to satisfy the constraints that are given.\u003c/p\u003e\n\u003cp\u003eWe can fix this naively with a non-linear programming formulation; that is, we desire to find the closest \\(\\theta\\) to the computed value \\(\\xi\\); we do this by minimizing a L2 norm (sum of squared errors):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\\n\\text{such that}\\ \u0026amp;J\\theta = 1 \\\\\n\u0026amp; \\theta \\geq 0 \\\\\n\u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis, for the most part, is computationally intractable and needs to be computed through each iteration. This is especially bad for the \\(h_{i}\\) for all \\(i\\) part. And so, instead of doing this, we formulate instead an approximate proxy objective.\u003c/p\u003e\n\u003ch3 id=\"approximate-projection\"\u003eApproximate Projection\u003c/h3\u003e\n\u003cp\u003eThe thing that makes the objective above hard is that \\(h_{i}\\) doesn\u0026rsquo;t have nice convex properties. To fix this, we perform a local linearizion of \\(h_{i}\\).\u003c/p\u003e\n\u003cp\u003eSpecifically, let\u0026rsquo;s replace \\(h_{i}\\) with its local Taylor expansion.\u003c/p\u003e\n\u003cp\u003eFor some step where we started at \\(\\theta_{k}\\), if you wanted to evaluate some next step \\(\\theta_{k+1}\\) from that step, we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta_{k+1}) \\approx h_{i}(\\theta_{k}) + (\\nabla_{\\theta}(\\theta_{0}))(\\theta_{k+1}-\\theta_{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eUsing this linear decomposition of three parts (i.e. 
parameter difference from original, the gradient of \\(h\\) against the parameter, and the original value of \\(h\\)), we can now split the \\(h_{i}(\\theta)\\) constraint of the non-linear program into a linear decomposition.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} \\bold{h}(\\theta) = \\mqty[ (\\nabla_{\\theta} h_{1}(\\theta))^{\\top} \\\\ \\dots \\\\ (\\nabla_{\\theta} h_{m}(\\theta))^{\\top}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFrom which we write block matriix\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{A} = \\mqty[-\\bold{I}_{n} \\\\ \\nabla_{\\theta}\\bold{h}(\\theta_{k})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bold{I}_{n} \\in \\mathbb{R}^{(|A| + |X|) \\times (|A| + |X|)}\\), and vector:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{b} = \\mqty[\\bold{0}_{n} \\\\ \\epsilon - \\bold{h}(\\theta_{k}) + \\nabla\\bold{h}(\\theta_{k})\\theta_{k}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThese definitions allow us to rewrite two of our objectives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\theta \\geq 0 \\\\\nh_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eturning them instead into simply \\(\\bold{A}\\theta \\leq \\bold{b}\\). 
The top half of \\(\\bold{A}\\), \\(\\bold{B}\\) is responsible for making sure that all elements of \\(\\theta\\) is positive (specifically, to ensure the negative of the values is smaller than 0); the bottom half ensures that all of them satisfy the cost.\u003c/p\u003e\n\u003cp\u003eThese definitions result in a linear formulation of two objectives of our original non-linear program:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\\n\\text{such that}\\ \u0026amp;J\\theta = 1 \\\\\n\u0026amp; \\bold{A}\\theta \\leq \\bold{B}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand we are done.\u003c/p\u003e\n\u003ch3 id=\"quick-tip\"\u003eQuick Tip\u003c/h3\u003e\n\u003cp\u003eRecall that we have to calculate the inverse of \\(\\bold{Z}\\) quite a lot throughout the computation of \\(h\\) and \\(f\\). For each policy parameter \\(\\theta\\), you can cache the value of \\(\\bold{Z}\\), L-U (\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e/lower-triangular factored) and recombine them/invert them as needed to speed up computation. 
This ensures that you only calculate \\(\\bold{Z}\\) once per step.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpga/","tags":null,"title":"PGA"},{"categories":null,"contents":"\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nfor autonomous ODEs, we can plot a phase line\nbecause autonomouse ODEs, we can plot such a line whereby we can analyze the direction of a solution function\u0026rsquo;s travel\na particle\u0026rsquo;s one-way motion must converge to a stationary value, or \\(\\pm \\infty\\), as \\(t\\) increases\n","html":"\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor autonomous ODEs, we can plot a \u003ca href=\"/posts/kbhphase_line/\"\u003ephase line\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-15_11-35-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ebecause autonomouse ODEs, we can plot such a line whereby we can analyze the direction of a solution function\u0026rsquo;s travel\u003c/p\u003e\n\u003cp\u003ea particle\u0026rsquo;s one-way motion must converge to a stationary value, or \\(\\pm \\infty\\), as \\(t\\) increases\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhphase_line/","tags":null,"title":"phase line"},{"categories":null,"contents":"We will leverage atoms as qubits. So, how do we isolate a qubit from an atom? We will leverage electrons.\nWe will select the lowest energy state as the base state; as there maybe multiple ground states, we will choose \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\) from two of the states.\n","html":"\u003cp\u003eWe will leverage \u003ca href=\"/posts/kbhatoms_as_qubits/\"\u003eatoms as qubits\u003c/a\u003e. So, how do we isolate a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e from an atom? 
We will leverage electrons.\u003c/p\u003e\n\u003cp\u003eWe will select the lowest energy state as the base state; as there maybe multiple ground states, we will choose \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\) from two of the states.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhphysical_qubits/","tags":null,"title":"physical qubits"},{"categories":null,"contents":"physics is the act of explaining what we see in terms of solving for the \u0026ldquo;unseen\u0026rdquo;. For an explanation to be good, it needs to be testable.\nHow exactly does physics work? \u0026ldquo;classical results\u0026rdquo;\nNewton\u0026rsquo;s laws Maxwell\u0026rsquo;s equations General relativity \u0026ldquo;quantum theory\u0026rdquo;\nA new model that actually allows particle inference.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e is the act of explaining what we see in terms of solving for the \u0026ldquo;unseen\u0026rdquo;. For an explanation to be good, it needs to be testable.\u003c/p\u003e\n\u003ch2 id=\"how-exactly-does-physics-work\"\u003eHow exactly does physics work?\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;classical results\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eNewton\u0026rsquo;s laws\u003c/li\u003e\n\u003cli\u003eMaxwell\u0026rsquo;s equations\u003c/li\u003e\n\u003cli\u003eGeneral relativity\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;quantum theory\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eA new model that actually allows particle inference.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhphysics/","tags":null,"title":"physics"},{"categories":null,"contents":"(Pineau, Gordon, and Thrun 2006)\nPBVI\nOne-Liner \u0026ldquo;If we can avoid the curse of history, the curse of dimensionality wouldn\u0026rsquo;t be a problem\u0026rdquo;.\nBasically - most POMDP problems don\u0026rsquo;t reach much of the belief simplex. 
So, can we concetrate planning on more probable beliefs.\nNovelty trajectory based approach to select beliefs belief set is fixed through layers: each backup results in the same number of layers Notable Methods PBVI\nKey Figs New Concepts Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003ePineau, Gordon, and Thrun 2006\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;If we can avoid the curse of history, the curse of dimensionality wouldn\u0026rsquo;t be a problem\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eBasically - most POMDP problems don\u0026rsquo;t reach much of the belief simplex. So, can we concetrate planning on more probable beliefs.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etrajectory based approach to select beliefs\u003c/li\u003e\n\u003cli\u003ebelief set is \u003cstrong\u003efixed\u003c/strong\u003e through layers: each backup results in the same number of layers\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpineau_2006/","tags":null,"title":"Pineau 2006"},{"categories":null,"contents":"pipe chains the STDOUT of one command and put it to the STDIN of another command. 
Typically, we want to do pipe per direction.\ncommand pipelines span two child processes create a pipe to allow the two processes to communicate connect the first child\u0026rsquo;s STDOUT to the pipe + the second child\u0026rsquo;s STDIN to the pipe pipe() pipe() gives us back two file descriptors, such that whatever is written to one can be read from another.\nInterface:\nint pipes[2]; // create the pipes int ret = pipe(pipes); // an so int read_from_here = ret[0]; int write_to_here = ret[1]; // i.e. ret[1] writes to =\u0026gt; ret[0] read // fork! pid_t pid_p = fork(); if(pid_p == 0) { // child subroutine // because child is READING, and not READINg // we want to close the write close(write_to_here); // we want to then make a buffer char buf[num_bytes]; // if the child reads before the parents write // it will block until some data is available // if the write ends are closed globally, read // will also stop. read(read_from_here, buffer, sizeof(buffer)); close(read_from_here); return 0; } // parent subroutine // because parent is WRITING and not READING // we don\u0026#39;t want the read to block, we will // close the parent immediately. close(read_from_here); // write some data write(write_to_here, \u0026#34;msg\u0026#34;, num_bytes); // close now we are done writing close(write_to_here); // clean up child waitpid(pid_p, NULL, 0); pipes have to be closed twice, and opened before the fork.\ndup2() dup2() lets you REWIRE fire descriptors:\ndup2(scrfd, destft); for instance:\ndup2(fds[0], STDIN_FILENO); close(fds[0]); copy the underlying open file pointer which fds[0] points to the FD STDIN, meaning STDIN will now refer to the underlying file of fds[0].\nstalling if you don\u0026rsquo;t close the right ends of your pipes, it STALLS. read() BLOCKS UNTIL ALL WRITES ARE CLOSED!\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e chains the STDOUT of one command and put it to the STDIN of another command. 
Typically, we want to do pipe per direction.\u003c/p\u003e\n\u003ch2 id=\"command-pipelines\"\u003ecommand pipelines\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003espan two child processes\u003c/li\u003e\n\u003cli\u003ecreate a pipe to allow the two processes to communicate\u003c/li\u003e\n\u003cli\u003econnect the first child\u0026rsquo;s STDOUT to the pipe + the second child\u0026rsquo;s STDIN to the pipe\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"pipe\"\u003epipe()\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#pipe\"\u003epipe()\u003c/a\u003e gives us back two \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003es, such that whatever is written to one can be read from another.\u003c/p\u003e\n\u003cp\u003eInterface:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epipes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// create the pipes\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003epipe\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epipes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// an so\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// i.e. 
ret[1] writes to =\u0026gt; ret[0] read\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// fork!\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// child subroutine\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// because child is READING, and not READINg\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan 
style=\"color:#75715e\"\u003e// we want to close the write\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we want to then make a buffer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enum_bytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if the child reads before the parents write\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// it will block until some data is available\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// if the write ends are closed globally, read\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// will also stop.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// parent 
subroutine\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// because parent is WRITING and not READING\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// we don\u0026#39;t want the read to block, we will\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// close the parent immediately.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// write some data\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;msg\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum_bytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// close now we are done writing\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// clean up child\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003es have to be closed twice, and opened before the fork.\u003c/p\u003e\n\u003ch2 id=\"dup2\"\u003edup2()\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#dup2\"\u003edup2()\u003c/a\u003e lets you \u003cstrong\u003eREWIRE\u003c/strong\u003e fire descriptors:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003edup2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escrfd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edestft\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003efor instance:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003edup2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSTDIN_FILENO\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ecopy the underlying open file pointer which 
\u003ccode\u003efds[0]\u003c/code\u003e points to the FD STDIN, meaning STDIN will now refer to the underlying file of \u003ccode\u003efds[0]\u003c/code\u003e.\u003c/p\u003e\n\u003ch2 id=\"stalling\"\u003estalling\u003c/h2\u003e\n\u003cp\u003eif you \u003cstrong\u003edon\u0026rsquo;t close the right ends of your pipes, it STALLS\u003c/strong\u003e. \u003ccode\u003eread()\u003c/code\u003e BLOCKS UNTIL ALL WRITES ARE CLOSED!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpipe/","tags":null,"title":"pipe"},{"categories":null,"contents":"User Story Sejin is the executive administrative assistant at Nueva, working on scheduling Liza, Lee, Terry, and the other admins against the members of the wider community. Sejin spends most of her day scheduling people, of which, the largest time drawn is spent convincing people to move their schedules \u0026ldquo;in favor\u0026rdquo; of that of another person (i.e. manually). The reason why this is done is because her approach to scheduling is one-shot: emailing everybody for general availability, noting in her mind who the high-priority attendees are (say, Liza), and if no times match asking/convincing those in lower priority to move their schedules. Although she enjoys the process of putting events together, she is particularly frustrated that, due to the busy schedules and often back-and-forth emails needed to get and conform everyone\u0026rsquo;s schedule, response rates to complicated scheduling problems are low.\nSejin, during a main brunt of her job of scheduling inter or intra-admin meetings, need a solution to schedule many executives at once with attention to their priority/authority/importance to a meeting as well as the possible fluidity of their schedules. There is an inherit fluidity to scheduling as a master-planner of a few admin\u0026rsquo;s schedules: in that, if needed, she has authority to move entire meetings as long as they are swapped for equivalent times of availability. 
Hence, a previously bad time may suddenly become available if enough scheduling conflicts is generated, thereby creating the incentive for swapping another meeting away for the one being scheduled and rescheduling other attendees of lower priority.\nCurrent scheduling software does not account for either types of fluidity. Tools like Doodle/When2Meet can accommodate for inherent \u0026ldquo;priority\u0026rdquo;\u0026mdash;with Sejin choosing the time-slot that would have the most, highest priority individuals scheduled\u0026mdash;but are one-shot planning tools which do not provide space for swapping entire meetings out to make scheduling work better. Other tools like Calendly or simple iCal does not provide any semblance of priority or \u0026ldquo;multi-possibility\u0026rdquo; for meetings, though does indeed provide the time-blocking capability to swap two meetings at will. These problems result in Sejin needing to just create large email chains to resolve scheduling problems. Also, no scheduling tools provide an opportunity to manually \u0026ldquo;convince\u0026rdquo; or request someone to make time due to the constraints that presented during first-round scheduling. Lastly, scheduling software does not space-block. Sometimes there is a physical capacity/de-duplication limit to spaces, which cannot be accounted for.\nOnce the initial scheduling and emailing processes are automated, Sejin can spend more time focusing on what she actually enjoys: thinking about the process of an event and its details. Schedule can now be an afterthought, an event which happens in the background which is eventually reported to her on the online portal as she is planning the details of the event.\nProposal Fundamentally, this is a fractional knapsack problem. 
\u0026ldquo;How do we maximize the maximum amount of attendance of maximum amounts of important people?\u0026rdquo;\nFrom a target market, I think a good target would be medium organization assistants: Sejin\u0026rsquo;s concerns really only become a problem when you are scheduling for a one (or few) vs. many situation where there is a stable group of people you are scheduling for, who wants to meet with each other or other people outside.\nAs far as UX, this tool should not require log-in except for the master planner (i.e. our user.) Participants in meeting should be able to freely enter their schedules or create evergreen accounts to manage their scheduling. (This is not as well thought out at the moment.)\nLastly, for the tech stack, I don\u0026rsquo;t think I have the ability to finish the entire stack by myself. From a MVP perspective (if we are trying to satisfy all needs), there needs to be a system optimizing a constantly shifting fractional knapsack, a way to put and store availability information, and a way to automate the requesting/convincing of scheduling change (e.g.. \u0026ldquo;MyApp Notification! Liza is not available at the one time you selected, but everyone else is available at this different time. Can you make this time? Y/N\u0026rdquo;) . Ideally, we would also send iCal invites in the end.\n","html":"\u003ch2 id=\"user-story\"\u003eUser Story\u003c/h2\u003e\n\u003cp\u003eSejin is the executive administrative assistant at Nueva, working on scheduling Liza, Lee, Terry, and the other admins against the members of the wider community. Sejin spends most of her day scheduling people, of which, the largest time drawn is spent convincing people to move their schedules \u0026ldquo;in favor\u0026rdquo; of that of another person (i.e. manually). 
The reason why this is done is because her approach to scheduling is one-shot: emailing everybody for general availability, noting in her mind who the high-priority attendees are (say, Liza), and if no times match asking/convincing those in lower priority to move their schedules. Although she enjoys the process of putting events together, she is particularly frustrated that, due to the busy schedules and often back-and-forth emails needed to get and conform everyone\u0026rsquo;s schedule, response rates to complicated scheduling problems are low.\u003c/p\u003e\n\u003cp\u003eSejin, during a main brunt of her job of scheduling inter or intra-admin meetings, need a solution to schedule many executives at once with attention to their priority/authority/importance to a meeting as well as the possible fluidity of their schedules. There is an inherit fluidity to scheduling as a master-planner of a few admin\u0026rsquo;s schedules: in that, if needed, she has authority to move entire meetings as long as they are swapped for equivalent times of availability. Hence, a previously bad time may suddenly become available if enough scheduling conflicts is generated, thereby creating the incentive for swapping another meeting away for the one being scheduled and rescheduling other attendees of lower priority.\u003c/p\u003e\n\u003cp\u003eCurrent scheduling software does not account for either types of fluidity. Tools like Doodle/When2Meet can accommodate for inherent \u0026ldquo;priority\u0026rdquo;\u0026mdash;with Sejin choosing the time-slot that would have the most, highest priority individuals scheduled\u0026mdash;but are one-shot planning tools which do not provide space for swapping entire meetings out to make scheduling work better. Other tools like Calendly or simple iCal does not provide any semblance of priority or \u0026ldquo;multi-possibility\u0026rdquo; for meetings, though does indeed provide the time-blocking capability to swap two meetings at will. 
These problems result in Sejin needing to just create large email chains to resolve scheduling problems. Also, no scheduling tools provide an opportunity to manually \u0026ldquo;convince\u0026rdquo; or request someone to make time due to the constraints that presented during first-round scheduling. Lastly, scheduling software does not space-block. Sometimes there is a physical capacity/de-duplication limit to spaces, which cannot be accounted for.\u003c/p\u003e\n\u003cp\u003eOnce the initial scheduling and emailing processes are automated, Sejin can spend more time focusing on what she actually enjoys: thinking about the process of an event and its details. Schedule can now be an afterthought, an event which happens in the background which is eventually reported to her on the online portal as she is planning the details of the event.\u003c/p\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eFundamentally, this is a fractional knapsack problem. \u0026ldquo;How do we maximize the maximum amount of attendance of maximum amounts of important people?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eFrom a target market, I think a good target would be medium organization assistants: Sejin\u0026rsquo;s concerns really only become a problem when you are scheduling for a one (or few) vs. many situation where there is a stable group of people you are scheduling for, who wants to meet with each other or other people outside.\u003c/p\u003e\n\u003cp\u003eAs far as UX, this tool should not require log-in except for the master planner (i.e. our user.) Participants in meeting should be able to freely enter their schedules or create evergreen accounts to manage their scheduling. (This is not as well thought out at the moment.)\u003c/p\u003e\n\u003cp\u003eLastly, for the tech stack, I don\u0026rsquo;t think I have the ability to finish the entire stack by myself. 
From a MVP perspective (if we are trying to satisfy all needs), there needs to be a system optimizing a constantly shifting fractional knapsack, a way to put and store availability information, and a way to automate the requesting/convincing of scheduling change (e.g.. \u0026ldquo;MyApp Notification! Liza is not available at the one time you selected, but everyone else is available at this different time. Can you make this time? Y/N\u0026rdquo;) . Ideally, we would also send iCal invites in the end.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpitch_a_project/","tags":null,"title":"Pitch a Project"},{"categories":null,"contents":"The PSC is a supercomputing center.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhpsc/\"\u003ePSC\u003c/a\u003e is a supercomputing center.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpsc/","tags":null,"title":"Pittsburgh Supercomputing Center"},{"categories":null,"contents":"A PKM is a tool (like this one!) to help manage your nodes and knowledge.\nstoring AND processing; just storing is just PIM not PKM PKM = PIM (personal info management) + GTD + knowledge management goal: narrowing \u0026ldquo;flood\u0026rdquo; and focus on useful areas move from passive =\u0026gt; active consumption create, not just regurgitate PKM and Context store info based on context Strategy What are you trying to organize what are the inputs? email? lectures?: \u0026ldquo;reference\u0026rdquo; type emails lecture notes tasks? what are you trying to make? test? essays? a time blocking schedule studying for tests (i.e. for linear, etc.) what does your process look like? mishmash of topic based notes and content based notes what parts of it would you like to improve? article capture workflow: how to process random readings topic wise? knowledge capture: how to come across new content and capture them somewhere? todo/notes integration: task management and note taking currently lives separately, how to unify them? 
zettlekasten\nhow to take smart notes: arens progressive summarization progressive summarization is a technique in note taking that Tiago Forte developed to summarize text:\nlayer 0: reading layer 1: copy/pasted parts from the reading layer 2: bold relevant parts layer 3: highlight bold parts to get crux of ideas layer 4: mini summary layer 5: remix (add links, etc.) capture read rewrite and summarize engage by adding questions, thoughts, opinions, etc. connect old and new ideas think about the context of usage, not the topic how to think better reduce cognitive overload \u0026ldquo;offload info\u0026rdquo;\u0026mdash;put it on paper test yourself often: listening and understanding are not the same keep things simple: one task at a time keep an open mind + \u0026ldquo;collective\u0026rdquo; new perspectives and mental models new cycle capture curate cultivate connect create ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpkm/\"\u003ePKM\u003c/a\u003e is a tool (like this one!) to help manage your nodes and knowledge.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estoring AND processing; just storing is just PIM not PKM\u003c/li\u003e\n\u003cli\u003ePKM = PIM (personal info management) + GTD + knowledge management\u003c/li\u003e\n\u003cli\u003egoal: narrowing \u0026ldquo;flood\u0026rdquo; and focus on useful areas\u003c/li\u003e\n\u003cli\u003emove from passive =\u0026gt; active consumption\u003c/li\u003e\n\u003cli\u003ecreate, not just regurgitate\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pkm-and-context\"\u003ePKM and Context\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003estore info based on context\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"strategy\"\u003eStrategy\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhat are you trying to organize\u003c/li\u003e\n\u003cli\u003ewhat are the inputs?\n\u003cul\u003e\n\u003cli\u003eemail? 
lectures?:\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;reference\u0026rdquo; type emails\u003c/li\u003e\n\u003cli\u003electure notes\u003c/li\u003e\n\u003cli\u003etasks?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat are you trying to make?\n\u003cul\u003e\n\u003cli\u003etest? essays?\n\u003cul\u003e\n\u003cli\u003ea time blocking schedule\u003c/li\u003e\n\u003cli\u003estudying for tests (i.e. for linear, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat does your process look like?\n\u003cul\u003e\n\u003cli\u003emishmash of topic based notes and content based notes\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat parts of it would you like to improve?\n\u003cul\u003e\n\u003cli\u003earticle capture workflow: how to process random readings topic wise?\u003c/li\u003e\n\u003cli\u003eknowledge capture: how to come across new content and capture them somewhere?\u003c/li\u003e\n\u003cli\u003etodo/notes integration: task management and note taking currently lives separately, how to unify them?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehow to take smart notes: arens\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"progressive-summarization\"\u003eprogressive summarization\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#progressive-summarization\"\u003eprogressive summarization\u003c/a\u003e is a technique in note taking that \u003ca href=\"/posts/kbhtiago_forte/\"\u003eTiago Forte\u003c/a\u003e developed to summarize text:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003elayer 0: reading\u003c/li\u003e\n\u003cli\u003elayer 1: copy/pasted parts from the reading\u003c/li\u003e\n\u003cli\u003elayer 2: bold relevant parts\u003c/li\u003e\n\u003cli\u003elayer 3: highlight bold parts to get crux of 
ideas\u003c/li\u003e\n\u003cli\u003elayer 4: mini summary\u003c/li\u003e\n\u003cli\u003elayer 5: remix (add links, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"capture\"\u003ecapture\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eread\u003c/li\u003e\n\u003cli\u003erewrite and \u003cem\u003esummarize\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e\u003cem\u003eengage\u003c/em\u003e by adding \u003cstrong\u003equestions\u003c/strong\u003e, \u003cstrong\u003ethoughts\u003c/strong\u003e, \u003cstrong\u003eopinions\u003c/strong\u003e, etc.\u003c/li\u003e\n\u003cli\u003e\u003cem\u003econnect old and new ideas\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003ethink about the \u003cem\u003econtext\u003c/em\u003e of usage, not the topic\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-think-better\"\u003ehow to think better\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ereduce cognitive overload\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;offload info\u0026rdquo;\u0026mdash;put it on paper\u003c/li\u003e\n\u003cli\u003etest yourself often: listening and understanding are not the same\u003c/li\u003e\n\u003cli\u003ekeep things simple: one task at a time\u003c/li\u003e\n\u003cli\u003ekeep an open mind + \u0026ldquo;collective\u0026rdquo; new perspectives and mental models\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-cycle\"\u003enew cycle\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecapture\u003c/li\u003e\n\u003cli\u003ecurate\u003c/li\u003e\n\u003cli\u003ecultivate\u003c/li\u003e\n\u003cli\u003econnect\u003c/li\u003e\n\u003cli\u003ecreate\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpkm/","tags":null,"title":"PKM"},{"categories":null,"contents":"A decision making method using search on a model of the problem to be able tom make decisions.\ncreate a (usually deterministic, but for CS238 we care only about non-deterministic cases) model of the problem or a good approximation thereof use the model to plan for possible next actions to 
yield for a good solution contrast v. explicit programming explicit programming requires you to plan for the action\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003edecision making method\u003c/a\u003e using search on a model of the problem to be able tom make decisions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecreate a (usually deterministic, but for \u003ca href=\"/posts/kbhdecision_making_index/\"\u003eCS238\u003c/a\u003e we care only about non-deterministic cases) model of the problem or a good approximation thereof\u003c/li\u003e\n\u003cli\u003euse the model to plan for possible next actions to yield for a good solution\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"contrast-v-dot-explicit-programming\"\u003econtrast v. explicit programming\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e requires you to plan for the action\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhplanning/","tags":null,"title":"planning"},{"categories":null,"contents":" we start at an initial belief point we do a random Rollout to get to the next belief then collect\n","html":"\u003col\u003e\n\u003cli\u003ewe start at an initial \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e point\u003c/li\u003e\n\u003cli\u003ewe do a random \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e to get to the next \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethen collect\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpoint_selection/","tags":null,"title":"point selection"},{"categories":null,"contents":"we keep track of a bunch of alpha vectors and belief samples (which we get from point selection):\n\\begin{equation} \\Gamma = \\{\\alpha_{1}, \\dots, \\alpha_{m}\\} \\end{equation}\nand\n\\begin{equation} B = 
\\{b_1, \\dots, b_{m}\\} \\end{equation}\nTo preserve the lower-boundedness of these alpha vectors, one should seed the alpha vectors via something like blind lower bound\nWe can estimate our utility function at any belief by looking in the set for the most optimal:\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b \\end{equation}\nWe now define a function named backup (see PBVI Backup), and call it on all of our beliefs to generate a new set of alpha vectors:\n\\begin{equation} \\Gamma^{t+1} = \\{backup(\\Gamma, b) | b \\in B\\} \\end{equation}\nwhere:\n\\begin{equation} \\alpha \\leftarrow backup(\\Gamma, b) \\end{equation}\ntherefore we call backup on each \\(b\\).\nPBVI Backup backup procedure given \\(\\Gamma\\) and $b$\u0026mdash;\nwe want to mint a single new alpha vector by selecting the highest-valued one from the set of good alpha-vectors, one for each action:\n\\begin{equation} \\alpha = \\arg\\max_{\\alpha_{a}} \\alpha_{a}^{\\top} b \\end{equation}\nnow, we define each \\(\\alpha_{a}\\) as:\n\\begin{equation} \\alpha_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;,o}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a)\\alpha_{a,o} (s\u0026rsquo;) \\end{equation}\nwhere we obtain the old \\(\\alpha_{a,o}\\) by computing vector which currently provides the highest value estimate, which we compute over all actions and observations \\(a,o\\) given our \\(\\Gamma\\):\n\\begin{equation} \\alpha_{a,o} = \\arg\\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} update(b,a,o) \\end{equation}\nRandomized PBVI see Perseus\n","html":"\u003cp\u003ewe keep track of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es and \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e samples (which we get from \u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma = \\{\\alpha_{1}, \\dots, 
\\alpha_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = \\{b_1, \\dots, b_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo preserve the lower-boundedness of these \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, one should seed the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es via something like \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eWe can estimate our \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function at any belief by looking in the set for the most optimal:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now define a function named \u003ccode\u003ebackup\u003c/code\u003e (see \u003ca href=\"#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e), and call it on all of our \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es to generate a new set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma^{t+1} = \\{backup(\\Gamma, b) | b \\in B\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\leftarrow backup(\\Gamma, b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore we call backup on each \\(b\\).\u003c/p\u003e\n\u003ch2 id=\"pbvi-backup\"\u003ePBVI Backup\u003c/h2\u003e\n\u003cp\u003e\u003ccode\u003ebackup\u003c/code\u003e procedure given \\(\\Gamma\\) and $b$\u0026mdash;\u003c/p\u003e\n\u003cp\u003ewe want to mint a single new \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e by selecting the highest-valued one from the set of good alpha-vectors, one for each action:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha = \\arg\\max_{\\alpha_{a}} \\alpha_{a}^{\\top} 
b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, we define each \\(\\alpha_{a}\\) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;,o}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a)\\alpha_{a,o} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere we obtain the old \\(\\alpha_{a,o}\\) by computing vector which currently provides the highest value estimate, which we compute over all actions and observations \\(a,o\\) given our \\(\\Gamma\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{a,o} = \\arg\\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} update(b,a,o)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"randomized-pbvi--kbhpoint-based-value-iteration-dot-md\"\u003eRandomized \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhperseus/\"\u003ePerseus\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpoint_based_value_iteration/","tags":null,"title":"Point-Based Value Iteration"},{"categories":null,"contents":"A pointer is a variable which stores memory addresses. Because there are no pass-by reference, we use pointers to emulate pass by reference: by sharing addresses with other functions.\nA pointer can identify a single byte OR some large data structures. We can dynamically allocate pointers, and also identify memory generically without types.\nC is always pass-by-copy. Therefore, to pass-by-reference, you basically have to\nint x = 2; // declare object int *xptr = \u0026amp;x; // get location of object (\u0026amp;: address of) printf(\u0026#34;%d\\n\u0026#34;, *xptr); // dereference the pointer address operator You will note, in the line above:\nint *xptr = \u0026amp;x; uses an operator \u0026amp; to get the address of an object. 
That\u0026rsquo;s called an object operator.\npointer memory diagram void myFunct(int *intPtr) { *intPtr = 3; } int main() { int x = 2; myFunct(\u0026amp;x); } ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e is a variable which stores memory addresses. Because there are no pass-by reference, we use pointers to emulate pass by reference: by sharing addresses with other functions.\u003c/p\u003e\n\u003cp\u003eA pointer can identify a single \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003e OR some large data structures. We can dynamically allocate pointers, and also identify memory generically without types.\u003c/p\u003e\n\u003cp\u003eC is always pass-by-copy. Therefore, to pass-by-reference, you basically have to\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// declare object\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e 
\u003cspan style=\"color:#75715e\"\u003e// get location of object (\u0026amp;: address of)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;%d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// dereference the pointer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"address-operator\"\u003eaddress operator\u003c/h2\u003e\n\u003cp\u003eYou will note, in the line above:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003euses an operator 
\u003ccode\u003e\u0026amp;\u003c/code\u003e to get the address of an object. That\u0026rsquo;s called an \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e operator.\u003c/p\u003e\n\u003ch2 id=\"pointer--kbhpointer-dot-md--memory-diagram\"\u003e\u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e memory diagram\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-11_11-12-28_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emyFunct\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintPtr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintPtr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan 
style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003emyFunct\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhpointer/","tags":null,"title":"pointer"},{"categories":null,"contents":"Let\u0026rsquo;s say we want to know what is the chance of having an event occurring \\(k\\) times in a unit time, on average, this event happens at a rate of \\(\\lambda\\) per unit time.\n\u0026ldquo;What\u0026rsquo;s the probability that there are \\(k\\) earthquakes in the 1 year if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\nwhere:\nevents have to be independent probability of sucess in each trial doesn\u0026rsquo;t vary constituents $λ$\u0026mdash;count of events per time \\(X \\sim Poi(\\lambda)\\) requirements the probability mass function:\n\\begin{equation} P(X=k) = e^{-\\lambda} \\frac{\\lambda^{k}}{k!} \\end{equation}\nadditional information properties of poisson 
distribution expected value: \\(\\lambda\\) variance: \\(\\lambda\\) derivation We divide the event into infinitely small buckets and plug into a binomial distribution, to formulate the question:\n\u0026ldquo;what\u0026rsquo;s the probability of large \\(n\\) number samples getting \\(k\\) events with probability of \\(\\frac{\\lambda}{n}\\) of events\u0026rdquo;\n\\begin{equation} P(X=k) = \\lim_{n \\to \\infty} {n \\choose k} \\qty(\\frac{\\lambda}{n})^{k}\\qty(1- \\frac{\\lambda}{n})^{n-k} \\end{equation}\nand then do algebra.\nAnd because of this, when you have a large \\(n\\) for your binomial distribution, you can just use a poisson distribution, where \\(\\lambda = np\\).\nadding poisson distribution For independent \\(A, B\\)\n\\begin{equation} A+B \\sim Poi(\\lambda_{A}+ \\lambda_{B}) \\end{equation}\nMLE for poisson distribution \\begin{equation} \\lambda = \\frac{1}{n} \\sum_{i=1}^{n} x_{i} \\end{equation}\nyes, that\u0026rsquo;s just the sample mean\n","html":"\u003cp\u003eLet\u0026rsquo;s say we want to know what is the chance of having an event occurring \\(k\\) times in a unit time, on average, this event happens at a rate of \\(\\lambda\\) per unit time.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;What\u0026rsquo;s the probability that there are \\(k\\) earthquakes in the 1 year if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eevents have to be independent\u003c/li\u003e\n\u003cli\u003eprobability of sucess in each trial doesn\u0026rsquo;t vary\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e$λ$\u0026mdash;count of events per time\u003c/li\u003e\n\u003cli\u003e\\(X \\sim Poi(\\lambda)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability 
mass function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = e^{-\\lambda} \\frac{\\lambda^{k}}{k!}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-poisson-distribution--kbhprobability-of-k-in-x-time-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/strong\u003e: \\(\\lambda\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/strong\u003e: \\(\\lambda\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"derivation\"\u003ederivation\u003c/h3\u003e\n\u003cp\u003eWe divide the event into infinitely small buckets and plug into a \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e, to formulate the question:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of large \\(n\\) number samples getting \\(k\\) events with probability of \\(\\frac{\\lambda}{n}\\) of events\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = \\lim_{n \\to \\infty} {n \\choose k} \\qty(\\frac{\\lambda}{n})^{k}\\qty(1- \\frac{\\lambda}{n})^{n-k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then do algebra.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-13_16-17-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAnd because of this, when you have a large \\(n\\) for your \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e, you can just use a \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e, where \\(\\lambda = np\\).\u003c/p\u003e\n\u003ch3 
id=\"adding-poisson-distribution--kbhprobability-of-k-in-x-time-dot-md\"\u003eadding \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e \\(A, B\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA+B \\sim Poi(\\lambda_{A}+ \\lambda_{B})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"mle-for-poisson-distribution--kbhprobability-of-k-in-x-time-dot-md\"\u003eMLE for \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{1}{n} \\sum_{i=1}^{n} x_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyes, that\u0026rsquo;s just the \u003ca href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_of_k_in_x_time/","tags":null,"title":"poisson distribution"},{"categories":null,"contents":"constituents the history: last states and actions \\(h_{t} = (s_{1:t}, a_{1:t-1})\\)\nrequirements typically:\n\\begin{equation} a_{t} = \\pi_{t}(h_{t}) \\end{equation}\nfor a Markov Decision Process, our past states are d-seperated from our current action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\)\nSome policies can be stochastic:\n\\begin{equation} P(a_{t}) = \\pi_{t}(a_{t} | h_{t}) \\end{equation}\ninstead of telling you something to do at a specific point, it tells you what the probability it chooses of doing \\(a_{t}\\) is given the history.\nadditional information stationary policy For infinite-horizon models, our policy can not care about how many time stamps are left (i.e. we are not optimizing within some box with constrained time) and therefore we don\u0026rsquo;t really care about historical actions. 
So we have:\n\\begin{equation} \\pi(s) \\end{equation}\nthis can be used in infinite-horizon models against stationary Markov Decision Process.\noptimal policy \\begin{equation} \\pi^{*}(s) = \\arg\\max_{\\pi} U^{\\pi}(s) \\end{equation}\n\u0026ldquo;the most optimal policy is the policy that maximizes the expected utility of following \\(\\pi\\) when starting from \\(s\\)\u0026rdquo;\nWe call the utility from the best policy the \u0026ldquo;optimal value function\u0026rdquo;\n\\begin{equation} U^{*} = U^{\\pi^{*}} \\end{equation}\npolicy utility, and value creating a good utility function: either policy evaluation or value iteration creating a policy from a utility function: value-function policy (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;) calculating the utility function a policy currently uses: use policy evaluation See policy evaluation\n","html":"\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003ethe history: last states and actions \\(h_{t} = (s_{1:t}, a_{1:t-1})\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003etypically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{t} = \\pi_{t}(h_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor a \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e, our past states are \u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperated\u003c/a\u003e from our \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\)\u003c/p\u003e\n\u003cp\u003eSome \u003ca href=\"/posts/kbhpolicy/\"\u003epolicies\u003c/a\u003e can be stochastic:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(a_{t}) = \\pi_{t}(a_{t} | h_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003einstead of telling you something to do at a specific point, it tells you what the probability it chooses of doing 
\\(a_{t}\\) is given the history.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"stationary-policy--kbhpolicy-dot-md\"\u003estationary \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e, our \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e can not care about how many time stamps are left (i.e. we are not optimizing within some box with constrained time) and therefore we don\u0026rsquo;t really care about historical actions. So we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis can be used in \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e against \u003ca href=\"/posts/kbhmarkov_decision_process/#stationary-id-5bb5350e-04e4-46dc-9ea8-cb7bb09edd42-markov-decision-process\"\u003estationary Markov Decision Process\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"optimal-policy\"\u003eoptimal policy\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pi^{*}(s) = \\arg\\max_{\\pi} U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the most \u003ca href=\"#optimal-policy\"\u003eoptimal policy\u003c/a\u003e is the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that maximizes the \u003ca href=\"/posts/kbhutility_theory/#expected-utility\"\u003eexpected utility\u003c/a\u003e of following \\(\\pi\\) when starting from \\(s\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe call the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e from the best policy the \u0026ldquo;\u003ca href=\"#optimal-policy\"\u003eoptimal value function\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*} = U^{\\pi^{*}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 
id=\"policy-utility-and-value\"\u003epolicy utility, and value\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ecreating a good \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e: either \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ecreating a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from a \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e: \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003ecalculating the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility function\u003c/a\u003e a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e currently uses: use \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy/","tags":null,"title":"policy"},{"categories":null,"contents":"See also Roll-out utility if you don\u0026rsquo;t want to get a vector utility over all states.\nsolving for the utility of a policy We can solve for the utility of the policy given the transitions \\(T\\) and reward \\(R\\) by solving the following equation\n\\begin{equation} \\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi} \\end{equation}\nwhere \\(T\\) is an \\(|S| \\times |S|\\) square matrix where each horizontal row is supposed to add up to \\(1\\) which encodes the probability of transitioning from each horizontal row to the column next rows.\nlookahead equation We begin our derivation from finite-horizon models.\nGives some policy \\(\\pi\\), at the 
base case:\n\\begin{equation} U^{\\pi}_{1} (s) = R(s, \\pi(s)) \\end{equation}\nat time \\(k+1\\) steps remaining:\n\\begin{equation} U^{\\pi}_{k+1}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi}_{k} (s\u0026rsquo;) \\end{equation}\nwe don\u0026rsquo;t know what the next state will be; so for each possible next state, we marginalize the result, multiplying the probability of being in that state (gotten by \\(T(\u0026hellip;)\\)) times the utility of being in that state.\nThis is called the lookahead equation, which represents how much utility any future state can be be if we took action at point \\(k\\).\nlookahead with sampling what if we only want to get \\(m\\) of the next states, instead of all next states?\nBellman Expectation Equation The Bellman Equation states that \u0026ldquo;the expected utility of being in a state is the instantaneous reward of being in that state plus the discounted future utility of all possible future state.\u0026rdquo; It is the fundamental result of RL.\n\\begin{equation} U(s) = \\arg\\max_{a} R(s, a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, a) U (s\u0026rsquo;) \\end{equation}\nIf we are dealing with infinite-horizon models (at \u0026ldquo;convergence\u0026rdquo; of the lookahead equation), we just no longer have a time dependency from the lookahead equation:\nWe only care about some Markovian state \\(s\\), and its next possible states \\(s\u0026rsquo;\\). 
When this pair happened doesn\u0026rsquo;t matter.\nFor a stochastic policy, we have:\n\\begin{equation} U(s) = \\sum_{a}^{} \\pi(a|s) \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)] \\end{equation}\nWe now can go about solving for what \\(U^{\\pi}\\) is:\nProcedure:\nfrom the Bellman Expectation Equation, we actually have a linear equation whereby:\n\\begin{equation} \\bold{U}^{\\pi} = \\bold{R}^{\\pi} + \\gamma T^{\\pi}\\bold{U}^{\\pi} \\end{equation}\nwhere, \\(T^{\\pi}\\) is an \\(n\\times n\\) matrix where \\(T^{\\pi}_{i,j}\\) represents the probability of transitioning from the \\(i\\) th to the \\(j\\) th state; and where, \\(\\bold{U}^{\\pi}\\) and \\(\\bold{R}^{\\pi}\\) are \\(n\\) vectors which represent all possible states and all possible utilities. Note that everything is parametrized on \\(\\pi\\) (so \\(T\\) doesn\u0026rsquo;t need an action dimension because we will be using the policy to calculate all the actions)\nWe can now solve for the utility of the policy. Now, algebra time on the previous equation to get us:\n\\begin{equation} \\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi} \\end{equation}\nwe know that \\(I - \\gamma T^{\\pi}\\) is invertible because \\(T^{\\pi}\\) is a transition matrix and \\(\\gamma\\) is less than \\(1\\). 
And that, folks, is the utility of a policy.\nApproximate Policy Evaluation Instead of having a policy evaluation based on a vector out of the fitness of this policy at all possible states, which really works if our state space is small, what if we made a policy evaluation scheme which estimates the expectation of the utility of our policy based on the possibility of us landing in particular states?\nBackground The utility from following a policy AT A STATE is given by:\n\\begin{equation} U^{\\pi}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi} (s\u0026rsquo;) \\end{equation}\nThe utility of a policy, in general, can be represented by:\n\\begin{equation} U(\\pi) = \\sum_{s}^{} b(s) U^{\\pi}(s) \\end{equation}\nwhere, \\(b(s)\\) is the \u0026ldquo;initial state distribution\u0026rdquo; of being in a particular state.\nOur state space may not be discrete or otherwise small enough to be added up for every case. We therefore can use a sampling of Rollout trajectories to perform Approximate Policy Evaluation\nRollout utility Collecting a utility for all \\(s\\) is real hard. Therefore, instead, we perform a bunch of Rollouts and then calculate, for each trajectory \\(\\tau\\) you ended up with:\n\\begin{align} U(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\ \u0026amp;= \\int_{\\tau} p_{\\tau} (\\tau) R(\\tau) d\\tau \\end{align}\nwhere, \\(p(\\tau)\\) is the probability of that trajectory happening, and \\(R(\\tau)\\) is the discounted future reward of that trajectory. That is:\n\\begin{equation} R(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1} \\end{equation}\nmonte-carlo policy evaluation Sometimes, we can\u0026rsquo;t even get all trajectories to add them up, so we simply perform an average of \\(m\\) sample trajectories:\n\\begin{equation} U(\\pi_{\\theta}) = \\frac{1}{m}\\sum_{i=1}^{m} R(\\tau^{i}) \\end{equation}\nWe start each trajectory using a probability-weighted sample of initial states. 
This is the Roll-out utility\n","html":"\u003cp\u003eSee also \u003ca href=\"#rollout-utility\"\u003eRoll-out utility\u003c/a\u003e if you don\u0026rsquo;t want to get a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e over all states.\u003c/p\u003e\n\u003ch2 id=\"solving-for-the-utility-of-a-policy\"\u003esolving for the utility of a policy\u003c/h2\u003e\n\u003cp\u003eWe can solve for the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e given the transitions \\(T\\) and reward \\(R\\) by solving the following equation\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(T\\) is an \\(|S| \\times |S|\\) square matrix where each horizontal row is supposed to add up to \\(1\\) which encodes the probability of transitioning from each horizontal row to the column next rows.\u003c/p\u003e\n\u003ch3 id=\"lookahead-equation\"\u003elookahead equation\u003c/h3\u003e\n\u003cp\u003eWe begin our derivation from \u003ca href=\"/posts/kbhmarkov_decision_process/#finite-horizon-models\"\u003efinite-horizon models\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGives some policy \\(\\pi\\), at the base case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}_{1} (s) = R(s, \\pi(s))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eat time \\(k+1\\) steps remaining:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}_{k+1}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi}_{k} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe don\u0026rsquo;t know what the next state will be; so for each possible next state, we marginalize the result, multiplying the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of being in that state (gotten 
by \\(T(\u0026hellip;)\\)) times the utility of being in that state.\u003c/p\u003e\n\u003cp\u003eThis is called the \u003ca href=\"#lookahead-equation\"\u003elookahead equation\u003c/a\u003e, which represents how much \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e any future state can be be if we took action at point \\(k\\).\u003c/p\u003e\n\u003ch4 id=\"lookahead-with-sampling\"\u003elookahead with sampling\u003c/h4\u003e\n\u003cp\u003ewhat if we only want to get \\(m\\) of the next states, instead of all next states?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-02_16-45-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#bellman-expectation-equation\"\u003eBellman Equation\u003c/a\u003e states that \u0026ldquo;the expected utility of being in a state is the instantaneous reward of being in that state plus the discounted future utility of all possible future state.\u0026rdquo; It is the fundamental result of RL.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) = \\arg\\max_{a} R(s, a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, a) U (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we are dealing with \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e (at \u0026ldquo;convergence\u0026rdquo; of the \u003ca href=\"#lookahead-equation\"\u003elookahead equation\u003c/a\u003e), we just no longer have a time dependency from the \u003ca href=\"#lookahead-equation\"\u003elookahead equation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eWe only care about some Markovian state \\(s\\), and its next possible states \\(s\u0026rsquo;\\). 
When these pair happened doesn\u0026rsquo;t matter.\u003c/p\u003e\n\u003cp\u003eFor a stochastic policy, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(S) = \\sum_{a}^{} \\pi(a|s) \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe now can go about solving for what \\(U^{\\pi}\\) is:\u003c/p\u003e\n\u003cp\u003eProcedure:\u003c/p\u003e\n\u003cp\u003efrom the \u003ca href=\"#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e, we actually have a linear equation whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{U}^{\\pi} = \\bold{R}^{\\pi} + \\gamma T^{\\pi}\\bold{U}^{\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T^{\\pi}\\) is an \\(n\\times n\\) matrix where \\(T^{\\pi}_{i,j}\\) represents the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of transitioning from the \\(i\\) th to the \\(j\\) the state; and where, \\(\\bold{U}^{\\pi}\\) and \\(\\bold{R}^{\\pi}\\) are \\(n\\) vectors which represents all possible states and all possible utilities. Note that everything is parametrized on \\(\\pi\\) (so \\(T\\) doesn\u0026rsquo;t need an action dimension because we will be using the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e to calculate all the actoins)\u003c/p\u003e\n\u003cp\u003eWe can now solve for the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e. Now, algebra time on the previous equation to get us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe know that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e because its a transition matrix. 
And that, folks, is the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"approximate-policy-evaluation\"\u003eApproximate Policy Evaluation\u003c/h2\u003e\n\u003cp\u003eInstead of having a \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e based on a vector out of the fitness of this \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e at all possible states, which really works if our state space is small, what if we made a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e evaluation scheme which estimates the expectation of the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of our policy based on the possibility of us landing in particular states?\u003c/p\u003e\n\u003ch3 id=\"background\"\u003eBackground\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e from following a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e AT A STATE is given by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of a policy, in general, can be represented by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(\\pi) = \\sum_{s}^{} b(s) U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(b(s)\\) is the \u0026ldquo;initial state distribution\u0026rdquo; of being in a particular state.\u003c/p\u003e\n\u003cp\u003eOur state space may not be discrete or otherwise small enough to be added up for every case. 
We therefore can a sampling of \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003etrajectory\u003c/a\u003e to perform \u003ca href=\"#approximate-policy-evaluation\"\u003eApproximate Policy Evaluation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"rollout-utility\"\u003eRollout utility\u003c/h3\u003e\n\u003cp\u003eCollecting a utility for all \\(s\\) is real hard. Therefore, instead, we perform a bunch of \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003es and then calculate, for each trajectory \\(\\tau\\) you ended up with:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\\n\u0026amp;= \\int_{\\tau} p_{\\tau} (\\tau) R(\\tau) d\\tau\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, \\(p(\\tau)\\) is the probability of that trajectory happening, and \\(R(\\tau)\\) is the discounted future reward of that trajectory. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"monte-carlo-policy-evaluation\"\u003emonte-carlo policy evaluation\u003c/h4\u003e\n\u003cp\u003eSometimes, we can\u0026rsquo;t even get all trajectories to add them up, so we simply perform an average of \\(m\\) sample trajectories:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(\\pi_{\\theta}) = \\frac{1}{m}\\sum_{i=1}^{m} R(\\tau^{i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe start each trajectory using a probability-weighted sample of initial states. 
This is the \u003ca href=\"#rollout-utility\"\u003eRoll-out utility\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_evaluation/","tags":null,"title":"policy evaluation"},{"categories":null,"contents":"Two steps:\nobtaining a function for the gradient of policy against some parameters \\(\\theta\\) making them more based than they are right now by optimization Thoughout all of this, \\(U(\\theta)\\) is \\(U(\\pi_{\\theta})\\).\nObtaining a policy gradient Finite-Difference Gradient Estimation We want some expression for:\n\\begin{equation} \\nabla U(\\theta) = \\qty[\\pdv{U}{\\theta_{1}} (\\theta), \\dots, \\pdv{U}{\\theta_{n}}] \\end{equation}\nwe can estimate that with the finite-difference \u0026ldquo;epsilon trick\u0026rdquo;:\n\\begin{equation} \\nabla U(\\theta) = \\qty[ \\frac{U(\\theta + \\delta e^{1}) - U(\\theta)}{\\delta} , \\dots, \\frac{U(\\theta + \\delta e^{n}) - U(\\theta)}{\\delta} ] \\end{equation}\nwhere \\(e^{j}\\) is the standard basis vector at position \\(j\\). 
We essentially add a small \\(\\delta\\) to the \\(j\\) th slot of each parameter \\(\\theta_{j}\\), and divide to get an estimate of the gradient.\nLinear Regression Gradient Estimate We perform \\(m\\) random perturbations of \\(\\theta\\) parameters, and lay each resulting parameter vector flat onto a matrix:\n\\begin{equation} \\Delta \\theta = \\mqty[(\\Delta \\theta^{1})^{T} \\\\ \\dots \\\\ (\\Delta \\theta^{m})^{T}] \\end{equation}\nFor \\(\\theta\\) that contains \\(n\\) parameters, this is a matrix \\(m\\times n\\).\nWe can now write out the \\(\\Delta U\\) with:\n\\begin{equation} \\Delta U = \\qty[U(\\theta+ \\Delta \\theta^{1}) - U(\\theta), \\dots, U(\\theta+ \\Delta \\theta^{m}) - U(\\theta)] \\end{equation}\nWe have to compute Roll-out utility for each \\(U(\\theta + \u0026hellip;)\\)\nWe now want to fit a function between \\(\\Delta \\theta\\) to \\(\\Delta U\\), because from the definition of the gradient we have:\n\\begin{equation} \\Delta U = \\nabla_{\\theta} U(\\theta)\\ \\Delta \\theta \\end{equation}\n(that is \\(y = mx\\))\nRearranging the expression above\n\\begin{equation} \\nabla_{\\theta} U(\\theta) \\approx \\Delta \\theta^{\\dagger} \\Delta U \\end{equation}\nwhere \\(\\Delta \\theta^{\\dagger}\\) is the pseudoinverse of \\(\\Delta \\theta\\) matrix.\nTo end up at a gradient estimate.\nLikelihood Ratio Gradient This is likely good, but requires a few things:\nan explicit transition model that you can compute over you being able to take the gradient of the policy this is what people usually refer to as \u0026ldquo;Policy Gradient\u0026rdquo;.\nRecall:\n\\begin{align} U(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\ \u0026amp;= \\int_{\\tau} p_{\\pi} (\\tau) R(\\tau) d\\tau \\end{align}\nNow consider:\n\\begin{align} \\nabla_{\\theta} U(\\theta) \u0026amp;= \\int_{\\tau} \\nabla_{\\theta} p_{\\pi}(\\tau) R(\\tau) d \\tau \\\\ \u0026amp;= \\int_{\\tau} \\frac{p_{\\pi} (\\tau)}{p_{\\pi} (\\tau)} \\nabla_{\\theta} p_{\\pi}(\\tau) 
R(\\tau) d \\tau \\end{align}\nAside 1:\nNow, consider the expression:\n\\begin{equation} \\nabla \\log p_{\\pi} (\\tau) = \\frac{\\nabla p_{\\pi}(\\tau)}{p_{\\pi} \\tau} \\end{equation}\nThis is just out of calculus. Consider the derivative chain rule; now, the derivative of \\(\\log (x) = \\frac{1}{x}\\) , and the derivative of the inside is \\(\\nabla x\\).\nRearranging that, we have:\n\\begin{equation} \\nabla p_{\\pi}(\\tau) = (\\nabla \\log p_{\\pi} (\\tau))(p_{\\pi} \\tau) \\end{equation}\nSubstituting that in, one of our \\(p_{\\pi}(\\tau)\\) cancels out, and, we have:\n\\begin{equation} \\int_{\\tau} p_{\\pi}(\\tau) \\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau) \\dd{\\tau} \\end{equation}\nYou will note that this is the definition of the expectation of the right half (everything to the right of \\(\\nabla_{\\theta}\\)) vis a vi all \\(\\tau\\) (multiplying it by \\(p(\\tau)\\)). Therefore:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} [\\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau)] \\end{equation}\nAside 2:\nRecall that \\(\\tau\\) a trajectory is a pair of \\(s_1, a_1, \u0026hellip;, s_{n}, a_{d}\\).\nWe want to come up with some \\(p_{\\pi}(\\tau)\\), \u0026ldquo;what\u0026rsquo;s the probability of a trajectory happening given a policy\u0026rdquo;.\n\\begin{equation} p_{\\pi}(\\tau) = p(s^{1}) \\prod_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) \\pi_{\\theta} (a^{k}|s^{k}) \\end{equation}\n(\u0026ldquo;probably of being at a state, times probability of the transition happening, times the probability of the action happening, so on, so on\u0026rdquo;)\nNow, taking the log of it causes the product to become a summation:\n\\begin{equation} \\log p_{\\pi}(\\tau) = p(s^{1}) + \\sum_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) + \\pi_{\\theta} (a^{k}|s^{k}) \\end{equation}\nPlugging this into our expectation equation:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\nabla_{\\theta} \\qty(p(s^{1}) + \\sum_{k=1}^{d} 
p(s^{k+1} | s^{k}, a^{k}) + \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)] \\end{equation}\nThis is an important result. You will note that \\(p(s^{1})\\) and \\(p(s^{k+1}|s^{k},a^{k})\\) don\u0026rsquo;t have a \\(\\theta\\) term in them! Therefore, taking the \\(\\nabla_{\\theta}\\) of them becomes\u0026hellip; ZERO!!! Therefore:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\qty(0 + \\sum_{k=1}^{d} 0 + \\nabla_{\\theta} \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)] \\end{equation}\nSo based. We now have:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) R(\\tau)] \\end{equation}\nwhere,\n\\begin{equation} R(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1} \\end{equation}\n\u0026ldquo;this is very nice\u0026rdquo; because we do not need to know anything regarding the transition model. This means we don\u0026rsquo;t actually need to know what \\(p(s^{k+1}|s^{k},a^{k})\\) because that term just dropped out of the gradient.\nWe can simulate a few trajectories; calculate the gradient, and average them to end up with our overall gradient.\nReward-to-Go Variance typically increases with Rollout depth. We don\u0026rsquo;t want that. We want to correct for the causality of action/reward. 
Action in the FUTURE do not influence reward in the PAST.\nRecall:\n\\begin{equation} R(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1} \\end{equation}\nLet us plug this into the policy gradient expression:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1})] \\end{equation}\nLet us split this reward into two piece; one piece for the past (up to \\(k-1\\)), and one for the future:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{l=1}^{k-1} r_{l}\\ \\gamma^{l-1} + \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1})] \\end{equation}\nWe now want to ignore all the past rewards (i.e. the first half of the internal summation). Again, this is because action in the future shouldn\u0026rsquo;t care about what reward was gather in the past.\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1}] \\end{equation}\nWe now factor out \\(\\gamma^{k-1}\\) to make the expression look like:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k})] \\end{equation}\nWe call the right term Reward-to-Go:\n\\begin{equation} r_{togo}(k) = \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k} \\end{equation}\nwhere \\(d\\) is the depth of your trajectory and \\(k\\) is your current state. 
Finally, then:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} r_{togo}(k))] \\end{equation}\nBaseline subtraction Sometimes, we want to subtract a baseline reward to show how much actually better an action is (instead of blindly summing all future rewards). This could be the average reward at all actions at that state, this could be any other thing of your choosing.\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} (r_{togo}(k) - r_{baseline}(k)))] \\end{equation}\nFor instance, if you have a system where each action all gave \\(+1000\\) reward, taking any particular action isn\u0026rsquo;t actually very good. Hence:\nOptimizing the Policy Gradient We want to make \\(U(\\theta)\\) real big. We have two knobs: what is our objective function, and what is your restriction?\nPolicy Gradient Ascent good \u0026lsquo;ol fashioned\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\nabla U(\\theta) \\end{equation}\nwhere \\(\\alpha\\) is learning rate/step factor. This is not your STEP SIZE. 
If you want to specify a step size, see Restricted Step Method.\nRestricted Step Method Policy Gradient Ascent can take very large steps if the gradient is too large.\nOne by which we can optimize the gradient, ensuring that we don\u0026rsquo;t take steps larger than:\n\\begin{equation} \\frac{1}{2}(\\theta\u0026rsquo; - \\theta)^{T} I(\\theta\u0026rsquo; - \\theta) \\leq \\epsilon \\end{equation}\nis through Restricted Gradient:\n\\begin{equation} \\theta \\leftarrow \\theta + \\sqrt{2 \\epsilon} \\frac{\\nabla U(\\theta)}{|| \\nabla U(\\theta)||} \\end{equation}\nOccasionally, if a step-size is directly given to you in terms of euclidean distance, then you would replace the entirety of \\(\\sqrt{2 \\epsilon}\\) with your provided step size.\nTrust Region Policy Optimization Using a different way of restricting the update.\nProximal Policy Optimization Clipping the gradients.\n","html":"\u003cp\u003eTwo steps:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eobtaining a function for the gradient of policy against some parameters \\(\\theta\\)\u003c/li\u003e\n\u003cli\u003emaking them more based than they are right now by optimization\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThoughout all of this, \\(U(\\theta)\\) is \\(U(\\pi_{\\theta})\\).\u003c/p\u003e\n\u003ch2 id=\"obtaining-a-policy-gradient\"\u003eObtaining a policy gradient\u003c/h2\u003e\n\u003ch3 id=\"finite-difference-gradient-estimation\"\u003eFinite-Difference Gradient Estimation\u003c/h3\u003e\n\u003cp\u003eWe want some expression for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla U(\\theta) = \\qty[\\pdv{U}{\\theta_{1}} (\\theta), \\dots, \\pdv{U}{\\theta_{n}}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can estimate that with the finite-difference \u0026ldquo;epsilon trick\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla U(\\theta) = \\qty[ \\frac{U(\\theta + \\delta e^{1}) - U(\\theta)}{\\delta} , \\dots, \\frac{U(\\theta + \\delta e^{n}) - U(\\theta)}{\\delta} 
]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(e^{j}\\) is the standard \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e at position \\(j\\). We essentially add a small \\(\\delta\\) to the \\(j\\) th slot of each parameter \\(\\theta_{j}\\), and divide to get an estimate of the gradient.\u003c/p\u003e\n\u003ch3 id=\"linear-regression-gradient-estimate\"\u003eLinear Regression Gradient Estimate\u003c/h3\u003e\n\u003cp\u003eWe perform \\(m\\) random perturbations of \\(\\theta\\) parameters, and lay each resulting \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e vector flat onto a matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta \\theta = \\mqty[(\\Delta \\theta^{1})^{T} \\\\ \\dots \\\\ (\\Delta \\theta^{m})^{T}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor \\(\\theta\\) that contains \\(n\\) parameters, this is a matrix \\(m\\times n\\).\u003c/p\u003e\n\u003cp\u003eWe can now write out the \\(\\Delta U\\) with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta U = \\qty[U(\\theta+ \\Delta \\theta^{1}) - U(\\theta), \\dots, U(\\theta+ \\Delta \\theta^{m}) - U(\\theta)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have to compute \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e for each \\(U(\\theta + \u0026hellip;)\\)\u003c/p\u003e\n\u003cp\u003eWe now want to fit a function between \\(\\Delta \\theta\\) to \\(\\Delta U\\), because from the definition of the gradient we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta U = \\nabla_{\\theta} U(\\theta)\\ \\Delta \\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(that is \\(y = mx\\))\u003c/p\u003e\n\u003cp\u003eRearranging the expression above\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) \\approx \\Delta \\theta^{\\dagger} \\Delta U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\Delta 
\\theta^{\\dagger}\\) is the \u003ca href=\"\"\u003epseudoinverse\u003c/a\u003e of \\(\\Delta \\theta\\) matrix.\u003c/p\u003e\n\u003cp\u003eTo end up at a gradient estimate.\u003c/p\u003e\n\u003ch3 id=\"likelyhood-ratio-gradient--kbhpolicy-gradient-dot-md\"\u003eLikelyhood Ratio \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003eGradient\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis is likely good, but requires a few things:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ean explicit transition model that you can compute over\u003c/li\u003e\n\u003cli\u003eyou being able to take the gradient of the policy\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethis is what people usually refers to as \u0026ldquo;\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\\n\u0026amp;= \\int_{\\tau} p_{\\pi} (\\tau) R(\\tau) d\\tau\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow consider:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\nabla_{\\theta} U(\\theta) \u0026amp;= \\int_{\\tau} \\nabla_{\\theta} p_{\\pi}(\\tau) R(\\tau) d \\tau \\\\\n\u0026amp;= \\int_{\\tau} \\frac{p_{\\pi} (\\tau)}{p_{\\pi} (\\tau)} \\nabla_{\\tau} p_{\\tau}(\\tau) R(\\tau) d \\tau\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside 1:\u003c/p\u003e\n\u003cp\u003eNow, consider the expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla \\log p_{\\pi} (\\tau) = \\frac{\\nabla p_{\\pi}(\\tau)}{p_{\\pi} \\tau}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is just out of calculus. 
Consider the derivative chain rule; now, the derivative of \\(\\log (x) = \\frac{1}{x}\\) , and the derivative of the inside is \\(\\nabla x\\).\u003c/p\u003e\n\u003cp\u003eRearranging that, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla p_{\\pi}(\\tau) = (\\nabla \\log p_{\\pi} (\\tau))(p_{\\pi} \\tau)\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eSubstituting that in, one of our \\(p_{\\pi}(\\tau)\\) cancels out, and, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\tau} p_{\\pi}(\\tau) \\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau) \\dd{\\tau}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that this is the definition of the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of the right half (everything to the right of \\(\\nabla_{\\theta}\\)) vis a vi all \\(\\tau\\) (multiplying it by \\(p(\\tau)\\)). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} [\\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside 2:\u003c/p\u003e\n\u003cp\u003eRecall that \\(\\tau\\) a trajectory is a pair of \\(s_1, a_1, \u0026hellip;, s_{n}, a_{d}\\).\u003c/p\u003e\n\u003cp\u003eWe want to come up with some \\(p_{\\pi}(\\tau)\\), \u0026ldquo;what\u0026rsquo;s the probability of a trajectory happening given a policy\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{\\pi}(\\tau) = p(s^{1}) \\prod_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) \\pi_{\\theta} (a^{k}|s^{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;probably of being at a state, times probability of the transition happening, times the probability of the action happening, so on, so on\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eNow, taking the log of it causes the product to become a summation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log p_{\\pi}(\\tau) = p(s^{1}) + \\sum_{k=1}^{d} p(s^{k+1} | s^{k}, 
a^{k}) + \\pi_{\\theta} (a^{k}|s^{k})\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ePlugging this into our expectation equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\nabla_{\\theta} \\qty(p(s^{1}) + \\sum_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) + \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is an important result. You will note that \\(p(s^{1})\\) and \\(p(s^{k+1}|s^{k},a^{k})\\) \u003cstrong\u003edoesn\u0026rsquo;t have a \\(\\theta\\) term in them!!!!\u003c/strong\u003e. Therefore, taking term in them!!!!*. Therefore, taking the \\(\\nabla_{\\theta}\\) of them becomes\u0026hellip; ZERO!!! Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\qty(0 + \\sum_{k=1}^{d} 0 + \\nabla_{\\theta} \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo based. We now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;this is very nice\u0026rdquo; because we do not need to know anything regarding the transition model. This means we don\u0026rsquo;t actually need to know what \\(p(s^{k+1}|s^{k}a^{k})\\) because that term just dropped out of the gradient.\u003c/p\u003e\n\u003cp\u003eWe can simulate a few trajectories; calculate the gradient, and average them to end up with our overall gradient.\u003c/p\u003e\n\u003ch3 id=\"reward-to-go\"\u003eReward-to-Go\u003c/h3\u003e\n\u003cp\u003eVariance typically increases with \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e depth. 
We don\u0026rsquo;t want that. We want to correct for the causality of action/reward. Action in the FUTURE do not influence reward in the PAST.\u003c/p\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us plug this into the \u003ca href=\"#linear-regression-gradient-estimate\"\u003epolicy gradient\u003c/a\u003e expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us split this reward into two piece; one piece for the past (up to \\(k-1\\)), and one for the future:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{l=1}^{k-1} r_{l}\\ \\gamma^{l-1} + \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now want to ignore all the past rewards (i.e. the first half of the internal summation). 
Again, this is because actions in the future shouldn\u0026rsquo;t care about what reward was gathered in the past.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now factor out \\(\\gamma^{k-1}\\) to make the expression look like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe call the right term \u003ca href=\"#reward-to-go\"\u003eReward-to-Go\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{togo}(k) = \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is the depth of your trajectory and \\(k\\) is your current state. Finally, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} r_{togo}(k))]\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"baseline-subtraction\"\u003eBaseline subtraction\u003c/h3\u003e\n\u003cp\u003eSometimes, we want to subtract a baseline reward to show how much actually better an action is (instead of blindly summing all future rewards). 
This could be the average reward at all actions at that state, this could be any other thing of your choosing.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} (r_{togo}(k) - r_{baseline}(k)))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor instance, if you have a system where each action all gave \\(+1000\\) reward, taking any particular action isn\u0026rsquo;t actually very good. Hence:\u003c/p\u003e\n\u003ch2 id=\"optimizing-the-policy-gradient--kbhpolicy-gradient-dot-md\"\u003eOptimizing the \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWe want to make \\(U(\\theta)\\) real big. We have two knobs: what is our objective function, and what is your restriction?\u003c/p\u003e\n\u003ch3 id=\"policy-gradient-ascent\"\u003ePolicy Gradient Ascent\u003c/h3\u003e\n\u003cp\u003egood \u0026lsquo;ol fashioned\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\nabla U(\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\alpha\\) is learning rate/step \u003cstrong\u003efactor\u003c/strong\u003e. This is not your STEP SIZE. 
If you want to specify a step size, see \u003ca href=\"#restricted-step-method\"\u003eRestricted Step Method\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"restricted-step-method\"\u003eRestricted Step Method\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#policy-gradient-ascent\"\u003ePolicy Gradient Ascent\u003c/a\u003e can take very large steps if the gradient is too large.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-02_16-23-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOne by which we can optimize the gradient, ensuring that we don\u0026rsquo;t take steps larger than:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{2}(\\theta\u0026rsquo; - \\theta)^{T} I(\\theta\u0026rsquo; - \\theta) \\leq \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis through \u003ca href=\"#restricted-step-method\"\u003eRestricted Gradient\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\sqrt{2 \\epsilon} \\frac{\\nabla U(\\theta)}{|| \\nabla U(\\theta)||}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOccasionally, if a step-size is directly given to you in terms of euclidean distance, then you would replace the entirety of \\(\\sqrt{2 \\epsilon}\\) with your provided step size.\u003c/p\u003e\n\u003ch3 id=\"trust-region-policy-optimization\"\u003eTrust Region Policy Optimization\u003c/h3\u003e\n\u003cp\u003eUsing a different way of restricting the update.\u003c/p\u003e\n\u003ch3 id=\"proximal-policy-optimization\"\u003eProximal Policy Optimization\u003c/h3\u003e\n\u003cp\u003eClipping the gradients.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_gradient/","tags":null,"title":"Policy Gradient"},{"categories":null,"contents":"policy iteration will allow us to get an optimal policy.\nstart with some initial policy \\(\\pi\\) (this scheme converges to an optimal policy regardless of where you start) solve for \\(U^{\\pi}\\) create a new policy \\(\\pi\u0026rsquo;\\) by 
creating a value-function policy on \\(U^{\\pi}\\) repeat 2-3 Since there are a finite policies, this will eventually converge.\nAt each point, the utility of the policy increases.\nAt each step, the utility of the resulting policy will necessarily be larger or equal to than the previous one as we are greedily choosing \u0026ldquo;better\u0026rdquo; (or equivalent) actions as measured by the utility of the previous policy.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e will allow us to get an \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart with some initial \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e \\(\\pi\\) (this scheme converges to an \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e regardless of where you start)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#solving-for-the-utility-of-a-policy\"\u003esolve for \\(U^{\\pi}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ecreate a new policy \\(\\pi\u0026rsquo;\\) by creating a \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e on \\(U^{\\pi}\\)\u003c/li\u003e\n\u003cli\u003erepeat 2-3\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSince there are a finite policies, this will eventually converge.\u003c/p\u003e\n\u003cp\u003eAt each point, the utility of the policy increases.\u003c/p\u003e\n\u003cp\u003eAt each step, the utility of the resulting policy will necessarily be larger or equal to than the previous one as we are greedily choosing \u0026ldquo;better\u0026rdquo; (or equivalent) actions as measured by the utility of the previous policy.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_iteration/","tags":null,"title":"policy iteration"},{"categories":null,"contents":"Policy Optimization deals with algorithms 
that, unlike value iteration/policy iteration/online planning which uses a surrogate (like value function or some future discounted reward) to calculate a policy, directly optimizes against policy parameters \\(\\theta\\) for a policy \\(\\pi_{\\theta}\\).\nLocal Policy Search (aka Hooke-Jeeves Policy Search) Genetic Policy Search Cross Entropy Method Policy Gradient, Regression Gradient and Likelyhood Ratio Gradient Reward-to-Go ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e deals with algorithms that, unlike \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e/\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e/\u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e which uses a surrogate (like \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e or some future discounted reward) to calculate a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e, directly optimizes against policy parameters \\(\\theta\\) for a policy \\(\\pi_{\\theta}\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e (aka \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eHooke-Jeeves Policy Search\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e, \u003ca href=\"/posts/kbhpolicy_gradient/#linear-regression-gradient-estimate\"\u003eRegression Gradient\u003c/a\u003e and \u003ca 
href=\"/posts/kbhpolicy_gradient/#likelyhood-ratio-id-2765155a-ba00-4014-b2a7-cf2f4f184178-gradient\"\u003eLikelyhood Ratio Gradient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#reward-to-go\"\u003eReward-to-Go\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_optimization/","tags":null,"title":"Policy Optimization"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpolio/","tags":null,"title":"Polio"},{"categories":null,"contents":"A polynomial is a polynomial\nconstituents a function \\(p: \\mathbb{F} \\to \\mathbb{F}\\) coefficient \\(a_0, \\dots, a_{m} \\in \\mathbb{F}\\) requirements A polynomial is defined by:\n\\begin{equation} p(z)=a_0+a_1z+a_2z^{2}+\\dots +a_{m}z^{m} \\end{equation}\nfor all \\(z \\in \\mathbb{F}\\)\nadditional information degree of a polynomial \\(\\deg p\\) A polynomial\u0026rsquo;s degree is the value of the highest non-zero exponent. That is, for a polynomial:\n\\begin{equation} p(z) = a_0+a_1z+\\dots +a_{m}z^{m} \\end{equation}\nwith \\(a_{m} \\neq 0\\), the degree of it is \\(m\\). We write \\(\\deg p = m\\).\nA polynomial \\(=0\\) is defined to have degree \\(-\\infty\\)\nOf course, a polynomial with degree \\(n\\), times a polynomial of degree \\(m\\), has degree \\(mn\\). We see that:\n\\begin{equation} x^{n}x^{m} = x^{n+m} \\end{equation}\n\\(\\mathcal{P}(\\mathbb{F})\\) \\(\\mathcal{P}(\\mathbb{F})\\) is the set of all polynomials with coefficients in \\(\\mathbb{F}\\).\n\\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\) We first see that polynomials are functions from \\(\\mathbb{F}\\to \\mathbb{F}\\). 
We have shown previously that F^s is a Vector Space Over F.\nTherefore, we can first say that \\(\\mathcal{P}(\\mathbb{F}) \\subset \\mathbb{F}^{\\mathbb{F}}\\).\nLastly, we simply have to show that \\(\\mathcal{P}(\\mathbb{F})\\) is a subspace.\nzero exists by taking all \\(a_{m} = 0\\) addition is closed by inheriting commutativity and distributivity in \\(\\mathbb{F}\\) scalar multiplication is closed by distributivity Having satisfied the conditions of subspace, \\(\\mathcal{P}(\\mathbb{F})\\) is a vector space. \\(\\blacksquare\\)\n\\(\\mathcal{P}_{m}(\\mathbb{F})\\) For \\(m\\geq 0\\), \\(\\mathcal{P}_{m}(\\mathbb{F})\\) denotes the set of all polynomials with coefficients \\(\\mathbb{F}\\) and degree at most \\(m\\).\nproduct of polynomials see product of polynomials\npolynomial of operator see polynomial of operator\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e is a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea function \\(p: \\mathbb{F} \\to \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e \\(a_0, \\dots, a_{m} \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eA polynomial is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(z)=a_0+a_1z+a_2z^{2}+\\dots +a_{m}z^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(z \\in \\mathbb{F}\\)\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"degree-of-a-polynomial-deg-p\"\u003edegree of a polynomial \\(\\deg p\\)\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e\u0026rsquo;s \u003ca href=\"#degree-of-a-polynomial-deg-p\"\u003edegree\u003c/a\u003e is the value of the 
highest non-zero exponent. That is, for a polynomial:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(z) = a_0+a_1z+\\dots +a_{m}z^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(a_{m} \\neq 0\\), the \u003ca href=\"#degree-of-a-polynomial-deg-p\"\u003edegree\u003c/a\u003e of it is \\(m\\). We write \\(\\deg p = m\\).\u003c/p\u003e\n\u003cp\u003eA polynomial \\(=0\\) is defined to have degree \\(-\\infty\\)\u003c/p\u003e\n\u003cp\u003eOf course, a polynomial with \u003ca href=\"#degree-of-a-polynomial-deg-p\"\u003edegree\u003c/a\u003e \\(n\\), times a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e of degree \\(m\\), has degree \\(n+m\\). We see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{n}x^{m} = x^{n+m}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"mathcal-p--mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\)\u003c/h3\u003e\n\u003cp\u003e\\(\\mathcal{P}(\\mathbb{F})\\) is the set of all \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es with \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003es in \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003ch3 id=\"mathcal-p--mathbb-f--is-a-vector-space-over-mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\)\u003c/h3\u003e\n\u003cp\u003eWe first see that \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es are functions from \\(\\mathbb{F}\\to \\mathbb{F}\\). 
We have shown previously that \u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003eF^s is a Vector Space Over F\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore, we can first say that \\(\\mathcal{P}(\\mathbb{F}) \\subset \\mathbb{F}^{\\mathbb{F}}\\).\u003c/p\u003e\n\u003cp\u003eLastly, we simply have to show that \\(\\mathcal{P}(\\mathbb{F})\\) is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e exists by taking all \\(a_{m} = 0\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e by inheriting \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e and \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e in \\(\\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e by \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHaving satisfied the conditions of \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e, \\(\\mathcal{P}(\\mathbb{F})\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"mathcal-p-m--mathbb-f\"\u003e\\(\\mathcal{P}_{m}(\\mathbb{F})\\)\u003c/h3\u003e\n\u003cp\u003eFor \\(m\\geq 0\\), \\(\\mathcal{P}_{m}(\\mathbb{F})\\) denotes the set of all \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es with \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003es \\(\\mathbb{F}\\) and degree at most \\(m\\).\u003c/p\u003e\n\u003ch3 id=\"product-of-polynomials--kbhproduct-of-polynomial-dot-md\"\u003e\u003ca href=\"\"\u003eproduct of polynomials\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"\"\u003eproduct of polynomials\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"polynomial-of-operator--kbhpolynomial-operator-dot-md\"\u003e\u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial of operator\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial of operator\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolynomial/","tags":null,"title":"polynomial"},{"categories":null,"contents":"Previous monte-carlo tree search methods which are not competitive to PBVI, SARSOP, etc., but those are affected by close-up history.\nkey point: monte-carlo roll outs best-first tree search + unweighted particle filter (instead of categorical beliefs)\nBackground History: a trajectory of some \\(h = \\{a_1, o_1, \u0026hellip;\\}\\) generative model: we perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state Rollout: keep sampling at each point, rolling out and calculating future reward monte-carlo tree search loop: sample \\(s\\) from the belief distribution \\(B(h)\\) for each node and call that the node state loop until we reach a leaf: sample exploration using UCB 1 via the belief get observation, reward, next state add leaf node, add node for 
each available action Rollout backpropagate the obtained value with discounts backwards via POMDP Bellman Backup During runtime, we choose the action with the best action, prune the tree given what you observed, and do this again in a different.\n","html":"\u003cp\u003ePrevious \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e methods which are not competitive to \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, \u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e, etc., but those are affected by close-up history.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ekey point\u003c/strong\u003e: monte-carlo roll outs best-first tree search + unweighted particle filter (instead of categorical beliefs)\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHistory: a trajectory of some \\(h = \\{a_1, o_1, \u0026hellip;\\}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e: we perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\u003c/li\u003e\n\u003cli\u003eRollout: keep sampling at each point, rolling out and calculating future reward\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"monte-carlo-tree-search--kbhmonte-carlo-tree-search-dot-md\"\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eloop:\n\u003col\u003e\n\u003cli\u003esample \\(s\\) from the belief distribution \\(B(h)\\) for each node and call that the node state\u003c/li\u003e\n\u003cli\u003eloop until we reach a leaf:\n\u003col\u003e\n\u003cli\u003esample exploration using \u003ca href=\"/posts/kbhdirected_exploration/#ucb-1\"\u003eUCB 1\u003c/a\u003e via the 
belief\u003c/li\u003e\n\u003cli\u003eget observation, reward, next state\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eadd leaf node, add node for each available action\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebackpropagate the obtained value with discounts backwards via \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Backup\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eDuring runtime, we choose the action with the best action, prune the tree given what you observed, and do this again in a different.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomcp/","tags":null,"title":"POMCP"},{"categories":null,"contents":"POMDPs with continuous actions are hard. So POMCP or (belief update + MCTS).\nSo instead, let\u0026rsquo;s try improving that. Unlike just POMCP, not only do we have \\(B(h)\\), we also have \\(W(h)\\), which is the weight of a specific state sampled. Naively applying POMCP on continuous states will give a wide-ass tree because each sampled state will not be the same as before.\ndouble progressive widening We want to use sampling to sample from observation. This will eventually lead to a suboptimal QMDP policy\u0026mdash;this is because there is no state uncertainty?\nPOMCPOW get an action from ActionProgressiveWiden function Get an observation, if the observation we got has too many children we prune discard the observation and stick the next state onto previous observation weighted by the observation likelihood system \\(Z(o|s,a,s\u0026rsquo;)\\) \\(k, \\alpha, C\\)\nPFTDTW MCTS Particle filters Double Progressive Widening ","html":"\u003cp\u003ePOMDPs with continuous actions are hard. 
So \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e or (belief update + \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eSo instead, let\u0026rsquo;s try improving that. Unlike just \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e, not only do we have \\(B(h)\\), we also have \\(W(h)\\), which is the weight of a specific state sampled. Naively applying \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e on continuous states will give a wide-ass tree because each sampled state will not be the same as before.\u003c/p\u003e\n\u003ch2 id=\"double-progressive-widening\"\u003edouble progressive widening\u003c/h2\u003e\n\u003cp\u003eWe want to use sampling to sample from observation. This will eventually lead to a suboptimal \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e policy\u0026mdash;this is because there is no state uncertainty?\u003c/p\u003e\n\u003ch2 id=\"pomcpow--kbhpomcpow-dot-md\"\u003e\u003ca href=\"/posts/kbhpomcpow/\"\u003ePOMCPOW\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eget an action from ActionProgressiveWiden function\u003c/li\u003e\n\u003cli\u003eGet an observation, if the observation we got has too many children we prune\u003c/li\u003e\n\u003cli\u003ediscard the observation and stick the next state onto previous observation weighted by the observation likelihood system \\(Z(o|s,a,s\u0026rsquo;)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\(k, \\alpha, C\\)\u003c/p\u003e\n\u003ch2 id=\"pftdtw\"\u003ePFTDTW\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eMCTS\u003c/li\u003e\n\u003cli\u003eParticle filters\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDouble Progressive Widening\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomcpow/","tags":null,"title":"POMCPOW"},{"categories":null,"contents":"Upper bounds of alpha vectors QMDP and FIB represent an 
upper bound of the true optimal alpha vector values.\nQMDP Fast Informed Bound FIB is a generally lower bound than QMDP.\nLower bounds of alpha vectors BAWS and blind lower bound represents\nFaster:\nbest-action worst-state blind lower bound Slower:\nPoint-Based Value Iteration \u0026ldquo;Perseus\u0026rdquo;: Randomized PBVI HSVI SARSOP point selection see point selection\n","html":"\u003ch2 id=\"upper-bounds-of-alpha-vector--kbhalpha-vector-dot-md--s\"\u003eUpper bounds of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e and \u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFIB\u003c/a\u003e represents an \u003cstrong\u003eupper bound\u003c/strong\u003e of the true optimal \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e values.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFast Informed Bound\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFIB\u003c/a\u003e is a generally lower bound than \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"lower-bounds-of-alpha-vector--kbhalpha-vector-dot-md--s\"\u003eLower bounds of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003eBAWS\u003c/a\u003e and \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e represents\u003c/p\u003e\n\u003cp\u003eFaster:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower 
bound\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSlower:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePoint-Based Value Iteration\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Perseus\u0026rdquo;: \u003ca href=\"/posts/kbhpoint_based_value_iteration/#randomized-pbvi--kbhpoint-based-value-iteration-dot-md\"\u003eRandomized PBVI\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"point-selection\"\u003epoint selection\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomdp_approximation/","tags":null,"title":"POMDP Approximation"},{"categories":null,"contents":"What if our initial state never change or is deterministically changing? For instance, say, for localization. This should make solving a POMDP easier.\nPOMDP-lite \\(X\\) fully observable states \\(\\theta\\) hidden parameter: finite amount of values \\(\\theta_{1 \\dots N}\\) where \\(S = X \\times \\theta\\) we then assume conditional independence between \\(x\\) and \\(\\theta\\). So: \\(T = P(x\u0026rsquo;|\\theta, x, a)\\), where \\(P(\\theta\u0026rsquo;|\\theta,x,a) = 1\\) (\u0026ldquo;our hidden parameter is known or deterministically changing\u0026rdquo;)\nSolving Main Idea: if that\u0026rsquo;s the case, then we can split our models into a set of MDPs. Because \\(\\theta_{j}\\) change deterministically, we can have a MDP solved ONLINE over \\(X\\) and \\(T\\) for each possible initial \\(\\theta\\). 
Then, you just take the believe over \\(\\theta\\) and sample over the MDPs based on that belief.\nReward bonus To help coordination, we introduce a reward bonus\nexploration reward bonus, which encourages exploration (this helps coordinate) maintain a value \\(\\xi(b,x,a)\\) which is the number of times b,x,a is visited\u0026mdash;if it exceeds a number of times, clip reward bonus Whereby:\n\\begin{equation} RB(b,s,a) = \\beta \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) || b_{s} - b ||_{1} \\end{equation}\nwhich encourages information gain by encouraging exploring states with more \\(L_{1}\\) divergence in belief compared to our current belief.\nThen, we can formulate an augmented reward function \\(\\tilde{R}(b,s,a) = R(s,a) + RB(b,s,a)\\).\nSolution Finally, at each timestamp, we look at our observation and assume it does not change. This gives an MDP:\n\\begin{equation} \\tilde{V}^{*} (b,s) = \\max_{a} \\left\\{ \\tilde{R}(b,s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) \\tilde{V}^{*} (b,s\u0026rsquo;)\\right\\} \\end{equation}\nwhich we solve however we\u0026rsquo;d like. Authors used UCT.\nUCT ","html":"\u003cp\u003eWhat if our initial state never change or is deterministically changing? For instance, say, for localization. This should make solving a POMDP easier.\u003c/p\u003e\n\u003ch2 id=\"pomdp-lite\"\u003ePOMDP-lite\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) fully observable states\u003c/li\u003e\n\u003cli\u003e\\(\\theta\\) hidden parameter: finite amount of values \\(\\theta_{1 \\dots N}\\)\u003c/li\u003e\n\u003cli\u003ewhere \\(S = X \\times \\theta\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe then assume conditional independence between \\(x\\) and \\(\\theta\\). 
So: \\(T = P(x\u0026rsquo;|\\theta, x, a)\\), where \\(P(\\theta\u0026rsquo;|\\theta,x,a) = 1\\) (\u0026ldquo;our hidden parameter is known or deterministically changing\u0026rdquo;)\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMain Idea\u003c/strong\u003e\u003c/strong\u003e: if that\u0026rsquo;s the case, then we can split our models into a set of \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003es. Because \\(\\theta_{j}\\) change deterministically, we can have a MDP solved \u003cstrong\u003eONLINE\u003c/strong\u003e over \\(X\\) and \\(T\\) for each possible initial \\(\\theta\\). Then, you just take the believe over \\(\\theta\\) and sample over the MDPs based on that belief.\u003c/p\u003e\n\u003ch3 id=\"reward-bonus\"\u003eReward bonus\u003c/h3\u003e\n\u003cp\u003eTo help coordination, we introduce a reward bonus\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eexploration reward bonus, which encourages exploration (this helps coordinate)\u003c/li\u003e\n\u003cli\u003emaintain a value \\(\\xi(b,x,a)\\) which is the number of times b,x,a is visited\u0026mdash;if it exceeds a number of times, clip reward bonus\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nRB(b,s,a) = \\beta \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) || b_{s} - b ||_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich encourages information gain by encouraging exploring states with more \\(L_{1}\\) divergence in belief compared to our current belief.\u003c/p\u003e\n\u003cp\u003eThen, we can formulate an augmented reward function \\(\\tilde{R}(b,s,a) = R(s,a) + RB(b,s,a)\\).\u003c/p\u003e\n\u003ch3 id=\"solution\"\u003eSolution\u003c/h3\u003e\n\u003cp\u003eFinally, at each timestamp, we look at our observation and assume it does not change. 
This gives an \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tilde{V}^{*} (b,s) = \\max_{a} \\left\\{ \\tilde{R}(b,s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) \\tilde{V}^{*} (b,s\u0026rsquo;)\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich we solve however we\u0026rsquo;d like. Authors used \u003ca href=\"#uct\"\u003eUCT\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"uct\"\u003eUCT\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-06_09-54-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhpomdp_lite/","tags":null,"title":"POMDP-lite"},{"categories":null,"contents":"a class about POMDPs\nTheme Topics Robot dogs NeBula, AISR NeBula Applications POMDSoar, Offline Solvers PBVI, HSVI, Perseus Offline Solvers SARSOP, E-PCA, CALP Policy Graphs Hansen, MCVI, PGA Online Solvers AEMS, POMCP, DESPOT Moar Online Methods IS-DESPOT, POMCPOW, AdaOPS POMDPish MOMDP, POMDP-lite, rho-POMDPs Memoryless + Policy Search Sarsa (Lambda), JSJ, Pegasus Hierarchical Decomposition Option, MaxQ, LTRDP Hybrid Planning HybPlan, LetsDrive, BetaZero LQR + Shared Autonomy iLQR, Hindsight, TrustPOMDP Multi-Agent Factored MDPs, FV-POMCPs, G-DICE Other Content Research Tips STRIPS-style planning Temperal Abstraction Linear-Quadratic Regulator ","html":"\u003cp\u003ea class about \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eTheme\u003c/th\u003e\n\u003cth\u003eTopics\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eRobot dogs\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e, \u003ca href=\"/posts/kbhnebula/#aisr-nebula--kbhnebula-dot-md\"\u003eAISR 
NeBula\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eApplications\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhkolobov_2018/#pomdsoar\"\u003ePOMDSoar\u003c/a\u003e,\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOffline Solvers\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e, \u003ca href=\"/posts/kbhperseus/\"\u003ePerseus\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOffline Solvers\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e, \u003ca href=\"/posts/kbhe_pca/\"\u003eE-PCA\u003c/a\u003e, \u003ca href=\"/posts/kbhcalp/\"\u003eCALP\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ePolicy Graphs\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhhansen/\"\u003eHansen\u003c/a\u003e, \u003ca href=\"/posts/kbhmcvi/\"\u003eMCVI\u003c/a\u003e, \u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOnline Solvers\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaems/\"\u003eAEMS\u003c/a\u003e, \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e, \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMoar Online Methods\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhis_despot/\"\u003eIS-DESPOT\u003c/a\u003e, \u003ca href=\"/posts/kbhpomcpow/\"\u003ePOMCPOW\u003c/a\u003e, \u003ca href=\"/posts/kbhadaops/\"\u003eAdaOPS\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ePOMDPish\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmomdp/\"\u003eMOMDP\u003c/a\u003e, \u003ca href=\"/posts/kbhpomdp_lite/\"\u003ePOMDP-lite\u003c/a\u003e, \u003ca 
href=\"/posts/kbhrho_pomdps/\"\u003erho-POMDPs\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMemoryless + Policy Search\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsarsa_lambda/\"\u003eSarsa (Lambda)\u003c/a\u003e, \u003ca href=\"/posts/kbhjsj/\"\u003eJSJ\u003c/a\u003e, \u003ca href=\"/posts/kbhpegasus/\"\u003ePegasus\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eHierarchical Decomposition\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhoption/\"\u003eOption\u003c/a\u003e, \u003ca href=\"/posts/kbhmaxq/\"\u003eMaxQ\u003c/a\u003e, \u003ca href=\"/posts/kbhltrdp/\"\u003eLTRDP\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eHybrid Planning\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhhybplan/\"\u003eHybPlan\u003c/a\u003e, \u003ca href=\"/posts/kbhletsdrive/\"\u003eLetsDrive\u003c/a\u003e, \u003ca href=\"/posts/kbhbetazero/\"\u003eBetaZero\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLQR + Shared Autonomy\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhilqr/\"\u003eiLQR\u003c/a\u003e, \u003ca href=\"/posts/kbhhindsight_optimization/\"\u003eHindsight\u003c/a\u003e, \u003ca href=\"/posts/kbhtrustpomdp/\"\u003eTrustPOMDP\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMulti-Agent\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfactored_mdps/\"\u003eFactored MDPs\u003c/a\u003e, \u003ca href=\"/posts/kbhfv_pomcps/\"\u003eFV-POMCPs\u003c/a\u003e, \u003ca href=\"/posts/kbhg_dice/\"\u003eG-DICE\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"other-content\"\u003eOther Content\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhresearch_tips/\"\u003eResearch Tips\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstrips_style_planning/\"\u003eSTRIPS-style 
planning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtemperal_abstraction/\"\u003eTemperal Abstraction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_quadratic_regulator/\"\u003eLinear-Quadratic Regulator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomdps_index/","tags":null,"title":"POMDPs Index"},{"categories":null,"contents":" closed class words - words with fixed memberships (prepositions, conjunctivas, etc.); not being created or added much, used for grammatical function open class words - words that are set as content, and are focused on content ","html":"\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eclosed class\u003c/strong\u003e words - words with fixed memberships (prepositions, conjunctivas, etc.); not being created or added much, used for grammatical function\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eopen class\u003c/strong\u003e words - words that are set as content, and are focused on content\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpos_tagging/","tags":null,"title":"POS Tagging"},{"categories":null,"contents":"For some \\(a \\in \\mathbb{F}\\), we define \\(a^m\\) to be \\(a\\) multiplied with itself \\(m\\) times.\nadditional information \\((a^m)^n = a^{mn}\\) \\((ab)^m = a^mb^m\\) ","html":"\u003cp\u003eFor some \\(a \\in \\mathbb{F}\\), we define \\(a^m\\) to be \\(a\\) multiplied with itself \\(m\\) times.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\((a^m)^n = a^{mn}\\)\u003c/li\u003e\n\u003cli\u003e\\((ab)^m = a^mb^m\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_math/","tags":null,"title":"power (math)"},{"categories":null,"contents":"a power series centered at \\(a\\) is defined with \\(c_{n} \\in \\mathbb{R}\\), whereby:\n\\begin{equation} f(x) = \\sum_{n=0}^{\\infty} 
c_{n}(x-a)^{n} \\end{equation}\nmeaning it is written as \\(c_0 + c_1(x-a) + c_2(x-a)^{2} + c_3 (x-a)^{3} + \\cdots\\)\nradius of convergence there is a radius of convergence \\(R \\geq 0\\) for any power series, possibly infinite, by which the series is absolutely convergent where \\(|x-a| \u0026lt; R\\), and it does not converge when \\(|x-a| \u0026gt; R\\) , the case where \\(|x-a| = R\\) is uncertain ratio test: if all coefficients \\(c_{n}\\) are nonzero, and some \\(\\lim_{n \\to \\infty} \\left| \\frac{c_{n}}{c_{n+1}} \\right|\\) evaluates to some \\(c\\) \u0026mdash; if \\(c\\) is positive or \\(+\\infty\\), then that limit is equivalent to the radius of convergence Taylor\u0026rsquo;s Formula: a power series \\(f(x)\\) can be differentiated, integrated on the bounds of \\((a-R, a+R)\\), the derivatives and integrals will have radius of convergence \\(R\\) and \\(c_{n} = \\frac{f^{(n)}(a)}{n!}\\) to construct the series linear combinations of power series When \\(\\sum_{n=0}^{\\infty} a_{n}\\) and \\(\\sum_{n=0}^{\\infty} b_{n}\\) are both convergent, linear combinations of them can be described in the usual fashion:\n\\begin{equation} c_1 \\sum_{n=0}^{\\infty} a_{n}+ c_2 \\sum_{n=0}^{\\infty} b_{n} = \\sum_{n=0}^{\\infty} c_1 a_{n} + c_2 b_{n} \\end{equation}\nsome power series geometric series \\begin{equation} 1 + r + r^{2} + r^{3} + \\dots = \\sum_{n=0}^{\\infty} r^{n} = \\frac{1}{1-r} \\end{equation}\nwhich converges \\(-1 \u0026lt; r \u0026lt; 1\\), and diverges otherwise.\nexponential series \\begin{equation} 1 + x + \\frac{x^{2}}{2!} + \\frac{x^{3}}{3!} + \\dots = \\sum_{n=0}^{\\infty} \\frac{x^{n}}{n!} = e^{x} \\end{equation}\nwhich converges for all \\(x \\in \\mathbb{R}\\).\nabsolutely convergent If:\n\\begin{equation} \\sum_{n=0}^{\\infty} |a_{n}| \\end{equation}\nconverges, then:\n\\begin{equation} \\sum_{n=0}^{\\infty} a_{n} \\end{equation}\nalso converges.\nThis situation is called absolutely convergent.\n","html":"\u003cp\u003ea \u003ca 
href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e centered at \\(a\\) is defined with \\(c_{n} \\in \\mathbb{R}\\), whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{n=0}^{\\infty} c_{n}(x-a)^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning it is written as \\(c_0 + c_1(x-a) + c_2(x-a)^{2} + c_3 (x-a)^{3} + \\cdots\\)\u003c/p\u003e\n\u003ch2 id=\"radius-of-convergence\"\u003eradius of convergence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethere is a \u003ca href=\"#radius-of-convergence\"\u003eradius of convergence\u003c/a\u003e \\(R \\geq 0\\) for any \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e, possibly infinite, by which the series is absolutely convergent where \\(|x-a| \u0026lt; R\\), and it does not converge when \\(|x-a| \u0026gt; R\\) , the case where \\(|x-a| = R\\) is uncertain\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#radius-of-convergence\"\u003eratio test\u003c/a\u003e: if all coefficients \\(c_{n}\\) are nonzero, and some \\(\\lim_{n \\to \\infty} \\left| \\frac{c_{n}}{c_{n+1}} \\right|\\) evaluates to some \\(c\\) \u0026mdash; if \\(c\\) is positive or \\(+\\infty\\), then that limit is equivalent to the radius of convergence\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#radius-of-convergence\"\u003eTaylor\u0026rsquo;s Formula\u003c/a\u003e: a power series \\(f(x)\\) can be differentiated, integrated on the bounds of \\((a-R, a+R)\\), the derivatives and integrals will have radius of convergence \\(R\\) and \\(c_{n} = \\frac{f^{(n)}(a)}{n!}\\) to construct the series\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"linear-combination--kbhlinear-combination-dot-md--s-of-power-series--kbhpower-series-o-dot-md\"\u003e\u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es of \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWhen \\(\\sum_{n=0}^{\\infty} a_{n}\\) and \\(\\sum_{n=0}^{\\infty} 
b_{n}\\) are \u003cstrong\u003eboth convergent\u003c/strong\u003e, linear combinations of them can be described in the usual fashion:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 \\sum_{n=0}^{\\infty} a_{n}+ c_2 \\sum_{n=0}^{\\infty} b_{n} = \\sum_{n=0}^{\\infty} c_1 a_{n} + c_2 b_{n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"some-power-series--kbhpower-series-o-dot-md\"\u003esome \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e\u003c/h2\u003e\n\u003ch3 id=\"geometric-series\"\u003egeometric series\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n1 + r + r^{2} + r^{3} + \\dots = \\sum_{n=0}^{\\infty} r^{n} = \\frac{1}{1-r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich converges \\(-1 \u0026lt; r \u0026lt; 1\\), and diverges otherwise.\u003c/p\u003e\n\u003ch3 id=\"exponential-series\"\u003eexponential series\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n1 + x + \\frac{x^{2}}{2!} + \\frac{x^{3}}{3!} + \\dots = \\sum_{n=0}^{\\infty} \\frac{x^{n}}{n!} = e^{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich converges for all \\(x \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003ch2 id=\"absolutely-convergent\"\u003eabsolutely convergent\u003c/h2\u003e\n\u003cp\u003eIf:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} |a_{n}|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003econverges, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} a_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ealso converges.\u003c/p\u003e\n\u003cp\u003eThis situation is called \u003ca href=\"#absolutely-convergent\"\u003eabsolutely convergent\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_series_o/","tags":null,"title":"power series"},{"categories":null,"contents":"We can now use power series to also solve differential equations.\n\\begin{equation} \\dv{x}{t} = 0; x(0)=1 \\end{equation}\nWe wish to have a power-series solution of shape:\n\\begin{equation} x(t) = \\sum_{k=0}^{\\infty 
}a_{k}t^{k} \\end{equation}\nWe want to find the coefficients \\(a_{k}\\). If you can find such a function that fits this form, they both 1) converge and 20 behave the same way as \\(e^{x}\\) does in Simple Differential Equations.\nanalytic functions Functions which can be described with a power series are called analytic functions.\n","html":"\u003cp\u003eWe can now use \u003ca href=\"/posts/kbhpower_series/\"\u003epower series\u003c/a\u003e to also solve \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = 0; x(0)=1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe wish to have a power-series solution of shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = \\sum_{k=0}^{\\infty }a_{k}t^{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to find the coefficients \\(a_{k}\\). If you can find such a function that fits this form, they both 1) converge and 20 behave the same way as \\(e^{x}\\) does in \u003ca href=\"/posts/kbhsimple_differential_equations/\"\u003eSimple Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"analytic-functions\"\u003eanalytic functions\u003c/h2\u003e\n\u003cp\u003eFunctions which can be described with a \u003ca href=\"/posts/kbhpower_series/\"\u003epower series\u003c/a\u003e are called \u003ca href=\"#analytic-functions\"\u003eanalytic functions\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_series/","tags":null,"title":"power series to solve differential equations"},{"categories":null,"contents":"power utility, or isoelastic utility, is a financial econometric is a utility that results absolute, constant relative risk aversion. 
i.e.: you tell me how risk averse you are exogenously, I tell you how much utility some consumption is.\nconstituents some relative risk coefficient \\(\\gamma \\in (0,1)\\), higher more risk averse consumption of some asset \\(C\\) requirements Utility \\(U( C)\\) is defined by:\n\\begin{equation} U( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma} \\end{equation}\nadditional information As you can see, the higher \\(\\gamma\\), the lower utility some consumption brings.\nlog utility log utility is a special case of power utility whereby:\n\\begin{equation} U(x) = \\log x \\end{equation}\nwhich converges to power utility where \\(\\lambda \\to 1\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e, or \u003ca href=\"/posts/kbhpower_utility/\"\u003eisoelastic utility\u003c/a\u003e, is a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003efinancial econometric\u003c/a\u003e is a utility that results absolute, constant relative risk aversion. i.e.: you tell me how risk averse you are exogenously, I tell you how much utility some consumption is.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esome relative risk coefficient \\(\\gamma \\in (0,1)\\), higher more risk averse\u003c/li\u003e\n\u003cli\u003econsumption of some asset \\(C\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eUtility \\(U( C)\\) is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eAs you can see, the higher \\(\\gamma\\), the lower utility some consumption brings.\u003c/p\u003e\n\u003ch3 id=\"log-utility\"\u003elog utility\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#log-utility\"\u003elog utility\u003c/a\u003e is a special case of \u003ca 
href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(x) = \\log x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich converges to \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e where \\(\\lambda \\to 1\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_utility/","tags":null,"title":"power utility"},{"categories":null,"contents":"We use interrupts to implement preemption, \u0026ldquo;preempting\u0026rdquo; threads in order to swap on another thread to CPU. This enables scheduling to happen.\npreempting into a brand new thread IMPORTANT: because interrupts are disabled at the beginning of the interrupt handler, and re-enabled by the end, new threads (which starts not at the interrupt handle) will not re-enable interrupts.\nvoid interrupt_handler() { /* disables interupts, automatically by timer handler */ // future spawns start here context_switch(...); /* enables interupts, automatically by timer handler */ } void threadfunc_wrapper() { // manually enable interrupts before first run intr_enable(true); // start thread\u0026#39;s actual business threadfunc(); } ","html":"\u003cp\u003eWe use \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003es to implement \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e, \u0026ldquo;\u003ca href=\"/posts/kbhpreemption/\"\u003epreempting\u003c/a\u003e\u0026rdquo; threads in order to swap on another thread to CPU. 
This enables \u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e to happen.\u003c/p\u003e\n\u003ch2 id=\"preempting-into-a-brand-new-thread\"\u003epreempting into a brand new thread\u003c/h2\u003e\n\u003cp\u003eIMPORTANT: because \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003es are disabled at the beginning of the interrupt handler, and re-enabled by the \u003cstrong\u003eend\u003c/strong\u003e, new threads (which starts not at the interrupt handle) will not re-enable interrupts.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003einterrupt_handler\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e/* disables interupts, automatically by timer handler */\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// future spawns start here\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econtext_switch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e/* enables interupts, automatically by timer handler */\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ethreadfunc_wrapper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// manually enable interrupts before first run\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// start thread\u0026#39;s actual business\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreadfunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhpreemption/","tags":null,"title":"preemption"},{"categories":null,"contents":"Problems of pre-training data pre-training influence downstream capabilities \u0026hellip;and therefore can escape into model generation real world users expect novelty Changes in Distribution Big Pretraining Data GPT2 deduplicated data Removed Wikipedia (to prevent data leak) Heuristic based cleaning GPT3 Deduplicated based on leaked data Llama the usual spheal\nremoved high perplexity data using wiki n-gram model removed non-English deduplicated Llama 2 removed high volue of PII Removed non-english Pretraining Curation Decisions what to include what is the timestamp being scraped heuristic based cleaning? data cleaning? etc. language filtering (only take English?) PII removal dedup Toxicity + SafeURL filtering \u0026ldquo;quality filtering\u0026rdquo; sampling distributions Change in Model Age Good alignment shown between validation year and pre-training year, even mixing in older data.\nImplication: \u0026ldquo;fine-tuned T5 may still be worse than fine-tuned llama, because T5 was pretrained using older data\u0026mdash;despite even if FTing is newer\u0026rdquo;\nChange in Toxicity Filtering toxicity made the model worst at spotting toxicity.\nChange in Data Distribution out of domain answers do worse on out of domain results\nReduce Memorization de-duplication using approximate matching think carefully for multiple-epoch training (what is ok to memorize?) 
remove sensitive memorization from pre-training data Two iffy strategies:\nCheck for memorization Trivial style transfers can get around safety checks \u0026ldquo;do the [copyrighted thing] in French\u0026rdquo;; \u0026ldquo;do the [copyrighted thing] with double the spaces\u0026rdquo;.\nUse RLHF or something \u0026ldquo;hide flaws, and not eliminate them\u0026rdquo;\u0026mdash;edge case problems doesn\u0026rsquo;t eliminate the underlying vulnerability.\n","html":"\u003ch2 id=\"problems-of-pre-training-data\"\u003eProblems of pre-training data\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003epre-training influence downstream capabilities\u003c/li\u003e\n\u003cli\u003e\u0026hellip;and therefore can escape into model generation\u003c/li\u003e\n\u003cli\u003ereal world users expect novelty\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"changes-in-distribution\"\u003eChanges in Distribution\u003c/h2\u003e\n\u003ch3 id=\"big-pretraining-data\"\u003eBig Pretraining Data\u003c/h3\u003e\n\u003ch4 id=\"gpt2\"\u003eGPT2\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ededuplicated data\u003c/li\u003e\n\u003cli\u003eRemoved Wikipedia (to prevent data leak)\u003c/li\u003e\n\u003cli\u003eHeuristic based cleaning\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"gpt3\"\u003eGPT3\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eDeduplicated\u003c/li\u003e\n\u003cli\u003ebased on leaked data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"llama\"\u003eLlama\u003c/h4\u003e\n\u003cp\u003ethe usual spheal\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eremoved high perplexity data using wiki n-gram model\u003c/li\u003e\n\u003cli\u003eremoved non-English\u003c/li\u003e\n\u003cli\u003ededuplicated\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"llama-2\"\u003eLlama 2\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eremoved high volue of PII\u003c/li\u003e\n\u003cli\u003eRemoved non-english\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pretraining-curation-decisions\"\u003ePretraining Curation 
Decisions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewhat to include\u003c/li\u003e\n\u003cli\u003ewhat is the timestamp being scraped\u003c/li\u003e\n\u003cli\u003eheuristic based cleaning? data cleaning? etc.\u003c/li\u003e\n\u003cli\u003elanguage filtering (only take English?)\u003c/li\u003e\n\u003cli\u003ePII removal\u003c/li\u003e\n\u003cli\u003ededup\u003c/li\u003e\n\u003cli\u003eToxicity + SafeURL filtering\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;quality filtering\u0026rdquo;\u003c/li\u003e\n\u003cli\u003esampling distributions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"change-in-model-age\"\u003eChange in Model Age\u003c/h3\u003e\n\u003cp\u003eGood alignment shown between validation year and pre-training year, even mixing in older data.\u003c/p\u003e\n\u003cp\u003eImplication: \u0026ldquo;fine-tuned T5 may still be worse than fine-tuned llama, because T5 was \u003cstrong\u003epretrained\u003c/strong\u003e using older data\u0026mdash;despite even if FTing is newer\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"change-in-toxicity\"\u003eChange in Toxicity\u003c/h3\u003e\n\u003cp\u003eFiltering toxicity made the model worst at spotting toxicity.\u003c/p\u003e\n\u003ch3 id=\"change-in-data-distribution\"\u003eChange in Data Distribution\u003c/h3\u003e\n\u003cp\u003eout of domain answers do worse on out of domain results\u003c/p\u003e\n\u003ch2 id=\"reduce-memorization\"\u003eReduce Memorization\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ede-duplication using \u003cstrong\u003eapproximate matching\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ethink carefully for multiple-epoch training (what is ok to memorize?)\u003c/li\u003e\n\u003cli\u003eremove sensitive memorization from pre-training data\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTwo iffy strategies:\u003c/p\u003e\n\u003ch3 id=\"check-for-memorization\"\u003eCheck for memorization\u003c/h3\u003e\n\u003cp\u003eTrivial style transfers can get around safety checks \u0026ldquo;do the [copyrighted 
thing] in French\u0026rdquo;; \u0026ldquo;do the [copyrighted thing] with double the spaces\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"use-rlhf-or-something\"\u003eUse RLHF or something\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;hide flaws, and not eliminate them\u0026rdquo;\u0026mdash;edge case problems doesn\u0026rsquo;t eliminate the underlying vulnerability.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpretraining_data/","tags":null,"title":"Pretraining Data"},{"categories":null,"contents":"Unfortunately, this note is not published online.\n","html":"\u003cp\u003eUnfortunately, this note is not published online.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpretraining_long_transformers/","tags":null,"title":"Pretraining Long Transformers"},{"categories":null,"contents":"The price\n","html":"\u003cp\u003eThe price\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprice/","tags":null,"title":"price"},{"categories":null,"contents":"An integer \\(p \u0026gt; 1\\) is prime if it has no positive divisors other than \\(1\\) and itself.\nNo even number, except \\(2\\), is prime. Because 2\nadditional information There are infinitely many primes Credit: Euler.\nProof:\nAssume to the contrary that there are finitely many primes. \\(p_1, \u0026hellip;, p_{n}\\). We desire to make a new prime to reach contradiction.\nConsider:\n\\begin{equation} N = p_1 \\times \\dots \\times p_{n} + 1 \\end{equation}\nNote that \\(p_1 \\times \u0026hellip; \\times p_{n}\\) is divisible by each of the \\(p_{j}\\). If some \\(p_i |N\\), \\(p_{i}|1\\), which is impossible as \\(1\\) is not divisible by anything. So, no \\(p_{i}\\) divides \\(N\\).\nIf \\(N\\) is now prime, we are done as it is not in the list of \\(p_{j}\\). If not, pick any prime divisor \\(p\\) of \\(N\\). We will note that given no \\(p_{j}\\) divides \\(N\\), therefore any prime divisor is a new prime.\nHaving made a new prime, we reach contradiction. 
\\(\\blacksquare\\)\ncoprime Two integers \\(a, b\\) is considered coprime if \\(\\gcd (a,b) = 1\\). Therefore, because greatest common divisor is a linear combination\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e \\(p \u0026gt; 1\\) is \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e if it has no positive \u003ca href=\"/posts/kbhdivide/\"\u003edivisor\u003c/a\u003es other than \\(1\\) and itself.\u003c/p\u003e\n\u003cp\u003eNo even \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e, except \\(2\\), is prime. Because 2\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"there-are-infinitely-many-primes\"\u003eThere are infinitely many primes\u003c/h3\u003e\n\u003cp\u003eCredit: Euler.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eAssume to the contrary that there are finitely many \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003es. \\(p_1, \u0026hellip;, p_{n}\\). We desire to make a new prime to reach contradiction.\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN = p_1 \\times \\dots \\times p_{n} + 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote that \\(p_1 \\times \u0026hellip; \\times p_{n}\\) is divisible by each of the \\(p_{j}\\). If some \\(p_i |N\\), \\(p_{i}|1\\), which is impossible as \\(1\\) is not divisible by anything. So, no \\(p_{i}\\) divides \\(N\\).\u003c/p\u003e\n\u003cp\u003eIf \\(N\\) is now prime, we are done as it is not in the list of \\(p_{j}\\). If not, pick any \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e divisor \\(p\\) of \\(N\\). We will note that given no \\(p_{j}\\) divides \\(N\\), therefore any \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e divisor is a new prime.\u003c/p\u003e\n\u003cp\u003eHaving made a new prime, we reach contradiction. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"coprime\"\u003ecoprime\u003c/h3\u003e\n\u003cp\u003eTwo integers \\(a, b\\) is considered \u003ca href=\"#coprime\"\u003ecoprime\u003c/a\u003e if \\(\\gcd (a,b) = 1\\). Therefore, because \u003ca href=\"/posts/kbhgreatest_common_divisor/#greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprime/","tags":null,"title":"prime"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhprime_factorization/","tags":null,"title":"prime factorization"},{"categories":null,"contents":"The principle of induction is a technique used to prove the relationship between a smaller subset\nThe following three statements are equivalent.\nstandard induction Suppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(n \\in S \\implies n+1 \\in S\\). Then, \\(S = \\mathbb{N}\\).\nstrong induction Suppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(\\{0, \\dots, n\\} \\in S \\implies n+1 \\in S\\). Then, \\(S = \\mathbb{N}\\).\nwell-ordering principle If \\(S \\subset \\mathbb{N}\\) is non empty, then it has a smallest element\nPROOF: assume well-ordering principle, prove standard induction Given \\(S \\in \\mathbb{N}\\), such that \\(0 \\in S\\), whenever \\(n \\in S\\), then \\(n+1\\) is also in \\(S\\). We desire that that \\(S\\) is the natural numbers.\nAssume for the sake of contradiction \\(S \\neq \\mathbb{N}\\). Define \\(T = \\mathbb{N} \\setminus S\\).\nAssume \\(T\\) is non-empty. The WOP tells us that \\(T\\) has a smallest element \\(t \\in T\\). We know that \\(t \\neq 0\\), because \\(0 \\in S\\). Therefore, \\(t-1 \\in \\mathbb{N}\\). 
But, \\(t-1 \u0026lt; t\\), which means that \\(t-1 \\in S\\). But by the statement of the givens, \\((t-1) \\in S \\implies (t-1)+1 = t \\in S\\). Reaching contradiction. \\(\\blacksquare\\)\nassuming strong induction, proof well-ordering principle Assume \\(S\\) has no smallest element. Create some \\(T = \\mathbb{N} \\setminus S\\). Now, \\(0 \\in T\\) because otherwise \\(0 \\in S\\) would be the smallest element. Now, consider \\(0, 1, \u0026hellip; n \\in T\\), we notice that \\(n+1\\) must be in \\(T\\) as well. By strong induction, we have that \\(T = \\mathbb{N}\\) and \\(S\\) is empty.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e is a technique used to prove the relationship between a smaller subset\u003c/p\u003e\n\u003cp\u003eThe following three statements are equivalent.\u003c/p\u003e\n\u003ch2 id=\"standard-induction--kbhprinciple-of-induction-dot-md\"\u003estandard \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003einduction\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(n \\in S \\implies n+1 \\in S\\). Then, \\(S = \\mathbb{N}\\).\u003c/p\u003e\n\u003ch2 id=\"strong-induction--kbhstrong-induction-dot-md\"\u003e\u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(\\{0, \\dots, n\\} \\in S \\implies n+1 \\in S\\). 
Then, \\(S = \\mathbb{N}\\).\u003c/p\u003e\n\u003ch2 id=\"well-ordering-principle\"\u003ewell-ordering principle\u003c/h2\u003e\n\u003cp\u003eIf \\(S \\subset \\mathbb{N}\\) is non empty, then it has a smallest element\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003ePROOF:\u003c/h2\u003e\n\u003ch3 id=\"assume-well-ordering-principle--org0837e14--prove-standard-induction\"\u003eassume \u003ca href=\"#well-ordering-principle\"\u003ewell-ordering principle\u003c/a\u003e, prove standard induction\u003c/h3\u003e\n\u003cp\u003eGiven \\(S \\in \\mathbb{N}\\), such that \\(0 \\in S\\), whenever \\(n \\in S\\), then \\(n+1\\) is also in \\(S\\). We desire that that \\(S\\) is the \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction \\(S \\neq \\mathbb{N}\\). Define \\(T = \\mathbb{N} \\setminus S\\).\u003c/p\u003e\n\u003cp\u003eAssume \\(T\\) is non-empty. The \u003ca href=\"#well-ordering-principle\"\u003eWOP\u003c/a\u003e tells us that \\(T\\) has a smallest element \\(t \\in T\\). We know that \\(t \\neq 0\\), because \\(0 \\in S\\). Therefore, \\(t-1 \\in \\mathbb{N}\\). But, \\(t-1 \u0026lt; t\\), which means that \\(t-1 \\in S\\). But by the statement of the givens, \\((t-1) \\in S \\implies (t-1)+1 = t \\in S\\). Reaching contradiction. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"assuming-strong-induction--kbhstrong-induction-dot-md--proof-well-ordering-principle--org0837e14\"\u003eassuming \u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e, proof \u003ca href=\"#well-ordering-principle\"\u003ewell-ordering principle\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eAssume \\(S\\) has no smallest element. Create some \\(T = \\mathbb{N} \\setminus S\\). Now, \\(0 \\in T\\) because otherwise \\(0 \\in S\\) would be the smallest element. Now, consider \\(0, 1, \u0026hellip; n \\in T\\), we notice that \\(n+1\\) must be in \\(T\\) as well. 
By \u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e, we have that \\(T = \\mathbb{N}\\) and \\(S\\) is empty.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprinciple_of_induction/","tags":null,"title":"principle of induction"},{"categories":null,"contents":"printf(\u0026#34;text %s\\n\u0026#34;, formatting, text, here); %s (string) %d (integer) %f (double) ","html":"\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;text %s\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eformatting\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etext\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ehere\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003e\u003ccode\u003e%s\u003c/code\u003e (string)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e%d\u003c/code\u003e (integer)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e%f\u003c/code\u003e (double)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhc_basic_operations/","tags":null,"title":"printf"},{"categories":null,"contents":"\u0026ldquo;privacy as an individual right\u0026rdquo;\nprivacy is a control of information: controlling our private 
information shared with others free choice with alternatives and informed understanding of what\u0026rsquo;s offered control over personal data collection and aggregation privacy as autonomy: your agency to decide for what\u0026rsquo;s valuable autonomy over our own lives, and our ability to lead them do you have agency? \u0026ldquo;privacy as a social group\u0026rdquo;\nprivacy as social good: social life would be severely compromised without privacy privacy allows social privacy as a display of trust: privacy enables trusting relationships \u0026ldquo;fiduciary\u0026rdquo;: proxy between you and a company \u0026ldquo;should anyone who has access to personal info have a fiduciary responsibility?\u0026rdquo; key trust questions who/what do we trust? what do we do if trust isn\u0026rsquo;t upheald? how to approach building trust trust trust: to stop questioning the responsibility of something\nintentions dependence extensions of agency We mostly don\u0026rsquo;t trust software; instead, we trust the people that developed the software.\naccountability a lot of people who are accountable in this chain:\nhardware designer (intel) OS developer (iOS, ec.) app developer users stakeholder direct stakeholders (people who are operating, technicians, etc.) 
indirect stakeholders: patients purchase = long-term support \u0026mdash;- what do you do to get it fixed/repaired.\ntime support duration obsolescence (how long is the end of support) products/services may not be garanteed forever problems with halting use\u0026mdash;requires deleting entire pentagram account meltdown vulnerability meltdown: hardware vulnerability that allows an user program to access kernel level pages of system memory.\npotential ways of fixing a vulnerability/violation of trust:\nhttps://www.cs.cmu.edu/~rdriley/487/papers/Thompson_1984_ReflectionsonTrustingTrust.pdf\nloss of privacy aggregation Through the loss of privacy, information can be piecemeal built up to understand somebody\u0026rsquo;s profile.\nexclusion Not knowing or understanding or control how our information being used.\nsecondary use Using information for purposes not intended without permission.\ntrust trust exposes people to the risk of being betrayed/let down. Differential privacy is used to anonomyze information. 
especially, for operation systems, each bug can have a massive impact because it impacts billions of users.\n\u0026ldquo;trust means to stop questioning the dependability of something; you become vulnerable to it\u0026rdquo;\ntrusting software is the task of extending your own AGENCY to a piece of software: \u0026ldquo;agential gullibility\u0026rdquo;.\nexamples:\nios bug: alams didn\u0026rsquo;t go off printnightmare: printing caused remote code execution 2017 admin access without password eternalblue (caused wannacry) key points trust between different stakeholders are intertwined trust is about extending agency trust emerges through various pathways we can design ways to partially substitute the need for trust pathways to trust trust by assumption trust absent any clues to warrent it due to timing trust because there is imminent danger trust by inference trust based on information you had before brands affiliation past performance trust in prior version of software trust by substitution trust something, but having a fallback plan trust a system because there would be a backup system protecting you scales of trust scale of impact a bug in an OS can be tremendously bad \u0026ldquo;root access\u0026rdquo; \u0026mdash; privileged aces scale of longevity people maybe on very very old OS it requires keeping older OSes secure against modern technologies ","html":"\u003cp\u003e\u0026ldquo;privacy as an individual right\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e is a control of information: controlling our private information shared with others\n\u003cul\u003e\n\u003cli\u003efree choice with alternatives and informed understanding of what\u0026rsquo;s offered\u003c/li\u003e\n\u003cli\u003econtrol over personal data collection and aggregation\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e as autonomy: your agency to 
decide for what\u0026rsquo;s valuable\n\u003cul\u003e\n\u003cli\u003eautonomy over our own lives, and our ability to lead them\u003c/li\u003e\n\u003cli\u003edo you have agency?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;privacy as a social group\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e as social good: social life would be severely compromised without privacy\n\u003cul\u003e\n\u003cli\u003eprivacy allows social\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e as a display of trust: privacy enables trusting relationships\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;fiduciary\u0026rdquo;: proxy between you and a company\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;should anyone who has access to personal info have a fiduciary responsibility?\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-trust-questions\"\u003ekey trust questions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewho/what do we trust?\u003c/li\u003e\n\u003cli\u003ewhat do we do if trust isn\u0026rsquo;t upheald?\u003c/li\u003e\n\u003cli\u003ehow to approach building trust\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"trust\"\u003etrust\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003etrust\u003c/strong\u003e: to stop questioning the responsibility of something\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eintentions\u003c/li\u003e\n\u003cli\u003edependence\u003c/li\u003e\n\u003cli\u003eextensions of agency\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe mostly don\u0026rsquo;t trust software; instead, we trust the people that developed the software.\u003c/p\u003e\n\u003ch2 id=\"accountability\"\u003eaccountability\u003c/h2\u003e\n\u003cp\u003ea lot of people who are accountable in this chain:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehardware designer 
(intel)\u003c/li\u003e\n\u003cli\u003eOS developer (iOS, ec.)\u003c/li\u003e\n\u003cli\u003eapp developer\u003c/li\u003e\n\u003cli\u003eusers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stakeholder\"\u003estakeholder\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003edirect stakeholders\u003c/strong\u003e (people who are operating, technicians, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eindirect stakeholders\u003c/strong\u003e: patients\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003epurchase = long-term support \u0026mdash;- what do you do to get it fixed/repaired.\u003c/p\u003e\n\u003ch2 id=\"time\"\u003etime\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esupport duration\u003c/li\u003e\n\u003cli\u003eobsolescence (how long is the end of support)\n\u003cul\u003e\n\u003cli\u003eproducts/services may not be garanteed forever\u003c/li\u003e\n\u003cli\u003eproblems with halting use\u0026mdash;requires deleting entire pentagram account\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"meltdown-vulnerability\"\u003emeltdown vulnerability\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003emeltdown\u003c/strong\u003e: hardware vulnerability that allows an user program to access kernel level pages of system memory.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003epotential ways of fixing a vulnerability/violation of trust\u003c/strong\u003e:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.cs.cmu.edu/~rdriley/487/papers/Thompson_1984_ReflectionsonTrustingTrust.pdf\"\u003ehttps://www.cs.cmu.edu/~rdriley/487/papers/Thompson_1984_ReflectionsonTrustingTrust.pdf\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"loss-of-privacy\"\u003eloss of privacy\u003c/h2\u003e\n\u003ch3 id=\"aggregation\"\u003eaggregation\u003c/h3\u003e\n\u003cp\u003eThrough the loss of privacy, information can be piecemeal built up to understand somebody\u0026rsquo;s profile.\u003c/p\u003e\n\u003ch3 
id=\"exclusion\"\u003eexclusion\u003c/h3\u003e\n\u003cp\u003eNot knowing or understanding or control how our information being used.\u003c/p\u003e\n\u003ch3 id=\"secondary-use\"\u003esecondary use\u003c/h3\u003e\n\u003cp\u003eUsing information for purposes not intended without permission.\u003c/p\u003e\n\u003ch2 id=\"trust\"\u003etrust\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#trust\"\u003etrust\u003c/a\u003e exposes people to the risk of being betrayed/let down. Differential privacy is used to anonomyze information. especially, for operation systems, each bug can have a massive impact because it impacts billions of users.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"#trust\"\u003etrust\u003c/a\u003e means to stop questioning the dependability of something; you become vulnerable to it\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#trust\"\u003etrust\u003c/a\u003eing software is the task of extending your own \u003cstrong\u003eAGENCY\u003c/strong\u003e to a piece of software: \u0026ldquo;agential gullibility\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eexamples:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eios bug: alams didn\u0026rsquo;t go off\u003c/li\u003e\n\u003cli\u003eprintnightmare: printing caused remote code execution\u003c/li\u003e\n\u003cli\u003e2017 admin access without password\u003c/li\u003e\n\u003cli\u003eeternalblue (caused wannacry)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"key-points\"\u003ekey points\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003etrust between different stakeholders are intertwined\u003c/li\u003e\n\u003cli\u003etrust is about extending agency\u003c/li\u003e\n\u003cli\u003etrust emerges through \u003ca href=\"#pathways-to-trust\"\u003evarious pathways\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe can design ways to partially substitute the need for trust\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pathways-to-trust\"\u003epathways to trust\u003c/h3\u003e\n\u003ch4 id=\"trust-by-assumption\"\u003etrust by 
assumption\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003etrust absent any clues to warrent it due to timing\u003c/li\u003e\n\u003cli\u003etrust because there is imminent danger\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"trust-by-inference\"\u003etrust by inference\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003etrust based on information you had before\n\u003cul\u003e\n\u003cli\u003ebrands\u003c/li\u003e\n\u003cli\u003eaffiliation\u003c/li\u003e\n\u003cli\u003epast performance\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003etrust in prior version of software\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"trust-by-substitution\"\u003etrust by substitution\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003etrust something, but having a fallback plan\u003c/li\u003e\n\u003cli\u003etrust a system because there would be a backup system protecting you\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"scales-of-trust\"\u003escales of trust\u003c/h3\u003e\n\u003ch4 id=\"scale-of-impact\"\u003escale of impact\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ea bug in an OS can be tremendously bad\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;root access\u0026rdquo; \u0026mdash; privileged aces\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"scale-of-longevity\"\u003escale of longevity\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003epeople maybe on very very old OS\u003c/li\u003e\n\u003cli\u003eit requires keeping older OSes secure against modern technologies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprivacy/","tags":null,"title":"privacy"},{"categories":null,"contents":"probability of an event is the proportion of times the event occurs in many repeated trials. It is \u0026ldquo;our belief that an event \\(E\\) occurs\u0026rdquo;.\nFrequentist Definition of Probability That is, it is a number between \\(0-1\\). 
Whereby:\n\\begin{equation} P(E) = \\lim_{n \\to \\infty} \\frac{n(E)}{n} \\end{equation}\n\u0026ldquo;frequentist definition of probability\u0026rdquo;\nprobability is the ratio between the number of times \\(E\\) occurring \\(n(E)\\) divided by the number of times you did the thing \\(n\\). This system converge because of the law of large numbers.\nuncertainty and probability Say you are training some kind of model. When it says \\(0.8\\) for motorcycle, its not that there are \\(80\\%\\) chance that there\u0026rsquo;s a motorcycle there. Its that the model is \\(80\\%\\) confident that there\u0026rsquo;s a motorcycle.\nProbability can not only represent the world, but our understanding of the world\naxiom of probability \\(0 \\leq P(E) \\leq 1\\) \\(P(S) = 1\\), where \\(S\\) is the sample space if \\(E\\) and \\(F\\) are mutually exclusive, \\(P(E) + P(F) = P(E \\cup F)\\) This last axiom can be chained\nThis results in three correlaries:\n\\(P(E^{C}) = 1- P(E)\\) Proof: We know that \\(E^{C}, E\\) are mutually exclusive.\n\\begin{equation} P(E^{C} \\cup E) = P(E) + P(E^{C}) \\end{equation}\nNow, recall the fact that something happening OR not happening is \\(1\\).\nSo we have:\n\\(P(E \\cup F) = P(E) + P(F) - P(E \\cap F)\\) if \\(E \\subset F\\), \\(P(E) \\leq P(F)\\) conditional probability \u0026ldquo;What is the new belief that something \\(E\\) happened, conditioned upon the fact that we know that \\(F\\) already happened.\u0026rdquo;\nWritten as: \\(P(E|F)\\).\nFurthermore, we have:\n\\begin{equation} P (X, Y) = P(X\\mid Y) \\cdot P(Y) \\end{equation}\nIn this case, we call \\(Y\\) the \u0026ldquo;evidence\u0026rdquo;. 
this allows us to find \u0026ldquo;what is the chance of \\(x\\) given \\(y\\)\u0026rdquo;.\nWe can continue this to develop the probability chain rule:\n\\begin{equation} P(A_1, A_2 \\dots, A_{n}) = P(A_{n} \\mid A_1, A_2 \\dots A_{n-1})P(A_1, A_2 \\dots A_{n-1}) \\end{equation}\nand so:\n\\begin{equation} P(E_1) \\cdot P(E_2 | E_1) \\cdot E(E_3 | E_1E_2) \\cdot P(E_4 | E_1E_2E_3) \\cdot \\dots \\cdot \\end{equation}\nand so on.\nIf you are performing the chain rule on something that\u0026rsquo;s already conditioned:\n\\begin{equation} P(X,Y|A) \\end{equation}\nyou can break it up just remembering that \\(A\\) needs to be preserved as a condition, so:\n\\begin{equation} P(X,Y|A) = P(X|Y,A) P(Y|A) \\end{equation}\nNow:\n\\begin{equation} \\sum_{x}^{} p(x \\mid y) = 1 \\end{equation}\nbecause this is still a probability over \\(x\\).\nlaw of total probability say you have two variables \\(x, y\\).\n\u0026ldquo;what\u0026rsquo;s the probablity of \\(x\\)\u0026rdquo;\n\\begin{equation} P(x) = \\sum_{Y} P(x,y) \\end{equation}\na.k.a.:\n\\begin{equation} p(x) = p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n} \\end{equation}\nby applying conditional probability formula upon each term\nThis is because:\n\\begin{align} p(x) \u0026amp;= p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n} \\\\ \u0026amp;= p(x, y_1) + \\dots + p(x, y_{n}) \\end{align}\nIf its not conditional, it holds too:\n\\begin{equation} p(AB^{C}) + p(AB) \\end{equation}\nBayes rule See: Bayes Theorem\nindependence If \\(X\\) and \\(Y\\) are independent (written as \\(X \\perp Y\\)), we know that \\(P(x,y) = P(x)P(y)\\) for all \\(x, y\\).\nFormally:\n\\begin{equation} P(A) = P(A|B) \\end{equation}\nif \\(A\\) and \\(B\\) is independent. That is, \\(P(AB) = P(A) \\cdot P(B)\\). You can check either of these statements (the latter is usually easier).\nIndependence is bidirectional. If \\(A\\) is independent of \\(B\\), then \\(B\\) is independent of \\(A\\). 
To show this, invoke the Bayes Theorem.\nThis is generalized:\n\\begin{equation} P(x_1, \\dots, x_n) = P(x_1) \\dots p(x_{n}) \\end{equation}\nand this tells us that subset of \\(x_{j}\\) is independent against each other.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of an event is the proportion of times the event occurs in many repeated trials. It is \u0026ldquo;our belief that an event \\(E\\) occurs\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/h2\u003e\n\u003cp\u003eThat is, it is a number between \\(0-1\\). Whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E) = \\lim_{n \\to \\infty} \\frac{n(E)}{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;frequentist definition of probability\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eprobability is the ratio between the number of times \\(E\\) occurring \\(n(E)\\) divided by the number of times you did the thing \\(n\\). This system converge because of the \u003ca href=\"/posts/kbhlaw_of_large_numbers/\"\u003elaw of large numbers\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"uncertainty--kbhuncertainty-dot-md--and-probability--kbhprobability-dot-md\"\u003e\u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e and \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSay you are training some kind of model. When it says \\(0.8\\) for motorcycle, its not that there are \\(80\\%\\) chance that there\u0026rsquo;s a motorcycle there. 
Its that the model is \\(80\\%\\) confident that there\u0026rsquo;s a motorcycle.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eProbability can not only represent the world, but our understanding of the world\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"axiom-of-probability\"\u003eaxiom of probability\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0 \\leq P(E) \\leq 1\\)\u003c/li\u003e\n\u003cli\u003e\\(P(S) = 1\\), where \\(S\\) is the \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eif \\(E\\) and \\(F\\) are mutually exclusive, \\(P(E) + P(F) = P(E \\cup F)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis last axiom can be chained\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThis results in three correlaries:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P(E^{C}) = 1- P(E)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eProof:\nWe know that \\(E^{C}, E\\) are mutually exclusive.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E^{C} \\cup E) = P(E) + P(E^{C})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall the fact that something happening OR not happening is \\(1\\).\u003c/p\u003e\n\u003cp\u003eSo we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P(E \\cup F) = P(E) + P(F) - P(E \\cap F)\\)\u003c/li\u003e\n\u003cli\u003eif \\(E \\subset F\\), \\(P(E) \\leq P(F)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"conditional-probability\"\u003econditional probability\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;What is the new belief that something \\(E\\) happened, conditioned upon the fact that we know that \\(F\\) already happened.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWritten as: \\(P(E|F)\\).\u003c/p\u003e\n\u003cp\u003eFurthermore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP (X, Y) = P(X\\mid Y) \\cdot P(Y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, we call \\(Y\\) the \u0026ldquo;evidence\u0026rdquo;. 
this allows us to find \u0026ldquo;what is the chance of \\(x\\) given \\(y\\)\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eWe can continue this to develop the \u003ca href=\"#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A_1, A_2 \\dots, A_{n}) = P(A_{n} \\mid A_1, A_2 \\dots A_{n-1})P(A_1, A_2 \\dots A_{n-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E_1) \\cdot P(E_2 | E_1) \\cdot E(E_3 | E_1E_2) \\cdot P(E_4 | E_1E_2E_3) \\cdot \\dots \\cdot\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so on.\u003c/p\u003e\n\u003cp\u003eIf you are performing the chain rule on something that\u0026rsquo;s already conditioned:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X,Y|A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou can break it up just remembering that \\(A\\) needs to be preserved as a condition, so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X,Y|A) = P(X|Y,A) P(Y|A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x}^{} p(x \\mid y) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause this is \u003cstrong\u003estill\u003c/strong\u003e a probability over \\(x\\).\u003c/p\u003e\n\u003ch2 id=\"law-of-total-probability\"\u003elaw of total probability\u003c/h2\u003e\n\u003cp\u003esay you have two variables \\(x, y\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probablity of \\(x\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x) = \\sum_{Y} P(x,y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ea.k.a.:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x) = p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby applying \u003ca href=\"#conditional-probability\"\u003econditional probability\u003c/a\u003e formula upon each term\u003c/p\u003e\n\u003cp\u003eThis is 
because:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\np(x) \u0026amp;= p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n} \\\\\n\u0026amp;= p(x, y_1) + \\dots + p(x, y_{n})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eIf its not conditional, it holds too:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(AB^{C}) + p(AB)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"bayes-rule\"\u003eBayes rule\u003c/h2\u003e\n\u003cp\u003eSee: \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"independence\"\u003eindependence\u003c/h2\u003e\n\u003cp\u003eIf \\(X\\) and \\(Y\\) are independent (written as \\(X \\perp Y\\)), we know that \\(P(x,y) = P(x)P(y)\\) for all \\(x, y\\).\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A) = P(A|B)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(A\\) and \\(B\\) is \u003ca href=\"#independence\"\u003eindependent\u003c/a\u003e. That is, \\(P(AB) = P(A) \\cdot P(B)\\). You can check either of these statements (the latter is usually easier).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#independence\"\u003eIndependence\u003c/a\u003e is bidirectional. If \\(A\\) is independent of \\(B\\), then \\(B\\) is independent of \\(A\\). To show this, invoke the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis is generalized:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_1, \\dots, x_n) = P(x_1) \\dots p(x_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand this tells us that subset of \\(x_{j}\\) is independent against each other.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability/","tags":null,"title":"probability"},{"categories":null,"contents":"probability distributions \u0026ldquo;assigns probability to outcomes\u0026rdquo;\n\\(X\\) follows distribution \\(D\\). 
\\(X\\) is a \u0026ldquo;\\(D\\) random variable\u0026rdquo;, where \\(D\\) is some distribution (normal, gaussian, etc.)\nsyntax: \\(X \\sim D\\).\nEach distribution has three properties:\nvariables (what is being modeled) values (what values can they take on) parameters (how many degrees of freedom do we have) Methods of Compressing the Parameters of a Distribution So, for instance, for a binary distribution with \\(n\\) variables which we know nothing about, we have:\n\\begin{equation} 2^{n} - 1 \\end{equation}\nparameters (\\(2^{n}\\) different possibilities of combinations, and \\(1\\) non-free variables to ensure that the distribution add up)\nassuming independence HOWEVER, if the variables were independent, this becomes much easier. Because the variables are independent, we can claim that:\n\\begin{equation} p(x_{1\\dots n}) = \\prod_{i}^{} p(x_{i)) \\end{equation}\ndecision tree For instance, you can have a decision tree which you selectively ignore some combinations.\nIn this case, we ignored \\(z\\) if both \\(x\\) and \\(y\\) are \\(0\\).\nBaysian networks see Baysian Network\ntypes of probability distributions discrete distribution continuous distribution joint probability distribution distribution of note uniform distribution gaussian distributions Gaussian distribution Truncated Gaussian distribution Gaussian mixture model uniform distribution \\begin{equation} X \\sim Uni(\\alpha, \\beta) \\end{equation}\n\\begin{equation} f(x) = \\begin{cases} \\frac{1}{\\beta -\\alpha }, 0\\leq x \\leq 10 \\\\0 \\end{cases} \\end{equation}\n\\begin{equation} E[x] = \\frac{1}{2}(\\alpha +\\beta) \\end{equation}\n\\begin{equation} Var(X) = \\frac{1}{12}(\\beta -\\alpha )^{2} \\end{equation}\nGaussian Things Truncated Gaussian distribution Sometimes, we don\u0026rsquo;t want to use a Gaussian distribution for values above or below a threshold (say if they are physically impossible). 
In those cases, we have some:\n\\begin{equation} X \\sim N(\\mu, \\sigma^{2}, a, b) \\end{equation}\nbounded within the interval of \\((a,b)\\). The PDF of this function is given by:\n\\begin{equation} N(\\mu, \\sigma^{2}, a, b) = \\frac{\\frac{1}{\\sigma} \\phi \\qty(\\frac{x-\\mu }{\\sigma })}{\\Phi \\qty(\\frac{b-\\mu }{\\sigma }) - \\Phi \\qty(\\frac{a-\\mu}{\\sigma})} \\end{equation}\nwhere:\n\\begin{equation} \\Phi = \\int_{-\\infty}^{x} \\phi (x\u0026rsquo;) \\dd{x\u0026rsquo;} \\end{equation}\nand where \\(\\phi\\) is the standard normal density function.\nGaussian mixture model Gaussian models are typically unimodal, meaning they have one peak (things decrease to the left of that peak, increases to the right of it).\nTherefore, in order to model something more complex with multiple peaks, we just weighted average multiple gaussian models\n\\begin{equation} p(x | \\dots ) = \\sum_{i-1}^{n}p_i \\mathcal{N}(x | u_{i}, {\\sigma_{i}}^{2}) \\end{equation}\nwhereby,\nthree ways of analysis probability density function PDFs is a function that maps continuous random variables to the corresponding probability.\n\\begin{equation} P(a \u0026lt; X \u0026lt; b) = \\int_{x=a}^{b} f(X=x)\\dd{x} \\end{equation}\nnote: \\(f\\) is no longer in units of probability!!! it is in units of probability scaled by units of \\(X\\). That is, they are DERIVATIVES of probabilities. That is, the units of \\(f\\) should be \\(\\frac{prob}{unit\\ X}\\). 
So, it can be greater than \\(1\\).\nWe have two important properties:\nif you integrate over any bounds over a probability density function, you get a probability if you integrate over infinity, the result should be \\(1\\) getting exact values from PDF There is a calculus definition for \\(P(X=x)\\), if absolutely needed:\n\\begin{equation} P(X=x) = \\epsilon f(x) \\end{equation}\nmixing discrete and continuous random variables\nLet\u0026rsquo;s say \\(X\\) is continuous, and \\(N\\) is discrete.\nWe desire:\n\\begin{equation} P(N=n|X=x) = \\frac{P(X=x|N=n)P(N=n)}{P(X=x)} \\end{equation}\nnow, to get a specific value for \\(P(X=x)\\), we can just multiply its PDF by a small epsilon:\n\\begin{align} P(N=n|X=x) \u0026amp;= \\lim_{\\epsilon \\to 0} \\frac{\\epsilon f(X=x|N=n)P(N=n)}{\\epsilon f(X=x)} \\\\ \u0026amp;= \\frac{f(X=x|N=n)P(N=n)}{f(X=x)} \\end{align}\nthis same trick works pretty much everywhere\u0026mdash;whenever we need to get the probability of a continuous random variable with\ncumulative distribution function What is the probability that a random variable takes on a value less than \\(x\\)?\n\\begin{equation} cdf_{x}(x) = P(X\u0026lt;x) = \\int_{-\\infty}^{x} p(x\u0026rsquo;) dx' \\end{equation}\nsometimes written as:\n\\begin{equation} F(x) = P(X \u0026lt; x) \\end{equation}\nRecall that, with\nquantile function \\begin{equation} \\text{quantile}_{X}(\\alpha) \\end{equation}\nis the value \\(x\\) such that:\n\\begin{equation} P(X \\leq x) = \\alpha \\end{equation}\nThat is, the quantile function returns the minimum value of \\(x\\) at which point a certain cumulative distribution value desired is achieved.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distributions\u003c/a\u003e \u0026ldquo;assigns probability to outcomes\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\(X\\) follows distribution \\(D\\). 
\\(X\\) is a \u0026ldquo;\\(D\\) random variable\u0026rdquo;, where \\(D\\) is some distribution (\u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e, gaussian, etc.)\u003c/p\u003e\n\u003cp\u003esyntax: \\(X \\sim D\\).\u003c/p\u003e\n\u003cp\u003eEach distribution has three properties:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003evariables (what is being modeled)\u003c/li\u003e\n\u003cli\u003evalues (what values can they take on)\u003c/li\u003e\n\u003cli\u003eparameters (how many degrees of freedom do we have)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"methods-of-compressing-the-parameters-of-a-distribution\"\u003eMethods of Compressing the Parameters of a Distribution\u003c/h2\u003e\n\u003cp\u003eSo, for instance, for a binary distribution with \\(n\\) variables which we know nothing about, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2^{n} - 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eparameters (\\(2^{n}\\) different possibilities of combinations, and \\(1\\) non-free variables to ensure that the distribution add up)\u003c/p\u003e\n\u003ch3 id=\"assuming-independence\"\u003eassuming independence\u003c/h3\u003e\n\u003cp\u003eHOWEVER, if the variables were \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e, this becomes much easier. 
Because the variables are independent, we can claim that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x_{1\\dots n}) = \\prod_{i}^{} p(x_{i})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"decision-tree\"\u003edecision tree\u003c/h3\u003e\n\u003cp\u003eFor instance, you can have a decision tree which you selectively ignore some combinations.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-28_10-13-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIn this case, we ignored \\(z\\) if both \\(x\\) and \\(y\\) are \\(0\\).\u003c/p\u003e\n\u003ch3 id=\"baysian-networks\"\u003eBaysian networks\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"types-of-probability-distributions\"\u003etypes of probability distributions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontinuous_distribution/\"\u003econtinuous distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"distribution-of-note\"\u003edistribution of note\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003egaussian distributions\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#truncated-gaussian-distribution\"\u003eTruncated Gaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#gaussian-mixture-model\"\u003eGaussian mixture model\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"uniform-distribution\"\u003euniform distribution\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Uni(\\alpha, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\begin{cases}\n\\frac{1}{\\beta -\\alpha }, \\alpha \\leq x \\leq \\beta \\\\0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[x] = \\frac{1}{2}(\\alpha +\\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar(X) = \\frac{1}{12}(\\beta -\\alpha )^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"gaussian-things\"\u003eGaussian Things\u003c/h3\u003e\n\u003ch4 id=\"truncated-gaussian-distribution\"\u003eTruncated Gaussian distribution\u003c/h4\u003e\n\u003cp\u003eSometimes, we don\u0026rsquo;t want to use a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003e for values above or below a threshold (say if they are physically impossible). In those cases, we have some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim N(\\mu, \\sigma^{2}, a, b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebounded within the interval of \\((a,b)\\). 
The \u003ca href=\"#probability-density-function\"\u003ePDF\u003c/a\u003e of this function is given by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN(\\mu, \\sigma^{2}, a, b) = \\frac{\\frac{1}{\\sigma} \\phi \\qty(\\frac{x-\\mu }{\\sigma })}{\\Phi \\qty(\\frac{b-\\mu }{\\sigma }) - \\Phi \\qty(\\frac{a-\\mu}{\\sigma})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Phi = \\int_{-\\infty}^{x} \\phi (x\u0026rsquo;) \\dd{x\u0026rsquo;}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand where \\(\\phi\\) is the \u003ca href=\"/posts/kbhgaussian_distribution/#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"gaussian-mixture-model\"\u003eGaussian mixture model\u003c/h4\u003e\n\u003cp\u003eGaussian models are typically \u003ca href=\"/posts/kbhunimodal/\"\u003eunimodal\u003c/a\u003e, meaning they have one peak (things increase to the left of that peak, decrease to the right of it).\u003c/p\u003e\n\u003cp\u003eTherefore, in order to model something more complex with multiple peaks, we just weighted average multiple gaussian models\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x | \\dots ) = \\sum_{i=1}^{n}p_i \\mathcal{N}(x | u_{i}, {\\sigma_{i}}^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby,\u003c/p\u003e\n\u003ch2 id=\"three-ways-of-analysis\"\u003ethree ways of analysis\u003c/h2\u003e\n\u003ch3 id=\"probability-density-function\"\u003eprobability density function\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#probability-density-function\"\u003ePDF\u003c/a\u003es is a function that maps continuous random variables to the corresponding probability.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(a \u0026lt; X \u0026lt; b) = \\int_{x=a}^{b} f(X=x)\\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote: \\(f\\) is no longer in units of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e!!! 
it is in units of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e scaled by units of \\(X\\). That is, they are DERIVATIVES of probabilities. That is, the units of \\(f\\) should be \\(\\frac{prob}{unit\\ X}\\). So, it can be greater than \\(1\\).\u003c/p\u003e\n\u003cp\u003eWe have two important properties:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif you integrate over any bounds over a \u003ca href=\"#probability-density-function\"\u003eprobability density function\u003c/a\u003e, you get a \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eif you integrate over infinity, the result should be \\(1\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"getting-exact-values-from-pdf--orge11a5fa\"\u003egetting exact values from \u003ca href=\"#probability-density-function\"\u003ePDF\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eThere is a calculus definition for \\(P(X=x)\\), if absolutely needed:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=x) = \\epsilon f(x)\n\\end{equation}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003emixing discrete and continuous random variables\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s say \\(X\\) is continuous, and \\(N\\) is discrete.\u003c/p\u003e\n\u003cp\u003eWe desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(N=n|X=x) = \\frac{P(X=x|N=n)P(N=n)}{P(X=x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, to get a specific value for \\(P(X=x)\\), we can just multiply its \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e by a small epsilon:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(N=n|X=x) \u0026amp;= \\lim_{\\epsilon \\to 0} \\frac{\\epsilon f(X=x|N=n)P(N=n)}{\\epsilon f(X=x)} \\\\\n\u0026amp;= \\frac{f(X=x|N=n)P(N=n)}{f(X=x)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethis same trick works pretty much everywhere\u0026mdash;whenever we need to get the probability of a continuous 
random variable with\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cumulative-distribution-function\"\u003ecumulative distribution function\u003c/h3\u003e\n\u003cp\u003eWhat is the probability that a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e takes on a value less than \\(x\\)?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ncdf_{x}(x) = P(X\u0026lt;x) = \\int_{-\\infty}^{x} p(x\u0026rsquo;) dx'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esometimes written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(x) = P(X \u0026lt; x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that, with\u003c/p\u003e\n\u003ch3 id=\"quantile-function\"\u003equantile function\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\text{quantile}_{X}(\\alpha)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the value \\(x\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X \\leq x) = \\alpha\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is, the \u003ca href=\"#quantile-function\"\u003equantile function\u003c/a\u003e returns the minimum value of \\(x\\) at which point a certain \u003ca href=\"#cumulative-distribution-function\"\u003ecumulative distribution\u003c/a\u003e value desired is achieved.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_distributions/","tags":null,"title":"probability distribution"},{"categories":null,"contents":"PMF is a function that maps possible outcomes of a discrete random variable to the corresponding probability.\nFor random variable \\(Y\\), we have:\n\\begin{equation} f(k) = P(Y=k) \\end{equation}\nand \\(f\\) is a function that is the PMF, which is the mapping between a random variable and a value it takes on to the probability that the random variable takes on that value.\nShorthand \\begin{equation} P(Y=k) = p(y), where\\ y=k \\end{equation}\nits written smaller \\(y\\) represents a case of \\(Y\\) where \\(Y=y\\).\nShorthand For this to be correct, we have 
to\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e is a function that maps possible outcomes of a discrete \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es to the corresponding probability.\u003c/p\u003e\n\u003cp\u003eFor random variable \\(Y\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(k) = P(Y=k)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(f\\) is a function that is the \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e, which is the mapping between a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e and a value it takes on to the probability that the \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e takes on that value.\u003c/p\u003e\n\u003ch2 id=\"shorthand\"\u003eShorthand\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=k) = p(y), where\\ y=k\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eits written smaller \\(y\\) represents a \u003cem\u003ecase\u003c/em\u003e of \\(Y\\) where \\(Y=y\\).\u003c/p\u003e\n\u003ch2 id=\"shorthand\"\u003eShorthand\u003c/h2\u003e\n\u003cp\u003eFor this to be correct, we have to\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_mass_function/","tags":null,"title":"probability mass function"},{"categories":null,"contents":"multinomial distribution A probability distribution to model specific outcomes like a binomial distribution but for multiple variables.\nlike binomial distribution, we have to assume independence and same probability per trial.\n\u0026ldquo;what\u0026rsquo;s the probability that you get some set of assignments xj=nj\u0026rdquo;:\n\\begin{equation} P(X_1=c_1, X_2=c_2, \\dots, X_{m}=c_{m}) = {n \\choose c_1, c_2, \\dots, c_{m} } p_{1}^{c_1} \\cdot \\dots \\cdot p_{m}^{c_{m}} \\end{equation}\nwhere the big choose is a multinomial coefficient, and \\(n\\) is the number of different outcomes, and 
\\(p_{j}\\) is the probability of the $j$th outcome.\nIMPORTANT: \\(\\sum_{j=0}^{m} c_{j} = n\\): that is, you MUST provide an assignment for each type of outcome.\n","html":"\u003ch2 id=\"multinomial-distribution\"\u003emultinomial distribution\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e distribution to model specific outcomes like a \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e but for multiple variables.\u003c/p\u003e\n\u003cp\u003elike \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e, we have to assume independence and same probability per trial.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability that you get some set of assignments xj=nj\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X_1=c_1, X_2=c_2, \\dots, X_{m}=c_{m}) = {n \\choose c_1, c_2, \\dots, c_{m} } p_{1}^{c_1} \\cdot \\dots \\cdot p_{m}^{c_{m}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the big choose is a \u003ca href=\"/posts/kbhmultinomial_coefficient/\"\u003emultinomial coefficient\u003c/a\u003e, and \\(n\\) is the number of different outcomes, and \\(p_{j}\\) is the probability of the $j$th outcome.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eIMPORTANT\u003c/strong\u003e\u003c/strong\u003e: \\(\\sum_{j=0}^{m} c_{j} = n\\): that is, you MUST provide an assignment for each type of outcome.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobablistic_model/","tags":null,"title":"probablistic models"},{"categories":null,"contents":"gravity sucks.\ngeneral relativity claims that our best theory of how gravity works does not work with non-\n","html":"\u003cp\u003egravity sucks.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgeneral_relativity/\"\u003egeneral relativity\u003c/a\u003e claims that our best theory of how gravity works does not work with 
non-\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproblem_with_gravity/","tags":null,"title":"problem with gravity"},{"categories":null,"contents":"Each process is controlled by a struct which contains information about the process.\nmemory used by the process file descriptor table thread state other accounting file descriptor table Within each process, we have a file descriptor table (and the ints we get are indices into this table), for which each entry stores pointers to the open file table.\nWhen a process forks, the child doesn\u0026rsquo;t get more open file entries, instead, we simply clone the file descriptor table (i.e. parent and child will share the same underlying open file table entries); this is how we can share pipes.\nThis is why you need to CLOSE all open file descriptors once every PROCESS, including forked child.\nthread state Recall that threads are the unit of execution. The process control block keeps track of the *stack pointer* of the thread %rsp, which means if a thread is put to sleep the state can be stored somewhere on the stack.\nrunning blocked - waiting for an event like disk, network, etc. ready - able to run, but not on CPU yet IO vs. 
CPU bound I/O Bound Thread is a thread that needs to wait for disk events, and don\u0026rsquo;t need CPU that much CPU Thread is a thread that needs CPU time ","html":"\u003cp\u003eEach \u003ca href=\"/posts/kbhmultiprocessing/#process\"\u003eprocess\u003c/a\u003e is controlled by a struct which contains information about the process.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ememory used by the process\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#file-descriptor-table\"\u003efile descriptor table\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#thread--kbhmultithreading-dot-md--state\"\u003ethread state\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eother accounting\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"file-descriptor-table\"\u003efile descriptor table\u003c/h2\u003e\n\u003cp\u003eWithin each process, we have a \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e table (and the ints we get are indices into this table), for which each entry stores pointers to the \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhen a process forks, the child doesn\u0026rsquo;t get more open file entries, instead, we simply clone the \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e table (i.e. 
parent and child will share the same underlying \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e entries); this is how we can share pipes.\u003c/p\u003e\n\u003cp\u003eThis is why you need to \u003cstrong\u003eCLOSE\u003c/strong\u003e all open file descriptors once every \u003cstrong\u003ePROCESS\u003c/strong\u003e, including forked child.\u003c/p\u003e\n\u003ch2 id=\"thread--kbhmultithreading-dot-md--state\"\u003e\u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e state\u003c/h2\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es are the \u003cstrong\u003eunit of execution\u003c/strong\u003e. The \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e keeps track of the \u003ca href=\"/posts/kbhassembly/#stack-pointer\"\u003e*stack pointer\u003c/a\u003e* of the thread \u003ccode\u003e%rsp\u003c/code\u003e, which means if a thread is put to sleep the state can be stored somewhere on the stack.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003erunning\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eblocked\u003c/strong\u003e - waiting for an event like disk, network, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eready\u003c/strong\u003e - able to run, but not on CPU yet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-21_13-50-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"io-vs-dot-cpu-bound\"\u003eIO vs. 
CPU bound\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e is a thread that needs to wait for disk events, and don\u0026rsquo;t need CPU that much\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#io-vs-dot-cpu-bound\"\u003eCPU Thread\u003c/a\u003e is a thread that needs CPU time\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprocess_control_block/","tags":null,"title":"process control block"},{"categories":null,"contents":"Take two linear maps \\(T \\in \\mathcal{L}(U,V)\\) and \\(S \\in \\mathcal{L}(V,W)\\), then \\(ST \\in \\mathcal{L}(U,W)\\) is defined by:\n\\begin{equation} (ST)(u) = S(Tu) \\end{equation}\nIndeed the \u0026ldquo;product\u0026rdquo; of Linear Maps is just function composition. Of course, \\(ST\\) is defined only when \\(T\\) maps to something in the domain of \\(S\\).\nThe following three properties hold on linear-map products (note that commutativity isn\u0026rsquo;t one of them!):\nassociativity \\begin{equation} (T_1T_2)T_3 = T_1(T_2T_3) \\end{equation}\nidentity \\begin{equation} TI = IT = T \\end{equation}\nfor \\(T \\in \\mathcal{L}(V,W)\\) and \\(I \\in \\mathcal{L}(V,V)\\) (OR \\(I \\in \\mathcal{L}(W,W)\\) depending on the order) is the identity map in \\(V\\).\nidentity commutes, as always.\ndistributive in both directions\u0026mdash;\n\\begin{equation} (S_1+S_2)T = S_1T + S_2T \\end{equation}\nand\n\\begin{equation} S(T_1+T_2) = ST_{1}+ST_{2} \\end{equation}\n","html":"\u003cp\u003eTake two linear maps \\(T \\in \\mathcal{L}(U,V)\\) and \\(S \\in \\mathcal{L}(V,W)\\), then \\(ST \\in \\mathcal{L}(U,W)\\) is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(ST)(u) = S(Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIndeed the \u0026ldquo;product\u0026rdquo; of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es is just function composition. 
Of course, \\(ST\\) is defined only when \\(T\\) maps to something in the domain of \\(S\\).\u003c/p\u003e\n\u003cp\u003eThe following three properties hold on linear-map products (\u003cem\u003enote that commutativity isn\u0026rsquo;t one of them!\u003c/em\u003e):\u003c/p\u003e\n\u003ch2 id=\"associativity\"\u003eassociativity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(T_1T_2)T_3 = T_1(T_2T_3)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"identity\"\u003eidentity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nTI = IT = T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(T \\in \\mathcal{L}(V,W)\\) and \\(I \\in \\mathcal{L}(V,V)\\) (OR \\(I \\in \\mathcal{L}(W,W)\\) depending on the order) is the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map in \\(V\\).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e commutes, as always.\u003c/p\u003e\n\u003ch2 id=\"distributive\"\u003edistributive\u003c/h2\u003e\n\u003cp\u003ein both directions\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(S_1+S_2)T = S_1T + S_2T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS(T_1+T_2) = ST_{1}+ST_{2}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduct_of_linear_maps/","tags":null,"title":"Product of Linear Maps"},{"categories":null,"contents":"A product of vector spaces is a vector space formed by putting an element from each space into an element of the vector.\nconstituents Suppose \\(V_1 \\dots V_{m}\\) are vector spaces over the same field \\(\\mathbb{F}\\)\nrequirements Product between \\(V_1 \\dots V_{m}\\) is defined:\n\\begin{equation} V_1 \\times \\dots \\times V_{m} = \\{(v_1, \\dots, v_{m}): v_1 \\in V_1 \\dots v_{m} \\in V_{m}\\} \\end{equation}\n\u0026ldquo;chain an element from each space into another vector\u0026rdquo;\nadditional information operations on Product of Vector Spaces The operations on the 
product of vector spaces are defined in the usual way.\nAddition: \\((u_1, \\dots, u_{m})+(v_1, \\dots, v_{m}) = (u_1+v_1, \\dots, u_{m}+v_{m})\\)\nScalar multiplication: \\(\\lambda (v_1 \\dots v_{m}) = (\\lambda v_1, \\dots, \\lambda v_{m})\\)\nProduct of Vector Spaces is a vector space The operations defined above inherits closure from their respective vector spaces.\nadditive identity: \\((0, \\dots, 0)\\), taking the zero from each vector space additive inverse: \\((-v_1, \\dots, -v_{m})\\), taking the additive inverse from each vector space scalar multiplicative identity: \\(1\\) operations: commutativity, associativity, distributivity \u0026mdash; inheriting from vector spaces \\(\\blacksquare\\)\ndimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension Proof:\nTake each \\(V_{j}\\); construct a list such that, for each basis vector in the basis of \\(V_{j}\\), we have an element of the list such that we have that basis vector in the \\(j^{th}\\) slot and \\(0\\) in all others.\nThis list is linearly independent; and, a linear combination thereof span all of \\(V_1 \\times \\dots \\times V_{m}\\). The length of this is the sum of the number of basis vectors of each space, as desired. 
\\(\\blacksquare\\)\nproduct summation map See: product summation map\n","html":"\u003cp\u003eA product of vector spaces is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e formed by putting an element from each space into an element of the vector.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eSuppose \\(V_1 \\dots V_{m}\\) are \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es over the same field \\(\\mathbb{F}\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eProduct\u003c/strong\u003e between \\(V_1 \\dots V_{m}\\) is defined:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1 \\times \\dots \\times V_{m} = \\{(v_1, \\dots, v_{m}): v_1 \\in V_1 \\dots v_{m} \\in V_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;chain an element from each space into another vector\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"operation--kbhoperation-dot-md--s-on-product-of-vector-space--kbhproduct-of-vector-spaces-dot-md--s\"\u003e\u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es on \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eThe operations on the product of vector spaces are defined in the usual way.\u003c/p\u003e\n\u003cp\u003eAddition: \\((u_1, \\dots, u_{m})+(v_1, \\dots, v_{m}) = (u_1+v_1, \\dots, u_{m}+v_{m})\\)\u003c/p\u003e\n\u003cp\u003eScalar multiplication: \\(\\lambda (v_1 \\dots v_{m}) = (\\lambda v_1, \\dots, \\lambda v_{m})\\)\u003c/p\u003e\n\u003ch3 id=\"product-of-vector-space--kbhproduct-of-vector-spaces-dot-md--s-is-a-vector-space--kbhvector-space-dot-md\"\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector 
space\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe operations defined above inherits \u003ca href=\"/posts/kbhclosed/\"\u003eclosure\u003c/a\u003e from their respective vector spaces.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e: \\((0, \\dots, 0)\\), taking the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e from each vector space\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e: \\((-v_1, \\dots, -v_{m})\\), taking the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e from each \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003escalar multiplicative identity: \\(1\\)\u003c/li\u003e\n\u003cli\u003eoperations: \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e \u0026mdash; inheriting from \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"dimension-of-the-product-of-vector-space--kbhproduct-of-vector-spaces-dot-md--s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es is the sum of the spaces\u0026rsquo; dimension\u003c/h3\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eTake each \\(V_{j}\\); construct a list such that, for each \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e vector in the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V_{j}\\), we have an element of the list such that we have that basis vector in the \\(j^{th}\\) slot and \\(0\\) in all others.\u003c/p\u003e\n\u003cp\u003eThis list is \u003ca 
href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e; and, a linear combination thereof span all of \\(V_1 \\times \\dots \\times V_{m}\\). The length of this is the sum of the number of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e vectors of each space, as desired. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"product-summation-map\"\u003eproduct summation map\u003c/h3\u003e\n\u003cp\u003eSee: \u003ca href=\"/posts/kbhproduct_summation_map/\"\u003eproduct summation map\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduct_of_vector_spaces/","tags":null,"title":"Product of Vector Space"},{"categories":null,"contents":"Let \\(U_1, \\dots, U_{m}\\) be subspaces of \\(V\\); we define a linear\nWe define \\(\\Gamma\\) to be a map \\(U_1 \\times \\dots U_{m} \\to U_1 + \\dots + U_{m}\\) such that:\n\\begin{equation} \\Gamma (u_1, \\dots, u_{m}) = u_1 + \\dots + u_{m} \\end{equation}\nEssentially, \\(\\Gamma\\) is the sum operation of the elements of the tuple made by the Product of Vector Spaces.\n\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective Proof:\nGiven \\(\\Gamma\\) is injective: Given injectivity, we have that injectivity implies that null space is \\(\\{0\\}\\). Now, because the only way to produce \\(0\\) is to have the input product/tuple be 0, \\(u_1 \\dots u_{m} = 0\\). So, given a sum of subsets is a direct sum IFF there is only one way to write \\(0\\), the sum is a direct sum.\nGiven direct sum: Reverse the logic of above directly. Given its a direct sum, the only way to be in the null space of \\(\\Gamma\\) (i.e. have the sum of the elements of tuple by \\(0\\)) is by taking each \\(u_1 \\dots u_{m}\\) to \\(0\\). Now, injectivity implies that null space is \\(\\{0\\}\\), so \\(\\Gamma\\) is injective. 
\\(\\blacksquare\\)\nAside: \\(\\Gamma\\) is surjective because product of vector-spaces is simply the pre-combined version of the sum.\nSo a corollary of the above result is that: \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is invertable, because injectivity and surjectivity implies invertability.\n\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) \\(\\Gamma\\) is surjective for all cases because product of vector-spaces is simply the pre-combined version of the sum.\nSo, by rank-nullity theorem, \\(\\dim (U_1 \\times \\dots U_{m}) = \\dim null\\ \\Gamma + \\dim (U_1 + \\dots + U_{m})\\).\nNow, \\(\\dim null\\ \\Gamma = 0\\) IFF \\(\\dim (U_1 \\times \\dots U_{m}) = 0 + \\dim (U_1 + \\dots + U_{m})\\).\nNow, dimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension.\nSo: \\(\\dim null\\ \\Gamma = 0\\) IFF \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\nNow, \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective, and from above \\(\\dim null\\ \\Gamma = 0\\) (that \\(\\Gamma\\) is injective) IFF \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\nSo, \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\), as desired. 
\\(\\blacksquare\\)\n(Note that this proof is built out of a series of IFFs, so it goes in both directions.)\n","html":"\u003cp\u003eLet \\(U_1, \\dots, U_{m}\\) be \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\); we define a linear\u003c/p\u003e\n\u003cp\u003eWe define \\(\\Gamma\\) to be a map \\(U_1 \\times \\dots U_{m} \\to U_1 + \\dots + U_{m}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma (u_1, \\dots, u_{m}) = u_1 + \\dots + u_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEssentially, \\(\\Gamma\\) is the sum operation of the elements of the tuple made by the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"u-1-plus-dots-plus-u-m-is-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--gamma-is-injective--kbhinjectivity-dot-md\"\u003e\\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eGiven \\(\\Gamma\\) is injective:\nGiven \u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e, we have that \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity implies that null space is \\(\\{0\\}\\)\u003c/a\u003e. Now, because the only way to produce \\(0\\) is to have the input product/tuple be 0, \\(u_1 \\dots u_{m} = 0\\). 
So, given \u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-there-is-only-one-way-to-write-0\"\u003ea sum of subsets is a direct sum IFF there is only one way to write \\(0\\)\u003c/a\u003e, the sum is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGiven \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e:\nReverse the logic of above directly. Given its a direct sum, the only way to be in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(\\Gamma\\) (i.e. have the sum of the elements of tuple by \\(0\\)) is by taking each \\(u_1 \\dots u_{m}\\) to \\(0\\). Now, \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity implies that null space is \\(\\{0\\}\\)\u003c/a\u003e, so \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"aside\"\u003eAside:\u003c/h3\u003e\n\u003cp\u003e\\(\\Gamma\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e because product of vector-spaces is simply the pre-combined version of the sum.\u003c/p\u003e\n\u003cp\u003eSo a corollary of the above result is that: \\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, because \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"u-1-plus-dots-plus-u-m-is-a-direct-sum--kbhdirect-sum-dot-md--iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/h2\u003e\n\u003cp\u003e\\(\\Gamma\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e for all cases because product of vector-spaces is simply the pre-combined version of the sum.\u003c/p\u003e\n\u003cp\u003eSo, by \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e, \\(\\dim (U_1 \\times \\dots U_{m}) = \\dim null\\ \\Gamma + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eNow, \\(\\dim null\\ \\Gamma = 0\\) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\dim (U_1 \\times \\dots U_{m}) = 0 + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003ca 
href=\"/posts/kbhproduct_of_vector_spaces/#dimension-of-the-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo: \\(\\dim null\\ \\Gamma = 0\\) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003ca href=\"#u-1-plus-dots-plus-u-m-is-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--gamma-is-injective--kbhinjectivity-dot-md\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective\u003c/a\u003e, and from above \\(\\dim null\\ \\Gamma = 0\\) (that \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eSo, \\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003cp\u003e(Note that this proof is built out of a series of \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003es, so it goes in both directions.)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduct_summation_map/","tags":null,"title":"product summation map"},{"categories":null,"contents":"This is a work-in-progress page listing all of my production projects.\nFireside: Blog Fireside Index\nYappin: Podcast https://anchor.fm/yappin/\n20MinuteRants: Blog https://medium.com/20minuterants\nProject80: Podcast See Project80.\nNorman Stories: Fiction https://hidonipothan.substack.com/\n(left) Director - Hillview Broadcasting: Production Studio https://hillview.tv/\n","html":"\u003cp\u003eThis is a work-in-progress page listing all of my production projects.\u003c/p\u003e\n\u003ch2 id=\"fireside-blog\"\u003eFireside: Blog\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfireside/\"\u003eFireside Index\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"yappin-podcast\"\u003eYappin: Podcast\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://anchor.fm/yappin/\"\u003ehttps://anchor.fm/yappin/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"20minuterants-blog\"\u003e20MinuteRants: Blog\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://medium.com/20minuterants\"\u003ehttps://medium.com/20minuterants\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"project80-podcast\"\u003eProject80: Podcast\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"norman-stories-fiction\"\u003eNorman Stories: Fiction\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://hidonipothan.substack.com/\"\u003ehttps://hidonipothan.substack.com/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"left--director-hillview-broadcasting-production-studio\"\u003e(left) Director - Hillview Broadcasting: Production Studio\u003c/h2\u003e\n\u003cp\u003e\u003ca 
href=\"https://hillview.tv/\"\u003ehttps://hillview.tv/\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduction_index/","tags":["index"],"title":"Production Index"},{"categories":null,"contents":"So you wanted to be productive?\nGo do stuff. Stop reading. Get crap done.\n\u0026hellip; \u0026hellip; \u0026hellip;\nWait, you are still here? Well, given that you are sticking around, we might as well discuss some tooling that may help you in organizing your work. By all means I don\u0026rsquo;t think this is a complete list, many of these have intersecting features; think of this as more a survey of the field\u0026mdash;\nTooling, Knowledge Management https://obsidian.md/ https://www.notion.so/ https://evernote.com/ https://logseq.com/ what I use, which is generally not recommended because not very beginner friendly https://www.gnu.org/software/emacs/ + https://www.orgroam.com/ https://www.zotero.org/ https://getdrafts.com/ https://notability.com/ Tooling, Calendaring and Email https://flexibits.com/fantastical https://sparkmailapp.com/ https://www.thunderbird.net/en-US/ Google Calendar (I\u0026rsquo;m not kidding, its very powerful) Tooling, To-Do List https://todoist.com/ https://todo.microsoft.com/ https://www.omnigroup.com/omnifocus/ https://culturedcode.com/things/ Shameless plug: https://www.condution.com/ https://orgmode.org/manual/Agenda-Views.html again, Notion. See above. 
https://workflowy.com/ https://www.rememberthemilk.com/ Methodologies https://gettingthingsdone.com/ https://marshallgoldsmith.com/book-page-triggers/ https://zettelkasten.de/posts/overview/ https://www.taskade.com/blog/personal-knowledge-management-pkm-guide/ https://docs.google.com/document/d/1PXYTxKmhf8Kb7emJMGPA1qCaPEossCgyE9WmLVSxIwU/edit Media Personalities with Interesting Opinions https://www.relay.fm/cortex https://daringfireball.net/ https://www.relay.fm/connected https://www.macstories.net/ https://www.youtube.com/watch?v=ALaTm6VzTBw ","html":"\u003cp\u003eSo you wanted to be productive?\u003c/p\u003e\n\u003cp\u003eGo do stuff. Stop reading. Get crap done.\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\n\u0026hellip;\n\u0026hellip;\u003c/p\u003e\n\u003cp\u003eWait, you are still here? Well, given that you are sticking around, we might as well discuss some tooling that may help you in organizing your work. By all means I don\u0026rsquo;t think this is a complete list, many of these have intersecting features; think of this as more a survey of the field\u0026mdash;\u003c/p\u003e\n\u003ch2 id=\"tooling-knowledge-management\"\u003eTooling, Knowledge Management\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://obsidian.md/\"\u003ehttps://obsidian.md/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.notion.so/\"\u003ehttps://www.notion.so/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://evernote.com/\"\u003ehttps://evernote.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://logseq.com/\"\u003ehttps://logseq.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewhat I use, which is generally not recommended because not very beginner friendly \u003ca href=\"https://www.gnu.org/software/emacs/\"\u003ehttps://www.gnu.org/software/emacs/\u003c/a\u003e + \u003ca href=\"https://www.orgroam.com/\"\u003ehttps://www.orgroam.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"https://www.zotero.org/\"\u003ehttps://www.zotero.org/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://getdrafts.com/\"\u003ehttps://getdrafts.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://notability.com/\"\u003ehttps://notability.com/\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tooling-calendaring-and-email\"\u003eTooling, Calendaring and Email\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://flexibits.com/fantastical\"\u003ehttps://flexibits.com/fantastical\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://sparkmailapp.com/\"\u003ehttps://sparkmailapp.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.thunderbird.net/en-US/\"\u003ehttps://www.thunderbird.net/en-US/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eGoogle Calendar (I\u0026rsquo;m not kidding, its very powerful)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tooling-to-do-list\"\u003eTooling, To-Do List\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://todoist.com/\"\u003ehttps://todoist.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://todo.microsoft.com/\"\u003ehttps://todo.microsoft.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.omnigroup.com/omnifocus/\"\u003ehttps://www.omnigroup.com/omnifocus/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://culturedcode.com/things/\"\u003ehttps://culturedcode.com/things/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eShameless plug: \u003ca href=\"https://www.condution.com/\"\u003ehttps://www.condution.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://orgmode.org/manual/Agenda-Views.html\"\u003ehttps://orgmode.org/manual/Agenda-Views.html\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eagain, Notion. 
See above.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://workflowy.com/\"\u003ehttps://workflowy.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.rememberthemilk.com/\"\u003ehttps://www.rememberthemilk.com/\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"methodologies\"\u003eMethodologies\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://gettingthingsdone.com/\"\u003ehttps://gettingthingsdone.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://marshallgoldsmith.com/book-page-triggers/\"\u003ehttps://marshallgoldsmith.com/book-page-triggers/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://zettelkasten.de/posts/overview/\"\u003ehttps://zettelkasten.de/posts/overview/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.taskade.com/blog/personal-knowledge-management-pkm-guide/\"\u003ehttps://www.taskade.com/blog/personal-knowledge-management-pkm-guide/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://docs.google.com/document/d/1PXYTxKmhf8Kb7emJMGPA1qCaPEossCgyE9WmLVSxIwU/edit\"\u003ehttps://docs.google.com/document/d/1PXYTxKmhf8Kb7emJMGPA1qCaPEossCgyE9WmLVSxIwU/edit\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"media-personalities-with-interesting-opinions\"\u003eMedia Personalities with Interesting Opinions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://www.relay.fm/cortex\"\u003ehttps://www.relay.fm/cortex\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://daringfireball.net/\"\u003ehttps://daringfireball.net/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.relay.fm/connected\"\u003ehttps://www.relay.fm/connected\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.macstories.net/\"\u003ehttps://www.macstories.net/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"https://www.youtube.com/watch?v=ALaTm6VzTBw\"\u003ehttps://www.youtube.com/watch?v=ALaTm6VzTBw\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproductivity_starter_pack/","tags":null,"title":"Productivity Starter Pack"},{"categories":null,"contents":"We mentioned this in class, and I figured we should write it down.\nSo, if you think about the Product of Vector Space:\n\\begin{equation} \\mathbb{R} \\times \\mathbb{R} \\end{equation}\nyou are essentially taking the \\(x\\) axis straight line and \u0026ldquo;duplicating\u0026rdquo; it along the \\(y\\) axis.\nNow, the opposite of this is the quotient space:\n\\begin{equation} \\mathbb{R}^{2} / \\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\} \\end{equation}\nWhere, we are essentially taking the line in the \\(x\\) axis and squish it down, leaving us only the \\(y\\) component freedom to play with (as each element is \\(v +\\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\}\\)).\nThis also gets us the result that two affine subsets parallel to \\(U\\) are either equal or disjoint; specifically the conclusion that \\(v-w \\in U \\implies v+U = w+U\\): for our example, only shifting up and down should do different things; if two shifts\u0026rsquo; up-down shift is \\(0\\) (i.e. 
it drops us back into \\(\\mqty(a \\\\0)\\) land), well then it will not move us anywhere different.\n","html":"\u003cp\u003eWe mentioned this in class, and I figured we should write it down.\u003c/p\u003e\n\u003cp\u003eSo, if you think about the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{R} \\times \\mathbb{R}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou are essentially taking the \\(x\\) axis straight line and \u0026ldquo;duplicating\u0026rdquo; it along the \\(y\\) axis.\u003c/p\u003e\n\u003cp\u003eNow, the opposite of this is the \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{R}^{2} / \\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere, we are essentially taking the line in the \\(x\\) axis and squish it down, leaving us only the \\(y\\) component freedom to play with (as each element is \\(v +\\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\}\\)).\u003c/p\u003e\n\u003cp\u003eThis also gets us the result that \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e; specifically the conclusion that \\(v-w \\in U \\implies v+U = w+U\\): for our example, only shifting up and down should do different things; if two shifts\u0026rsquo; up-down shift is \\(0\\) (i.e. 
it drops us back into \\(\\mqty(a \\\\0)\\) land), well then it will not move us anywhere different.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproducts_and_quotients_the_intuition/","tags":null,"title":"products and quotients, the intuition"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhprof_xin_liu/","tags":null,"title":"Prof. Xin Liu"},{"categories":null,"contents":"Introduction Recent advances of language models (LMs) introduced the possibility of in-context, few or zero-shot reasoning (Brown et al. 2020) using LMs without much or any fine tuning.\nYet, classically, LM decoding takes place in a left-to-right fashion, auto-regressively resolving one token at a time by sampling from the output distribution of possible next words without multi-step planning.\nWork in LM agents have taken steps to solve more complex problems that would typically require multi-step reasoning even while using this direct decoding approach. The simplest idea, named \u0026ldquo;chain-of-thoughts\u0026rdquo; (CoT), involves forcing the LM at decode time to begin the decoding process with natural language reasoning about its actions (Wei et al. 2022). The method has contributed to the creation of powerful language agents (Yao, Zhao, et al. 2023) that can reason about complex actions.\nDespite the relative success of CoT, the scheme still does not support any kind of backtracking as it samples directly from the LM\u0026rsquo;s posterior distribution. When a problem requires a significantly large number of steps to solve, issues relating to \u0026ldquo;de-generation\u0026rdquo; (Holtzman et al. 
2020) becomes increasingly prevalent: whereby, naive maximizations of sequence likelihood results in a most likely sub-phrase being repeated which does not contribute to increased information or progress on a problem.\nRecent theoretical work suggests these types of degeneration arises due to the distortion of output probability density caused by the last-layer softmax projection into the probability simplex (Finlayson et al. 2023): due the lower degrees of freedom offered by a probability syntax, both high and low tails of the latent next-word distribution becomes emphasized in the output probability distribution.\nTo address this, recent approaches such as Tree of Thoughts (ToT) (Yao, Yu, et al. 2023) have separated the process of next-step proposal (\u0026ldquo;thinking\u0026rdquo;) and choosing the actual step to take given a situation (\u0026ldquo;reasoning\u0026rdquo;). This separate allows the representation of a problem through only short decoding sequences that are less prone to degeneration, while allowing a separate LM call to score the value of being at any given partial solution through a maximum-likely single-token output that is less likely to be distorted.\nIn this work, we extend the ToT prompting scheme to formalize this process of \u0026ldquo;thinking\u0026rdquo; and \u0026ldquo;reasoning\u0026rdquo; via a Language Model as a Partially Observable Markov Decision Process (POMDP). We call this decoding scheme the Lookahead Sampler (LS).\nThe key underlying assumption of the proposed LS scheme involves the claim that LMs are able to make judgments about the value of a subsequence towards solving a problem by analyzing the likelihood of a particular sequence against a judgment of value. This assumption is supported by the existence of reinforcement learning formulations of LM-on-LM output verification\u0026mdash;both for reasoning ((Verma et al. 2022)) and hallucination ((Liu et al. 
2022))\u0026ndash;as well as the use of LM-inferred state value heuristics in the ToS approach.\nWe leverage this assumption by, similar to ToT, using the LM\u0026rsquo;s evaluation of the likelihood of a sequence (similar to LM \u0026ldquo;scoring\u0026rdquo; of a \u0026ldquo;thought\u0026rdquo; in ToT) as a heuristic for the coherence and reasoning within a subsequence of LM output\u0026mdash;forming a \u0026ldquo;self reflection\u0026rdquo; scheme similar to other LM-scoring schemes previously proposed (Paul et al. 2023; Shinn, Labash, and Gopinath 2023). Yet, differing from ToT, we explicitly formulate this scoring by an LM as an \u0026ldquo;observation\u0026rdquo; of an unobservable underlying latent understanding of the input sequence.\nBy solving the LS problem with the anytime POMCP solver (Silver and Veness 2010), we further demonstrate that LS exhibits stronger anytime characteristics on the Game of 24 task as compared to ToT while maintaining performance that is comparable to ToT and superior to CoT. Lastly, we were able to obtain these results at lower costs to ToT evaluations by using a hybrid language modeling approach by using a larger language model, GPT-4, for posterior sampling and evaluation while using a smaller language model, GPT-3.5-Turbo-Instruct, as the \u0026ldquo;thought\u0026rdquo; generator.\nTree of Thoughts We provide here a short summary of the Tree of Thoughts (ToT) (Yao, Yu, et al. 2023) approach that is relevant to our model. ToT offers a scheme to enable multi-step reasoning with LMs by presenting a decomposition of multi-step LM reasoning into individual steps which is then combined through classic approaches in search and planning.\nSpecifically, ToT represents a given problem as a finite-horizon planning problem which it then solves in four broad steps.\nThought Decomposition: by leveraging problem-specific characteristics, each problem is decomposed into distinct, incremental steps towards a solution. 
For the \u0026ldquo;Game of 24\u0026rdquo; task, for instance, each \u0026ldquo;thought\u0026rdquo; is considered a line of equation which contributes to the overall task of combining four numbers to reach 24.\nNow, let \\(p_{\\theta}\\) be our language model, \\(s_{j}^{(i)}\\) be thought candidate \\(j\\) of step \\(i\\) of a decomposed problem, \\(s_{*}^{(i)}\\) the optimal thought to continue from at step \\(i\\), \\(\\tau_{ *} = \\qty[s^{(1)}_{ *}, \u0026hellip;, s^{(n)}_{ *}]\\) a \u0026ldquo;solution\u0026rdquo; to a given problem.\nThought Generation: multiple, initial short decodings of a LM\u0026mdash;sampling from \\(s\u0026rsquo; \\sim p_{\\theta}^{thought}\\qty(s^{(i+1)} | s^{(i)})\\) is obtained which forms a series of next states (\u0026ldquo;thoughts\u0026rdquo;) which encodes a partial step towards the solution which is reachable at any given state.\nThought Evaluation: another LM call rates each of the possible next states for their chance in reaching the solution; specifically, we ask the LM to reason about a given state by calculating the posterior probabilities of predicting a specific judgement of value (the words \u0026ldquo;sure\u0026rdquo;\u0026ldquo;likely\u0026rdquo;\u0026ldquo;impossible\u0026rdquo;) given that state; that is: \\(V(s_{j}) = \\arg\\max_{o} \\{p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\).\nProblem Solving: finally, given this heuristic, solving a specific problem in ToT involves using a search-and-planning scheme (specifically, DFS or BFS) to cycle between generation and evaluation of thoughts until a terminal thought is reached. Branches on the DFS tree is pruned if they are voted as \u0026ldquo;impossible\u0026rdquo;.\nBy combining explicit planning and LM reasoning, this approach achieved state-of-the-art results on the Game of 24 and other difficult natural-language tasks such as a crossword. 
However, the ToT approach does not incorporate any form of heuristic-guided preferential planning between different \u0026ldquo;possible\u0026rdquo; states\u0026mdash;in contrast to dynamic approaches which preferentially explore sequences of high probability of success.\nMethods Problem Formulation Our work formalizes and augments the stepwise decoding scheme proposed by ToT as a Partially Observable Markov Decision Process (POMDP) (Kaelbling, Littman, and Cassandra 1998). A POMDP is a search and planning formulation which emphasizes the uncertain nature of intermediate steps by formalizing each problem into a tuple \\((\\mathcal{S}, \\mathcal{A}, \\mathcal{O}, \\mathcal{T}, \\mathcal{R}, \\gamma, s_0)\\).\nWe specifically define our problem formulation as follows:\n\\(\\mathcal{S} = S \\times U\\), where \\(s \\in S\\) is each sub-step of our decomposed problem and \\(u \\in U\\) representing the unmeasurable, true underlying value being estimated by \\(V(s)\\) in ToT representing the usefulness of a particular thought \\(\\mathcal{A} = [a_0, a_1, a_2]\\), a discrete set of possible problem-solving actions\u0026mdash;to \u0026ldquo;continue\u0026rdquo; expanding a particular branch \\(a_0\\), to \u0026ldquo;rollback\u0026rdquo; to previous branch \\(a_1\\), or to \u0026ldquo;think\u0026rdquo; a new thought at the current branch \\(a_2\\). 
\\(\\mathcal{O} \\in S \\times U\\), exactly the same as \\(\\mathcal{S}\\), but instead of the unobservable underlying value of a given \\(s\\) we obtain \\(V(s)\\) instead from the language model by asking the language model for its judgement regarding a state; importantly, because the observations are simply a sample on the distribution, we can directly use \\(V(s_{j}) \\sim p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\) \\(\\mathcal{T}\\) is given deterministically given the state and action\u0026mdash;\u0026ldquo;continue\u0026rdquo; appends the current state to the solution trajectory, yielding a new subproblem, and calculates a new thought; \u0026ldquo;rollback\u0026rdquo; pops the last item in the solution trajectory back into the current state and reverts to the previous subproblem; \u0026ldquo;think\u0026rdquo; reformulates a new state given the current subproblem \\(\\mathcal{R}\\) is given by a language model evaluator given a final trajectory, where: \\(r_{\\max}\\) is given if the LM believes a trajectory successfully solves the problem, and \\(r_{\\min}\\) is given if the LM believes a trajectory failed to solve the problem Lastly, for this problem, we set discount \\(\\gamma\\) to \\(1\\) to maximize joint reward, and \\(s_0\\) would be the initial, unsolved problem.\nModified POMCP To solve the formalization given above in an actual problem, we chose the POMCP solver (Silver and Veness 2010). 
This solver is chosen for three primary reasons.\nFirst, by only needing actions and observation sequences as input, the solver requires no explicit distribution on observation and transitions, meaning that simply making concrete samples from the language model posterior is enough to take advantage of its distributional nature.\nSecond, the POMCP solver has excellent anytime performance characteristics; the search tree for possible solutions will prioritize most possible solutions as rated by intermediate value, but can expand to (at the worst case) an exhaustive search of all possible intermediate states. In particular, easier problems will have stronger heuristic signals, which typically will take less time to solve; this means that a cutoff could be specified by the user to control the speed/accuracy trade-off in solving a given problems.\nSpecifically, a POMCP solver collects a tree based on sequences of actions and their resulting observations \\(h = \\{a_1, o_1, \u0026hellip;\\}\\). 
When planning for a specific action, the scheme samples a series of possible next states from a generative model given your current state and action \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\) and calculates reward \\(R(s,a)\\) from current state.\nOnce this procedure grows the tree to a certain depth, a point-wise value estimate is calculated from a roll-out.\nFor this specific problem, we modify the typical \u0026ldquo;rollout\u0026rdquo; procedure by essentially performing CoT reasoning with a weighted average of the possible rewards obtained in the end:\n\\begin{algorithm} \\caption{obtain a value estimate at some leaf state $s_{f}$}\\label{alg:cap} \\begin{algorithmic} \\Ensure $d \u0026gt; f$ \\State $s = s_{f}$ \\State $L \\gets d-f$ \\Comment{Depth Remaining in Rollout} \\State $\\tau \\gets \\{s_0, \\dots, s_{f}\\}$ \\While{$L \\neq 0$} \\State $\\tau \\gets \\tau \\cup \\left\\{\\arg\\max_{s\u0026rsquo;} \\qty(p^{thought}_{\\theta}(s\u0026rsquo;|s))\\right\\}$ \\State $s = s\u0026rsquo;$ \\State $L \\gets L-1$ \\EndWhile \\State $V = \\frac{R_{\\max} \\cdot p_{\\theta}^{evaluate}(\\tau^{*}|\\tau)+R_{\\min} \\cdot p_{\\theta}^{evaluate}(\\neg\\tau^{*}|\\tau)}{R_{\\max}+R_{\\min}}$\\Comment{LM-Posterior Weighted Average of Possible Reward} \\State \\Return $V$ \\end{algorithmic} \\end{algorithm}\nwhere, \\(p_{\\theta}^{thought}\\) is the \u0026ldquo;thought\u0026rdquo; generation prompt previously discussed, and \\(p_{\\theta}^{evaluate}\\) is the evaluation prompt to check if a particular trajectory truly solves the target task which is also used in reward calculation. 
Recall that \\(\\tau^{*}\\) represents a trajectory which answers the given question correctly.\nAs CoT is a reasonably performant reasoning procedure that is relatively lightweight to compute, we believe it would serve to raise the lower bound of possible values, and therefore aid the speed of solution in POMCP.\nTask Setup Similar to ToT, we are going to use the Game of 24 as a difficult multi-step reasoning task with which to test the scheme proposed.\nThe Game of 24 is a mathematical reasoning game, which uses four numbers and basic arithmetic operations to obtain a value of 24. For instance, for the problem of 4 9 10 13, a solution trajectory may look as follows:\n\\(s_0\\): subproblem: 4 9 10 13 \\(s_1\\): \\(13-9=4\\), subproblem: 4 4 10 \\(s_2\\): \\(10-4 = 6\\), subproblem: 4 6 \\(s_3\\): \\(4 \\cdot 6\\), subproblem: 24 which concludes the solution.\nData Source: in order to maintain comparability to ToT, we leverage the exact dataset curated by Yao et. al. scraped from 4nums.com as our problem set as well. Importantly, the data is sorted by rate of difficulty (as measured by weighted average time for solution)\nBenchmark: the \u0026ldquo;success rate\u0026rdquo; metric reported involves the success rate across 100 games, corresponding to the metric reported by ToT. Additionally, we also report time-to-solve metrics as measured by the time between the initialization of an empty POMCP tree to obtaining a proposed solution from the scheme.\nLanguage Modeling: distinct from ToT, to perform language modeling, we use two separate language models. \\(p_{\\theta}^{evaluate}\\) and \\(p_{\\theta}^{value}\\) (for \\(\\mathcal{R}\\) and \\(\\mathcal{O}\\) respectively) were computed using GPT-4-Turbo (1106), and \\(p_{\\theta}^{thought}\\) was computed using GPT-3.5-Turbo-Instruct (0914). 
This hybrid approach allows for single-token only inference on the larger GPT-4 models, affording dramatic performance improvements.\nSolving: we performed language model inference through the OpenAI Azure Cognitive Services API and used the POMDPs.jl, BasicPOMCP.jl Julia packages for the orchestration of the solver.\nResults Method Success CoT 4.0% ToT (b=1) 45% ToT (b=5) 74% LS (ours) TODO [plot of dificulty vs time]\nAs shown in [the table], we have [results]. Specifically, we have [these results, which are hopefull ygood]\u0026mdash;far exceeding results from CoT (Wei et al. 2022) and are compatible with the approach in ToT.\nFurthermore, Figure [figure] shows the anytime nature of the proposed solver. As problem difficulty (as rated by solution time-weighted percentage of 4nums.com users\u0026rsquo; solutions) increases, the time it requires for our solver to identify the correct answer increases as well.\nConclusion In this work, we propose Lookahead Sampler (LS), a novel language model decoding scheme that extends ToS (Yao, Yu, et al. 2023) which leverages a large language model\u0026rsquo;s self-reflective reasoning capabilities (Paul et al. 2023; Shinn, Labash, and Gopinath 2023) to guide multi-hop reasoning about a problem.\nWe formalize our approach through the POMDP framework (Kaelbling, Littman, and Cassandra 1998), and demonstrate comparable performance of our approach on the Game of 24 problem to ToT through using the online POMCP (Silver and Veness 2010) solver. Because of the anytime behavior of POMCP, we are able to demonstrate anytime scaling properties of our solver\u0026rsquo;s behavior: more difficult problems takes longer and more LM inferences to solve. Taken together, these properties makes LS a more flexible approach to solving basic multi-step reasoning tasks as compared to previous approaches\u0026mdash;allowing for contemporary LMs to solve more complex problems.\nIn its current form, this work has two key limitations. 
First, similar to that proposed by ToT, the approach is still significantly more computationally expensive than CoT or other direct decoding approaches; therefore, these search techniques is likely unnecessary for problems which can be solved with high accuracy using simpler techniques. Second, posterior distributions\u0026mdash;even when taking only top-k samples for extremely small k\u0026mdash;are still meaningful only on billion-parameter models if used without additional fine tuning (Hu and Levy 2023): making the heuristic-driven performance improvements of LS limited in scope. With additional fine-tuning of surrogate value models, LS could likely perform dramatically more efficiently while obtaining its positive characteristics in solution quality.\nBrown, Tom B., Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, et al. 2020. “Language Models Are Few-Shot Learners.” arXiv. http://arxiv.org/abs/2005.14165. Finlayson, Matthew, John Hewitt, Alexander Koller, Swabha Swayamdipta, and Ashish Sabharwal. 2023. “Closing the Curious Case of Neural Text Degeneration.” arXiv. http://arxiv.org/abs/2310.01693. Holtzman, Ari, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. “The Curious Case of Neural Text Degeneration.” arXiv. http://arxiv.org/abs/1904.09751. Hu, Jennifer, and Roger P Levy. 2023. “Prompting Is Not a Substitute for Probability Measurements in Large Language Models.” In The 2023 Conference on Empirical Methods in Natural Language Processing. Kaelbling, Leslie Pack, Michael L. Littman, and Anthony R. Cassandra. 1998. “Planning and Acting in Partially Observable Stochastic Domains.” Artificial Intelligence 101 (1): 99–134. doi:10.1016/S0004-3702(98)00023-X. Liu, Tianyu, Yizhe Zhang, Chris Brockett, Yi Mao, Zhifang Sui, Weizhu Chen, and Bill Dolan. 2022. 
“A Token-Level Reference-Free Hallucination Detection Benchmark for Free-Form Text Generation.” In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 6723–37. Dublin, Ireland: Association for Computational Linguistics. doi:10.18653/v1/2022.acl-long.464. Paul, Debjit, Mete Ismayilzada, Maxime Peyrard, Beatriz Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. “Refiner: Reasoning Feedback on Intermediate Representations.” arXiv Preprint arXiv:2304.01904. Shinn, Noah, Beck Labash, and Ashwin Gopinath. 2023. “Reflexion: An Autonomous Agent with Dynamic Memory and Self-Reflection.” arXiv Preprint arXiv:2303.11366. Silver, David, and Joel Veness. 2010. “Monte-Carlo Planning in Large POMDPs.” Advances in Neural Information Processing Systems 23. Verma, Siddharth, Justin Fu, Mengjiao Yang, and Sergey Levine. 2022. “CHAI: A CHatbot AI for Task-Oriented Dialogue with Offline Reinforcement Learning.” arXiv. http://arxiv.org/abs/2204.08426. Wei, Jason, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H Chi, Quoc V Le, and Denny Zhou. 2022. “Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.” Yao, Shunyu, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. “Tree of Thoughts: Deliberate Problem Solving with Large Language Models.” arXiv. http://arxiv.org/abs/2305.10601. Yao, Shunyu, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. “ReAct: Synergizing Reasoning and Acting in Language Models.” arXiv. http://arxiv.org/abs/2210.03629. ","html":"\u003ch2 id=\"introduction\"\u003eIntroduction\u003c/h2\u003e\n\u003cp\u003eRecent advances of language models (LMs) introduced the possibility of in-context, few or zero-shot reasoning (\u003ca href=\"#citeproc_bib_item_1\"\u003eBrown et al. 
2020\u003c/a\u003e) using LMs without much or any fine tuning.\u003c/p\u003e\n\u003cp\u003eYet, classically, LM decoding takes place in a left-to-right fashion, auto-regressively resolving one token at a time by sampling from the output distribution of possible next words without multi-step planning.\u003c/p\u003e\n\u003cp\u003eWork in LM agents have taken steps to solve more complex problems that would typically require multi-step reasoning even while using this direct decoding approach. The simplest idea, named \u0026ldquo;chain-of-thoughts\u0026rdquo; (CoT), involves forcing the LM at decode time to begin the decoding process with natural language reasoning about its actions (\u003ca href=\"#citeproc_bib_item_11\"\u003eWei et al. 2022\u003c/a\u003e). The method has contributed to the creation of powerful language agents (\u003ca href=\"#citeproc_bib_item_13\"\u003eYao, Zhao, et al. 2023\u003c/a\u003e) that can reason about complex actions.\u003c/p\u003e\n\u003cp\u003eDespite the relative success of CoT, the scheme still does not support any kind of backtracking as it samples directly from the LM\u0026rsquo;s posterior distribution. When a problem requires a significantly large number of steps to solve, issues relating to \u0026ldquo;de-generation\u0026rdquo; (\u003ca href=\"#citeproc_bib_item_3\"\u003eHoltzman et al. 2020\u003c/a\u003e) becomes increasingly prevalent: whereby, naive maximizations of sequence likelihood results in a most likely sub-phrase being repeated which does not contribute to increased information or progress on a problem.\u003c/p\u003e\n\u003cp\u003eRecent theoretical work suggests these types of degeneration arises due to the distortion of output probability density caused by the last-layer softmax projection into the probability simplex (\u003ca href=\"#citeproc_bib_item_2\"\u003eFinlayson et al. 
2023\u003c/a\u003e): due the lower degrees of freedom offered by a probability syntax, both high and low tails of the latent next-word distribution becomes emphasized in the output probability distribution.\u003c/p\u003e\n\u003cp\u003eTo address this, recent approaches such as Tree of Thoughts (ToT) (\u003ca href=\"#citeproc_bib_item_12\"\u003eYao, Yu, et al. 2023\u003c/a\u003e) have separated the process of next-step proposal (\u0026ldquo;thinking\u0026rdquo;) and choosing the actual step to take given a situation (\u0026ldquo;reasoning\u0026rdquo;). This separate allows the representation of a problem through only short decoding sequences that are less prone to degeneration, while allowing a separate LM call to score the value of being at any given partial solution through a maximum-likely single-token output that is less likely to be distorted.\u003c/p\u003e\n\u003cp\u003eIn this work, we extend the ToT prompting scheme to formalize this process of \u0026ldquo;thinking\u0026rdquo; and \u0026ldquo;reasoning\u0026rdquo; via a Language Model as a Partially Observable Markov Decision Process (POMDP). We call this decoding scheme the Lookahead Sampler (LS).\u003c/p\u003e\n\u003cp\u003eThe key underlying assumption of the proposed LS scheme involves the claim that LMs are able to make judgments about the value of a subsequence towards solving a problem by analyzing the likelihood of a particular sequence against a judgment of value. This assumption is supported by the existence of reinforcement learning formulations of LM-on-LM output verification\u0026mdash;both for reasoning ((\u003ca href=\"#citeproc_bib_item_10\"\u003eVerma et al. 2022\u003c/a\u003e)) and hallucination ((\u003ca href=\"#citeproc_bib_item_6\"\u003eLiu et al. 
2022\u003c/a\u003e))\u0026ndash;as well as the use of LM-inferred state value heuristics in the ToS approach.\u003c/p\u003e\n\u003cp\u003eWe leverage this assumption by, similar to ToT, using the LM\u0026rsquo;s evaluation of the likelihood of a sequence (similar to LM \u0026ldquo;scoring\u0026rdquo; of a \u0026ldquo;thought\u0026rdquo; in ToT) as a heuristic for the coherence and reasoning within a subsequence of LM output\u0026mdash;forming a \u0026ldquo;self reflection\u0026rdquo; scheme similar to other LM-scoring schemes previously proposed (\u003ca href=\"#citeproc_bib_item_7\"\u003ePaul et al. 2023\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_8\"\u003eShinn, Labash, and Gopinath 2023\u003c/a\u003e). Yet, differing from ToT, we explicitly formulate this scoring by an LM as an \u0026ldquo;observation\u0026rdquo; of an unobservable underlying latent understanding of the input sequence.\u003c/p\u003e\n\u003cp\u003eBy solving the LS problem with the anytime POMCP solver (\u003ca href=\"#citeproc_bib_item_9\"\u003eSilver and Veness 2010\u003c/a\u003e), we further demonstrate that LS exhibits stronger anytime characteristics on the Game of 24 task as compared to ToT while maintaining performance that is comparable to ToT and superior to CoT. Lastly, we were able to obtain these results at lower costs to ToT evaluations by using a hybrid language modeling approach by using a larger language model, GPT-4, for posterior sampling and evaluation while using a smaller language model, GPT-3.5-Turbo-Instruct, as the \u0026ldquo;thought\u0026rdquo; generator.\u003c/p\u003e\n\u003ch2 id=\"tree-of-thoughts\"\u003eTree of Thoughts\u003c/h2\u003e\n\u003cp\u003eWe provide here a short summary of the Tree of Thoughts (ToT) (\u003ca href=\"#citeproc_bib_item_12\"\u003eYao, Yu, et al. 2023\u003c/a\u003e) approach that is relevant to our model. 
ToT offers a scheme to enable multi-step reasoning with LMs by presenting a decomposition of multi-step LM reasoning into individual steps which is then combined through classic approaches in search and planning.\u003c/p\u003e\n\u003cp\u003eSpecifically, ToT represents a given problem as a finite-horizon planning problem which it then solves in four broad steps.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThought Decomposition\u003c/strong\u003e: by leveraging problem-specific characteristics, each problem is decomposed into distinct, incremental steps towards a solution. For the \u0026ldquo;Game of 24\u0026rdquo; task, for instance, each \u0026ldquo;thought\u0026rdquo; is considered a line of equation which contributes to the overall task of combining four numbers to reach 24.\u003c/p\u003e\n\u003cp\u003eNow, let \\(p_{\\theta}\\) be our language model, \\(s_{j}^{(i)}\\) be thought candidate \\(j\\) of step \\(i\\) of a decomposed problem, \\(s_{*}^{(i)}\\) the optimal thought to continue from at step \\(i\\), \\(\\tau_{ *} = \\qty[s^{(1)}_{ *}, \u0026hellip;, s^{(n)}_{ *}]\\) a \u0026ldquo;solution\u0026rdquo; to a given problem.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThought Generation\u003c/strong\u003e: multiple, initial short decodings of a LM\u0026mdash;sampling from \\(s\u0026rsquo; \\sim p_{\\theta}^{thought}\\qty(s^{(i+1)} | s^{(i)})\\) is obtained which forms a series of next states (\u0026ldquo;thoughts\u0026rdquo;) which encodes a partial step towards the solution which is reachable at any given state.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThought Evaluation\u003c/strong\u003e: another LM call rates each of the possible next states for their chance in reaching the solution; specifically, we ask the LM to reason about a given state by calculating the posterior probabilities of predicting a specific judgement of value (the words 
\u0026ldquo;sure\u0026rdquo;\u003cem\u003e\u0026ldquo;likely\u0026rdquo;\u003c/em\u003e\u0026ldquo;impossible\u0026rdquo;) given that state; that is: \\(V(s_{j}) = \\arg\\max_{o} \\{p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eProblem Solving\u003c/strong\u003e: finally, given this heuristic, solving a specific problem in ToT involves using a search-and-planning scheme (specifically, DFS or BFS) to cycle between generation and evaluation of thoughts until a terminal thought is reached. Branches on the DFS tree is pruned if they are voted as \u0026ldquo;impossible\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eBy combining explicit planning and LM reasoning, this approach achieved state-of-the-art results on the Game of 24 and other difficult natural-language tasks such as a crossword. However, the ToT approach does not incorporate any form of heuristic-guided preferential planning between different \u0026ldquo;possible\u0026rdquo; states\u0026mdash;in contrast to dynamic approaches which preferentially explore sequences of high probability of success.\u003c/p\u003e\n\u003ch2 id=\"methods\"\u003eMethods\u003c/h2\u003e\n\u003ch3 id=\"problem-formulation\"\u003eProblem Formulation\u003c/h3\u003e\n\u003cp\u003eOur work formalizes and augments the stepwise decoding scheme proposed by ToT as a Partially Observable Markov Decision Process (POMDP) (\u003ca href=\"#citeproc_bib_item_5\"\u003eKaelbling, Littman, and Cassandra 1998\u003c/a\u003e). 
A POMDP is a search and planning formulation which emphasizes the uncertain nature of intermediate steps by formalizing each problem into a tuple \\((\\mathcal{S}, \\mathcal{A}, \\mathcal{O}, \\mathcal{T}, \\mathcal{R}, \\gamma, s_0)\\).\u003c/p\u003e\n\u003cp\u003eWe specifically define our problem formulation as follows:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{S} = S \\times U\\), where \\(s \\in S\\) is each sub-step of our decomposed problem and \\(u \\in U\\) representing the unmeasurable, true underlying value being estimated by \\(V(s)\\) in ToT representing the usefulness of a particular thought\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{A} = [a_0, a_1, a_2]\\), a discrete set of possible problem-solving actions\u0026mdash;to \u0026ldquo;continue\u0026rdquo; expanding a particular branch \\(a_0\\), to \u0026ldquo;rollback\u0026rdquo; to previous branch \\(a_1\\), or to \u0026ldquo;think\u0026rdquo; a new thought at the current branch \\(a_2\\).\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{O} \\in S \\times U\\), exactly the same as \\(\\mathcal{S}\\), but instead of the unobservable underlying value of a given \\(s\\) we obtain \\(V(s)\\) instead from the language model by asking the language model for its judgement regarding a state; importantly, because the observations are simply a sample on the distribution, we can directly use \\(V(s_{j}) \\sim p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{T}\\) is given deterministically given the state and action\u0026mdash;\u0026ldquo;continue\u0026rdquo; appends the current state to the solution trajectory, yielding a new subproblem, and calculates a new thought; \u0026ldquo;rollback\u0026rdquo; pops the last item in the solution trajectory back into the current state and reverts to the previous subproblem; \u0026ldquo;think\u0026rdquo; reformulates a new state given the current 
subproblem\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{R}\\) is given by a language model evaluator given a final trajectory, where: \\(r_{\\max}\\) is given if the LM believes a trajectory successfully solves the problem, and \\(r_{\\min}\\) is given if the LM believes a trajectory failed to solve the problem\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLastly, for this problem, we set discount \\(\\gamma\\) to \\(1\\) to maximize joint reward, and \\(s_0\\) would be the initial, unsolved problem.\u003c/p\u003e\n\u003ch3 id=\"modified-pomcp\"\u003eModified POMCP\u003c/h3\u003e\n\u003cp\u003eTo solve the formalization given above in an actual problem, we chose the POMCP solver (\u003ca href=\"#citeproc_bib_item_9\"\u003eSilver and Veness 2010\u003c/a\u003e). This solver is chosen for three primary reasons.\u003c/p\u003e\n\u003cp\u003eFirst, by only needing actions and observation sequences as input, the solver requires no explicit distribution on observation and transitions, meaning that simply making concrete samples from the language model posterior is enough to take advantage of its distributional nature.\u003c/p\u003e\n\u003cp\u003eSecond, the POMCP solver has excellent anytime performance characteristics; the search tree for possible solutions will prioritize most possible solutions as rated by intermediate value, but can expand to (at the worst case) an exhaustive search of all possible intermediate states. In particular, easier problems will have stronger heuristic signals, which typically will take less time to solve; this means that a cutoff could be specified by the user to control the speed/accuracy trade-off in solving a given problems.\u003c/p\u003e\n\u003cp\u003eSpecifically, a POMCP solver collects a tree based on sequences of actions and their resulting observations \\(h = \\{a_1, o_1, \u0026hellip;\\}\\). 
When planning for a specific action, the scheme samples a series of possible next states from a generative model given your current state and action \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\) and calculates reward \\(R(s,a)\\) from current state.\u003c/p\u003e\n\u003cp\u003eOnce this procedure grows the tree to a certain depth, a point-wise value estimate is calculated from a roll-out.\u003c/p\u003e\n\u003cp\u003eFor this specific problem, we modify the typical \u0026ldquo;rollout\u0026rdquo; rollout procedure by essentially performing CoT reasoning with a weighted average of the possible rewards in obtained in the end:\u003c/p\u003e\n\u003cp\u003e\\begin{algorithm}\n\\caption{obtain a value estimate at some leaf state $s_{f}$}\\label{alg:cap}\n\\begin{algorithmic}\n\\Ensure $d \u0026gt; f$\n\\State $s = s_{f}$\n\\State $L \\gets d-n$ \\Comment{Depth Remaning in Rollout}\n\\State $\\tau \\gets \\{s_0, \\dots, s_{f}\\}$\n\\While{$L \\neq 0$}\n\\State $\\tau \\gets \\tau \\cup \\left\\{\\arg\\max_{s\u0026rsquo;} \\qty(p^{thought}_{\\theta}(s\u0026rsquo;|s))\\right\\}$\n\\State $s = s\u0026rsquo;$\n\\State $L \\gets L-1$\n\\EndWhile\n\\State $V = \\frac{R_{\\max} \\cdot p_{\\theta}^{evaluate}(\\tau^{*}|\\tau)+R_{\\min} \\cdot p_{\\theta}^{evaluate}(\\neg\\tau^{*}|\\tau)}{R_{\\max}+R_{\\min}}$\\Comment{LM-Posterior Weighted Average of Possible Reward}\n\\State \\Return $V$\n\\end{algorithmic}\n\\end{algorithm}\u003c/p\u003e\n\u003cp\u003ewhere, \\(p_{\\theta}^{thought}\\) is the \u0026ldquo;thought\u0026rdquo; generation prompt previously discussed, and \\(p_{\\theta}^{evaluate}\\) is the evaluation prompt to check if a particular trajectory truly solves the target task which is also used in reward calculation. 
Recall that \\(\\tau^{*}\\) represents a trajectory which answers the given question correctly.\u003c/p\u003e\n\u003cp\u003eAs CoT is a reasonably performant reasoning procedure that is relatively lightweight to compute, we believe it would serve to \u003cem\u003eraise\u003c/em\u003e the lower bound of possible values, and therefore aid the speed of solution in POMCP.\u003c/p\u003e\n\u003ch3 id=\"task-setup\"\u003eTask Setup\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-13_22-38-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSimilar to ToT, we are going to use the Game of 24 as a difficult multi-step reasoning task with which to test the scheme proposed.\u003c/p\u003e\n\u003cp\u003eThe Game of 24 is a mathematical reasoning game, which uses four numbers and basic arithmetic operations to obtain a value of 24. For instance, for the problem of \u003ccode\u003e4 9 10 13\u003c/code\u003e, a solution trajectory may look as follows:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(s_0\\): subproblem: 4 9 10 13\u003c/li\u003e\n\u003cli\u003e\\(s_1\\): \\(13-9=4\\), subproblem: 4 4 10\u003c/li\u003e\n\u003cli\u003e\\(s_2\\): \\(10-6 = 6\\), subproblem: 4 6\u003c/li\u003e\n\u003cli\u003e\\(s_3\\): \\(4 \\cdot 6\\), subproblem: 24\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewhich concludes the solution.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eData Source\u003c/strong\u003e: in order to maintain comparability to ToT, we leverage the exact dataset curated by Yao et. al. scraped from 4nums.com as our problem set as well. Importantly, the data is sorted by rate of difficulty (as measured by weighted average time for solution)\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eBenchmark\u003c/strong\u003e: the \u0026ldquo;success rate\u0026rdquo; metric reported involves the success rate across 100 games, corresponding to the metric reported by ToT. 
Additionally, we also report time-to-solve metrics as measured by the time between the initialization of an empty POMCP tree to obtaining a proposed solution from the scheme.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eLanguage Modeling\u003c/strong\u003e: distinct from ToT, to perform language model, we use \u003cem\u003etwo separate language models\u003c/em\u003e. \\(p_{\\theta}^{evaluate}\\) and \\(p_{\\theta}^{value}\\) (for \\(\\mathcal{R}\\) and \\(\\mathcal{O}\\) respectively) were computed using GPT-4-Turbo (1106), and \\(p_{\\theta}^{thought}\\) was computed using GPT-3.5-Turbo-Instruct (0914). This hybrid approach allows for single-token only inference on the larger GPT-4 models, affording dramatic performance improvements.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eSolving\u003c/strong\u003e: we performed language model inference through the OpenAI Azure Cognitive Services API and used the POMDPs.jl, BasicPOMCP.jl Julia packages for the orchestration of the solver.\u003c/p\u003e\n\u003ch3 id=\"results\"\u003eResults\u003c/h3\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eMethod\u003c/th\u003e\n\u003cth\u003eSuccess\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCoT\u003c/td\u003e\n\u003ctd\u003e4.0%\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eToT (b=1)\u003c/td\u003e\n\u003ctd\u003e45%\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eToT (b=5)\u003c/td\u003e\n\u003ctd\u003e74%\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLS (ours)\u003c/td\u003e\n\u003ctd\u003eTODO\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e[plot of dificulty vs time]\u003c/p\u003e\n\u003cp\u003eAs shown in [the table], we have [results]. Specifically, we have [these results, which are hopefull ygood]\u0026mdash;far exceeding results from CoT (\u003ca href=\"#citeproc_bib_item_11\"\u003eWei et al. 
2022\u003c/a\u003e) and are compatible with the approach in ToT.\u003c/p\u003e\n\u003cp\u003eFurthermore, Figure [figure] shows the anytime nature of the proposed solver. As problem difficulty (as rated by solution time-weighted percentage of 4nums.com users\u0026rsquo; solutions) increases, the time it requires for our solver to identify the correct answer increases as well.\u003c/p\u003e\n\u003ch2 id=\"conclusion\"\u003eConclusion\u003c/h2\u003e\n\u003cp\u003eIn this work, we propose Lookahead Sampler (LS), a novel language model decoding scheme that extends ToS (\u003ca href=\"#citeproc_bib_item_12\"\u003eYao, Yu, et al. 2023\u003c/a\u003e) which leverages a large language model\u0026rsquo;s self-reflective reasoning capabilities (\u003ca href=\"#citeproc_bib_item_7\"\u003ePaul et al. 2023\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_8\"\u003eShinn, Labash, and Gopinath 2023\u003c/a\u003e) to guide multi-hop reasoning about a problem.\u003c/p\u003e\n\u003cp\u003eWe formalize our approach through the POMDP framework (\u003ca href=\"#citeproc_bib_item_5\"\u003eKaelbling, Littman, and Cassandra 1998\u003c/a\u003e), and demonstrate comparable performance of our approach on the Game of 24 problem to ToT through using the online POMCP (\u003ca href=\"#citeproc_bib_item_9\"\u003eSilver and Veness 2010\u003c/a\u003e) solver. Because of the anytime behavior of POMCP, we are able to demonstrate anytime scaling properties of our solver\u0026rsquo;s behavior: more difficult problems takes longer and more LM inferences to solve. Taken together, these properties makes LS a more flexible approach to solving basic multi-step reasoning tasks as compared to previous approaches\u0026mdash;allowing for contemporary LMs to solve more complex problems.\u003c/p\u003e\n\u003cp\u003eIn its current form, this work has two key limitations. 
First, similar to that proposed by ToT, the approach is still \u003cem\u003esignificantly\u003c/em\u003e more computationally expensive than CoT or other direct decoding approaches; therefore, these search techniques is likely unnecessary for problems which can be solved with high accuracy using simpler techniques. Second, posterior distributions\u0026mdash;even when taking only top-k samples for extremely small k\u0026mdash;are still meaningful only on billion-parameter models if used without additional fine tuning (\u003ca href=\"#citeproc_bib_item_4\"\u003eHu and Levy 2023\u003c/a\u003e): making the heuristic-driven performance improvements of LS limited in scope. With additional fine-tuning of surrogate value models, LS could likely perform dramatically more efficiently while obtaining its positive characteristics in solution quality.\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eBrown, Tom B., Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, et al. 2020. “Language Models Are Few-Shot Learners.” arXiv. \u003ca href=\"http://arxiv.org/abs/2005.14165\"\u003ehttp://arxiv.org/abs/2005.14165\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eFinlayson, Matthew, John Hewitt, Alexander Koller, Swabha Swayamdipta, and Ashish Sabharwal. 2023. “Closing the Curious Case of Neural Text Degeneration.” arXiv. \u003ca href=\"http://arxiv.org/abs/2310.01693\"\u003ehttp://arxiv.org/abs/2310.01693\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eHoltzman, Ari, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. “The Curious Case of Neural Text Degeneration.” arXiv. 
\u003ca href=\"http://arxiv.org/abs/1904.09751\"\u003ehttp://arxiv.org/abs/1904.09751\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eHu, Jennifer, and Roger P Levy. 2023. “Prompting Is Not a Substitute for Probability Measurements in Large Language Models.” In \u003ci\u003eThe 2023 Conference on Empirical Methods in Natural Language Processing\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eKaelbling, Leslie Pack, Michael L. Littman, and Anthony R. Cassandra. 1998. “Planning and Acting in Partially Observable Stochastic Domains.” \u003ci\u003eArtificial Intelligence\u003c/i\u003e 101 (1): 99–134. doi:\u003ca href=\"https://doi.org/10.1016/S0004-3702(98)00023-X\"\u003e10.1016/S0004-3702(98)00023-X\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eLiu, Tianyu, Yizhe Zhang, Chris Brockett, Yi Mao, Zhifang Sui, Weizhu Chen, and Bill Dolan. 2022. “A Token-Level Reference-Free Hallucination Detection Benchmark for Free-Form Text Generation.” In \u003ci\u003eProceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\u003c/i\u003e, 6723–37. Dublin, Ireland: Association for Computational Linguistics. doi:\u003ca href=\"https://doi.org/10.18653/v1/2022.acl-long.464\"\u003e10.18653/v1/2022.acl-long.464\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003ePaul, Debjit, Mete Ismayilzada, Maxime Peyrard, Beatriz Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. 
“Refiner: Reasoning Feedback on Intermediate Representations.” \u003ci\u003earXiv Preprint arXiv:2304.01904\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eShinn, Noah, Beck Labash, and Ashwin Gopinath. 2023. “Reflexion: An Autonomous Agent with Dynamic Memory and Self-Reflection.” \u003ci\u003earXiv Preprint arXiv:2303.11366\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003eSilver, David, and Joel Veness. 2010. “Monte-Carlo Planning in Large POMDPs.” \u003ci\u003eAdvances in Neural Information Processing Systems\u003c/i\u003e 23.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_10\"\u003e\u003c/a\u003eVerma, Siddharth, Justin Fu, Mengjiao Yang, and Sergey Levine. 2022. “CHAI: A CHatbot AI for Task-Oriented Dialogue with Offline Reinforcement Learning.” arXiv. \u003ca href=\"http://arxiv.org/abs/2204.08426\"\u003ehttp://arxiv.org/abs/2204.08426\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_11\"\u003e\u003c/a\u003eWei, Jason, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H Chi, Quoc V Le, and Denny Zhou. 2022. “Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.”\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_12\"\u003e\u003c/a\u003eYao, Shunyu, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. “Tree of Thoughts: Deliberate Problem Solving with Large Language Models.” arXiv. \u003ca href=\"http://arxiv.org/abs/2305.10601\"\u003ehttp://arxiv.org/abs/2305.10601\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_13\"\u003e\u003c/a\u003eYao, Shunyu, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. 
“ReAct: Synergizing Reasoning and Acting in Language Models.” arXiv. \u003ca href=\"http://arxiv.org/abs/2210.03629\"\u003ehttp://arxiv.org/abs/2210.03629\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhloop_of_thoughts/","tags":null,"title":"Project Proposal: Lookahead Sampler"},{"categories":null,"contents":"Project80 is a podcast hosted by Houjun Liu, Anoushka Krishnan, Micah Brown, Mia Tavares, among others.\nCollege Application w.r.t. Project80 Cheese mission statement: Project80 is a good way of creating a self-propegating set of learning that would serve to benefit and educate future generations in hopes of creating a more equitable planet.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e is a podcast hosted by \u003ca href=\"/posts/kbhhoujun_liu/\"\u003eHoujun Liu\u003c/a\u003e, \u003ca href=\"/posts/kbhanoushka_krishnan/\"\u003eAnoushka Krishnan\u003c/a\u003e, \u003ca href=\"/posts/kbhmicah_brown/\"\u003eMicah Brown\u003c/a\u003e, \u003ca href=\"/posts/kbhmia_tavares/\"\u003eMia Tavares\u003c/a\u003e, among others.\u003c/p\u003e\n\u003ch2 id=\"college-application--kbhcollege-application-dot-md--w-dot-r-dot-t-dot-project80--kbhproject80-dot-md\"\u003e\u003ca href=\"/posts/kbhcollege_application/\"\u003eCollege Application\u003c/a\u003e w.r.t. \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eCheese mission statement: \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e is a good way of creating a self-propegating set of learning that would serve to benefit and educate future generations in hopes of creating a more equitable planet.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproject80/","tags":null,"title":"Project80"},{"categories":null,"contents":"Natural science education resources traditionally teach only codified theory. 
While theory education is crucial, much of academic science takes place via scrutinizing contested scientific discourse. Due to such resources’ content complexity, high school students are rarely exposed to current, debatable, and relevant science. In response, we introduce Project80: a systemic, student-run protocol to synthesize the latest primary literature in a sub-field into approachable, produced multimedia educational content. The protocol is run by a team of 7 students over the course of 1 month. Students running the protocol consume complex scientific literature, distill relevant data and findings, and synthesize a culminating product of audiovisual content to supplement existing biology and chemistry pedagogy. The system runs independently with limited faculty involvement. Our analysis indicates that the multimedia content created by this protocol will be relevant to roughly 30 courses locally at our institution and will have further extensions in secondary education beyond.\n","html":"\u003cp\u003eNatural science education resources traditionally teach only codified theory. While theory education is crucial, much of academic science takes place via scrutinizing contested scientific discourse. Due to such resources’ content complexity, high school students are rarely exposed to current, debatable, and relevant science. In response, we introduce Project80: a systemic, student-run protocol to synthesize the latest primary literature in a sub-field into approachable, produced multimedia educational content. The protocol is run by a team of 7 students over the course of 1 month. Students running the protocol consume complex scientific literature, distill relevant data and findings, and synthesize a culminating product of audiovisual content to supplement existing biology and chemistry pedagogy. The system runs independently with limited faculty involvement. 
Our analysis indicates that the multimedia content created by this protocol will be relevant to roughly 30 courses locally at our institution and will have further extensions in secondary education beyond.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproject80_abstract/","tags":null,"title":"Project80 Abstract"},{"categories":null,"contents":"Projects Index is a index that contains a list of almost all projects for which I have ever worked on. Major categories are highlighted from chapter titles.\nResearch Projects I end up doing a lot of research these days, and so have isolated that to a different, academic homepage.\nFor a list of my recent research, please head to the Research Index.\nMedia Production Projects I produce a lot of media (videos, podcasts, blogs, live events/talks) as a part of publicizing my work or for other purposes. For those types of projects, head on over to Production Index.\nLarge-Scale Endeavors Condution An open-source task management app. Website.\nMotivation: I got really tired with most other to-do apps after swapping them out over and over again, until I got fed up and built one with some friends.\nRole: Co-Founder, Lead Developer. Technologies: React, Ionic, Firebase, Typescript, Swift, PostgreSQL Key facts: 10,000+ users, 8-person team, featured in the Bay Area almanac, praised by Asana’s head of developer relations for “open-source advocacy” MODAP A R\u0026amp;D team for fireline safety during emergency fires. Repository.\nMotivation: a friend approached me with an opportunity to help our local community, especially with the increased influx of fires.\nRole: Team Lead Technologies: Rust, Torch, ARM, electronics (i2C, UART, messaging protocols, etc.) Key facts: coordinated 5 engineers in developing new technology, supported by Dr. Robert G. 
Gann, Deputy Director, Center of Excellence for Advanced Technology Aerial Firefighting at the state of Colorado as well as Captain Mason of CalFire CMU batchalign A pipeline for the automated preparation of annotated CHAT transcripts from raw audio. Repository.\nMotivation: my work over the summer.\nRole: Author Technologies: Torch, Huggingface, NLTK, CLAN, computational linguistics Key facts: work developed with and maintained under Prof. Brian MacWhinney at CMU\u0026rsquo;s psycolinguistics department. AIBridge A bootcamp for non-CS students in data science. Website\nMotivation:\nRole: Co-Founder, Lecturer Technologies: Python, ScyPy, Scikit-learn, Pandas Key facts: worked with Prof. Xin Liu at UC Davis to develop an introductary one-week bootcamp in ML. We piloted the program this summer at Davis to an in-person group of 20 PhD students in food science sponsored by AIFS. Full-Stack Projects Simon Augmenting the functionality of large-language-models with Elastic. Repository.\nMotivation: LLMs have become more and more prominent, and frameworks like ReAct is finally mature enough to produce coherent, well reasoned responses.\nRole: Author Technologies: Huggingface, GPT-3.5, ElasticSearch tractotato CommonLisp macroset for time tracking. Repo.\nMotivation: I wanted to learn CommonLisp macros syntax after reading the Land of Lisp book.\nRole: author Technologies: CommonLisp Scratchathon Portal Portal to submit projects for a scratch hackathon I hosted. Repo.\nMotivation: my friends McGuy and fuelvin, both content creators on Scratch on YouTube, put together a Scratch hackathon summer of 2020. This is the submission portal.\nRole: author Technologies: React, Vercel, Firebase syzygy Library rethinking to-do list dating to be more flexible and powerful. 
Repo.\nMotivation: a friend and I wanted to innovate beyond the scope of Condution to see how we can abstract away a to-do list system to its bare minimum.\nRole: co-founder, co-author Technologies: Rust positron Library for building lightweight native apps using web tech. Repo.\nMotivation: I wanted to re-make electron to be more lightweight using Suckless\u0026rsquo; Surf browser concept.\nRole: author Technologies: C++, GTK OS/Driver Development Broadcom Wifi/Bluetooth 4377 Chip Linux Driver A driver patchset to support cutting-edge Broadcom 4377 Wifi/Bluetooth chips. Repo.\nMotivation: I needed to be able to use Wifi on my laptop while running Arch Linux.\nRole: author Technologies: C, (small amounts of) Assembly Key facts: integrated into the t2linux pipeline used to make WiFi possible on Linux for most MacBooks released after 2018 Distributed Algorithms and Parallel Computing coveather An encrypted, anonymized system for protected health information verification. Preprint, Repo, and internal note.\nMotivation: I wanted to be able to make vaccine passports more feasible because the current COVID testing/vaccine verification scheme is really bad.\nRole: author Technologies: Clojure, core.async concurrency, Monte-Carlo simulations, blockchain, PGP Key facts: project won first place at the California STEM Fair, and got special recognition from the Yale Science and Engineering assoc. Total award $3000. multischedule A multiple-asynchronous scheduling and delegation algorithm. Repo.\nMotivation: (didn\u0026rsquo;t even come close to getting there) I wanted to create a way to solve or simplify debugging loop overrun problems in robotics codebases.\nRole: author Technologies: Clojure, core.async concurrency rotifer A work-in-progress distributed algorithm for taproot. Repo.\nMotivation: I wanted to make taproot even more distributed if possible.\nRole: author Technologies: Clojure, XML, UDP, ICE simian Exploring OT/CRDT and collaborative text editing for taproot. 
Repo.\nMotivation: I wanted to learn about how apps like Google Docs work, and explore Operational Transformation/CRDT, in hopes of putting it into taproot.\nRole: author Technologies: Clojure, OT, CRDT aron A distributed multi-dimensional optimization tool. Repo.\nMotivation: Nueva\u0026rsquo;s course scheduling was quite a mess, and I wanted to help. It is a very complex problem and this project is in the freezer at the moment.\nRole: author Technologies: CommonLisp mitte Easy UDP sockets. Repo, Docs.\nMotivation: a friend and I wanted to explore UDP.\nRole: co-author Technologies: Rust, UDP, ICE (connection) Cryptography and security See also: coveather.\njrainbow An implementation of a MD5 rainbow table. Repo, Crate.\nMotivation: I wanted to understand how Rainbow Tables worked.\nRole: author Technologies: Rust, MD5 Note-taking Systems and \\(\\LaTeX\\) improvements taproot A shared zettlekasten of notes and learning resources put together by some friends and I. there has been a few iterations. Current Repo, Current Site, Legacy Site, Even More Legacy Site.\nMotivation: I started writing nice \\(\\LaTeX\\) PDFs of my homework, and some friends wanted to have access to it. Later when I mentioned it, another friend had a similar need; so we asked many people to pool our notes and work together to share.\nRole: co-founder, co-lead, developer Technologies: Next.JS, XeLaTeX, GNU Make, Firn, Hugo, Emacs Org, Org-Publish, Markdown blag The zettlekasten you are currently in! My currently maintained personal knowledgebase. Repo, Site.\nMotivation: I wanted to experiment with more advanced note-taking techniques after developing taproot, and it ended up superseeding the note-taking abilities of taproot.\nRole: author Technologies: Next.js, Emacs Org, Hugo gdoc.el A utility to enable GNU Emacs to edit Google Doc documents based on the gdrive utility. 
Repo.\nMotivation: I wanted to edit Google Docs in Emacs!\nRole: author Technologies: GNU Emacs, elisp interesting Things that my friends and I find interesting, chucked on the web and builds itself. Repo, Site. No longer maintained.\nMotivation: many text channels were too clogged with stuff my friend group found interesting, so I wanted to take initiative to collect them.\nRole: co-founder, author Technologies: Next.js, Vercel, remark, CommonMark Markdown Public Configurations borg Automatically configure terminals. Repo.\nMotivation: I needed a way to copy my system terminal config onto a system quickly.\nRole: author Technologies: Bash, Zsh, OhMyZsh .config A group of sane configuration files. Repo.\nMotivation: some Redditors asked for my Config, and I thought I\u0026rsquo;d share it to benefit the community; also for personal backup.\nRole: author, maintainer Technologies: Unix administration, Perl, Ruby, LISP .emacs.d Simple, powerful, and semantic GNU Emacs configuration for personal use. Repo.\nMotivation: I wanted to track my progress in developing a working Emacs config.\nRole: author, maintainer Technologies: GNU Emacs, elisp ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprojects/\"\u003eProjects Index\u003c/a\u003e is a \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003eindex\u003c/a\u003e that contains a list of almost all projects for which I have ever worked on. 
Major categories are highlighted from chapter titles.\u003c/p\u003e\n\u003ch2 id=\"research-projects\"\u003eResearch Projects\u003c/h2\u003e\n\u003cp\u003eI end up doing a lot of research these days, and so have isolated that to a different, academic homepage.\u003c/p\u003e\n\u003cp\u003eFor a list of my recent research, please head to the \u003ca href=\"/posts/kbhresearch_index/\"\u003eResearch Index\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"media-production-projects\"\u003eMedia Production Projects\u003c/h2\u003e\n\u003cp\u003eI produce a lot of media (videos, podcasts, blogs, live events/talks) as a part of publicizing my work or for other purposes. For those types of projects, head on over to \u003ca href=\"/posts/kbhproduction_index/\"\u003eProduction Index\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"large-scale-endeavors\"\u003eLarge-Scale Endeavors\u003c/h2\u003e\n\u003ch3 id=\"condution\"\u003eCondution\u003c/h3\u003e\n\u003cp\u003eAn open-source task management app. \u003ca href=\"https://www.condution.com/\"\u003eWebsite\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I got really tired with most other to-do apps after swapping them out over and over again, until I got fed up and built one with some friends.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Co-Founder, Lead Developer.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: React, Ionic, Firebase, Typescript, Swift, PostgreSQL\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: 10,000+ users, 8-person team, \u003ca href=\"https://www.almanacnews.com/print/story/2021/02/26/community-briefs\"\u003efeatured\u003c/a\u003e in the Bay Area almanac, praised by Asana’s head of developer relations for “open-source 
advocacy”\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"modap\"\u003eMODAP\u003c/h3\u003e\n\u003cp\u003eA R\u0026amp;D team for fireline safety during emergency fires. \u003ca href=\"https://github.com/MODAP/stack\"\u003eRepository\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: a friend approached me with an opportunity to help our local community, especially with the increased influx of fires.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Team Lead\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust, Torch, ARM, electronics (i2C, UART, messaging protocols, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: coordinated 5 engineers in developing new technology, supported by Dr. Robert G. Gann, Deputy Director, Center of Excellence for Advanced Technology Aerial Firefighting at the state of Colorado as well as Captain Mason of CalFire\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cmu-batchalign--kbhbatchalign-dot-md\"\u003eCMU \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eA pipeline for the automated preparation of annotated CHAT transcripts from raw audio. 
\u003ca href=\"https://github.com/talkbank/batchalign\"\u003eRepository\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: my work over the summer.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Torch, Huggingface, NLTK, CLAN, computational linguistics\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: work developed with and maintained under Prof. Brian MacWhinney at CMU\u0026rsquo;s psycolinguistics department.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"aibridge\"\u003eAIBridge\u003c/h3\u003e\n\u003cp\u003eA bootcamp for non-CS students in data science. \u003ca href=\"/posts/kbhaibridge_course_website/\"\u003eWebsite\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Co-Founder, Lecturer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Python, ScyPy, Scikit-learn, Pandas\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: worked with Prof. Xin Liu at UC Davis to develop an introductary one-week bootcamp in ML. 
We piloted the program this summer at Davis to an in-person group of 20 PhD students in food science sponsored by \u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"full-stack-projects\"\u003eFull-Stack Projects\u003c/h2\u003e\n\u003ch3 id=\"simon\"\u003eSimon\u003c/h3\u003e\n\u003cp\u003eAugmenting the functionality of large-language-models with Elastic. \u003ca href=\"https://github.com/shabang-systems/simon\"\u003eRepository\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: LLMs have become more and more prominent, and frameworks like ReAct is finally mature enough to produce coherent, well reasoned responses.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Huggingface, GPT-3.5, ElasticSearch\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"tractotato\"\u003etractotato\u003c/h3\u003e\n\u003cp\u003eCommonLisp macroset for time tracking. \u003ca href=\"https://github.com/Jemoka/tractotato\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to learn CommonLisp macros syntax after reading the \u003ca href=\"http://landoflisp.com/\"\u003eLand of Lisp\u003c/a\u003e book.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: CommonLisp\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"scratchathon-portal\"\u003eScratchathon Portal\u003c/h3\u003e\n\u003cp\u003ePortal to submit projects for a scratch hackathon I hosted. 
\u003ca href=\"https://github.com/Jemoka/ScratchathonPortal\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: my friends \u003ca href=\"https://www.youtube.com/channel/UC2MtlTiLxWNQAjHyFZt95Vw\"\u003eMcGuy\u003c/a\u003e and \u003ca href=\"https://www.youtube.com/watch?v=1Fll6uaz5Kk\"\u003efuelvin\u003c/a\u003e, both content creators on Scratch on YouTube, put together a Scratch hackathon summer of 2020. This is the submission portal.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: React, Vercel, Firebase\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"syzygy\"\u003esyzygy\u003c/h3\u003e\n\u003cp\u003eLibrary rethinking to-do list dating to be more flexible and powerful. \u003ca href=\"https://github.com/jklsnt/syzygy\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: a friend and I wanted to innovate beyond the scope of Condution to see how we can abstract away a to-do list system to its bare minimum.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-founder, co-author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"positron\"\u003epositron\u003c/h3\u003e\n\u003cp\u003eLibrary for building lightweight native apps using web tech. 
\u003ca href=\"https://github.com/jklsnt/positron\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to re-make electron to be more lightweight using Suckless\u0026rsquo; Surf browser concept.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: C++, GTK\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"os-driver-development\"\u003eOS/Driver Development\u003c/h2\u003e\n\u003ch3 id=\"broadcom-wifi-bluetooth-4377-chip-linux-driver\"\u003eBroadcom Wifi/Bluetooth 4377 Chip Linux Driver\u003c/h3\u003e\n\u003cp\u003eA driver patchset to support cutting-edge Broadcom 4377 Wifi/Bluetooth chips. \u003ca href=\"https://github.com/Jemoka/linux-mbp-wifi\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I needed to be able to use Wifi on my laptop while running Arch Linux.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: C, (small amounts of) Assembly\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: integrated into the \u003ca href=\"https://wiki.t2linux.org/\"\u003et2linux\u003c/a\u003e pipeline used to make WiFi possible on Linux for most MacBooks released after 2018\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"distributed-algorithms-and-parallel-computing\"\u003eDistributed Algorithms and Parallel Computing\u003c/h2\u003e\n\u003ch3 id=\"coveather\"\u003ecoveather\u003c/h3\u003e\n\u003cp\u003eAn encrypted, anonymized system for 
protected health information verification. \u003ca href=\"https://arxiv.org/abs/2205.02753\"\u003ePreprint\u003c/a\u003e, \u003ca href=\"https://github.com/Jemoka/coveather\"\u003eRepo\u003c/a\u003e, and \u003ca href=\"/posts/kbhcoveather/\"\u003einternal note\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to be able to make vaccine passports more feasible because the current COVID testing/vaccine verification scheme is really bad.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, \u003ccode\u003ecore.async\u003c/code\u003e concurrency, Monte-Carlo simulations, blockchain, PGP\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: project won first place at the California STEM Fair, and got special recognition from the Yale Science and Engineering assoc. Total award $3000.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multischedule\"\u003emultischedule\u003c/h3\u003e\n\u003cp\u003eA multiple-asynchronous scheduling and delegation algorithm. 
\u003ca href=\"https://github.com/Jemoka/multischedule\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: (didn\u0026rsquo;t even come close to getting there) I wanted to create a way to solve or simplify debugging loop overrun problems in robotics codebases.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, \u003ccode\u003ecore.async\u003c/code\u003e concurrency\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"rotifer\"\u003erotifer\u003c/h3\u003e\n\u003cp\u003eA work-in-progress distributed algorithm for \u003ca href=\"#taproot\"\u003etaproot\u003c/a\u003e. \u003ca href=\"https://github.com/jklsnt/rotifer\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to make taproot even more distributed if possible.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, XML, UDP, ICE\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"simian\"\u003esimian\u003c/h3\u003e\n\u003cp\u003eExploring OT/CRDT and collaborative text editing for taproot. 
\u003ca href=\"https://github.com/jklsnt/simian\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to learn about how apps like Google Docs work, and explore Operational Transformation/CRDT, in hopes of putting it into \u003ca href=\"#taproot\"\u003etaproot\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, OT, CRDT\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"aron\"\u003earon\u003c/h3\u003e\n\u003cp\u003eA distributed multi-dimensional optimization tool. \u003ca href=\"https://github.com/Jemoka/aron\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"/posts/kbhnueva_courses_index/\"\u003eNueva\u003c/a\u003e\u0026rsquo;s course scheduling was quite a mess, and I wanted to help. It is a very complex problem and this project is in the freezer at the moment.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: CommonLisp\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"mitte\"\u003emitte\u003c/h3\u003e\n\u003cp\u003eEasy UDP sockets. 
\u003ca href=\"https://github.com/jklsnt/mitte\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://jklsnt.github.io/mitte/mitte/\"\u003eDocs\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: a friend and I wanted to explore UDP.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust, UDP, ICE (connection)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"cryptography-and-security\"\u003eCryptography and security\u003c/h2\u003e\n\u003cp\u003eSee also: \u003ca href=\"#coveather\"\u003ecoveather\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"jrainbow\"\u003ejrainbow\u003c/h3\u003e\n\u003cp\u003eAn implementation of a MD5 rainbow table. \u003ca href=\"https://github.com/Jemoka/rainbow\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://crates.io/crates/jrainbow\"\u003eCrate\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to understand how Rainbow Tables worked.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust, MD5\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"note-taking-systems-and-latex-improvements\"\u003eNote-taking Systems and \\(\\LaTeX\\) improvements\u003c/h2\u003e\n\u003ch3 id=\"taproot\"\u003etaproot\u003c/h3\u003e\n\u003cp\u003eA shared \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e of notes and learning resources put together by some friends and I. there has been a few iterations. 
\u003ca href=\"https://github.com/jklsnt/taproot3\"\u003eCurrent Repo\u003c/a\u003e, \u003ca href=\"https://taproot3.jklsnt.com/\"\u003eCurrent Site\u003c/a\u003e, \u003ca href=\"https://taproot2.shabang.cf/\"\u003eLegacy Site\u003c/a\u003e, \u003ca href=\"https://taproot.shabang.cf/\"\u003eEven More Legacy Site\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I started writing nice \\(\\LaTeX\\) PDFs of my homework, and some friends wanted to have access to it. Later when I mentioned it, another friend had a similar need; so we asked many people to pool our notes and work together to share.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-founder, co-lead, developer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Next.JS, XeLaTeX, GNU Make, Firn, Hugo, Emacs Org, Org-Publish, Markdown\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"blag\"\u003eblag\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e you are currently in! My currently maintained personal knowledgebase. 
\u003ca href=\"https://github.com/jemoka/blag\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://www.jemoka.com/\"\u003eSite\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to experiment with more advanced note-taking techniques after developing taproot, and it ended up superseeding the note-taking abilities of taproot.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Next.js, Emacs Org, Hugo\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"gdoc-dot-el\"\u003egdoc.el\u003c/h3\u003e\n\u003cp\u003eA utility to enable GNU Emacs to edit Google Doc documents based on the \u003ccode\u003egdrive\u003c/code\u003e utility. \u003ca href=\"https://github.com/Jemoka/gdoc.el\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to edit Google Docs in Emacs!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: GNU Emacs, elisp\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"interesting\"\u003einteresting\u003c/h3\u003e\n\u003cp\u003eThings that my friends and I find interesting, chucked on the web and builds itself. \u003ca href=\"https://github.com/Jemoka/interesting\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://interesting-blue.vercel.app/\"\u003eSite\u003c/a\u003e. 
No longer maintained.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: many text channels were too clogged with stuff my friend group found interesting, so I wanted to take initiative to collect them.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-founder, author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Next.js, Vercel, remark, CommonMark Markdown\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"public-configurations\"\u003ePublic Configurations\u003c/h2\u003e\n\u003ch3 id=\"borg\"\u003eborg\u003c/h3\u003e\n\u003cp\u003eAutomatically configure terminals. \u003ca href=\"https://github.com/Jemoka/Borg\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I needed a way to copy my system terminal config onto a system quickly.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Bash, Zsh, OhMyZsh\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dot-config\"\u003e.config\u003c/h3\u003e\n\u003cp\u003eA group of sane configuration files. 
\u003ca href=\"https://github.com/Jemoka/.config\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: some Redditors asked for my Config, and I thought I\u0026rsquo;d share it to benefit the community; also for personal backup.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author, maintainer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Unix administration, Perl, Ruby, LISP\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dot-emacs-dot-d\"\u003e.emacs.d\u003c/h3\u003e\n\u003cp\u003eSimple, powerful, and semantic GNU Emacs configuration for personal use. \u003ca href=\"https://github.com/Jemoka/.emacs.d\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to track my progress in developing a working Emacs config.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author, maintainer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: GNU Emacs, elisp\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprojects/","tags":["index"],"title":"Projects Index"},{"categories":null,"contents":"a type of cell\n","html":"\u003cp\u003ea type of \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprokateotic_cell/","tags":null,"title":"prokateotic cell"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhproof/","tags":null,"title":"proof"},{"categories":null,"contents":"A proof structure that uses induction.\nbase case Prove some base case \\(n_0\\)\ninductive step Prove that, given 
\\(n\\), \\(n_{j} \\implies n_{j+1}\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhproof/\"\u003eproof\u003c/a\u003e structure that uses \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003einduction\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"base-case\"\u003ebase case\u003c/h2\u003e\n\u003cp\u003eProve some base case \\(n_0\\)\u003c/p\u003e\n\u003ch2 id=\"inductive-step\"\u003einductive step\u003c/h2\u003e\n\u003cp\u003eProve that, given \\(n\\), \\(n_{j} \\implies n_{j+1}\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproof_by_induction/","tags":null,"title":"proof by induction"},{"categories":null,"contents":"Based on the wise words of a crab, I will start writing down some Proof Design Patterns I saw over Axler.\ninheriting properties (splitting, doing, merging) \u0026ldquo;complex numbers inherit commutativity via real numbers\u0026rdquo;\nconstruct then generalize for uniqueness and existence\ntry to remember to go backwards\nto prove IFF\nzero is cool, and here too!, also \\(1-1=0\\)\n\\(0v = 0\\) \\(1-1 = 0\\) \\(v-v=0\\) a.k.a. 
\\(v+(-v)=0\\) \\(v+0 = v\\) distributivity is epic: it is essentially the only tool to connect scalar multiplication and addition in a vector space\n\u0026ldquo;smallest\u0026rdquo; double containement proofs to show set equivalence: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\ncouple hints\nstep 1: identify hypothesis (assumptions) desired conclusion (results, trying/to/proof) step 2: define write down precise, mathematical notations ","html":"\u003cp\u003eBased on the wise words of a crab, I will start writing down some \u003ca href=\"/posts/kbhproof_design_patterns/\"\u003eProof Design Patterns\u003c/a\u003e I saw over \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eAxler\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-combining-and-splitting\"\u003einheriting properties (splitting, doing, merging)\u003c/a\u003e \u0026ldquo;complex numbers inherit \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e via \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-construct-then-generalize\"\u003econstruct then generalize\u003c/a\u003e for uniqueness and existence\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-try-to-remember-to-go-backwards\"\u003etry to remember to go backwards\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhequivalence/\"\u003eto prove IFF\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003ezero is cool\u003c/a\u003e, \u003ca href=\"/posts/kbhzero_times_vector/\"\u003eand here too!\u003c/a\u003e, also 
\\(1-1=0\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0v = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(1-1 = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(v-v=0\\) a.k.a. \\(v+(-v)=0\\)\u003c/li\u003e\n\u003cli\u003e\\(v+0 = v\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e is epic: it is essentially the only tool to connect scalar multiplication and addition in a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003e\u0026ldquo;smallest\u0026rdquo; double containement proofs\u003c/a\u003e to show set \u003ca href=\"/posts/kbhequivalence/\"\u003eequivalence\u003c/a\u003e: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecouple hints\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estep 1: identify\n\u003cul\u003e\n\u003cli\u003ehypothesis (assumptions)\u003c/li\u003e\n\u003cli\u003edesired conclusion (results, trying/to/proof)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003estep 2: define\n\u003cul\u003e\n\u003cli\u003ewrite down precise, mathematical notations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproof_design_patterns-1/","tags":null,"title":"Proof Design Patterns"},{"categories":null,"contents":"Based on the wise words of a crab, I will start writing down some Proof Design Patterns I saw over Axler.\ninheriting properties (splitting, doing, merging) \u0026ldquo;complex numbers inherit commutativity via real numbers\u0026rdquo;\nconstruct then generalize for uniqueness and existence\ntry to remember to go backwards\nto prove IFF\nzero is cool, and here 
too!, also \\(1-1=0\\)\n\\(0v = 0\\) \\(1-1 = 0\\) \\(v-v=0\\) a.k.a. \\(v+(-v)=0\\) \\(v+0 = v\\) distributivity is epic: it is essentially the only tool to connect scalar multiplication and addition in a vector space\n\u0026ldquo;smallest\u0026rdquo; double containement proofs to show set equivalence: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\ncouple hints\nstep 1: identify hypothesis (assumptions) desired conclusion (results, trying/to/proof) step 2: define write down precise, mathematical notations proving uniqueness: set up two distinct results, show that they are the same\nproving negation: if the \u0026ldquo;negative\u0026rdquo; is distinct, but the direct case is more nebulous, use proves by contradiction\nproof by induction\nespecially if you are dealing with polynomials, try factoring tools to help includes length of linearly-independent list \\(\\leq\\) length of spanning list Uniqueness by construction: uniqueness part of basis of domain\npick one element that does exist pick arbitrary elements and construct a result if we are trying to prove equivalence, double-containment is a good bet\nsee fundamental theorem of linear maps: but basically wehnever you need to construct basis of things start with an arbiturary basis of the subspace and expand into that of the whole space\na loop in the statements makes them all equivalent\nworking with the square of the norm is often easier\n","html":"\u003cp\u003eBased on the wise words of a crab, I will start writing down some \u003ca href=\"/posts/kbhproof_design_patterns/\"\u003eProof Design Patterns\u003c/a\u003e I saw over \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eAxler\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-combining-and-splitting\"\u003einheriting properties (splitting, doing, merging)\u003c/a\u003e \u0026ldquo;complex numbers inherit \u003ca 
href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e via \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-construct-then-generalize\"\u003econstruct then generalize\u003c/a\u003e for uniqueness and existence\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-try-to-remember-to-go-backwards\"\u003etry to remember to go backwards\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhequivalence/\"\u003eto prove IFF\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003ezero is cool\u003c/a\u003e, \u003ca href=\"/posts/kbhzero_times_vector/\"\u003eand here too!\u003c/a\u003e, also \\(1-1=0\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0v = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(1-1 = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(v-v=0\\) a.k.a. 
\\(v+(-v)=0\\)\u003c/li\u003e\n\u003cli\u003e\\(v+0 = v\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e is epic: it is essentially the only tool to connect scalar multiplication and addition in a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003e\u0026ldquo;smallest\u0026rdquo; double containement proofs\u003c/a\u003e to show set \u003ca href=\"/posts/kbhequivalence/\"\u003eequivalence\u003c/a\u003e: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecouple hints\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estep 1: identify\n\u003cul\u003e\n\u003cli\u003ehypothesis (assumptions)\u003c/li\u003e\n\u003cli\u003edesired conclusion (results, trying/to/proof)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003estep 2: define\n\u003cul\u003e\n\u003cli\u003ewrite down precise, mathematical notations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eproving uniqueness: set up two distinct results, show that they are the same\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eproving negation: if the \u0026ldquo;negative\u0026rdquo; is distinct, but the direct case is more nebulous, use proves by contradiction\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhproof_by_induction/\"\u003eproof by induction\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eespecially if you are dealing with polynomials, try factoring\u003c/li\u003e\n\u003cli\u003etools to help includes \u003ca 
href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eUniqueness by construction: uniqueness part of \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003epick one element that does exist\u003c/li\u003e\n\u003cli\u003epick arbitrary elements and construct a result\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eif we are trying to prove equivalence, double-containment is a good bet\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003efundamental theorem of linear maps\u003c/a\u003e: but basically wehnever you need to construct \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of things start with an arbiturary \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the subspace and expand into that of the whole space\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003ea loop in the statements makes them all equivalent\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnorm/#properties-of-the-norm\"\u003eworking with the square of the norm is often easier\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproof_design_patterns/","tags":null,"title":"Proof Design 
Patterns"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhproof_of_work/","tags":null,"title":"proof of work"},{"categories":null,"contents":"propaganda is a form of advertising which:\npropaganda persuades people into believe in a cause often defies reason to reach into ?? See examples:\nUS WWII Propaganda techniques for propaganda Name calling Generalities Transferring of authority Public testimonial Attachment to plane folks Bandwagoning (FOMO) Fear Bad logic Unwanted extrapolation ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpropaganda/\"\u003epropaganda\u003c/a\u003e is a form of \u003ca href=\"/posts/kbhadvertising/\"\u003eadvertising\u003c/a\u003e which:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpropaganda/\"\u003epropaganda\u003c/a\u003e persuades people into believe in a cause\u003c/li\u003e\n\u003cli\u003eoften defies reason to reach into ??\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee examples:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhus_wwii_propaganda/\"\u003eUS WWII Propaganda\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"techniques-for-propaganda\"\u003etechniques for propaganda\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eName calling\u003c/li\u003e\n\u003cli\u003eGeneralities\u003c/li\u003e\n\u003cli\u003eTransferring of authority\u003c/li\u003e\n\u003cli\u003ePublic testimonial\u003c/li\u003e\n\u003cli\u003eAttachment to plane folks\u003c/li\u003e\n\u003cli\u003eBandwagoning (FOMO)\u003c/li\u003e\n\u003cli\u003eFear\u003c/li\u003e\n\u003cli\u003eBad logic\u003c/li\u003e\n\u003cli\u003eUnwanted extrapolation\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpropaganda/","tags":null,"title":"propaganda"},{"categories":null,"contents":"protease helps viruses replication\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprotease/\"\u003eprotease\u003c/a\u003e helps viruses 
replication\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprotease/","tags":null,"title":"protease"},{"categories":null,"contents":"protected groups are features that one shouldn\u0026rsquo;t use: as in, these cannot be used:\nrace color national origin religion age sex and gender sexual orientation physical or mental disability reprisal (grudges) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es are features that one shouldn\u0026rsquo;t use: as in, these cannot be used:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003erace\u003c/li\u003e\n\u003cli\u003ecolor\u003c/li\u003e\n\u003cli\u003enational origin\u003c/li\u003e\n\u003cli\u003ereligion\u003c/li\u003e\n\u003cli\u003eage\u003c/li\u003e\n\u003cli\u003esex and gender\u003c/li\u003e\n\u003cli\u003esexual orientation\u003c/li\u003e\n\u003cli\u003ephysical or mental disability\u003c/li\u003e\n\u003cli\u003ereprisal (grudges)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprotected_group/","tags":null,"title":"protected group"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhprotons/","tags":null,"title":"proton"},{"categories":null,"contents":"the fast you are willing to prototype, the more willing you are to fail, the faster you will get to a successful partial solution you can refine and repeat.\nhow to prototype faster? 
In order of decreasing slowness\u0026mdash;-\nbuild out the whole product\u0026hellip; building the minimum viable product\u0026hellip; skeleton prototyping (Figma)\u0026hellip; Pen and paper\u0026hellip; Talking about it The trade-off: each level gives increased fidelity: its closer to what will actually ship, so you can get better+detailed feedback.\n","html":"\u003cp\u003ethe fast you are willing to \u003ca href=\"/posts/kbhprototyping/\"\u003eprototype\u003c/a\u003e, the more willing you are to fail, the faster you will get to a successful partial solution you can refine and repeat.\u003c/p\u003e\n\u003ch2 id=\"how-to-prototype-faster\"\u003ehow to prototype faster?\u003c/h2\u003e\n\u003cp\u003eIn order of decreasing slowness\u0026mdash;-\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebuild out the whole product\u0026hellip;\u003c/li\u003e\n\u003cli\u003ebuilding the minimum viable product\u0026hellip;\u003c/li\u003e\n\u003cli\u003eskeleton prototyping (Figma)\u0026hellip;\u003c/li\u003e\n\u003cli\u003ePen and paper\u0026hellip;\u003c/li\u003e\n\u003cli\u003eTalking about it\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe trade-off: each level gives increased \u003cem\u003efidelity\u003c/em\u003e: its closer to what will actually ship, so you can get better+detailed feedback.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprototyping/","tags":null,"title":"Prototyping"},{"categories":null,"contents":"A workshop hosted by PSC about Spark.\nContents:\nBig Data ","html":"\u003cp\u003eA workshop hosted by \u003ca href=\"/posts/kbhpsc/\"\u003ePSC\u003c/a\u003e about \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eContents:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbig_data/\"\u003eBig Data\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpsc_big_data_workshop_july_2023/","tags":null,"title":"PSC Big Data Workshop July 2023 
Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_pset_1/","tags":null,"title":"PSet 1"},{"categories":null,"contents":"Chapter 3 Problem 3.10 Part a Notably, the slope field is symmetric across the \\(y\\) axis, and repeats with every \\(m\\pi\\) interval about the line \\(\\frac{\\pi}{4}\\).\nPart b We have a stationary value at \\(y = \\frac{\\pi}{4}\\). Beyond that, as initial \\(x\u0026gt;0, y\u0026lt;\\frac{\\pi}{4}\\), solutions will all trend towards \\(y=\\frac{\\pi}{4}\\) as \\(t \\to \\infty\\), because the derivative is positive for that entire region. For \\(x\u0026gt;0, \\frac{\\pi}{2}\u0026gt;y\u0026gt; \\frac{\\pi}{4}\\), the function will also trend towards \\(\\frac{\\pi}{4}\\), as the slope is negative for that entire region. This pattern repeats for all \\(y_0+m\\pi\\). That is, for instance, for \\(y\\) between \\(m\\pi+\\frac{\\pi}{4} \u0026lt; y \u0026lt; m\\pi + \\frac{\\pi}{2}\\), \\(y\\) will trend towards \\(m\\pi + \\frac{\\pi}{4}\\). For initial \\(t\u0026lt;0, y \u0026lt; \\frac{\\pi}{4}\\), most solutions will trend towards \\(-\\infty\\) as the region has negative slope. Yet, as \\(t_0 \\approx 0\\), the function will never hit the singularity point of \\(y = -\\frac{\\pi}{2}\\) before traveling to the \\(t\u0026gt;0\\) side, resulting in it trending towards \\(+\\infty\\). 
Finally, for initial \\(y\u0026gt;\\frac{\\pi}{4}, t\u0026lt;0\\), the function will reach \\(+\\infty\\) because it will hit the positive singularity at \\(\\frac{\\pi}{2}\\).\nChapter 4 Problem 4.1, part a Problem 4.2 Part a Part b Problem 4.3 Part a Part b Problem 4.7 Part a We have:\n\\begin{equation} \\dv{V}{t} = rV \\ln \\qty(\\frac{K}{V}) \\end{equation}\nWe see that when \\(V=K\\), we have \\(\\ln(1) = 0\\) on the right hand side, meaning \\(K\\) is a stationary value.\nNow, we desire that this value is unique in the positive half-line; so, for \\(V \u0026gt; 0\\), we have \\(0 = rV \\ln (\\frac{K}{V})\\), and we desire \\(V=K\\) exactly. Note that \\(V=0\\) would not work, because \\(V\u0026gt;0\\). Therefore, we now have:\n\\begin{equation} \\ln (\\frac{K}{V}) = 0 \\end{equation}\nmeaning \\(\\frac{K}{V} = 1\\). Finally, we have \\(V=K\\), as desired.\nImportantly, note now that:\n\\begin{align} \\dv V rV\\ln (\\frac{K}{V}) \u0026amp;= r(\\ln \\qty(\\frac{K}{V}) + V \\qty(\\frac{V}{K}\\cdot\\qty(K(-1)V^{2})) \\\\ \u0026amp;= r \\qty(\\ln\\qty(\\frac{K}{V}) - V\\qty(\\frac{1}{V})) \\\\ \u0026amp;= r\\qty(\\ln \\qty(\\frac{K}{V}) -1) \\end{align}\nNow, we see at \\(V=K\\), this expression yields \\(r(0-1)\\), meaning \\(-r\\). 
As we have \\(r\u0026gt;0\\) given in the problem, we see that the stationary value is stable.\nPart b We again have:\n\\begin{equation} \\dv{V}{t} = rV\\ln \\qty(\\frac{K}{V}) \\end{equation}\nthat is:\n\\begin{equation} \\dv{V}{t} = -r V \\ln \\qty( \\frac{V}{K}) \\end{equation}\nTaking an integral on both sides using the division method:\n\\begin{equation} \\int \\frac{1}{V\\ln \\qty(\\frac{V}{K})} \\dd{V} = -\\int r \\dd{t} \\end{equation}\nNow, let us treat:\n\\begin{equation} u = \\ln \\qty(\\frac{V}{K}) \\end{equation}\nwe note that \\(\\dd{u} = \\frac{1}{V} \\dd{V}\\).\nHence:\n\\begin{equation} \\int \\frac{1}{u} \\dd{u} = -rt +C \\end{equation}\ntherefore:\n\\begin{equation} \\ln (u) = -rt+C = \\ln \\qty(\\ln \\qty(\\frac{V}{K})) \\end{equation}\nNow, this means that:\n\\begin{equation} \\ln \\qty(\\frac{V}{K}) = Ce^{-rt} \\end{equation}\nPlugging in our initial conditions at \\(t=0\\), we have:\n\\begin{equation} \\ln \\qty(\\frac{V_0}{K}) = C \\end{equation}\nSubstituting that in, we have:\n\\begin{align} \\ln \\qty(\\frac{V}{K}) \u0026amp;= \\ln \\qty(\\frac{V_0}{K})e^{-rt} \\\\ \u0026amp;= \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}}) \\end{align}\nFinally, then, we see that:\n\\begin{equation} \\frac{V}{K} = \\qty(\\frac{V_0}{K})^{e^{-rt}} \\end{equation}\nwhich means:\n\\begin{equation} V = K\\qty(\\frac{V_0}{K})^{e^{-rt}} \\end{equation}\nas desired.\nPart c We want to perform the inverse operation of the previous question.\n\\begin{equation} V(t) = K\\qty(\\frac{V_0}{K})e^{-rt} \\end{equation}\nNow, that means that:\n\\begin{align} V\u0026rsquo;(t) \u0026amp; = K \\qty( \\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) \\qty(e^{-rt}(-r)) \\\\ \u0026amp;= -rk \\qty(\\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) e^{-rt} \\\\ \u0026amp;= -r v(t) \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}}) \\\\ \u0026amp;= -r v(t) \\qty(\\frac{V}{K}) = r V(\\frac{K}{V}) \\end{align}\nas desired\nPart d Problem 4.10 Part a When the right side of the ODE 
\u0026ldquo;vanishes\u0026rdquo;, we have:-\n\\begin{equation} ax \\qty(1- \\frac{x}{b}) - \\frac{x^{2}}{1+x^{2}} = 0 \\end{equation}\nwhich means:\n\\begin{equation} x\\qty(a \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}}) = 0 \\end{equation}\nNow, we have that \\(x = c \u0026gt; 0\\), meaning \\(x\\neq 0\\). Hence, for the top to hold, we have:\n\\begin{equation} a \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}} = 0 \\end{equation}\nMeaning:\n\\begin{equation} a \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}} \\end{equation}\nthat is, the graphs of \\(a \\qty(1- \\frac{x}{b})\\) and \\(\\frac{x}{1+x^{2}}\\) intersect, as desired.\nPart b We know that solutions to the expression given in part a), that\n\\begin{equation} a \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}} \\end{equation}\nare the only locations where positive stationary values exists. Visually, if \\(a(1-\\frac{x}{b})\\) is below the location at \\(x = \\sqrt{3}\\), and the function increases as \\(x\\) decreases, we know that the function is bound to intersect at one point only with the curve of \\(\\frac{x}{1+x^{2}}\\) given which is concave down at values \\(x\u0026lt;\\sqrt{3}\\). We know this intersection point is positive because at \\(x=0\\), which is where the visualized graph of \\(\\frac{x}{(1+x^{2}}\\) changes signs, we have \\(a(1-\\frac{x}{b}) = a\\), and we have \\(a \u0026gt; 0\\).\nPart c If the functions cross, the left term will start out before crossing being large than the right term (because its cross \u0026ldquo;from above\u0026rdquo;, visually). This means that prior to the stationary point, the ODE\u0026rsquo;s right hand side is positive. 
After crossing, the situation in b) gives that the RHS of the ODE is negative now as the second term is given to be larger than the first term.\nThis means that the stationary point is an attractor (positive slope coming from below, negative slope coming from above), so it is stationary.\nChapter 5 Problem 5.2, Part b LHS:\n\\begin{equation} (2+3i)(4-i) = 8 - 2i + 12i +3 = 11 + 10i \\end{equation}\n\\begin{equation} (11+10i)(1-i) = 11 -11i+10i + 10 = 21 - i \\end{equation}\nRHS:\n\\begin{equation} (4-i)(1-i) = 4 -5i -1 = 3 - 5i \\end{equation}\n\\begin{equation} (2+3i)(3-5i) = 6 - 10i + 9i +15 = 21 -i \\end{equation}\nand finally:\n\\begin{equation} 21-i = 21-i \\end{equation}\nProblem 5.3, Part c \\begin{align} \\frac{1-11i}{1-i} \u0026amp;= \\frac{1-11i}{1-i}\\frac{1+i}{1+i} \\\\ \u0026amp;= \\frac{1+i-11i+11}{1+1} \\\\ \u0026amp;= \\frac{12 -10i}{2} \\\\ \u0026amp;= 6 - 5i \\end{align}\nProblem 5.5, Part c \\begin{align} \\qty | \\frac{8-i}{4+7i}| \u0026amp;= |8-i|/|4+7i| \\\\ \u0026amp;= \\sqrt{\\frac{64+1}{16+49}} \\\\ \u0026amp;= \\sqrt{ \\frac{65}{65}} \\\\ \u0026amp;= 1 \\end{align}\nProblem 5.6 Part c We have:\n\\begin{equation} (3+i)(2+i) = (5 + 5i) \\end{equation}\nWriting it in polar form gives:\n\\begin{equation} \\sqrt{50}e^{i \\arctan (1)} = \\sqrt{50}e^{i \\frac{\\pi}{4}} \\end{equation}\nNow, we have:\n\\begin{equation} (50)^{\\frac{1}{4}} e^{i \\frac{\\pi}{8}} \\end{equation}\nFinally, writing this out in rectangular gives:\n\\begin{equation} (50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{8}) + i\\sin \\qty(\\frac{\\pi}{8})) \\end{equation}\nRecall now that:\n\\begin{equation} \\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}} \\end{equation}\nand\n\\begin{equation} \\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}} \\end{equation}\nFinally, that:\n\\begin{equation} \\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2} \\end{equation}\nNow, notice that:\n\\begin{equation} (50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{2 
\\cdot 4}) + i\\sin \\qty(\\frac{\\pi}{2 \\cdot 4})) \\end{equation}\nwhich is equal to, based on the identities above:\n\\begin{equation} (50)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} + i\\sqrt{\\frac{2-\\sqrt{2}}{4}}) \\end{equation}\nPart d We have:\n\\begin{equation} \\frac{2+2i}{i} \\end{equation}\nWriting the top and bottom separately in polar, we have:\n\\begin{equation} \\sqrt{8} e^{i \\arctan (1)} = \\sqrt{8} e^{i \\frac{\\pi}{4}} \\end{equation}\n\\begin{equation} e^{i \\frac{\\pi}{2}} \\end{equation}\nDividing the two expressions gives:\n\\begin{equation} \\sqrt{8} e^{i -\\frac{\\pi}{4}} \\end{equation}\nTaking the square root gives\n\\begin{equation} (8)^{\\frac{1}{4}}e^{i \\frac{-\\pi}{8}} \\end{equation}\nFinally, converting the expression back to polar is almost the same as in part C). Recall that:\nRecall now that:\n\\begin{equation} \\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}} \\end{equation}\nand\n\\begin{equation} \\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}} \\end{equation}\nFinally, that:\n\\begin{equation} \\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2} \\end{equation}\nNow, notice that:\n\\begin{equation} (8)^{\\frac{1}{4}} \\qty(\\cos \\qty(-\\frac{\\pi}{2 \\cdot 4}) + i\\sin \\qty(-\\frac{\\pi}{2 \\cdot 4})) \\end{equation}\nnote that \\(\\sin (-x) = -\\sin (x)\\), while \\(\\cos (-x) = \\cos (x)\\). 
This results in:\n\\begin{equation} (8)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} - i\\sqrt{\\frac{2-\\sqrt{2}}{4}}) \\end{equation}\n","html":"\u003ch2 id=\"chapter-3\"\u003eChapter 3\u003c/h2\u003e\n\u003ch3 id=\"problem-3-dot-10\"\u003eProblem 3.10\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-30-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNotably, the slope field is symmetric across the \\(y\\) axis, and repeats with every \\(m\\pi\\) interval about the line \\(\\frac{\\pi}{4}\\).\u003c/p\u003e\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cp\u003eWe have a stationary value at \\(y = \\frac{\\pi}{4}\\). Beyond that, as initial \\(x\u0026gt;0, y\u0026lt;\\frac{\\pi}{4}\\), solutions will all trend towards \\(y=\\frac{\\pi}{4}\\) as \\(t \\to \\infty\\), because the derivative is positive for that entire region. For \\(x\u0026gt;0, \\frac{\\pi}{2}\u0026gt;y\u0026gt; \\frac{\\pi}{4}\\), the function will also trend towards \\(\\frac{\\pi}{4}\\), as the slope is negative for that entire region. This pattern repeats for all \\(y_0+m\\pi\\). That is, for instance, for \\(y\\) between \\(m\\pi+\\frac{\\pi}{4} \u0026lt; y \u0026lt; m\\pi + \\frac{\\pi}{2}\\), \\(y\\) will trend towards \\(m\\pi + \\frac{\\pi}{4}\\). For initial \\(t\u0026lt;0, y \u0026lt; \\frac{\\pi}{4}\\), most solutions will trend towards \\(-\\infty\\) as the region has negative slope. Yet, as \\(t_0 \\approx 0\\), the function will never hit the singularity point of \\(y = -\\frac{\\pi}{2}\\) before traveling to the \\(t\u0026gt;0\\) side, resulting in it trending towards \\(+\\infty\\). 
Finally, for initial \\(y\u0026gt;\\frac{\\pi}{4}, t\u0026lt;0\\), the function will reach \\(+\\infty\\) because it will hit the positive singularity at \\(\\frac{\\pi}{2}\\).\u003c/p\u003e\n\u003ch2 id=\"chapter-4\"\u003eChapter 4\u003c/h2\u003e\n\u003ch3 id=\"problem-4-dot-1-part-a\"\u003eProblem 4.1, part a\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-31-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-2\"\u003eProblem 4.2\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-31-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-32-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-3\"\u003eProblem 4.3\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-32-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-34-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-7\"\u003eProblem 4.7\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{V}{t} = rV \\ln \\qty(\\frac{K}{V})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe see that when \\(V=K\\), we have \\(\\ln(1) = 0\\) on the right hand side, meaning \\(K\\) is a stationary value.\u003c/p\u003e\n\u003cp\u003eNow, we desire that this value is unique in the positive half-line; so, for \\(V \u0026gt; 0\\), we have \\(0 = rV \\ln (\\frac{K}{V})\\), and we desire \\(V=K\\) exactly. Note that \\(V=0\\) would not work, because \\(V\u0026gt;0\\). 
Therefore, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln (\\frac{K}{V}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning \\(\\frac{K}{V} = 1\\). Finally, we have \\(V=K\\), as desired.\u003c/p\u003e\n\u003cp\u003eImportantly, note now that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv V rV\\ln (\\frac{K}{V}) \u0026amp;= r\\qty(\\ln \\qty(\\frac{K}{V}) + V \\qty(\\frac{V}{K}\\cdot\\qty(K(-1)V^{-2}))) \\\\\n\u0026amp;= r \\qty(\\ln\\qty(\\frac{K}{V}) - V\\qty(\\frac{1}{V})) \\\\\n\u0026amp;= r\\qty(\\ln \\qty(\\frac{K}{V}) -1)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we see at \\(V=K\\), this expression yields \\(r(0-1)\\), meaning \\(-r\\). As we have \\(r\u0026gt;0\\) given in the problem, we see that the stationary value is stable.\u003c/p\u003e\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cp\u003eWe again have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{V}{t} = rV\\ln \\qty(\\frac{K}{V})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{V}{t} = -r V \\ln \\qty( \\frac{V}{K})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking an integral on both sides using the division method:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{V\\ln \\qty(\\frac{V}{K})} \\dd{V} = -\\int r \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, let us treat:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = \\ln \\qty(\\frac{V}{K})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe note that \\(\\dd{u} = \\frac{1}{V} \\dd{V}\\).\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{u} \\dd{u} = -rt +C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln (u) = -rt+C = \\ln \\qty(\\ln \\qty(\\frac{V}{K}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, this means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln \\qty(\\frac{V}{K}) = 
Ce^{-rt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePlugging in our initial conditions at \\(t=0\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln \\qty(\\frac{V_0}{K}) = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting that in, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\ln \\qty(\\frac{V}{K}) \u0026amp;= \\ln \\qty(\\frac{V_0}{K})e^{-rt} \\\\\n\u0026amp;= \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinally, then, we see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{V}{K} = \\qty(\\frac{V_0}{K})^{e^{-rt}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = K\\qty(\\frac{V_0}{K})^{e^{-rt}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired.\u003c/p\u003e\n\u003ch4 id=\"part-c\"\u003ePart c\u003c/h4\u003e\n\u003cp\u003eWe want to perform the inverse operation of the previous question.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV(t) = K\\qty(\\frac{V_0}{K})^{e^{-rt}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, that means that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nV\u0026rsquo;(t) \u0026amp; = K \\qty( \\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) \\qty(e^{-rt}(-r)) \\\\\n\u0026amp;= -rK \\qty(\\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) e^{-rt} \\\\\n\u0026amp;= -r V(t) \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}}) \\\\\n\u0026amp;= -r V(t) \\ln \\qty(\\frac{V}{K}) = r V \\ln \\qty(\\frac{K}{V})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eas desired\u003c/p\u003e\n\u003ch4 id=\"part-d\"\u003ePart d\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-34-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-10\"\u003eProblem 4.10\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cp\u003eWhen the right side of the ODE \u0026ldquo;vanishes\u0026rdquo;, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nax \\qty(1- \\frac{x}{b}) - 
\\frac{x^{2}}{1+x^{2}} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\\qty(a \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we have that \\(x = c \u0026gt; 0\\), meaning \\(x\\neq 0\\). Hence, for the top to hold, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is, the graphs of \\(a \\qty(1- \\frac{x}{b})\\) and \\(\\frac{x}{1+x^{2}}\\) intersect, as desired.\u003c/p\u003e\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cp\u003eWe know that solutions to the expression given in part a), that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eare the only locations where positive stationary values exist. Visually, if \\(a(1-\\frac{x}{b})\\) is below the location at \\(x = \\sqrt{3}\\), and the function increases as \\(x\\) decreases, we know that the function is bound to intersect at one point only with the curve of \\(\\frac{x}{1+x^{2}}\\) given which is concave down at values \\(x\u0026lt;\\sqrt{3}\\). We know this intersection point is positive because at \\(x=0\\), which is where the visualized graph of \\(\\frac{x}{1+x^{2}}\\) changes signs, we have \\(a(1-\\frac{x}{b}) = a\\), and we have \\(a \u0026gt; 0\\).\u003c/p\u003e\n\u003ch4 id=\"part-c\"\u003ePart c\u003c/h4\u003e\n\u003cp\u003eIf the functions cross, the left term will start out before crossing being larger than the right term (because it crosses \u0026ldquo;from above\u0026rdquo;, visually). This means that prior to the stationary point, the ODE\u0026rsquo;s right hand side is positive. 
After crossing, the situation in b) gives that the RHS of the ODE is negative now as the second term is given to be larger than the first term.\u003c/p\u003e\n\u003cp\u003eThis means that the stationary point is an attractor (positive slope coming from below, negative slope coming from above), so it is stable.\u003c/p\u003e\n\u003ch2 id=\"chapter-5\"\u003eChapter 5\u003c/h2\u003e\n\u003ch3 id=\"problem-5-dot-2-part-b\"\u003eProblem 5.2, Part b\u003c/h3\u003e\n\u003cp\u003eLHS:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(2+3i)(4-i) = 8 - 2i + 12i +3 = 11 + 10i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(11+10i)(1-i) = 11 -11i+10i + 10 = 21 - i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRHS:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(4-i)(1-i) = 4 -5i -1 = 3 - 5i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(2+3i)(3-5i) = 6 - 10i + 9i +15 = 21 -i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand finally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n21-i = 21-i\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"problem-5-dot-3-part-c\"\u003eProblem 5.3, Part c\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n\\frac{1-11i}{1-i} \u0026amp;= \\frac{1-11i}{1-i}\\frac{1+i}{1+i} \\\\\n\u0026amp;= \\frac{1+i-11i+11}{1+1} \\\\\n\u0026amp;= \\frac{12 -10i}{2} \\\\\n\u0026amp;= 6 - 5i\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"problem-5-dot-5-part-c\"\u003eProblem 5.5, Part c\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n\\qty | \\frac{8-i}{4+7i}| \u0026amp;= |8-i|/|4+7i| \\\\\n\u0026amp;= \\sqrt{\\frac{64+1}{16+49}} \\\\\n\u0026amp;= \\sqrt{ \\frac{65}{65}} \\\\\n\u0026amp;= 1\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"problem-5-dot-6\"\u003eProblem 5.6\u003c/h3\u003e\n\u003ch4 id=\"part-c\"\u003ePart c\u003c/h4\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(3+i)(2+i) = (5 + 5i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWriting it in polar form 
gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{50}e^{i \\arctan (1)} = \\sqrt{50}e^{i \\frac{\\pi}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} e^{i \\frac{\\pi}{8}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, writing this out in rectangular gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{8}) + i\\sin \\qty(\\frac{\\pi}{8}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, notice that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{2 \\cdot 4}) + i\\sin \\qty(\\frac{\\pi}{2 \\cdot 4}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is equal to, based on the identities above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} + i\\sqrt{\\frac{2-\\sqrt{2}}{4}})\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"part-d\"\u003ePart d\u003c/h4\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2+2i}{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWriting the top and bottom separately in polar, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{8} e^{i \\arctan (1)} = \\sqrt{8} e^{i \\frac{\\pi}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{i \\frac{\\pi}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDividing the two expressions 
gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{8} e^{-i\\frac{\\pi}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the square root gives\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(8)^{\\frac{1}{4}}e^{i \\frac{-\\pi}{8}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, converting the expression back to rectangular is almost the same as in part C). Recall that:\u003c/p\u003e\n\u003cp\u003eRecall now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, notice that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(8)^{\\frac{1}{4}} \\qty(\\cos \\qty(-\\frac{\\pi}{2 \\cdot 4}) + i\\sin \\qty(-\\frac{\\pi}{2 \\cdot 4}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote that \\(\\sin (-x) = -\\sin (x)\\), while \\(\\cos (-x) = \\cos (x)\\). 
This results in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(8)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} - i\\sqrt{\\frac{2-\\sqrt{2}}{4}})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpset_2/","tags":null,"title":"PSet 2"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_3/","tags":null,"title":"PSet 3"},{"categories":null,"contents":"o\n","html":"\u003cp\u003eo\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpset_4/","tags":null,"title":"PSet 4"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_5/","tags":null,"title":"PSet 5"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_6/","tags":null,"title":"PSet 6"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_7/","tags":null,"title":"PSet 7"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_8/","tags":null,"title":"PSet 8"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_9/","tags":null,"title":"PSet 9"},{"categories":null,"contents":"psychoacoustics is the study of sound perception and cognition\nhow does sound work how we perceive it why? and what are its applications? ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpsycoacoustics/\"\u003epsychoacoustics\u003c/a\u003e is the study of \u003ca href=\"/posts/kbhsound/\"\u003esound\u003c/a\u003e perception and cognition\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehow does sound work\u003c/li\u003e\n\u003cli\u003ehow we perceive it\u003c/li\u003e\n\u003cli\u003ewhy? 
and what are its applications?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpsycoacoustics/","tags":null,"title":"psychoacoustics"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhptsd/","tags":null,"title":"PTSD"},{"categories":null,"contents":" titles should have proper noun\ncultural context, summary of the text, thesis\u0026mdash;highlight Kairos (American context)\nconclusion: what\u0026rsquo;s significant of this rhetoric?\naccess credibility\npeer review\npublication (editor and publisher should be named), is there a special interest group?\nauthor\u0026rsquo;s training and publicatino record\nquality of argument (reasoning + evidence, quality of works cited)\ncan you find some of the info else where in reputable sources?\ncite one additional source\nFor instance, talk about Karios specifically\n","html":"\u003col\u003e\n\u003cli\u003e\n\u003cp\u003etitles should have proper noun\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecultural context, summary of the text, thesis\u0026mdash;highlight Kairos (American context)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003econclusion: what\u0026rsquo;s significant of this rhetoric?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eaccess credibility\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003epeer review\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003epublication (editor and publisher should be named), is there a special interest group?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eauthor\u0026rsquo;s training and publicatino record\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003equality of argument (reasoning + evidence, quality of works cited)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecan you find some of the info else where in reputable sources?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ecite one additional 
source\u003c/p\u003e\n\u003cp\u003eFor instance, talk about Karios specifically\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr_notes/","tags":null,"title":"PWR Notes"},{"categories":null,"contents":"Dual influence framework:\nrequires political involvement requires diverse media diet Proposal: based on feedback on TIC-focus on one case study and isolate it well\nQuotes Social media as a means of exposure to the modern world “Daniel Lerner (1958) saw mass media as the main catalyst for social change. Lerner argued that media exposed people who possess traditional values to the “modern” world, and that exposure in turn produced a desire to live in it.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\nCriticism of the argument of social-media driven modernity because of unequal access “Lerner’s arguments were expectedly later criticized. For some, they did not consider the fact that access to mass communication can be highly unequal in some countries in the global South. Work on Latin America, for example, showed that, in rural areas, media are often dominated by elites (Beltrán 1976)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\nIncreased exposure on SM results in increased support “relationship b between Internet use and levels of support for SSM” [is strong] [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 477)\nEconomic development is correlated with strong adoption of self expression “Equipped with reliable longitudinal data newly available, this scholarship demonstrates that there exists an association between levels of economic development and the adoption of “self-expression” values, such as support for gender equality and tolerance for homosexuality.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
468]\nSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot; “Based on social contact theory, which suggests that individuals become more tolerant of groups as they interact with them, some scholars have shown that contact with “imagined” or “vicarious” communities that are diffused through mass media can have an effect on lowering prejudices and improving attitudes toward gay people (Riggle 1996; Schiappa, Gregg, and Hewes 2006).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 468]\nPeople who \u0026ldquo;pay attention to news\u0026rdquo; + \u0026ldquo;use internet daily\u0026rdquo; more likely to support SSM “As depicted in Figure 3, the predicted probabilities of supporting SSM are consistent with our expectation that those who pay attention to the news and use the Internet daily are much more likely to support SSM. We believe this is because those who both pay attention to the news and use the Internet daily are likely to encounter news online that helps diffuse global attitudes about SSM” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 475]\nPeople who just consume internet are not exposed nearly as much to SSM “Meanwhile, for those that use the Internet often, but who do not pay attention to the news, the Internet is likely to be a source of entertainment or social interaction, which are not necessarily associated with encountering new information related to SSM. This interaction between Internet use and news consumption is also evident and significant when the data are disaggregated by year (see appendix).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
475]\nincreased exposure and increased discussion normalizes attitudes\u0026mdash;use of word normalize, depolarize “The results lend credence to the argument, derived from social contact theory, that increased exposure to gays and lesbians as well as to discussions about homosexuality and the merits of SSM may have a normalizing effect on individuals’ attitudes. Research also shows that such interaction can take place through mass media (Berggren and Nilsson 2015)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 478]\nssm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization “we find evidence of moral culture wars between ideologies and show that constituencies that express higher levels of emotion and have fewer actively engaged participants often precede legalization efforts that fail” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 2603]\nsustained interest and overall engagement precede legalization success “We found that policies that passed had a greater percentage of people with sustained interest over time, had greater overall engagement levels, and had significantly higher levels of language related to fairness and openness before the decision. On the other hand, states with policies that failed had higher levels of anxiety, religion, and tentativeness. These findings align with previous research characterizing the same-sex marriage debate as a “culture war” [1], where proponents advocate for it in terms of fairness morality, while opponents argue against it in terms of religious morality.” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 
2610]\nmedia diversity is important, otherwise one side may drown out the other “if one message is able to drown out the other, it may allow said message to dictate the terms of the debate and change minds (and eventually laws) accordingly \u0026hellip; This inquiry argues that media coverage (in terms of both specific frames and the competition between said frames) of same-sex marriage and civil unions shapes levels of opposition to these policies.” [Johnson, 2012, p. 1056]\nparadigm shift happens through tipping the scale, not pushing it over (i.e. less effort) “Morality framing in-and-of itself did not seem to have as much power as equality framing when it came to changing minds between 2004 and 2011, but those interested in spreading a message of morality and rolling back same-sex marriage and civil unions efforts should realize that, should they tip the balance of the competition between these two frames back in their favor, levels of opposition have the potential to shift once more in a way that benefits their policy goals.” [Johnson, 2012, p. 1074]\nexpanding generics is a common attack surface in morality arguments “Experience has shown that it is not possible to debate the recognition of same-sex marriage without opponents making a normative connection between homosexuality and other forms of human relationships such as polygamy.” [Ball, p. 
1900]\nlack of engegement \u0026ldquo;the lack of spirited political engagement \u0026hellip; Comparatively little is offered in support of gay and lesbian rights\u0026rdquo; (95)\ntolerance tolerance among adults toward homosexuals and toward people of different race, collected from the World Values Survey and European Values Study.31 Both cross-sectional and panel results (available on request) suggest that globalization does not generally affect these tolerance measures.32 In other words, increasing economic, social and political integration does not seem to influence the contemporary level of tolerance in the adult population,” [⁨Berggren⁩ and ⁨Nilsson⁩, 2015, p. 384]\npower Thus, participation in itself is an expression of some degree of (enabled) power [Dahlgren, 2016, p. 26]\nSub-claim Development Define polarization and state of play in SSM Yoink from TIC:\ndefine polarization motivate the study of polarization through extreme cases One salient case of depolarization in recent years is SSM. Debate transition from SSM into other intersectionality / rights (CITE). Direct measurements too: gallup\u0026mdash;Republican vs. Democracts gap closed on the topic.\nSSM offers a salient case study of what depolarization can look like. Having gone through the cycle, we are afforded post-hoc analysis of what worked and didn\u0026rsquo;t work.\nWe know what worked: ssm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization, whereas sustained interest and overall engagement precede legalization success\u0026mdash;tied to the depolarization.\nBut why? and specifically, why these things? How can we generalize this?\nA careful study of polarization helps inform why these things worked. We are going to do this from social media because (from the TIC social media is easy). 
The framework will show that [TiC thesis].\nAfter developing such a framework, we will develop this analysis on two cases:\nbriefly on trolls (this is polarizing), and SSM (this is depolarizing), which, given that\u0026rsquo;s what we\u0026rsquo;re after, we are going to exand and learn from to further investigate the motivating dynamics of the framework in context of SSM. \u0026lt;\u0026gt;all of the lit review Begin by developing the framework.\nyipeee\nSSM has all the parts that\u0026rsquo;s needed for this to work. [vocab note: homophilic is not the same thing as MSM. We use it in the network dynamics sense. also not talking about the rest of queer community, which has a much more complex/intersectional set of issues.]\nSo while trolls polarize, SSM is an example of successful polarization.\nRecall previous efforts: 1) high emotion 2) low engagement\u0026mdash;-highly homophilic interactions.\nThe depolarization of SSM follows an extremely similar pattern to the depolarization framework developed through studying social media in general: post-hoc studies highlights that depolarization of SSM is characterized by both increased exposure to the community on social media as well as increased active political engagement with the community. The close correspondence between SSM and the framework, therefore offers an opportunity to directly use the underlying motivations of SSM legalization as a study of the motivation of the framework in general.\nMedia needs to be diverse for SSM because otherwise it creates vicarious communities. SSM\u0026rsquo;s success tied to sustained, diverse interest largely through social media.\nSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot;. One such group: social media as a means of exposure SSM practice. 
Indeed, we notice this before overall support: Increased exposure on SM results in increased support.\nYet, these results are tempered: media diversity is important, otherwise one side may drown out the other and may even be counter-productive. This counter-productivity is created by as given by Novoa, generics\u0026mdash;whereby the counter-party will frame SSM along with other factors to criticize as a group. Therefore, true exposure can only happen if no one side drowns out the other; otherwise it will be vicarious \u0026ldquo;totem\u0026rdquo; effect.\nSSM debates cannot succeed with diverse media alone, it requires active engagement with the topic too SSM is an act of \u0026ldquo;self expression\u0026rdquo;, \u0026ldquo;expression\u0026rdquo; is an active verb of engagement. Active engagement doesn\u0026rsquo;t come from simply being on the internet, which is shown to not be a factor enough to expose to SSM. ⁨Díez⁩ and ⁨Dion⁩ frames this engagement as a process of normalization of these attitudes: use of word normalize is the crucial factor to depolarize.\nConclusion + Discussion Two-prong framework + evidence from SSM:\ndiverse media diet is needed to prevent totem creation + generics active engagement is needed to solidify normalization SSM is perhaps the first because a paradigm shift happens through tipping the scale, not pushing it over (i.e. less effort). 
So its the easiest to tip over.\n","html":"\u003cp\u003eDual influence framework:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003erequires political involvement\u003c/li\u003e\n\u003cli\u003erequires diverse media diet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProposal: based on feedback on TIC-focus on \u003cstrong\u003eone case study\u003c/strong\u003e and isolate it well\u003c/p\u003e\n\u003ch2 id=\"quotes\"\u003eQuotes\u003c/h2\u003e\n\u003ch3 id=\"social-media-as-a-means-of-exposure-to-the-modern-world\"\u003eSocial media as a means of exposure to the modern world\u003c/h3\u003e\n\u003cp\u003e“Daniel Lerner (1958) saw mass media as the main catalyst for social change. Lerner argued that media exposed people who possess traditional values to the “modern” world, and that exposure in turn produced a desire to live in it.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\u003c/p\u003e\n\u003ch3 id=\"criticism-of-the-argument-of-social-media-driven-modernity-because-of-unequal-access\"\u003eCriticism of the argument of social-media driven modernity because of unequal access\u003c/h3\u003e\n\u003cp\u003e“Lerner’s arguments were expectedly later criticized. For some, they did not consider the fact that access to mass communication can be highly unequal in some countries in the global South. Work on Latin America, for example, showed that, in rural areas, media are often dominated by elites (Beltrán 1976)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\u003c/p\u003e\n\u003ch3 id=\"increased-exposure-on-sm-results-in-increased-support\"\u003eIncreased exposure on SM results in increased support\u003c/h3\u003e\n\u003cp\u003e“relationship b between Internet use and levels of support for SSM” [is strong] [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
477)\u003c/p\u003e\n\u003ch3 id=\"economic-development-is-correlated-with-strong-adoption-of-self-expression\"\u003eEconomic development is correlated with strong adoption of self expression\u003c/h3\u003e\n\u003cp\u003e“Equipped with reliable longitudinal data newly available, this scholarship demonstrates that there exists an association between levels of economic development and the adoption of “self-expression” values, such as support for gender equality and tolerance for homosexuality.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 468]\u003c/p\u003e\n\u003ch3 id=\"social-contract-theory-indicates-that-people-become-more-tolerant-due-to-interaction-diminishing-the-power-of-vicarious-communities\"\u003eSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot;\u003c/h3\u003e\n\u003cp\u003e“Based on social contact theory, which suggests that individuals become more tolerant of groups as they interact with them, some scholars have shown that contact with “imagined” or “vicarious” communities that are diffused through mass media can have an effect on lowering prejudices and improving attitudes toward gay people (Riggle 1996; Schiappa, Gregg, and Hewes 2006).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 468]\u003c/p\u003e\n\u003ch3 id=\"people-who-pay-attention-to-news-plus-use-internet-daily-more-likely-to-support-ssm\"\u003ePeople who \u0026ldquo;pay attention to news\u0026rdquo; + \u0026ldquo;use internet daily\u0026rdquo; more likely to support SSM\u003c/h3\u003e\n\u003cp\u003e“As depicted in Figure 3, the predicted probabilities of supporting SSM are consistent with our expectation that those who pay attention to the news and use the Internet daily are much more likely to support SSM. 
We believe this is because those who both pay attention to the news and use the Internet daily are likely to encounter news online that helps diffuse global attitudes about SSM” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 475]\u003c/p\u003e\n\u003ch3 id=\"people-who-just-consume-internet-are-not-exposed-nearly-as-much-to-ssm\"\u003ePeople who just consume internet are not exposed nearly as much to SSM\u003c/h3\u003e\n\u003cp\u003e“Meanwhile, for those that use the Internet often, but who do not pay attention to the news, the Internet is likely to be a source of entertainment or social interaction, which are not necessarily associated with encountering new information related to SSM. This interaction between Internet use and news consumption is also evident and significant when the data are disaggregated by year (see appendix).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 475]\u003c/p\u003e\n\u003ch3 id=\"increased-exposure-and-increased-discussion-normalizes-attitudes-use-of-word-normalize-depolarize\"\u003eincreased exposure and increased discussion \u003cstrong\u003enormalizes\u003c/strong\u003e attitudes\u0026mdash;use of word normalize, depolarize\u003c/h3\u003e\n\u003cp\u003e“The results lend credence to the argument, derived from social contact theory, that increased exposure to gays and lesbians as well as to discussions about homosexuality and the merits of SSM may have a normalizing effect on individuals’ attitudes. Research also shows that such interaction can take place through mass media (Berggren and Nilsson 2015)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
478]\u003c/p\u003e\n\u003ch3 id=\"ssm-debates-preceded-by-1-higher-emotion-and-2-few-active-engagements-results-in-failed-legalization\"\u003essm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization\u003c/h3\u003e\n\u003cp\u003e“we find evidence of moral culture wars between ideologies and show that constituencies that express higher levels of emotion and have fewer actively engaged participants often precede legalization efforts that fail” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 2603]\u003c/p\u003e\n\u003ch3 id=\"sustained-interest-and-overall-engagement-precede-legalization-success\"\u003esustained interest and overall engagement precede legalization success\u003c/h3\u003e\n\u003cp\u003e“We found that policies that passed had a greater percentage of people with sustained interest over time, had greater overall engagement levels, and had significantly higher levels of language related to fairness and openness before the decision. On the other hand, states with policies that failed had higher levels of anxiety, religion, and tentativeness. These findings align with previous research characterizing the same-sex marriage debate as a “culture war” [1], where proponents advocate for it in terms of fairness morality, while opponents argue against it in terms of religious morality.” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 2610]\u003c/p\u003e\n\u003ch3 id=\"media-diversity-is-important-otherwise-one-side-may-drown-out-the-other\"\u003emedia diversity is important, otherwise one side may drown out the other\u003c/h3\u003e\n\u003cp\u003e“if one message is able to drown out the other, it may allow said message to dictate the terms of the debate and change minds (and eventually laws) accordingly \u0026hellip; This inquiry argues that media coverage (in terms of both specific frames and the competition between said frames) of same-sex marriage and civil unions shapes levels of opposition to these policies.” [Johnson, 2012, p. 
1056]\u003c/p\u003e\n\u003ch3 id=\"paradigm-shift-happens-through-tipping-the-scale-not-pushing-it-over--i-dot-e-dot-less-effort\"\u003eparadigm shift happens through \u003cstrong\u003etipping\u003c/strong\u003e the scale, not pushing it over (i.e. less effort)\u003c/h3\u003e\n\u003cp\u003e“Morality framing in-and-of itself did not seem to have as much power as equality framing when it came to changing minds between 2004 and 2011, but those interested in spreading a message of morality and rolling back same-sex marriage and civil unions efforts should realize that, should they tip the balance of the competition between these two frames back in their favor, levels of opposition have the potential to shift once more in a way that benefits their policy goals.” [Johnson, 2012, p. 1074]\u003c/p\u003e\n\u003ch3 id=\"expanding-generics-is-a-common-attack-surface-in-morality-arguments\"\u003eexpanding generics is a common attack surface in morality arguments\u003c/h3\u003e\n\u003cp\u003e“Experience has shown that it is not possible to debate the recognition of same-sex marriage without opponents making a normative connection between homosexuality and other forms of human relationships such as polygamy.” [Ball, p. 
1900]\u003c/p\u003e\n\u003ch3 id=\"lack-of-engegement\"\u003elack of engegement\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;the lack of spirited political engagement \u0026hellip; Comparatively little is offered in support of gay and lesbian rights\u0026rdquo; (95)\u003c/p\u003e\n\u003ch3 id=\"tolerance\"\u003etolerance\u003c/h3\u003e\n\u003cp\u003etolerance among adults toward homosexuals and toward people of different race, collected from the World Values Survey and European Values Study.31 Both cross-sectional and panel results (available on request) suggest that globalization does not generally affect these tolerance measures.32 In other words, increasing economic, social and political integration does not seem to influence the contemporary level of tolerance in the adult population,” [⁨Berggren⁩ and ⁨Nilsson⁩, 2015, p. 384]\u003c/p\u003e\n\u003ch3 id=\"power\"\u003epower\u003c/h3\u003e\n\u003cp\u003eThus, participation in itself is an expression of some degree of (enabled) power [Dahlgren, 2016, p. 26]\u003c/p\u003e\n\u003ch2 id=\"sub-claim-development\"\u003eSub-claim Development\u003c/h2\u003e\n\u003ch3 id=\"define-polarization-and-state-of-play-in-ssm\"\u003eDefine polarization and state of play in SSM\u003c/h3\u003e\n\u003cp\u003eYoink from TIC:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edefine polarization\u003c/li\u003e\n\u003cli\u003emotivate the study of polarization through extreme cases\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eOne salient case of depolarization in recent years is SSM. Debate transition from SSM into other intersectionality / rights (CITE). Direct measurements too: gallup\u0026mdash;Republican vs. Democracts gap closed on the topic.\u003c/p\u003e\n\u003cp\u003eSSM offers a salient case study of what depolarization can look like. 
Having gone through the cycle, we are afforded post-hoc analysis of what worked and didn\u0026rsquo;t work.\u003c/p\u003e\n\u003cp\u003eWe know \u003cstrong\u003ewhat\u003c/strong\u003e worked: \u003ca href=\"3880B92C-F887-41CD-8281-A5B2C053D773\"\u003essm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization\u003c/a\u003e, whereas \u003ca href=\"#sustained-interest-and-overall-engagement-precede-legalization-success\"\u003esustained interest and overall engagement precede legalization success\u003c/a\u003e\u0026mdash;tied to the depolarization.\u003c/p\u003e\n\u003cp\u003eBut \u003cstrong\u003ewhy\u003c/strong\u003e? and specifically, why these things? How can we generalize this?\u003c/p\u003e\n\u003cp\u003eA careful study of polarization helps inform why these things worked. We are going to do this from social media because (from the TIC social media is easy). The framework will show that [TiC thesis].\u003c/p\u003e\n\u003cp\u003eAfter developing such a framework, we will develop this analysis on two cases:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ebriefly on trolls (this is \u003cstrong\u003epolarizing\u003c/strong\u003e), and\u003c/li\u003e\n\u003cli\u003eSSM (this is \u003cstrong\u003edepolarizing\u003c/strong\u003e), which, given that\u0026rsquo;s what we\u0026rsquo;re after, we are going to exand and learn from to further investigate the motivating dynamics of the framework in context of SSM.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"all-of-the-lit-review\"\u003e\u0026lt;\u0026gt;all of the lit review\u003c/h3\u003e\n\u003cp\u003eBegin by developing the framework.\u003c/p\u003e\n\u003cp\u003eyipeee\u003c/p\u003e\n\u003ch3 id=\"ssm-has-all-the-parts-that-s-needed-for-this-to-work-dot\"\u003eSSM has all the parts that\u0026rsquo;s needed for this to work.\u003c/h3\u003e\n\u003cp\u003e[vocab note: homophilic is not the same thing as MSM. We use it in the network dynamics sense. 
also not talking about the rest of queer community, which has a much more complex/intersectional set of issues.]\u003c/p\u003e\n\u003cp\u003eSo while trolls polarize, SSM is an example of successful polarization.\u003c/p\u003e\n\u003cp\u003eRecall previous efforts: 1) high emotion 2) low engagement\u0026mdash;-highly homophilic interactions.\u003c/p\u003e\n\u003cp\u003eThe depolarization of SSM follows an extremely similar pattern to the depolarization framework developed through studying social media in general: post-hoc studies highlights that depolarization of SSM is characterized by both increased exposure to the community on social media as well as increased active political engagement with the community. The close correspondence between SSM and the framework, therefore offers an opportunity to directly use the underlying motivations of SSM legalization as a study of the motivation of the framework in general.\u003c/p\u003e\n\u003ch3 id=\"media-needs-to-be-diverse-for-ssm-because-otherwise-it-creates-vicarious-communities-dot\"\u003eMedia needs to be diverse for SSM because otherwise it creates vicarious communities.\u003c/h3\u003e\n\u003cp\u003eSSM\u0026rsquo;s success tied to sustained, diverse interest largely through social media.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#social-contract-theory-indicates-that-people-become-more-tolerant-due-to-interaction-diminishing-the-power-of-vicarious-communities\"\u003eSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot;\u003c/a\u003e. One such group: \u003ca href=\"#social-media-as-a-means-of-exposure-to-the-modern-world\"\u003esocial media as a means of exposure SSM practice\u003c/a\u003e. 
Indeed, we notice this before overall support: \u003ca href=\"#increased-exposure-on-sm-results-in-increased-support\"\u003eIncreased exposure on SM results in increased support\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eYet, these results are tempered: \u003ca href=\"#media-diversity-is-important-otherwise-one-side-may-drown-out-the-other\"\u003emedia diversity is important, otherwise one side may drown out the other and may even be counter-productive.\u003c/a\u003e This counter-productivity is created by\n\u003ca href=\"#expanding-generics-is-a-common-attack-surface-in-morality-arguments\"\u003eas given by Novoa, generics\u003c/a\u003e\u0026mdash;whereby the counter-party will frame SSM along with other factors to criticize as a group. Therefore, true exposure can only happen if no one side drowns out the other; otherwise it will be vicarious \u0026ldquo;totem\u0026rdquo; effect.\u003c/p\u003e\n\u003ch3 id=\"ssm-debates-cannot-succeed-with-diverse-media-alone-it-requires-active-engagement-with-the-topic-too\"\u003eSSM debates cannot succeed with diverse media alone, it requires active engagement with the topic too\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#economic-development-is-correlated-with-strong-adoption-of-self-expression\"\u003eSSM is an act of \u0026ldquo;self expression\u0026rdquo;, \u0026ldquo;expression\u0026rdquo; is an active verb of engagement.\u003c/a\u003e Active engagement doesn\u0026rsquo;t come from simply being on the internet, which is shown to \u003ca href=\"#people-who-just-consume-internet-are-not-exposed-nearly-as-much-to-ssm\"\u003enot be a factor enough to expose to SSM\u003c/a\u003e. 
⁨Díez⁩ and ⁨Dion⁩ frames this engagement as a process of \u003ca href=\"#increased-exposure-and-increased-discussion-normalizes-attitudes-use-of-word-normalize-depolarize\"\u003e\u003cstrong\u003enormalization\u003c/strong\u003e of these attitudes: use of word normalize is the crucial factor to depolarize\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"conclusion-plus-discussion\"\u003eConclusion + Discussion\u003c/h3\u003e\n\u003cp\u003eTwo-prong framework + evidence from SSM:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ediverse media diet is needed to prevent totem creation + generics\u003c/li\u003e\n\u003cli\u003eactive engagement is needed to solidify normalization\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSSM is perhaps the first because a \u003ca href=\"#paradigm-shift-happens-through-tipping-the-scale-not-pushing-it-over--i-dot-e-dot-less-effort\"\u003eparadigm shift happens through \u003cstrong\u003etipping\u003c/strong\u003e the scale, not pushing it over (i.e. less effort)\u003c/a\u003e. So its the easiest to tip over.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr1_rba_planning/","tags":null,"title":"PWR1 RBA Planning"},{"categories":null,"contents":"General Information Due Date Topic Important Documents Sunday, Jan 21 AI Alignment Yejin Choi Talk Claim Synthesis Quotes Bin Double entendre which frames the study of AI safety as \u0026ldquo;intellectual\u0026rdquo; (in contrast to War) “And then there are these additional intellectual questions. Can AI, without robust common sense, be truly safe for humanity?”\ndouble intentre: \u0026ldquo;intellectual\u0026rdquo; as in interesting but also \u0026ldquo;intellectual\u0026rdquo; as in worth asking\nLanguage of Extremes: AI safety is a \u0026ldquo;bruce force\u0026rdquo; problem which requires \u0026ldquo;extreme scale\u0026rdquo; And is brute-force scale really the only way and even the correct way to teach AI? 
So I’m often asked these days whether it\u0026rsquo;s even feasible to do any meaningful research without extreme-scale compute.\nFraming AI safety as a war, us-them dynamic between you and AI “Perhaps we can \u0026hellip; seek inspiration from an old-time classic, \u0026ldquo;The Art of War,\u0026rdquo; which tells us, in my interpretation, know your enemy, choose your battles, and innovate your weapons.”\nImporting the language of war\u0026mdash;raising urgency and stakes. Animating: more strongly dynamic.\nAdding scale-optimists to be a part of the antagonized group “Some scale optimists might say, “Don’t worry about this. All of these can be easily fixed by adding similar examples as yet more training data for AI.\u0026quot; But the real question is this. Why should we even do that?”\ncontrast: some \u0026hellip; we—us them dynamic\nWe can\u0026rsquo;t dissect AI models because of big tech \u0026ldquo;we are now at the mercy of those few tech companies because researchers in the larger community do not have the means to truly inspect and dissect these models. 
\u0026quot;\nLanguage of extremes in both directions, taking agency out of the AI too “AI today is unbelievably intelligent and then shockingly stupid.” (pdf) phrasing: contrast\nadv adj and then adv adj\nRaises stakes: uses the language of extremes\nPassive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy “We’ve been told that it’s a research topic of ’70s and ’80s; shouldn’t work on it because it will never work; in fact, don\u0026rsquo;t even say the word to be taken seriously.” (pdf) dropping subject; \u0026ldquo;we\u0026rdquo;—raises urgency by highlighting contrast\nCalls to action; parallel structure associates \u0026ldquo;we\u0026rdquo; for the first time with, :shocked picachu face: organizations “We don\u0026rsquo;t know what\u0026rsquo;s in this, but it should be open and publicly available so that we can inspect and ensure [it supports] diverse norms and values. \u0026hellip; So for this reason, my teams at UW and AI2 have been working on commonsense knowledge graphs ” (pdf) repeated usage of \u0026ldquo;we\u0026rdquo;—bilding a tride\nZoomorphise AI as a new \u0026ldquo;species\u0026rdquo;, in effect animating it “We\u0026rsquo;re now entering a new era in which AI is almost like a new intellectual species with unique strengths and weaknesses compared to humans. 
In order to make this powerful AI sustainable and humanistic, we need to teach AI common sense, norms and values.” (pdf) anthropormorphising\nGiving AI agency, using the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal “And that AI decided to kill humans to utilize them as additional resources, to turn you into paper clips.” (pdf) humans \u0026hellip; you—increase urgency\nSub-Claim Development Victimze AI: AI is a new species with blunt force that\u0026rsquo;s teachable AI has extreme raw power, but is somewhat of an anthropomorphizing new \u0026ldquo;species\u0026rdquo;, which \u0026ldquo;we\u0026rdquo; can \u0026ldquo;teach\u0026rdquo;. This breaks the audiences\u0026rsquo; traditional framing of AI as a blind tool, and empowers the audience to imagine what \u0026ldquo;teaching it\u0026rdquo; looks like. However, Choi uses the language of extremes in both directions, framing AI as \u0026ldquo;stupid\u0026rdquo; to victimize it. [She spent much of the talk giving copious examples]. This places the audience at a position of power, wanting to help the subjugated AI.\nVictimize Audience wanting help AI: pitting the audience against the AI deveolpers After victimizing AI and inspiring audience to teach it, Choi then victimizes audience wanting to help. Choi introduces that big tech disenfranchises the audience\u0026rsquo;s goals of teaching \u0026ldquo;AI\u0026rdquo;, who we are at the \u0026ldquo;mercy of\u0026rdquo;. Establishes an us-them dynamic that Choi continues to develop: passive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy, objectifying it, further establishing and antagonizing the two groups and highlighting how the audience, and by proxy Choi, is disenfranchised.\nWe are at war, and Choi is the savior This two-sided dynamic comes to a head with the language of war. 
AI can kill \u0026ldquo;you\u0026rdquo;-the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal, further justifying the \u0026ldquo;war\u0026rdquo;. With a parallel structure, Choi frames herself and her research as the leader of the \u0026ldquo;audience\u0026rsquo;s side\u0026rdquo;, wading into the unknown in this war. She frames this war as an intellectual one, which she has the credibility ethos to lead.\nThe Claim Choi uses the language of antagonism to victimize AI language models and, by proxy, the audience wanting the help the victimized AI\u0026mdash;placing both groups at a \u0026ldquo;war\u0026rdquo; of disenfranchisement against big-tech model developers. This dynamic allowing Choi to justify her research as the intellectual savior which empowers the fight of humanity in this \u0026ldquo;war\u0026rdquo;.\n","html":"\u003ch2 id=\"general-information\"\u003eGeneral Information\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDue Date\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003eImportant Documents\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSunday, Jan 21\u003c/td\u003e\n\u003ctd\u003eAI Alignment\u003c/td\u003e\n\u003ctd\u003eYejin Choi Talk\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"claim-synthesis\"\u003eClaim Synthesis\u003c/h2\u003e\n\u003ch3 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h3\u003e\n\u003ch4 id=\"double-entendre-which-frames-the-study-of-ai-safety-as-intellectual--in-contrast-to-war\"\u003eDouble entendre which frames the study of AI safety as \u0026ldquo;intellectual\u0026rdquo; (in contrast to War)\u003c/h4\u003e\n\u003cp\u003e“And then there are these additional intellectual questions. 
Can AI, without robust common sense, be truly safe for humanity?”\u003c/p\u003e\n\u003cp\u003edouble intentre: \u0026ldquo;intellectual\u0026rdquo; as in interesting but also \u0026ldquo;intellectual\u0026rdquo; as in worth asking\u003c/p\u003e\n\u003ch4 id=\"language-of-extremes-ai-safety-is-a-bruce-force-problem-which-requires-extreme-scale\"\u003eLanguage of Extremes: AI safety is a \u0026ldquo;bruce force\u0026rdquo; problem which requires \u0026ldquo;extreme scale\u0026rdquo;\u003c/h4\u003e\n\u003cp\u003eAnd is brute-force scale really the only way and even the correct way to teach AI? So I’m often asked these days whether it\u0026rsquo;s even feasible to do any meaningful research without extreme-scale compute.\u003c/p\u003e\n\u003ch4 id=\"framing-ai-safety-as-a-war-us-them-dynamic-between-you-and-ai\"\u003eFraming AI safety as a war, us-them dynamic between you and AI\u003c/h4\u003e\n\u003cp\u003e“Perhaps we can \u0026hellip; seek inspiration from an old-time classic, \u0026ldquo;The Art of War,\u0026rdquo; which tells us, in my interpretation, know your enemy, choose your battles, and innovate your weapons.”\u003c/p\u003e\n\u003cp\u003eImporting the language of war\u0026mdash;raising urgency and stakes. Animating: more strongly dynamic.\u003c/p\u003e\n\u003ch4 id=\"adding-scale-optimists-to-be-a-part-of-the-antagonized-group\"\u003eAdding scale-optimists to be a part of the antagonized group\u003c/h4\u003e\n\u003cp\u003e“Some scale optimists might say, “Don’t worry about this. All of these can be easily fixed by adding similar examples as yet more training data for AI.\u0026quot; But the real question is this. 
Why should we even do that?”\u003c/p\u003e\n\u003cp\u003econtrast: some \u0026hellip; we—us them dynamic\u003c/p\u003e\n\u003ch4 id=\"we-can-t-dissect-ai-models-because-of-big-tech\"\u003eWe can\u0026rsquo;t dissect AI models because of big tech\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;we are now at the mercy of those few tech companies because researchers in the larger community do not have the means to truly inspect and dissect these models. \u0026quot;\u003c/p\u003e\n\u003ch4 id=\"language-of-extremes-in-both-directions-taking-agency-out-of-the-ai-too\"\u003eLanguage of extremes in both directions, taking agency out of the AI too\u003c/h4\u003e\n\u003cp\u003e“AI today is unbelievably intelligent and then shockingly stupid.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=3\u0026amp;annotation=35JSUAXU\"\u003epdf\u003c/a\u003e) phrasing: contrast\u003c/p\u003e\n\u003cp\u003eadv adj and then adv adj\u003c/p\u003e\n\u003cp\u003eRaises stakes: uses the language of extremes\u003c/p\u003e\n\u003ch4 id=\"passive-voice-takes-agency-out-of-us-and-into-the-hands-of-the-enemy\"\u003ePassive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy\u003c/h4\u003e\n\u003cp\u003e“We’ve been told that it’s a research topic of ’70s and ’80s; shouldn’t work on it because it will never work; in fact, don\u0026rsquo;t even say the word to be taken seriously.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=4\u0026amp;annotation=97H3RG7L\"\u003epdf\u003c/a\u003e) dropping subject; \u0026ldquo;we\u0026rdquo;—raises urgency by highlighting contrast\u003c/p\u003e\n\u003ch4 id=\"calls-to-action-parallel-structure-associates-we-for-the-first-time-with-shocked-picachu-face-organizations\"\u003eCalls to action; parallel structure associates \u0026ldquo;we\u0026rdquo; for the first time with, :shocked picachu face: organizations\u003c/h4\u003e\n\u003cp\u003e“We don\u0026rsquo;t know what\u0026rsquo;s in this, but it should be open and 
publicly available so that we can inspect and ensure [it supports] diverse norms and values. \u0026hellip; So for this reason, my teams at UW and AI2 have been working on commonsense knowledge graphs\n” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=5\u0026amp;annotation=8ENT9PEL\"\u003epdf\u003c/a\u003e) repeated usage of \u0026ldquo;we\u0026rdquo;—bilding a tride\u003c/p\u003e\n\u003ch4 id=\"zoomorphise-ai-as-a-new-species-in-effect-animating-it\"\u003eZoomorphise AI as a new \u0026ldquo;species\u0026rdquo;, in effect animating it\u003c/h4\u003e\n\u003cp\u003e“We\u0026rsquo;re now entering a new era in which AI is almost like a new intellectual species with unique strengths and weaknesses compared to humans. In order to make this powerful AI sustainable and humanistic, we need to teach AI common sense, norms and values.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=6\u0026amp;annotation=DLXMRF8U\"\u003epdf\u003c/a\u003e) anthropormorphising\u003c/p\u003e\n\u003ch4 id=\"giving-ai-agency-using-the-second-person-singular-makes-the-killing-feel-more-personal\"\u003eGiving AI agency, using the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal\u003c/h4\u003e\n\u003cp\u003e“And that AI decided to kill humans to utilize them as additional resources, to turn you into paper clips.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=3\u0026amp;annotation=25H9C3MG\"\u003epdf\u003c/a\u003e) humans \u0026hellip; you—increase urgency\u003c/p\u003e\n\u003ch3 id=\"sub-claim-development\"\u003eSub-Claim Development\u003c/h3\u003e\n\u003ch4 id=\"victimze-ai-ai-is-a-new-species-with-blunt-force-that-s-teachable\"\u003eVictimze AI: AI is a new species with blunt force that\u0026rsquo;s teachable\u003c/h4\u003e\n\u003cp\u003eAI has \u003ca href=\"#language-of-extremes-ai-safety-is-a-bruce-force-problem-which-requires-extreme-scale\"\u003eextreme raw power\u003c/a\u003e, but is somewhat of an 
anthropomorphizing \u003ca href=\"#zoomorphise-ai-as-a-new-species-in-effect-animating-it\"\u003enew \u0026ldquo;species\u0026rdquo;, which \u0026ldquo;we\u0026rdquo; can \u0026ldquo;teach\u0026rdquo;\u003c/a\u003e. This breaks the audiences\u0026rsquo; traditional framing of AI as a blind tool, and empowers the audience to imagine what \u0026ldquo;teaching it\u0026rdquo; looks like. However, Choi uses the \u003ca href=\"#language-of-extremes-in-both-directions-taking-agency-out-of-the-ai-too\"\u003elanguage of extremes in both directions, framing AI as \u0026ldquo;stupid\u0026rdquo; to victimize it\u003c/a\u003e. [She spent much of the talk giving copious examples]. This places the audience at a position of power, wanting to help the subjugated AI.\u003c/p\u003e\n\u003ch4 id=\"victimize-audience-wanting-help-ai-pitting-the-audience-against-the-ai-deveolpers\"\u003eVictimize Audience wanting help AI: pitting the audience against the AI deveolpers\u003c/h4\u003e\n\u003cp\u003eAfter victimizing AI and inspiring audience to teach it, Choi then victimizes audience wanting to help. Choi introduces that \u003ca href=\"#we-can-t-dissect-ai-models-because-of-big-tech\"\u003ebig tech disenfranchises the audience\u0026rsquo;s goals of teaching \u0026ldquo;AI\u0026rdquo;\u003c/a\u003e, who we are at the \u0026ldquo;mercy of\u0026rdquo;. 
Establishes an us-them dynamic that Choi continues to develop: \u003ca href=\"#passive-voice-takes-agency-out-of-us-and-into-the-hands-of-the-enemy\"\u003epassive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy, objectifying it\u003c/a\u003e, further establishing and antagonizing the two groups and highlighting how the audience, and by proxy Choi, is disenfranchised.\u003c/p\u003e\n\u003ch4 id=\"we-are-at-war-and-choi-is-the-savior\"\u003eWe are at war, and Choi is the savior\u003c/h4\u003e\n\u003cp\u003eThis two-sided dynamic comes to a head with the \u003ca href=\"#framing-ai-safety-as-a-war-us-them-dynamic-between-you-and-ai\"\u003elanguage of war\u003c/a\u003e. \u003ca href=\"#giving-ai-agency-using-the-second-person-singular-makes-the-killing-feel-more-personal\"\u003eAI can kill \u0026ldquo;you\u0026rdquo;-the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal, further justifying the \u0026ldquo;war\u0026rdquo;\u003c/a\u003e. With a parallel structure, Choi \u003ca href=\"#calls-to-action-parallel-structure-associates-we-for-the-first-time-with-shocked-picachu-face-organizations\"\u003eframes herself and her research as the leader of the \u0026ldquo;audience\u0026rsquo;s side\u0026rdquo;, wading into the unknown\u003c/a\u003e in this war. She frames this war as an \u003ca href=\"#double-entendre-which-frames-the-study-of-ai-safety-as-intellectual--in-contrast-to-war\"\u003eintellectual one\u003c/a\u003e, which she has the credibility \u003cem\u003eethos\u003c/em\u003e to lead.\u003c/p\u003e\n\u003ch3 id=\"the-claim\"\u003eThe Claim\u003c/h3\u003e\n\u003cp\u003eChoi uses the language of antagonism to victimize AI language models and, by proxy, the audience wanting the help the victimized AI\u0026mdash;placing both groups at a \u0026ldquo;war\u0026rdquo; of disenfranchisement against big-tech model developers. 
This dynamic allowing Choi to justify her research as the intellectual savior which empowers the fight of humanity in this \u0026ldquo;war\u0026rdquo;.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr1_rhetorical_analysis_planning/","tags":null,"title":"PWR1 Rhetorical Analysis Essay Planning"},{"categories":null,"contents":"Quotes Bin polarization distorts beliefs about others “Recent years have seen a sharp increase in political polarization in the United States (1–7), leading to deadlock in Congress (8), distorted beliefs about fellow Americans (9, 10), and distrust, hostility, and even violence toward outgroup members (11–13)” [Novoa et al., 2023, p. 1]\ngenerics about particular group is a way that polarized languages manifest “Specifically, we focus on expressions that make claims about a category as a whole (e.g., “Democrats want to defund the police” makes a claim about the category of “Democrats”), also known as generics (34–38).” [Novoa et al., 2023, p. 2]\ngenerics are much more readily remembered “A second distinctive signature of generics documented in prior research is that they tend to be how generalizations are later recalled, even when generalizations are stated in more precise, quantified ways” [Novoa et al., 2023, p. 2]\ngenerics are strongly rejected or accepted based on parity affiliation =\u0026gt; echo chamber “Respondents showed a strong pattern of accepting generics for the target party and rejecting generics for the opposite party.” [Novoa et al., 2023, p. 3]\npeople perceive higher polarization than actually are present due to use of generics “We found that perceived polarization was greater than actual polarization, in two key respects: 1) for nearly every issue, people believed that the two parties were further apart than they actually are, and 2) patterns of generic endorsement were more polarized (i.e., revealed a greater gap between the two parties) than perceptions of prevalence.” [Novoa et al., 2023, p. 
5]\npolarized language is present far more in generic statements “The results of this study support the conclusion that generic language leads to polarized judgments regarding political parties, and does so more than nongeneric language. We obtained three key results: 1) for generic statements (e.g., “Democrats \u0026hellip;”), prevalence estimates were larger for the named party (e.g., Democrats, when the generic statement was about Democrats) than for the unnamed party (e.g., Republicans, when the generic statement was about Democrats); 2) for generic statements, prevalence estimates were above 50% for the named party and below 50% for the unnamed party; and 3) the gap between named and unnamed prevalence estimates was larger for generic statements than for nongeneric statements (such as “Many Democrats support House Bill 858” or “Some Democrats support House Bill 858”).” [Novoa et al., 2023, p. 9]\npolitical science generally believes that political polarization and citizen polarization is different “The predominant view in political science is that the current polarization in Congress has not diffused much into the citizenry” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 5]\npolitical polarization and its prevalence has been discussed since founding of the country “The idea that U.S. politics is necessarily polarized, owing to the intrinsic diversity and size of the country, goes back at least to James Madison and the divergence between Hamiltonian and Jeffersonian economic philosophies” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 
5]\npartisanship switches to minority party when the house switches control “the partisanship of language tends to switch when House control switches, but in the direction of the new minority party.” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 25]\none type of polarization is where people disengage with those with opposite views “The first aspect of political polarization, which we call “interactional polarization,” focuses on a process whereby participants in a debate increasingly interact with likeminded individuals, while disengaging from interactions with others who hold opposing viewpoints.” [Yarchi et al., 2021, p. 101]\nthere\u0026rsquo;s a difference between filter bubbles and increased polarization “Despite the recent salience of theories regarding fragmented “echo chambers” or “filter bubbles,” it remains contentious whether social media do indeed drive such interactional polarization” [Yarchi et al., 2021, p. 101]\nalready known groups typically bring strong agreements, and strangers typically bring disperate views “Heterophilic interactions appear to be more common along so-called “weak ties” occasional communications that are not underfed by strong social bonds such as friendship or sustained collaboration – while most “strong ties” (among friends, within teams, etc.) are predominantly homophilic” [Yarchi et al., 2021, p. 101]\nsocial media users agree more over time “H2 (Interactional Polarization): Interaction patterns on social media become increasingly homophilic over time.” [Yarchi et al., 2021, p. 102]\nisolation results in more extreme contributions “Individuals embedded within more homophilic interaction networks subsequently express more extreme positions in their contributions.” [Yarchi et al., 2021, p. 
102]\nFacebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions “As the world’s foremost social media platform, Facebook’s popularity is arguably derived largely from its capacity to immerse its users in a feed of contents that cater to their personal interests and leanings. To do this, the platform relies heavily on users’ self curated networks of friends, but also on an algorithm that prioritizes content based on users’ interests and support for similar posts, displaying only a small share of predominantly congenial, supportive contents” [Yarchi et al., 2021, p. 104]\nFacebook creates supportive echo chambers “Facebook has become the prime suspect for the creation of homophilic echo chambers” [Yarchi et al., 2021, p. 104]\nTwitter creates asymmetric, non-friend dynamics “Twitter is defined primarily by its unrestricted publicness. Anyone, even non-users, can read any tweet, and any user can respond to any contribution. Users can follow others without a need for permission, enabling asymmetric, non-reciprocated ties.” [Yarchi et al., 2021, p. 104]\nDebate exists between whether Twitter creates or dismantles homogenization, and therefore polarization “Reflecting Twitter’s ambiguous profile, the existing literature yields conflicting findings regarding its tendency toward homophily and polarization (e.g., Kwak et al., 2010 detected little homophily; Weng et al., 2010; Hong \u0026amp; Kim, 2016; Colleoni et al., 2014 found the opposite).” [Yarchi et al., 2021, p. 105]\nCITE: defines social media “social media platform in a narrow sense (following Ellison and Boyd (2013) definition)” [Yarchi et al., 2021, p. 
105]\nUsers tend to express more extreme views if surrounded by likeminded users “Considering the effect of homophilic interactions on expressed positions, our data confirm users’ tendency to express more extreme views if interactions with likeminded users take in a larger share of their social media communications (H3).” [Yarchi et al., 2021, p. 111]\ncannot study only one social media as they have different properties “Beyond questioning the widespread reliance on Twitter (and limited public Facebook) data to draw conclusions about social media as a whole, our study also highlights the perils of inferring dynamic properties from static data.” [Yarchi et al., 2021, p. 114]\ntendency to associate with like-minded people increases echo chambers “That social psychology has long shown this tendency to associate with like-minded others is common cross-culturally. However, there is new fear that the current media system is helping people enter echo chambers more easily than ever before.” [Dubois and Blank, 2018, p. 731]\nTwitter is an isolated slice of the population “Twitter itself is used by a relatively small proportion of the population, about one-quarter of the UK, which is younger, wealthier, and better-educated than Britain as a whole” [Dubois and Blank, 2018, p. 732]\nin the UK, going to a news source like BBC is still more common “going directly to a news source such as the BBC remains more common in the UK (Newman et al., 2017).” [Dubois and Blank, 2018, p. 733]\nstudies don\u0026rsquo;t study the aggregate effect of diverse media “A core problem with this line of research is that most studies select only one or a few media to focus on and so the comparative utility or effects of use of media in a diverse media environment are unclear.” [Dubois and Blank, 2018, p. 
733]\npeople with strong partisanship report consuming a diverse media digest “First, even individuals who have strong partisan affiliation report using both general news sites (which are largely non-partisan and include a variety of issues) and niche news sites (which may be partisan or focused on specific issues) – Republicans and Democrats have media diets which are quite similar” [Dubois and Blank, 2018, p. 734]\nconsumption of mixed media results in incidental exposure to a variety of news sources “While one might receive primarily left-leaning political content on Twitter, they may be incidentally exposed to a right-leaning perspective from a family member on Facebook or they might hear a debate between representatives from various perspectives on a television news broadcast.” [Dubois and Blank, 2018, p. 734]\nthose who are politically aware are going to encounter more perspectives “As Prior argues, political ‘junkies’ are likely to consume a lot of information and therefore may encounter more perspectives and arguments” [Dubois and Blank, 2018, p. 734]\nincreased involvement in politics results actually in less echo chamber “H2: The higher a person’s level of political interest the less likely they are to be in an echo chamber” [Dubois and Blank, 2018, p. 735]\nPeople who are actually disinterested in politics are in an echo chamber “First, that respondents with no political interest are in an echo chamber. We examine this possibility using the regressions in Table 3. The results in this table are based only on the respondents who said they had ‘No interest at all’ in politics, N = 243.” [Dubois and Blank, 2018, p. 
739]\nHigh choice in media doesn\u0026rsquo;t mean a high degree of ability to reconsiliate “A high-choice media environment does not simply mean that individuals develop strategies to deal with the many media options available, though of course they do so as they develop their news and political information repertoires” [Dubois and Blank, 2018, p. 740]\ndiversity in media AND engagement in politics matters “Our results suggest that people who are both not politically interested and who do not use diverse media are more likely to be in an echo chamber. They are less likely check multiple sources or to discover things that change their minds.” [Dubois and Blank, 2018, p. 741]\npolarized language results in greater engagement: but only to trolls and politically engaged users “We also find that polarized language is associated with greater engagement, but this association only holds for politically engaged users (both trolls and regular users). This research clarifies how trolls leverage polarized language and provides an open-source, simple tool for exploration of polarized communications on social media.” [Simchon et al., 2022, p. 1]\nRussian trolls used more polarized language “Again, we find that politically oriented Russian trolls use significantly more polarized language than their politically matched American sample (Russian trolls: M = 5.16, SD = 8.00, and N = 55,726; American controls: M = 2.91, SD = 6.84, and N = 55,726), t(108,836) = 50.61, P \u0026lt; 0.001, and Cohen’s d = 0.30 (for a robustness check, see Supplementary Materials).” [Simchon et al., 2022, p. 4]\nforeign agents increase in their polarization and posting frequency “foreign agents from various countries strategically used polarized language in social media communications, and in a majority of cases we see an increase over time in these attempts.” [Simchon et al., 2022, p. 6]\ndistinction between polarization on issues vs. 
polarization of anger “Scholars have made the conceptual distinction between issue polarization—an ideological, policy-based political divide, and affective polarization, i.e. dislike, distrust, and general animosity of political partisans toward the other political side” [Simchon et al., 2022, p. 6]\nsmall amount of trolls can polarize lots of people “Questions remain as to the extent of influence of trolls’ social media presence on real people. However, it is important to note that even a small number of agents with aggressive attitudes can have a substantial influence on the majority view, a process called “information gerrymandering”” [Simchon et al., 2022, p. 9]\ninteraction with trolls didn\u0026rsquo;t seem to change partisanship “The authors found that only a small fraction of users interacted with Russian trolls, and they did not observe any change in partisan attitude during that time among these users (68).” [Simchon et al., 2022, p. 9]\nsubclaim organization Polarization comes from congenial echo-chambers driven by generic language, which social media is prone to create due to their curation Interaction with in-group only results in more extreme contributions. This is what we typically call an \u0026ldquo;echo chamber\u0026rdquo; (1, 2). One such chamber environment is social media, and a particularly salient case of this is Facebook, which creates supportive echo chambers.\nNovoa proposes one analysis through linguistics by which such an echo chamber can get created\u0026mdash;generics: easy to remember generalisations. polarized language is present far more in generic statements, and generics about particular group is a way that polarized languages manifest.\nGenerics only function when deployed within a homogeneous environment. 
Yet, others have noted that Facebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions\u0026mdash;displaying \u0026ldquo;congenial\u0026rdquo; content that is likely to be homogeneous.\nThe congenial environment itself, however, is not enough to create or disrupt polarization; breaking echo chambers requires both a diversity of opinions as well as actual engagement Unlike Facebook\u0026rsquo;s congeniality, Twitter creates asymmetric, non-friend dynamics. Though it shows that it helps dismantle some echo chambers, it\u0026rsquo;s not conclusive. Yarchi notes this as the difference between filter bubbles and increased polarization. A \u0026ldquo;filter bubble\u0026rdquo; itself isn\u0026rsquo;t polarization, so what is?\nDubois solves this mystery by arguing that it is people who are actually disinterested in politics who are in an echo chamber. Because consumption of mixed media results in incidental exposure to a variety of news sources, one has to participate in the conversation to get out of the echo chamber.\nMeaning, high choice in media itself (i.e. having facebook AND twitter) doesn\u0026rsquo;t mean a high degree of ability to reconcile. It is diversity in media AND engagement in politics that matters.\nother notes those who are politically aware are going to encounter more perspectives people with strong partisanship report consuming a diverse media digest By using polarized language to target only politically active users, trolls essentially disrupt the ability to dismantle echo chambers Interaction with trolls didn\u0026rsquo;t seem to change partisanship, yet previous work establishes that a small amount of trolls can polarize lots of people\u0026mdash;so the manner by which trolls work is confusing.\nSimchon notes that Russian trolls used more polarized language. 
Our previous analysis concludes that political activism is an important and inseparable part of breaking an echo chamber; trolls, then, take advantage of this fact to disrupt the process of breaking away from polarization by capturing already politically active users, a process in which trolls take part.\nBIN polarization distorts beliefs about others one type of polarization is where people disengage with those with opposite views in the UK, going to a news source like BBC is still more common CITE: defines social media already known groups typically bring strong agreements, and strangers typically bring disparate views cannot study only one social media as they have different properties Twitter is an isolated slice of the population studies don\u0026rsquo;t study the aggregate effect of diverse media political science generally believes that political polarization and citizen polarization is different distinction between polarization on issues vs. polarization of anger language induces perception about polarization foreign agents increase in their polarization and posting frequency partisanship is constantly switching polarization is a long-standing topic ","html":"\u003ch2 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h2\u003e\n\u003ch3 id=\"polarization-distorts-beliefs-about-others\"\u003epolarization distorts beliefs about others\u003c/h3\u003e\n\u003cp\u003e“Recent years have seen a sharp increase in political polarization in the United States (1–7), leading to deadlock in Congress (8), distorted beliefs about fellow Americans (9, 10), and distrust, hostility, and even violence toward outgroup members (11–13)” [Novoa et al., 2023, p. 
1]\u003c/p\u003e\n\u003ch3 id=\"generics-about-particular-group-is-a-way-that-polarized-languages-manifest\"\u003egenerics about particular group is a way that polarized languages manifest\u003c/h3\u003e\n\u003cp\u003e“Specifically, we focus on expressions that make claims about a category as a whole (e.g., “Democrats want to defund the police” makes a claim about the category of “Democrats”), also known as generics (34–38).” [Novoa et al., 2023, p. 2]\u003c/p\u003e\n\u003ch3 id=\"generics-are-much-more-readily-remembered\"\u003egenerics are much more readily remembered\u003c/h3\u003e\n\u003cp\u003e“A second distinctive signature of generics documented in prior research is that they tend to be how generalizations are later recalled, even when generalizations are stated in more precise, quantified ways” [Novoa et al., 2023, p. 2]\u003c/p\u003e\n\u003ch3 id=\"generics-are-strongly-rejected-or-accepted-based-on-parity-affiliation-echo-chamber\"\u003egenerics are strongly rejected or accepted based on parity affiliation =\u0026gt; echo chamber\u003c/h3\u003e\n\u003cp\u003e“Respondents showed a strong pattern of accepting generics for the target party and rejecting generics for the opposite party.” [Novoa et al., 2023, p. 3]\u003c/p\u003e\n\u003ch3 id=\"people-perceive-higher-polarization-than-actually-are-present-due-to-use-of-generics\"\u003epeople perceive higher polarization than actually are present due to use of generics\u003c/h3\u003e\n\u003cp\u003e“We found that perceived polarization was greater than actual polarization, in two key respects: 1) for nearly every issue, people believed that the two parties were further apart than they actually are, and 2) patterns of generic endorsement were more polarized (i.e., revealed a greater gap between the two parties) than perceptions of prevalence.” [Novoa et al., 2023, p. 
5]\u003c/p\u003e\n\u003ch3 id=\"polarized-language-is-present-far-more-in-generic-statements\"\u003epolarized language is present far more in generic statements\u003c/h3\u003e\n\u003cp\u003e“The results of this study support the conclusion that generic language leads to polarized judgments regarding political parties, and does so more than nongeneric language. We obtained three key results: 1) for generic statements (e.g., “Democrats \u0026hellip;”), prevalence estimates were larger for the named party (e.g., Democrats, when the generic statement was about Democrats) than for the unnamed party (e.g., Republicans, when the generic statement was about Democrats); 2) for generic statements, prevalence estimates were above 50% for the named party and below 50% for the unnamed party; and 3) the gap between named and unnamed prevalence estimates was larger for generic statements than for nongeneric statements (such as “Many Democrats support House Bill 858” or “Some Democrats support House Bill 858”).” [Novoa et al., 2023, p. 9]\u003c/p\u003e\n\u003ch3 id=\"political-science-generally-believes-that-political-polarization-and-citizen-polarization-is-different\"\u003epolitical science generally believes that political polarization and citizen polarization is different\u003c/h3\u003e\n\u003cp\u003e“The predominant view in political science is that the current polarization in Congress has not diffused much into the citizenry” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 5]\u003c/p\u003e\n\u003ch3 id=\"political-polarization-and-its-prevalence-has-been-discussed-since-founding-of-the-country\"\u003epolitical polarization and its prevalence has been discussed since founding of the country\u003c/h3\u003e\n\u003cp\u003e“The idea that U.S. 
politics is necessarily polarized, owing to the intrinsic diversity and size of the country, goes back at least to James Madison and the divergence between Hamiltonian and Jeffersonian economic philosophies” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 5]\u003c/p\u003e\n\u003ch3 id=\"partisanship-switches-to-minority-party-when-the-house-switches-control\"\u003epartisanship switches to minority party when the house switches control\u003c/h3\u003e\n\u003cp\u003e“the partisanship of language tends to switch when House control switches, but in the direction of the new minority party.” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 25]\u003c/p\u003e\n\u003ch3 id=\"one-type-of-polarization-is-where-people-disengage-with-those-with-opposite-views\"\u003eone type of polarization is where people disengage with those with opposite views\u003c/h3\u003e\n\u003cp\u003e“The first aspect of political polarization, which we call “interactional polarization,” focuses on a process whereby participants in a debate increasingly interact with likeminded individuals, while disengaging from interactions with others who hold opposing viewpoints.” [Yarchi et al., 2021, p. 101]\u003c/p\u003e\n\u003ch3 id=\"there-s-a-difference-between-filter-bubbles-and-increased-polarization\"\u003ethere\u0026rsquo;s a difference between filter bubbles and increased polarization\u003c/h3\u003e\n\u003cp\u003e“Despite the recent salience of theories regarding fragmented “echo chambers” or “filter bubbles,” it remains contentious whether social media do indeed drive such interactional polarization” [Yarchi et al., 2021, p. 
101]\u003c/p\u003e\n\u003ch3 id=\"already-known-groups-typically-bring-strong-agreements-and-strangers-typically-bring-disperate-views\"\u003ealready known groups typically bring strong agreements, and strangers typically bring disperate views\u003c/h3\u003e\n\u003cp\u003e“Heterophilic interactions appear to be more common along so-called “weak ties” occasional communications that are not underfed by strong social bonds such as friendship or sustained collaboration – while most “strong ties” (among friends, within teams, etc.) are predominantly homophilic” [Yarchi et al., 2021, p. 101]\u003c/p\u003e\n\u003ch3 id=\"social-media-users-agree-more-over-time\"\u003esocial media users agree more over time\u003c/h3\u003e\n\u003cp\u003e“H2 (Interactional Polarization): Interaction patterns on social media become increasingly homophilic over time.” [Yarchi et al., 2021, p. 102]\u003c/p\u003e\n\u003ch3 id=\"isolation-results-in-more-extreme-contributions\"\u003eisolation results in more extreme contributions\u003c/h3\u003e\n\u003cp\u003e“Individuals embedded within more homophilic interaction networks subsequently express more extreme positions in their contributions.” [Yarchi et al., 2021, p. 102]\u003c/p\u003e\n\u003ch3 id=\"facebook-s-chief-innovation-is-to-leverage-the-in-group-homogeneity-to-create-supportive-opinions\"\u003eFacebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions\u003c/h3\u003e\n\u003cp\u003e“As the world’s foremost social media platform, Facebook’s popularity is arguably derived largely from its capacity to immerse its users in a feed of contents that cater to their personal interests and leanings. To do this, the platform relies heavily on users’ self curated networks of friends, but also on an algorithm that prioritizes content based on users’ interests and support for similar posts, displaying only a small share of predominantly congenial, supportive contents” [Yarchi et al., 2021, p. 
104]\u003c/p\u003e\n\u003ch3 id=\"facebook-creates-supportive-echo-chambers\"\u003eFacebook creates supportive echo chambers\u003c/h3\u003e\n\u003cp\u003e“Facebook has become the prime suspect for the creation of homophilic echo chambers” [Yarchi et al., 2021, p. 104]\u003c/p\u003e\n\u003ch3 id=\"twitter-creates-asymmetric-non-friend-dynamics\"\u003eTwitter creates asymmetric, non-friend dynamics\u003c/h3\u003e\n\u003cp\u003e“Twitter is defined primarily by its unrestricted publicness. Anyone, even non-users, can read any tweet, and any user can respond to any contribution. Users can follow others without a need for permission, enabling asymmetric, non-reciprocated ties.” [Yarchi et al., 2021, p. 104]\u003c/p\u003e\n\u003ch3 id=\"debate-exists-between-whether-twitter-creates-or-dismantles-homogenization-and-therefore-polarization\"\u003eDebate exists between whether Twitter creates or dismantles homogenization, and therefore polarization\u003c/h3\u003e\n\u003cp\u003e“Reflecting Twitter’s ambiguous profile, the existing literature yields conflicting findings regarding its tendency toward homophily and polarization (e.g., Kwak et al., 2010 detected little homophily; Weng et al., 2010; Hong \u0026amp; Kim, 2016; Colleoni et al., 2014 found the opposite).” [Yarchi et al., 2021, p. 105]\u003c/p\u003e\n\u003ch3 id=\"cite-defines-social-media\"\u003eCITE: defines social media\u003c/h3\u003e\n\u003cp\u003e“social media platform in a narrow sense (following Ellison and Boyd (2013) definition)” [Yarchi et al., 2021, p. 
105]\u003c/p\u003e\n\u003ch3 id=\"users-tend-to-express-more-extreme-views-if-surrounded-by-likeminded-users\"\u003eUsers tend to express more extreme views if surrounded by likeminded users\u003c/h3\u003e\n\u003cp\u003e“Considering the effect of homophilic interactions on expressed positions, our data confirm users’ tendency to express more extreme views if interactions with likeminded users take in a larger share of their social media communications (H3).” [Yarchi et al., 2021, p. 111]\u003c/p\u003e\n\u003ch3 id=\"cannot-study-only-one-social-media-as-they-have-different-properties\"\u003ecannot study only one social media as they have different properties\u003c/h3\u003e\n\u003cp\u003e“Beyond questioning the widespread reliance on Twitter (and limited public Facebook) data to draw conclusions about social media as a whole, our study also highlights the perils of inferring dynamic properties from static data.” [Yarchi et al., 2021, p. 114]\u003c/p\u003e\n\u003ch3 id=\"tendency-to-associate-with-like-minded-people-increases-echo-chambers\"\u003etendency to associate with like-minded people increases echo chambers\u003c/h3\u003e\n\u003cp\u003e“That social psychology has long shown this tendency to associate with like-minded others is common cross-culturally. However, there is new fear that the current media system is helping people enter echo chambers more easily than ever before.” [Dubois and Blank, 2018, p. 731]\u003c/p\u003e\n\u003ch3 id=\"twitter-is-an-isolated-slice-of-the-population\"\u003eTwitter is an isolated slice of the population\u003c/h3\u003e\n\u003cp\u003e“Twitter itself is used by a relatively small proportion of the population, about one-quarter of the UK, which is younger, wealthier, and better-educated than Britain as a whole” [Dubois and Blank, 2018, p. 
732]\u003c/p\u003e\n\u003ch3 id=\"in-the-uk-going-to-a-news-source-like-bbc-is-still-more-common\"\u003ein the UK, going to a news source like BBC is still more common\u003c/h3\u003e\n\u003cp\u003e“going directly to a news source such as the BBC remains more common in the UK (Newman et al., 2017).” [Dubois and Blank, 2018, p. 733]\u003c/p\u003e\n\u003ch3 id=\"studies-don-t-study-the-aggregate-effect-of-diverse-media\"\u003estudies don\u0026rsquo;t study the aggregate effect of diverse media\u003c/h3\u003e\n\u003cp\u003e“A core problem with this line of research is that most studies select only one or a few media to focus on and so the comparative utility or effects of use of media in a diverse media environment are unclear.” [Dubois and Blank, 2018, p. 733]\u003c/p\u003e\n\u003ch3 id=\"people-with-strong-partisanship-report-consuming-a-diverse-media-digest\"\u003epeople with strong partisanship report consuming a diverse media digest\u003c/h3\u003e\n\u003cp\u003e“First, even individuals who have strong partisan affiliation report using both general news sites (which are largely non-partisan and include a variety of issues) and niche news sites (which may be partisan or focused on specific issues) – Republicans and Democrats have media diets which are quite similar” [Dubois and Blank, 2018, p. 734]\u003c/p\u003e\n\u003ch3 id=\"consumption-of-mixed-media-results-in-incidental-exposure-to-a-variety-of-news-sources\"\u003econsumption of mixed media results in incidental exposure to a variety of news sources\u003c/h3\u003e\n\u003cp\u003e“While one might receive primarily left-leaning political content on Twitter, they may be incidentally exposed to a right-leaning perspective from a family member on Facebook or they might hear a debate between representatives from various perspectives on a television news broadcast.” [Dubois and Blank, 2018, p. 
734]\u003c/p\u003e\n\u003ch3 id=\"those-who-are-politically-aware-are-going-to-encounter-more-perspectives\"\u003ethose who are politically aware are going to encounter more perspectives\u003c/h3\u003e\n\u003cp\u003e“As Prior argues, political ‘junkies’ are likely to consume a lot of information and therefore may encounter more perspectives and arguments” [Dubois and Blank, 2018, p. 734]\u003c/p\u003e\n\u003ch3 id=\"increased-involvement-in-politics-results-actually-in-less-echo-chamber\"\u003eincreased involvement in politics results actually in \u003cstrong\u003eless\u003c/strong\u003e echo chamber\u003c/h3\u003e\n\u003cp\u003e“H2: The higher a person’s level of political interest the less likely they are to be in an echo chamber” [Dubois and Blank, 2018, p. 735]\u003c/p\u003e\n\u003ch3 id=\"people-who-are-actually-disinterested-in-politics-are-in-an-echo-chamber\"\u003ePeople who are actually \u003cstrong\u003edisinterested\u003c/strong\u003e in politics are in an echo chamber\u003c/h3\u003e\n\u003cp\u003e“First, that respondents with no political interest are in an echo chamber. We examine this possibility using the regressions in Table 3. The results in this table are based only on the respondents who said they had ‘No interest at all’ in politics, N = 243.” [Dubois and Blank, 2018, p. 739]\u003c/p\u003e\n\u003ch3 id=\"high-choice-in-media-doesn-t-mean-a-high-degree-of-ability-to-reconsiliate\"\u003eHigh choice in media doesn\u0026rsquo;t mean a high degree of ability to reconsiliate\u003c/h3\u003e\n\u003cp\u003e“A high-choice media environment does not simply mean that individuals develop strategies to deal with the many media options available, though of course they do so as they develop their news and political information repertoires” [Dubois and Blank, 2018, p. 
740]\u003c/p\u003e\n\u003ch3 id=\"diversity-in-media-and-engagement-in-politics-matters\"\u003ediversity in media AND engagement in politics matters\u003c/h3\u003e\n\u003cp\u003e“Our results suggest that people who are both not politically interested and who do not use diverse media are more likely to be in an echo chamber. They are less likely check multiple sources or to discover things that change their minds.” [Dubois and Blank, 2018, p. 741]\u003c/p\u003e\n\u003ch3 id=\"polarized-language-results-in-greater-engagement-but-only-to-trolls-and-politically-engaged-users\"\u003epolarized language results in greater engagement: but only to trolls and politically engaged users\u003c/h3\u003e\n\u003cp\u003e“We also find that polarized language is associated with greater engagement, but this association only holds for politically engaged users (both trolls and regular users). This research clarifies how trolls leverage polarized language and provides an open-source, simple tool for exploration of polarized communications on social media.” [Simchon et al., 2022, p. 1]\u003c/p\u003e\n\u003ch3 id=\"russian-trolls-used-more-polarized-language\"\u003eRussian trolls used more polarized language\u003c/h3\u003e\n\u003cp\u003e“Again, we find that politically oriented Russian trolls use significantly more polarized language than their politically matched American sample (Russian trolls: M = 5.16, SD = 8.00, and N = 55,726; American controls: M = 2.91, SD = 6.84, and N = 55,726), t(108,836) = 50.61, P \u0026lt; 0.001, and Cohen’s d = 0.30 (for a robustness check, see Supplementary Materials).” [Simchon et al., 2022, p. 
4]\u003c/p\u003e\n\u003ch3 id=\"foreign-agents-increase-in-their-polarization-and-posting-frequency\"\u003eforeign agents increase in their polarization and posting frequency\u003c/h3\u003e\n\u003cp\u003e“foreign agents from various countries strategically used polarized language in social media communications, and in a majority of cases we see an increase over time in these attempts.” [Simchon et al., 2022, p. 6]\u003c/p\u003e\n\u003ch3 id=\"distinction-between-polarization-on-issues-vs-dot-polarization-of-anger\"\u003edistinction between polarization on issues vs. polarization of anger\u003c/h3\u003e\n\u003cp\u003e“Scholars have made the conceptual distinction between issue polarization—an ideological, policy-based political divide, and affective polarization, i.e. dislike, distrust, and general animosity of political partisans toward the other political side” [Simchon et al., 2022, p. 6]\u003c/p\u003e\n\u003ch3 id=\"small-amount-of-trolls-can-polarize-lots-of-people\"\u003esmall amount of trolls can polarize lots of people\u003c/h3\u003e\n\u003cp\u003e“Questions remain as to the extent of influence of trolls’ social media presence on real people. However, it is important to note that even a small number of agents with aggressive attitudes can have a substantial influence on the majority view, a process called “information gerrymandering”” [Simchon et al., 2022, p. 9]\u003c/p\u003e\n\u003ch3 id=\"interaction-with-trolls-didn-t-seem-to-change-partisanship\"\u003einteraction with trolls didn\u0026rsquo;t seem to change partisanship\u003c/h3\u003e\n\u003cp\u003e“The authors found that only a small fraction of users interacted with Russian trolls, and they did not observe any change in partisan attitude during that time among these users (68).” [Simchon et al., 2022, p. 
9]\u003c/p\u003e\n\u003ch2 id=\"subclaim-organization\"\u003esubclaim organization\u003c/h2\u003e\n\u003ch3 id=\"polarization-comes-from-congenial-echo-chambers-driven-by-generic-language-which-social-media-is-prone-to-create-due-to-their-curation\"\u003ePolarization comes from congenial echo-chambers driven by generic language, which social media is prone to create due to their curation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#isolation-results-in-more-extreme-contributions\"\u003eInteraction with in-group only results in more extreme contributions\u003c/a\u003e. This is what we typically call an \u0026ldquo;echo chamber\u0026rdquo; (\u003ca href=\"#users-tend-to-express-more-extreme-views-if-surrounded-by-likeminded-users\"\u003e1\u003c/a\u003e, \u003ca href=\"#tendency-to-associate-with-like-minded-people-increases-echo-chambers\"\u003e2\u003c/a\u003e). One such chamber enviroment is \u003ca href=\"#social-media-users-agree-more-over-time\"\u003esocial media\u003c/a\u003e, a particularly salient case of this is \u003ca href=\"#facebook-creates-supportive-echo-chambers\"\u003eFacebook, which creates supportive echo chambers\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNovoa proposes one analysis through linguistics by which such an echo chamber can get created\u0026mdash;\u003ca href=\"#generics-are-much-more-readily-remembered\"\u003egenerics: easy to remember generalisations\u003c/a\u003e. 
\u003ca href=\"#polarized-language-is-present-far-more-in-generic-statements\"\u003epolarized language is present far more in generic statements\u003c/a\u003e, and \u003ca href=\"#generics-about-particular-group-is-a-way-that-polarized-languages-manifest\"\u003egenerics about particular group is a way that polarized languages manifest\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGenerics only \u003ca href=\"#generics-are-strongly-rejected-or-accepted-based-on-parity-affiliation-echo-chamber\"\u003efunction when deployed within a homegenous environment.\u003c/a\u003e Yet, others have noted that \u003ca href=\"#facebook-s-chief-innovation-is-to-leverage-the-in-group-homogeneity-to-create-supportive-opinions\"\u003eFacebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions\u003c/a\u003e\u0026mdash;displaying \u0026ldquo;congenial\u0026rdquo; content that are likely to be homegenous.\u003c/p\u003e\n\u003ch3 id=\"the-congenial-environment-itself-however-is-not-enough-to-create-or-disrupt-polarization-breaking-echo-chambers-requires-both-a-diversity-of-opinions-as-well-as-actual-engagement\"\u003eThe congenial environment itself, however, is not enough to create or disrupt polarization; breaking echo chambers requires both a diversity of opinions as well as actual engagement\u003c/h3\u003e\n\u003cp\u003eUnlike Facebook\u0026rsquo;s congeniality, \u003ca href=\"#twitter-creates-asymmetric-non-friend-dynamics\"\u003eTwitter creates asymmetric, non-friend dynamics\u003c/a\u003e. Though it shows \u003ca href=\"#debate-exists-between-whether-twitter-creates-or-dismantles-homogenization-and-therefore-polarization\"\u003ethat it helps dismantle some echo chambers, its not conclusive\u003c/a\u003e. Yarachi notes this as \u003ca href=\"#there-s-a-difference-between-filter-bubbles-and-increased-polarization\"\u003ethe difference between filter bubbles and increased polarization\u003c/a\u003e. 
A \u0026ldquo;filter bubble\u0026rdquo; itself isn\u0026rsquo;t polarization, so what is?\u003c/p\u003e\n\u003cp\u003eDubois solves this mystery by arguing that it is \u003ca href=\"#people-who-are-actually-disinterested-in-politics-are-in-an-echo-chamber\"\u003epeople who are actually \u003cstrong\u003edisinterested\u003c/strong\u003e in politics are in an echo chamber\u003c/a\u003e. Through \u003ca href=\"#consumption-of-mixed-media-results-in-incidental-exposure-to-a-variety-of-news-sources\"\u003econsumption of mixed media results in incidental exposure to a variety of news sources\u003c/a\u003e one has to \u003ca href=\"#increased-involvement-in-politics-results-actually-in-less-echo-chamber\"\u003eparticipate\u003c/a\u003e in the conversation to get out the echo chamber.\u003c/p\u003e\n\u003cp\u003eMeaning, \u003ca href=\"#high-choice-in-media-doesn-t-mean-a-high-degree-of-ability-to-reconsiliate\"\u003ehigh choice in media itself (i.e. having facebook AND twitter) doesn\u0026rsquo;t mean a high degree of ability to reconsiliate\u003c/a\u003e. 
It is \u003ca href=\"#diversity-in-media-and-engagement-in-politics-matters\"\u003ediversity in media AND engagement in politics matters\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"other-notes\"\u003eother notes\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#those-who-are-politically-aware-are-going-to-encounter-more-perspectives\"\u003ethose who are politically aware are going to encounter more perspectives\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#people-with-strong-partisanship-report-consuming-a-diverse-media-digest\"\u003epeople with strong partisanship report consuming a diverse media digest\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"by-using-polarized-language-to-target-only-politically-active-users-trolls-essentially-disrupt-the-ability-to-dismantle-echo-chambers\"\u003eBy using polarized language to target only politically active users, trolls essentially disrupt the ability to dismantle echo chambers\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#interaction-with-trolls-didn-t-seem-to-change-partisanship\"\u003eInteraction with trolls didn\u0026rsquo;t seem to change partisanship\u003c/a\u003e, yet previous work establishes that a \u003ca href=\"#small-amount-of-trolls-can-polarize-lots-of-people\"\u003esmall amount of trolls can polarize lots of people\u003c/a\u003e\u0026mdash;so the manner by which trolls work is confusing.\u003c/p\u003e\n\u003cp\u003eSimchon notes that \u003ca href=\"#russian-trolls-used-more-polarized-language\"\u003eRussian trolls used more polarized language\u003c/a\u003e. 
Our previous analysis concludes that political activism is an important and inseperable part of breaking an echo chamber; trolls, then take advantage of this fact to disrupt the process of breaking away from polarization by \u003ca href=\"#polarized-language-results-in-greater-engagement-but-only-to-trolls-and-politically-engaged-users\"\u003ecapturing \u003cstrong\u003ealready politically active\u003c/strong\u003e users, which trolls take part.\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"bin\"\u003eBIN\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#polarization-distorts-beliefs-about-others\"\u003epolarization distorts beliefs about others\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#one-type-of-polarization-is-where-people-disengage-with-those-with-opposite-views\"\u003eone type of polarization is where people disengage with those with opposite views\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#in-the-uk-going-to-a-news-source-like-bbc-is-still-more-common\"\u003ein the UK, going to a news source like BBC is still more common\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#cite-defines-social-media\"\u003eCITE: defines social media\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#already-known-groups-typically-bring-strong-agreements-and-strangers-typically-bring-disperate-views\"\u003ealready known groups typically bring strong agreements, and strangers typically bring disperate views\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#cannot-study-only-one-social-media-as-they-have-different-properties\"\u003ecannot study only one social media as they have different properties\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#twitter-is-an-isolated-slice-of-the-population\"\u003eTwitter is an isolated slice of the population\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#studies-don-t-study-the-aggregate-effect-of-diverse-media\"\u003estudies don\u0026rsquo;t study 
the aggregate effect of diverse media\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#political-science-generally-believes-that-political-polarization-and-citizen-polarization-is-different\"\u003epolitical science generally believes that political polarization and citizen polarization is different\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#distinction-between-polarization-on-issues-vs-dot-polarization-of-anger\"\u003edistinction between polarization on issues vs. polarization of anger\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#people-perceive-higher-polarization-than-actually-are-present-due-to-use-of-generics\"\u003elanguage induces perception about polarization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#foreign-agents-increase-in-their-polarization-and-posting-frequency\"\u003eforeign agents increase in their polarization and posting frequency\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#partisanship-switches-to-minority-party-when-the-house-switches-control\"\u003epartisanship is constantly switching\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#political-polarization-and-its-prevalence-has-been-discussed-since-founding-of-the-country\"\u003epolarization is a long-standing topic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr1_texts_in_conversation/","tags":null,"title":"PWR1 Texts in Conversation Planning"},{"categories":null,"contents":"One alpha vector per action:\n\\begin{equation} \\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) \\max_{a\u0026rsquo;} \\alpha^{(k)}_{a\u0026rsquo;} (s\u0026rsquo;) \\end{equation}\nThis is going to give you a set of alpha vectors, one corresponding to each action.\ntime complexity: \\(O(|S|^{2}|A|^{2})\\)\nyou will note we don\u0026rsquo;t ever actually use anything partially-observable in this. 
Once we get the alpha vector, we need to use one-step lookahead in POMDP (which does use transitions) to actually turn this alpha vector into a policy, which then does create you\nWe can deal with continuous state space by using some estimation of the value function (instead of alpha-vectors, we will just use a value-function estimate like q learning)\n","html":"\u003cp\u003eOne \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e per action:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) \\max_{a\u0026rsquo;} \\alpha^{(k)}_{a\u0026rsquo;} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is going to give you a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, one corresponding to each action.\u003c/p\u003e\n\u003cp\u003etime complexity: \\(O(|S|^{2}|A|^{2})\\)\u003c/p\u003e\n\u003cp\u003eyou will note we don\u0026rsquo;t ever actually use anything partially-observable in this. 
Once we get the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e, we need to use \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e (which does use transitions) to actually turn this \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e into a policy, which then does create\nyou\u003c/p\u003e\n\u003cp\u003eWe can deal with continuous state space by using some estimation of the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e (instead of alpha-vectors, we will just use a value-function estimate like q learning)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhqmdp/","tags":null,"title":"QMDP"},{"categories":null,"contents":"system does not work as well for one type/group of people compared to another\ntraining data really does matter: it may make generalized predictions based on a majority/minority class.\nBecause IID characteristic of input data, the majority will be over represented\n","html":"\u003cp\u003esystem \u003cstrong\u003edoes not work as well\u003c/strong\u003e for one type/group of people compared to another\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-12-01_15-56-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003etraining data really does matter: it may make generalized predictions based on a majority/minority class.\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e characteristic of input data, the majority will be over represented\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquality_of_service_harm/","tags":null,"title":"quality of service harm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcorrelation/","tags":null,"title":"quantum correlation"},{"categories":null,"contents":" 
Viewing computational linguistics from the length across linear algebra and linear structure Quantum algorithms and the necessary infra were being developed; and in the 2010s programmable quantum computers became showing up Quantum is done over the complexes, which makes the normal linguistics done with the reals more powerful.\nwant to infer the probability distribution of words based on their letters\nLinearity breaks down: letter combinations in not commutative; and P(letter C) + P(letter A) != P(letters CA) instead of encoding letters as one-hot vectors; we encode these letters with matrices: adds more dimensions\nimmediate benefits: noncommutivity of matricies is a PLUS words is just the composed results into another 2x2 matricies then, to map into probability distrubtion, we map the matrix into a partial trace things create bounds from the problem: letters\nimprove upon optimization scheme in a quantum rhelm\nimplement this scheme on a quantum computer: https://arxiv.org/pdf/1710.10248.pdf\ntask: NTJ reading; come up with the needed novelty\n","html":"\u003cul\u003e\n\u003cli\u003eViewing computational linguistics from the length across linear algebra and linear structure\u003c/li\u003e\n\u003cli\u003eQuantum algorithms and the necessary infra were being developed; and in the 2010s programmable quantum computers became showing up\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eQuantum is done over the complexes, which makes the normal linguistics done with the reals more powerful.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ewant to infer the probability distribution of words based on their letters\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLinearity breaks down: letter combinations in not commutative; and P(letter C) + P(letter A) != P(letters CA)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003einstead of encoding letters as one-hot vectors; we encode these letters with matrices: adds more 
dimensions\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eimmediate benefits:\n\u003cul\u003e\n\u003cli\u003enoncommutivity of matricies is a PLUS\u003c/li\u003e\n\u003cli\u003ewords is just the composed results into another 2x2 matricies\u003c/li\u003e\n\u003cli\u003e\n\u003ch2 id=\"then-to-map-into-probability-distrubtion-we-map-the-matrix-into-a-partial-trace\"\u003ethen, to map into probability distrubtion, we map the matrix into a partial trace\u003c/h2\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things\"\u003ethings\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ecreate bounds from the problem: letters\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eimprove upon optimization scheme in a quantum rhelm\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eimplement this scheme on a quantum computer: \u003ca href=\"https://arxiv.org/pdf/1710.10248.pdf\"\u003ehttps://arxiv.org/pdf/1710.10248.pdf\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003etask: NTJ reading; come up with the needed novelty\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantum_group_project/","tags":null,"title":"Quantum Group Project"},{"categories":null,"contents":"The information theory computational model behind quantum theory. It proposes quantum computers, proposed during the 80s. Theoretically, quantum computers have quantum supremacy, which is exciting. 
It is a theory that works with counterfactual information.\nquantum computer A quantum computer is a computer that uses quantum effects to perform Turing-like computations\nquantum supremacy That a quantum computer outperforms all classical computers\nuniversal computer \u0026ldquo;a programmable system whose repertoire includes all physically possible computations\u0026rdquo; \u0026mdash; Turing.\nYou will realize that modern computers are not actually capable of all computations\u0026mdash;apparently, they can\u0026rsquo;t make itself.\nTherefore, to actually achieve this, we have to make a more general type of computer: a constructor \u0026mdash; a universal quantum constructor.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhinformation_theory/\"\u003einformation theory\u003c/a\u003e computational model behind \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e. It proposes \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003es, proposed during the 80s. Theoretically, \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003es have \u003ca href=\"#quantum-supremacy\"\u003equantum supremacy\u003c/a\u003e, which is exciting. 
It is a theory that works with \u003ca href=\"/posts/kbhcounterfactual/\"\u003ecounterfactual\u003c/a\u003e information.\u003c/p\u003e\n\u003ch2 id=\"quantum-computer\"\u003equantum computer\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003e is a computer that uses \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum effects\u003c/a\u003e to perform Turing-like computations\u003c/p\u003e\n\u003ch2 id=\"quantum-supremacy\"\u003equantum supremacy\u003c/h2\u003e\n\u003cp\u003eThat a \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003e outperforms all classical computers\u003c/p\u003e\n\u003ch2 id=\"universal-computer\"\u003euniversal computer\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;a programmable system whose repertoire includes all physically possible computations\u0026rdquo; \u0026mdash; Turing.\u003c/p\u003e\n\u003cp\u003eYou will realize that modern computers are not actually capable of all computations\u0026mdash;apparently, they can\u0026rsquo;t make itself.\u003c/p\u003e\n\u003cp\u003eTherefore, to actually achieve this, we have to make a more general type of computer: a \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003e \u0026mdash; a \u003ca href=\"/posts/kbhuniversal_quantum_constructor/\"\u003euniversal quantum constructor.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantum_information_theory/","tags":null,"title":"quantum information theory"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhquantum_supremecy/","tags":null,"title":"quantum supremecy"},{"categories":null,"contents":"quantum theory allows us to understand physics; it reconciliations the classical world with the quantum world.\nClassical particles, in the double slit experiment, would just straight go through and bounce off Actual particles (quantum) like light, under quantum theory, would actually exhibit interference via 
wave-like hebahior The measurement of quantum theory is done via quantum information theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e allows us to understand \u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e; it reconciliations the classical world with the quantum world.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eClassical particles, in the \u003ca href=\"/posts/kbhdouble_slit_experiment/\"\u003edouble slit experiment\u003c/a\u003e, would just straight go through and bounce off\u003c/li\u003e\n\u003cli\u003eActual particles (quantum) like light, under \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e, would actually exhibit interference via wave-like hebahior\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe measurement of \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e is done via \u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantum_theory/","tags":null,"title":"quantum theory"},{"categories":null,"contents":"A little endeavor to learn about Lambek Calculus, quantum information theory, and linguistics I guess.\nCourses to Take for QNLP\nCategorical Grammars Index\n","html":"\u003cp\u003eA little endeavor to learn about \u003ca href=\"/posts/kbhlambek_calculus/\"\u003eLambek Calculus\u003c/a\u003e, \u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e, and linguistics I guess.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcourses_to_take_for_qnlp/\"\u003eCourses to Take for QNLP\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcategorical_grammars_index/\"\u003eCategorical Grammars Index\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantumnlp/","tags":null,"title":"QuantumNLP Index"},{"categories":null,"contents":"A 
qubit is a two-layer quantum theory system.\nA classical bit is something that can be set between two values, a qubit between a much higher dimension.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e is a two-layer \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e system.\u003c/p\u003e\n\u003cp\u003eA classical bit is something that can be set between two values, a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e between a much higher dimension.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhqubits/","tags":null,"title":"qubit"},{"categories":null,"contents":"a quotient group is a group which is the product of mapping things out.\nsubgroups The set of integers \\(\\mathbb{Z}\\) is obviously a group. You can show it to yourself that multiples of any number in the group is a subgroup of that group.\nFor instance:\n\\(3 \\mathbb{Z}\\), the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) is a subgroup\nactual quotient groups We can use the subgroup above to mask out a group. The resulting product is NOT a subgroup, but its a new group with individual elements being subsets of our original group.\nFor instance, the \\(\\mod 3\\) quotient group is written as:\n\\begin{equation} \\mathbb{Z}} / 3 \\mathbb{Z} \\end{equation}\nEach element in this new group is a set; for instance, in \\(\\mathbb{Z} / 3\\mathbb{Z}\\), \\(0\\) is actually the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) (i.e. the subgroup that we were masking by). Other elements in the quotient space (\u0026ldquo;1\u0026rdquo;, a.k.a. \\(\\{ \\dots, -2, 1, 4, 7 \\dots \\}\\), or \u0026ldquo;2\u0026rdquo;, a.k.a. \\(\\{\\dots, -1, 2, 5, 8 \\dots \\}\\)) are called \u0026ldquo;cosets\u0026rdquo; of \\(3 \\mathbb{Z}\\). 
You will notice they are not a subgroups.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003e is a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e which is the product of mapping things out.\u003c/p\u003e\n\u003ch2 id=\"subgroup--kbhsubgroup-dot-md--s\"\u003e\u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eThe set of integers \\(\\mathbb{Z}\\) is obviously a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e. You can show it to yourself that multiples of any number in the \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e of that group.\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cp\u003e\\(3 \\mathbb{Z}\\), the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) is a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"actual-quotient-group--kbhquotient-group-dot-md--s\"\u003eactual \u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eWe can use the \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e above to mask out a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e. The resulting product is \u003cstrong\u003eNOT\u003c/strong\u003e a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e, but its a new \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e with individual elements being subsets of our original group.\u003c/p\u003e\n\u003cp\u003eFor instance, the \\(\\mod 3\\) quotient group is written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Z}} / 3 \\mathbb{Z}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach element in this new group is a set; for instance, in \\(\\mathbb{Z} / 3\\mathbb{Z}\\), \\(0\\) is actually the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) (i.e. 
the \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e that we were masking by). Other elements in the quotient space (\u0026ldquo;1\u0026rdquo;, a.k.a. \\(\\{ \\dots, -2, 1, 4, 7 \\dots \\}\\), or \u0026ldquo;2\u0026rdquo;, a.k.a. \\(\\{\\dots, -1, 2, 5, 8 \\dots \\}\\)) are called \u0026ldquo;cosets\u0026rdquo; of \\(3 \\mathbb{Z}\\). You will notice they are not a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_group/","tags":null,"title":"quotient group"},{"categories":null,"contents":"The quotient map \\(\\pi\\) is the Linear Map \\(V \\to V / U\\) such that:\n\\begin{equation} \\pi(v) = v+U \\end{equation}\nfor \\(v \\in V\\).\nI.e.: the quotient map is affine subsetification map given a vector.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e \\(\\pi\\) is the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(V \\to V / U\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(v) = v+U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eI.e.: the \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e is \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003eification map given a vector.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_map/","tags":null,"title":"quotient map"},{"categories":null,"contents":"Suppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an invariant subspace under \\(T\\). Then:\n\\begin{equation} (T / U)(v+U) = Tv+U, \\forall v \\in V \\end{equation}\nwhere \\(T / U \\in \\mathcal{L}(V / U)\\)\n\u0026ldquo;if you can operator on \\(V\\), you can operator on \\(V / U\\) in the same way.\u0026rdquo; Yes I just verbed operator.\nquotient operator is well-defined Why is this not possible for any subspace of \\(V\\)? 
This is because we need \\(T\\) to preserve the exact structure of the subspace we are quotienting out by; otherwise our affine subset maybe squished to various unexpected places. The technical way to show that this is well-defined leverages the property of two affine subsets being equal:\nSuppose \\(v +U = w+U\\), we desire that \\(T / U (v+U) = T / U (w+U)\\). That is, we desire that \\(Tv +U = Tw +U\\).\nIf \\(v+U = w+U\\) , then, \\(v-w \\in U\\). Now, this means that \\(T(v-w) \\in U\\) only because \\(U\\) is invariant under \\(T\\) (otherwise it could be sent to anywhere in \\(V\\) as \\(T \\in \\mathcal{L}(V)\\) not \\(\\mathcal{L}(U)\\)). Therefore, \\(Tv-Tw \\in U\\), and so \\(Tv +U = Tw+U\\), as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e under \\(T\\). Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(T / U)(v+U) = Tv+U, \\forall v \\in V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(T / U \\in \\mathcal{L}(V / U)\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;if you can \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(V\\), you can \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(V / U\\) in the same way.\u0026rdquo; Yes I just verbed \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"quotient-operator--kbhquotient-operator-dot-md--is-well-defined\"\u003e\u003ca href=\"/posts/kbhquotient_operator/\"\u003equotient operator\u003c/a\u003e is well-defined\u003c/h2\u003e\n\u003cp\u003eWhy is this not possible for any subspace of \\(V\\)? 
This is because we need \\(T\\) to preserve the exact structure of the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e we are quotienting out by; otherwise our \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e maybe squished to various unexpected places. The technical way to show that this is well-defined leverages the property of two \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es being equal:\u003c/p\u003e\n\u003cp\u003eSuppose \\(v +U = w+U\\), we desire that \\(T / U (v+U) = T / U (w+U)\\). That is, we desire that \\(Tv +U = Tw +U\\).\u003c/p\u003e\n\u003cp\u003eIf \\(v+U = w+U\\) , then, \\(v-w \\in U\\). Now, this means that \\(T(v-w) \\in U\\) \u003cstrong\u003eonly because \\(U\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\)\u003c/strong\u003e (otherwise it could be sent to anywhere in \\(V\\) as \\(T \\in \\mathcal{L}(V)\\) not \\(\\mathcal{L}(U)\\)). Therefore, \\(Tv-Tw \\in U\\), and so \\(Tv +U = Tw+U\\), as desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_operator/","tags":null,"title":"quotient operator"},{"categories":null,"contents":"A quotient space is the set of all affine subsets of \\(V\\) parallel to some subspace \\(U\\). 
This should be reminiscent of quotient groups.\nconstituents vector space \\(V\\) a subspace \\(U \\subset V\\) requirements \\begin{equation} V / U = \\{v+U : v \\in V \\} \\end{equation}\nadditional information operations on quotient space Addition and scalar multiplication on the quotient space is defined in the expected way:\ngiven \\((v+U), (w+U) \\in V / U\\), and \\(\\lambda \\in \\mathbb{F}\\):\n\\begin{equation} \\begin{cases} (v+U) + (w+U) = ((v+w)+U) \\\\ \\lambda (v+U) = ((\\lambda v)+U) \\end{cases} \\end{equation}\nquotient space operations behave uniformly on equivalent affine subsets The tricky thing about quotient space operations is that there are multiple ways of representing a single affine subset parallel to \\(U\\); the one-liner about this is that if you think about shifting a parallel line with a vector: shifting the line along any perpendicular vector to the line with the same magnitude will get you the same shifted line.\nFor the operations above to work, we have to make sure that they behave in the same way on distinct representations of the same affine subset, which we endeavor to proof here:\nSuppose we have \\(v,w \\in V\\), \\(v\u0026rsquo;,w\u0026rsquo; \\in V\\), and that \\(v+U = v\u0026rsquo;+U\\); \\(w+U = w\u0026rsquo;+U\\). We desire that the operations above behave the same way to any addition groupings: that WLOG \\((v+U)+(w+U) = (v\u0026rsquo;+U)+(w\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\).\nBy the fact that two affine subsets parallel to \\(U\\) are either equal or disjoint, we have that \\(v-v\u0026rsquo;, w-w\u0026rsquo; \\in U\\). And so, \\((v-v\u0026rsquo;)+(w-w\u0026rsquo;) \\in U\\). Commuting these things under \\(V\\), we now have that \\((v+w)-(v\u0026rsquo;+w\u0026rsquo;) \\in U\\). Therefore, invoking the same result again, \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\), as desired.\nThe same logic can be used for scalar multiplication. 
Suppose we have \\(v, v\u0026rsquo; \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and that \\(v+U = v\u0026rsquo;+U\\). We desire that WLOG \\(\\lambda (v+U) = \\lambda (v\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((\\lambda v)+U = (\\lambda v\u0026rsquo;)+U\\).\nAgain invoking the two affine subsets parallel to \\(U\\) are either equal or disjoint result, we have that \\(v-v\u0026rsquo; \\in U\\). Now, this means that \\(\\lambda (v-v\u0026rsquo;) = \\lambda v-\\lambda v\u0026rsquo; \\in U\\) because closure of scalar multiplication in \\(U\\). Invoking the result again, we now have that \\(\\lambda v + U = \\lambda v\u0026rsquo; +U\\), as desired.\nHaving shown both operations make sense, we can declare that they make sense indeed. \\(\\blacksquare\\)\nquotient space is a vector space Given the name! (jk)\nBleh I give up just prove it yourself given the above operations and the fact that the additive identity is \\(0+U = U\\), the additive inverse is \\(-v+U\\).\n\u0026ldquo;instead of the elements single vectors, we fuse the whole affine subset together. instead of counting the contents, we count the bucket.\u0026rdquo;\ndimension of a quotient space is the difference between dimensions of its constituents that is,\n\\begin{equation} \\dim V / U = \\dim V - \\dim U \\end{equation}\nfor finite dimensional \\(V\\).\nProof:\nLet \\(\\pi: V \\to V /U\\). By definition, \\(null\\ \\pi =U\\); and, given the input is any \\(v \\in V\\), \\(range\\ \\pi = V / U\\). rank-nullity theorem then tells us that:\n\\begin{equation} \\dim V = \\dim U + \\dim V / U \\end{equation}\nnow subtract and get \\(\\dim V /U\\) by itself. 
\\(\\blacksquare\\)\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is the set of all \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es of \\(V\\) \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e to some \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U\\). This should be reminiscent of \u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U \\subset V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nV / U = \\{v+U : v \\in V \\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"operations-on-quotient-space--kbhquotient-space-dot-md\"\u003eoperations on \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadding/\"\u003eAddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e on the \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is defined in the expected way:\u003c/p\u003e\n\u003cp\u003egiven \\((v+U), (w+U) \\in V / U\\), and \\(\\lambda \\in \\mathbb{F}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n(v+U) + (w+U) = ((v+w)+U) \\\\\n\\lambda (v+U) = ((\\lambda v)+U)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"quotient-space--kbhquotient-space-dot-md--operations-behave-uniformly-on-equivalent-affine-subset--kbhparallel-linear-algebra-dot-md--s\"\u003e\u003ca 
href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e operations behave uniformly on equivalent \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003eThe tricky thing about \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es is that there are multiple ways of representing a single \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e parallel to \\(U\\); the one-liner about this is that if you think about shifting a parallel line with a vector: shifting the line along \u003cstrong\u003eany\u003c/strong\u003e perpendicular vector to the line with the same magnitude will get you the same shifted line.\u003c/p\u003e\n\u003cp\u003eFor the \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es above to work, we have to make sure that they behave in the same way on distinct representations of the same \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e, which we endeavor to proof here:\u003c/p\u003e\n\u003cp\u003eSuppose we have \\(v,w \\in V\\), \\(v\u0026rsquo;,w\u0026rsquo; \\in V\\), and that \\(v+U = v\u0026rsquo;+U\\); \\(w+U = w\u0026rsquo;+U\\). We desire that the operations above behave the same way to any addition groupings: that WLOG \\((v+U)+(w+U) = (v\u0026rsquo;+U)+(w\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\).\u003c/p\u003e\n\u003cp\u003eBy the fact that \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e, we have that \\(v-v\u0026rsquo;, w-w\u0026rsquo; \\in U\\). And so, \\((v-v\u0026rsquo;)+(w-w\u0026rsquo;) \\in U\\). 
Commuting these things under \\(V\\), we now have that \\((v+w)-(v\u0026rsquo;+w\u0026rsquo;) \\in U\\). Therefore, invoking the same result again, \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\), as desired.\u003c/p\u003e\n\u003cp\u003eThe same logic can be used for scalar multiplication. Suppose we have \\(v, v\u0026rsquo; \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and that \\(v+U = v\u0026rsquo;+U\\). We desire that WLOG \\(\\lambda (v+U) = \\lambda (v\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((\\lambda v)+U = (\\lambda v\u0026rsquo;)+U\\).\u003c/p\u003e\n\u003cp\u003eAgain invoking the \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e result, we have that \\(v-v\u0026rsquo; \\in U\\). Now, this means that \\(\\lambda (v-v\u0026rsquo;) = \\lambda v-\\lambda v\u0026rsquo; \\in U\\) because \u003ca href=\"/posts/kbhclosed/\"\u003eclosure\u003c/a\u003e of \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e in \\(U\\). Invoking the result again, we now have that \\(\\lambda v + U = \\lambda v\u0026rsquo; +U\\), as desired.\u003c/p\u003e\n\u003cp\u003eHaving shown both operations make sense, we can declare that they make sense indeed. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"quotient-space--kbhquotient-space-dot-md--is-a-vector-space--kbhvector-space-dot-md\"\u003e\u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eGiven the name! 
(jk)\u003c/p\u003e\n\u003cp\u003eBleh I give up just prove it yourself given the above operations and the fact that the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e is \\(0+U = U\\), the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e is \\(-v+U\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;instead of the elements single vectors, we fuse the whole \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e together. instead of counting the contents, we count the bucket.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"dimension-of-a-quotient-space-is-the-difference-between-dimensions-of-its-constituents\"\u003edimension of a quotient space is the difference between dimensions of its constituents\u003c/h3\u003e\n\u003cp\u003ethat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V / U = \\dim V - \\dim U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor finite dimensional \\(V\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet \\(\\pi: V \\to V /U\\). By definition, \\(null\\ \\pi =U\\); and, given the input is any \\(v \\in V\\), \\(range\\ \\pi = V / U\\). \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e then tells us that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V = \\dim U + \\dim V / U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow subtract and get \\(\\dim V /U\\) by itself. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_space/","tags":null,"title":"quotient space"},{"categories":null,"contents":"Most high-school science programs have a strong focus on scientific theory and do not train students to conduct independent research. Previous work has demonstrated the efficacy of a mentor-supported, student-driven teaching program to effectively introduce research-specific skills in a classroom context. 
Despite the effectiveness of such programs, their class-based formats and requirements for multiple full-time faculty mentors limit their throughput, and the finite expertise of full-time mentors requires participants to focus on specific research subjects.\nTo address these limitations, we introduce R@N, an extracurricular, student-led, and student-driven program for the independent acquisition of research-specific skills through the self-guided completion of a series of formative checkpoints (“nodes”) for mastery. Students in the program can choose specific subsets of nodes to be trained in research in subjects of their interest. The program is developed and moderated by a small team of students in consultation with skill-specific faculty mentors through regular meetings. Students meet weekly to create, update, and revise nodes in collaboration with mentors in order to enable and supplement the learning of students participating in the program.\nThe program offers a few key results: it electively allows the student body (approximately 400 in our institution) to asynchronously acquire the skills of independent research and enables a group of around 20 students to develop and codify tools and skills for research pedagogy. The program can be sustained with limited faculty involvement, requiring one dedicated faculty mentor working in conjunction with a larger pool of research mentors who commit around 2 hours per month.\n","html":"\u003cp\u003eMost high-school science programs have a strong focus on scientific theory and do not train students to conduct independent research. Previous work has demonstrated the efficacy of a mentor-supported, student-driven teaching program to effectively introduce research-specific skills in a classroom context. 
Despite the effectiveness of such programs, their class-based formats and requirements for multiple full-time faculty mentors limit their throughput, and the finite expertise of full-time mentors requires participants to focus on specific research subjects.\u003c/p\u003e\n\u003cp\u003eTo address these limitations, we introduce R@N, an extracurricular, student-led, and student-driven program for the independent acquisition of research-specific skills through the self-guided completion of a series of formative checkpoints (“nodes”) for mastery. Students in the program can choose specific subsets of nodes to be trained in research in subjects of their interest. The program is developed and moderated by a small team of students in consultation with skill-specific faculty mentors through regular meetings. Students meet weekly to create, update, and revise nodes in collaboration with mentors in order to enable and supplement the learning of students participating in the program.\u003c/p\u003e\n\u003cp\u003eThe program offers a few key results: it electively allows the student body (approximately 400 in our institution) to asynchronously acquire the skills of independent research and enables a group of around 20 students to develop and codify tools and skills for research pedagogy. The program can be sustained with limited faculty involvement, requiring one dedicated faculty mentor working in conjunction with a larger pool of research mentors who commit around 2 hours per month.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhr_n_abstract/","tags":null,"title":"R@N Abstract"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhr_n_meeting_with_angi/","tags":null,"title":"R@N Meeting with Angi"},{"categories":null,"contents":"Let\u0026rsquo;s compute what \\(e^{tA}\\) should look like, where \\(t\\) is some scalar and \\(A\\) is a diagonalizable matrix. 
This is a supplement to Second-Order Linear Differential Equations.\nLet \\(v_1\\dots v_{m}\\) be the eigenvectors of \\(A\\). Let \\(\\lambda_{1}\\dots\\lambda_{m}\\) be the eigenvalues.\nRecall that we can therefore diagonalize \\(A\\) as:\n\\begin{equation} A = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nread: change of choordinates into the eigenbases, scale by the eigenvalues, then change back to normal choordinates.\nNow, imagine if we are multiplying \\(A\\) by itself manymany times; what will that look like?\n\\begin{equation} A^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots \\end{equation}\nThe middle parts, nicely, cancels out! Its a matrix applied to its inverse! So, we get rid of it\n\\begin{equation} A^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots \\end{equation}\nNow, we are multiplying diagonal matricies against itself! If you work out the mechanics of matrix multiplication, you will note that each element simply gets scaled to higher powers (the matricies are diagonal!)! So then, we have:\n\\begin{equation} A^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{n}, \\dots, {\\lambda_{m}}^{n}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nNice.\nRecall also the Tayler expasion of \\(e^{x}\\); we will apply it to to \\(e^{tA}\\):\n\\begin{equation} e^{tA} = \\sum_{k=0}^{\\infty} \\frac{1}{k!}(tA)^{k} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}A^{k} \\end{equation}\nOk. 
We now apply our definition of \\(A^{n}\\) derived above:\n\\begin{equation} e^{tA} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nSee now that \\(\\mqty(v_1 \u0026amp; \\dots \u0026amp;v_{m})\\) and its inverse is both constant in the sum, so we take it out:\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\qty(\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} \\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}}))\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nAnd now, the actual mechanics of adding a matrix is just adding it elementwise, so we will put the summations into the matrix:\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{1}}^{k}, \\dots, \\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nNote now that each value in that matrix is just the Tayler expansion of \\(e^{k_{\\lambda_{j}}}\\) (take a moment to pause if this is not immediately obvious; think about what each element in that diagonal matrix look like and what the Tayler polynomial \\(e^{x}\\) should look like. Perhaps what some arbitrary \\(e^{ab}\\) should looks like.\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\n","html":"\u003cp\u003eLet\u0026rsquo;s compute what \\(e^{tA}\\) should look like, where \\(t\\) is some scalar and \\(A\\) is a diagonalizable matrix. 
This is a supplement to \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet \\(v_1\\dots v_{m}\\) be the eigenvectors of \\(A\\). Let \\(\\lambda_{1}\\dots\\lambda_{m}\\) be the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eRecall that we can therefore diagonalize \\(A\\) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eread: change of choordinates into the eigenbases, scale by the eigenvalues, then change back to normal choordinates.\u003c/p\u003e\n\u003cp\u003eNow, imagine if we are multiplying \\(A\\) by itself manymany times; what will that look like?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe middle parts, nicely, cancels out! Its a matrix applied to its inverse! So, we get rid of it\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we are \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplying\u003c/a\u003e diagonal \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e against itself! 
If you work out the mechanics of \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e, you will note that each element simply gets scaled to higher powers (the \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are diagonal!)! So then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{n}, \\dots, {\\lambda_{m}}^{n}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNice.\u003c/p\u003e\n\u003cp\u003eRecall also the Tayler expasion of \\(e^{x}\\); we will apply it to to \\(e^{tA}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\sum_{k=0}^{\\infty} \\frac{1}{k!}(tA)^{k} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}A^{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk. We now apply our definition of \\(A^{n}\\) derived above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee now that \\(\\mqty(v_1 \u0026amp; \\dots \u0026amp;v_{m})\\) and its inverse is both constant in the sum, so we take it out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\qty(\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} \\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}}))\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, the actual mechanics of adding a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is just adding it elementwise, so we will put the summations into the matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; 
v_{m})\\mqty(\\dmat{\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{1}}^{k}, \\dots, \\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote now that each value in that matrix is just the Tayler expansion of \\(e^{k_{\\lambda_{j}}}\\) (take a moment to pause if this is not immediately obvious; think about what each element in that diagonal matrix look like and what the Tayler polynomial \\(e^{x}\\) should look like. Perhaps what some arbitrary \\(e^{ab}\\) should looks like.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhraising_e_to_a_matrix/","tags":null,"title":"raising e to a matrix"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrandom/","tags":null,"title":"random"},{"categories":null,"contents":"A random variable is a variable that has a value, but there are uncertainty with respect to what that value is.\ndiscrete: finite number of values continuous: infinitely many possible values probability mass function A discrete random variable is encoded as a probability mass function\nprobability density function A continuous random variable is represented as a probability density function.\nsummary statistics probability mass function is a description for the random variable: and random variables are usually communicated via probability mass functions expected value adding random variables \u0026ldquo;what\u0026rsquo;s the probability of \\(X + Y = n\\) with IID \\(X\\) and \\(Y\\)?\u0026rdquo; \u0026ldquo;what\u0026rsquo;s the probability of two independent samples from the same exact distribution adding up to \\(n\\)?\u0026rdquo;\n\\begin{equation} \\sum_{i=-\\infty}^{\\infty} 
P(X=i, Y=n-i) \\end{equation}\nor integrals and PDFs, as appropriate for continuous cases\nfor every single outcome, we want to create every possible operation which causes the two variables to sum to \\(n\\).\nWe can use convolution to figure out every combination of assignments to random variables which add to a value, and sum their probabilities together.\nadding binomial distribution adding Gaussian distributions adding poisson distribution If you add a bunch of IID things together\u0026hellip;. central limit theorem\naveraging random variables adding random variables + linear transformers on Gaussian\nYou end up with:\n\\begin{equation} \\mathcal{N}\\qty(\\mu, \\frac{1}{n} \\sigma^{2}) \\end{equation}\nyou note: as you sum together many things that is IID, the average is pretty the same; but the variance gets smaller as you add more.\nmaxing random variables Gumbel distribution: fisher tripplett gedembo theorem???\nsampling statistics We assume that there\u0026rsquo;s some underlying distribution with some true mean \\(\\mu\\) and true variance \\(\\sigma^{2}\\). 
We would like to model it with some confidence.\nConsider a series of measured samples \\(x_1, \u0026hellip;, x_{n}\\), each being an instantiation of a IID random variable drawn from the underlying distribution each being \\(X_1, \u0026hellip;, X_{n}\\).\nsample mean Let us estimate the true population mean\u0026hellip; by creating a random variable representing the the averaging \\(n\\) measured random variables representing the observations:\n\\begin{equation} \\bar{X} = \\frac{1}{N} \\sum_{i=1}^{n} X_{i} \\end{equation}\nwe can do this because we really would like to know \\(\\mathbb{E}[\\bar{X}] = \\mathbb{E}[\\frac{1}{N} \\sum_{i=1}^{n} X_i] = \\frac{1}{N}\\sum_{i=1}^{n} \\mathbb{E}[X_{i}] = \\frac{1}{N} N \\mu = \\mu\\) and so as long as each of the underlying variables have the same expected mean (they do because IID) drawn, we can use the sample mean to estimate the population mean.\nsample variance We can\u0026rsquo;t just calculate the sample variance with the variance of the sample. This is because the sample mean will be by definition by closer to each of the sampled points than the actual value. So we correct for it. 
This is a random variable too:\n\\begin{equation} S^{2} = \\frac{1}{n-1} \\sum_{i=1}^{N} (X_{i} - \\bar{X})^{2} \\end{equation}\nstandard error of the mean \\begin{equation} Var(\\bar{X}) = \\frac{S^{2}}{n} \\end{equation}\nthis is the ERROR OF the mean given what you measured because of the central limit theorem\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e is a variable that has a value, but there are \u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e with respect to what that value is.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ediscrete\u003c/strong\u003e: finite number of values\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econtinuous\u003c/strong\u003e: infinitely many possible values\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"probability-mass-function--kbhprobability-mass-function-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eA discrete random variable is encoded as a \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"probability-density-function--kbhprobability-distributions-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eA continuous \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e is represented as a \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"summary-statistics\"\u003esummary statistics\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e is a description for the random variable: and \u003ca 
href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es are usually communicated via \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"adding-random-variables\"\u003eadding random variables\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability of \\(X + Y = n\\) with \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e \\(X\\) and \\(Y\\)?\u0026rdquo;\n\u0026ldquo;what\u0026rsquo;s the probability of two independent samples from the same exact distribution adding up to \\(n\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=-\\infty}^{\\infty} P(X=i, Y=n-i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor integrals and \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003es, as appropriate for \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e cases\u003c/p\u003e\n\u003cp\u003efor every single outcome, we want to create every possible operation which causes the two variables to sum to \\(n\\).\u003c/p\u003e\n\u003cp\u003eWe can use \u003ca href=\"#adding-random-variables\"\u003econvolution\u003c/a\u003e to figure out every combination of assignments to random variables which add to a value, and sum their probabilities together.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinomial_distribution/#adding-id-6ef4a641-135c-45f5-9c71-efd1fe34166c-binomial-distribution\"\u003eadding binomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgaussian_distribution/#adding-id-8194b001-e4a1-43c9-9409-cd07bf1f00d4-gaussian-distribution-s\"\u003eadding Gaussian distributions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability_of_k_in_x_time/#adding-id-58a7600a-5169-4473-8ddc-f286534fc1f4-poisson-distribution\"\u003eadding poisson distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf you add a bunch of \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e things together\u0026hellip;. \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"averaging-random-variables\"\u003eaveraging random variables\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#adding-random-variables\"\u003eadding random variables\u003c/a\u003e + \u003ca href=\"/posts/kbhgaussian_distribution/#linear-transformations-on-gaussian\"\u003elinear transformers on Gaussian\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eYou end up with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{N}\\qty(\\mu, \\frac{1}{n} \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou note: as you sum together many things that is \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, the average is pretty the same; but the \u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e gets smaller as you add more.\u003c/p\u003e\n\u003ch2 id=\"maxing-random-variables\"\u003emaxing random variables\u003c/h2\u003e\n\u003cp\u003eGumbel distribution: fisher tripplett gedembo theorem???\u003c/p\u003e\n\u003ch2 id=\"sampling-statistics\"\u003esampling statistics\u003c/h2\u003e\n\u003cp\u003eWe assume that there\u0026rsquo;s some underlying distribution with some true mean \\(\\mu\\) and true variance \\(\\sigma^{2}\\). 
We would like to model it with some confidence.\u003c/p\u003e\n\u003cp\u003eConsider a series of measured samples \\(x_1, \u0026hellip;, x_{n}\\), each being an instantiation of a \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e drawn from the underlying distribution each being \\(X_1, \u0026hellip;, X_{n}\\).\u003c/p\u003e\n\u003ch3 id=\"sample-mean\"\u003esample mean\u003c/h3\u003e\n\u003cp\u003eLet us estimate the true population mean\u0026hellip; by creating a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e representing the the averaging \\(n\\) measured \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es representing the observations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{X} = \\frac{1}{N} \\sum_{i=1}^{n} X_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can do this because we really would like to know \\(\\mathbb{E}[\\bar{X}] = \\mathbb{E}[\\frac{1}{N} \\sum_{i=1}^{n} X_i] = \\frac{1}{N}\\sum_{i=1}^{n} \\mathbb{E}[X_{i}] = \\frac{1}{N} N \\mu = \\mu\\) and so as long as each of the underlying variables have the same expected mean (they do because \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e) drawn, we can use the \u003ca href=\"#sample-mean\"\u003esample mean\u003c/a\u003e to estimate the population mean.\u003c/p\u003e\n\u003ch3 id=\"sample-variance\"\u003esample variance\u003c/h3\u003e\n\u003cp\u003eWe can\u0026rsquo;t just calculate the \u003ca href=\"#sample-variance\"\u003esample variance\u003c/a\u003e with the variance of the sample. This is because the \u003ca href=\"#sample-mean\"\u003esample mean\u003c/a\u003e will be by definition by closer to each of the sampled points than the actual value. So we correct for it. 
This is a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e too:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS^{2} = \\frac{1}{n-1} \\sum_{i=1}^{N} (X_{i} - \\bar{X})^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"standard-error-of-the-mean\"\u003estandard error of the mean\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nVar(\\bar{X}) = \\frac{S^{2}}{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is the \u003cstrong\u003eERROR OF the mean\u003c/strong\u003e given what you measured because of the central limit theorem\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrandom_variables/","tags":null,"title":"random variable"},{"categories":null,"contents":"The Random Walk Hypothesis is a financial econometric hypothesis that stocks have the same distribution and independent of each other: that stocks are a random variable and not predictable in a macro space.\nTo set up the random walk hypothesis, let\u0026rsquo;s begin with some time \\(t\\), an asset return \\(r_t\\), some time elapsed \\(k\\), and some future asset return \\(r_{t+k}\\).\nWe will create two random variables \\(f(r_t)\\) and \\(g(r_{t+k})\\), which \\(f\\) and \\(g\\) are arbitrary functions we applied to analyze the return at that time.\nThe Random Walk Hypothesis tells us that, at any two unrelated given time, you cannot use the behavior of \\(r_t\\) to predict anything about \\(r_{t+k}\\), under any kind of analysis \\(f\\) or \\(g\\), that:\n\\begin{equation} Cov[f(r_t), g(r_{t+k})] = 0 \\end{equation}\nSo, all of the Random Walk Hypothesis models would leverage the above result, that the two time info don\u0026rsquo;t evolve together and they are independently, randomly distributed: they are random variables.\nFor the market to be a typical Random Walk, the central limit theorem has to hold on the value of return. 
This usually possible, but if the variance of the return is not finite, the return will not hold the central limit theorem which means that the return will not be normal. Of course the return does not have to hold central limit theorem, then we use other convergence distributions but still model it in the Random Walk Hypothesis as a random variable.\nreturn (FinMetrics) Importantly: its not the price that follows the random walk; it is the RETURN that follows the walk; if it was the price, then its possible for price to become negative. Return, technically, is defined by:\n\\begin{equation} R_t = \\frac{p_t-p_{t-1}}{p_{t-1}} \\end{equation}\nHowever, we really are interested in the natural log of the prices:\n\\begin{equation} r_t = log(p_t) - log(p_{t-1}) \\approx R_t \\end{equation}\nWe can do this is because, for small \\(x\\), \\(log\\ x \\approx x-1\\).\nWe do this is because, if we were wanting to add the returns over the last \\(n\\) days, in \\(R_t\\) you\u0026rsquo;d have to multiply them:\n\\begin{equation} \\frac{p_{t+1}}{p_t} \\cdot \\frac{p_t}{p_{t-1}} = \\frac{p_{t+1}}{p_{t-1}} \\end{equation}\nThis is bad, because of the central limit theorem. To make a random variable built of normalizing \\(n\\) items, you have to add and not multiply them together over a time range. We want to be able to add.\nTherefore, \\(r_t\\) can achieve the same division by adding (see the log laws).\nBut either way, with enough, we know that \\(r_t\\) is independently, identity distributed.\ntime series analysis Over some days \\(k\\), we have:\n\\begin{equation} Y_{k} = \\sum_{i=1}^{k} x_{i} \\end{equation}\nGiven that \\(x_{i}\\) is distributed randomly: \\(\\{x_{i}\\}_{i=1}^{N}\\). This becomes the foundation of time series analysis. The problem of course becomes harder when the values drift against each other, is nonindependent, etc. 
We can use the Martingale Model to take generic random walk to a more dependent model.\nCJ test If you have some amount of volacitity measurement, we first know that, by the Random Walk Hypothesis, we have:\n\\begin{equation} X_{k} \\sim N(0,\\sigma^{2}) \\end{equation}\nGiven some future return, you hope that:\n\\begin{equation} Y_{k}=\\sum_{i=1}^{k}X_{k}\\sim N(0,\\sigma^{2}) \\end{equation}\nIf so, if you have like \\(20\\%\\) of log returns, to have a statistically significant return, we have that:\n\\begin{equation} \\sigma =\\frac{0.2}{\\sqrt{12}} \\end{equation}\ngetting a statistically significant difference from it is hard.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e is a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003efinancial econometric\u003c/a\u003e hypothesis that stocks have the same distribution and independent of each other: that stocks are a random variable and not predictable in a macro space.\u003c/p\u003e\n\u003cp\u003eTo set up the random walk hypothesis, let\u0026rsquo;s begin with some time \\(t\\), an asset return \\(r_t\\), some time elapsed \\(k\\), and some future asset return \\(r_{t+k}\\).\u003c/p\u003e\n\u003cp\u003eWe will create two random variables \\(f(r_t)\\) and \\(g(r_{t+k})\\), which \\(f\\) and \\(g\\) are arbitrary functions we applied to analyze the return at that time.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e tells us that, at any two unrelated given time, you cannot use the behavior of \\(r_t\\) to predict anything about \\(r_{t+k}\\), under any kind of analysis \\(f\\) or \\(g\\), that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nCov[f(r_t), g(r_{t+k})] = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, all of the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e models would leverage the above result, that the two time info 
don\u0026rsquo;t evolve together and they are independently, \u003ca href=\"/posts/kbhrandom/\"\u003erandom\u003c/a\u003ely distributed: they are \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eFor the market to be a typical \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk\u003c/a\u003e, the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e has to hold on the value of \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e. This usually possible, but if the variance of the return is not finite, the \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e will not hold the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e which means that the \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e will not be \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal\u003c/a\u003e. Of course the \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e does not have to hold \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e, then we use other convergence distributions but still model it in the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e as a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"return--finmetrics\"\u003ereturn (FinMetrics)\u003c/h2\u003e\n\u003cp\u003eImportantly: its not the \u003cem\u003eprice\u003c/em\u003e that follows the random walk; it is the \u003cem\u003eRETURN\u003c/em\u003e that follows the walk; if it was the price, then its possible for price to become negative. 
Return, technically, is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_t = \\frac{p_t-p_{t-1}}{p_{t-1}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHowever, we really are interested in the natural log of the prices:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_t = log(p_t) - log(p_{t-1}) \\approx R_t\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can do this is because, for small \\(x\\), \\(log\\ x \\approx x-1\\).\u003c/p\u003e\n\u003cp\u003eWe do this is because, if we were wanting to add the returns over the last \\(n\\) days, in \\(R_t\\) you\u0026rsquo;d have to multiply them:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{p_{t+1}}{p_t} \\cdot \\frac{p_t}{p_{t-1}} = \\frac{p_{t+1}}{p_{t-1}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is bad, because of the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e. To make a random variable built of normalizing \\(n\\) items, you have to \u003cem\u003eadd\u003c/em\u003e and not \u003cem\u003emultiply\u003c/em\u003e them together over a time range. We want to be able to add.\u003c/p\u003e\n\u003cp\u003eTherefore, \\(r_t\\) can achieve the same division by adding (see the \u003ca href=\"/posts/kbhlog_laws/\"\u003elog laws\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eBut either way, with enough, we know that \\(r_t\\) is independently, identity distributed.\u003c/p\u003e\n\u003ch2 id=\"time-series-analysis\"\u003etime series analysis\u003c/h2\u003e\n\u003cp\u003eOver some days \\(k\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY_{k} = \\sum_{i=1}^{k} x_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven that \\(x_{i}\\) is distributed randomly: \\(\\{x_{i}\\}_{i=1}^{N}\\). This becomes the foundation of \u003ca href=\"#time-series-analysis\"\u003etime series analysis\u003c/a\u003e. The problem of course becomes harder when the values drift against each other, is nonindependent, etc. 
We can use the \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e to take generic \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e to a more dependent model.\u003c/p\u003e\n\u003ch2 id=\"cj-test\"\u003eCJ test\u003c/h2\u003e\n\u003cp\u003eIf you have some amount of volacitity measurement, we first know that, by the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX_{k} \\sim N(0,\\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven some future return, you hope that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY_{k}=\\sum_{i=1}^{k}X_{k}\\sim N(0,\\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf so, if you have like \\(20\\%\\) of log returns, to have a statistically significant return, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma =\\frac{0.2}{\\sqrt{12}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egetting a statistically significant difference from it is \u003cem\u003ehard.\u003c/em\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrandom_walk/","tags":null,"title":"Random Walk Hypothesis"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrandom_wol/","tags":null,"title":"random wol"},{"categories":null,"contents":"randomized algorithm is a type of algorithm, similar to relaxation.\nMake a hard problem easier by changing the problem What if, instead of guaranteeing we find the best/correct answer, we only provide some chance of finding the best/correct answer? primality testing primality testing is very important for modern crypto systems; we need to be able to find large prime numbers, and be able to generate them quickly.\ntraditional primality testing We can divide every prime number below \\(\\sqrt x\\). 
In theory, this is pretty fast, but we need to know all the primes we need to test.\nThis would therefore take \\(O(\\sqrt{x})\\) time.\nmiller-rabin primality testing miller-rabin primality testing is a primality testing randomized algorithm.\nConstruct a set of equations, each one requiring an exponentiation and a division If any of them is false, the number is composite If they are all true, the probability that the number is composite is reduced to \\(\\frac{1}{4}\\). If we run miller-rabin 10 times \\(O(10)=O(1)\\), the number is \\(1-\\left(\\frac{1}{4}\\right)^{10}\\) chance of being prime.\nThis is of course much much faster than traditional primality testing.\nModern cryptographic system uses this.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrandomized_algorithum/\"\u003erandomized algorithm\u003c/a\u003e is a type of algorithm, similar to \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMake a hard problem easier by changing the problem\u003c/li\u003e\n\u003cli\u003eWhat if, instead of guaranteeing we find the best/correct answer, we only provide some chance of finding the best/correct answer?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"primality-testing\"\u003eprimality testing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#primality-testing\"\u003eprimality testing\u003c/a\u003e is very important for modern crypto systems; we need to be able to find large prime numbers, and be able to generate them quickly.\u003c/p\u003e\n\u003ch3 id=\"traditional-primality-testing\"\u003etraditional primality testing\u003c/h3\u003e\n\u003cp\u003eWe can divide every prime number below \\(\\sqrt x\\). 
In theory, this is pretty fast, but we need to know all the primes we need to test.\u003c/p\u003e\n\u003cp\u003eThis would therefore take \\(O(\\sqrt{x})\\) time.\u003c/p\u003e\n\u003ch3 id=\"miller-rabin-primality-testing\"\u003emiller-rabin primality testing\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#miller-rabin-primality-testing\"\u003emiller-rabin primality testing\u003c/a\u003e is a \u003ca href=\"#primality-testing\"\u003eprimality testing\u003c/a\u003e \u003ca href=\"/posts/kbhrandomized_algorithum/\"\u003erandomized algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eConstruct a set of equations, each one requiring an exponentiation and a division\u003c/li\u003e\n\u003cli\u003eIf any of them is false, the number is composite\u003c/li\u003e\n\u003cli\u003eIf they are all true, the probability that the number is composite is reduced to \\(\\frac{1}{4}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf we run miller-rabin 10 times \\(O(10)=O(1)\\), the number is \\(1-\\left(\\frac{1}{4}\\right)^{10}\\) chance of being prime.\u003c/p\u003e\n\u003cp\u003eThis is of course much much faster than \u003ca href=\"#traditional-primality-testing\"\u003etraditional primality testing\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eModern cryptographic system uses this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrandomized_algorithum/","tags":null,"title":"randomized algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrandomized_pbvi/","tags":null,"title":"Randomized PBVI"},{"categories":null,"contents":"The number of alpha vectors needed to perform PBVI is one for each of your belief sample. Which is a bad idea. 
Perseus is essentially PBVI, where this idea is explored slightly.\nThe preamble is the same as PBVI:\nwe keep track of a bunch of alpha vectors and belief samples (which we get from point selection):\n\\begin{equation} \\Gamma = \\{\\alpha_{1}, \\dots, \\alpha_{m}\\} \\end{equation}\nand\n\\begin{equation} B = \\{b_1, \\dots, b_{m}\\} \\end{equation}\nTo preserve the lower-boundedness of these alpha vectors, one should seed the alpha vectors via something like blind lower bound\nWe can estimate our utility function at any belief by looking in the set for the most optimal:\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b \\end{equation}\nWe now define a function named backup (see PBVI Backup), and call it on ONLY ONE belief:\nlet us sample\u0026mdash;\n\\begin{equation} b \\in B \\end{equation}\nand call backup to get:\n\\begin{equation} \\alpha\u0026rsquo; = backup(\\Gamma, b) \\end{equation}\nwhere,\n\\begin{equation} backup(\\Gamma, b) \\rightarrow \\alpha_{t+1} \\end{equation}\nNow, if \\(b \\cdot a\u0026rsquo; \u0026gt; \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\\) (i.e. we just increased our value floor because our new alpha vector indicates a higher value at \\(b\\)), we add our new vector to the set \\(\\Gamma\\). Otherwise, we add \\(a\u0026rsquo; = \\arg\\max_{\\alpha \\in \\Gamma} b \\cdot \\alpha\\), the alpha vector which previously got the highest value for \\(b\\).\nAfter this, we pull a Perseus-core funni:\nPerseus Belief Pruning let us define:\n\\begin{equation} V_{t}(b) = \\max_{\\alpha \\in \\Gamma_{t}} \\alpha \\cdot b \\end{equation}\nand\n\\begin{equation} V_{t+1}(b) = \\max_{\\alpha \\in \\Gamma_{t+1}} \\alpha \\cdot b \\end{equation}\nnamely, the expected value of \\(b\\) before and after belief updates. 
Then, we:\n\\begin{equation} B_{t+1} = \\{b \\in B, \\text{if}\\ V_{t+1}(b) \u0026lt; V(b)\\} \\end{equation}\nthat is, if updating our sampled belief\u0026rsquo;s alpha vector improved the value of another belief in the set by accident already, we don\u0026rsquo;t need to update that belief again.\nRepeat this process until we are out of beliefs to update, that is, when \\(B = \\emptyset\\).\nSlight Variation? then,\n","html":"\u003cp\u003eThe number of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es needed to perform \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e is one for each of your belief sample. Which is a bad idea. \u003ca href=\"/posts/kbhperseus/\"\u003ePerseus\u003c/a\u003e is essentially \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, where this idea is explored slightly.\u003c/p\u003e\n\u003cp\u003eThe preamble is the same as \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003ewe keep track of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es and \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e samples (which we get from \u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma = \\{\\alpha_{1}, \\dots, \\alpha_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = \\{b_1, \\dots, b_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo preserve the lower-boundedness of these \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, one should seed the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es via something like \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eWe can estimate our \u003ca 
href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function at any belief by looking in the set for the most optimal:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now define a function named \u003ccode\u003ebackup\u003c/code\u003e (see \u003ca href=\"/posts/kbhpoint_based_value_iteration/#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e), and call it on ONLY ONE belief:\u003c/p\u003e\n\u003cp\u003elet us sample\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\in B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand call \u003ccode\u003ebackup\u003c/code\u003e to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha\u0026rsquo; = backup(\\Gamma, b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nbackup(\\Gamma, b) \\rightarrow \\alpha_{t+1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, if \\(b \\cdot a\u0026rsquo; \u0026gt; \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\\) (i.e. we just increased our value floor because our new \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e indicates a higher value at \\(b\\)), we add our new vector to the set \\(\\Gamma\\). 
Otherwise, we add \\(a\u0026rsquo; = \\arg\\max_{\\alpha \\in \\Gamma} b \\cdot \\alpha\\), the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e which previously got the highest value for \\(b\\).\u003c/p\u003e\n\u003cp\u003eAfter this, we pull a Perseus-core funni:\u003c/p\u003e\n\u003ch2 id=\"perseus-belief-pruning\"\u003ePerseus Belief Pruning\u003c/h2\u003e\n\u003cp\u003elet us define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{t}(b) = \\max_{\\alpha \\in \\Gamma_{t}} \\alpha \\cdot b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{t+1}(b) = \\max_{\\alpha \\in \\Gamma_{t+1}} \\alpha \\cdot b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enamely, the expected value of \\(b\\) before and after \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e updates. Then, we:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB_{t+1} = \\{b \\in B, \\text{if}\\ V_{t+1}(b) \u0026lt; V(b)\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is, if updating our sampled belief\u0026rsquo;s \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e improved the value of another belief in the set by accident already, we don\u0026rsquo;t need to update that belief again.\u003c/p\u003e\n\u003cp\u003eRepeat this process until we are out of beliefs to update, that is, when \\(B = \\emptyset\\).\u003c/p\u003e\n\u003ch2 id=\"slight-variation\"\u003eSlight Variation?\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_20-15-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ethen,\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_20-15-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhperseus/","tags":null,"title":"Randomized Point-Based Value Iteration"},{"categories":null,"contents":"The range (image, column space) is the set that some function \\(T\\) maps to.\nconstituents some \\(T: V\\to 
W\\)\nrequirements The range is just the space the map maps to:\n\\begin{equation} range\\ T = \\{Tv: v \\in V\\} \\end{equation}\nadditional information range is a subspace of the codomain This result is hopefully not super surprising.\nzero \\begin{equation} T0 = 0 \\end{equation}\nas linear maps take \\(0\\) to \\(0\\), so \\(0\\) is definitely in the range.\naddition and scalar multiplication inherits from additivity and homogeneity of Linear Maps.\nGiven \\(T v_1 = w_1,\\ T v_2=w_2\\), we have that \\(w_1, w_2 \\in range\\ T\\).\n\\begin{equation} T(v_1 + v_2) = w_1 + w_2 \\end{equation}\n\\begin{equation} T(\\lambda v_1) = \\lambda w_1 \\end{equation}\nSo closed under addition and scalar multiplication. Having shown the zero and closure, we have that the range is a subspace of the codomain. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e (\u003ca href=\"/posts/kbhrange/\"\u003eimage\u003c/a\u003e, \u003ca href=\"/posts/kbhrange/\"\u003ecolumn space\u003c/a\u003e) is the set that some \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(T\\) maps to.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003esome \\(T: V\\to W\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe range is just the space the map maps to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nrange\\ T = \\{Tv: v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"range-is-a-subspace-of-the-codomain\"\u003erange is a subspace of the codomain\u003c/h3\u003e\n\u003cp\u003eThis result is hopefully not super surprising.\u003c/p\u003e\n\u003ch4 id=\"zero\"\u003ezero\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nT0 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) 
to \\(0\\)\u003c/a\u003e, so \\(0\\) is definitely in the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"addition-and-scalar-multiplication\"\u003eaddition and scalar multiplication\u003c/h4\u003e\n\u003cp\u003einherits from additivity and \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eGiven \\(T v_1 = w_1,\\ T v_2=w_2\\), we have that \\(w_1, w_2 \\in range\\ T\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(v_1 + v_2) = w_1 + w_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(\\lambda v_1) = \\lambda w_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e. Having shown the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e and closure, we have that the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the codomain. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrange/","tags":null,"title":"range"},{"categories":null,"contents":"Most users are incapable of writing good Boolean Retrieval queries.\nfeast or famine problem Boolean Retrieval either returns too few or too many results: AND queries return often too few results \\(\\min (x,y)\\), and OR queries return too many results \\(x+y\\).\nThis is not a problem with Ranked Information Retrieval because a large result set doesn\u0026rsquo;t matter: top results just needs to be good results.\nfree text query Instead of using a series of Boolean Retrieval, we instead give free text to the user.\nscore To do Ranked Information Retrieval, we need a way of asigning a score to a query-document pair.\nthe more frequently the query term appears in the doc, the higher the score should be if the word doesn\u0026rsquo;t appear, we score as 0 Jaccard Coefficient \\begin{equation} jaccard(A,B) = |A \\cap B | / |A \\cup B| \\end{equation}\nwhere \\(A\\) and \\(B\\) are vocab, (i.e. 
no frequency).\nlimitation doesn\u0026rsquo;t consider frequency rare terms are more informative than frequent terms the normalization isn\u0026rsquo;t quite right, ideally we should use \\(\\sqrt{A\\cup B}\\), which can be obtained via cosine-similarity log-frequency weighting \u0026ldquo;Relevance does not increase proportionally with term frequency\u0026rdquo;\u0026mdash;a document with 10 occurrences of a term is more relevant than that with 1, but its not 10 times more relevant.\n\\begin{equation} w_{t,d} = \\begin{cases} 1 + \\log_{10} (tf_{t,d}), \\text{if } tf_{t,d} \u0026gt; 0 \\\\ 0 \\end{cases} \\end{equation}\nthis gives less-than-linear growth.\nto score, we add up all the terms which intersect.\ndocument frequency the document frequency is the number of documents in which a term occur.\nIts an INVERSE MEASURE of the informativeness of a word\u0026mdash;the more times a word appears, the less informative it is.\n\\begin{equation} idf_{t} = \\log_{10} (N / df_{t}) \\end{equation}\nwhere \\(N\\) is the number of documents, and \\(df_{t}\\) is the number of documents in which the term appears. We take the log in the motivation as log-frequency weighting.\n\u0026ldquo;a word that occurs in every document has a weight of \\(0\\)\u0026rdquo;.\nThere is no effect to one-term queries.\nWe don\u0026rsquo;t use collection frequencies (i.e. we never consider COUNTS in CORPUS, because collection frequencies would score commonly found words equally because it doesn\u0026rsquo;t consider distribution).\nTF-IDF multiply log-frequency weighting TIMES document frequency.\n\\begin{equation} score(q,d) = \\sum_{t \\in q \\cap d} (1+\\log_{10}(tf_{t,d})) \\times \\log_{10}\\qty(\\frac{N}{df_{t}}) \\end{equation}\nif \\(tf = 0\\), set the entire TF score to \\(0\\) without adding 1.\nusing this, we can now construct a weight-matrix. 
Each document is a vector of the TFIDF score for each term against each document.\nThere are a series of approaches that you can use as a possible approach to compute tfidf: various ways to normalizing, variable document frequency counts (or not use it), etc.\nSMART notation ddd.qqq, where the first three letters represent the document weighting scheme, and the second three letter represents the query weighting scheme.\nvector-space model after creating a matrix where each column is a document and each row is a term, and the cells are TF-IDF of the words against the documents, we can consider each document as a vector over the team.\nwe can treat queries as ALSO a document in the space, and therefore use proximity of the vectors as a searching system.\n(Euclidian distance is bad: because its too large for vectors of different lengths. Instead, we should use angle instead of distance.)\ncosine similarity \\begin{equation} \\cos(q,d) = \\frac{q \\cdot d}{|q| |d|} \\end{equation}\nbecause the dot product becomes just the angle between the two vectors after you normalize by length.\ntypically, you may want to normalize the length of the vectors in advance.\ncosine is a little flatten ontop\nltc.lnn weighting document: logarithm + idf + normed query: logarithm + 1 + 1\nmeaning, we don\u0026rsquo;t weight or normalize query vectors\n","html":"\u003cp\u003eMost users are incapable of writing good \u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e queries.\u003c/p\u003e\n\u003ch2 id=\"feast-or-famine-problem\"\u003efeast or famine problem\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e either returns too few or too many results: AND queries return often too few results \\(\\min (x,y)\\), and OR queries return too many results \\(x+y\\).\u003c/p\u003e\n\u003cp\u003eThis is not a problem with \u003ca 
href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e because a large result set doesn\u0026rsquo;t matter: top results just needs to be good results.\u003c/p\u003e\n\u003ch2 id=\"free-text-query\"\u003efree text query\u003c/h2\u003e\n\u003cp\u003eInstead of using a series of \u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e, we instead give free text to the user.\u003c/p\u003e\n\u003ch2 id=\"score\"\u003escore\u003c/h2\u003e\n\u003cp\u003eTo do \u003ca href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e, we need a way of asigning a \u003ca href=\"#score\"\u003escore\u003c/a\u003e to a query-document pair.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe more frequently the query term appears in the doc, the higher the score should be\u003c/li\u003e\n\u003cli\u003eif the word doesn\u0026rsquo;t appear, we score as 0\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"jaccard-coefficient\"\u003eJaccard Coefficient\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\njaccard(A,B) = |A \\cap B | / |A \\cup B|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(A\\) and \\(B\\) are vocab, (i.e. 
no frequency).\u003c/p\u003e\n\u003ch4 id=\"limitation\"\u003elimitation\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003edoesn\u0026rsquo;t consider \u003cstrong\u003efrequency\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erare terms are more informative than frequent terms\u003c/li\u003e\n\u003cli\u003ethe normalization isn\u0026rsquo;t quite right, ideally we should use \\(\\sqrt{A\\cup B}\\), which can be obtained via cosine-similarity\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"log-frequency-weighting\"\u003elog-frequency weighting\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Relevance does not increase proportionally with term frequency\u0026rdquo;\u0026mdash;a document with 10 occurrences of a term is more relevant than that with 1, but its not 10 times more relevant.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw_{t,d} = \\begin{cases}\n1 + \\log_{10} (tf_{t,d}), \\text{if } tf_{t,d} \u0026gt; 0 \\\\\n0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis gives less-than-linear growth.\u003c/p\u003e\n\u003cp\u003eto score, we add up all the terms which intersect.\u003c/p\u003e\n\u003ch3 id=\"document-frequency\"\u003edocument frequency\u003c/h3\u003e\n\u003cp\u003ethe \u003ca href=\"#document-frequency\"\u003edocument frequency\u003c/a\u003e is the number of documents in which a term occur.\u003c/p\u003e\n\u003cp\u003eIts an \u003cstrong\u003eINVERSE MEASURE\u003c/strong\u003e of the informativeness of a word\u0026mdash;the more times a word appears, the less informative it is.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nidf_{t} = \\log_{10} (N / df_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(N\\) is the number of documents, and \\(df_{t}\\) is the number of documents in which the term appears. 
We take the log in the motivation as \u003ca href=\"#log-frequency-weighting\"\u003elog-frequency weighting\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a word that occurs in every document has a weight of \\(0\\)\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eThere is no effect to one-term queries.\u003c/p\u003e\n\u003cp\u003eWe don\u0026rsquo;t use \u003cstrong\u003ecollection frequencies\u003c/strong\u003e (i.e. we never consider COUNTS in CORPUS, because collection frequencies would score commonly found words equally because it doesn\u0026rsquo;t consider distribution).\u003c/p\u003e\n\u003ch3 id=\"tf-idf\"\u003eTF-IDF\u003c/h3\u003e\n\u003cp\u003emultiply \u003ca href=\"#log-frequency-weighting\"\u003elog-frequency weighting\u003c/a\u003e TIMES \u003ca href=\"#document-frequency\"\u003edocument frequency\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nscore(q,d) = \\sum_{t \\in q \\cap d} (1+\\log_{10}(tf_{t,d})) \\times \\log_{10}\\qty(\\frac{N}{df_{t}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(tf = 0\\), set the entire TF score to \\(0\\) without adding 1.\u003c/p\u003e\n\u003cp\u003eusing this, we can now construct a weight-matrix. 
Each document is a vector of the TFIDF score for each term against each document.\u003c/p\u003e\n\u003cp\u003eThere are a series of approaches that you can use as a possible approach to compute tfidf: various ways to normalizing, variable document frequency counts (or not use it), etc.\u003c/p\u003e\n\u003ch4 id=\"smart-notation\"\u003eSMART notation\u003c/h4\u003e\n\u003cp\u003e\u003ccode\u003eddd.qqq\u003c/code\u003e, where the first three letters represent the document weighting scheme, and the second three letter represents the query weighting scheme.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-24_21-04-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"vector-space-model\"\u003evector-space model\u003c/h2\u003e\n\u003cp\u003eafter creating a matrix where each column is a document and each row is a term, and the cells are \u003ca href=\"#tf-idf\"\u003eTF-IDF\u003c/a\u003e of the words against the documents, we can consider each document as a vector over the team.\u003c/p\u003e\n\u003cp\u003ewe can treat queries as \u003cstrong\u003eALSO\u003c/strong\u003e a document in the space, and therefore use proximity of the vectors as a searching system.\u003c/p\u003e\n\u003cp\u003e(Euclidian distance is bad: because its too large for vectors of different lengths. 
Instead, we should use angle instead of distance.)\u003c/p\u003e\n\u003ch3 id=\"cosine-similarity\"\u003ecosine similarity\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\cos(q,d) = \\frac{q \\cdot d}{|q| |d|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the dot product becomes just the angle between the two vectors after you normalize by length.\u003c/p\u003e\n\u003cp\u003etypically, you may want to normalize the length of the vectors in advance.\u003c/p\u003e\n\u003cp\u003ecosine is a little flatten ontop\u003c/p\u003e\n\u003ch3 id=\"ltc-dot-lnn-weighting\"\u003eltc.lnn weighting\u003c/h3\u003e\n\u003cp\u003edocument: logarithm + idf + normed\nquery: logarithm + 1 + 1\u003c/p\u003e\n\u003cp\u003emeaning, we don\u0026rsquo;t weight or normalize query vectors\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhranked_information_retrieval/","tags":null,"title":"Ranked Information Retrieval"},{"categories":null,"contents":"rational numbers are ratios:\n\\begin{equation} \\mathbb{Q} = \\left\\{\\frac{a}{b} \\middle| a,b\\in \\mathbb{Z}, b\\neq 0\\right\\} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrational_number/\"\u003erational number\u003c/a\u003es are ratios:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Q} = \\left\\{\\frac{a}{b} \\middle| a,b\\in \\mathbb{Z}, b\\neq 0\\right\\}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrational_number/","tags":null,"title":"rational number"},{"categories":null,"contents":"Motivation Suppose we would like to say that \u0026ldquo;we prefer all to well \\(A\\) more than bad blood \\(B\\)\u0026rdquo;\n\\begin{equation} A \\succ B \\end{equation}\nNo right or wrong answers in this statement by itself, but we can check whether or not your preferences are inconsistent with itself.\nvon Neumann and Morgenstern Axioms Axioms for checking if a set of preferences are rational. 
The axioms allow you to check if a set of decisions are Rational Preferences.\nFor three conditions \\(A, B, C\\), we have:\ncompleteness \u0026ldquo;universal comparability\u0026rdquo;\neither \\(A \\succ B\\), \\(A \\prec B\\), \\(A \\sim B\\) (you have to like either better, or be indifferent)\ntransitivity If \\(A \\succeq B\\), \\(B \\succeq C\\), then \\(A \\succeq C\\)\ncontinuity If \\(A \\succeq C \\succeq B\\), then there exists some probability \\(p\\) such that we can form a lottery of shape \\([A:p; B:1-p] \\sim C\\)\nThat is, if \\(C\\) is between \\(A, B\\), then we can create a situation where we mix the chance of \\(A\\) and \\(B\\) happening such that selecting from that situation feels equally as good as selecting from \\(C\\)\nindependence for \\(A \\succ B\\), then for any \\(C\\) and probability \\(b\\) and any probability \\(p\\), then the lotteries \\([A:p; c:1-p] \\geq [B:p; C:1-p]\\)\nAs in, if you swap out a component of a lottery with something less desirable, your new lottery should be more undesirable as well.\n","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eSuppose we would like to say that \u0026ldquo;we prefer all to well \\(A\\) more than bad blood \\(B\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\succ B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNo right or wrong answers in this statement by itself, but we can check whether or not your preferences are \u003cstrong\u003einconsistent\u003c/strong\u003e with itself.\u003c/p\u003e\n\u003ch2 id=\"von-neumann-and-morgenstern-axioms\"\u003evon Neumann and Morgenstern Axioms\u003c/h2\u003e\n\u003cp\u003eAxioms for checking if a set of preferences are rational. 
The axioms allow you to check if a set of decisions are \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preference\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eFor three conditions \\(A, B, C\\), we have:\u003c/p\u003e\n\u003ch3 id=\"completeness\"\u003ecompleteness\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhprobability_theory/#universal-comparability\"\u003euniversal comparability\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eeither \\(A \\succ B\\), \\(A \\prec B\\), \\(A \\sim B\\) (you have to like either better, or be indifferent)\u003c/p\u003e\n\u003ch3 id=\"transitivity--kbhprobability-theory-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability_theory/#transitivity\"\u003etransitivity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eIf \\(A \\succeq B\\), \\(B \\succeq C\\), then \\(A \\succeq C\\)\u003c/p\u003e\n\u003ch3 id=\"continuity--kbhuniqueness-and-existance-dot-md\"\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eIf \\(A \\succeq C \\succeq B\\), then there exists some probability \\(p\\) such that we can form a \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e of shape \\([A:p; B:1-p] \\sim C\\)\u003c/p\u003e\n\u003cp\u003eThat is, if \\(C\\) is between \\(A, B\\), then we can create a situation where we mix the chance of \\(A\\) and \\(B\\) happening such that selecting from that situation feels equally as good as selecting from \\(C\\)\u003c/p\u003e\n\u003ch3 id=\"independence--kbhprobability-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003efor \\(A \\succ B\\), then for any \\(C\\) and probability \\(b\\) and any probability \\(p\\), then the \u003ca href=\"/posts/kbhlottery/\"\u003elotteries\u003c/a\u003e \\([A:p; c:1-p] \\geq [B:p; C:1-p]\\)\u003c/p\u003e\n\u003cp\u003eAs in, if you swap out a component of a \u003ca 
href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e with something less desirable, your new \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e should be more undesirable as well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrational_preference/","tags":null,"title":"rational preference"},{"categories":null,"contents":"\\(\\mathbb{R}\\) real numbers are numbers generatable by a possibly infinite sum of powers of 10.\n","html":"\u003cp\u003e\\(\\mathbb{R}\\) real numbers are \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003es generatable by a possibly infinite sum of powers of 10.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreal_number/","tags":null,"title":"real number"},{"categories":null,"contents":"plan to depth \\(d\\), take action, replan\n","html":"\u003cp\u003eplan to depth \\(d\\), take action, replan\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreceeding_horizon/","tags":null,"title":"Receeding Horizon"},{"categories":null,"contents":"Recommender System is a system that provide recommendations due to search; it combines Information Retrival with another goal:\nEditorial/Hand-Curated lists: \u0026ldquo;list of favorites\u0026rdquo;, \u0026ldquo;essential items\u0026rdquo;, etc. Aggregates: top 10 lists, most popular, recent uploads (hardest) Individual tailors: user-based recommendation Formal Model \\(X\\) the set of users \\(S\\) the set of things to recommend \\(R\\) the set of distinct and totally ordered ratings (stars 1-5, real number 0-1, etc.) Utility function: \\(U:X \\times S \\to R\\) (\u0026ldquo;how much Three key problems:\nobtain \\(U\\) as much as possible, leaving something blank extrapolate blank entries in \\(U\\) which maybe high (\u0026ldquo;recommend something\u0026rdquo;) evaluate our recommendation method obtaining \\(U\\) ask people (rate!) 
implicit signals (buying book, picking song, watching video, etc.)\u0026mdash;this will create a binary matrix extrapolating \\(U\\) \\(U\\) is sparse (people can\u0026rsquo;t rate everything).\nCold Start problem:\nnew items have no ratings new users have no history Three Main Approaches:\ncontent based filtering Recommend \\(s\\) to \\(x\\) if \\(s \\sim s\u0026rsquo;\\) based on content where \\(s\u0026rsquo;\\) is already rated highly by \\(x\\)\n(\u0026ldquo;if the user likes Jazz, given them more Jazz\u0026rdquo;)\ncreate profile of each item (movie: genre, actor, years; lexicon: important words by TF-IDF; etc) create profile of user, say by averaging ratings of the things the user marked as high cosine similarity Advantages:\nno need for data on other users (no user sparsity) able to tailor to unique tastes able to recommend new and unpopular things transparent Disadvantages:\nneed to build a profile for user overspecialization (never recommend outside of user\u0026rsquo;s preferences) unable to exploit other users\u0026rsquo; judgments finding good features is hard collaborative filtering Instead of using content features of items to recommend, we find user instead.\nuser-user collaborative filtering Consider a user \\(x\\), and some set of unrated items \\(i\\).\nLet\u0026rsquo;s find \\(N\\) other users with similar ratings: 1) find similar users and 2) recommend items they like.\nThen, we estimate \\(x\\)\u0026rsquo;s ratings for \\(i\\) based on the similar users\u0026rsquo; ratings for \\(i\\).\nproblem\nbecause the sparsity of the user vectors which we treat as \\(0\\), cosine gets confused. Cosine doesn\u0026rsquo;t really capture the \u0026ldquo;oppositeness\u0026rdquo; of a 5 star vs a 1 star rating.\nsolution: mean center each user\u0026mdash;subtracting each user\u0026rsquo;s score from their mean rating (ignoring missing values, and do not subtract anything to the missing values). 
This allows opposite opinions to have opposite signs as well.\nsparsity\nwe prevent computing values for which one user does not rate; as in, we chop the vectors such that the comparison between \\(x\\) and \\(x_{n} \\in X\\) are both dense (i.e. if one of the two users don\u0026rsquo;t rate something, we do not include that in the vector).\nafter this, we can compute our normal cosine similarity; remember to normalise.\nprediction\nfinally, after we got our \\(N\\), we can return our prediction for \\(I\\) either based on an average score of the similar users retrieved in \\(N\\) or average weighted of scores in \\(N\\) weighted by similarity to our target user \\(x\\).\n\\begin{equation} r_{xi} = \\frac{1}{N} \\sum_{}^{} r_{yi} \\end{equation}\nor\n\\begin{equation} r_{xi} = \\sum_{}^{} \\frac{sim(x,y) r_{yi}}{sim(x,y)} \\end{equation}\nitem-item collaborative filtering For item \\(i\\), we want to find other similar items to our item \\(i\\), and average the user\u0026rsquo;s own ratings on those similar items onto \\(i\\).\nthis tends to work better because items are easier to classify than users.\nproblem\ncold start (we need initial data to seed the rating) sparsity (user ratings are sparse) popularity bias\u0026mdash;creates filter bubbles and hard to generalize over unique tastes latent factor (neural) systems represent each video and user as an embedding collaborative filtering. 
YouTube obtains this embedding by predicting what video user is going to watch\nevaluation RMSE between held out ratings:\n\\begin{equation} \\sqrt{\\frac{\\sum_{xi}^{}(r_{xi} - r^{*}_{xi})^{2}}{N}} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrecommender_system/\"\u003eRecommender System\u003c/a\u003e is a system that provide recommendations due to search; it combines \u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e with another goal:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eEditorial/Hand-Curated lists\u003c/strong\u003e: \u0026ldquo;list of favorites\u0026rdquo;, \u0026ldquo;essential items\u0026rdquo;, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eAggregates\u003c/strong\u003e: top 10 lists, most popular, recent uploads\u003c/li\u003e\n\u003cli\u003e(hardest) \u003cstrong\u003eIndividual tailors\u003c/strong\u003e: user-based recommendation\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"formal-model\"\u003eFormal Model\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) the set of users\u003c/li\u003e\n\u003cli\u003e\\(S\\) the set of things to recommend\u003c/li\u003e\n\u003cli\u003e\\(R\\) the set of distinct and totally ordered ratings (stars 1-5, real number 0-1, etc.)\u003c/li\u003e\n\u003cli\u003eUtility function: \\(U:X \\times S \\to R\\) (\u0026ldquo;how much\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThree key problems:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eobtain\u003c/strong\u003e \\(U\\) as much as possible, leaving something blank\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eextrapolate\u003c/strong\u003e blank entries in \\(U\\) which maybe high (\u0026ldquo;recommend something\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eevaluate\u003c/strong\u003e our recommendation method\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"obtaining-u\"\u003eobtaining \\(U\\)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eask 
people (rate!)\u003c/li\u003e\n\u003cli\u003eimplicit signals (buying book, picking song, watching video, etc.)\u0026mdash;this will create a binary matrix\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"extrapolating-u\"\u003eextrapolating \\(U\\)\u003c/h2\u003e\n\u003cp\u003e\\(U\\) is sparse (people can\u0026rsquo;t rate everything).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eCold Start problem\u003c/strong\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003enew items have no ratings\u003c/li\u003e\n\u003cli\u003enew users have no history\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eThree Main Approaches\u003c/strong\u003e:\u003c/p\u003e\n\u003ch3 id=\"content-based-filtering\"\u003econtent based filtering\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eRecommend \\(s\\) to \\(x\\) if \\(s \\sim s\u0026rsquo;\\) based on content where \\(s\u0026rsquo;\\) is already rated highly by \\(x\\)\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;if the user likes Jazz, given them more Jazz\u0026rdquo;)\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecreate profile of each item (movie: genre, actor, years; lexicon: important words by \u003ca href=\"/posts/kbhranked_information_retrieval/#tf-idf\"\u003eTF-IDF\u003c/a\u003e; etc)\u003c/li\u003e\n\u003cli\u003ecreate profile of user, say by averaging ratings of the things the user marked as high\u003c/li\u003e\n\u003cli\u003ecosine similarity\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eAdvantages:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eno need for data on other users (no user sparsity)\u003c/li\u003e\n\u003cli\u003eable to tailor to unique tastes\u003c/li\u003e\n\u003cli\u003eable to recommend new and unpopular things\u003c/li\u003e\n\u003cli\u003etransparent\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDisadvantages:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eneed to build a profile for user\u003c/li\u003e\n\u003cli\u003eoverspecialization (never recommend outside of 
user\u0026rsquo;s preferences)\u003c/li\u003e\n\u003cli\u003eunable to exploit other users\u0026rsquo; judgments\u003c/li\u003e\n\u003cli\u003efinding good features is hard\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"collaborative-filtering\"\u003ecollaborative filtering\u003c/h3\u003e\n\u003cp\u003eInstead of using content features of items to recommend, we find user instead.\u003c/p\u003e\n\u003ch4 id=\"user-user-collaborative-filtering--orga6897ce\"\u003euser-user \u003ca href=\"#collaborative-filtering\"\u003ecollaborative filtering\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eConsider a user \\(x\\), and some set of unrated items \\(i\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s find \\(N\\) other users with similar ratings: 1) find similar users and 2) recommend items they like.\u003c/p\u003e\n\u003cp\u003eThen, we estimate \\(x\\)\u0026rsquo;s ratings for \\(i\\) based on the similar users\u0026rsquo; ratings for \\(i\\).\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eproblem\u003c/p\u003e\n\u003cp\u003ebecause the sparsity of the user vectors which we treat as \\(0\\), cosine gets confused. Cosine doesn\u0026rsquo;t really capture the \u0026ldquo;oppositeness\u0026rdquo; of a 5 star vs a 1 star rating.\u003c/p\u003e\n\u003cp\u003esolution: \u003cstrong\u003emean center\u003c/strong\u003e each user\u0026mdash;subtracting each user\u0026rsquo;s score from their mean rating (ignoring missing values, and do not subtract anything to the missing values). This allows opposite opinions to have opposite signs as well.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003esparsity\u003c/p\u003e\n\u003cp\u003ewe prevent computing values for which one user does not rate; as in, we chop the vectors such that the comparison between \\(x\\) and \\(x_{n} \\in X\\) are both dense (i.e. 
if one of the two users don\u0026rsquo;t rate something, we do not include that in the vector).\u003c/p\u003e\n\u003cp\u003eafter this, we can compute our normal cosine similarity; remember to normalise.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eprediction\u003c/p\u003e\n\u003cp\u003efinally, after we got our \\(N\\), we can return our prediction for \\(I\\) either based on an average score of the similar users retrieved in \\(N\\) or average weighted of scores in \\(N\\) weighted by similarity to our target user \\(x\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{xi} = \\frac{1}{N} \\sum_{}^{} r_{yi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{xi} = \\sum_{}^{} \\frac{sim(x,y) r_{yi}}{sim(x,y)}\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"item-item-collaborative-filtering--orga6897ce\"\u003eitem-item \u003ca href=\"#collaborative-filtering\"\u003ecollaborative filtering\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eFor item \\(i\\), we want to find other similar \u003cstrong\u003eitems\u003c/strong\u003e to our item \\(i\\), and average the user\u0026rsquo;s own ratings on those similar items onto \\(i\\).\u003c/p\u003e\n\u003cp\u003ethis tends to work better because items are easier to classify than users.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eproblem\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecold start (we need initial data to seed the rating)\u003c/li\u003e\n\u003cli\u003esparsity (user ratings are sparse)\u003c/li\u003e\n\u003cli\u003epopularity bias\u0026mdash;creates filter bubbles and hard to generalize over unique tastes\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"latent-factor--neural--systems\"\u003elatent factor (neural) systems\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003erepresent 
each video and user as an embedding\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#collaborative-filtering\"\u003ecollaborative filtering\u003c/a\u003e.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYouTube obtains this embedding by predicting what video user is going to watch\u003c/p\u003e\n\u003ch2 id=\"evaluation\"\u003eevaluation\u003c/h2\u003e\n\u003cp\u003eRMSE between held out ratings:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{\\frac{\\sum_{xi}^{}(r_{xi} - r^{*}_{xi})^{2}}{N}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrecommender_system/","tags":null,"title":"Recommender System"},{"categories":null,"contents":"reduce reduces compute time for list-based operations: it changes a linear series of events to divide-and-conquer so you can parallelize it\nin theory reduce only works for associative operations?\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhreduce/\"\u003ereduce\u003c/a\u003e reduces compute time for list-based operations: it changes a linear series of events to divide-and-conquer so you can parallelize it\u003c/p\u003e\n\u003cp\u003ein theory \u003ca href=\"/posts/kbhreduce/\"\u003ereduce\u003c/a\u003e only works for associative operations?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreduce/","tags":null,"title":"reduce"},{"categories":null,"contents":"in NSM, reductive paraphrase is the act of reducing all utterances in a language into semantic primes.\nThis is usually done with the application of an inherent, universal grammar: the conceptual grammar of semantic primes.\nproblems with reductive paraphrasing In the experiment conducted by (Labov 1973), Labov (according to (Geeraerts 2009), manuscript not found) showed that the boundaries of cup vs. 
mug are not clearly delineated.\n","html":"\u003cp\u003ein \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e, \u003ca href=\"/posts/kbhreductive_paraphrase/\"\u003ereductive paraphrase\u003c/a\u003e is the act of reducing all utterances in a language into \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eThis is usually done with the application of an inherent, universal grammar: the \u003ca href=\"/posts/kbhconceptual_grammar/\"\u003econceptual grammar\u003c/a\u003e of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"problems-with-reductive-paraphrasing\"\u003eproblems with reductive paraphrasing\u003c/h2\u003e\n\u003cp\u003eIn the experiment conducted by (\u003ca href=\"#citeproc_bib_item_2\"\u003eLabov 1973\u003c/a\u003e), Labov (according to (\u003ca href=\"#citeproc_bib_item_1\"\u003eGeeraerts 2009\u003c/a\u003e), manuscript not found) showed that the boundaries of cup vs. mug are not clearly delineated.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreductive_paraphrase/","tags":null,"title":"reductive paraphrase"},{"categories":null,"contents":"Thanks for opening Jack\u0026rsquo;s long rambly PDF. Please read all of it; I wanted to get this out there before anything else so I apologize in advance for a letter that\u0026rsquo;s on the longer side and I didn\u0026rsquo;t have time to write a shorter one.\nBefore you begin, please read Michael\u0026rsquo;s AMAZING notes on our pitch to get the context. It\u0026rsquo;s amazing. I will not repeat here anything mentioned there.\nPat yourself on the back Oh god was that a difficult semester. We got through many a challenges and worked together to solve most of them. That\u0026rsquo;s cool. 
We also built a thing that the XRT team liked; so that\u0026rsquo;s cool too.\nSome of you (in the meeting) will already have known, but we are greenlit to go into phase -1! What does that mean? What changes? How can you help? Will meetings finally end on time? When will Jack finish asking silly questions? Find out more\u0026hellip; below.\nBut not too hard Just to reiterate our master deliverable as a team (like how this pitch is culminating the deliverable assigned to us on 1/6), we have until July 8th, 2022 to pitch, again:\nWhat exactly are we doing, in one line, in laymen\u0026rsquo;s terms? Why is it helpful? Clarify the roles and responsibilities for the \u0026ldquo;master faculty member\u0026rdquo;, what time commitments and value they add, and what they have to drop to support the program How can we derive legitimacy for what we are doing? (see below) For me, he also added the derivable of talking more slowly. Presumably, De wants us to come with a glossy pitch too.\nBe legit Why do we need \u0026ldquo;legitimacy\u0026rdquo;? We need motivation for kids to do this, and Nueva\u0026rsquo;s rubber stamp would be a good way to do so. this is the focus of how we are asking Lisa to greenlight phase 2 (see below)\nA valid answer for \u0026ldquo;legitimacy\u0026rdquo; is \u0026ldquo;adding the list of skills students achieved on their transcript.\u0026rdquo; Is this a good answer? Not at the moment. Its very unmotivated (this response does not pass the \u0026ldquo;why is that helpful?\u0026rdquo; test).\nAnd follow the yellow-brick road There is going to be a three stage roadmap.\nPhase -1: developing answers to PREPARE to pitch to Liza the idea, asking her to give feedback WITHOUT any of the \u0026ldquo;asks\u0026rdquo; (legitimacy, faculty time, etc.) Phase 1: building a down-scaled version of the program somewhere. Ted has mentioned interest in this, so we maybe able to co-opt some or all of his classes. 
Developing details and proof-of-feasibility to pitch to Liza again, this time WITH the asks to roll out to the whole school Phase 2: roll out to the whole school and prey to the Research Gods But not the leader I can\u0026rsquo;t be around forever. We are in phase -1; I will probably be gone in the middle of phase 1. We will probably have to have a faculty supporting this program unofficially for sometime, which will be a big ask.\nThis means we have to make some program changes in anticipation\u0026mdash;\nSeek a corpus callosotomy \u0026ldquo;R@N\u0026rdquo; is now separated form \u0026ldquo;Nueva Research Program.\u0026rdquo; \u0026ldquo;R@N\u0026rdquo;\u0026rsquo;s purpose is a working group to build the \u0026ldquo;Nueva Research Program.\u0026rdquo;\nWe need to separate the two as soon as possible, so that means soon. As soon as after the 7/8 deadline, I hope to make this happen. This means changes changes to our leadership structure.\nAs node A.2 outlines, \u0026ldquo;Nueva Research Program\u0026rdquo; meetings have three stable positions.\nTeams’ Stable — Responsible for managing the count, content, and quality of active Research at Nueva projects, as well as the proces of matching team members to teams. (2-3 hrs/wk) Content Stable — Responsible for managing the content of the training program and review teams. Responsible for updating nodes. Runs meetings. (1-2 hrs/wk) Participant Stable — Responsible for managing the count and recruitment of new students into the program, and identifying key experts and mentors to help build new nodes or support the program. Responsible for participant sheet (1-2 hrs/wk) As well as three review teams\nHypothesis Sciences (key mentor: TBD) Non-Hypothesis Sciences (key mentor: Ted) Literacy, Soft Skills, and Development (key mentor: TBD) In a meeting (TBD) before 7/8, we will organize ourselves into three pairs again. 
Each pair will choose one \u0026ldquo;stable\u0026rdquo; role and one \u0026ldquo;review team\u0026rdquo; role\u0026mdash;essentially acting as a joint-power head for the new program and a review team in itself.\nWe will split our meetings from then on in half; the first bit dealing with R@N, which I will run; the second, ACTUALLY DOING Nueva Research Programs\u0026rsquo; work, lead by the \u0026ldquo;content stable\u0026rdquo; team. This also means that we will separate the two work docs.\nOh, yeah, also, if you have gotten this far; the headings of this document forms a pretty bad poem. Please send this poem to me privately on a direct message. Thank you.\nPresumably, much of the early \u0026ldquo;nueva research program\u0026rdquo; meetings will be solely the participant stable thinking about recruiting metrics and content stable voting on new nodes. That\u0026rsquo;s OK. The protocol\u0026rsquo;s there to be changed if needed.\nBut not without your consent Although we want each and every one of you on the team (evidenced by the fact that we will be pretty screwed if anyone leaves), your main academics comes first. Please talk to me privately if you have any concerns, no harm no foul.\nAlrighty. Let\u0026rsquo;s find a time to meet.\nhttps://www.when2meet.com/?15887080-XHXI8\nI kinda want to meet y\u0026rsquo;all physically over coffee if you want; but if not virtual is all good.\nThanks again for everything!\n\u0026mdash;Jack\n","html":"\u003cp\u003eThanks for opening Jack\u0026rsquo;s long rambly PDF. Please read all of it; I wanted to get this out there before anything else so I apologize in advance for a letter that\u0026rsquo;s on the longer side and I didn\u0026rsquo;t have time to write a shorter one.\u003c/p\u003e\n\u003cp\u003eBefore you begin, please read \u003ca href=\"https://docs.google.com/document/d/1ZvE4QGFjhR6VeujNzejh1AcmILkLUiYLY0DwKSGsLPs/edit\"\u003eMichael\u0026rsquo;s AMAZING notes on our pitch\u003c/a\u003e to get the context. 
It\u0026rsquo;s amazing. I will not repeat here anything mentioned there.\u003c/p\u003e\n\u003ch2 id=\"pat-yourself-on-the-back\"\u003ePat yourself on the back\u003c/h2\u003e\n\u003cp\u003eOh god was that a difficult semester. We got through many a challenges and worked together to solve most of them. That\u0026rsquo;s cool. We also built a thing that the XRT team liked; so that\u0026rsquo;s cool too.\u003c/p\u003e\n\u003cp\u003eSome of you (in the meeting) will already have known, but we are greenlit to go into phase -1! What does that mean? What changes? How can you help? Will meetings finally end on time? When will Jack finish asking silly questions? Find out more\u0026hellip; below.\u003c/p\u003e\n\u003ch2 id=\"but-not-too-hard\"\u003eBut not too hard\u003c/h2\u003e\n\u003cp\u003eJust to reiterate our master deliverable as a team (like how this pitch is culminating the deliverable assigned to us on 1/6), we have until \u003cstrong\u003e\u003cstrong\u003eJuly 8th, 2022\u003c/strong\u003e\u003c/strong\u003e to pitch, again:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWhat exactly are we doing, in one line, in laymen\u0026rsquo;s terms? Why is it helpful?\u003c/li\u003e\n\u003cli\u003eClarify the roles and responsibilities for the \u0026ldquo;master faculty member\u0026rdquo;, what time commitments and value they add, and what they have to drop to support the program\u003c/li\u003e\n\u003cli\u003eHow can we derive legitimacy for what we are doing? (see below)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFor me, he also added the derivable of talking more slowly. Presumably, De wants us to come with a glossy pitch too.\u003c/p\u003e\n\u003ch2 id=\"be-legit\"\u003eBe legit\u003c/h2\u003e\n\u003cp\u003eWhy do we need \u0026ldquo;legitimacy\u0026rdquo;? We need motivation for kids to do this, and Nueva\u0026rsquo;s rubber stamp would be a good way to do so. 
\u003cstrong\u003e\u003cstrong\u003ethis is the focus of how we are asking Lisa to greenlight phase 2\u003c/strong\u003e\u003c/strong\u003e (see below)\u003c/p\u003e\n\u003cp\u003eA valid answer for \u0026ldquo;legitimacy\u0026rdquo; is \u0026ldquo;adding the list of skills students achieved on their transcript.\u0026rdquo; Is this a good answer? Not at the moment. Its very unmotivated (this response does not pass the \u0026ldquo;why is that helpful?\u0026rdquo; test).\u003c/p\u003e\n\u003ch2 id=\"and-follow-the-yellow-brick-road\"\u003eAnd follow the yellow-brick road\u003c/h2\u003e\n\u003cp\u003eThere is going to be a three stage roadmap.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ePhase -1\u003c/strong\u003e: developing answers to PREPARE to pitch to Liza the idea, asking her to give feedback WITHOUT any of the \u0026ldquo;asks\u0026rdquo; (legitimacy, faculty time, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ePhase 1\u003c/strong\u003e: building a down-scaled version of the program somewhere. Ted has mentioned interest in this, so we maybe able to co-opt some or all of his classes. Developing details and proof-of-feasibility to pitch to Liza again, this time WITH the asks to roll out to the whole school\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ePhase 2\u003c/strong\u003e: roll out to the whole school and prey to the Research Gods\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"but-not-the-leader\"\u003eBut not the leader\u003c/h2\u003e\n\u003cp\u003eI can\u0026rsquo;t be around forever. We are in phase -1; I will probably be gone in the middle of phase 1. 
We will probably have to have a faculty supporting this program unofficially for sometime, which will be a big ask.\u003c/p\u003e\n\u003cp\u003eThis means we have to make some program changes in anticipation\u0026mdash;\u003c/p\u003e\n\u003ch2 id=\"seek-a-corpus-callosotomy\"\u003eSeek a corpus callosotomy\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;R@N\u0026rdquo; is now separated form \u0026ldquo;Nueva Research Program.\u0026rdquo; \u0026ldquo;R@N\u0026rdquo;\u0026rsquo;s purpose is a working group to build the \u0026ldquo;Nueva Research Program.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe need to separate the two as soon as possible, so that means soon. As soon as after the 7/8 deadline, I hope to make this happen. This means changes changes to our leadership structure.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"https://docs.google.com/document/d/1UgOiVyKE0iixSNyrbh35Y3zDHfI7-eGgBoAZ0tMlDK0/edit\"\u003enode A.2\u003c/a\u003e outlines, \u0026ldquo;Nueva Research Program\u0026rdquo; meetings have three stable positions.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTeams’ Stable — Responsible for managing the count, content, and quality of active Research at Nueva projects, as well as the proces of matching team members to teams. (2-3 hrs/wk)\u003c/li\u003e\n\u003cli\u003eContent Stable — Responsible for managing the content of the training program and review teams. Responsible for updating nodes. Runs meetings. (1-2 hrs/wk)\u003c/li\u003e\n\u003cli\u003eParticipant Stable — Responsible for managing the count and recruitment of new students into the program, and identifying key experts and mentors to help build new nodes or support the program. 
Responsible for participant sheet (1-2 hrs/wk)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs well as three review teams\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eHypothesis Sciences (key mentor: TBD)\u003c/li\u003e\n\u003cli\u003eNon-Hypothesis Sciences (key mentor: Ted)\u003c/li\u003e\n\u003cli\u003eLiteracy, Soft Skills, and Development (key mentor: TBD)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn a meeting (TBD) before 7/8, we will organize ourselves into three pairs again. Each pair will choose one \u0026ldquo;stable\u0026rdquo; role and one \u0026ldquo;review team\u0026rdquo; role\u0026mdash;essentially acting as a joint-power head for the new program and a review team in itself.\u003c/p\u003e\n\u003cp\u003eWe will split our meetings from then on in half; the first bit dealing with R@N, which I will run; the second, ACTUALLY DOING Nueva Research Programs\u0026rsquo; work, lead by the \u0026ldquo;content stable\u0026rdquo; team. This also means that we will separate the two work docs.\u003c/p\u003e\n\u003cp\u003eOh, yeah, also, if you have gotten this far; the headings of this document forms a pretty bad poem. Please send this poem to me privately on a direct message. Thank you.\u003c/p\u003e\n\u003cp\u003ePresumably, much of the early \u0026ldquo;nueva research program\u0026rdquo; meetings will be solely the participant stable thinking about recruiting metrics and content stable voting on new nodes. That\u0026rsquo;s OK. The protocol\u0026rsquo;s there to be changed if needed.\u003c/p\u003e\n\u003ch2 id=\"but-not-without-your-consent\"\u003eBut not without your consent\u003c/h2\u003e\n\u003cp\u003eAlthough we want each and every one of you on the team (evidenced by the fact that we will be pretty screwed if anyone leaves), your main academics comes first. 
Please talk to me privately if you have any concerns, no harm no foul.\u003c/p\u003e\n\u003ch2 id=\"alrighty-dot\"\u003eAlrighty.\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s find a time to meet.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.when2meet.com/?15887080-XHXI8\"\u003ehttps://www.when2meet.com/?15887080-XHXI8\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eI kinda want to meet y\u0026rsquo;all physically over coffee if you want; but if not virtual is all good.\u003c/p\u003e\n\u003cp\u003eThanks again for everything!\u003c/p\u003e\n\u003cp\u003e\u0026mdash;Jack\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch_at_nueva_notes_06_09_2022/","tags":null,"title":"Regarding R@N"},{"categories":null,"contents":"regular expressions\ndid you know you can do matching inline too matching equivalent statements: test (\\w+) \\1; non-capture group (?:test)\nlookaheads (?=pattern) true if pattern matches, but doesn\u0026rsquo;t touch the character pointer (?!pattern) true if pattern doesn\u0026rsquo;t match; also doesn\u0026rsquo;t advance pointer (?:pattern) will advance character pointer but will not create a capture group ^beginning of line end of line$ ","html":"\u003cp\u003eregular expressions\u003c/p\u003e\n\u003cp\u003edid you know you can do matching inline too matching equivalent statements: \u003ccode\u003etest (\\w+) \\1\u003c/code\u003e; non-capture group \u003ccode\u003e(?:test)\u003c/code\u003e\u003c/p\u003e\n\u003ch2 id=\"lookaheads\"\u003elookaheads\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e(?=pattern) true if pattern matches, but doesn\u0026rsquo;t touch the character pointer\u003c/li\u003e\n\u003cli\u003e(?!pattern) true if pattern doesn\u0026rsquo;t match; also doesn\u0026rsquo;t advance pointer\u003c/li\u003e\n\u003cli\u003e(?:pattern) will advance character pointer but will not create a capture group\u003c/li\u003e\n\u003cli\u003e^beginning of line\u003c/li\u003e\n\u003cli\u003eend of 
line$\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhregex/","tags":null,"title":"regex"},{"categories":null,"contents":" zinc binds to zur zur inhibits zinc uptake channels zinc uptake channel gets zoped ","html":"\u003col\u003e\n\u003cli\u003ezinc binds to zur\u003c/li\u003e\n\u003cli\u003ezur inhibits zinc uptake channels\u003c/li\u003e\n\u003cli\u003ezinc uptake channel gets zoped\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhregulating_zinc_uptake/","tags":null,"title":"regulating zinc uptake"},{"categories":null,"contents":"reinforcement learning is a decision making method with no known model of the environment at all.\nagent interacts with environment directly designer provide a performance measure of the agent in the environment agent tries to optimize the decision making algorithm to maximise the performance measure Note: agent\u0026rsquo;s own choice of action, in this case, actually influences how the environment works (and what futures the agent sees). So the agent\u0026rsquo;s actions will influence the environment outcomes\ncontrast v. explicit programming v. planning Note 2: look ma, no model! unlike optimization, reinforcement learning tasks does not require an optimization objective connected to a model of the environment where we know what knobs to turn. 
Instead, the objective is a literal performance of how the agent is doing in the actual environment.\ncontents model-based reinforcement learning model-free reinforcement learning ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e is a \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003edecision making method\u003c/a\u003e with no known model of the environment at all.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e interacts with environment directly\u003c/li\u003e\n\u003cli\u003edesigner provide a performance measure of the agent in the environment\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e tries to optimize the \u003ca href=\"/posts/kbhdecision_making/\"\u003edecision making\u003c/a\u003e algorithm to maximise the performance measure\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eNote: \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e\u0026rsquo;s own choice of action, in this case, actually influences how the environment works (and what futures the agent sees). So the agent\u0026rsquo;s actions will influence the environment outcomes\u003c/p\u003e\n\u003ch2 id=\"contrast-v-dot-explicit-programming-v-dot-planning\"\u003econtrast v. explicit programming v. planning\u003c/h2\u003e\n\u003cp\u003eNote 2: \u003cstrong\u003elook ma, no model!\u003c/strong\u003e unlike \u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e, \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e tasks does not require an optimization objective connected to a model of the environment where we know what knobs to turn. 
Instead, the objective is a literal performance of how the \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e is doing in the actual environment.\u003c/p\u003e\n\u003ch2 id=\"contents\"\u003econtents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/\"\u003emodel-free reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreinforcement_learning/","tags":null,"title":"reinforcement learning"},{"categories":null,"contents":"Sample a ton; perform factor conditioning; then count the observation you\u0026rsquo;d like.\nby rejecting those who don\u0026rsquo;t match\n","html":"\u003cp\u003eSample a ton; perform \u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e; then count the observation you\u0026rsquo;d like.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-01_23-25-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eby rejecting those who don\u0026rsquo;t match\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrejection_sampling/","tags":null,"title":"Rejection Sampling"},{"categories":null,"contents":"Let \\(X \\sim \\mathcal{N}\\).\n\u0026ldquo;How much more likely is \\(x=10\\) than \\(x=5\\)?\u0026rdquo;\nWe note that \\(P(x=value) = 0\\) for any value if \\(X\\) is continuous. However, we can still get an answer:\n\\begin{equation} \\frac{\\dd{X} P(x=10)}{\\dd{X} P(x=5)} \\end{equation}\nthese two things cancel out. 
Therefore, you can just divide the PDF:\n\\begin{equation} \\frac{f(x=10)}{f(x=5)} \\end{equation}\n","html":"\u003cp\u003eLet \\(X \\sim \\mathcal{N}\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;How much more likely is \\(x=10\\) than \\(x=5\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe note that \\(P(x=value) = 0\\) for any value if \\(X\\) is continuous. However, we can still get an answer:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\dd{X} P(x=10)}{\\dd{X} P(x=5)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethese two things cancel out. Therefore, you can just divide the \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{f(x=10)}{f(x=5)}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrelative_probability/","tags":null,"title":"relative probability"},{"categories":null,"contents":"background info Recall asymtotic analysis. We remember that:\nconstant time \u0026lt; logarithmic time \u0026lt; linear time \u0026lt; polynomial time \u0026lt; exponential time The question? What happens if dynamic programming is too slow/not good enough for the problem? What if dynamic programming is not needed; instead, why don\u0026rsquo;t we just settle for a pretty good solution?\nTake, for instance, Nueva Courses. The optimal solution is \u0026ldquo;most students get their highest possible preferences.\u0026rdquo; However, this is impractical and pretty much impossible. Instead, what if we endeavor to figure a schedule that generally maximize happiness?\nrelaxation methods constraint relaxation constraint relaxation is a relaxation method to remove extra constraints.\nMotivating problem: traveling salesman problem\nVisit all towns in a given location Travel the minimal distance to do so Cannot visit any town more than once Calculating the basic, naive solution to find all roads is \\(O(n!)\\). 
Best known solution is \\(O(2^nn^2)\\), which is still slow. Its also an \\(NP\\) hard problem.\nHence, to actually solve it in a reasonable time, we are going to make two relaxations.\nThe salesmen can visit a town more than once The salesmen can teleport to visited towns By these two relations, we convert traveling salesmen to the minimum spanning tree problem.\nWe now (how?) that solving MST is no worse than optimal TSP. We will solve MST, then use that problem as the upper bound of solution to TSP.\ncontinuous relaxation continuous relaxation is a relaxation method to convert difficult discrete problems into continuous ones.\nMotivating problem: set cover\nYou are having a party, and you want your friends to get a nice paper invite.\nyou will send invitations to some subsets of your friends tell them to send invitations to all your mutual friends with them What\u0026rsquo;s the minimum number of friends to invite, and who?\nSet-cover is also hard, and also NP hard. The problem is that sending invitation is discrete.\nHence, to solve, we make it possible to solve for fractions of invitations. Hence, we can prove that our solution is guaranteed to be within bounds\nLagrangian relaxation Lagrangian relaxation is a relaxation method to convert hard-limit constrains into flexible penalization (negative values).\nMotivating problem: shortest paths problem with a constraint.\nYou need to drive the shortest number of miles as well as doing it in a hard constraint to complete the solution in a certain time.\nWe can instead relax the problem into overtime driving being a negative value in the solution.\n","html":"\u003ch2 id=\"background-info\"\u003ebackground info\u003c/h2\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhasymtotic_analysis/\"\u003easymtotic analysis\u003c/a\u003e. 
We remember that:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econstant time \u0026lt; logarithmic time \u0026lt; linear time \u0026lt; polynomial time \u0026lt; exponential time\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe question? What happens if \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e is too slow/not good enough for the problem? What if \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e is not needed; instead, why don\u0026rsquo;t we just settle for a pretty good solution?\u003c/p\u003e\n\u003cp\u003eTake, for instance, \u003ca href=\"/posts/kbhnueva_courses_index/\"\u003eNueva Courses\u003c/a\u003e. The optimal solution is \u0026ldquo;most students get their highest possible preferences.\u0026rdquo; However, this is impractical and pretty much impossible. 
Instead, what if we endeavor to figure a schedule that generally maximize happiness?\u003c/p\u003e\n\u003ch2 id=\"relaxation-methods\"\u003erelaxation methods\u003c/h2\u003e\n\u003ch3 id=\"constraint-relaxation\"\u003econstraint relaxation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#constraint-relaxation\"\u003econstraint relaxation\u003c/a\u003e is a \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e method to remove extra constraints.\u003c/p\u003e\n\u003cp\u003eMotivating problem: traveling salesman problem\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eVisit all towns in a given location\u003c/li\u003e\n\u003cli\u003eTravel the minimal distance to do so\u003c/li\u003e\n\u003cli\u003eCannot visit any town more than once\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCalculating the basic, naive solution to find all roads is \\(O(n!)\\). Best known solution is \\(O(2^nn^2)\\), which is still slow. Its also an \\(NP\\) hard problem.\u003c/p\u003e\n\u003cp\u003eHence, to actually solve it in a reasonable time, we are going to make two relaxations.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThe salesmen can visit a town more than once\u003c/li\u003e\n\u003cli\u003eThe salesmen can teleport to visited towns\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBy these two relations, we convert traveling salesmen to the \u003ca href=\"/posts/kbhminimum_spanning_tree/\"\u003eminimum spanning tree\u003c/a\u003e problem.\u003c/p\u003e\n\u003cp\u003eWe now (how?) that solving MST is no worse than optimal TSP. 
We will solve MST, then use that problem as the upper bound of solution to TSP.\u003c/p\u003e\n\u003ch3 id=\"continuous-relaxation\"\u003econtinuous relaxation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#continuous-relaxation\"\u003econtinuous relaxation\u003c/a\u003e is a \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e method to convert difficult discrete problems into continuous ones.\u003c/p\u003e\n\u003cp\u003eMotivating problem: set cover\u003c/p\u003e\n\u003cp\u003eYou are having a party, and you want your friends to get a nice paper invite.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eyou will send invitations to some subsets of your friends\u003c/li\u003e\n\u003cli\u003etell them to send invitations to all your mutual friends with them\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat\u0026rsquo;s the minimum number of friends to invite, and who?\u003c/p\u003e\n\u003cp\u003eSet-cover is also hard, and also NP hard. The problem is that sending invitation is discrete.\u003c/p\u003e\n\u003cp\u003eHence, to solve, we make it possible to solve for fractions of invitations. 
Hence, we can prove that our solution is guaranteed to be within bounds\u003c/p\u003e\n\u003ch3 id=\"lagrangian-relaxation\"\u003eLagrangian relaxation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#lagrangian-relaxation\"\u003eLagrangian relaxation\u003c/a\u003e is a \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e method to convert hard-limit constrains into flexible penalization (negative values).\u003c/p\u003e\n\u003cp\u003eMotivating problem: shortest paths problem with a constraint.\u003c/p\u003e\n\u003cp\u003eYou need to drive the shortest number of miles as well as doing it in a hard constraint to complete the solution in a certain time.\u003c/p\u003e\n\u003cp\u003eWe can instead relax the problem into overtime driving being a negative value in the solution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrelaxation_algorithums/","tags":null,"title":"relaxation (algorithms)"},{"categories":null,"contents":"replication is the exact copying of cell information\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhreplication/\"\u003ereplication\u003c/a\u003e is the exact copying of cell information\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreplication/","tags":null,"title":"replication"},{"categories":null,"contents":"In this experiment, a model was devised, trained, and evaluated to automate psychotherapist/client text conversations through the use of state-of-the-art, Seq2Seq Transformer-based Natural Language Generation (NLG) systems. 
Through training the model upon a mix of the Cornell Movie Dialogue Corpus for language understanding and an open-source, anonymized, and public licensed psychotherapeutic dataset, the model achieved statistically significant performance in published, standardized qualitative benchmarks against human-written validation data - meeting or exceeding human-written responses\u0026rsquo; performance in 59.7% and 67.1% of the test set for two independent test methods respectively. Although the model cannot replace the work of psychotherapists entirely, its ability to synthesize human-appearing utterances for the majority of the test set serves as a promising step towards communizing and easing stigma at the psychotherapeutic point-of-care.\n","html":"\u003cp\u003eIn this experiment, a model was devised, trained, and evaluated to automate psychotherapist/client text conversations through the use of state-of-the-art, Seq2Seq Transformer-based Natural Language Generation (NLG) systems. Through training the model upon a mix of the Cornell Movie Dialogue Corpus for language understanding and an open-source, anonymized, and public licensed psychotherapeutic dataset, the model achieved statistically significant performance in published, standardized qualitative benchmarks against human-written validation data - meeting or exceeding human-written responses\u0026rsquo; performance in 59.7% and 67.1% of the test set for two independent test methods respectively. 
Although the model cannot replace the work of psychotherapists entirely, its ability to synthesize human-appearing utterances for the majority of the test set serves as a promising step towards communizing and easing stigma at the psychotherapeutic point-of-care.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreplier_abstract/","tags":null,"title":"Replier Abstract"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrepresentation_learning/","tags":null,"title":"representation learning"},{"categories":null,"contents":"Instead of calculating:\n\\begin{equation} \\qty( \\frac{52! -1}{52!} ) \\end{equation}\nWe calculate the log of it because then you are able to write:\n\\begin{equation} \\log \\qty( \\frac{52! -1}{52!} ) = \\log (52! - 1) - \\log(52!) \\end{equation}\nwhich won\u0026rsquo;t be rounded to \\(0\\).\n","html":"\u003cp\u003eInstead of calculating:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty( \\frac{52! -1}{52!} )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe calculate the log of it because then you are able to write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log \\qty( \\frac{52! -1}{52!} ) = \\log (52! 
- 1) - \\log(52!)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich won\u0026rsquo;t be rounded to \\(0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrepresenting_large_computation/","tags":null,"title":"Representing Large Computation"},{"categories":null,"contents":"Requirements Analysis is how to satisfy your persnickety users while keeping your fucking app simple.\nGoal The broad goal of Requirements Analysis is to come up with a spec that is:\nDocumented Actionable Measurable Testable Traceable Defined with details Satisfies business goals Timing Requirements Analysis should be performed when\nCalculating costs Setting priorities Creating breakdowns Including specialists Steps Gather requirements by doing User Interviews Analyze the requirements for clarity, completeness, consistency, and lack of conflicts Write them down and implement them Tools Gap Analysis Analyze the difference between where the business is at and its stated goals; figure out how the goals can be closed.\nBusiness Motivation Model (BMM) Does it matter?\nhttps://www.omg.org/spec/BMM/1.3/PDF\nSomeone decided to throw XML to running a company. 
Why.\nCustomer Journey Modeling See Customer Journey Map\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e is how to satisfy your persnickety users while keeping your fucking app simple.\u003c/p\u003e\n\u003ch2 id=\"goal\"\u003eGoal\u003c/h2\u003e\n\u003cp\u003eThe broad goal of \u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e is to come up with a spec that is:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eDocumented\u003c/li\u003e\n\u003cli\u003eActionable\u003c/li\u003e\n\u003cli\u003eMeasurable\u003c/li\u003e\n\u003cli\u003eTestable\u003c/li\u003e\n\u003cli\u003eTraceable\u003c/li\u003e\n\u003cli\u003eDefined with details\u003c/li\u003e\n\u003cli\u003eSatisfies business goals\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"timing\"\u003eTiming\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e should be performed when\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCalculating costs\u003c/li\u003e\n\u003cli\u003eSetting priorities\u003c/li\u003e\n\u003cli\u003eCreating breakdowns\u003c/li\u003e\n\u003cli\u003eIncluding specialists\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"steps\"\u003eSteps\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eGather requirements by doing \u003ca href=\"/posts/kbhuser_interviews/\"\u003eUser Interviews\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAnalyze the requirements for clarity, completeness, consistency, and lack of conflicts\u003c/li\u003e\n\u003cli\u003eWrite them down and implement them\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"tools\"\u003eTools\u003c/h2\u003e\n\u003ch3 id=\"gap-analysis\"\u003eGap Analysis\u003c/h3\u003e\n\u003cp\u003eAnalyze the difference between where the business is at and its stated goals; figure out how the goals can be closed.\u003c/p\u003e\n\u003ch3 id=\"business-motivation-model--bmm\"\u003eBusiness Motivation Model 
(BMM)\u003c/h3\u003e\n\u003cp\u003eDoes it matter?\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.omg.org/spec/BMM/1.3/PDF\"\u003ehttps://www.omg.org/spec/BMM/1.3/PDF\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-09_21-32-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSomeone decided to throw XML to running a company. Why.\u003c/p\u003e\n\u003ch3 id=\"customer-journey-modeling\"\u003eCustomer Journey Modeling\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhcustomer_journey_map/\"\u003eCustomer Journey Map\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrequirements_analysis/","tags":null,"title":"Requirements Analysis"},{"categories":null,"contents":"Learning to ask questions in a reasonable way. Changing you from a consumer of knowledge to a producer of researcher.\nThe Why of Research from Brian Thomas Your discipline is not your topic You should do research to find out whether or not you have chosen the right area of focus You can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise Research is the act of taming unruly problems ","html":"\u003cp\u003eLearning to ask questions in a reasonable way. 
Changing you from a consumer of knowledge to a producer of researcher.\u003c/p\u003e\n\u003ch2 id=\"the-why-of-research-from-brian-thomas--kbhstanford-ug-research-program-dot-md\"\u003eThe Why of Research from \u003ca href=\"/posts/kbhstanford_ug_research_program/\"\u003eBrian Thomas\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eYour discipline is not your topic\u003c/li\u003e\n\u003cli\u003eYou should do research to find out whether or not you have chosen the right area of focus\u003c/li\u003e\n\u003cli\u003eYou can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise\u003c/li\u003e\n\u003cli\u003eResearch is the act of taming unruly problems\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch/","tags":null,"title":"Research"},{"categories":null,"contents":"Mykel\u0026rsquo;s Research Tips!\n\u0026ldquo;we are not trying to sell our ideas, we are trying to sell understanding\u0026rdquo;\nLevels of Critique high level: \u0026ldquo;is this problem important/can it scale?\u0026rdquo; mid level: \u0026ldquo;do the experiments show what is claimed\u0026rdquo; low level: typography, grammar, style State contributions before and after.\nGood Presentations not too stuffy nor casual frequent use of graphics you don\u0026rsquo;t want bullets with more than 2 lines clear, upfront objective of the paper everything was understanding during presentation: timing presentations such that its digestible as drinking down Time Management Randy Pausch\u0026rsquo;s time management lecture.\noptimize for fun \u0026ldquo;why am I doing this?\u0026rdquo; have \u0026ldquo;you can always change your plan, but only if you have one\u0026rdquo; SEPARATE: email, to-do list, calendar\nWriting a Paper Jennifer Widom: how to write a paper\nPackages to Use Plots \u0026ldquo;using matlab to screenshot a plot is\u0026hellip; an automatic F. 
If you want to have A quality work, you can use pgfplot. Or you can use a pgfplots backend.\u0026rdquo;\nimport tikzplotlib Tables No vertical linens\nhttp://people.inf.ethz.ch/markusp/teaching/guides/guide-tables.pdf\n\\toprule\nPossibly: PGFPlotsTable. TikZ.\nAlgos algorithmicx\nCaptions subcaptions\nUnits siunitx\nCode minted, or\u0026mdash;code executation\u0026mdash;pythontex\nReferences \u0026ldquo;cleveref\u0026rdquo;: tell you what it is, and give you back with the \u0026ldquo;Fig. #N\u0026rdquo; informatino.\nGood Presentation powerpoint tedx talk give strong technical presentations by Markus Puschel General Tips separate the problem from the solution before presenting the solution number slides! also include total number of slides one slide per minute one liners are best, two liners are ok, three + are bad Transitions are hard: don\u0026rsquo;t tap on a slide and go \u0026ldquo;woah\u0026rdquo;; pre-cache first sentence of each slide.\nOverview AFTER the motivation.\nReference Handling biblatex: bibtex with postprocessing the .tex sislstrings.bib: mykel\u0026rsquo;s conference list for .bib JabRef PhD Thesis http://www.feron.org/Eric/PhD_characterization_2.htm\n\u0026ldquo;Cool Theorems and New Methods\u0026rdquo; \u0026ldquo;Cool Methods and Predictions\u0026rdquo; \u0026ldquo;Beautiful Demonstrations\u0026rdquo; \u0026ldquo;Cool engineering ideas\u0026rdquo; \u0026ldquo;How to Write a Paper\u0026rdquo; https://cs.stanford.edu/people/widom/paper-writing.html\nwhat\u0026rsquo;s the problem why is it interesting and important? why is it hard? why hasn\u0026rsquo;t been solved before/what\u0026rsquo;s wrong with previous solutions? what are the key components of my approach and results? You want the intro to end near the end of the first page or near the end of the second page. 
Always lead with the problem.\nMathematical Writing \u0026ldquo;CS209 mathematical writing\u0026rdquo;\nDon\u0026rsquo;t start a sentence with a symbol.\nDon\u0026rsquo;t use \u0026ldquo;utilize\u0026rdquo;.\nAuthorship talk about it early universities have authorship inclusion deadline Complexity complexity should be justified (why does simpler method\u0026rsquo;s not work?) ","html":"\u003cp\u003eMykel\u0026rsquo;s Research Tips!\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;we are not trying to sell our ideas, we are trying to sell understanding\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"levels-of-critique\"\u003eLevels of Critique\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehigh level: \u0026ldquo;is this problem important/can it scale?\u0026rdquo;\u003c/li\u003e\n\u003cli\u003emid level: \u0026ldquo;do the experiments show what is claimed\u0026rdquo;\u003c/li\u003e\n\u003cli\u003elow level: typography, grammar, style\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eState contributions before and after.\u003c/p\u003e\n\u003ch2 id=\"good-presentations\"\u003eGood Presentations\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003enot too stuffy nor casual\u003c/li\u003e\n\u003cli\u003efrequent use of graphics\u003c/li\u003e\n\u003cli\u003eyou don\u0026rsquo;t want bullets with more than 2 lines\u003c/li\u003e\n\u003cli\u003eclear, upfront objective of the paper\u003c/li\u003e\n\u003cli\u003eeverything was understanding \u003cstrong\u003eduring\u003c/strong\u003e presentation: timing presentations such that its digestible as drinking down\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"time-management\"\u003eTime Management\u003c/h2\u003e\n\u003cp\u003eRandy Pausch\u0026rsquo;s time management lecture.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eoptimize for fun\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;why am I doing this?\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ehave \u0026ldquo;you can always change your plan, but only if you have 
one\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSEPARATE: email, to-do list, calendar\u003c/p\u003e\n\u003ch2 id=\"writing-a-paper\"\u003eWriting a Paper\u003c/h2\u003e\n\u003cp\u003eJennifer Widom: how to write a paper\u003c/p\u003e\n\u003ch2 id=\"packages-to-use\"\u003ePackages to Use\u003c/h2\u003e\n\u003ch3 id=\"plots\"\u003ePlots\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;using matlab to screenshot a plot is\u0026hellip; an automatic F. If you want to have A quality work, you can use pgfplot. Or you can use a pgfplots backend.\u0026rdquo;\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etikzplotlib\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"tables\"\u003eTables\u003c/h3\u003e\n\u003cp\u003eNo vertical linens\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"http://people.inf.ethz.ch/markusp/teaching/guides/guide-tables.pdf\"\u003ehttp://people.inf.ethz.ch/markusp/teaching/guides/guide-tables.pdf\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\toprule\u003c/p\u003e\n\u003cp\u003ePossibly: PGFPlotsTable. 
TikZ.\u003c/p\u003e\n\u003ch3 id=\"algos\"\u003eAlgos\u003c/h3\u003e\n\u003cp\u003ealgorithmicx\u003c/p\u003e\n\u003ch3 id=\"captions\"\u003eCaptions\u003c/h3\u003e\n\u003cp\u003esubcaptions\u003c/p\u003e\n\u003ch3 id=\"units\"\u003eUnits\u003c/h3\u003e\n\u003cp\u003esiunitx\u003c/p\u003e\n\u003ch3 id=\"code\"\u003eCode\u003c/h3\u003e\n\u003cp\u003eminted, or\u0026mdash;code executation\u0026mdash;pythontex\u003c/p\u003e\n\u003ch3 id=\"references\"\u003eReferences\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;cleveref\u0026rdquo;: tell you what it is, and give you back with the \u0026ldquo;Fig. #N\u0026rdquo; informatino.\u003c/p\u003e\n\u003ch3 id=\"good-presentation\"\u003eGood Presentation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003epowerpoint tedx talk\u003c/li\u003e\n\u003cli\u003egive strong technical presentations by Markus Puschel\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"general-tips\"\u003eGeneral Tips\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eseparate the problem from the solution before presenting the solution\u003c/li\u003e\n\u003cli\u003enumber slides! 
also include total number of slides\u003c/li\u003e\n\u003cli\u003eone slide per minute\u003c/li\u003e\n\u003cli\u003eone liners are best, two liners are ok, three + are bad\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTransitions are hard: don\u0026rsquo;t tap on a slide and go \u0026ldquo;woah\u0026rdquo;; pre-cache first sentence of each slide.\u003c/p\u003e\n\u003cp\u003eOverview \u003cstrong\u003eAFTER\u003c/strong\u003e the motivation.\u003c/p\u003e\n\u003ch2 id=\"reference-handling\"\u003eReference Handling\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebiblatex: bibtex with postprocessing the .tex\u003c/li\u003e\n\u003cli\u003esislstrings.bib: mykel\u0026rsquo;s conference list for .bib\u003c/li\u003e\n\u003cli\u003eJabRef\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"phd-thesis\"\u003ePhD Thesis\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"http://www.feron.org/Eric/PhD_characterization_2.htm\"\u003ehttp://www.feron.org/Eric/PhD_characterization_2.htm\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Cool Theorems and New Methods\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Cool Methods and Predictions\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Beautiful Demonstrations\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Cool engineering ideas\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-write-a-paper\"\u003e\u0026ldquo;How to Write a Paper\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://cs.stanford.edu/people/widom/paper-writing.html\"\u003ehttps://cs.stanford.edu/people/widom/paper-writing.html\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewhat\u0026rsquo;s the problem\u003c/li\u003e\n\u003cli\u003ewhy is it interesting and important?\u003c/li\u003e\n\u003cli\u003ewhy is it hard?\u003c/li\u003e\n\u003cli\u003ewhy hasn\u0026rsquo;t been solved before/what\u0026rsquo;s wrong with previous solutions?\u003c/li\u003e\n\u003cli\u003ewhat are the key components of my 
approach and results?\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYou want the intro to end near the end of the first page or near the end of the second page. \u003cstrong\u003eAlways lead with the problem.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"mathematical-writing\"\u003eMathematical Writing\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;CS209 mathematical writing\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eDon\u0026rsquo;t start a sentence with a symbol.\u003c/p\u003e\n\u003cp\u003eDon\u0026rsquo;t use \u0026ldquo;utilize\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"authorship\"\u003eAuthorship\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etalk about it early\u003c/li\u003e\n\u003cli\u003euniversities have authorship inclusion deadline\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"complexity\"\u003eComplexity\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecomplexity should be justified (why does simpler method\u0026rsquo;s not work?)\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch_tips/","tags":null,"title":"Research Tips"},{"categories":null,"contents":"Importantly, we have to keep our data under something that can be called RDD: \u0026ldquo;Resilient Distributed Dataset\u0026rdquo;; it is a theoretical dataset, but you don\u0026rsquo;t actually load it.\nRDDs are has a single vector datastore under, but there are special RDDs that store key-value info. 
For Spark, RDDs are stored as operational graphs which is backtraced eventually during computational steps.\nPair RDD A Pair RDD is an RDD that stores two pairs of vectors: you have a key and you have an value per entry.\n","html":"\u003cp\u003eImportantly, we have to keep our data under something that can be called \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhspark/#rdd-api\"\u003eRDD\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e: \u0026ldquo;Resilient Distributed Dataset\u0026rdquo;; it is a theoretical dataset, but you don\u0026rsquo;t actually load it.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es are has a single vector datastore under, but there are special \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es that store key-value info. For \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e, \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es are stored as operational graphs which is backtraced eventually during computational steps.\u003c/p\u003e\n\u003ch2 id=\"pair-rdd\"\u003ePair RDD\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#pair-rdd\"\u003ePair RDD\u003c/a\u003e is an \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e that stores two pairs of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es: you have a key and you have an value per entry.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrdd/","tags":null,"title":"Resilient Distributed Dataset"},{"categories":null,"contents":"resistors do resistor stuff.\nresistors in series Their resistance add!\n\\begin{equation} R_{eq} = R_1 + R_2 + \\dots \\end{equation}\nCURRENT remains the same through the resistors.\nresistors in parallel Their resistance add by inverse fraction!\n\\begin{equation} \\frac{1}{R_{eq}} = \\frac{1}{R_1} + \\frac{1}{R_2} + \\dots \\end{equation}\nVOLTAGE remains the same through the resistors.\n","html":"\u003cp\u003e\u003ca 
href=\"/posts/kbhresistors/\"\u003eresistors\u003c/a\u003e do resistor stuff.\u003c/p\u003e\n\u003ch2 id=\"resistors-in-series\"\u003eresistors in series\u003c/h2\u003e\n\u003cp\u003eTheir \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e add!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-21_21-53-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\nR_{eq} = R_1 + R_2 + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eCURRENT\u003c/strong\u003e remains the same through the resistors.\u003c/p\u003e\n\u003ch2 id=\"resistors-in-parallel\"\u003eresistors in parallel\u003c/h2\u003e\n\u003cp\u003eTheir \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e add by inverse fraction!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-21_21-54-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\n\\frac{1}{R_{eq}} = \\frac{1}{R_1} + \\frac{1}{R_2} + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eVOLTAGE\u003c/strong\u003e remains the same through the resistors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresistors/","tags":null,"title":"resistor"},{"categories":null,"contents":"A reticle is a photomask/template for a lithography system (like a negative). KLA was the first company to automatically inspect wafers and reticles.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhreticle/\"\u003ereticle\u003c/a\u003e is a photomask/template for a lithography system (like a negative). 
\u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e was the first company to automatically inspect wafers and \u003ca href=\"/posts/kbhreticle/\"\u003ereticle\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreticle/","tags":null,"title":"reticle"},{"categories":null,"contents":" Starting with random residue noise: coordinates + backbones Diffusion happens: train like diffusion, with the goal of increasing binding affinities Eventually resolves to valid protein structures given the binding environments Basically, start with only the desired substraight, and the diffuse the sequence around that small sequence with the goal of higher affinity binding: i.e. allow only the binding site to stay and regenerating the rest.\nRFDiffusion is available starting THIS WEEK!\nadvantages over RoseTTAFold2 inpainting The starting point is random for diffusion, so if you do it multiple times you will get new results instead of the same thing.\n","html":"\u003col\u003e\n\u003cli\u003eStarting with random residue noise: coordinates + backbones\u003c/li\u003e\n\u003cli\u003eDiffusion happens: train like diffusion, with the goal of increasing binding affinities\u003c/li\u003e\n\u003cli\u003eEventually resolves to valid protein structures given the binding environments\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBasically, start with only the desired substraight, and the diffuse the sequence around that small sequence with the goal of higher affinity binding: i.e. 
allow only the binding site to stay and regenerating the rest.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e is available starting THIS WEEK!\u003c/p\u003e\n\u003ch2 id=\"advantages-over-rosettafold2--kbhrosettafold2-dot-md--inpainting\"\u003eadvantages over \u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e inpainting\u003c/h2\u003e\n\u003cp\u003eThe starting point is random for diffusion, so if you do it multiple times you will get new results instead of the same thing.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrfdiffusion/","tags":null,"title":"RFDiffusion"},{"categories":null,"contents":"POMDPs to solve Active Sensing Problem: where gathering information is the explicit goal and not a means to do something. Meaning, we can\u0026rsquo;t train them using state-only reward functions (i.e. reward is based on belief and not state).\nDirectly reward the reduction of uncertainty: belief-based reward framework which you can just tack onto the existing solvers.\nTo do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:\n\\begin{equation} r(b,a) = \\rho(b,a) \\end{equation}\n\\(\\rho\\) should be some measure of uncertainty, like entropy.\nkey question: how does our POMDP formulations change given this change?\nDon\u0026rsquo;t worry about the Value Function result: if reward function is convex, then Bellman updates should preserve the convexity of the value function\nSo, we now just need to make sure that however we compute our rewards the reward function \\(\\rho\\) has to be piecewise linear convex.\nPWLC rewards One simple PWLC rewards are alpha vectors:\n\\begin{equation} \\rho(b,a) = \\max_{\\alpha in \\Gamma} \\qty[\\sum_{ss}^{} b(s) \\alpha(s)] \\end{equation}\nWe want to use \\(R\\) extra alpha-vectors to compute the value at a state.\nThis makes our Belman updates:\nnon-PWLC objectives As long as 
\\(\\rho\\) is convex and stronger-than Lipschitz continuous, we can use a modified version of the Bellman updates to force our non PWLC \\(\\rho\\) into pretty much PWLC:\n\\begin{equation} \\hat{\\rho}(b) = \\max_{b\u0026rsquo;} \\qty[\\rho(b\u0026rsquo;) + (b-b\u0026rsquo;) \\cdot \\nabla p(b\u0026rsquo;)] \\end{equation}\nTaylor never fails to disappoint.\nFancy math gives that the error in this would be bounded:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es to solve \u003ca href=\"/posts/kbhrho_pomdps/\"\u003eActive Sensing Problem\u003c/a\u003e: where \u003cstrong\u003egathering information\u003c/strong\u003e is the explicit goal and not a means to do something. Meaning, we can\u0026rsquo;t train them using state-only reward functions (i.e. reward is based on belief and not state).\u003c/p\u003e\n\u003cp\u003eDirectly reward the \u003cstrong\u003ereduction of uncertainty\u003c/strong\u003e: \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e-based reward framework which you can just tack onto the existing solvers.\u003c/p\u003e\n\u003cp\u003eTo do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr(b,a) = \\rho(b,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\rho\\) should be some measure of uncertainty, like entropy.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ekey question\u003c/strong\u003e: how does our \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e formulations change given this change?\u003c/p\u003e\n\u003ch2 id=\"don-t-worry-about-the-value-function\"\u003eDon\u0026rsquo;t worry about the Value Function\u003c/h2\u003e\n\u003cp\u003eresult: if \u003cstrong\u003ereward function\u003c/strong\u003e is convex, then Bellman updates should \u003cstrong\u003epreserve the convexity of the value 
function\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eSo, we now just need to make sure that however we compute our rewards the reward function \\(\\rho\\) has to be piecewise linear convex.\u003c/p\u003e\n\u003ch2 id=\"pwlc--kbhrho-pomdps-dot-md--rewards\"\u003e\u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e rewards\u003c/h2\u003e\n\u003cp\u003eOne simple \u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e rewards are \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho(b,a) = \\max_{\\alpha in \\Gamma} \\qty[\\sum_{ss}^{} b(s) \\alpha(s)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to use \\(R\\) extra alpha-vectors to compute the value at a state.\u003c/p\u003e\n\u003cp\u003eThis makes our Belman updates:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-25_19-56-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"non-pwlc--kbhrho-pomdps-dot-md--objectives\"\u003enon-\u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e objectives\u003c/h2\u003e\n\u003cp\u003eAs long as \\(\\rho\\) is convex and stronger-than \u003ca href=\"/posts/kbhuniqueness_and_existance/#lipschitz-condition\"\u003eLipschitz continuous\u003c/a\u003e, we can use a modified version of the Bellman updates to force our non \u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e \\(\\rho\\) into pretty much PWLC:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\rho}(b) = \\max_{b\u0026rsquo;} \\qty[\\rho(b\u0026rsquo;) + (b-b\u0026rsquo;) \\cdot \\nabla p(b\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaylor never fails to disappoint.\u003c/p\u003e\n\u003cp\u003eFancy math gives that the error in this would be bounded:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2024-02-25_19-59-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhrho_pomdps/","tags":null,"title":"rho-POMDPs"},{"categories":null,"contents":"Richard Nixon is an American president, but pretty much is the watergate guy.\nServed in House and Senate Eisenhower\u0026rsquo;s VP for 8 years Lost first to JFK Richard Nixon is a pragmatist; he pushes economy out of presession via Keynsian Politics.\nRichard Nixon also realized that the large southern population can be motivated via racist policies, so he shifted the .\npolitical positions of Richard Nixon Richard Nixon\u0026rsquo;s Treatment against the Vietnam War Richard Nixon\u0026rsquo;s Foreign Policy ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e is an American president, but pretty much is the \u003ca href=\"/posts/kbhwatergate/\"\u003ewatergate\u003c/a\u003e guy.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eServed in House and Senate\u003c/li\u003e\n\u003cli\u003eEisenhower\u0026rsquo;s VP for 8 years\u003c/li\u003e\n\u003cli\u003eLost first to JFK\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e is a pragmatist; he pushes economy out of presession via \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Politics\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e also realized that the large southern population can be motivated via racist policies, so he shifted the .\u003c/p\u003e\n\u003ch2 id=\"political-positions-of-richard-nixon\"\u003epolitical positions of Richard Nixon\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrichard_nixon_s_treatment_against_the_vietnam_war/\"\u003eRichard Nixon\u0026rsquo;s Treatment against the Vietnam War\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhrichard_nixon_s_foreign_policy/\"\u003eRichard Nixon\u0026rsquo;s Foreign Policy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrichard_nixon/","tags":null,"title":"Richard Nixon"},{"categories":null,"contents":"Richard Nixon\u0026rsquo;s foreign policy is marked by the \u0026ldquo;Nixon Doctrine\u0026rdquo;: shifting the burden of military containment to allies.\nSupports China as a means against USSR Negotiate with the USSR to lower tension Shifts focus into building and supporting allies ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e\u0026rsquo;s foreign policy is marked by the \u0026ldquo;\u003ca href=\"/posts/kbhrichard_nixon_s_foreign_policy/\"\u003eNixon Doctrine\u003c/a\u003e\u0026rdquo;: shifting the burden of military containment to allies.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSupports China as a means against \u003ca href=\"\"\u003eUSSR\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eNegotiate with the \u003ca href=\"\"\u003eUSSR\u003c/a\u003e to lower tension\u003c/li\u003e\n\u003cli\u003eShifts focus into building and supporting allies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrichard_nixon_s_foreign_policy/","tags":null,"title":"Richard Nixon's Foreign Policy"},{"categories":null,"contents":"Richard Nixon proposed the strategy of vietnamization as a treatment to the Vietnam War. He also expanded to Cambodia. To beat the Viet Cong into submission, he initialized the Operation Linebacker campaign.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e proposed the strategy of \u003ca href=\"/posts/kbhvietnamization/\"\u003evietnamization\u003c/a\u003e as a treatment to the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e. He also expanded to Cambodia. 
To beat the Viet Cong into submission, he initialized the \u003ca href=\"/posts/kbhoperation_linebacker/\"\u003eOperation Linebacker\u003c/a\u003e campaign.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrichard_nixon_s_treatment_against_the_vietnam_war/","tags":null,"title":"Richard Nixon's Treatment against the Vietnam War"},{"categories":null,"contents":"Rick Wallace is the CEO of KLA.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e is the CEO of \u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrick_wallace/","tags":null,"title":"Rick Wallace"},{"categories":null,"contents":"a ring is\u0026hellip; something; its kind of a field but it doesn\u0026rsquo;t have inverses for multiplication.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhring/\"\u003ering\u003c/a\u003e is\u0026hellip; something; its kind of a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e but it doesn\u0026rsquo;t have inverses for \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhring/","tags":null,"title":"ring"},{"categories":null,"contents":"Wealth is a much more complex utility than others because given the different levels of wealth you have the marginal benefit of having that wealth decreases.\nThat is, let \\(A\\) be the fact that you are given $50, and let \\(B\\) be there being \\(0.5\\) chance of winning $100.\nrisk neutral: the utility is linear\u0026mdash;therefore \\(A \\sim B\\) risk seeking: utility is convex (derivative increases as reward increases), so \\(A \\prec B\\) risk averse: utility is concave (derivate decreases as reward decreases), so \\(A \\succ B\\) ","html":"\u003cp\u003eWealth is a much more complex \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e than others because given the different levels of wealth you have the 
marginal benefit of having that wealth decreases.\u003c/p\u003e\n\u003cp\u003eThat is, let \\(A\\) be the fact that you are given $50, and let \\(B\\) be there being \\(0.5\\) chance of winning $100.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk neutral\u003c/a\u003e: the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is linear\u0026mdash;therefore \\(A \\sim B\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk seeking\u003c/a\u003e: \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is convex (derivative increases as reward increases), so \\(A \\prec B\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk averse\u003c/a\u003e: \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is concave (derivate decreases as reward decreases), so \\(A \\succ B\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexpected_utility_of_wealth/","tags":null,"title":"risk aversion"},{"categories":null,"contents":"Make PACE better: no need to check the bacteriophage population in PACE yourself; just check it automatically! https://github.com/dgretton/pyhamilton https://www.chorylab.com/\ntake a constant plate measurement of the culture check the growth grade use the grid of materials to test the environmental combinations; checking if certain factors worked better PyLabRobotic to automatically handle the materials \u0026ldquo;run PACE sweeps, adjust parameters as needed to promote mutation replication\u0026rdquo;\n\u0026ldquo;cheaters\u0026rdquo; some molecules create specific increases in population without the need of any mutation at all, \u0026ldquo;cheating\u0026rdquo; the evolutionary process. 
We don\u0026rsquo;t know why, and the lab seem to have given up on them until an in vivo test is needed.\nso actually applying the above Given the changes in distribution of the replication process, measure schot Adjust selection pressure to promote mutation ","html":"\u003cp\u003eMake \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e better: no need to check the bacteriophage population in \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e yourself; just check it automatically! \u003ca href=\"https://github.com/dgretton/pyhamilton\"\u003ehttps://github.com/dgretton/pyhamilton\u003c/a\u003e \u003ca href=\"https://www.chorylab.com/\"\u003ehttps://www.chorylab.com/\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etake a constant plate measurement of the culture\u003c/li\u003e\n\u003cli\u003echeck the growth grade\u003c/li\u003e\n\u003cli\u003euse the grid of materials to test the environmental combinations; checking if certain factors worked better\u003c/li\u003e\n\u003cli\u003ePyLabRobotic to automatically handle the materials\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u0026ldquo;run \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e sweeps, adjust parameters as needed to promote mutation replication\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"cheaters\"\u003e\u0026ldquo;cheaters\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003esome molecules create specific increases in population without the need of any mutation at all, \u0026ldquo;cheating\u0026rdquo; the evolutionary process. 
We don\u0026rsquo;t know why, and the lab seem to have given up on them until an in vivo test is needed.\u003c/p\u003e\n\u003ch2 id=\"so-actually-applying-the-above\"\u003eso actually applying the above\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eGiven the changes in distribution of the replication process, measure schot\u003c/li\u003e\n\u003cli\u003eAdjust selection pressure to promote mutation\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrobotics_assisted_directed_evolution/","tags":null,"title":"Robotics-Assisted Directed Evolution"},{"categories":null,"contents":"Ingredients:\n\\(\\mathcal{P}\\) problem (states, transitions, etc.) \\(\\pi\\) a Rollout Policy \\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower Use the greedy policy at each state by using the Rollout procedure to estimate your value function at any given state.\nRollout Rollout works by hallucinating a trajectory and calculating the reward.\nFor some state, Rollout Policy, and depth\u0026hellip;\nlet ret be 0; for i in range depth take action following the Rollout Policy obtain a sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state ret += gamma^i * r return ret Rollout Policy A Rollout Policy is a default policy used for lookahead. 
Usually this policy should be designed with domain knowledge; if not, we just use a uniform random next steps.\n","html":"\u003cp\u003eIngredients:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(\\pi\\) a \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eUse the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e at each state by using the \u003ca href=\"#rollout\"\u003eRollout\u003c/a\u003e procedure to estimate your \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e at any given state.\u003c/p\u003e\n\u003ch2 id=\"rollout\"\u003eRollout\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#rollout\"\u003eRollout\u003c/a\u003e works by hallucinating a trajectory and calculating the reward.\u003c/p\u003e\n\u003cp\u003eFor some state, \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e, and depth\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003elet ret be 0; for i in range depth\n\u003col\u003e\n\u003cli\u003etake action following the \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eobtain a sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\u003c/li\u003e\n\u003cli\u003eret += gamma^i * r\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ereturn ret\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"rollout-policy\"\u003eRollout Policy\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e is a default \u003ca 
href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e used for lookahead. Usually this \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e should be designed with domain knowledge; if not, we just use a uniform random next steps.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrollout_with_lookahead/","tags":null,"title":"Rollout with Lookahead"},{"categories":null,"contents":"Ronald Reagan is a president of the United States. He rises a wave of the New Right.\nComes out of Hollywood and was CA governor Reagan was a democrat, but McCarthyism lead him Reagan was an FBI informer for McCarthyism investigations Reagan was the first two-term president since 1961, was able to maintain more power compared to others \u0026ldquo;The Great Communicator\u0026rdquo; Reagan politics \u0026ldquo;Government isn\u0026rsquo;t the solution to the problem, its the problem.\u0026rdquo;\nwished for limited politics states rights condemned welfare and \u0026ldquo;welfare cheats\u0026rdquo; (the undertone of racist appeal) Evangelical undertones, family values, moral majority Against affirmative action Supply-side economics: \u0026ldquo;getting rid of taxes will allow more people to spend\u0026rdquo; Anti-Soviet rhetoric Creates the largest increase in welfare spending, gutting about $1.5Bn.\nReagan policy changes Lowering taxes: 70% of tax to 28% of taxes Increase defense budget: 1 trillion to 3 trillion Rising inequality, 1% controlled 40% of wealth (double from the 1970s) Reagan Foreign Policy Ronald Reagan creates the largest military build-up in history (larger than Korea and Vietnam.)\nReasserted Command-in-Chief abilities Creates the National Security Council (for whom the ) Comitted the US to supporting the anti-Marxist insurrections around the world Credited with falling the USSR Supreme Court Interview Process A new interview process for the supreme court designed by Ronald Reagan, creating an extensive process to vet conservative. 
Reagan swapped out 50% of the Federal judicial process.\nReagan\u0026rsquo;s Legacy Inflation dropped\nUSSR Collapse\nMilitary complex expanded\nIncomes rose\nInequality widened\nWelfare slashed\nDebt\nConcentrated power in the white house\nCentralized conservative agenda\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e is a president of the \u003ca href=\"\"\u003eUnited States\u003c/a\u003e. He rises a wave of the \u003ca href=\"/posts/kbhnew_right/\"\u003eNew Right\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eComes out of Hollywood and was CA governor\u003c/li\u003e\n\u003cli\u003eReagan was a democrat, but McCarthyism lead him\u003c/li\u003e\n\u003cli\u003eReagan was an FBI informer for McCarthyism investigations\u003c/li\u003e\n\u003cli\u003eReagan was the first two-term president since 1961, was able to maintain more power compared to others\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The Great Communicator\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"reagan-politics\"\u003eReagan politics\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Government isn\u0026rsquo;t the solution to the problem, its the problem.\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewished for limited politics\u003c/li\u003e\n\u003cli\u003estates rights\u003c/li\u003e\n\u003cli\u003econdemned welfare and \u0026ldquo;welfare cheats\u0026rdquo; (the undertone of racist appeal)\u003c/li\u003e\n\u003cli\u003eEvangelical undertones, family values, moral majority\u003c/li\u003e\n\u003cli\u003eAgainst affirmative action\u003c/li\u003e\n\u003cli\u003eSupply-side economics: \u0026ldquo;getting rid of taxes will allow more people to spend\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eAnti-Soviet rhetoric\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCreates the largest increase in welfare spending, gutting about $1.5Bn.\u003c/p\u003e\n\u003ch2 id=\"reagan-policy-changes\"\u003eReagan policy 
changes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLowering taxes: 70% of tax to 28% of taxes\u003c/li\u003e\n\u003cli\u003eIncrease defense budget: 1 trillion to 3 trillion\u003c/li\u003e\n\u003cli\u003eRising inequality, 1% controlled 40% of wealth (double from the 1970s)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"reagan-foreign-policy\"\u003eReagan Foreign Policy\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e creates the largest military build-up in history (larger than Korea and Vietnam.)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eReasserted Command-in-Chief abilities\u003c/li\u003e\n\u003cli\u003eCreates the National Security Council (for whom the )\u003c/li\u003e\n\u003cli\u003eComitted the US to supporting the anti-Marxist insurrections around the world\u003c/li\u003e\n\u003cli\u003eCredited with falling the USSR\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"supreme-court-interview-process\"\u003eSupreme Court Interview Process\u003c/h2\u003e\n\u003cp\u003eA new interview process for the supreme court designed by \u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e, creating an extensive process to vet conservative. 
Reagan swapped out 50% of the Federal judicial process.\u003c/p\u003e\n\u003ch2 id=\"reagan-s-legacy\"\u003eReagan\u0026rsquo;s Legacy\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInflation dropped\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eUSSR Collapse\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eMilitary complex expanded\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIncomes rose\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eInequality widened\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWelfare slashed\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDebt\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eConcentrated power in the white house\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCentralized conservative agenda\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhronald_raegan/","tags":null,"title":"Ronald Reagan"},{"categories":null,"contents":"The Rosa Parks bus incident is the instigator which needed to act on an issue to challenge the civil rights movement.\nShe participated in many civil rights agitations, and became the instigator .\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhrosa_parks/\"\u003eRosa Parks\u003c/a\u003e bus incident is the instigator which needed to act on an issue to challenge the civil rights movement.\u003c/p\u003e\n\u003cp\u003eShe participated in many civil rights agitations, and became the instigator .\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrosa_parks/","tags":null,"title":"Rosa Parks"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhroseta/","tags":null,"title":"roseta"},{"categories":null,"contents":"Rosetta is a set of physical-based protein folding models.\nprotein binding with Rosetta check a protein surface check how protein side-chains interact with the binding surface peptide 
binding with Rosetta The difficulty with this is that we don\u0026rsquo;t know what the overall tertiary structure of a group of peptides are; unlike whole protein binding.\nsequence-specific DNA binding ???\nmore! You take something like a trimer; you shove a peptide between each \u0026ldquo;point\u0026rdquo;, and boom structal change to a quadromer\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e is a set of physical-based protein folding models.\u003c/p\u003e\n\u003ch2 id=\"protein-binding-with-rosetta--kbhrosetta-dot-md\"\u003eprotein binding with \u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003echeck a protein surface\u003c/li\u003e\n\u003cli\u003echeck how protein side-chains interact with the binding surface\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"peptide-binding-with-rosetta--kbhrosetta-dot-md\"\u003epeptide binding with \u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe difficulty with this is that we don\u0026rsquo;t know what the overall tertiary structure of a group of peptides are; unlike whole \u003ca href=\"#protein-binding-with-rosetta--kbhrosetta-dot-md\"\u003eprotein binding\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"sequence-specific-dna-binding\"\u003esequence-specific DNA binding\u003c/h2\u003e\n\u003cp\u003e???\u003c/p\u003e\n\u003ch2 id=\"more\"\u003emore!\u003c/h2\u003e\n\u003cp\u003eYou take something like a trimer; you shove a peptide between each \u0026ldquo;point\u0026rdquo;, and boom structal change to a quadromer\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrosetta/","tags":null,"title":"Rosetta"},{"categories":null,"contents":"RoseTTAFold2 is a three-track folding tool, which also handles multimer!\ninputs: amino acid sequence + CHEMICAL structure (WOAH! how?) \u0026ldquo;RF2 all-atom embedding\u0026rdquo; fold! 
The model does really well!\napplication: de-novo luciferase design come up with the correct shaped scaffolds use old Rosetta to jam a residue sequence into the scaffold refold application: RoseTTAFold2 in-painting Train the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e is a three-track folding tool, which also handles multimer!\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003einputs: amino acid sequence + CHEMICAL structure (WOAH! how?)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;RF2 all-atom embedding\u0026rdquo;\u003c/li\u003e\n\u003cli\u003efold!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe model does really well!\u003c/p\u003e\n\u003ch2 id=\"application-de-novo-luciferase-design\"\u003eapplication: de-novo luciferase design\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecome up with the correct shaped scaffolds\u003c/li\u003e\n\u003cli\u003euse old \u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e to jam a residue sequence into the scaffold\u003c/li\u003e\n\u003cli\u003erefold\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"application-rosettafold2--kbhrosettafold2-dot-md--in-painting\"\u003eapplication: \u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e in-painting\u003c/h2\u003e\n\u003cp\u003eTrain the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrosettafold2/","tags":null,"title":"RoseTTAFold2"},{"categories":null,"contents":"On the dynamics of Tuning Forks. (Rossing, Russell, and Brown 1992)\nCharacterizing Tuning Forks Aluminum, tines 10mm apart. 
Four main groups of vibration:\nSymmetrical In-Plane Antisymmetrical In-Plane Symmetrical Out-Of-Plane Antisymmetrical Out-Of-Plane (a) and (c) are in the first group; (b) is in the second group, where the fork just warps.\nDeriving Tuning Forks\u0026rsquo; Frequency As per before, we can treat tuning forks acting in clang and fundamental modes as a good\u0026rsquo;ol fashioned cantilever beam.\nThe frequency action of a cantilever beam is defined as follows:\nOtherwise, for asymmetric modes, we can use the same exact expression but with uniform rods unfixed at either end:\nNote that density is not uniform at this point (because the bottom handle-y bit.)\n","html":"\u003cp\u003eOn the dynamics of \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003es. (\u003ca href=\"#citeproc_bib_item_1\"\u003eRossing, Russell, and Brown 1992\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"characterizing-tuning-forks\"\u003eCharacterizing Tuning Forks\u003c/h2\u003e\n\u003cp\u003eAluminum, tines 10mm apart. 
Four main groups of vibration:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eSymmetrical In-Plane\u003c/li\u003e\n\u003cli\u003eAntisymmetrical In-Plane\u003c/li\u003e\n\u003cli\u003eSymmetrical Out-Of-Plane\u003c/li\u003e\n\u003cli\u003eAntisymmetrical Out-Of-Plane\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-20_22-49-48_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(a) and (c) are in the first group; (b) is in the second group, where the fork just warps.\u003c/p\u003e\n\u003ch2 id=\"deriving-tuning-forks-frequency\"\u003eDeriving Tuning Forks\u0026rsquo; Frequency\u003c/h2\u003e\n\u003cp\u003eAs per before, we can treat tuning forks acting in clang and fundamental modes as a good\u0026rsquo;ol fashioned \u003ca href=\"/posts/kbhcantilever_beam/\"\u003ecantilever beam\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe frequency action of a \u003ca href=\"/posts/kbhcantilever_beam/\"\u003ecantilever beam\u003c/a\u003e is defined as follows:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-20_23-01-55_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOtherwise, for asymmetric modes, we can use the same exact expression but with uniform rods unfixed at either end:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-20_23-11-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNote that density is not uniform at this point (because the bottom handle-y bit.)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrossing_1990/","tags":null,"title":"Rossing 1990"},{"categories":null,"contents":"total kinetic energy \\begin{equation} KE_{rigid} = \\frac{1}{2} M{V_{cm}}^2 + \\frac{1}{2} I_{CM}{\\omega_{CM}}^2 \\end{equation}\ntorque from gravity For even non rigid bodies, the following follows:\n\\begin{equation} \\vec{\\tau}_g = \\vec{R}_{CM} \\times M\\vec{g} \\end{equation}\nActually, this follows for any \\(f\\) (like \\(g\\)) evenly applied across point 
masses.\npotential energy \\begin{equation} \\Delta PE_g = mg\\Delta h \\end{equation}\nwhere, \\(\\Delta h\\) is the travel of center of mass. Regardless of whether or not its point.\n","html":"\u003ch2 id=\"total-kinetic-energy\"\u003etotal kinetic energy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nKE_{rigid} = \\frac{1}{2} M{V_{cm}}^2 + \\frac{1}{2} I_{CM}{\\omega_{CM}}^2\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"torque-from-gravity\"\u003etorque from gravity\u003c/h2\u003e\n\u003cp\u003eFor even non rigid bodies, the following follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{\\tau}_g = \\vec{R}_{CM} \\times M\\vec{g}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eActually, this follows for any \\(f\\) (like \\(g\\)) evenly applied across point masses.\u003c/p\u003e\n\u003ch2 id=\"potential-energy\"\u003epotential energy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta PE_g = mg\\Delta h\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\Delta h\\) is the \u003cem\u003etravel of center of mass\u003c/em\u003e. Regardless of whether or not its point.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrotational_energy/","tags":null,"title":"rotational energy theorem"},{"categories":null,"contents":"Rural Electrification Administration create electrification throughout cities. Most of American infrastructure still 1930s.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrural_electrification_administration/\"\u003eRural Electrification Administration\u003c/a\u003e create electrification throughout cities. 
Most of American infrastructure still 1930s.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrural_electrification_administration/","tags":null,"title":"Rural Electrification Administration"},{"categories":null,"contents":"Observations from studying the comedian Russel Howard.\nStretching analogies Using language/motion/figure do describe something on the opposite end of the spectrum Take, for instance, age: 5Y/O: \u0026ldquo;cheers mum, wasen\u0026rsquo;t on my to-do list\u0026rdquo; A surprisingly sentimental dog: \u0026ldquo;because when I wake up tomorrow I want to see you, and I want to go for a lovely walk\u0026rdquo; Large motions + deadpan after Endless extrapolations of a normal setup: setup: Russian hackers were controlling people\u0026rsquo;s toys; punchline: \u0026ldquo;5 men were dildo\u0026rsquo;d to death, we don\u0026rsquo;t have a recording but here are their final words \u0026mdash; \u0026lsquo;oh yeaaah\u0026rsquo;, \u0026lsquo;oh fuck yeaaah\u0026rsquo;\u0026rdquo; Setup: gweneth paltro Punchline: \u0026ldquo;put an egg up there, you will feel more femenine. no! 
you will feel like a chicken\u0026rdquo;\nMultiple use of setups: \u0026ldquo;happy birthday too you\u0026rdquo; Peach ","html":"\u003cp\u003eObservations from studying the comedian \u003ca href=\"/posts/kbhrussel_howard/\"\u003eRussel Howard\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eStretching analogies\u003c/li\u003e\n\u003cli\u003eUsing language/motion/figure do describe something on the opposite end of the spectrum\n\u003cul\u003e\n\u003cli\u003eTake, for instance, age: 5Y/O: \u0026ldquo;cheers mum, wasen\u0026rsquo;t on my to-do list\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eA surprisingly sentimental dog: \u0026ldquo;because when I wake up tomorrow I want to see you, and I want to go for a lovely walk\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eLarge motions + deadpan after\u003c/li\u003e\n\u003cli\u003eEndless extrapolations of a normal setup: setup: Russian hackers were controlling people\u0026rsquo;s toys; punchline: \u0026ldquo;5 men were dildo\u0026rsquo;d to death, we don\u0026rsquo;t have a recording but here are their final words \u0026mdash; \u0026lsquo;oh yeaaah\u0026rsquo;, \u0026lsquo;oh fuck yeaaah\u0026rsquo;\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSetup: gweneth paltro\nPunchline: \u0026ldquo;put an egg up there, you will feel more femenine. no! you will feel like a chicken\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMultiple use of setups: \u0026ldquo;happy birthday too you\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ePeach\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrussel_howard/","tags":null,"title":"Russel Howard"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624594\n(Sadeghian, Schaffer, and Zahorian 2021)\nOne-Liner Using a genetic algorithm, picked features to optimize fore; achieved \\(94\\%\\) with just MMSE data alone (ok like duh me too). 
Developed ASR tool to aid.\nNovelty Developed an ASR methodology for speech, complete with punctuations Used a genetic algorithm to do feature selection; NNs performed worse because \u0026ldquo;space is smaller???\u0026rdquo; Notable Methods Used a GRU to insert punctuations The paper leveraged the nuke that is a bidirectional GRU, ATTENTION,\nKey Figs Fully automated ANN transcript does pretty well in terms of classifier AD/NL.\nNew Concepts fusion genetic algorithm MMSE Notes very confusing (too many things going on at once)\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624594\u003c/p\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eSadeghian, Schaffer, and Zahorian 2021\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUsing a \u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003egenetic algorithm\u003c/a\u003e, picked features to optimize fore; achieved \\(94\\%\\) with just \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e data alone (ok like duh me too). 
Developed \u003ca href=\"/posts/kbhasr/\"\u003eASR\u003c/a\u003e tool to aid.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDeveloped an \u003ca href=\"/posts/kbhasr/\"\u003eASR\u003c/a\u003e methodology for speech, complete with punctuations\u003c/li\u003e\n\u003cli\u003eUsed a \u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003egenetic algorithm\u003c/a\u003e to do feature selection; NNs performed worse because \u0026ldquo;space is smaller???\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch3 id=\"used-a-gru-to-insert-punctuations\"\u003eUsed a GRU to insert punctuations\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-44-59_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe paper leveraged the nuke that is a bidirectional GRU, ATTENTION,\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-00-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFully automated ANN transcript does pretty well in terms of classifier AD/NL.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfusion/\"\u003efusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003egenetic algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003every confusing (too many things going on at once)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsadeghian_2021/","tags":["ntj"],"title":"Sadeghian 2021"},{"categories":null,"contents":"Challenge of speech anonymization: cannot develop a model which both preserves speech features well but also effectively anonymizes the 
speech.\nMethodology Separate content and speech encoders Results in highly concentrated + effective speech content, but with very widespread voiceprint ","html":"\u003cp\u003eChallenge of speech anonymization: cannot develop a model which both preserves speech features well but also effectively anonymizes the speech.\u003c/p\u003e\n\u003ch2 id=\"methodology\"\u003eMethodology\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSeparate content and speech encoders\u003c/li\u003e\n\u003cli\u003eResults in highly concentrated + effective speech content, but with very widespread voiceprint\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsaic_speech_anonomyzation/","tags":null,"title":"SAIC: Speech Anonomyzation"},{"categories":null,"contents":"Demo day No value add for demo-day winner Competition makes you want to prepare more \u0026ldquo;this much budget for an enriching experience\u0026rdquo; Mentor Conversations None yet\nIntegration Integration into soundscape Hiring Need help designing a PCB\n","html":"\u003ch2 id=\"demo-day\"\u003eDemo day\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNo value add for demo-day winner\u003c/li\u003e\n\u003cli\u003eCompetition makes you want to prepare more\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;this much budget for an enriching experience\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mentor-conversations\"\u003eMentor Conversations\u003c/h2\u003e\n\u003cp\u003eNone yet\u003c/p\u003e\n\u003ch2 id=\"integration\"\u003eIntegration\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIntegration into soundscape\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"hiring\"\u003eHiring\u003c/h2\u003e\n\u003cp\u003eNeed help designing a PCB\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsalus_april_checkin/","tags":null,"title":"Salus April Checkin"},{"categories":null,"contents":"sample space \\(S\\) is the set of all possible outcomes of an experiment. 
It could be continuous or distinct.\nequally likely outcomes Some sample spaces have equally likely outcomes:\ncoin flip flipping two coins rolling a fair die If we have equally likely outcomes, \\(P(outcome)\\) = \\(\\frac{1}{S}\\).\nIf your sample space has equally likely outcomes, the probability is juts counting:\n\\begin{equation} P(E) = \\frac{count(E)}{count(S)} \\end{equation}\nWhenever you use this tool, you have to think about whether or not your outcomes are equally likely. For instance, the \u0026ldquo;sum of two dice rolling\u0026rdquo; is NOT equally likely.\nDistinct counting makes things equally likely.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e \\(S\\) is the set of all possible outcomes of an experiment. It could be continuous or distinct.\u003c/p\u003e\n\u003ch2 id=\"equally-likely-outcomes\"\u003eequally likely outcomes\u003c/h2\u003e\n\u003cp\u003eSome \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003es have equally likely outcomes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecoin flip\u003c/li\u003e\n\u003cli\u003eflipping two coins\u003c/li\u003e\n\u003cli\u003erolling a fair die\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf we have \u003ca href=\"#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e, \\(P(outcome)\\) = \\(\\frac{1}{S}\\).\u003c/p\u003e\n\u003cp\u003eIf your sample space has \u003ca href=\"#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e, the probability is juts counting:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E) = \\frac{count(E)}{count(S)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhenever you use this tool, you have to think about whether or not your outcomes are \u003cstrong\u003eequally likely\u003c/strong\u003e. 
For instance, the \u0026ldquo;sum of two dice rolling\u0026rdquo; is \u003cstrong\u003eNOT\u003c/strong\u003e equally likely.\u003c/p\u003e\n\u003cp\u003eDistinct \u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e makes things equally likely.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsample_space/","tags":null,"title":"sample space"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsars_cov2/","tags":null,"title":"SARS-COV2"},{"categories":null,"contents":"SARS-COV2\ntraditional stain techniques to analyze the epitopes being targeted uses cyro-EM structural analysis to figure structural points of neutralization predict correct antibodies binding to force certain structures to neutralize covid-19 analyze mRNA-vax elicited antibodies to see similarity between those that are useful predicted in 3) Study identified three epitopes: C1520, C1791, C1717, which changes the structure/activity of all three variants of concern as identified using methods above, and are inpervious to the mutation to the main supersite.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsars_cov2/\"\u003eSARS-COV2\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etraditional stain techniques to analyze the \u003ca href=\"/posts/kbhepitophs/\"\u003eepitopes\u003c/a\u003e being targeted\u003c/li\u003e\n\u003cli\u003euses \u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e structural analysis to figure structural points of neutralization\u003c/li\u003e\n\u003cli\u003epredict correct antibodies binding to force certain structures to neutralize covid-19\u003c/li\u003e\n\u003cli\u003eanalyze mRNA-vax elicited antibodies to see similarity between those that are useful predicted in 3)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eStudy identified three \u003ca href=\"/posts/kbhepitophs/\"\u003eepitopes\u003c/a\u003e: C1520, C1791, C1717, which changes the structure/activity of all three variants of concern as 
identified using methods above, and are inpervious to the mutation to the main \u003ca href=\"/posts/kbhspersite/\"\u003esupersite\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsars_cov2_structural_analysis/","tags":null,"title":"SARS-COV2 Structural Analysis"},{"categories":null,"contents":"Sarsa (Lambda) is SARSA with Eligibility Traces (\\(\\lambda\\)).\nPrevious approaches to deal with Partially Observable Markov Decision Process:\nmemory-based state estimation (beliefs) special planning methods Key question: Can we use MDP reinforcement learning to deal with POMDPs?\nBackground Recall MDP SARSA:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)] \\end{equation}\nRecall that, sparse rewards with SARSA can take a long time to learn because it takes time to backpropgate.\nHence, we use Eligibility Traces, which keeps track of what\u0026rsquo;s \u0026ldquo;eligible\u0026rdquo; for updates:\nlet \\(\\lambda\\) be some decay parameter, we have:\n\\begin{equation} \\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a) \\end{equation}\nand, we can write:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a) \\end{equation}\nwhere by the visit counts are discounted such that:\n\\begin{equation} N(s,a) \\leftarrow \\gamma \\lambda N(s,a) \\end{equation}\nWorry Inability of fully observing the state seems to invalidate the point of \\(Q\\) learning, SARSA, etc.\nApplying Eligibility Traces to POMDPs Instead of \\(N(s,a)\\) a visitation count, we initialize some \\(\\eta(x,a)\\) for observation + action and work on the rest of it in the same way.\nAt each step, we sample some reward \\(r_{t+1}\\) and observation \\(x_{t+1}\\) (and remember our current \\(r_{t}, x_{t}\\)). 
Then in which case:\n\\begin{equation} \\eta(x_{t},a_{t}) = 1\\ \\text{(reset decay)} \\end{equation}\nand\n\\begin{equation} \\forall (x\\neq x_{t}, a \\neq a_{t}): \\eta(x,a) = \\gamma \\lambda \\eta_{t-1}(x,a)\\ \\text{(decay others)} \\\\ \\end{equation}\nUsing the new \\(\\eta\\) values, we update \\(Q(x, a)\\) in the usualish manner:\n\\begin{equation} \\delta_{t} := r_{t} + \\gamma Q (x_{t+1}, a_{t+1}) - Q(x_{t}, a_{t}) \\end{equation}\n\\begin{equation} \\forall (x,a): Q(x,a) = Q(x,a)+a \\delta_{t} \\eta(x,a) \\end{equation}\nall values of \\(\\eta\\) are established to \\(0\\) per episode.\nNotably, we formulate \\(x\\) as a tuple of TWO observations in the past\u0026mdash;meaning we have a single step of memory in the past and optimise over those.\nThis requires no belief propagation!! And note how the \u0026ldquo;eligibility\u0026rdquo; of each observation decays over time, such that the influence of that observation decays until we see the corresponding observation again.\naliasing an important failure of this is aliasing\u0026ndash;where you maybe in one of two different places that has similar properties observationally, but taking actions at those states results in very different places.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsarsa_lambda/\"\u003eSarsa (Lambda)\u003c/a\u003e is \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e with \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e (\\(\\lambda\\)).\u003c/p\u003e\n\u003cp\u003ePrevious approaches to deal with \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePartially Observable Markov Decision Process\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ememory-based state estimation (\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es)\u003c/li\u003e\n\u003cli\u003especial planning 
methods\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eKey question: \u003cstrong\u003eCan we use MDP \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e to deal with \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es?\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eRecall MDP \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that, sparse rewards with \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e can take a long time to learn because it takes time to backpropgate.\u003c/p\u003e\n\u003cp\u003eHence, we use \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e, which keeps track of what\u0026rsquo;s \u0026ldquo;eligible\u0026rdquo; for updates:\u003c/p\u003e\n\u003cp\u003elet \\(\\lambda\\) be some decay parameter, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere by the visit counts are discounted such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN(s,a) \\leftarrow \\gamma \\lambda N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"worry\"\u003eWorry\u003c/h2\u003e\n\u003cp\u003eInability of fully observing the state seems to invalidate the point of \\(Q\\) learning, \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e, etc.\u003c/p\u003e\n\u003ch2 
id=\"applying-eligibility-traces--kbhmodel-free-reinforcement-learning-dot-md--to-pomdp--kbhpartially-observable-markov-decision-process-dot-md--s\"\u003eApplying \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e to \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eInstead of \\(N(s,a)\\) a visitation count, we initialize some \\(\\eta(x,a)\\) for observation + action and work on the rest of it in the same way.\u003c/p\u003e\n\u003cp\u003eAt each step, we sample some reward \\(r_{t+1}\\) and observation \\(x_{t+1}\\) (and remember our current \\(r_{t}, x_{t}\\)). Then in which case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\eta(x_{t},a_{t}) = 1\\ \\text{(reset decay)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall (x\\neq x_{t}, a \\neq a_{t}): \\eta(x,a) = \\gamma \\lambda \\eta_{t-1}(x,a)\\ \\text{(decay others)} \\\\\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eUsing the new \\(\\eta\\) values, we update \\(Q(x, a)\\) in the usualish manner:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{t} := r_{t} + \\gamma Q (x_{t+1}, a_{t+1}) - Q(x_{t}, a_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall (x,a): Q(x,a) = Q(x,a)+a \\delta_{t} \\eta(x,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eall values of \\(\\eta\\) are established to \\(0\\) per episode.\u003c/p\u003e\n\u003cp\u003eNotably, we formulate \\(x\\) as a tuple of \u003cstrong\u003e\u003cstrong\u003eTWO\u003c/strong\u003e\u003c/strong\u003e observations in the past\u0026mdash;meaning we have a single step of memory in the past and optimise over those.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThis requires no belief propagation\u003c/strong\u003e!! 
And note how the \u0026ldquo;eligibility\u0026rdquo; of each observation decays over time, such that the influence of that observation decays until we see the corresponding observation again.\u003c/p\u003e\n\u003ch2 id=\"aliasing\"\u003ealiasing\u003c/h2\u003e\n\u003cp\u003ean important failure of this is \u003ca href=\"#aliasing\"\u003ealiasing\u003c/a\u003e\u0026ndash;where you maybe in one of two different places that has similar properties observationally, but taking actions at those states results in very different places.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsarsa_lambda/","tags":null,"title":"Sarsa (Lambda)"},{"categories":null,"contents":"Big problem: curse of dimensionality and the curse of history.\nPBVI and HSVI tries to sample the belief simplex generally. But instead we should try to sample OPTIMAL REACHABLE SET.\nBackground Recall one-step lookahead in POMDP. The difficulty here is that the sum over all of the alpha-vectors is still very hard. So, in PBVI, we only do this to a small set of beliefs\nSARSOP sample \\(R^{*}\\) backup prune Initialization choose an initial belief, action, and observation using \u0026ldquo;suitable heuristics\u0026rdquo;. Initialize a set of alpha vectors corresponding to this belief.\nSampling compute \\(b\u0026rsquo; = update(b,a,o)\\) add node \\(b\u0026rsquo;\\) to the tree So far, this is just PBVI, HSVI. 
The point is that we only want to update the reachable set.\nTo do this, we now take the new \\(b\u0026rsquo;\\), we give an upper bound via FIB, and a lower bound with blind lower bound over the alpha vectors you already got.\nNow:\nwhere \\(\\mathcal{R}^{*}\\) is a reachable space tree set from \\(b_0\\).\nBackup PBVI Backup on the beliefs you sampled to update your alpha vectors.\nPruning We can prune anything that\u0026rsquo;s suboptimal: every step, we perform alpha vector pruning at every step.\nLimitations HSVI is better at handling systems with lower uncertainty.\nDoes not make an attempt at challenges of dimensionality Make unproven theoretical claims Don\u0026rsquo;t compare to domain contraction Compare algorithm to a single alternative Compared to continuous state spaces Subsection headings ","html":"\u003cp\u003eBig problem: curse of \u003cstrong\u003edimensionality\u003c/strong\u003e and the curse of \u003cstrong\u003ehistory\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e and \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e tries to sample the belief simplex generally. But instead we should try to sample \u003cstrong\u003eOPTIMAL REACHABLE SET\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e. The difficulty here is that the sum over all of the alpha-vectors is still very hard. 
So, in \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, we only do this to a small set of beliefs\u003c/p\u003e\n\u003ch2 id=\"sarsop\"\u003eSARSOP\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esample \\(R^{*}\\)\u003c/li\u003e\n\u003cli\u003ebackup\u003c/li\u003e\n\u003cli\u003eprune\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"initialization\"\u003eInitialization\u003c/h3\u003e\n\u003cp\u003echoose an initial belief, action, and observation using \u0026ldquo;suitable heuristics\u0026rdquo;. Initialize a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es corresponding to this belief.\u003c/p\u003e\n\u003ch3 id=\"sampling\"\u003eSampling\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ecompute \\(b\u0026rsquo; = update(b,a,o)\\)\u003c/li\u003e\n\u003cli\u003eadd node \\(b\u0026rsquo;\\) to the tree\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo far, this is just \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e. 
The point is that we only want to update the reachable set.\u003c/p\u003e\n\u003cp\u003eTo do this, we now take the new \\(b\u0026rsquo;\\), we give an upper bound via \u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFIB\u003c/a\u003e, and a lower bound with \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e over the alpha vectors you already got.\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_22-11-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere \\(\\mathcal{R}^{*}\\) is a reachable space tree set from \\(b_0\\).\u003c/p\u003e\n\u003ch3 id=\"backup\"\u003eBackup\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e on the beliefs you sampled to update your \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"pruning\"\u003ePruning\u003c/h3\u003e\n\u003cp\u003eWe can prune anything that\u0026rsquo;s suboptimal: every step, we perform \u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e at every step.\u003c/p\u003e\n\u003ch2 id=\"limitations\"\u003eLimitations\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e is better at handling systems with lower uncertainty.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDoes not make an attempt at challenges of dimensionality\u003c/li\u003e\n\u003cli\u003eMake unproven theoretical claims\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t compare to domain contraction\u003c/li\u003e\n\u003cli\u003eCompare algorithm to a single alternative\u003c/li\u003e\n\u003cli\u003eCompared to continuous state spaces\u003c/li\u003e\n\u003cli\u003eSubsection 
headings\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsarsop/","tags":null,"title":"SARSOP"},{"categories":null,"contents":" adding emails need sequential typing hitting enter should move on to the next page date selection \u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP reading calendar output isn\u0026rsquo;t sorted bugs reading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\nneed some information about that the heck is actually happening on the scheduling\nshow other people\u0026rsquo;s overall availibliity in the scheduling page\nthe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work\nhave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message; also allow the message owner to change to alternate schedules on that date\nalso scheduling multiple people is broken. ah. ","html":"\u003cul\u003e\n\u003cli\u003eadding emails need sequential typing\u003c/li\u003e\n\u003cli\u003ehitting enter should move on to the next page\u003c/li\u003e\n\u003cli\u003edate selection\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP\u003c/li\u003e\n\u003cli\u003ereading calendar output isn\u0026rsquo;t sorted\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bugs\"\u003ebugs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ereading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eneed some information about that the heck is actually happening on the scheduling\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eshow other people\u0026rsquo;s overall availibliity in the scheduling page\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ethe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number 
work\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ehave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message; also allow the message owner to change to alternate schedules on that date\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"also\"\u003ealso\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003escheduling multiple people is broken. ah.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalander_notes-1/","tags":null,"title":"scalander notes"},{"categories":null,"contents":" adding emails need sequential typing hitting enter should move on to the next page date selection \u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP reading calendar output isn\u0026rsquo;t sorted bugs reading other people\u0026rsquo;s calendars isn\u0026rsquo;t working need some information about that the heck is actually happening on the scheduling show other people\u0026rsquo;s overall availibliity in the scheduling page idea of \u0026ldquo;budget\u0026rdquo; next actions scheduling multiple people is broken. ah. 
have the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date the weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work ","html":"\u003cul\u003e\n\u003cli\u003eadding emails need sequential typing\u003c/li\u003e\n\u003cli\u003ehitting enter should move on to the next page\u003c/li\u003e\n\u003cli\u003edate selection\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP\u003c/li\u003e\n\u003cli\u003ereading calendar output isn\u0026rsquo;t sorted\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bugs\"\u003ebugs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ereading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\u003c/li\u003e\n\u003cli\u003eneed some information about that the heck is actually happening on the scheduling\u003c/li\u003e\n\u003cli\u003eshow other people\u0026rsquo;s overall availibliity in the scheduling page\u003c/li\u003e\n\u003cli\u003eidea of \u0026ldquo;budget\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-actions\"\u003enext actions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003escheduling multiple people is broken. 
ah.\u003c/li\u003e\n\u003cli\u003ehave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date\u003c/li\u003e\n\u003cli\u003ethe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalander_notes-2/","tags":null,"title":"scalander notes"},{"categories":null,"contents":" adding emails need sequential typing hitting enter should move on to the next page date selection \u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP reading calendar output isn\u0026rsquo;t sorted bugs reading other people\u0026rsquo;s calendars isn\u0026rsquo;t working need some information about that the heck is actually happening on the scheduling show other people\u0026rsquo;s overall availibliity in the scheduling page idea of \u0026ldquo;budget\u0026rdquo; next actions scheduling multiple people is broken. ah. have the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date (have default notifications in the iCal invite) somehow remind people after the fact that the meeting is scheduled tie in evite abilities (this will be nice for your party, etc.) event planning built in? type in a budget and find vendors for the party. age range? 
the weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work home page \u0026ldquo;the front page looks like that for an OB-GYN\u0026rdquo; \u0026mdash; feels like ZocDoc it is also not that fun maybe some kind of memphis design ","html":"\u003cul\u003e\n\u003cli\u003eadding emails need sequential typing\u003c/li\u003e\n\u003cli\u003ehitting enter should move on to the next page\u003c/li\u003e\n\u003cli\u003edate selection\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP\u003c/li\u003e\n\u003cli\u003ereading calendar output isn\u0026rsquo;t sorted\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bugs\"\u003ebugs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ereading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\u003c/li\u003e\n\u003cli\u003eneed some information about that the heck is actually happening on the scheduling\u003c/li\u003e\n\u003cli\u003eshow other people\u0026rsquo;s overall availibliity in the scheduling page\u003c/li\u003e\n\u003cli\u003eidea of \u0026ldquo;budget\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-actions\"\u003enext actions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003escheduling multiple people is broken. ah.\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ehave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date (have default notifications in the iCal invite)\u003c/strong\u003e\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003esomehow remind people after the fact that the meeting is scheduled\u003c/li\u003e\n\u003cli\u003etie in evite abilities (this will be nice for your party, etc.)\n\u003cul\u003e\n\u003cli\u003eevent planning built in? 
type in a budget and find vendors for the party.\u003c/li\u003e\n\u003cli\u003eage range?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ethe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"home-page\"\u003ehome page\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;the front page looks like that for an OB-GYN\u0026rdquo; \u0026mdash; feels like ZocDoc\u003c/li\u003e\n\u003cli\u003eit is also not that fun\u003c/li\u003e\n\u003cli\u003emaybe some kind of memphis design\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalander_notes/","tags":null,"title":"scalander notes"},{"categories":null,"contents":"Scalar multiplication is the process of multiplying a scalar to an element in a set.\nconstituents A set \\(V\\) Some \\(\\lambda \\in \\mathbb{F}\\) Each \\(v \\in V\\) requirements scalar multiplication is defined by a function that results in \\(\\lambda v \\in V\\) (maps back to the space!) to each \\(\\lambda \\in \\mathbb{F}\\) and each \\(v \\in V\\).\nadditional information See also scalar multiplication in \\(\\mathbb{F}^n\\).\n","html":"\u003cp\u003eScalar multiplication is the process of multiplying a scalar to an element in a set.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA set \\(V\\)\u003c/li\u003e\n\u003cli\u003eSome \\(\\lambda \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003eEach \\(v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is defined by a function that results in \\(\\lambda v \\in V\\) (maps back to the space!) 
to each \\(\\lambda \\in \\mathbb{F}\\) and each \\(v \\in V\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalar_multiplication/","tags":null,"title":"scalar multiplication"},{"categories":null,"contents":"scheduling is the tool to figure out which thread can run. Because threads exist in different thread states:\nrunning blockde - waiting for an event like disk, network, etc. ready - able to run, but not on CPU yet a scheduler needs to do the task of ordering ready threads to run, moving running threads to ready when it has ran for enough time. Possible pathways:\nready =\u0026gt; running blocked =\u0026gt; running blocked =\u0026gt; ready =\u0026gt; running You can\u0026rsquo;t go from ready to blocked because you have to do something to know you are blocked.\nscheduling \u0026ldquo;ready\u0026rdquo; threads The following assumes one core.\nTradeoffs:\nminimize time to a useful result\u0026mdash;(assumption: a \u0026ldquo;useful result\u0026rdquo; = a thread blocking or completes) using resources efficiently (keeping cores/disks busy) fairness (multiple users / many jobs for one users) Typically, we focus on (1); approaches that maximize useful results quickly is unfair beacuse you are prioritizing. 
We can measure this based on \u0026ldquo;average completion time\u0026rdquo;: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.\nfirst-come first-serve keep all threads in ready in a queue run the first thread on the front until it finishes/it blocks for however long repeat Problem: a thread can run away with the entire system, accidentally, through infinite loops\nround robin keep all threads in a round robin each thread can run for a set amount of time called a time slice (10ms or so) if a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin Problem: what\u0026rsquo;s a good time slice?\ntoo small: the overhead of context switching is higher than the overhead of running the program too large: threads can monopolize cores, can\u0026rsquo;t handle user input, etc. Linux uses 4ms. Generally, you want 5-10ms range.\nYou can think about this as dividing each time slot by time slices, and add as fcfs\nshortest remaining processing time Run first the thread in queue that will finish the most quickly and run it fully to competition.\nIt gives preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.\nTHIS IS not implementable\u0026mdash;-we can\u0026rsquo;t build this beacuse we have to know which thread will finish the most quickly, which we can\u0026rsquo;t because you have to solve the halting problem to know.\nOur goal, then is to get as close as possible to the performance of SRPT.\nProblem:\nwe don\u0026rsquo;t know which one will finish the most quickly if we have many threads and one long-running thread, the long running thread won\u0026rsquo;t be able to run ever priority based scheduling Key idea: behavior tends to be consistent in a thread. 
We build multiple priority queues to address this.\npriority based scheduling is an approximation of SRPT, using the past performance of the thread to estimate the running time of the thread. Over time, threads will move between priority queues, and we run the topmost thread from the highest priority queue\nthreads that aren\u0026rsquo;t using much CPU stay in higher priority queue threads that are using much CPU gets bumped down to lower priority queues Similar to SRPT, this also has the good property of giving preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.\nimplement based on time slice usage a thread always enters in the highest priority queue\nif the thread uses all of its time slice and didn\u0026rsquo;t exit, bump them down a priority queue if a thread blocked before it used all of its time slice, bump them up a priority queue implement based on aggregate time used: fixing neglect a thread has a number for \u0026ldquo;how much time did you use on the CPU recently\u0026rdquo;? The priories are sorted by that value, and the smallest time use will be ran.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e is the tool to figure out which thread can run. 
Because \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es exist in different \u003ca href=\"/posts/kbhprocess_control_block/#id-b4b86ccc-70f3-4d30-b437-2f5fff63b0e6-thread-state\"\u003ethread state\u003c/a\u003es:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003erunning\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eblockde\u003c/strong\u003e - waiting for an event like disk, network, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eready\u003c/strong\u003e - able to run, but not on CPU yet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ea scheduler needs to do the task of ordering ready threads to run, moving running threads to ready when it has ran for enough time. Possible pathways:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eready =\u0026gt; running\u003c/li\u003e\n\u003cli\u003eblocked =\u0026gt; running\u003c/li\u003e\n\u003cli\u003eblocked =\u0026gt; ready =\u0026gt; running\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYou can\u0026rsquo;t go from \u003cstrong\u003eready\u003c/strong\u003e to \u003cstrong\u003eblocked\u003c/strong\u003e because you have to \u003cem\u003edo something\u003c/em\u003e to know you are blocked.\u003c/p\u003e\n\u003ch2 id=\"scheduling--kbhscheduling-dot-md--ready-threads\"\u003e\u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e \u0026ldquo;ready\u0026rdquo; threads\u003c/h2\u003e\n\u003cp\u003eThe following assumes one core.\u003c/p\u003e\n\u003cp\u003eTradeoffs:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eminimize time to a useful result\u0026mdash;(\u003cstrong\u003eassumption\u003c/strong\u003e: a \u0026ldquo;useful result\u0026rdquo; = a thread blocking or completes)\u003c/li\u003e\n\u003cli\u003eusing resources efficiently (keeping cores/disks busy)\u003c/li\u003e\n\u003cli\u003efairness (multiple users / many jobs for one users)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTypically, we focus on (1); approaches that maximize useful results 
quickly is unfair beacuse you are prioritizing. We can measure this based on \u0026ldquo;average completion time\u0026rdquo;: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.\u003c/p\u003e\n\u003ch3 id=\"first-come-first-serve\"\u003efirst-come first-serve\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ekeep all threads in ready in a \u003cstrong\u003equeue\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erun the first thread on the front until it finishes/it blocks for however long\u003c/li\u003e\n\u003cli\u003erepeat\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e: a thread can run away with the entire system, accidentally, through infinite loops\u003c/p\u003e\n\u003ch3 id=\"round-robin\"\u003eround robin\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ekeep all threads in a \u003cstrong\u003eround robin\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eeach thread can run for a set amount of time called a \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e (10ms or so)\u003c/li\u003e\n\u003cli\u003eif a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e: what\u0026rsquo;s a good \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etoo small: the overhead of context switching is higher than the overhead of running the program\u003c/li\u003e\n\u003cli\u003etoo large: threads can monopolize cores, can\u0026rsquo;t handle user input, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLinux uses 4ms. 
Generally, you want 5-10ms range.\u003c/p\u003e\n\u003cp\u003eYou can think about this as dividing each time slot by time slices, and add as fcfs\u003c/p\u003e\n\u003ch3 id=\"shortest-remaining-processing-time\"\u003eshortest remaining processing time\u003c/h3\u003e\n\u003cp\u003eRun first the thread in queue that will finish the \u003cstrong\u003emost quickly\u003c/strong\u003e and run it \u003cstrong\u003efully to competition\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eIt \u003cstrong\u003egives preference to those that need it the least\u003c/strong\u003e: a good side effect is that it gives preference to \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e first, so we can wait on them during disk operations while \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eCPU Thread\u003c/a\u003es run after the \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e has ran.\u003c/p\u003e\n\u003cp\u003eTHIS IS \u003cstrong\u003enot implementable\u003c/strong\u003e\u0026mdash;-we can\u0026rsquo;t build this beacuse we have to know which thread will finish the most quickly, which we can\u0026rsquo;t because you have to solve the halting problem to know.\u003c/p\u003e\n\u003cp\u003eOur goal, then is to get as close as possible to the performance of \u003ca href=\"#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe don\u0026rsquo;t know which one will finish the most quickly\u003c/li\u003e\n\u003cli\u003eif we have many threads and one long-running thread, the long running thread won\u0026rsquo;t be able to run ever\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"priority-based-scheduling\"\u003epriority based scheduling\u003c/h3\u003e\n\u003cp\u003eKey idea: \u003cstrong\u003ebehavior tends to be consistent in a 
thread\u003c/strong\u003e. We build multiple \u003cstrong\u003epriority queues\u003c/strong\u003e to address this.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#priority-based-scheduling\"\u003epriority based scheduling\u003c/a\u003e is an approximation of \u003ca href=\"#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e, using the past performance of the thread to estimate the running time of the thread. Over time, \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es will move between priority queues, and we \u003cstrong\u003erun the topmost thread from the highest priority queue\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethreads that aren\u0026rsquo;t using much CPU stay in higher priority queue\u003c/li\u003e\n\u003cli\u003ethreads that are using much CPU gets bumped down to lower priority queues\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSimilar to \u003ca href=\"#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e, this also has the good property of \u003cstrong\u003egiving preference to those that need it the least\u003c/strong\u003e: a good side effect is that it gives preference to \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e first, so we can wait on them during disk operations while \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eCPU Thread\u003c/a\u003es run after the \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e has ran.\u003c/p\u003e\n\u003ch4 id=\"implement-based-on-time-slice--org04deec4--usage\"\u003eimplement based on \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e usage\u003c/h4\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e always enters in the \u003cstrong\u003ehighest\u003c/strong\u003e priority queue\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif the \u003ca 
href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e uses all of its \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e and didn\u0026rsquo;t exit, bump them down a priority queue\u003c/li\u003e\n\u003cli\u003eif a \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e blocked before it used all of its \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e, bump them up a priority queue\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"implement-based-on-aggregate-time-used-fixing-neglect\"\u003eimplement based on aggregate time used: fixing neglect\u003c/h4\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e has a number for \u0026ldquo;how much time did you use on the CPU recently\u0026rdquo;? The priories are sorted by that value, and the smallest time use will be ran.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscheduling/","tags":null,"title":"scheduling"},{"categories":null,"contents":"Search the knowledgbase.\n","html":"\u003cp\u003eSearch the knowledgbase.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/search/","tags":null,"title":"Search Results"},{"categories":null,"contents":"The second moment of area is a value which\u0026mdash;given an origin\u0026mdash;describes how point masses are distributed around that origin. (i.e. a number for how point masses are distributed). It is in units \\(m^{4}\\).\nTake, for instance, the following picture:\nWe have defined an origin at \\((0,0)\\) of the figure above. 
Furthermore, we have some \\(\\rho_{i}\\) which is the distance from that origin to each of the infinitesimal areas \\(\\dd{A}\\).\nThen, the second moment of area is defined as:\n\\begin{equation} I = \\iint_{R} \\rho^{2} \\dd{A} \\end{equation}\nThis\u0026hellip; would make sense.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e is a value which\u0026mdash;given an origin\u0026mdash;describes how point masses are distributed around that origin. (i.e. a number for how point masses are distributed). It is in units \\(m^{4}\\).\u003c/p\u003e\n\u003cp\u003eTake, for instance, the following picture:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_22-46-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe have defined an origin at \\((0,0)\\) of the figure above. Furthermore, we have some \\(\\rho_{i}\\) which is the distance from that origin to each of the infinitesimal areas \\(\\dd{A}\\).\u003c/p\u003e\n\u003cp\u003eThen, the \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = \\iint_{R} \\rho^{2} \\dd{A}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis\u0026hellip; would make sense.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecond_moment_of_area/","tags":null,"title":"second moment of area"},{"categories":null,"contents":"the trick Here is a pretty ubiquitous trick to solve differential equations of the second order differential equations. 
It is used to change a second order differential equation to a First-Order Differential Equations.\nIf you have a differential equation of the shape:\n\\begin{equation} x^{\u0026rsquo;\u0026rsquo;} = f(x,x\u0026rsquo;) \\end{equation}\nthat, the second derivative is strictly a function between the first derivative value and the current value.\nWe are going to define a notation \\(x\u0026rsquo; = v\\), which makes sense.\nSo, we will describe:\n\\begin{equation} x^{\u0026rsquo;\u0026rsquo;} = \\dv{v}{t} = \\dv{v}{x} \\dv{x}{t} = v\\dv{v}{x} \\end{equation}\nSo therefore, we have:\n\\begin{equation} x^{\u0026rsquo;\u0026rsquo;} = v\\dv{v}{x} = f(x,v) \\end{equation}\nSo turns out, the original input \\(t\\) is, given a specific equation above, we have no need to know it.\nTo actually go about solving it, see solving homogeneous higher-order differential equations.\n","html":"\u003ch2 id=\"the-trick\"\u003ethe trick\u003c/h2\u003e\n\u003cp\u003eHere is a pretty ubiquitous trick to solve \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003es of the \u003ca href=\"/posts/kbhsecond_order_differential_equations/\"\u003esecond order differential equations\u003c/a\u003e. 
It is used to change a \u003ca href=\"/posts/kbhsecond_order_differential_equations/\"\u003esecond order differential equation\u003c/a\u003e to a \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you have a \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{\u0026rsquo;\u0026rsquo;} = f(x,x\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat, the second derivative is strictly a function between the first derivative value and the current value.\u003c/p\u003e\n\u003cp\u003eWe are going to define a notation \\(x\u0026rsquo; = v\\), which makes sense.\u003c/p\u003e\n\u003cp\u003eSo, we will describe:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{\u0026rsquo;\u0026rsquo;} = \\dv{v}{t} = \\dv{v}{x} \\dv{x}{t} = v\\dv{v}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo therefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{\u0026rsquo;\u0026rsquo;} = v\\dv{v}{x} = f(x,v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo turns out, the original input \\(t\\) is, given a specific equation above, we have no need to know it.\u003c/p\u003e\n\u003cp\u003eTo actually go about solving it, see \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#solving-homogeneous-higher-order-differential-equations\"\u003esolving homogeneous higher-order differential equations\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecond_order_differential_equations/","tags":null,"title":"second order differential equation"},{"categories":null,"contents":"Here\u0026rsquo;s a general form:\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t) \\end{equation}\nsee:\nsolving homogeneous constant coefficient higher-order differential equations and more generally, using matrix exponentiation, solving homogeneous 
higher-order differential equations solving homogeneous higher-order differential equations This problem because easier if the right side is \\(0\\).\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0 \\end{equation}\nThe general goal to solve in this case is to make this a system of First-Order Differential Equations.\nTo do this, we begin by making:\n\\begin{equation} y = \\dv{x}{t} \\end{equation}\nTherefore, we can change the first equation:\n\\begin{equation} a \\dv{y}{t} + by + cx = 0 \\end{equation}\nSolving both of these conditions, we form a system of linear equations:\n\\begin{align} \u0026amp;\\dv{x}{t}=y \\\\ \u0026amp;\\dv{y}{t} = \\frac{-c}{a}x-\\frac{b}{a}y \\end{align}\nWe are now first-order, so we can put this into a matrix equation:\n\\begin{equation} \\dv t \\begin{pmatrix} x \\\\ y \\end{pmatrix} = \\begin{pmatrix} 0 \u0026amp; 1 \\\\ -\\frac{c}{a} \u0026amp; \\frac{-b}{a} \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \\end{equation}\nNow! We have an equation:\n\\begin{equation} \\dv{t}v = Av \\end{equation}\nThe result above shows that the transformations \\(\\dv{t}\\) and \\(A\\) are isomorphic. Therefore, we now attempt to characterize \\(A\\) to solve this expression.\nLet\u0026rsquo;s begin. We will first shove that \\(v\\) on top of the differential for aesthetics:\n\\begin{equation} \\dv{v}{t} = Av \\end{equation}\nThis expression is actually nicely seperable, so we shall endeavor to separate it:\n\\begin{equation} \\dd{v} = Av\\dd{t} \\end{equation}\nOf course, \\(v\\) is a function of \\(t\\). Therefore, the right side would be woefully complicated. Therefore, we shall do this handwavy thing where we go:\n\\begin{equation} \\frac{1}{v}\\dd{v} = A\\dd{t} \\end{equation}\nNow, \\(A\\) is not a function in \\(t\\) \u0026mdash; its just some constants! 
So, we can integrate this safely without much trouble:\n\\begin{equation} \\int \\frac{1}{v}\\dd{v} =\\int A\\dd{t} \\end{equation}\nTo get:\n\\begin{equation} \\ln v = t A + C \\end{equation}\nNote the order as \\(t\\) is a constant. Finally, we will invert the natural log and get \\(v\\) back:\n\\begin{equation} v = e^{tA+C} \\end{equation}\nExcellent. We will now apply some log/exponent laws:\n\\begin{equation} v = e^{tA}e^{C} = e^{tA}C \\end{equation}\nthis is so very handwavy. \\(C\\) is technically a vector here\u0026hellip; long story and iffy understanding\nOk, how do we go about solving \\(x\\)?\nNote now that \\(v=(x\\ y)\\), so we will expand that:\n\\begin{equation} \\begin{pmatrix} x \\\\ y \\end{pmatrix} = e^{tA}\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} \\end{equation}\nwhere, as we defined above \\(y=\\dv{x}{t}\\) (each integral needing a different constant.)\nNow. remember that \\(A\\) is diagonalizable; and so will \\(tA\\) (citation needed, but intuition is that scaling eigenvalues do nothing anyways). So, to make this exponentiation easier, we will diagonalize it.\nWe now have that\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\n(how?)\nOk. Finally, we will take the binroller that is \u0026ldquo;constancy\u0026rdquo; and apply it to \\(e^{tA}\\). 
This took quite a bit of time for me to get, so feel free to take some time to get it too.\nThis all hinges upon the fact that \\(C\\) is a constant, so multiplying any constant to it still makes it \\(C\\).\nSo far, we have that:\n\\begin{equation} \\begin{pmatrix} x \\\\ y \\end{pmatrix} = e^{tA}\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} = \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} \\end{equation}\nRemember, now, that \\(v_1\\dots v_{}\\) and its inverses are nothing but vectors filled with a lot of scalars. And any scalar \\(\\alpha\\) times a constant still results in the (a new) constant: \\(\\alpha C =C\\). So, we will steamroll \\(\\mqty(x_0\u0026amp;y_0)\\) over the right side eigenbases matrix (multiplying a constant vector to any\u0026rsquo;ol matrix will just get a new set of constants back) to get:\n\\begin{align} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \u0026amp;= \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} \\\\ \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix} C_1 \\\\ C_2 \\end{pmatrix} \\end{align}\nNow, the middle thing has \\(t\\) in it! (the input!) So, we can\u0026rsquo;t just steamroll now. 
We have to preserve the middle part.\n\\begin{align} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix} C_1 \\\\ C_2 \\end{pmatrix} \\\\ \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix} C_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}} \\end{pmatrix} \\end{align}\nAnd finally, we keep steamrolling:\n\\begin{align} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix} C_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}} \\end{pmatrix}\\\\ \u0026amp;= \\mqty({C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}} \\\\ {C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}}) \\end{align}\nThere is absolutely no difference in nature between \\(C_{j_{x}}\\) and \\(C_{j_{y}}\\) except for the fact that they are different constants (which we got by multiplying \\(v_1 \\dots v_{m}\\)) to it.\nOk so:\n\\begin{equation} \\begin{cases} x = C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}\\\\ y = C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}\\\\ \\end{cases} \\end{equation}\nconstructing the characteristic equation, as desired.\nsolving homogeneous constant coefficient higher-order differential equations in the homogeneous case, we have some:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nand it arises that there\u0026rsquo;s a pair of solutions \\(y_1(t)\\) and \\(y_2(t)\\) whose linear combinations span the entire space of solutions. in fact, it arises as a solution to some functional quadratic equation \\(\\lambda^{2} + a\\lambda + b = 0\\).\nThe specific coefficients \\(c_1\\) and \\(c_2\\) of the linear combination arises out of the initial conditons, which is the same measurement given at the initial time and its derivative: \\(y(t_0)\\) and \\(y\u0026rsquo;(t_0)\\). 
It comes out of Linear Algebra why there is exactly two initial values.\nSpecifically, it arises out of solutions of the shape:\n\\begin{equation} y(t) = c_1 e^{\\lambda_{1}t} + c_2e^{\\lambda_{2}t} \\end{equation}\nwhere \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) are solutions to the characteristic polynomial above. For why exactly this is, see method of undetermined coefficients.\nfinding independent solutions of second-order constant-coefficient linear ODEs Given some:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nwe desire to find two independent solutions. After which, by superposition principle, we know that any linear combinations will yield a solution.\nAside, consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; = y \\end{equation}\nwe see that both \\(y=e^{t}\\) and \\(y=e^{-t}\\) are solutions. We can see that this is independent by setting up:\n\\begin{equation} c_1 e^{t} + c_2 e^{-t} = 0 \\end{equation}\nwhich, multiplying through by \\(e^{t}\\) and dividing, we obtain:\n\\begin{equation} e^{2t} = -\\frac{c_2}{c_1} \\end{equation}\nNow, the right side is constant, and the left is not. 
So the only way this can be true is if the right side is identically zero.\nLinear Shifts Consider the case where you are given initial conditions:\n\\begin{equation} \\begin{cases} y\u0026rsquo;\u0026rsquo; - y = 0 \\\\ y(5) = -2 \\\\ y\u0026rsquo;(5) = 5 \\end{cases} \\end{equation}\ninstead of bothering to solve this, we define:\n\\begin{equation} Y(t) = y(t+5) \\end{equation}\nand it still hold that:\n\\begin{equation} Y\u0026rsquo;\u0026rsquo; - Y = 0 \\end{equation}\nbecause the derivatives don\u0026rsquo;t actually change.\nThen, after solving, we can just translate it back:\n\\begin{equation} y(t) = Y(t-5) \\end{equation}\nSolution, more generally Consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nlet us guess that \\(y = e^{\\lambda t}\\)\nrecall that, in that case:\n\\begin{equation} \\begin{cases} y\u0026rsquo; = \\lambda e^{\\lambda t} \\\\ y\u0026rsquo;\u0026rsquo; = \\lambda^{2} e^{\\lambda t} \\end{cases} \\end{equation}\nplugging this back in:\n\\begin{equation} \\lambda^{2} e^{\\lambda t} + a \\lambda e^{\\lambda t} + b e^{\\lambda t} = 0 \\end{equation}\nwhich is:\n\\begin{equation} (\\lambda^{2} + a\\lambda +b ) e^{\\lambda t} = 0 \\end{equation}\nbecause the right side is never zero, we need the left side \\((\\lambda^{2} + a\\lambda +b )\\) is zero.\nNote that there exists three separate cases:\n\\(a^{2}-4b \u0026gt; 0\\), two exact solutions: \\(e^{\\lambda_{1}t}\\) and \\(e^{\\lambda_{2} t}\\), these two are independent functions as long as \\(\\lambda_{1} \\neq \\lambda_{2}\\) \\(a^{2}-4b \u0026lt; 0\\), which will yield imaginary solutions, recall Euler\u0026rsquo;s Equation, you can split \\(e^{ikx}\\) into a superposition of \\(\\cos (x) + i\\sin (x)\\), each of which individually is a solution. 
You can break this up into the case of some real \\(e^{-at}\\) multiplied by sinusoldial functions.\u0026mdash; whereby \\(e^{at} (\\cos(bt) \\pm i\\sin(bt))\\), we can break into two functions \\(y_1 = e^{at}\\cos (bt), y_2= e^{at}i \\sin (bt)\\). for \\(a^{2}-4b = 0\\), we yield some solution \\(e^{-\\frac{a}{2} t}\\), and the solution is \\(t e^{-\\frac{a}{2}t}\\). because this is the limit of the first solution \\(\\lim_{\\lambda_{2} \\to \\lambda_{1}}\\frac{e^{\\lambda_{2}t} - e^{\\lambda_{1}t}}{\\lambda_{2} - \\lambda_{2}}\\) All 2nd order solution is a linear combination\nIn fact, all solutions carry the form of the two solutions:\n\\begin{equation} c_1 y_1(t) + c_2 y_2(t) = y(t) \\end{equation}\nThis is because, consider the initial form \\(y_1(t_0)\\), and \\(y_2(t_0)\\):\n\\begin{equation} \\begin{cases} y_1(t_0) c_1 + y_2(t_0) c_2 = y(t_0) \\\\ y_1\u0026rsquo;(t_0) c_1 + y_2\u0026rsquo;(t_0) c_2 = y\u0026rsquo;(t_0) \\\\ \\end{cases} \\end{equation}\nThis is the same as the matrix equation:\n\\begin{equation} \\mqty(y_1(t_0) \u0026amp; y_2(t_0) \\\\ y_1\u0026rsquo;(t_0) \u0026amp; y_2\u0026rsquo;(t_0)) \\mqty(c_1 \\\\ c_2) = \\mqty(y(t_0) \\\\ y\u0026rsquo;(t_0)) \\end{equation}\nSo, this map is surjective.\nUniqueness and Existance of second order The uniqueness is also guaranteed with one and exactly one solution exist for every initial condition of an IVP. Unlike first order ODE, solutions can cross: because the uniq and exi. is only guaranteed for the same point AND slope (i.e. the initial condition).\nSo solutions can cross, they just can\u0026rsquo;t be tangent.\nmethod of undetermined coefficients Ok. This mechanism hinges upon the fact that linear combinations of differential equation solutions are solutions themselves. 
You can show this to yourself by illustrating diffeq solutions as subspaces of F^S, which are linear objects.\nTherefore, for a non-homogeneous second-order linear equation, we attempt to find two sets of solutions\u0026mdash;\nnamely, the general solution to the homogeneous case (using method above):\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0 \\end{equation}\nas well attempting to fit particular solutions to the general case:\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t) \\end{equation}\nthe linear combination of both solutions would construct the final solution space.\nWe already know how to do step 1\u0026mdash;solve homogeneous higher-order differential equations\u0026mdash;so we won\u0026rsquo;t harp on it here. However, how do we find particular solutions to the general equations?\nWell, we guess! Here\u0026rsquo;s a general table to help illustrate how:\n\\(f(t)\\) \\(x(t)\\) \\(ae^{bt}\\) \\(Ae^{bt}\\) \\(a \\cos (ct) + b\\sin (ct)\\) \\(A\\cos(ct) + B\\sin (ct)\\) \\(kt^{n}\\) \\(A_{n}t^{n} + A_{n-1}x^{n-1} \\dots + A_{0}\\) you can show these to yourself by taking derivatives. \\(a,b,c, k,A,B\\) are distinct constants.\nNow, once you make an educated guess for what \\(x(t)\\) is, perhaps aided by the homogeneous solution, you would take the number of derivatives needed to plug it back to the original expression. 
Then, equate the left expression and right \\(f(t)\\) and match coefficients of equal-degree terms to solve for the final constants \\(A\\), \\(B\\), etc.\nAfter you finally got the specific solution for \\(A\\) and \\(B\\) , we add the degree of freedom back by adding the homogenous solution in.\nLook for \u0026ldquo;Example 1 (again)\u0026rdquo; on this page (silly, I know, but worth it) to see end-to-end such a solution.\n","html":"\u003cp\u003eHere\u0026rsquo;s a general form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esee:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#solving-homogeneous-constant-coefficient-higher-order-differential-equations\"\u003esolving homogeneous constant coefficient higher-order differential equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand more generally, using \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e, \u003ca href=\"#solving-homogeneous-higher-order-differential-equations\"\u003esolving homogeneous higher-order differential equations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"solving-homogeneous-higher-order-differential-equations\"\u003esolving homogeneous higher-order differential equations\u003c/h2\u003e\n\u003cp\u003eThis problem because easier if the right side is \\(0\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe general goal to solve in this case is to make this a system of \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo do this, we begin by making:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\dv{x}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we can change the first 
equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\dv{y}{t} + by + cx = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving both of these conditions, we form a system of linear equations:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dv{x}{t}=y \\\\\n\u0026amp;\\dv{y}{t} = \\frac{-c}{a}x-\\frac{b}{a}y\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe are now first-order, so we can put this into a matrix equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = \\begin{pmatrix}\n0 \u0026amp; 1 \\\\ -\\frac{c}{a} \u0026amp; \\frac{-b}{a}\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow! We have an equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{t}v = Av\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe result above shows that the transformations \\(\\dv{t}\\) and \\(A\\) are isomorphic. Therefore, we now attempt to characterize \\(A\\) to solve this expression.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin. We will first shove that \\(v\\) on top of the differential for aesthetics:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{v}{t} = Av\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis expression is actually nicely \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/#solving-separable-differential-equations\"\u003eseperable\u003c/a\u003e, so we shall endeavor to separate it:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{v} = Av\\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, \\(v\\) is a function of \\(t\\). Therefore, the right side would be woefully complicated. Therefore, we shall do this handwavy thing where we go:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{v}\\dd{v} = A\\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, \\(A\\) is not a function in \\(t\\) \u0026mdash; its just some constants! 
So, we can integrate this safely without much trouble:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{v}\\dd{v} =\\int A\\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln v = t A + C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote the order as \\(t\\) is a constant. Finally, we will invert the natural log and get \\(v\\) back:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = e^{tA+C}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExcellent. We will now apply some \u003ca href=\"/posts/kbhlog_laws/\"\u003elog/exponent laws\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = e^{tA}e^{C} = e^{tA}C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cdel\u003ethis is so very handwavy. \\(C\\) is technically a vector here\u0026hellip; long story and iffy understanding\u003c/del\u003e\u003c/p\u003e\n\u003cp\u003eOk, how do we go about solving \\(x\\)?\u003c/p\u003e\n\u003cp\u003eNote now that \\(v=(x\\ y)\\), so we will expand that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = e^{tA}\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, as we defined above \\(y=\\dv{x}{t}\\) (each integral needing a different constant.)\u003c/p\u003e\n\u003cp\u003eNow. remember that \\(A\\) is diagonalizable; and so will \\(tA\\) (citation needed, but intuition is that scaling eigenvalues do nothing anyways). So, to make this exponentiation easier, we will diagonalize it.\u003c/p\u003e\n\u003cp\u003eWe now have that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u003ca href=\"/posts/kbhraising_e_to_a_matrix/\"\u003ehow?\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eOk. 
Finally, we will take the binroller that is \u0026ldquo;constancy\u0026rdquo; and apply it to \\(e^{tA}\\). This took quite a bit of time for me to get, so feel free to take some time to get it too.\u003c/p\u003e\n\u003cp\u003eThis all hinges upon the fact that \\(C\\) is a constant, so multiplying any constant to it still makes it \\(C\\).\u003c/p\u003e\n\u003cp\u003eSo far, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = e^{tA}\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix} = \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRemember, now, that \\(v_1\\dots v_{}\\) and its inverses are nothing but vectors filled with a lot of scalars. And any scalar \\(\\alpha\\) times a constant still results in the (a new) constant: \\(\\alpha C =C\\). So, we will steamroll \\(\\mqty(x_0\u0026amp;y_0)\\) over the right side eigenbases matrix (multiplying a constant vector to any\u0026rsquo;ol matrix will just get a new set of constants back) to get:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} \u0026amp;= \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix} \\\\\n\u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix}\nC_1 \\\\ C_2\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, the middle thing has \\(t\\) in it! (the input!) So, we can\u0026rsquo;t just steamroll now. 
We have to preserve the middle part.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix}\nC_1 \\\\ C_2\n\\end{pmatrix} \\\\\n\u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix}\nC_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}}\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd finally, we keep steamrolling:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix}\nC_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}} \\end{pmatrix}\\\\\n\u0026amp;= \\mqty({C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}} \\\\ {C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThere is absolutely no difference in nature between \\(C_{j_{x}}\\) and \\(C_{j_{y}}\\) except for the fact that they are \u003cem\u003edifferent\u003c/em\u003e constants (which we got by multiplying \\(v_1 \\dots v_{m}\\)) to it.\u003c/p\u003e\n\u003cp\u003eOk so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx = C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}\\\\\ny = C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003econstructing the characteristic equation, as desired.\u003c/p\u003e\n\u003ch2 id=\"solving-homogeneous-constant-coefficient-higher-order-differential-equations\"\u003esolving homogeneous constant coefficient higher-order differential equations\u003c/h2\u003e\n\u003cp\u003ein the homogeneous case, we have some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand it arises that there\u0026rsquo;s a pair of solutions \\(y_1(t)\\) and \\(y_2(t)\\) whose 
linear combinations span the entire space of solutions. in fact, it arises as a solution to some functional quadratic equation \\(\\lambda^{2} + a\\lambda + b = 0\\).\u003c/p\u003e\n\u003cp\u003eThe specific coefficients \\(c_1\\) and \\(c_2\\) of the linear combination arises out of the initial conditons, which is the same measurement given at the initial time and its derivative: \\(y(t_0)\\) and \\(y\u0026rsquo;(t_0)\\). It comes out of \u003ca href=\"/posts/kbhlinear_algebra_errors/\"\u003eLinear Algebra\u003c/a\u003e why there is exactly two initial values.\u003c/p\u003e\n\u003cp\u003eSpecifically, it arises out of solutions of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = c_1 e^{\\lambda_{1}t} + c_2e^{\\lambda_{2}t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) are solutions to the characteristic polynomial above. For why exactly this is, see \u003ca href=\"#method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"finding-independent--kbhprobability-dot-md--solutions-of-second-order-constant-coefficient-linear-odes\"\u003efinding \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e solutions of second-order constant-coefficient linear ODEs\u003c/h3\u003e\n\u003cp\u003eGiven some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe desire to find two \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e solutions. 
After which, by \u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e, we know that any \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es will yield a solution.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; = y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe see that both \\(y=e^{t}\\) and \\(y=e^{-t}\\) are solutions. We can see that this is independent by setting up:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 e^{t} + c_2 e^{-t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich, multiplying through by \\(e^{t}\\) and dividing, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{2t} = -\\frac{c_2}{c_1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, the right side is constant, and the left is not. So the only way this can be true is if the right side is identically zero.\u003c/p\u003e\n\u003ch4 id=\"linear-shifts\"\u003eLinear Shifts\u003c/h4\u003e\n\u003cp\u003eConsider the case where you are given initial conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo;\u0026rsquo; - y = 0 \\\\\ny(5) = -2 \\\\\ny\u0026rsquo;(5) = 5\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003einstead of bothering to solve this, we define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY(t) = y(t+5)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand it still hold that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY\u0026rsquo;\u0026rsquo; - Y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the derivatives don\u0026rsquo;t actually change.\u003c/p\u003e\n\u003cp\u003eThen, after solving, we can just translate it back:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = Y(t-5)\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"solution-more-generally\"\u003eSolution, more 
generally\u003c/h4\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet us guess that \\(y = e^{\\lambda t}\\)\u003c/p\u003e\n\u003cp\u003erecall that, in that case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo; = \\lambda e^{\\lambda t} \\\\\ny\u0026rsquo;\u0026rsquo; = \\lambda^{2} e^{\\lambda t}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eplugging this back in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda^{2} e^{\\lambda t} + a \\lambda e^{\\lambda t} + b e^{\\lambda t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda^{2} + a\\lambda +b ) e^{\\lambda t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the right side is never zero, we need the left side \\((\\lambda^{2} + a\\lambda +b )\\) is zero.\u003c/p\u003e\n\u003cp\u003eNote that there exists three separate cases:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(a^{2}-4b \u0026gt; 0\\), two exact solutions: \\(e^{\\lambda_{1}t}\\) and \\(e^{\\lambda_{2} t}\\), these two are independent functions as long as \\(\\lambda_{1} \\neq \\lambda_{2}\\)\u003c/li\u003e\n\u003cli\u003e\\(a^{2}-4b \u0026lt; 0\\), which will yield imaginary solutions, recall \u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e, you can split \\(e^{ikx}\\) into a superposition of \\(\\cos (x) + i\\sin (x)\\), each of which individually is a solution. You can break this up into the case of some real \\(e^{-at}\\) multiplied by sinusoldial functions.\u0026mdash; whereby \\(e^{at} (\\cos(bt) \\pm i\\sin(bt))\\), we can break into two functions \\(y_1 = e^{at}\\cos (bt), y_2= e^{at}i \\sin (bt)\\).\u003c/li\u003e\n\u003cli\u003efor \\(a^{2}-4b = 0\\), we yield some solution \\(e^{-\\frac{a}{2} t}\\), and the solution is \\(t e^{-\\frac{a}{2}t}\\). 
because this is the limit of the first solution \\(\\lim_{\\lambda_{2} \\to \\lambda_{1}}\\frac{e^{\\lambda_{2}t} - e^{\\lambda_{1}t}}{\\lambda_{2} - \\lambda_{2}}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eAll 2nd order solution is a linear combination\u003c/p\u003e\n\u003cp\u003eIn fact, all solutions carry the form of the two solutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 y_1(t) + c_2 y_2(t) = y(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is because, consider the initial form \\(y_1(t_0)\\), and \\(y_2(t_0)\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny_1(t_0) c_1 + y_2(t_0) c_2 = y(t_0) \\\\\ny_1\u0026rsquo;(t_0) c_1 + y_2\u0026rsquo;(t_0) c_2 = y\u0026rsquo;(t_0) \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is the same as the matrix equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(y_1(t_0) \u0026amp; y_2(t_0) \\\\ y_1\u0026rsquo;(t_0) \u0026amp; y_2\u0026rsquo;(t_0)) \\mqty(c_1 \\\\ c_2) = \\mqty(y(t_0) \\\\ y\u0026rsquo;(t_0))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, this map is surjective.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"uniqueness-and-existance--kbhuniqueness-and-existance-dot-md--of-second-order\"\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e of second order\u003c/h4\u003e\n\u003cp\u003eThe uniqueness is also guaranteed with \u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-initial-condition-of-an-ivp\"\u003eone and exactly one solution exist for every initial condition of an IVP\u003c/a\u003e. Unlike first order ODE, solutions can cross: because the uniq and exi. is only guaranteed for the same \u003cstrong\u003epoint\u003c/strong\u003e AND \u003cstrong\u003eslope\u003c/strong\u003e (i.e. 
the initial condition).\u003c/p\u003e\n\u003cp\u003eSo solutions can cross, they just can\u0026rsquo;t be tangent.\u003c/p\u003e\n\u003ch2 id=\"method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/h2\u003e\n\u003cp\u003eOk. This mechanism hinges upon the fact that \u003cstrong\u003elinear combinations of differential equation solutions are solutions themselves\u003c/strong\u003e. You can show this to yourself by illustrating diffeq solutions as \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003eF^S\u003c/a\u003e, which are linear objects.\u003c/p\u003e\n\u003cp\u003eTherefore, for a non-homogeneous second-order linear equation, we attempt to find two sets of solutions\u0026mdash;\u003c/p\u003e\n\u003cp\u003enamely, the general solution to the homogeneous case (using method above):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas well attempting to fit \u003cstrong\u003eparticular\u003c/strong\u003e solutions to the general case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe linear combination of both solutions would construct the final solution space.\u003c/p\u003e\n\u003cp\u003eWe already know how to do step 1\u0026mdash;\u003ca href=\"#solving-homogeneous-higher-order-differential-equations\"\u003esolve homogeneous higher-order differential equations\u003c/a\u003e\u0026mdash;so we won\u0026rsquo;t harp on it here. However, how do we find \u003cem\u003eparticular\u003c/em\u003e solutions to the general equations?\u003c/p\u003e\n\u003cp\u003eWell, we guess! 
Here\u0026rsquo;s a general table to help illustrate how:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(f(t)\\)\u003c/th\u003e\n\u003cth\u003e\\(x(t)\\)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\\(ae^{bt}\\)\u003c/td\u003e\n\u003ctd\u003e\\(Ae^{bt}\\)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\\(a \\cos (ct) + b\\sin (ct)\\)\u003c/td\u003e\n\u003ctd\u003e\\(A\\cos(ct) + B\\sin (ct)\\)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\\(kt^{n}\\)\u003c/td\u003e\n\u003ctd\u003e\\(A_{n}t^{n} + A_{n-1}x^{n-1} \\dots + A_{0}\\)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eyou can show these to yourself by taking derivatives. \\(a,b,c, k,A,B\\) are distinct constants.\u003c/p\u003e\n\u003cp\u003eNow, once you make an educated guess for what \\(x(t)\\) is, perhaps aided by the homogeneous solution, you would take the number of derivatives needed to plug it back to the original expression. 
Then, equate the left expression and right \\(f(t)\\) and match \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003es of equal-degree terms to solve for the final constants \\(A\\), \\(B\\), etc.\u003c/p\u003e\n\u003cp\u003eAfter you finally got the specific solution for \\(A\\) and \\(B\\) , we add the degree of freedom back by adding the homogenous solution in.\u003c/p\u003e\n\u003cp\u003eLook for \u0026ldquo;Example 1 (again)\u0026rdquo; on \u003ca href=\"https://www.mathsisfun.com/calculus/differential-equations-undetermined-coefficients.html\"\u003ethis page\u003c/a\u003e (silly, I know, but worth it) to see end-to-end such a solution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecond_order_linear_differential_equation/","tags":null,"title":"Second-Order Linear Differential Equations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhselective_service_system/","tags":null,"title":"Selective Service System"},{"categories":null,"contents":"The principle of semantic accountability claims that a good grammar should be able to say \u0026ldquo;something explicit about how the abstractions of the grammar match with actual meaning\u0026rdquo;\n","html":"\u003cp\u003eThe principle of \u003ca href=\"/posts/kbhsemantic_accountability/\"\u003esemantic accountability\u003c/a\u003e claims that a good \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e should be able to say \u0026ldquo;something explicit about how the abstractions of the \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e match with actual meaning\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_accountability/","tags":null,"title":"semantic accountability"},{"categories":null,"contents":"Represent health information in terms of rule-based ontologies.\n","html":"\u003cp\u003eRepresent health information in terms of rule-based 
ontologies.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_health_risk_prediction/","tags":null,"title":"Semantic Health Risk Prediction"},{"categories":null,"contents":"In NSM, semantic primes are the most fundimental \u0026ldquo;lexical units\u0026rdquo; (so they can be words, or morphemes, etc. the size doesn\u0026rsquo;t matter) across languages.\nThey are the \u0026ldquo;core of a universal mental lexicon\u0026rdquo;.\nThere are\u0026hellip;\nguidelines for identifying semantic primes A semantic prime has to be found in every(ish?) natural language A semantic prime has to be indefinable by other primes proof for the existence of semantic primes Proof: given if the Strong Lexicalization Hypothesis holds, semantic primes must exist.\nAssume for the sake of contradiction no semantic primes exist.\nBecause Strong Lexicalization Hypothesis holds, there does not exist syntactic transformations which can take original single words and transform them into newly lexicalized words to express a different meaning.\nAt the same time, again because of the Strong Lexicalization Hypothesis, one must only leverage syntactic transformation on syntatic constituents when forming ideas.\nTherefore, given a word to lexicalize, it has to be defined by an syntatic transformation on a set of previously lexicalized words.\n(by definition) there are no words lexicalizable from the empty set of words.\nTherefore, there exists some word that needs to be lexicalized by words that are not previously defined, which is absurd. (instead, these words are lexicalized via semantic primes.)\nQED\nproblems with semantic primes the list has grown over time the problem of allolexy: formal restrictions of a language resulting in the same concept needing to be radicalized multiple times (I vs. me) finding semantic primes According to (Geeraerts 2009), (Goddard 2009) provides a \u0026ldquo;practical\u0026rdquo; (though flawed) way of establishing primes. 
Something to do with large-scale comparisons in \u0026ldquo;whole metalanguage studies\u0026rdquo;, which requires pairwise language comparison\nLocating primes are seen as an enforcement of NSM theories (Vanhatalo, Tissari, and Idström, n.d.). Recent prime locations: in Amharic (Amberber 2008), East Cree (Junker 2008), French (Peeters 1994), Japanese (Onishi 1994), Korean (Yoon 2008), Lao (Enfield 2002), Mandarin (Chappell 2002), Mangaaba-Mbula (Bugenhagen 2002), Malay (Goddard 2002), Polish (Wierzbicka 2002), Russian (Gladkova 2010, for the latest set, see the NSM home page), Spanish (Travis 2002), and Thai (Diller 1994).\n","html":"\u003cp\u003eIn \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e, \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic primes\u003c/a\u003e are the most fundimental \u0026ldquo;lexical units\u0026rdquo; (so they can be words, or morphemes, etc. the \u003cem\u003esize\u003c/em\u003e doesn\u0026rsquo;t matter) across languages.\u003c/p\u003e\n\u003cp\u003eThey are the \u0026ldquo;core of a universal mental lexicon\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eThere are\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"guidelines-for-identifying-semantic-primes--kbhsemantic-primes-dot-md\"\u003eguidelines for identifying \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic primes\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003e has to be found in every(ish?) 
natural language\u003c/li\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003e has to be indefinable by other primes\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"proof-for-the-existence-of-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003eproof for the existence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eProof: given if the \u003ca href=\"/posts/kbhlexicalization_hypothesis/#strong-id-09e74661-d561-4cec-af0f-84c82c0367a1-lexicalization-hypothesis\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e holds, \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es must exist.\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction no \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es exist.\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhlexicalization_hypothesis/#strong-id-09e74661-d561-4cec-af0f-84c82c0367a1-lexicalization-hypothesis\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e holds, there does not exist syntactic transformations which can take original single words and transform them into newly lexicalized words to express a different meaning.\u003c/p\u003e\n\u003cp\u003eAt the same time, again because of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/#strong-id-09e74661-d561-4cec-af0f-84c82c0367a1-lexicalization-hypothesis\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e, one must only leverage syntactic transformation on \u003ca href=\"\"\u003esyntatic constituents\u003c/a\u003e when forming ideas.\u003c/p\u003e\n\u003cp\u003eTherefore, given a word to lexicalize, it has to be defined by an syntatic transformation on a set of previously lexicalized words.\u003c/p\u003e\n\u003cp\u003e(by definition) there are no words lexicalizable from the empty set of words.\u003c/p\u003e\n\u003cp\u003eTherefore, there exists some word that needs to be lexicalized by 
words that are not previously defined, which is absurd. (instead, these words are lexicalized via semantic primes.)\u003c/p\u003e\n\u003cp\u003eQED\u003c/p\u003e\n\u003ch2 id=\"problems-with-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003eproblems with \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ethe list has grown over time\u003c/li\u003e\n\u003cli\u003ethe problem of \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"#problems-with-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003eallolexy\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e: formal restrictions of a language resulting in the same concept needing to be radicalized multiple times (I vs. me)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"finding-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003efinding \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eAccording to (\u003ca href=\"#citeproc_bib_item_1\"\u003eGeeraerts 2009\u003c/a\u003e), (\u003ca href=\"#citeproc_bib_item_2\"\u003eGoddard 2009\u003c/a\u003e) provides a \u0026ldquo;practical\u0026rdquo; (though flawed) way of establishing primes. Something to do with large-scale comparisons in \u0026ldquo;\u003ca href=\"/posts/kbhwhole_metalanguage_study/\"\u003ewhole metalanguage studies\u003c/a\u003e\u0026rdquo;, which requires pairwise language comparison\u003c/p\u003e\n\u003cp\u003eLocating primes are seen as an enforcement of \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e theories (\u003ca href=\"#citeproc_bib_item_3\"\u003eVanhatalo, Tissari, and Idström, n.d.\u003c/a\u003e). 
Recent prime locations: in Amharic (Amberber 2008), East Cree (Junker 2008), French (Peeters 1994), Japanese (Onishi 1994), Korean (Yoon 2008), Lao (Enfield 2002), Mandarin (Chappell 2002), Mangaaba-Mbula (Bugenhagen 2002), Malay (Goddard 2002), Polish (Wierzbicka 2002), Russian (Gladkova 2010, for the latest set, see the NSM home page), Spanish (Travis 2002), and Thai (Diller 1994).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_primes/","tags":null,"title":"semantic prime"},{"categories":null,"contents":"SVF is a standardized Discourse-Completion Task for verbal recall and fluency. It is administered by asking the participant to recall a bunch of words from within a category within 60 seconds.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e is a standardized \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e for verbal recall and fluency. It is administered by asking the participant to recall a bunch of words from within a category within 60 seconds.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_verbal_fluency/","tags":null,"title":"Semantic Verbal Fluency"},{"categories":null,"contents":"The semiconductor industry is a growing industry, the beginning of the semiconductor industry was actually in the silicon valley.\nWe are now taking a look at a reticle.\nalgorithms used in the semiconductor industry Per KLA \u0026mdash;\nClassification Random forest Boosted decision trees MLPs CNNs Reference generation GANs (WAT) VAEs Natural Grouping and Clustering auto-encoders manual feature extractors ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsemiconductor/\"\u003esemiconductor\u003c/a\u003e industry is a growing industry, the beginning of the \u003ca href=\"/posts/kbhsemiconductor/\"\u003esemiconductor\u003c/a\u003e industry was actually in the silicon valley.\u003c/p\u003e\n\u003cp\u003eWe are now taking a look at a 
\u003ca href=\"/posts/kbhreticle/\"\u003ereticle\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"algorithms-used-in-the-semiconductor-industry\"\u003ealgorithms used in the semiconductor industry\u003c/h2\u003e\n\u003cp\u003ePer \u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e \u0026mdash;\u003c/p\u003e\n\u003ch3 id=\"classification\"\u003eClassification\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRandom forest\u003c/li\u003e\n\u003cli\u003eBoosted decision trees\u003c/li\u003e\n\u003cli\u003eMLPs\u003c/li\u003e\n\u003cli\u003eCNNs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"reference-generation\"\u003eReference generation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGANs (WAT)\u003c/li\u003e\n\u003cli\u003eVAEs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"natural-grouping-and-clustering\"\u003eNatural Grouping and Clustering\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eauto-encoders\u003c/li\u003e\n\u003cli\u003emanual feature extractors\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemiconductor/","tags":null,"title":"semiconductor"},{"categories":null,"contents":"sense is the meaning of the word.\nsynonymy synonymy\u0026mdash;using synonyms as a proxy for word meaning.\nBut! There are probably no examples of perfect synonymy: two synonyms have slightly different sense (\u0026ldquo;my big sister\u0026rdquo; != \u0026ldquo;my large sister\u0026rdquo;)\nword relatedness not synonyms, but closeness in utility or semantic frame:\ncoffee + tea \u0026mdash; similar coffee + cup \u0026mdash; related, but not similar semantic field words that relate by covering a similar semantic domain:\ne.g.: hospital - surgeon, scapel, nurse\nantonyms antonyms can be binaries at opposite ends of a related semantic field\naffective meaning word meaning that relate to the affect, emotion, etc. of the speaker which doesn\u0026rsquo;t relate to literal meaning (replica vs. 
knockoff)\nvalence: pleasantness of stimulus arousal: intensity of the emotion provoked by stimulus dominance: degree of control exerted by the stimulus principle of contrast \u0026ldquo;a difference in form results in a difference in meaning\u0026rdquo;\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e is the meaning of the word.\u003c/p\u003e\n\u003ch2 id=\"synonymy\"\u003esynonymy\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#synonymy\"\u003esynonymy\u003c/a\u003e\u0026mdash;using synonyms as a proxy for word meaning.\u003c/p\u003e\n\u003cp\u003eBut! There are probably no examples of perfect \u003ca href=\"#synonymy\"\u003esynonymy\u003c/a\u003e: two synonyms have slightly different \u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e (\u0026ldquo;my big sister\u0026rdquo; != \u0026ldquo;my large sister\u0026rdquo;)\u003c/p\u003e\n\u003ch2 id=\"word-relatedness\"\u003eword relatedness\u003c/h2\u003e\n\u003cp\u003enot synonyms, but closeness in utility or semantic frame:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecoffee + tea \u0026mdash; similar\u003c/li\u003e\n\u003cli\u003ecoffee + cup \u0026mdash; related, but not similar\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"semantic-field\"\u003esemantic field\u003c/h3\u003e\n\u003cp\u003ewords that relate by covering a similar semantic domain:\u003c/p\u003e\n\u003cp\u003ee.g.: \u003cstrong\u003ehospital\u003c/strong\u003e - surgeon, scapel, nurse\u003c/p\u003e\n\u003ch2 id=\"antonyms\"\u003eantonyms\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#antonyms\"\u003eantonyms\u003c/a\u003e can be binaries at opposite ends of a related \u003ca href=\"#semantic-field\"\u003esemantic field\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"affective-meaning\"\u003eaffective meaning\u003c/h2\u003e\n\u003cp\u003eword meaning that relate to the affect, emotion, etc. of the speaker which doesn\u0026rsquo;t relate to literal meaning (replica vs. 
knockoff)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003evalence\u003c/strong\u003e: pleasantness of stimulus\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003earousal\u003c/strong\u003e: intensity of the emotion provoked by stimulus\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edominance\u003c/strong\u003e: degree of control exerted by the stimulus\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"principle-of-contrast\"\u003eprinciple of contrast\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;a difference in form results in a difference in meaning\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsense/","tags":null,"title":"sense"},{"categories":null,"contents":"Common algorithm\ntokenization classier for whether a period token is a part of a word or the sentence boundary ","html":"\u003cp\u003eCommon algorithm\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eclassier for whether a period token is a part of a word or the sentence boundary\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsentence_segmentation/","tags":null,"title":"sentence segmentation"},{"categories":null,"contents":"\\begin{equation} \\dv{y}{t} = a(t)f(y) \\end{equation}\nare a class of functions are called seperable. 
We can solve them using the division method\ndivision method the division method involves solving autonomous ODEs by dividing and treating it normally:\n\\begin{equation} y\u0026rsquo; = 8y \\end{equation}\n\\begin{equation} \\frac{y\u0026rsquo;}{8} = y \\end{equation}\nwe now write something fishy:\n\\begin{equation} \\frac{\\dd{y}}{y} = 8 \\dd{t} \\end{equation}\nwe now take the antiderivative of this:\n\\begin{equation} \\int \\frac{1}{y} \\dd{y} = \\int 8 \\dd{t} \\end{equation}\nWe will get that:\n\\begin{equation} \\ln |y| = 8t + C \\end{equation}\nwe finally get:\n\\begin{equation} |y| = e^{C} e^{8t} \\end{equation}\ngetting rid of that absolute value:\n\\begin{align} y \u0026amp;= \\pm e^{C} e^{8t} \\\\ \u0026amp;= K e^{8t} \\end{align}\nplaces where this breaks down sometimes, \\(\\frac{1}{f(y)}\\) may not have a nice antiderivative sometimes, \\(G(y)\\), the antidepressant, may not be nicely invertible general solution to y\u0026rsquo;(t) = ry(t) generally, for \\(r \\in \\mathbb{R}\\), the solution to \\(y\u0026rsquo;(t) = ry(t)\\) is at \\(y(t)=y_0e^{rt}\\), where \\(y_0 = y(0)\\).\nfor autonomous ODEs for which \\(ry(t) = f(y)\\), we have that:\n\\begin{equation} \\dv{y}{x} = ry(x) \\end{equation}\nwhich means:\n\\begin{equation} \\frac{1}{y(x)} \\dd{y} = r\\dd{x} \\end{equation}\nand so:\n\\begin{equation} \\ln \\qty| y(x) | = rx +C \\end{equation}\nand hence:\n\\begin{equation} y(x) = K e^{rx} \\end{equation}\nplugging in \\(x=0\\), yields \\(y(0) = Ke^{0} = K\\).\n","html":"\u003cp\u003e\\begin{equation}\n\\dv{y}{t} = a(t)f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eare a class of functions are called \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e. 
We can solve them using the \u003ca href=\"#division-method\"\u003edivision method\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"division-method\"\u003edivision method\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"#division-method\"\u003edivision method\u003c/a\u003e involves solving \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e by dividing and treating it normally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = 8y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{y\u0026rsquo;}{8} = y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now write something fishy:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\dd{y}}{y} = 8 \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now take the antiderivative of this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{y} \\dd{y} = \\int 8 \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will get that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln |y| = 8t + C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe finally get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y| = e^{C} e^{8t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egetting rid of that absolute value:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= \\pm e^{C} e^{8t} \\\\\n\u0026amp;= K e^{8t}\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"places-where-this-breaks-down\"\u003eplaces where this breaks down\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003esometimes, \\(\\frac{1}{f(y)}\\) may not have a nice antiderivative\u003c/li\u003e\n\u003cli\u003esometimes, \\(G(y)\\), the antidepressant, may not be nicely invertible\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"general-solution-to-y--t--ry--t\"\u003egeneral solution to y\u0026rsquo;(t) = ry(t)\u003c/h3\u003e\n\u003cp\u003egenerally, for \\(r \\in \\mathbb{R}\\), the solution to \\(y\u0026rsquo;(t) = ry(t)\\) is at \\(y(t)=y_0e^{rt}\\), where \\(y_0 = 
y(0)\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003efor \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e for which \\(ry(t) = f(y)\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} = ry(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{y(x)} \\dd{y} = r\\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln \\qty| y(x) | = rx +C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand hence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x) = K e^{rx}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eplugging in \\(x=0\\), yields \\(y(0) = Ke^{0} = K\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhseperable_diffequ/","tags":null,"title":"seperable diffequ"},{"categories":null,"contents":"server-clients is a command in Emacs LISP which is used to get the clients of the current running emacsclient server\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhserver_clients/\"\u003eserver-clients\u003c/a\u003e is a command in Emacs LISP which is used to get the clients of the current running emacsclient server\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhserver_clients/","tags":null,"title":"server-clients"},{"categories":null,"contents":"A set is an unordered collection of objects, which maybe infinitely long. It is generated with \\(\\{, \\}\\). For instance, most numbers are sets.\nconstituents a collection of objects requirements repetition does not matter order does not matter additional information ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003e is an unordered collection of \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es, which maybe infinitely long. It is generated with \\(\\{, \\}\\). 
For instance, most \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003es are sets.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea collection of \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003erepetition does not matter\u003c/li\u003e\n\u003cli\u003eorder does not matter\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhset/","tags":null,"title":"set"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsets/","tags":null,"title":"sets"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624659\nOne-Liner Multi-feature late fusion of NLP results (by normalizing text and n-gram processing) with OpenSMILE embedding results.\nNovelty NLP transcript normalization (see methods) and OpenSMILE; otherwise similar to Martinc 2021. Same gist but different data-prep.\nNotable Methods N-gram processed the input features Used WordNet to replace words with roots Key Figs New Concepts OpenSMILE ","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624659\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eMulti-feature \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of NLP results (by normalizing text and n-gram processing) with \u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e embedding results.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eNLP transcript normalization (see methods) and \u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e; otherwise similar to \u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e. 
Same gist but different data-prep.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eN-gram processed the input features\u003c/li\u003e\n\u003cli\u003eUsed WordNet to replace words with roots\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_22-28-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhshah_2021/","tags":["ntj"],"title":"Shah 2021"},{"categories":null,"contents":"Short selling involves betting against the stock.\nProcess of Short Selling the trader borrows a number of shares from a third party the trader sells them immediately for cash when the security dips, the debt is repaid by repurchasing the same amount of shares of the borrowed security at the lower price traders nets the profit from the negative price differential If the person shorting\nshort squeeze \u0026ldquo;what happened to GameStock\u0026rdquo;\nA short squeeze is a situation in which a bunch of people try to drive the price of the up by buying enough shares such that the short sellers are forced to sell high\u0026mdash;driving up the price.\n","html":"\u003cp\u003eShort selling involves betting against the stock.\u003c/p\u003e\n\u003ch2 id=\"process-of-short-selling\"\u003eProcess of Short Selling\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ethe trader borrows a number of shares from a third party\u003c/li\u003e\n\u003cli\u003ethe trader sells them immediately for cash\u003c/li\u003e\n\u003cli\u003ewhen the security dips, the debt is repaid by repurchasing the same amount of shares of the borrowed security at the lower price\u003c/li\u003e\n\u003cli\u003etraders nets the profit from the negative 
price differential\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf the person \u003ca href=\"/posts/kbhshort_selling/\"\u003eshorting\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"short-squeeze\"\u003eshort squeeze\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;what happened to GameStock\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eA \u003ca href=\"#short-squeeze\"\u003eshort squeeze\u003c/a\u003e is a situation in which a bunch of people try to drive the \u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e of the up by buying enough shares such that the \u003ca href=\"/posts/kbhshort_selling/\"\u003eshort sellers\u003c/a\u003e are forced to sell high\u0026mdash;driving up the price.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhshort_selling/","tags":null,"title":"short selling"},{"categories":null,"contents":"sigmoid function is used to squash your data between \\(0\\) and \\(1\\). Sigmoid is symmetric. It could take any number and squash it to look like a probability between 0 and 1.\n\\begin{equation} \\sigma(z) = \\frac{1}{1+ e^{-z}} \\end{equation}\nSay you have one discrete variable \\(X\\), and one continuous variable \\(Y\\), and you desire to express \\(p(x|y)\\).\nThe simplest way to do this, of course, is to say something like:\n\\begin{equation} P(x^{j} \\mid y) = \\begin{cases} P(x^{j} \\mid y) = 0, y \u0026lt; \\theta \\\\ P(x^{j} \\mid y) = 1, y \u0026gt; \\theta \\end{cases} \\end{equation}\nwhereby if \\(y\\) is above or below a value, \\(x^{j}|y\\) behaves differently. 
But we often don\u0026rsquo;t want a card cap.\nTo soften this, we can use a sigmoid model:\n\\begin{equation} P(x^{1} \\mid y) = \\frac{1}{1 + \\exp \\qty(-2 \\frac{y-\\theta_{1}}{\\theta_{2}})} \\end{equation}\nwhereby, \\(\\theta_{1}\\) is where the threshold of activation is, and \\(\\theta_{2}\\) is how soft you want the spread to be.\nThe derivative of this function is also dead simple:\n\\begin{equation} \\dv{\\sigma(z)}{z} = \\sigma(z) (1-\\sigma(z)) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function is used to squash your data between \\(0\\) and \\(1\\). Sigmoid is symmetric. It could take any number and squash it to look like a probability between 0 and 1.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma(z) = \\frac{1}{1+ e^{-z}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSay you have one discrete variable \\(X\\), and one continuous variable \\(Y\\), and you desire to express \\(p(x|y)\\).\u003c/p\u003e\n\u003cp\u003eThe simplest way to do this, of course, is to say something like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x^{j} \\mid y) = \\begin{cases}\nP(x^{j} \\mid y) = 0, y \u0026lt; \\theta \\\\\nP(x^{j} \\mid y) = 1, y \u0026gt; \\theta\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby if \\(y\\) is above or below a value, \\(x^{j}|y\\) behaves differently. 
But we often don\u0026rsquo;t want a card cap.\u003c/p\u003e\n\u003cp\u003eTo soften this, we can use a \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e model:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x^{1} \\mid y) = \\frac{1}{1 + \\exp \\qty(-2 \\frac{y-\\theta_{1}}{\\theta_{2}})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, \\(\\theta_{1}\\) is where the threshold of activation is, and \\(\\theta_{2}\\) is how soft you want the spread to be.\u003c/p\u003e\n\u003cp\u003eThe derivative of this function is also dead simple:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{\\sigma(z)}{z} = \\sigma(z) (1-\\sigma(z))\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsigmoid/","tags":null,"title":"sigmoid"},{"categories":null,"contents":"Here is the most simple Differential Equation one could imagine:\n\\begin{equation} \\dv{x}{t} = f(t,x) \\end{equation}\nOr, perhaps, we have a second order differential equation which is the same thing but in the second degree:\n\\begin{equation} \\dv[2]{x}{t} = f\\qty(t,x,\\dv{x}{t}) \\end{equation}\nThen in which case, we have that the first most simple type of differential equation to be as follows:\n\\begin{equation} \\dv{x}{t} = x(t) \\end{equation}\nIf we can solve this, we can generalize this to most of other First-Order Differential Equations.\nwhere, the function \\(f(t,x)=x(t)\\).\n\\begin{align} \u0026amp; \\dv{x}{t} = x(t) \\\\ \\Rightarrow\\ \u0026amp; \\frac{1}{x(t)}\\dd{x} = \\dd{t} \\end{align}\nAt this point, you may ask yourself, why not construct it such that we have \\(\\dd{x} = x(t)\\dd{t}\\)? Well, its because our \\(x\\) is a variable in \\(t\\), so if we constructed it that way we\u0026rsquo;d have to integrate a function \\(\\dd{t}\\) with usub and the reverse chain rule, etc. etc. 
If we are instead integrating it on \\(\\dd{x}\\), it becomes much easier because our variable of interest no longer considers the \\(t\\).\nContinuing on, then:\n\\begin{align} \u0026amp;\\frac{1}{x(t)}\\dd{x} = \\dd{t} \\\\ \\Rightarrow\\ \u0026amp;\\int \\frac{1}{x(t)}\\dd{x} = \\int \\dd{t} \\\\ \\Rightarrow\\ \u0026amp; \\ln (x(t)) = t \\\\ \\Rightarrow\\ \u0026amp; x(t) = e^{t} \\end{align}\nAwesome. It should\u0026rsquo;t be hard also to see that, generally:\n\\begin{equation} x(t) = e^{ct} \\end{equation}\nis the solution to all equations \\(\\dv{x}{t} = cx\\).\nTurns out (not proven in the book), this holds for complex valued equations as well. So, we have some:\n\\begin{align} \u0026amp;x(t) = e^{it} \\\\ \\Rightarrow\\ \u0026amp; \\dv{x}{t} = ix \\end{align}\nOf course, from elementary calculus we also learned the fact that \\(e^{x}\\) can be represented as a power series; so check that out for now we connect it.\nThis equation leads us to solve:\n\\begin{equation} \\dv{x}{t} + ax = b(t) \\end{equation}\nIn order to do this, we neeed to find a replacement of the property that:\n\\begin{equation} \\dv t\\qty(e^{at}x) = e^{at}\\qty(\\dv{x}{t} +at) \\end{equation}\nA more general result of the above form is\n\\begin{equation} \\dv{x}{t} + a(t)x = b(t) \\end{equation}\nThis is fine, but now we need to leverage to chain rule to have \\(\\dv t a(t)\\) would be simply changing the above result to \\(a\u0026rsquo;(t)\\).\nBut anyways through this we will end up with the same solution we get from solving differential equations.\n","html":"\u003cp\u003eHere is the most simple \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e one could imagine:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = f(t,x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOr, perhaps, we have a second order \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e which is the same thing but in the second 
degree:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{x}{t} = f\\qty(t,x,\\dv{x}{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen in which case, we have that the first most simple type of \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e to be as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = x(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we can solve this, we can generalize this to most of other \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ewhere, the function \\(f(t,x)=x(t)\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dv{x}{t} = x(t) \\\\\n\\Rightarrow\\ \u0026amp; \\frac{1}{x(t)}\\dd{x} = \\dd{t}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAt this point, you may ask yourself, why not construct it such that we have \\(\\dd{x} = x(t)\\dd{t}\\)? Well, its because our \\(x\\) is a variable in \\(t\\), so if we constructed it that way we\u0026rsquo;d have to integrate a function \\(\\dd{t}\\) with usub and the reverse chain rule, etc. etc. If we are instead integrating it on \\(\\dd{x}\\), it becomes much easier because our variable of interest no longer considers the \\(t\\).\u003c/p\u003e\n\u003cp\u003eContinuing on, then:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\frac{1}{x(t)}\\dd{x} = \\dd{t} \\\\\n\\Rightarrow\\ \u0026amp;\\int \\frac{1}{x(t)}\\dd{x} = \\int \\dd{t} \\\\\n\\Rightarrow\\ \u0026amp; \\ln (x(t)) = t \\\\\n\\Rightarrow\\ \u0026amp; x(t) = e^{t}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAwesome. It should\u0026rsquo;t be hard also to see that, generally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{ct}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the solution to all equations \\(\\dv{x}{t} = cx\\).\u003c/p\u003e\n\u003cp\u003eTurns out (not proven in the book), this holds for complex valued equations as well. 
So, we have some:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;x(t) = e^{it} \\\\\n\\Rightarrow\\ \u0026amp; \\dv{x}{t} = ix\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, from elementary calculus we also learned the fact that \\(e^{x}\\) can be represented as a \u003ca href=\"/posts/kbhpower_series/\"\u003epower series\u003c/a\u003e; so check that out for now we connect it.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThis equation leads us to solve:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} + ax = b(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn order to do this, we neeed to find a replacement of the property that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t\\qty(e^{at}x) = e^{at}\\qty(\\dv{x}{t} +at)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eA more general result of the above form is\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} + a(t)x = b(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is fine, but now we need to leverage to chain rule to have \\(\\dv t a(t)\\) would be simply changing the above result to \\(a\u0026rsquo;(t)\\).\u003c/p\u003e\n\u003cp\u003eBut anyways through this we will end up with the same solution we get from \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003esolving differential equations\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsimple_differential_equations/","tags":null,"title":"Simple Differential Equations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsimple_game/","tags":null,"title":"simple game"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsingle_party_control/","tags":null,"title":"single party control"},{"categories":null,"contents":"Singular value decomposition is a factorization of a matrix, which is a generalization of the eigendecomposition of normal matricies (i.e. 
where \\(A = V^{-1} D V\\) when \\(A\\) is diagonalizable, i.e. by the spectral theorem possible when matricies are normal).\nDefinitions Singular value decomposition Every \\(m \\times n\\) matrix has a factorization of the form:\n\\begin{equation} M = U D^{\\frac{1}{2}} V^{*} \\end{equation}\nwhere, \\(U\\) is an unitary matrix, \\(D^{\\frac{1}{2}}\\) a diagonalish (i.e. rectangular diagonal) matrix with non-negative numbers on its diagonal called singular values, which are the positive square roots of eigenvalues of \\(M^{* }M\\) \u0026mdash; meaning the diagonal of \\(D^{\\frac{1}{2}}\\) is non-negative (\\(\\geq 0\\)). Finally, \\(V\\) is formed columns of orthonormal bases of eigenvectors of \\(M^{*}M\\).\nSVD is not technically unique, but we like to force a specific (convenient, see proof for why) ordering: where \\(D^{\\frac{1}{2}}\\) (and the corresponding values in \\(V^{*}\\)) is sorted such that the zero values are to the right.\nDoing It Doing SVD is not actually super duper hard, but it takes some thinking on why it works, which we shall do below.\nRecall that \\(V^{* }\\) is the conjugate transpose of the orthonormal eigenvectors of \\(M^{*} M\\). Then, we construct the square roots of the corresponding eigenvalues and arrange them into \\(D^{\\frac{1}{2}}\\).\nTangent:\nWhy is it we can take square roots of these values (i.e. the eigenvalues are guaranteed positive or zero?) Recall the definition of adjoint:\n\\begin{equation} \\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle \\end{equation}\nApplying it here, we have\n\\begin{equation} \\langle M^{*}M v, v \\rangle = \\langle M v, M v \\rangle \\end{equation}\nAnd recall that, by definition of inner product, \\(\\langle Mv, Mv \\rangle \\geq 0\\), and so \\(\\|Mv\\|^{2} \\geq 0\\) and so \\(\\|Mv\\| \\geq 0\\) so \\(\\| \\lambda v \\| \\geq 0\\).\nAnd so you can take the square roots of those singular values (i.e. square roots of eigenvalues of \\(M^{*}M\\)).\nHow do we get \\(U\\)? 
Well recall:\n\\begin{equation} M = U D^{\\frac{1}{2}} V^{*} \\end{equation}\nAnd \\(V\\) is an operator lined with orthornomal eigenbases so it is unitary and so \\(V = (V^{*})^{-1}\\).\nAnd therefore, we apply \\(V\\) on both sides:\n\\begin{equation} MV = UD^{\\frac{1}{2}} \\end{equation}\nAs \\(D\\) is diagonal, and we know the left side, we can then easily recover \\(U\\) by staring at it (and norming the vectors).\nMotivation and Proof Beginning Motivation We have a matrix \\(M\\) of shape \\(m \\times n\\), it sucks: it may not be normal, it may not even be an operator.\nSo consider now:\n\\begin{equation} M^{*} M \\end{equation}\nyou will note that this is now an operator (\\((n \\times m)(m \\times n) = n \\times n\\))!! Not only that, \\(M^{*}M\\) is self-adjoint (\\((M^{*}M)^{*} = M^{*}(M^{*})^{*} = M^{*}M\\)). Of course self-adjoint matricies are normal, which is nice, so spectral theorem applies here (even the real version because self-adjoint!)\nEigendecomposition of \\(M^{*}M\\) So, by the spectral theorem, there are a basis of orthonormal eigenvectors \\(v_1, \\dots v_{n}\\) of \\(M^{*}M\\) such that:\nGiven:\n\\begin{equation} V = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n}) \\end{equation}\nwe have\n\\begin{equation} M^{*}M = V D_0 V^{-1} \\end{equation}\ni.e. this is the eigendecomposition (\u0026ldquo;similar to diagonal\u0026rdquo;) result we had from before, where \\(D_0\\) is a Diagonal Matrix of eigenvalues on the diagonal.\nSwapping the direction of conjugation, to expose the diagonal matrix by itself, we have:\n\\begin{equation} D_0 = V^{-1} M^{*} M V \\end{equation}\nYou will NOTICE! The spectral theorem gives us that \\(v_1, \u0026hellip; v_{n}\\) is not only a basis of eigenvectors, but an ORTHONORMAL basis of eigenvectors. So \\(V\\) is an operator with orthogonal columns. 
And so, because of this result, we have that: \\(V^{*} = V^{-1}\\).\nSubstituting this in, we have:\n\\begin{equation} D_0 = V^{*} M^{*} M V \\end{equation}\nAside #1: zero-eigenvalue eigenvector ordering To make this better, we can order \\(v_1, \\dots v_{n}\\) such that eigenvectors vectors corresponding to \\(\\lambda = 0\\) comes last.\nAnd so we make a \\(V\\):\n\\begin{equation} V = \\mqty(v_1 \u0026amp;\\dots \u0026amp;v_{n-p} \u0026amp; v_{n-p+1} \u0026amp;\\dots \u0026amp;v_{n}) \\end{equation}\nSo we have two sub-matricies: an matrix \\(V_1\\) of shape \\((n, n-p)\\) which is filled by eigenvectors corresponding to eigenvalues not \\(=0\\), and the other matrix \\(V_2\\) of shape \\((n,p)\\) which is made of eigenvectors corresponding to zero eigenvalues.\nThat is:\n\\begin{equation} \\begin{cases} V_1 = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n-p}) \\\\ V_1 = \\mqty(v_{n-p+1} \u0026amp; \\dots \u0026amp; v_{n}) \\\\ \\end{cases} \\end{equation}\nand\n\\begin{equation} V = \\mqty(V_1 \u0026amp; V_2) \\end{equation}\nwhere, \\(v_1, \u0026hellip;, v_{n-p}\\) are orthonormal eigenvectors corresponding to non-zero eigenvalues, and \\(v_{n-p+1}, \u0026hellip;, v_{n}\\) are that corresponding to zero eigenvalue.\nFurthermore, this ordering of the eigenvectors can help us better clarify what \\(D_0\\) is:\n\\begin{equation} D_0 = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{equation}\nWhere, \\(D\\) is a Diagonal Matrix with a strictly positive diagonal as the non-diagonals are zero by definition, the lower-right quadrant is \\(0\\) because the sub-part of \\(V_2\\) are eigenvectors corresponding to the zero eigenvalue.\nApplying \\(V_1, V_2\\) breakup from aside above Ok, recall where we were:\n\\begin{equation} D_0 = V^{*} M^{*} M V \\end{equation}\nApplying the substitutions from above:\n\\begin{equation} \\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{equation}\nNow, recall how matricies 
multiply:\n\\begin{align} \u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\\\\ \\Rightarrow\\ \u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) \\mqty(M^{*} M V_1\\ M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\\\ \\Rightarrow\\ \u0026amp; \\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{align}\nAside #2: \\(A^{*} A = 0 \\implies A=0\\) Take the construction:\n\\begin{equation} A^{*} A = 0 \\end{equation}\nwe desire that \\(A = 0\\).\nRecall the definition of \\(A^{*}\\):\n\\begin{equation} \\langle Av, w \\rangle = \\langle v, A^{*}w \\rangle \\end{equation}\nfor all \\(v,w\\).\nNow, consider:\n\\begin{equation} \\langle A^{*} Av, w \\rangle = \\langle A^{*} (Av), w \\rangle = \\langle Av, (A^{*})^{*}w \\rangle = \\langle Av, Aw \\rangle \\end{equation}\nApplying the above, finally, consider:\n\\begin{equation} \\|Av\\|^{2} = \\langle Av, Av \\rangle = \\langle A^{*}A v, v \\rangle \\end{equation}\nRecall that \\(A^{*}A = 0\\), so:\n\\begin{equation} \\|Av\\|^{2} = \\langle A^{*}A v, v \\rangle = \\langle 0v,v \\rangle = 0 \\end{equation}\nSo, the norm of \\(Av = 0\\) for all \\(v \\in V\\), which means \\(A\\) produces only \\(0\\) vectors, which means \\(A=0\\), as desired.\nBreaking \\(V_{j}^{*} M^{*}M V_{j}\\) up Recall where we ended up at:\n\\begin{equation} \\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{equation}\nConsider its diagonals:\n\\begin{equation} \\begin{cases} V_1^{*} M^{*} M V_1 = D \\\\ V_2^{*} M^{*} M V_2 = 0 \\end{cases} \\end{equation}\nNow, for the second expression, we have: \\(V_2^{*}M^{*}MV_{2} = (M V_2)^{*} (M V_2) = 0\\). 
So, from the result above (that \\(A^{*}A = 0 \\implies A=0\\)), we have that \\(MV_{2} = 0\\).\nAside #3: \\(V_1 V_1^{*} + V_2V_2^{*} = I\\) Consider:\n\\begin{equation} V_1 V_1^{*} \\end{equation}\nThe matrix \\(V_1\\) has shape \\((n, n-p)\\), and this makes \\(V_1^{* }\\) have shape \\((n-p, n)\\). You will, therefore, note that \\(V_{1}^{* }\\) is a map from a vector space of dimension \\(n\\) to that in a dimension \\(n-p\\). This map, then, is not injective when \\(p\\neq 0\\). Therefore, the overall operator \\(V_1 V_1^{* }\\) is also not going to be injective because non-zero is going to be sent by \\(V_1^{* }\\) to \\(0\\), then sent still by \\(V_1\\) to \\(0\\). This also means that \\(V_1 V_1^{*}\\) is not invertable.\nYet, we are trying to show \\(V_1 V_1^{*} + V_2 V_2^{*} = I\\), which is the sum of these two noninvertible map, is \\(I\\): the grandaddy of all invertible maps. What gives?\nRecall that:\n\\begin{equation} \\begin{cases} \\mqty(V_1 \u0026amp; V_2) = V \\\\ V V^{*} = I \\end{cases} \\end{equation}\nThe first result is by definition, the second because \\(V\\) is an orthonormal operator so it is unitary.\nLet us begin with:\n\\begin{align} I \u0026amp;= V V^{*} \\\\ \u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1 \u0026amp; V_2)^{*} \\\\ \u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1^{*} \\\\ V_2^{*}) \\\\ \u0026amp;= V_1V_1^{*} + V_2 V_2^{*} \\end{align}\nAnd the last equation simply comes from how matrices multiply: row by column. And so, weirdly, we can confirm that adding non-full rank matricies and end up to be the identity. So, again:\n\\begin{equation} V_1 V_1^{*} + V_2V_2^{*} = I \\end{equation}\nConstructing \\(U_1\\) With the result above, we are finally close to doing what we want to do. 
Recall our last set of conclusions:\none, that:\n\\begin{equation} \\begin{cases} V_1^{*} M^{*} M V_1 = D \\\\ V_2^{*} M^{*} M V_2 = 0 \\end{cases} \\end{equation}\nand specifically, that \\(MV_{2} = 0\\).\nand two, that:\n\\begin{align} \u0026amp;V_1 V_1^{* } + V_2V_2^{* } = I \\\\ \\Rightarrow\\ \u0026amp; V_1 V_1^{* } = I - V_2V_2^{* } \\end{align}\nLet\u0026rsquo;s now turn our attention to \\(D\\) above. It has all non-zero diagonals, because we cropped out the zero already (see above during the definition of \\(D\\) vis a vi \\(D_0\\)). This means it is invertible because operator is only invertible if diagonal of its upper-triangular matrix is nonzero. For a diagonal matrix, this is particularly easy; let us construct:\n\\begin{equation} D = D^{\\frac{1}{2}} D^{\\frac{1}{2}} \\end{equation}\nwhere, \\(D^{\\frac{1}{2}}\\) is just the same diagonal matrix as \\(D\\) except we take the square root of everything in the diagonal. The above could be shown then to be true by calculation (\\(\\sqrt{a}\\sqrt{a} = a\\) on every element in the diagonal).\nLet us also make:\n\\begin{equation} I = D^{-\\frac{1}{2}} D^{\\frac{1}{2}} \\end{equation}\nwhere, \\(D^{-\\frac{1}{2}}\\) is \\(\\frac{1}{\\sqrt{a}}\\) for event element \\(a\\) in the diagonal. 
Again, the above could be shown to be true by calculation by \\(\\sqrt{a} \\frac{1}{\\sqrt{a}} = 1\\).\nGiven the diagonal of \\(D\\) contains the eigenvalues of \\(M^{*}M\\), by calculation \\(D^{\\frac{1}{2}}\\) contains the square roots of these eigenvalues, which means that it should contain on its diagonal the singular values of \\(M\\), which is rather nice (because we have corollaries below that show concordance between singular values of \\(M\\) and its eigenvalues, see below).\nConsider, finally, the matrix \\(M\\):\n\\begin{align} M \u0026amp;= M - 0 \\\\ \u0026amp;= M - 0 V_2^{* } \\\\ \u0026amp;= M - (M V_2) V_2^{* } \\\\ \u0026amp;= M (I - V_2 V_2^{* }) \\\\ \u0026amp;= M V_1V_1^{*} \\\\ \u0026amp;= M V_1 I V_1^{*} \\\\ \u0026amp;= M V_1 (D^{-\\frac{1}{2}} D^{\\frac{1}{2}}) V_1^{*} \\\\ \u0026amp;= (M V_1 D^{-\\frac{1}{2}}) D^{\\frac{1}{2}} V_1^{*} \\end{align}\nWe now define a matrix \\(U_1\\):\n\\begin{equation} U_1 = M V_1 D^{-\\frac{1}{2}} \\end{equation}\nWe now have:\n\\begin{equation} M = U_1 D^{\\frac{1}{2}} V_1^{*} \\end{equation}\nWere \\(U_1\\) is a matrix of shape \\((m \\times n)(n \\times n-p)(n-p \\times n-p) = m \\times n-p\\), \\(D^{\\frac{1}{2}}\\) is a diagonal matrix of shape \\(n-p \\times n-p\\) with singular values on the diagonal, and \\(V_1^{*}\\) is a matrix with orthonormal rows of shape \\(n-p \\times n\\).\nThis is a compact svd. We are sandwitching a diagonal matrix of singular values between two rectangular matricies to recover \\(M\\). Ideally, we want the left and right matricies too to have nice properties (like, say, be an operator or have unitarity). So we work harder.\nAside #4: \\(U_1\\) is orthonormal We can\u0026rsquo;t actually claim \\(U_1\\) is unitary because its not an operator. However, we like to show its columns are orthonormal so far so we can extend it into a fully, actually, unitary matrix.\nOne sign that a matrix is orthonormal is if \\(T^{*}T = I\\). 
Because of the way that matricies multiply, this holds IFF each column yields a \\(1\\) when its own conjugate transpose is applied, and \\(0\\) otherwise. This is also the definition of orthonormality.\nTherefore, we desire \\(U_{1}^{*} U_1 = I\\). We hence consider:\n\\begin{equation} U_1^{*} U_1 \\end{equation}\nWe have by substitution of \\(U_1 = M V_1 D^{-\\frac{1}{2}}\\):\n\\begin{equation} (M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}}) \\end{equation}\nGiven the property that \\((AB)^{*} = B^{*}A^{*}\\), we have that:\n\\begin{equation} (M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}}) = {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \\end{equation}\nRecall now that, from way before, we have:\n\\begin{equation} V_1^{*} M^{*} M V_1 = D \\end{equation}\nSubstituting that in:\n\\begin{align} {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} (V_1^{*} M^{*}M V_1) D^{-\\frac{1}{2}} \\\\ \u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}} \\end{align}\nRecall now that the multiplication of diagonal matricies are commutative (by calculation), and that diagonal real matricies are self-adjoint (try conjugate-transposing a real diagonal matrix). We know that \\(D^{-\\frac{1}{2}}\\) is real (because its filled with the square roots of the eigenvalues of \\(M^{*}M\\), which is self-adjoint, and eigenvalues of self-adjoint matricies are real) and is by definition diagonal. 
So we have that \\(D^{-\\frac{1}{2}}\\) is self-adjoint.\nTaking those facts in mind, we can now rewrite this expression:\n\\begin{align} {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}} \\\\ \u0026amp;= D^{-\\frac{1}{2}} D D^{-\\frac{1}{2}} \\\\ \u0026amp;= D^{-\\frac{1}{2}}D^{-\\frac{1}{2}} D \\\\ \u0026amp;= D^{-1} D \\\\ \u0026amp;= I \\end{align}\nTherefore, \\(U_1^{*} U_1 = I\\) as desired, so the columns of \\(U_1\\) is orthonormal.\nSVD, fully Recall that, so far, we have:\n\\begin{equation} M = U_1 D^{\\frac{1}{2}} V_1^{*} \\end{equation}\nwhere\n\\begin{equation} U_1 = M V_1 D^{-\\frac{1}{2}} \\end{equation}\nSo far, \\(U_1\\) and \\(V_1^{*}\\) are both disappointingly not operators. However, we know that \\(U_1\\) and \\(V_1\\) are both orthonormal (the former per aside #4 above, the latter by the spectral theorem and construction above). So wouldn\u0026rsquo;t it be doubleplusgood for both of them to be unitary operators?\nTo make this happen, we need to change the shapes of things a little without changing how the matricies behave. That is: we want \\(U\\) and \\(V^{* }\\) to both be operators, and yet still have \\(U D^{\\frac{1}{2}} V^{*} = M\\).\nPadding out \\(D\\) and \\(V\\) There are immediate and direct ways of padding out \\(D^{\\frac{1}{2}}\\) and \\(V_{1}^{*}\\): let us replace \\(V_1 \\implies V\\), and just shove enough zeros into \\(D\\) such that the dimensions work out (changing it from shape \\(n-p \\times n-p\\) to \\(m \\times n\\), but do this by just adding enough zeros on the edges until it works).\nSo first, \\(D^{\\frac{1}{2}}\\) becomes:\n\\begin{equation} D^{\\frac{1}{2}}_{new} = \\mqty(D^{\\frac{1}{2}}_{orig} \u0026amp; 0 \u0026amp;\\dots \\\\ 0 \u0026amp; 0 \u0026amp;\\dots \\\\ 0\u0026amp;0\u0026amp;\\dots) \\end{equation}\n(the number of zeros on the edge vary based on the proportions of \\(n, p, m\\)).\nWhy would this always be padding? i.e. 
why is \\(n-p \\leq m\\)? Here\u0026rsquo;s a hand-wavy proof that the reader can choose to fill in the gaps of: consider the fact that \\(M\\)\u0026rsquo;s shape is \\(m \\times n\\). Specifically, this means that \\(M: V \\to W\\) where \\(\\dim V = n\\) and \\(\\dim W = m\\). Say for the sake of argument \\(n\u0026gt; m\\) (otherwise naturally \\(n-p \\leq m\\) because \\(n\\leq m\\)). Consider \\(null\\ M\\); given it is a map from a larger space to a smaller space, there\u0026rsquo;s going to be a non-trivial null space. This non-trivial null space is going to be as large or larger than \\(m-n\\); and the null space of \\(M^{*}M\\) will be at least as large as \\(m-n\\) as well because everything is sent through \\(M\\) first. And then applying rank nullity can show that \\(m \\geq \\dim\\ range\\ M^{ *}M\\). Therefore, the number of non-zero eigenvalues of \\(M^{ *}M\\), which also corresponds to the number of non-zero columns of \\(D\\), which also is \\(n-p\\), must be smaller than or equal to \\(m\\) because otherwise the diagonal representation would have too many linearly independent columns (i.e. more lin. indp. columns that the rank which is impossible).\nNow, we have\n\\begin{equation} V = \\mqty(V_1 \u0026amp; V_2) \\end{equation}\nwhere \\(V_1\\) is a matrix whose columns are the non-zero eigenvalue correlated eigenvectors, and the columns of \\(V_1\\) the zero-eigenvalue related ones.\nNote, now that:\n\\(D^{\\frac{1}{2}}_{new} V^{* }\\) is an \\(m \\times n\\) matrix that behaves almost exactly like \\(D^{\\frac{1}{2}}_{orig} V_1^{*}\\), a \\(n-p \\times n\\) matrix. 
The last \\(m-(n-p)\\) (as we established before, \\(m \\geq n-p\\)) dimensions of the new, padded matrix\u0026rsquo;s output will simply be \\(0\\): because recall that \\(DT\\) for some diagonal matrix \\(D\\) scales the rows of \\(T\\): and the first \\(n-p\\) rows (corresponding to the columns of \\(V_1\\), because recall we are applying \\(V\\) not \\(V^{ *}\\) to \\(D\\)) will be scaled normally, and the last \\(m-(n-p)\\) rows will be scaled by \\(0\\) as they are a part of the padded zero-diagonal.\nPadding out \\(U\\) With \\(D\\) and \\(V\\) padded, its time to deal with \\(U\\). Fortunately, recall that the last bit of the output of \\(DV\\) will just be \\(0\\): so whatever we stick in terms of columns of \\(V\\) for those slots will never actually be added to the final output. In a sense, they don\u0026rsquo;t really matter.\nThe first \\(n-p\\) of \\(U\\) (i.e. \\(U_{1}\\)) we already have a well-defined answer: recall from before \\(U_1 = M D^{-\\frac{1}{2}} V_{1}^{*}\\). So the last bit we can just stick on literally whatever to make it work.\nAnd by \u0026ldquo;making it work\u0026rdquo;, we literally just mean extending the columns of \\(U_1\\) until you have \\(m\\) linearly-independent of them, then Gram-Schmidtting to make it all orthonormal. 
The first \\(n-p\\) columns will not be affected by Gram-Schmidtting, as we have established before \\(U_1\\) is orthonormal.\nAgain, these are basically arbitrary: no one cares because these columns will always be scaled by \\(0\\) as they are part of the \u0026ldquo;padding columns\u0026rdquo; from padding \\(D^{\\frac{1}{2}}\\) out.\nand so, finally Finally, we now have:\n\\begin{equation} M = U D^{\\frac{1}{2}} V^{*} \\end{equation}\nwhere, \\(U\\) is an \\(m \\times m\\) unitary operator, \\(D^{\\frac{1}{2}}\\) is an \\(m \\times n\\) semidiagonal matrix (diagonal \\(n-p \\times n-p\\) part, then \\(0\\) padding all around) filled with singular values of \\(M\\) on the diagonal, and \\(V^{*}\\) is an \\(n \\times n\\) unitary operator filled with orthonormal rows of right singular-vectors (i.e. eigenvectors of \\(M^{ *}M\\)).\nUseful corollaries If \\(\\lambda\\) is an non-negative real eigenvalue of \\(M\\), then \\(\\lambda\\) is sometimes a singular value of \\(M\\) Consider the matrix:\n\\begin{equation} \\mqty(1 \u0026amp; 1 \\\\0 \u0026amp; 0) \\end{equation}\nsingular values: \\(\\sqrt{2},0\\) eigenvalues: \\(1,0\\) However, the statement is the case if \\(M\\) is already diagonalizable, then in which case you can imagine constructing \\(M^{* }M\\) to be vis a vi the eigenbasis of \\(M\\), which means that the resulting diagonal representation of \\(M^{*}M\\) would just be the eigenvalues of \\(M\\) squared as you are multiplying a diagonal matrix by itself.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003eSingular value decomposition\u003c/a\u003e is a factorization of a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e, which is a generalization of the \u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eeigendecomposition\u003c/a\u003e of \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e (i.e. 
where \\(A = V^{-1} D V\\) when \\(A\\) is \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e, i.e. by the \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e possible when matricies are \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e).\u003c/p\u003e\n\u003ch2 id=\"definitions\"\u003eDefinitions\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eSingular value decomposition\u003c/strong\u003e Every \\(m \\times n\\) matrix has a factorization of the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U D^{\\frac{1}{2}} V^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(U\\) is an \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e matrix, \\(D^{\\frac{1}{2}}\\) a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003eish (i.e. rectangular diagonal) matrix with non-negative numbers on its diagonal called \u003cstrong\u003esingular values\u003c/strong\u003e, which are the positive square roots of eigenvalues of \\(M^{* }M\\) \u0026mdash; meaning the diagonal of \\(D^{\\frac{1}{2}}\\) is non-negative (\\(\\geq 0\\)). Finally, \\(V\\) is formed columns of orthonormal bases of eigenvectors of \\(M^{*}M\\).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003eSVD\u003c/a\u003e is not technically unique, but we like to force a specific (convenient, see proof for why) ordering: where \\(D^{\\frac{1}{2}}\\) (and the corresponding values in \\(V^{*}\\)) is sorted such that the zero values are to the right.\u003c/p\u003e\n\u003ch2 id=\"doing-it\"\u003eDoing It\u003c/h2\u003e\n\u003cp\u003eDoing SVD is not actually super duper hard, but it takes some thinking on why it works, which we shall do below.\u003c/p\u003e\n\u003cp\u003eRecall that \\(V^{* }\\) is the conjugate transpose of the orthonormal eigenvectors of \\(M^{*} M\\). 
Then, we construct the square roots of the corresponding eigenvalues and arrange them into \\(D^{\\frac{1}{2}}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\u003cstrong\u003eTangent\u003c/strong\u003e:\u003c/p\u003e\n\u003cp\u003eWhy is it we can take square roots of these values (i.e. the eigenvalues are guaranteed positive or zero?) Recall the definition of adjoint:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying it here, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle M^{*}M v, v \\rangle = \\langle M v, M v \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd recall that, by definition of inner product, \\(\\langle Mv, Mv \\rangle \\geq 0\\), and so \\(\\|Mv\\|^{2} \\geq 0\\) and so \\(\\|Mv\\| \\geq 0\\) so \\(\\| \\lambda v \\| \\geq 0\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAnd so you can take the square roots of those singular values (i.e. square roots of eigenvalues of \\(M^{*}M\\)).\u003c/p\u003e\n\u003cp\u003eHow do we get \\(U\\)? 
Well recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U D^{\\frac{1}{2}} V^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd \\(V\\) is an operator lined with orthornomal eigenbases so it is unitary and so \\(V = (V^{*})^{-1}\\).\u003c/p\u003e\n\u003cp\u003eAnd therefore, we apply \\(V\\) on both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nMV = UD^{\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs \\(D\\) is diagonal, and we know the left side, we can then easily recover \\(U\\) by staring at it (and norming the vectors).\u003c/p\u003e\n\u003ch2 id=\"motivation-and-proof\"\u003eMotivation and Proof\u003c/h2\u003e\n\u003ch3 id=\"beginning-motivation\"\u003eBeginning Motivation\u003c/h3\u003e\n\u003cp\u003eWe have a matrix \\(M\\) of shape \\(m \\times n\\), it sucks: it may not be \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e, it may not even be an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo consider now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM^{*} M\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that this is now an \u003cem\u003eoperator (\\((n \\times m)(m \\times n) = n \\times n\\))!!\u003c/em\u003e Not only that, \\(M^{*}M\\) is \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e (\\((M^{*}M)^{*} = M^{*}(M^{*})^{*} = M^{*}M\\)). 
Of course \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e matricies are \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e, which is nice, so \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e applies here (even the real version because \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e!)\u003c/p\u003e\n\u003ch3 id=\"eigendecomposition--kbhnus-math530-similar-to-diagonal-dot-md--of-m-m\"\u003e\u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eEigendecomposition\u003c/a\u003e of \\(M^{*}M\\)\u003c/h3\u003e\n\u003cp\u003eSo, by the \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e, there are a basis of orthonormal \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es \\(v_1, \\dots v_{n}\\) of \\(M^{*}M\\) such that:\u003c/p\u003e\n\u003cp\u003eGiven:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM^{*}M = V D_0 V^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. this is the \u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eeigendecomposition\u003c/a\u003e (\u0026ldquo;similar to diagonal\u0026rdquo;) result we had from before, where \\(D_0\\) is a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003eDiagonal Matrix\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es on the diagonal.\u003c/p\u003e\n\u003cp\u003eSwapping the direction of conjugation, to expose the diagonal matrix by itself, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = V^{-1} M^{*} M V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will NOTICE! 
The \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e gives us that \\(v_1, \u0026hellip; v_{n}\\) is not only a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, but an \u003cstrong\u003eORTHONORMAL\u003c/strong\u003e basis of eigenvectors. So \\(V\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e with \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e columns. And so, because of \u003ca href=\"/posts/kbhnus_math530_matrix_adjectives/#an-unitary-operator-is-invertible-and-the-inverse-of-its-matrix-representation-is-its-transpose\"\u003ethis result\u003c/a\u003e, we have that: \\(V^{*} = V^{-1}\\).\u003c/p\u003e\n\u003cp\u003eSubstituting this in, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = V^{*} M^{*} M V\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"aside-1-zero-eigenvalue-eigenvector-ordering\"\u003eAside #1: zero-eigenvalue eigenvector ordering\u003c/h3\u003e\n\u003cp\u003eTo make this better, we can order \\(v_1, \\dots v_{n}\\) such that eigenvectors vectors corresponding to \\(\\lambda = 0\\) comes last.\u003c/p\u003e\n\u003cp\u003eAnd so we make a \\(V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(v_1 \u0026amp;\\dots \u0026amp;v_{n-p} \u0026amp; v_{n-p+1} \u0026amp;\\dots \u0026amp;v_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we have two sub-matricies: an matrix \\(V_1\\) of shape \\((n, n-p)\\) which is filled by \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es corresponding to \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es not \\(=0\\), and the other matrix \\(V_2\\) of shape \\((n,p)\\) which is made of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es corresponding to zero \u003ca 
href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV_1 = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n-p}) \\\\\nV_1 = \\mqty(v_{n-p+1} \u0026amp; \\dots \u0026amp; v_{n}) \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(V_1 \u0026amp; V_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v_1, \u0026hellip;, v_{n-p}\\) are orthonormal \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es corresponding to non-zero \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, and \\(v_{n-p+1}, \u0026hellip;, v_{n}\\) are that corresponding to \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFurthermore, this ordering of the eigenvectors can help us better clarify what \\(D_0\\) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere, \\(D\\) is a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003eDiagonal Matrix\u003c/a\u003e with a strictly positive \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e as the non-diagonals are zero by definition, the lower-right quadrant is \\(0\\) because the sub-part of \\(V_2\\) are eigenvectors corresponding to the zero eigenvalue.\u003c/p\u003e\n\u003ch3 id=\"applying-v-1-v-2-breakup-from-aside-above\"\u003eApplying \\(V_1, V_2\\) breakup from aside above\u003c/h3\u003e\n\u003cp\u003eOk, recall where we were:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = V^{*} M^{*} M V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying the substitutions from above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 
\u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall how matricies multiply:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\\\\\n\\Rightarrow\\ \u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) \\mqty(M^{*} M V_1\\ M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\\\\n\\Rightarrow\\ \u0026amp; \\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"aside-2-a-a-0-implies-a-0\"\u003eAside #2: \\(A^{*} A = 0 \\implies A=0\\)\u003c/h3\u003e\n\u003cp\u003eTake the construction:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{*} A = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe desire that \\(A = 0\\).\u003c/p\u003e\n\u003cp\u003eRecall the definition of \\(A^{*}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Av, w \\rangle = \\langle v, A^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(v,w\\).\u003c/p\u003e\n\u003cp\u003eNow, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle A^{*} Av, w \\rangle = \\langle A^{*} (Av), w \\rangle = \\langle Av, (A^{*})^{*}w \\rangle = \\langle Av, Aw \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying the above, finally, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|Av\\|^{2} = \\langle Av, Av \\rangle = \\langle A^{*}A v, v \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(A^{*}A = 0\\), so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|Av\\|^{2} = \\langle A^{*}A v, v \\rangle = \\langle 0v,v \\rangle = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, the norm of \\(Av = 0\\) for all \\(v \\in V\\), which means \\(A\\) produces only \\(0\\) vectors, which means \\(A=0\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"breaking-v-j-m-m-v-j-up\"\u003eBreaking \\(V_{j}^{*} 
M^{*}M V_{j}\\) up\u003c/h3\u003e\n\u003cp\u003eRecall where we ended up at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsider its diagonals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV_1^{*} M^{*} M V_1 = D \\\\\nV_2^{*} M^{*} M V_2 = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, for the second expression, we have: \\(V_2^{*}M^{*}MV_{2} = (M V_2)^{*} (M V_2) = 0\\). So, from the result above (that \\(A^{*}A = 0 \\implies A=0\\)), we have that \\(MV_{2} = 0\\).\u003c/p\u003e\n\u003ch3 id=\"aside-3-v-1-v-1-plus-v-2v-2-i\"\u003eAside #3: \\(V_1 V_1^{*} + V_2V_2^{*} = I\\)\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1 V_1^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe matrix \\(V_1\\) has shape \\((n, n-p)\\), and this makes \\(V_1^{* }\\) have shape \\((n-p, n)\\). You will, therefore, note that \\(V_{1}^{* }\\) is a map from a vector space of dimension \\(n\\) to that in a dimension \\(n-p\\). This map, then, is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e when \\(p\\neq 0\\). Therefore, the overall operator \\(V_1 V_1^{* }\\) is also not going to be \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e because non-zero is going to be sent by \\(V_1^{* }\\) to \\(0\\), then sent still by \\(V_1\\) to \\(0\\). This also means that \\(V_1 V_1^{*}\\) is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eYet, we are trying to show \\(V_1 V_1^{*} + V_2 V_2^{*} = I\\), which is the sum of these two noninvertible map, is \\(I\\): the grandaddy of all invertible maps. 
What gives?\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\mqty(V_1 \u0026amp; V_2) = V \\\\\nV V^{*} = I\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe first result is by definition, the second because \\(V\\) is an orthonormal operator so it is \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet us begin with:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nI \u0026amp;= V V^{*} \\\\\n\u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1 \u0026amp; V_2)^{*} \\\\\n\u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1^{*} \\\\ V_2^{*}) \\\\\n\u0026amp;= V_1V_1^{*} + V_2 V_2^{*}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd the last equation simply comes from how matrices multiply: row by column. And so, weirdly, we can confirm that adding non-full rank matricies and end up to be the identity. So, again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1 V_1^{*} + V_2V_2^{*} = I\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"constructing-u-1\"\u003eConstructing \\(U_1\\)\u003c/h3\u003e\n\u003cp\u003eWith the result above, we are finally close to doing what we want to do. Recall our last set of conclusions:\u003c/p\u003e\n\u003cp\u003eone, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV_1^{*} M^{*} M V_1 = D \\\\\nV_2^{*} M^{*} M V_2 = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand specifically, that \\(MV_{2} = 0\\).\u003c/p\u003e\n\u003cp\u003eand two, that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;V_1 V_1^{* } + V_2V_2^{* } = I \\\\\n\\Rightarrow\\ \u0026amp; V_1 V_1^{* } = I - V_2V_2^{* }\n\\end{align}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now turn our attention to \\(D\\) above. 
It has all non-zero diagonals, because we cropped out the zero already (\u003ca href=\"#aside-1-zero-eigenvalue-eigenvector-ordering\"\u003esee above\u003c/a\u003e during the definition of \\(D\\) vis a vi \\(D_0\\)). This means it is invertible because \u003ca href=\"/posts/kbhupper_triangular_matrix/#operator-is-only-invertible-if-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix-is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e. For a diagonal matrix, this is particularly easy; let us construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD = D^{\\frac{1}{2}} D^{\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(D^{\\frac{1}{2}}\\) is just the same \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e matrix as \\(D\\) except we take the square root of everything in the diagonal. The above could be shown then to be true by calculation (\\(\\sqrt{a}\\sqrt{a} = a\\) on every element in the diagonal).\u003c/p\u003e\n\u003cp\u003eLet us also make:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = D^{-\\frac{1}{2}} D^{\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(D^{-\\frac{1}{2}}\\) is \\(\\frac{1}{\\sqrt{a}}\\) for event element \\(a\\) in the diagonal. 
Again, the above could be shown to be true by calculation by \\(\\sqrt{a} \\frac{1}{\\sqrt{a}} = 1\\).\u003c/p\u003e\n\u003cp\u003eGiven the diagonal of \\(D\\) contains the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(M^{*}M\\), by calculation \\(D^{\\frac{1}{2}}\\) contains the square roots of these \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, which means that it should contain on its \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e the \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es of \\(M\\), which is rather nice (because we have corollaries below that show concordance between \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es of \\(M\\) and its \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, see below).\u003c/p\u003e\n\u003cp\u003eConsider, finally, the matrix \\(M\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nM \u0026amp;= M - 0 \\\\\n\u0026amp;= M - 0 V_2^{* } \\\\\n\u0026amp;= M - (M V_2) V_2^{* } \\\\\n\u0026amp;= M (I - V_2 V_2^{* }) \\\\\n\u0026amp;= M V_1V_1^{*} \\\\\n\u0026amp;= M V_1 I V_1^{*} \\\\\n\u0026amp;= M V_1 (D^{-\\frac{1}{2}} D^{\\frac{1}{2}}) V_1^{*} \\\\\n\u0026amp;= (M V_1 D^{-\\frac{1}{2}}) D^{\\frac{1}{2}} V_1^{*}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe now define a matrix \\(U_1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 = M V_1 D^{-\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U_1 D^{\\frac{1}{2}} V_1^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWere \\(U_1\\) is a matrix of shape \\((m \\times n)(n \\times n-p)(n-p \\times n-p) = m \\times n-p\\), \\(D^{\\frac{1}{2}}\\) is a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e matrix of shape \\(n-p \\times n-p\\) with \u003ca 
href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular values\u003c/a\u003e on the diagonal, and \\(V_1^{*}\\) is a matrix with orthonormal rows of shape \\(n-p \\times n\\).\u003c/p\u003e\n\u003cp\u003eThis is a \u003cstrong\u003ecompact svd\u003c/strong\u003e. We are sandwitching a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e matrix of \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular values\u003c/a\u003e between two rectangular matricies to recover \\(M\\). Ideally, we want the left and right matricies too to have nice properties (like, say, be an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e or have \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitarity\u003c/a\u003e). So we work harder.\u003c/p\u003e\n\u003ch3 id=\"aside-4-u-1-is-orthonormal\"\u003eAside #4: \\(U_1\\) is orthonormal\u003c/h3\u003e\n\u003cp\u003eWe can\u0026rsquo;t actually claim \\(U_1\\) is \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e because its not an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e. However, we like to show its columns are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e so far so we can extend it into a fully, actually, \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e matrix.\u003c/p\u003e\n\u003cp\u003eOne sign that a matrix is \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e is if \\(T^{*}T = I\\). Because of the way that matricies multiply, this holds IFF each column yields a \\(1\\) when its own conjugate transpose is applied, and \\(0\\) otherwise. This is also the definition of \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003eity.\u003c/p\u003e\n\u003cp\u003eTherefore, we desire \\(U_{1}^{*} U_1 = I\\). 
We hence consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1^{*} U_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have by substitution of \\(U_1 = M V_1 D^{-\\frac{1}{2}}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven the property that \\((AB)^{*} = B^{*}A^{*}\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}}) = {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall now that, from way before, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1^{*} M^{*} M V_1 = D\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting that in:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n{D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} (V_1^{*} M^{*}M V_1) D^{-\\frac{1}{2}} \\\\\n\u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall now that the multiplication of diagonal matricies are commutative (by calculation), and that diagonal real matricies are self-adjoint (try conjugate-transposing a real diagonal matrix). We know that \\(D^{-\\frac{1}{2}}\\) is real (because its filled with the square roots of the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(M^{*}M\\), which is \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e, and \u003ca href=\"/posts/kbhaxler_7_a/#eigenvalues-of-id-04577953-b953-4ac0-8102-fe9b804bdfc9-self-adjoint-matricies-are-real\"\u003eeigenvalues of self-adjoint matricies are real\u003c/a\u003e) and is by definition diagonal. 
So we have that \\(D^{-\\frac{1}{2}}\\) is self-adjoint.\u003c/p\u003e\n\u003cp\u003eTaking those facts in mind, we can now rewrite this expression:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n{D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}} \\\\\n\u0026amp;= D^{-\\frac{1}{2}} D D^{-\\frac{1}{2}} \\\\\n\u0026amp;= D^{-\\frac{1}{2}}D^{-\\frac{1}{2}} D \\\\\n\u0026amp;= D^{-1} D \\\\\n\u0026amp;= I\n\\end{align}\u003c/p\u003e\n\u003cp\u003eTherefore, \\(U_1^{*} U_1 = I\\) as desired, so the columns of \\(U_1\\) is orthonormal.\u003c/p\u003e\n\u003ch3 id=\"svd-fully\"\u003eSVD, fully\u003c/h3\u003e\n\u003cp\u003eRecall that, so far, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U_1 D^{\\frac{1}{2}} V_1^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 = M V_1 D^{-\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo far, \\(U_1\\) and \\(V_1^{*}\\) are both disappointingly not \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es. However, we know that \\(U_1\\) and \\(V_1\\) are both orthonormal (the former per aside #4 above, the latter by the \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e and \u003ca href=\"#aside-1-zero-eigenvalue-eigenvector-ordering\"\u003econstruction above\u003c/a\u003e). So wouldn\u0026rsquo;t it be doubleplusgood for both of them to be \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es?\u003c/p\u003e\n\u003cp\u003eTo make this happen, we need to change the shapes of things a little without changing how the matricies behave. 
That is: we want \\(U\\) and \\(V^{* }\\) to both be \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es, and yet still have \\(U D^{\\frac{1}{2}} V^{*} = M\\).\u003c/p\u003e\n\u003ch4 id=\"padding-out-d-and-v\"\u003ePadding out \\(D\\) and \\(V\\)\u003c/h4\u003e\n\u003cp\u003eThere are immediate and direct ways of padding out \\(D^{\\frac{1}{2}}\\) and \\(V_{1}^{*}\\): let us replace \\(V_1 \\implies V\\), and just shove enough zeros into \\(D\\) such that the dimensions work out (changing it from shape \\(n-p \\times n-p\\) to \\(m \\times n\\), but do this by just adding enough zeros on the edges until it works).\u003c/p\u003e\n\u003cp\u003eSo first, \\(D^{\\frac{1}{2}}\\) becomes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD^{\\frac{1}{2}}_{new} = \\mqty(D^{\\frac{1}{2}}_{orig} \u0026amp; 0 \u0026amp;\\dots \\\\ 0 \u0026amp; 0 \u0026amp;\\dots \\\\ 0\u0026amp;0\u0026amp;\\dots)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(the number of zeros on the edge vary based on the proportions of \\(n, p, m\\)).\u003c/p\u003e\n\u003cp\u003eWhy would this always be padding? i.e. why is \\(n-p \\leq m\\)? Here\u0026rsquo;s a hand-wavy proof that the reader can choose to fill in the gaps of: consider the fact that \\(M\\)\u0026rsquo;s shape is \\(m \\times n\\). Specifically, this means that \\(M: V \\to W\\) where \\(\\dim V = n\\) and \\(\\dim W = m\\). Say for the sake of argument \\(n\u0026gt; m\\) (otherwise naturally \\(n-p \\leq m\\) because \\(n\\leq m\\)). Consider \\(null\\ M\\); given it is a map from a larger space to a smaller space, \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003ethere\u0026rsquo;s going to be a non-trivial null space\u003c/a\u003e. This non-trivial null space is going to be as large or larger than \\(m-n\\); and the null space of \\(M^{*}M\\) will be at least as large as \\(m-n\\) as well because everything is sent through \\(M\\) first. 
And then applying rank nullity can show that \\(m \\geq \\dim\\ range\\ M^{ *}M\\). Therefore, the number of non-zero eigenvalues of \\(M^{ *}M\\), which also corresponds to the number of non-zero columns of \\(D\\), which also is \\(n-p\\), must be smaller than or equal to \\(m\\) because otherwise the diagonal representation would have too many linearly independent columns (i.e. more lin. indp. columns that the rank which is impossible).\u003c/p\u003e\n\u003cp\u003eNow, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(V_1 \u0026amp; V_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(V_1\\) is a matrix whose columns are the non-zero \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e correlated \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, and the columns of \\(V_1\\) the zero-eigenvalue related ones.\u003c/p\u003e\n\u003cp\u003eNote, now that:\u003c/p\u003e\n\u003cp\u003e\\(D^{\\frac{1}{2}}_{new} V^{* }\\) is an \\(m \\times n\\) matrix that behaves almost exactly like \\(D^{\\frac{1}{2}}_{orig} V_1^{*}\\), a \\(n-p \\times n\\) matrix. The last \\(m-(n-p)\\) (as we established before, \\(m \\geq n-p\\)) dimensions of the new, padded matrix\u0026rsquo;s output will simply be \\(0\\): because recall that \\(DT\\) for some diagonal matrix \\(D\\) scales the \u003cem\u003erows\u003c/em\u003e of \\(T\\): and the first \\(n-p\\) rows (corresponding to the columns of \\(V_1\\), because recall we are applying \\(V\\) not \\(V^{ *}\\) to \\(D\\)) will be scaled normally, and the last \\(m-(n-p)\\) rows will be scaled by \\(0\\) as they are a part of the padded zero-diagonal.\u003c/p\u003e\n\u003ch4 id=\"padding-out-u\"\u003ePadding out \\(U\\)\u003c/h4\u003e\n\u003cp\u003eWith \\(D\\) and \\(V\\) padded, its time to deal with \\(U\\). 
Fortunately, recall that the last bit of the output of \\(DV\\) will just be \\(0\\): so whatever we stick in terms of columns of \\(V\\) for those slots will never actually be added to the final output. In a sense, they don\u0026rsquo;t really matter.\u003c/p\u003e\n\u003cp\u003eThe first \\(n-p\\) of \\(U\\) (i.e. \\(U_{1}\\)) we already have a well-defined answer: recall from before \\(U_1 = M D^{-\\frac{1}{2}} V_{1}^{*}\\). So the last bit we can just stick on literally whatever to make it work.\u003c/p\u003e\n\u003cp\u003eAnd by \u0026ldquo;making it work\u0026rdquo;, we literally just mean extending the columns of \\(U_1\\) until you have \\(m\\) linearly-independent of them, then \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting to make it all orthonormal. The first \\(n-p\\) columns will not be affected by \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting, as we have established before \u003ca href=\"#aside-4-u-1-is-orthonormal\"\u003e\\(U_1\\) is orthonormal\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAgain, these are basically arbitrary: no one cares because these columns will always be scaled by \\(0\\) as they are part of the \u0026ldquo;padding columns\u0026rdquo; from padding \\(D^{\\frac{1}{2}}\\) out.\u003c/p\u003e\n\u003ch4 id=\"and-so-finally\"\u003eand so, finally\u003c/h4\u003e\n\u003cp\u003eFinally, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U D^{\\frac{1}{2}} V^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(U\\) is an \\(m \\times m\\) \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e, \\(D^{\\frac{1}{2}}\\) is an \\(m \\times n\\) semidiagonal matrix (diagonal \\(n-p \\times n-p\\) part, then \\(0\\) padding all around) filled with \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es of \\(M\\) on the diagonal, and \\(V^{*}\\) is an 
\\(n \\times n\\) \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e filled with orthonormal rows of right singular-vectors (i.e. \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(M^{ *}M\\)).\u003c/p\u003e\n\u003ch2 id=\"useful-corollaries\"\u003eUseful corollaries\u003c/h2\u003e\n\u003ch3 id=\"if-lambda-is-an-non-negative-real-eigenvalue-of-m-then-lambda-is-sometimes-a-singular-value-of-m\"\u003eIf \\(\\lambda\\) is an non-negative real eigenvalue of \\(M\\), then \\(\\lambda\\) is sometimes a singular value of \\(M\\)\u003c/h3\u003e\n\u003cp\u003eConsider the matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(1 \u0026amp; 1 \\\\0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es: \\(\\sqrt{2},0\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es: \\(1,0\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHowever, the statement is the case if \\(M\\) is already \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e, then in which case you can imagine constructing \\(M^{* }M\\) to be vis a vi the eigenbasis of \\(M\\), which means that the resulting diagonal representation of \\(M^{*}M\\) would just be the eigenvalues of \\(M\\) squared as you are multiplying a diagonal matrix by itself.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsingular_value_decomposition/","tags":null,"title":"singular value decomposition"},{"categories":null,"contents":"The SIR Model is a model to show how diseases spread.\nSusceptible \u0026ndash; # of susceptible people Infectious \u0026mdash; # of infectious people Removed \u0026mdash; # of removed people Compartmental SIR model S =\u0026gt; I =\u0026gt; R [ =\u0026gt; S]\nSo then, 
the question is: what is the transfer rate between populations between these compartments?\nParameters:\n\\(R_0\\) \u0026ldquo;reproductive rate\u0026rdquo;: the number of people that one infectious person will infect over the duration of their entire infectious period, if the rest of the population is entirely susceptible (only appropriate for a short duration) \\(D\\) \u0026ldquo;duration\u0026rdquo;: duration of the infectious period \\(N\\) \u0026ldquo;number\u0026rdquo;: population size (fixed) Transition I to R:\n\\begin{equation} \\frac{I}{D} \\end{equation}\n\\(I\\) is the number of infectious people, and \\(\\frac{1}{D}\\) is the number of people that recover/remove per day (i.e. because the duration is \\(D\\).)\nTransition from S to I:\n\\begin{equation} I \\frac{R_0}{D} \\frac{S}{N} \\end{equation}\nSo for \\(\\frac{R_0}{D}\\) is the number of people able to infect per day, \\(\\frac{S}{N}\\) is the percentage of population that\u0026rsquo;s able to infect, and \\(I\\) are the number of people doing the infecting.\nAnd so therefore\u0026mdash;\n\\(\\dv{S}{T} = -\\frac{SIR_{0}}{DN}\\) \\(\\dv{I}{T} = \\frac{SIR_{0}}{DN}\\) \\(\\dv{I}{T} = \\frac{I}{D}\\) Evolutionary Game Theory Suppose that we have two strategies, \\(A\\) and \\(B\\), and they have some payoff matrix:\nA B A (a,a) (b,c) B (c,b) (d,d) and we have some values:\n\\begin{equation} \\mqty(x_{a} \\\\x_{b}) \\end{equation}\nare the relative abundances (i.e. 
that \\(xa+xb\\)).\nThe finesses (\u0026ldquo;how much are you going to reproduce\u0026rdquo;) of the strategies are determined by\u0026mdash;\n\\(f_{A}(x_{A}, x_{B}) = ax_{A} + bx_{B}\\) \\(f_{B}(x_{A}, x_{B}) = cx_{A} + dx_{B}\\) Except for payoff constants \\((a,b,c,d)\\), everything else is a function of time.\nThe mean fitness, then:\n\\begin{equation} q = x_{A}f_{A} + x_{B}f_{B} \\end{equation}\nLet\u0026rsquo;s have the actual, absolute number of individuals:\n\\begin{equation} \\mqty(N_{A}\\\\ N_{B}) \\end{equation}\nSo, we can talk about the change is individuals using strategy \\(A\\):\n\\begin{equation} \\dv t x_{A} = \\dv t \\frac{N_{A}}{N} = X_{A}(f_{a}) \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsir_model/\"\u003eSIR Model\u003c/a\u003e is a model to show how diseases spread.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSusceptible \u0026ndash; # of susceptible people\u003c/li\u003e\n\u003cli\u003eInfectious \u0026mdash; # of infectious people\u003c/li\u003e\n\u003cli\u003eRemoved \u0026mdash; # of removed people\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"compartmental-sir-model\"\u003eCompartmental SIR model\u003c/h2\u003e\n\u003cp\u003eS =\u0026gt; I =\u0026gt; R [ =\u0026gt; S]\u003c/p\u003e\n\u003cp\u003eSo then, the question is: what is the transfer rate between populations between these compartments?\u003c/p\u003e\n\u003cp\u003eParameters:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(R_0\\) \u0026ldquo;reproductive rate\u0026rdquo;: the number of people that one infectious person will infect over the duration of their entire infectious period, if the rest of the population is entirely susceptible (only appropriate for a short duration)\u003c/li\u003e\n\u003cli\u003e\\(D\\) \u0026ldquo;duration\u0026rdquo;: duration of the infectious period\u003c/li\u003e\n\u003cli\u003e\\(N\\) \u0026ldquo;number\u0026rdquo;: population size (fixed)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eTransition I to 
R:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{I}{D}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(I\\) is the number of infectious people, and \\(\\frac{1}{D}\\) is the number of people that recover/remove per day (i.e. because the duration is \\(D\\).)\u003c/p\u003e\n\u003cp\u003eTransition from S to I:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI \\frac{R_0}{D} \\frac{S}{N}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo for \\(\\frac{R_0}{D}\\) is the number of people able to infect per day, \\(\\frac{S}{N}\\) is the percentage of population that\u0026rsquo;s able to infect, and \\(I\\) are the number of people doing the infecting.\u003c/p\u003e\n\u003cp\u003eAnd so therefore\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\dv{S}{T} = -\\frac{SIR_{0}}{DN}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\dv{I}{T} = \\frac{SIR_{0}}{DN}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\dv{I}{T} = \\frac{I}{D}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"evolutionary-game-theory\"\u003eEvolutionary Game Theory\u003c/h2\u003e\n\u003cp\u003eSuppose that we have two strategies, \\(A\\) and \\(B\\), and they have some payoff matrix:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003eA\u003c/th\u003e\n\u003cth\u003eB\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eA\u003c/td\u003e\n\u003ctd\u003e(a,a)\u003c/td\u003e\n\u003ctd\u003e(b,c)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eB\u003c/td\u003e\n\u003ctd\u003e(c,b)\u003c/td\u003e\n\u003ctd\u003e(d,d)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eand we have some values:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(x_{a} \\\\x_{b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eare the relative abundances (i.e. 
that \\(xa+xb\\)).\u003c/p\u003e\n\u003cp\u003eThe finesses (\u0026ldquo;how much are you going to reproduce\u0026rdquo;) of the strategies are determined by\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(f_{A}(x_{A}, x_{B}) = ax_{A} + bx_{B}\\)\u003c/li\u003e\n\u003cli\u003e\\(f_{B}(x_{A}, x_{B}) = cx_{A} + dx_{B}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eExcept for payoff constants \\((a,b,c,d)\\), everything else is a function of time.\u003c/p\u003e\n\u003cp\u003eThe mean fitness, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq = x_{A}f_{A} + x_{B}f_{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s have the actual, absolute number of individuals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(N_{A}\\\\ N_{B})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, we can talk about the change is individuals using strategy \\(A\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t x_{A} = \\dv t \\frac{N_{A}}{N} = X_{A}(f_{a})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsir_model/","tags":null,"title":"SIR Model"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhslopes/","tags":null,"title":"slope (statistics)"},{"categories":null,"contents":"HSVI\nOne-Liner \u0026ldquo;impact of approximation decreases as steps from the root node\u0026rdquo;\nNovelty combined alpha-vector and forward heuristics to guide search of belief states before backup 100x times faster in PBVI scales to huge environments Goal: minimize \u0026ldquo;regret\u0026rdquo; (difference until optimal policy)\nNovelty HSVI 2 Projected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection) uses blind lower bound Notable Methods Key Figs New Concepts Notes ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;impact of approximation 
decreases as steps from the root node\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecombined alpha-vector and forward heuristics to guide search of belief states before backup\u003c/li\u003e\n\u003cli\u003e100x times faster in \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003escales to \u003cem\u003ehuge\u003c/em\u003e environments\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eGoal: minimize \u0026ldquo;regret\u0026rdquo; (difference until optimal policy)\u003c/p\u003e\n\u003ch2 id=\"novelty-hsvi-2\"\u003eNovelty HSVI 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eProjected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection)\u003c/li\u003e\n\u003cli\u003euses \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsmith/","tags":null,"title":"Smith 20??"},{"categories":null,"contents":"a function is called smoo\n","html":"\u003cp\u003ea function is called smoo\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsmooth_function/","tags":null,"title":"smooth function"},{"categories":null,"contents":"A Social Network is a scheme for studying the relationships and interactions amongst groups of people.\npeople: \\(V\\) relationship: \\(E\\) system: a network \\(G(V,E)\\) Importantly, the \u0026ldquo;labels\u0026rdquo; of \\(E\\) often do not matter as we frequently want to study only the graphical structure of the Social Network.\ndegree (node) 
The degree of a node is the number of edges that are touching that node (whether in or out, or undirected).\nThe in-degree and out-degree are the number of edges touching that node (going in or out) respectively.\ndegree of node many nodes on the internet have fairly low degree, whereas some hubs have very high degree. Consider a function \\(P(k)\\), representing the number of nodes with degree \\(k\\). This follows a power law:\n\\begin{equation} P(k) \\propto k^{-a} \\end{equation}\nmeaning:\n\\begin{equation} P(k) = ck^{-a} \\end{equation}\nwhereby as degree increases, the percentage of nodes with that number of degree drops of exponentially.\nA power law distribution is log-log linear, and is \u0026ldquo;scale free\u0026rdquo;: meaning no matter how the input \\(x\\) is scaled its simply resulting in a multiplicative constant under the output: shape does NOT change.\nZipf\u0026rsquo;s Law \\begin{equation} freq(w_{r}) \\prop \\frac{1}{r^{\\beta}} \\end{equation}\nwhere \\(\\beta\\) is close to \\(1\\) and \\(w_{r}\\) is the r-th most frequent word.\nbetweenness the betweenness of a target node is calculated as: for all pairs of nodes on the graph that is not our target node, what\u0026rsquo;s the ratio between the number of shortest paths between the two nodes and the number that goes through \\(j\\).\nFormally:\nfor some node \\(j\\) for which we want to calculate betweenness, and \\(s_{ik}(j)\\) being the number of shortest paths between \\(i\\) and \\(k\\) that goes through \\(j\\) and \\(s_{ik}\\) being the number of shortest paths there are in general, we have:\n\\begin{equation} b_{j} = \\frac{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}(j)}{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}} \\end{equation}\nwhere \\(i \\neq j \\neq k\\)\nRecall that with directed graphs we may need to double count.\nclustering coefficient for some node \\(A\\), the clustering coefficient measures the percentage of nodes directly adjacent to \\(A\\) which are also directly adjacent with 
each other.\nrecall that, if a node has \\(n\\) friends, the total possible edges is \\(\\frac{n\\qty(n-1)}{2}\\).\nMilgram Small-World experiment made 300 people in Omaha NE to mail a thing to somebody in Boston by passing it through only people they knew by first-name basis.\nSmall World Graph The world is a Small World Graph: networks of friends is large, sparse, decentralized, and extremely clustered. Yet, people mostly seem to be about 5-6 degrees of separation away.\nThis characterizes a Small World Graph:\nhigh clustering coefficient low average shortest path Watts and Strogatz Watts and Strogatz proposes a way to build a Small World Graph:\nstart with a ring and connect each node to the next \\(z\\) nodes with probability \\(p\\) on each node, rewire every edge/add a shortcut to a random node as long as \\(0 \u0026lt; p \u0026lt; 1\\), we get a Small World Graph\nintuition: a single random connection builds a shortcut through highly centralized clusters\u0026mdash;high \\(C\\), low \\(L\\).\nweak link most job referrals were through personal contacts, but they are often WEAK LINKS.\nTriadic Closure If two people have a common friend, its likely that they become friends eventually too. This increases cluster coefficient.\nStrong Triadic Closure If there is a strong tie between \\(A - B\\), and \\(B - C\\), then there must be a strong tie between \\(A - C\\).\nIf this property is satisfied, then any Local Bridge must be a weak tie. Otherwise:\nif there is a strong \\(A-B\\) tie and it is a local bridge, then \\(C-B\\) must be a connection under Strong Triadic Closure. yet, \\(A-B\\) is a local bridge.\nBy contradiction, \\(A-B\\) is a weak tie.\nLocal Bridge A Local Bridge is an edge \\(x-y\\) which bridges two \u0026ldquo;local components\u0026rdquo; together. 
More formally, an edge between \\(A,B\\) is a Local Bridge if it does not live on any triangle of \\(A\\) or \\(B\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhsocial_network/\"\u003eSocial Network\u003c/a\u003e is a scheme for studying the relationships and interactions amongst groups of people.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003epeople\u003c/strong\u003e: \\(V\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003erelationship\u003c/strong\u003e: \\(E\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esystem\u003c/strong\u003e: a network \\(G(V,E)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eImportantly, the \u0026ldquo;labels\u0026rdquo; of \\(E\\) often do not matter as we frequently want to study only the graphical structure of the \u003ca href=\"/posts/kbhsocial_network/\"\u003eSocial Network\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"degree--node\"\u003edegree (node)\u003c/h2\u003e\n\u003cp\u003eThe degree of a node is the number of edges that are touching that node (whether in or out, or undirected).\u003c/p\u003e\n\u003cp\u003eThe in-degree and out-degree are the number of edges touching that node (going in or out) respectively.\u003c/p\u003e\n\u003ch3 id=\"degree-of-node\"\u003edegree of node\u003c/h3\u003e\n\u003cp\u003emany nodes on the internet have fairly low degree, whereas some hubs have very high degree. Consider a function \\(P(k)\\), representing the number of nodes with degree \\(k\\). 
This follows a power law:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k) \\propto k^{-a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k) = ck^{-a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby as degree increases, the percentage of nodes with that number of degree drops of exponentially.\u003c/p\u003e\n\u003cp\u003eA power law distribution is log-log linear, and is \u0026ldquo;scale free\u0026rdquo;: meaning no matter how the input \\(x\\) is scaled its simply resulting in a multiplicative constant under the output: shape does NOT change.\u003c/p\u003e\n\u003ch3 id=\"zipf-s-law\"\u003eZipf\u0026rsquo;s Law\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nfreq(w_{r}) \\prop \\frac{1}{r^{\\beta}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\beta\\) is close to \\(1\\) and \\(w_{r}\\) is the r-th most frequent word.\u003c/p\u003e\n\u003ch2 id=\"betweenness\"\u003ebetweenness\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"#betweenness\"\u003ebetweenness\u003c/a\u003e of a target node is calculated as: for all pairs of nodes on the graph that is not our target node, what\u0026rsquo;s the ratio between the number of shortest paths between the two nodes and the number that goes through \\(j\\).\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003efor some node \\(j\\) for which we want to calculate \u003ca href=\"#betweenness\"\u003ebetweenness\u003c/a\u003e, and \\(s_{ik}(j)\\) being the number of shortest paths between \\(i\\) and \\(k\\) that goes through \\(j\\) and \\(s_{ik}\\) being the number of shortest paths there are in general, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{j} = \\frac{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}(j)}{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(i \\neq j \\neq k\\)\u003c/p\u003e\n\u003cp\u003eRecall that with directed graphs we may need to double count.\u003c/p\u003e\n\u003ch2 
id=\"clustering-coefficient\"\u003eclustering coefficient\u003c/h2\u003e\n\u003cp\u003efor some node \\(A\\), the \u003ca href=\"#clustering-coefficient\"\u003eclustering coefficient\u003c/a\u003e measures the percentage of nodes directly adjacent to \\(A\\) which are also directly adjacent with each other.\u003c/p\u003e\n\u003cp\u003erecall that, if a node has \\(n\\) friends, the total possible edges is \\(\\frac{n\\qty(n-1)}{2}\\).\u003c/p\u003e\n\u003ch2 id=\"milgram-small-world-experiment\"\u003eMilgram Small-World experiment\u003c/h2\u003e\n\u003cp\u003emade 300 people in Omaha NE to mail a thing to somebody in Boston by passing it through only people they knew by first-name basis.\u003c/p\u003e\n\u003ch2 id=\"small-world-graph\"\u003eSmall World Graph\u003c/h2\u003e\n\u003cp\u003eThe world is a \u003ca href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e: networks of friends is large, sparse, decentralized, and extremely clustered. Yet, people mostly seem to be about 5-6 degrees of separation away.\u003c/p\u003e\n\u003cp\u003eThis characterizes a \u003ca href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehigh \u003ca href=\"#clustering-coefficient\"\u003eclustering coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elow average shortest path\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"watts-and-strogatz\"\u003eWatts and Strogatz\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#watts-and-strogatz\"\u003eWatts and Strogatz\u003c/a\u003e proposes a way to build a \u003ca href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart with a ring and connect each node to the next \\(z\\) nodes\u003c/li\u003e\n\u003cli\u003ewith probability \\(p\\) on each node, rewire every edge/add a shortcut to a random node\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eas long as \\(0 \u0026lt; p \u0026lt; 1\\), we get a \u003ca 
href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eintuition\u003c/strong\u003e: a single random connection builds a shortcut through highly centralized clusters\u0026mdash;high \\(C\\), low \\(L\\).\u003c/p\u003e\n\u003ch2 id=\"weak-link\"\u003eweak link\u003c/h2\u003e\n\u003cp\u003emost job referrals were through personal contacts, but they are often \u003cstrong\u003eWEAK LINKS\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"triadic-closure\"\u003eTriadic Closure\u003c/h3\u003e\n\u003cp\u003eIf two people have a common friend, its likely that they become friends eventually too. This increases cluster coefficient.\u003c/p\u003e\n\u003ch4 id=\"strong-triadic-closure\"\u003eStrong Triadic Closure\u003c/h4\u003e\n\u003cp\u003eIf there is a strong tie between \\(A - B\\), and \\(B - C\\), then there must be a strong tie between \\(A - C\\).\u003c/p\u003e\n\u003cp\u003eIf this property is satisfied, then any \u003ca href=\"#local-bridge\"\u003eLocal Bridge\u003c/a\u003e must be a weak tie. Otherwise:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-06_20-12-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eif there is a strong \\(A-B\\) tie and it is a local bridge, then \\(C-B\\) must be a connection under \u003ca href=\"#strong-triadic-closure\"\u003eStrong Triadic Closure\u003c/a\u003e. yet, \\(A-B\\) is a local bridge.\u003c/p\u003e\n\u003cp\u003eBy contradiction, \\(A-B\\) is a weak tie.\u003c/p\u003e\n\u003ch3 id=\"local-bridge\"\u003eLocal Bridge\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#local-bridge\"\u003eLocal Bridge\u003c/a\u003e is an edge \\(x-y\\) which bridges two \u0026ldquo;local components\u0026rdquo; together. 
More formally, an edge between \\(A,B\\) is a \u003ca href=\"#local-bridge\"\u003eLocal Bridge\u003c/a\u003e if it does not live on any triangle of \\(A\\) or \\(B\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsocial_network/","tags":null,"title":"Social Network"},{"categories":null,"contents":"Social Security Administration is a welfare program to directly give cash to those who are in need.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsocial_security_administration/\"\u003eSocial Security Administration\u003c/a\u003e is a welfare program to directly give cash to those who are in need.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsocial_security_administration/","tags":null,"title":"Social Security Administration"},{"categories":null,"contents":":clap: What. Does. The. Client. Want.\nWeb Applications vs Local Application scale\u0026mdash;what levels of functionality and access do we want training speed SOLID principles SOLID principles is a set of OOP principles; its kinda famous but encourages mindless braindead Java devs.\nSingle Responsibility: that a class should have only one clearly defined thing it represents, and the class should only change IFF the underlying spec regarding that thing changes Easy pitfalls: mixing PERSISTENCE LOGIC with BUSINESS LOGIC (db should be moved to a separate class like ThingProvider/ThingPersistence) Open-Close Principle: classes should be easily extendable and closed to modification \u0026ldquo;we should be able to add new functionality without touching what\u0026rsquo;s written\u0026rdquo; so like interfaces are nice Liskov Substitution Principle: subclasses should act like base classes (and more); good inheritance systems should have this built in Interface Segregation Principle: you should build lots of interfaces + sub-interfaces based on what clients are and will need, such that a client only has to extend precisely the amount needed to do their job Dependency Inversion Principle: 
when possible, depend on abstract classes or interfaces and not their implementations Dependency Injection \u0026ldquo;Dependency Injection\u0026rdquo; is a 25-dollar term for a 5-cent concept. [\u0026hellip;] Dependency injection means giving an object its instance variables. [\u0026hellip;].\nBlame this for all the fucking Factory classes.\nBasically having a factory (or just a fancy-enough constructor) to give a class all the right instantiations of the things it needs instead of having the class construct them inside.\nYou do this because 1) the class can then depend on more abstract interfaces 2) you can test shit easier by constructing all the necessary parts\nShared-nothing architecture A shared-nothing architecture is a type of distributed computing software architecture which ensures that no single shard shares/overlaps resources with others (i.e. needing shared memory, etc.)\nSo no mutexes; and no single-points of failure (i.e. we don\u0026rsquo;t dependent on a central node always working).\n","html":"\u003cp\u003e:clap: What. Does. The. Client. 
Want.\u003c/p\u003e\n\u003ch2 id=\"web-applications-vs-local-application\"\u003eWeb Applications vs Local Application\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003escale\u0026mdash;what levels of functionality and access do we want\u003c/li\u003e\n\u003cli\u003etraining\u003c/li\u003e\n\u003cli\u003espeed\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"solid-principles\"\u003eSOLID principles\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#solid-principles\"\u003eSOLID principles\u003c/a\u003e is a set of OOP principles; its kinda famous but encourages mindless braindead Java devs.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eSingle Responsibility\u003c/strong\u003e: that a class should have only one clearly defined thing it represents, and the class should only change IFF the underlying spec regarding that thing changes\n\u003cul\u003e\n\u003cli\u003eEasy pitfalls: mixing PERSISTENCE LOGIC with BUSINESS LOGIC (db should be moved to a separate class like ThingProvider/ThingPersistence)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eOpen-Close Principle\u003c/strong\u003e: classes should be easily extendable and closed to modification\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;we should be able to add new functionality without touching what\u0026rsquo;s written\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eso like interfaces are nice\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLiskov Substitution Principle\u003c/strong\u003e: subclasses should act like base classes (and more); good inheritance systems should have this built in\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eInterface Segregation Principle\u003c/strong\u003e: you should build lots of interfaces + sub-interfaces based on what clients are and will need, such that a client only has to extend precisely the amount needed to do their job\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDependency Inversion 
Principle\u003c/strong\u003e: when possible, depend on abstract classes or interfaces and not their implementations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dependency-injection\"\u003eDependency Injection\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003e\u0026ldquo;Dependency Injection\u0026rdquo; is a 25-dollar term for a 5-cent concept. [\u0026hellip;] Dependency injection means giving an object its instance variables. [\u0026hellip;].\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003eBlame this for all the fucking Factory classes.\u003c/p\u003e\n\u003cp\u003eBasically having a factory (or just a fancy-enough constructor) to give a class all the right instantiations of the things it needs instead of having the class construct them inside.\u003c/p\u003e\n\u003cp\u003eYou do this because 1) the class can then depend on more abstract interfaces 2) you can test shit easier by constructing all the necessary parts\u003c/p\u003e\n\u003ch2 id=\"shared-nothing-architecture\"\u003eShared-nothing architecture\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#shared-nothing-architecture\"\u003eshared-nothing architecture\u003c/a\u003e is a type of distributed computing software architecture which ensures that no single shard shares/overlaps resources with others (i.e. needing shared memory, etc.)\u003c/p\u003e\n\u003cp\u003eSo no mutexes; and no single-points of failure (i.e. we don\u0026rsquo;t dependent on a central node always working).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_design_and_architecture_patterns/","tags":null,"title":"Software Design and Architecture Patterns"},{"categories":null,"contents":"Here\u0026rsquo;s a bit of a guide to start in software development. It is mostly links to other resources that would help.\nIntroductory Remarks Nobody \u0026ldquo;learns\u0026rdquo; software development. Even in job interviews, people expect you to have \u0026ldquo;worked\u0026rdquo; in software development. 
The industry, as a whole, drives via \u0026ldquo;learn-by-doing\u0026rdquo;, so its best to start thinking about what you want to achieve with software dev in terms of projects, then look specifically for resources to help you achieve those. Once you Google enough, et viola! You will have the skills needed to tackle another project.\nCommon Tooling There are some common tooling that is standard across all of software development.\nGoogle Google it! 99.98% of programming skills center around google-fu. Learn to Google unknown terms and get a better sense of the picture. The same rule applies through this guide as well.\nStackExchange A group of very mean people put together a very helpful family of websites which are essentially vicious forum boards. They are the StackExchange family of boards.\nThe most famous of which, and the one focused on programming, is called StackOverflow. StackOverflow (\u0026ldquo;SO\u0026rdquo;) is an extremely helpful resource for browsing any question you may have. For instance, if your code crashes with a stack trace, Googling the error and set site:stackoverflow.com will get you pretty far.\nIf you ask a question, though, be prepared to get yelled at though, the likely reason is that your question is already answered.\nmacOS For the quick-start type of hardware fitting for this guide: get a macBook. Even the cheapest one.\nDevelopment on Windows is like cooking on campfire. Doable, useful for specific things, but not great overall. If you have a PC, I would (and recommend! its great for advanced users especially) to put Debian/Ubuntu/some easy to use Linux on it. Windows is just terrible.\nI should add that Microsoft started doing Windows Subsystem for Linux: https://docs.microsoft.com/en-us/windows/wsl/install, which apparently have been pretty good. 
So worth taking a shot if you are stuck on Windows.\n*nix Terminal BSD/UNIX terminal is a tool that essentially skips the fancy user interface (UI) which your operating system draws and directly runs things \u0026ldquo;organically.\u0026rdquo; If you see something in a guide that says like:\n\u0026ldquo;please execute\u0026rdquo;\npython3 test.py or perhaps\nwget https://wandb.ai/jemoka \u0026gt;\u0026gt; test they are probably asking you to type it (\u0026ldquo;execute it\u0026rdquo;) into the Terminal and hit enter.\nRead this guide put together by the Ubuntu people, it\u0026rsquo;s very good. To open the terminal on your macOS device, open an app called Terminal.app. On Ubuntu, I believe its also an app called terminal.\nIDE An \u0026ldquo;IDE\u0026rdquo; is an Integrated Development Environment. It is where code is written. Fortunately, this is an easy one: use VSCode. There is literally no better tool out there for beginners and advanced users; no wonder it has 70% market share.\nSidenote: But Jack? What do you use? I use something called emacs for very specific reasons. Please don\u0026rsquo;t unless you really want misery and to learn a whole language to configure it.\nComputer Language Architecture This is how an idea turns into \u0026ldquo;stuff\u0026rdquo; on your screen.\nHuman programming languages (\u0026ldquo;Python\u0026rdquo;), are a very readable sort of code. No computers can actually read it. 
Usually, code you write goes through a three-step process before its able to be ran.\nFirst, the language you write gets converted by a \u0026ldquo;compiler\u0026rdquo; or \u0026ldquo;interpreter\u0026rdquo;, specialized pieces of code that takes human programming languages into a more machine-readable form of code named \u0026ldquo;assembly\u0026rdquo; or \u0026ldquo;byte code\u0026rdquo; respectively, called the \u0026ldquo;intermediate\u0026rdquo;.\nFor now, think of the difference between compilers and interpreters as translating code either all-at-once (compilers) or line-by-line (interpreters). Because the former has a grander view of the whole, languages that use a compiler (\u0026ldquo;compiled languages\u0026rdquo;) are faster. Although, many programmers find languages that use in interpreter (\u0026ldquo;interpreted language\u0026rdquo;) easier because they can spot problems line by line.\nBut wait! There\u0026rsquo;s more. Assembly and byte-code (what compilers and interpreters generate) are not actually runnable by a computer. Yet another piece of software called a \u0026ldquo;runtime\u0026rdquo; takes the reasonably-machine-readable code and actually performs the required operations.\nSome runtimes for languages like C++ uses the raw x86 CPU, which is the stereotypical \u0026ldquo;binary\u0026rdquo; zeros-and-ones. Some other languages, say Java, uses horribly complex runtimes that amounts to a whole virtual machine.\nHere\u0026rsquo;s a bit of a table.\nLanguage C/I Compiler/Interpreter Intermediate Runtime Python I python python bytecode python Java C javac java object java VM JavaScript I V8 (literally) js bytecode web browser! C/C++ C gcc/clang x86 asm x86 cpu Wonder what the runtimes for languages like Java are built in? C/C++. Eventually it all becomes x86 cpu instructions but its like a layer cake. 
This is why Python and friends are called a \u0026ldquo;higher level language\u0026rdquo;.\ngit Git is where all the code is!\nGit is a decentralized \u0026ldquo;version-control\u0026rdquo; system. It is basically a timestamp-backup system of code with messages and branching.\nGitHub is a website where people like to back up their code. Here\u0026rsquo;s my profile on GitHub.\nManaging Git is pretty\u0026hellip; Involved. It, for instance, assumes familiarity with the Terminal as described above. I suggest learning it, though. Here are some good resources:\nMorgan\u0026rsquo;s very very good git tutorial\u0026hellip; On GitHub! And this article on some commands you should know! Industry-Specific Skills What you start with doesn\u0026rsquo;t matter, but start with something Its easiest to learn programming if you have a project in mind. So, find a project in mind\u0026mdash;what it is, though, doesn\u0026rsquo;t matter. The concepts across programming are highly transferable, but the actual skill is easiest to learn if you are learning w.r.t. a project.\nData science, prototyping, and machine learning Python would be your friend for all things of programming where the act of programming is a means to an end. That is: if you are writing code to do something that\u0026rsquo;s not inherently software (data science, machine learning, heck, also manipulating quantum qubits), Python is your friend.\nIts a language that\u0026rsquo;s designed to be easy to write: is a very do-as-I-say language that sacrifices efficiency and elegance for getting crap done. This is how I started programming. This is the book I started with. It teaches Python through programming a series of small projects that are mostly Terminal games.\nTo learn data science, Nueva\u0026rsquo;s very own data science course give very good conceptual framework. A typical first project is to recognize pictures of handwritten digits, for which there is a good guide. 
I also started something called AIBridge with AIFS, so if we ever publish the recordings I will put them here.\nGoogle also: pip, ipython, Jupyter.\nBackend engineering Backend engineering is the science of dealing with databases and writing API (application programming interfaces). I don\u0026rsquo;t suggest starting with this, but if you are particularly interested in databases, you could!\nTo master backend engineering, first learn a database manipulation language. For 99.98% of the industry, this means mysql. The link directs to a pretty good guide.\nFurthermore, the language with which backend is written is Java. I hate to say it, but despite Java\u0026rsquo;s terribleness (don\u0026rsquo;t worry about it ;)) its very dependable. Here\u0026rsquo;s a book on Java. In general, I really like all of the books from no starch press.\nFrontend and Web engineering Do you like making stuff move? Do you like drawing buttons? Front end maybe for you. The most basic type of front-end engineering is making websites.\nStart by making a \u0026ldquo;vanilla website\u0026rdquo;: HTML (what\u0026rsquo;s on the page), CSS (what colours and sizes), JavaScript (how it moves) is the standard trio of languages to start. freeCodeCamp (a great Medium blog, check their stuff out) has a good guide on the matter.\nHowever, as you progress in your journey, you will find these tools woefully inadequate. Hence, most people writing web front end move on to something called a \u0026ldquo;JavaScript Framework\u0026rdquo;, a tool to generate a \u0026ldquo;vanilla\u0026rdquo; website from some more easily manipulable JS (changing the text on the page moves from a four line operation (indexing, selecting, grabbing, changing) to a one-liner (state.text=new text)).\nA popular JS framework is ReactJS. Check them out.\nFullstack Engineering Frontend + Backend.\nGame development Game development is honestly one of the most horribly complicated and richly science-y part of CS. 
I am not super experience in game development but learning C++ and mastering Unity, the game engine. Oh, right, game dev is the only, and I repeat only (with invisible footnotes and qualifications) reason why you should be writing code on Windows.\nA friend is good at game dev, I can make an intro if needed.\nGood Luck! Remember: Google-fu and project-based curiosity is your friend. Let me know if you have questions.\n","html":"\u003cp\u003eHere\u0026rsquo;s a bit of a guide to start in software development. It is mostly links to other resources that would help.\u003c/p\u003e\n\u003ch2 id=\"introductory-remarks\"\u003eIntroductory Remarks\u003c/h2\u003e\n\u003cp\u003eNobody \u0026ldquo;learns\u0026rdquo; software development. Even in job interviews, people expect you to have \u0026ldquo;worked\u0026rdquo; in software development. The industry, as a whole, drives via \u0026ldquo;learn-by-doing\u0026rdquo;, so its best to start thinking about \u003cem\u003ewhat you want to achieve\u003c/em\u003e with software dev in terms of projects, then look specifically for resources to help you achieve those. Once you Google enough, et viola! You will have the skills needed to tackle another project.\u003c/p\u003e\n\u003ch2 id=\"common-tooling\"\u003eCommon Tooling\u003c/h2\u003e\n\u003cp\u003eThere are some common tooling that is standard across all of software development.\u003c/p\u003e\n\u003ch3 id=\"google\"\u003eGoogle\u003c/h3\u003e\n\u003cp\u003eGoogle it! 99.98% of programming skills center around google-fu. Learn to Google unknown terms and get a better sense of the picture. The same rule applies through this guide as well.\u003c/p\u003e\n\u003ch3 id=\"stackexchange\"\u003eStackExchange\u003c/h3\u003e\n\u003cp\u003eA group of very mean people put together a very helpful family of websites which are essentially vicious forum boards. 
They are the StackExchange family of boards.\u003c/p\u003e\n\u003cp\u003eThe most famous of which, and the one focused on programming, is called \u003ca href=\"https://stackoverflow.com/\"\u003eStackOverflow\u003c/a\u003e. StackOverflow (\u0026ldquo;SO\u0026rdquo;) is an \u003cem\u003eextremely\u003c/em\u003e helpful resource for browsing any question you may have. For instance, if your code crashes with a \u003ca href=\"/posts/kbhstack_trace/\"\u003estack trace\u003c/a\u003e, Googling the error and set \u003ccode\u003esite:stackoverflow.com\u003c/code\u003e will get you pretty far.\u003c/p\u003e\n\u003cp\u003eIf you ask a question, though, \u003ca href=\"https://www.reddit.com/r/learnprogramming/comments/7w5bm4/why_on_people_on_stack_overflow_so_rude/\"\u003ebe prepared to get yelled at\u003c/a\u003e though, the likely reason is that your question is already answered.\u003c/p\u003e\n\u003ch3 id=\"macos\"\u003emacOS\u003c/h3\u003e\n\u003cp\u003eFor the quick-start type of hardware fitting for this guide: \u003ca href=\"https://www.apple.com/macbook-air/\"\u003eget a macBook\u003c/a\u003e. Even the cheapest one.\u003c/p\u003e\n\u003cp\u003eDevelopment on Windows is like cooking on campfire. Doable, useful for specific things, but not great overall. If you have a PC, I would (and recommend! its great for advanced users especially) to put \u003ca href=\"https://ubuntu.com/tutorials/install-ubuntu-desktop#1-overview\"\u003eDebian/Ubuntu/some easy to use Linux\u003c/a\u003e on it. Windows is just terrible.\u003c/p\u003e\n\u003cp\u003eI should add that Microsoft started doing Windows Subsystem for Linux: \u003ca href=\"https://docs.microsoft.com/en-us/windows/wsl/install\"\u003ehttps://docs.microsoft.com/en-us/windows/wsl/install\u003c/a\u003e, which apparently have been pretty good. 
So worth taking a shot if you are stuck on Windows.\u003c/p\u003e\n\u003ch3 id=\"nix-terminal\"\u003e*nix Terminal\u003c/h3\u003e\n\u003cp\u003eBSD/UNIX terminal is a tool that essentially skips the fancy user interface (UI) which your operating system draws and directly runs things \u0026ldquo;organically.\u0026rdquo; If you see something in a guide that says like:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;please execute\u0026rdquo;\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epython3 test.py\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eor perhaps\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ewget https://wandb.ai/jemoka \u0026gt;\u0026gt; \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethey are probably asking you to type it (\u0026ldquo;execute it\u0026rdquo;) into the Terminal and hit enter.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://ubuntu.com/tutorials/command-line-for-beginners#1-overview\"\u003eRead this guide\u003c/a\u003e put together by the Ubuntu people, it\u0026rsquo;s very good. To open the terminal on your macOS device, open an app called \u003ccode\u003eTerminal.app\u003c/code\u003e. 
On Ubuntu, I believe its also an app called \u003ccode\u003eterminal\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"ide\"\u003eIDE\u003c/h3\u003e\n\u003cp\u003eAn \u0026ldquo;IDE\u0026rdquo; is an \u003ca href=\"https://en.wikipedia.org/wiki/Integrated_development_environment\"\u003eIntegrated Development Environment\u003c/a\u003e. It is where code is written. Fortunately, this is an easy one: \u003ca href=\"https://code.visualstudio.com/\"\u003euse VSCode\u003c/a\u003e. There is literally no better tool out there for beginners and advanced users; no wonder it has 70% market share.\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003eSidenote: But Jack? What do you use? I use something called \u003ca href=\"https://www.gnu.org/software/emacs/\"\u003eemacs\u003c/a\u003e for very specific reasons. Please don\u0026rsquo;t unless you really want misery and to \u003ca href=\"https://www.gnu.org/software/emacs/manual/html_node/eintr/\"\u003elearn a whole language\u003c/a\u003e to configure it.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"computer-language-architecture\"\u003eComputer Language Architecture\u003c/h3\u003e\n\u003cp\u003eThis is how an idea turns into \u0026ldquo;stuff\u0026rdquo; on your screen.\u003c/p\u003e\n\u003cp\u003eHuman programming languages (\u0026ldquo;Python\u0026rdquo;), are a very readable sort of code. No computers can actually read it. 
Usually, code you write goes through a three-step process before its able to be ran.\u003c/p\u003e\n\u003cp\u003eFirst, the language you write gets converted by a \u0026ldquo;compiler\u0026rdquo; or \u0026ldquo;interpreter\u0026rdquo;, specialized pieces of code that takes human programming languages into a more machine-readable form of code named \u0026ldquo;assembly\u0026rdquo; or \u0026ldquo;byte code\u0026rdquo; respectively, called the \u0026ldquo;intermediate\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor now, think of the difference between compilers and interpreters as translating code either all-at-once (compilers) or line-by-line (interpreters). Because the former has a grander view of the whole, languages that use a compiler (\u0026ldquo;compiled languages\u0026rdquo;) are faster. Although, many programmers find languages that use in interpreter (\u0026ldquo;interpreted language\u0026rdquo;) easier because they can spot problems line by line.\u003c/p\u003e\n\u003cp\u003eBut wait! There\u0026rsquo;s more. Assembly and byte-code (what compilers and interpreters generate) are not actually runnable by a computer. Yet another piece of software called a \u0026ldquo;runtime\u0026rdquo; takes the reasonably-machine-readable code and actually performs the required operations.\u003c/p\u003e\n\u003cp\u003eSome runtimes for languages like C++ uses the raw x86 CPU, which is the stereotypical \u0026ldquo;binary\u0026rdquo; zeros-and-ones. 
Some other languages, say Java, uses horribly complex runtimes that amounts to a whole virtual machine.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a bit of a table.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eLanguage\u003c/th\u003e\n\u003cth\u003eC/I\u003c/th\u003e\n\u003cth\u003eCompiler/Interpreter\u003c/th\u003e\n\u003cth\u003eIntermediate\u003c/th\u003e\n\u003cth\u003eRuntime\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ePython\u003c/td\u003e\n\u003ctd\u003eI\u003c/td\u003e\n\u003ctd\u003epython\u003c/td\u003e\n\u003ctd\u003epython bytecode\u003c/td\u003e\n\u003ctd\u003epython\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJava\u003c/td\u003e\n\u003ctd\u003eC\u003c/td\u003e\n\u003ctd\u003ejavac\u003c/td\u003e\n\u003ctd\u003ejava object\u003c/td\u003e\n\u003ctd\u003ejava VM\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJavaScript\u003c/td\u003e\n\u003ctd\u003eI\u003c/td\u003e\n\u003ctd\u003eV8 (literally)\u003c/td\u003e\n\u003ctd\u003ejs bytecode\u003c/td\u003e\n\u003ctd\u003eweb browser!\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eC/C++\u003c/td\u003e\n\u003ctd\u003eC\u003c/td\u003e\n\u003ctd\u003egcc/clang\u003c/td\u003e\n\u003ctd\u003ex86 asm\u003c/td\u003e\n\u003ctd\u003ex86 cpu\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWonder what the runtimes for languages like Java are built in? C/C++. Eventually it all becomes x86 cpu instructions but its like a layer cake. This is why Python and friends are called a \u0026ldquo;higher level language\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"git\"\u003egit\u003c/h3\u003e\n\u003cp\u003eGit is where all the code is!\u003c/p\u003e\n\u003cp\u003eGit is a decentralized \u0026ldquo;version-control\u0026rdquo; system. 
It is basically a timestamp-backup system of code with messages and branching.\u003c/p\u003e\n\u003cp\u003eGitHub is a website where people like to back up their code. \u003ca href=\"https://github.com/Jemoka/\"\u003eHere\u0026rsquo;s my profile on GitHub.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eManaging Git is pretty\u0026hellip; Involved. It, for instance, assumes familiarity with the Terminal as described above. I suggest learning it, though. Here are some good resources:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMorgan\u0026rsquo;s \u003ca href=\"https://github.com/morgansierrasnyder/git-going\"\u003every very good\u003c/a\u003e git tutorial\u0026hellip; On GitHub!\u003c/li\u003e\n\u003cli\u003eAnd \u003ca href=\"https://www.freecodecamp.org/news/10-important-git-commands-that-every-developer-should-know/\"\u003ethis article\u003c/a\u003e on some commands you should know!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"industry-specific-skills\"\u003eIndustry-Specific Skills\u003c/h2\u003e\n\u003ch3 id=\"what-you-start-with-doesn-t-matter-but-start-with-something\"\u003eWhat you start with doesn\u0026rsquo;t matter, but start with something\u003c/h3\u003e\n\u003cp\u003eIts easiest to learn programming if you have a project in mind. So, find a project in mind\u0026mdash;what it is, though, \u003cem\u003edoesn\u0026rsquo;t matter.\u003c/em\u003e The concepts across programming are highly transferable, but the actual skill is easiest to learn if you are learning w.r.t. a project.\u003c/p\u003e\n\u003ch3 id=\"data-science-prototyping-and-machine-learning\"\u003eData science, prototyping, and machine learning\u003c/h3\u003e\n\u003cp\u003ePython would be your friend for all things of programming where the act of programming is a means to an end. 
That is: if you are writing code to \u003cem\u003edo something\u003c/em\u003e that\u0026rsquo;s not inherently software (data science, machine learning, heck, also manipulating quantum qubits), Python is your friend.\u003c/p\u003e\n\u003cp\u003eIts a language that\u0026rsquo;s designed to be easy to write: is a very do-as-I-say language that sacrifices efficiency and elegance for getting crap done. This is how I started programming. \u003ca href=\"https://www.amazon.com/Python-Programming-Absolute-Beginner-3rd/dp/1435455002\"\u003eThis is the book I started with.\u003c/a\u003e It teaches Python through programming a series of small projects that are mostly Terminal games.\u003c/p\u003e\n\u003cp\u003eTo learn data science, \u003ca href=\"https://jennselby.github.io/MachineLearningCourseNotes/\"\u003eNueva\u0026rsquo;s very own data science course\u003c/a\u003e give very good conceptual framework. A typical first project is to recognize pictures of handwritten digits, \u003ca href=\"https://towardsdatascience.com/handwritten-digit-mnist-pytorch-977b5338e627\"\u003efor which there is a good guide\u003c/a\u003e. I also started something called \u003ca href=\"/posts/kbhaibridge_course_website/\"\u003eAIBridge\u003c/a\u003e with \u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e, so if we ever publish the recordings I will put them here.\u003c/p\u003e\n\u003cp\u003eGoogle also: pip, ipython, Jupyter.\u003c/p\u003e\n\u003ch3 id=\"backend-engineering\"\u003eBackend engineering\u003c/h3\u003e\n\u003cp\u003eBackend engineering is the science of dealing with databases and writing API (application programming interfaces). I don\u0026rsquo;t suggest starting with this, but if you are particularly interested in databases, you could!\u003c/p\u003e\n\u003cp\u003eTo master backend engineering, first learn a database manipulation language. For 99.98% of the industry, this means \u003ca href=\"https://www.mysqltutorial.org/getting-started-with-mysql/\"\u003emysql\u003c/a\u003e. 
The link directs to a pretty good guide.\u003c/p\u003e\n\u003cp\u003eFurthermore, the language with which backend is written is Java. I hate to say it, but despite Java\u0026rsquo;s terribleness (don\u0026rsquo;t worry about it ;)) its very dependable. \u003ca href=\"https://nostarch.com/learnjava\"\u003eHere\u0026rsquo;s a book\u003c/a\u003e on Java. In general, I really like all of the books from no starch press.\u003c/p\u003e\n\u003ch3 id=\"frontend-and-web-engineering\"\u003eFrontend and Web engineering\u003c/h3\u003e\n\u003cp\u003eDo you like making stuff move? Do you like drawing buttons? Front end maybe for you. The most basic type of front-end engineering is making websites.\u003c/p\u003e\n\u003cp\u003eStart by making a \u0026ldquo;vanilla website\u0026rdquo;: HTML (what\u0026rsquo;s on the page), CSS (what colours and sizes), JavaScript (how it moves) is the standard trio of languages to start. freeCodeCamp (a great Medium blog, check their stuff out) has a \u003ca href=\"https://www.freecodecamp.org/news/html-css-and-javascript-explained-for-beginners/\"\u003egood guide\u003c/a\u003e on the matter.\u003c/p\u003e\n\u003cp\u003eHowever, as you progress in your journey, you will find these tools woefully inadequate. Hence, most people writing web front end move on to something called a \u0026ldquo;JavaScript Framework\u0026rdquo;, a tool to generate a \u0026ldquo;vanilla\u0026rdquo; website from some more easily manipulable JS (changing the text on the page moves from a four line operation (indexing, selecting, grabbing, changing) to a one-liner (state.text=new text)).\u003c/p\u003e\n\u003cp\u003eA popular JS framework is \u003ca href=\"https://reactjs.org/tutorial/tutorial.html\"\u003eReactJS\u003c/a\u003e. 
Check them out.\u003c/p\u003e\n\u003ch3 id=\"fullstack-engineering\"\u003eFullstack Engineering\u003c/h3\u003e\n\u003cp\u003eFrontend + Backend.\u003c/p\u003e\n\u003ch3 id=\"game-development\"\u003eGame development\u003c/h3\u003e\n\u003cp\u003eGame development is honestly one of the most horribly complicated and richly science-y part of CS. I am not super experience in game development but learning C++ and mastering \u003ca href=\"https://docs.unity3d.com/560/Documentation/Manual/UnityBasics.html\"\u003eUnity\u003c/a\u003e, the game engine. Oh, right, game dev is the \u003cem\u003eonly\u003c/em\u003e, and I repeat \u003cem\u003eonly\u003c/em\u003e (with invisible footnotes and qualifications) reason why you should be writing code on Windows.\u003c/p\u003e\n\u003cp\u003eA friend \u003ca href=\"https://github.com/Radbuglet/\"\u003eis good at game dev\u003c/a\u003e, I can make an intro if needed.\u003c/p\u003e\n\u003ch2 id=\"good-luck\"\u003eGood Luck!\u003c/h2\u003e\n\u003cp\u003eRemember: Google-fu and project-based curiosity is your friend. Let me know if you have questions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_dev_starter_pack/","tags":null,"title":"software dev starter pack"},{"categories":null,"contents":"The software development models, or Software Development Life-cycles (SDLCs), are methodologies to approach organizing a software project.\nWaterfall The waterfall specification gets written before any code written. We hand off spec and code directly to tester, and code should behave like spec.\nCode specification exactly Spec does not update Code happens only after stuff is done\nAgile Agile are designed to work with minimum specification before code. 
Spec is updated constantly as code changes and get user feedback.\nSpiral (Software Development) The Spiral model is as SDLC that combines the iterative development approach of Agile and the structure of Waterfall.\nIt focuses on Risk to mitigate it.\nWaterfall style requirements detailing Preliminary design First prototype: scaled down system Second prototype Mitigates strengths, weaknesses, and risks of 1st prototype Augmented requirements that got scaled down during the firts prototype \u0026ldquo;The entire project can be aborted if the risk is deemed too great.\u0026rdquo; Budget Operating cost Repeat until customer likes it Construct final system using the prototype as a spec Other Non-Canonical SDLCs Test-Driven Development See Test-Driven Development\nExtreme Programming TDD + continually integrating code and pair programming to review code\n","html":"\u003cp\u003eThe software development models, or Software Development Life-cycles (\u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eSDLC\u003c/a\u003es), are methodologies to approach organizing a software project.\u003c/p\u003e\n\u003ch2 id=\"waterfall\"\u003eWaterfall\u003c/h2\u003e\n\u003cp\u003eThe waterfall specification gets written before any code written. We hand off spec and code directly to tester, and code should behave like spec.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCode specification exactly\u003c/li\u003e\n\u003cli\u003eSpec does not update\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCode happens only after stuff is done\u003c/p\u003e\n\u003ch2 id=\"agile\"\u003eAgile\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#agile\"\u003eAgile\u003c/a\u003e are designed to work with minimum specification before code. 
Spec is updated constantly as code changes and get user feedback.\u003c/p\u003e\n\u003ch2 id=\"spiral--software-development\"\u003eSpiral (Software Development)\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#spiral--software-development\"\u003eSpiral\u003c/a\u003e model is as \u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eSDLC\u003c/a\u003e that combines the iterative development approach of \u003ca href=\"#agile\"\u003eAgile\u003c/a\u003e and the structure of \u003ca href=\"#waterfall\"\u003eWaterfall\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIt focuses on Risk to mitigate it.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-08_17-30-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eWaterfall style requirements detailing\u003c/li\u003e\n\u003cli\u003ePreliminary design\u003c/li\u003e\n\u003cli\u003eFirst prototype: scaled down system\u003c/li\u003e\n\u003cli\u003eSecond prototype\n\u003col\u003e\n\u003cli\u003eMitigates strengths, weaknesses, and risks of 1st prototype\u003c/li\u003e\n\u003cli\u003eAugmented requirements that got scaled down during the firts prototype\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The entire project can be aborted if the risk is deemed too great.\u0026rdquo;\n\u003col\u003e\n\u003cli\u003eBudget\u003c/li\u003e\n\u003cli\u003eOperating cost\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eRepeat until customer likes it\u003c/li\u003e\n\u003cli\u003eConstruct final system using the prototype as a spec\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"other-non-canonical-sdlcs\"\u003eOther Non-Canonical SDLCs\u003c/h2\u003e\n\u003ch3 id=\"test-driven-development--kbhtesting-dot-md\"\u003e\u003ca href=\"/posts/kbhtesting/#test-driven-development\"\u003eTest-Driven Development\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhtesting/#test-driven-development\"\u003eTest-Driven 
Development\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"extreme-programming\"\u003eExtreme Programming\u003c/h3\u003e\n\u003cp\u003eTDD + continually integrating code and pair programming to review code\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_development_methodologies/","tags":null,"title":"Software Development Methodologies"},{"categories":null,"contents":"process of Engineering: chronological order User Interviews + User Stories Requirements Analysis Documentation and Specification Build the damned thing Project Management and Development Methodology (SDLC) Task Estimation Software Design and Architecture Patterns Testing Build and Release engineering (TODO) Other topics Query optimization (TODO) Fucking acronyms to know AAA Method SOLID principles STAR method: state behaviorals in Situation, Task, Action, Results fundamental trade-off of Software Engineering The MIT vs. New Jersey problem: in Software Engineering, you can only choose one of FAST or ROBUST.\nProblem Fast (\u0026ldquo;Bell Labs/NJ\u0026rdquo;) Robust (\u0026ldquo;MIT\u0026rdquo;) Specs Whatever it looks like screens, states, UI elements documented; transitions Time \u0026ldquo;whenever\u0026rdquo; precise projections, track work and dependencies Testing \u0026ldquo;ran it + didn\u0026rsquo;t crash\u0026rdquo; black, white box, code overage, edge/adv. cases Modular Giant function object/data model, grouped function, abstraction barriers Failure Unpredictable + silent Graceful, noisy, error reporting + logging Language Scripting, high level Low-level, assembly/bare metal, control, can be difficult Proto. Many/Quickly Few/Slowly Being Done Now Later Source: here.\nhow to choose? Which is the better approach? There isn\u0026rsquo;t one. However, here are some critical questions for you to answer:\nDeadline: what happens if you don\u0026rsquo;t finish today? Release cycle: if you ship a bug, how long can you fix it? 
Consequences: if the software malfunctions, how bad is it? Life-cycle: how long will the software get used? So\u0026mdash;\nAs consequences for deadline gets worse, trend towards fast; as consequences for failure gets worse, trend towards robust.\n","html":"\u003ch2 id=\"process-of-engineering-chronological-order\"\u003eprocess of Engineering: chronological order\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuser_interviews/\"\u003eUser Interviews\u003c/a\u003e + \u003ca href=\"/posts/kbhuser_interviews/#user-story\"\u003eUser Stories\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdocumentation_and_specification/\"\u003eDocumentation and Specification\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eBuild the damned thing\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eProject Management and Development Methodology (SDLC)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtask_estimation/\"\u003eTask Estimation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsoftware_design_and_architecture_patterns/\"\u003eSoftware Design and Architecture Patterns\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtesting/\"\u003eTesting\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBuild and Release engineering (TODO)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-topics\"\u003eOther topics\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eQuery optimization (TODO)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fucking-acronyms-to-know\"\u003eFucking acronyms to know\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtesting/#arrange-act-assert\"\u003eAAA Method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsoftware_design_and_architecture_patterns/#solid-principles\"\u003eSOLID principles\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eSTAR method: state behaviorals in Situation, Task, Action, Results\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fundamental-trade-off-of-software-engineering--kbhsoftware-engineering-dot-md\"\u003efundamental trade-off of \u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#fundamental-trade-off-of-software-engineering--kbhsoftware-engineering-dot-md\"\u003eMIT vs. New Jersey\u003c/a\u003e problem: in \u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e, you can only choose one of FAST or ROBUST.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eProblem\u003c/th\u003e\n\u003cth\u003eFast (\u0026ldquo;Bell Labs/NJ\u0026rdquo;)\u003c/th\u003e\n\u003cth\u003eRobust (\u0026ldquo;MIT\u0026rdquo;)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSpecs\u003c/td\u003e\n\u003ctd\u003eWhatever it looks like\u003c/td\u003e\n\u003ctd\u003escreens, states, UI elements documented; transitions\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eTime\u003c/td\u003e\n\u003ctd\u003e\u0026ldquo;whenever\u0026rdquo;\u003c/td\u003e\n\u003ctd\u003eprecise projections, track work and dependencies\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eTesting\u003c/td\u003e\n\u003ctd\u003e\u0026ldquo;ran it + didn\u0026rsquo;t crash\u0026rdquo;\u003c/td\u003e\n\u003ctd\u003eblack, white box, code overage, edge/adv. 
cases\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eModular\u003c/td\u003e\n\u003ctd\u003eGiant function\u003c/td\u003e\n\u003ctd\u003eobject/data model, grouped function, abstraction barriers\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eFailure\u003c/td\u003e\n\u003ctd\u003eUnpredictable + silent\u003c/td\u003e\n\u003ctd\u003eGraceful, noisy, error reporting + logging\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLanguage\u003c/td\u003e\n\u003ctd\u003eScripting, high level\u003c/td\u003e\n\u003ctd\u003eLow-level, assembly/bare metal, control, can be difficult\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eProto.\u003c/td\u003e\n\u003ctd\u003eMany/Quickly\u003c/td\u003e\n\u003ctd\u003eFew/Slowly\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eBeing Done\u003c/td\u003e\n\u003ctd\u003eNow\u003c/td\u003e\n\u003ctd\u003eLater\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eSource: \u003ca href=\"https://www.dreamsongs.com/RiseOfWorseIsBetter.html\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"how-to-choose\"\u003ehow to choose?\u003c/h3\u003e\n\u003cp\u003eWhich is the better approach? There isn\u0026rsquo;t one. 
However, here are some critical questions for you to answer:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDeadline: what happens if you don\u0026rsquo;t finish today?\u003c/li\u003e\n\u003cli\u003eRelease cycle: if you ship a bug, how long can you fix it?\u003c/li\u003e\n\u003cli\u003eConsequences: if the software malfunctions, how bad is it?\u003c/li\u003e\n\u003cli\u003eLife-cycle: how long will the software get used?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-07_13-00-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo\u0026mdash;\u003c/p\u003e\n\u003cp\u003eAs consequences for deadline gets worse, trend towards fast; as consequences for failure gets worse, trend towards robust.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_engineering/","tags":["index"],"title":"Software Engineering Index"},{"categories":null,"contents":"This will have no explicit boundary conditions in \\(x\\)!\nAssume \\(|U(t,x)|\\) decays quickly as \\(|x| \\to \\infty\\).\nApply Fourier Transform Step one is to apply the Fourier Transform on our PDE\n\\begin{equation} \\hat{U}(t, \\lambda) = \\int_{R} U(t,x) e^{-i\\lambda x} \\dd{x} \\end{equation}\nLeveraging the fact that Derivative of Fourier Transform is a multiplication, we can simply our Fourier transform in terms of one expression in \\(x\\).\nApply a Fourier Transform on \\(f(x)\\) This allows you to plug the initial conditions into your transformed expression above.\nSolve for \\(\\hat{U}(t,\\lambda)\\), and then convert back This uses the inverse Fourier transform.\n","html":"\u003cp\u003eThis will have no explicit boundary conditions in \\(x\\)!\u003c/p\u003e\n\u003cp\u003eAssume \\(|U(t,x)|\\) decays quickly as \\(|x| \\to \\infty\\).\u003c/p\u003e\n\u003ch2 id=\"apply-fourier-transform\"\u003eApply Fourier Transform\u003c/h2\u003e\n\u003cp\u003eStep one is to apply the Fourier Transform on our 
PDE\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t, \\lambda) = \\int_{R} U(t,x) e^{-i\\lambda x} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLeveraging the fact that \u003ca href=\"/posts/kbhfourier_transform/#derivative-of-fourier-transform\"\u003eDerivative of Fourier Transform\u003c/a\u003e is a multiplication, we can simply our Fourier transform in terms of one expression in \\(x\\).\u003c/p\u003e\n\u003ch2 id=\"apply-a-fourier-transform--kbhfourier-transform-dot-md--on-f--x\"\u003eApply a \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e on \\(f(x)\\)\u003c/h2\u003e\n\u003cp\u003eThis allows you to plug the initial conditions into your transformed expression above.\u003c/p\u003e\n\u003ch2 id=\"solve-for-hat-u--t-lambda--and-then-convert-back\"\u003eSolve for \\(\\hat{U}(t,\\lambda)\\), and then convert back\u003c/h2\u003e\n\u003cp\u003eThis uses the inverse Fourier transform.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsolving_pdes_via_fourier_transform/","tags":null,"title":"Solving PDEs via Fourier Transform"},{"categories":null,"contents":"So let\u0026rsquo;s say given a system:\n\\begin{equation} \\begin{cases} x + 2y + z = 0 \\\\ 2x + 0y - z = 1 \\\\ x - y + 0z = 2 \\end{cases} \\end{equation}\nWe can represent this using a matricies.\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\\\ z \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 1 \\\\ 2 \\end{pmatrix} \\end{equation}\nWe will use Gaussian elimination. 
We will begin by multiplying the top row by \\(-2\\).\n\\begin{equation} \\begin{pmatrix} -2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\\\ z \\end{pmatrix} =\\begin{pmatrix} -2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} 0 \\\\ 1 \\\\2 \\end{pmatrix} \\end{equation}\nAnd then we add row one to row two; we will not write out the transformation matrix:\n\\begin{equation} \\begin{pmatrix} -2 \u0026amp;-4 \u0026amp;-2 \\\\ 2 \u0026amp;-0 \u0026amp;-1 \\\\ 1 \u0026amp;-1 \u0026amp;0 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\\\ z \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 1 \\\\2 \\end{pmatrix} \\end{equation}\n","html":"\u003cp\u003eSo let\u0026rsquo;s say given a system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx + 2y + z = 0 \\\\\n2x + 0y - z = 1 \\\\\nx - y + 0z = 2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can represent this using a \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y \\\\ z\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 1 \\\\ 2\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will use \u003ca href=\"/posts/kbhmatricies/#gaussian-elimination\"\u003eGaussian elimination\u003c/a\u003e. 
We will begin by multiplying the top row by \\(-2\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n-2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y \\\\ z\n\\end{pmatrix} =\\begin{pmatrix}\n-2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n0 \\\\ 1 \\\\2\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then we add row one to row two; we will not write out the transformation \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n-2 \u0026amp;-4 \u0026amp;-2 \\\\ 2 \u0026amp;-0 \u0026amp;-1 \\\\ 1 \u0026amp;-1 \u0026amp;0\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y \\\\ z\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 1 \\\\2\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsolving_systems/","tags":null,"title":"solving systems"},{"categories":null,"contents":" \u0026ldquo;laws.als\u0026rdquo;: \u0026ldquo;drumuomup\u0026rdquo; \u0026ldquo;ping.als\u0026rdquo;: \u0026ldquo;walking down the street, eating children\u0026rdquo;\u0026quot;\u0026quot; \u0026ldquo;planets.als\u0026rdquo;: \u0026ldquo;sing a song among the starlight\u0026rdquo; \u0026ldquo;songs.als\u0026rdquo;: \u0026ldquo;thank you klint for your discussion\u0026rdquo; Other things I have to finish \u0026ldquo;Tunel2.als\u0026rdquo; ","html":"\u003cul\u003e\n\u003cli\u003e\u003cdel\u003e\u0026ldquo;laws.als\u0026rdquo;: \u0026ldquo;drumuomup\u0026rdquo;\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;ping.als\u0026rdquo;: \u0026ldquo;walking down the street, eating 
children\u0026rdquo;\u0026quot;\u0026quot;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;planets.als\u0026rdquo;: \u0026ldquo;sing a song among the starlight\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;songs.als\u0026rdquo;: \u0026ldquo;thank you klint for your discussion\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-things-i-have-to-finish\"\u003eOther things I have to finish\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Tunel2.als\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsongs_that_need_lyrics/","tags":null,"title":"Songs that need Lyrics"},{"categories":null,"contents":"qsort: sort an array of any type\nbsearch binary search of an array of any type\nlfind: linear search in a array of any find\nlsearch: lfind, but perform insertion as well\n","html":"\u003cp\u003e\u003ccode\u003eqsort\u003c/code\u003e: sort an array of any type\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003ebsearch\u003c/code\u003e binary search of an array of any type\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003elfind\u003c/code\u003e: linear search in a array of any find\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003elsearch\u003c/code\u003e: lfind, but perform insertion as well\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsorting_functions/","tags":null,"title":"sorting functions"},{"categories":null,"contents":"sound is the compression of air molecules: high/low pressure air. \u0026ldquo;This is your brain on music.\u0026rdquo;\n\u0026ldquo;Dynamic EQ\u0026rdquo;: to attenuate certain frequencies to preventing things from happening.\nSoothe audio\nhow we hear sound the way that sound is deflected as it enter our ear is important:\nsound bounce around our pinna it echos in the ear canal then it gets processed anechoic chamber an anechoic chamber is a room that blocks all forms of reflection. 
In the room, people experience hallucinations as the brain is trying to complete information but it can\u0026rsquo;t confirm it using sensory input.\nYou r brain is always trying to inteperate what\u0026rsquo;s going on.\nBasilar Membrane The Basilar Membrane sits after the eardrums; it is a liquid in which a membrane + some hair sits. Depending on the frequency of the sound, the hairs vibrate at different shapes.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsound/\"\u003esound\u003c/a\u003e is the compression of air molecules: high/low pressure air. \u0026ldquo;This is your brain on music.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Dynamic EQ\u0026rdquo;: to attenuate certain frequencies to preventing things from happening.\u003c/p\u003e\n\u003cp\u003eSoothe audio\u003c/p\u003e\n\u003ch2 id=\"how-we-hear-sound\"\u003ehow we hear sound\u003c/h2\u003e\n\u003cp\u003ethe way that \u003ca href=\"/posts/kbhsound/\"\u003esound\u003c/a\u003e is deflected as it enter our ear is important:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esound bounce around our \u003ca href=\"\"\u003epinna\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eit echos in the ear canal\u003c/li\u003e\n\u003cli\u003ethen it gets processed\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"anechoic-chamber\"\u003eanechoic chamber\u003c/h2\u003e\n\u003cp\u003ean \u003ca href=\"#anechoic-chamber\"\u003eanechoic chamber\u003c/a\u003e is a room that blocks all forms of reflection. In the room, people experience hallucinations as the brain is trying to complete information but it can\u0026rsquo;t confirm it using sensory input.\u003c/p\u003e\n\u003cp\u003eYou r brain is always trying to inteperate what\u0026rsquo;s going on.\u003c/p\u003e\n\u003ch2 id=\"basilar-membrane\"\u003eBasilar Membrane\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#basilar-membrane\"\u003eBasilar Membrane\u003c/a\u003e sits after the eardrums; it is a liquid in which a membrane + some hair sits. 
Depending on the frequency of the sound, the hairs vibrate at different shapes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsound/","tags":null,"title":"sound"},{"categories":null,"contents":"Reading notes Because feeling for self-endowment, they wish to build socialist society As Communists considered themselves as a vanguard of the revolutionary proletariat – their “aim” was to build socialist society in the whole world.\nSocialist had necesity against capitalist aggression The Soviet approaches towards historical descriptions of the twentieth century showed that with the emergence of the new type of state – socialist one – it became a target for capitalist aggression.\nSocialist revolution requires the creation of socialist society against the world It was first positive move towards realization of the Soviet foreign policy main idea: the world socialist revolution and creation of the socialist society in the whole world.\nThe Soviets believe that the US wants to take over world The US had plans to dominate in the entire world.\nThat the US was intentionally sturggling with socialism All US post-war foreign policy doctrines were aimed on the struggle with socialism\nthat soviets believed that US was exclusivly fighting socialism We can summarize – that on Soviet point of view all American presidents of Cold War period were creating their own doctrines, and all of them were anti-communist and anti-Soviet\nSoviets believes that the US made the first move Soviet concept first vivid steps, which signalized about the start of the confrontation between East and West, were steps made by the West.\nbelieves its a fight against imperialism bipolar confrontation had western roots and the Cold War was the policy of the US and other imperialistic countries against socialist countries.\ncommunism is working towards revolution mankind is a process of revolutionary changes\nthe soviet union believes only it can stop American aggression the Soviet Union was 
the only power in the world able to stop American ambitions of superpower.\nUSSR believes that itself was the only defender The Soviet Union considered itself as the only defender of the interests of the working class all over the world because it was the first socialist state in history.\nDefinding US and defending imperialism The Imperialistic was the system of capitalist countries: they had a lot of contradictions in their “camp” where each wanted to solve their problems and to defend their own interests by using the others.\nBlack and white view of the world prevailed USSR The entire world was separated into two main categories: friends and enemies. Such black and white world-view was a distinctive feature of Stalin’s way of seeing the world (outside as well as inside the USSR), but even after his death,\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"because-feeling-for-self-endowment-they-wish-to-build-socialist-society\"\u003eBecause feeling for self-endowment, they wish to build socialist society\u003c/h3\u003e\n\u003cp\u003eAs Communists considered themselves as a vanguard of the revolutionary proletariat – their “aim” was to build socialist society in the whole world.\u003c/p\u003e\n\u003ch3 id=\"socialist-had-necesity-against-capitalist-aggression\"\u003eSocialist had necesity against capitalist aggression\u003c/h3\u003e\n\u003cp\u003eThe Soviet approaches towards historical descriptions of the twentieth century showed that with the emergence of the new type of state – socialist one – it became a target for capitalist aggression.\u003c/p\u003e\n\u003ch3 id=\"socialist-revolution-requires-the-creation-of-socialist-society-against-the-world\"\u003eSocialist revolution requires the creation of socialist society against the world\u003c/h3\u003e\n\u003cp\u003eIt was first positive move towards realization of the Soviet foreign policy main idea: the world socialist revolution and creation of the socialist society in 
the whole world.\u003c/p\u003e\n\u003ch3 id=\"the-soviets-believe-that-the-us-wants-to-take-over-world\"\u003eThe Soviets believe that the US wants to take over world\u003c/h3\u003e\n\u003cp\u003eThe US had plans to dominate in the entire world.\u003c/p\u003e\n\u003ch3 id=\"that-the-us-was-intentionally-sturggling-with-socialism\"\u003eThat the US was intentionally sturggling with socialism\u003c/h3\u003e\n\u003cp\u003eAll US post-war foreign policy doctrines were aimed on the struggle with socialism\u003c/p\u003e\n\u003ch3 id=\"that-soviets-believed-that-us-was-exclusivly-fighting-socialism\"\u003ethat soviets believed that US was exclusivly fighting socialism\u003c/h3\u003e\n\u003cp\u003eWe can summarize – that on Soviet point of view all American presidents of Cold War period were creating their own doctrines, and all of them were anti-communist and anti-Soviet\u003c/p\u003e\n\u003ch3 id=\"soviets-believes-that-the-us-made-the-first-move\"\u003eSoviets believes that the US made the first move\u003c/h3\u003e\n\u003cp\u003eSoviet concept first vivid steps, which signalized about the start of the confrontation between East and West, were steps made by the West.\u003c/p\u003e\n\u003ch3 id=\"believes-its-a-fight-against-imperialism\"\u003ebelieves its a fight against imperialism\u003c/h3\u003e\n\u003cp\u003ebipolar confrontation had western roots and the Cold War was the policy of the US and other imperialistic countries against socialist countries.\u003c/p\u003e\n\u003ch3 id=\"communism-is-working-towards-revolution\"\u003ecommunism is working towards revolution\u003c/h3\u003e\n\u003cp\u003emankind is a process of revolutionary changes\u003c/p\u003e\n\u003ch3 id=\"the-soviet-union-believes-only-it-can-stop-american-aggression\"\u003ethe soviet union believes only it can stop American aggression\u003c/h3\u003e\n\u003cp\u003ethe Soviet Union was the only power in the world able to stop American ambitions of superpower.\u003c/p\u003e\n\u003ch3 
id=\"ussr-believes-that-itself-was-the-only-defender\"\u003eUSSR believes that itself was the only defender\u003c/h3\u003e\n\u003cp\u003eThe Soviet Union considered itself as the only defender of the interests of the working class all over the world because it was the first socialist state in history.\u003c/p\u003e\n\u003ch3 id=\"definding-us-and-defending-imperialism\"\u003eDefinding US and defending imperialism\u003c/h3\u003e\n\u003cp\u003eThe Imperialistic was the system of capitalist countries: they had a lot of contradictions in their “camp” where each wanted to solve their problems and to defend their own interests by using the others.\u003c/p\u003e\n\u003ch3 id=\"black-and-white-view-of-the-world-prevailed-ussr\"\u003eBlack and white view of the world prevailed USSR\u003c/h3\u003e\n\u003cp\u003eThe entire world was separated into two main categories: friends and enemies. Such black and white world-view was a distinctive feature of Stalin’s way of seeing the world (outside as well as inside the USSR), but even after his death,\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoviet_perspective_on_cold_war/","tags":null,"title":"Soviet Perspective on Cold War"},{"categories":null,"contents":"(Spaan and Vlassis 2005)\nRandomized PBVI\nOne-Liner PVBI, faster.\nNovelty \u0026ldquo;Is it necessary to maintain an alpha vector for each belief?\u0026rdquo;\nNotable Methods Don\u0026rsquo;t update all beliefs; backup only belief that didn\u0026rsquo;t yet confer an improvement\nKey Figs New Concepts Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eSpaan and Vlassis 2005\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/#randomized\"\u003eRandomized PBVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003ePVBI, faster.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Is it necessary to maintain an 
alpha vector for each belief?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eDon\u0026rsquo;t update all beliefs; backup only belief that didn\u0026rsquo;t yet confer an improvement\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspaan_2005/","tags":null,"title":"Spaan 2005"},{"categories":null,"contents":"The span of a bunch of vectors is the set of all linear combinations of that bunch of vectors. We denote it as \\(span(v_1, \\dots v_{m)}\\).\nconstituents for constructing a linear combination a list of vectors \\(v_1,\\dots,v_{m}\\) and scalars \\(a_1, a_2, \\dots, a_{m} \\in \\mathbb{F}\\) requirements \\begin{equation} span(v_{1}..v_{m}) = \\{a_1v_1+\\dots +a_{m}v_{m}:a_1\\dots a_{m} \\in \\mathbb{F}\\} \\end{equation}\nadditional information span is the smallest subspace containing all vectors in the list Part 1: that a span of a list of vectors is a subspace containing those vectors\nBy taking all \\(a_{n}\\) as \\(0\\), we show that the additive identity exists.\nTaking two linear combinations and adding them (i.e. adding two members of the span) is still in the span by commutativity and distributivity (reorganize each constant \\(a_{1}\\) together)\u0026mdash;creating another linear combination and therefore a member of the span.\nScaling a linear combination, by distributivity, just scales the scalars and create yet another linear combination.\nPart 2: a subspace containing the list of vectors contain the span\nsubspaces are closed under scalar multiplication and addition. 
Therefore, we can just construct every linear combination.\nBy double-containment, a subspace is the smallest subspace containing all vectors. \\(\\blacksquare\\)\nspans If \\(span(v_1, \\dots v_{m})\\) equals \\(V\\), we say that \\(v_1, \\dots, v_{m}\\) spans \\(V\\).\nNOTE! the two things have to be equal\u0026mdash;if the span of a set of vectors is larger than \\(V\\), they do not span \\(V\\).\nlength of linearly-independent list \\(\\leq\\) length of spanning list see here.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of a bunch of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es is the set of all \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es of that bunch of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es. We denote it as \\(span(v_1, \\dots v_{m)}\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor constructing a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es \\(v_1,\\dots,v_{m}\\)\u003c/li\u003e\n\u003cli\u003eand scalars \\(a_1, a_2, \\dots, a_{m} \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nspan(v_{1}..v_{m}) = \\{a_1v_1+\\dots +a_{m}v_{m}:a_1\\dots a_{m} \\in \\mathbb{F}\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"span-is-the-smallest-subspace-containing-all-vectors-in-the-list\"\u003espan is the smallest subspace containing all vectors in the list\u003c/h3\u003e\n\u003cp\u003ePart 1: that a \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of a list of vectors is a 
\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e containing those \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es\u003c/p\u003e\n\u003cp\u003eBy taking all \\(a_{n}\\) as \\(0\\), we show that the additive identity exists.\u003c/p\u003e\n\u003cp\u003eTaking two \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es and \u003ca href=\"/posts/kbhadding/\"\u003eadding\u003c/a\u003e them (i.e. adding two members of the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e) is still in the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e by \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e and \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e (reorganize each constant \\(a_{1}\\) together)\u0026mdash;creating another \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e and therefore a member of the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eScaling a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e, by \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e, just scales the scalars and create yet another \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ePart 2: a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e containing the list of vectors contain the span\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es are closed under \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e and \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e. 
Therefore, we can just construct every \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBy double-containment, a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e is the smallest \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e containing all vectors. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"spans\"\u003espans\u003c/h3\u003e\n\u003cp\u003eIf \\(span(v_1, \\dots v_{m})\\) equals \\(V\\), we say that \\(v_1, \\dots, v_{m}\\) \u003ca href=\"#spans\"\u003espans\u003c/a\u003e \\(V\\).\u003c/p\u003e\n\u003cp\u003eNOTE! the two things have to be equal\u0026mdash;if the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of a set of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es is \u003cem\u003elarger\u003c/em\u003e than \\(V\\), they do \u003cstrong\u003enot\u003c/strong\u003e span \\(V\\).\u003c/p\u003e\n\u003ch3 id=\"length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003esee here\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspan/","tags":null,"title":"span (linear algebra)"},{"categories":null,"contents":" Órale pues: confirmando No hay pedo: no hay problema Ponte la de puebla: dividirlo Qué padre: sopresa positiva De a grapa: gratis De poca madre: júbilo y aceptación Te vas a dar un ranazo: nos vamos a hacer daño (hurt) Mamá: ¡necesitamos limpiar sus cuartos! Me: órale pues, no hay pedo. Voy a limpiarlo mañana.\nMi plan está simple. Voy a dividir mi cuarto a media, y contrata mi amiga para ayudarme. ¡Ponte la de puebla!\nDos días después\u0026hellip;\nMamá: Oye, ¿su habitación? ¡De poca madre, qué padre! 
Qué limpia.\nMe: Sí, de a grapa con mi amigo, también.\n","html":"\u003col\u003e\n\u003cli\u003eÓrale pues: confirmando\u003c/li\u003e\n\u003cli\u003eNo hay pedo: no hay problema\u003c/li\u003e\n\u003cli\u003ePonte la de puebla: dividirlo\u003c/li\u003e\n\u003cli\u003eQué padre: sopresa positiva\u003c/li\u003e\n\u003cli\u003eDe a grapa: gratis\u003c/li\u003e\n\u003cli\u003eDe poca madre: júbilo y aceptación\u003c/li\u003e\n\u003cli\u003eTe vas a dar un ranazo: nos vamos a hacer daño (hurt)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eMamá: ¡necesitamos limpiar sus cuartos! Me: órale pues, no hay pedo. Voy a limpiarlo mañana.\u003c/p\u003e\n\u003cp\u003eMi plan está simple. Voy a dividir mi cuarto a media, y contrata mi amiga para ayudarme. ¡Ponte la de puebla!\u003c/p\u003e\n\u003cp\u003eDos días después\u0026hellip;\u003c/p\u003e\n\u003cp\u003eMamá: Oye, ¿su habitación? ¡De poca madre, qué padre! Qué limpia.\u003c/p\u003e\n\u003cp\u003eMe: Sí, de a grapa con mi amigo, también.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspanish/","tags":null,"title":"spanish"},{"categories":null,"contents":"Spark is not a database. Importantly, its a \u0026ldquo;framework\u0026rdquo; of data:\nProgramming platform Distributed file system Prallel execution environment Software ecosystem It gives you the \u0026ldquo;parallel\u0026rdquo; search/sort needed to navigate a large database. It is based on the Hadoop ecosystem. 
Spark operates on RDDs to do lazy-evaluation.\nQuickstart When we start up Spark Shell, it will build you a sc variable which is appropriate for your supercomputer; if you are not, you need to set up the context yourself using the 3 lines noted below to make sc variable:\n# build context, IF NOT in a SHELL from myspark import SparkConf, SparkContext conf = SparkConf().setMaster(\u0026#34;local\u0026#34;).setaAppName(\u0026#34;Test_App\u0026#34;) sc = SparkContext(conf=conf) # do shit \u0026#34;transform\u0026#34; my_rdd = sc.textFile(\u0026#34;whatever\u0026#34;) # create an RDD from a datastore filtered_rdd = my_rdd.filter(lambda Line: \u0026#34;hubble\u0026#34; in line) # perform action in rdd # actually perform actions filtered_rdd.count() # 47 filtered_rdd.first() Then, you can submit this script. If you are in a shell its REPL so you don\u0026rsquo;t.\nspark-submit Test_App.py Main types of files Spark can handle:\nText (CSV, XML, JSON, etc.) SQL stuff Other NoSQL Stuff Parquet Hive Hadoopy things JDBC, Cassandra, Mongo, etc. etc. etc. Compressed files: tarballs, gzip Main types of filesystems:\nyours (local files) HDFS Lustre AWS S3 See also Common Spark Actions, Common Spark Transformations\nSpark RDD API The base level API of Spark is one which interacts with vectors or tuples via RDDs. It deals with manipulating unordered sets (i.e. not dataframes).\nCreate/Load RDD Transform RDD] (performing any operations in theory, lazily, without loading data) Apply some more transforms (filtering, etc.) Perform Actions (actually get data) Transformations move one RDD to another, and Actions load the data from RDD.\nSpark is smart to not do anything that\u0026rsquo;s not needed; removing entire stores which isn\u0026rsquo;t needed, caching, dynamically getting them back etc.\nSpark can read from whole databases, text files, etc. and move them into an RDD.\nTips and Tricks \u0026ldquo;kv\u0026rdquo; values are kinda like Pandas .groupby without groupyby. 
You can do most mapping operations by key instead, which means you can always do group.\nif you want a certain form, use the .map function to turn it into something else; for instance, if you want to turn something into a kv pair, you can thing.map(lambda x: x-key, x-value) if you need to work with KV data, you should think about swapping key and value if you so desire Spark Anti-Patterns You should never do heavy-duty compute in Python (i.e. if you end up with a for loop somewhere you are probably not using map reduce right) You should never take an action until you absolutely, seriously, need all the data Optimizations See Optimizing Spark\nSpark DataFrame API DataFrames are type-safe collections of tables built out of RDDs; they are collections of tuples, where columns have the same type\nrow_rdd = sc.parallelize([(\u0026#34;thing1\u0026#34;, \u0026#34;thing2\u0026#34;, 3), (\u0026#34;thing1\u0026#34;, \u0026#34;thing2\u0026#34;, 3)]) row_dataframe = spark.createDataFrame(row_rdd, [\u0026#34;col1\u0026#34;, \u0026#34;col2\u0026#34;, \u0026#34;col3\u0026#34;]) row_dataframe.show() You can load entire structured data via:\ndf = spark.read.json(\u0026#34;jsonfile.json\u0026#34;) And it will create the right schema on your behalf in the DataFrame.\nDataFrame Limitations Unlike Pandas, you can\u0026rsquo;t manipulate structured DataFrame well. Hence, you should like. Go back to RDDs if you are manipulating specific data into unstructured form. ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e is not a database. 
Importantly, its a \u0026ldquo;framework\u0026rdquo; of data:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eProgramming platform\u003c/li\u003e\n\u003cli\u003eDistributed file system\u003c/li\u003e\n\u003cli\u003ePrallel execution environment\u003c/li\u003e\n\u003cli\u003eSoftware ecosystem\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIt gives you the \u0026ldquo;parallel\u0026rdquo; search/sort needed to navigate a large database. It is based on the \u003ca href=\"\"\u003eHadoop\u003c/a\u003e ecosystem. Spark operates on \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es to do lazy-evaluation.\u003c/p\u003e\n\u003ch2 id=\"quickstart\"\u003eQuickstart\u003c/h2\u003e\n\u003cp\u003eWhen we start up Spark Shell, it will build you a \u003ccode\u003esc\u003c/code\u003e variable which is appropriate for your supercomputer; if you are not, you need to set up the context yourself using the 3 lines noted below to make \u003ccode\u003esc\u003c/code\u003e variable:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# build context, IF NOT in a SHELL\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emyspark\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkConf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkContext\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econf\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkConf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esetMaster\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;local\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esetaAppName\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Test_App\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esc\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkContext\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# do shit \u0026#34;transform\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_rdd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003etextFile\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;whatever\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# create an RDD from a datastore\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efiltered_rdd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_rdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efilter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLine\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;hubble\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eline\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# perform action in rdd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# actually perform actions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efiltered_rdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 
47\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efiltered_rdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efirst\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, you can submit this script. If you are in a shell its REPL so you don\u0026rsquo;t.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003espark-submit Test_App.py\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMain types of files Spark can handle:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eText (CSV, XML, JSON, etc.)\u003c/li\u003e\n\u003cli\u003eSQL stuff\u003c/li\u003e\n\u003cli\u003eOther NoSQL Stuff\n\u003cul\u003e\n\u003cli\u003eParquet\u003c/li\u003e\n\u003cli\u003eHive\u003c/li\u003e\n\u003cli\u003eHadoopy things\u003c/li\u003e\n\u003cli\u003eJDBC, Cassandra, Mongo, etc. etc. 
etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eCompressed files: tarballs, gzip\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMain types of filesystems:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eyours (local files)\u003c/li\u003e\n\u003cli\u003eHDFS\u003c/li\u003e\n\u003cli\u003eLustre\u003c/li\u003e\n\u003cli\u003eAWS S3\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhcommon_spark_actions/\"\u003eCommon Spark Actions\u003c/a\u003e, \u003ca href=\"/posts/kbhcommon_spark_transformations/\"\u003eCommon Spark Transformations\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"spark-rdd-api\"\u003eSpark RDD API\u003c/h2\u003e\n\u003cp\u003eThe base level API of \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e is one which interacts with \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es or tuples via \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es. It deals with manipulating unordered sets (i.e. \u003cstrong\u003enot\u003c/strong\u003e dataframes).\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eCreate/Load \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eTransform \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e] (performing any operations \u003cem\u003ein theory, lazily, without loading data\u003c/em\u003e)\u003c/li\u003e\n\u003cli\u003eApply some more transforms (filtering, etc.)\u003c/li\u003e\n\u003cli\u003ePerform Actions (actually get data)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eTransformations\u003c/strong\u003e\u003c/strong\u003e move one \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e to another, and \u003cstrong\u003e\u003cstrong\u003eActions\u003c/strong\u003e\u003c/strong\u003e load the data from \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSpark is smart to not do anything that\u0026rsquo;s not needed; removing entire stores which isn\u0026rsquo;t 
needed, caching, dynamically getting them back etc.\u003c/p\u003e\n\u003cp\u003eSpark can read from whole databases, text files, etc. and move them into an RDD.\u003c/p\u003e\n\u003ch3 id=\"tips-and-tricks\"\u003eTips and Tricks\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;kv\u0026rdquo; values are kinda like Pandas \u003ccode\u003e.groupby\u003c/code\u003e without groupyby. You can do most mapping operations by key instead, which means you can always do group.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif you want a certain form, use the \u003ccode\u003e.map\u003c/code\u003e function to turn it into something else; for instance, if you want to turn something into a kv pair, you can \u003ccode\u003ething.map(lambda x: x-key, x-value)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif you need to work with KV data, you should think about swapping key and value if you so desire\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"spark-anti-patterns\"\u003eSpark Anti-Patterns\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eYou should never do heavy-duty compute in Python (i.e. 
if you end up with a for loop somewhere you are probably not using map reduce right)\u003c/li\u003e\n\u003cli\u003eYou should never take an \u003cstrong\u003eaction\u003c/strong\u003e until you absolutely, seriously, need all the data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"optimizations\"\u003eOptimizations\u003c/h4\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhoptimizing_spark/\"\u003eOptimizing Spark\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"spark-dataframe-api\"\u003eSpark DataFrame API\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#spark-dataframe-api\"\u003eDataFrame\u003c/a\u003es are type-safe collections of tables built out of RDDs; they are collections of tuples, where columns have the same type\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erow_rdd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparallelize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;thing1\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;thing2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;thing1\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;thing2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erow_dataframe\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003espark\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecreateDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erow_rdd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;col1\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;col2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;col3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erow_dataframe\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can load entire structured data via:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003espark\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejson\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;jsonfile.json\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd it will create the right schema on your behalf in the DataFrame.\u003c/p\u003e\n\u003ch3 id=\"dataframe-limitations\"\u003eDataFrame Limitations\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eUnlike Pandas, you can\u0026rsquo;t manipulate structured DataFrame well.\u003c/li\u003e\n\u003cli\u003eHence, you should like. 
Go back to RDDs if you are manipulating specific data into unstructured form.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspark/","tags":null,"title":"Spark"},{"categories":null,"contents":"Same core algorithm as Forward Search, but instead of calculating a utility based on the action-value over all possible next states, you make \\(m\\) different samples of next state, action, and reward, and average them\n","html":"\u003cp\u003eSame core algorithm as \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but instead of calculating a \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e based on the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e over all possible next states, you make \\(m\\) different \u003cstrong\u003esamples\u003c/strong\u003e of next state, action, and reward, and average them\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsparse_sampling/","tags":null,"title":"Sparse Sampling"},{"categories":null,"contents":" take audio\ncalculate Mel Scale representation\napply a series of Filter Banks which attenuates the input to highlight groups of frequencies\nwe then run a discrete-cosine transform to obtain MFCCs, because much of the output results will still correlate with each other\n","html":"\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003etake audio\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecalculate \u003ca href=\"/posts/kbhmel_scale/\"\u003eMel Scale\u003c/a\u003e representation\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eapply a series of \u003ca href=\"/posts/kbhfilter_bank/\"\u003eFilter Bank\u003c/a\u003es which attenuates the input to highlight groups of frequencies\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-20_14-50-55_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewe then run a discrete-cosine transform to obtain 
\u003ca href=\"/posts/kbhspeech_feature_extraction/\"\u003eMFCC\u003c/a\u003es, because much of the output results will still correlate with each other\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspeech_feature_extraction/","tags":null,"title":"Speech Feature Extraction"},{"categories":null,"contents":"Group Meetings ","html":"\u003ch2 id=\"group-meetings\"\u003eGroup Meetings\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspeech_processing_index/","tags":null,"title":"Speech Processing Index"},{"categories":null,"contents":"A spinal tap is a medical procedure whereby cerebralspinal fluid is collected by puncturing the lumbar; used to diagnose problems where biomakers from the brain are needed.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhspinal_tap/\"\u003espinal tap\u003c/a\u003e is a medical procedure whereby cerebralspinal fluid is collected by puncturing the lumbar; used to diagnose problems where biomakers from the brain are needed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspinal_tap/","tags":null,"title":"spinal tap"},{"categories":null,"contents":"A stationary point of an ODE is considered \u0026ldquo;stable\u0026rdquo; if, at the stationary point \\(y=c\\), the function with initial condition.\nIf you start near a stationary point, the function will either diverge \\(t\\to \\infty\\) to that stationary point, or converge to a stationary point. Whether the functions done that makes it \u0026ldquo;stable\u0026rdquo;/\u0026ldquo;unstable\u0026rdquo;.\nFor an autonomous ODEs \\(y\u0026rsquo;(t) = f(y(t))\\), suppose \\(y(t) = c\\) is a stationary solutiona:\n\\(c\\) is stable (i.e. \\(t\\to \\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from positive to negative; that is, when \\(f\u0026rsquo;( c) \u0026lt; 0\\) \\(c\\) is unstable (i.e. 
\\(t\\to -\\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from negative to positive; that is, when \\(f\u0026rsquo;(t) \u0026gt; 0\\) \\(c\\) is semi-stable (i.e. stable on one side, unstable on the other) if the graph of \\(f\\) near \\(c\\) has the same sign on both sides; meaning \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c)\\neq 0\\) if \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c) \\neq 0\\), we are sad and should investigate more away from zeros, the concavity of \\(y(t)\\) could be checked for \\(f f\u0026rsquo;\\). when its positive, \\(y(t)\\) is concave up; when its negative \\(y(t)\\) is concave down.\n","html":"\u003cp\u003eA stationary point of an \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e is considered \u0026ldquo;stable\u0026rdquo; if, at the stationary point \\(y=c\\), the function with initial condition.\u003c/p\u003e\n\u003cp\u003eIf you start near a stationary point, the function will either diverge \\(t\\to \\infty\\) to that stationary point, or converge to a stationary point. Whether the functions done that makes it \u0026ldquo;stable\u0026rdquo;/\u0026ldquo;unstable\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor an \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e \\(y\u0026rsquo;(t) = f(y(t))\\), suppose \\(y(t) = c\\) is a stationary solutiona:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(c\\) is stable (i.e. \\(t\\to \\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from positive to negative; that is, when \\(f\u0026rsquo;( c) \u0026lt; 0\\)\u003c/li\u003e\n\u003cli\u003e\\(c\\) is unstable (i.e. \\(t\\to -\\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from negative to positive; that is, when \\(f\u0026rsquo;(t) \u0026gt; 0\\)\u003c/li\u003e\n\u003cli\u003e\\(c\\) is semi-stable (i.e. 
stable on one side, unstable on the other) if the graph of \\(f\\) near \\(c\\) has the same sign on both sides; meaning \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c)\\neq 0\\)\u003c/li\u003e\n\u003cli\u003eif \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c) \\neq 0\\), we are sad and should investigate more\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eaway from zeros, the concavity of \\(y(t)\\) could be checked for \\(f f\u0026rsquo;\\). when its positive, \\(y(t)\\) is concave up; when its negative \\(y(t)\\) is concave down.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstability/","tags":null,"title":"stability (ODEs)"},{"categories":null,"contents":"stack is where all local variables and parameters live for a function. The stack frame goes away when the function returns.\nstack grows downwards in memory; each function call sets aside some space in stack regardless if local variables are used.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e is where all local variables and parameters live for a \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e. The stack frame goes away when the function returns.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e grows downwards in memory; each function call sets aside some space in stack regardless if local variables are used.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstack/","tags":null,"title":"stack"},{"categories":null,"contents":"A stack trace is the output of failing code by the runtime to indicate the location of the fault. 
For instance, in Python:\n--------------------------------------------------------------------------- TypeError Traceback (most recent call last) \u0026lt;ipython-input-1-0b766d7d4bc7\u0026gt; in \u0026lt;module\u0026gt; ----\u0026gt; 1 0+\u0026#34;\u0026#34; TypeError: unsupported operand type(s) for +: \u0026#39;int\u0026#39; and \u0026#39;str\u0026#39; ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhstack_trace/\"\u003estack trace\u003c/a\u003e is the output of failing code by the runtime to indicate the location of the fault. For instance, in Python:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003e---------------------------------------------------------------------------\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eTypeError\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eTraceback\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emost\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erecent\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecall\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elast\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eipython\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb766d7d4bc7\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emodule\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003e----\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eTypeError\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eunsupported\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoperand\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;int\u0026#39;\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eand\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#39;str\u0026#39;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhstack_trace/","tags":null,"title":"stack trace"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhstandard_error/","tags":null,"title":"standard error"},{"categories":null,"contents":"Stanford is an university.\nStuff https://knight-hennessy.stanford.edu/ Stanford UG Courses Index ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstanford/\"\u003eStanford\u003c/a\u003e is an university.\u003c/p\u003e\n\u003ch2 id=\"stuff\"\u003eStuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://knight-hennessy.stanford.edu/\"\u003ehttps://knight-hennessy.stanford.edu/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstanford_courses_index/\"\u003eStanford UG Courses Index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford/","tags":null,"title":"Stanford"},{"categories":null,"contents":"Locales of Interest 5th flr Green Library: Albert Bender Room 380-382T, 2nd floor: UG Math Student Lounge Classes of Interest Classes that Were Good Resources tutoring.stanford.edu hume center skills coaching: academicskills.stanford.edu Tips ask your UAD about research opportunities and networking with faculty connections show up for office hours ","html":"\u003ch2 id=\"locales-of-interest\"\u003eLocales of Interest\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e5th flr Green Library: Albert Bender Room\u003c/li\u003e\n\u003cli\u003e380-382T, 2nd floor: UG Math Student Lounge\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"classes-of-interest\"\u003eClasses of Interest\u003c/h2\u003e\n\u003ch2 id=\"classes-that-were-good\"\u003eClasses that Were Good\u003c/h2\u003e\n\u003ch2 id=\"resources\"\u003eResources\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-09-20_13-14-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003etutoring.stanford.edu\u003c/li\u003e\n\u003cli\u003ehume center\u003c/li\u003e\n\u003cli\u003eskills coaching: academicskills.stanford.edu\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tips\"\u003eTips\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eask your UAD about research opportunities and networking with faculty connections\u003c/li\u003e\n\u003cli\u003eshow up for office hours\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford_factoids_index/","tags":null,"title":"Stanford Factoids Index"},{"categories":null,"contents":"Stanford UG Y1, Aut Decision Making Index Computer Systems Index CS Probability Index Speech Processing Index Stanford UG Y1, Win ODEs Index OS Index POMDPs Index Language Information Index UG Other Duties Here are a list of random indicies which may end up being helpful!\nCLRS Index AI Master Class Software Engineering Stanford UG Talks Date Topic Presenter Link \u0026lt;2023-09-20 Wed\u0026gt; UG Research Program Brian Thomas Stanford UG Research Program \u0026lt;2023-09-28 Thu\u0026gt; Bld an Ecosystem, Not Monolith Colin Raffel Build a System \u0026lt;2023-10-05 Thu\u0026gt; Training Helpful CHatbots Nazeen Rajani Training Helpful Chatbots \u0026lt;2023-10-26 Thu\u0026gt; AI Intepretability for Bio Gasper Begus AI Intepretability \u0026lt;2023-11-02 Thu\u0026gt; PT Transformers on Long Seqs Mike Lewis Pretraining Long Transformers \u0026lt;2023-11-07 Tue\u0026gt; Transformers! A. 
Vaswani Transformers \u0026lt;2023-11-09 Thu\u0026gt; Towards Interactive Agents Jessy Lin Interactive Agent \u0026lt;2023-11-16 Thu\u0026gt; Dissociating Language and Thought Anna Ivanova Dissociating Language and Thought \u0026lt;2024-01-11 Thu\u0026gt; Language Agents Karthik Narasimhan Language Agents with Karthik \u0026lt;2024-02-01 Thu\u0026gt; Pretraining Data \u0026lt;2024-02-08 Thu\u0026gt; value alignment Been Kim LM Alignment \u0026lt;2024-02-15 Thu\u0026gt; model editing Peter Hase Knowledge Editing Contacts Talk Contacts\n","html":"\u003ch2 id=\"stanford-ug-y1-aut\"\u003eStanford UG Y1, Aut\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_making_index/\"\u003eDecision Making Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomputer_systems_index/\"\u003eComputer Systems Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcs_probability_index/\"\u003eCS Probability Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspeech_processing_index/\"\u003eSpeech Processing Index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stanford-ug-y1-win\"\u003eStanford UG Y1, Win\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhodes_index/\"\u003eODEs Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhos_index/\"\u003eOS Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpomdps_index/\"\u003ePOMDPs Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlanguage_information_index/\"\u003eLanguage Information Index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ug-other-duties\"\u003eUG Other Duties\u003c/h2\u003e\n\u003cp\u003eHere are a list of random indicies which may end up being helpful!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclrs_index/\"\u003eCLRS Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhai_master_class/\"\u003eAI Master Class\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stanford-ug-talks\"\u003eStanford UG Talks\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003ePresenter\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-09-20 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eUG Research Program\u003c/td\u003e\n\u003ctd\u003eBrian Thomas\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhstanford_ug_research_program/\"\u003eStanford UG Research Program\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-09-28 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eBld an Ecosystem, Not Monolith\u003c/td\u003e\n\u003ctd\u003eColin Raffel\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhbuild_a_system_not_a_monolyth/\"\u003eBuild a System\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-05 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eTraining Helpful CHatbots\u003c/td\u003e\n\u003ctd\u003eNazeen Rajani\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtraining_helpful_chatbots/\"\u003eTraining Helpful Chatbots\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-26 
Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eAI Intepretability for Bio\u003c/td\u003e\n\u003ctd\u003eGasper Begus\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_intepretability/\"\u003eAI Intepretability\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-02 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003ePT Transformers on Long Seqs\u003c/td\u003e\n\u003ctd\u003eMike Lewis\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpretraining_long_transformers/\"\u003ePretraining Long Transformers\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-07 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eTransformers!\u003c/td\u003e\n\u003ctd\u003eA. Vaswani\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtransformers/\"\u003eTransformers\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-09 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eTowards Interactive Agents\u003c/td\u003e\n\u003ctd\u003eJessy Lin\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhinteractive_agent/\"\u003eInteractive Agent\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-16 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eDissociating Language and Thought\u003c/td\u003e\n\u003ctd\u003eAnna Ivanova\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdissociating_language_and_thought/\"\u003eDissociating Language and 
Thought\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-11 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eLanguage Agents\u003c/td\u003e\n\u003ctd\u003eKarthik Narasimhan\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlanguage_agents/\"\u003eLanguage Agents with Karthik\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-01 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpretraining_data/\"\u003ePretraining Data\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-08 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003evalue alignment\u003c/td\u003e\n\u003ctd\u003eBeen Kim\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlm_alignment/\"\u003eLM Alignment\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-15 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003emodel editing\u003c/td\u003e\n\u003ctd\u003ePeter Hase\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhknowledge_editing/\"\u003eKnowledge Editing\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"contacts\"\u003eContacts\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhtalk_contacts/\"\u003eTalk Contacts\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford_courses_index/","tags":["index"],"title":"Stanford UG Courses 
Index"},{"categories":null,"contents":"Brian Thomas, the research guy. Don\u0026rsquo;t start research at Autumn Frosh Freshmen Year.\nGetting Started Think about the Institutes (many of them do not have an UG major) Stanford HAI Stanford HCI? Find faculty Don\u0026rsquo;t just ask for a job Research the person\u0026rsquo;s publications and ask some questions about it: TRY TO ASK FOR A OFFICE HOUR MEETING WITH QUESTIONS: \u0026ldquo;I read your thing, and I would love to talk more about it\u0026rdquo; (there is coaching from Brian Thomas\u0026rsquo;s office, and coaching from UAD) OR, use a program, but still talk HB-ref? CURIS Stanford\u0026rsquo;s grant programs can pay for research needs. There will be people talking about grants later. Don\u0026rsquo;t worry about them. Get to the point where you need money and then figure it out.\nThe UADs The UADs are PhD+ or Profs They review UG research grants They know the program that are available In general: talk to UADs (when Kristin is not sick).\nDeliverables undergradresearch.stanford.edu Reach out to professors Look into UAD workshops + talk to UADs Find the edges of your textbook/courses (identify \u0026ldquo;where the trail seems to end\u0026rdquo;) SURPS: symposium of UG research and public service. Thursday 10/19, 4P. Burnham Pavilion Large groups have \u0026ldquo;Student Services Officers\u0026rdquo;, reach out ","html":"\u003cp\u003eBrian Thomas, the \u003ca href=\"/posts/kbhresearch/\"\u003eresearch\u003c/a\u003e guy. 
Don\u0026rsquo;t start research at Autumn Frosh Freshmen Year.\u003c/p\u003e\n\u003ch2 id=\"getting-started\"\u003eGetting Started\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eThink about the Institutes (many of them do not have an UG major)\n\u003cul\u003e\n\u003cli\u003eStanford HAI\u003c/li\u003e\n\u003cli\u003eStanford HCI?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eFind faculty\n\u003cul\u003e\n\u003cli\u003eDon\u0026rsquo;t just ask for a job\u003c/li\u003e\n\u003cli\u003eResearch the person\u0026rsquo;s publications and ask some questions about it: \u003cstrong\u003e\u003cstrong\u003eTRY TO ASK FOR A OFFICE HOUR MEETING WITH QUESTIONS\u003c/strong\u003e\u003c/strong\u003e: \u0026ldquo;I read your thing, and I would love to talk more about it\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e(there is coaching from \u003ca href=\"/posts/kbhstanford_ug_research_program/\"\u003eBrian Thomas\u003c/a\u003e\u0026rsquo;s office, and coaching from UAD)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eOR, use a program, but still talk\n\u003cul\u003e\n\u003cli\u003eHB-ref?\u003c/li\u003e\n\u003cli\u003eCURIS\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eStanford\u0026rsquo;s grant programs can pay for research needs. There will be people talking about grants later. Don\u0026rsquo;t worry about them. 
Get to the point where you need money and then figure it out.\u003c/p\u003e\n\u003ch2 id=\"the-uads\"\u003eThe UADs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe UADs are PhD+ or Profs\u003c/li\u003e\n\u003cli\u003eThey review UG research grants\u003c/li\u003e\n\u003cli\u003eThey know the program that are available\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn general: talk to UADs (when Kristin is not sick).\u003c/p\u003e\n\u003ch2 id=\"deliverables\"\u003eDeliverables\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eundergradresearch.stanford.edu\u003c/li\u003e\n\u003cli\u003eReach out to professors\u003c/li\u003e\n\u003cli\u003eLook into UAD workshops + talk to UADs\u003c/li\u003e\n\u003cli\u003eFind the edges of your textbook/courses (identify \u0026ldquo;where the trail seems to end\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eSURPS: symposium of UG research and public service. Thursday 10/19, 4P. Burnham Pavilion\u003c/li\u003e\n\u003cli\u003eLarge groups have \u0026ldquo;Student Services Officers\u0026rdquo;, reach out\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford_ug_research_program/","tags":null,"title":"Stanford UG Research Program"},{"categories":null,"contents":"Everyone and their dog has a blog at this point. Why not me? You see, I don\u0026rsquo;t really like the idea of blogging, but I do enjoy taking notes. I take a crap tonnes of notes, and sometimes people want to see a copy of them.\nIn order to facilitate this, some friends and I created taproot, a collective note-taking effort which also automatically compiled pretty cool previews and an internet site. I still am one of the primary maintainers of taproot.\nWhile working on the project, however, we noticed that the loop-based architecture (instead of being based on events/triggers), lack of duplicity, and requirement of a central build server made it difficult.\nIn this vein, quantumish (also with his own lovely set of notes, tap on the link!) 
and I were discussing if the essentials of taproot can be built into a static site generator. Hence, this is an experiment (to hopefully be merged with the taproot group) to facilitate this.\n","html":"\u003cp\u003eEveryone and their dog has a blog at this point. Why not me? You see, I don\u0026rsquo;t really like the idea of blogging, but I \u003cem\u003edo\u003c/em\u003e enjoy taking notes. I take a crap tonnes of notes, and sometimes people want to see a copy of them.\u003c/p\u003e\n\u003cp\u003eIn order to facilitate this, some friends and I created \u003ca href=\"https://taproot3.sanity.gq/\"\u003etaproot\u003c/a\u003e, a collective note-taking effort which also automatically compiled pretty cool previews and an internet site. I still am one of the primary maintainers of taproot.\u003c/p\u003e\n\u003cp\u003eWhile working on the project, however, we noticed that the loop-based architecture (instead of being based on events/triggers), lack of duplicity, and requirement of a central build server made it difficult.\u003c/p\u003e\n\u003cp\u003eIn this vein, \u003ca href=\"https://quantumish.github.io/\"\u003equantumish\u003c/a\u003e (also with his own lovely set of notes, tap on the link!) and I were discussing if the essentials of taproot can be built into a static site generator. Hence, this is an experiment (to hopefully be merged with the taproot group) to facilitate this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstarting_with_why_the_knowledgebase/","tags":null,"title":"Starting With Why: The Knowledgebase"},{"categories":null,"contents":"Smol companies\nTips A well-run startup should have 18 month of cash planned, and have a runway of 6 months to ensure you can always get acq-hired and \u0026ldquo;bail out\u0026rdquo; Myths of Startups \u0026ldquo;Joining Big Tech\u0026rdquo; vs. 
\u0026ldquo;Starting a Startup\u0026rdquo; are not binary options In between these poles: joining an existing startup Myth: \u0026ldquo;90% of startups of fail\u0026rdquo; True statement: 90% of SMALL BUSINESSES fail. Venture backed tech startups are very different world: only 1% of small businesses are venture backed.\nRoughly: 1/3 of VC backed startups \u0026ldquo;fail\u0026rdquo;\n1/3 fail 1/2 return the money (nothing happens) 1/6 exits + drive returns Myth (kinda): \u0026ldquo;you are under paid\u0026rdquo; If you JOIN a startup, the small amount of compensation corresponds to betting on yourself in a similar way.\nIf you are negotiating your compensation, you should try to get MORE EQUITY and less cash.\nAnatomy of a Startup \u0026ldquo;Fail\u0026rdquo; Most startup failures looks like an acqui-hire. Acq-hiring results in investors\nInvestors loose money Employees get a minor payoff ","html":"\u003cp\u003eSmol companies\u003c/p\u003e\n\u003ch2 id=\"tips\"\u003eTips\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA well-run startup should have \u003cstrong\u003e18 month of cash planned\u003c/strong\u003e, and have a \u003cstrong\u003erunway of 6 months\u003c/strong\u003e to ensure you can always get acq-hired and \u0026ldquo;bail out\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"myths-of-startups\"\u003eMyths of Startups\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Joining Big Tech\u0026rdquo; vs. \u0026ldquo;Starting a Startup\u0026rdquo; are \u003cstrong\u003enot\u003c/strong\u003e binary options\u003c/li\u003e\n\u003cli\u003eIn between these poles: joining an existing startup\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"myth-90-of-startups-of-fail\"\u003eMyth: \u0026ldquo;90% of startups of fail\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eTrue statement: 90% of \u003cstrong\u003eSMALL BUSINESSES\u003c/strong\u003e fail. 
Venture backed tech startups are \u003cstrong\u003every different world\u003c/strong\u003e: only 1% of small businesses are venture backed.\u003c/p\u003e\n\u003cp\u003eRoughly: 1/3 of VC backed startups \u0026ldquo;fail\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e1/3 fail\u003c/li\u003e\n\u003cli\u003e1/2 return the money (nothing happens)\u003c/li\u003e\n\u003cli\u003e1/6 exits + drive returns\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"myth--kinda--you-are-under-paid\"\u003eMyth (kinda): \u0026ldquo;you are under paid\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eIf you JOIN a startup, the small amount of compensation corresponds to betting on yourself in a similar way.\u003c/p\u003e\n\u003cp\u003eIf you are negotiating your compensation, you should try to get MORE EQUITY and less cash.\u003c/p\u003e\n\u003ch2 id=\"anatomy-of-a-startup-fail\"\u003eAnatomy of a Startup \u0026ldquo;Fail\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eMost startup failures looks like an acqui-hire. Acq-hiring results in investors\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInvestors loose money\u003c/li\u003e\n\u003cli\u003eEmployees get a minor payoff\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstartup/","tags":null,"title":"Startup"},{"categories":null,"contents":"The stationary-action principle states that, in a dynamic system, the equations of motion of that system is yielded as the \u0026ldquo;stationary points\u0026rdquo; of the system\u0026rsquo;s action. i.e. the points of \u0026ldquo;least\u0026rdquo; action. (i.e. 
a ball sliding down a ramp is nice, but you don\u0026rsquo;t expect it\u0026mdash;in that system\u0026mdash;to fly off the ramp, do a turn, and then fly down.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e states that, in a dynamic system, the equations of motion of that system is yielded as the \u0026ldquo;stationary points\u0026rdquo; of the system\u0026rsquo;s action. i.e. the points of \u0026ldquo;least\u0026rdquo; action. (i.e. a ball sliding down a ramp is nice, but you don\u0026rsquo;t expect it\u0026mdash;in that system\u0026mdash;to fly off the ramp, do a turn, and then fly down.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstationary_action_principle/","tags":null,"title":"stationary-action principle"},{"categories":null,"contents":"A statistic is a measure of something\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e is a measure of something\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstastistic/","tags":null,"title":"statistic"},{"categories":null,"contents":"To put some math behind that very, extremely simple Dyson\u0026rsquo;s Model, we will declare a vector space \\(K\\) which encodes the possible set of states that our \u0026ldquo;cell\u0026rdquo; can be in. 
Now, declare a transition matrix \\(M \\in \\mathcal{L}(K)\\) which maps from one state to another.\nFinally, then, we can define a function \\(P(k)\\) for the \\(k\\) th state of our cell.\nThat is, then:\n\\begin{equation} P(k+1) = M P(k) \\end{equation}\n(as the \u0026ldquo;next\u0026rdquo; state is simply \\(M\\) applied onto the previous state).\nRolling that out, we have:\n\\begin{equation} P(k) = M^{k} P(0) \\end{equation}\n","html":"\u003cp\u003eTo put some math behind that \u003cem\u003every, extremely\u003c/em\u003e simple \u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model\u003c/a\u003e, we will declare a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(K\\) which encodes the possible set of states that our \u0026ldquo;\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e\u0026rdquo; can be in. Now, declare a transition \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e \\(M \\in \\mathcal{L}(K)\\) which maps from one state to another.\u003c/p\u003e\n\u003cp\u003eFinally, then, we can define a function \\(P(k)\\) for the \\(k\\) th state of our \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThat is, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k+1) = M P(k)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(as the \u0026ldquo;next\u0026rdquo; state is simply \\(M\\) applied onto the previous state).\u003c/p\u003e\n\u003cp\u003eRolling that out, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k) = M^{k} P(0)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstepwise_evolution/","tags":null,"title":"Stepwise Evolution"},{"categories":null,"contents":"This is a theory that come back to CAPM.\n","html":"\u003cp\u003eThis is a theory that come back to \u003ca 
href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstochastic_discount_factor/","tags":null,"title":"Stochastic Discount Factor"},{"categories":null,"contents":"\\begin{equation} \\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} L(f_{\\theta}(x), y) \\end{equation}\nthis terminates when theta differences becomes small, or when progress halts: like when \\(\\theta\\) begins going up instead.\nwe update the weights in SGD by taking a single random sample and moving weights to that direction.\nbatch gradient descent stochastic gradient descent gives choppy movements because it does one sample at once.\nbatch gradient descent does it over the entire dataset, which is fine but its slow.\nmini-batch gradient mini-batches helps take advantage of both by training over groups of \\(m\\) samples\nregularization regularization penalize large weights to reduce over-fitting\n","html":"\u003cp\u003e\\begin{equation}\n\\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} L(f_{\\theta}(x), y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis terminates when theta differences becomes small, or when progress halts: like when \\(\\theta\\) begins going up instead.\u003c/p\u003e\n\u003cp\u003ewe update the weights in SGD by taking a \u003cstrong\u003esingle random sample\u003c/strong\u003e and moving weights to that direction.\u003c/p\u003e\n\u003ch2 id=\"batch-gradient-descent\"\u003ebatch gradient descent\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhstochastic_gradient_descent/\"\u003estochastic gradient descent\u003c/a\u003e gives choppy movements because it does one sample at once.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#batch-gradient-descent\"\u003ebatch gradient descent\u003c/a\u003e does it over the entire dataset, which is fine but its slow.\u003c/p\u003e\n\u003ch2 id=\"mini-batch-gradient\"\u003emini-batch gradient\u003c/h2\u003e\n\u003cp\u003emini-batches helps take advantage of both by training 
over groups of \\(m\\) samples\u003c/p\u003e\n\u003ch2 id=\"regularization\"\u003eregularization\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#regularization\"\u003eregularization\u003c/a\u003e penalize large weights to reduce over-fitting\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstochastic_gradient_descent/","tags":null,"title":"stochastic gradient descent"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhstochat/","tags":null,"title":"stochat"},{"categories":null,"contents":"the stock indicies\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhstock_indicies/\"\u003estock indicies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstock_indicies/","tags":null,"title":"stock indicies"},{"categories":null,"contents":"Stock Issues are policy debate doctrines which divides the debate into 5 subtopical ideas.\nWikipedia\nHarms: what are the problems in the status quo?\nInherency: what are these problems not already being solved? (Or not already being solved in the best way?)\nSignificancy: comparing the advantages and disadvantages of the status quo and your proposed solution, why is the proposed solution more worthy than the status quo?\nThe Ws:\nWhy this? Why is your proposed solution the best (most effective, or most feasible, or fastest, etc.) one?\nWhy now? Why is now the best time to build this solution?\nWhy you? 
Why are you (and your team) the best builders of this solution?\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstock_issues_debate/\"\u003eStock Issues\u003c/a\u003e are policy debate doctrines which divides the debate into 5 subtopical ideas.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Stock_issues\"\u003eWikipedia\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eHarms: what are the problems in the status quo?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eInherency: what are these problems not already being solved? (Or not already being solved in the best way?)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSignificancy: comparing the advantages and disadvantages of the status quo and your proposed solution, why is the proposed solution more worthy than the status quo?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe Ws:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eWhy this? Why is your proposed solution the best (most effective, or most feasible, or fastest, etc.) one?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhy now? Why is now the best time to build this solution?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhy you? 
Why are you (and your team) the best builders of this solution?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstock_issues_debate/","tags":null,"title":"Stock Issues (Debate)"},{"categories":null,"contents":" Around 20,000 stocks valued at $47 Trillion Only about 2,000 matter Transaction frequency is high, liquidity is generally low \u0026mdash; grade sizes are small Roughly 59 places to trade stock (exchanges + darkpools) ","html":"\u003cul\u003e\n\u003cli\u003eAround 20,000 stocks valued at $47 Trillion\u003c/li\u003e\n\u003cli\u003eOnly about 2,000 matter\u003c/li\u003e\n\u003cli\u003eTransaction frequency is high, liquidity is generally low \u0026mdash; grade sizes are small\u003c/li\u003e\n\u003cli\u003eRoughly 59 places to trade stock (exchanges + \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003es)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstock_market_survey/","tags":null,"title":"stock market survey"},{"categories":null,"contents":"strain is the proportional deformation of a material given some stress applied\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstrain/\"\u003estrain\u003c/a\u003e is the proportional \u003cstrong\u003edeformation\u003c/strong\u003e of a material given some \u003ca href=\"/posts/kbhstress/\"\u003estress\u003c/a\u003e applied\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrain/","tags":null,"title":"strain"},{"categories":null,"contents":"revising:\ncharacter as subjects, actions as verbs old before new (connect sentences\u0026rsquo; subjects from tail to head) short before long: (say the short phrase before the long phrase, move the verb up front if you can) topic then stress (the last position is being stressed—the thing that\u0026rsquo;s most important to communicate is the end of an utterance) The above principles apply to all units—as in, each sentences paragraphs, and arguments should all follow a similar 
principles ","html":"\u003cp\u003erevising:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003echaracter as subjects, actions as verbs\u003c/li\u003e\n\u003cli\u003eold before new (connect sentences\u0026rsquo; subjects from tail to head)\u003c/li\u003e\n\u003cli\u003eshort before long: (say the short phrase before the long phrase, move the verb up front if you can)\u003c/li\u003e\n\u003cli\u003etopic then stress (the last position is being stressed—the thing that\u0026rsquo;s most important to communicate is the end of an utterance)\u003c/li\u003e\n\u003cli\u003eThe above principles apply to all units—as in, each sentences paragraphs, and arguments should all follow a similar principles\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrategies_to_revise_an_essay/","tags":null,"title":"Strategies to Revise an Essay"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhstress/","tags":null,"title":"stress"},{"categories":null,"contents":"In C, string is an array of chars. C strings don\u0026rsquo;t track their length; each C string always end in an null-terminating character: \\0. This is represents the zero byte.\nThere\u0026rsquo;s a built in function strlen which checks the length of a string without the null-terminating character. 
This function is O(n)!!!\nString Pointer Syntax Sugar Synonyms char str[6]; // these are equivalent char *ptr = str; char *ptr = \u0026amp;str[0]; char *ptr = \u0026amp;str; // DON\u0026#39;T DO THIS // these are equivalent char thirdLetter = str[3]; char thirdLetter = *(str + 3); seven commandments of c strings if we create a string as char[], we can modify its characters because its memory lives in our stack instead of living in a global data segment we can\u0026rsquo;t set char[] as equaling to something, because its not strictly a pointer and instead it refers to an entire block of memory instead of a pointer to the first element (in a same vein, an array\u0026rsquo;s size is fixed and travels with the variable) if we pass char[] as a parameter, it is converted to a char * if we create a string with new string literal as char *thing = \u0026quot;thing\u0026quot;, we can\u0026rsquo;t modify it because its on the global data segment we can set char * equaling to another value because its a pointer adding an offset to a c string gives a substring that\u0026rsquo;s places past the first character if we change characters in a string parameter, these changes will persist passing strings around Strings are passed as a pointer to their first character.\nvoid foo(char *str) { // do string things } char string[6]; // THIS IS A STRING OF LENGTH 5!!!! (beacuse there\u0026#39;s a null terminator) foo(string); // pass the syntax sugar pointer foo(\u0026amp;string[0]); // pass the actual first pointer you won\u0026rsquo;t know whether or not this is the address to a string or a pointer to a single character; so good practice to call it something_str if you\u0026rsquo;d like a string.\ncharacter manipulation checker #include \u0026lt;ctype.h\u0026gt; int main() { isalpha(ch); islower(ch); ... } string manipulations #include \u0026lt;string.h\u0026gt; strcmp When you comparing strings, you can\u0026rsquo;t use == or \u0026lt; or \u0026gt;. 
Instead:\n#include \u0026lt;string.h\u0026gt; int main() { int cmp = strcmp(str1, str2); if (cmp == 0) { // if str1 is equal to str2 } else if (cmp \u0026lt; 0) { // if str1 comes before str2 lexographically } else { // if str2 comes before str1 lexographically } } strcpy Copying strings, dangerously, because buffer overflows are fun.\nThis function does NOT care about buffer overflows, and WILL put in a null terminator.\nstrncopy This function optimize against buffer overflow, but it may not write a null terminator.\nstrcat strncat always puts in a null terminator.\npointer arithmetic with strings Fortunatly, each char is\nstrspn Count the number of characters that are \u0026ldquo;cool\u0026rdquo;: contained within the end\n","html":"\u003cp\u003eIn C, \u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e is an array of \u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003es. C strings don\u0026rsquo;t track their length; each C string always end in an null-terminating character: \u003ccode\u003e\\0\u003c/code\u003e. This is represents the zero byte.\u003c/p\u003e\n\u003cp\u003eThere\u0026rsquo;s a built in function \u003ccode\u003estrlen\u003c/code\u003e which checks the length of a string without the null-terminating character. 
This function is \u003ccode\u003eO(n)\u003c/code\u003e!!!\u003c/p\u003e\n\u003ch2 id=\"string-pointer-syntax-sugar-synonyms\"\u003eString Pointer Syntax Sugar Synonyms\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// these are equivalent\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// DON\u0026#39;T DO THIS\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// these are equivalent\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethirdLetter\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethirdLetter\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"seven-commandments-of-c-string--kbhstring-dot-md--s\"\u003eseven commandments of c \u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003es\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eif we create a string as \u003ccode\u003echar[]\u003c/code\u003e, we can modify its characters because its memory lives in our stack instead of living in a global data segment\u003c/li\u003e\n\u003cli\u003ewe can\u0026rsquo;t set \u003ccode\u003echar[]\u003c/code\u003e as equaling to something, because its not strictly a pointer and instead it refers to an entire block of memory instead of a pointer to the first element (in a same vein, an array\u0026rsquo;s size is fixed and travels with the variable)\u003c/li\u003e\n\u003cli\u003eif we pass \u003ccode\u003echar[]\u003c/code\u003e as a parameter, it is converted to a \u003ccode\u003echar *\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif we create a string with new string literal as \u003ccode\u003echar *thing = \u0026quot;thing\u0026quot;\u003c/code\u003e, we can\u0026rsquo;t modify it because its on the global data segment\u003c/li\u003e\n\u003cli\u003ewe can set \u003ccode\u003echar *\u003c/code\u003e equaling to another value because its a pointer\u003c/li\u003e\n\u003cli\u003eadding an offset to a c string gives a substring that\u0026rsquo;s places past the first character\u003c/li\u003e\n\u003cli\u003eif we change characters in a string parameter, these changes will persist\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"passing-strings-around\"\u003epassing strings around\u003c/h2\u003e\n\u003cp\u003eStrings are passed as a pointer to their first 
character.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efoo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do string things\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// THIS IS A STRING OF LENGTH 5!!!! 
(beacuse there\u0026#39;s a null terminator)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003efoo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// pass the syntax sugar pointer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003efoo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// pass the actual first pointer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eyou won\u0026rsquo;t know whether or not this is the address to a string or a pointer to a single character; so good practice to call it \u003ccode\u003esomething_str\u003c/code\u003e if you\u0026rsquo;d like a string.\u003c/p\u003e\n\u003ch2 id=\"character-manipulation-checker\"\u003echaracter manipulation checker\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan 
style=\"color:#75715e\"\u003e\u0026lt;ctype.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eisalpha\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ech\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eislower\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ech\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e...\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-08-28_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"string-manipulations\"\u003estring manipulations\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;string.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-17-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"strcmp\"\u003estrcmp\u003c/h3\u003e\n\u003cp\u003eWhen you comparing \u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003es, you can\u0026rsquo;t use == or \u0026lt; or \u0026gt;. Instead:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;string.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecmp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#75af00\"\u003estrcmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecmp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if str1 is equal to str2\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecmp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if str1 comes before str2 
lexographically\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if str2 comes before str1 lexographically\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"strcpy\"\u003estrcpy\u003c/h3\u003e\n\u003cp\u003eCopying strings, dangerously, because buffer \u003ca href=\"/posts/kbhbinary_number_system/#overflow\"\u003eoverflow\u003c/a\u003es are fun.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-22-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis function does NOT care about buffer overflows, and \u003cstrong\u003eWILL\u003c/strong\u003e put in a null terminator.\u003c/p\u003e\n\u003ch3 id=\"strncopy\"\u003estrncopy\u003c/h3\u003e\n\u003cp\u003eThis function optimize \u003cstrong\u003eagainst\u003c/strong\u003e buffer overflow, but it may not write a null terminator.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-09_10-42-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"strcat\"\u003estrcat\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-10-09_10-45-17_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003estrncat always puts in a null terminator.\u003c/p\u003e\n\u003ch3 id=\"pointer-arithmetic-with-strings\"\u003epointer arithmetic with strings\u003c/h3\u003e\n\u003cp\u003eFortunatly, each \u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e is\u003c/p\u003e\n\u003ch3 id=\"strspn\"\u003estrspn\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-09_11-16-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCount the number of characters that are \u0026ldquo;cool\u0026rdquo;: contained within the end\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstring/","tags":null,"title":"string"},{"categories":null,"contents":"This is a precursor to MDP planning:\nstates: conjunction of \u0026ldquo;fluents\u0026rdquo; (which are state) actions: transition between fulents transitions: deleting of older, changed parts of fluents, adding new parts Planning Domain Definition Language A LISP used to specify a STRIPS-style planning problem.\nHierarchical Task Network Decompose classical planning into a hierarchy of actions Leverage High level actions to generate a coarse plan Refine to smaller problems ","html":"\u003cp\u003eThis is a precursor to \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e planning:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estates: conjunction of \u0026ldquo;fluents\u0026rdquo; (which are state)\u003c/li\u003e\n\u003cli\u003eactions: transition between fulents\u003c/li\u003e\n\u003cli\u003etransitions: deleting of older, changed parts of fluents, adding new parts\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"planning-domain-definition-language\"\u003ePlanning Domain Definition Language\u003c/h2\u003e\n\u003cp\u003eA LISP used to specify a \u003ca href=\"/posts/kbhstrips_style_planning/\"\u003eSTRIPS-style planning\u003c/a\u003e problem.\u003c/p\u003e\n\u003ch2 
id=\"hierarchical-task-network\"\u003eHierarchical Task Network\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eDecompose classical planning into a hierarchy of actions\u003c/li\u003e\n\u003cli\u003eLeverage High level actions to generate a coarse plan\u003c/li\u003e\n\u003cli\u003eRefine to smaller problems\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrips_style_planning/","tags":null,"title":"STRIPS-style planning"},{"categories":null,"contents":"Reading Notes Strong Free Will vs. Weak Free Will \u0026mdash; \u0026ldquo;will\u0026rdquo; and \u0026ldquo;bells inequality\u0026rdquo; is a demonstration of indeterminism/randomness between particles \u0026mdash; but indeterminism and randomness a demonstration of will.\nThat if humans have free will, it should be spawened from the indeterminism of elementary particles It asserts, roughly, that if indeed we humans have free will, then elementary particles already have their own small share of this valuable commodity.\nSPIN Axiom SPIN Axiom: Measurements of the squared (components of) spin of a spin 1 particle in three orthogonal directions always give the answers 1, 0, 1 in some order.\nTWIN Axiom Paired particles will come up with same measurements if measured in the same way\nThe TWIN Axiom: For twinned spin 1 particles, suppose experimenter A performs a triple experiment of measuring the squared spin component of particle a in three orthogonal directions x, y, z, while experimenter B measures the twinned par- ticle b in one direction, w . 
Then if w happens to be in the same direction as one of x, y, z, experimenter B’s measurement will necessarily yield the same answer as the corresponding measurement by A.\nFree as something that cannot be an uncurried function of previous states To say that A’s choice of x, y, z is free means more precisely that it is not determined by (i.e., is not a function of) what has happened at earlier times (in any inertial frame).\nMIN Axiom Choice of direction of measurement of one twinned qubit does not influence the results of the current qubit (unless they happen to align.)\nThe MIN Axiom: Assume that the experiments performed by A and B are space-like separated. Then experimenter B can freely choose any one of the 33 particular directions w , and a’s response is independent of this choice. Similarly and inde- pendently, A can freely choose any one of the 40 triples x, y, z, and b’s response is independent of that choice.\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading Notes\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhstrong_free_will/#reading-notes\"\u003eStrong Free Will\u003c/a\u003e vs. 
Weak Free Will \u0026mdash; \u0026ldquo;will\u0026rdquo; and \u0026ldquo;bells inequality\u0026rdquo; is a demonstration of indeterminism/randomness between particles \u0026mdash; but indeterminism and randomness a demonstration of will.\u003c/p\u003e\n\u003ch3 id=\"that-if-humans-have-free-will-it-should-be-spawened-from-the-indeterminism-of-elementary-particles\"\u003eThat if humans have free will, it should be spawened from the indeterminism of elementary particles\u003c/h3\u003e\n\u003cp\u003eIt asserts, roughly, that if indeed we humans have free will, then elementary particles already have their own small share of this valuable commodity.\u003c/p\u003e\n\u003ch3 id=\"spin-axiom\"\u003eSPIN Axiom\u003c/h3\u003e\n\u003cp\u003eSPIN Axiom: Measurements of the squared (components of) spin of a spin 1 particle in three orthogonal directions always give the answers 1, 0, 1 in some order.\u003c/p\u003e\n\u003ch3 id=\"twin-axiom\"\u003eTWIN Axiom\u003c/h3\u003e\n\u003cp\u003ePaired particles will come up with same measurements if measured in the same way\u003c/p\u003e\n\u003cp\u003eThe TWIN Axiom: For twinned spin 1 particles, suppose experimenter A performs a triple experiment of measuring the squared spin component of particle a in three orthogonal directions x, y, z, while experimenter B measures the twinned par- ticle b in one direction, w . 
Then if w happens to be in the same direction as one of x, y, z, experimenter B’s measurement will necessarily yield the same answer as the corresponding measurement by A.\u003c/p\u003e\n\u003ch3 id=\"free-as-something-that-cannot-be-an-uncurried-function-of-previous-states\"\u003eFree as something that cannot be an uncurried function of previous states\u003c/h3\u003e\n\u003cp\u003eTo say that A’s choice of x, y, z is free means more precisely that it is not determined by (i.e., is not a function of) what has happened at earlier times (in any inertial frame).\u003c/p\u003e\n\u003ch3 id=\"min-axiom\"\u003eMIN Axiom\u003c/h3\u003e\n\u003cp\u003eChoice of direction of measurement of one twinned qubit does not influence the results of the current qubit (unless they happen to align.)\u003c/p\u003e\n\u003cp\u003eThe MIN Axiom: Assume that the experiments performed by A and B are space-like separated. Then experimenter B can freely choose any one of the 33 particular directions w , and a’s response is independent of this choice. 
Similarly and inde- pendently, A can freely choose any one of the 40 triples x, y, z, and b’s response is independent of that choice.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrong_free_will/","tags":null,"title":"Strong Free Will"},{"categories":null,"contents":"proof by induction but assuming that all \\(k \u0026lt; n\\) is given.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhproof_by_induction/\"\u003eproof by induction\u003c/a\u003e but assuming that all \\(k \u0026lt; n\\) is given.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrong_induction/","tags":null,"title":"strong induction"},{"categories":null,"contents":"We learn a Bayes Net grphical structure by following Bayes rule:\n\\begin{align} P(G|D) \u0026amp;\\propto P(D|G) P(G) \\\\ \u0026amp;= P(G) \\int P(D | \\theta, G) P(\\theta|G) d\\theta \\\\ \u0026amp;= P(G) \\prod_{i=1}^{n} \\prod_{j=1}^{q_{i}} \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0} + m_{i,j,0})} \\prod_{k=1}^{r_{i}} \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})} \\end{align}\nwhere, we define: \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\nThe actual integration process is not provided, but mostly uninteresting. See Beta Distribution for a flavour of how it came about.\nThis is hard. We are multiply many gammas together, which is computationally lame. So instead, we use\nBaysian Network Scoring Log Bayesian Score is a score for measure of well-fittingness of a Baysian Network against some data. 
We sometimes call this the Baysian Score.\nLet:\n\\(x_{1:n}\\) be variables \\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took \\(G\\) is the graph \\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\)) \\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\) \\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator) Let us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\).\nWe aim to compute:\n\\begin{equation} \\log P(G|D) = \\log P(G) + \\sum_{i=1}^{n} \\sum_{j=1}^{q_{i}} \\qty[\\qty(\\log \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0}+ m_{i,j,0})}) + \\sum_{k=1}^{r_{i}} \\log \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})}] \\end{equation}\nIn practice, uniform prior of the graph is mostly used always. Assuming uniform priors, so \\(P(G)=1\\) and therefore we can drop the first term. Recall that \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\nWe can effectively take a prior structure, and blindly compute the Baysian Score vis a vi your data, and you will get an answer which whether or not something is the simplest model.\nOf course, we can\u0026rsquo;t just try all graphs to get a graph structure. Instead, we use some search algorithm:\nK2 Algorithm Runs in polynomial time, but doesn\u0026rsquo;t grantee an optimal structure. 
Let us create a network with a sequence of variables with some ordering:\n\\begin{equation} x_1, x_2, x_3, x_4 \\end{equation}\nFor K2 Algorithm, we assume a uniform distribution initially before the graph is learned.\nwe lay down \\(x_1\\) onto the graph we then try to lay down \\(x_{2}\\): compute the Baysian Scores of two networks: \\(x_1 \\to x_2\\) OR \\(x_1\\ x_2\\) (see if connecting \\(x_2\\) to \\(x_1\\) helps). keep the structure with the maximum score we then try to lay down \\(x_{3}\\): compute the Baysian Score of \\(x_1 \\to x_3\\) (plus whatever decision you made about \\(x_2\\)) OR \\(x_1, x_3\\); keep the one that works the best. Then, try the same to decide whether to connect \\(x_2\\) to \\(x_3\\) as well Repeat until you considered all nodes After you try out one ordering, you should try out another one. Because you can only add parents from elements before you in the list, you will never get a cycle.\nLocal Graph Search Start with an uncorrected graph. Search on the following actions:\nbasic graph operations:\nadd edge remove edge flip edge A graph\u0026rsquo;s neighborhood is the graphs for whicthey are one basic graph operation away.\nCreate a cycle detection scheme.\nNow, just try crap. Keep computing a Baysian Score after you tried something, if its good, keep it. If its not, don\u0026rsquo;t.\nTo prevent you from being stuck in a local minimum:\nperform random restarts perform K2 Algorithm, and then try things out simulated annealing: take a step that\u0026rsquo;s worse for optimizing Baysian Scores genetic algorithms: random population which reproduces at a rate proportional to their score Partially Directed Graph Search We first formulate a partially-directed graph, which is a graph which has some edges, but some edges left to be decided:\nIn this case, edges \\(C \\to D\\) and \\(D \\leftarrow E\\) are both defined. 
\\(A,B,C\\) are left as undirected nodes available to be searched on.\nWe now try out all combinations of arrows that may fit between \\(A,B,C\\), with the constraint of all objects you search on being Markov Equivalent (so, you can\u0026rsquo;t remove or introduce new immoral v-structures).\n","html":"\u003cp\u003eWe learn a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e grphical structure by following \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(G|D) \u0026amp;\\propto P(D|G) P(G) \\\\\n\u0026amp;= P(G) \\int P(D | \\theta, G) P(\\theta|G) d\\theta \\\\\n\u0026amp;= P(G) \\prod_{i=1}^{n} \\prod_{j=1}^{q_{i}} \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0} + m_{i,j,0})} \\prod_{k=1}^{r_{i}} \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, we define: \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\u003c/p\u003e\n\u003cp\u003eThe actual integration process is not provided, but mostly uninteresting. See \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e for a flavour of how it came about.\u003c/p\u003e\n\u003cp\u003eThis is hard. We are multiply many gammas together, which is computationally lame. So instead, we use\u003c/p\u003e\n\u003ch2 id=\"baysian-network-scoring\"\u003eBaysian Network Scoring\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#baysian-network-scoring\"\u003eLog Bayesian Score\u003c/a\u003e is a score for measure of well-fittingness of a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e against some data. 
We sometimes call this the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x_{1:n}\\) be variables\u003c/li\u003e\n\u003cli\u003e\\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took\u003c/li\u003e\n\u003cli\u003e\\(G\\) is the graph\u003c/li\u003e\n\u003cli\u003e\\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\))\u003c/li\u003e\n\u003cli\u003e\\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\)\u003c/li\u003e\n\u003cli\u003e\\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\).\u003c/p\u003e\n\u003cp\u003eWe aim to compute:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log P(G|D) = \\log P(G) + \\sum_{i=1}^{n} \\sum_{j=1}^{q_{i}} \\qty[\\qty(\\log \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0}+ m_{i,j,0})}) + \\sum_{k=1}^{r_{i}} \\log \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn practice, uniform prior of the graph is mostly used always. Assuming uniform priors, so \\(P(G)=1\\) and therefore we can drop the first term. 
Recall that \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\u003c/p\u003e\n\u003cp\u003eWe can effectively take a prior structure, and blindly compute the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e vis a vi your data, and you will get an answer which whether or not something is the simplest model.\u003c/p\u003e\n\u003cp\u003eOf course, we can\u0026rsquo;t just try all graphs to get a graph structure. Instead, we use some search algorithm:\u003c/p\u003e\n\u003ch2 id=\"k2-algorithm\"\u003eK2 Algorithm\u003c/h2\u003e\n\u003cp\u003eRuns in polynomial time, but doesn\u0026rsquo;t grantee an optimal structure. Let us create a network with a sequence of variables with some ordering:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1, x_2, x_3, x_4\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor \u003ca href=\"#k2-algorithm\"\u003eK2 Algorithm\u003c/a\u003e, we assume a \u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e initially before the graph is learned.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe lay down \\(x_1\\) onto the graph\u003c/li\u003e\n\u003cli\u003ewe then try to lay down \\(x_{2}\\): compute the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003es of two networks: \\(x_1 \\to x_2\\) OR \\(x_1\\ x_2\\) (see if connecting \\(x_2\\) to \\(x_1\\) helps). keep the structure with the maximum score\u003c/li\u003e\n\u003cli\u003ewe then try to lay down \\(x_{3}\\): compute the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e of \\(x_1 \\to x_3\\) (plus whatever decision you made about \\(x_2\\)) OR \\(x_1, x_3\\); keep the one that works the best. Then, try the same to decide whether to connect \\(x_2\\) to \\(x_3\\) as well\u003c/li\u003e\n\u003cli\u003eRepeat until you considered all nodes\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAfter you try out one ordering, you should try out another one. 
Because you can only add parents from elements before you in the list, you will never get a cycle.\u003c/p\u003e\n\u003ch2 id=\"local-graph-search\"\u003eLocal Graph Search\u003c/h2\u003e\n\u003cp\u003eStart with an uncorrected graph. Search on the following actions:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#local-graph-search\"\u003ebasic graph operation\u003c/a\u003es:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eadd edge\u003c/li\u003e\n\u003cli\u003eremove edge\u003c/li\u003e\n\u003cli\u003eflip edge\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eA graph\u0026rsquo;s \u003ca href=\"#local-graph-search\"\u003eneighborhood\u003c/a\u003e is the graphs for whicthey are one basic graph operation away.\u003c/p\u003e\n\u003cp\u003eCreate a cycle detection scheme.\u003c/p\u003e\n\u003cp\u003eNow, just try crap. Keep computing a \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e after you tried something, if its good, keep it. If its not, don\u0026rsquo;t.\u003c/p\u003e\n\u003cp\u003eTo prevent you from being stuck in a local minimum:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eperform random restarts\u003c/li\u003e\n\u003cli\u003eperform \u003ca href=\"#k2-algorithm\"\u003eK2 Algorithm\u003c/a\u003e, and then try things out\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003esimulated annealing\u003c/a\u003e: take a step that\u0026rsquo;s worse for optimizing \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003egenetic algorithms: random population which reproduces at a rate proportional to their score\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"partially-directed-graph-search\"\u003ePartially Directed Graph Search\u003c/h2\u003e\n\u003cp\u003eWe first formulate a partially-directed graph, which is a graph which has some edges, but some edges left to be decided:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-10-12_11-09-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIn this case, edges \\(C \\to D\\) and \\(D \\leftarrow E\\) are both defined. \\(A,B,C\\) are left as undirected nodes available to be searched on.\u003c/p\u003e\n\u003cp\u003eWe now try out all combinations of arrows that may fit between \\(A,B,C\\), with the constraint of all objects you search on being \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalent\u003c/a\u003e (so, you can\u0026rsquo;t remove or introduce new \u003ca href=\"/posts/kbhimmoral_v_structure/\"\u003eimmoral v-structure\u003c/a\u003es).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstructure_learning/","tags":null,"title":"structure learning"},{"categories":null,"contents":"Goal: using protein-protein interfaces and docking to learn about the polymerase behavior\nToo bio-y and I\u0026rsquo;m literally not sure how to make of it\n","html":"\u003cp\u003eGoal: using protein-protein interfaces and docking to learn about the polymerase behavior\u003c/p\u003e\n\u003cp\u003eToo bio-y and I\u0026rsquo;m literally not sure how to make of it\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstructure_of_covid_replication/","tags":null,"title":"Structure of COVID Replication"},{"categories":null,"contents":"Key Sequence Notation New Concepts Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_dec012023/","tags":null,"title":"SU-CS107 DEC012023"},{"categories":null,"contents":"Not published 
to prevent AIV.\n","html":"\u003cp\u003eNot published to prevent AIV.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_midterm_sheet/","tags":null,"title":"SU-CS107 Midterm Sheet"},{"categories":null,"contents":" privacy ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_nov102023/","tags":null,"title":"SU-CS107 NOV102023"},{"categories":null,"contents":"Key Sequence Notation New Concepts privacy memory allocation optimization caching Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmemory_allocation/\"\u003ememory allocation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcaching/\"\u003ecaching\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_nov132023/","tags":null,"title":"SU-CS107 NOV132023"},{"categories":null,"contents":" optimization ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_nov272023/","tags":null,"title":"SU-CS107 NOV272023"},{"categories":null,"contents":"Notation New Concepts computer number system 
bits and bytes base 10, base 2, base 16 integers unsigned integers signed integers and two\u0026rsquo;s complement Important Results / Claims conversion from base 10 to base 2 min and max of binary \u0026ldquo;Which bit is missing\u0026rdquo; two\u0026rsquo;s complement Questions Interesting Factoids ","html":"\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/\"\u003ecomputer number system\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#bit\"\u003ebits\u003c/a\u003e and \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebytes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#base-10\"\u003ebase 10\u003c/a\u003e, \u003ca href=\"/posts/kbhbinary_number_system/#base-2\"\u003ebase 2\u003c/a\u003e, \u003ca href=\"/posts/kbhbinary_number_system/#base-16\"\u003ebase 16\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eintegers\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#unsigned-integers\"\u003eunsigned integers\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#signed-integers\"\u003esigned integers\u003c/a\u003e and \u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#conversion-from-base-10-to-base-2\"\u003econversion from base 10 to base 2\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#min-and-max-of-binary\"\u003emin and max of binary\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhbinary_number_system/#which-bit-is-missing\"\u003e\u0026ldquo;Which bit is missing\u0026rdquo;\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct022023/","tags":null,"title":"SU-CS107 OCT022023"},{"categories":null,"contents":"Key Sequence Notation New Concepts two\u0026rsquo;s complement overflow casting Important Results / Claims mnemonic for remembering where overflows happened automatic signed promotion automated type size promotion Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#overflow\"\u003eoverflow\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/\"\u003ecasting\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_s_complement/#mnemonic-for-remembering-where-overflows-happened\"\u003emnemonic for remembering where overflows happened\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#automatic-signed-promotion\"\u003eautomatic signed promotion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#automated-type-size-promotion\"\u003eautomated type size promotion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct032023/","tags":null,"title":"SU-CS107 OCT032023"},{"categories":null,"contents":"Key Sequence Notation New Concepts casting sign promotion type size trunctaion bitwise operations bitmask GDB Important Results / Claims sizes of stuff Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/\"\u003ecasting\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#sign-promotion\"\u003esign promotion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#type-size-promotion\"\u003etype size trunctaion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbitwise_operations/\"\u003ebitwise operations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbitmask/\"\u003ebitmask\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgdb/\"\u003eGDB\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#sizes-of-stuff\"\u003esizes of stuff\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct042023/","tags":null,"title":"SU-CS107 OCT042023"},{"categories":null,"contents":"Key Sequence Notation New Concepts char ASCII string string manpulations strcmp strcpy Important Results / Claims Absolute Value 
Function Questions Interesting Factoids eigenvalue\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhascii/\"\u003eASCII\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estring manpulations\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcmp\"\u003estrcmp\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcpy\"\u003estrcpy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhabsolute_value_function/\"\u003eAbsolute Value Function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct062023/","tags":null,"title":"SU-CS107 OCT062023"},{"categories":null,"contents":"Key Sequence Notation New Concepts string Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct092023/","tags":null,"title":"SU-CS107 OCT092023"},{"categories":null,"contents":"Key Sequence Notation New Concepts buffer overflow valgrind pointer memory Important Results / Claims identifying buffer overflows Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbuffer_overflow/#valgrind\"\u003evalgrind\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbuffer_overflow/#identifying-buffer-overflow--kbhbuffer-overflow-dot-md--s\"\u003eidentifying buffer overflows\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct112023/","tags":null,"title":"SU-CS107 OCT112023"},{"categories":null,"contents":"Key Sequence Notation New Concepts pointer and address operator string Important Results / Claims seven commandments of c strings String Pointer Syntax Sugar Synonyms Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 
id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e and \u003ca href=\"/posts/kbhpointer/#address-operator\"\u003eaddress operator\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#seven-commandments-of-c-id-11f0accb-37d9-4785-afea-1aeb53c8823f-string-s\"\u003eseven commandments of c strings\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#string-pointer-syntax-sugar-synonyms\"\u003eString Pointer Syntax Sugar Synonyms\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct132023/","tags":null,"title":"SU-CS107 OCT132023"},{"categories":null,"contents":"Key Sequence Notation New Concepts strings array Pointer Arithmetic Important Results / Claims REMEMBER: you CANNOT change strings in the data segment Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharray/\"\u003earray\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharray/#pointer-arithmetic\"\u003ePointer Arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eREMEMBER: you \u003cstrong\u003eCANNOT\u003c/strong\u003e 
change strings in the data segment\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct162023/","tags":null,"title":"SU-CS107 OCT162023"},{"categories":null,"contents":"Key Sequence Notation New Concepts stack heap malloc and calloc and free strdup realloc generic Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/\"\u003eheap\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/#malloc\"\u003emalloc\u003c/a\u003e and \u003ca href=\"/posts/kbhheap/#calloc\"\u003ecalloc\u003c/a\u003e and \u003ca href=\"/posts/kbhheap/#free\"\u003efree\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/#strdup\"\u003estrdup\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/#realloc\"\u003erealloc\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/\"\u003egeneric\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct182023/","tags":null,"title":"SU-CS107 OCT182023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Little Endian generics memcpy, memmove Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey 
Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlittle_endian/\"\u003eLittle Endian\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/\"\u003egenerics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/#memcpy\"\u003ememcpy\u003c/a\u003e, \u003ca href=\"/posts/kbhgeneric/#memmove\"\u003ememmove\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct2023/","tags":null,"title":"SU-CS107 OCT202023"},{"categories":null,"contents":"Key Sequence Notation New Concepts generics memcpy, memmove Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/\"\u003egenerics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/#memcpy\"\u003ememcpy\u003c/a\u003e, \u003ca href=\"/posts/kbhgeneric/#memmove\"\u003ememmove\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct232023/","tags":null,"title":"SU-CS107 OCT232023"},{"categories":null,"contents":"Key Sequence Notation New Concepts function 
pointers Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfunction/#function-pointers\"\u003efunction pointers\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct252023/","tags":null,"title":"SU-CS107 OCT252023"},{"categories":null,"contents":"Key Sequence Notation New Concepts sorting functions assembly Registers Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsorting_functions/\"\u003esorting functions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassembly/\"\u003eassembly\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassembly/#register\"\u003eRegister\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct272023/","tags":null,"title":"SU-CS107 OCT272023"},{"categories":null,"contents":"Core Themes of CS107 how and why of 107:\nhow is program data represented in the hardware how does the heap work and how is it implemented how does a 
computer know how run code how does an executable map onto computer systems why is my program doing one thing when I expect it to do something else \u0026ldquo;why is this broken system behaving the way it does?\u0026rdquo;\nCore Goals of CS107 fluency pointers and memory, and how to make use of them an executable\u0026rsquo;s address space + runtime behavior competency the translation of C to and from assembly implement programs with limits of computer arithmetic identify bottlenecks and improve runtime performance navigate Unix ethical frameworks to design and implement software exposure computer architecture\nContent of CS107 bits and bytes chars and c strings pointers stacks and heaps generics: use them assembly: reverse an engineering of binary heap allocators: implement malloc and free ","html":"\u003ch2 id=\"core-themes-of-cs107\"\u003eCore Themes of CS107\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003ehow and why\u003c/strong\u003e of 107:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e is program data represented in the hardware\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e does the heap work and \u003cstrong\u003ehow\u003c/strong\u003e is it implemented\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e does a computer know how run code\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e does an executable map onto computer systems\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ewhy\u003c/strong\u003e is my program doing one thing when I expect it to do something else\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;why is this broken system behaving the way it does?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"core-goals-of-cs107\"\u003eCore Goals of CS107\u003c/h2\u003e\n\u003ch3 id=\"fluency\"\u003efluency\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003epointers and memory, and how to make use of them\u003c/li\u003e\n\u003cli\u003ean 
executable\u0026rsquo;s address space + runtime behavior\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"competency\"\u003ecompetency\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ethe translation of C to and from assembly\u003c/li\u003e\n\u003cli\u003eimplement programs with limits of computer arithmetic\u003c/li\u003e\n\u003cli\u003eidentify bottlenecks and improve runtime performance\u003c/li\u003e\n\u003cli\u003enavigate Unix\u003c/li\u003e\n\u003cli\u003eethical frameworks to design and implement software\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exposure\"\u003eexposure\u003c/h3\u003e\n\u003cp\u003ecomputer architecture\u003c/p\u003e\n\u003ch2 id=\"content-of-cs107\"\u003eContent of CS107\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebits and bytes\u003c/li\u003e\n\u003cli\u003echars and c strings\u003c/li\u003e\n\u003cli\u003epointers stacks and heaps\u003c/li\u003e\n\u003cli\u003egenerics: use them\u003c/li\u003e\n\u003cli\u003eassembly: reverse an engineering of binary\u003c/li\u003e\n\u003cli\u003eheap allocators: implement malloc and free\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_sep272023/","tags":null,"title":"SU-CS107 SEP272023"},{"categories":null,"contents":"New Concepts Unix C printf bool integer Important Results / Claims principles of C C limitations ","html":"\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc/\"\u003eC\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc_basic_operations/\"\u003eprintf\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbool/\"\u003ebool\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / 
Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc/#principles-of-c\"\u003eprinciples of C\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc/#c-limitations\"\u003eC limitations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_sep292023/","tags":null,"title":"SU-CS107 SEP292023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Mencius Philosophy types of harm protected group Important Results / Claims Procedural vs. Distributive Fairness Questions Interesting Factoids logistic regression is a linear classifier Naive Bayes is a linear classier: there is literally no interaction between input features ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmencius_philosophy/\"\u003eMencius Philosophy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtypes_of_harm/\"\u003etypes of harm\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprocedural_vs_distributive_fairness/\"\u003eProcedural vs. 
Distributive Fairness\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elogistic regression is a linear classifier\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e is a linear classier: there is literally no interaction between input features\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_dec012023/","tags":null,"title":"SU-CS109 DEC012023"},{"categories":null,"contents":"Diffusion Models We can consider a model between random noise and trees.\nFor every step, we sample Gaussian noise and add it to the image. The original approach adds Gaussian to the pixels, and nowadays people replace the pixel.\nUsually, there is a few thousand steps of noising.\nWhy is it that we can\u0026rsquo;t have a one-step policy from noise to pictures? Because of a physics result that says the stability of diffusion becomes intractable at too large steps.\nloss function One way we can model our objective is as a MLE. Because we are continuously adding noise, we can assume that\n\\begin{equation} y \\sim \\mathcal{N}(\\mu = \\hat{y}(\\theta), \\sigma^{2}=k) \\end{equation}\nIf you compute MLE over the choice of \\(\\hat{y}(\\theta)\\), you get the squared error.\nELBO A cool loss function that diffusion actually uses that leverages the fact above but considers the entire diffusion process.\nLSTMs Big text generation flaw with LSTMs: the latent state vector has to contain information about the ENTIRE sentence and have the information propagated through recursion. 
Information\nCross Entropy its MLE over a multinomials; the counts of everything that\u0026rsquo;s not the one-hot thing just so happens to be 0.\nWe are essentially computing the derivative of:\n\\begin{equation} \\arg\\max_{p_{correct}} p_{correct} \\end{equation}\nwhich is trying to maximize the categorical of only the correct element.\n","html":"\u003ch2 id=\"diffusion-models\"\u003eDiffusion Models\u003c/h2\u003e\n\u003cp\u003eWe can consider a model between random noise and trees.\u003c/p\u003e\n\u003cp\u003eFor every step, we sample Gaussian noise and \u003cstrong\u003eadd\u003c/strong\u003e it to the image. The original approach adds Gaussian to the pixels, and nowadays people replace the pixel.\u003c/p\u003e\n\u003cp\u003eUsually, there is a few thousand steps of noising.\u003c/p\u003e\n\u003cp\u003eWhy is it that we can\u0026rsquo;t have a one-step policy from noise to pictures? Because of a physics result that says the stability of diffusion becomes intractable at too large steps.\u003c/p\u003e\n\u003ch3 id=\"loss-function\"\u003eloss function\u003c/h3\u003e\n\u003cp\u003eOne way we can model our objective is as a \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e. Because we are continuously adding noise, we can assume that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny \\sim \\mathcal{N}(\\mu = \\hat{y}(\\theta), \\sigma^{2}=k)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf you compute MLE over the choice of \\(\\hat{y}(\\theta)\\), you get the squared error.\u003c/p\u003e\n\u003ch3 id=\"elbo\"\u003eELBO\u003c/h3\u003e\n\u003cp\u003eA cool loss function that diffusion actually uses that leverages the fact above but considers the entire diffusion process.\u003c/p\u003e\n\u003ch3 id=\"lstms\"\u003eLSTMs\u003c/h3\u003e\n\u003cp\u003eBig text generation flaw with LSTMs: the latent state vector has to contain information about the ENTIRE sentence and have the information propagated through recursion. 
Information\u003c/p\u003e\n\u003ch3 id=\"cross-entropy\"\u003eCross Entropy\u003c/h3\u003e\n\u003cp\u003eits MLE over a multinomials; the counts of everything that\u0026rsquo;s not the one-hot thing just so happens to be 0.\u003c/p\u003e\n\u003cp\u003eWe are essentially computing the derivative of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{p_{correct}} p_{correct}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is trying to maximize the categorical of only the correct element.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_dec042023/","tags":null,"title":"SU-CS109 DEC042023"},{"categories":null,"contents":" standard normal density function, and formula for phi for ARBITURARY normals (x-u/sigma) practice using inverse phi BEWARE that sigma is not sigma squared PDF AND CDF, Mean, Var, params, Generative Story for: uniform distribution and the normal distribution all of these continuity correction permutation and combinations formula counting formulas: binning, stars and bars, and the counting methods tree from beginning of class probability theorems: law of total probabaly, baysian, demorgans, counting with and and or multinomial distribution binomial coefficient (i.e. 
combinations formula), multinomial coefficient joint probability distribution and their table Naive Bayes divison ratio trick relative probability ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgaussian_distribution/#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e, and formula for phi for ARBITURARY normals (x-u/sigma)\n\u003cul\u003e\n\u003cli\u003epractice using \u003cstrong\u003einverse phi\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBEWARE that sigma is not sigma squared\u003c/li\u003e\n\u003cli\u003ePDF AND CDF, Mean, Var, params, Generative Story for:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e and the \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcs_probability_index/#what-random-variable-should-i-use\"\u003eall of these\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e and \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es formula\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e formulas: binning, stars and bars, and the counting methods tree from beginning of class\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e theorems: law of total probabaly, baysian, demorgans, counting with and and or\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobablistic_model/#multinomial-distribution\"\u003emultinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebinomial \u003ca 
href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e (i.e. \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es formula), \u003ca href=\"/posts/kbhmultinomial_coefficient/\"\u003emultinomial coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e and their table\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e divison ratio trick\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrelative_probability/\"\u003erelative probability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_midterm/","tags":null,"title":"SU-CS109 Midterm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_midterm_sheet/","tags":null,"title":"SU-CS109 Midterm Sheet"},{"categories":null,"contents":"What if you don\u0026rsquo;t know about a probability of success?\nBeta Distribution time!!!\nMulti-Arm Bandit See Multi-Arm Bandit\nStrategies:\nupper confidence bound: take the action with theh highest n-tn-thonfidence bound Posterior Sampling: take a sample from each Beta Distributions distribution; take the action that has a higher probability of success based on their r ","html":"\u003cp\u003eWhat if you don\u0026rsquo;t know about a probability of success?\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e time!!!\u003c/p\u003e\n\u003ch2 id=\"multi-arm-bandit--kbhexploration-and-exploitation-dot-md\"\u003e\u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eMulti-Arm Bandit\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eMulti-Arm Bandit\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eStrategies:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhdirected_exploration/#quantile-exploration\"\u003eupper confidence bound\u003c/a\u003e: take the action with theh highest n-tn-thonfidence bound\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/#posterior-sampling\"\u003ePosterior Sampling\u003c/a\u003e: take a sample from each \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003es distribution; take the action that has a higher probability of success based on their r\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov012023/","tags":null,"title":"SU-CS109 NOV012023"},{"categories":null,"contents":"Key Sequence Notation New Concepts IID Zero-Sum Game central limit theorem bootstrap Important Results / Claims Sum of Two Dice adding random variables Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzero_sum_game/\"\u003eZero-Sum Game\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_two_dice/\"\u003eSum of Two Dice\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#adding-random-variables\"\u003eadding random variables\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 
id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov032023/","tags":null,"title":"SU-CS109 NOV032023"},{"categories":null,"contents":"Key Sequence Notation New Concepts central limit theorem sampling statistics Important Results / Claims sample mean sample variance standard error of the mean: \u0026ldquo;variance of the mean\u0026rdquo;: \u0026ldquo;how wrong is your meassured mean\u0026rdquo; Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sampling-statistics\"\u003esampling statistics\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#standard-error-of-the-mean\"\u003estandard error of the mean\u003c/a\u003e: \u0026ldquo;variance of the mean\u0026rdquo;: \u0026ldquo;how wrong is your meassured mean\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov062023/","tags":null,"title":"SU-CS109 NOV062023"},{"categories":null,"contents":"Key Sequence Notation New Concepts central limit theorem sampling statistics sample mean, sample variance, standard 
error of the mean bootstrap Important Results / Claims p-value from bootstrap Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sampling-statistics\"\u003esampling statistics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e, \u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e, \u003ca href=\"/posts/kbhrandom_variables/#standard-error-of-the-mean\"\u003estandard error of the mean\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboostrap/#p-value-from-bootstrap\"\u003ep-value from bootstrap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov082023/","tags":null,"title":"SU-CS109 NOV082023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Bernoulli distribution as an indicator conditional expectation law of total expectation Important Results / Claims Review: expectation. Expectation of the sums of random variables are linear regardless of whether or not the variables are IID, independent, whatever.\n\u0026ldquo;expectation of the sum is the sum of the expectations\u0026rdquo;. 
\u0026ldquo;Can I write the expectation I want to calculate as the sum of something else?\u0026rdquo;\nQuestions Interesting Factoids \\(\\mathbb{E}[Y]\\) =\n\\(x=1\\): 3 \\(x=2\\): 5 + Y \\(x=3\\): 7 + Y \\begin{equation} \\mathbb{E}[Y] = 3 \\cdot \\frac{1}{3} + (5+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3} + (7+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3} \\end{equation}\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e as an indicator\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/#conditional-expectation\"\u003econditional expectation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/#law-of-total-expectation\"\u003elaw of total expectation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cp\u003eReview: \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e. \u003ca href=\"/posts/kbhexpectation/\"\u003eExpectation\u003c/a\u003e of the sums of \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es are \u003ca href=\"/posts/kbhexpectation/#properties-of-id-24e5fb5b-b0b2-4872-adf2-398e91c3ee0e-expectation\"\u003elinear\u003c/a\u003e regardless of whether or not the variables are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e, whatever.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;expectation of the sum is the sum of the expectations\u0026rdquo;. 
\u0026ldquo;Can I write the expectation I want to calculate as the sum of something else?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\\(\\mathbb{E}[Y]\\) =\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x=1\\): 3\u003c/li\u003e\n\u003cli\u003e\\(x=2\\): 5 + Y\u003c/li\u003e\n\u003cli\u003e\\(x=3\\): 7 + Y\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[Y] = 3 \\cdot \\frac{1}{3} + (5+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3} + (7+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov102023/","tags":null,"title":"SU-CS109 NOV102023"},{"categories":null,"contents":"Key Sequence Notation New Concepts modeling parameter parameter learning argmax Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodeling/\"\u003emodeling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhargmax/\"\u003eargmax\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov132023/","tags":null,"title":"SU-CS109 NOV132023"},{"categories":null,"contents":"Key Sequence Notation New Concepts 
likelyhood Maximum Likelihood Parameter Learning argmax maximum a posteriori estimate Important Results / Claims Double Envelope Problem Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhargmax/\"\u003eargmax\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003emaximum a posteriori estimate\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdouble_envelope_problem/\"\u003eDouble Envelope Problem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov152023/","tags":null,"title":"SU-CS109 NOV152023"},{"categories":null,"contents":"Key Sequence Notation For some feature input matrix, where each row is the data samples, and columns are the input features; we write\n\\begin{equation} x_{j}^{(i)} \\end{equation}\nwhere \\(i\\) are rows (datapoints), and \\(j\\) are columns (features)\nNew Concepts Naive Bayes Important Results / Claims Naive Bayes assumption Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003cp\u003eFor some feature input matrix, where each row is the data samples, 
and columns are the input features; we write\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{j}^{(i)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(i\\) are rows (datapoints), and \\(j\\) are columns (features)\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/#naive-bayes--kbhnaive-bayes-dot-md--assumption\"\u003eNaive Bayes assumption\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov172023/","tags":null,"title":"SU-CS109 NOV172023"},{"categories":null,"contents":"Key Sequence Notation New Concepts machine learning Naive Bayes sigmoid logistic regression Important Results / Claims logistic regression assumption Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmachine_learning/\"\u003emachine learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhlogistic_regression/#logistic-regression-assumption\"\u003elogistic regression assumption\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov272023/","tags":null,"title":"SU-CS109 NOV272023"},{"categories":null,"contents":"Key Sequence Notation New Concepts logistic regression deep learning Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov292023/","tags":null,"title":"SU-CS109 NOV292023"},{"categories":null,"contents":"Key Sequence Notation New Concepts probability + Frequentist Definition of Probability sample space event equally likely outcomes sample space Important Results / Claims uncertainty and probability axiom of probability with making elements INDISTINCT during picking your sample space, your sample space may not have equally likely outcomes Questions Interesting Factoids Doing probability\nconsider your cards as being distinct create a generative story ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 
id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e + \u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhevent/\"\u003eevent\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsample_space/#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e sample space\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#id-c5f09e8a-9c3f-4874-a1c1-79d156801208-uncertainty-and-id-744af885-3e9c-4130-bd2b-2f41bcc0440e-probability\"\u003euncertainty and probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ewith making elements INDISTINCT during picking your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e, your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e may not have \u003ca href=\"/posts/kbhsample_space/#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eDoing probability\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003econsider your cards as being distinct\u003c/li\u003e\n\u003cli\u003ecreate a generative 
story\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct022023/","tags":null,"title":"SU-CS109 OCT022023"},{"categories":null,"contents":"Key Sequence Notation And Or Given P(E and F) P(E or F) P(E \\bar F) P(E,F) P(E ∪ F) New Concepts Review: axiom of probability conditional probability law of total probability Bayes Theorem Important Results / Claims Representing Large Computation probability chain rule Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eAnd\u003c/th\u003e\n\u003cth\u003eOr\u003c/th\u003e\n\u003cth\u003eGiven\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(E and F)\u003c/td\u003e\n\u003ctd\u003eP(E or F)\u003c/td\u003e\n\u003ctd\u003eP(E \\bar F)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(E,F)\u003c/td\u003e\n\u003ctd\u003eP(E ∪ F)\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eReview: \u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrepresenting_large_computation/\"\u003eRepresenting Large 
Computation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct042023/","tags":null,"title":"SU-CS109 OCT042023"},{"categories":null,"contents":"Key Sequence Notation New Concepts mutually exclusive easy \u0026ldquo;or\u0026rdquo; independence \u0026ldquo;and\u0026rdquo; Important Results / Claims inclusion exclusion counting for independent events, AND questions are easier for mutually exclusive events, OR questions are easier if you didn\u0026rsquo;t get the condition you want, use DeMorgan\u0026rsquo;s Law to flip them over Questions Interesting Factoids eigenvalue\nQuality Investing\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e easy \u0026ldquo;or\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e \u0026ldquo;and\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_rule_of_counting/\"\u003einclusion exclusion counting\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e events, AND questions are easier\u003c/li\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e events, OR questions are easier\u003c/li\u003e\n\u003cli\u003eif you 
didn\u0026rsquo;t get the condition you want, use \u003ca href=\"/posts/kbhdemorgan_s_law/\"\u003eDeMorgan\u0026rsquo;s Law\u003c/a\u003e to flip them over\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfundimental_investing/#quality-investing\"\u003eQuality Investing\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct062023/","tags":null,"title":"SU-CS109 OCT062023"},{"categories":null,"contents":"Key Sequence Notation New Concepts conditional independence random variable probability mass function expectation Important Results / Claims When you are using conditional probability, if you are consistently within your condition, you can effectively just leave it there conditioning on something CHANGES whether or not two things are independent Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhen you are using \u003ca 
href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e, if you are consistently within your condition, you can effectively just leave it there\u003c/li\u003e\n\u003cli\u003econditioning on something CHANGES whether or not two things are \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct092023/","tags":null,"title":"SU-CS109 OCT092023"},{"categories":null,"contents":"Key Sequence Notation New Concepts random variables of interest binomial distribution Bernoulli distribution Important Results / Claims New problem solving recipe recognize classic random variable define a random variable of the correct parameters use their PMF and solve Galton Board Questions Debugging probability: put in large values of \\(p\\) for things; if you end up with a number higher than \\(1\\) for the probability, you probably tried to add not mutually exclusive events\nInteresting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es of interest\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNew problem solving recipe\n\u003cul\u003e\n\u003cli\u003erecognize 
classic \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edefine a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e of the correct parameters\u003c/li\u003e\n\u003cli\u003euse their \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e and solve\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgalton_board/\"\u003eGalton Board\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cp\u003eDebugging probability: put in large values of \\(p\\) for things; if you end up with a number higher than \\(1\\) for the probability, you probably tried to add not \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e events\u003c/p\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct112023/","tags":null,"title":"SU-CS109 OCT112023"},{"categories":null,"contents":"Key Sequence Notation New Concepts e variance poisson distribution Important Results / Claims probability of k in x time Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhe/\"\u003ee\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003eprobability of k in x 
time\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_109_oct132023/","tags":null,"title":"SU-CS109 OCT132023"},{"categories":null,"contents":"Key Sequence Notation New Concepts support more discrete random variables! geometric random variable negative binomial distribution hypergeometric: drawing without replacement probability density function continuous random variables uniform distribution exponential distribution cumulative distribution function Important Results / Claims PDFs are derivatives of probability Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsupport/\"\u003esupport\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003emore discrete \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es!\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeometric_random_variable/\"\u003egeometric random variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnegative_binomial_distribution/\"\u003enegative binomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ehypergeometric: drawing without replacement\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexponential_distribution/\"\u003eexponential distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003ecumulative distribution function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003es are derivatives of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct162023/","tags":null,"title":"SU-CS109 OCT162023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct182023/","tags":null,"title":"SU-CS109 OCT182023"},{"categories":null,"contents":"Key Sequence Notation New Concepts multinomial coefficient joint probability distribution probablistic models multinomial distribution text Naive Bayes Important Results / Claims for Naive Bayes against some multinomial distribution, its often a good 
time to find the ratios of the results because the combination in the beginning cancels out if you are analyzing the same samples upon different prior probabilities \\begin{equation} \\frac{P(H|D)}{P(M|D)} = \\frac{\\prod_{i} h_{i}^{c_{i}}}{\\prod_{i} m_{i}^{c_{i}}} \\end{equation}\nQuestions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultinomial_coefficient/\"\u003emultinomial coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobablistic_model/\"\u003eprobablistic models\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobablistic_model/#multinomial-distribution\"\u003emultinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003etext \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e against some \u003ca href=\"/posts/kbhprobablistic_model/#multinomial-distribution\"\u003emultinomial distribution\u003c/a\u003e, its often a good time to find the ratios of the results because the combination in the beginning cancels out if you are analyzing the same samples upon different prior probabilities\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{P(H|D)}{P(M|D)} = \\frac{\\prod_{i} h_{i}^{c_{i}}}{\\prod_{i} m_{i}^{c_{i}}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 
id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct202023/","tags":null,"title":"SU-CS109 OCT202023"},{"categories":null,"contents":"Key Sequence Notation New Concepts relative probability probability density function inference Important Results / Claims getting exact values from PDF Bayes Normalization Constant Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrelative_probability/\"\u003erelative probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#getting-exact-values-from-pdf--kbhprobability-distributions-dot-md\"\u003egetting exact values from PDF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbayes_normalization_constant/\"\u003eBayes Normalization Constant\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct232023/","tags":null,"title":"SU-CS109 OCT232023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Bayes Theorem Over Random Variable sigmoid Important Results / Claims not all beliefs are able to be written down as a function it is ok to discretize things Item Response Theory Questions Interesting 
Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbayes_theorem_over_random_variable/\"\u003eBayes Theorem Over Random Variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003enot all beliefs are able to be written down as a \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eit is ok to discretize things\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhitem_response_theory/\"\u003eItem Response Theory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct252023/","tags":null,"title":"SU-CS109 OCT252023"},{"categories":null,"contents":"Key Sequence Notation New Concepts General Inference (i.e. inference in general) Important Results / Claims Methods of Compressing the Parameters of a Distribution Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneral_inference/\"\u003eGeneral Inference\u003c/a\u003e (i.e. 
\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e in general)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#methods-of-compressing-the-parameters-of-a-distribution\"\u003eMethods of Compressing the Parameters of a Distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct272023/","tags":null,"title":"SU-CS109 OCT272023"},{"categories":null,"contents":"Key Sequence Notation New Concepts counting Important Results / Claims thinking by steps step rule of counting aka product rule of counting inclusion exclusion counting (\u0026ldquo;counting with or\u0026rdquo;) Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethinking by steps\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcounting/#step-rule-of-counting\"\u003estep rule of counting\u003c/a\u003e aka \u003ca href=\"/posts/kbhcounting/#step-rule-of-counting\"\u003eproduct rule of counting\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_rule_of_counting/\"\u003einclusion exclusion counting (\u0026ldquo;counting with or\u0026rdquo;)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 
id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_sep272023/","tags":null,"title":"SU-CS109 SEP272023"},{"categories":null,"contents":"Key Sequence Notation New Concepts permutation combination grouping Important Results / Claims permutation with indistinct objects grouping with entirely indistinct objects (divider method) Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgrouping/\"\u003egrouping\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/#permutation-with-indistinct-objects\"\u003epermutation with indistinct objects\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgrouping/#grouping-with-entirely-indistinct-objects\"\u003egrouping with entirely indistinct objects\u003c/a\u003e (\u003ca href=\"/posts/kbhgrouping/#grouping-with-entirely-indistinct-objects\"\u003edivider method\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_sep292023/","tags":null,"title":"SU-CS109 SEP292023"},{"categories":null,"contents":"FS main challenges naming: how do users name files reliability: surviving OS crashes and hardware failures protection: isolation between users, controlled sharing disk space management: minimize seeks, 
sharing space (\u0026ldquo;preventing fragmentation\u0026rdquo;) seeks to wait until the platter go under the arm and read.\ninternal v. external fragmentation internal: a file can be no less than a single block of text. external: no space is available even if the space in aggregate is available main designs contiguous allocation IBM used this? puts files and meta-data together + implement an explicit free list allocator. benefit: simple; drawback: 1) external fragmentation 2) hard to grow files\nlinked files in every block, store the location of the next block; don\u0026rsquo;t store files continuously\u0026mdash;instead, store a pointer to where the next block of the file is. benefit: solves fragmentation and file growth; drawback: 1) huge seek time 2) random access from the middle is hard (i.e. O(n))\nWindows FAT linked files, but cached the file links in memory when using it. benefits: same as linked files, and a bit faster drawback: data still fragmented and now you have a whole ass table to deal with! 
but its at least faster\nFile Payload Data Kind of what we do\u0026mdash;instead of storing file data in order OR using links, store the file BLOCK information contiguously.\nmulti-level index: store all block numbers for a given file down a tree (EXT2/3, Unix V6, NTFS)\nUnix V6 + MLI Sector Size Block Size Inode Size Inodes Per Block Address Type 512 512 32 16 Short, 2 bytes block const size_t INODE_PER_BLOCK = SECTOR_SIZE / sizeof(struct inode); struct inode inodes[INODE_PER_BLOCK]; char buf[SECTOR_SIZE]; readsector(2, \u0026amp;inodes); // recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode printf(\u0026#34;addr: %d\\n\u0026#34;, inodes[0].i_add); ino struct inode { uint16_t i_addr[8]; uint16_t i_mode[8]; uint16_t file_size; } inodes have two modes\nif ((inode.i_mode \u0026amp; ILARG) != 0) == // node is in \u0026#34;large mode\u0026#34; in small mode, the inode stores in i_addr the block numbers to the data in large mode, the inode stores in the first seven numbers in i_addr block numbers to blocks that contain block numbers (512/2 = 256 block numbers, which are chars); the eighth number points to doubly indirect blocks that contain block numbers that point to other blocks The inode table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. inodes are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. usually this packs 16 inodes per block\nin large mode, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which means we are fine now.\nsizing\nsmall: \\(512\\) bytes per block, and \\(8\\) block storable, so \\(8 \\cdot 512 = 4096\\) bytes large: \\(512\\) bytes per block pointed to by i_addr, each containing \\(\\frac{512}{2} = 256\\) block numbers. The first seven in total would therefore address \\(256 \\times 7 = 1792\\) blocks of memory. The last eight would each address \\(256 \\cdot 256 = 65536\\) blocks of memory. 
In total, that addresses \\(1792+65536 = 67328\\) blocks of memory. Finally, that means we can address \\(67328 \\cdot 512 = 34471936\\) bytes. dir struct dirent { uint16_t d_inumber; // inode number of this file char d_name[14]; // the name; *NOT NECESSARILY NULL TERMINATED* } THE NAME MAY NOT BE NULL TERMINATED to cram max things. You have to use strncmp\nstrcmp/strncmp: stops comparing after \\(n\\) characters; \u0026lt;0 if str1 comes before str2 alphabetically; \u0026gt;0 if str1 comes after str2; 0 if equal\nStart at the root directory, /. We want to go to the root directory, and find the entry named /classes/, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\nA directory is basically just a file whose payload is a list of dirent.\nThe inode tells you whether something is a file or a directory. They can be small or large, as usual. Root directory always have inode number 1; 0 is reserved to NULL.\nfile Recall that read doesn\u0026rsquo;t read the whole thing. So, we it in parts.\nvoid copyContents(int sourceFD, int destinationFD) { char buffer[INCREMENT]; while (true) { ssize_t bytesRead = read(sourceFD, buffer, sizeof(buffer)); if (bytesRead == 0) break; size_t bytesWritten = 0; while (bytesWritten \u0026lt; bytesRead) { ssize_t count = write(destinationFD, buffer + bytesWritten, bytesRead - bytesWritten); bytesWritten += count; } } } int open(const char *pathname, int flags); Flags are a bitwise OR operations: you have to open with O_RDONLY (read only), O_WRONLY (write only), or O_RDWR (both read and write). This returns \\(-1\\) if the reading fails.\nOther flags:\nO_TRUNC (truncate file) O_CREAT (creating a file if not exist), which will require a mode_t mode parameter to set the permission O_EXCL (file must not exist) Block Cache We will use part of the main memory to retain recently-accessed disk blocks. 
This is NOT at the granularity of individual files.\nLeast Recently Used (LRU) Cache When you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\nBlock Cache Modification we can either write asap, or delay.\nwrite asap: safer: less risk of data loss, written as soon as possible; slow: program must wait to proceed until disk I/O completes\nwrite delay: dangerous: may loose data after crash; efficient: memory writes is faster\nCrash Recovery main challenges main designs goal design implementation MP main challenges main designs goal design implementation MT main challenges main designs goal design implementation Virtual Memory main challenges main designs goal design implementation Multicore + Flash main challenges main designs goal design implementation Ethics main challenges main designs goal design implementation Crash Recovery: tradeoffs, data loss and inconsistency, atomic operations, free list and block cache, fsck, ordered writes, write-ahead logging, transactions, checkpoints, idempotency, durability and consistency\nMultiprocessing: processes, PIDs, fork, execution order, copy on write, waitpid, zombie processes, execvp, pipes and pipe / pipe2, I/O redirection\nMultithreading: processes vs. 
threads, C++ threads and .join(), thread safety, race conditions, atomicity, critical sections, mutexes, deadlock, busy waiting, condition variables, notify_all, unique_lock, monitor pattern; dining philosophers\nDispatching / Scheduling: Process control blocks, traps and interrupts, context switching, thread state (running / blocked / ready), I/O-bound and CPU-bound threads, scheduling algorithms, first-come-first-serve, round robin, shortest remaining processing time (SRPT), priority-based scheduling, preemption, interrupts, implementing single-core locks and condition variables\nVirtual memory: single-tasking, process memory, memory sharing goals, load-time relocation, dynamic address translation and MMU, virtual and physical addresses, base and bound, multiple segments, paging, demand paging, page maps, page faults, thrashing, fragmentation, disk swap, page replacement policies, random replacement, FIFO replacement, LRU replacement, clock algorithm, per process vs. global replacement, virtualization.\nModern technologies: multicore processors (multicore scheduling, work stealing, core affinity, gang scheduling, multicore locks (for multicore locks, just high level ideas about interrupts being insufficient to prevent races, atomic operations, and that busy waiting is necessary)), flash storage (quirks of erase + write operations, wear-out, wear-leveling, flash translation layer high-level idea)\nEthics and trust: trust and agency, trust by assumption, trust by inference, trust by substitution, agential gullibility, violations of trust, stakeholders, pervasiveness, time.\n","html":"\u003ch2 id=\"fs\"\u003eFS\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003enaming\u003c/strong\u003e: how do users name files\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ereliability\u003c/strong\u003e: surviving OS crashes and hardware 
failures\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eprotection\u003c/strong\u003e: isolation between users, controlled sharing\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edisk space management\u003c/strong\u003e: minimize seeks, sharing space (\u0026ldquo;preventing fragmentation\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"seeks\"\u003eseeks\u003c/h4\u003e\n\u003cp\u003eto wait until the platter go under the arm and read.\u003c/p\u003e\n\u003ch4 id=\"internal-v-dot-external-fragmentation\"\u003einternal v. external fragmentation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einternal\u003c/strong\u003e: a file can be no less than a single block of text.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eexternal\u003c/strong\u003e: no space is available even if the space in aggregate is available\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch4 id=\"contiguous-allocation\"\u003econtiguous allocation\u003c/h4\u003e\n\u003cp\u003eIBM used this? puts files and meta-data together + implement an explicit free list allocator. \u003cstrong\u003ebenefit\u003c/strong\u003e: simple; \u003cstrong\u003edrawback\u003c/strong\u003e: 1) external fragmentation 2) hard to grow files\u003c/p\u003e\n\u003ch4 id=\"linked-files\"\u003elinked files\u003c/h4\u003e\n\u003cp\u003ein every block, store the location of the next block; don\u0026rsquo;t store files continuously\u0026mdash;instead, store a pointer to where the next block of the file is. \u003cstrong\u003ebenefit\u003c/strong\u003e: solves fragmentation and file growth; \u003cstrong\u003edrawback\u003c/strong\u003e: 1) huge seek time 2) random access from the middle is hard (i.e. O(n))\u003c/p\u003e\n\u003ch4 id=\"windows-fat\"\u003eWindows FAT\u003c/h4\u003e\n\u003cp\u003elinked files, but cached the file links in memory when using it. 
\u003cstrong\u003ebenefits\u003c/strong\u003e: same as linked files, and a bit faster \u003cstrong\u003edrawback\u003c/strong\u003e: data \u003cem\u003estill\u003c/em\u003e fragmented and now you have a whole ass table to deal with! but its at least faster\u003c/p\u003e\n\u003ch4 id=\"file-payload-data\"\u003eFile Payload Data\u003c/h4\u003e\n\u003cp\u003eKind of what we do\u0026mdash;instead of storing file data in order OR using links, store the file BLOCK information contiguously.\u003c/p\u003e\n\u003cp\u003e\u003cem\u003emulti-level index\u003c/em\u003e: store all block numbers for a given file down a tree (EXT2/3, Unix V6, NTFS)\u003c/p\u003e\n\u003ch3 id=\"unix-v6-plus-mli\"\u003eUnix V6 + MLI\u003c/h3\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSector Size\u003c/th\u003e\n\u003cth\u003eBlock Size\u003c/th\u003e\n\u003cth\u003eInode Size\u003c/th\u003e\n\u003cth\u003eInodes Per Block\u003c/th\u003e\n\u003cth\u003eAddress Type\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e32\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003ctd\u003eShort, 2 bytes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"block\"\u003eblock\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e 
\u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003ereadsector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;addr: %d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e].\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_add\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"ino\"\u003eino\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_addr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ei_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efile_size\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es have two modes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eILARG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e!=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// node is in \u0026#34;large 
mode\u0026#34;\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003ein \u003cstrong\u003esmall mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in \u003ccode\u003ei_addr\u003c/code\u003e the block numbers to the data\u003c/li\u003e\n\u003cli\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in the \u003cstrong\u003efirst seven\u003c/strong\u003e numbers in \u003ccode\u003ei_addr\u003c/code\u003e block numbers to \u003cem\u003eblocks that contain block numbers\u003c/em\u003e (512/2 = 256 block numbers, which are chars); the \u003cstrong\u003eeighth number\u003c/strong\u003e points to \u003cstrong\u003edoubly indirect\u003c/strong\u003e \u003cem\u003eblocks that contain block numbers that point to other blocks\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. usually this packs 16 inodes per block\u003c/p\u003e\n\u003cp\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which means we are fine now.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003esizing\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esmall: \\(512\\) bytes per block, and \\(8\\) block storable, so \\(8 \\cdot 512 = 4096\\) bytes\u003c/li\u003e\n\u003cli\u003elarge: \\(512\\) bytes per block pointed to by i_addr, each containing \\(\\frac{512}{2} = 256\\) block numbers. 
The first seven in total would therefore address \\(256 \\times 7 = 1792\\) blocks of memory. The last eight would each address \\(256 \\cdot 256 = 65536\\) blocks of memory. In total, that addresses \\(1792+65536 = 67328\\) blocks of memory. Finally, that means we can address \\(67328 \\cdot 512 = 34471936\\) bytes.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"dir\"\u003edir\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edirent\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_inumber\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// inode number of this file\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_name\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e14\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the name; *NOT NECESSARILY NULL TERMINATED*\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cstrong\u003eTHE NAME MAY NOT BE NULL TERMINATED\u003c/strong\u003e to cram max things. You have to use \u003cstrong\u003estrncmp\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003estrcmp/strncmp\u003c/strong\u003e: stops comparing after \\(n\\) characters; \u0026lt;0 if str1 comes before str2 alphabetically; \u0026gt;0 if str1 comes after str2; 0 if equal\u003c/p\u003e\n\u003cp\u003eStart at the root directory, \u003ccode\u003e/\u003c/code\u003e. We want to go to the root directory, and find the entry named \u003ccode\u003e/classes/\u003c/code\u003e, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\u003c/p\u003e\n\u003cp\u003eA directory is basically just a \u003cstrong\u003efile whose payload is a list of \u003ccode\u003edirent\u003c/code\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThe inode tells you whether something is a file or a directory. They can be small or large, as usual. Root directory always have inode number \u003ccode\u003e1\u003c/code\u003e; \u003ccode\u003e0\u003c/code\u003e is reserved to NULL.\u003c/p\u003e\n\u003ch4 id=\"file\"\u003efile\u003c/h4\u003e\n\u003cp\u003eRecall that \u003ccode\u003eread\u003c/code\u003e doesn\u0026rsquo;t read the whole thing. 
So, we it in parts.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ecopyContents\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esourceFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edestinationFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINCREMENT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesRead\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esourceFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebytesRead\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ebreak\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ebytesRead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edestinationFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ebytesRead\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eopen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epathname\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eflags\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFlags are a bitwise OR operations: you have to open with \u003ccode\u003eO_RDONLY\u003c/code\u003e (read only), \u003ccode\u003eO_WRONLY\u003c/code\u003e (write only), or \u003ccode\u003eO_RDWR\u003c/code\u003e (both read and write). 
This returns \\(-1\\) if the reading fails.\u003c/p\u003e\n\u003cp\u003eOther flags:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eO_TRUNC\u003c/code\u003e (truncate file)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_CREAT\u003c/code\u003e (creating a file if not exist), which will require a \u003ccode\u003emode_t mode\u003c/code\u003e parameter to set the permission\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_EXCL\u003c/code\u003e (file must not exist)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"block-cache\"\u003eBlock Cache\u003c/h3\u003e\n\u003cp\u003eWe will use part of the main memory to retain recently-accessed disk \u003cstrong\u003eblocks\u003c/strong\u003e. This is \u003cstrong\u003eNOT\u003c/strong\u003e at the granularity of individual files.\u003c/p\u003e\n\u003ch4 id=\"least-recently-used--lru--cache\"\u003eLeast Recently Used (LRU) Cache\u003c/h4\u003e\n\u003cp\u003eWhen you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\u003c/p\u003e\n\u003ch4 id=\"block-cache-modification\"\u003eBlock Cache Modification\u003c/h4\u003e\n\u003cp\u003ewe can either \u003cstrong\u003ewrite asap\u003c/strong\u003e, or \u003cstrong\u003edelay\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ewrite asap\u003c/strong\u003e\u003c/strong\u003e: \u003cem\u003esafer\u003c/em\u003e: less risk of data loss, written as soon as possible; \u003cem\u003eslow\u003c/em\u003e: program must wait to proceed until disk I/O completes\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ewrite delay\u003c/strong\u003e\u003c/strong\u003e: \u003cem\u003edangerous\u003c/em\u003e: may loose data after crash; \u003cem\u003eefficient\u003c/em\u003e: memory writes is faster\u003c/p\u003e\n\u003ch2 id=\"crash-recovery\"\u003eCrash Recovery\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003ch3 
id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003ch2 id=\"mp\"\u003eMP\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003ch2 id=\"mt\"\u003eMT\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003ch2 id=\"virtual-memory\"\u003eVirtual Memory\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003ch2 id=\"multicore-plus-flash\"\u003eMulticore + Flash\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003ch2 id=\"ethics\"\u003eEthics\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003cp\u003eCrash Recovery: tradeoffs, data loss and inconsistency, atomic operations, free list and block cache, fsck, ordered writes, write-ahead logging, transactions, checkpoints, idempotency, durability and 
consistency\u003c/p\u003e\n\u003cp\u003eMultiprocessing: processes, PIDs, fork, execution order, copy on write, waitpid, zombie processes, execvp, pipes and pipe / pipe2, I/O redirection\u003c/p\u003e\n\u003cp\u003eMultithreading: processes vs. threads, C++ threads and .join(), thread safety, race conditions, atomicity, critical sections, mutexes, deadlock, busy waiting, condition variables, notify_all, unique_lock, monitor pattern; dining philosophers\u003c/p\u003e\n\u003cp\u003eDispatching / Scheduling: Process control blocks, traps and interrupts, context switching, thread state (running / blocked / ready), I/O-bound and CPU-bound threads, scheduling algorithms, first-come-first-serve, round robin, shortest remaining processing time (SRPT), priority-based scheduling, preemption, interrupts, implementing single-core locks and condition variables\u003c/p\u003e\n\u003cp\u003eVirtual memory: single-tasking, process memory, memory sharing goals, load-time relocation, dynamic address translation and MMU, virtual and physical addresses, base and bound, multiple segments, paging, demand paging, page maps, page faults, thrashing, fragmentation, disk swap, page replacement policies, random replacement, FIFO replacement, LRU replacement, clock algorithm, per process vs. 
global replacement, virtualization.\u003c/p\u003e\n\u003cp\u003eModern technologies: multicore processors (multicore scheduling, work stealing, core affinity, gang scheduling, multicore locks (for multicore locks, just high level ideas about interrupts being insufficient to prevent races, atomic operations, and that busy waiting is necessary)), flash storage (quirks of erase + write operations, wear-out, wear-leveling, flash translation layer high-level idea)\u003c/p\u003e\n\u003cp\u003eEthics and trust: trust and agency, trust by assumption, trust by inference, trust by substitution, agential gullibility, violations of trust, stakeholders, pervasiveness, time.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs111_final_sheet/","tags":null,"title":"SU-CS111 Final Sheet"},{"categories":null,"contents":"KEY IDEAS:\nfilesystems - how do we design filesystems to manage files on disk multiprocessing - how does programs interact with one another, coordinating, etc. multithreading - how can we have single-process concurrency virtual memory - how can one set of memory can be shared among several processes modern technologies - busy waiting locking, Flash Storage, etc. interplay between tech + OS: OS at the hardware, software boundary designing with tradeoffs: not always one \u0026ldquo;best\u0026rdquo; way - evaluating pros/cons/priorities virtualization: make one thing look like something else, or many of them concurrency: synchronization is hard locality: predicting the future (scheduling, paging, block cache, etc.)\u0026mdash;try to estimate the future with priority queues, etc. atomics: collections of operations that make them appear as a single, indivisible operation \u0026mdash; synchronization + file system consistency (log transactions) layering: building higher level abstractions to hide details (monitors, fs layers, file descriptors, etc.) 
system builders wrangling complexity: solving complex problems with simple interfaces that others can build on (virtual memory, filesystems, etc.) trust: we have to trust something or someone\u0026mdash;evaluating what to trust and how systems can incorporate trust understanding justifies how complex systems work elegant ideas of computing (concurrency, virtualization, etc.) take advantage of hardware and OS software that\u0026rsquo;s available OS aren\u0026rsquo;t standing still: OS changing and encountering new challenges Massive Review\nFS Design filesystems to manage files, what are the tradeoffs in designing them? How can we interact with the filesystem?\nmultiple approaches (continuous allocation, linked files, FAT, multi-level index\u0026mdash;file access, metadata) crash recovery designs file descriptions Why? large system design manipulate files in programs + what is a file design challenges and limitatinos MP How can our program create and interact other programs?\nfork/waitpid/execvp/pipe: coordinating and run other programs and erpcosses process control block information + running processes in any order Why? challenges of concurrency shells! chrome site isolation MT Concurrency within a single process.\ndining philosopher problem and its solution OS\u0026rsquo; tracking of threads (and not processes) to run, and when to switch between them scheduling (round robin, SRPT, priority based scheduling, etc.), preemption, and dispatching Concurrency Management Why? maximally take advantage of hardware through multi cores many applications in modern software (Excel\u0026rsquo;s threads, for instance) understand the behavior of computers\u0026mdash;single core machines may also multi-task! 
concurrency challenges + synchronization: this is hard Concurrency Management synchronization/race conditions/deadlock\nprocesses and threads creating and dispatching sync primitive and their implementation: mutexes, CVs, monitor pattern scheduling interrupts deadlock races and inconsistency VMem How can one set of memory be shared among several processes? How do we manage access to a limited amount of system memory?\ngives each process isolated virtual address space OS maps what\u0026rsquo;s needed to real physical memory OS can manage physical memory however it wants, including swapping pages to disk Why? virtualization - virtual world does not need to know about the complexities of where to run programmer: we always assume tones of contiguous memory thrashing, swapping, etc. Modern Technologies How do hardware impact design of OSes?\nmulti-core scheduling + locks how to schedule multi-core threads\u0026mdash;Gang Scheduling + Work Stealing + Core Affinity locking between cores: busy waiting and atomics flash-storage: impacts on file systems with wear-out and Flash Translation Layer Why? 
OSes sitting at software-hardware boundary: system changes can change OSes Can more fully understand how modern technologies impact our devices\u0026mdash;we can understanding their impact at the OS level Ethics and Trust Who/what do we trust, how do we decided, what do we do when the thrust is not upheald, how gcan we factor trust?\nagential gullibility privacy + trust pathways to trust accountability, stakeholder Why OS has extreme scale: high amount of trust we must trust some things, improtant to reflect what we trust and what we value reflect on what to do when trust is violated, how can we incorporate considerations of trust into what we build Next Steps ","html":"\u003cp\u003e\u003cstrong\u003eKEY IDEAS\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003es - how do we design filesystems to manage files on disk\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiprocessing/\"\u003emultiprocessing\u003c/a\u003e - how does programs interact with one another, coordinating, etc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/\"\u003emultithreading\u003c/a\u003e - how can we have single-process concurrency\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e - how can one set of memory can be shared among several processes\u003c/li\u003e\n\u003cli\u003emodern technologies - \u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e locking, \u003ca href=\"/posts/kbhmodern_os/#flash-storage\"\u003eFlash Storage\u003c/a\u003e, etc.\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einterplay between tech + OS\u003c/strong\u003e: OS at the hardware, software boundary\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edesigning with tradeoffs\u003c/strong\u003e: not always one \u0026ldquo;best\u0026rdquo; way - evaluating 
pros/cons/priorities\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evirtualization\u003c/strong\u003e: make one thing look like something else, or many of them\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econcurrency\u003c/strong\u003e: synchronization is hard\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elocality\u003c/strong\u003e: predicting the future (scheduling, paging, block cache, etc.)\u0026mdash;try to estimate the future with priority queues, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eatomics\u003c/strong\u003e: collections of operations that make them appear as a single, indivisible operation \u0026mdash; synchronization + file system consistency (log transactions)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elayering\u003c/strong\u003e: building higher level abstractions to hide details (monitors, fs layers, file descriptors, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esystem builders wrangling complexity\u003c/strong\u003e: solving complex problems with simple interfaces that others can build on (virtual memory, filesystems, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etrust\u003c/strong\u003e: we have to trust \u003cem\u003esomething\u003c/em\u003e or \u003cem\u003esomeone\u003c/em\u003e\u0026mdash;evaluating what to trust and how systems can incorporate trust\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003eunderstanding justifies how complex systems work\u003c/li\u003e\n\u003cli\u003eelegant ideas of computing (\u003cstrong\u003econcurrency\u003c/strong\u003e, \u003cstrong\u003evirtualization\u003c/strong\u003e, etc.)\u003c/li\u003e\n\u003cli\u003etake advantage of hardware and OS software that\u0026rsquo;s available\u003c/li\u003e\n\u003cli\u003eOS aren\u0026rsquo;t standing still: OS changing and encountering new challenges\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eMassive Review\u003c/p\u003e\n\u003ch2 id=\"fs\"\u003eFS\u003c/h2\u003e\n\u003cp\u003eDesign 
filesystems to manage files, what are the tradeoffs in designing them? How can we interact with the filesystem?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emultiple approaches (continuous allocation, linked files, FAT, multi-level index\u0026mdash;file access, metadata)\u003c/li\u003e\n\u003cli\u003ecrash recovery designs\u003c/li\u003e\n\u003cli\u003efile descriptions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003elarge system design\u003c/li\u003e\n\u003cli\u003emanipulate files in programs + what is a file\u003c/li\u003e\n\u003cli\u003edesign challenges and limitatinos\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mp\"\u003eMP\u003c/h2\u003e\n\u003cp\u003eHow can our program create and interact other programs?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efork/waitpid/execvp/pipe\u003c/strong\u003e: coordinating and run other programs and erpcosses\u003c/li\u003e\n\u003cli\u003eprocess control block information + running processes in any order\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003echallenges of concurrency\u003c/li\u003e\n\u003cli\u003eshells!\u003c/li\u003e\n\u003cli\u003echrome site isolation\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mt\"\u003eMT\u003c/h2\u003e\n\u003cp\u003eConcurrency within a single process.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edining philosopher problem and its solution\u003c/li\u003e\n\u003cli\u003eOS\u0026rsquo; tracking of threads (and not processes) to run, and when to switch between them\u003c/li\u003e\n\u003cli\u003escheduling (round robin, SRPT, \u003ca href=\"/posts/kbhscheduling/#priority-based-scheduling\"\u003epriority based scheduling\u003c/a\u003e, etc.), preemption, and dispatching\u003c/li\u003e\n\u003cli\u003eConcurrency Management\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003emaximally take 
advantage of hardware through multi cores\u003c/li\u003e\n\u003cli\u003emany applications in modern software (Excel\u0026rsquo;s threads, for instance)\u003c/li\u003e\n\u003cli\u003eunderstand the behavior of computers\u0026mdash;single core machines may also multi-task!\u003c/li\u003e\n\u003cli\u003econcurrency challenges + synchronization: this is \u003cstrong\u003ehard\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"concurrency-management\"\u003eConcurrency Management\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003esynchronization/race conditions/deadlock\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eprocesses and threads\u003c/li\u003e\n\u003cli\u003ecreating and dispatching\u003c/li\u003e\n\u003cli\u003esync primitive and their implementation: mutexes, CVs, monitor pattern\u003c/li\u003e\n\u003cli\u003escheduling\u003c/li\u003e\n\u003cli\u003einterrupts\u003c/li\u003e\n\u003cli\u003edeadlock\u003c/li\u003e\n\u003cli\u003eraces and inconsistency\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"vmem\"\u003eVMem\u003c/h2\u003e\n\u003cp\u003eHow can one set of memory be shared among several processes? 
How do we manage access to a limited amount of system memory?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003egives each process isolated virtual address space\u003c/li\u003e\n\u003cli\u003eOS maps what\u0026rsquo;s needed to real physical memory\u003c/li\u003e\n\u003cli\u003eOS can manage physical memory however it wants, including swapping pages to disk\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003evirtualization\u003c/strong\u003e - virtual world does not need to know about the complexities of where to run\n\u003cul\u003e\n\u003cli\u003eprogrammer: we always assume tones of contiguous memory\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ethrashing, swapping, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"modern-technologies\"\u003eModern Technologies\u003c/h2\u003e\n\u003cp\u003eHow do hardware impact design of OSes?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emulti-core scheduling + locks\n\u003cul\u003e\n\u003cli\u003ehow to schedule multi-core threads\u0026mdash;\u003ca href=\"/posts/kbhmodern_os/#gang-scheduling\"\u003eGang Scheduling\u003c/a\u003e + \u003ca href=\"/posts/kbhmodern_os/#work-stealing\"\u003eWork Stealing\u003c/a\u003e + \u003ca href=\"/posts/kbhmodern_os/#core-affinity\"\u003eCore Affinity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elocking between cores: busy waiting and atomics\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eflash-storage: impacts on file systems with wear-out and \u003ca href=\"/posts/kbhmodern_os/#flash-storage\"\u003eFlash Translation Layer\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eOSes sitting at software-hardware boundary: system changes can change OSes\u003c/li\u003e\n\u003cli\u003eCan more fully understand how modern technologies impact our devices\u0026mdash;we can understanding their impact at the OS 
level\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ethics-and-trust\"\u003eEthics and Trust\u003c/h2\u003e\n\u003cp\u003eWho/what do we trust, how do we decided, what do we do when the thrust is not upheald, how gcan we factor trust?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust\"\u003eagential gullibility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e + \u003ca href=\"/posts/kbhprivacy/#trust\"\u003etrust\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#pathways-to-trust\"\u003epathways to trust\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#accountability\"\u003eaccountability\u003c/a\u003e, \u003ca href=\"/posts/kbhprivacy/#stakeholder\"\u003estakeholder\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eOS has extreme \u003cstrong\u003escale\u003c/strong\u003e: high amount of trust\u003c/li\u003e\n\u003cli\u003ewe must trust \u003cstrong\u003esome\u003c/strong\u003e things, improtant to reflect what we trust and what we value\u003c/li\u003e\n\u003cli\u003ereflect on what to do when trust is violated, how can we incorporate considerations of trust into what we build\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-13_14-03-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs111_outline/","tags":null,"title":"SU-CS111 Outline"},{"categories":null,"contents":"Key Sequence Notation New Concepts Optimal Stopping Problem reinforcement learning model-based reinforcement learning model-free reinforcement learning Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 
id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimal_stopping_problem/\"\u003eOptimal Stopping Problem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/\"\u003emodel-free reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov022023/","tags":null,"title":"SU-CS238 NOV022023"},{"categories":null,"contents":"Key Sequence Notation New Concepts POMDP belief observation model error model discrete state filter Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#observation-model\"\u003eobservation model\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#error-model\"\u003eerror model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#discrete-state-filter\"\u003ediscrete state 
filter\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov092023/","tags":null,"title":"SU-CS238 NOV092023"},{"categories":null,"contents":"Key Sequence Notation New Concepts belief filters Kalman Filter Particle Filter POMDP belief-state MDP conditional plan alpha vector Important Results / Claims conditional plan evaluation Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilter\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilters/#kalman-filter\"\u003eKalman Filter\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilters/#particle-filter\"\u003eParticle Filter\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / 
Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/#conditional-plan--kbhconditional-plan-dot-md--evaluation\"\u003econditional plan evaluation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov142023/","tags":null,"title":"SU-CS238 NOV142023"},{"categories":null,"contents":"Key Sequence Notation New Concepts belief-state MDP optimal value function for POMDP with alpha vector one-step lookahead in POMDP alpha vector pruning Important Results / Claims POMDP value-iteration Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#with\"\u003eoptimal value function for POMDP with alpha vector\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/#value-iteration\"\u003ePOMDP value-iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting 
Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov162023/","tags":null,"title":"SU-CS238 NOV162023"},{"categories":null,"contents":"Key Sequence Notation New Concepts POMDP Approximation lower bound BAWS blind lower bound Point-Based Value Iteration Randomized PBVI Online POMDP Methods Important Results / Claims point selection Questions how do you use alpha-vectors in Rollout with Lookahead? forward search? in a sense: how do you get the value function what is best-action worst-state reward converting into alpha vectors? bandit quiz question Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpomdp_approximation/\"\u003ePOMDP Approximation lower bound\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003eBAWS\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePoint-Based Value Iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/#randomized-id-ab745ce0-4282-44bc-91ab-823458060df7-pbvi\"\u003eRandomized PBVI\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_pomdp_methods/\"\u003eOnline POMDP Methods\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehow do you use alpha-vectors in \u003ca 
href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e? forward search? in a sense: how do you get the value function\u003c/li\u003e\n\u003cli\u003ewhat is \u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e reward converting into alpha vectors?\u003c/li\u003e\n\u003cli\u003ebandit quiz question\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov282023/","tags":null,"title":"SU-CS238 NOV282023"},{"categories":null,"contents":"Key Sequence Notation New Concepts controller finite state controller multiagent reasoning prisoner\u0026rsquo;s dilemma traveler\u0026rsquo;s dilemma response model Dominant Strategy Equilibrium Nash Equilibrium Important Results / Claims finite state controller evaluation joint policy agent utility Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller/#finite-state-controller\"\u003efinite state controller\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/\"\u003emultiagent reasoning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#prisoner-s-dilemma\"\u003eprisoner\u0026rsquo;s dilemma\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#traveler-s-dilemma\"\u003etraveler\u0026rsquo;s dilemma\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#response-model\"\u003eresponse 
model\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#dominant-strategy-equilibrium\"\u003eDominant Strategy Equilibrium\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#nash-equilibrium\"\u003eNash Equilibrium\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller/#finite-state-controller-evaluation\"\u003efinite state controller evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#joint-policy-agent-utility\"\u003ejoint policy agent utility\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov302023/","tags":null,"title":"SU-CS238 NOV302023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Bayes Net + conditional independence Baysian Net inference factor factor operations factor product factor marginalization factor conditioning Naive Bayes + inference with Naive Bayes Direct Sampling and Likelihood Weighted Sampling Important Results / Claims parameter checking for conditional independence sum-product elimination Direct Sampling a Baysian Network Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e + \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinference/\"\u003eBaysian Net inference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-operations\"\u003efactor operations\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-marginalization\"\u003efactor marginalization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e + \u003ca href=\"/posts/kbhnaive_bayes/#inference--kbhinference-dot-md--with-naive-bayes--kbhnaive-bayes-dot-md\"\u003einference with Naive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e and \u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003echecking for conditional independence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference/#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/#direct-sampling-a-id-5eaa4b96-cbc2-4811-91c7-88ea2e164fc3-baysian-network\"\u003eDirect Sampling a Baysian Network\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct032023/","tags":null,"title":"SU-CS238 OCT032023"},{"categories":null,"contents":"Key Sequence Notation New Concepts inference Inference for Gaussian Models approximate inference Direct Sampling Likelihood Weighted Sampling parameter learning Maximum Likelihood Parameter Learning Baysian Parameter Learning Dirichlet Distribution Important Results / Claims \u0026ldquo;there is usually a tradeoff between the computational time you are willing to devote, and th Bayesian Learning on Binary Distributions Questions for Likelihood Weighted Sampling, where do the conditional probability values come from Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference_for_gaussian_models/\"\u003eInference for Gaussian Models\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhapproximate_inference/\"\u003eapproximate inference\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;there is usually a tradeoff between the computational time you are willing to devote, and th\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#bayesian-parameter-learning-on-binary-distributions\"\u003eBayesian Learning on Binary Distributions\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e, where do the conditional probability values come from\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct052023/","tags":null,"title":"SU-CS238 OCT052023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Beta Distribution Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#non-uniform-prior\"\u003eBeta Distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting 
Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct102023/","tags":null,"title":"SU-CS238 OCT102023"},{"categories":null,"contents":"New Concepts structure learning K2 Algorithm Local Search Markov Equivalence Classes Partially Directed Graph Search utility and Rational Preferences lottery utility elicitation expected utility risk aversion decision network utility functions quadratic utility exponential utility power utility and its special case log utility maximum expected utility principle value of information Important Results / Claims checking Markov Equivalence utility of Rational Preference von Neumann and Morgenstern Axioms: Axioms for checking rationality never have a utility function that\u0026rsquo;s infinite utility of a lottery process of observation selection Questions ","html":"\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/\"\u003estructure learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/#k2-algorithm\"\u003eK2 Algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/#local-search\"\u003eLocal Search\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalence Classes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/#partially-directed-graph-search\"\u003ePartially Directed Graph Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e and \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preferences\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility 
elicitation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#expected-utility\"\u003eexpected utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_function/#quadratic-utility\"\u003equadratic utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_function/#exponential-utility\"\u003eexponential utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e and its special case \u003ca href=\"/posts/kbhpower_utility/#log-utility\"\u003elog utility\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003emaximum expected utility principle\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_of_information/\"\u003evalue of information\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_equivalence_classes/#checking-markov-equivalence--kbhmarkov-equivalence-classes-dot-md\"\u003echecking Markov Equivalence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#utility-of-id-3ed9f842-7fa3-4244-ab09-58b088b9c27e-rational-preference\"\u003eutility of Rational Preference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrational_preference/#von-neumann-and-morgenstern-axioms\"\u003evon Neumann and Morgenstern 
Axioms\u003c/a\u003e: Axioms for checking rationality\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#never-have-a-utility-function-that-s-infinite\"\u003enever have a utility function that\u0026rsquo;s infinite\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlottery/#utility-of-a-lottery\"\u003eutility of a lottery\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_of_information/#process-of-observation-selection\"\u003eprocess of observation selection\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct122023/","tags":null,"title":"SU-CS238 OCT122023"},{"categories":null,"contents":"Notation \u0026ldquo;state variables\u0026rdquo; represent the contents of the state; \u0026ldquo;state\u0026rdquo; is a complete assignment of state variables.\nNew Concepts Markov Decision Process stationary Markov Decision Process finite-horizon models + infinite-horizon models policy stationary policy optimal policy and optimal value function policy evaluation and policy iteration lookahead equation Bellman Expectation Equation action-value function and value-function policy advantage function Important Results / Claims policy evaluation methods solving for the utility of a policy finding the best policy policy iteration Questions why is it d seperated Interesting Factoids ","html":"\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;state variables\u0026rdquo; represent the contents of the state; \u0026ldquo;state\u0026rdquo; is a complete assignment of state variables.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhmarkov_decision_process/#stationary-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003estationary Markov Decision Process\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/#finite-horizon-models\"\u003efinite-horizon models\u003c/a\u003e + \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy/#stationary-policy--kbhpolicy-dot-md\"\u003estationary policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal value function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#lookahead-equation\"\u003elookahead equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#action-value-function\"\u003eaction-value function\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadvantage_function/\"\u003eadvantage function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e methods\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#solving-for-the-utility-of-a-policy\"\u003esolving for the utility of a policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003efinding the best policy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy is it d seperated\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct172023/","tags":null,"title":"SU-CS238 OCT172023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Markov Decision Process value iteration Bellman Residual for continuous state spaces: Approximate Value Function use global approximation or local approximation methods Important Results / Claims policy and utility creating a good utility function / policy from instantaneous rewards: either policy evaluation or value iteration creating a policy from a utility function: value-function policy (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;) calculating the utility function a policy currently uses: use policy evaluation kernel smoothing value iteration, in practice Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue 
iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/#bellman-residual\"\u003eBellman Residual\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state spaces: \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eApproximate Value Function\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003euse \u003ca href=\"/posts/kbhapproximate_value_function/#global-approximation\"\u003eglobal approximation\u003c/a\u003e or \u003ca href=\"/posts/kbhapproximate_value_function/#local-approximation\"\u003elocal approximation\u003c/a\u003e methods\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003epolicy and utility\n\u003cul\u003e\n\u003cli\u003ecreating a good \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e / \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from instantaneous rewards: either \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ecreating a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from a \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e: \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003ecalculating the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility function\u003c/a\u003e a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e currently uses: use \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy 
evaluation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel smoothing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration_in_practice/\"\u003evalue iteration, in practice\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct192023/","tags":null,"title":"SU-CS238 OCT192023"},{"categories":null,"contents":"Key Sequence Notation New Concepts global approximation online planning Rollout with Lookahead Forward Search Branch and Bound monte-carlo tree search open loop planning Important Results / Claims Rollout Policy monte-carlo exploration open-loop planning vs close-loop planning Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhapproximate_value_function/#global-approximation\"\u003eglobal approximation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/#open-loop-planning\"\u003eopen loop 
planning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout-policy\"\u003eRollout Policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/#monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/#open-loop-planning-vs-close-loop-planning\"\u003eopen-loop planning vs close-loop planning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct242023/","tags":null,"title":"SU-CS238 OCT242023"},{"categories":null,"contents":"Big day. Policy Gradient.\nNew Concepts Approximate Policy Evaluation and Roll-out utility Policy Optimization methods: Local Policy Search (aka Hooke-Jeeves Policy Search) Genetic Policy Search Cross Entropy Method Policy Gradient, Regression Gradient and Likelyhood Ratio Gradient Reward-to-Go Important Results / Claims monte-carlo policy evaluation Finite-Difference Gradient Estimation Linear Regression Gradient Estimate Questions for next office hour ","html":"\u003cp\u003eBig day. 
\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#approximate-policy-evaluation\"\u003eApproximate Policy Evaluation\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e methods:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e (aka \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eHooke-Jeeves Policy Search\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_method/#cross-entropy-method\"\u003eCross Entropy Method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e, \u003ca href=\"/posts/kbhpolicy_gradient/#linear-regression-gradient-estimate\"\u003eRegression Gradient\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_gradient/#likelyhood-ratio-gradient--kbhpolicy-gradient-dot-md\"\u003eLikelyhood Ratio Gradient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#reward-to-go\"\u003eReward-to-Go\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#monte-carlo-policy-evaluation\"\u003emonte-carlo policy evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#finite-difference-gradient-estimation\"\u003eFinite-Difference Gradient 
Estimation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#linear-regression-gradient-estimate\"\u003eLinear Regression Gradient Estimate\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-next-office-hour\"\u003eQuestions for next office hour\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct262023/","tags":null,"title":"SU-CS238 OCT262023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Restricted Gradient and Actor-Critic Exploration and Exploitation with Binary Bandit Undirected Exploration (Explore-then-commit) Directed Exploration (Softmax Method, Quantile Exploration, UCB 1, Posterior Sampling) Important Results / Claims Bayesian Model Estimation and greedy action epsilon-greedy exploration with decay Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#restricted-gradient\"\u003eRestricted Gradient\u003c/a\u003e and \u003ca href=\"/posts/kbhactor_critic/\"\u003eActor-Critic\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e with \u003ca href=\"/posts/kbhexploration_and_exploitation/#binary-bandit\"\u003eBinary Bandit\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/\"\u003eUndirected Exploration\u003c/a\u003e (\u003ca href=\"/posts/kbhundirected_exploration/#explore-then-commit\"\u003eExplore-then-commit\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/\"\u003eDirected Exploration\u003c/a\u003e (\u003ca href=\"/posts/kbhdirected_exploration/#softmax-method\"\u003eSoftmax Method\u003c/a\u003e, \u003ca 
href=\"/posts/kbhdirected_exploration/#quantile-exploration\"\u003eQuantile Exploration\u003c/a\u003e, \u003ca href=\"/posts/kbhdirected_exploration/#ucb-1\"\u003eUCB 1\u003c/a\u003e, \u003ca href=\"/posts/kbhdirected_exploration/#posterior-sampling\"\u003ePosterior Sampling\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexploration_and_exploitation/#bayesian-model-estimation\"\u003eBayesian Model Estimation\u003c/a\u003e and \u003ca href=\"/posts/kbhexploration_and_exploitation/#bayesian-model-estimation\"\u003egreedy action\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/#epsilon-greedy-exploration-with-decay\"\u003eepsilon-greedy exploration with decay\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct212023/","tags":null,"title":"SU-CS238 OCT312023"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_q0q3/","tags":null,"title":"SU-CS238 Q0Q3"},{"categories":null,"contents":"See Double Envelope Problem\nKey Sequence we introduced Decision Making, models of decision making and gave some examples we introduced different types of uncertainty in the reading, we introduced the Course Outline New Definitions Decision Making uncertainty Questions for Jana how are planning methods different from explicit programming methods? 
\u0026ldquo;Sequential Decision Process\u0026rdquo; Partially observable Markov decision process (POMDP) ","html":"\u003cp\u003eSee \u003ca href=\"/posts/kbhdouble_envelope_problem/#double-envelope-problem\"\u003eDouble Envelope Problem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe introduced \u003ca href=\"/posts/kbhdecision_making/\"\u003eDecision Making\u003c/a\u003e, \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003emodels of decision making\u003c/a\u003e and gave some examples\u003c/li\u003e\n\u003cli\u003ewe introduced different \u003ca href=\"/posts/kbhuncertainty/\"\u003etypes of uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ein the reading, we introduced the \u003ca href=\"/posts/kbhdecision_making_index/#course-outline\"\u003eCourse Outline\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_making/\"\u003eDecision Making\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehow are \u003ca href=\"/posts/kbhplanning/\"\u003eplanning\u003c/a\u003e methods different from \u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e methods?\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Sequential Decision Process\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ePartially observable Markov decision process (POMDP)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_sep262023/","tags":null,"title":"SU-CS238 SEP262023"},{"categories":null,"contents":"definitions probability and random variable uniform distribution Gaussian distribution 
probability distributions conditional probability Bayes Theorem results axiom of probability support = range\n","html":"\u003ch2 id=\"definitions\"\u003edefinitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e and \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distributions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#bayes-theorem\"\u003eBayes Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results\"\u003eresults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003esupport = range\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_sep272023/","tags":null,"title":"SU-CS238 SEP272023"},{"categories":null,"contents":"Notation shorthand for probability Take\n\\begin{equation} P(X = 1) = \\frac{1}{6} \\end{equation}\nWe can write this in short hand like:\n\\begin{equation} P(X^{1}) = P(X=1) = \\frac{1}{6} \\end{equation}\n\\(P\\) vs \\(p\\) Upper case \\(P\\) for probability mass function (one shot chance), lower case \\(p\\) for probability density functions (integral)\nNew Concepts degrees of belief and describing them 
using the language of probability discrete distribution and continuous distribution and joint probability distribution important tools: parameters of a distribution probability density functions cumulative distribution function quantile function fun probability distributions Gaussian distribution + Truncated Gaussian distribution uniform distribution conditional probability and Bayes Theorem unique models that leverage conditional probability conditional Gaussian models linear gaussian model conditional linear Gaussian models: use your big brain to add up 1) and 2), with continuous random variables \\(X, Y\\), and a discrete \\(Z\\), where \\(p(x \\mid y, z)\\). sigmoid model Baysian Network and conditional independence d seperation Important Results / Claims history and impact of decision making law of total probability fun axioms belief axioms: universal comparability transitivity probability axioms: axiom of probability Methods of Compressing the Parameters of a Distribution assuming independence using a decision tree checking for conditional independence Questions ","html":"\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch3 id=\"shorthand-for-probability\"\u003eshorthand for probability\u003c/h3\u003e\n\u003cp\u003eTake\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X = 1) = \\frac{1}{6}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can write this in short hand like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X^{1}) = P(X=1) = \\frac{1}{6}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"p-vs-p\"\u003e\\(P\\) vs \\(p\\)\u003c/h3\u003e\n\u003cp\u003eUpper case \\(P\\) for \u003ca href=\"/posts/kbhprobability_distributions/#probability-mass-function\"\u003eprobability mass function\u003c/a\u003e (one shot chance), lower case \\(p\\) for \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-functions\"\u003eprobability density functions\u003c/a\u003e (integral)\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew 
Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_theory/\"\u003edegrees of belief\u003c/a\u003e and \u003ca href=\"/posts/kbhprobability_theory/#language-of-probability\"\u003edescribing them using the language of probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e and \u003ca href=\"/posts/kbhcontinuous_distribution/\"\u003econtinuous distribution\u003c/a\u003e and \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eimportant tools\u003c/strong\u003e:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter/\"\u003eparameters of a distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-functions\"\u003eprobability density functions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003ecumulative distribution function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#quantile-function\"\u003equantile function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003efun \u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distributions\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e + \u003ca href=\"/posts/kbhprobability_distributions/#truncated-gaussian-distribution\"\u003eTruncated Gaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e and \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eunique models that leverage \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_gaussian_models/\"\u003econditional Gaussian models\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_gaussian_model/\"\u003elinear gaussian model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003econditional linear Gaussian models: use your big brain to add up 1) and 2), with continuous \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es \\(X, Y\\), and a discrete \\(Z\\), where \\(p(x \\mid y, z)\\).\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid model\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e and \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed seperation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_making_history/\"\u003ehistory and impact of decision making\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efun axioms\n\u003cul\u003e\n\u003cli\u003ebelief axioms:\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability_theory/#universal-comparability\"\u003euniversal comparability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_theory/#transitivity\"\u003etransitivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eprobability axioms:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#methods-of-compressing-the-parameters-of-a-distribution\"\u003eMethods of Compressing the Parameters of a Distribution\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eassuming \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eusing a \u003ca href=\"/posts/kbhprobability_distributions/#decision-tree\"\u003edecision tree\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003echecking for conditional independence\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_sep282023/","tags":null,"title":"SU-CS238 SEP282023"},{"categories":null,"contents":"Tips ","html":"\u003ch2 id=\"tips\"\u003eTips\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs239_jan092023/","tags":null,"title":"SU-CS239 JAN092023"},{"categories":null,"contents":"Of course I\u0026rsquo;m not committing my midterm.\n","html":"\u003cp\u003eOf course I\u0026rsquo;m not committing my midterm.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs239_midterm_1/","tags":null,"title":"SU-CS239 Midterm 
1"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math109_problem_set_1/","tags":null,"title":"SU-MATH109 Problem Set 1"},{"categories":null,"contents":"Key Sequence New Definitions division + division algorithm greatest common divisor prime Euclidean Algorithm Results and Their Proofs principle of induction primes there are infinitely many primes division and greatest common divisor division algorithm properties of the gcd Euclidean Algorithm and some euclid lemma linked below in fundamental theorem of arithmetic Questions for Jana Why does something being prime require that \\(p\u0026gt;1\\). that is, why is \\(1\\) not defined as prime? Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e + \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheuclidean_algorithm/\"\u003eEuclidean Algorithm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eprimes\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/#there-are-infinitely-many-primes\"\u003ethere are infinitely many primes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e and \u003ca 
href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgreatest_common_divisor/#properties-of-the-id-0ccf3a9f-e788-4485-b49a-b54906e52fe4-gcd\"\u003eproperties of the gcd\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheuclidean_algorithm/\"\u003eEuclidean Algorithm\u003c/a\u003e and some euclid lemma linked below in \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/\"\u003efundamental theorem of arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhy does something being \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e require that \\(p\u0026gt;1\\). that is, why is \\(1\\) not defined as \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math109_sep272023/","tags":null,"title":"SU-MATH109 SEP272023"},{"categories":null,"contents":"\\begin{equation} \\mathbb{N} = \\{0, 1,2,3 \\dots \\} \\end{equation}\nthe set of natural numbers. start from 0.\n\\begin{equation} \\mathbb{Z} = \\{\\dots, -2, -1, 0,1,2, \\dots \\} \\end{equation}\nthe set of integers. 
natural language and their negatives\nKey Sequence first, we built the ground work of principle of induction in order to construct the WOP we defined division, and formalized the algorithm for doing so we then defined the greatest common divisor, and the fact that greatest common divisor is a linear combination we then constructed the idea of prime numbers, coprimes, and showed that There are infinitely many primes Finally, we used yet another lemma from Euler to build the fundamental theorem of arithmetic New Definitions division division algorithm greatest common divisor prime coprime Results and Their Proofs principle of induction well-ordering principle There are infinitely many primes division algorithm greatest common divisor is a linear combination fundamental theorem of arithmetic and its factorization motivator lemma ","html":"\u003cp\u003e\\begin{equation}\n\\mathbb{N} = \\{0, 1,2,3 \\dots \\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe set of natural numbers. \u003cstrong\u003estart from 0\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Z} = \\{\\dots, -2, -1, 0,1,2, \\dots \\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe set of integers. 
natural language and their negatives\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efirst, we built the ground work of \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e in order to construct the \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e, and formalized \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003ethe algorithm for doing so\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then defined the \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e, and the fact that \u003ca href=\"/posts/kbhgreatest_common_divisor/#greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then constructed the idea of \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e numbers, \u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003es, and showed that \u003ca href=\"/posts/kbhprime/#there-are-infinitely-many-primes\"\u003eThere are infinitely many primes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eFinally, we used \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/#factorization-motivator\"\u003eyet another lemma from Euler\u003c/a\u003e to build the \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/\"\u003efundamental theorem of arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003ewell-ordering principle\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/#there-are-infinitely-many-primes\"\u003eThere are infinitely many primes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgreatest_common_divisor/#greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/\"\u003efundamental theorem of arithmetic\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eand its \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/#factorization-motivator\"\u003efactorization motivator lemma\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math109_sep272023_exp/","tags":null,"title":"SU-MATH109 SEP272023"},{"categories":null,"contents":"Key Sequence New Definitions modular arithmetic + basic modular arithmetic operations permutation groups left cancellation, right cancellation Results and Their Proofs Chinese Remainder Theorem Questions for Jana why is it 
that adding all the digits work for \\(\\ \\text{mod}\\ 9\\). I still don\u0026rsquo;t get it. in re Chinese Remainder Theorem: is there any case in which \\(a\\ \\text{mod}\\ b\\) is not unique? Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodular_arithmetic/\"\u003emodular arithmetic\u003c/a\u003e + \u003ca href=\"/posts/kbhmodular_arithmetic/#basic-id-85873ac2-491c-4cef-916b-2b409fac0b47-modular-arithmetic-operations\"\u003ebasic modular arithmetic operations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgroup/\"\u003eleft cancellation\u003c/a\u003e, \u003ca href=\"/posts/kbhgroup/\"\u003eright cancellation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodular_arithmetic/#chinese-remainder-theorem\"\u003eChinese Remainder Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy is it that adding all the digits work for \\(\\ \\text{mod}\\ 9\\). 
I still don\u0026rsquo;t get it.\u003c/li\u003e\n\u003cli\u003ein re \u003ca href=\"/posts/kbhmodular_arithmetic/#chinese-remainder-theorem\"\u003eChinese Remainder Theorem\u003c/a\u003e: is there any case in which \\(a\\ \\text{mod}\\ b\\) is \u003cstrong\u003enot\u003c/strong\u003e unique?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math109_sep292023/","tags":null,"title":"SU-MATH109 SEP292023"},{"categories":null,"contents":"2nd order linear inhomogeneous: non-homogeneous linear differential equation\n","html":"\u003cp\u003e2nd order linear inhomogeneous: \u003ca href=\"/posts/kbhnon_homogeneous_linear_differential_equation/\"\u003enon-homogeneous linear differential equation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb022024/","tags":null,"title":"SU-MATH53 FEB022024"},{"categories":null,"contents":"Sensitivity to Initial Conditions + Parameters.\nODE Existence and Uniqueness We can recast all high order systems into a first-order vector-valued system. 
So, for any system:\n\\begin{equation} x\u0026rsquo; = g(t,x, a) \\end{equation}\nif \\(g\\) is differentiable across \\(t,x\\) and \\(a\\), the IVP given by \\(x\u0026rsquo; = g(t,x,a)\\) and \\(x(0) = x_0\\), has the property has that:\nthe ODE has a solution \\(x(t_0) = x_0\\) for any \\(t_0\\), and any two solutions on the interval coincide as the same solution The only way for a solution to fail to extend temporally is due to the bounds\u0026rsquo; \\(||x(t)||\\) becomes unbounded as \\(t\\) approaches the endpoints On any interval \\(t_0 \\leq t \\leq T\\) the solution \\(y_{a,y_0}\\) depends continuously on \\(a, y_0\\), \u0026ldquo;if I look at my solution sometime later, it would be a non-discontinuous change on the choice of initial condition\u0026rdquo; Example Let\u0026rsquo;s consider:\n\\begin{equation} y\u0026rsquo; = -y \\end{equation}\nand take the initial value at:\n\\begin{equation} y(0) = y_0 \\end{equation}\nwe have a solution such that:\n\\begin{equation} y(t) = y_0e^{-t} \\end{equation}\nwhich, at \\(y(10)\\), we obtain:\n\\begin{equation} y(10) = y_0e^{-10} \\end{equation}\nWhich brings the question: \u0026ldquo;how close should \\(y_0\u0026rsquo;\\) be such that \\(|y\u0026rsquo;(10) - y(10)| \\leq 10^{-5}\\)?\u0026rdquo;\nWe can recast this as:\n\\begin{equation} |y_0\u0026rsquo; e^{-10} - y_0 e^{-10} | \u0026lt; 10^{-5} \\end{equation}\nmeaning:\n\\begin{equation} |y_0\u0026rsquo; - y_0| \u0026lt; \\frac{10^{-5}}{e^{-10}} \\approx \\frac{1}{4} \\end{equation}\nIf you flip it over, you will have extreme instability.\nExample \\begin{equation} \\begin{cases} \\dv{x}{t} = a(y-x) \\\\ \\dv{y}{t} = (b-z)x-y \\\\ \\dv{z}{t} = xy-cz \\end{cases} \\end{equation}\nthis seems innocuous, but no. 
If we set our parameters to be weirdly specific values:\n\\begin{equation} \\begin{cases} a \\approx 10 \\\\ b \\approx 28 \\\\ c \\approx \\frac{8}{3} \\end{cases} \\end{equation}\nThese attractors spins across two separate spheres, and the number of times the system spins around a particular area is unknown. It is called\u0026hellip;\nDeterministic Chaos Deterministic Chaos is a hard problems which there is a bounded region in which the behavior happens, but the system is bounded.\nAnother Example Logistic expression:\n\\begin{equation} y\u0026rsquo; = ry\\qty(1-\\frac{y}{k}) -h \\end{equation}\nYou can get solutions of this form for some carrying capacity \\(k\\) and a constant rate of removal \\(h\\). You can observe that we can build a phase line of this system, and observe. This behavior is called bifurcation: when some \\(h\\) is high enough, our whole system dies out.\n\u0026ldquo;if the finish rate is too high over other parameters, you just die out.\u0026rdquo;\nYou can also draw a plot, where the \\(x\\) axis is some parameter \\(p\\), and phase plot can be drawn sideways.\nCauchy Stability Suppose \\(x(t)\\) satisfies:\n\\begin{equation} x\u0026rsquo;(t) = g(t,x(t)), x(t_0) = x_0 \\end{equation}\nFor some interval \\(t \\in I\\) where the IVP is satisfied; for any time interval \\([t_1, t_2]\\) inside \\(I\\) and any \\(x_0\u0026rsquo;\\) near to \\(x_0\\), the associated \\(x(t_0) = x_0\u0026rsquo;\\) should exist for the same interval \\([t_1, t_2]\\) and \\(|| x\u0026rsquo;(t) - x(t) ||\\) is small for \\(t\\).\nThis extends for not just initial conditions, but also parameters as well. 
For function parameters \\(a_0\\) and \\(a_0\u0026rsquo;\\).\nNewtonian 3-body problem \\begin{equation} m_1 x_1\u0026rsquo;\u0026rsquo; = \\frac{-Gm_{1}m_2}{|x_1-x_2|^{2}}- \\frac{Gm_{1}m_3}{|x_1-x_3|^{2}} \\end{equation}\nyou will note that this expression has no close form solution, so you can\u0026rsquo;t do the Cauchy Stability thing to it.\n","html":"\u003cp\u003eSensitivity to Initial Conditions + Parameters.\u003c/p\u003e\n\u003ch2 id=\"ode-existence-and-uniqueness\"\u003eODE Existence and Uniqueness\u003c/h2\u003e\n\u003cp\u003eWe can recast all high order systems into a first-order vector-valued system. So, for any system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = g(t,x, a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(g\\) is differentiable across \\(t,x\\) and \\(a\\), the \u003ca href=\"/posts/kbhinitial_value_problems/\"\u003eIVP\u003c/a\u003e given by \\(x\u0026rsquo; = g(t,x,a)\\) and \\(x(0) = x_0\\), has the property has that:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe ODE has a solution \\(x(t_0) = x_0\\) for any \\(t_0\\), and any two solutions on the interval coincide as the same solution\u003c/li\u003e\n\u003cli\u003eThe only way for a solution to fail to extend temporally is due to the bounds\u0026rsquo; \\(||x(t)||\\) becomes unbounded as \\(t\\) approaches the endpoints\u003c/li\u003e\n\u003cli\u003eOn any interval \\(t_0 \\leq t \\leq T\\) the solution \\(y_{a,y_0}\\) depends continuously on \\(a, y_0\\), \u0026ldquo;if I look at my solution sometime later, it would be a non-discontinuous change on the choice of initial condition\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = -y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand take the initial value at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(0) = y_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe 
have a solution such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = y_0e^{-t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich, at \\(y(10)\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(10) = y_0e^{-10}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich brings the question: \u0026ldquo;how close should \\(y_0\u0026rsquo;\\) be such that \\(|y\u0026rsquo;(10) - y(10)| \\leq 10^{-5}\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe can recast this as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y_0\u0026rsquo; e^{-10} - y_0 e^{-10} | \u0026lt; 10^{-5}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y_0\u0026rsquo; - y_0| \u0026lt; \\frac{10^{-5}}{e^{-10}} \\approx \\frac{1}{4}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf you flip it over, you will have extreme instability.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = a(y-x) \\\\\n\\dv{y}{t} = (b-z)x-y \\\\\n\\dv{z}{t} = xy-cz\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis seems innocuous, but no. If we set our parameters to be weirdly specific values:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na \\approx 10 \\\\\nb \\approx 28 \\\\\nc \\approx \\frac{8}{3}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThese attractors spins across two separate spheres, and the number of times the system spins around a particular area is unknown. 
It is called\u0026hellip;\u003c/p\u003e\n\u003ch4 id=\"deterministic-chaos\"\u003eDeterministic Chaos\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#deterministic-chaos\"\u003eDeterministic Chaos\u003c/a\u003e is a hard problems which there is a bounded region in which the behavior happens, but the system is bounded.\u003c/p\u003e\n\u003ch3 id=\"another-example\"\u003eAnother Example\u003c/h3\u003e\n\u003cp\u003eLogistic expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = ry\\qty(1-\\frac{y}{k}) -h\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou can get solutions of this form for some carrying capacity \\(k\\) and a constant rate of removal \\(h\\). You can observe that we can build a \u003ca href=\"/posts/kbhphase_line/\"\u003ephase line\u003c/a\u003e of this system, and observe. This behavior is called \u003ca href=\"#ode-existence-and-uniqueness\"\u003ebifurcation\u003c/a\u003e: when some \\(h\\) is high enough, our whole system dies out.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;if the finish rate is too high over other parameters, you just die out.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eYou can also draw a plot, where the \\(x\\) axis is some parameter \\(p\\), and phase plot can be drawn sideways.\u003c/p\u003e\n\u003ch2 id=\"cauchy-stability\"\u003eCauchy Stability\u003c/h2\u003e\n\u003cp\u003eSuppose \\(x(t)\\) satisfies:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t) = g(t,x(t)), x(t_0) = x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some interval \\(t \\in I\\) where the IVP is satisfied; for any time interval \\([t_1, t_2]\\) inside \\(I\\) and any \\(x_0\u0026rsquo;\\) near to \\(x_0\\), the associated \\(x(t_0) = x_0\u0026rsquo;\\) should exist for the same interval \\([t_1, t_2]\\) and \\(|| x\u0026rsquo;(t) - x(t) ||\\) is small for \\(t\\).\u003c/p\u003e\n\u003cp\u003eThis extends for not just initial conditions, but also parameters as well. 
For function parameters \\(a_0\\) and \\(a_0\u0026rsquo;\\).\u003c/p\u003e\n\u003ch2 id=\"newtonian-3-body-problem\"\u003eNewtonian 3-body problem\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nm_1 x_1\u0026rsquo;\u0026rsquo; = \\frac{-Gm_{1}m_2}{|x_1-x_2|^{2}}- \\frac{Gm_{1}m_3}{|x_1-x_3|^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that this expression has no close form solution, so you can\u0026rsquo;t do the \u003ca href=\"#cauchy-stability\"\u003eCauchy Stability\u003c/a\u003e thing to it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb052024/","tags":null,"title":"SU-MATH53 FEB052024"},{"categories":null,"contents":"Non-Linear ODE\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnon_linear_ode/\"\u003eNon-Linear ODE\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb072024/","tags":null,"title":"SU-MATH53 FEB072024"},{"categories":null,"contents":"Still Non-Linear ODE\n","html":"\u003cp\u003eStill \u003ca href=\"/posts/kbhnon_linear_ode/\"\u003eNon-Linear ODE\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb092024/","tags":null,"title":"SU-MATH53 FEB092024"},{"categories":null,"contents":"How would we solve equations like:\n\\begin{equation} \\begin{cases} y\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0 \\\\ y\u0026rsquo;\u0026rsquo; - xy = 0 \\end{cases} \\end{equation}\nTaylor Series Its time to have a blast from the past! 
Taylor Series time.\n\\begin{equation} p_{n}(x) = \\sum_{i=0}^{n} \\frac{f^{(n)}(0) x^{n}}{n!} \\end{equation}\nTaylor\u0026rsquo;s Theorem with Remainder gives us that, at some \\(n\\), \\(|f(x) - p_{n}(x)|\\) is bounded.\n\\begin{equation} |x(t+h) - (x(t) + h x\u0026rsquo;(t))| \\leq Ch \\end{equation}\nTwo constraints:\nneed \\(f^{(n)}\\) to exist infinitely and there\u0026rsquo;s a set of functions that are representable by Taylor Series (even if differentiable; such as \\(e^{-\\frac{1}{|x|}}\\) variable-coefficient ODEs \\begin{equation} \\dv[2]{y}{x} + a(x) \\dv{y}{x} + b(x) y = 0 \\end{equation}\nWe can no longer use any linearizion facilities we have developed before because matrix exponentiation (i.e. the eigenvalue trick) no longer work very well as squaring independent variable within the expression actually have consequences now.\nSolving ODEs via power series if \\(a_0(t), \u0026hellip;, a_{n}(t), f(t)\\) are all convergent power series on an interval centered at \\(t_0\\) then, solutions of \\(a_{n}(t)y^{(n)} + \u0026hellip; a_0(t)y = f(t)\\) is also a convergent power series on an interval at \\(t_{0}\\), provided that \\(a_{n}(t)\\) doesn\u0026rsquo;t go to \\(0\\) on that interval.\nwrite down solutions in terms of \\(y(t) = \\sum_{n=0}^{\\infty} c_{n}(t-t_0)^{n}\\) take enough derivatives of that expression \\(y(t)\\) above solve for \\(c_0\\), \\(c_1\\), etc. by using the fact that \\(c_{n} = \\frac{y^{(n)}(t_0)}{n!}\\) (i.e. plug in the given \\(y^{(n)}\\) from the IVP and solve for \\(c_{j}\\)) plug what you have in terms of derivatives as well as the initial coefficients, and relate to a general power series notice patterns Case Study Take \\(y\u0026rsquo; = 2y\\). 
Consider:\n\\begin{equation} y = \\sum_{n=0}^{\\infty} a_{n}x^{n} \\end{equation}\nWe hope that our solution function can be fit to this form.\nIf we differentiate:\n\\begin{equation} y\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n} n x^{n-1} \\end{equation}\nWe want to line up powers of \\(x\\), which makes life earlier. Because this is an infinite series, and at \\(n=0\\) the whole differentiated term looks like \\(0\\), we can actually just shift \\(n\\) one over and we\u0026rsquo;d be good.\n\\begin{equation} y\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n} \\end{equation}\nWe can now plug the whole thing into our original equation:\n\\begin{equation} \\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n} = \\sum_{n=0}^{\\infty} 2a_{n}x^{n} \\end{equation}\nBecause these are two polynomials that equal, corresponding coefficients should match:\n\\begin{equation} a_{n+1}(n+1) = 2a_{n} \\end{equation}\nSo, we have:\n\\begin{equation} a_{n+1} = \\frac{2a_{n}}{n+1} \\end{equation}\nAt \\(y(0)=a_{0}\\), so we can start the recursion relationship at any initial condition we\u0026rsquo;d like.\nWe notice that the value:\n\\begin{equation} a_{n} = \\frac{2^{n}}{n!} a_{0} \\end{equation}\nsatisfies the system above. 
Which means we can write out the general answer as \\(a_0 \\sum_{i=0}^{\\infty} \\frac{2^{n}x^{n}}{n!}\\)\nCase Study 2 We have:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0 \\end{equation}\nLet\u0026rsquo;s calculate our Taylor series:\n\\begin{equation} y = \\sum_{i=0}^{\\infty} a_{n} x^{n} \\end{equation}\n\\begin{equation} y\u0026rsquo; = \\sum_{i=0}^{\\infty} n a_{n}x^{n-1} \\end{equation}\n\\begin{equation} y\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} n(n-1)a_{n}x^{n-2} \\end{equation}\nReindexing:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} (n+2)(n+1) a_{n+2} x^{n} \\end{equation}\nBecause \\(2xy\u0026rsquo;\\) appears in the equation, we can actually write:\n\\begin{equation} -2xy\u0026rsquo; = -\\sum_{i=0}^{\\infty} 2n a_{n} x^{n} \\end{equation}\nand the final term:\n\\begin{equation} 2\\lambda y = \\sum_{n=0}^{\\infty} 2\\lambda a_{n} x^{n} \\end{equation}\nAdding the whole thing up, we obtain that:\n\\begin{equation} \\sum_{n=0}^{\\infty} \\qty[(n+2)(n+1) a_{n+2} - 2n a_{n} + 2\\lambda a_{n}] x^{n} = 0 \\end{equation}\nFor each term, we get a recursion relationship in:\n\\begin{equation} a_{n+2} = \\frac{2(n-\\lambda)}{(n+2)(n+1)} a_{n} \\end{equation}\n","html":"\u003cp\u003eHow would we solve equations like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0 \\\\\ny\u0026rsquo;\u0026rsquo; - xy = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"taylor-series\"\u003eTaylor Series\u003c/h2\u003e\n\u003cp\u003eIts time to have a blast from the past! 
\u003ca href=\"#taylor-series\"\u003eTaylor Series\u003c/a\u003e time.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{n}(x) = \\sum_{i=0}^{n} \\frac{f^{(i)}(0) x^{i}}{i!}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#taylor-series\"\u003eTaylor\u0026rsquo;s Theorem with Remainder\u003c/a\u003e gives us that, at some \\(n\\), \\(|f(x) - p_{n}(x)|\\) is bounded.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x(t+h) - (x(t) + h x\u0026rsquo;(t))| \\leq Ch\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTwo constraints:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eneed \\(f^{(n)}\\) to exist infinitely\u003c/li\u003e\n\u003cli\u003eand there\u0026rsquo;s a set of functions that are not representable by Taylor Series (even if differentiable; such as \\(e^{-\\frac{1}{|x|}}\\))\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"variable-coefficient-odes\"\u003evariable-coefficient ODEs\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{y}{x} + a(x) \\dv{y}{x} + b(x) y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can no longer use any linearization facilities we have developed before because \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e (i.e. 
the eigenvalue trick) no longer work very well as squaring independent variable within the expression actually have consequences now.\u003c/p\u003e\n\u003ch2 id=\"solving-odes-via-power-series--kbhpower-series-o-dot-md\"\u003eSolving ODEs via \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eif \\(a_0(t), \u0026hellip;, a_{n}(t), f(t)\\) are all convergent \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e on an interval centered at \\(t_0\\) then, solutions of \\(a_{n}(t)y^{(n)} + \u0026hellip; a_0(t)y = f(t)\\) is also a convergent power series on an interval at \\(t_{0}\\), provided that \\(a_{n}(t)\\) doesn\u0026rsquo;t go to \\(0\\) on that interval.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewrite down solutions in terms of \\(y(t) = \\sum_{n=0}^{\\infty} c_{n}(t-t_0)^{n}\\)\u003c/li\u003e\n\u003cli\u003etake enough derivatives of that expression \\(y(t)\\) above\u003c/li\u003e\n\u003cli\u003esolve for \\(c_0\\), \\(c_1\\), etc. by using the fact that \\(c_{n} = \\frac{y^{(n)}(t_0)}{n!}\\) (i.e. plug in the given \\(y^{(n)}\\) from the IVP and solve for \\(c_{j}\\))\u003c/li\u003e\n\u003cli\u003eplug what you have in terms of derivatives as well as the initial coefficients, and relate to a general power series\u003c/li\u003e\n\u003cli\u003enotice patterns\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"case-study\"\u003eCase Study\u003c/h3\u003e\n\u003cp\u003eTake \\(y\u0026rsquo; = 2y\\). Consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\sum_{n=0}^{\\infty} a_{n}x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe hope that our solution function can be fit to this form.\u003c/p\u003e\n\u003cp\u003eIf we differentiate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n} n x^{n-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to line up powers of \\(x\\), which makes life earlier. 
Because this is an infinite series, and at \\(n=0\\) the whole differentiated term looks like \\(0\\), we can actually just shift \\(n\\) one over and we\u0026rsquo;d be good.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can now plug the whole thing into our original equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n} = \\sum_{n=0}^{\\infty} 2a_{n}x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause these are two polynomials that equal, corresponding coefficients should match:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n+1}(n+1) = 2a_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n+1} = \\frac{2a_{n}}{n+1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt \\(y(0)=a_{0}\\), so we can start the recursion relationship at any initial condition we\u0026rsquo;d like.\u003c/p\u003e\n\u003cp\u003eWe notice that the value:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n} = \\frac{2^{n}}{n!} a_{0}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esatisfies the system above. 
Which means we can write out the general answer as \\(a_0 \\sum_{i=0}^{\\infty} \\frac{2^{n}x^{n}}{n!}\\)\u003c/p\u003e\n\u003ch3 id=\"case-study-2\"\u003eCase Study 2\u003c/h3\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s calculate our Taylor series:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\sum_{i=0}^{\\infty} a_{n} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\sum_{i=0}^{\\infty} n a_{n}x^{n-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} n(n-1)a_{n}x^{n-2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eReindexing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} (n+2)(n+1) a_{n+2} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause \\(2xy\u0026rsquo;\\) appears in the equation, we can actually write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-2xy\u0026rsquo; = -\\sum_{i=0}^{\\infty} 2n a_{n} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand the final term:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2\\lambda y = \\sum_{n=0}^{\\infty} 2\\lambda a_{n} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAdding the whole thing up, we obtain that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} \\qty[(n+2)(n+1) a_{n+2} - 2n a_{n} + 2\\lambda a_{n}] x^{n} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor each term, we get a recursion relationship in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n+2} = \\frac{2(n-\\lambda)}{(n+2)(n+1)} a_{n}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb122024/","tags":null,"title":"SU-MATH53 
FEB122024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb142024/","tags":null,"title":"SU-MATH53 FEB142024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb162024/","tags":null,"title":"SU-MATH53 FEB162024"},{"categories":null,"contents":"A Partial Differential Equation is a Differential Equation which has more than one independent variable: $u(x,y), u(t,x,y), \u0026hellip;$\nFor instance:\n\\begin{equation} \\pdv{U}{t} = \\alpha \\pdv[2]{U}{x} \\end{equation}\nKey Intuition PDEs may have no solutions (unlike Uniqueness and Existance for ODEs) yet, usually, there are too many solutions\u0026mdash;so\u0026hellip; how do you describe all solutions? usually, there are no explicit formulas Laplacian of \\(u(x,y)\\) Laplacian of \\(u(x,y)\\)\nExamples Heat Equation See Heat Equation\nWave Equation see Wave Equation\nTransport Equation \\begin{equation} \\pdv{u}{t} = \\pdv{u}{x} \\end{equation}\ngenerally any \\(u = w(x+t)\\) should solve this\nSchrodinger Equation We have some:\n\\begin{equation} u(x,t) \\end{equation}\nand its a complex-valued function:\n\\begin{equation} i \\pdv{u}{t} = \\pdv[2]{u}{x} \\end{equation}\nwhich results in a superposition in linear equations\nNonlinear Example \\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} + u(1-u) \\end{equation}\nthis is a PDE variant of the logistic equation: this is non-linear\nMonge-Ampere Equations \\begin{equation} u(x,y) \\end{equation}\nHessian \\begin{equation} Hess(u) = \\mqty(\\pdv[2]{u}{x} \u0026amp; \\frac{\\partial^{2} u}{\\partial x \\partial y} \\\\ \\frac{\\partial^{2} u}{\\partial x \\partial y} \u0026amp; \\pdv[2]{u}{y}) \\end{equation}\nIf we take its determinant, we obtain:\n\\begin{equation} \\pdv[2]{u}{x} \\pdv[2]{u}{y} - \\qty(\\frac{\\partial^{2} u}{\\partial x \\partial y})^{2} \\end{equation}\nTraveling Wave For two-variable PDEs, it is called a Traveling Wave if solutions to \\(u\\) 
takes on the form:\n\\begin{equation} u(t,x) = w(x-ct) \\end{equation}\nfor some constant \\(c\\), and where \\(w(x)\\) is a function which depends on only one of the two variables.\nBell Curves See also Bell Curves\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePartial Differential Equation\u003c/a\u003e is a \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e which has more than one \u003cstrong\u003eindependent variable\u003c/strong\u003e: $u(x,y), u(t,x,y), \u0026hellip;$\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} = \\alpha \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"key-intuition\"\u003eKey Intuition\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003es may have no solutions (unlike \u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e for \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003es)\u003c/li\u003e\n\u003cli\u003eyet, usually, there are too many solutions\u0026mdash;so\u0026hellip; how do you describe all solutions?\u003c/li\u003e\n\u003cli\u003eusually, there are no explicit formulas\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"laplacian-of-u--x-y----kbhlaplacian-of-u-x-y-dot-md\"\u003e\u003ca href=\"\"\u003eLaplacian of \\(u(x,y)\\)\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"\"\u003eLaplacian of \\(u(x,y)\\)\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"examples\"\u003eExamples\u003c/h2\u003e\n\u003ch3 id=\"heat-equation--kbhheat-equation-dot-md\"\u003e\u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"wave-equation--kbhwave-equation-dot-md\"\u003e\u003ca 
href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"transport-equation\"\u003eTransport Equation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egenerally any \\(u = w(x+t)\\) should solve this\u003c/p\u003e\n\u003ch3 id=\"schrodinger-equation\"\u003eSchrodinger Equation\u003c/h3\u003e\n\u003cp\u003eWe have some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(x,t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand its a complex-valued function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ni \\pdv{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich results in a superposition in linear equations\u003c/p\u003e\n\u003ch3 id=\"nonlinear-example\"\u003eNonlinear Example\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x} + u(1-u)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is a \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003e variant of the \u003ca href=\"/posts/kbhlogistic_equations/\"\u003elogistic equation\u003c/a\u003e: this is \u003cstrong\u003enon-linear\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"monge-ampere-equations\"\u003eMonge-Ampere Equations\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nu(x,y)\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"hessian\"\u003eHessian\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nHess(u) = \\mqty(\\pdv[2]{u}{x} \u0026amp; \\frac{\\partial^{2} u}{\\partial x \\partial y} \\\\ \\frac{\\partial^{2} u}{\\partial x \\partial y} \u0026amp; \\pdv[2]{u}{y})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we take its determinant, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{x} \\pdv[2]{u}{y} - \\qty(\\frac{\\partial^{2} u}{\\partial x \\partial y})^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 
id=\"traveling-wave\"\u003eTraveling Wave\u003c/h2\u003e\n\u003cp\u003eFor two-variable \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003es, it is called a \u003ca href=\"#traveling-wave\"\u003eTraveling Wave\u003c/a\u003e if solutions to \\(u\\) takes on the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = w(x-ct)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some constant \\(c\\), and where \\(w(x)\\) is a function which depends on only one of the two variables.\u003c/p\u003e\n\u003ch2 id=\"bell-curves--kbhbell-curves-dot-md\"\u003e\u003ca href=\"\"\u003eBell Curves\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"\"\u003eBell Curves\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb212024/","tags":null,"title":"SU-MATH53 FEB212024"},{"categories":null,"contents":"Boundary Value Problem A BVP for an ODE is defined at two different points \\(x_0\\) and \\(x_1\\) at two different values of \\(l\\), whereby we are given:\n\\begin{equation} X_0 = a, X(L) = b \\end{equation}\nwhich we use to further specify a PDE. 
BVPs can either have no or lots of solutions.\nTo aid in the discovery of solutions, for:\n\\begin{equation} X\u0026rsquo;\u0026rsquo; = \\lambda X \\end{equation}\nwe have:\n\\begin{equation} X = \\begin{cases} c_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x}, \\lambda \u0026gt; 0 \\\\ c_1 x + c_2, \\lambda =0 \\\\ c_1 \\cos \\qty(\\sqrt{|\\lambda|}x) +c_2 \\sin \\qty(\\sqrt{|\\lambda|}x), \\lambda \u0026lt; 0 \\end{cases} \\end{equation}\nWhich specific solution arises out of which initial condition you use.\nDirichlet Conditions Initial conditions:\n\\begin{equation} \\begin{cases} u(t,0) = 0 \\\\ u(t, l) = 0 \\end{cases} \\end{equation}\nThis tells us that we are holding the ends of the rod at a constant temperature.\nSolutions For:\n\\begin{equation} X\u0026rsquo;\u0026rsquo; = \\lambda X \\end{equation}\nin the vanishing Case (\\(X(0) = 0 = X(L)\\)):\n\\begin{equation} X = c \\sin \\qty( \\frac{k \\pi x}{L}) \\end{equation}\nwhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\nwhich gives rise to:\n\\begin{equation} \\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}} \\end{equation}\nNeumann Conditions \\begin{equation} \\begin{cases} \\pdv{u}{x}(t,0) = 0 \\\\ \\pdv{u}{x}(t, l) = 0 \\end{cases} \\end{equation}\nthis tells us there is no heat flux across the boundary (i.e. 
heat doesn\u0026rsquo;t escape).\nSolutions For:\n\\begin{equation} X\u0026rsquo;\u0026rsquo; = \\lambda X \\end{equation}\nin the vanishing Case (\\(X\u0026rsquo;(0) = 0 = X\u0026rsquo;(L)\\)):\n\\begin{equation} X = c \\cos \\qty( \\frac{k \\pi x}{L}) \\end{equation}\nwhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\nwhich gives rise to:\n\\begin{equation} \\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}} \\end{equation}\nExamples See Heat Equation, and its worked solution.\n","html":"\u003ch2 id=\"boundary-value-problem\"\u003eBoundary Value Problem\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#boundary-value-problem\"\u003eBVP\u003c/a\u003e for an \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e is defined at two different points \\(x_0\\) and \\(x_1\\) at two different values of \\(l\\), whereby we are given:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX_0 = a, X(L) = b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich we use to further specify a \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003e. 
\u003ca href=\"#boundary-value-problem\"\u003eBVP\u003c/a\u003es can either have \u003cstrong\u003eno\u003c/strong\u003e or \u003cstrong\u003elots\u003c/strong\u003e of solutions.\u003c/p\u003e\n\u003cp\u003eTo aid in the discovery of solutions, for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo; = \\lambda X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = \\begin{cases}\nc_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x}, \\lambda \u0026gt; 0 \\\\\nc_1 x + c_2, \\lambda =0 \\\\\nc_1 \\cos \\qty(\\sqrt{|\\lambda|}x) +c_2 \\sin \\qty(\\sqrt{|\\lambda|}x), \\lambda \u0026lt; 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich specific solution arises out of which initial condition you use.\u003c/p\u003e\n\u003ch3 id=\"dirichlet-conditions\"\u003eDirichlet Conditions\u003c/h3\u003e\n\u003cp\u003eInitial conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nu(t,0) = 0 \\\\\nu(t, l) = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that we are holding the ends of the rod at a constant temperature.\u003c/p\u003e\n\u003ch4 id=\"solutions\"\u003eSolutions\u003c/h4\u003e\n\u003cp\u003eFor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo; = \\lambda X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein the vanishing Case (\\(X(0) = 0 = X(L)\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = c \\sin \\qty( \\frac{k \\pi x}{L})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\u003c/p\u003e\n\u003cp\u003ewhich gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"neumann-conditions\"\u003eNeumann Conditions\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\pdv{u}{x}(t,0) = 0 \\\\\n\\pdv{u}{x}(t, l) = 
0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis tells us there is no heat \u003ca href=\"/posts/kbhflux/\"\u003eflux\u003c/a\u003e across the boundary (i.e. heat doesn\u0026rsquo;t escape).\u003c/p\u003e\n\u003ch4 id=\"solutions\"\u003eSolutions\u003c/h4\u003e\n\u003cp\u003eFor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo; = \\lambda X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein the vanishing Case (\\(X\u0026rsquo;(0) = 0 = X\u0026rsquo;(L)\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = c \\cos \\qty( \\frac{k \\pi x}{L})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\u003c/p\u003e\n\u003cp\u003ewhich gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"examples\"\u003eExamples\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e, and its \u003ca href=\"/posts/kbhheat_equation/#solution-in-full\"\u003eworked solution\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb232024/","tags":null,"title":"SU-MATH53 FEB232024"},{"categories":null,"contents":"Fourier Decomposition Main idea, any induction \\(f(x)\\) on an interval \\([0, L]\\) can be written as a sum:\n\\begin{equation} f(x) = a_0 + \\sum_{k=1}^{\\infty} a_{k} \\cos \\qty( \\frac{2\\pi k}{L} x) + \\sum_{k=1}^{\\infty} b_{k} \\sin \\qty( \\frac{2\\pi k}{L} x) \\end{equation}\nL-periodicity A function is $L$-periodic if \\(f(x+L) = f(x)\\). In that case, it has period \\(L\\).\n$L$-periodicity is preserved across\u0026hellip;\ntranslation we are just moving it to the right/left\ndilation Suppose \\(f(x)\\) is \\(L\\) periodic and let \\(g(x) = f(kx)\\), then, \\(g\\) is also \\(L\\) periodic.\nProof:\n\\(g(x+L) = f(k(x+L)) = f(kx + kL) = f(kx) = g(x)\\). 
So \\(g\\) would also be \\(L\\) periodic. However, importantly, \\(g\\) would also be \\(\\frac{L}{k}\\) periodic (verified by using the same sketch as before)\nlinear combinations Suppose \\(f,g\\) are \\(L\\) periodic and \\(h(x) = af(x) + bg(x)\\), then \\(h\\) is also \\(L\\) periodic.\nProof:\n\\begin{equation} h(x+L) = af(x+L) + bg(x+L) = af(x) + bg(x) = h(x) \\end{equation}\nFourier Series see Fourier Series\n","html":"\u003ch2 id=\"fourier-decomposition\"\u003eFourier Decomposition\u003c/h2\u003e\n\u003cp\u003eMain idea, any induction \\(f(x)\\) on an interval \\([0, L]\\) can be written as a sum:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = a_0 + \\sum_{k=1}^{\\infty} a_{k} \\cos \\qty( \\frac{2\\pi k}{L} x) + \\sum_{k=1}^{\\infty} b_{k} \\sin \\qty( \\frac{2\\pi k}{L} x)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"l-periodicity\"\u003eL-periodicity\u003c/h2\u003e\n\u003cp\u003eA function is $L$-periodic if \\(f(x+L) = f(x)\\). In that case, it has period \\(L\\).\u003c/p\u003e\n\u003cp\u003e$L$-periodicity is preserved across\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"translation\"\u003etranslation\u003c/h3\u003e\n\u003cp\u003ewe are just moving it to the right/left\u003c/p\u003e\n\u003ch3 id=\"dilation\"\u003edilation\u003c/h3\u003e\n\u003cp\u003eSuppose \\(f(x)\\) is \\(L\\) periodic and let \\(g(x) = f(kx)\\), then, \\(g\\) is also \\(L\\) periodic.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\(g(x+L) = f(k(x+L)) = f(kx + kL) = f(kx) = g(x)\\). So \\(g\\) would also be \\(L\\) periodic. 
However, importantly, \\(g\\) would also be \\(\\frac{L}{k}\\) periodic (verified by using the same sketch as before)\u003c/p\u003e\n\u003ch3 id=\"linear-combinations\"\u003elinear combinations\u003c/h3\u003e\n\u003cp\u003eSuppose \\(f,g\\) are \\(L\\) periodic and \\(h(x) = af(x) + bg(x)\\), then \\(h\\) is also \\(L\\) periodic.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh(x+L) = af(x+L) + bg(x+L) = af(x) + bg(x) = h(x)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"fourier-series--kbhfourier-series-dot-md\"\u003e\u003ca href=\"/posts/kbhfourier_series/#fourier-series\"\u003eFourier Series\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhfourier_series/#fourier-series\"\u003eFourier Series\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb252024/","tags":null,"title":"SU-MATH53 FEB252024"},{"categories":null,"contents":"more on Fourier Series.\ndecomposition of functions to even and odd Suppose we have any function with period \\(L\\) over \\([-\\frac{L}{2}, \\frac{L}{2}]\\), we can write this as a sum of even and odd functions:\n\\begin{equation} f(x) = \\frac{1}{2} (f(x) - f(-x)) + \\frac{1}{2} (f(x) + f(-x)) \\end{equation}\nAnd because of this fact, we can actually take each part and break it down individually as a Fourier Series because sin and cos are even and odd parts.\nSo we can take the first part, which is odd, and break it down using \\(a_{n} \\sin (k\\omega x)\\).\nWe can take the second part, which is even, and break it down using \\(b_{n} \\cos (k\\omega x)\\).\nIf you then assume periodicity over the interval you care about \\(L\\), suddenly you can decompose it to a Fourier Series.\n","html":"\u003cp\u003emore on \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"decomposition-of-functions-to-even-and-odd\"\u003edecomposition of functions to even and 
odd\u003c/h2\u003e\n\u003cp\u003eSuppose we have any function with period \\(L\\) over \\([-\\frac{L}{2}, \\frac{L}{2}]\\), we can write this as a sum of even and odd functions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\frac{1}{2} (f(x) - f(-x)) + \\frac{1}{2} (f(x) + f(-x))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd because of this fact, we can actually take each part and break it down individually as a \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e because \u003ca href=\"/posts/kbhfourier_series/#sin-and-cos-are-even-and-odd-parts\"\u003esin and cos are even and odd parts\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo we can take the first part, which is odd, and break it down using \\(a_{n} \\sin (k\\omega x)\\).\u003c/p\u003e\n\u003cp\u003eWe can take the second part, which is even, and break it down using \\(b_{n} \\cos (k\\omega x)\\).\u003c/p\u003e\n\u003cp\u003eIf you then assume periodicity over the interval you care about \\(L\\), suddenly you can decompose it to a \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb282024/","tags":null,"title":"SU-MATH53 FEB282024"},{"categories":null,"contents":" Week Link \u0026lt;2024-01-16 Tue\u0026gt; PSet 1 \u0026lt;2024-01-18 Thu\u0026gt; PSet 2 \u0026lt;2024-01-25 Thu\u0026gt; PSet 3 \u0026lt;2024-02-02 Fri\u0026gt; PSet 4 \u0026lt;2024-02-07 Wed\u0026gt; PSet 5 \u0026lt;2024-02-14 Wed\u0026gt; PSet 6 \u0026lt;2024-02-23 Fri\u0026gt; PSet 7 \u0026lt;2024-03-02 Sat\u0026gt; PSet 8 \u0026lt;2024-03-09 Sat\u0026gt; PSet 9 ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eWeek\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-16 
Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsu_math53_pset_1/\"\u003ePSet 1\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-18 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_2/\"\u003ePSet 2\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-25 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_3/\"\u003ePSet 3\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-02 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_4/\"\u003ePSet 4\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-07 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_5/\"\u003ePSet 5\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-14 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_6/\"\u003ePSet 6\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-23 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_7/\"\u003ePSet 
7\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-03-02 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_8/\"\u003ePSet 8\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-03-09 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_9/\"\u003ePSet 9\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_homework_index/","tags":null,"title":"SU-MATH53 Homework Index"},{"categories":null,"contents":"Key Sequence Notation New Concepts First Order ODEs order of equations linear vs. non-linear equations homogeneous vs. inhomogeneous equations linear systems Newton\u0026rsquo;s Law of Cooling Important Results / Claims superposition principle Fundamental Theorem of Calculus Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#order-of-equations\"\u003eorder of equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#linear-vs-dot-non-linear-equations\"\u003elinear vs. non-linear equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#homogeneous-vs-dot-inhomogeneous-equations\"\u003ehomogeneous vs. 
inhomogeneous equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#linear-systems\"\u003elinear systems\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnewton_s_law_of_cooling/\"\u003eNewton\u0026rsquo;s Law of Cooling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundamental_theorem_of_calculus/\"\u003eFundamental Theorem of Calculus\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan082023/","tags":null,"title":"SU-MATH53 JAN082024"},{"categories":null,"contents":"Key Sequence Notation New Concepts First Order ODEs autonomous ODEs seperable ODEs initial value problems interval principle Important Results / Claims division method general solution to y\u0026rsquo;(t) = ry(t) IMPORTANT: one and exactly one solution exist for every point of an IVP Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable ODEs\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinitial_value_problems/\"\u003einitial value problems\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-point-of-an-ivp\"\u003einterval principle\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/#division-method\"\u003edivision method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/#general-solution-to-y--t--ry--t\"\u003egeneral solution to y\u0026rsquo;(t) = ry(t)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIMPORTANT: \u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-point-of-an-ivp\"\u003eone and exactly one solution exist for every point of an IVP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan102023/","tags":null,"title":"SU-MATH53 JAN102024"},{"categories":null,"contents":"Key Sequence Notation New Concepts level set Newton\u0026rsquo;s Law of Cooling logistic equations Important Results / Claims case study: pietri dish Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlevel_set/\"\u003elevel set\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnewton_s_law_of_cooling/\"\u003eNewton\u0026rsquo;s Law of Cooling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_equations/\"\u003elogistic equations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpetri_dish/\"\u003ecase study: pietri dish\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan122023/","tags":null,"title":"SU-MATH53 JAN122024"},{"categories":null,"contents":"Key Sequence Notation New Concepts tying into Separated Equations: \\(y\u0026rsquo; = f(t,y)\\) which are the most nicest. Recall that there was two special cases: seperable and autonomous ODEs. if we can write in terms of elementary function, good times if we can\u0026rsquo;t do it in terms of elementary functions, we can use qualitative analysis t(slope field, etc.) recall again Newton\u0026rsquo;s Law of Cooling phase line and stability (ODEs) Important Results / Claims autonomous First Order ODEs\u0026rsquo; solutions do not cross; as in, if there are two solutinos \\(y_1\\) and \\(y_2\\), their curves never intersect. one and exactly one solution exist for every point of an IVP autonomous ODEs level off at stationary curves Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etying into \u003ca href=\"/posts/kbhfirst_order_odes/#separated-equations\"\u003eSeparated Equations\u003c/a\u003e: \\(y\u0026rsquo; = f(t,y)\\) which are the most nicest. 
Recall that there was two special cases: \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e and \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e.\n\u003cul\u003e\n\u003cli\u003eif we can write in terms of elementary function, good times\u003c/li\u003e\n\u003cli\u003eif we can\u0026rsquo;t do it in terms of elementary functions, we can use qualitative analysis t(slope field, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003erecall again \u003ca href=\"/posts/kbhnewton_s_law_of_cooling/\"\u003eNewton\u0026rsquo;s Law of Cooling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhphase_line/\"\u003ephase line\u003c/a\u003e and \u003ca href=\"/posts/kbhstability/\"\u003estability (ODEs)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous\u003c/a\u003e \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\u0026rsquo; solutions do not cross; as in, if there are two solutinos \\(y_1\\) and \\(y_2\\), their curves never intersect.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-point-of-an-ivp\"\u003eone and exactly one solution exist for every point of an IVP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/#autonomous-odes-level-off-at-stationary-curves\"\u003eautonomous ODEs level off at stationary curves\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan172024/","tags":null,"title":"SU-MATH53 JAN172024"},{"categories":null,"contents":"Key Sequence Notation New Concepts complex number 
Recall also \\(|z| = \\sqrt{\\bar{z}z}\\)\nEuler\u0026rsquo;s Equation Important Results / Claims complex numbers, fundamentally, are a way of *multiplying in \\(\\mathbb{R}^{2}\\) scaling by reals will result in scaling up, and multiplying by complex will result in rotation. Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eRecall also \\(|z| = \\sqrt{\\bar{z}z}\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecomplex numbers, fundamentally, are a way of *multiplying in \\(\\mathbb{R}^{2}\\)\u003c/li\u003e\n\u003cli\u003escaling by reals will result in \u003cstrong\u003escaling up\u003c/strong\u003e, and multiplying by complex will result in \u003cstrong\u003erotation\u003c/strong\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan192023/","tags":null,"title":"SU-MATH53 JAN192024"},{"categories":null,"contents":"Key Sequence Notation New Concepts Second-Order Linear Differential Equations superposition principle and functional independence Newton\u0026rsquo;s First Law of Motion Important Results / Claims finding independent solutions of second-order constant-coefficient linear ODEs homogeneous constant-coefficient second order linear ODE Uniqueness and Existance of second order superposition principle Questions Interesting 
Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e and functional \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnewton_s_first_law_of_motion/\"\u003eNewton\u0026rsquo;s First Law of Motion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#finding-independent--kbhprobability-dot-md--solutions-of-second-order-constant-coefficient-linear-odes\"\u003efinding independent solutions of second-order constant-coefficient linear ODEs\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#homogeneous-constant-coefficient-second-order-linear-ode\"\u003ehomogeneous constant-coefficient second order linear ODE\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#id-24ec5541-9b12-4521-b698-014711a2c762-uniqueness-and-existance-of-second-order\"\u003eUniqueness and Existance of second order\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan202024/","tags":null,"title":"SU-MATH53 JAN222024"},{"categories":null,"contents":"Underdetermined ODEs Complex ODE System Matrix Exponentiation Finding eigenvectors \\(A = n \\times n\\) matrix, the task of finding eigenvalues and eigenvectors is a linear algebra problem:\n\\begin{equation} A v = \\lambda v \\end{equation}\nFinding specific solutions to IVPs with special substitution For some:\n\\begin{equation} \\begin{cases} x\u0026rsquo; = Ax \\\\ x(t_0) = x_0 \\end{cases} \\end{equation}\nwe can leverage the first task:\nfind \\(v\\), \\(\\lambda\\) for \\(A\\) guess \\(x = u(t)v\\), this is \u0026ldquo;magical substitution\u0026rdquo; and now, we can see that \\(x\u0026rsquo; = u\u0026rsquo;v = A(uv) = \\lambda u v\\) meaning \\(u\u0026rsquo; = \\lambda u\\) finaly, \\(u(t) = ce^{\\lambda} t\\) Eigenbasis case Suppose \\(A\\) has a basis of eigenvectors, and real eigenvalues. 
We can write its entire solution set in terms of these basis eigenvectors:\n\\begin{equation} x(t) = u_1(t) v_1 + \\dots + u_{n}(t) v_{n} \\end{equation}\nthis means:\n\\begin{equation} x\u0026rsquo;(t) = Ax = u_1\u0026rsquo; v_1 + \\dots +u_{n} \u0026rsquo; v_{n} = \\lambda_{1} u_{1} v_1 + \\dots + \\lambda_{n} u_{n} v_{n} \\end{equation}\nBecause \\(v\\) forms a basis, each \\(u_j\u0026rsquo; = \\lambda_{j} u_{j}\\).\nWe thereby decomposed our entangled expression seperably by changing into eigenbasis.\nAfter solving each \\(u\\), we obtain:\n\\begin{equation} x(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n} \\end{equation}\nWe can identify \\(c_{j}\\) by noting, that \\(x(0)\\) resolves to:\n\\begin{equation} x(0) = c_1v_1 + \\dots + c_{n}v_{n} \\end{equation}\nFinally, we can write this as:\n\\begin{equation} x(0) = x_0 = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] c \\end{equation}\nMeaning, we can solve for initial conditions as:\n\\begin{equation} \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}]^{-1} x_0 = c \\end{equation}\nPractice Solving Let:\n\\begin{equation} A = \\mqty(0 \u0026amp; 1 \u0026amp; 1 \\\\ 1 \u0026amp; 0 \u0026amp; 1 \\\\ 1 \u0026amp; 1 \u0026amp; 0) \\end{equation}\nWe have two eigenspaces:\n\\begin{equation} \\lambda = -1, v = \\left\\{\\mqty(-1 \\\\ 1 \\\\ 0), \\mqty(0 \\\\ 1 \\\\ -1)\\right\\} \\end{equation}\nand\n\\begin{equation} \\lambda = 2, v = \\left\\{\\mqty(1 \\\\ 1 \\\\ 1)\\right\\} \\end{equation}\nThis gives rise to a basis of eigenvectors with all three vectors. 
We obtain:\n\\begin{equation} x(t) = c_1 e^{-t} \\mqty(-1 \\\\ 1\\\\0) + c_2 \\mqty(0 \\\\ 1 \\\\ -1) e^{-t} + c_3 \\mqty(1 \\\\ 1 \\\\ 1) e^{2t} \\end{equation}\n","html":"\u003ch2 id=\"underdetermined-odes\"\u003eUnderdetermined ODEs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunderdetermined_ode_system/\"\u003eComplex ODE System\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003eMatrix Exponentiation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"finding-eigenvector--kbheigenvalue-dot-md--s\"\u003eFinding \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003e\\(A = n \\times n\\) matrix, the task of finding eigenvalues and eigenvectors is a linear algebra problem:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA v = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"finding-specific-solutions-to-ivp--kbhinitial-value-problems-dot-md--s-with-special-substitution\"\u003eFinding specific solutions to \u003ca href=\"/posts/kbhinitial_value_problems/\"\u003eIVP\u003c/a\u003es with special substitution\u003c/h3\u003e\n\u003cp\u003eFor some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx\u0026rsquo; = Ax \\\\\nx(t_0) = x_0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can leverage the first task:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efind \\(v\\), \\(\\lambda\\) for \\(A\\)\u003c/li\u003e\n\u003cli\u003eguess \\(x = u(t)v\\), this is \u0026ldquo;magical substitution\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eand now, we can see that \\(x\u0026rsquo; = u\u0026rsquo;v = A(uv) = \\lambda u v\\)\u003c/li\u003e\n\u003cli\u003emeaning \\(u\u0026rsquo; = \\lambda u\\)\u003c/li\u003e\n\u003cli\u003efinaly, \\(u(t) = ce^{\\lambda} t\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"eigenbasis-case\"\u003eEigenbasis case\u003c/h3\u003e\n\u003cp\u003eSuppose \\(A\\) has a basis of \u003ca 
href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, and \u003cstrong\u003ereal\u003c/strong\u003e eigenvalues. We can write its entire solution set in terms of these basis eigenvectors:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = u_1(t) v_1 + \\dots + u_{n}(t) v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t) = Ax = u_1\u0026rsquo; v_1 + \\dots +u_{n} \u0026rsquo; v_{n} = \\lambda_{1} u_{1} v_1 + \\dots + \\lambda_{n} u_{n} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause \\(v\\) forms a basis, each \\(u_j\u0026rsquo; = \\lambda_{j} u_{j}\\).\u003c/p\u003e\n\u003cp\u003eWe thereby decomposed our entangled expression seperably by changing into eigenbasis.\u003c/p\u003e\n\u003cp\u003eAfter solving each \\(u\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can identify \\(c_{j}\\) by noting, that \\(x(0)\\) resolves to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(0) = c_1v_1 + \\dots + c_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we can write this as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(0) = x_0 = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, we can solve for initial conditions as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}]^{-1} x_0 = c\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"practice-solving\"\u003ePractice Solving\u003c/h2\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(0 \u0026amp; 1 \u0026amp; 1 \\\\ 1 \u0026amp; 0 \u0026amp; 1 \\\\ 1 \u0026amp; 1 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have two eigenspaces:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = -1, v = \\left\\{\\mqty(-1 \\\\ 1 \\\\ 0), \\mqty(0 \\\\ 
1 \\\\ -1)\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = 2, v = \\left\\{\\mqty(1 \\\\ 1 \\\\ 1)\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to a basis of eigenvectors with all three vectors. We obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = c_1 e^{-t} \\mqty(-1 \\\\ 1\\\\0) + c_2 \\mqty(0 \\\\ 1 \\\\ -1) e^{-t} + c_3 \\mqty(1 \\\\ 1 \\\\ 1) e^{2t}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan262023/","tags":null,"title":"SU-MATh53 JAN262023"},{"categories":null,"contents":"Review For Linear Constant-Coefficient Equation that are homogeneous, we can solve it generally in terms of some matrix \\(A\\) as:\n\\begin{equation} x\u0026rsquo; = Ax \\end{equation}\nif \\(A\\) has enough eigenvectors, we can just write out \\(y(t) = c_1 e^{\\lambda_{1}t} v_1 + \u0026hellip; + c_{n}e^{\\lambda_{n}t} v_{2}\\)\nBut, if we don\u0026rsquo;t, we can use matrix exponentiation\nContent eigensolutions ","html":"\u003ch2 id=\"review\"\u003eReview\u003c/h2\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/\"\u003eLinear Constant-Coefficient Equation\u003c/a\u003e that are \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous\u003c/a\u003e, we can solve it generally in terms of some matrix \\(A\\) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = Ax\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(A\\) has enough \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, we can just write out \\(y(t) = c_1 e^{\\lambda_{1}t} v_1 + \u0026hellip; + c_{n}e^{\\lambda_{n}t} v_{2}\\)\u003c/p\u003e\n\u003cp\u003eBut, if we don\u0026rsquo;t, we can use \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"content\"\u003eContent\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbheigensolutions/\"\u003eeigensolutions\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan292024/","tags":null,"title":"SU-MATH53 JAN292024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan312024/","tags":null,"title":"SU-MATH53 JAN312024"},{"categories":null,"contents":"We\u0026rsquo;ve gone over Heat Equation, Wave Equation, and let\u0026rsquo;s talk about some more stuff.\ndamped heat equation damped wave equation two-dimensional heat equation ","html":"\u003cp\u003eWe\u0026rsquo;ve gone over \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e, \u003ca href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e, and let\u0026rsquo;s talk about some more stuff.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdamped_heat_equation/#damped-heat-equation\"\u003edamped heat equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003edamped wave equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_dimensional_heat_equation/\"\u003etwo-dimensional heat equation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar012024/","tags":null,"title":"SU-MATH53 MAR012024"},{"categories":null,"contents":"What if, Fourier Series, but exponential?\nThis also motivates Discrete Fourier Transform.\nAlso Complex Exponential.\nReview Recall again that if we have a periodic function, we\u0026rsquo;ve got:\n\\begin{equation} f(x) = \\sum_{k=0}^{\\infty} a_{k} \\sin \\qty( \\frac{2\\pi k}{l} x) + b_{n} \\cos \\qty( \\frac{2\\pi k x}{L}) \\end{equation}\nWe note that this breaks individually into the sign and cosine series depending of the function\u0026rsquo;s oddness.\nComplex Fourier Series This will begin by feeling like a notation rewrite:\n\\begin{equation} f(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x 
i} \\end{equation}\nwhere \\(\\omega = \\frac{2\\pi}{L}\\).\nWhy is this summing from negative to positive?\nConsider:\n\\begin{equation} \\cos \\qty(nx) = \\frac{e^{inx}+e^{-inx}}{2} \\end{equation}\nYou will note that summing \\(n \\in 0 \u0026hellip; \\infty\\), plugging it into above, will result in summing from both \\(n \\in -\\infty \u0026hellip; \\infty\\).\nFinding \\(c_{n}\\) Recall that complex exponentials are orthonormal + inner product over complex-valued functions\nBecause most cancels except one thing, we get:\n\\begin{equation} \\langle f, e^{i\\omega n x} \\rangle = c_{n} L \\end{equation}\nmeaning:\n\\begin{equation} c_{n} = \\frac{1}{L} \\int_{0}^{L} f(x) e^{-i\\omega n x} \\dd{x} = \\frac{1}{L} \\int_{\\frac{-L}{2}}^{\\frac{L}{2}} f(x) e^{-i\\omega n x} \\dd{x} \\end{equation}\nif our function is \\(L\\) periodic.\nNOTE: this integral has a NEGATIVE power vs the series has a POSITIVE power!!\nComplex Exponentials with Sawtooth Consider:\n\\begin{equation} f(x) = x-n \\end{equation}\nwhere this function is periodic over \\(n \\leq x \\leq n+1\\), so\u0026mdash;\n\\begin{equation} c_{n} = \\int_{0}^{1} x e^{-2\\pi i n x} \\dd{x} = -\\frac{1}{2\\pi i n} e^{-2 \\pi i n} \\end{equation}\n","html":"\u003cp\u003eWhat if, \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e, but exponential?\u003c/p\u003e\n\u003cp\u003eThis also motivates Discrete Fourier Transform.\u003c/p\u003e\n\u003cp\u003eAlso \u003ca href=\"/posts/kbhcomplex_exponential/\"\u003eComplex Exponential\u003c/a\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"review\"\u003eReview\u003c/h2\u003e\n\u003cp\u003eRecall again that if we have a periodic function, we\u0026rsquo;ve got:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{k=0}^{\\infty} a_{k} \\sin \\qty( \\frac{2\\pi k}{l} x) + b_{n} \\cos \\qty( \\frac{2\\pi k x}{L})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe note that this breaks individually into the sign and cosine series depending 
of the function\u0026rsquo;s oddness.\u003c/p\u003e\n\u003ch2 id=\"complex-fourier-series\"\u003eComplex Fourier Series\u003c/h2\u003e\n\u003cp\u003eThis will begin by feeling like a notation rewrite:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\omega = \\frac{2\\pi}{L}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWhy is this summing from negative to positive?\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos \\qty(nx) = \\frac{e^{inx}+e^{-inx}}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that summing \\(n \\in 0 \u0026hellip; \\infty\\), plugging it into above, will result in summing from both \\(n \\in -\\infty \u0026hellip; \\infty\\).\u003c/p\u003e\n\u003chr\u003e\n\u003ch3 id=\"finding-c-n\"\u003eFinding \\(c_{n}\\)\u003c/h3\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhcomplex_exponential/#complex-exponentials-are-orthonormal\"\u003ecomplex exponentials are orthonormal\u003c/a\u003e + \u003ca href=\"/posts/kbhcomplex_exponential/#over-complex-valued-functions\"\u003einner product over complex-valued functions\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eBecause most cancels except one thing, we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f, e^{i\\omega n x} \\rangle = c_{n} L\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{n} = \\frac{1}{L} \\int_{0}^{L} f(x) e^{-i\\omega n x} \\dd{x} = \\frac{1}{L} \\int_{\\frac{-L}{2}}^{\\frac{L}{2}} f(x) e^{-i\\omega n x} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif our function is \\(L\\) periodic.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eNOTE: this integral has a NEGATIVE power vs the series has a POSITIVE power\u003c/strong\u003e\u003c/strong\u003e!!\u003c/p\u003e\n\u003ch3 id=\"complex-exponentials-with-sawtooth\"\u003eComplex 
Exponentials with Sawtooth\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = x-n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere this function is periodic over \\(n \\leq x \\leq n+1\\), so\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{n} = \\int_{0}^{1} x e^{-2\\pi i n x} \\dd{x} = -\\frac{1}{2\\pi i n} e^{-2 \\pi i n}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar042024/","tags":null,"title":"SU-MATH53 MAR042024"},{"categories":null,"contents":"Fourier Transform\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfourier_transform/#fourier-transform\"\u003eFourier Transform\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar062024/","tags":null,"title":"SU-MATH53 MAR062024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar082024/","tags":null,"title":"SU-MATH53 MAR082024"},{"categories":null,"contents":"heat equation on the entire line \\begin{equation} \\pdv{u}{t} = \\frac{1}{2} \\pdv[2]{u}{x} \\end{equation}\nWe can try to find a:\n\\begin{equation} U(0,x) = f(x) \\end{equation}\nif we write:\n\\begin{equation} \\hat{U}(t,\\lambda) = \\int e^{-i x \\lambda} U(t,x) \\dd{x} \\end{equation}\nwhich means we can write, with initial condtions:\n\\begin{equation} \\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}} \\end{equation}\nWe want to reach a close form:\n\\begin{equation} U (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nSteps: recall we ended up at\n\\begin{equation} \\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}} \\end{equation}\nLet\u0026rsquo;s call:\n\\begin{equation} \\hat{g}(\\lambda) = e^{- t \\frac{\\lambda^{2}}{2}} \\end{equation}\nso we have:\n\\begin{equation} \\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) \\hat{g}(\\lambda) 
\\end{equation}\nwe can use convolution to figure \\(U(t,x)\\).\nRecall that the Fourier transform of a Gaussian:\n\\begin{equation} \\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = \\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}} \\end{equation}\nLet\u0026rsquo;s first set:\n\\begin{equation} a = \\frac{1}{t} \\end{equation}\nWhich will give us that:\n\\begin{equation} g(x) = \\frac{1}{\\sqrt{2\\pi t} } e^{-\\frac{x^{2}}{2t}} \\end{equation}\nMeaning, with convolution:\n\\begin{equation} \\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g \\end{equation}\nwhy does this make sense We are convolving a Gaussian against \\(f(x)\\). Meaning, at very small \\(t\\) , we are taking a very small window of size \\(1\\) against.\nHeavyside function \\begin{equation} f(x) = \\begin{cases} 1, x\\geq 0 \\\\ 0, x\u0026lt;0 \\end{cases} \\end{equation}\nThis gives: if we split the room by \\(x\\). Recall:\n\\begin{equation} U (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nGiven our \\(f\\), this becomes:\n\\begin{equation} U (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{0}^{\\infty} e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nIf we change variables:\n\\begin{align} \\frac{(x-y)^{2}}{2t} - \\qty( \\frac{x}{\\sqrt{2t}} - \\frac{y}{\\sqrt{2t}})^{2} \\end{align}\nwhich means:\n\\begin{equation} z = \\frac{y}{2\\sqrt{t}} \\end{equation}\n\\begin{equation} \\frac{1}{\\sqrt{\\pi}} \\int_{0}^{\\infty} e^{^{-\\qty(\\frac{x}{\\sqrt{2t}} - z)^{2}}} \\dd{z} \\end{equation}\nand we will also apply:\n\\begin{equation} w = z - \\frac{x}{\\sqrt{2t}} \\end{equation}\nwhich will give:\n\\begin{equation} \\frac{1}{\\sqrt{\\pi}} \\int_{-\\frac{x}{\\sqrt{2t}}}^{\\infty} e^{-w^{2}} \\dd{w} \\end{equation}\nnotice, as \\(x\\) increases, we are integrating more of a Gaussian, which will be exceedingly close to \\(1\\); as \\(x\\) decreases, we\u0026rsquo;ll get closer to \\(0\\). 
And also, \\(t\\) smoothed \\(x\\) out, which means as \\(t\\) increases the interface between \\(0\\) and \\(1\\) becomes smoother.\nerf erf\nconvolution see convolution\n","html":"\u003ch2 id=\"heat-equation-on-the-entire-line\"\u003eheat equation on the entire line\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\frac{1}{2} \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can try to find a:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(0,x) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\int e^{-i x \\lambda} U(t,x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means we can write, with initial condtions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to reach a close form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eSteps: recall we ended up at\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s call:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{g}(\\lambda) = e^{- t \\frac{\\lambda^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) \\hat{g}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can use \u003ca href=\"/posts/kbhconvolution/#convolution\"\u003econvolution\u003c/a\u003e to figure \\(U(t,x)\\).\u003c/p\u003e\n\u003cp\u003eRecall that the Fourier transform of a Gaussian:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = 
\\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first set:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = \\frac{1}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich will give us that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ng(x) = \\frac{1}{\\sqrt{2\\pi t} } e^{-\\frac{x^{2}}{2t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, with convolution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"why-does-this-make-sense\"\u003ewhy does this make sense\u003c/h3\u003e\n\u003cp\u003eWe are convolving a Gaussian against \\(f(x)\\). Meaning, at very small \\(t\\) , we are taking a very small window of size \\(1\\) against.\u003c/p\u003e\n\u003ch3 id=\"heavyside-function\"\u003eHeavyside function\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\begin{cases}\n1, x\\geq 0 \\\\\n0, x\u0026lt;0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives: if we split the room by \\(x\\). 
Recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven our \\(f\\), this becomes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{0}^{\\infty} e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we change variables:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\frac{(x-y)^{2}}{2t} - \\qty( \\frac{x}{\\sqrt{2t}} - \\frac{y}{\\sqrt{2t}})^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = \\frac{y}{2\\sqrt{t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{\\sqrt{\\pi}} \\int_{0}^{\\infty} e^{^{-\\qty(\\frac{x}{\\sqrt{2t}} - z)^{2}}} \\dd{z}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand we will also apply:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw = z - \\frac{x}{\\sqrt{2t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich will give:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{\\sqrt{\\pi}} \\int_{-\\frac{x}{\\sqrt{2t}}}^{\\infty} e^{-w^{2}} \\dd{w}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enotice, as \\(x\\) increases, we are integrating more of a Gaussian, which will be exceedingly close to \\(1\\); as \\(x\\) decreases, we\u0026rsquo;ll get closer to \\(0\\). 
And also, \\(t\\) smoothed \\(x\\) out, which means as \\(t\\) increases the interface between \\(0\\) and \\(1\\) becomes smoother.\u003c/p\u003e\n\u003ch2 id=\"erf\"\u003eerf\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#erf\"\u003eerf\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"convolution--kbhconvolution-dot-md\"\u003e\u003ca href=\"/posts/kbhconvolution/#convolution\"\u003econvolution\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhconvolution/#convolution\"\u003econvolution\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar112024/","tags":null,"title":"SU-MATH53 MAR112024"},{"categories":null,"contents":"This is the staging file for the midterm sheet, which I don\u0026rsquo;t usually publicise.\n","html":"\u003cp\u003eThis is the staging file for the midterm sheet, which I don\u0026rsquo;t usually publicise.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_midterm_sheet/","tags":null,"title":"SU-MATH53 Midterm Sheet"},{"categories":null,"contents":"L-Periodic Functions So, we have:\n\\begin{equation} f(x+L) = f(x) \\end{equation}\nThe integral is equivalent for any:\n\\begin{equation} \\int_{a}^{a+L} f(x) \\end{equation}\nfor any \\(a\\).\nHeat Equation Recipe are we on a finite interval? then, decompose into product-type solution \\(A(t)B(x)\\) and solve. are we not? Fourier transform on the space variable and solve. What if \\(\\lambda \\in \\mathbb{C} \\backslash \\mathbb{R}\\) Shush.\nWhy can we guess \\(A(t)B(x)\\) Because we were able to find solutions. Believe that the solution set spans.\nFourier Transform on Three-Variable Expressions We have better Fourier transforms on n-space rather than on a line. 
Use those.\nExistence and Uniqueness + Superposition ","html":"\u003ch2 id=\"l-periodic-functions\"\u003eL-Periodic Functions\u003c/h2\u003e\n\u003cp\u003eSo, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x+L) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe integral is equivalent for any:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{a}^{a+L} f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor any \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"heat-equation-recipe\"\u003eHeat Equation Recipe\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eare we on a finite interval? then, decompose into product-type solution \\(A(t)B(x)\\) and solve.\u003c/li\u003e\n\u003cli\u003eare we not? Fourier transform on the space variable and solve.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"what-if-lambda-in-mathbb-c-backslash-mathbb-r\"\u003eWhat if \\(\\lambda \\in \\mathbb{C} \\backslash \\mathbb{R}\\)\u003c/h3\u003e\n\u003cp\u003eShush.\u003c/p\u003e\n\u003ch3 id=\"why-can-we-guess-a--t--b--x\"\u003eWhy can we guess \\(A(t)B(x)\\)\u003c/h3\u003e\n\u003cp\u003eBecause we were able to find solutions. Believe that the solution set spans.\u003c/p\u003e\n\u003ch2 id=\"fourier-transform-on-three-variable-expressions\"\u003eFourier Transform on Three-Variable Expressions\u003c/h2\u003e\n\u003cp\u003eWe have better Fourier transforms on n-space rather than on a line. Use those.\u003c/p\u003e\n\u003ch2 id=\"existence-and-uniqueness-plus-superposition\"\u003eExistence and Uniqueness + Superposition\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_problem_session/","tags":null,"title":"SU-MATH53 Problem Session"},{"categories":null,"contents":"a\n","html":"\u003cp\u003ea\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsubgroup/","tags":null,"title":"subgroup"},{"categories":null,"contents":"A subspace is a vector space which is a subset of a vector space, using the same addition and scalar multiplication operations. 
Intuitively, a subspace of \\(\\mathbb{R}^{2}\\) are all the lines through the origin as well as \\(\\{0\\}\\); a subspace of \\(\\mathbb{R}^{3}\\) are all the planes through the origin as well as \\(\\{0\\}\\), etc. etc.\nconstituents vector space \\(V\\) A subset \\(U \\subset V\\) which is itself a vector space requirements You check if \\(U\\) is a subspace of \\(V\\) by checking IFF the following three conditions:\nadditive identity: \\(0 \\in U\\) closed under the same addition as in \\(V\\): \\(u,w \\in U: u+w \\in U\\) closed under scalar multiplication as in \\(V\\): \\(a \\in \\mathbb{F}\\) and \\(u \\in U\\) means \\(au \\in U\\) Yes, by only checking three you can prove everything else.\nadditional information simplified check for subspace commutativity, associativity, distributivity These properties are inherited from \\(V\\) as they hold for every element in \\(V\\) so they will hold for \\(U \\subset V\\).\nadditive inverse Because scalar multiplication is defined, and we proved in Axler 1.B that \\(-1v=-v\\) (proof: \\(v+(-1)v = (1+(-1))v = 0v = 0\\)).\nmultiplicative identity Its still \\(1\\).\n\\(\\blacksquare\\)\nfinite-dimensional subspaces Every subspace of a finite-dimensional vector space is a finite-dimensional vector space.\nWe prove this result again via induction.\nbase case If \\(U=\\{0\\}\\), we know \\(U\\) is finite-dimensional and are done. If not, take some \\(v_1 \\in U\\) and create a list with only \\(v_1\\) thus far; the invariant here is that the list is linearly independent as we see that a list containing this one element as indeed linearly independent.\ncase \\(j\\) If the linearly independent list we created \\(v_1, \\dots v_{j-1}\\) spans \\(U\\), we are done. We have created a finite list which spans \\(U\\), making \\(U\\) finite-dimensional.\nIf not, that means that we can pick some \\(u \\in U\\) that cannot be written as a linear combination of the invariantly linearly independent vectors \\(v_1, \\dots v_{j-1}\\). 
We append \\(u\\) to the list, naming it \\(v_{j}\\). As \\(v_{j}\\) cannot be written as a linear combination of the original list, appending it to the list doesn\u0026rsquo;t make the list dependent. This means that the list is still linearly independent.\ninduction Therefore, we have constructed a list of increasing length that is linearly independent. By the fact that length of linearly-independent list \\(\\leq\\) length of spanning list, and the fact that the spanning list of \\(V\\) has finite length (it is given that \\(V\\) is a finite-dimensional vector space), the increasingly longer linearly independent list\u0026mdash;building upwards to eventually span \\(U\\) in finite length.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e which is a subset of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e, using the same \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e operations. Intuitively, a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(\\mathbb{R}^{2}\\) are all the lines through the origin as well as \\(\\{0\\}\\); a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(\\mathbb{R}^{3}\\) are all the planes through the origin as well as \\(\\{0\\}\\), etc. 
etc.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003eA subset \\(U \\subset V\\) which is itself a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eYou check if \\(U\\) is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\) by checking \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e the following three conditions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e: \\(0 \\in U\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under the same \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e as in \\(V\\): \\(u,w \\in U: u+w \\in U\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e as in \\(V\\): \\(a \\in \\mathbb{F}\\) and \\(u \\in U\\) means \\(au \\in U\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYes, by only checking three you can prove everything else.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"simplified-check-for-subspace\"\u003esimplified check for subspace\u003c/h3\u003e\n\u003ch4 id=\"commutativity--kbhcommutivity-dot-md--associativity--kbhassociative-dot-md--distributivity--kbhdistributivity-dot-md\"\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eThese properties are 
inherited from \\(V\\) as they hold for every element in \\(V\\) so they will hold for \\(U \\subset V\\).\u003c/p\u003e\n\u003ch4 id=\"additive-inverse--kbhinverses-dot-md\"\u003e\u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is defined, and we proved in \u003ca href=\"/posts/kbhaxler_1_b/\"\u003eAxler 1.B\u003c/a\u003e that \\(-1v=-v\\) (proof: \\(v+(-1)v = (1+(-1))v = 0v = 0\\)).\u003c/p\u003e\n\u003ch4 id=\"multiplicative-identity--kbhmultiplicative-identity-dot-md\"\u003e\u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eIts still \\(1\\).\u003c/p\u003e\n\u003cp\u003e\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/h3\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e is a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe prove this result again via induction.\u003c/p\u003e\n\u003ch4 id=\"base-case\"\u003ebase case\u003c/h4\u003e\n\u003cp\u003eIf \\(U=\\{0\\}\\), we know \\(U\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e and are done. 
If not, take some \\(v_1 \\in U\\) and create a list with only \\(v_1\\) thus far; the invariant here is that the list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e as we see that a list containing this one element as indeed \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"case-j\"\u003ecase \\(j\\)\u003c/h4\u003e\n\u003cp\u003eIf the \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list we created \\(v_1, \\dots v_{j-1}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U\\), we are done. We have created a finite list which \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U\\), making \\(U\\) \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf not, that means that we can pick some \\(u \\in U\\) that cannot be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the invariantly \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e vectors \\(v_1, \\dots v_{j-1}\\). We append \\(u\\) to the list, naming it \\(v_{j}\\). As \\(v_{j}\\) cannot be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the original list, appending it to the list doesn\u0026rsquo;t make the list dependent. This means that the list is still \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"induction\"\u003einduction\u003c/h4\u003e\n\u003cp\u003eTherefore, we have constructed a list of increasing length that is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. 
By the fact that \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e, and the fact that the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\) has finite length (it is given that \\(V\\) is a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e), the increasingly longer \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list\u0026mdash;building upwards to eventually \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e \\(U\\) in finite length.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsubspace/","tags":null,"title":"subspace"},{"categories":null,"contents":"the pocket at which the ligand binds to the enzyme\n","html":"\u003cp\u003ethe pocket at which the ligand binds to the enzyme\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsubtrait_envelope/","tags":null,"title":"substrate envelope"},{"categories":null,"contents":"The sum of subsets is the definition of addition upon two subsets.\nApparently, the unions of subsets are almost never subspaces (they don\u0026rsquo;t produce linearity?) Therefore, we like to work with sum of subsets more.\nRemember this has arbitrarily many things!! as a part of the content. 
When defining, remember to open that possibility.\nconstituents Sub-sets of \\(V\\) named \\(U_1, U_2, \\dots, U_{m}\\)\nrequirements The sum of subsets \\(U_1, \\dots, U_{m}\\) is defined as:\n\\begin{equation} U_1, \\dots, U_{m} = \\{u_1+\\dots+u_{m}: u_1\\in U_1, \\dots, u_{m} \\in U_{m}\\} \\end{equation}\n\u0026ldquo;all elements formed by taking one element from each and add it.\u0026rdquo;\nadditional information sum of subspaces is the smallest subspace with both subspaces Suppose \\(U_1, \\dots U_{m}\\) are subspaces of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is the smallest subspace of \\(V\\) containing \\(U_1, \\dots, U_{m}\\).\nProof:\nIs a subspace\u0026mdash;\nclearly \\(0\\) is in the sum. (taking \\(0\\) from each subspace and adding) addition and scalar multiplication inherits (closed in each subspace, then, reapplying definition of sum of subsets) Smallest containing subspace\u0026mdash;\nBecause a subspace is closed under addition, if a subspace contains \\(U_{1}, \\dots, U_{m}\\) you can always add each of the constituent elements manually to form every \\(U_1+\\dots+U_{m}\\).\nConversely, the subspace \\(U_1+\\dots +U_{m}\\) should contain \\(U_1, \\dots, U_{m}\\) by simply setting the coefficients except for the one you are interested in to \\(0\\).\nTherefore, as both subsets contain each other; they are equivalent.\ndimension of sums Let there be two finite-dimensional subspaces: \\(U_1\\) and \\(U_2\\). Then:\n\\begin{equation} \\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2) \\end{equation}\nProof:\nlet us form an basis of \\(U_1 \\cap U_{2}\\): \\(u_1, \\dots u_{m}\\); this indicates to us that \\(\\dim(U_1 \\cap U_{2}) = m\\). Being a basis of \\(U_1 \\cap U_{2}\\), it is linearly independent in \\(U_1\\) (which forms a part of the intersection.\nAs any linearly independent list (in this case, in \\(U_1\\)) can be expanded into a basis of \\(U_1\\). Let\u0026rsquo;s say by some vectors \\(v_1 \\dots v_{j}\\). 
Therefore, we have that:\nThe new basis is \\(u_1, \\dots u_{m}, v_1, \\dots v_{m}\\), and so:\n\\begin{equation} \\dim U_1 = m+j \\end{equation}\nBy the same token, let\u0026rsquo;s just say some \\(w_1, \\dots w_{k}\\) can be used to extend \\(u_1, \\dots u_{m}\\) into a basis of \\(U_2\\) (as \\(u_1, \\dots u_{m}\\) is also an linearly independent list in \\(U_2\\)). So:\n\\begin{equation} \\dim U_{2} = m+k \\end{equation}\nWe desire that \\(\\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2)\\). Having constructed all three of the elements, we desire to find a list that is length \\((m+j)+(m+k)-m = m+j+k\\) that forms a basis of \\(U_1+U_2\\), which will complete the proof.\nConveniently, \\(u_1, \\dots u_{m}, v_1, \\dots v_{j}, w_1, \\dots w_{k}\\) nicely is list of length \\(m+j+k\\). Therefore, we desire that that list forms a basis of \\(U_1+U_{2}\\).\nAs pairwise in this list are the basis of \\(U_1\\) and \\(U_2\\), this list can span both \\(U_1\\) and \\(U_2\\) (just zero out the \u0026ldquo;other\u0026rdquo; sublist\u0026mdash;zero \\(w\\) if desiring a basis of \\(U_1\\), \\(v\\) if \\(U_2\\) \u0026mdash;and you have a basis of each space. As \\(U_1+U_2\\) requires plucking a member from each and adding, as this list spans \\(U_1\\) and \\(U_2\\) separately (again, it forms the basis of the each space), we can just use this list to construct individually each component of \\(U_1+U_2\\) then adding it together. Hence, that long combo list spans \\(U_1+U_2\\).\nThe only thing left is to show that the giant list there is linearly independent. 
Let\u0026rsquo;s construct:\n\\begin{equation} a_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} + c_1w_1 + \\dots + c_{k}w_{k} = 0 \\end{equation}\nto demonstrate linearly independence,\nMoving the \\(w\\) to the right, we have that:\n\\begin{equation} a_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} =-(c_1w_1 + \\dots + c_{k}w_{k}) \\end{equation}\nRecall that \\(u_1 \\dots v_{j}\\) are all vectors in \\(U_1\\). Having written \\(-(c_1w_1 + \\dots + c_{k}w_{k})\\) as a linear combination thereof, we say that \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1\\) due to closure. But also, \\(w_1 \\dots w_{k} \\in U_2\\) as they form a basis of \\(U_2\\). Hence, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_2\\). So, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1 \\cap U_2\\).\nAnd we said that \\(u_1, \\dots u_{m}\\) are a basis for \\(U_1 \\cap U_{2}\\). Therefore, we can write the \\(c_{i}\\) sums as a linear combination of $u$s:\n\\begin{equation} d_1u_1 \\dots + \\dots + d_{m}u_{m} = (c_1w_1 + \\dots + c_{k}w_{k}) \\end{equation}\nNow, moving the right to the left again:\n\\begin{equation} d_1u_1 \\dots + \\dots + d_{m}u_{m} - (c_1w_1 + \\dots + c_{k}w_{k}) = 0 \\end{equation}\nWe have established before that \\(u_1 \\dots w_{k}\\) is a linearly independent list (it is the basis of \\(U_2\\).) So, to write \\(0\\), \\(d_1 = \\dots = c_{k} = 0\\).\nSubstituting back to the original:\n\\begin{align} a_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} \u0026amp;=-(c_1w_1 + \\dots + c_{k}w_{k}) \\\\ \u0026amp;= 0 \\end{align}\nrecall \\(u_1 \\dots v_{j}\\) is the basis of \\(U_1\\), meaning they are linearly independent. The above expression makes \\(a_1 = \\dots b_{j} = 0\\). 
Having shown that, to write \\(0\\) via \\(u, v, \\dots w\\) requires all scalars \\(a,b,c=0\\), the list is linearly independent.\nHaving shown that the list of \\(u_1, \\dots v_1, \\dots w_1 \\dots w_{k}\\) spans \\(U_1+U_2\\) and is linearly independent within it, it is a basis.\nIt does indeed have length \\(m+j+k\\), completing the proof. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e is the definition of \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e upon two subsets.\u003c/p\u003e\n\u003cp\u003eApparently, the unions of subsets are almost never \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es (they don\u0026rsquo;t produce linearity?) Therefore, we like to work with \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e more.\u003c/p\u003e\n\u003cp\u003eRemember this has \u003cstrong\u003e\u003cstrong\u003earbitrarily many things!!\u003c/strong\u003e\u003c/strong\u003e as a part of the content. 
When defining, remember to open that possibility.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eSub-\u003cstrong\u003e\u003cstrong\u003esets\u003c/strong\u003e\u003c/strong\u003e of \\(V\\) named \\(U_1, U_2, \\dots, U_{m}\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e \\(U_1, \\dots, U_{m}\\) is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1, \\dots, U_{m} = \\{u_1+\\dots+u_{m}: u_1\\in U_1, \\dots, u_{m} \\in U_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;all elements formed by taking one element from each and add it.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esum of subspaces is the smallest subspace with both subspaces\u003c/h3\u003e\n\u003cp\u003eSuppose \\(U_1, \\dots U_{m}\\) are \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is the smallest subspace of \\(V\\) containing \\(U_1, \\dots, U_{m}\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eIs a subspace\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eclearly \\(0\\) is in the sum. 
(taking \\(0\\) from each subspace and adding)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e inherits (closed in each subspace, then, reapplying definition of \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSmallest containing subspace\u0026mdash;\u003c/p\u003e\n\u003cp\u003eBecause a subspace is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e, if a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e contains \\(U_{1}, \\dots, U_{m}\\) you can always add each of the constituent elements manually to form every \\(U_1+\\dots+U_{m}\\).\u003c/p\u003e\n\u003cp\u003eConversely, the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U_1+\\dots +U_{m}\\) should contain \\(U_1, \\dots, U_{m}\\) by simply setting the coefficients except for the one you are interested in to \\(0\\).\u003c/p\u003e\n\u003cp\u003eTherefore, as both subsets contain each other; they are equivalent.\u003c/p\u003e\n\u003ch3 id=\"dimension--kbhdimension-dot-md--of-sums\"\u003e\u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of sums\u003c/h3\u003e\n\u003cp\u003eLet there be two \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e: \\(U_1\\) and \\(U_2\\). Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003elet us form an \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1 \\cap U_{2}\\): \\(u_1, \\dots u_{m}\\); this indicates to us that \\(\\dim(U_1 \\cap U_{2}) = m\\). 
Being a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1 \\cap U_{2}\\), it is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e in \\(U_1\\) (which forms a part of the intersection.\u003c/p\u003e\n\u003cp\u003eAs any \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list (in this case, in \\(U_1\\)) can be \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eexpanded into a basis\u003c/a\u003e of \\(U_1\\). Let\u0026rsquo;s say by some vectors \\(v_1 \\dots v_{j}\\). Therefore, we have that:\u003c/p\u003e\n\u003cp\u003eThe new basis is \\(u_1, \\dots u_{m}, v_1, \\dots v_{m}\\), and so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_1 = m+j\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy the same token, let\u0026rsquo;s just say some \\(w_1, \\dots w_{k}\\) can be used to extend \\(u_1, \\dots u_{m}\\) into a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_2\\) (as \\(u_1, \\dots u_{m}\\) is \u003cem\u003ealso\u003c/em\u003e an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(U_2\\)). So:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_{2} = m+k\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that \\(\\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2)\\). Having constructed all three of the elements, we desire to find a list that is length \\((m+j)+(m+k)-m = m+j+k\\) that forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1+U_2\\), which will complete the proof.\u003c/p\u003e\n\u003cp\u003eConveniently, \\(u_1, \\dots u_{m}, v_1, \\dots v_{j}, w_1, \\dots w_{k}\\) nicely is list of length \\(m+j+k\\). 
Therefore, we desire that that list forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1+U_{2}\\).\u003c/p\u003e\n\u003cp\u003eAs pairwise in this list are the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1\\) and \\(U_2\\), this list can \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e both \\(U_1\\) and \\(U_2\\) (just \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e out the \u0026ldquo;other\u0026rdquo; sublist\u0026mdash;zero \\(w\\) if desiring a basis of \\(U_1\\), \\(v\\) if \\(U_2\\) \u0026mdash;and you have a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each space. As \\(U_1+U_2\\) requires plucking a member from each and adding, as this \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U_1\\) and \\(U_2\\) separately (again, it forms the basis of the each space), we can just use this list to construct individually each component of \\(U_1+U_2\\) then adding it together. Hence, that long combo list \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U_1+U_2\\).\u003c/p\u003e\n\u003cp\u003eThe only thing left is to show that the giant list there is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. 
Let\u0026rsquo;s construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} + c_1w_1 + \\dots + c_{k}w_{k} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto demonstrate \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independence\u003c/a\u003e,\u003c/p\u003e\n\u003cp\u003eMoving the \\(w\\) to the right, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} =-(c_1w_1 + \\dots + c_{k}w_{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(u_1 \\dots v_{j}\\) are all \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(U_1\\). Having written \\(-(c_1w_1 + \\dots + c_{k}w_{k})\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e thereof, we say that \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1\\) due to closure. But also, \\(w_1 \\dots w_{k} \\in U_2\\) as they form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_2\\). Hence, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_2\\). So, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1 \\cap U_2\\).\u003c/p\u003e\n\u003cp\u003eAnd we said that \\(u_1, \\dots u_{m}\\) are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e for \\(U_1 \\cap U_{2}\\). 
Therefore, we can write the \\(c_{i}\\) sums as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of $u$s:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_1u_1 \\dots + \\dots + d_{m}u_{m} = (c_1w_1 + \\dots + c_{k}w_{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, moving the right to the left again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_1u_1 \\dots + \\dots + d_{m}u_{m} - (c_1w_1 + \\dots + c_{k}w_{k}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have established before that \\(u_1 \\dots w_{k}\\) is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list (it is the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_2\\).) So, to write \\(0\\), \\(d_1 = \\dots = c_{k} = 0\\).\u003c/p\u003e\n\u003cp\u003eSubstituting back to the original:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\na_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} \u0026amp;=-(c_1w_1 + \\dots + c_{k}w_{k}) \\\\\n\u0026amp;= 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003erecall \\(u_1 \\dots v_{j}\\) is the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1\\), meaning they are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. The above expression makes \\(a_1 = \\dots b_{j} = 0\\). Having shown that, to write \\(0\\) via \\(u, v, \\dots w\\) requires all scalars \\(a,b,c=0\\), the list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHaving shown that the list of \\(u_1, \\dots v_1, \\dots w_1 \\dots w_{k}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U_1+U_2\\) and is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e within it, it is a basis.\u003c/p\u003e\n\u003cp\u003eIt does indeed have length \\(m+j+k\\), completing the proof. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsum_of_subsets/","tags":null,"title":"sum of subsets"},{"categories":null,"contents":"Consider \u0026ldquo;what\u0026rsquo;s the variable representing the sum of the result of 2 dice?\u0026rdquo;\n\\begin{equation} Y = \\sum_{i=1}^{2} X \\end{equation}\nwhere \\(X\\) is a random variable representing the result of once dice.\n","html":"\u003cp\u003eConsider \u0026ldquo;what\u0026rsquo;s the variable representing the sum of the result of 2 dice?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY = \\sum_{i=1}^{2} X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(X\\) is a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e representing the result of once dice.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsum_of_two_dice/","tags":null,"title":"Sum of Two Dice, Random Variable Edition"},{"categories":null,"contents":"Suppose \\(v \\in V\\), and \\(U \\subset V\\). Then, \\(v+U\\) is the subset (not a subspace, obviously):\n\\begin{equation} v + U = \\{v+u : u \\in U\\} \\end{equation}\n","html":"\u003cp\u003eSuppose \\(v \\in V\\), and \\(U \\subset V\\). 
Then, \\(v+U\\) is the \u003cem\u003esubset\u003c/em\u003e (not a subspace, obviously):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv + U = \\{v+u : u \\in U\\}\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-20_22-01-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhsum_of_vector_and_subspace/","tags":null,"title":"sum of vector and subspace"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhspersite/","tags":null,"title":"supersite"},{"categories":null,"contents":"Supervised learning (also known as behavioral cloning) if the agent is learning what to do in an observe-act cycle) is a type of decision making method.\nprovide the agent with some examples use an automated learning algorithm to generalize from the example This is good for typically representative situations, but if you are throwing an agent into a completely unfamiliar situation, supervised learning cannot perform better.\nDisadvantages the labeled data is finite limited by the quality of performance in the training data interpolation between states are finite ","html":"\u003cp\u003eSupervised learning (also known as \u003ca href=\"/posts/kbhsupervised_learning/\"\u003ebehavioral cloning\u003c/a\u003e) if the agent is learning what to do in an \u003ca href=\"/posts/kbhobserve_act_cycle/\"\u003eobserve-act cycle\u003c/a\u003e) is a type of \u003ca href=\"/posts/kbhdecision_making/\"\u003edecision making\u003c/a\u003e method.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eprovide the \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e with some examples\u003c/li\u003e\n\u003cli\u003euse an automated learning algorithm to generalize from the example\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is good for typically representative situations, but if you are throwing an \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e into a completely unfamiliar situation, 
supervised learning cannot perform better.\u003c/p\u003e\n\u003ch2 id=\"disadvantages\"\u003eDisadvantages\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe labeled data is finite\u003c/li\u003e\n\u003cli\u003elimited by the quality of performance in the training data\u003c/li\u003e\n\u003cli\u003einterpolation between states are finite\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsupervised_learning/","tags":null,"title":"supervised learning"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsupport/","tags":null,"title":"support"},{"categories":null,"contents":"A function \\(T: V\\to W\\) is surjective if its range equals its codomain \\(W\\). \u0026ldquo;onto\u0026rdquo;\n\u0026ldquo;For any possible output, \\(w \\in W\\) for \\(T \\in \\mathcal{L}(V,W)\\), there is at LEAST one input \\(T\\) that maps \\(Tv \\to w\\). \u0026quot;\n\\begin{equation} \\forall w \\in W, \\exists v \\in V:Tv=W \\end{equation}\nmap to bigger space is not surjective See map to bigger space is not surjective\n","html":"\u003cp\u003eA function \\(T: V\\to W\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e if its \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e equals its codomain \\(W\\). \u0026ldquo;onto\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;For any possible output, \\(w \\in W\\) for \\(T \\in \\mathcal{L}(V,W)\\), there is at LEAST one input \\(T\\) that maps \\(Tv \\to w\\). 
\u0026quot;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall w \\in W, \\exists v \\in V:Tv=W\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"map-to-bigger-space-is-not-surjective--kbhlinear-map-dot-md\"\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsurjectivity/","tags":null,"title":"surjectivity"},{"categories":null,"contents":"syscalls are public functions that allow user land operations to access system-level services (such as reading a sector) which otherwise is locked in kernel mode because they require special privileges.\nThese functions are called completely isolated to another function: 1) private stack frame 2) private memory, etc.\nopen, close, read, write\nkernel mode kernel mode allows superuser function access such as reading sectors, etc. which would be dangerous if public.\nfile open int open(const char *pathname, int flags, mode_t mode); Flags are a bitwise OR operations: you have to open with O_RDONLY (read only), O_WRONLY (write only), or O_RDWR (both read and write). This returns \\(-1\\) if the reading fails.\nOther flags:\nO_TRUNC (truncate file) O_CREAT (creating a file if not exist), which will require a mode_t mode parameter to set the permission O_EXCL (file must not exist) close int close(int fd); ssize_t is a type that is a size_t which accepts -1.\nread get a block of a file\nssize_t read(int fd, void *buf, size_t count); Returns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). 
\\(0\\) if EOF, \\(-1\\) on error.\nread my nat read all the bytes the OS keeps track of where you are reading from write writes a block of a file\nssize_t write(int fd, void *buf, size_t count); Returns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). \\(0\\) if EOF, \\(-1\\) on error.\nfile descriptor After we open a file, file descriptors, which are ints, which track where the reading head is in the file; so you can have multiple descriptors each with a different location\nfile descriptor is used to model access to a variety of resources:\nnetwork connections printers/services and special file descriptors:\n0: STDIN_FILENO \u0026mdash; input from the terminal 1: STDOUT_FILENO \u0026mdash; output to the terminal 2: STDERR_FILENO \u0026mdash; error to the terminal ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsyscalls/\"\u003esyscalls\u003c/a\u003e are public functions that allow user land operations to access system-level services (such as reading a sector) which otherwise is locked in \u003ca href=\"#kernel-mode\"\u003ekernel mode\u003c/a\u003e because they require special privileges.\u003c/p\u003e\n\u003cp\u003eThese functions are called completely isolated to another function: 1) private stack frame 2) private memory, etc.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003eopen\u003c/code\u003e, \u003ccode\u003eclose\u003c/code\u003e, \u003ccode\u003eread\u003c/code\u003e, \u003ccode\u003ewrite\u003c/code\u003e\u003c/p\u003e\n\u003ch2 id=\"kernel-mode\"\u003ekernel mode\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#kernel-mode\"\u003ekernel mode\u003c/a\u003e allows superuser function access such as reading \u003ca href=\"/posts/kbhfilesystem/#disk\"\u003esector\u003c/a\u003es, etc. 
which would be dangerous if public.\u003c/p\u003e\n\u003ch2 id=\"file\"\u003efile\u003c/h2\u003e\n\u003ch3 id=\"open\"\u003eopen\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eopen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epathname\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eflags\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003emode_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFlags are a bitwise OR operations: you have to open with \u003ccode\u003eO_RDONLY\u003c/code\u003e (read only), \u003ccode\u003eO_WRONLY\u003c/code\u003e (write only), or \u003ccode\u003eO_RDWR\u003c/code\u003e (both read and write). 
This returns \\(-1\\) if the reading fails.\u003c/p\u003e\n\u003cp\u003eOther flags:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eO_TRUNC\u003c/code\u003e (truncate file)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_CREAT\u003c/code\u003e (creating a file if not exist), which will require a \u003ccode\u003emode_t mode\u003c/code\u003e parameter to set the permission\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_EXCL\u003c/code\u003e (file must not exist)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"close\"\u003eclose\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ccode\u003essize_t\u003c/code\u003e is a type that is a \u003ccode\u003esize_t\u003c/code\u003e which accepts \u003ccode\u003e-1\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"read\"\u003eread\u003c/h3\u003e\n\u003cp\u003eget a block of a file\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eReturns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). \\(0\\) if EOF, \\(-1\\) on error.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eread my nat read all the bytes\u003c/li\u003e\n\u003cli\u003ethe OS keeps track of where you are reading from\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"write\"\u003ewrite\u003c/h3\u003e\n\u003cp\u003ewrites a block of a file\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eReturns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). \\(0\\) if EOF, \\(-1\\) on error.\u003c/p\u003e\n\u003ch2 id=\"file-descriptor\"\u003efile descriptor\u003c/h2\u003e\n\u003cp\u003eAfter we open a file, file descriptors, which are ints, which track where the reading head is in the file; so you can have multiple descriptors each with a different location\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#file-descriptor\"\u003efile descriptor\u003c/a\u003e is used to model access to a variety of resources:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003enetwork connections\u003c/li\u003e\n\u003cli\u003eprinters/services\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eand special file descriptors:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e0: \u003ccode\u003eSTDIN_FILENO\u003c/code\u003e \u0026mdash; input from the terminal\u003c/li\u003e\n\u003cli\u003e1: \u003ccode\u003eSTDOUT_FILENO\u003c/code\u003e \u0026mdash; output to the terminal\u003c/li\u003e\n\u003cli\u003e2: \u003ccode\u003eSTDERR_FILENO\u003c/code\u003e \u0026mdash; error to the terminal\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsyscalls/","tags":null,"title":"syscalls"},{"categories":null,"contents":"Suppose \\(T \\in \\mathcal{L}(V,W)\\). Define a \\(\\widetilde{T}: V / (null\\ T) \\to W\\) such that:\n\\begin{align} \\widetilde{T}(v+ null\\ T) = Tv \\end{align}\nso \\(\\widetilde{T}\\) is the map that recovers the mapped result from an affine subset from the null space of the map.\n\\(\\widetilde{T}\\) is well defined Same problem as that with operations on quotient space. 
We need to make sure that \\(\\widetilde{T}\\) behave the same way on distinct but equivalent representations of the same affine subset.\nSuppose \\(u,v \\in V\\) such that \\(u+null\\ T = v+null\\ T\\). Because two affine subsets parallel to \\(U\\) are either equal or disjoint, we have that \\(u-v \\in null\\ T\\). This means that \\(Tu-Tv = 0 \\implies Tu= Tv\\). So applying \\(\\widetilde{T}\\) on equivalent representations of the same affine subset would yield the same result, as desired. \\(\\blacksquare\\)\nproperties of \\(\\widetilde{T}\\) it is a linear map TBD proof. Basically just like do it inheriting operations from the operations on quotient space.\nit is injective We desire here that \\(null\\ \\widetilde{T} = \\{0\\}\\) which will tell us that \\(\\widetilde{T}\\) is injective.\nSuppose some \\(v + null\\ T\\) is in the null space of \\(\\widetilde{T}\\). So, we have that:\n\\begin{equation} \\widetilde{T}(v+null\\ T) = Tv = 0 \\end{equation}\nSo, we have that \\(v \\in null\\ T\\). Now, this means that \\(v-0 \\in null\\ T\\). Because two affine subsets parallel to \\(U\\) are either equal or disjoint, \\(v + null\\ T = 0 + null\\ T\\) WLOG \\(\\forall v+null\\ T \\in null\\ \\widetilde{T}\\). This means that \\(null\\ \\widetilde{T}=\\{0\\}\\), as desired.\nits range is equal to the map\u0026rsquo;s range \\begin{equation} range\\ \\widetilde{T} = range\\ T \\end{equation}\nby definition of everything.\n\\(V / null\\ T\\) is isomorphic to \\(range\\ T\\) \u0026hellip;.is this the point of this whole thing?\nShown by the two sub-results above, and that injectivity and surjectivity implies invertability.\n","html":"\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\). 
Define a \\(\\widetilde{T}: V / (null\\ T) \\to W\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\widetilde{T}(v+ null\\ T) = Tv\n\\end{align}\u003c/p\u003e\n\u003cp\u003eso \\(\\widetilde{T}\\) is the map that recovers the mapped result from an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e from the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of the map.\u003c/p\u003e\n\u003ch2 id=\"widetilde-t-is-well-defined\"\u003e\\(\\widetilde{T}\\) is well defined\u003c/h2\u003e\n\u003cp\u003eSame problem as that with \u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on quotient space\u003c/a\u003e. We need to make sure that \\(\\widetilde{T}\\) behave the same way on distinct but equivalent representations of the same \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuppose \\(u,v \\in V\\) such that \\(u+null\\ T = v+null\\ T\\). Because \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e, we have that \\(u-v \\in null\\ T\\). This means that \\(Tu-Tv = 0 \\implies Tu= Tv\\). So applying \\(\\widetilde{T}\\) on equivalent representations of the same \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e would yield the same result, as desired. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"properties-of-widetilde-t\"\u003eproperties of \\(\\widetilde{T}\\)\u003c/h2\u003e\n\u003ch3 id=\"it-is-a-linear-map\"\u003eit is a linear map\u003c/h3\u003e\n\u003cp\u003eTBD proof. 
Basically just like do it inheriting operations from the \u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on quotient space\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"it-is-injective\"\u003eit is injective\u003c/h3\u003e\n\u003cp\u003eWe desire here that \\(null\\ \\widetilde{T} = \\{0\\}\\) which will tell us that \\(\\widetilde{T}\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuppose some \\(v + null\\ T\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(\\widetilde{T}\\). So, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\widetilde{T}(v+null\\ T) = Tv = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, we have that \\(v \\in null\\ T\\). Now, this means that \\(v-0 \\in null\\ T\\). Because \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e, \\(v + null\\ T = 0 + null\\ T\\) WLOG \\(\\forall v+null\\ T \\in null\\ \\widetilde{T}\\). 
This means that \\(null\\ \\widetilde{T}=\\{0\\}\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"its-range-is-equal-to-the-map-s-range\"\u003eits range is equal to the map\u0026rsquo;s range\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nrange\\ \\widetilde{T} = range\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby definition of everything.\u003c/p\u003e\n\u003ch3 id=\"v-null-t-is-isomorphic--kbhisomorphism-dot-md--to-range-t\"\u003e\\(V / null\\ T\\) is \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e to \\(range\\ T\\)\u003c/h3\u003e\n\u003cp\u003e\u0026hellip;.is this the point of this whole thing?\u003c/p\u003e\n\u003cp\u003eShown by the two sub-results above, and that \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbht_twiddle/","tags":null,"title":"T twiddle"},{"categories":null,"contents":"confidence intervals, a review:\n\\begin{equation} statistic \\pm z^*\\sigma_{statistic} \\end{equation}\nFrequently, we don\u0026rsquo;t have access to \\(\\sigma\\) and hence have to guestimate. When we have a sample means and a proportion, we have ways of guestimating it from the standard error (available on the single-sample section of the AP Statistics formula sheet.)\nHowever, for means, the standard error involves! \\(\\sigma\\). How do we figure \\(\\sigma\\) when we don\u0026rsquo;t know it? We could use \\(s\\), sample standard deviation, but then we have to adjust \\(z^*\\) otherwise we will have underestimation. 
Hence, we have to use a statistic called \\(t^*\\).\nWe can use t-values to perform t-test, a hypothesis test of means.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconfidence_interval/\"\u003econfidence interval\u003c/a\u003es, a review:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nstatistic \\pm z^*\\sigma_{statistic}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFrequently, we don\u0026rsquo;t have access to \\(\\sigma\\) and hence have to guestimate. When we have a sample means and a proportion, we have ways of guestimating it from the standard error (available on the single-sample section of the \u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e formula sheet.)\u003c/p\u003e\n\u003cp\u003eHowever, for means, the standard error \u003cem\u003einvolves!\u003c/em\u003e \\(\\sigma\\). How do we figure \\(\\sigma\\) when we don\u0026rsquo;t know it? We \u003cem\u003ecould\u003c/em\u003e use \\(s\\), sample standard deviation, but then we have to adjust \\(z^*\\) otherwise we will have underestimation. Hence, we have to use a statistic called \\(t^*\\).\u003c/p\u003e\n\u003cp\u003eWe can use t-values to perform \u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e, a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e of means.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbht_statistics/","tags":null,"title":"t-statistics"},{"categories":null,"contents":"A t-test is a hypothesis test for statistical significance between two sample means based on t-statistics. Before it can be conducted, it must meet the conditions for inference.\nconditions for inference (t-test) To use t-statistics, you have to meet three conditions just like the conditions for inference used in z-score.\nrandom sampling normal (sample size larger than 30, or if original distribution is confirmed as roughly symmetric about the mean) Independence use a z-statistic to find a p-value Begin by finding a \\(t\\) statistic. 
Remember that:\n\\begin{equation} t = \\frac{statistic-parameter}{std\\ err} \\end{equation}\nIn this case, when we are dealing with sample means, then, we have:\n\\begin{equation} t = \\frac{\\bar{x}-\\mu_0}{\\frac{S_x}{\\sqrt{n}}} \\end{equation}\nwhere \\(\\bar{x}\\) is the measured mean, \\(\\mu_0\\) is the null hypothesis mean, and \\(S_x\\) the sample\u0026rsquo;s sample standard deviation.\nQuick note:\n\\(SE = \\frac{S}{\\sqrt{n}}\\) because the central limit theorem states that sample means for their own distribution, whose variance equals the original variance divided by the sample size. Hence, the standard deviation of the means would be the sample standard deviation divided by the square root of the sample size.\nOnce you have a \\(t\\) value, you look at the test and what its asking (above the mean? below the mean? etc.) and add up the tail probabilities.\npaired vs two-sample tests A paired t-test looks at pairs of values as statistic in itself (i.e. substracts directly, etc.) Think about it as a compound statistic, so you are doing a \\(t\\) test on one value, it just happened to be composed/calculated by a pair of values. (for instance, \u0026ldquo;difference between mother-father glucose levels.\u0026rdquo;)\nA two-staple t-test looks at two independent events and compares them. 
Hence, they are two random variables and should be manipulated as such.\nt-tests for regression lines regression lines can be imbibed with predictive power and confidence intervals:\n\\begin{equation} m \\pm t^* SE_b \\end{equation}\nwhere \\(m\\) is the slope and \\(SE_b\\) is the standard error of the regression line.\nNote that the degrees of freedom used for \\(t^*\\) is the number of data points, minus two.\nconditions for inference (slops) Acronym: LINEAR\nLinear Independent (observations are independent or \\(\u0026lt;10\\%\\)) Normal (for a given \\(x\\), \\(y\\) is normally distributed) Equal variance (for any given \\(x\\), it should have a roughly equal standard deviation in \\(y\\)) Random ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e for statistical significance between two sample means based on \u003ca href=\"/posts/kbht_statistics/\"\u003et-statistics\u003c/a\u003e. 
Before it can be conducted, it must meet the \u003ca href=\"#conditions-for-inference--t-test\"\u003econditions for inference\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"conditions-for-inference--t-test\"\u003econditions for inference (t-test)\u003c/h2\u003e\n\u003cp\u003eTo use \u003ca href=\"/posts/kbht_statistics/\"\u003et-statistics\u003c/a\u003e, you have to meet three conditions just like the \u003ca href=\"/posts/kbhz_test/#conditions-for-inference--z-test\"\u003econditions for inference\u003c/a\u003e used in \u003ca href=\"/posts/kbhz_score/\"\u003ez-score.\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003erandom sampling\u003c/li\u003e\n\u003cli\u003enormal (sample size larger than 30, or if original distribution is confirmed as roughly symmetric about the mean)\u003c/li\u003e\n\u003cli\u003eIndependence\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"use-a-z-statistic-to-find-a-p-value\"\u003euse a z-statistic to find a p-value\u003c/h2\u003e\n\u003cp\u003eBegin by finding a \\(t\\) statistic. Remember that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = \\frac{statistic-parameter}{std\\ err}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, when we are dealing with sample means, then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = \\frac{\\bar{x}-\\mu_0}{\\frac{S_x}{\\sqrt{n}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bar{x}\\) is the measured mean, \\(\\mu_0\\) is the \u003ca href=\"/posts/kbhhypothesis_testing/#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e mean, and \\(S_x\\) the sample\u0026rsquo;s sample standard deviation.\u003c/p\u003e\n\u003cp\u003eQuick note:\u003c/p\u003e\n\u003cp\u003e\\(SE = \\frac{S}{\\sqrt{n}}\\) because the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e states that sample means for their own distribution, whose variance equals the original variance divided by the sample size. 
Hence, the standard deviation of the means would be the sample standard deviation divided by the square root of the sample size.\u003c/p\u003e\n\u003cp\u003eOnce you have a \\(t\\) value, you look at the test and what its asking (above the mean? below the mean? etc.) and add up the tail probabilities.\u003c/p\u003e\n\u003ch2 id=\"paired-vs-two-sample-tests\"\u003epaired vs two-sample tests\u003c/h2\u003e\n\u003cp\u003eA paired t-test looks at pairs of values as \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e in itself (i.e. substracts directly, etc.) Think about it as a compound statistic, so you are doing a \\(t\\) test on one value, it just happened to be composed/calculated by a pair of values. (for instance, \u0026ldquo;difference between mother-father glucose levels.\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eA two-staple t-test looks at two independent events and compares them. Hence, they are two random variables and should be manipulated as such.\u003c/p\u003e\n\u003ch2 id=\"t-tests-for-regression-lines\"\u003et-tests for regression lines\u003c/h2\u003e\n\u003cp\u003eregression lines can be imbibed with predictive power and confidence intervals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nm \\pm t^* SE_b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(m\\) is the slope and \\(SE_b\\) is the \u003ca href=\"/posts/kbhstandard_error/\"\u003estandard error\u003c/a\u003e of the regression line.\u003c/p\u003e\n\u003cp\u003eNote that the degrees of freedom used for \\(t^*\\) is the number of data points, minus \u003cstrong\u003etwo\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"conditions-for-inference--slops\"\u003econditions for inference (slops)\u003c/h3\u003e\n\u003cp\u003eAcronym: LINEAR\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLinear\u003c/li\u003e\n\u003cli\u003eIndependent (observations are independent or \\(\u0026lt;10\\%\\))\u003c/li\u003e\n\u003cli\u003eNormal (for a given \\(x\\), \\(y\\) is \u003ca 
href=\"/posts/kbhnormal_distribution/\"\u003enormally distributed\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eEqual variance (for any given \\(x\\), it should have a roughly equal standard deviation in \\(y\\))\u003c/li\u003e\n\u003cli\u003eRandom\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbht_test/","tags":null,"title":"t-test"},{"categories":null,"contents":"For an operator \\(T \\in \\mathcal{L}(V)\\), \\(T^{n}\\) would make sense. Instead of writing \\(TTT\\dots\\), then, we just write \\(T^{n}\\).\nconstituents operator \\(T \\in \\mathcal{L}(V)\\) requirements \\(T^{m} = T \\dots T\\) additional information \\(T^{0}\\) \\begin{equation} T^{0} := I \\in \\mathcal{L}(V) \\end{equation}\n\\(T^{-1}\\) \\begin{equation} T^{-m} = (T^{-1})^{m} \\end{equation}\nif \\(T\\) is invertable\nusual rules of squaring \\begin{equation} \\begin{cases} T^{m}T^{n} = T^{m+n} \\\\ (T^{m})^{n} = T^{mn} \\end{cases} \\end{equation}\nThis can be shown by counting the number of times \\(T\\) is repeated by writing each \\(T^{m}\\) out.\n","html":"\u003cp\u003eFor an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\), \\(T^{n}\\) would make sense. 
Instead of writing \\(TTT\\dots\\), then, we just write \\(T^{n}\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T^{m} = T \\dots T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"t-0\"\u003e\\(T^{0}\\)\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT^{0} := I \\in \\mathcal{L}(V)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"t-1\"\u003e\\(T^{-1}\\)\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT^{-m} = (T^{-1})^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eif\u003c/strong\u003e \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"usual-rules-of-squaring\"\u003eusual rules of squaring\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nT^{m}T^{n} = T^{m+n} \\\\\n(T^{m})^{n} = T^{mn}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis can be shown by counting the number of times \\(T\\) is repeated by writing each \\(T^{m}\\) out.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhraising_operators_to_powers/","tags":null,"title":"T^m"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtalk_contacts/","tags":null,"title":"Talk Contacts"},{"categories":null,"contents":"Moved much of this to Drafts instead\nphonbank: poor articulation\ndisfluent kids\nlate talkers\nWrite a review about ASR benchmark methods\nREV would be our benchmark What corpora we use? Has anyone used disordered speech? Or really seriously accented speech vis a vi CORALL (how was CORALL sampled?) What samples? How do we sample? What are the benchmarks? 
ASR model + WER\ntildes and noprompt swapped\nWER\nmissing words\ncorrect alignment\nthings swap noprompt backwards apostrophies for quotes the word separation error put tilde BETWEEN specific symbols with connection symbols jemoka becomes batchalign 2 Extended UD? combining bash script to run batchalign multiple times throughout the directories Removing removing non-auditory SBCA corpus area Diarization Diarization as a Bi-product of ASR humans at the end do speaker ID in the end DO TO BATCHALIGN allow people to reject files runhouse meeting \u0026lt;\u0026gt;Donny Greenberg: ADNE, nurses\u0026rsquo; health implementation at Google: grantees of canniniminty Remaining questions but we can\u0026rsquo;t provide SSH function.save() remote running through hashicorp vault? serializing ssh key remote? RUNHOUSE call into remote! headscale take wave2vec and hubert and GSLM questions? ask about inter-turn pauses, where INV: something something something \u0026lt;- PAR: WWW \u0026lt;- INV: somethingsomething else \u0026lt;- PAR: words words word no bullets are given for PAR, so do we skip it? do we count the time for WWW all as an inter-turn pause between INV and PAR? etc. Per Turn Turn level analysis Rename tier to Silence duration? does it include inter-utterance pauses?\nwithin-utterance pause\nfluency, mechanistic between-utterance pause\npause between utterances also: between-speaker pause!\nleaves room for the speaker to take the floor BETWEEN speaker pauses: \u0026ldquo;I don\u0026rsquo;t know what you are asking me\u0026rdquo;, etc.: \u0026ldquo;breakdown!\u0026rdquo; add features: STOPPA, TRESTLE, Wang\nhttps://coryshain.github.io/\nfeaturize saturnino fausa Questions What features? Where to put them? TalkBankDB How to encode the features? 
\u0026ldquo;How informative are your features\u0026rdquo; Start coming up with features (TRESTLE, perhaps) Encode them into xarray \u0026lt;\u0026gt; saturnino stuff make Spanish names list name, city, countries corpuses SABSAE: santa barbara english CABNC: British english next ignore any words that goes wrong in the pipeline ~change: noun =\u0026gt; n; verb =\u0026gt; v, etc.~ DET: ignore \u0026ldquo;DEF\u0026rdquo;, or perhaps the entir featureless unbulleted VAD exprimentents errors! line 1492\n*PAR:\tso ‡ anyway I tiptoe to the front door , open the front door and walk in . •1045194_1050644• %mor:\tco|so beg|beg adv|anyway pro:sub|I v|+n|tip+n|toe prep|to det:art|the n|front n|door cm|cm adj|open det:art|the n|front n|door coord|and n|walk adv|in . %gra:\t1|0|BEG 2|1|BEGP 3|5|JCT 4|5|SUBJ 5|0|ROOT 6|5|JCT 7|9|DET 8|9|MOD 9|6|POBJ 10|5|LP 11|14|MOD 12|14|DET 13|14|MOD 14|5|OBJ 15|14|CONJ 16|15|COORD 17|16|NJCT 18|5|PUNCT\nerrors? words without features needs to be correctly handled (done in the middle of meeting) 04111 (me ma SOS) nouns shouldn\u0026rsquo;t mark if it is Com,Neut, should\u0026rsquo;nt mark if its Com fix PASTP =\u0026gt; PAST and does past participles exist? more Move shua to d(e) Include instructions on how to recreate a broken Conda environment Update the package to conda somehow move next steps deal with `n` +\u0026hellip; fix remove bullets results ~ contraction \u0026amp; fused suffix getting rid of punkt in mor , =\u0026gt; cm . 
=\u0026gt; no PUNKT, stays stuff chocolaty (noadmin, https://docs.chocolatey.org/en-us/choco/setup#non-administrative-install) miniconda setx path \u0026ldquo;%path%;C:\\tools\\miniconda3\\condabin\u0026rdquo; curl env first, the install (Windows can\u0026rsquo;t do it from a URL) readme conda init zsh (close shell, open again) .mp4 mfa model downloading what\u0026rsquo;s the difference between online docker install and manual install NLTK Huggingface transformers tokenizers (versining) /opt/homebrew/Caskroom/miniforge/base/envs/aligner/lib/python3.9/site-packages/montreal_forced_aligner/corpus/text_corpus.py; getattr(self, k).update(error_dict[k]) AttributeError: \u0026rsquo;list\u0026rsquo; object has no attribute \u0026lsquo;update\u0026rsquo; FileArgumentNotFoundError: ; line 139\nDBA See the data on the frequency of haphax legomina vs. COCA ESPNet need to talk to Ji Yang Andrew\u0026rsquo;s Features Collapse two PAR tiers down Checkpoint per file One corpus prompt per run Handle empty tiers I/P selection crashes! contingency preview the LONGEST segment instead of the top one -i kill in the middle fixes \u0026ldquo;my mom\u0026rsquo;s cryin(g)\u0026rdquo; [\u0026lt;] mm [l648] (also themmm after) \u0026ldquo;made her a nice dress\u0026rdquo; [\u0026lt;] mhm [l1086] \u0026ldquo;when I was a kid I\u0026rdquo; \u0026amp;=laughs [l1278] Others chstring (for uh, mm-hmm)\nretrace (asr\u0026amp;fa folder)\nlowcase (caps)\nrep-join.cut (fixes/)\nnumbers \u0026lt;affirmative\u0026gt; \u0026lsquo;mo data! CallFriend/CallHome (ca-data) ISL? SBCSAE Aphasia + MICASE TBI data Providing a Two-Pass Solution Writing Big description of the pipeline Notion of the pipeline Better tokenization? 
8/18 Initial segment repetition Extracting studdering Gramatically problematic mar mar has done a thing and its phoneme level We did it, now automated LEAP data next actions Aphasia (-apraxia?): classification Child data (EllisWeismer) Dementia a ~Multiple @Begin/CHECK problem~\n~Placement of @Options~\n~Strange, missing period~\n~Bracket comments should FOLLOW words instead of PRECEEDING them~\n~%xwor: line~\nSTICK TO DASHES WHEN DISTRIBUTING BATCHALIGN\nend the utterance when it ends (incl. inter-utterance pauses)\n\u0026ldquo;I\u0026rdquo; need to be capitalized\n11005 (LT)\nAlign EllisWeismer\nAlso cool to align:\nfluency IISRP/*\nhttps://en.wikipedia.org/wiki/Speaker_diarisation\nhttps://universaldependencies.org/\nAlzheimer\u0026rsquo;s Project https://dementia.talkbank.org/\nhttps://luzs.gitlab.io/adresso-2021/\nSpecifically: https://dementia.talkbank.org/access/English/Pitt.html\nReview Kathleen Fraser: https://drive.google.com/drive/u/1/folders/1lYTIzzXLXw3LlDG9ZQ7k4RayDiP6eLs1\nHere are the review papers: https://drive.google.com/drive/u/1/folders/1pokU75aKt6vNdeSMpc-HfN9fkLvRyutt\nRead this first: https://drive.google.com/drive/u/1/folders/0B3XZtiQwQW4XMnlFN0ZGUndUamM?resourcekey=0-AlOCZb4q9TyG4KpaMQpeoA\nSome PITT data have 3-4 recordings\nThe best way to diagnosing alzhimers\u0026rsquo; is from language.\nWhy this field is needed: to analyze a pre-post test metric.\nDesired output: existence of dementia (a.k.a alzheimer\u0026rsquo;s\u0026rsquo;).\nOther research to read:\nPenn (julia parish something but they don\u0026rsquo;t stare their data but they smile and things with Mark Libermann type of thing) Learning more about speech text https://my.clevelandclinic.org/health/diagnostics/22327-differential-diagnosis python3 ~/mfa_data/batchalign-dist/batchalign.py ~/mfa_data/my_corpus ~/mfa_data/my_corpus_aligned\nchristan marr paper on MFA on child data\n","html":"\u003cp\u003eMoved much of this to Drafts 
instead\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ephonbank: poor articulation\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edisfluent kids\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elate talkers\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWrite a review about ASR benchmark methods\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eREV would be our benchmark\n\u003cul\u003e\n\u003cli\u003eWhat corpora we use?\u003c/li\u003e\n\u003cli\u003eHas anyone used \u003cstrong\u003edisordered speech\u003c/strong\u003e?\u003c/li\u003e\n\u003cli\u003eOr really seriously accented speech vis a vi CORALL (how was CORALL sampled?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWhat samples? How do we sample? What are the benchmarks?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-06-01_16-32-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eASR model + WER\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003etildes and noprompt swapped\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWER\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emissing words\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecorrect alignment\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things\"\u003ethings\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eswap noprompt backwards\u003c/li\u003e\n\u003cli\u003eapostrophies for quotes\u003c/li\u003e\n\u003cli\u003ethe word separation error\n\u003cul\u003e\n\u003cli\u003eput tilde BETWEEN specific symbols with connection symbols\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ejemoka becomes batchalign 2\u003c/li\u003e\n\u003cli\u003eExtended UD?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"combining\"\u003ecombining\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebash script to run 
batchalign multiple times throughout the directories\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"removing\"\u003eRemoving\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eremoving non-auditory SBCA corpus area\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"diarization\"\u003eDiarization\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDiarization as a Bi-product of ASR\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"humans-at-the-end\"\u003ehumans at the end\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edo speaker ID in the end\u003c/li\u003e\n\u003cli\u003eDO TO BATCHALIGN\u003c/li\u003e\n\u003cli\u003eallow people to reject files\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"runhouse-meeting\"\u003erunhouse meeting\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt;Donny Greenberg: ADNE, nurses\u0026rsquo; health\u003c/li\u003e\n\u003cli\u003eimplementation at Google: grantees of canniniminty\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"remaining-questions\"\u003eRemaining questions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ebut \u003cstrong\u003ewe can\u0026rsquo;t provide SSH\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003efunction.save()\u003c/li\u003e\n\u003cli\u003eremote\n\u003cul\u003e\n\u003cli\u003erunning through hashicorp vault?\u003c/li\u003e\n\u003cli\u003eserializing ssh key remote?\u003c/li\u003e\n\u003cli\u003eRUNHOUSE call into remote!\u003c/li\u003e\n\u003cli\u003e\u003cem\u003eheadscale\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003etake wave2vec and hubert and GSLM\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003equestions?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eask about inter-turn pauses, where\n\u003cul\u003e\n\u003cli\u003eINV: something something something \u0026lt;-\u003c/li\u003e\n\u003cli\u003ePAR: WWW \u0026lt;-\u003c/li\u003e\n\u003cli\u003eINV: somethingsomething else \u0026lt;-\u003c/li\u003e\n\u003cli\u003ePAR: words words 
word\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eno bullets are given for PAR, so do we skip it? do we count the time for WWW all as an inter-turn pause between INV and PAR? etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"per-turn\"\u003ePer Turn\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eTurn\u003c/strong\u003e level analysis\u003c/li\u003e\n\u003cli\u003eRename tier to\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"silence-duration\"\u003eSilence duration?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edoes it include inter-utterance pauses?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewithin-utterance pause\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efluency, mechanistic\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ebetween-utterance pause\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003epause between utterances\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ealso: between-speaker pause!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eleaves room for the speaker to take the floor\u003c/li\u003e\n\u003cli\u003eBETWEEN speaker pauses: \u0026ldquo;I don\u0026rsquo;t know what you are asking me\u0026rdquo;, etc.: \u0026ldquo;breakdown!\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eadd features: STOPPA, TRESTLE, Wang\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"https://coryshain.github.io/\"\u003ehttps://coryshain.github.io/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"featurize\"\u003efeaturize\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esaturnino\u003c/li\u003e\n\u003cli\u003efausa\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"questions\"\u003eQuestions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhat features?\u003c/li\u003e\n\u003cli\u003eWhere to put them?\u003c/li\u003e\n\u003cli\u003eTalkBankDB\u003c/li\u003e\n\u003cli\u003eHow to encode the 
features?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"how-informative-are-your-features\"\u003e\u0026ldquo;How informative are your features\u0026rdquo;\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStart coming up with features (TRESTLE, perhaps)\u003c/li\u003e\n\u003cli\u003eEncode them into xarray\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; saturnino\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stuff\"\u003estuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emake Spanish names list\u003c/li\u003e\n\u003cli\u003ename, city, countries\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"corpuses\"\u003ecorpuses\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eSABSAE\u003c/strong\u003e: santa barbara english\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eCABNC\u003c/strong\u003e: British english\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next\"\u003enext\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eignore any words that goes wrong in the pipeline\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e~change: noun =\u0026gt; n; verb =\u0026gt; v, etc.~\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eDET: ignore \u0026ldquo;DEF\u0026rdquo;, or perhaps the entir featureless\u003c/li\u003e\n\u003cli\u003eunbulleted VAD exprimentents\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"errors\"\u003eerrors!\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-23_20-22-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eline 1492\u003c/p\u003e\n\u003cp\u003e*PAR:\tso ‡ anyway I tiptoe to the front door , open the front door and\nwalk in . 
•1045194_1050644•\n%mor:\tco|so beg|beg adv|anyway pro:sub|I v|+n|tip+n|toe prep|to\ndet:art|the n|front n|door cm|cm adj|open det:art|the n|front n|door\ncoord|and n|walk adv|in .\n%gra:\t1|0|BEG 2|1|BEGP 3|5|JCT 4|5|SUBJ 5|0|ROOT 6|5|JCT 7|9|DET 8|9|MOD\n9|6|POBJ 10|5|LP 11|14|MOD 12|14|DET 13|14|MOD 14|5|OBJ 15|14|CONJ\n16|15|COORD 17|16|NJCT 18|5|PUNCT\u003c/p\u003e\n\u003ch2 id=\"errors\"\u003eerrors?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewords without features needs to be correctly handled (done in the middle of meeting)\u003c/li\u003e\n\u003cli\u003e04111 (me ma SOS)\u003c/li\u003e\n\u003cli\u003enouns shouldn\u0026rsquo;t mark if it is Com,Neut, should\u0026rsquo;nt mark if its Com\u003c/li\u003e\n\u003cli\u003efix PASTP =\u0026gt; PAST\u003c/li\u003e\n\u003cli\u003eand does past participles exist?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"more\"\u003emore\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eMove shua to d(e)\u003c/li\u003e\n\u003cli\u003eInclude instructions on how to recreate a broken Conda environment\u003c/li\u003e\n\u003cli\u003eUpdate the package to conda somehow\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"move\"\u003emove\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-03_00-17-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"next-steps\"\u003enext steps\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edeal with `n`\u003c/li\u003e\n\u003cli\u003e+\u0026hellip; fix\u003c/li\u003e\n\u003cli\u003eremove bullets\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results\"\u003eresults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e~ contraction\u003c/li\u003e\n\u003cli\u003e\u0026amp; fused\u003c/li\u003e\n\u003cli\u003e\n\u003cul\u003e\n\u003cli\u003esuffix\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003egetting rid of punkt in mor\n\u003cul\u003e\n\u003cli\u003e, =\u0026gt; cm\u003c/li\u003e\n\u003cli\u003e. 
=\u0026gt; no PUNKT, stays\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stuff\"\u003estuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003echocolaty (noadmin, \u003ca href=\"https://docs.chocolatey.org/en-us/choco/setup#non-administrative-install\"\u003ehttps://docs.chocolatey.org/en-us/choco/setup#non-administrative-install\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eminiconda\u003c/li\u003e\n\u003cli\u003esetx path \u0026ldquo;%path%;C:\\tools\\miniconda3\\condabin\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ecurl env first, the install (Windows can\u0026rsquo;t do it from a URL)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"readme\"\u003ereadme\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003econda init zsh (close shell, open again)\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e.mp4\u003c/li\u003e\n\u003cli\u003emfa model downloading\u003c/li\u003e\n\u003cli\u003ewhat\u0026rsquo;s the difference between online docker install and manual install\u003c/li\u003e\n\u003cli\u003eNLTK Huggingface transformers tokenizers (versining)\u003c/li\u003e\n\u003cli\u003e/opt/homebrew/Caskroom/miniforge/base/envs/aligner/lib/python3.9/site-packages/montreal_forced_aligner/corpus/text_corpus.py; getattr(self, k).update(error_dict[k])\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAttributeError: \u0026rsquo;list\u0026rsquo; object has no attribute \u0026lsquo;update\u0026rsquo;\nFileArgumentNotFoundError: ; line 139\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dba\"\u003eDBA\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSee the data on the frequency of haphax legomina vs. 
COCA\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"espnet\"\u003eESPNet\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eneed to talk to Ji Yang\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"andrew-s-features\"\u003eAndrew\u0026rsquo;s Features\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCollapse two PAR tiers down\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eCheckpoint per file\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eOne corpus prompt per run\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eHandle empty tiers\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eI/P selection crashes! contingency\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003epreview the LONGEST segment instead of the top one\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003e-i kill in the middle\u003c/del\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fixes\"\u003efixes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;my mom\u0026rsquo;s cryin(g)\u0026rdquo; [\u0026lt;] mm [l648] (also themmm after)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;made her a nice dress\u0026rdquo; [\u0026lt;] mhm [l1086]\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;when I was a kid I\u0026rdquo; \u0026amp;=laughs [l1278]\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"others\"\u003eOthers\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003echstring (for uh, mm-hmm)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eretrace (asr\u0026amp;fa folder)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elowcase (caps)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003erep-join.cut (fixes/)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-08-02_12-55-55_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cul\u003e\n\u003cli\u003enumbers\u003c/li\u003e\n\u003cli\u003e\u0026lt;affirmative\u0026gt;\u003c/li\u003e\n\u003cli\u003e\u0026lsquo;mo 
data!\n\u003cul\u003e\n\u003cli\u003eCallFriend/CallHome (ca-data)\u003c/li\u003e\n\u003cli\u003eISL?\u003c/li\u003e\n\u003cli\u003eSBCSAE\u003c/li\u003e\n\u003cli\u003eAphasia + MICASE\u003c/li\u003e\n\u003cli\u003eTBI data\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eProviding a Two-Pass Solution\u003c/li\u003e\n\u003cli\u003eWriting\n\u003cul\u003e\n\u003cli\u003eBig description of the pipeline\u003c/li\u003e\n\u003cli\u003eNotion of the pipeline\u003c/li\u003e\n\u003cli\u003eBetter tokenization?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e8/18\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003eInitial segment repetition\u003c/li\u003e\n\u003cli\u003eExtracting studdering\u003c/li\u003e\n\u003cli\u003eGramatically problematic\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mar\"\u003emar\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emar has done a thing and its phoneme level\u003c/li\u003e\n\u003cli\u003eWe did it, now automated\u003c/li\u003e\n\u003cli\u003eLEAP data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-actions\"\u003enext actions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAphasia (-apraxia?): classification\u003c/li\u003e\n\u003cli\u003eChild data (EllisWeismer)\u003c/li\u003e\n\u003cli\u003eDementia\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"a\"\u003ea\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Multiple @Begin/CHECK problem~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Placement of @Options~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Strange, missing period~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Bracket comments should FOLLOW words instead of PRECEEDING them~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~%xwor: 
line~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSTICK TO DASHES WHEN DISTRIBUTING BATCHALIGN\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eend the utterance when it ends (incl. inter-utterance pauses)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;I\u0026rdquo; need to be capitalized\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e11005 (LT)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlign EllisWeismer\u003c/p\u003e\n\u003cp\u003eAlso cool to align:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003efluency IISRP/*\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Speaker_diarisation\"\u003ehttps://en.wikipedia.org/wiki/Speaker_diarisation\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://universaldependencies.org/\"\u003ehttps://universaldependencies.org/\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"alzheimer-s-project\"\u003eAlzheimer\u0026rsquo;s Project\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://dementia.talkbank.org/\"\u003ehttps://dementia.talkbank.org/\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://luzs.gitlab.io/adresso-2021/\"\u003ehttps://luzs.gitlab.io/adresso-2021/\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpecifically: \u003ca href=\"https://dementia.talkbank.org/access/English/Pitt.html\"\u003ehttps://dementia.talkbank.org/access/English/Pitt.html\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eReview Kathleen Fraser: \u003ca 
href=\"https://drive.google.com/drive/u/1/folders/1lYTIzzXLXw3LlDG9ZQ7k4RayDiP6eLs1\"\u003ehttps://drive.google.com/drive/u/1/folders/1lYTIzzXLXw3LlDG9ZQ7k4RayDiP6eLs1\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHere are the review papers: \u003ca href=\"https://drive.google.com/drive/u/1/folders/1pokU75aKt6vNdeSMpc-HfN9fkLvRyutt\"\u003ehttps://drive.google.com/drive/u/1/folders/1pokU75aKt6vNdeSMpc-HfN9fkLvRyutt\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRead this first: \u003ca href=\"https://drive.google.com/drive/u/1/folders/0B3XZtiQwQW4XMnlFN0ZGUndUamM?resourcekey=0-AlOCZb4q9TyG4KpaMQpeoA\"\u003ehttps://drive.google.com/drive/u/1/folders/0B3XZtiQwQW4XMnlFN0ZGUndUamM?resourcekey=0-AlOCZb4q9TyG4KpaMQpeoA\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSome PITT data have 3-4 recordings\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThe best way to diagnosing alzhimers\u0026rsquo; is from language.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWhy this field is needed: to analyze a pre-post test metric.\u003c/p\u003e\n\u003cp\u003eDesired output: existence of dementia (a.k.a alzheimer\u0026rsquo;s\u0026rsquo;).\u003c/p\u003e\n\u003cp\u003eOther research to read:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ePenn (julia parish something but they don\u0026rsquo;t stare their data but they smile and things with Mark Libermann type of thing)\u003c/li\u003e\n\u003cli\u003eLearning more about speech text\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://my.clevelandclinic.org/health/diagnostics/22327-differential-diagnosis\"\u003ehttps://my.clevelandclinic.org/health/diagnostics/22327-differential-diagnosis\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003epython3 ~/mfa_data/\u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e-dist/batchalign.py ~/mfa_data/my_corpus 
~/mfa_data/my_corpus_aligned\u003c/p\u003e\n\u003cp\u003echristan marr paper on MFA on child data\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtalkbank/","tags":null,"title":"talkbank"},{"categories":null,"contents":"Lit Survey Pipeline Segmentation ","html":"\u003ch2 id=\"lit-survey\"\u003eLit Survey\u003c/h2\u003e\n\u003ch3 id=\"pipeline\"\u003ePipeline\u003c/h3\u003e\n\u003ch3 id=\"segmentation\"\u003eSegmentation\u003c/h3\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtalkbank_pipeline_project/","tags":null,"title":"TalkBank Pipeline Project"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtariffs/","tags":null,"title":"tariffs"},{"categories":null,"contents":"Step 0: know what you are building.\nbreaking tasks The process of breaking tasks down.\nWe need to research tasks to see how complex they are + how to break them down Research takes time! It should be its own task Over the process of research, the task becomes much simpler estimating tasks Requirement: tasks should always be estimated by the person doing the work.\nTask Estimation should be done each time! tasks shift Estimate only in powers of 2: 30 minutes, 1h, 2h, 4h, 8h, etc. If you never done something before, double the time than you estimate If you are teaching someone to do something, quadruple the time than you estimate Add buffer time (*1.5), especially if you think yourself as a procrastinator Focus is draining! You need breaks. Take breaks. Things will go wrong! Plan for it. time iterating If anything is longer than 8 hours, that\u0026rsquo;s a good sign you need to break it down! 
Likely that you have to break things down MVP You probably don\u0026rsquo;t have time to build your feature list\nMVP: minimum viable product We need the basic set of features; you probably have more features than you have time to build Prioritize what you build based on\u0026hellip; Dependencies: is this required for other stuff to work Viability: can the product exist without this? Time: how long does it take? Be ruthless about what you cut; talk to your user.\n","html":"\u003cp\u003eStep 0: know what you are building.\u003c/p\u003e\n\u003ch2 id=\"breaking-tasks\"\u003ebreaking tasks\u003c/h2\u003e\n\u003cp\u003eThe process of \u003ca href=\"#breaking-tasks\"\u003ebreaking tasks\u003c/a\u003e down.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWe need to research tasks to see how complex they are + how to break them down\u003c/li\u003e\n\u003cli\u003eResearch takes time! It should be its own task\u003c/li\u003e\n\u003cli\u003eOver the process of research, the task becomes much simpler\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"estimating-tasks\"\u003eestimating tasks\u003c/h2\u003e\n\u003cp\u003eRequirement: tasks should \u003cem\u003ealways\u003c/em\u003e be estimated by the person doing the work.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtask_estimation/\"\u003eTask Estimation\u003c/a\u003e should be done \u003cem\u003eeach time!\u003c/em\u003e tasks shift\u003c/li\u003e\n\u003cli\u003eEstimate only in powers of 2: 30 minutes, 1h, 2h, 4h, 8h, etc.\u003c/li\u003e\n\u003cli\u003eIf you never done something before, \u003cstrong\u003edouble\u003c/strong\u003e the time than you estimate\u003c/li\u003e\n\u003cli\u003eIf you are teaching someone to do something, \u003cstrong\u003equadruple\u003c/strong\u003e the time than you estimate\u003c/li\u003e\n\u003cli\u003eAdd buffer time (*1.5), especially if you think yourself as a procrastinator\n\u003cul\u003e\n\u003cli\u003eFocus is draining! You need breaks. 
Take breaks.\u003c/li\u003e\n\u003cli\u003eThings will go wrong! Plan for it.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"time-iterating\"\u003etime iterating\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIf anything is longer than 8 hours, that\u0026rsquo;s a good sign you need to break it down!\u003c/li\u003e\n\u003cli\u003eLikely that you have to break things down\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mvp\"\u003eMVP\u003c/h2\u003e\n\u003cp\u003eYou probably don\u0026rsquo;t have time to build your feature list\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMVP: minimum viable product\u003c/li\u003e\n\u003cli\u003eWe need the basic set of features; you probably have more features than you have time to build\u003c/li\u003e\n\u003cli\u003ePrioritize what you build based on\u0026hellip;\n\u003cul\u003e\n\u003cli\u003eDependencies: is this required for other stuff to work\u003c/li\u003e\n\u003cli\u003eViability: can the product exist without this?\u003c/li\u003e\n\u003cli\u003eTime: how long does it take?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBe ruthless about what you cut; talk to your user.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtask_estimation/","tags":null,"title":"Task Estimation"},{"categories":null,"contents":"The taxicab norm is a norm against a gridded system; it should follow the same properties of the norm, but not inner products.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhtaxicab_norm/\"\u003etaxicab norm\u003c/a\u003e is a norm against a gridded system; it should follow the same properties of the \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e, but not \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtaxicab_norm/","tags":null,"title":"taxicab 
norm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtaylor_se/","tags":null,"title":"taylor se"},{"categories":null,"contents":"Hostname: baboon.jemoka.com\nTechnology: MacBook Pro A2338\nSerial: C02FX4W2Q05N\nDescription: Space-Grey MacBook Pro 2020\nThis piece of technology is no longer managed by me, and should not be registered under this domain anymore.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebaboon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: MacBook Pro A2338\u003c/p\u003e\n\u003cp\u003eSerial: C02FX4W2Q05N\u003c/p\u003e\n\u003cp\u003eDescription: Space-Grey MacBook Pro 2020\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThis piece of technology is no longer managed by me, and should not be registered under this domain anymore.\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_baboon_jemoka_com/","tags":null,"title":"Technology: baboon.jemoka.com"},{"categories":null,"contents":"Hostname: balloon.jemoka.com\nTechnology: MacBook Pro A2779\nSerial: Q5491WTGGM\nDescription: Space-Grey MacBook Pro 2023\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003eballoon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: MacBook Pro A2779\u003c/p\u003e\n\u003cp\u003eSerial: Q5491WTGGM\u003c/p\u003e\n\u003cp\u003eDescription: Space-Grey MacBook Pro 2023\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. 
Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_balloon_jemoka_com/","tags":null,"title":"Technology: balloon.jemoka.com"},{"categories":null,"contents":"Hostname: bassoon.jemoka.com\nTechnology: Teenage Engineering OP-Z\nSerial: X3C-KJFBB\nDescription: Gray Portable Synthesizer\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebassoon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: Teenage Engineering OP-Z\u003c/p\u003e\n\u003cp\u003eSerial: X3C-KJFBB\u003c/p\u003e\n\u003cp\u003eDescription: Gray Portable Synthesizer\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bassoon_jemoka_com/","tags":null,"title":"Technology: bassoon.jemoka.com"},{"categories":null,"contents":"Hostname: bilon.jemoka.com\nTechnology: Trek Fx1\nDescription: Black Bicycle with front and rear Deralieurs\nSerial: WTU 270 XC1581 S\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. 
Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebilon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: Trek Fx1\u003c/p\u003e\n\u003cp\u003eDescription: Black Bicycle with front and rear Deralieurs\u003c/p\u003e\n\u003cp\u003eSerial: WTU 270 XC1581 S\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bilon_jemoka_com/","tags":null,"title":"Technology: bilon.jemoka.com"},{"categories":null,"contents":"Hostname: bison.jemoka.com\nTechnology: iPhone MLHT3LL/A\nSerial: LXQM93HWLC\nDescription: iPhone 13 Mini Midnight\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebison.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: iPhone MLHT3LL/A\u003c/p\u003e\n\u003cp\u003eSerial: LXQM93HWLC\u003c/p\u003e\n\u003cp\u003eDescription: iPhone 13 Mini Midnight\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. 
Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bison_jemoka_com/","tags":null,"title":"Technology: bison.jemoka.com"},{"categories":null,"contents":"Hostname: bonbon.jemoka.com\nTechnology: iPhone MT972LL/A\nSerial: C39Z2HS9KPFT\nDescription: iPhone Xs Black\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebonbon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: iPhone MT972LL/A\u003c/p\u003e\n\u003cp\u003eSerial: C39Z2HS9KPFT\u003c/p\u003e\n\u003cp\u003eDescription: iPhone Xs Black\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bonbon_jemoka_com/","tags":null,"title":"Technology: bonbon.jemoka.com"},{"categories":null,"contents":"Hostname: boon.jemoka.com\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003eboon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. 
Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_boon_jemoka_com/","tags":null,"title":"Technology: boon.jemoka.com"},{"categories":null,"contents":"Teddy Roosevelt was an American president.\nLarge personality: expanded scope of the Presidency \u0026mdash; \u0026ldquo;if it doesn\u0026rsquo;t explicit say its belong to the congress, it belongs to me\u0026rdquo; Moralist (Support American People), Imperialist (Believes in American Righteousness), Progressive Monroe Doctrine \u0026amp; Roosevelt Corollary: America for Americans The Panama Canal - engineered coup! to build the panama canal ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhteddy_roosevelt/\"\u003eTeddy Roosevelt\u003c/a\u003e was an American president.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLarge personality: expanded scope of the Presidency \u0026mdash; \u0026ldquo;if it doesn\u0026rsquo;t explicit say its belong to the congress, it belongs to me\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eMoralist (Support American People), Imperialist (Believes in American Righteousness), Progressive\u003c/li\u003e\n\u003cli\u003eMonroe Doctrine \u0026amp; Roosevelt Corollary: America for Americans\u003c/li\u003e\n\u003cli\u003eThe Panama Canal - engineered \u003cem\u003ecoup!\u003c/em\u003e to build the panama canal\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhteddy_roosevelt/","tags":null,"title":"Teddy Roosevelt"},{"categories":null,"contents":"\u0026hellip;is a series for which pairs cancel out:\n\\begin{equation} (1-x)(1+x+ \\dots + x^{n-1}) \\end{equation}\nyou will note that, though the expansion of this result, pairs of:\n\\begin{equation} -x^{j} + x^{j} \\end{equation}\nform. And you will note those cancel. 
Hence this is a telescoping series.\n","html":"\u003cp\u003e\u0026hellip;is a series for which pairs cancel out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(1-x)(1+x+ \\dots + x^{n-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that, though the expansion of this result, pairs of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-x^{j} + x^{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eform. And you will note those cancel. Hence this is a telescoping series.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhteelscoping_series/","tags":null,"title":"Teelscoping Series"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtemperal_abstraction/","tags":null,"title":"Temperal Abstraction"},{"categories":null,"contents":"A Term-Document Matrix is a boolean matrix of: rows\u0026mdash;\u0026ldquo;terms\u0026rdquo;, the search keywords\u0026mdash;and columns\u0026mdash;\u0026ldquo;documents\u0026rdquo;, which is the document. Each element \\((x,y)\\) is \\(1\\) if \\(y\\) contains term \\(x\\), and \\(0\\) otherwise.\nTo perform a search, we take a boolean operation over each row (usually either complement for NOT or identity), and AND it with all other terms. The resulting boolean string are the valid documents.\nNotably, this is quite intractable because the matrix is quite (words times documents) blows up. However, this representation is QUITE SPARSE. So, ideally we only store it sparsely.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e is a boolean matrix of: rows\u0026mdash;\u0026ldquo;terms\u0026rdquo;, the search keywords\u0026mdash;and columns\u0026mdash;\u0026ldquo;documents\u0026rdquo;, which is the document. 
Each element \\((x,y)\\) is \\(1\\) if \\(y\\) contains term \\(x\\), and \\(0\\) otherwise.\u003c/p\u003e\n\u003cp\u003eTo perform a search, we take a boolean operation over each row (usually either complement for NOT or identity), and AND it with all other terms. The resulting boolean string are the valid documents.\u003c/p\u003e\n\u003cp\u003eNotably, this is quite intractable because the matrix is quite (words times documents) blows up. However, this representation is \u003cstrong\u003eQUITE SPARSE\u003c/strong\u003e. So, ideally we only store it sparsely.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhterm_document_matrix/","tags":null,"title":"Term-Document Matrix"},{"categories":null,"contents":"Given what you claim as a normal distribution, we can test for its normality. Any distribution you claim as normal has to follow that:\n\\begin{equation} np \\geq 10 \u0026amp; n(1-p) \\geq 10 \\end{equation}\nthat number of successes and failures need both be greater than or equal to ten.\n","html":"\u003cp\u003eGiven what you claim as a \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e, we can test for its normality. 
Any distribution you claim as normal has to follow that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nnp \\geq 10 \u0026amp; n(1-p) \\geq 10\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat number of successes and failures need both be greater than or equal to ten.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtest_for_normality/","tags":null,"title":"test for normality (statistics)"},{"categories":null,"contents":"How many bugs are in 1,000 lines of code?\nTypical code: 1-10 Platform code: 0.1-1 The best\u0026mdash;NASA: 0.01-0.1 Never assume your software doesn\u0026rsquo;t have bugs.\nTest-Driven Development Test before you build!\nSpecs are already written We know what the expected behavior is We can write tests for the expected behavior first All tests fail to start We know we are done writing code when all tests pass \u0026ldquo;NYI\u0026rdquo; (not-yet implemented)\noften, writing test exposes gaps in your specs How NOT! not write tests Random Sampling Pick one or two inputs and show your code works on it Why it doesn\u0026rsquo;t work: there maybe specific inputs that break your code Exhaustive Testing Test for the domain of inputs Why it doesn\u0026rsquo;t work: tests run forever How DO you write tsets Black-Box Testing Pretend the code implementation is a black box All you know is what the specification; and what the input/output produces White-Box Testing You can see the implementation You test for specific edge cases Off-by-one, running time, specific inputs, etc. 
Malicious Testing What happens if a user is trying to break your system Sometimes, this is known as \u0026ldquo;pen-testing\u0026rdquo; or \u0026ldquo;white-hack hacking\u0026rdquo; Take CS340 Compsec How BIG are your tests Unit Testing Backbone of testing Typically, that means one test per function Tests choose representative inputs Idempotent: the state of the testing system should be a the beginning and end of the test (tests should revert) (setup + teardown tests) Subsystem Testing Exercise multiple functions working together in a system Often takes longer OK to run these less frequently End-to-End Integration Exercise the entire workflow May involve external libraries, hardware, etc. Regression Testing Isolate the cause of the bug to the smallest possible test case Write a test assuming the bug is fixed Fix the bug Add the test to your test suite How MUCH do we run tests Ideally, run tests every time code is committed Ideally\u0026mdash;run tests that address the function Schedule long tests What to test for see also here\nequivalence partitioning Come up with one test case per equivalence class. For instance, for a function that uppercases letters, analyze the following:\nLowercase letters Uppercase letters Non-alpha letters Non-printable letters Combinations Each group will therefore have nicely the requirements covered\nboundary value analysis In addition to just testing 1 element per class in equivalence partitioning, try to test boundary values (off-by-one, etc.) 
cases for each equivalence class if you can come up with them.\nArrange, Act, Assert arrange for setup by setting up variables, etc., and define the expected result (yes we do it before to be more readable) act do the thing assert correctness by checking the expected result ","html":"\u003cp\u003eHow many bugs are in 1,000 lines of code?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTypical code: 1-10\u003c/li\u003e\n\u003cli\u003ePlatform code: 0.1-1\u003c/li\u003e\n\u003cli\u003eThe best\u0026mdash;NASA: 0.01-0.1\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eNever \u003cem\u003eassume\u003c/em\u003e your software doesn\u0026rsquo;t have bugs.\u003c/p\u003e\n\u003ch2 id=\"test-driven-development\"\u003eTest-Driven Development\u003c/h2\u003e\n\u003cp\u003eTest \u003cem\u003ebefore\u003c/em\u003e you build!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSpecs are already written\u003c/li\u003e\n\u003cli\u003eWe know what the expected behavior is\u003c/li\u003e\n\u003cli\u003eWe can write tests for the expected behavior \u003cem\u003efirst\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eAll tests fail to start\u003c/li\u003e\n\u003cli\u003eWe know we are done writing code when all tests pass\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;NYI\u0026rdquo; (not-yet implemented)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eoften, writing test exposes gaps in your specs\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"how-not-not-write-tests\"\u003eHow \u003cem\u003eNOT!\u003c/em\u003e not write tests\u003c/h2\u003e\n\u003ch3 id=\"random-sampling\"\u003eRandom Sampling\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePick one or two inputs and show your code works on 
it\u003c/li\u003e\n\u003cli\u003eWhy it doesn\u0026rsquo;t work: there maybe specific inputs that break your code\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exhaustive-testing\"\u003eExhaustive Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTest for the domain of inputs\u003c/li\u003e\n\u003cli\u003eWhy it doesn\u0026rsquo;t work: tests run forever\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-do-you-write-tsets\"\u003eHow \u003cem\u003eDO\u003c/em\u003e you write tsets\u003c/h2\u003e\n\u003ch3 id=\"black-box-testing\"\u003eBlack-Box Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePretend the code implementation is a black box\u003c/li\u003e\n\u003cli\u003eAll you know is what the specification; and what the input/output produces\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"white-box-testing\"\u003eWhite-Box Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eYou can see the implementation\u003c/li\u003e\n\u003cli\u003eYou test for specific edge cases\u003c/li\u003e\n\u003cli\u003eOff-by-one, running time, specific inputs, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"malicious-testing\"\u003eMalicious Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhat happens if a user is \u003cem\u003etrying\u003c/em\u003e to break your system\u003c/li\u003e\n\u003cli\u003eSometimes, this is known as \u0026ldquo;pen-testing\u0026rdquo; or \u0026ldquo;white-hack hacking\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eTake CS340 Compsec\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-big-are-your-tests\"\u003eHow \u003cem\u003eBIG\u003c/em\u003e are your tests\u003c/h2\u003e\n\u003ch3 id=\"unit-testing\"\u003eUnit Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBackbone of testing\u003c/li\u003e\n\u003cli\u003eTypically, that means one test per function\u003c/li\u003e\n\u003cli\u003eTests choose representative inputs\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eIdempotent\u003c/strong\u003e\u003c/strong\u003e: the 
state of the testing system should be a the beginning and end of the test (tests should revert) (setup + teardown tests)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"subsystem-testing\"\u003eSubsystem Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExercise multiple functions working together in a system\u003c/li\u003e\n\u003cli\u003eOften takes longer\u003c/li\u003e\n\u003cli\u003eOK to run these less frequently\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"end-to-end-integration\"\u003eEnd-to-End Integration\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExercise the entire workflow\u003c/li\u003e\n\u003cli\u003eMay involve external libraries, hardware, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"regression-testing\"\u003eRegression Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIsolate the cause of the bug to the smallest possible test case\u003c/li\u003e\n\u003cli\u003eWrite a test assuming the bug is fixed\u003c/li\u003e\n\u003cli\u003eFix the bug\u003c/li\u003e\n\u003cli\u003eAdd the test to your test suite\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-much-do-we-run-tests\"\u003eHow \u003cem\u003eMUCH\u003c/em\u003e do we run tests\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIdeally, run tests every time code is committed\u003c/li\u003e\n\u003cli\u003eIdeally\u0026mdash;run tests that address the function\u003c/li\u003e\n\u003cli\u003eSchedule long tests\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"what-to-test-for\"\u003e\u003cem\u003eWhat\u003c/em\u003e to test for\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://softwareengineering.stackexchange.com/questions/750/what-should-you-test-with-unit-tests\"\u003esee also here\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"equivalence-partitioning\"\u003eequivalence partitioning\u003c/h3\u003e\n\u003cp\u003eCome up with one test case per equivalence class. 
For instance, for a function that uppercases letters, analyze the following:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eLowercase letters\u003c/li\u003e\n\u003cli\u003eUppercase letters\u003c/li\u003e\n\u003cli\u003eNon-alpha letters\u003c/li\u003e\n\u003cli\u003eNon-printable letters\u003c/li\u003e\n\u003cli\u003eCombinations\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eEach group will therefore have nicely the requirements covered\u003c/p\u003e\n\u003ch3 id=\"boundary-value-analysis\"\u003eboundary value analysis\u003c/h3\u003e\n\u003cp\u003eIn addition to just testing 1 element per class in \u003ca href=\"#equivalence-partitioning\"\u003eequivalence partitioning\u003c/a\u003e, try to test boundary values (off-by-one, etc.) cases for each equivalence class if you can come up with them.\u003c/p\u003e\n\u003ch3 id=\"arrange-act-assert\"\u003eArrange, Act, Assert\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003earrange\u003c/strong\u003e for setup by setting up variables, etc., and \u003cstrong\u003edefine the expected result\u003c/strong\u003e (yes we do it before to be more readable)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eact\u003c/strong\u003e do the thing\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eassert\u003c/strong\u003e correctness by checking the expected result\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtesting/","tags":null,"title":"Testing"},{"categories":null,"contents":"Take a document \\(d\\) and assign a fixed set of classes \\(\\{c_1, c_2, \u0026hellip;, c_{j}\\}\\) to that document. You want to predict \\(f(d) = c \\in C\\).\n","html":"\u003cp\u003eTake a document \\(d\\) and assign a fixed set of classes \\(\\{c_1, c_2, \u0026hellip;, c_{j}\\}\\) to that document. 
You want to predict \\(f(d) = c \\in C\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtext_classification/","tags":null,"title":"Text Classification"},{"categories":null,"contents":"two main parts:\ntokenization lemmatization ","html":"\u003cp\u003etwo main parts:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlemmatization/\"\u003elemmatization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtext_normalization/","tags":null,"title":"text normalization"},{"categories":null,"contents":"The Unreasonable Effectiveness of Mathematics in the Natural Sciences is an article by the famous mathematician Eugene Wigner. (Wigner 1990)\nReflection What I found most peculiarly interesting is the focus on many mathematical/physics texts on the idea of the \u0026ldquo;beauty\u0026rdquo; of the expressions; and, it seems, the clear pleasure that Wigner gets from analyzing the systems with the aforementioned \u0026ldquo;beauty.\u0026rdquo;\nSetting aside whether or not this beauty is \u0026ldquo;deserved\u0026rdquo;/appropriate, I love that my attraction to physics is somewhat similar to what Wigner describes. Under the appropriate conditions, with constraints, it is possible to build a solution to physics problems simply through the evolution of mathematics.\nIt is not to say that the models mathematics provides is correct. I like that Winger ended on the note about how \u0026ldquo;false\u0026rdquo; theories, even despite their falseness, provided shockingly accurate estimations of physical phenomena. 
Perhaps mathematics provides an almost-fully solid foundation to creating physical systems, but then the entire \u0026ldquo;flaw\u0026rdquo; we see with mathematical modeling is in our (in)ability to provide the limitations to scope.\nFor instance, Bohr\u0026rsquo;s model, an example of \u0026ldquo;falsehood\u0026rdquo; modeled, is an over-limitation to scope which\u0026mdash;thought reducing mathematical complexity\u0026mdash;resulted in a \u0026ldquo;wrong\u0026rdquo; theory. However, the mathematics behind the theory remains to be solid despite the scope limitation, making the result work in a reasonable manner (except for the pitfalls).\nThe inherent concern behind this statement, then, is that there is a case where we can build a perfectly reasonable system to model something, but it turns out that the system is correct only in the limited scope which we are used to operating; when suddenly the scope becomes broken, we are so used to the mathematical tools that we have came to rely on that we don\u0026rsquo;t notice their failures.\nI like that this entire point is brought up before our start in DiffEq, perhaps as a \u0026ldquo;with great power comes great responsibility\u0026rdquo; type of caution to us in terms of how our modeling may go awry while at the same time acting as a preview of the usefulness of the principles provided taken as a whole.\nReading notes Maths show up at entirely random places The first point is that mathematical concepts turn up in entirely unexpected connections. 
Moreover, they often permit an unexpectedly close and accurate description of the phenomena in these connections.\nWondering whether or not the theory is unique due to its applicability He became skeptical concerning the uniqueness of the coordination between keys and doors.\nThat math is really useful, its weird The first point is that the enormous usefulness of mathematics in the natural sciences is something bordering on the mysterious and that there is no rational explanation for it.\nIt also raises the question of how actually unique our theories are given they are all so applicable Second, it is just this uncanny usefulness of mathematical concepts that raises the question of the uniqueness of our physical theories.\nThe goal of mathematics is maximize the space of usefulness The great mathematician fully, almost ruthlessly, exploits the domain of permissible reasoning and skirts the impermissible.\nRegularity is suprising because its\u0026hellip; regularly found, which is unique The second surprising feature is that the regularity which we are discussing is independent of so many conditions which could have an effect on it.\nLaws of Nature are all highly conditional The principal purpose of the preceding discussion is to point out that the laws of nature are all conditional statements and they relate only to a very small part of our knowledge of the world.\nThat maths is just a fallback for \u0026ldquo;beatiful\u0026rdquo; physics happening the connection is that discussed in mathematics simply because he does not know of any other similar connection.\nApart from invarients, we just scope-limit ourselves to get the remaining bits that we need to make stuff work \u0026ldquo;beautifully\u0026rdquo; propose to refer to the observation which these examples illustrate as the empirical law of epistemology. 
Together with the laws of invariance of physical theories, it is an indispensable foundation of these theories.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhthe_unreasonable_effectiveness_of_mathematics_in_the_natural_sciences/\"\u003eThe Unreasonable Effectiveness of Mathematics in the Natural Sciences\u003c/a\u003e is an article by the famous mathematician \u003ca href=\"/posts/kbheugene_wigner/\"\u003eEugene Wigner\u003c/a\u003e. (\u003ca href=\"#citeproc_bib_item_1\"\u003eWigner 1990\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"reflection\"\u003eReflection\u003c/h2\u003e\n\u003cp\u003eWhat I found most peculiarly interesting is the focus on many mathematical/physics texts on the idea of the \u0026ldquo;beauty\u0026rdquo; of the expressions; and, it seems, the clear pleasure that Wigner gets from analyzing the systems with the aforementioned \u0026ldquo;beauty.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSetting aside whether or not this beauty is \u0026ldquo;deserved\u0026rdquo;/appropriate, I love that my attraction to physics is somewhat similar to what Wigner describes. Under the appropriate conditions, with constraints, it is possible to build a solution to physics problems simply through the evolution of mathematics.\u003c/p\u003e\n\u003cp\u003eIt is not to say that the models mathematics provides is correct. I like that Winger ended on the note about how \u0026ldquo;false\u0026rdquo; theories, even despite their falseness, provided shockingly accurate estimations of physical phenomena. 
Perhaps mathematics provides an almost-fully solid foundation to creating physical systems, but then the entire \u0026ldquo;flaw\u0026rdquo; we see with mathematical modeling is in our (in)ability to provide the limitations to scope.\u003c/p\u003e\n\u003cp\u003eFor instance, Bohr\u0026rsquo;s model, an example of \u0026ldquo;falsehood\u0026rdquo; modeled, is an over-limitation to scope which\u0026mdash;thought reducing mathematical complexity\u0026mdash;resulted in a \u0026ldquo;wrong\u0026rdquo; theory. However, the mathematics behind the theory remains to be solid despite the scope limitation, making the result work in a reasonable manner (except for the pitfalls).\u003c/p\u003e\n\u003cp\u003eThe inherent concern behind this statement, then, is that there is a case where we can build a perfectly reasonable system to model something, but it turns out that the system is correct only in the limited scope which we are used to operating; when suddenly the scope becomes broken, we are so used to the mathematical tools that we have came to rely on that we don\u0026rsquo;t notice their failures.\u003c/p\u003e\n\u003cp\u003eI like that this entire point is brought up before our start in DiffEq, perhaps as a \u0026ldquo;with great power comes great responsibility\u0026rdquo; type of caution to us in terms of how our modeling may go awry while at the same time acting as a preview of the usefulness of the principles provided taken as a whole.\u003c/p\u003e\n\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"maths-show-up-at-entirely-random-places\"\u003eMaths show up at entirely random places\u003c/h3\u003e\n\u003cp\u003eThe first point is that mathematical concepts turn up in entirely unexpected connections. 
Moreover, they often permit an unexpectedly close and accurate description of the phenomena in these connections.\u003c/p\u003e\n\u003ch3 id=\"wondering-whether-or-not-the-theory-is-unique-due-to-its-applicability\"\u003eWondering whether or not the theory is unique due to its applicability\u003c/h3\u003e\n\u003cp\u003eHe became skeptical concerning the uniqueness of the coordination between keys and doors.\u003c/p\u003e\n\u003ch3 id=\"that-math-is-really-useful-its-weird\"\u003eThat math is really useful, its weird\u003c/h3\u003e\n\u003cp\u003eThe first point is that the enormous usefulness of mathematics in the natural sciences is something bordering on the mysterious and that there is no rational explanation for it.\u003c/p\u003e\n\u003ch3 id=\"it-also-raises-the-question-of-how-actually-unique-our-theories-are-given-they-are-all-so-applicable\"\u003eIt also raises the question of how actually unique our theories are given they are all so applicable\u003c/h3\u003e\n\u003cp\u003eSecond, it is just this uncanny usefulness of mathematical concepts that raises the question of the uniqueness of our physical theories.\u003c/p\u003e\n\u003ch3 id=\"the-goal-of-mathematics-is-maximize-the-space-of-usefulness\"\u003eThe goal of mathematics is maximize the space of usefulness\u003c/h3\u003e\n\u003cp\u003eThe great mathematician fully, almost ruthlessly, exploits the domain of permissible reasoning and skirts the impermissible.\u003c/p\u003e\n\u003ch3 id=\"regularity-is-suprising-because-its-dot-dot-dot-regularly-found-which-is-unique\"\u003eRegularity is suprising because its\u0026hellip; regularly found, which is unique\u003c/h3\u003e\n\u003cp\u003eThe second surprising feature is that the regularity which we are discussing is independent of so many conditions which could have an effect on it.\u003c/p\u003e\n\u003ch3 id=\"laws-of-nature-are-all-highly-conditional\"\u003eLaws of Nature are all highly conditional\u003c/h3\u003e\n\u003cp\u003eThe principal purpose of the 
preceding discussion is to point out that the laws of nature are all conditional statements and they relate only to a very small part of our knowledge of the world.\u003c/p\u003e\n\u003ch3 id=\"that-maths-is-just-a-fallback-for-beatiful-physics-happening\"\u003eThat maths is just a fallback for \u0026ldquo;beatiful\u0026rdquo; physics happening\u003c/h3\u003e\n\u003cp\u003ethe connection is that discussed in mathematics simply because he does not know of any other similar connection.\u003c/p\u003e\n\u003ch3 id=\"apart-from-invarients-we-just-scope-limit-ourselves-to-get-the-remaining-bits-that-we-need-to-make-stuff-work-beautifully\"\u003eApart from invarients, we just scope-limit ourselves to get the remaining bits that we need to make stuff work \u0026ldquo;beautifully\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003epropose to refer to the observation which these examples illustrate as the empirical law of epistemology. Together with the laws of invariance of physical theories, it is an indispensable foundation of these theories.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhthe_unreasonable_effectiveness_of_mathematics_in_the_natural_sciences/","tags":null,"title":"The Unreasonable Effectiveness of Mathematics in the Natural Sciences"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtherma/","tags":null,"title":"therma"},{"categories":null,"contents":"thermoregulation is the brain\u0026rsquo;s regulation of body temperature to respond to heat, cold events.\nStudies indicate that cold exposure cold exposure can activate AgRP (stimulate food intake) as a means for the brain leveraging CNS regulation to which would lower the glucose level and maintain glucose homeostatis.\nHowever, cold exposure also trigger energy expenditure, and seems contradictory but not really why?.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhthermoregulation/\"\u003ethermoregulation\u003c/a\u003e is the brain\u0026rsquo;s regulation of 
body temperature to respond to heat, cold events.\u003c/p\u003e\n\u003cp\u003eStudies indicate that cold exposure cold exposure can activate \u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e (stimulate food intake) as a means for the brain leveraging \u003ca href=\"/posts/kbhcns_regulation/\"\u003eCNS regulation\u003c/a\u003e to which would lower the \u003ca href=\"\"\u003eglucose\u003c/a\u003e level and maintain \u003ca href=\"\"\u003eglucose homeostatis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHowever, cold exposure also trigger energy expenditure, and seems contradictory but \u003cem\u003enot really why?\u003c/em\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhthermoregulation/","tags":null,"title":"thermoregulation"},{"categories":null,"contents":"The theta/alpha ratio is the ratio between two oscillations measurable by an EEG that is shown to be a possible indicator for AD development.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e is the ratio between two oscillations measurable by an EEG that \u003ca href=\"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3793211/\"\u003eis shown\u003c/a\u003e to be a possible indicator for AD development.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtheta_alpha_ratio/","tags":null,"title":"theta/alpha ratio"},{"categories":null,"contents":" Because this chapter is not about linear algebra, your instructor may go through it rapidly. You may not be asked to scrutinize all the proofs. Make sure, however, that you at least read and understand the statements of all the results in this chapter—they will be used in later chapters.\nSo we are not going to go through everything very very carefully. Instead, I\u0026rsquo;m just going to go through some interesting results at my own leisure. 
This also means that this note is not very complete.\nfacts \u0026ldquo;you can factor out every root\u0026rdquo;: \\(p(\\alpha) = 0 \\implies p(z)=(z-\\alpha)q(z)\\)\nfundamental theorem of algebra: \u0026ldquo;if you have an nth-degree polynomial, you can factor it into n factors\u0026rdquo; (over the complex numbers, you have as many roots as the degree of the polynomials)\nthese coefficients are unique barring ordering factoring real polynomials can be treated in two pieces: one piece, the reals, which can be treated usually; then one other piece, the complexes, can be multiplied pairwise together (as over real coeffs they always come in conjugate pairs) into quadratics. This is why all real polynormials can always be factored as \\((x-\\lambda)(x-\\lambda) \\dots (x^{2}+ax+b) (x^{2}+ax+b)\\dots\\) the number of complex polynomials has to be even complex polynomials have \\(deg\\ p\\) factors\nreal polynomials have \\(deg\\ p\\) real/complex factors, but complex factors come in pairs\nwe can squish the complex part of the real polynomials together, and get\u0026mdash;wlog $m$\u0026mdash;first-degree real roots and \\(\\frac{deg\\ p - m}{2}\\) second-degree real roots where \\(b^{2} \u0026lt; 4c\\)\n\\(x^{2} + bx + c\\) has a factor of \\((x-\\lambda_{1})(x-\\lambda_{2})\\) under reals \\(b^{2} \\geq 4c\\)\nkey sequence complex numbers we defined: complex numbers, conjugates, and absolute value 9 properties of complexes (see below) polynomial coefficients polynomial coefficients are unique; namely, if a polynomial is the zero function, all of its coefficients have to be \\(0\\) division, zero, and factoring polynomial division: given two polynomials \\(p,s \\in \\mathcal{P}(\\mathbb{F})\\), with \\(s\\neq 0\\), then \\(\\exists q,r \\in \\mathcal{P}(\\mathbb{F})\\) such that: \\(p = s q +r\\); that is, given two polynomials, you can always divide one by the other with some remainder as long as the \u0026ldquo;other\u0026rdquo; is not \\(0\\) we defined zero 
(\\(p \\lambda =0\\), then \\(\\lambda\\) is a \u0026ldquo;zero\u0026rdquo;) and factor which is some polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) for another polynomial \\(p\\) such that there exists some \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p = s q\\) we show that each zero corresponds to a factor of the shape \\(p(z) = (z-\\lambda)q(z)\\) we show that a polynomial with degree \\(m\\) has at most \\(m\\) distinct zeros FToA and corollaries FToA: every non-constant polynomial under the complexes has a zero and that means every polynomial over the complexes has a unique factorization \\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m})\\) polynomials with zero coefficients have zeros in pairs: if \\(\\lambda \\in \\mathbb{C}\\) is a factor of the polynomial, so is \\(\\bar{\\lambda}\\) Is a real polynomial factorable? A polynomial \\(x^{2}+bx+c\\) is factorable into \\((x-\\lambda_{1})(x-\\lambda_{2})\\) IFF \\(b^{2} \u0026gt; 4c\\). All polynomials over the reals can be factored into at least second degree polynomials \\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m}) \\dots (x^{2}+b_{M}x+c_{M})\\) first, review complex numbers \\(z+\\bar{z} = 2 \\text{Re}\\ z\\) \\(z-\\bar{z} =2(\\text{Im}\\ z)i\\) \\(z\\bar{z} = |z|^{2}\\) \\(\\bar{x+z} = \\bar{w}+\\bar{z}\\), \\(\\bar{wz} = \\bar{w}\\bar{z}\\) \\(\\bar{\\bar{z}} = z\\) \\(| \\text{\\{Re,Im\\}}\\ z| \\leq |z|\\) both real and imaginary components are smaller than the actual absolute value \\(|\\bar{z}| = |z|\\) \\(|wz| = |w| |z|\\) \\(|w+z| \\leq |w| + |z|\\), the triangle inequality triangle inequality (complexes) For \\(w, z \\in \\mathbb{C}\\), we do route algebra:\npolynomial division Suppose \\(p,s \\in \\mathcal{P}(\\mathbb{F}), s\\neq 0\\), then, \\(\\exists\\) polynomials \\(q,r \\in \\mathcal{P(\\mathbb{F})}\\) such that:\n\\begin{equation} p = s q +r \\end{equation}\nand \\(\\deg r \u0026lt; \\deg s\\).\nProof:\nLet: \\(n = \\deg p\\), and \\(m = 
\\deg s\\). So, if \\(n \u0026lt; m\\) (i.e. it is not a division), then take \\(q=0\\) and \\(r=p\\).\nNow, let\u0026rsquo;s make ???\nFactoring A polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) is a factor of \\(p \\in \\mathcal{P}(\\mathbb{F})\\) if \\(\\exists\\) \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p=s q\\).\nquestions proofs: wut if the FToA holds, isn\u0026rsquo;t the polynomials over the reals a \u0026ldquo;subset\u0026rdquo;(ish) of the polynomials over the complexes? so there is going to be at least complex roots to all polynormials always no? ","html":"\u003cblockquote\u003e\n\u003cp\u003eBecause this chapter is not about linear algebra, your instructor may go through it rapidly. You may not be asked to scrutinize all the proofs. Make sure, however, that you at least read and understand the statements of all the results in this chapter—they will be used in later chapters.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003eSo we are not going to go through everything very very carefully. Instead, I\u0026rsquo;m just going to go through some interesting results at my own leisure. 
This also means that this note is not very complete.\u003c/p\u003e\n\u003ch2 id=\"facts\"\u003efacts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;you can factor out every root\u0026rdquo;: \\(p(\\alpha) = 0 \\implies p(z)=(z-\\alpha)q(z)\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e: \u0026ldquo;if you have an nth-degree polynomial, you can factor it into n factors\u0026rdquo; (over the complex numbers, you have as many roots as the degree of the polynomials)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethese coefficients are unique barring ordering\u003c/li\u003e\n\u003cli\u003efactoring real polynomials can be treated in two pieces: one piece, the reals, which can be treated usually; then one other piece, the complexes, can be multiplied pairwise together (as over real coeffs they always come in conjugate pairs) into quadratics. This is why all real polynormials can always be factored as \\((x-\\lambda)(x-\\lambda) \\dots (x^{2}+ax+b) (x^{2}+ax+b)\\dots\\)\u003c/li\u003e\n\u003cli\u003ethe number of complex polynomials has to be even\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecomplex polynomials have \\(deg\\ p\\) factors\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ereal polynomials have \\(deg\\ p\\) real/complex factors, but complex factors come in pairs\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewe can squish the complex part of the real polynomials together, and get\u0026mdash;wlog $m$\u0026mdash;first-degree real roots and \\(\\frac{deg\\ p - m}{2}\\) second-degree real roots where \\(b^{2} \u0026lt; 4c\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\\(x^{2} + bx + c\\) has a factor of \\((x-\\lambda_{1})(x-\\lambda_{2})\\) under reals \\(b^{2} \\geq 4c\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-sequence\"\u003ekey 
sequence\u003c/h2\u003e\n\u003ch3 id=\"complex-numbers\"\u003ecomplex numbers\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined: complex numbers, conjugates, and absolute value\n\u003cul\u003e\n\u003cli\u003e9 properties of complexes (see below)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"polynomial-coefficients\"\u003epolynomial coefficients\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003epolynomial coefficients are unique\u003c/strong\u003e; namely, if a polynomial is the zero function, all of its coefficients have to be \\(0\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"division-zero-and-factoring\"\u003edivision, zero, and factoring\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003epolynomial division\u003c/strong\u003e: given two polynomials \\(p,s \\in \\mathcal{P}(\\mathbb{F})\\), with \\(s\\neq 0\\), then \\(\\exists q,r \\in \\mathcal{P}(\\mathbb{F})\\) such that: \\(p = s q +r\\); that is, given two polynomials, you can always divide one by the other with some remainder as long as the \u0026ldquo;other\u0026rdquo; is not \\(0\\)\u003c/li\u003e\n\u003cli\u003ewe defined \u003cstrong\u003ezero\u003c/strong\u003e (\\(p \\lambda =0\\), then \\(\\lambda\\) is a \u0026ldquo;zero\u0026rdquo;) and \u003cstrong\u003efactor\u003c/strong\u003e which is some polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) for another polynomial \\(p\\) such that there exists some \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p = s q\\)\n\u003cul\u003e\n\u003cli\u003ewe show that each zero corresponds to a factor of the shape \\(p(z) = (z-\\lambda)q(z)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe show that a polynomial with degree \\(m\\) has at most \\(m\\) distinct zeros\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ftoa-and-corollaries\"\u003eFToA and corollaries\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eFToA\u003c/strong\u003e: every non-constant 
polynomial under the complexes has a zero\u003c/li\u003e\n\u003cli\u003eand that means every polynomial over the complexes has a unique factorization \\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m})\\)\u003c/li\u003e\n\u003cli\u003epolynomials with zero coefficients have zeros in pairs: if \\(\\lambda \\in \\mathbb{C}\\) is a factor of the polynomial, so is \\(\\bar{\\lambda}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"is-a-real-polynomial-factorable\"\u003eIs a real polynomial factorable?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eA polynomial \\(x^{2}+bx+c\\) is factorable into \\((x-\\lambda_{1})(x-\\lambda_{2})\\) IFF \\(b^{2} \u0026gt; 4c\\).\u003c/li\u003e\n\u003cli\u003eAll polynomials over the reals can be factored into at least second degree polynomials\n\u003cul\u003e\n\u003cli\u003e\\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m}) \\dots (x^{2}+b_{M}x+c_{M})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"first-review-complex-number--kbhcomplex-number-dot-md--s\"\u003efirst, review \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(z+\\bar{z} = 2 \\text{Re}\\ z\\)\u003c/li\u003e\n\u003cli\u003e\\(z-\\bar{z} =2(\\text{Im}\\ z)i\\)\u003c/li\u003e\n\u003cli\u003e\\(z\\bar{z} = |z|^{2}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\bar{x+z} = \\bar{w}+\\bar{z}\\), \\(\\bar{wz} = \\bar{w}\\bar{z}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\bar{\\bar{z}} = z\\)\u003c/li\u003e\n\u003cli\u003e\\(| \\text{\\{Re,Im\\}}\\ z| \\leq |z|\\) both real and imaginary components are smaller than the actual absolute value\u003c/li\u003e\n\u003cli\u003e\\(|\\bar{z}| = |z|\\)\u003c/li\u003e\n\u003cli\u003e\\(|wz| = |w| |z|\\)\u003c/li\u003e\n\u003cli\u003e\\(|w+z| \\leq |w| + |z|\\), the \u003ca href=\"#triangle-inequality--complexes\"\u003etriangle inequality\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"triangle-inequality--complexes\"\u003etriangle inequality (complexes)\u003c/h2\u003e\n\u003cp\u003eFor \\(w, z \\in \\mathbb{C}\\), we do route algebra:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-30_20-46-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"polynomial-division\"\u003epolynomial division\u003c/h2\u003e\n\u003cp\u003eSuppose \\(p,s \\in \\mathcal{P}(\\mathbb{F}), s\\neq 0\\), then, \\(\\exists\\) polynomials \\(q,r \\in \\mathcal{P(\\mathbb{F})}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np = s q +r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(\\deg r \u0026lt; \\deg s\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet: \\(n = \\deg p\\), and \\(m = \\deg s\\). So, if \\(n \u0026lt; m\\) (i.e. it is not a division), then take \\(q=0\\) and \\(r=p\\).\u003c/p\u003e\n\u003cp\u003eNow, let\u0026rsquo;s make\n???\u003c/p\u003e\n\u003ch2 id=\"factoring\"\u003eFactoring\u003c/h2\u003e\n\u003cp\u003eA polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) is a \u003cstrong\u003efactor\u003c/strong\u003e of \\(p \\in \\mathcal{P}(\\mathbb{F})\\) if \\(\\exists\\) \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p=s q\\).\u003c/p\u003e\n\u003ch2 id=\"questions\"\u003equestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eproofs: wut\u003c/li\u003e\n\u003cli\u003eif the FToA holds, isn\u0026rsquo;t the polynomials over the reals a \u0026ldquo;subset\u0026rdquo;(ish) of the polynomials over the complexes? 
so there is going to be at least complex roots to all polynormials always no?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhthoughts_on_axler_4/","tags":null,"title":"Thoughts on Axler 4"},{"categories":null,"contents":"A\n","html":"\u003cp\u003eA\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtiago_forte/","tags":null,"title":"Tiago Forte"},{"categories":null,"contents":"Every NLP task involve some kind of text normalization.\ntokenizing words normalizing word formats (lemmatize?) sentence and paragraph segmentation For Latin, Arabic, Cyrillic, Greek systems, spaces can usually be used for tokenization. Other writing systems can\u0026rsquo;t do this. See morpheme\nSubword Tokenization Algorithms for breaking up tokens using corpus statistics which acts on lower-than-word level.\nBPE Unigram Language Modeling tokenization WordPiece They all work in 2 parst:\na token learner: takes training corpus and derives a vocabulary set a token segmenter that tokenizes text according to the vocab tr For those languages, you can use these systems to perform tokenization.\ntr -sc \u0026#34;A-Za-z\u0026#34; \u0026#34;\\n\u0026#34; \u0026lt; input.txt this takes every form which is not text (-c is the complement operator) and replaces it with a newline. 
-s squeezes the text so that there are not multiple newlines.\nThis turns the text into one word per line.\nSorting it (because uniq requires it) and piping into uniq gives word count\ntr -sc \u0026#34;A-Za-z\u0026#34; \u0026#34;\\n\u0026#34; \u0026lt; input.txt | sort | uniq We can then do a reverse numerical sort:\ntr -sc \u0026#34;A-Za-z\u0026#34; \u0026#34;\\n\u0026#34; \u0026lt; input.txt | sort | uniq | sort -r -n which gives a list of words per frequency.\nThis is a BAD RESULT most of the time: some words have punctuation with meaning that\u0026rsquo;s not tokenizaiton: m.p.h., or AT\u0026amp;T, or John's, or 1/1/12.\nWhat to Tokenize \u0026ldquo;I do uh main- mainly business data processing\u0026rdquo;\nuh: filled pause main-: fragments Consider:\n\u0026ldquo;Seuss\u0026rsquo;s cat in the cat is different from other cats!\u0026rdquo;\ncat and cats: same lemma (i.e. stem + part of speech + word sense) cat and cats: different wordforms We usually consider a token as distinct wordform, counting duplicates; whereas, we usually consider word types as unique, non-duplicated distinct wordforms.\nclitics John's: word that doesn\u0026rsquo;t stand on its own.\n","html":"\u003cp\u003eEvery NLP task involve some kind of text normalization.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etokenizing words\u003c/li\u003e\n\u003cli\u003enormalizing word formats (lemmatize?)\u003c/li\u003e\n\u003cli\u003esentence and paragraph segmentation\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eFor Latin, Arabic, Cyrillic, Greek systems, spaces can usually be used for tokenization. Other writing systems can\u0026rsquo;t do this. 
See \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"subword-tokenization\"\u003eSubword Tokenization\u003c/h2\u003e\n\u003cp\u003eAlgorithms for breaking up tokens using \u003ca href=\"/posts/kbhcorpus/\"\u003ecorpus\u003c/a\u003e statistics which acts on lower-than-word level.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eUnigram Language Modeling tokenization\u003c/li\u003e\n\u003cli\u003eWordPiece\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThey all work in 2 parst:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ea token \u003cstrong\u003elearner\u003c/strong\u003e: takes training corpus and derives a vocabulary set\u003c/li\u003e\n\u003cli\u003ea token \u003cstrong\u003esegmenter\u003c/strong\u003e that tokenizes text according to the vocab\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tr\"\u003etr\u003c/h2\u003e\n\u003cp\u003eFor those languages, you can use these systems to perform tokenization.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etr -sc \u003cspan style=\"color:#d88200\"\u003e\u0026#34;A-Za-z\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\n\u0026#34;\u003c/span\u003e \u0026lt; input.txt\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethis takes every form which is not text (\u003ccode\u003e-c\u003c/code\u003e is the complement operator) and replaces it with a newline. 
\u003ccode\u003e-s\u003c/code\u003e squeezes the text so that there are not multiple newlines.\u003c/p\u003e\n\u003cp\u003eThis turns the text into one word per line.\u003c/p\u003e\n\u003cp\u003eSorting it (because \u003ccode\u003euniq\u003c/code\u003e requires it) and piping into \u003ccode\u003euniq\u003c/code\u003e gives word count\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etr -sc \u003cspan style=\"color:#d88200\"\u003e\u0026#34;A-Za-z\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\n\u0026#34;\u003c/span\u003e \u0026lt; input.txt \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e sort \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e uniq\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can then do a reverse numerical sort:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etr -sc \u003cspan style=\"color:#d88200\"\u003e\u0026#34;A-Za-z\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\n\u0026#34;\u003c/span\u003e \u0026lt; input.txt \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e sort \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e uniq \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e sort -r -n\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich gives a list of words per frequency.\u003c/p\u003e\n\u003cp\u003eThis is a \u003cstrong\u003eBAD RESULT\u003c/strong\u003e most of the time: some words 
have punctuation with meaning that\u0026rsquo;s not tokenizaiton: \u003ccode\u003em.p.h.\u003c/code\u003e, or \u003ccode\u003eAT\u0026amp;T\u003c/code\u003e, or \u003ccode\u003eJohn's\u003c/code\u003e, or \u003ccode\u003e1/1/12\u003c/code\u003e.\u003c/p\u003e\n\u003ch2 id=\"what-to-tokenize\"\u003eWhat to Tokenize\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;I do uh main- mainly business data processing\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003euh\u003c/code\u003e: filled pause\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003emain-\u003c/code\u003e: fragments\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Seuss\u0026rsquo;s cat in the cat is different from other cats!\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ecat\u003c/code\u003e and \u003ccode\u003ecats\u003c/code\u003e: same \u003ca href=\"/posts/kbhtokenization/\"\u003elemma\u003c/a\u003e (i.e. stem + part of speech + word sense)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecat\u003c/code\u003e and \u003ccode\u003ecats\u003c/code\u003e: different \u003ca href=\"/posts/kbhtokenization/\"\u003ewordform\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe usually consider a \u003ca href=\"/posts/kbhtokenization/\"\u003etoken\u003c/a\u003e as distinct \u003ca href=\"/posts/kbhtokenization/\"\u003ewordform\u003c/a\u003e, counting duplicates; whereas, we usually consider \u003ca href=\"/posts/kbhtokenization/\"\u003eword type\u003c/a\u003es as unique, non-duplicated distinct \u003ca href=\"/posts/kbhtokenization/\"\u003ewordform\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"clitics\"\u003eclitics\u003c/h3\u003e\n\u003cp\u003e\u003ccode\u003eJohn's\u003c/code\u003e: word that doesn\u0026rsquo;t stand on its own.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtokenization/","tags":null,"title":"tokenization"},{"categories":null,"contents":"For directed acyclic graphs, a topological sort of a 
directed graph is such that if there\u0026rsquo;s an edge \\(A \\to B\\), then \\(A\\) comes before \\(B\\) in the sort.\nUnder direct acyclic graphs, a topological sort always exist.\n","html":"\u003cp\u003eFor directed acyclic graphs, a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e of a directed graph is such that if there\u0026rsquo;s an edge \\(A \\to B\\), then \\(A\\) comes before \\(B\\) in the sort.\u003c/p\u003e\n\u003cp\u003eUnder direct acyclic graphs, a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e always exist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtopological_sort/","tags":null,"title":"topological sort"},{"categories":null,"contents":"Finding training data for AI is hard. So instead:\nIntentional training data curated for training data Spent time thinking about bias, control, etc. Training set of convenience Dataset that just comes about Problematic: Accidentally introduce bias into the data: Googling images of CEOs, which is convenient, results in all white males for a bit.\n","html":"\u003cp\u003eFinding training data for AI is hard. 
So instead:\u003c/p\u003e\n\u003ch2 id=\"intentional-training-data\"\u003eIntentional training data\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecurated for training data\u003c/li\u003e\n\u003cli\u003eSpent time thinking about bias, control, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"training-set-of-convenience\"\u003eTraining set of convenience\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDataset that just comes about\u003c/li\u003e\n\u003cli\u003eProblematic:\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAccidentally introduce bias into the data: Googling images of CEOs, which is convenient, results in all white males for a bit.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtraining_data_sourcing/","tags":null,"title":"Training Data Sourcing"},{"categories":null,"contents":"\u0026ldquo;What we have been building since ChatGPT at H4.\nNo pretraining in any way Basic Three Steps Goal: \u0026ldquo;helpful, harmless, honest, and huggy\u0026rdquo; bots.\nRetraining step: large-scale next token prediction Incontext learning: few shot learning without updating parameters \u0026ldquo;Helpful\u0026rdquo; steps Taking supervised data to perform supervised fine tuning \u0026ldquo;Harmless\u0026rdquo; steps Training a classifier for result ranking RLHF Benchmarking Before we started to train, we have a problem. Most benchmarks are on generic reasoning, which evaluates 1), 2). Therefore, we need new metrics for steps 4) and 5).\nSo:\nEvaluating instruction and \u0026ldquo;chatty-ness\u0026rdquo; Pairwise Elo Ratings leaderboard from 🤗 + AlpacaEval. Both use GPT4 as the automated evaluator + as well as humans. 
MTBench from LMSYS has a new benchmark for the same thing, but supports multi-turn evaluation.\nThree main effects observed:\nresults improve slightly the longer the prompt GPT4 MTBench assigns worse scores on gpt4 like data adding more data into fine tuning had diminishing returns after thousands of samples TruthfulQA is the most differentiating benchmark; most others score about the same\nEvaluating Reward Model There are not any open source reward models. Nor is there anything on evaluating or dataset on red teaming. The only dataset out there is Anthropic\u0026rsquo;s red teaming data.\nhttps://huggingface.co/blog/red-teaming\nWackiness GPT4 as an evaluator Why is everybody using GPT4 as a proxy for humans?\nGPT4 has a left positional bias (if you admonish GPT about this, it will prefer the second one instead :/), while humans provide pretty much uniform rating \u0026ldquo;Doping\u0026rdquo;: GPT4 prefers model trained on data that it itself generated GPT4 prefers a large variance in unique tokens GPT4 has bad correlation with humans with \u0026ldquo;low entropy\u0026rdquo; factual tasks: QA, Summarization, Code; it has better correlation with humans in brainstorming and creative generation arxiv:2306.05685\nSupervised Fine Tuning Data \u0026ldquo;Self-Instruct\u0026rdquo; dataset, Wang et all 2022 =\u0026gt; \u0026ldquo;Surge Instruct\u0026rdquo;, huggingface 2023\nInstruction (what to do) Input (what to do it on) Output (what you are supposed to do) Goal: \u0026ldquo;helpful and chatty\u0026rdquo;\nBootstrapping Data Generation 175 seed tasks: 1 instruction + 1 input/output pair Give it to a language model to generate more instructions Language mode Human-In-The-loop Data Generation Ultrachat, Ding et al 2023\nhuman doing some research on the topic and create a prompt ask LLM to generate the output if result not good, rephrase the prompt repeat until good Roleplaying Data Generation Have two models role play to get and correct data.\nHuggingface 
Surge-Instruct Humans write everything from scratch. With a pretrained model, diminishing return is seen after a few thousand high quality examples.\nTask Distribution\nWhat should the topics be?\nUse InstructGPT as guidance: largestest section is a generation task (12%), OpenQA the second largest one (12.4%).\nHF replaced InstructGPT distribution\u0026rsquo;s \u0026ldquo;Other\u0026rdquo; section (3.5%) with code work.\nLength Distribution\nHow long should the prompts be? Collected distributions, and Surge Instruct seems to be closest with InstructionGPT.\nBoth Anthropic and InstructGPT used a US based task force, and so so did 🤗\nus based taskforce roughly even gender slpit 19 to 62 years old primarily white technical degree to PhD Only used one turn. Multi-turn fine tuning wasn\u0026rsquo;t a thing a few mounths ago.\nTraining starcoder, falcon, llama2 True fine tuning + PEFT (LoRA) The HF part of RLHF Scale agreed with Surge and H4 a lot more, but mostly no one agreed with anyone.\nGoal: \u0026ldquo;safe and factual\u0026rdquo;\nTask Distribution Distribution: a lot more about factual things (because we want to encourage factualness) so much more math and code than the general generation. 
Its also easier to score.\nRanking Guidelines OpenAI have guidelines about how to rate Rate every turn from the diaglogue Smaller total length (\u0026lt;2048 tokens) Helpfulness OVER honesty \u0026ndash; this is the opposite of OpenAI because the model wasn\u0026rsquo;t large enough to be very honest Two step selection: \u0026ldquo;which one is better\u0026rdquo; =\u0026gt; \u0026ldquo;how much is better\u0026rdquo;\n","html":"\u003cp\u003e\u0026ldquo;What we have been building since ChatGPT at \u003ca href=\"/posts/kbhh4/\"\u003eH4\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eNo pretraining in any way\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basic-three-steps\"\u003eBasic Three Steps\u003c/h2\u003e\n\u003cp\u003eGoal: \u0026ldquo;helpful, harmless, honest, and huggy\u0026rdquo; bots.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eRetraining step: large-scale next token prediction\u003c/li\u003e\n\u003cli\u003eIncontext learning: few shot learning without updating parameters\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Helpful\u0026rdquo; steps\n\u003col\u003e\n\u003cli\u003eTaking supervised data to perform supervised fine tuning\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Harmless\u0026rdquo; steps\n\u003col\u003e\n\u003cli\u003eTraining a classifier for result ranking\u003c/li\u003e\n\u003cli\u003eRLHF\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"benchmarking\"\u003eBenchmarking\u003c/h2\u003e\n\u003cp\u003eBefore we started to train, we have a problem. Most benchmarks are on generic reasoning, which evaluates 1), 2). Therefore, we need new metrics for steps 4) and 5).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003ch3 id=\"evaluating-instruction-and-chatty-ness\"\u003eEvaluating instruction and \u0026ldquo;chatty-ness\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003ePairwise \u003ca href=\"/posts/kbhelo_ratings/\"\u003eElo Ratings\u003c/a\u003e leaderboard from 🤗 + AlpacaEval. 
Both use GPT4 as the automated evaluator + as well as humans. MTBench from LMSYS has a new benchmark for the same thing, but supports multi-turn evaluation.\u003c/p\u003e\n\u003cp\u003eThree main effects observed:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eresults improve \u003cem\u003eslightly\u003c/em\u003e the longer the prompt\u003c/li\u003e\n\u003cli\u003eGPT4 MTBench assigns worse scores on gpt4 like data\u003c/li\u003e\n\u003cli\u003eadding more data into fine tuning had diminishing returns after thousands of samples\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTruthfulQA is the most differentiating benchmark; most others score about the same\u003c/p\u003e\n\u003ch3 id=\"evaluating-reward-model\"\u003eEvaluating Reward Model\u003c/h3\u003e\n\u003cp\u003eThere are not any open source reward models. Nor is there anything on evaluating or dataset on red teaming. The only dataset out there is Anthropic\u0026rsquo;s red teaming data.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://huggingface.co/blog/red-teaming\"\u003ehttps://huggingface.co/blog/red-teaming\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"wackiness-gpt4-as-an-evaluator\"\u003eWackiness GPT4 as an evaluator\u003c/h3\u003e\n\u003cp\u003eWhy is everybody using GPT4 as a proxy for humans?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eGPT4 has a left positional bias (if you admonish GPT about this, it will prefer the second one instead :/), while humans provide pretty much uniform rating\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Doping\u0026rdquo;: GPT4 prefers model trained on data that it itself generated\u003c/li\u003e\n\u003cli\u003eGPT4 prefers a large variance in unique tokens\u003c/li\u003e\n\u003cli\u003eGPT4 has bad correlation with humans with \u0026ldquo;low entropy\u0026rdquo; factual tasks: QA, Summarization, Code; it has better correlation with humans in brainstorming and creative generation\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003earxiv:2306.05685\u003c/p\u003e\n\u003ch2 
id=\"supervised-fine-tuning\"\u003eSupervised Fine Tuning\u003c/h2\u003e\n\u003ch3 id=\"data\"\u003eData\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Self-Instruct\u0026rdquo; dataset, Wang et all 2022 =\u0026gt; \u0026ldquo;Surge Instruct\u0026rdquo;, huggingface 2023\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInstruction (what to do)\u003c/li\u003e\n\u003cli\u003eInput (what to do it on)\u003c/li\u003e\n\u003cli\u003eOutput (what you are supposed to do)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eGoal: \u0026ldquo;helpful and chatty\u0026rdquo;\u003c/p\u003e\n\u003ch4 id=\"bootstrapping-data-generation\"\u003eBootstrapping Data Generation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e175 seed tasks: 1 instruction + 1 input/output pair\u003c/li\u003e\n\u003cli\u003eGive it to a language model to generate more instructions\u003c/li\u003e\n\u003cli\u003eLanguage mode\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"human-in-the-loop-data-generation\"\u003eHuman-In-The-loop Data Generation\u003c/h4\u003e\n\u003cp\u003eUltrachat, Ding et al 2023\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehuman doing some research on the topic and create a prompt\u003c/li\u003e\n\u003cli\u003eask LLM to generate the output\u003c/li\u003e\n\u003cli\u003eif result not good, rephrase the prompt\u003c/li\u003e\n\u003cli\u003erepeat until good\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"roleplaying-data-generation\"\u003eRoleplaying Data Generation\u003c/h4\u003e\n\u003cp\u003eHave two models role play to get and correct data.\u003c/p\u003e\n\u003ch4 id=\"huggingface-surge-instruct\"\u003eHuggingface Surge-Instruct\u003c/h4\u003e\n\u003cp\u003eHumans write everything from scratch. 
With a pretrained model, diminishing return is seen after a few thousand high quality examples.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTask Distribution\u003c/p\u003e\n\u003cp\u003eWhat should the topics be?\u003c/p\u003e\n\u003cp\u003eUse InstructGPT as guidance: largestest section is a generation task (12%), OpenQA the second largest one (12.4%).\u003c/p\u003e\n\u003cp\u003eHF replaced InstructGPT distribution\u0026rsquo;s \u0026ldquo;Other\u0026rdquo; section (3.5%) with code work.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eLength Distribution\u003c/p\u003e\n\u003cp\u003eHow long should the prompts be? Collected distributions, and Surge Instruct seems to be closest with InstructionGPT.\u003c/p\u003e\n\u003cp\u003eBoth Anthropic and InstructGPT used a US based task force, and so so did 🤗\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eus based taskforce\u003c/li\u003e\n\u003cli\u003eroughly even gender slpit\u003c/li\u003e\n\u003cli\u003e19 to 62 years old\u003c/li\u003e\n\u003cli\u003eprimarily white\u003c/li\u003e\n\u003cli\u003etechnical degree to PhD\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOnly used one turn. 
Multi-turn fine tuning wasn\u0026rsquo;t a thing a few mounths ago.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"training\"\u003eTraining\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003estarcoder, falcon, llama2\u003c/li\u003e\n\u003cli\u003eTrue fine tuning + \u003ca href=\"/posts/kbhpeft/\"\u003ePEFT\u003c/a\u003e (LoRA)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"the-hf-part-of-rlhf\"\u003eThe HF part of RLHF\u003c/h2\u003e\n\u003cp\u003eScale agreed with Surge and \u003ca href=\"/posts/kbhh4/\"\u003eH4\u003c/a\u003e a lot more, but mostly no one agreed with anyone.\u003c/p\u003e\n\u003cp\u003eGoal: \u0026ldquo;safe and factual\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"task-distribution\"\u003eTask Distribution\u003c/h3\u003e\n\u003cp\u003eDistribution: a lot more about factual things (because we want to encourage factualness) so much more math and code than the general generation. Its also easier to score.\u003c/p\u003e\n\u003ch3 id=\"ranking-guidelines\"\u003eRanking Guidelines\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eOpenAI have guidelines about how to rate\u003c/li\u003e\n\u003cli\u003eRate every turn from the diaglogue\u003c/li\u003e\n\u003cli\u003eSmaller total length (\u0026lt;2048 tokens)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eHelpfulness OVER honesty\u003c/strong\u003e \u0026ndash; this is the opposite of OpenAI because the model wasn\u0026rsquo;t large enough to be very honest\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTwo step selection: \u0026ldquo;which one is better\u0026rdquo; =\u0026gt; \u0026ldquo;how much is better\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtraining_helpful_chatbots/","tags":null,"title":"Training Helpful Chatbots"},{"categories":null,"contents":"the transformational generative syntax is a linguistical precept proposed by Noam Chomsky which has the interesting conclusion that meaning is supported by structure, rather than the other way around as generative 
semantics suggests.\nThis means that you can first come up with generic, independent structure to a sentence, then fill in the sentence with meaning.\nFor instance, \u0026ldquo;colorless green ideas sleep furiously\u0026rdquo; is a sentence Noam Chomsky proposes to have perfect structure but failes to be filled with meaning, supporting the transformational generative syntax theory.\nThis supports the Lexicalist Hypothesis, which is the theory that lexicalization transformations are independent of structural transformations. This would therefore support the proof for the existence of semantic primes.\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e is a linguistical precept proposed by \u003ca href=\"/posts/kbhchomsky/\"\u003eNoam Chomsky\u003c/a\u003e which has the interesting conclusion that \u003cstrong\u003e\u003cstrong\u003emeaning\u003c/strong\u003e\u003c/strong\u003e is supported by \u003cstrong\u003e\u003cstrong\u003estructure\u003c/strong\u003e\u003c/strong\u003e, rather than the other way around as \u003ca href=\"/posts/kbhgenerative_semantics/\"\u003egenerative semantics\u003c/a\u003e suggests.\u003c/p\u003e\n\u003cp\u003eThis means that you can first come up with generic, independent structure to a sentence, then fill in the sentence with meaning.\u003c/p\u003e\n\u003cp\u003eFor instance, \u0026ldquo;colorless green ideas sleep furiously\u0026rdquo; is a sentence \u003ca href=\"/posts/kbhchomsky/\"\u003eNoam Chomsky\u003c/a\u003e proposes to have perfect structure but failes to be filled with meaning, supporting the \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e theory.\u003c/p\u003e\n\u003cp\u003eThis supports the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalist Hypothesis\u003c/a\u003e, which is the theory that lexicalization transformations are independent of 
structural transformations. This would therefore support the \u003ca href=\"/posts/kbhsemantic_primes/#proof-for-the-existence-of-id-4e587814-bfd1-458e-ba5f-67882832107b-semantic-prime-s\"\u003eproof for the existence of semantic primes\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtransformational_generative_syntax/","tags":null,"title":"transformational generative syntax"},{"categories":null,"contents":"Background Current deep-learning first approaches have shown promising results for the speech text diarization task. For ASR-independent diarization, specifically, two main methods appear as yielding fruitful conclusions:\nAuditory feature extraction using deep learning to create a trained, fixed-size latent representation via Mel-frequency cepstral coefficients slices that came from any existing voice-activity detection (VAD) scheme ((Snyder et al. 2018)), where the features extracted with the neural network are later used with traditional clustering and Variational Bayes refinement ((Sell et al. 2018; Landini et al. 2022)) approaches to produce groups of diarized speakers\nEnd-to-end neural approaches which takes temporally-dependent log-mel-frequency cepstrum and perform voice activity detection, speaker recognition, and diarization directly on the same neural network ((Fujita, Kanda, Horiguchi, Xue, et al. 2019))\nFigure 1: \u0026lt;\u0026amp;fujita2019end1\u0026gt;\nThe latter, end-to-end approach (EEND), offers lower Diarization Error Rate (DER) than former clustering ((Fujita, Kanda, Horiguchi, Xue, et al. 2019)), achiving 10.76 vs. 11.53 DER on the CALLHOME dataset respectively. However, it confers a few disadvantages: the end-to-end system produces a diarization result directly dependent on the time dimension of the input Log-Mel (i.e. 
it outputs probability per speaker per time slice), so its error could include both the error in voice activity detection and diarization; furthermore, the one-shot nature of this method allows no interpretation or manipulation of its actual outputs\u0026mdash;such as specifying the number of speakers after diarization is completed (as is possible with clustering because one could simply choose the number of centroids to calculate) (Park et al. 2021).\nWe therefore desire here to combine the advantages of both methods discussed here in producing a diarization technique that both retains the flexible nature of vector-based approaches but also seeks to generate as complete and performant (in terms of DER) a pipeline as possible with deep learning.\nMotivations The discussion here is motivated by a few facts:\nExcellent ((Radford et al. 2022)) ASR models exist without being pre-trained on the diarization task, meaning they produce well-timed transcriptions without the speakers labels Well performing forced-alignment tools exist (McAuliffe et al. 2017), which can be applied on-top-of rough voice activity segments from assumption #1 (for instance, by reading attention activations; or by concatenating rough word timings). The number of speakers is not exogenously known, yet could be specified after diarization completes. Proposal One of the latest advances for EEND-class models leverages the reasonably novel Convolutional Transformer (\u0026ldquo;Conformer\u0026rdquo;) architecture ((Gulati et al. 2020)) to improve the performance of the model. Specifically, the model swaps the time-delayed fully connected blocks in favor of Conformer blocks, and mixes in the SpecAugment data augmentation technique for the Log-Mel frequency input ((Liu et al. 2021)). 
We will use this new model both as the basis of our work, as well as the benchmark to improve upon for diarization results.\nText-Aware (Near) End-to-End Approach Contextual and positional information (for instance, raving pronoun use) provides a useful basis by which humans recognize the flow of an utterance used to diarize speech.\nAssumption #2 above indicates that one could identify segments of text transcripts corresponding to the input audio\u0026mdash;albeit not diarized. We hypothesize that leveraging the information from text transcripts (even if not tightly aligned) will help the model track the flow of conversation and get better diarization performance.\nIn the figure above, we specifically chose the Transformer BERT encoder ((Devlin et al. 2018)) to process a segment of text ASR corresponding to the input log-mel audio. The processed Bert latents are added and statistically pooled to the Conformer outputs from processing the audio signals; the fused embeddings are then passed through a fully-connected classification head for the usual labeling consistent with EEND ((Fujita, Kanda, Horiguchi, Nagamatsu, et al. 2019)).\nBy training a multimodal scheme in this manner, we hope to demonstrate an improved level of performance which fusing ASR text can provide to the diarization task.\nImproved X-Vector via Conformers Design constraint #3 which we outlined earlier was the desire to identify extogenously the number of speakers. While an extension below explores the possibility of this in an end-to-end architecture, conventional probabilistic clustering methods (even including neural components, such as (Snyder et al. 2018)) allow manually specified clusters to be created and tagged using PLDA ((Kenny et al. 2013)) or HMMs ((Landini et al. 2022)).\nOne direct extension to this approach would be the use of the Conformer architecture highlighted above in place of the fully-connected network ((Snyder et al. 
2017)) which forms the basis of the x-vector approach.\nTo perform this, the x-vector representations would be swapped directly for the final latent from the EEND Conformer architecture prior to the fully-connected prediction head. All other post-processing of x-vectors, we hypothesize, could be applied to the new latents with minimal changes.\nSpecifically, convolution self-attention in the Conformer architecture work in a similar pattern to ((Peddinti, Povey, and Khudanpur 2015)) to scan across time frames; however, self-attention is a trained parameter, allowing the timescale dependence to be adaptive to the context provided.\nFurther adaptive training\u0026mdash;including training on previously segmented voice activity, and/or taking MFCC instead of Log-Mel as input\u0026mdash;maybe needed mostly following the training objectives in ((Snyder et al. 2018)) in order for the latent vectors to reflect the characteristics of new, unknown speakers.\nOther Possibilities Text and Vectors One direct correlary of the two proposals above is simply concatenating the novelty of each: creating text+audio transformer based latent embeddings as the basis for speaker clustering.\nSpeaker-Count Signal Clustering approaches, although more explainable, does confer some disadvantages. For instance, it will have no good way forward to predict overlapping speakers (as \u0026ldquo;a speaker similar to both A and B\u0026rdquo; would appear in a similar place in the latent space as \u0026ldquo;A and B are crosstalking\u0026rdquo;).\nReturning to the EEND approach, however, brings into focus the question regarding speaker count. One possibility for addressing this involves injecting an extra token\u0026mdash;either in the \u0026ldquo;text\u0026rdquo; portion of the multimodal implementation, or perhaps simply fused into the input of the original diarizing Conformer (i.e. (Liu et al. 
2021))\u0026mdash;representing the number of speakers.\nThen, we will add a large negative positive term to the loss associated with incorrectly-used (i.e. out of bounds) speaker ID classes.\nUnfortunately, because of the minimal weight of one speaker-count feature compared to the audio sample, and the Gaussian nature of neural networks, this method will provide no garantees regarding the actual diarization outputs.\nDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” arXiv Preprint arXiv:1810.04805. Fujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Kenji Nagamatsu, and Shinji Watanabe. 2019. “End-to-End Neural Speaker Diarization with Permutation-Free Objectives.” arXiv Preprint arXiv:1909.05952. Fujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Yawen Xue, Kenji Nagamatsu, and Shinji Watanabe. 2019. “End-to-End Neural Speaker Diarization with Self-Attention.” In 2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), 296–303. IEEE. Gulati, Anmol, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, et al. 2020. “Conformer: Convolution-Augmented Transformer for Speech Recognition.” arXiv. http://arxiv.org/abs/2005.08100. Kenny, Patrick, Themos Stafylakis, Pierre Ouellet, Md. Jahangir Alam, and Pierre Dumouchel. 2013. “PLDA for Speaker Verification with Utterances of Arbitrary Duration.” In 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, 7649–53. Vancouver, BC, Canada: IEEE. doi:10.1109/ICASSP.2013.6639151. Landini, Federico, Ján Profant, Mireia Diez, and Lukáš Burget. 2022. “Bayesian HMM Clustering of X-Vector Sequences (VBx) in Speaker Diarization: Theory, Implementation and Analysis on Standard Tasks.” Computer Speech \u0026#38; Language 71 (January): 101254. doi:10.1016/j.csl.2021.101254. Liu, Yi Chieh, Eunjung Han, Chul Lee, and Andreas Stolcke. 2021. 
“End-to-End Neural Diarization: From Transformer to Conformer.” In Interspeech 2021, 3081–85. doi:10.21437/Interspeech.2021-1909. McAuliffe, Michael, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. 2017. “Montreal Forced Aligner: Trainable Text-Speech Alignment Using Kaldi.” In Interspeech, 2017:498–502. Park, Tae Jin, Naoyuki Kanda, Dimitrios Dimitriadis, Kyu J. Han, Shinji Watanabe, and Shrikanth Narayanan. 2021. “A Review of Speaker Diarization: Recent Advances with Deep Learning.” arXiv. http://arxiv.org/abs/2101.09624. Peddinti, Vijayaditya, Daniel Povey, and Sanjeev Khudanpur. 2015. “A Time Delay Neural Network Architecture for Efficient Modeling of Long Temporal Contexts.” In Interspeech 2015, 3214–18. ISCA. doi:10.21437/Interspeech.2015-647. Radford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. “Robust Speech Recognition via Large-Scale Weak Supervision.” arXiv Preprint arXiv:2212.04356. Sell, Gregory, David Snyder, Alan McCree, Daniel Garcia-Romero, Jesús Villalba, Matthew Maciejewski, Vimal Manohar, et al. 2018. “Diarization Is Hard: Some Experiences and Lessons Learned for the JHU Team in the Inaugural DIHARD Challenge.” In Interspeech 2018, 2808–12. ISCA. doi:10.21437/Interspeech.2018-1893. Snyder, David, Daniel Garcia-Romero, Daniel Povey, and Sanjeev Khudanpur. 2017. “Deep Neural Network Embeddings for Text-Independent Speaker Verification.” In Interspeech 2017, 999–1003. ISCA. doi:10.21437/Interspeech.2017-620. Snyder, David, Daniel Garcia-Romero, Gregory Sell, Daniel Povey, and Sanjeev Khudanpur. 2018. “X-Vectors: Robust DNN Embeddings for Speaker Recognition.” In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 5329–33. Calgary, AB: IEEE. doi:10.1109/ICASSP.2018.8461375. 
","html":"\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eCurrent deep-learning first approaches have shown promising results for the speech text diarization task. For ASR-independent diarization, specifically, two main methods appear as yielding fruitful conclusions:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eAuditory feature extraction using deep learning to create a trained, fixed-size latent representation via Mel-frequency cepstral coefficients slices that came from any existing voice-activity detection (VAD) scheme ((\u003ca href=\"#citeproc_bib_item_14\"\u003eSnyder et al. 2018\u003c/a\u003e)), where the features extracted with the neural network are later used with traditional clustering and Variational Bayes refinement ((\u003ca href=\"#citeproc_bib_item_12\"\u003eSell et al. 2018\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_6\"\u003eLandini et al. 2022\u003c/a\u003e)) approaches to produce groups of diarized speakers\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEnd-to-end neural approaches which takes temporally-dependent log-mel-frequency cepstrum and perform voice activity detection, speaker recognition, and diarization directly on the same neural network ((\u003ca href=\"#citeproc_bib_item_3\"\u003eFujita, Kanda, Horiguchi, Xue, et al. 2019\u003c/a\u003e))\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-09_23-26-04_screenshot.png\"\n alt=\"Figure 1: \u0026amp;lt;\u0026amp;amp;fujita2019end1\u0026amp;gt;\"\u003e\u003cfigcaption\u003e\n \u003cp\u003e\u003cspan class=\"figure-number\"\u003eFigure 1: \u003c/span\u003e\u0026lt;\u0026amp;fujita2019end1\u0026gt;\u003c/p\u003e\n \u003c/figcaption\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe latter, end-to-end approach (EEND), offers lower Diarization Error Rate (DER) than former clustering ((\u003ca href=\"#citeproc_bib_item_3\"\u003eFujita, Kanda, Horiguchi, Xue, et al. 
2019\u003c/a\u003e)), achiving 10.76 vs. 11.53 DER on the CALLHOME dataset respectively. However, it confers a few disadvantages: the end-to-end system produces a diarization result directly dependent on the time dimension of the input Log-Mel (i.e. it outputs probability per speaker per time slice), so its error could include \u003cem\u003eboth\u003c/em\u003e the error in voice activity detection and diarization; furthermore, the one-shot nature of this method allows no interpretation or manipulation of its actual outputs\u0026mdash;such as specifying the number of speakers \u003cem\u003eafter\u003c/em\u003e diarization is completed (as is possible with clustering because one could simply choose the number of centroids to calculate) (\u003ca href=\"#citeproc_bib_item_9\"\u003ePark et al. 2021\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eWe therefore desire here to combine the advantages of both methods discussed here in producing a diarization technique that both retains the flexible nature of vector-based approaches but also seeks to generate as complete and performant (in terms of DER) a pipeline as possible with deep learning.\u003c/p\u003e\n\u003ch2 id=\"motivations\"\u003eMotivations\u003c/h2\u003e\n\u003cp\u003eThe discussion here is motivated by a few facts:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eExcellent ((\u003ca href=\"#citeproc_bib_item_11\"\u003eRadford et al. 2022\u003c/a\u003e)) ASR models exist without being pre-trained on the diarization task, meaning they produce well-timed transcriptions without the speakers labels\u003c/li\u003e\n\u003cli\u003eWell performing forced-alignment tools exist (\u003ca href=\"#citeproc_bib_item_8\"\u003eMcAuliffe et al. 
2017\u003c/a\u003e), which can be applied on-top-of rough voice activity segments from assumption \u003ccode\u003e#1\u003c/code\u003e (for instance, by reading attention activations; or by concatenating rough word timings).\u003c/li\u003e\n\u003cli\u003eThe number of speakers is not exogenously known, yet could be specified after diarization completes.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eOne of the latest advances for EEND-class models leverages the reasonably novel Convolutional Transformer (\u0026ldquo;Conformer\u0026rdquo;) architecture ((\u003ca href=\"#citeproc_bib_item_4\"\u003eGulati et al. 2020\u003c/a\u003e)) to improve the performance of the model. Specifically, the model swaps the time-delayed fully connected blocks in favor of Conformer blocks, and mixes in the SpecAugment data augmentation technique for the Log-Mel frequency input ((\u003ca href=\"#citeproc_bib_item_7\"\u003eLiu et al. 2021\u003c/a\u003e)). We will use this new model both as the basis of our work, as well as the benchmark to improve upon for diarization results.\u003c/p\u003e\n\u003ch3 id=\"text-aware--near--end-to-end-approach\"\u003eText-Aware (Near) End-to-End Approach\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-11_22-49-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eContextual and positional information (for instance, raving pronoun use) provides a useful basis by which humans recognize the flow of an utterance used to diarize speech.\u003c/p\u003e\n\u003cp\u003eAssumption \u003ccode\u003e#2\u003c/code\u003e above indicates that one could identify segments of text transcripts corresponding to the input audio\u0026mdash;albeit not diarized. 
We hypothesize that leveraging the information from text transcripts (even if not tightly aligned) will help the model track the flow of conversation and get better diarization performance.\u003c/p\u003e\n\u003cp\u003eIn the figure above, we specifically chose the Transformer BERT encoder ((\u003ca href=\"#citeproc_bib_item_1\"\u003eDevlin et al. 2018\u003c/a\u003e)) to process a segment of text ASR corresponding to the input log-mel audio. The processed Bert latents are added and statistically pooled to the Conformer outputs from processing the audio signals; the fused embeddings are then passed through a fully-connected classification head for the usual labeling consistent with EEND ((\u003ca href=\"#citeproc_bib_item_2\"\u003eFujita, Kanda, Horiguchi, Nagamatsu, et al. 2019\u003c/a\u003e)).\u003c/p\u003e\n\u003cp\u003eBy training a multimodal scheme in this manner, we hope to demonstrate an improved level of performance which fusing ASR text can provide to the diarization task.\u003c/p\u003e\n\u003ch3 id=\"improved-x-vector-via-conformers\"\u003eImproved X-Vector via Conformers\u003c/h3\u003e\n\u003cp\u003eDesign constraint \u003ccode\u003e#3\u003c/code\u003e which we outlined earlier was the desire to identify extogenously the number of speakers. While an extension below explores the possibility of this in an end-to-end architecture, conventional probabilistic clustering methods (even including neural components, such as (\u003ca href=\"#citeproc_bib_item_14\"\u003eSnyder et al. 2018\u003c/a\u003e)) allow manually specified clusters to be created and tagged using PLDA ((\u003ca href=\"#citeproc_bib_item_5\"\u003eKenny et al. 2013\u003c/a\u003e)) or HMMs ((\u003ca href=\"#citeproc_bib_item_6\"\u003eLandini et al. 2022\u003c/a\u003e)).\u003c/p\u003e\n\u003cp\u003eOne direct extension to this approach would be the use of the Conformer architecture highlighted above in place of the fully-connected network ((\u003ca href=\"#citeproc_bib_item_13\"\u003eSnyder et al. 
2017\u003c/a\u003e)) which forms the basis of the x-vector approach.\u003c/p\u003e\n\u003cp\u003eTo perform this, the x-vector representations would be swapped directly for the final latent from the EEND Conformer architecture prior to the fully-connected prediction head. All other post-processing of x-vectors, we hypothesize, could be applied to the new latents with minimal changes.\u003c/p\u003e\n\u003cp\u003eSpecifically, convolution self-attention in the Conformer architecture work in a similar pattern to ((\u003ca href=\"#citeproc_bib_item_10\"\u003ePeddinti, Povey, and Khudanpur 2015\u003c/a\u003e)) to scan across time frames; however, self-attention is a trained parameter, allowing the timescale dependence to be adaptive to the context provided.\u003c/p\u003e\n\u003cp\u003eFurther adaptive training\u0026mdash;including training on previously segmented voice activity, and/or taking MFCC instead of Log-Mel as input\u0026mdash;maybe needed mostly following the training objectives in ((\u003ca href=\"#citeproc_bib_item_14\"\u003eSnyder et al. 2018\u003c/a\u003e)) in order for the latent vectors to reflect the characteristics of new, unknown speakers.\u003c/p\u003e\n\u003ch3 id=\"other-possibilities\"\u003eOther Possibilities\u003c/h3\u003e\n\u003ch4 id=\"text-and-vectors\"\u003eText and Vectors\u003c/h4\u003e\n\u003cp\u003eOne direct correlary of the two proposals above is simply concatenating the novelty of each: creating text+audio transformer based latent embeddings as the basis for speaker clustering.\u003c/p\u003e\n\u003ch4 id=\"speaker-count-signal\"\u003eSpeaker-Count Signal\u003c/h4\u003e\n\u003cp\u003eClustering approaches, although more explainable, does confer some disadvantages. 
For instance, it will have no good way forward to predict overlapping speakers (as \u0026ldquo;a speaker similar to both A and B\u0026rdquo; would appear in a similar place in the latent space as \u0026ldquo;A and B are crosstalking\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eReturning to the EEND approach, however, brings into focus the question regarding speaker count. One possibility for addressing this involves injecting an extra token\u0026mdash;either in the \u0026ldquo;text\u0026rdquo; portion of the multimodal implementation, or perhaps simply fused into the input of the original diarizing Conformer (i.e. (\u003ca href=\"#citeproc_bib_item_7\"\u003eLiu et al. 2021\u003c/a\u003e))\u0026mdash;representing the number of speakers.\u003c/p\u003e\n\u003cp\u003eThen, we will add a large negative positive term to the loss associated with incorrectly-used (i.e. out of bounds) speaker ID classes.\u003c/p\u003e\n\u003cp\u003eUnfortunately, because of the minimal weight of one speaker-count feature compared to the audio sample, and the Gaussian nature of neural networks, this method will provide no garantees regarding the actual diarization outputs.\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” \u003ci\u003earXiv Preprint arXiv:1810.04805\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eFujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Kenji Nagamatsu, and Shinji Watanabe. 2019. 
“End-to-End Neural Speaker Diarization with Permutation-Free Objectives.” \u003ci\u003earXiv Preprint arXiv:1909.05952\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eFujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Yawen Xue, Kenji Nagamatsu, and Shinji Watanabe. 2019. “End-to-End Neural Speaker Diarization with Self-Attention.” In \u003ci\u003e2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)\u003c/i\u003e, 296–303. IEEE.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eGulati, Anmol, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, et al. 2020. “Conformer: Convolution-Augmented Transformer for Speech Recognition.” arXiv. \u003ca href=\"http://arxiv.org/abs/2005.08100\"\u003ehttp://arxiv.org/abs/2005.08100\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eKenny, Patrick, Themos Stafylakis, Pierre Ouellet, Md. Jahangir Alam, and Pierre Dumouchel. 2013. “PLDA for Speaker Verification with Utterances of Arbitrary Duration.” In \u003ci\u003e2013 IEEE International Conference on Acoustics, Speech and Signal Processing\u003c/i\u003e, 7649–53. Vancouver, BC, Canada: IEEE. doi:\u003ca href=\"https://doi.org/10.1109/ICASSP.2013.6639151\"\u003e10.1109/ICASSP.2013.6639151\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eLandini, Federico, Ján Profant, Mireia Diez, and Lukáš Burget. 2022. “Bayesian HMM Clustering of X-Vector Sequences (VBx) in Speaker Diarization: Theory, Implementation and Analysis on Standard Tasks.” \u003ci\u003eComputer Speech \u0026#38; Language\u003c/i\u003e 71 (January): 101254. 
doi:\u003ca href=\"https://doi.org/10.1016/j.csl.2021.101254\"\u003e10.1016/j.csl.2021.101254\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eLiu, Yi Chieh, Eunjung Han, Chul Lee, and Andreas Stolcke. 2021. “End-to-End Neural Diarization: From Transformer to Conformer.” In \u003ci\u003eInterspeech 2021\u003c/i\u003e, 3081–85. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2021-1909\"\u003e10.21437/Interspeech.2021-1909\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eMcAuliffe, Michael, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. 2017. “Montreal Forced Aligner: Trainable Text-Speech Alignment Using Kaldi.” In \u003ci\u003eInterspeech\u003c/i\u003e, 2017:498–502.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003ePark, Tae Jin, Naoyuki Kanda, Dimitrios Dimitriadis, Kyu J. Han, Shinji Watanabe, and Shrikanth Narayanan. 2021. “A Review of Speaker Diarization: Recent Advances with Deep Learning.” arXiv. \u003ca href=\"http://arxiv.org/abs/2101.09624\"\u003ehttp://arxiv.org/abs/2101.09624\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_10\"\u003e\u003c/a\u003ePeddinti, Vijayaditya, Daniel Povey, and Sanjeev Khudanpur. 2015. “A Time Delay Neural Network Architecture for Efficient Modeling of Long Temporal Contexts.” In \u003ci\u003eInterspeech 2015\u003c/i\u003e, 3214–18. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2015-647\"\u003e10.21437/Interspeech.2015-647\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_11\"\u003e\u003c/a\u003eRadford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. 
“Robust Speech Recognition via Large-Scale Weak Supervision.” \u003ci\u003earXiv Preprint arXiv:2212.04356\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_12\"\u003e\u003c/a\u003eSell, Gregory, David Snyder, Alan McCree, Daniel Garcia-Romero, Jesús Villalba, Matthew Maciejewski, Vimal Manohar, et al. 2018. “Diarization Is Hard: Some Experiences and Lessons Learned for the JHU Team in the Inaugural DIHARD Challenge.” In \u003ci\u003eInterspeech 2018\u003c/i\u003e, 2808–12. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2018-1893\"\u003e10.21437/Interspeech.2018-1893\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_13\"\u003e\u003c/a\u003eSnyder, David, Daniel Garcia-Romero, Daniel Povey, and Sanjeev Khudanpur. 2017. “Deep Neural Network Embeddings for Text-Independent Speaker Verification.” In \u003ci\u003eInterspeech 2017\u003c/i\u003e, 999–1003. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2017-620\"\u003e10.21437/Interspeech.2017-620\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_14\"\u003e\u003c/a\u003eSnyder, David, Daniel Garcia-Romero, Gregory Sell, Daniel Povey, and Sanjeev Khudanpur. 2018. “X-Vectors: Robust DNN Embeddings for Speaker Recognition.” In \u003ci\u003e2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)\u003c/i\u003e, 5329–33. Calgary, AB: IEEE. 
doi:\u003ca href=\"https://doi.org/10.1109/ICASSP.2018.8461375\"\u003e10.1109/ICASSP.2018.8461375\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspeech_diarization/","tags":null,"title":"Transformer Speech Diarization"},{"categories":null,"contents":"Transformers has replaced large pipelines into a single system.\n\u0026ldquo;Transformers verticalized tasks in 2013 EMNLP; various domains\u0026rdquo;\nProcess Multiple manual systems that talk to each other has been replaced by neurons talking to each other General word embeddings like Word2Vec Sequence to sequence modeling from those vecs that are more general: learning variable length representations From LSTMs to Encoder-Decoder architectures: Google Neural Machine Translation System 2016 (LSTM seq2seq SoTA) So: big complicated pipelines turn into one homogeneous system.\nBig LSTM Problems and their Solutinos LSTMs crush the entire sequence into one embedding, which is bad because there\u0026rsquo;s no representation between inputs.\nConvolutions begin to solve this problem by looking at the local interactions to learn about the structure of the problem.\nSelf-attention does this massively: capturing token-to-token interactions in a parallelization fashion.\nTransformer Motivation Motivation: convolutions allows parallelism, \u0026ldquo;can we read and write in parallel instead of left to right generation?\u0026rdquo;\nno. 
decoding in parallel sucks apparently.\nthe ordering is hard: we don\u0026rsquo;t know how the outputs should be ordered; generating all at once assumes the output are conditionally independent each ordering selection narrows the posterior space and it makes generation easier But we can still read in parallel unlike LSTMs which is BASED.\nSelf attention is actually faster too, because convolutions are \\(O(knd^{2})\\) but self attention happens without convolving with only \\(O(nd^{2})\\).\n\u0026ldquo;Processing\u0026rdquo; happens through contractions/expansions like ResNet.\nMulti-Head Attention Language modeling: \u0026ldquo;who did what to whom\u0026rdquo;?\nA single self-attention only can capture one of those W relationships. The best in can do (because of softmax) is to do a weighted average of the inputs.\nPosition encodings Because adding is commutative, attention is permutation invariant so we have to add a positional encoding.\nIn theory we want length invariant models, which requires long term embeddings. Absolute embeddings, when generation length becomes too long, you end up with degration as length increases\nNext Steps Long-Form Retrival There are ways of doing \u0026ldquo;structured sparse attention\u0026rdquo;, an input modulated sparse matrix for attention that saves a lot of flops. So, we can do long form contexts eventually by playing with this area of retrival.\nLiking Physics \u0026ldquo;You want to make physics your friend\u0026rdquo;\nConvolutions and self attention moves memory between GPU HBM and GPU SRAM a lot: four different move/load operations. That\u0026rsquo;s not a FLOP problem. 
How do we fix that?\nmulti-query/group query approach: reduce read heads: n\u0026lt;d key and n\u0026lt;d values; a bunch of queries attend to the same keys and values\u0026mdash;loose fidelity but less loads Softmax improvements to improve performance ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtransformers/\"\u003eTransformers\u003c/a\u003e has replaced large pipelines into a single system.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhtransformers/\"\u003eTransformers\u003c/a\u003e verticalized tasks in 2013 EMNLP; various domains\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"process\"\u003eProcess\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eMultiple manual systems that talk to each other has been replaced by neurons talking to each other\u003c/li\u003e\n\u003cli\u003eGeneral word embeddings like Word2Vec\u003c/li\u003e\n\u003cli\u003eSequence to sequence modeling from those vecs that are more general: learning variable length representations\u003c/li\u003e\n\u003cli\u003eFrom LSTMs to Encoder-Decoder architectures: Google Neural Machine Translation System 2016 (LSTM seq2seq SoTA)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo: big complicated pipelines turn into one homogeneous system.\u003c/p\u003e\n\u003ch3 id=\"big-lstm-problems-and-their-solutinos\"\u003eBig LSTM Problems and their Solutinos\u003c/h3\u003e\n\u003cp\u003eLSTMs crush the entire sequence into one embedding, which is bad because there\u0026rsquo;s no representation between inputs.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrandom_variables/#adding-random-variables\"\u003eConvolution\u003c/a\u003es begin to solve this problem by looking at the local interactions to learn about the structure of the problem.\u003c/p\u003e\n\u003cp\u003eSelf-attention does this massively: capturing token-to-token interactions in a parallelization fashion.\u003c/p\u003e\n\u003ch3 id=\"transformer-motivation\"\u003eTransformer Motivation\u003c/h3\u003e\n\u003cp\u003eMotivation: 
convolutions allows parallelism, \u0026ldquo;can we read and write in parallel instead of left to right generation?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eno. decoding in parallel sucks apparently.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe ordering is hard: we don\u0026rsquo;t know how the outputs should be ordered; generating all at once assumes the output are conditionally independent\u003c/li\u003e\n\u003cli\u003eeach ordering selection narrows the posterior space and it makes generation easier\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBut we can still read in parallel unlike LSTMs which is BASED.\u003c/p\u003e\n\u003cp\u003eSelf attention is actually faster too, because convolutions are \\(O(knd^{2})\\) but self attention happens without convolving with only \\(O(nd^{2})\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Processing\u0026rdquo; happens through contractions/expansions like ResNet.\u003c/p\u003e\n\u003ch3 id=\"multi-head-attention\"\u003eMulti-Head Attention\u003c/h3\u003e\n\u003cp\u003eLanguage modeling: \u0026ldquo;who did what to whom\u0026rdquo;?\u003c/p\u003e\n\u003cp\u003eA single self-attention only can capture one of those W relationships. The best in can do (because of softmax) is to do a weighted average of the inputs.\u003c/p\u003e\n\u003ch3 id=\"position-encodings\"\u003ePosition encodings\u003c/h3\u003e\n\u003cp\u003eBecause adding is commutative, attention is permutation invariant so we have to add a positional encoding.\u003c/p\u003e\n\u003cp\u003eIn theory we want length invariant models, which requires long term embeddings. 
Absolute embeddings, when generation length becomes too long, you end up with degration as length increases\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003ch3 id=\"long-form-retrival\"\u003eLong-Form Retrival\u003c/h3\u003e\n\u003cp\u003eThere are ways of doing \u0026ldquo;structured sparse attention\u0026rdquo;, an input modulated sparse matrix for attention that saves a lot of flops. So, we can do long form contexts eventually by playing with this area of retrival.\u003c/p\u003e\n\u003ch3 id=\"liking-physics\"\u003eLiking Physics\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;You want to make physics your friend\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eConvolutions and self attention moves memory between GPU HBM and GPU SRAM a lot: four different move/load operations. That\u0026rsquo;s not a FLOP problem. How do we fix that?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003emulti-query/group query approach: reduce read heads: n\u0026lt;d key and n\u0026lt;d values; a bunch of queries attend to the same keys and values\u0026mdash;loose fidelity but less loads\u003c/li\u003e\n\u003cli\u003eSoftmax improvements to improve performance\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtransformers/","tags":null,"title":"Transformers"},{"categories":null,"contents":"Translation Theory\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtranslation_theory/\"\u003eTranslation Theory\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtranslation_studies_index/","tags":null,"title":"Translation Studies Index"},{"categories":null,"contents":"Translation Theory is the theory that studies how translation works.\nSpectrum of Translation domestication and foreignization are processes by which a translator can choose to alter the style of a translation for a purpose.\nforeignization trying to bring the target language closer to the source language\nbring in foreign words use colourful idioms use old words 
domestication trying to bring he source language closer to the target language\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtranslation_theory/\"\u003eTranslation Theory\u003c/a\u003e is the theory that studies how \u003ca href=\"/posts/kbhtranslation_studies_index/\"\u003etranslation\u003c/a\u003e works.\u003c/p\u003e\n\u003ch2 id=\"spectrum-of-translation\"\u003eSpectrum of Translation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#spectrum-of-translation\"\u003edomestication\u003c/a\u003e and \u003ca href=\"#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e are processes by which a translator can choose to alter the style of a translation for a purpose.\u003c/p\u003e\n\u003ch3 id=\"foreignization--org36767d3\"\u003e\u003ca href=\"#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003etrying to bring the target language closer to the source language\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebring in foreign words\u003c/li\u003e\n\u003cli\u003euse colourful idioms\u003c/li\u003e\n\u003cli\u003euse old words\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"domestication--org36767d3\"\u003e\u003ca href=\"#spectrum-of-translation\"\u003edomestication\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003etrying to bring he source language closer to the target language\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtranslation_theory/","tags":null,"title":"Translation Theory"},{"categories":null,"contents":"A load perpendicular to the long end of a rod. Think of a metal rod lying flat on the ground; a transverse\n","html":"\u003cp\u003eA load perpendicular to the \u003cem\u003elong\u003c/em\u003e end of a rod. 
Think of a metal rod lying flat on the ground; a transverse\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtransverse_loaod/","tags":null,"title":"transverse load"},{"categories":null,"contents":"Humans either over-rely (drive a Tesla while sleeping) or under rely (interfering) with robot\u0026rsquo;s actions.\nhuman and robot interactions may depend on entire history trust is a proxy for the full interaction history the human\u0026rsquo;s policy must be modeled by th robot trust is demonstrated through real-world experimentation Formulation Add two variable\nTrust: \\(\\theta_{t}\\), the robot\u0026rsquo;s ability to succeed in a task Performance: \\(e_{t+1}\\), success or failure in attempting a task the trust model probabilities for model\u0026rsquo;s correct modeling of humans are low: high variance between participants.\nTrust Dynamics models human\u0026rsquo;s trust in the robot as a linear gaussian.\nHuman Model Results Sadly, the system didn\u0026rsquo;t actually increase in trust score, but the performance was better through lower human intervention.\n","html":"\u003cp\u003eHumans either over-rely (drive a Tesla while sleeping) or under rely (interfering) with robot\u0026rsquo;s actions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehuman and robot interactions may depend on entire history\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003etrust is a proxy for the full interaction history\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ethe \u003cstrong\u003ehuman\u0026rsquo;s\u003c/strong\u003e policy must be modeled by th robot\u003c/li\u003e\n\u003cli\u003etrust is demonstrated through real-world experimentation\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"formulation\"\u003eFormulation\u003c/h2\u003e\n\u003cp\u003eAdd two variable\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTrust: \\(\\theta_{t}\\), the robot\u0026rsquo;s ability to succeed in a task\u003c/li\u003e\n\u003cli\u003ePerformance: 
\\(e_{t+1}\\), success or failure in attempting a task\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003ethe trust model probabilities for model\u0026rsquo;s correct modeling of humans are low: high variance between participants.\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"trust-dynamics\"\u003eTrust Dynamics\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_10-10-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003emodels human\u0026rsquo;s trust in the robot as a linear gaussian.\u003c/p\u003e\n\u003ch3 id=\"human-model\"\u003eHuman Model\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_10-13-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"results\"\u003eResults\u003c/h2\u003e\n\u003cp\u003eSadly, the system didn\u0026rsquo;t actually increase in trust score, but the performance was better through lower human intervention.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtrustpomdp/","tags":null,"title":"TrustPOMDP"},{"categories":null,"contents":"Tuning Forks (funing torks!) is a Tuning Fork. You smack it and it goes \u0026ldquo;biiing!\u0026rdquo;\nLet\u0026rsquo;s figure out how it works. For us to be one same page, let\u0026rsquo;s define some vocab:\nVocab \u0026ldquo;Tine\u0026rdquo;: one of the two prongs of the fork A Cursory Explanation Source: here and here. Both are not very scientific but a good first step.\nFrom a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.\nOk but then this raises the question of why there\u0026rsquo;s two tines. 
The explanation this website gives is essentially that the actual mechanism of the Tuning Fork is in squishing the air immediately around the fork, so\u0026hellip;\nif the tines are push towards together, it creates a void in the space it just was; this creates a low pressure rarefaction area if the tines snap back apart, it compresses the air creating compression by squishing the air around it And therefore, the air around the funing tork is essentially being played like a two-way slingy. To adjust the pitch of the Tuning Fork, you lengthen or shorten it: longer tuning forks have larger tines, which vibrate more slowly.\nOk but now many, many questions why does smacking one side of the Tuning Fork make both sides vibrate presumably the base is not vibrating; hence, how does the downward-bendy vibration cause perpendicular oscillation (does it?) A Detour on Rigid Body Harmonic Motion Let\u0026rsquo;s talk about Bending. How does this relate to springs/slinkies? read this. A Better Detour on Cantilever Beams Cantilever Beams\nA Detour on the Temperature We are really worried about two different things here.\nMetal expands/contracts based on the temperature Temperature affects speed of sound A Detour on Material Science Why are our Tuning Forks out of tune? Fun, Relevant Factoids About the World The range of human hearing from a youngen is about 20Hz to 20,000Hz. Look into Young\u0026rsquo;s Modulus\nDensity Second overtones: six and a quarter; why?\nprove the equations given in Rossing 1990\nwhy do high frequencies die faster?\nWhy are they FORKS? What\u0026rsquo;s wrong with one prong\nLagrangian Mechanics\nexperiments to do in the end measuring in water measuring questions to ask why no free vibrations just standing? do the various tuning fork modes compose what happened to the harmonics of the fundimental? I know the overotens are 6/14, but where did the harmonics go? do they compose? what if we did it in a vaccume? 
of course the tuning fork is not going to be heard, but will it keep vibrating forever? Nyquist limit (FFT is only accurate to half the sampling rate; 10000 hz sampling (default on logger pro) means max is 5000 Hz) things we can use far field because the wavelength is much mucm much much larger than the seperation between the two tines; what is the wavelength? function of frequency and hertz Questions for Mark cuw tuning forks\u0026rsquo; freq is not the predicted freq of its shortest tine. urg how driven oscellation. how would it actually work? last minute tuning forks easy explanation of FFT \u0026ldquo;wrapping around circle\u0026rdquo; backup slide on octahedral scress explain beta how to get wavelength from sinusoidal equation how does wavelength change with temp; how does our ear compensate? https://en.wikipedia.org/wiki/Residual_stress ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Forks\u003c/a\u003e (funing torks!) is a \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e. You smack it and it goes \u0026ldquo;biiing!\u0026rdquo;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-08-27_14-02-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLet\u0026rsquo;s figure out how it works. For us to be one same page, let\u0026rsquo;s define some vocab:\u003c/p\u003e\n\u003ch2 id=\"vocab\"\u003eVocab\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Tine\u0026rdquo;: one of the two prongs of the fork\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"a-cursory-explanation\"\u003eA Cursory Explanation\u003c/h2\u003e\n\u003cp\u003eSource: \u003ca href=\"https://science.howstuffworks.com/tuning-fork2.htm\"\u003ehere\u003c/a\u003e and \u003ca href=\"https://americanhistory.si.edu/science/tuningfork.htm\"\u003ehere\u003c/a\u003e. 
Both are not very scientific but a good first step.\u003c/p\u003e\n\u003cp\u003eFrom a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.\u003c/p\u003e\n\u003cp\u003eOk but then this raises the question of why there\u0026rsquo;s two tines. The explanation this website gives is essentially that the actual mechanism of the \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e is in squishing the air \u003cstrong\u003eimmediately around\u003c/strong\u003e the fork, so\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif the tines are push towards together, it creates a void in the space it just was; this creates a low pressure \u003cstrong\u003e\u003cstrong\u003erarefaction\u003c/strong\u003e\u003c/strong\u003e area\u003c/li\u003e\n\u003cli\u003eif the tines snap back apart, it compresses the air creating \u003cstrong\u003e\u003cstrong\u003ecompression\u003c/strong\u003e\u003c/strong\u003e by squishing the air around it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd therefore, the air around the \u003ca href=\"/posts/kbhtuning_forks/\"\u003efuning tork\u003c/a\u003e is essentially being played like a two-way slingy. 
To adjust the pitch of the \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e, you lengthen or shorten it: longer tuning forks have larger tines, which vibrate more slowly.\u003c/p\u003e\n\u003ch2 id=\"ok-but-now-many-many-questions\"\u003eOk but now many, many questions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ewhy does smacking one side of the \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e make both sides vibrate\u003c/li\u003e\n\u003cli\u003epresumably the base is not vibrating; hence, how does the downward-bendy vibration cause perpendicular oscillation (does it?)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"a-detour-on-rigid-body-harmonic-motion\"\u003eA Detour on Rigid Body Harmonic Motion\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s talk about \u003ca href=\"/posts/kbhbending/\"\u003eBending.\n\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eHow does this relate to springs/slinkies? \u003ca href=\"https://ccrma.stanford.edu/~jos/pasp/Young_s_Modulus_Spring_Constant.html\"\u003eread this.\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"a-better-detour-on-cantilever-beams\"\u003eA Better Detour on Cantilever Beams\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever Beams\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"a-detour-on-the-temperature\"\u003eA Detour on the Temperature\u003c/h2\u003e\n\u003cp\u003eWe are really worried about two different things here.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eMetal expands/contracts based on the temperature\u003c/li\u003e\n\u003cli\u003eTemperature affects speed of sound\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"a-detour-on-material-science\"\u003eA Detour on Material Science\u003c/h2\u003e\n\u003ch2 id=\"why-are-our-tuning-fork--kbhtuning-forks-dot-md--s-out-of-tune\"\u003eWhy are our \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003es out of tune?\u003c/h2\u003e\n\u003ch2 
id=\"fun-relevant-factoids-about-the-world\"\u003eFun, Relevant Factoids About the World\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe range of human hearing from a youngen is about 20Hz to 20,000Hz.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"look-into\"\u003eLook into\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eYoung\u0026rsquo;s Modulus\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDensity\u003c/li\u003e\n\u003cli\u003eSecond\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eovertones: six and a quarter; why?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eprove the equations given in \u003ca href=\"/posts/kbhrossing_1990/\"\u003eRossing 1990\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewhy do high frequencies die faster?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhy are they FORKS? What\u0026rsquo;s wrong with one prong\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-25_16-17-35_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian Mechanics\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"experiments-to-do-in-the-end\"\u003eexperiments to do in the end\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emeasuring in water\u003c/li\u003e\n\u003cli\u003emeasuring\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-to-ask\"\u003equestions to ask\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy no free vibrations just standing?\u003c/li\u003e\n\u003cli\u003edo the various tuning fork modes compose\u003c/li\u003e\n\u003cli\u003ewhat happened to the harmonics of the fundimental? I know the overotens are 6/14, but where did the harmonics go? do they compose?\u003c/li\u003e\n\u003cli\u003ewhat if we did it in a vaccume? 
of course the tuning fork is not going to be heard, but will it keep vibrating forever?\u003c/li\u003e\n\u003cli\u003eNyquist limit (FFT is only accurate to half the sampling rate; 10000 hz sampling (default on logger pro) means max is 5000 Hz)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things\"\u003ethings\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe can use far field because the wavelength is much mucm much much larger than the seperation between the two tines; what is the wavelength? function of frequency and hertz\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-mark\"\u003eQuestions for Mark\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecuw tuning forks\u0026rsquo; freq is not the predicted freq of its shortest tine. urg how\u003c/li\u003e\n\u003cli\u003edriven oscellation. how would it actually work?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"last-minute-tuning-forks\"\u003elast minute tuning forks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eeasy explanation of FFT \u0026ldquo;wrapping around circle\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ebackup slide on octahedral scress\u003c/li\u003e\n\u003cli\u003eexplain beta\u003c/li\u003e\n\u003cli\u003ehow to get wavelength from sinusoidal equation\u003c/li\u003e\n\u003cli\u003ehow does wavelength change with temp; how does our ear compensate?\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Residual_stress\"\u003ehttps://en.wikipedia.org/wiki/Residual_stress\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtuning_forks/","tags":null,"title":"Tuning Fork"},{"categories":null,"contents":"what if heat, but plate\n\\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} + \\pdv[2]{u}{y} \\end{equation}\nFor some heat distribution that has arbitrary shape, on some domain \\(\\Omega \\times [0, \\infty]_{t}\\) (i.e. 
argumentation of some space dimensions by time).\nDirichlet Conditions: edges have heat \\(0\\) OR Neumann Conditions: normal derivative (flux) is \\(0\\) at the edge If \\(\\Omega\\) is a general blob, you are actually kinda fucked. Because the bounds on \\(x\\) depend on \\(y\\), and \\(y\\) on \\(x\\), so you can\u0026rsquo;t just separate them into a product.\nHowever, if we cut a rectangle, life is better.\nwhere:\n\\begin{equation} 0 \\leq x \\leq l_1 \\end{equation}\n\\begin{equation} 0 \\leq y \\leq l_2 \\end{equation}\nwhere the Dirichlet condition is now described as the four line segments along the curve at \\(l_1\\) and \\(l_2\\) having constant (or vanishing) temperature.\nIts general solution is:\n\\begin{equation} u(t,x,y) = \\sum_{n_1=1}^{\\infty}\\sum_{n_2=1}^{\\infty} A_{n_1, n_2} e^{-\\qty(\\qty( \\frac{{n_{1}}^{2}}{{l_{1}}^{2}}) + ( \\frac{{n_{2}}^{2}}{{l_{2}}^{2}})) \\pi^{2}t} \\sin \\qty(\\qty(n_1 \\frac{\\pi}{l_{1}})x )\\sin \\qty(\\qty(n_2 \\frac{\\pi}{l_{2}})y ) \\end{equation}\nwhere:\n\\begin{equation} \\lambda = \\lambda_{1} + \\lambda_{2} = - \\qty( \\frac{{n_{1}}^{2}}{{l_1}^{2}} + \\frac{{n_{2}}^{2}}{{l_2}^{2}}) \\pi^{2} \\end{equation}\nsolving \\begin{equation} U(t,x,y) = A(t)B(x)C(y) \\end{equation}\nSo now with much prayer and plugging:\n\\begin{equation} A\u0026rsquo;(t) B(x) C(y) = A(t) B\u0026rsquo;\u0026rsquo;(X)C(y) + A(t)B(x)C\u0026rsquo;\u0026rsquo;(y) \\end{equation}\nwhich gives:\n\\begin{equation} \\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda \\end{equation}\nWhich causes two problems to arise:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\lambda A(t) = 0 \\\\ \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda \\end{cases} \\end{equation}\nthe second expression gives:\n\\begin{equation} \\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - 
\\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} \\end{equation}\nMeaning:\n\\begin{equation} \\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda_{1} \\end{equation}\nMeaning:\n\\begin{equation} B\u0026rsquo;\u0026rsquo;(x) - \\lambda_{1} B(x) = 0 \\end{equation}\nand:\n\\begin{equation} C\u0026rsquo;\u0026rsquo;(y) - \\lambda_{2} C = 0 \\end{equation}\nwhere \\(\\lambda - \\lambda_{1} = \\lambda_{2}\\).\nNow, recall our boundary conditions:\n\\begin{equation} B(0) = B(l_1) = 0 \\end{equation}\nand\n\\begin{equation} C(0) = C(\\lambda_{2}) = 0 \\end{equation}\nSo, for the expression in \\(B\\), we obtain:\n\\begin{equation} \\lambda_{1} = \\frac{-k_{1}^{2}\\pi^{2}}{l_{1}^{2}}} \\end{equation}\n\\begin{equation} \\lambda_{2} = \\frac{-k_{2}^{2}\\pi^{2}}{l_{2}^{2}}} \\end{equation}\nso:\n\\begin{equation} \\lambda = \\lambda_{1} + \\lambda_{2} \\end{equation}\nAll together, we obtain:\n\\begin{equation} B(x) = \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}}) \\end{equation}\nand:\n\\begin{equation} C(y) = \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\end{equation}\nfinally, where:\n\\begin{equation} A\u0026rsquo; + \\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})A = 0 \\end{equation}\nwhich gives us:\n\\begin{equation} A(t) = e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t} \\end{equation}\nso then multiply them together:\n\\begin{equation} \\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}}) \\end{equation}\nat \\(u(0,x,y)\\), we obtain:\n\\begin{equation} u(0,x,y) = \\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}}) \\end{equation}\nfor every \\(f(x,y)\\), we can solve for \\(E_{k_1, k_2}\\) by fixing \\(y\\), for 
instance, then writing a Fourier series as a function that depends on the coefficients you left out. This gives:\n\\begin{equation} f(x,y) = \\sum a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\end{equation}\nand then, each of THESE internal functions a function \\(a_{k_1}(y)\\) , which you can obtain over \\(y\\) and expand as a fourier series.\nTo solve for each, you do the susual:\n\\begin{equation} a_{k_1}(y) = \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x} \\end{equation}\nwhich you can expand:\n\\begin{equation} E_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y} \\end{equation}\nwhich means that, substituting it in, the whole thing can be written together as:\n\\begin{equation} E_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x} \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y} \\end{equation}\n","html":"\u003cp\u003ewhat if heat, but plate\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x} + \\pdv[2]{u}{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some heat distribution that has arbitrary shape, on some domain \\(\\Omega \\times [0, \\infty]_{t}\\) (i.e. argumentation of some space dimensions by time).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e: edges have heat \\(0\\)\u003c/li\u003e\n\u003cli\u003eOR \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e: normal derivative (flux) is \\(0\\) at the edge\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf \\(\\Omega\\) is a general blob, you are actually kinda fucked. 
Because the bounds on \\(x\\) depend on \\(y\\), and \\(y\\) on \\(x\\), so you can\u0026rsquo;t just separate them into a product.\u003c/p\u003e\n\u003cp\u003eHowever, if we cut a rectangle, life is better.\u003c/p\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-28_22-57-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq x \\leq l_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq y \\leq l_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the Dirichlet condition is now described as the four line segments along the curve at \\(l_1\\) and \\(l_2\\) having constant (or vanishing) temperature.\u003c/p\u003e\n\u003cp\u003eIts general solution is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x,y) = \\sum_{n_1=1}^{\\infty}\\sum_{n_2=1}^{\\infty} A_{n_1, n_2} e^{-\\qty(\\qty( \\frac{{n_{1}}^{2}}{{l_{1}}^{2}}) + ( \\frac{{n_{2}}^{2}}{{l_{2}}^{2}})) \\pi^{2}t} \\sin \\qty(\\qty(n_1 \\frac{\\pi}{l_{1}})x )\\sin \\qty(\\qty(n_2 \\frac{\\pi}{l_{2}})y )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\lambda_{1} + \\lambda_{2} = - \\qty( \\frac{{n_{1}}^{2}}{{l_1}^{2}} + \\frac{{n_{2}}^{2}}{{l_2}^{2}}) \\pi^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003esolving\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x,y) = A(t)B(x)C(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo now with much prayer and plugging:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;(t) B(x) C(y) = A(t) B\u0026rsquo;\u0026rsquo;(X)C(y) + A(t)B(x)C\u0026rsquo;\u0026rsquo;(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich causes two problems to 
arise:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\lambda A(t) = 0 \\\\\n\\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe second expression gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB\u0026rsquo;\u0026rsquo;(x) - \\lambda_{1} B(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC\u0026rsquo;\u0026rsquo;(y) - \\lambda_{2} C = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda - \\lambda_{1} = \\lambda_{2}\\).\u003c/p\u003e\n\u003cp\u003eNow, recall our boundary conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(0) = B(l_1) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC(0) = C(\\lambda_{2}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, for the expression in \\(B\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{1} = \\frac{-k_{1}^{2}\\pi^{2}}{l_{1}^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{2} = \\frac{-k_{2}^{2}\\pi^{2}}{l_{2}^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\lambda_{1} + \\lambda_{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAll together, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = \\sin \\qty( \\frac{k_{1} \\pi 
x}{l_{1}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC(y) = \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efinally, where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo; + \\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})A = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA(t) = e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso then multiply them together:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eat \\(u(0,x,y)\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(0,x,y) = \\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor every \\(f(x,y)\\), we can solve for \\(E_{k_1, k_2}\\) by fixing \\(y\\), for instance, then writing a Fourier series as a function that depends on the coefficients you left out. 
This gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x,y) = \\sum a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, each of \u003cstrong\u003eTHESE\u003c/strong\u003e internal functions a function \\(a_{k_1}(y)\\) , which you can obtain over \\(y\\) and expand as a fourier series.\u003c/p\u003e\n\u003cp\u003eTo solve for each, you do the susual:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k_1}(y) = \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich you can expand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means that, substituting it in, the whole thing can be written together as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x} \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtwo_dimensional_heat_equation/","tags":null,"title":"two-dimensional heat equation"},{"categories":null,"contents":"Say we want to find the number which is the additive inverse (\u0026ldquo;negative\u0026rdquo;) of a number.\nWe can just flip each of the digit, and then add 1:\ntake \\(0101\\), invert it to get \\(1010\\) adding these two numbers will give you \\(1111\\). If we just added one more \\(0001\\), it will flip over to be \\(0000\\). Therefore, \\(1010+0001 = 1011\\) is the additive inverse of \\(0101\\). The left most bit being one: still a mark of whether or not something is negative. 
It just works backwards:\npros and cons of twos complement con: more difficult to represent and difficult to convert pro: only 1 representation for 0 pro: the most significant bit still indicates the sign of a number pro: addition works for any combination of positive/negative tricks all zeros: its always 0 zero plus all ones (011111\u0026hellip;111): it always is the largest signed value and some middle value for unsigned all ones: its always -1 (11111 =\u0026gt; 00000 +1 =\u0026gt; 1) for signed one plus all zeros mnemonic for remembering where overflows happened Unsigned Signed ","html":"\u003cp\u003eSay we want to find the number which is the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e (\u0026ldquo;negative\u0026rdquo;) of a number.\u003c/p\u003e\n\u003cp\u003eWe can just flip each of the digit, and then add 1:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_11-17-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003etake \\(0101\\), invert it to get \\(1010\\)\u003c/li\u003e\n\u003cli\u003eadding these two numbers will give you \\(1111\\). If we just added one more \\(0001\\), it will flip over to be \\(0000\\).\u003c/li\u003e\n\u003cli\u003eTherefore, \\(1010+0001 = 1011\\) is the additive inverse of \\(0101\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe left most bit being one: still a mark of whether or not something is negative. 
It just works backwards:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-36-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"pros-and-cons-of-twos-complement\"\u003epros and cons of twos complement\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003econ: more difficult to represent and difficult to convert\u003c/li\u003e\n\u003cli\u003epro: only 1 representation for 0\u003c/li\u003e\n\u003cli\u003epro: the most significant bit still indicates the sign of a number\u003c/li\u003e\n\u003cli\u003epro: addition works for any combination of positive/negative\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tricks\"\u003etricks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eall zeros: its always 0\u003c/li\u003e\n\u003cli\u003ezero plus all ones (011111\u0026hellip;111): it always is the largest signed value and some middle value for unsigned\u003c/li\u003e\n\u003cli\u003eall ones: its always -1 (11111 =\u0026gt; 00000 +1 =\u0026gt; 1) for signed\u003c/li\u003e\n\u003cli\u003eone plus all zeros\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mnemonic-for-remembering-where-overflows-happened\"\u003emnemonic for remembering where overflows happened\u003c/h2\u003e\n\u003ch3 id=\"unsigned\"\u003eUnsigned\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-58-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"signed\"\u003eSigned\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-58-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhtwo_s_complement/","tags":null,"title":"two's complement"},{"categories":null,"contents":" quality of service harm distributive harm existential harm ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquality_of_service_harm/\"\u003equality of service harm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdistributive_harm/\"\u003edistributive 
harm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eexistential harm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtypes_of_harm/","tags":null,"title":"types of harm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhu1_c/","tags":null,"title":"u1.c"},{"categories":null,"contents":"If you are given the problem, you can learn the parameters by just computing them. For instance, to estimate the parameters of a gaussian, we can compute the mean and variance and shove it in.\n","html":"\u003cp\u003eIf you are given the problem, you can learn the \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es by just computing them. For instance, to estimate the \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es of a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e, we can compute the mean and variance and shove it in.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunbiased_parameter_learning/","tags":null,"title":"unbiased parameter learning"},{"categories":null,"contents":"There are many different types of uncertainty.\nOutcome Uncertainty: actions may not have known results Model Uncertainty: best action in a state may not be known State Uncertainty: current state may not be precisely known Interaction Uncertainty: interference between models ","html":"\u003cp\u003eThere are many different types of \u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoutcome_uncertainty/\"\u003eOutcome Uncertainty\u003c/a\u003e: actions may not have known results\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eModel Uncertainty\u003c/a\u003e: best action in a state may not be known\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e: current state may not be precisely 
known\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinteraction_uncertainty/\"\u003eInteraction Uncertainty\u003c/a\u003e: interference between models\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuncertainty/","tags":null,"title":"uncertainty"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhunconc/","tags":null,"title":"unconc"},{"categories":null,"contents":"base epsilon-greedy:\nchoose a random action with probability \\(\\epsilon\\) otherwise, we choose the action with the best expectation \\(\\arg\\max_{a} Q(s,a)\\) epsilon-greedy exploration with decay Sometimes, approaches are suggested to decay \\(\\epsilon\\) whereby, at each timestamp:\n\\begin{equation} \\epsilon \\leftarrow \\alpha \\epsilon \\end{equation}\nwhereby \\(\\alpha \\in (0,1)\\) is called the \u0026ldquo;decay factor.\u0026rdquo;\nExplore-then-commit Select actions uniformly at random for \\(k\\) steps; then, go to greedy and stay there\n","html":"\u003cp\u003ebase \u003ca href=\"/posts/kbhundirected_exploration/\"\u003eepsilon-greedy\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echoose a random action with probability \\(\\epsilon\\)\u003c/li\u003e\n\u003cli\u003eotherwise, we choose the action with the best expectation \\(\\arg\\max_{a} Q(s,a)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"epsilon-greedy-exploration-with-decay\"\u003eepsilon-greedy exploration with decay\u003c/h2\u003e\n\u003cp\u003eSometimes, approaches are suggested to decay \\(\\epsilon\\) whereby, at each timestamp:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon \\leftarrow \\alpha \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby \\(\\alpha \\in (0,1)\\) is called the \u0026ldquo;decay factor.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"explore-then-commit\"\u003eExplore-then-commit\u003c/h2\u003e\n\u003cp\u003eSelect actions uniformly at random for \\(k\\) steps; then, go to greedy and stay 
there\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhundirected_exploration/","tags":null,"title":"Undirected Exploration"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhunimodal/","tags":null,"title":"unimodal"},{"categories":null,"contents":"the unique_lock is a mutex management type. Its a lock management system whereby the type will unlock the mutex on your behalf whenever the unique lock goes out of scope.\nthis is useful if there are multiple paths to exit a function, where an edge case made you forget when to unlock:\nvoid my_scope(mutex \u0026amp;mut, condition_variable_any \u0026amp;cv) { unique_lock\u0026lt;mutex\u0026gt; lck(mut); // do stuff, you can even pass it to a condition variable! cv.wait(lck); } ","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhunique_lock/\"\u003eunique_lock\u003c/a\u003e is a mutex management type. Its a lock management system whereby the type will unlock the mutex on your behalf whenever the unique lock goes out of scope.\u003c/p\u003e\n\u003cp\u003ethis is useful if there are multiple paths to exit a function, where an edge case made you forget when to unlock:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emy_scope\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emut\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econdition_variable_any\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunique_lock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elck\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emut\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do stuff, you can even pass it to a condition variable!\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewait\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elck\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhunique_lock/","tags":null,"title":"unique_lock"},{"categories":null,"contents":"Questions of Uniqueness and Existance are important elements in Differential Equations.\nHere\u0026rsquo;s a very general form of a differential 
equations. First, here\u0026rsquo;s the:\nfunction behavior tests continuity Weakest statement.\nA function is continuous if and only if:\n\\begin{equation} \\lim_{x \\to y} f(x) =f(y) \\end{equation}\nLipschitz Condition Stronger statement.\nThe Lipschitz Condition is a stronger test of Continuity such that:\n\\begin{equation} || F(t,x)-F(t,y)|| \\leq L|| x- y|| \\end{equation}\nfor all \\(t \\in I\\), \\(x,y \\in \\omega\\), with \\(L \\in (0,\\infty)\\) is a Lipschitz Condition in the dependent variable \\(x\\).\nReshaping this into linear one-dimensional function, we have that:\n\\begin{equation} \\left | \\frac{F(t,x)-F(t,y)}{x-y} \\right | \\leq L \u0026lt; \\infty \\end{equation}\nThe important thing here is that its the same \\(L\\) of convergence \\(\\forall t\\). However, \\(L\\) may not be stable\u0026mdash;in can oscillate\nDifferentiable We finally have the strongest statement.\n\\begin{equation} \\lim_{x \\to y} \\frac{f(x)-f(y)}{x-y} = C \\end{equation}\nTo make something Differentiable, it has to not only converge but converge to a constant \\(C\\).\nExistence and Uniqueness Check for differential equation Assume some \\(F:I \\times \\omega \\to \\mathbb{R}^{n}\\) (a function \\(F\\) whose domain is in some space \\(I \\times \\omega\\)) is bounded and continuous and satisfies the Lipschitz Condition, and let \\(x_{0} \\in \\omega\\), then, there exists \\(T_{0} \u0026gt; 0\\) and a unique solution for \\(x(t)\\) that touches \\(x_{0}\\) to the standard First-Order Differential Equation \\(\\dv{x}{t} = F(t,x), x(t_{0}) = t_{0}\\) for some \\(|t-t_{0}| \u0026lt; T_{0}\\).\nTo actually check that \\(F\\) satisfies Lipschitz Condition, we pretty much usually just go and take the partial derivative w.r.t. 
\\(x\\) (dependent variable, yes its \\(x\\)) of \\(F\\) on \\(x\\), which\u0026mdash;if exists on some bound\u0026mdash;satisfies the Lipschitz condition on that bound.\nProof So we started at:\n\\begin{equation} \\dv{x}{t} = F(t,x), x(t_{0}) = x_{0} \\end{equation}\nWe can separate this expression and integrate:\n\\begin{align} \u0026amp; \\dv{x}{t} = F(t,x) \\\\ \\Rightarrow\\ \u0026amp; \\dd{x} = F(t,x)\\dd{t} \\\\ \\Rightarrow\\ \u0026amp; \\int_{x_{0)}}^{x(t)} \\dd{x} = \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\\\ \\Rightarrow\\ \u0026amp; x(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\end{align}\nAt this point, if \\(F\\) is seperable, we can then seperate it out by \\(\\dd{t}\\) and taking the right integral. However, we are only interested in existance and uniquness, so we will do something named\u0026hellip;\nPicard Integration Picard Integration is a inductive iteration scheme which leverages the Lipschitz Condition to show that a function integral converges. Begin with the result that all First-Order Differential Equations have shape (after forcibly separating):\n\\begin{equation} x(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\end{equation}\nWe hope that the inductive sequence:\n\\begin{equation} x_{n+1}(t) = x_{0} + \\int_{t_{0}}^{t} F(s,x_{n}(s)) \\dd{s} \\end{equation}\nconverges to the same result above (that is, the functions \\(x_{n}(s)\\) stop varying and therefore we converge to a solution \\(x(s)\\) to show existance.\nThis is hard!\nHere\u0026rsquo;s a digression/example:\nif we fix a time \\(t=10\\):\nwe hope to say that:\n\\begin{equation} \\lim_{n \\to \\infty } G_{n}(10) = G(10) \\end{equation}\n\\(\\forall \\epsilon \u0026gt; 0\\), \\(\\exists M \u0026lt; \\infty\\), \\(\\forall n\u0026gt;M\\),\n\\begin{equation} |G_{n}(10)-G(10)| \u0026lt; \\epsilon \\end{equation}\nNow, the thing is, for the integral above to converge uniformly, we hope that \\(M\\) stays fixed \\(\\forall t\\) (that all of the domain converges at once after the same 
under of the iterations.\nTaking the original expression, and applying the following page of algebra to it:\nFinally, we then apply the Lipschitz Condition because our setup is that \\(F\\) satisfies the Lipschitz Condition, we have that:\n\\begin{equation} ||x_{n+1}(t)-x_{n}(t)|| \\leq L\\int_{x_{0}}^{t} ||x_{n}(s)-x_{n-1}(s)||ds \\end{equation}\n","html":"\u003cp\u003eQuestions of \u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e are important elements in \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a very general form of a differential equations. First, here\u0026rsquo;s the:\u003c/p\u003e\n\u003ch2 id=\"function-behavior-tests\"\u003efunction behavior tests\u003c/h2\u003e\n\u003ch3 id=\"continuity\"\u003econtinuity\u003c/h3\u003e\n\u003cp\u003eWeakest statement.\u003c/p\u003e\n\u003cp\u003eA function is \u003ca href=\"#continuity\"\u003econtinuous\u003c/a\u003e if and only if:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{x \\to y} f(x) =f(y)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"lipschitz-condition\"\u003eLipschitz Condition\u003c/h3\u003e\n\u003cp\u003eStronger statement.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e is a stronger test of \u003ca href=\"#continuity\"\u003eContinuity\u003c/a\u003e such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|| F(t,x)-F(t,y)|| \\leq L|| x- y||\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(t \\in I\\), \\(x,y \\in \\omega\\), with \\(L \\in (0,\\infty)\\) is a \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e in the \u003cstrong\u003edependent\u003c/strong\u003e variable \\(x\\).\u003c/p\u003e\n\u003cp\u003eReshaping this into linear one-dimensional function, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\left | \\frac{F(t,x)-F(t,y)}{x-y} \\right | \\leq L 
\u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe important thing here is that its the same \\(L\\) of convergence \\(\\forall t\\). However, \\(L\\) may not be stable\u0026mdash;in can oscillate\u003c/p\u003e\n\u003ch3 id=\"differentiable\"\u003eDifferentiable\u003c/h3\u003e\n\u003cp\u003eWe finally have the strongest statement.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{x \\to y} \\frac{f(x)-f(y)}{x-y} = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo make something \u003ca href=\"#differentiable\"\u003eDifferentiable\u003c/a\u003e, it has to not only converge but converge to a constant \\(C\\).\u003c/p\u003e\n\u003ch2 id=\"existence-and-uniqueness-check-for-differential-equation--kbhdiffeq-intro-dot-md\"\u003eExistence and Uniqueness Check for \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eAssume some \\(F:I \\times \\omega \\to \\mathbb{R}^{n}\\) (a function \\(F\\) whose domain is in some space \\(I \\times \\omega\\)) is \u003cstrong\u003ebounded\u003c/strong\u003e and \u003cstrong\u003econtinuous\u003c/strong\u003e and \u003cstrong\u003esatisfies the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e\u003c/strong\u003e, and let \\(x_{0} \\in \\omega\\), then, there exists \\(T_{0} \u0026gt; 0\\) and a unique solution for \\(x(t)\\) that touches \\(x_{0}\\) to the standard \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equation\u003c/a\u003e \\(\\dv{x}{t} = F(t,x), x(t_{0}) = t_{0}\\) for some \\(|t-t_{0}| \u0026lt; T_{0}\\).\u003c/p\u003e\n\u003cp\u003eTo actually check that \\(F\\) satisfies \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e, we pretty much usually just go and take the partial derivative w.r.t. 
\\(x\\) (\u003cstrong\u003e\u003cstrong\u003edependent\u003c/strong\u003e\u003c/strong\u003e variable, yes its \\(x\\)) of \\(F\\) on \\(x\\), which\u0026mdash;if exists on some bound\u0026mdash;satisfies the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz condition\u003c/a\u003e on that bound.\u003c/p\u003e\n\u003ch3 id=\"proof\"\u003eProof\u003c/h3\u003e\n\u003cp\u003eSo we started at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = F(t,x), x(t_{0}) = x_{0}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can separate this expression and integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dv{x}{t} = F(t,x) \\\\\n\\Rightarrow\\ \u0026amp; \\dd{x} = F(t,x)\\dd{t} \\\\\n\\Rightarrow\\ \u0026amp; \\int_{x_{0)}}^{x(t)} \\dd{x} = \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\\\\n\\Rightarrow\\ \u0026amp; x(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAt this point, if \\(F\\) is \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/#solving-separable-differential-equations\"\u003eseperable\u003c/a\u003e, we can then seperate it out by \\(\\dd{t}\\) and taking the right integral. However, we are only interested in existance and uniquness, so we will do something named\u0026hellip;\u003c/p\u003e\n\u003ch4 id=\"picard-integration\"\u003ePicard Integration\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#picard-integration\"\u003ePicard Integration\u003c/a\u003e is a inductive iteration scheme which leverages the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e to show that a function integral converges. 
Begin with the result that all \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e have shape (after forcibly separating):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe hope that the inductive sequence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{n+1}(t) = x_{0} + \\int_{t_{0}}^{t} F(s,x_{n}(s)) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003econverges to the same result above (that is, the functions \\(x_{n}(s)\\) stop varying and therefore we converge to a solution \\(x(s)\\) to show existance.\u003c/p\u003e\n\u003cp\u003eThis is hard!\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eHere\u0026rsquo;s a digression/example:\u003c/p\u003e\n\u003cp\u003eif we fix a time \\(t=10\\):\u003c/p\u003e\n\u003cp\u003ewe hope to say that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty } G_{n}(10) = G(10)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\forall \\epsilon \u0026gt; 0\\), \\(\\exists M \u0026lt; \\infty\\), \\(\\forall n\u0026gt;M\\),\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|G_{n}(10)-G(10)| \u0026lt; \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, the thing is, for the integral above to converge uniformly, we hope that \\(M\\) stays fixed \\(\\forall t\\) (that all of the domain converges at once after the same under of the iterations.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eTaking the original expression, and applying the following page of algebra to it:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-13_13-34-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFinally, we then apply the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e because our setup is that \\(F\\) satisfies the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e, we have 
that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n||x_{n+1}(t)-x_{n}(t)|| \\leq L\\int_{x_{0}}^{t} ||x_{n}(s)-x_{n-1}(s)||ds\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuniqueness_and_existance/","tags":null,"title":"Uniqueness and Existance"},{"categories":null,"contents":"A constructor built out of quantum theory which can replicate itself. It is considered a universal computer.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003e built out of \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e which can replicate itself. It is considered a \u003ca href=\"/posts/kbhquantum_information_theory/#universal-computer\"\u003euniversal computer\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuniversal_quantum_constructor/","tags":null,"title":"universal quantum constructor"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhuniversity_of_georgia/","tags":null,"title":"University of Georgia"},{"categories":null,"contents":"Unix is a standard set of tools commonly used in software development.\nmacOS and Linux are on top of Unix Windows comes Unix now lol You can navigate Unix inside a command line.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e is a standard set of tools commonly used in software development.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emacOS and Linux are on top of Unix\u003c/li\u003e\n\u003cli\u003eWindows comes Unix now lol\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou can navigate \u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e inside a \u003ca href=\"/posts/kbhunix/\"\u003ecommand line\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunix/","tags":null,"title":"Unix"},{"categories":null,"contents":"its a File Payload Data with smartness.\nSector Size Block Size Inode Size Inodes Per Block Address 
Type 512 512 32 16 Short, 2 bytes Notably, the entire file system only supports \\(2^{16} = 32MB\\) worth of space due to short address types.\nFor each file on the disk, we store payload data in a bunch of places scattered across the disk, and a single inode which stores the location of each block for the file in an array.\ninodes contain an ordered list of block numbers, file size, permissions. all inodes are stored together in an inode table, which starts at block 2. Blocks 0 and 1 are disk metadata. inode can be read into memory individally to cache 10% of harddrive is used to inode Unix V6 Filesystem limits the maximum file size in order to keep the inode a finite size.\nThe inode table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. inodes are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. usually this packs 16 inodes per block\ninodes are 1 indexed in order to make.\ninode struct inode { uint16_t i_addr[8]; uint16_t i_mode[8]; uint16_t file_size; } Each inode contains 8 addresses in shorts in file order.\nreading inode tables from disk We read the raw 16-block inode data from the right sector, type coerce it into the inode type, and then read from it.\nconst size_t INODE_PER_BLOCK = SECTOR_SIZE / sizeof(struct inode); struct inode inodes[INODE_PER_BLOCK]; char buf[SECTOR_SIZE]; readsector(2, \u0026amp;inodes); // recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode printf(\u0026#34;addr: %d\\n\u0026#34;, inodes[0].i_add); inode modes inodes have two modes\nif ((inode.i_mode \u0026amp; ILARG) != 0) == // node is in \u0026#34;large mode\u0026#34; in small mode, the inode stores in i_addr the block numbers to the data in large mode, the inode stores in the first seven numbers in i_addr block numbers to blocks that contain block numbers (512/2 = 256 block numbers, which are chars); the eighth number points to doubly indirect blocks that contain block numbers that point to other blocks this 
is called indirect addressing\nindirect addressing uses more steps to get to the data, and requires more blocks to get to the block numbers.\nin large mode, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which is fine now.\nDirectory Folders! Directory is a container that contains files, folders, directories, etc.! Its a file container.\nAll files ultimately live within root directory /. Absolute paths start with root directory, and gets you to the file. Relative paths start at the current folder, and gets you to the file File names are NOT stored within the inode. They are stored in directories.\nUnix stores 16 byte unsorted \u0026ldquo;directory entires\u0026rdquo; to represent directories:\nDirectory Entries struct dirent { uint16_t d_inumber; // inode number of this file char d_name[14]; // the name; *NOT NECESSARILY NULL TERMINATED* } THE NAME MAY NOT BE NULL TERMINATED to cram max things. You have to use strncmp because it may not be terminated.\nLookup Start at the root directory, /. We want to go to the root directory, and find the entry named /classes/, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\nA directory is basically just a file whose payload is a list of dirent.\nThe inode tells you whether something is a file or a directory. They can be small or large, as usual. 
Root directory always have inode number 1; 0 is reserved to NULL.\nBecause the directory entries are not sorted, in each direcotry the find is a linear search.\nKey Points modularity: subdivision of a system into a collection of smaller systems layers: layer several modules over each other name resolution: system resolves human friendly name to machine friendly names visualization: making one thing look like another Overall theme: multi-level index\nAdvantages can access all block numbers for a file still supports easy sequential access easy to grow files Disadvantages lots of linear directory search Caching Freelist Linked List Linked list of free-blocks\nBitmap Take a bit for every block in the disk, if its 1, its free. If 0, its not free. This allows locality: data likely used next is closed by, we can search local, continuous spaces.\nproblem: as the disk becomes full, we have to search basically \\(O(n)\\) for each bit until we find the free block\u0026mdash;as the disk fills up, it becomes harder to find free space.\nsolution: lie to the user. don\u0026rsquo;t let the disk used up. grantee that there are some free space at all times. we typically reserve \\(10\\%\\).\nBlock Cache Getting blocks is very expensive. We can keep blocks around in memory because we may need to use them in the near future.\nWe will use part of the main memory to retain recently-accessed disk blocks. 
This is NOT at the granularity of individual files.\nLRU When you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\nBlock Cache Modification we can either write asap, or delay.\nwrite asap\nsafer: less risk of data loss, written as soon as possible slow: program must wait to proceed until disk I/O completes write delay\ndangerous: may loose data after crash efficient: memory writes is faster ","html":"\u003cp\u003eits a \u003ca href=\"/posts/kbhfile_payload_data/\"\u003eFile Payload Data\u003c/a\u003e with smartness.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSector Size\u003c/th\u003e\n\u003cth\u003eBlock Size\u003c/th\u003e\n\u003cth\u003eInode Size\u003c/th\u003e\n\u003cth\u003eInodes Per Block\u003c/th\u003e\n\u003cth\u003eAddress Type\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e32\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003ctd\u003eShort, 2 bytes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNotably, the entire file system only supports \\(2^{16} = 32MB\\) worth of space due to short address types.\u003c/p\u003e\n\u003cp\u003eFor each file on the disk, we store payload data in a bunch of places scattered across the disk, and a \u003cstrong\u003esingle\u003c/strong\u003e \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e which stores the location of each block for the file in an array.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es contain an \u003cstrong\u003eordered\u003c/strong\u003e list of block numbers, file size, permissions.\u003c/li\u003e\n\u003cli\u003eall \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are stored together in an \u003ca 
href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e table, which starts at \u003cstrong\u003eblock 2\u003c/strong\u003e. Blocks 0 and 1 are disk metadata.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e can be read into memory individally to cache\u003c/li\u003e\n\u003cli\u003e10% of harddrive is used to \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003eUnix V6 Filesystem\u003c/a\u003e limits the maximum file size in order to keep the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e a finite size.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. 
usually this packs 16 inodes per block\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are 1 indexed\u003c/strong\u003e\u003c/strong\u003e in order to make.\u003c/p\u003e\n\u003ch2 id=\"inode\"\u003einode\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_addr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efile_size\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEach inode contains 8 addresses in shorts in \u003cstrong\u003efile order\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"reading-inode-tables-from-disk\"\u003ereading inode tables from disk\u003c/h3\u003e\n\u003cp\u003eWe read the raw 16-block inode data from the right sector, type coerce it into the inode type, and then read from it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003ereadsector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;addr: %d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e].\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_add\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"inode-modes\"\u003einode modes\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es have two modes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eILARG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e!=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// node is in \u0026#34;large mode\u0026#34;\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003ein \u003cstrong\u003esmall mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in \u003ccode\u003ei_addr\u003c/code\u003e the block numbers to the data\u003c/li\u003e\n\u003cli\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in the \u003cstrong\u003efirst seven\u003c/strong\u003e numbers in \u003ccode\u003ei_addr\u003c/code\u003e block numbers to 
\u003cem\u003eblocks that contain block numbers\u003c/em\u003e (512/2 = 256 block numbers, which are chars); the \u003cstrong\u003eeighth number\u003c/strong\u003e points to \u003cstrong\u003edoubly indirect\u003c/strong\u003e \u003cem\u003eblocks that contain block numbers that point to other blocks\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ethis is called \u003ca href=\"#inode-modes\"\u003eindirect addressing\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#inode-modes\"\u003eindirect addressing\u003c/a\u003e uses more steps to get to the data, and requires more blocks to get to the block numbers.\u003c/p\u003e\n\u003cp\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which is fine now.\u003c/p\u003e\n\u003ch2 id=\"directory\"\u003eDirectory\u003c/h2\u003e\n\u003cp\u003eFolders! Directory is a container that contains files, folders, directories, etc.! Its a \u003cstrong\u003efile container\u003c/strong\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAll files ultimately live within root directory \u003ccode\u003e/\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eAbsolute paths start with root directory, and gets you to the file.\u003c/li\u003e\n\u003cli\u003eRelative paths start at the current folder, and gets you to the file\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFile names are \u003cstrong\u003eNOT\u003c/strong\u003e stored within the inode. 
They are stored in directories.\u003c/p\u003e\n\u003cp\u003eUnix stores 16 byte unsorted \u0026ldquo;directory entires\u0026rdquo; to represent directories:\u003c/p\u003e\n\u003ch3 id=\"directory-entries\"\u003eDirectory Entries\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edirent\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_inumber\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// inode number of this file\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_name\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e14\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the name; *NOT NECESSARILY NULL TERMINATED*\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cstrong\u003eTHE NAME MAY NOT BE NULL TERMINATED\u003c/strong\u003e to cram max things. 
You have to use \u003cstrong\u003estrncmp\u003c/strong\u003e because it may not be terminated.\u003c/p\u003e\n\u003ch3 id=\"lookup\"\u003eLookup\u003c/h3\u003e\n\u003cp\u003eStart at the root directory, \u003ccode\u003e/\u003c/code\u003e. We want to go to the root directory, and find the entry named \u003ccode\u003e/classes/\u003c/code\u003e, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\u003c/p\u003e\n\u003cp\u003eA directory is basically just a \u003cstrong\u003efile whose payload is a list of \u003ccode\u003edirent\u003c/code\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThe inode tells you whether something is a file or a directory. They can be small or large, as usual. Root directory always have inode number \u003ccode\u003e1\u003c/code\u003e; \u003ccode\u003e0\u003c/code\u003e is reserved to NULL.\u003c/p\u003e\n\u003cp\u003eBecause the directory entries are not sorted, in each direcotry the find is a linear search.\u003c/p\u003e\n\u003ch2 id=\"key-points\"\u003eKey Points\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emodularity\u003c/strong\u003e: subdivision of a system into a collection of smaller systems\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elayers\u003c/strong\u003e: layer several modules over each other\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ename resolution\u003c/strong\u003e: system resolves human friendly name to machine friendly names\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evisualization\u003c/strong\u003e: making one thing look like another\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOverall theme: \u003cem\u003emulti-level index\u003c/em\u003e\u003c/p\u003e\n\u003ch3 id=\"advantages\"\u003eAdvantages\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ecan access all block numbers for a file\u003c/li\u003e\n\u003cli\u003estill supports easy sequential access\u003c/li\u003e\n\u003cli\u003eeasy to grow 
files\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"disadvantages\"\u003eDisadvantages\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003elots of linear directory search\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"caching\"\u003eCaching\u003c/h2\u003e\n\u003ch3 id=\"freelist\"\u003eFreelist\u003c/h3\u003e\n\u003ch4 id=\"linked-list\"\u003eLinked List\u003c/h4\u003e\n\u003cp\u003eLinked list of free-blocks\u003c/p\u003e\n\u003ch4 id=\"bitmap\"\u003eBitmap\u003c/h4\u003e\n\u003cp\u003eTake a bit for every block in the disk, if its 1, its free. If 0, its not free. This allows \u003cem\u003elocality\u003c/em\u003e: data likely used next is closed by, we can search local, continuous spaces.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eproblem\u003c/strong\u003e: as the disk becomes full, we have to search basically \\(O(n)\\) for each bit until we find the free block\u0026mdash;as the disk fills up, it becomes harder to find free space.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003esolution\u003c/strong\u003e: lie to the user. don\u0026rsquo;t let the disk used up. grantee that there are some free space at all times. we typically reserve \\(10\\%\\).\u003c/p\u003e\n\u003ch3 id=\"block-cache\"\u003eBlock Cache\u003c/h3\u003e\n\u003cp\u003eGetting blocks is very expensive. We can keep blocks around in memory because we may need to use them in the near future.\u003c/p\u003e\n\u003cp\u003eWe will use part of the main memory to retain recently-accessed disk blocks. 
This is \u003cstrong\u003eNOT\u003c/strong\u003e at the granularity of individual files.\u003c/p\u003e\n\u003ch4 id=\"lru\"\u003eLRU\u003c/h4\u003e\n\u003cp\u003eWhen you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\u003c/p\u003e\n\u003ch4 id=\"block-cache-modification\"\u003eBlock Cache Modification\u003c/h4\u003e\n\u003cp\u003ewe can either \u003cstrong\u003ewrite asap\u003c/strong\u003e, or \u003cstrong\u003edelay\u003c/strong\u003e.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ewrite asap\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esafer\u003c/strong\u003e: less risk of data loss, written as soon as possible\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eslow\u003c/strong\u003e: program must wait to proceed until disk I/O completes\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ewrite delay\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edangerous\u003c/strong\u003e: may loose data after crash\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eefficient\u003c/strong\u003e: memory writes is faster\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunix_v6_filesystem/","tags":null,"title":"Unix V6 Filesystem"},{"categories":null,"contents":"A matrix is upper-triangular if the entries below the diagonal are \\(0\\):\n\\begin{equation} \\mqty(\\lambda_{1} \u0026amp; \u0026amp; * \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp; \u0026amp; \\lambda_{n}) \\end{equation}\nproperties of upper-triangular matrix Suppose \\(T \\in \\mathcal{L}(V)\\), and \\(v_1 \u0026hellip; v_{n}\\) is a basis of \\(V\\). Then:\nthe matrix of \\(T\\) w.r.t. 
\\(v_1 \u0026hellip; v_{n}\\) is upper-triangular \\(Tv_{j} \\in span(v_1 \\dots v_{j})\\) for each \\(v_{j}\\) \\(span(v_{1}, \u0026hellip; v_{j})\\) is invariant under \\(T\\) for each \\(v_{j}\\) \\(1 \\implies 2\\) Recall that our matrix \\(A=\\mathcal{M}(T)\\) is upper-triangular. So, for any \\(v_{j}\\) sent through \\(A\\), it will be multiplied to the $j$-th column vector of the matrix. Now, that $j$-th column has \\(0\\) for rows \\(j+1 \u0026hellip; n\\), meaning that only through a linear combination of the first \\(j\\) vectors we can construct \\(T v_{j}\\). Hence, \\(Tv_{j} \\in span(v_1 \u0026hellip; v_{j})\\)\n\\(3 \\implies 2\\) \u0026ldquo;obviously\u0026rdquo;\nAll \\(v_{j} \\in span(v_1, \\dots v_{j})\\), and yet \\(T v_{j} \\in span (v_{1}, \u0026hellip; v_{j})\\) as it is given. Hence, \\(span(v_1, \u0026hellip; v_{j})\\) is invariant under \\(T\\).\n\\(2 \\implies 3\\) Let \\(v \\in span(v_1, \u0026hellip; v_{j})\\); meaning: \\(v = a_1 v_1 + \u0026hellip; + a_{j} v_{j}\\). Now, \\(Tv = a_1 T v_{1} + \u0026hellip; + a_{j} T v_{j}\\). Recall now we are given \\(T v_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for each \\(v_{j}\\) (of course if \\(T{v_{1}} \\in span(v_{1})\\) it is also in \\(span(v_1, \u0026hellip; v_{j})\\) so the statement make sense.) Therefore, a linear combinations of \\(T v_{j}\\) also is in \\(span(v_1 \u0026hellip; v_{j})\\). Making the latter invariant under \\(T\\). \\(\\blacksquare\\)\nevery complex operator has an upper-triangular matrix Suppose \\(V\\) is a finite-dimensional complex vector space, with an operator \\(T \\in \\mathcal{L}(V)\\). Then, \\(T\\) has an upper-triangular matrix w.r.t. 
some basis of \\(V\\).\nProof:\nWe will use induction.\nInductive hypothesis: given dimension of \\(V\\), \\(T \\in \\mathcal{L}(V)\\) has an upper-triangular matrix for a basis of \\(V\\).\nBase case: \\(\\dim V=1\\)\nIf \\(\\dim V = 1\\), any matrix of \\(T\\) is technically upper-triangular because its just one number \\(\\mqty(a)\\).\nStep: \\(\\dim V = n\\), and \\(T \\in \\mathcal{L}(V)\\)\nBecause operators on complex vector spaces have an eigenvalue, let \\(v_1\\) be an eigenvector corresponding to an eigenvalue of \\(T\\). Now, create an invariant subspace \\(U = span(v_1)\\). (it is invariant because \\(v_1\\) is an eigenvalue). Now, evidently \\(\\dim U =1\\).\nNow, \\(\\dim V / U = n-1\\), the previous step from induction tells us that there exists a upper-triangular matrix for \\(T/U \\in \\mathcal{L}(V / U)\\). Specifically, because of the properties of upper-triangular matrix, it tells us that there is a basis \\(v_{2} + U \u0026hellip; v_{n} + U\\) such that its span is invariant under \\(T / U\\). Meaning:\n\\begin{equation} (T / U) (v_{j} + U ) \\in span( v_{2} + U \\dots v_{j} + U) \\end{equation}\nWriting it out:\n\\begin{equation} T v_{j} + U = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + U \\end{equation}\nSpecifically, this means, there exists at least one pair \\(u_1, u_2\\) for which:\n\\begin{equation} T v_{j} + u_1 = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + u_2 \\end{equation}\nAnd so:\n\\begin{equation} T v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + (u_2 - u_1) \\end{equation}\nAnd since \\(\\{v_1\\}\\) is a basis of \\(U\\), and \\(u_2 - u_1 \\in U\\), we can say:\n\\begin{equation} T v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + a_1 v_1 \\end{equation}\nHence:\n\\begin{equation} T v_{j} \\in span(v_1, \\dots v_{j}) \\end{equation}\nIt has been shown in the past (see Linear Algebra Errors) that if a list form a basis of \\(V /U\\) and another form a basis of \\(U\\) then the two lists combined form a basis of the whole thing \\(V\\). 
So \\(v_1 \u0026hellip; v_{j}\\) is a basis of \\(V\\).\nNow, by the properties of upper-triangular matrix again, we have that there exists an upper-triangular matrix of \\(T\\) for \\(\\dim V = n\\). \\(\\blacksquare\\)\noperator is only invertible if diagonal of its upper-triangular matrix is nonzero Suppose \\(T \\in \\mathcal{L}(V)\\) has an upper-triangular matrix w.r.t. a basis of \\(V\\). Then, \\(T\\) is invertable IFF all the entries on the diagonal of the upper-triangular matrix is nonzero.\nassume nonzero diagonal Let \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the diagonal entries of \\(T\\). Per given, let there be an upper-triangular matrix of \\(T\\) under the basis \\(v_1 \u0026hellip; v_{n}\\). The matrix w.r.t. \\(T\\)\u0026rsquo;s matrix being upper-triangular under the list of \\(v_{j}\\) means that:\n\\begin{equation} T v_1 = \\lambda_{1} v_1 \\end{equation}\n(because \\(T v_{j} \\in span(v_1 \u0026hellip; v_{j})\\), and let \\(j=1\\)). And so:\n\\begin{equation} T \\frac{v_1}{\\lambda_{1}} = v_{1} \\end{equation}\n(legal as \\(\\lambda_{j} \\neq 0\\) per given).\nThus, \\(v_1 \\in range(T)\\).\nIn a similar fashion, let:\n\\begin{equation} T v_{2} = a v_{1} + \\lambda_{2} v_{2} \\end{equation}\n(\\(a\\) being the element just to the right of the \\(\\lambda_{1}\\) diagonal; recall again that \\(T\\)\u0026rsquo;s matrix under \\(v_{j}\\) is upper-triangular)\nNow:\n\\begin{equation} T \\frac{v_2}{\\lambda 2} = \\frac{a}{\\lambda_{2}} v_{1} + v_{2} \\end{equation}\nThe left side is in range \\(T\\) by definition; the right side\u0026rsquo;s \\(\\frac{a}{\\lambda 2} v_{1} \\in range\\ T\\) and hence so is its scaled versions. Thus, \\(v_2 \\in range\\ T\\).\nContinuing in this fashion, we have all \\(v_{j} \\in range\\ T\\). So \\(T\\) is surjective as it can hit all basis of \\(V\\). Now, injectivity is surjectivity in finite-dimensional operators, so \\(T\\) is invertable, as desired.\nassume invertible We will prove this by induction. 
Let \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the diagonal entries of \\(T\\).\nInductive hypothesis: \\(\\lambda_{j} \\neq 0\\)\nBase case: \\(\\lambda_{1} \\neq 0\\) because if not, \\(T v_{1} = 0\\) and \\(v_{1} \\neq 0\\) as it is part of a basis so that would make \\(T\\) not injective and hence not invertable. Hence, by contradiction, \\(\\lambda_{1} = 0\\).\nStep: \\(\\lambda_{j}\\)\nSuppose for the sake of contradiction \\(\\lambda_{j} = 0\\). This means that the basis \\(v_{j}\\) is mapped to somewhere in \\(span(v_{1}, \u0026hellip; v_{j-1})\\) as only the top \\(j-1\\) slots are non-zero for the $j$-th column. And so, \\(T\\), under the assumption, would map \\(span(v_1, \u0026hellip; v_{j})\\) into \\(span(v_1, \u0026hellip; v_{j-1})\\).\nNow, because \\(v_{j}\\) are linearly independent (they form a basis after all), \\(\\dim span(v_1, \u0026hellip; v_{j}) = j\\) and \\(\\dim span(v_1, \u0026hellip;, v_{j-1}) = j-1\\). Now, as \\(T\\) restricted on \\(span(v_1, ..v_{j})\\) maps to a smaller subspace, it is not injective. So, \\(T\\) as a whole is not injective, so it is not invertable. Reaching contradiction, \\(\\blacksquare\\).\neigenvalues of a map are the entries of the diagonal of its upper-triangular matrix The matrix of \\(T-\\lambda I\\) for an upper-triangular form of \\(T\\) would look like:\n\\begin{equation} \\mqty(\\lambda_{1} - \\lambda \u0026amp;\u0026amp;* \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp;\u0026amp;\\lambda_{n} - \\lambda) \\end{equation}\nwhere \\(\\lambda_{j}\\) are the diagonals of the upper-triangular form of \\(T\\), and \\(\\lambda\\) an eigenvalue of \\(T\\).\nRecall that operator is only invertible if diagonal of its upper-triangular matrix is nonzero; so if \\(\\lambda\\) equals any of the \\(\\lambda_{j}\\), it will make the matrix above for \\(T - \\lambda I\\) not invertable as one of its diagonal will be \\(0\\). 
Recall the properties of eigenvalues, specifically that \\(\\lambda\\) is an eigenvalue IFF \\((T-\\lambda I)\\) is not invertable. Hence, each \\(\\lambda_{j}\\) is an eigenvalue of \\(T\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e if the entries below the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e are \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(\\lambda_{1} \u0026amp; \u0026amp; * \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp; \u0026amp; \\lambda_{n})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"properties-of-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), and \\(v_1 \u0026hellip; v_{n}\\) is a basis of \\(V\\). Then:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe matrix of \\(T\\) w.r.t. \\(v_1 \u0026hellip; v_{n}\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(Tv_{j} \\in span(v_1 \\dots v_{j})\\) for each \\(v_{j}\\)\u003c/li\u003e\n\u003cli\u003e\\(span(v_{1}, \u0026hellip; v_{j})\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\) for each \\(v_{j}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"1-implies-2\"\u003e\\(1 \\implies 2\\)\u003c/h3\u003e\n\u003cp\u003eRecall that our matrix \\(A=\\mathcal{M}(T)\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e. So, for any \\(v_{j}\\) sent through \\(A\\), it will be multiplied to the $j$-th column \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e of the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e. 
Now, that $j$-th column has \\(0\\) for rows \\(j+1 \u0026hellip; n\\), meaning that only through a linear combination of the first \\(j\\) vectors we can construct \\(T v_{j}\\). Hence, \\(Tv_{j} \\in span(v_1 \u0026hellip; v_{j})\\)\u003c/p\u003e\n\u003ch3 id=\"3-implies-2\"\u003e\\(3 \\implies 2\\)\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;obviously\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eAll \\(v_{j} \\in span(v_1, \\dots v_{j})\\), and yet \\(T v_{j} \\in span (v_{1}, \u0026hellip; v_{j})\\) as it is given. Hence, \\(span(v_1, \u0026hellip; v_{j})\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\).\u003c/p\u003e\n\u003ch3 id=\"2-implies-3\"\u003e\\(2 \\implies 3\\)\u003c/h3\u003e\n\u003cp\u003eLet \\(v \\in span(v_1, \u0026hellip; v_{j})\\); meaning: \\(v = a_1 v_1 + \u0026hellip; + a_{j} v_{j}\\). Now, \\(Tv = a_1 T v_{1} + \u0026hellip; + a_{j} T v_{j}\\). Recall now we are given \\(T v_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for each \\(v_{j}\\) (of course if \\(T{v_{1}} \\in span(v_{1})\\) it is also in \\(span(v_1, \u0026hellip; v_{j})\\) so the statement make sense.) Therefore, a linear combinations of \\(T v_{j}\\) also is in \\(span(v_1 \u0026hellip; v_{j})\\). Making the latter \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\). \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"every-complex-operator-has-an-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eevery complex operator has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(V\\) is a finite-dimensional \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e, with an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\). 
Then, \\(T\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e w.r.t. some \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eWe will use induction.\u003c/p\u003e\n\u003cp\u003eInductive hypothesis: given dimension of \\(V\\), \\(T \\in \\mathcal{L}(V)\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e for a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eBase case: \\(\\dim V=1\\)\u003c/p\u003e\n\u003cp\u003eIf \\(\\dim V = 1\\), any matrix of \\(T\\) is technically \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e because its just one number \\(\\mqty(a)\\).\u003c/p\u003e\n\u003cp\u003eStep: \\(\\dim V = n\\), and \\(T \\in \\mathcal{L}(V)\\)\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e, let \\(v_1\\) be an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e corresponding to an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). Now, create an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e \\(U = span(v_1)\\). (it is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e because \\(v_1\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e). Now, evidently \\(\\dim U =1\\).\u003c/p\u003e\n\u003cp\u003eNow, \\(\\dim V / U = n-1\\), the previous step from induction tells us that there exists a \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e for \\(T/U \\in \\mathcal{L}(V / U)\\). 
Specifically, because of the \u003ca href=\"#properties-of-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eproperties of upper-triangular matrix\u003c/a\u003e, it tells us that there is a basis \\(v_{2} + U \u0026hellip; v_{n} + U\\) such that its \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T / U\\). Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(T / U) (v_{j} + U ) \\in span( v_{2} + U \\dots v_{j} + U)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWriting it out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} + U = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSpecifically, this means, there exists at least one pair \\(u_1, u_2\\) for which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} + u_1 = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + u_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + (u_2 - u_1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd since \\(\\{v_1\\}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U\\), and \\(u_2 - u_1 \\in U\\), we can say:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + a_1 v_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} \\in span(v_1, \\dots v_{j})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIt has been shown in the past (see \u003ca href=\"/posts/kbhlinear_algebra_errors/\"\u003eLinear Algebra Errors\u003c/a\u003e) that if a list form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V /U\\) and another form a basis of \\(U\\) then the two lists combined form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the whole thing \\(V\\). 
So \\(v_1 \u0026hellip; v_{j}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eNow, by the \u003ca href=\"#properties-of-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eproperties of upper-triangular matrix\u003c/a\u003e again, we have that there exists an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e of \\(T\\) for \\(\\dim V = n\\). \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"operator-is-only-invertible-if-diagonal--kbhmatricies-dot-md--of-its-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md--is-nonzero\"\u003eoperator is only invertible if \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of its \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e is nonzero\u003c/h2\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e w.r.t. a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). Then, \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e IFF all the entries on the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of the \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e is nonzero.\u003c/p\u003e\n\u003ch3 id=\"assume-nonzero-diagonal\"\u003eassume nonzero diagonal\u003c/h3\u003e\n\u003cp\u003eLet \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e entries of \\(T\\). Per given, let there be an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T\\) under the basis \\(v_1 \u0026hellip; v_{n}\\). 
The \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e w.r.t. \\(T\\)\u0026rsquo;s matrix being \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e under the list of \\(v_{j}\\) means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_1 = \\lambda_{1} v_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(because \\(T v_{j} \\in span(v_1 \u0026hellip; v_{j})\\), and let \\(j=1\\)). And so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT \\frac{v_1}{\\lambda_{1}} = v_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(legal as \\(\\lambda_{j} \\neq 0\\) per given).\u003c/p\u003e\n\u003cp\u003eThus, \\(v_1 \\in range(T)\\).\u003c/p\u003e\n\u003cp\u003eIn a similar fashion, let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{2} = a v_{1} + \\lambda_{2} v_{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\\(a\\) being the element just to the right of the \\(\\lambda_{1}\\) diagonal; recall again that \\(T\\)\u0026rsquo;s matrix under \\(v_{j}\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT \\frac{v_2}{\\lambda 2} = \\frac{a}{\\lambda_{2}} v_{1} + v_{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe left side is in \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e \\(T\\) by definition; the right side\u0026rsquo;s \\(\\frac{a}{\\lambda 2} v_{1} \\in range\\ T\\) and hence so is its scaled versions. Thus, \\(v_2 \\in range\\ T\\).\u003c/p\u003e\n\u003cp\u003eContinuing in this fashion, we have all \\(v_{j} \\in range\\ T\\). So \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e as it can hit all \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
Now, \u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e, so \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, as desired.\u003c/p\u003e\n\u003ch3 id=\"assume-invertible\"\u003eassume invertible\u003c/h3\u003e\n\u003cp\u003eWe will prove this by induction. Let \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e entries of \\(T\\).\u003c/p\u003e\n\u003cp\u003eInductive hypothesis: \\(\\lambda_{j} \\neq 0\\)\u003c/p\u003e\n\u003cp\u003eBase case: \\(\\lambda_{1} \\neq 0\\) because if not, \\(T v_{1} = 0\\) and \\(v_{1} \\neq 0\\) as it is part of a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e so that would make \\(T\\) not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e and hence not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. Hence, by contradiction, \\(\\lambda_{1} = 0\\).\u003c/p\u003e\n\u003cp\u003eStep: \\(\\lambda_{j}\\)\u003c/p\u003e\n\u003cp\u003eSuppose for the sake of contradiction \\(\\lambda_{j} = 0\\). This means that the basis \\(v_{j}\\) is mapped to somewhere in \\(span(v_{1}, \u0026hellip; v_{j-1})\\) as only the top \\(j-1\\) slots are non-zero for the $j$-th column. 
And so, \\(T\\), under the assumption, would map \\(span(v_1, \u0026hellip; v_{j})\\) into \\(span(v_1, \u0026hellip; v_{j-1})\\).\u003c/p\u003e\n\u003cp\u003eNow, because \\(v_{j}\\) are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e (they form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e after all), \\(\\dim span(v_1, \u0026hellip; v_{j}) = j\\) and \\(\\dim span(v_1, \u0026hellip;, v_{j-1}) = j-1\\). Now, as \\(T\\) \u003ca href=\"/posts/kbhmap_restriction_operator/\"\u003erestricted\u003c/a\u003e on \\(span(v_1, ..v_{j})\\) maps to a smaller \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e, \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003eit is not injective\u003c/a\u003e. So, \\(T\\) as a whole is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, so it is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. 
Reaching contradiction, \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch2 id=\"eigenvalues-of-a-map-are-the-entries-of-the-diagonal--kbhmatricies-dot-md--of-its-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eeigenvalues of a map are the entries of the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of its \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T-\\lambda I\\) for an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e form of \\(T\\) would look like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(\\lambda_{1} - \\lambda \u0026amp;\u0026amp;* \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp;\u0026amp;\\lambda_{n} - \\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda_{j}\\) are the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003es of the \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e form of \\(T\\), and \\(\\lambda\\) an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003eRecall that \u003ca href=\"#operator-is-only-invertible-if-diagonal--kbhmatricies-dot-md--of-its-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md--is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e; so if \\(\\lambda\\) equals any of the \\(\\lambda_{j}\\), it will make the matrix above for \\(T - \\lambda I\\) not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e as one of its \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e will be \\(0\\). 
Recall the \u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e, specifically that \\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\((T-\\lambda I)\\) is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. Hence, each \\(\\lambda_{j}\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhupper_triangular_matrix/","tags":null,"title":"upper-triangular matrix"},{"categories":null,"contents":" Investment: Paid for 50% of war bonds Production: ships, tanks, airplanes, etc. \u0026mdash; encourages production Conservation: 5% of the world\u0026rsquo;s population production, 50% of the world\u0026rsquo;s manufactured goods \u0026mdash; rationing, grow goods, etc. ","html":"\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eInvestment\u003c/strong\u003e\u003c/strong\u003e: Paid for 50% of war bonds\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eProduction\u003c/strong\u003e\u003c/strong\u003e: ships, tanks, airplanes, etc. 
\u0026mdash; encourages production\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eConservation\u003c/strong\u003e\u003c/strong\u003e: 5% of the world\u0026rsquo;s population production, 50% of the world\u0026rsquo;s manufactured goods \u0026mdash; rationing, grow goods, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhus_wwii_propaganda/","tags":null,"title":"US WWII Propaganda"},{"categories":null,"contents":"USAYPT or USIYPT is a physics research competition ran by Greg Jacobs.\n2022 My own work doc for the 2022 Tuning Forks problem is here.\nGeneral Tips When in doubt, ask about error prop ANSWER THE RESEARCH QUESTION (elevator) Convey that you understand basics via presentation Have intuition regarding phenomenon Be able to explain every formula from first principles Order of magnitude and dimension analysis Have clear variance in parameters (what did you vary and why) What does the intercepts mean on graphs? \u0026ldquo;Don\u0026rsquo;t be obtuse\u0026rdquo; Connect to simple physics terms Explanations needs to be simple Engage discussion ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhusaypt/\"\u003eUSAYPT\u003c/a\u003e or \u003ca href=\"/posts/kbhusaypt/\"\u003eUSIYPT\u003c/a\u003e is a \u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e research competition ran by Greg Jacobs.\u003c/p\u003e\n\u003ch2 id=\"2022\"\u003e2022\u003c/h2\u003e\n\u003cp\u003eMy own work doc for the 2022 \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003es problem is \u003ca href=\"/posts/kbhtuning_forks/\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"general-tips\"\u003eGeneral Tips\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhen in doubt, ask about error prop\u003c/li\u003e\n\u003cli\u003eANSWER THE RESEARCH QUESTION (elevator)\u003c/li\u003e\n\u003cli\u003eConvey that you understand basics via presentation\u003c/li\u003e\n\u003cli\u003eHave intuition regarding 
phenomenon\u003c/li\u003e\n\u003cli\u003eBe able to explain every formula from first principles\u003c/li\u003e\n\u003cli\u003eOrder of magnitude and dimension analysis\u003c/li\u003e\n\u003cli\u003eHave clear variance in parameters (what did you vary and why)\u003c/li\u003e\n\u003cli\u003eWhat does the intercepts mean on graphs?\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Don\u0026rsquo;t be obtuse\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eConnect to simple physics terms\u003c/li\u003e\n\u003cli\u003eExplanations needs to be simple\u003c/li\u003e\n\u003cli\u003eEngage discussion\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhusaypt/","tags":null,"title":"USAYPT"},{"categories":null,"contents":"User Experience is the\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhuser_experience/\"\u003eUser Experience\u003c/a\u003e is the\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuser_experience/","tags":null,"title":"User Experience"},{"categories":null,"contents":"The User Experience design sprung out of WWII\u0026mdash;in Aerospace engineering.\nThe Design Process The \u0026ldquo;Double Diamond\u0026rdquo; Process\nFirst Round of Going Broad Explore the problem space (what are you users trying to do? why? why is it hard?) Decide what to fix (what is the most high impact problem?) Second Round of Going Broad Test potential solutions (does this fix the problem?) Refine final solution (do all users understand this? can they use them?) 
Usability Heuristics Usability Heuristics is a set of principles used in User Experience design to identify problems and potential solutions.\nVisibility of System Status Keep the users informed about what\u0026rsquo;s actively going on, through appropriate visual feedback placed at an appropriate amount of time.\nMatch Between System and the Real World Use language that\u0026rsquo;s familiar to the user, using words, phrases, concepts familiar to the users rather than internal jargon.\nBalance User Control and Freedom User often perform actions by mistake; mark \u0026ldquo;emergency exits\u0026rdquo; to leave unwanted pathways/actions without causing side effects.\nConsistency and Standards Having consistency between different versions/family of products: putting buttons that do the same thing to the same place across the app, at the same region.\nError Prevention Eliminate error-prone conditions (prevent the users from doing it), or present users with a confirmation before they commit to an erroneous action\nRecognition vs. Recall Users should\u0026rsquo;t need to remember when they are going through an UI; instead they should be able to recognize the intended behavior from the UI\nFlexibility and Efficiency of User Catering functionality to both novice and advanced users. 
Make advanced actions hidden to novice users, but easily accessible for advanced users.\nMinimalism Keep the UI focused on essential actions and information\u0026mdash;maintaining an aesthetic and minimalist design\nHelp Users Recognize, Diagnoses, and Recover from Errors Errors should\u0026hellip;\ngive context for what the problem is instruct the user for possible next actions Help It maybe necessary to provide documentation to help users understand how to complete their tasks; the documentation should be clear\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhux_design/\"\u003eUser Experience\u003c/a\u003e design sprung out of WWII\u0026mdash;in Aerospace engineering.\u003c/p\u003e\n\u003ch2 id=\"the-design-process\"\u003eThe Design Process\u003c/h2\u003e\n\u003cp\u003eThe \u0026ldquo;Double Diamond\u0026rdquo; Process\u003c/p\u003e\n\u003ch3 id=\"first-round-of-going-broad\"\u003eFirst Round of Going Broad\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExplore the problem space (what are you users trying to do? why? why is it hard?)\u003c/li\u003e\n\u003cli\u003eDecide what to fix (what is the most high impact problem?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"second-round-of-going-broad\"\u003eSecond Round of Going Broad\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTest potential solutions (does this fix the problem?)\u003c/li\u003e\n\u003cli\u003eRefine final solution (do all users understand this? 
can they use them?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"usability-heuristics\"\u003eUsability Heuristics\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#usability-heuristics\"\u003eUsability Heuristics\u003c/a\u003e is a set of principles used in \u003ca href=\"/posts/kbhux_design/\"\u003eUser Experience\u003c/a\u003e design to identify problems and potential solutions.\u003c/p\u003e\n\u003ch3 id=\"visibility-of-system-status\"\u003eVisibility of System Status\u003c/h3\u003e\n\u003cp\u003eKeep the users informed about what\u0026rsquo;s actively going on, through appropriate visual feedback placed at an appropriate amount of time.\u003c/p\u003e\n\u003ch3 id=\"match-between-system-and-the-real-world\"\u003eMatch Between System and the Real World\u003c/h3\u003e\n\u003cp\u003eUse language that\u0026rsquo;s familiar to the user, using words, phrases, concepts familiar to the users rather than internal jargon.\u003c/p\u003e\n\u003ch3 id=\"balance-user-control-and-freedom\"\u003eBalance User Control and Freedom\u003c/h3\u003e\n\u003cp\u003eUser often perform actions by mistake; mark \u0026ldquo;emergency exits\u0026rdquo; to leave unwanted pathways/actions without causing side effects.\u003c/p\u003e\n\u003ch3 id=\"consistency-and-standards\"\u003eConsistency and Standards\u003c/h3\u003e\n\u003cp\u003eHaving consistency between different versions/family of products: putting buttons that do the same thing to the same place across the app, at the same region.\u003c/p\u003e\n\u003ch3 id=\"error-prevention\"\u003eError Prevention\u003c/h3\u003e\n\u003cp\u003eEliminate error-prone conditions (prevent the users from doing it), or present users with a confirmation before they commit to an erroneous action\u003c/p\u003e\n\u003ch3 id=\"recognition-vs-dot-recall\"\u003eRecognition vs. 
Recall\u003c/h3\u003e\n\u003cp\u003eUsers should\u0026rsquo;t need to remember when they are going through an UI; instead they should be able to recognize the intended behavior from the UI\u003c/p\u003e\n\u003ch3 id=\"flexibility-and-efficiency-of-user\"\u003eFlexibility and Efficiency of User\u003c/h3\u003e\n\u003cp\u003eCatering functionality to both novice and advanced users. Make advanced actions hidden to novice users, but easily accessible for advanced users.\u003c/p\u003e\n\u003ch3 id=\"minimalism\"\u003eMinimalism\u003c/h3\u003e\n\u003cp\u003eKeep the UI focused on essential actions and information\u0026mdash;maintaining an aesthetic and minimalist design\u003c/p\u003e\n\u003ch3 id=\"help-users-recognize-diagnoses-and-recover-from-errors\"\u003eHelp Users Recognize, Diagnoses, and Recover from Errors\u003c/h3\u003e\n\u003cp\u003eErrors should\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003egive context for what the problem is\u003c/li\u003e\n\u003cli\u003einstruct the user for possible next actions\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"help\"\u003eHelp\u003c/h3\u003e\n\u003cp\u003eIt maybe necessary to provide documentation to help users understand how to complete their tasks; the documentation should be clear\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhux_design/","tags":null,"title":"User Experience"},{"categories":null,"contents":"Goal: understand the user.\nFind out\u0026hellip;\nMotivation Context Deeper need? The goal of user interviews is to understand the user even if they know what they want!\nGood User Interviews Make person feel welcome/safe/appreciated\nAsk open-ended \u0026ldquo;questions\u0026rdquo;\nDescribe a time that\u0026hellip; Tell me more about.. 
Leave space: awkward silences (not too awkward)\nReally listen!; repress the urge to think of what you want to say next\nRepeat statements back to people\nAsk about examples, context, etc.\nA roadmap 1: create a comfortable entry point 2: go wide, deep into more personal and complex questions 3: focus on the problem, not the solution 4: focus on feelings\u0026mdash;feelings matter, how nice matters 5: end with conclusions and statements for what you User Story The user story should contain\u0026hellip;.\nA main character (your user) Character background (motivation) A plot (context) Climax and Resolution Framework describe the user; who are they; what do they like or not like an iStudio classic need statement finish with a description of the emotional impact of using our software ","html":"\u003cp\u003eGoal: \u003cstrong\u003e\u003cstrong\u003eunderstand the user.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eFind out\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMotivation\u003c/li\u003e\n\u003cli\u003eContext\u003c/li\u003e\n\u003cli\u003eDeeper need?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe goal of user interviews is to understand the user even if they know what they want!\u003c/p\u003e\n\u003ch2 id=\"good-user-interviews\"\u003eGood User Interviews\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eMake person feel welcome/safe/appreciated\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAsk open-ended \u0026ldquo;questions\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDescribe a time that\u0026hellip;\u003c/li\u003e\n\u003cli\u003eTell me more about..\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLeave space: awkward silences (not too awkward)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eReally \u003cem\u003elisten!\u003c/em\u003e; repress the urge to think of what you want to say 
next\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRepeat statements back to people\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAsk about examples, context, etc.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"a-roadmap\"\u003eA roadmap\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e1: create a comfortable entry point\u003c/li\u003e\n\u003cli\u003e2: go wide, deep into more personal and complex questions\u003c/li\u003e\n\u003cli\u003e3: focus on the problem, not the solution\u003c/li\u003e\n\u003cli\u003e4: focus on feelings\u0026mdash;feelings matter, how nice matters\u003c/li\u003e\n\u003cli\u003e5: end with conclusions and statements for what you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"user-story\"\u003eUser Story\u003c/h2\u003e\n\u003cp\u003eThe user story should contain\u0026hellip;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eA main character (your user)\u003c/li\u003e\n\u003cli\u003eCharacter background (motivation)\u003c/li\u003e\n\u003cli\u003eA plot (context)\u003c/li\u003e\n\u003cli\u003eClimax and Resolution\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"framework\"\u003eFramework\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003edescribe the user; who are they; what do they like or not like\u003c/li\u003e\n\u003cli\u003ean iStudio classic need statement\u003c/li\u003e\n\u003cli\u003efinish with a description of the emotional impact of using our software\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuser_interviews/","tags":null,"title":"User Interviews"},{"categories":null,"contents":"utility elicitation is the process to go from Rational Preferences to a utility function. 
Its a bad idea to use money to do this, because money is not linear.\nConsider the best and worst possible events:\n\\begin{equation} \\overline{S}, \\underline{S} \\end{equation}\nWe assign the best event to have utility \\(1\\), and worst to have utility \\(0\\):\n\\begin{equation} \\begin{cases} U(\\overline{S}) = 1 \\\\ U(\\underline{S}) = 0 \\end{cases} \\end{equation}\nGiven some test event now \\(S\\), we try to find the \\(p\\) such that we can set up a lottery:\n\\begin{equation} S \\sim [\\overline{S}:p; \\underline{S}:(1-p)] \\end{equation}\nbecause the desirability of \\(S\\) is between the best and worst possible events, the continuity von Neumann and Morgenstern Axiom states that this \\(p\\) exists.\nOnce this \\(p\\) has been figured, we then assign:\n\\begin{equation} U(S) = p \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility elicitation\u003c/a\u003e is the process to go from \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preference\u003c/a\u003es to a \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function. 
\u003cstrong\u003eIts a bad idea to use money to do this, because money is not linear.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eConsider the best and worst possible events:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\overline{S}, \\underline{S}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe assign the best event to have \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e \\(1\\), and worst to have utility \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nU(\\overline{S}) = 1 \\\\\nU(\\underline{S}) = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven some test event now \\(S\\), we try to find the \\(p\\) such that we can set up a \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS \\sim [\\overline{S}:p; \\underline{S}:(1-p)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the desirability of \\(S\\) is between the best and worst possible events, the \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuity\u003c/a\u003e \u003ca href=\"/posts/kbhrational_preference/#von-neumann-and-morgenstern-axioms\"\u003evon Neumann and Morgenstern Axiom\u003c/a\u003e states that this \\(p\\) exists.\u003c/p\u003e\n\u003cp\u003eOnce this \\(p\\) has been figured, we then assign:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(S) = p\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_elicitation/","tags":null,"title":"utility elicitation"},{"categories":null,"contents":"quadratic utility \\begin{equation} U(x) = \\lambda x - x^{2} \\end{equation}\nwhere, \\(\\lambda\u0026gt;0\\) controls risk aversion: as risk increases, utility increases concavely, then eventually utility falls\nexponential utility \\begin{equation} U(x) = 1 - e^{-\\lambda x} \\end{equation}\nwhere \\(\\lambda \u0026gt;0\\) controls risk aversion. 
This is usually not plausible as utility because people\u0026rsquo;s utility doesn\u0026rsquo;t grow exponentially ever\npower utility see power utility\n","html":"\u003ch2 id=\"quadratic-utility\"\u003equadratic utility\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU(x) = \\lambda x - x^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\lambda\u0026gt;0\\) controls \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e: as risk increases, utility increases concavely, then eventually utility falls\u003c/p\u003e\n\u003ch2 id=\"exponential-utility\"\u003eexponential utility\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU(x) = 1 - e^{-\\lambda x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda \u0026gt;0\\) controls \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e. This is usually not plausible as utility because people\u0026rsquo;s utility doesn\u0026rsquo;t grow exponentially ever\u003c/p\u003e\n\u003ch2 id=\"power-utility\"\u003epower utility\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_function/","tags":null,"title":"utility function"},{"categories":null,"contents":"Take the utility function from a bunch of POMDPs and combine them together using a fusion function.\n\\begin{equation} U^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a) \\end{equation}\nwhere \\(f\\) can be sum or min. 
The overall belief \\(b\\) is simply \\(B_1 \\times \u0026hellip; \\times B_{n}\\), which combines all beliefs together.\n","html":"\u003cp\u003eTake the utility function from a bunch of \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es and combine them together using a fusion function.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(f\\) can be \u003ccode\u003esum\u003c/code\u003e or \u003ccode\u003emin\u003c/code\u003e. The overall belief \\(b\\) is simply \\(B_1 \\times \u0026hellip; \\times B_{n}\\), which combines all beliefs together.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_fusion/","tags":null,"title":"utility fusion"},{"categories":null,"contents":"utility theory is a set of theories that deals with rational decision making through maximizing the expected utility.\nutility theory can be leveraged to choose the right actions in the observe-act cycle in a graphical network via decision networks\nadditional information never have a utility function that\u0026rsquo;s infinite If something has infinite utility, doing two of the good things is the same as doing one good thing, which is wrong.\nSay going to a Taylor concert has \\(+\\infty\\) utility. Then, you would be indifferent to the difference between Taylor + Harry vs. Taylor only. However, the former case clearly has higher utility as long as Harry concert doesn\u0026rsquo;t have negative utility.\nutility elicitation see utility elicitation\nexpected utility expected utility is the utility we expect from taking an action \\(a\\) at a state \\(o\\). 
To compute it based on transition probabilities:\n\\begin{equation} EU(a|o) = \\sum_{s\u0026rsquo;} p(s\u0026rsquo; | a,o) U(s\u0026rsquo;) \\end{equation}\nthe expected utility of taking some action \\(a\\) at an observation \\(o\\) is the probability of any given next state \\(s\u0026rsquo;\\) happening times the utility of being in that state \\(U(s\u0026rsquo;)\\).\nSee also expected utility of wealth.\nmaximum expected utility principle MEU states that a rational agent should choose an action which maximizes expected utility. That is,\n\\begin{equation} a^{*} = \\arg\\max_{a} EU(a|o) \\end{equation}\nNotably, this is not always the best action. This action maximizes utility NOT outcome.\nutility of Rational Preference For rational values, for two situations, \\(A, B\\), we have, with utility function \\(U\\):\n\\begin{equation} U(A) \u0026gt; U(B) \\iff A \\succ B \\end{equation}\n\\begin{equation} U(A) = U(B) \\iff A \\sim B \\end{equation}\nand this \\(U\\) is unique up to the same affine transformation\nrisk aversion see risk aversion\ncommon utility functions see utility function\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility theory\u003c/a\u003e is a set of theories that deals with rational decision making through maximizing the \u003cstrong\u003eexpected utility\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility theory\u003c/a\u003e can be leveraged to choose the right actions in the \u003ca href=\"/posts/kbhobserve_act_cycle/\"\u003eobserve-act cycle\u003c/a\u003e in a graphical network via \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision networks\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"never-have-a-utility-function-that-s-infinite\"\u003enever have a utility function that\u0026rsquo;s infinite\u003c/h3\u003e\n\u003cp\u003eIf something has infinite utility, doing two of the 
good things is the same as doing one good thing, which is wrong.\u003c/p\u003e\n\u003cp\u003eSay going to a Taylor concert has \\(+\\infty\\) utility. Then, you would be indifferent to the difference between Taylor + Harry vs. Taylor only. However, the former case clearly has higher utility as long as Harry concert doesn\u0026rsquo;t have negative utility.\u003c/p\u003e\n\u003ch3 id=\"utility-elicitation--kbhutility-elicitation-dot-md\"\u003e\u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility elicitation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility elicitation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"expected-utility\"\u003eexpected utility\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#expected-utility\"\u003eexpected utility\u003c/a\u003e is the utility we expect from taking an action \\(a\\) at a state \\(o\\). To compute it based on transition probabilities:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEU(a|o) = \\sum_{s\u0026rsquo;} p(s\u0026rsquo; | a,o) U(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe expected \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of taking some action \\(a\\) at an observation \\(o\\) is the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of any given next state \\(s\u0026rsquo;\\) happening times the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of being in that state \\(U(s\u0026rsquo;)\\).\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003eexpected utility of wealth\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"maximum-expected-utility-principle\"\u003emaximum expected utility principle\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#maximum-expected-utility-principle\"\u003eMEU\u003c/a\u003e states that a rational agent should choose an action which maximizes \u003ca href=\"#expected-utility\"\u003eexpected utility\u003c/a\u003e. 
That is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na^{*} = \\arg\\max_{a} EU(a|o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotably, this is \u003cstrong\u003enot always the best action\u003c/strong\u003e. This action maximizes \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e \u003cstrong\u003eNOT\u003c/strong\u003e outcome.\u003c/p\u003e\n\u003ch3 id=\"utility-of-rational-preference--kbhrational-preference-dot-md\"\u003eutility of \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preference\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor rational values, for two situations, \\(A, B\\), we have, with \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function \\(U\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(A) \u0026gt; U(B) \\iff A \\succ B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(A) = U(B) \\iff A \\sim B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand this \\(U\\) is unique up to the same \u003ca href=\"/posts/kbhaffine_transformation/\"\u003eaffine transformation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"risk-aversion--kbhexpected-utility-of-wealth-dot-md\"\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"common-utility-functions\"\u003ecommon utility functions\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility function\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_theory/","tags":null,"title":"utility theory"},{"categories":null,"contents":"We apply the Bellman Expectation Equation and selecting the utility that is calculated by taking the most optimal action given the current utility:\n\\begin{equation} U_{k+1}(s) = \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) 
U_{k}(s\u0026rsquo;)) \\end{equation}\nThis iterative process is called the Bellman backup, or Bellman update.\n\\begin{equation} U_1 \\dots U_{k} \\dots U^{*} \\end{equation}\neventually will converge into the optimal value function. After which, we just extract the greedy policy from the utility to get a policy to use.\nWe stop when the Bellman Residual hits a the desired error threshold:\nBellman Residual Take the L-\\(\\infty\\) norm of \\(U^{k+1}-U^{k}\\) (that is, take \\(||U_{k+1} - U_{k}||_{\\infty}\\). We call that the Bellman Residual. If this Bellman Residual drops below \\(\\delta\\), it is shown that the error between \\(U^{*}\\) (convergence) and \\(U_{k}\\) will only be:\n\\begin{equation} \\epsilon = \\frac{\\delta \\gamma}{(1-\\gamma)} \\end{equation}\nSo as long as the Bellman Residual between your two updates \\(\\leq \\delta\\), you know that you are at most \\(\\epsilon\\) away from the optimal utility.\nYou will note that as future discount \\(\\gamma \\to 1\\), this error bound becomes much larger. Therefore, you have to iterate more to get to the same \\(\\epsilon\\). You need more iterations when \\(\\gamma \\to 1\\).\nNotably, the loss of some arbitrary utility derived from policy evaluation is:\n\\begin{equation} || U^{\\pi} - U^{*} || \u0026lt; \\frac{2\\epsilon \\gamma}{1-\\gamma} \\end{equation}\nasynchronous value iteration We choose an ordering of states. We then loop through the entire list, updating the value function. 
Then, we loop through this system multiple times until the system converged.\nThat is, instead of creating a list of things \\(U_{k}\\), keeping only the current current one in memory, we come up with some:\n\\begin{equation} U(s) \\leftarrow \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)) \\end{equation}\nThe idea is, instead of keeping all of the \\(U_{k-1}\\) until you have calculated all of \\(U_{k}\\) for each state, we just use an ordering of the states to just use whatever value we calculated last.\ntime complexity \\begin{equation} O(S^{2}A) \\end{equation}\nwhere \\(S\\) is the number of states and \\(A\\) the number of actions.\nloop over all states in each update loop over all actions to figure out the max loop over all next states and calculate their utility POMDP value-iteration compute alpha vectors for all one-step plans (i.e. conditional plans that does just one action and gives up) alpha vector pruning on any plans that are dominated generate all possible two-step conditional plans over all actions using combinations of non-pruned one-step plans above as SUBPLANS (yes, you can use a one-step plan twice) repeat steps 2-3 see also performing value-iteration naively with one-step lookahead in POMDP.\nPOMDP Bellman Update Say you want to extract a policy out of a bunch of alpha vectors.\nLet \\(\\alpha \\in \\Gamma\\), a set of alpha vectors; we obtain a new alpha vector \\(U\u0026rsquo;(b) = [U(s_0) \\dots U(s_{n})]\\) by:\n\\begin{equation} U\u0026rsquo;(b) = \\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U(b))] \\end{equation}\nwhere:\n\\begin{equation} R(b,a) = \\sum_{s}^{} R(s,a)b(s) \\end{equation}\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\nand\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b 
\\end{equation}\n","html":"\u003cp\u003eWe apply the \u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e and selecting the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e that is calculated by taking the most optimal action given the current \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{k+1}(s) = \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) U_{k}(s\u0026rsquo;))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis iterative process is called the \u003ca href=\"/posts/kbhvalue_iteration/\"\u003eBellman backup\u003c/a\u003e, or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003eBellman update\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\dots U_{k} \\dots U^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeventually will converge into the \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal value function\u003c/a\u003e. After which, we just extract the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e from the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e to get a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e to use.\u003c/p\u003e\n\u003cp\u003eWe stop when the \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e hits a the desired error threshold:\u003c/p\u003e\n\u003ch2 id=\"bellman-residual\"\u003eBellman Residual\u003c/h2\u003e\n\u003cp\u003eTake the \u003ca href=\"/posts/kbhl_infty/\"\u003eL-\\(\\infty\\)\u003c/a\u003e norm of \\(U^{k+1}-U^{k}\\) (that is, take \\(||U_{k+1} - U_{k}||_{\\infty}\\). We call that the \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e. 
If this \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e drops below \\(\\delta\\), it is shown that the error between \\(U^{*}\\) (convergence) and \\(U_{k}\\) will only be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon = \\frac{\\delta \\gamma}{(1-\\gamma)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo as long as the \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e between your two updates \\(\\leq \\delta\\), you know that you are at most \\(\\epsilon\\) away from the \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal utility\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eYou will note that as future discount \\(\\gamma \\to 1\\), this error bound becomes much larger. Therefore, you have to iterate more to get to the same \\(\\epsilon\\).\u003c/strong\u003e You need more iterations when \\(\\gamma \\to 1\\).\u003c/p\u003e\n\u003cp\u003eNotably, the loss of some arbitrary \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e derived from \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|| U^{\\pi} - U^{*} || \u0026lt; \\frac{2\\epsilon \\gamma}{1-\\gamma}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"asynchronous-value-iteration\"\u003easynchronous value iteration\u003c/h2\u003e\n\u003cp\u003eWe choose an ordering of states. We then loop through the entire list, updating the value function. 
Then, we loop through this system multiple times until the system converged.\u003c/p\u003e\n\u003cp\u003eThat is, instead of creating a list of things \\(U_{k}\\), keeping only the current current one in memory, we come up with some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) \\leftarrow \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe idea is, instead of keeping all of the \\(U_{k-1}\\) until you have calculated all of \\(U_{k}\\) for each state, we just use an ordering of the states to just use whatever value we calculated last.\u003c/p\u003e\n\u003ch2 id=\"time-complexity\"\u003etime complexity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nO(S^{2}A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(S\\) is the number of states and \\(A\\) the number of actions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eloop over all states in each update\u003c/li\u003e\n\u003cli\u003eloop over all actions to figure out the max\u003c/li\u003e\n\u003cli\u003eloop over all next states and calculate their \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"pomdp--kbhpartially-observable-markov-decision-process-dot-md--value-iteration\"\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e value-iteration\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecompute \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es for all one-step plans (i.e. 
\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es that does just one action and gives up)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e on any plans that are dominated\u003c/li\u003e\n\u003cli\u003egenerate all possible two-step \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es over all actions using combinations of non-pruned one-step plans above as \u003cstrong\u003e\u003cstrong\u003eSUBPLANS\u003c/strong\u003e\u003c/strong\u003e (yes, you can use a one-step plan twice)\u003c/li\u003e\n\u003cli\u003erepeat steps 2-3\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003esee also performing value-iteration naively with \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"pomdp-bellman-update\"\u003ePOMDP Bellman Update\u003c/h2\u003e\n\u003cp\u003eSay you want to extract a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eLet \\(\\alpha \\in \\Gamma\\), a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es; we obtain a new \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e \\(U\u0026rsquo;(b) = [U(s_0) \\dots U(s_{n})]\\) by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU\u0026rsquo;(b) = \\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U(b))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(b,a) = \\sum_{s}^{} R(s,a)b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) 
b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvalue_iteration/","tags":null,"title":"value iteration"},{"categories":null,"contents":"Say we have a system:\nStates: 4\u0026mdash;school, internship, job, jungle Actions: 2\u0026mdash;stay, graduate create transition model Create tables of size \\(S \\times S\\) (that is, 4x4), one for each action. These are our transition models. Rows are the states where we took the action, columns are the states which are the results of the action, and the values are the probability of that transition happening given you took the action.\nEach row should sum up to \\(1\\): after an action, you should always end up at some state.\nenumerate rewards and discount for us, we are going to say that:\n\\(R(s_1)= -1\\) \\(R(s_2)= +1\\) \\(R(s_3) = +5\\) the rest of this should work if your states are parameterized by action.\nWe are going to discount by \\(0.9\\)\niterate! for each state\u0026hellip; calculate the values within the sum of the Bellman update for each action as well as the instantaneous reward for being in that state get the maximum value of that store for the next iteration ","html":"\u003cp\u003eSay we have a system:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-20_12-11-30_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eStates: 4\u0026mdash;school, internship, job, jungle\u003c/li\u003e\n\u003cli\u003eActions: 2\u0026mdash;stay, graduate\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"create-transition-model\"\u003ecreate transition model\u003c/h2\u003e\n\u003cp\u003eCreate tables of size \\(S \\times S\\) (that is, 4x4), one for each action. These are our transition models. 
Rows are the states where we took the action, columns are the states which are the results of the action, and the values are the probability of that transition happening given you took the action.\u003c/p\u003e\n\u003cp\u003eEach row should sum up to \\(1\\): after an action, you should always end up at \u003cem\u003esome\u003c/em\u003e state.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-20_12-18-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"enumerate-rewards-and-discount\"\u003eenumerate rewards and discount\u003c/h2\u003e\n\u003cp\u003efor us, we are going to say that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(R(s_1)= -1\\)\u003c/li\u003e\n\u003cli\u003e\\(R(s_2)= +1\\)\u003c/li\u003e\n\u003cli\u003e\\(R(s_3) = +5\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ethe rest of this should work if your states are parameterized by action.\u003c/p\u003e\n\u003cp\u003eWe are going to discount by \\(0.9\\)\u003c/p\u003e\n\u003ch2 id=\"iterate\"\u003eiterate!\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003efor each state\u0026hellip;\n\u003col\u003e\n\u003cli\u003ecalculate the values within the sum of the \u003ca href=\"/posts/kbhvalue_iteration/\"\u003eBellman update\u003c/a\u003e for each action as well as the instantaneous reward for being in that state\u003c/li\u003e\n\u003cli\u003eget the maximum value of that\u003c/li\u003e\n\u003cli\u003estore for the next iteration\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvalue_iteration_in_practice/","tags":null,"title":"value iteration, in practice"},{"categories":null,"contents":"VOI is a measure of how much observing something changes your action if you are a rational agent.\nThe value of information a measure for how much observing an additional variable is expected to increase our utility. 
VOI can never be negative, and does not take into account the COST of performing the observation.\nconstituents \\(o\\): an observation \\(O\u0026rsquo;\\): a possible observation to run which yield \\(o\u0026rsquo;_{j}\\) different outcomes requirements \\begin{equation} VOI(O\u0026rsquo;|o) = (\\sum_{o\u0026rsquo;} P(o\u0026rsquo;|o) EU^{*}(o, o\u0026rsquo;)) - EU^{*}(o) \\end{equation}\nwhere, \\(EU^{*}(o_{1} \\dots o_{n})\\) is the maximum expected utility given observations \\(o_1, \u0026hellip;, o_{n}\\), that is:\n\\begin{equation} EU^{*}(o_1, \\dots, o_{n}) = \\max_{a} EU(o_1, \\dots, o_{n}) \\end{equation}\n\u0026ldquo;the value of an observation is the sum of the MEU of each possible outcome from that new observation, time their probability of occurance, subtracted by the MEU of the current observation\u0026rdquo;\nadditional information process of observation selection Here\u0026rsquo;s how you would select what variables to observe.\nmake observation determine value of information of anything you haven\u0026rsquo;t observed yet select the next feature to observe repeat 1-3 wait until its no longer beneficial to observe any more variables make decision based on observations This is not the true optimum: its only a heuristic!\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvalue_of_information/\"\u003eVOI\u003c/a\u003e is a measure of how much observing something changes your action if you are a rational agent.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhvalue_of_information/\"\u003evalue of information\u003c/a\u003e a measure for how much observing an additional variable is expected to \u003cstrong\u003eincrease\u003c/strong\u003e our \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e. 
\u003ca href=\"/posts/kbhvalue_of_information/\"\u003eVOI\u003c/a\u003e can never be negative, and does not take into account the \u003cstrong\u003eCOST\u003c/strong\u003e of performing the observation.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(o\\): an observation\u003c/li\u003e\n\u003cli\u003e\\(O\u0026rsquo;\\): a possible observation to run which yield \\(o\u0026rsquo;_{j}\\) different outcomes\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nVOI(O\u0026rsquo;|o) = (\\sum_{o\u0026rsquo;} P(o\u0026rsquo;|o) EU^{*}(o, o\u0026rsquo;)) - EU^{*}(o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(EU^{*}(o_{1} \\dots o_{n})\\) is the \u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003emaximum expected utility\u003c/a\u003e given observations \\(o_1, \u0026hellip;, o_{n}\\), that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEU^{*}(o_1, \\dots, o_{n}) = \\max_{a} EU(o_1, \\dots, o_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the value of an observation is the sum of the \u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003eMEU\u003c/a\u003e of each possible outcome from that new observation, time their probability of occurance, subtracted by the \u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003eMEU\u003c/a\u003e of the current observation\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"process-of-observation-selection\"\u003eprocess of observation selection\u003c/h3\u003e\n\u003cp\u003eHere\u0026rsquo;s how you would select what variables to observe.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003emake observation\u003c/li\u003e\n\u003cli\u003edetermine \u003ca href=\"/posts/kbhvalue_of_information/\"\u003evalue of 
information\u003c/a\u003e of anything you haven\u0026rsquo;t observed yet\u003c/li\u003e\n\u003cli\u003eselect the next feature to observe\u003c/li\u003e\n\u003cli\u003erepeat 1-3\u003c/li\u003e\n\u003cli\u003ewait until its no longer beneficial to observe any more variables\u003c/li\u003e\n\u003cli\u003emake decision based on observations\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is not the true optimum: its only a heuristic!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvalue_of_information/","tags":null,"title":"value of information"},{"categories":null,"contents":"variance (also known as second central moment) is a way of measuring spread:\n\\begin{align} Var(X) \u0026amp;= E[(X-E(X))^{2}] \\\\ \u0026amp;= E[X^{2}] - (E[X])^{2} \\\\ \u0026amp;= \\qty(\\sum_{x}^{} x^{2} p\\qty(X=x)) - (E[X])^{2} \\end{align}\n\u0026ldquo;on average, how far is the probability of \\(X\\) from its expectation\u0026rdquo;\nThe expression(s) are derived below. Recall that standard deviation is a square root of the variance.\ncomputing variance: \\begin{align} Var(X) \u0026amp;= E[(X - \\mu)^{2}] \\\\ \u0026amp;= \\sum_{x}^{} (x-\\mu)^{2} p(X) \\end{align}\nbased on the law of the Unconscious statistician. 
And then, we do algebra:\nSo, for any random variable \\(X\\), we say:\n\\begin{align} Var(X) \u0026amp;= E[X^{2}] - (E[X])^{2} \\\\ \u0026amp;= \\qty(\\sum_{x}^{} x^{2} p(X=x)) - (E[X])^{2} \\end{align}\nbased on the law of Unconscious statistician.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e (also known as \u003ca href=\"/posts/kbhvariance/\"\u003esecond central moment\u003c/a\u003e) is a way of measuring spread:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(X) \u0026amp;= E[(X-E(X))^{2}] \\\\\n\u0026amp;= E[X^{2}] - (E[X])^{2} \\\\\n\u0026amp;= \\qty(\\sum_{x}^{} x^{2} p\\qty(X=x)) - (E[X])^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;on average, how far is the probability of \\(X\\) from its \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThe expression(s) are derived below. Recall that \u003ca href=\"\"\u003estandard deviation\u003c/a\u003e is a square root of the \u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"computing-variance\"\u003ecomputing variance:\u003c/h2\u003e\n\u003cp\u003e\\begin{align}\nVar(X) \u0026amp;= E[(X - \\mu)^{2}] \\\\\n\u0026amp;= \\sum_{x}^{} (x-\\mu)^{2} p(X)\n\\end{align}\u003c/p\u003e\n\u003cp\u003ebased on the law of the \u003ca href=\"/posts/kbhexpectation/#unconscious-statistician\"\u003eUnconscious statistician\u003c/a\u003e. 
And then, we do algebra:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-13_15-43-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo, for any random variable \\(X\\), we say:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(X) \u0026amp;= E[X^{2}] - (E[X])^{2} \\\\\n\u0026amp;= \\qty(\\sum_{x}^{} x^{2} p(X=x)) - (E[X])^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ebased on the law of \u003ca href=\"/posts/kbhexpectation/#unconscious-statistician\"\u003eUnconscious statistician\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvariance/","tags":null,"title":"variance"},{"categories":null,"contents":" Secrets of Silicon Valley - Horowitz Looking for people who have feel for the problem: people need to believe in the problem Team: can people come with execution? people that are good at startups which are usually not good at later stage stuff Buy a startup and kick out the founders This is very typical Team and idea are easy to decouple Vetting problems Lack of market Technically insatiability \u0026ldquo;Unbelievable stupidity\u0026rdquo;: calcium is so cheap Idea goes through many morphs; getting the credit back People wiling to have a meeting? Decoupling value proposition =\u0026gt; iStudio as a service\nRandom Need: Nueva Alumni Network Maybe set up a Nueva alumni network? What could we do to facilitate the Nueva alumni network; extraction of mutual value from the next work.\nNueva alumni as a service.\nInnovation consultants Ideas are no longer valuable, which ideas to peruse is better. 
\u0026ldquo;helping people along in their relationship with the idea or with each other.\u0026rdquo; Decoupling solution with the customer with the most value.\n","html":"\u003col\u003e\n\u003cli\u003eSecrets of Silicon Valley - Horowitz\n\u003col\u003e\n\u003cli\u003eLooking for people who have \u003cem\u003efeel\u003c/em\u003e for the problem: people need to believe in the problem\u003c/li\u003e\n\u003cli\u003eTeam: can people come with execution? people that are good at startups which are usually not good at later stage stuff\n\u003col\u003e\n\u003cli\u003eBuy a startup and kick out the founders\u003c/li\u003e\n\u003cli\u003eThis is very typical\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eTeam and idea are easy to decouple\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eVetting problems\n\u003col\u003e\n\u003cli\u003eLack of market\u003c/li\u003e\n\u003cli\u003eTechnically insatiability\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Unbelievable stupidity\u0026rdquo;: calcium is so cheap\u003c/li\u003e\n\u003cli\u003eIdea goes through many morphs; getting the credit back\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ePeople wiling to have a meeting?\u003c/li\u003e\n\u003cli\u003eDecoupling value proposition\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e=\u0026gt; iStudio as a service\u003c/p\u003e\n\u003ch2 id=\"random-need-nueva-alumni-network\"\u003eRandom Need: Nueva Alumni Network\u003c/h2\u003e\n\u003cp\u003eMaybe set up a Nueva alumni network? What could we do to facilitate the Nueva alumni network; extraction of mutual value from the next work.\u003c/p\u003e\n\u003cp\u003eNueva alumni as a service.\u003c/p\u003e\n\u003ch2 id=\"innovation-consultants\"\u003eInnovation consultants\u003c/h2\u003e\n\u003cp\u003eIdeas are no longer valuable, which ideas to peruse is better. 
\u0026ldquo;helping people along in their relationship with the idea or with each other.\u0026rdquo; Decoupling solution with the customer with the most value.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvc_thing/","tags":null,"title":"vc thing"},{"categories":null,"contents":"A vector is an element of a vector space. They are also called a point.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e is an element of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. They are also called a \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvector/","tags":null,"title":"vector"},{"categories":null,"contents":"vector semantics is a sense encoding method.\n\u0026ldquo;a meaning of the word should be tied to how they are used\u0026rdquo;\nwe measure similarity between word vectors with cosine similarity. see also vector-space model.\nmotivation idea 1 neighboring words can help infer semantic meaning of new words: \u0026ldquo;we can define a word based on its distribution in language use\u0026rdquo;\nidea 2 meaning should be in a point in space, just like affective meaning (i.e. a score in each dimension).\nthat is: a word should be a vector in n space\nvector semantics Each word is a point based on distribution; each word is a vector and similar words are nearby in semantic space.\nThe intuition is that classifiers can generalize to similar, but unseen words more easily by processing embeddings.\ntransposing a Term-Document Matrix Typically we read a Term-Document Matrix column-wise, to understand what each document can be encoded in terms of words.\nHowever, if you read it row-wise, you can see a distribution for words over the documents.\nterm-term matrix a term-term matrix is a \\(|V| \\times |V|\\) matrix that measures co-occurrence in some context. 
So each cell would be the number of times the two words co-occur in some small window.\npoint-wise mutual information we usually normalize a Term-Document Matrix via TF-IDF. However, for term-term matrix, we usually normalize it as:\n\\begin{equation} PMI(w_1, w_2) = \\log \\frac{p(w_1,w_2)}{p(w_1)p(w_2)} \\end{equation}\n\u0026ldquo;would something appear more often then change\u0026rdquo;\nword2vec see word2vec\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvector_semantics/\"\u003evector semantics\u003c/a\u003e is a \u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e encoding method.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a meaning of the word should be tied to how they are used\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ewe measure similarity between word \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es with \u003ca href=\"/posts/kbhranked_information_retrieval/#cosine-similarity\"\u003ecosine similarity\u003c/a\u003e. see also \u003ca href=\"/posts/kbhranked_information_retrieval/#vector-space-model\"\u003evector-space model\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003emotivation\u003c/h2\u003e\n\u003ch3 id=\"idea-1\"\u003eidea 1\u003c/h3\u003e\n\u003cp\u003eneighboring words can help infer semantic meaning of new words: \u0026ldquo;we can define a word based on its distribution in language use\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"idea-2\"\u003eidea 2\u003c/h3\u003e\n\u003cp\u003emeaning should be in a point in space, just like \u003ca href=\"/posts/kbhsense/#affective-meaning\"\u003eaffective meaning\u003c/a\u003e (i.e. 
a score in each dimension).\u003c/p\u003e\n\u003cp\u003ethat is: a word should be a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e in n space\u003c/p\u003e\n\u003ch2 id=\"vector-semantics--kbhvector-semantics-dot-md\"\u003e\u003ca href=\"/posts/kbhvector_semantics/\"\u003evector semantics\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eEach word is a point based on distribution; each word is a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e and similar words are nearby in semantic space.\u003c/p\u003e\n\u003cp\u003eThe intuition is that classifiers can generalize to similar, but unseen words more easily by processing embeddings.\u003c/p\u003e\n\u003ch2 id=\"transposing-a-term-document-matrix--kbhterm-document-matrix-dot-md\"\u003etransposing a \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eTypically we read a \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e column-wise, to understand what each document can be encoded in terms of words.\u003c/p\u003e\n\u003cp\u003eHowever, if you read it row-wise, you can see a distribution for words over the documents.\u003c/p\u003e\n\u003ch2 id=\"term-term-matrix\"\u003eterm-term matrix\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#term-term-matrix\"\u003eterm-term matrix\u003c/a\u003e is a \\(|V| \\times |V|\\) matrix that measures co-occurrence in some context. So each cell would be the number of times the two words co-occur in some small window.\u003c/p\u003e\n\u003ch3 id=\"point-wise-mutual-information\"\u003epoint-wise mutual information\u003c/h3\u003e\n\u003cp\u003ewe usually normalize a \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e via \u003ca href=\"/posts/kbhranked_information_retrieval/#tf-idf\"\u003eTF-IDF\u003c/a\u003e. 
However, for \u003ca href=\"#term-term-matrix\"\u003eterm-term matrix\u003c/a\u003e, we usually normalize it as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPMI(w_1, w_2) = \\log \\frac{p(w_1,w_2)}{p(w_1)p(w_2)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;would something appear more often then change\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"word2vec\"\u003eword2vec\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhword2vec/\"\u003eword2vec\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvector_semantics/","tags":null,"title":"vector semantics"},{"categories":null,"contents":"A vector space is an object between a field and a group; it has two ops\u0026mdash;addition and scalar multiplication. Its not quite a field and its more than a group.\nconstituents A set \\(V\\) An addition on \\(V\\) An scalar multiplication on \\(V\\) such that\u0026hellip;\nrequirements commutativity in add.: \\(u+v=v+u\\) associativity in add. and mult.: \\((u+v)+w=u+(v+w)\\); \\((ab)v=a(bv)\\): \\(\\forall u,v,w \\in V\\) and \\(a,b \\in \\mathbb{F}\\) distributivity: goes both ways \\(a(u+v) = au+av\\) AND!! \\((a+b)v=av+bv\\): \\(\\forall a,b \\in \\mathbb{F}\\) and \\(u,v \\in V\\) additive identity: \\(\\exists 0 \\in V: v+0=v \\forall v \\in V\\) additive inverse: \\(\\forall v \\in V, \\exists w \\in V: v+w=0\\) multiplicative identity: \\(1v=v \\forall v \\in V\\) additional information Elements of a vector space are called vectors or points. vector space \u0026ldquo;over\u0026rdquo; fields Scalar multiplication is not in the set \\(V\\); instead, \u0026ldquo;scalars\u0026rdquo; \\(\\lambda\\) come from this magic faraway land called \\(\\mathbb{F}\\). 
The choice of \\(\\mathbb{F}\\) for each vector space makes it different; so, when precision is needed, we can say that a vector space is \u0026ldquo;over\u0026rdquo; some \\(\\mathbb{F}\\) which contributes its scalars.\nTherefore:\nA vector space over \\(\\mathbb{R}\\) is called a real vector space A vector space over \\(\\mathbb{C}\\) is called a real vector space ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e is an object between a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e and a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e; it has two ops\u0026mdash;addition and scalar multiplication. Its not quite a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e and its more than a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA set \\(V\\)\u003c/li\u003e\n\u003cli\u003eAn \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e on \\(V\\)\u003c/li\u003e\n\u003cli\u003eAn \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e on \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003esuch that\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e in add.: \\(u+v=v+u\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e in add. and mult.: \\((u+v)+w=u+(v+w)\\); \\((ab)v=a(bv)\\): \\(\\forall u,v,w \\in V\\) and \\(a,b \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e: goes both ways \\(a(u+v) = au+av\\) AND!! 
\\((a+b)v=av+bv\\): \\(\\forall a,b \\in \\mathbb{F}\\) and \\(u,v \\in V\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e: \\(\\exists 0 \\in V: v+0=v \\forall v \\in V\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e: \\(\\forall v \\in V, \\exists w \\in V: v+w=0\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e: \\(1v=v \\forall v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eElements of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e are called \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es or \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003es.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"vector-space-over-fields\"\u003evector space \u0026ldquo;over\u0026rdquo; fields\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003eScalar multiplication\u003c/a\u003e is not in the set \\(V\\); instead, \u0026ldquo;scalars\u0026rdquo; \\(\\lambda\\) come from this magic faraway land called \\(\\mathbb{F}\\). 
The choice of \\(\\mathbb{F}\\) for each vector space makes it different; so, when precision is needed, we can say that a vector space is \u0026ldquo;over\u0026rdquo; some \\(\\mathbb{F}\\) which contributes its scalars.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eA vector space over \\(\\mathbb{R}\\) is called a \u003cem\u003ereal vector space\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eA vector space over \\(\\mathbb{C}\\) is called a \u003cem\u003ereal vector space\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvector_space/","tags":null,"title":"vector space"},{"categories":null,"contents":"this is worse ","html":"\u003ch2 id=\"this-is-worse\"\u003ethis is worse\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcraintech/","tags":null,"title":"VFUA"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhvgg/","tags":null,"title":"VGG"},{"categories":null,"contents":"VGGish is VGG, ish. VGGish is a network based on VGG which is pretrained on the audio-feature-extraction task.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e is \u003ca href=\"/posts/kbhvgg/\"\u003eVGG\u003c/a\u003e, ish. 
\u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e is a network based on \u003ca href=\"/posts/kbhvgg/\"\u003eVGG\u003c/a\u003e which is pretrained on the audio-feature-extraction task.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-29-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhvggish/","tags":null,"title":"VGGish"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhvietnam/","tags":null,"title":"Vietnam"},{"categories":null,"contents":"vietnamization is a political position held by Richard Nixon which is characterized by the slow replacement of American troops with Vietnamese ones.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvietnamization/\"\u003evietnamization\u003c/a\u003e is a political position held by \u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e which is characterized by the slow replacement of American troops with Vietnamese ones.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvietnamization/","tags":null,"title":"vietnamization"},{"categories":null,"contents":"We are trying to share a resource: memory; memory allows multiple processes to use a share pool of memory.\nkey goals multitasking: multiple processes should be able to use memory transparency: no process need to know that memory is shared; each process should be able to run regardless of the number/locations of processes isolation: processes shouldn\u0026rsquo;t be able to corrupt other processes\u0026rsquo; memory efficiency: shouldn\u0026rsquo;t be degraded by sharing virtual memory The operating system will translate virtual addresses (which are 0 based for every program, which isn\u0026rsquo;t a problem) to physical addresses in memory.\nthe OS doesn\u0026rsquo;t need to map all virtual addresses unless its needed (i.e. 
if the program is asking for it) worst case: we can kick out unused memory into disk, and load it back when needed This is an example of virtualization.\nOS memory Whenever a process makes a syscall, OS will be handed virtual memory addresses. How do we resolve it?\nSolution: *every process reserves some virtual memory for the OS\u0026mdash;all of these virtual addresses maps to the SAME PHYSICAL REGION for the OS.\nThen, the page map will have a kernel bit which marks this virtual region no read and no write.\ndynamic address translation The system will die if we try to do virtual mapping to physical mapping.\nSo we have a Memory Management Unit (MMU) to do:\nHow does an MMU work?\nbase and bound This is basically load-time relocation, but with virtual memory.\nassign a location in physical memory, call the base; during translation, we just add every virtual address by the base we can cap the virtual address space for each process by a bound, we can raise a bus error/segfault if it goes above the highest allowable The bound is a virtual address (the first invalid address in the virtual world), whereas the base is a physical address. This is both stored in the process control block.\nlast possible address: is (bound - 1)+base\ntranslation compare virtual address to bound, trap and raise if \u0026gt;= bound then, return virtual address + base importantly, we can arbitrary adjust base and bound.\ntradeoff good news\ninexpensive: just doing addition doesn\u0026rsquo;t require additional space: (just two addresses) separation: virtualization. 
bad news\none contiguous region: need to allocate free spcae fragmentation: because of the above growing can only happens upwards with bounds (and its kind of useless)\u0026mdash;we can\u0026rsquo;t move the stack up in virtual space, and we can\u0026rsquo;t give more space downwards, because that would cause negative addresses no read only memory (we\u0026rsquo;ll want to limit access to code segment, for instance) multiple segments Let\u0026rsquo;s break up multiple virtual address space into segments, and map each of those segments separately. EACH SEGMENT will have its own base and bound. So, you will store each struct in a map: [segment number: [segment base, segment bound, read only or not]].\ntranslation look up what segment a virtual address is in (we can do this by making the top couple bits of the virtual address the segment number, and the next bits as the offset into the segment) get that segment\u0026rsquo;s info compare that address\u0026rsquo; offset to that segment\u0026rsquo;s bound, if its \u0026gt;= limit, trap otherwise, to go the base of that segment and fetch data tradeoff features\nyou can recycle segments: if you have two instances of a program running, we can actually share read-only segments (such as code). you can not map the middle: because stack and data segments are independent, we can not map the hole in the middle until more data is asked you can grow things: if you run out of continuous space, you can grow the segment by either just doing it or by moving it and growing it (and indeed we now can move the stack down as the stack is addressed as the highest address) drawbacks\ngrowing can only happens upwards with bounds\u0026mdash;now that we can move the heap independently, growing the heap makes sense now; however, growing the STACK still is impossible because growing the stack would entail moving the base address in order to go downwards variable length segments\u0026mdash;extrernal fragmentation! 
small number of segments\u0026mdash;the [segment, offset] deign divides virtual addresses, so you have to decide segment number exogenously paging So let\u0026rsquo;s instead allocate memory in pages. Instead of variable-length segments that can GROW in base and bound and multiple segments, let\u0026rsquo;s force a specific size of memory in each chunk.\nvirtual address: virtual page number + offset physical address: physical page number + offset we map each page independently, and keep the offset. If a page is unused, internal fragmentation but not too bad. The stack can now grow downwards: because if it reaches into lower page numbers we can just map that page somewhere too.\nTo store page mappings, in a seperate storage location, we store a page map/page table: its an array of tuples, where the index is the virtual page number, and each entry has [(physical page, writable)].\nNotice that page continuity isn\u0026rsquo;t a problem: the upper digits just count up, and the lower digits tells you offset in that chunk:\n0x0000 - 0x0fff 0x1000 - 0x1fff 0x2000 - 0x2fff where, the first digit tells you the page number\n0x0 - 0x0 0x1 - 0x1 0x2 - 0x2 and the rest is the offset.\nAnd everything is contiunous, and automatically paged.\nFor instance, typically page sizes are 4kb\nPage Size Offset Number Digits 4096 bytes (16^3) 3 then the rest of the address would just be the page number.\nIntel\u0026rsquo;s implementation Virtual Addresses\nUnused (16 bits) Virtual page number (36 bits) Offset (12 bits) Physical Addresses\nPage number (40 bits) Offset (12 bits) translation chop off page number and offset translate the page number concat the two together internal fragmentation why not something simpler? single-tasking memory very bad idea:\nASSUME that there is only one process. Stack grows down, data grows up, and code sits at the bottom.\ntradeoff no isolation: even in this case, nothing is stopping the program from accessing memory in the OS reserve segment; which is bad. 
no multitasking: because, well, we have one program fragmentation: little bits of space all over the place load-time relocation separate processes.\nWhen program is compiled, it assumes that its initial address is 0x0; so, at load time, we have to go into the code segment when the program is set up and increment all of its memory addresses up.\ntradeoff no isolation: nothing is stopping the program from accessing memory in otherbody\u0026rsquo;s segments must decide the memory usage of a program ahead of time + cannot grow if needs more memory (we can\u0026rsquo;t move because the addresses would be in stack) external fragmentation (normal alloc problems) ","html":"\u003cp\u003eWe are trying to share a resource: memory; memory allows multiple processes to use a share pool of memory.\u003c/p\u003e\n\u003ch2 id=\"key-goals\"\u003ekey goals\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emultitasking\u003c/strong\u003e: multiple processes should be able to use memory\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etransparency\u003c/strong\u003e: no process need to know that memory is shared; each process should be able to run regardless of the number/locations of processes\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eisolation\u003c/strong\u003e: processes shouldn\u0026rsquo;t be able to corrupt other processes\u0026rsquo; memory\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eefficiency\u003c/strong\u003e: shouldn\u0026rsquo;t be degraded by sharing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"virtual-memory\"\u003evirtual memory\u003c/h2\u003e\n\u003cp\u003eThe operating system will translate \u003cstrong\u003evirtual\u003c/strong\u003e addresses (which are 0 based for every program, which isn\u0026rsquo;t a problem) to \u003cstrong\u003ephysical\u003c/strong\u003e addresses in memory.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe OS doesn\u0026rsquo;t need to map all virtual addresses unless its needed (i.e. 
if the program is asking for it)\u003c/li\u003e\n\u003cli\u003eworst case: we can kick out unused memory into disk, and load it back when needed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is an example of \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtualization\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"os-memory\"\u003eOS memory\u003c/h2\u003e\n\u003cp\u003eWhenever a process makes a \u003cstrong\u003esyscall\u003c/strong\u003e, OS will be handed virtual memory addresses. How do we resolve it?\u003c/p\u003e\n\u003cp\u003eSolution: \u003cstrong\u003e*every process reserves some \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e for the OS\u003c/strong\u003e\u0026mdash;all of these virtual addresses maps to the \u003cstrong\u003eSAME PHYSICAL REGION\u003c/strong\u003e for the OS.\u003c/p\u003e\n\u003cp\u003eThen, the \u003ca href=\"/posts/kbhdemand_paging/#page-map\"\u003epage map\u003c/a\u003e will have a \u003cstrong\u003ekernel bit\u003c/strong\u003e which marks this virtual region no read and no write.\u003c/p\u003e\n\u003ch2 id=\"dynamic-address-translation\"\u003edynamic address translation\u003c/h2\u003e\n\u003cp\u003eThe system will die if we try to do virtual mapping to physical mapping.\u003c/p\u003e\n\u003cp\u003eSo we have a \u003ca href=\"#dynamic-address-translation\"\u003eMemory Management Unit\u003c/a\u003e (\u003ca href=\"#dynamic-address-translation\"\u003eMMU\u003c/a\u003e) to do:\u003c/p\u003e\n\u003cp\u003eHow does an \u003ca href=\"#dynamic-address-translation\"\u003eMMU\u003c/a\u003e work?\u003c/p\u003e\n\u003ch3 id=\"base-and-bound\"\u003ebase and bound\u003c/h3\u003e\n\u003cp\u003eThis is basically \u003ca href=\"#load-time-relocation\"\u003eload-time relocation\u003c/a\u003e, but with \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eassign a location in physical memory, call the \u003cstrong\u003ebase\u003c/strong\u003e; during 
translation, we just add every virtual address by the \u003cstrong\u003ebase\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ewe can cap the virtual address space for each process by a \u003cstrong\u003ebound\u003c/strong\u003e, we can raise a bus error/segfault if it goes above the highest allowable\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u003cstrong\u003ebound\u003c/strong\u003e is a virtual address (the first invalid address in the virtual world), whereas the \u003cstrong\u003ebase\u003c/strong\u003e is a physical address. This is both stored in the \u003cstrong\u003eprocess control block\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003elast possible address\u003c/strong\u003e: is (bound - 1)+base\u003c/p\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ecompare virtual address to bound, \u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e and raise if \u0026gt;= \u003cstrong\u003ebound\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ethen, return virtual address + \u003cstrong\u003ebase\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eimportantly, we can arbitrary adjust base and bound.\u003c/p\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003egood news\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einexpensive\u003c/strong\u003e: just doing addition\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edoesn\u0026rsquo;t require additional space\u003c/strong\u003e: (just two addresses)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eseparation\u003c/strong\u003e: \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtualization\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ebad news\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eone 
contiguous region\u003c/strong\u003e: need to allocate free spcae\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efragmentation\u003c/strong\u003e: because of the above\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003egrowing can only happens upwards with bounds\u003c/strong\u003e (and its kind of useless)\u0026mdash;we can\u0026rsquo;t move the stack up in virtual space, and we can\u0026rsquo;t give more space downwards, because that would cause negative addresses\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno read only memory\u003c/strong\u003e (we\u0026rsquo;ll want to limit access to code segment, for instance)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multiple-segments\"\u003emultiple segments\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s break up multiple virtual address space into segments, and map each of those segments separately. \u003cstrong\u003eEACH SEGMENT\u003c/strong\u003e will have its own \u003ca href=\"#base-and-bound\"\u003ebase and bound\u003c/a\u003e. 
So, you will store each struct in a map: \u003ccode\u003e[segment number: [segment base, segment bound, read only or not]]\u003c/code\u003e.\u003c/p\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003elook up what segment a virtual address is in (we can do this by making the top couple bits of the virtual address the segment number, and the next bits as the offset into the segment)\u003c/li\u003e\n\u003cli\u003eget that segment\u0026rsquo;s info\u003c/li\u003e\n\u003cli\u003ecompare that address\u0026rsquo; offset to that segment\u0026rsquo;s bound, if its \u0026gt;= limit, \u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eotherwise, to go the base of that segment and fetch data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003efeatures\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eyou can recycle segments\u003c/strong\u003e: if you have two instances of a program running, we can actually share read-only segments (such as code).\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eyou can not map the middle\u003c/strong\u003e: because stack and data segments are independent, we can not map the hole in the middle until more data is asked\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eyou can grow things\u003c/strong\u003e: if you run out of continuous space, you can grow the segment by either just doing it or by moving it and growing it (and indeed we now can move the stack down as the stack is addressed as the highest address)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edrawbacks\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003egrowing can only happens upwards with bounds\u003c/strong\u003e\u0026mdash;now that we can move the heap 
independently, growing the heap makes sense now; however, growing the STACK \u003cstrong\u003estill\u003c/strong\u003e is impossible because growing the stack would entail moving the base address in order to go downwards\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evariable length segments\u003c/strong\u003e\u0026mdash;extrernal fragmentation!\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esmall number of segments\u003c/strong\u003e\u0026mdash;the [segment, offset] deign divides virtual addresses, so you have to decide segment number exogenously\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"paging\"\u003epaging\u003c/h3\u003e\n\u003cp\u003eSo let\u0026rsquo;s instead allocate memory in pages. Instead of variable-length segments that can GROW in \u003ca href=\"#base-and-bound\"\u003ebase and bound\u003c/a\u003e and \u003ca href=\"#multiple-segments\"\u003emultiple segments\u003c/a\u003e, let\u0026rsquo;s force a specific size of memory in each chunk.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003evirtual address\u003c/strong\u003e: \u003cstrong\u003evirtual page number\u003c/strong\u003e + \u003cstrong\u003eoffset\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ephysical address\u003c/strong\u003e: \u003cstrong\u003ephysical page number\u003c/strong\u003e + \u003cstrong\u003eoffset\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe map each page independently, and keep the offset. If a page is unused, internal fragmentation but not too bad. 
The \u003cstrong\u003estack can now grow downwards\u003c/strong\u003e: because if it reaches into lower page numbers we can just map that page somewhere too.\u003c/p\u003e\n\u003cp\u003eTo store page mappings, in a seperate storage location, we store a \u003ca href=\"#paging\"\u003epage map\u003c/a\u003e/\u003ca href=\"#paging\"\u003epage table\u003c/a\u003e: its an array of tuples, where the index is the virtual page number, and each entry has [(physical page, writable)].\u003c/p\u003e\n\u003cp\u003eNotice that page continuity isn\u0026rsquo;t a problem: the upper digits just count up, and the lower digits tells you offset in that chunk:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-asm\" data-lang=\"asm\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex0000\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x0fff\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex1000\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x1fff\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex2000\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0x2fff\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhere, the first digit tells you the page number\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-asm\" data-lang=\"asm\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex0\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex1\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex2\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand the rest is the offset.\u003c/p\u003e\n\u003cp\u003eAnd everything is contiunous, and automatically paged.\u003c/p\u003e\n\u003cp\u003eFor instance, typically page sizes are 4kb\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePage Size\u003c/th\u003e\n\u003cth\u003eOffset Number Digits\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4096 bytes 
(16^3)\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ethen the rest of the address would just be the page number.\u003c/p\u003e\n\u003ch4 id=\"intel-s-implementation\"\u003eIntel\u0026rsquo;s implementation\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003eVirtual Addresses\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eUnused (16 bits)\u003c/td\u003e\n\u003ctd\u003eVirtual page number (36 bits)\u003c/td\u003e\n\u003ctd\u003eOffset (12 bits)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003ePhysical Addresses\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ePage number (40 bits)\u003c/td\u003e\n\u003ctd\u003eOffset (12 bits)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003echop off page number and offset\u003c/li\u003e\n\u003cli\u003etranslate the page number\u003c/li\u003e\n\u003cli\u003econcat the two together\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003einternal fragmentation\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"why-not-something-simpler\"\u003ewhy not something simpler?\u003c/h2\u003e\n\u003ch3 id=\"single-tasking-memory\"\u003esingle-tasking memory\u003c/h3\u003e\n\u003cp\u003every bad idea:\u003c/p\u003e\n\u003cp\u003eASSUME that there is only one process. 
Stack grows down, data grows up, and code sits at the bottom.\u003c/p\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eno isolation\u003c/strong\u003e: even in this case, nothing is stopping the program from accessing memory in the OS reserve segment; which is bad.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno multitasking\u003c/strong\u003e: because, well, we have one program\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efragmentation\u003c/strong\u003e: little bits of space all over the place\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"load-time-relocation\"\u003eload-time relocation\u003c/h3\u003e\n\u003cp\u003eseparate processes.\u003c/p\u003e\n\u003cp\u003eWhen program is compiled, it assumes that its initial address is \u003ccode\u003e0x0\u003c/code\u003e; so, at load time, we have to go into the code segment when the program is set up and increment all of its memory addresses up.\u003c/p\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eno isolation\u003c/strong\u003e: nothing is stopping the program from accessing memory in otherbody\u0026rsquo;s segments\u003c/li\u003e\n\u003cli\u003emust decide the memory usage of a program ahead of time + cannot grow if needs more memory (we can\u0026rsquo;t move because the addresses would be in stack)\u003c/li\u003e\n\u003cli\u003eexternal fragmentation (normal alloc problems)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvirtual_memory/","tags":null,"title":"virtual memory"},{"categories":null,"contents":"voltage is a measure of difference in electric potential energy across two points\n\\begin{equation} V = \\frac{1}{4\\pi \\epsilon_{0}} \\sum_{i} \\frac{q_{i}}{r_{i}} \\end{equation}\nor,\n\\begin{equation} PE = qV \\end{equation}\npotential energy experienced by \\(q\\) at the point.\n\\begin{equation} E_{x} = -\\dv{V}{x} 
\\end{equation}\n\\begin{equation} \\Delta V = - \\int E \\cdot dr \\end{equation}\n\\begin{equation} PE = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q_1q_2}{r} \\end{equation}\nequipotentential lines and electric field lines should align at right angles.\ncurrent through series is the same voltage through parallel is the same ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e is a measure of difference in \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential energy\u003c/a\u003e across two points\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\frac{1}{4\\pi \\epsilon_{0}} \\sum_{i} \\frac{q_{i}}{r_{i}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPE = qV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003epotential energy experienced by \\(q\\) at the point.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE_{x} = -\\dv{V}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta V = - \\int E \\cdot dr\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPE = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q_1q_2}{r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eequipotentential lines and electric field lines should align at right angles.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecurrent through series is the same\u003c/li\u003e\n\u003cli\u003evoltage through parallel is the same\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvoltage/","tags":null,"title":"voltage"},{"categories":null,"contents":"The VWAP is a Financial Market metric that stands for \u0026ldquo;volume-weighted average price.\u0026rdquo; It is given by (sumshares brought(shares bought at price*price its at)/(total shares bought in period)).\n\u0026ldquo;the price we care the most about, is the price where the most volume is traded.\u0026rdquo;\nMotivation Its a weighted-by volume trading price. 
Though the closing price is the price used for accounting, it isn\u0026rsquo;t a good metric for large-volume trades.\nTrading at the VWAP We Trade at the VWAP because a LARGE trade will move the market around, and we don\u0026rsquo;t want that if we are a large trader. So we trade at the VWAP to ensure that we are getting the best possible value.\nBuild a volume a profile Slicing the orders to match Control for volume deviations Volume Profile We use the volume-profile: \u0026ldquo;how much/what percentage of today\u0026rsquo;s volume happened in this chunk of the day\u0026rdquo; to predict today\u0026rsquo;s trading by matching by historical data. This often results in looking like a J curve: lots of trading happen at the beginning of the day, very little towards the middle, and LOTS in the end.\nSlicing Orders Slice your funds needed to trade, volume-wise, according to the Volume Profile. Set limit orders per slice at the best price for the market.\nControl Deviations from Expectation If you were\u0026rsquo;t able to trade by the limit order you posted at that slice, by the end of the slice, cancel your limit order and just send in a market order to ensure your participation with the desired volume at that slice.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhvwap/\"\u003eVWAP\u003c/a\u003e is a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Market\u003c/a\u003e metric that stands for \u0026ldquo;volume-weighted average price.\u0026rdquo; It is given by (sum\u003csub\u003eshares brought\u003c/sub\u003e(shares bought at price*price its at)/(total shares bought in period)).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e we care the most about, is the price where the most volume is traded.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eIts a weighted-by volume trading price. 
Though the \u003ca href=\"/posts/kbhaccounting_price/\"\u003eclosing price\u003c/a\u003e is the \u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e used for accounting, it isn\u0026rsquo;t a good metric for large-volume trades.\u003c/p\u003e\n\u003ch2 id=\"trading-at-the-vwap\"\u003eTrading at the VWAP\u003c/h2\u003e\n\u003cp\u003eWe \u003ca href=\"#trading-at-the-vwap\"\u003eTrade at the VWAP\u003c/a\u003e because a LARGE trade will move the market around, and we don\u0026rsquo;t want that if we are a large trader. So we trade at the \u003ca href=\"/posts/kbhvwap/\"\u003eVWAP\u003c/a\u003e to ensure that we are getting the best possible value.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBuild a volume a profile\u003c/li\u003e\n\u003cli\u003eSlicing the orders to match\u003c/li\u003e\n\u003cli\u003eControl for volume deviations\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"volume-profile\"\u003eVolume Profile\u003c/h3\u003e\n\u003cp\u003eWe use the volume-profile: \u0026ldquo;how much/what percentage of today\u0026rsquo;s volume happened in this chunk of the day\u0026rdquo; to predict today\u0026rsquo;s trading by matching by historical data. This often results in looking like a J curve: lots of trading happen at the beginning of the day, very little towards the middle, and LOTS in the end.\u003c/p\u003e\n\u003ch3 id=\"slicing-orders\"\u003eSlicing Orders\u003c/h3\u003e\n\u003cp\u003eSlice your funds needed to trade, volume-wise, according to the \u003ca href=\"#volume-profile\"\u003eVolume Profile\u003c/a\u003e. 
Set limit orders per slice at the best price for the market.\u003c/p\u003e\n\u003ch3 id=\"control-deviations-from-expectation\"\u003eControl Deviations from Expectation\u003c/h3\u003e\n\u003cp\u003eIf you were\u0026rsquo;t able to trade by the limit order you posted at that slice, by the end of the slice, cancel your limit order and just send in a market order to ensure your participation with the desired volume at that slice.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvwap/","tags":null,"title":"VWAP"},{"categories":null,"contents":"(Walker and Davies 2013)\nOne-Liner Emergency of life corresponds with the time for physical transition.\nNovelty Notable Methods Key Figs New Concepts Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eWalker and Davies 2013\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eEmergency of life corresponds with the time for physical transition.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwalker_2018/","tags":null,"title":"Walker 2018"},{"categories":null,"contents":"DOI: 10.21437/Interspeech.2019-2414\n","html":"\u003cp\u003eDOI: 10.21437/Interspeech.2019-2414\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwang_2019/","tags":null,"title":"Wang 2019"},{"categories":null,"contents":"One-Liner Modeling carbon storage operations as a POMDP to show how different monitoring strategies can influence decision quality. 
Evalutae\nNovelty Applying POMDP to the task of carbon capture monitor planning.\nNotable Methods POMDP formulation\nSolver: POMCPOW Reward: trapped, free, and exited Co2 Action: injector placement Observation: CO2 saturation Belief: the permeability of the rock POMDP Solution: particle filter tree.\nExperimental design validated by simulations of CO2 sperad through injectors\nFourier Network Simulation The actual fluid dynamics is really really hard to solve. As such, we do the evaluation over a lot of scenarios and then train a neural network to act as surrogate.\nKey Figs New Concepts Notes ","html":"\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eModeling carbon storage operations as a POMDP to show how different monitoring strategies can influence decision quality. Evalutae\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eApplying POMDP to the task of carbon capture monitor planning.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003ePOMDP formulation\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSolver: POMCPOW\u003c/li\u003e\n\u003cli\u003eReward: trapped, free, and exited Co2\u003c/li\u003e\n\u003cli\u003eAction: injector placement\u003c/li\u003e\n\u003cli\u003eObservation: CO2 saturation\u003c/li\u003e\n\u003cli\u003eBelief: the permeability of the rock\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ePOMDP Solution: particle filter tree.\u003c/p\u003e\n\u003cp\u003eExperimental design validated by simulations of CO2 sperad through injectors\u003c/p\u003e\n\u003ch3 id=\"fourier-network-simulation\"\u003eFourier Network Simulation\u003c/h3\u003e\n\u003cp\u003eThe actual fluid dynamics is really really hard to solve. 
As such, we do the evaluation over a lot of scenarios and then train a neural network to act as surrogate.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwang_2023/","tags":null,"title":"Wang 2023"},{"categories":null,"contents":"Richard Nixon does not like democratic policies. Therefore, he had 5 operatives break into the DNC. Woodward and Berstein reports on the issue. Nixon rebounds and fires his investigator.\nThen, he released the \u0026ldquo;smoking gun\u0026rdquo; tape with the middle missing\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e does not like democratic policies. Therefore, he had 5 operatives break into the DNC. Woodward and Berstein reports on the issue. Nixon rebounds and fires his investigator.\u003c/p\u003e\n\u003cp\u003eThen, he released the \u0026ldquo;smoking gun\u0026rdquo; tape with the middle missing\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwatergate/","tags":null,"title":"watergate"},{"categories":null,"contents":"If we write it in a single set of variables:\n\\begin{equation} \\pdv[2]{u}{t} = \\pdv[2]{u}{x} \\end{equation}\nAt a glance, for Dirichlet Conditions:\n\\begin{equation} u(t,x) = \\sum_{k} \\qty(a_{k} \\sin \\qty(\\frac{ck\\pi}{l} t) + b_{k} \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x) \\end{equation}\nthis takes two initial condition:\n\\begin{equation} u(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x) \\end{equation}\n\\begin{equation} \\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x) \\end{equation}\nmeaning:\n\\begin{equation} b_{k} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x} 
\\end{equation}\nand:\n\\begin{equation} a_{k} = \\frac{2}{k\\pi c} \\int_{0}^{l} h(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x} \\end{equation}\nwhich now finishes our initial conditions.\nImportantly, as we have a SECOND ORDER expression now, we need two initial conditions with initial amplitude and velocity.\nd\u0026rsquo;alembert\u0026rsquo;s formula The general solution to the wave equation, with:\n\\begin{equation} \\pdv[2]{U}{t} = c^{2} \\pdv[2]{U}{x} \\end{equation}\nwith \\(U(0,x) = f_0(x)\\), and \\(\\pdv{U}{t}(0,x) = f_1(x)\\) is:\n\\begin{equation} U(t,x) = \\frac{1}{2} \\qty(f_0 (x+ct) + f_0 (x-ct)) + \\frac{1}{2c} \\int_{x-ct}^{x+ct} f_1(y) \\dd{y} \\end{equation}\ndamping see damped wave equation\nsolving wave equation Recall:\n\\begin{equation} \\pdv[2]{u}{t} = c^{2} \\pdv[2]{u}{x} \\end{equation}\nwhere \\(c^{2}\\) is called the \u0026ldquo;wave speed\u0026rdquo;. Let\u0026rsquo;s start with the Dirichlet Conditions.\nUnlike the Heat Equation, Wave Equation are time reversible (i.e. time going forward and backwards should have no difference). Any solutions that go forward in time also satisfy for going backwards in time.\nLet\u0026rsquo;s try to solve it. 
Guess:\n\\begin{equation} u = A(t) B(x) \\end{equation}\nmeaning, we have:\n\\begin{equation} A\u0026rsquo;\u0026rsquo;(t) B(x) = c^{2} A(t)B\u0026rsquo;\u0026rsquo;(x) \\end{equation}\nThis finally gives:\n\\begin{equation} \\frac{A\u0026rsquo;\u0026rsquo;(t)}{A(t)} = c^{2} \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(X)} = \\lambda \\end{equation}\nwhich gives:\n\\begin{equation} B\u0026rsquo;\u0026rsquo;(x) - \\frac{\\lambda}{c^{2}} B(x) = 0 \\end{equation}\nwe can only solve this, given our boundary conditions:\n\\begin{equation} \\lambda = \\frac{-c^{2} k^{2} \\pi^{2}}{l^{2}} \\end{equation}\nwhich gives:\n\\begin{equation} B(x) = \\sin \\qty( \\frac{k \\pi}{l}x) \\end{equation}\nand \\(A\\) will result in a second order equation (unlike before):\n\\begin{equation} A\u0026rsquo;\u0026rsquo;(t) + \\frac{c^{2} h^{2} \\pi^{2}}{l^{2}} A(t) = 0 \\end{equation}\nThis gives generally a solution:\n\\begin{equation} A(t) = c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t) \\end{equation}\nTherefore, multiplying everything out:\n\\begin{equation} u(t,x) = \\sum_{k} \\qty(c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x) \\end{equation}\nmeaning: the overall oscillation is controlled by the wave speed, which changes in time but not space.\nFinally, note that:\n\\begin{equation} u(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x) \\end{equation}\nConsider the \\(t\\) derivative as well:\n\\begin{equation} \\pdv{u}{t} = \\sum \\qty(a_{n} \\frac{ck\\pi}{l} \\cos \\qty( \\frac{ck \\pi}{l} t) - b_{k}\\frac{k\\pi}{l} \\sin \\qty( \\frac{k\\pi}{l}t)) \\sin \\qty( \\frac{k\\pi}{l} x) \\end{equation}\nnow, this gives us another initial condition:\n\\begin{equation} \\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x) \\end{equation}\nwhich now finishes our initial conditions.\nGeneral Standing Wave Solution Because the PDE given is linear, solutions 
compose, and we note that any scale of \\(\\cos kt \\sin kx\\) will compose.\n\\begin{equation} u(t,x) = \\sum_{k=0}^{\\infty} a_{k} \\cos kt \\sin kx \\end{equation}\nFourier Series \\begin{equation} u(o,x) \\sum_{k} a_{k}\\sin kx \\end{equation}\nBIG stunning conclusion: every single function, including wack ones, can be decomposed. See Fourier Series\nGeneral Traveling Wave Solution \\begin{equation} u(t,x) = \\sin (x-t) w(x-t) \\end{equation}\nas long as \\(w\\) is a valid twice-differentiable solution, plugging its derivative in will resolve as well.\nComposition \\begin{equation} \\sin (x-t) + \\sin (x+t) = \\sin x \\cos t - \\cos x \\sin t + \\sin x \\cos t + \\cos x \\sin t = 2 \\sin x \\cos t \\end{equation}\n","html":"\u003cp\u003eIf we write it in a single set of variables:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt a glance, for \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sum_{k} \\qty(a_{k} \\sin \\qty(\\frac{ck\\pi}{l} t) + b_{k} \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis takes two initial condition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{k} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\frac{2}{k\\pi c} \\int_{0}^{l} h(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich now 
finishes our initial conditions.\u003c/p\u003e\n\u003cp\u003eImportantly, as we have a \u003cstrong\u003eSECOND ORDER\u003c/strong\u003e expression now, we need \u003cstrong\u003etwo\u003c/strong\u003e initial conditions with initial amplitude and velocity.\u003c/p\u003e\n\u003ch2 id=\"d-alembert--kbhwave-equation-dot-md--s-formula\"\u003e\u003ca href=\"/posts/kbhwave_equation/\"\u003ed\u0026rsquo;alembert\u003c/a\u003e\u0026rsquo;s formula\u003c/h2\u003e\n\u003cp\u003eThe general solution to the wave equation, with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{U}{t} = c^{2} \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(U(0,x) = f_0(x)\\), and \\(\\pdv{U}{t}(0,x) = f_1(x)\\) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{2} \\qty(f_0 (x+ct) + f_0 (x-ct)) + \\frac{1}{2c} \\int_{x-ct}^{x+ct} f_1(y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"damping\"\u003edamping\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"\"\u003edamped wave equation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"solving-wave-equation\"\u003esolving wave equation\u003c/h2\u003e\n\u003chr\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{t} = c^{2} \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c^{2}\\) is called the \u0026ldquo;wave speed\u0026rdquo;. Let\u0026rsquo;s start with the \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eUnlike the \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e, \u003ca href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e are time reversible (i.e. time going forward and backwards should have no difference). Any solutions that go forward in time also satisfy for going backwards in time.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s try to solve it. 
Guess:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = A(t) B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;\u0026rsquo;(t) B(x) = c^{2} A(t)B\u0026rsquo;\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis finally gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;\u0026rsquo;(t)}{A(t)} = c^{2} \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(X)} = \\lambda\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB\u0026rsquo;\u0026rsquo;(x) - \\frac{\\lambda}{c^{2}} B(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can only solve this, given our boundary conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{-c^{2} k^{2} \\pi^{2}}{l^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = \\sin \\qty( \\frac{k \\pi}{l}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(A\\) will result in a second order equation (unlike before):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;\u0026rsquo;(t) + \\frac{c^{2} h^{2} \\pi^{2}}{l^{2}} A(t) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives generally a solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA(t) = c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, multiplying everything out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sum_{k} \\qty(c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning: the overall oscillation is controlled by the wave speed, which changes in \u003cstrong\u003etime\u003c/strong\u003e but not \u003cstrong\u003espace\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eFinally, note 
that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsider the \\(t\\) derivative as well:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\sum \\qty(a_{n} \\frac{ck\\pi}{l} \\cos \\qty( \\frac{ck \\pi}{l} t) - b_{k}\\frac{k\\pi}{l} \\sin \\qty( \\frac{k\\pi}{l}t)) \\sin \\qty( \\frac{k\\pi}{l} x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, this gives us another initial condition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich now finishes our initial conditions.\u003c/p\u003e\n\u003ch2 id=\"general-standing-wave-solution\"\u003eGeneral Standing Wave Solution\u003c/h2\u003e\n\u003cp\u003eBecause the \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003e given is linear, solutions compose, and we note that any scale of \\(\\cos kt \\sin kx\\) will compose.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sum_{k=0}^{\\infty} a_{k} \\cos kt \\sin kx\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"fourier-series\"\u003eFourier Series\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nu(o,x) \\sum_{k} a_{k}\\sin kx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBIG \u003cstrong\u003estunning conclusion\u003c/strong\u003e: \u003cstrong\u003eevery single function, including wack ones, can be decomposed\u003c/strong\u003e. 
See \u003ca href=\"/posts/kbhsu_math53_feb252024/#fourier-decomposition\"\u003eFourier Series\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"general-traveling-wave-solution\"\u003eGeneral Traveling Wave Solution\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sin (x-t) w(x-t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas long as \\(w\\) is a valid twice-differentiable solution, plugging its derivative in will resolve as well.\u003c/p\u003e\n\u003ch3 id=\"composition\"\u003eComposition\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\sin (x-t) + \\sin (x+t) = \\sin x \\cos t - \\cos x \\sin t + \\sin x \\cos t + \\cos x \\sin t = 2 \\sin x \\cos t\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwave_equation/","tags":null,"title":"Wave Equation"},{"categories":null,"contents":"let\u0026rsquo;s consider the web as a directed graph, where\u0026hellip;\na hyper-link denotes perceived relevance (\u0026ldquo;quality\u0026rdquo;) anchor of the hyper-link describe the target page (\u0026ldquo;textual context\u0026rdquo;) anchor text consider:\nIBM\u0026rsquo;s mostly graphical homepage IBM\u0026rsquo;s copyright page Rival\u0026rsquo;s span IBM page Consider, a million picees of anchor text saying \u0026ldquo;IBM\u0026rdquo; pointing to ibm.com, suddenly, that legitimizes the home page\n\u0026lt;a href=\u0026#34;target\u0026#34;\u0026gt;[archor text that says IBM]\u0026lt;/a\u0026gt; So, when we index a website, we index not just the website but 1) all links pointing to it and 2) the text of those links.\nside effects \u0026ldquo;Google Bombing\u0026rdquo;\u0026mdash;a lot of people artificially increasing the rank of the website by pointing a lot to it on more fake websites and writing in anchor text about the spoofed website\nsolution: weight each web page\u0026rsquo;s target anchors based on their \u0026ldquo;authoritativeness\u0026rdquo;\u0026mdash;either curated or calculated.\nuses of anchor text synonym usage (collect 
multiple ways of referring to the same website) finding translations (collect multiple languages referring to the same website) providing constituency boundaries (i.e. the anchor text is a NP within the larger sentence) PageRank \u0026ldquo;A page that\u0026rsquo;s a very popular is a good page.\u0026rdquo;\nPage Rank Solves the LinkCount problem by using the intuition that we want to weight Directed Popularity based on the importance of the page where the link is from.\nPrecisely: after starting at a random page, we walk along each link with equal probability and continue until we reach a time where a page\u0026rsquo;s visitation rate converges. We will use this as PageRank\nTeleporting To resolve the problem of dead ends, if we reach a dead end we jump to a random page.\nEven if we didn\u0026rsquo;t reach a dead end, with probability \\(\\alpha\\) we still jump to a random page.\nif the node has no out-link, the transition probability to each other node is \\(\\frac{1}{N}\\) if the node does have \\(K\\) out links, the probability of telephoning to a random node is \\(\\frac{\\alpha}{N}\\), and the probability of going to a normal out link is \\(\\frac{1-\\alpha}{k}\\). Building PageRank Matrix For some matrix A, \\(A_{ij}\\) is \\(1\\) if there is a hyper-link from \\(i\\) to \\(j\\).\nIf row \\(A\\) has no 1s, then we will replace each element by \\(\\frac{1}{N}\\). For all other rows, divide each row by the sum of the row, and multplying each entry by \\((1-\\alpha)\\). 
Then, add \\(\\frac{a}{N}\\) to the whole row.\ncalculating PageRank uses the fact that the matrix you built in the previous step is Ergotic to compute its steady state.\nLink Count page that is pointed to by lots of other pages\nFailure: this is very easy to spam\u0026ndash;we can just create a bunch of pages and add arbitrary number of links.\nUndirected Popularity \u0026ldquo;Degree\u0026rdquo;: number of in links plus the number of out-links.\nDirected Popularity Number of in-links\n","html":"\u003cp\u003elet\u0026rsquo;s consider the web as a \u003cstrong\u003edirected graph\u003c/strong\u003e, where\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ea hyper-link denotes perceived relevance (\u0026ldquo;quality\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eanchor of the hyper-link describe the target page (\u0026ldquo;textual context\u0026rdquo;)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"anchor-text\"\u003eanchor text\u003c/h2\u003e\n\u003cp\u003econsider:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIBM\u0026rsquo;s mostly graphical homepage\u003c/li\u003e\n\u003cli\u003eIBM\u0026rsquo;s copyright page\u003c/li\u003e\n\u003cli\u003eRival\u0026rsquo;s span IBM page\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eConsider, a million picees of \u003ca href=\"#anchor-text\"\u003eanchor text\u003c/a\u003e saying \u0026ldquo;IBM\u0026rdquo; pointing to ibm.com, suddenly, that legitimizes the home page\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-html\" data-lang=\"html\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ehref\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e\u0026gt;\u003c/span\u003e[archor text that says IBM]\u003cspan style=\"color:#111\"\u003e\u0026lt;/\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e\u0026gt;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo, when we index a website, we index not just the website but 1) all links pointing to it and 2) the text of those links.\u003c/p\u003e\n\u003ch3 id=\"side-effects\"\u003eside effects\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Google Bombing\u0026rdquo;\u0026mdash;a lot of people artificially increasing the rank of the website by pointing a lot to it on more fake websites and writing in anchor text about the spoofed website\u003c/p\u003e\n\u003cp\u003esolution: weight each web page\u0026rsquo;s target anchors based on their \u0026ldquo;authoritativeness\u0026rdquo;\u0026mdash;either curated or calculated.\u003c/p\u003e\n\u003ch3 id=\"uses-of-anchor-text\"\u003euses of anchor text\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003esynonym usage (collect multiple ways of referring to the same website)\u003c/li\u003e\n\u003cli\u003efinding translations (collect multiple languages referring to the same website)\u003c/li\u003e\n\u003cli\u003eproviding constituency boundaries (i.e. 
the anchor text is a NP within the larger sentence)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pagerank\"\u003ePageRank\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;A page that\u0026rsquo;s a very popular is a good page.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"page-rank\"\u003ePage Rank\u003c/h3\u003e\n\u003cp\u003eSolves the LinkCount problem by using the intuition that we want to weight \u003ca href=\"#directed-popularity\"\u003eDirected Popularity\u003c/a\u003e based on the importance of the page where the link is from.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ePrecisely\u003c/strong\u003e: after starting at a random page, we walk along each link with equal probability and continue until we reach a time where a page\u0026rsquo;s visitation rate converges. We will use this as PageRank\u003c/p\u003e\n\u003ch4 id=\"teleporting\"\u003eTeleporting\u003c/h4\u003e\n\u003cp\u003eTo resolve the problem of dead ends, if we reach a dead end we jump to a random page.\u003c/p\u003e\n\u003cp\u003eEven if we didn\u0026rsquo;t reach a dead end, with probability \\(\\alpha\\) we still jump to a random page.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif the node has no out-link, the transition probability to each other node is \\(\\frac{1}{N}\\)\u003c/li\u003e\n\u003cli\u003eif the node does have \\(K\\) out links, the probability of telephoning to a random node is \\(\\frac{\\alpha}{N}\\), and the probability of going to a normal out link is \\(\\frac{1-\\alpha}{k}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"building-pagerank-matrix\"\u003eBuilding PageRank Matrix\u003c/h4\u003e\n\u003cp\u003eFor some matrix A, \\(A_{ij}\\) is \\(1\\) if there is a hyper-link from \\(i\\) to \\(j\\).\u003c/p\u003e\n\u003cp\u003eIf row \\(A\\) has no 1s, then we will replace each element by \\(\\frac{1}{N}\\). For all other rows, divide each row by the sum of the row, and multplying each entry by \\((1-\\alpha)\\). 
Then, add \\(\\frac{a}{N}\\) to the whole row.\u003c/p\u003e\n\u003ch4 id=\"calculating-pagerank--org69f1397\"\u003ecalculating \u003ca href=\"#pagerank\"\u003ePageRank\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003euses the fact that the matrix you built in the previous step is \u003ca href=\"/posts/kbhmarkov_chain/#ergotic-markov-chain\"\u003eErgotic\u003c/a\u003e to \u003ca href=\"/posts/kbhmarkov_chain/#computing-steady-state\"\u003ecompute its steady state\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"link-count\"\u003eLink Count\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003epage that is pointed to by lots of other pages\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eFailure: this is \u003cstrong\u003every easy to spam\u003c/strong\u003e\u0026ndash;we can just create a bunch of pages and add arbitrary number of links.\u003c/p\u003e\n\u003ch4 id=\"undirected-popularity\"\u003eUndirected Popularity\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;Degree\u0026rdquo;: number of in links plus the number of out-links.\u003c/p\u003e\n\u003ch4 id=\"directed-popularity\"\u003eDirected Popularity\u003c/h4\u003e\n\u003cp\u003eNumber of in-links\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhweb_graph/","tags":null,"title":"Web Graph"},{"categories":null,"contents":"For instance, in spellcheck, you are more likely to confuse say \\(a\\) and \\(e\\) than \\(a\\) and \\(b\\). 
Therefore, sometimes we want to weight our edit distance with DP to account for these \u0026ldquo;common\u0026rdquo; paths to make certain corrections more \u0026ldquo;jarring\u0026rdquo;.\nFor two strings, let\u0026rsquo;s define:\n\\(X\\) of length \\(n\\) \\(Y\\) of length \\(m\\) we define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\nLet:\n\\(D(i,0) = i, \\forall i\\) \\(D(0,j) = j, \\forall j\\) for i in range(1,M): for j in range(1,N): # deletion: ignoring one char of previous string d1 = D(i-1,j) + 1 # (cost) # insertion: insertion into string before using rest of j d2 = D(i,j-1) + 1 # (cost) # keep same if char is same or substitute current d3 = D(i-1,j-1) + (0 if X[i] == Y[j] else 2) # cache D(i,j) = min(d1, d2, d3) ","html":"\u003cp\u003eFor instance, in spellcheck, you are more likely to confuse say \\(a\\) and \\(e\\) than \\(a\\) and \\(b\\). Therefore, sometimes we want to weight our \u003ca href=\"/posts/kbhedit_distance_with_dp/\"\u003eedit distance with DP\u003c/a\u003e to account for these \u0026ldquo;common\u0026rdquo; paths to make certain corrections more \u0026ldquo;jarring\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor two strings, let\u0026rsquo;s define:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) of length \\(n\\)\u003c/li\u003e\n\u003cli\u003e\\(Y\\) of length \\(m\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(D(i,0) = i, \\forall i\\)\u003c/li\u003e\n\u003cli\u003e\\(D(0,j) = j, \\forall j\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# deletion: ignoring one char of previous string\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# insertion: insertion into string before using rest of j\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# keep same if char is same or substitute current\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhweighted_edit_distance/","tags":null,"title":"weighted edit distance"},{"categories":null,"contents":"Fireside a series of articles that I\u0026rsquo;m writing to consolidate my learning.\nI have always dreamed of blogging. I have even tried once called 20MinuteRants. They worked quite well as a basic format whereby I can write about things in a fairly efficient manner (hence the 20 minutes), and be able to reflect about the things I\u0026rsquo;m up to.\nThe problem with the project is that I rarely had the motivation to do one. Once I was too busy, or out of ideas to write about, I stop. If there\u0026rsquo;s not anything to rant about, why is there a 20MinuteRant?\nIndeed that has been why the blog has been on hiatus for the past many months. I suppose we can consider this entry the last of the 20MinuteRants and the first one of a new series of writings\u0026mdash;Fireside\u0026mdash;which I hope will continue for a long time.\nImpetus My mentor D has always told me to start arguments with why. The larger system in which Fireside is located, my knowlegebase system, has such a page arguing why its a good idea.\nAnd here\u0026rsquo;s the why: starting university has made me surprisingly lost in terms of what I want to do. In that: there\u0026rsquo;s so much of the vicissitudes of daily life that I no longer have the same intellectual curiosity that I think I had during middle and high school. 
And, in doing this, I hope to get it back.\nThis illustrates the key goal of Fireside: a once+ per week posting, illustrating some new thing I\u0026rsquo;m aiming to learn for the week. It can even be in the classroom: but something I\u0026rsquo;m going above and beyond to try to understand. Each article will either plan something to learn, or summarize my learning in it.\nI remember seeing an article through HN at one point (its not this one, it was significantly less \u0026ldquo;ha ha! business\u0026rdquo; language, but this will do) that \u0026ldquo;You never really learn something until you write about it.\u0026rdquo; A dormmate of mine working on ray tracing also said something to the same effect. And, so, what the heck. Let\u0026rsquo;s try this out.\nA part of me wishes that this fulfills the \u0026ldquo;deliverable\u0026rdquo; in my head of \u0026ldquo;YOU AREN\u0026rsquo;T DOING ENOUGH!\u0026rdquo; whenever I spend time wandering aimlessly to try to learn something. If it has become a Fireside, it counts. I guess.\nI should also add that Fireside is named Fireside because of FDR\u0026rsquo;s Fireside Chats, where he got to directly talk to people unfiltered about his views.\nParameters Frequency At a minimum once a week. No promises though.\nNames You have already noticed one of the parameters. Unless it is a general concept or a well known thing I did, I won\u0026rsquo;t be using names throughout the articles. Somewhat contrarily, I firmly believe that the process of building shit is a very personal one. Hence, following the example of one of my favorite essayists Zhu Ziqing, I will be using the first letter of the name I refer to them to refer to all people mentioned. Of course, if you don\u0026rsquo;t want to be included, I\u0026rsquo;d be happy to pull things down.\nThemes technology in general random nerdism deep learning and language models, methods and applications the shit I get up to We Begin I hope to begin this weekend. 
I\u0026rsquo;ve spent the last while trying to train a serious deep learning model (read: OpenAI Whisper Large V2), and dying because all I have access to is 2 32GB V-100s on the PSC (and yes, I point out that this is terribly privileged statement: woe is me with a cutting edge GPU).\nHowever, the Language Model literally doesn\u0026rsquo;t fit in the damned box. So, I\u0026rsquo;m trying to learn about distributed training methods like Ray, methods of efficient tuning with LoRA, and new-fangled memory sharing things like DeepSpeed.\nStay tuned.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e a series of articles that I\u0026rsquo;m writing to consolidate my learning.\u003c/p\u003e\n\u003cp\u003eI have always dreamed of blogging. I have even tried once \u003ca href=\"https://medium.com/20minuterants\"\u003ecalled 20MinuteRants\u003c/a\u003e. They worked quite well as a basic format whereby I can write about things in a fairly efficient manner (hence the 20 minutes), and be able to reflect about the things I\u0026rsquo;m up to.\u003c/p\u003e\n\u003cp\u003eThe problem with the project is that I rarely had the motivation to do one. Once I was too busy, or out of ideas to write about, I stop. If there\u0026rsquo;s not anything to rant about, why is there a 20MinuteRant?\u003c/p\u003e\n\u003cp\u003eIndeed that has been why the blog has been on hiatus for the past many months. I suppose we can consider this entry the last of the 20MinuteRants and the first one of a new series of writings\u0026mdash;\u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e\u0026mdash;which I hope will continue for a long time.\u003c/p\u003e\n\u003ch2 id=\"impetus\"\u003eImpetus\u003c/h2\u003e\n\u003cp\u003eMy mentor D has always told me to start arguments with why. 
The larger system in which \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e is located, my knowlegebase system, has \u003ca href=\"/posts/kbhstarting_with_why_the_knowledgebase/\"\u003esuch a page\u003c/a\u003e arguing why its a good idea.\u003c/p\u003e\n\u003cp\u003eAnd here\u0026rsquo;s the why: starting university has made me surprisingly lost in terms of what I want to do. In that: there\u0026rsquo;s so much of the vicissitudes of daily life that I no longer have the same intellectual curiosity that I think I had during middle and high school. And, in doing this, I hope to get it back.\u003c/p\u003e\n\u003cp\u003eThis illustrates the key goal of \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e: a once+ per week posting, illustrating some new thing I\u0026rsquo;m aiming to learn for the week. It can even be in the classroom: but something I\u0026rsquo;m going above and beyond to try to understand. Each article will either plan something to learn, or summarize my learning in it.\u003c/p\u003e\n\u003cp\u003eI remember seeing an article through HN at one point (\u003ca href=\"https://addyosmani.com/blog/write-learn/\"\u003eits not this one, it was significantly less \u0026ldquo;ha ha! business\u0026rdquo; language, but this will do\u003c/a\u003e) that \u0026ldquo;You never really learn something until you write about it.\u0026rdquo; A dormmate of mine working on ray tracing also said something to the same effect. And, so, what the heck. Let\u0026rsquo;s try this out.\u003c/p\u003e\n\u003cp\u003eA part of me wishes that this fulfills the \u0026ldquo;deliverable\u0026rdquo; in my head of \u0026ldquo;YOU AREN\u0026rsquo;T DOING ENOUGH!\u0026rdquo; whenever I spend time wandering aimlessly to try to learn something. If it has become a \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e, it counts. 
I guess.\u003c/p\u003e\n\u003cp\u003eI should also add that \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e is named \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e because of FDR\u0026rsquo;s \u003ca href=\"/posts/kbhfireside_chats/\"\u003eFireside Chats\u003c/a\u003e, where he got to directly talk to people unfiltered about his views.\u003c/p\u003e\n\u003ch2 id=\"parameters\"\u003eParameters\u003c/h2\u003e\n\u003ch3 id=\"frequency\"\u003eFrequency\u003c/h3\u003e\n\u003cp\u003eAt a minimum once a week. No promises though.\u003c/p\u003e\n\u003ch3 id=\"names\"\u003eNames\u003c/h3\u003e\n\u003cp\u003eYou have already noticed one of the parameters. Unless it is a general concept or a well known thing I did, I won\u0026rsquo;t be using names throughout the articles. Somewhat contrarily, I firmly believe that the process of building shit is a very personal one. Hence, following the example of one of my favorite essayists \u003ca href=\"https://en.wikipedia.org/wiki/Zhu_Ziqing\"\u003eZhu Ziqing\u003c/a\u003e, I will be using the first letter of the name I refer to them to refer to all people mentioned. Of course, if you don\u0026rsquo;t want to be included, I\u0026rsquo;d be happy to pull things down.\u003c/p\u003e\n\u003ch3 id=\"themes\"\u003eThemes\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003etechnology in general\u003c/li\u003e\n\u003cli\u003erandom nerdism\u003c/li\u003e\n\u003cli\u003edeep learning and language models, methods and applications\u003c/li\u003e\n\u003cli\u003ethe shit I get up to\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"we-begin\"\u003eWe Begin\u003c/h2\u003e\n\u003cp\u003eI hope to begin this weekend. 
I\u0026rsquo;ve spent the last while trying to train a serious deep learning model (read: OpenAI Whisper Large V2), and dying because all I have access to is 2 32GB V-100s on the PSC (and yes, I point out that this is terribly privileged statement: woe is me with a cutting edge GPU).\u003c/p\u003e\n\u003cp\u003eHowever, the \u003ca href=\"/posts/kbhnlp/#language-model\"\u003eLanguage Model\u003c/a\u003e literally doesn\u0026rsquo;t fit in the damned box. So, I\u0026rsquo;m trying to learn about distributed training methods like Ray, methods of efficient tuning with LoRA, and new-fangled memory sharing things like DeepSpeed.\u003c/p\u003e\n\u003cp\u003eStay tuned.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfireside_article/","tags":["fireside"],"title":"Welcome to the Fireside"},{"categories":null,"contents":"Under Construction\n","html":"\u003cp\u003eUnder Construction\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwho_s_talking_when/","tags":null,"title":"Who's Talking When?"},{"categories":null,"contents":"A study with the goal of identifying semantic primes.\n","html":"\u003cp\u003eA study with the goal of identifying \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwhole_metalanguage_study/","tags":null,"title":"whole metalanguage study"},{"categories":null,"contents":"Why are Todo Lists (a.k.a. personal productivity systems) so hard to build well?\nI\u0026rsquo;m genuinely curious. I was listening to the last episode of Cortex, and one of the hosts (CGP Grey) brought up a similar point regarding personal productivity platforms. OmniFocus, the reigning champion of the industry for professionals looking for a deeply customized system, has been staggering in their ability to ship the next version of their application. Much of the market consists of various different packagings of the same offering. 
Grey\u0026rsquo;s thesis of these platforms essentially boils down to this:\nTodo Lists are very personal systems that requires deep customizations Yet, they need to stay very out of the way (\u0026ldquo;fit into the workflow\u0026rdquo;) of their user Plus, a point that I have noticed which was brought up briefly:\nits very easy to build a crappy one, and very hard to build a good one I particularly like Grey\u0026rsquo;s phrasing of point 3.: \u0026ldquo;there\u0026rsquo;s high desire market saturation, but no practical market saturation in the offering.\u0026rdquo; That is, \u0026ldquo;everyone has a good idea of what a good to-do list software is, and no one has been able to write one that generally works very well for \u0026ldquo;most\u0026rdquo; people.\nLearnings and Next Steps I tried once. Condution was an early experiment between me and a few of my friends to try to solve some of the issues we saw in the to-do list market at the time. There was some response within the community: within about 6 months, we got about 1000+ MAU, 10,000+ registered users; but I think there\u0026rsquo;s an undeniable sense in me that, while Condution solved the specific problem we saw in the market, it still doesn\u0026rsquo;t solve the fundamental issue that to-do list platforms have:\nCondution, like all other platforms, isn\u0026rsquo;t for everyone. And that is a problem. Peter Thiel gave this famous talk in 183B which outlines the fact that much of the illusion of \u0026ldquo;no competition\u0026rdquo; comes from the small companies trying desperatly to dominate a fiercely competitive market; to win, one truly has to dominate a market. 
The person productivity space is one which the battle for hearts and minds have created an exciting explosion of choices for the consumer, but no one system has yet emerged to engulf it all.\nThe feedback that I tracked from building Condution boils down to a few asks:\n\u0026ldquo;can we have this repeating task/date/filter behavior?\u0026rdquo; (yes, but it has to be a ticket for each one of these and god knows when it can happen) \u0026ldquo;can you be a calendar app with scheduling?\u0026rdquo; (aaaaaaaaaa so much API work; plus, scheduling is hard) \u0026ldquo;why is [this native feature] not supported?\u0026rdquo; (because we run a PWA wrapped in a WebView on mobile) \u0026ldquo;self hosting when?\u0026rdquo; / \u0026ldquo;API when?\u0026rdquo; (oh, we tried. alas) I think this boils down to a few things:\nusers want myriad behavior that we don\u0026rsquo;t possibly have time to implement ourselves users want their ability to process their own data (i.e. API, calendar, etc.), at their own pacing, without going through our server And, to be honest, I don\u0026rsquo;t think anyone on the market (barring lots of elisp code + Org mode, which really fails at \u0026ldquo;users want an experience that\u0026rsquo;s actually not limited to Desktop Emacs nuts like me\u0026rdquo;) achieves these two objectives.\nWe\u0026rsquo;ve Seen This Before Here\u0026rsquo;s the thing. This is not the world\u0026rsquo;s first rodeo to this problem. Visual Studio Code, the text editing sensation which today holds something like 75% market share, was released in 2015. Let\u0026rsquo;s examine two random blogs I found in 2013 writing about text editors:\nthis one and this one I\u0026rsquo;m of the humble opinion that Text Editors circa 2013 has the same exact set of problems as to-do lists now. 
Technology was mature enough for everyone to build one; the space is crowded enough that its was worth writing a listicle literally titled (\u0026ldquo;10 most fascinating text editors\u0026rdquo;); and each of us developers had a persnickety opinion about what our Text Editor/IDE should do: from editing text, to running code, to making coffee.\nAnd then, a disruptor.\nthat has easy to implement plugins and a web-based core which allows any and all users to customize their experience down to the tat and which makes the core experience of editing text and setting up autocomplete (bare bones basics) so easy that its inconceivable you would use anything else Our friend Visual Studio Code.\nAnd so, a proposal I don\u0026rsquo;t know what such a disruptor of the field would look like, but I\u0026rsquo;d like to try again. I would like to take a crack at building a to-do list app (YET AGAIN!) that solely focuses on these two tenants:\nultimate and intuitive extensibility by the user absolutly smooth down to a tat introductory (\u0026ldquo;basic\u0026rdquo;) experience, which is minimally opinionated but can accommodate as many modalities as possible I hope to do this while not compromising the platforms the product is available on, and the nativeness of this experience.\nIt\u0026rsquo;s a lot to ask, and its yet unclear what a solution may look like. But it never hurts to take a crack. If you want to help out with this probably open-source effort, reach out.\nand also, defer/start dates. We should have those.\n","html":"\u003cp\u003eWhy are \u003ca href=\"/posts/kbhtodo_lists/\"\u003eTodo Lists\u003c/a\u003e (a.k.a. personal productivity systems) so hard to build well?\u003c/p\u003e\n\u003cp\u003eI\u0026rsquo;m genuinely curious. I was listening to the last episode of \u003ca href=\"https://www.relay.fm/cortex/\"\u003eCortex\u003c/a\u003e, and one of the hosts (CGP Grey) brought up a similar point regarding personal productivity platforms. 
OmniFocus, the reigning champion of the industry for professionals looking for a deeply customized system, has been staggering in their ability to ship the next version of their application. Much of the market consists of various different packagings of the same offering. Grey\u0026rsquo;s thesis of these platforms essentially boils down to this:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtodo_lists/\"\u003eTodo Lists\u003c/a\u003e are very personal systems that requires deep customizations\u003c/li\u003e\n\u003cli\u003eYet, they need to stay very out of the way (\u0026ldquo;fit into the workflow\u0026rdquo;) of their user\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ePlus, a point that I have noticed which was brought up briefly:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eits very easy to build a crappy one, and very hard to build a good one\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI particularly like Grey\u0026rsquo;s phrasing of point 3.: \u0026ldquo;there\u0026rsquo;s high desire market saturation, but no practical market saturation in the offering.\u0026rdquo; That is, \u0026ldquo;everyone has a good idea of what a good to-do list software is, and no one has been able to write one that generally works very well for \u0026ldquo;most\u0026rdquo; people.\u003c/p\u003e\n\u003ch2 id=\"learnings-and-next-steps\"\u003eLearnings and Next Steps\u003c/h2\u003e\n\u003cp\u003eI tried once. \u003ca href=\"https://www.condution.com/\"\u003eCondution\u003c/a\u003e was an early experiment between me and a few of my friends to try to solve some of the issues we saw in the to-do list market at the time. 
There was some response within the community: within about 6 months, we got about 1000+ MAU, 10,000+ registered users; but I think there\u0026rsquo;s an undeniable sense in me that, while Condution solved the specific problem we saw in the market, it still doesn\u0026rsquo;t solve the fundamental issue that to-do list platforms have:\u003c/p\u003e\n\u003cp\u003eCondution, like all other platforms, isn\u0026rsquo;t for everyone. And that is a problem. Peter Thiel gave this \u003ca href=\"https://www.youtube.com/watch?v=3Fx5Q8xGU8k\"\u003efamous talk\u003c/a\u003e in 183B which outlines the fact that much of the illusion of \u0026ldquo;no competition\u0026rdquo; comes from the small companies trying desperatly to dominate a fiercely competitive market; to win, one truly has to dominate a market. The person productivity space is one which the battle for hearts and minds have created an exciting explosion of choices for the consumer, but no one system has yet emerged to engulf it all.\u003c/p\u003e\n\u003cp\u003eThe feedback that I tracked from building Condution boils down to a few asks:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;can we have this repeating task/date/filter behavior?\u0026rdquo; (yes, but it has to be a ticket for each one of these and god knows when it can happen)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;can you be a calendar app with scheduling?\u0026rdquo; (aaaaaaaaaa so much API work; plus, scheduling is hard)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;why is [this native feature] not supported?\u0026rdquo; (because we run a PWA wrapped in a WebView on mobile)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;self hosting when?\u0026rdquo; / \u0026ldquo;API when?\u0026rdquo; (oh, we tried. 
alas)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI think this boils down to a few things:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eusers want myriad behavior that we don\u0026rsquo;t possibly have time to implement ourselves\u003c/li\u003e\n\u003cli\u003eusers want their ability to process their own data (i.e. API, calendar, etc.), at their own pacing, without going through our server\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd, to be honest, I don\u0026rsquo;t think anyone on the market (barring lots of elisp code + Org mode, which really fails at \u0026ldquo;users want an experience that\u0026rsquo;s actually not limited to Desktop Emacs nuts like me\u0026rdquo;) achieves these two objectives.\u003c/p\u003e\n\u003ch2 id=\"we-ve-seen-this-before\"\u003eWe\u0026rsquo;ve Seen This Before\u003c/h2\u003e\n\u003cp\u003eHere\u0026rsquo;s the thing. This is not the world\u0026rsquo;s first rodeo to this problem. Visual Studio Code, the text editing sensation which today holds something like 75% market share, was released in 2015. Let\u0026rsquo;s examine two random blogs I found in 2013 writing about text editors:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://web.archive.org/web/20230509053757/https://www.theregister.com/2013/03/11/verity_stob_text_editor/\"\u003ethis one\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://web.archive.org/web/20230608151954/https://www.bloggersentral.com/2013/09/awesome-text-editors-for-web-developers.html\"\u003eand this one\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eI\u0026rsquo;m of the humble opinion that Text Editors circa 2013 has the same exact set of problems as to-do lists now. 
Technology was mature enough for everyone to build one; the space is crowded enough that its was worth writing a listicle literally titled (\u0026ldquo;10 most fascinating text editors\u0026rdquo;); and each of us developers had a persnickety opinion about what our Text Editor/IDE should do: from editing text, to running code, to making coffee.\u003c/p\u003e\n\u003cp\u003eAnd then, a disruptor.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethat has easy to implement plugins and a web-based core which allows any and all users to customize their experience down to the tat\u003c/li\u003e\n\u003cli\u003eand which makes the core experience of editing text and setting up autocomplete (bare bones basics) so easy that its inconceivable you would use anything else\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eOur friend Visual Studio Code.\u003c/p\u003e\n\u003ch2 id=\"and-so-a-proposal\"\u003eAnd so, a proposal\u003c/h2\u003e\n\u003cp\u003eI don\u0026rsquo;t know what such a disruptor of the field would look like, but I\u0026rsquo;d like to try again. I would like to take a crack at building a to-do list app (YET AGAIN!) that solely focuses on these two tenants:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eultimate and intuitive extensibility by the user\u003c/li\u003e\n\u003cli\u003eabsolutly smooth down to a tat introductory (\u0026ldquo;basic\u0026rdquo;) experience, which is minimally opinionated but can accommodate as many modalities as possible\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI hope to do this while not compromising the platforms the product is available on, and the nativeness of this experience.\u003c/p\u003e\n\u003cp\u003eIt\u0026rsquo;s a lot to ask, and its yet unclear what a solution may look like. But it never hurts to take a crack. If you want to help out with this probably open-source effort, reach out.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eand also, defer/start dates. 
We should have those.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtodo_lists/","tags":["writing","fireside"],"title":"Why is building a to-do list app so darn hard?"},{"categories":null,"contents":"linked files architecture for filesystem, but it caches the file links in memory when the OS is running.\nproblems data is still scattered across the disk we had to construct the file allocation table though its must faster because jumping to the middle of the file is now in memory, we are still doing O(n) search for a specific sub part ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlinked_files/\"\u003elinked files\u003c/a\u003e architecture for \u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e, but it caches the file links in memory when the OS is running.\u003c/p\u003e\n\u003ch2 id=\"problems\"\u003eproblems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edata is \u003cstrong\u003estill\u003c/strong\u003e scattered across the disk\u003c/li\u003e\n\u003cli\u003ewe had to construct the file allocation table\u003c/li\u003e\n\u003cli\u003ethough its must faster because jumping to the middle of the file is now in memory, we are still doing O(n) search for a specific sub part\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwindows_fat/","tags":null,"title":"Windows FAT"},{"categories":null,"contents":"Pay attention to:\ncases (all letters to lower case?) 
lemmatization This is often done with morphological parsing, for instance, you can try stemming.\n","html":"\u003cp\u003ePay attention to:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecases (all letters to lower case?)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003elemma\u003c/a\u003etization\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is often done with \u003ca href=\"/posts/kbhmorphological_parsing/\"\u003emorphological parsing\u003c/a\u003e, for instance, you can try \u003ca href=\"/posts/kbhmorphological_parsing/#stemming\"\u003estemming\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhword_normalization/","tags":null,"title":"Word Normalization"},{"categories":null,"contents":"we will train a classifier on a binary prediction task: \u0026ldquo;is context words \\(c_{1:L}\\) likely to show up near some target word \\(W_0\\)?\u0026rdquo;\nWe estimate the probability that \\(w_{0}\\) occurs within this window based on the product of the probabilities of the similarity of the embeddings between each context word and the target word.\nTo turn cosine similarity dot products into probability, we squish the dot product via the sigmoid function.\nimportantly, we don\u0026rsquo;t actually use these results. 
we simply take the resulting embeddings.\nproperties window size smaller windows: captures more syntax level information large windows: capture more semantic field information parallelogram model simple way to solve analogies problems with vector semantics: get the difference between two word vectors, and add it somewhere else to get an analogous transformation.\nonly words for frequent words small distances but not quite for large systems allocational harm embeddings bake in existing biases, which leads to bias in hiring practices, etc.\nskip-gram with negative sampling skip-gram trains vectors separately for word being used as target and word being used as context.\nthe mechanism for training the embedding:\nselect some \\(k\\), which is the multiplier of the negative examples (if \\(k=2\\), ever one positive example will be matched with 2 negative examples) sample a target word, and generate positive samples paired by words in its immediate window sample window size times \\(k\\) negative examples, where the noise words are chosen explicitly as not being near our target word, and weighted based on unigram frequency for each paired training sample, we minimize the loss via cross entropy loss:\n\\begin{equation} L_{CE} = -\\qty[ \\log (\\sigma(c_{pos} \\cdot w)) + \\sum_{i=1}^{k} \\log \\sigma\\qty(-c_{neg} \\cdot w)] \\end{equation}\nrecall that:\n\\begin{equation} \\pdv{L_{CE}}{w} = \\qty[\\sigma(c_{pos} \\cdot w) -1]c_{pos} + \\sum_{i=1}^{k} \\qty[\\sigma(c_{neg_{i}}\\cdot w)]c_{neg_{i}} \\end{equation}\n","html":"\u003cp\u003ewe will train a classifier on a binary prediction task: \u0026ldquo;is context words \\(c_{1:L}\\) likely to show up near some target word \\(W_0\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe estimate the probability that \\(w_{0}\\) occurs within this window based on the product of the probabilities of the similarity of the embeddings between each context word and the target word.\u003c/p\u003e\n\u003cp\u003eTo turn \u003ca 
href=\"/posts/kbhranked_information_retrieval/#cosine-similarity\"\u003ecosine similarity\u003c/a\u003e \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es into probability, we squish the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e via the \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function.\u003c/p\u003e\n\u003cp\u003eimportantly, we don\u0026rsquo;t actually use these results. we simply take the resulting embeddings.\u003c/p\u003e\n\u003ch2 id=\"properties\"\u003eproperties\u003c/h2\u003e\n\u003ch3 id=\"window-size\"\u003ewindow size\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esmaller windows\u003c/strong\u003e: captures more syntax level information\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elarge windows\u003c/strong\u003e: capture more semantic field information\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"parallelogram-model\"\u003eparallelogram model\u003c/h3\u003e\n\u003cp\u003esimple way to solve analogies problems with vector semantics: get the difference between two word vectors, and add it somewhere else to get an analogous transformation.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eonly words for frequent words\u003c/li\u003e\n\u003cli\u003esmall distances\u003c/li\u003e\n\u003cli\u003ebut not quite for large systems\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"allocational-harm\"\u003eallocational harm\u003c/h4\u003e\n\u003cp\u003eembeddings bake in existing biases, which leads to bias in hiring practices, etc.\u003c/p\u003e\n\u003ch2 id=\"skip-gram-with-negative-sampling\"\u003eskip-gram with negative sampling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#skip-gram-with-negative-sampling\"\u003eskip-gram\u003c/a\u003e trains vectors separately for word being used as target and word being used as context.\u003c/p\u003e\n\u003cp\u003ethe mechanism for training the embedding:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eselect some \\(k\\), which is the 
multiplier of the negative examples (if \\(k=2\\), ever one positive example will be matched with 2 negative examples)\u003c/li\u003e\n\u003cli\u003esample a target word, and generate positive samples paired by words in its immediate window\u003c/li\u003e\n\u003cli\u003esample window size times \\(k\\) negative examples, where the noise words are chosen explicitly as not being near our target word, and weighted based on unigram frequency\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003efor each paired training sample, we minimize the loss via \u003ca href=\"/posts/kbhcross_entropy_loss/\"\u003ecross entropy loss\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL_{CE} = -\\qty[ \\log (\\sigma(c_{pos} \\cdot w)) + \\sum_{i=1}^{k} \\log \\sigma\\qty(-c_{neg} \\cdot w)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003erecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{L_{CE}}{w} = \\qty[\\sigma(c_{pos} \\cdot w) -1]c_{pos} + \\sum_{i=1}^{k} \\qty[\\sigma(c_{neg_{i}}\\cdot w)]c_{neg_{i}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhword2vec/","tags":null,"title":"word2vec"},{"categories":null,"contents":"WPA is the largest relief program ever in the Great Depression New Deal, to promote public infrastructure and create artistic murals. It helped unskilled men to carry out public works infrastructure.\nThe project started 5/1935 and dissolved 6/1943.\n","html":"\u003cp\u003eWPA is the largest relief program ever in the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e, to promote public infrastructure and create artistic murals. 
It helped unskilled men to carry out public works infrastructure.\u003c/p\u003e\n\u003cp\u003eThe project started 5/1935 and dissolved 6/1943.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwpa/","tags":null,"title":"Works Progress Administration"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhwriting_index/","tags":null,"title":"Writing Index"},{"categories":null,"contents":" vertibre backbone: 3 points to remember \u0026ldquo;we are in the business of looking for outliers\u0026rdquo; tarpit ides vision with world + good team iStudio Meeting Notes\n","html":"\u003cul\u003e\n\u003cli\u003evertibre backbone: 3 points to remember\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;we are in the business of looking for outliers\u0026rdquo;\u003c/li\u003e\n\u003cli\u003etarpit ides\u003c/li\u003e\n\u003cli\u003evision with world + good team\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhistudio_meeting_notes/\"\u003eiStudio Meeting Notes\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhycomb/","tags":null,"title":"ycomb"},{"categories":null,"contents":"Young\u0026rsquo;s Modulus is a mechanical property that measures the stiffness of a solid material.\nIt measures the ratio between mechanical stress \\(\\sigma\\) and the relative resulting strain \\(\\epsilon\\).\nAnd so, very simply:\n\\begin{equation} E = \\frac{\\sigma }{\\epsilon } \\end{equation}\nThinking about this, silly puddy deforms very easily given a little stress, so it would have low Young\u0026rsquo;s Modulus (\\(\\sigma \\ll \\epsilon\\)); and visa versa. 
https://aapt.scitation.org/doi/10.1119/1.17116?cookieSet=1\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e is a mechanical property that measures the stiffness of a solid material.\u003c/p\u003e\n\u003cp\u003e\u003cimg src=\"/ox-hugo/2022-09-05_22-27-31_screenshot.png\" alt=\"\"\u003e\nIt measures the ratio between mechanical \u003ca href=\"/posts/kbhstress/\"\u003estress\u003c/a\u003e \\(\\sigma\\) and the relative resulting \u003ca href=\"/posts/kbhstrain/\"\u003estrain\u003c/a\u003e \\(\\epsilon\\).\u003c/p\u003e\n\u003cp\u003eAnd so, very simply:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{\\sigma }{\\epsilon }\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThinking about this, silly puddy deforms very easily given a little stress, so it would have \u003cem\u003elow\u003c/em\u003e \u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e (\\(\\sigma \\ll \\epsilon\\)); and visa versa.\n\u003ca href=\"https://aapt.scitation.org/doi/10.1119/1.17116?cookieSet=1\"\u003ehttps://aapt.scitation.org/doi/10.1119/1.17116?cookieSet=1\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhyoung_s_modulus/","tags":null,"title":"Young's Modulus"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2020.624488\nOne-Liner Used an ERNIE trained on transcripts for classification; inclusion of pause encoding made results better.\nNovelty Instead of just looking at actual speech content, look at pauses specific as a feature engineering task \\(89.6\\%\\) on the ADReSS Challenge dataset Notable Methods Applied FA with pause encoding with standard .cha semantics (short pauses, medium pauses, long pauses). 
Shoved all of this into an ERNIE.\nAssay for performance was LOO\nKey Figs Fig 1 This figure motivates the point that subjects with AD says oh and um more often; which prompted Table 1\nTable 1 Subjects with AD says uh a lot more often; no significance level calculations but ok.\nFigure 5 This figure is the result of a LOO study on the proposed model and presumably others before. X axis is the validation accuracy in question, Y is the density by which the score in X appears in an \\(N=35\\) LOO measurement.\nThis figure tells us that either way the ERNIE model is better than state of the art; furthermore, transcripts with pause encoding did better and did it better more of the time; that\u0026rsquo;s where the 89.6% came from.\nNew Concepts Leave-One-Out cross validation Notes Glorious.\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2020.624488\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUsed an ERNIE trained on transcripts for classification; inclusion of pause encoding made results better.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eInstead of just looking at actual speech content, look at pauses specific as a feature engineering task\u003c/li\u003e\n\u003cli\u003e\\(89.6\\%\\) on the \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e dataset\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-45-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eApplied FA with pause encoding with standard \u003ccode\u003e.cha\u003c/code\u003e semantics (short pauses, medium pauses, long pauses). 
Shoved all of this into an ERNIE.\u003c/p\u003e\n\u003cp\u003eAssay for performance was \u003ca href=\"/posts/kbhloo/\"\u003eLOO\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"fig-1\"\u003eFig 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-43-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure motivates the point that subjects with AD says oh and um more often; which prompted Table 1\u003c/p\u003e\n\u003ch3 id=\"table-1\"\u003eTable 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-44-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSubjects with AD says uh a lot more often; no \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003esignificance level\u003c/a\u003e calculations but ok.\u003c/p\u003e\n\u003ch3 id=\"figure-5\"\u003eFigure 5\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-55-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure is the result of a \u003ca href=\"/posts/kbhloo/\"\u003eLOO\u003c/a\u003e study on the proposed model and presumably others before. 
X axis is the validation accuracy in question, Y is the density by which the score in X appears in an \\(N=35\\) \u003ca href=\"/posts/kbhloo/\"\u003eLOO\u003c/a\u003e measurement.\u003c/p\u003e\n\u003cp\u003eThis figure tells us that either way the ERNIE model is better than state of the art; furthermore, transcripts with pause encoding did better and did it better more of the time; that\u0026rsquo;s where the 89.6% came from.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhloo/\"\u003eLeave-One-Out cross validation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eGlorious.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-41-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhyuan_2021/","tags":["ntj"],"title":"Yuan 2021"},{"categories":null,"contents":"A z-test is a hypothesis test for statistical significance between two sample proportions. Before it can be conducted, it must meet the conditions for inference for a z-test.\nconditions for inference (z-test) has to be random has to be reasonably normal (vis a vi test for normality) each sample has to be independent (or 10% rule) use a z-statistic to find p-value Given a sample proportion, calculate the sample proportion standard deviation (given on the formula sheet) Then, divide the difference between measured and null proportions to figure \\(z\\) that is,\n\\begin{equation} z = \\frac{\\hat{p}-p_0}{\\sqrt{\\frac{p_0(1-p_0)}{n}}} \\end{equation}\nLook up the probability of \\(z\\) taking place on a \\(z\\) table. Then, \\(1-z\\) would yield the \\(p\\) vaule.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhz_test/\"\u003ez-test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e for statistical significance between two sample proportions. 
Before it can be conducted, it must meet the \u003ca href=\"#conditions-for-inference--z-test\"\u003econditions for inference\u003c/a\u003e for a z-test.\u003c/p\u003e\n\u003ch2 id=\"conditions-for-inference--z-test\"\u003econditions for inference (z-test)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehas to be random\u003c/li\u003e\n\u003cli\u003ehas to be reasonably normal (vis a vi \u003ca href=\"/posts/kbhtest_for_normality/\"\u003etest for normality\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eeach sample has to be independent (or 10% rule)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"use-a-z-statistic-to-find-p-value\"\u003euse a z-statistic to find p-value\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eGiven a sample proportion, calculate the sample proportion standard deviation (given on the formula sheet)\u003c/li\u003e\n\u003cli\u003eThen, divide the difference between measured and null proportions to figure \\(z\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = \\frac{\\hat{p}-p_0}{\\sqrt{\\frac{p_0(1-p_0)}{n}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLook up the probability of \\(z\\) taking place on a \\(z\\) table. 
Then, \\(1-z\\) would yield the \\(p\\) vaule.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhz_test/","tags":null,"title":"z-test"},{"categories":null,"contents":"\\(0\\) is a list of length \\(n\\) whose coordinates are all zero\nFormally\u0026mdash;\n\\begin{equation} 0 = (0,\\ldots,0) \\end{equation}\n","html":"\u003cp\u003e\\(0\\) is a \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of length \\(n\\) whose coordinates are all zero\u003c/p\u003e\n\u003cp\u003eFormally\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (0,\\ldots,0)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzero/","tags":null,"title":"zero"},{"categories":null,"contents":"A Zero-Sum Game happens during the following situation:\nWe have two distributions \\(X\\) and \\(Y\\). A \u0026ldquo;Zero-Sum Game\u0026rdquo; is a case where:\n\\begin{equation} P(success) = P(Y \u0026gt; X) \\end{equation}\n(because \\(Y\u0026gt;X \\implies X\u0026lt; Y\\), so there\u0026rsquo;s no case whereby a situation can both \u0026ldquo;cause success\u0026rdquo;).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhzero_sum_game/\"\u003eZero-Sum Game\u003c/a\u003e happens during the following situation:\u003c/p\u003e\n\u003cp\u003eWe have two distributions \\(X\\) and \\(Y\\). 
A \u0026ldquo;\u003ca href=\"/posts/kbhzero_sum_game/\"\u003eZero-Sum Game\u003c/a\u003e\u0026rdquo; is a case where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(success) = P(Y \u0026gt; X)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(because \\(Y\u0026gt;X \\implies X\u0026lt; Y\\), so there\u0026rsquo;s no case whereby a situation can both \u0026ldquo;cause success\u0026rdquo;).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzero_sum_game/","tags":null,"title":"Zero-Sum Game"},{"categories":null,"contents":"A zettlekasten is an atomic notetaking system.\nSteps:\nLit notes brief: \u0026lt; 3 sentences write it in your own words Reference notes Take reference notes? Fleeting Notes shower notes Permanent nodes Go through each notes from above, think about how it matters to your research Try to explicitly add value to existing ideas Try to find meaningful connection between ideas finding connections How does this fit into what I know? Can this be explained? find keywords not just to store a note, but how to retrieve it \u0026ldquo;in which circumstance will I need this note\u0026rdquo; \u0026ldquo;when and how will I need this idea\u0026rdquo; ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e is an atomic notetaking system.\u003c/p\u003e\n\u003cp\u003eSteps:\u003c/p\u003e\n\u003ch2 id=\"lit-notes\"\u003eLit notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebrief: \u0026lt; 3 sentences\u003c/li\u003e\n\u003cli\u003ewrite it in your own words\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"reference-notes\"\u003eReference notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTake reference notes?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fleeting-notes\"\u003eFleeting Notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eshower notes\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"permanent-nodes\"\u003ePermanent nodes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eGo through each notes from above, 
think about how it matters to your research\u003c/li\u003e\n\u003cli\u003eTry to explicitly add value to existing ideas\u003c/li\u003e\n\u003cli\u003eTry to find meaningful connection between ideas\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"finding-connections\"\u003efinding connections\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eHow does this fit into what I know?\u003c/li\u003e\n\u003cli\u003eCan this be explained?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"find-keywords\"\u003efind keywords\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enot just to store a note, but how to retrieve it\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;in which circumstance will I need this note\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;when and how will I need this idea\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzettlekasten/","tags":null,"title":"zettlekasten"},{"categories":null,"contents":"a zettlekasten index is an index in a zettlekasten file format; it keeps track of all lists of notes. Head to Index Index for an index of indexes in this particular zettlekasten.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003ezettlekasten index\u003c/a\u003e is an index in a \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e file format; it keeps track of all lists of notes. 
Head to \u003ca href=\"/posts/kbhindex_index/\"\u003eIndex Index\u003c/a\u003e for an \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003eindex\u003c/a\u003e of \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003eindex\u003c/a\u003ees in this particular \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzettlekasten_index/","tags":null,"title":"zettlekasten index"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624683\nOne-Liner late fusion of multimodal signal on the CTP task using transformers, mobilnet, yamnet, and mockingjay\nNovelty Similar to Martinc 2021 and Shah 2021 but actually used the the current Neural-Network state of the art Used late fusion again after the base model training Proposed that inconsistency in the diagnoses of MMSE scores could be a great contributing factor to multi-task learning performance hindrance Notable Methods Proposed base model for transfer learning from text based on MobileNet (image), YAMNet (audio), Mockingjay (speech) and BERT (text) Data all sourced from recording/transcribing/recognizing CTP task Key Figs Figure 3 and 4 This figure tells us the late fusion architecture used\nTable 2 Pre-training with an existing dataset had (not statistically quantified) improvement against a randomly seeded model.\nTable 3 Concat/Add fusion methods between audio and text provided even better results; confirms Martinc 2021 on newer data\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624683\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of multimodal signal on the \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task using transformers, mobilnet, yamnet, and mockingjay\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSimilar to \u003ca 
href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e and \u003ca href=\"/posts/kbhshah_2021/\"\u003eShah 2021\u003c/a\u003e but actually used the the current Neural-Network state of the art\u003c/li\u003e\n\u003cli\u003eUsed \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e again after the base model training\u003c/li\u003e\n\u003cli\u003eProposed that inconsistency in the diagnoses of \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e scores could be a great contributing factor to multi-task learning performance hindrance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eProposed base model for transfer learning from text based on MobileNet (image), YAMNet (audio), Mockingjay (speech) and BERT (text)\u003c/li\u003e\n\u003cli\u003eData all sourced from recording/transcribing/recognizing \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-3-and-4\"\u003eFigure 3 and 4\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_10-54-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e architecture used\u003c/p\u003e\n\u003ch3 id=\"table-2\"\u003eTable 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_10-55-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePre-training with an existing dataset had (not statistically quantified) improvement against a randomly seeded model.\u003c/p\u003e\n\u003ch3 id=\"table-3\"\u003eTable 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_10-56-22_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eConcat/Add fusion methods between audio and text provided even better results; confirms \u003ca 
href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e on newer data\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzhu_2021/","tags":["ntj"],"title":"Zhu 2021"},{"categories":null,"contents":"Zinc is toxic in excess; so to manage Zinc is important.\nregulating zinc uptake\n","html":"\u003cp\u003eZinc is toxic in excess; so to manage Zinc is important.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhregulating_zinc_uptake/\"\u003eregulating zinc uptake\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzinc_abc_transporters/","tags":null,"title":"Zinc ABC Transporters"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgaussian_mixture_model/","tags":null,"title":"zzzzzz"}] \ No newline at end of file +[{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhassembly/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhistudio_meeting_nodes/","tags":null,"title":""},{"categories":null,"contents":"Separated qubits don\u0026rsquo;t really like to interact. Instead, then, we just make them bigger and control them at the same time. We can implement gates via a sequence of pulses. If you work with interacting qubits a lot, you will end up with the APR Paradox.\nIf you take two qubits, and move them though two gates, you essentially will get entangled results.\nTo make this works, you will need to take some probability. Know correlation, expectation, etc.\n","html":"\u003cp\u003eSeparated qubits don\u0026rsquo;t really like to interact. Instead, then, we just make them bigger and control them at the same time. We can implement gates via a sequence of pulses. 
If you work with interacting \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003es a lot, you will end up with the \u003ca href=\"/posts/kbhapr_paradox/\"\u003eAPR Paradox\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you take two \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003es, and move them though two gates, you essentially will get \u003ca href=\"/posts/kbhentangled/\"\u003eentangled\u003c/a\u003e results.\u003c/p\u003e\n\u003cp\u003eTo make this works, you will need to take some probability. Know \u003ca href=\"/posts/kbhcorrelation/\"\u003ecorrelation\u003c/a\u003e, \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e, etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaking_qubits_interact/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpoint_estimate/","tags":null,"title":""},{"categories":null,"contents":" \\(A\\) does all the asking, \\(B\\) has all the decision making power Population \\(A\\)\u0026rsquo;s match never goes up at best, they stay the same Population \\(B\\)\u0026rsquo;s match can never go down. At worse, they stay the same. Population \\(A\\) always ends up with the highest-preferred person in their realm of possibility Population \\(B\\) always ends up with the lowest-preferred person in their realm of possibility ","html":"\u003cul\u003e\n\u003cli\u003e\\(A\\) does all the asking, \\(B\\) has all the decision making power\u003c/li\u003e\n\u003cli\u003ePopulation \\(A\\)\u0026rsquo;s match \u003cem\u003enever\u003c/em\u003e goes up at best, they stay the same\u003c/li\u003e\n\u003cli\u003ePopulation \\(B\\)\u0026rsquo;s match can \u003cem\u003enever\u003c/em\u003e go down. 
At worse, they stay the same.\u003c/li\u003e\n\u003cli\u003ePopulation \\(A\\) always ends up with the highest-preferred person in their realm of possibility\u003c/li\u003e\n\u003cli\u003ePopulation \\(B\\) always ends up with the lowest-preferred person in their realm of possibility\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproperties_of_the_stable_matching_algorithm/","tags":null,"title":""},{"categories":null,"contents":" \u0026ldquo;Are the nodes system independent of the class system?\u0026rdquo; Does the model require a set of L2 class? Can we build the model to take advantage of as many 10* things as possible? A preso Demo of a kid moving through MVP vis a vis advantage over just taking all classes Naming skills that would go on the graph Figuring: comparability with flattening like in a L1 system ","html":"\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Are the nodes system independent of the class system?\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eDoes the model require a set of L2 class?\n\u003cul\u003e\n\u003cli\u003eCan we build the model to take advantage of as many 10* things as possible?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eA preso\n\u003cul\u003e\n\u003cli\u003eDemo of a kid moving through MVP vis a vis advantage over just taking all classes\u003c/li\u003e\n\u003cli\u003eNaming skills that would go on the graph\u003c/li\u003e\n\u003cli\u003eFiguring: comparability with flattening like in a L1 system\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrnn_notes/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrural_hospitals_problem/","tags":null,"title":""},{"categories":null,"contents":"The Stable Matching Problem is Wes Chao\u0026rsquo;s favourite algorithm.\nConsider two populations, \\(A\\) and \\(B\\), who want to form paired relationships between a person 
\\(A\\) and \\(B\\). \\(A_i\\) has a list of their ranked order matches (I want to be paired with \\(B_1\\) most, \\(B_4\\) second, etc.), and so does \\(B_i\\) (I want to be paired with \\(A_4\\) most \\(A_9\\) second, etc.)\nWe want to discover a stable matching, where pairs are most unwilling to move. We can solve it using the stable matching algorithm.\nNueva Invention Studio speed-dating noises?\napplications of the stable matching problem Dating Applying to college Both of these are high-stress situations, especially if you are doing asking You can mathematically prove that person doing the asking gets the best result Hence, it shows us that the best possible outcomes go to the people who are willing to ask and get rejected.\nextensions to the stable matching problem the stable matching problem can be extended to the rural hospitals problem, which is slightly better.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhstable_matching_problem/\"\u003eStable Matching Problem\u003c/a\u003e is \u003ca href=\"\"\u003eWes Chao\u003c/a\u003e\u0026rsquo;s favourite algorithm.\u003c/p\u003e\n\u003cp\u003eConsider two populations, \\(A\\) and \\(B\\), who want to form paired relationships between a person \\(A\\) and \\(B\\). \\(A_i\\) has a list of their ranked order matches (I want to be paired with \\(B_1\\) most, \\(B_4\\) second, etc.), and so does \\(B_i\\) (I want to be paired with \\(A_4\\) most \\(A_9\\) second, etc.)\u003c/p\u003e\n\u003cp\u003eWe want to discover a \u003ca href=\"\"\u003estable matching\u003c/a\u003e, where pairs are most unwilling to move. 
We can solve it using the \u003ca href=\"\"\u003estable matching algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"\"\u003eNueva Invention Studio\u003c/a\u003e speed-dating noises?\u003c/p\u003e\n\u003ch2 id=\"applications-of-the-stable-matching-problem--kbhstable-matching-problem-dot-md\"\u003eapplications of the \u003ca href=\"/posts/kbhstable_matching_problem/\"\u003estable matching problem\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDating\u003c/li\u003e\n\u003cli\u003eApplying to college\u003c/li\u003e\n\u003cli\u003eBoth of these are high-stress situations, especially if you are doing asking\u003c/li\u003e\n\u003cli\u003eYou can mathematically prove that \u003cem\u003eperson doing the asking gets the best result\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHence, it shows us that the \u003cstrong\u003e\u003cstrong\u003ebest possible outcomes go to the people who are willing to ask and get rejected.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"extensions-to-the-stable-matching-problem\"\u003eextensions to the stable matching problem\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhstable_matching_problem/\"\u003estable matching problem\u003c/a\u003e can be extended to the \u003ca href=\"/posts/kbhrural_hospitals_problem/\"\u003erural hospitals problem\u003c/a\u003e, which is slightly better.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstable_matching_problem/","tags":null,"title":""},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhz_score/","tags":null,"title":""},{"categories":null,"contents":"\\begin{align} v+(-1)v \u0026amp;= (1+(-1))v \\\\ \u0026amp;= 0v \\\\ \u0026amp;= 0 \\end{align}\nAs \\((-1)v=0\\), \\((-1)v\\) is the additive identity of \\(v\\) which we defined as \\(-v\\) \\(\\blacksquare\\).\n","html":"\u003cp\u003e\\begin{align}\nv+(-1)v \u0026amp;= (1+(-1))v \\\\\n\u0026amp;= 0v \\\\\n\u0026amp;= 
0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAs \\((-1)v=0\\), \\((-1)v\\) is the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e of \\(v\\) which we defined as \\(-v\\) \\(\\blacksquare\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbh1v_1/","tags":null,"title":"-1v=-v"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhq/","tags":null,"title":":q"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhw/","tags":null,"title":":w"},{"categories":null,"contents":"\\begin{align} 0v \u0026amp;= (0+0)v \\\\ \u0026amp;= 0v+0v \\end{align}\nGiven scalar multiplication is closed, \\(0v \\in V\\), which means \\(\\exists -0v:0v+(-0v)=0\\). Applying that to both sides:\n\\begin{equation} 0 = 0v\\ \\blacksquare \\end{equation}\nThe opposite proof of \\(\\lambda 0=0\\) but vectors work the same exact way.\n","html":"\u003cp\u003e\\begin{align}\n0v \u0026amp;= (0+0)v \\\\\n\u0026amp;= 0v+0v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eGiven \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e, \\(0v \\in V\\), which means \\(\\exists -0v:0v+(-0v)=0\\). 
Applying that to both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = 0v\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe opposite proof of \\(\\lambda 0=0\\) but vectors work the same exact way.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzero_times_vector/","tags":null,"title":"0v=0"},{"categories":null,"contents":"eigenvalue is the scalar needed to scale the basis element of a one dimensional invariant subspace of a Linear Map to represent the behavior of the map:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nNote we require \\(v \\neq 0\\) because otherwise all scalars count.\neigenvector is a vector that forms the basis list of length 1 of that 1-D invariant subspace under \\(T\\).\n\u0026ldquo;operators own eigenvalues, eigenvalues own eigenvectors\u0026rdquo;\nWhy is eigenvalue consistent per eigenvector? Because a linear map has to act on the same way to something\u0026rsquo;s basis as it does to the whole space.\nMotivation Take some subspace \\(U \\subset V\\):\n\\begin{equation} U = \\{\\lambda v\\ |\\ \\lambda \\in \\mathbb{F}, v \\in V\\} = span(v) \\end{equation}\nNow, if \\(T|_{U}\\) is an operator on \\(U\\), \\(U\\) would be an invariant subspace of \\(T\\) of dimension 1 (its basis being the list \\(\\{v\\}\\)).\nTherefore, for some vector \\(v \\in U\\) (basically like various scalings of \\(v\\)), \\(T\\) will always send back to \\(U\\) so we can represent it yet again with another scalar on \\(v\\), like \\(\\lambda v\\).\nIn this case, then, we can write that:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nAnd then the usual definition of eigenvalues persist.\nconstituents linear map \\(T \\in \\mathcal{L}(V)\\) vector \\(v \\in V\\), such that \\(v \\neq 0\\) scalar \\(\\lambda \\in \\mathbb{F}\\) requirements If there exists \\(v \\in V\\) such that \\(v\\neq 0\\) and:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nthen, \\(\\lambda\\) is called an eigenvalue, and \\(v\\) the 
eigenvector.\nadditional information properties of eigenvalues Suppose \\(V\\) in finite-dimensional, \\(T \\in \\mathcal{L}(V)\\) and \\(\\lambda \\in \\mathbb{F}\\), then:\n\\(\\lambda\\) is an eigenvalue of \\(T\\) \\(T - \\lambda I\\) is not injective \\(T - \\lambda I\\) is not surjective \\(T - \\lambda I\\) is not invertable Showing one shows all.\nProof:\n\\(1 \\implies 2\\) Suppose \\(\\lambda\\) is an eigenvalue of \\(T\\). Then, we have some \\(v \\in V\\) such that:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nNow:\n\\begin{align} \u0026amp;Tv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; Tv - \\lambda v = 0 \\\\ \\Rightarrow\\ \u0026amp; Tv - \\lambda Iv = 0 \\\\ \\Rightarrow\\ \u0026amp; (T-\\lambda I)v = 0 \\end{align}\nthe last step by \\((T+S)v = Tv+Sv\\), the property of the vector space of \\(\\mathcal{L}(V)\\) (or any \\(\\mathcal{L}\\)).\nAnd therefore, \\(v \\in null\\ (T-\\lambda I)\\), and \\(v\\neq 0\\). And so \\(null\\ (T-\\lambda I) \\neq \\{0\\}\\) and so \\(T-\\lambda I\\) is not injective, as desired.\nThe reverse of this result shows the opposite direction that \\(1 \\implies 2\\).\nThe others \\(I \\in \\mathcal{L}(V)\\), \\(T \\in \\mathcal{L}(V)\\), \\(\\mathcal{L}(V)\\) is closed, so \\((T - \\lambda I) \\in \\mathcal{L}(V)\\), and so it is an operator. Having 2) implies all other conditions of non-injectivity, non-surjectivity, non-invertiblility by injectivity is surjectivity in finite-dimensional operators\nlist of eigenvectors are linearly independent Let \\(T \\in \\mathcal{L}(V)\\), suppose \\(\\lambda_{j}\\) are distinct eigenvalues of \\(T\\), and \\(v_1, \\ldots, v_{m}\\) the corresponding eigenvectors, then \\(v_1, \\ldots, v_{m}\\) is linearly independent.\nproof:\nWe will show this by contradiction. 
Suppose \\(v_1, \\ldots, v_{m}\\) are linearly dependent; then, by the Linear Dependence Lemma, \\(\\exists v_{j}\\) such that:\n\\begin{equation} v_{j} \\in span(v_1, \\dots, v_{j-1}) \\end{equation}\nMeaning:\n\\begin{equation} v_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1} \\end{equation}\nGiven the list is a list of eigenvalues, we can apply \\(T\\) to both sides to get:\n\\begin{equation} \\lambda_{j}v_{j} = a_1\\lambda_{1}v_1 + \\dots + a_{j-1}\\lambda_{j-1}v_{j-1} \\end{equation}\nWe can also get another definition for \\(\\lambda_{j} v_{j}\\) by simply multiplying the definition for \\(v_{j}\\) above by \\(\\lambda_{j}\\):\n\\begin{align} \u0026amp;v_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1}\\ \\text{from above} \\\\ \\Rightarrow\\ \u0026amp; \\lambda_{j} v_{j} = a_1\\lambda_{j}v_1 + \\dots + a_{j-1}\\lambda_{j}v_{j-1} \\end{align}\nNow, subtracting our two definitions of \\(\\lambda_{j} v_{j}\\), we get:\n\\begin{equation} 0 = a_1 (\\lambda_{j} - \\lambda_{1})v_{1} + \\dots +a_{j-1} (\\lambda_{j} - \\lambda_{j-1})v_{j-1} \\end{equation}\nRecall now that the eigenvalue list \\(\\lambda_{j}\\) are distinct. This means all \\(\\lambda_{j} - \\lambda_{k \\neq j} \\neq 0\\). No \\(v_{j} =0\\); so if we choose the smallest positive integer for \\(j\\), the list before it \\(v_1, \\dots, v_{j-1}\\) is linearly independent (as no value in that list would satisfy the Linear Dependence Lemma). This makes \\(a_{j} =\\dots =a_{j-1} = 0\\).\nAnd yet, substituting this back into the expression for \\(v_{j}\\), we have \\(v_{j} = 0\\), reaching contradiction. So therefore, the list of eigenvectors are linearly independent. 
\\(\\blacksquare\\)\noperators on finite dimensional V has at most dim V eigenvalues As a corollary of the above result, suppose \\(V\\) is finite dimensional; then, each operator on \\(V\\) has at most \\(dim\\ V\\) distinct eigenvalues because their eigenvectors form an linearly independent list and length of linearly-independent list \\(\\leq\\) length of spanning list.\neigenspaces are disjoint the eigenspaces of a Linear Map form a direct sum:\nproof:\nCorollary of result above. Because eigenvectors (i.e. bases) from distinct eigenspaces are linearly independent. So the only way to write \\(0\\) is by taking each to \\(0\\). So by taking the bases all to \\(0\\), you take the \\(0\\) vector from each space, which shows that the eigenspaces are a direct sum. \\(\\blacksquare\\)\nfinding eigenvalues with actual numbers \\begin{equation} \\lambda_{j} \\in Spec(T) \\Rightarrow det(\\lambda_{j}I-T) = 0 \\end{equation}\nThe right polynomial \\(det(\\lambda_{j} I-T) = 0\\) is named the \u0026ldquo;characteristic polynomial.\u0026rdquo;\nnatural choordinates of a map Given the eigenvectors \\((x+,y+), (x-,y-)\\), we can change coordinates of your matrix into the natural choordinates.\n\\begin{equation} A = \\begin{pmatrix} x+ \u0026amp; x- \\\\y+ \u0026amp; y- \\end{pmatrix} \\begin{pmatrix} \\lambda+ \u0026amp; 0 \\\\ 0 \u0026amp; \\lambda- \\end{pmatrix} \\begin{pmatrix} x+ \u0026amp; x- \\\\y+ \u0026amp; y- \\end{pmatrix}^{-1} \\end{equation}\nThis makes scaling matricides much much easier. If you think about multiplying the above matrix \\(n\\) times, the inverse and non-inverse cancells out.\nsimilar matrices Let \\(A,B\\) be defined:\n\\begin{equation} A = C B C^{-1} \\end{equation}\nand of course:\n\\begin{equation} B = C^{-1} B C \\end{equation}\nwhere, \\(A,B,C \\in \\mathcal{L}(V)\\)\n\\(A, B\\) has the same eigenvalues.\ninvertable matricies Let \\(T \\in \\mathcal{L}(V)\\) be invertable. 
If \\(\\lambda\\) is an eigenvalue of \\(T\\), then \\(\\frac{1}{\\lambda}\\) is an eigenvalue of \\(T\\). Furthermore, \\(T\\) and \\(T^{-1}\\) share eigenvectors with eigenvalues \\(\\lambda\\) and \\(\\frac{1}{\\lambda}\\)\nsymmetric matricies have a real basis of eigenvalues this falls out of the real spectral theorem.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e is the scalar needed to scale the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e element of a one \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003eal \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e to represent the behavior of the map:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote we require \\(v \\neq 0\\) because otherwise all scalars count.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e that forms the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e list of length 1 of that 1-D \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e under \\(T\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es own \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es own \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWhy is \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e consistent per \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e? 
Because a linear map has to act on the same way to something\u0026rsquo;s basis as it does to the whole space.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eTake some subspace \\(U \\subset V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU = \\{\\lambda v\\ |\\ \\lambda \\in \\mathbb{F}, v \\in V\\} = span(v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, if \\(T|_{U}\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(U\\), \\(U\\) would be an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e of \\(T\\) of dimension 1 (its \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e being the list \\(\\{v\\}\\)).\u003c/p\u003e\n\u003cp\u003eTherefore, for some vector \\(v \\in U\\) (basically like various scalings of \\(v\\)), \\(T\\) will always send back to \\(U\\) so we can represent it yet again with another scalar on \\(v\\), like \\(\\lambda v\\).\u003c/p\u003e\n\u003cp\u003eIn this case, then, we can write that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then the usual definition of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es persist.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elinear map \\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003cli\u003evector \\(v \\in V\\), such that \\(v \\neq 0\\)\u003c/li\u003e\n\u003cli\u003escalar \\(\\lambda \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eIf there exists \\(v \\in V\\) such that \\(v\\neq 0\\) and:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethen, \\(\\lambda\\) is called an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e, and \\(v\\) the \u003ca 
href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-eigenvalue--kbheigenvalue-dot-md--s\"\u003eproperties of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eSuppose \\(V\\) in \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, \\(T \\in \\mathcal{L}(V)\\) and \\(\\lambda \\in \\mathbb{F}\\), then:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\)\u003c/li\u003e\n\u003cli\u003e\\(T - \\lambda I\\) is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T - \\lambda I\\) is not \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T - \\lambda I\\) is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eShowing one shows all.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003ch4 id=\"1-implies-2\"\u003e\\(1 \\implies 2\\)\u003c/h4\u003e\n\u003cp\u003eSuppose \\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
Then, we have some \\(v \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Tv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; Tv - \\lambda v = 0 \\\\\n\\Rightarrow\\ \u0026amp; Tv - \\lambda Iv = 0 \\\\\n\\Rightarrow\\ \u0026amp; (T-\\lambda I)v = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethe last step by \\((T+S)v = Tv+Sv\\), the property of the vector space of \\(\\mathcal{L}(V)\\) (or any \\(\\mathcal{L}\\)).\u003c/p\u003e\n\u003cp\u003eAnd therefore, \\(v \\in null\\ (T-\\lambda I)\\), and \\(v\\neq 0\\). And so \\(null\\ (T-\\lambda I) \\neq \\{0\\}\\) and so \\(T-\\lambda I\\) is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, as desired.\u003c/p\u003e\n\u003cp\u003eThe reverse of this result shows the opposite direction that \\(1 \\implies 2\\).\u003c/p\u003e\n\u003ch4 id=\"the-others\"\u003eThe others\u003c/h4\u003e\n\u003cp\u003e\\(I \\in \\mathcal{L}(V)\\), \\(T \\in \\mathcal{L}(V)\\), \\(\\mathcal{L}(V)\\) is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e, so \\((T - \\lambda I) \\in \\mathcal{L}(V)\\), and so it is an operator. 
Having 2) implies all other conditions of non-injectivity, non-surjectivity, non-invertiblility by \u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"list-of-eigenvectors-are-linearly-independent--kbhlinear-independence-dot-md\"\u003elist of eigenvectors are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eLet \\(T \\in \\mathcal{L}(V)\\), suppose \\(\\lambda_{j}\\) are distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(T\\), and \\(v_1, \\ldots, v_{m}\\) the corresponding \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, then \\(v_1, \\ldots, v_{m}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eproof:\u003c/p\u003e\n\u003cp\u003eWe will show this by contradiction. 
Suppose \\(v_1, \\ldots, v_{m}\\) are \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e; then, by the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, \\(\\exists v_{j}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j} \\in span(v_1, \\dots, v_{j-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven the list is a list of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, we can apply \\(T\\) to both sides to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{j}v_{j} = a_1\\lambda_{1}v_1 + \\dots + a_{j-1}\\lambda_{j-1}v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can also get another definition for \\(\\lambda_{j} v_{j}\\) by simply multiplying the definition for \\(v_{j}\\) above by \\(\\lambda_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;v_{j} = a_1v_1 + \\dots + a_{j-1}v_{j-1}\\ \\text{from above} \\\\\n\\Rightarrow\\ \u0026amp; \\lambda_{j} v_{j} = a_1\\lambda_{j}v_1 + \\dots + a_{j-1}\\lambda_{j}v_{j-1}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, subtracting our two definitions of \\(\\lambda_{j} v_{j}\\), we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_1 (\\lambda_{j} - \\lambda_{1})v_{1} + \\dots +a_{j-1} (\\lambda_{j} - \\lambda_{j-1})v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall now that the eigenvalue list \\(\\lambda_{j}\\) are distinct. This means all \\(\\lambda_{j} - \\lambda_{k \\neq j} \\neq 0\\). 
No \\(v_{j} =0\\); so if we choose the smallest positive integer for \\(j\\), the list before it \\(v_1, \\dots, v_{j-1}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e (as no value in that list would satisfy the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e). This makes \\(a_{j} =\\dots =a_{j-1} = 0\\).\u003c/p\u003e\n\u003cp\u003eAnd yet, substituting this back into the expression for \\(v_{j}\\), we have \\(v_{j} = 0\\), reaching contradiction. So therefore, the list of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent.\u003c/a\u003e \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch4 id=\"operators-on-finite-dimensional-v-has-at-most-dim-v-eigenvalue--kbheigenvalue-dot-md--s\"\u003eoperators on finite dimensional V has at most dim V \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003eAs a corollary of the above result, suppose \\(V\\) is finite dimensional; then, each \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(V\\) has at most \\(dim\\ V\\) distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es because their \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es form an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list and \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"eigenspaces-are-disjoint\"\u003eeigenspaces are disjoint\u003c/h4\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e form a \u003ca 
href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eproof:\u003c/p\u003e\n\u003cp\u003eCorollary of result above. Because \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es (i.e. bases) from distinct \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. So the only way to write \\(0\\) is by taking each to \\(0\\). So by taking the bases all to \\(0\\), you take the \\(0\\) vector from each space, which shows that the \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es are a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"finding-eigenvalues-with-actual-numbers\"\u003efinding eigenvalues with actual numbers\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{j} \\in Spec(T) \\Rightarrow det(\\lambda_{j}I-T) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe right polynomial \\(det(\\lambda_{j} I-T) = 0\\) is named the \u0026ldquo;characteristic polynomial.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"natural-choordinates-of-a-map\"\u003enatural choordinates of a map\u003c/h3\u003e\n\u003cp\u003eGiven the eigenvectors \\((x+,y+), (x-,y-)\\), we can change coordinates of your matrix into the natural choordinates.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\begin{pmatrix}\nx+ \u0026amp; x- \\\\y+ \u0026amp; y-\n\\end{pmatrix} \\begin{pmatrix}\n\\lambda+ \u0026amp; 0 \\\\ 0 \u0026amp; \\lambda-\n\\end{pmatrix} \\begin{pmatrix}\nx+ \u0026amp; x- \\\\y+ \u0026amp; y-\n\\end{pmatrix}^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis makes scaling matricides much much easier. 
If you think about multiplying the above matrix \\(n\\) times, the inverse and non-inverse cancells out.\u003c/p\u003e\n\u003ch3 id=\"similar-matrices\"\u003esimilar matrices\u003c/h3\u003e\n\u003cp\u003eLet \\(A,B\\) be defined:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = C B C^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand of course:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = C^{-1} B C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(A,B,C \\in \\mathcal{L}(V)\\)\u003c/p\u003e\n\u003cp\u003e\\(A, B\\) has the same \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"invertable-matricies\"\u003einvertable matricies\u003c/h3\u003e\n\u003cp\u003eLet \\(T \\in \\mathcal{L}(V)\\) be \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. If \\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\), then \\(\\frac{1}{\\lambda}\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
Furthermore, \\(T\\) and \\(T^{-1}\\) share \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es with eigenvalues \\(\\lambda\\) and \\(\\frac{1}{\\lambda}\\)\u003c/p\u003e\n\u003ch3 id=\"symmetric-matricies-have-a-real-basis-of-eigenvalues\"\u003esymmetric matricies have a real basis of eigenvalues\u003c/h3\u003e\n\u003cp\u003ethis falls out of the real \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheigenvalue/","tags":null,"title":"1-d invariant subspace"},{"categories":null,"contents":" New Deal ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbh1980s_political_alignment/","tags":null,"title":"1980s Political Alignment"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbh1a/","tags":null,"title":"1a"},{"categories":null,"contents":"Galactica test Ka\u0026rsquo;Chava\n","html":"\u003cp\u003e\u003ca href=\"\"\u003eGalactica\u003c/a\u003e test \u003ca href=\"/posts/kbhka_chava/\"\u003eKa\u0026rsquo;Chava\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/2023-02-26/","tags":null,"title":"2023-02-26"},{"categories":null,"contents":"Suppose \\(T\\) is a function from \\(V\\) to \\(W\\). 
Let the \u0026ldquo;graph\u0026rdquo; of \\(T\\) be the subset of \\(V \\times W\\) such that:\n\\begin{equation} graph\\ T = \\{(v,Tv) \\in V \\times W \\mid v \\in V\\} \\end{equation}\nShow that \\(T\\) is a linear map IFF the graph of \\(T\\) is a subspace of \\(V \\times W\\).\nReview: A Linear Map Recall that a function \\(T: V \\to W\\) is called a linear map if it is a map that\u0026hellip;\nis additive: so \\(Tv + Tu = T(v+u): v,u \\in V\\) is homogeneous, so \\(\\lambda Tv = T\\lambda v: \\lambda \\in \\mathbb{F}, v \\in V\\) Given Graph is Subspace Given the graph of \\(T\\) is a subspace of \\(V \\times W\\), we desire that the function \\(T\\) is a linear map and therefore additive and homogeneous.\nBy declaration before, \\(graph\\ T\\) is a subspace, meaning it would be closed under adddition and scalar multiplication. We will use this fact to show that \\(T\\) follows the properties of a linear map.\nAdditivity We first desire that \\(T\\) is additive, that is, for \\(v,u \\in V\\), we desire \\(Tv + Tu = T(v+u)\\).\nLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\n\\begin{equation} \\begin{cases} a = (v,Tv) \\in V \\times W \\\\ b = (u,Tu) \\in V \\times W \\end{cases} \\end{equation}\nWe are given that \\(graph\\ T\\) is a subspace of \\(T\\). As such, it is closed under addition; meaning, the sum of two elements from the space must remain in the space. Therefore:\n\\begin{equation} (v, Tv) + (u,Tu) = (v+u, Tv+Tu) \\in graph\\ T \\end{equation}\nAnd now, the latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\n\\begin{equation} c := (n, Tn) = (v+u, Tv+Tu) \\end{equation}\nTaking the latter equivalence and solving for \\(n\\), we have that \\(n = v+u\\). And so, we have that:\n\\begin{equation} (v+u, T(v+u)) = (v+u, Tv+Tu) \\end{equation}\nTherefore, \\(T(v+u) = Tv+Tu\\), as desired.\nHomogeneity We now desire that \\(T\\) is homogeneous. 
That is, for \\(v \\in V, \\lambda \\in \\mathbb{F}\\), we desire \\(\\lambda Tv = T\\lambda v\\).\nLet \\(v \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and \\(a \\in graph\\ T\\) declared as follows:\n\\begin{equation} a = (v, Tv) \\in V \\times W \\end{equation}\nBy the same logic before, \\(graph\\ T\\) is closed under scalar multiplication; meaning, the product of en element from the space to a scalar remain in the space. Therefore:\n\\begin{equation} \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) \\in graph\\ T \\end{equation}\nThe latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\n\\begin{equation} c :=(n,Tn) = (\\lambda v, \\lambda Tv) \\end{equation}\nTaking the latter equivalence and solving for \\(n\\), we have \\(n = \\lambda v\\). And so, we have:\n\\begin{equation} (\\lambda v, T \\lambda v) = (\\lambda v, \\lambda Tv) \\end{equation}\nAnd therefore, \\(T\\lambda v = \\lambda Tv\\), as desired.\nHaving shown that \\(T\\) is now both additive and homogeneous, we have that \\(T\\) is a linear map, as desired.\nGiven \\(T\\) is a Linear Map We will essentially prove the previous condition backwards.\nWe are given that the graph of \\(T\\) is a subset of \\(V \\times W\\), and that \\(T: V \\to W\\) is a linear map. We desire that the graph of \\(T\\) is a subspace of \\(V \\times W\\).\nRecall that to show that a subset is a subspace, on simply has to show that it has closed operations and that it contains the additive identity.\nAdditive Identity Recall that the additive identity in \\(V \\times W\\) is the tuple that\u0026rsquo;s identically \\((0,0) \\in V \\times W\\).\nAs \\(V\\) is a vector space, \\(0 \\in V\\). Any linear map will send \\(0\\) to \\(0\\). 
Therefore, \\(T 0 = 0\\).\nTherefore, construct \\(a \\in graph\\ T\\):\n\\begin{equation} a = (0, T 0) \\in V \\times W = (0, 0) \\end{equation}\nBy construction, we have shown that the additive identity of \\(V \\times W\\) is in \\(graph\\ T\\).\nClosure of Addition Given WLOG \\(a,b \\in graph\\ T\\), we desire that \\(a+b \\in graph\\ T\\).\nLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\n\\begin{equation} \\begin{cases} a = (v,Tv) \\in V \\times W \\\\ b = (u,Tu) \\in V \\times W \\end{cases} \\end{equation}\nNow:\n\\begin{equation} a+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu) \\end{equation}\nGiven \\(T\\) is a linear map, we have WLOG \\(Tv+Tu = T(v+u)\\). And therefore:\n\\begin{equation} a+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu) = (v+u, T(v+u)) \\in \\{(v,Tv) \\mid v \\in V\\} \\end{equation}\nHence, \\(graph\\ T\\) is closed under addition.\nClosure of Scalar Multiplication Given WLOG \\(a \\in graph\\ T, \\lambda \\in \\mathbb{F}\\), we desire that \\(\\lambda a \\in graph\\ T\\).\nLet \\(v \\in V, \\lambda \\in \\mathbb{F}\\), and let \\(a \\in graph\\ T\\) declared as follows:\n\\begin{equation} a = (v,Tv) \\in V \\times W \\end{equation}\nNow:\n\\begin{equation} \\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) \\end{equation}\nGiven \\(T\\) is a linear map, we have WLOG \\(\\lambda Tv = T\\lambda v\\). And therefore:\n\\begin{equation} \\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) = (\\lambda v, T \\lambda v)\\in \\{(v,Tv) \\mid v \\in V\\} \\end{equation}\nHence, \\(graph\\ T\\) is closed under scalar multiplication.\nHaving shown \\(graph\\ T\\) to be closed under addition and scalar multiplication, as well as containing the additive identity, we see that it is a subspace of \\(V \\times W\\) of which it is a subset.\nHaving shown both directions of the proof, \\(\\blacksquare\\)\n","html":"\u003cp\u003eSuppose \\(T\\) is a function from \\(V\\) to \\(W\\). 
Let the \u0026ldquo;graph\u0026rdquo; of \\(T\\) be the subset of \\(V \\times W\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ngraph\\ T = \\{(v,Tv) \\in V \\times W \\mid v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eShow that \\(T\\) is a linear map IFF the graph of \\(T\\) is a subspace of \\(V \\times W\\).\u003c/p\u003e\n\u003ch2 id=\"review-a-linear-map\"\u003eReview: A Linear Map\u003c/h2\u003e\n\u003cp\u003eRecall that a function \\(T: V \\to W\\) is called a linear map if it is a map that\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eis \u003cstrong\u003eadditive\u003c/strong\u003e: so \\(Tv + Tu = T(v+u): v,u \\in V\\)\u003c/li\u003e\n\u003cli\u003eis \u003cstrong\u003ehomogeneous\u003c/strong\u003e, so \\(\\lambda Tv = T\\lambda v: \\lambda \\in \\mathbb{F}, v \\in V\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"given-graph-is-subspace\"\u003eGiven Graph is Subspace\u003c/h2\u003e\n\u003cp\u003eGiven the graph of \\(T\\) is a subspace of \\(V \\times W\\), we desire that the function \\(T\\) is a linear map and therefore additive and homogeneous.\u003c/p\u003e\n\u003cp\u003eBy declaration before, \\(graph\\ T\\) is a subspace, meaning it would be closed under adddition and scalar multiplication. We will use this fact to show that \\(T\\) follows the properties of a linear map.\u003c/p\u003e\n\u003ch3 id=\"additivity\"\u003eAdditivity\u003c/h3\u003e\n\u003cp\u003eWe first desire that \\(T\\) is additive, that is, for \\(v,u \\in V\\), we desire \\(Tv + Tu = T(v+u)\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na = (v,Tv) \\in V \\times W \\\\\nb = (u,Tu) \\in V \\times W\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are given that \\(graph\\ T\\) is a subspace of \\(T\\). 
As such, it is closed under addition; meaning, the sum of two elements from the space must remain in the space. Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(v, Tv) + (u,Tu) = (v+u, Tv+Tu) \\in graph\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, the latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc := (n, Tn) = (v+u, Tv+Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the latter equivalence and solving for \\(n\\), we have that \\(n = v+u\\). And so, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(v+u, T(v+u)) = (v+u, Tv+Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, \\(T(v+u) = Tv+Tu\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"homogeneity\"\u003eHomogeneity\u003c/h3\u003e\n\u003cp\u003eWe now desire that \\(T\\) is homogeneous. That is, for \\(v \\in V, \\lambda \\in \\mathbb{F}\\), we desire \\(\\lambda Tv = T\\lambda v\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and \\(a \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (v, Tv) \\in V \\times W\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy the same logic before, \\(graph\\ T\\) is closed under scalar multiplication; meaning, the product of en element from the space to a scalar remain in the space. Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda (v,Tv) = (\\lambda v, \\lambda Tv) \\in graph\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe latter being in \\(graph\\ T\\) implies that \\(\\exists\\) some \\(c \\in graph\\ T\\), \\(n \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc :=(n,Tn) = (\\lambda v, \\lambda Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the latter equivalence and solving for \\(n\\), we have \\(n = \\lambda v\\). 
And so, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda v, T \\lambda v) = (\\lambda v, \\lambda Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd therefore, \\(T\\lambda v = \\lambda Tv\\), as desired.\u003c/p\u003e\n\u003cp\u003eHaving shown that \\(T\\) is now both additive and homogeneous, we have that \\(T\\) is a linear map, as desired.\u003c/p\u003e\n\u003ch2 id=\"given-t-is-a-linear-map\"\u003eGiven \\(T\\) is a Linear Map\u003c/h2\u003e\n\u003cp\u003eWe will essentially prove the previous condition backwards.\u003c/p\u003e\n\u003cp\u003eWe are given that the graph of \\(T\\) is a subset of \\(V \\times W\\), and that \\(T: V \\to W\\) is a linear map. We desire that the graph of \\(T\\) is a subspace of \\(V \\times W\\).\u003c/p\u003e\n\u003cp\u003eRecall that to show that a subset is a subspace, on simply has to show that it has closed operations and that it contains the additive identity.\u003c/p\u003e\n\u003ch3 id=\"additive-identity\"\u003eAdditive Identity\u003c/h3\u003e\n\u003cp\u003eRecall that the additive identity in \\(V \\times W\\) is the tuple that\u0026rsquo;s identically \\((0,0) \\in V \\times W\\).\u003c/p\u003e\n\u003cp\u003eAs \\(V\\) is a vector space, \\(0 \\in V\\). Any linear map will send \\(0\\) to \\(0\\). 
Therefore, \\(T 0 = 0\\).\u003c/p\u003e\n\u003cp\u003eTherefore, construct \\(a \\in graph\\ T\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (0, T 0) \\in V \\times W = (0, 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy construction, we have shown that the additive identity of \\(V \\times W\\) is in \\(graph\\ T\\).\u003c/p\u003e\n\u003ch3 id=\"closure-of-addition\"\u003eClosure of Addition\u003c/h3\u003e\n\u003cp\u003eGiven WLOG \\(a,b \\in graph\\ T\\), we desire that \\(a+b \\in graph\\ T\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v,u \\in V\\), and let \\(a,b \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na = (v,Tv) \\in V \\times W \\\\\nb = (u,Tu) \\in V \\times W\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(T\\) is a linear map, we have WLOG \\(Tv+Tu = T(v+u)\\). And therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+b = (v,Tv) + (u+Tu) = (v+u, Tv+Tu) = (v+u, T(v+u)) \\in \\{(v,Tv) \\mid v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHence, \\(graph\\ T\\) is closed under addition.\u003c/p\u003e\n\u003ch3 id=\"closure-of-scalar-multiplication\"\u003eClosure of Scalar Multiplication\u003c/h3\u003e\n\u003cp\u003eGiven WLOG \\(a \\in graph\\ T, \\lambda \\in \\mathbb{F}\\), we desire that \\(\\lambda a \\in graph\\ T\\).\u003c/p\u003e\n\u003cp\u003eLet \\(v \\in V, \\lambda \\in \\mathbb{F}\\), and let \\(a \\in graph\\ T\\) declared as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (v,Tv) \\in V \\times W\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(T\\) is a linear map, we have WLOG \\(\\lambda Tv = T\\lambda v\\). 
And therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda a = \\lambda (v,Tv) = (\\lambda v, \\lambda Tv) = (\\lambda v, T \\lambda v)\\in \\{(v,Tv) \\mid v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHence, \\(graph\\ T\\) is closed under scalar multiplication.\u003c/p\u003e\n\u003cp\u003eHaving shown \\(graph\\ T\\) to be closed under addition and scalar multiplication, as well as containing the additive identity, we see that it is a subspace of \\(V \\times W\\) of which it is a subset.\u003c/p\u003e\n\u003cp\u003eHaving shown both directions of the proof, \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_3_e_problem_1/","tags":null,"title":"3.E Problem 1"},{"categories":null,"contents":"776 is a VC firm lead by Reddit cofounder Alexis Ohanian.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbh776/\"\u003e776\u003c/a\u003e is a VC firm lead by Reddit cofounder \u003ca href=\"/posts/kbhalexis_ohanian/\"\u003eAlexis Ohanian\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbh776/","tags":null,"title":"776"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaaa/","tags":null,"title":"AAA"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaaai_talk_contacts/","tags":null,"title":"AAAI Talk Contacts"},{"categories":null,"contents":" Locale Speaker Topic + Link W3PHI-AI Elizabeth Broycki AI Healthcare Safety W3PHI-AI Jeff Clark Patient Risk Prediction W3PHI-AI Yasmine and Emily! 
Abulance Trajectories W3PHI-AI Simeon Allmendinger Diffusion Laproscopic Surgeries W3PHI-AI Andrea Borghesi Clinical Skin Disease Image Generation W3PHI-AI Hossein Jafarinia Multiple Instance Learning W3PHI-AI Thomas Kannampallil AI Medicine W3PHI-AI Soumadeep Saha DOST W3PHI-AI Dimitris Spathis Multimodal AI for Real-World Signals W3PHI-AI William Bolton Medical Knowledge Extraction W3PHI-AI Prajwal Panzade MedBlindTuner W3PHI-AI Hita Kambhamettu Medical Dialogue Generation W3PHI-AI Amarpal Sahota Parkingson\u0026rsquo;s Classification with EEG W3PHI-AI Yidou Weng Baysian Networks for Healthcare W3PHI-AI Cheng Huang Multi-LSTM for Clinical Report Generation W3PHI-AI Rickard Stureborg Hierarchical Multi-Label Clsf. for Vaccine W3PHI-AI Mbithe Nzomo Semantic Health Risk Prediction Talk Contact AAAI Talk Contacts\n","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eLocale\u003c/th\u003e\n\u003cth\u003eSpeaker\u003c/th\u003e\n\u003cth\u003eTopic + Link\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eElizabeth Broycki\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_healthcare_safety/\"\u003eAI Healthcare Safety\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eJeff Clark\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpatient_risk_prediction/\"\u003ePatient Risk Prediction\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eYasmine and Emily!\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhabulance_trajectories/\"\u003eAbulance Trajectories\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eSimeon Allmendinger\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdiffusion_models_for_laproscopic_surgeries/\"\u003eDiffusion Laproscopic 
Surgeries\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eAndrea Borghesi\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhclinical_skin_disease_imaging/\"\u003eClinical Skin Disease Image Generation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eHossein Jafarinia\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmultiple_instance_learning/\"\u003eMultiple Instance Learning\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eThomas Kannampallil\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_medicine/\"\u003eAI Medicine\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eSoumadeep Saha\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdost/\"\u003eDOST\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eDimitris Spathis\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmultimodal_ai_for_real_world_signals/\"\u003eMultimodal AI for Real-World Signals\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eWilliam Bolton\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmedical_knowledge_extraction/\"\u003eMedical Knowledge Extraction\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003ePrajwal Panzade\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmedblindtuner/\"\u003eMedBlindTuner\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eHita Kambhamettu\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmedical_dialogue_generation/\"\u003eMedical Dialogue 
Generation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eAmarpal Sahota\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhparkingson_s_classification_with_eeg/\"\u003eParkingson\u0026rsquo;s Classification with EEG\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eYidou Weng\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhbaysian_networks_for_healthcare/\"\u003eBaysian Networks for Healthcare\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eCheng Huang\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmulti_lstm_for_clinical_report_generation/\"\u003eMulti-LSTM for Clinical Report Generation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eRickard Stureborg\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhhierarchical_multi_label_clsf_for_vaccine/\"\u003eHierarchical Multi-Label Clsf. for Vaccine\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eW3PHI-AI\u003c/td\u003e\n\u003ctd\u003eMbithe Nzomo\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsemantic_health_risk_prediction/\"\u003eSemantic Health Risk Prediction\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"talk-contact\"\u003eTalk Contact\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhaaai_talk_contacts/\"\u003eAAAI Talk Contacts\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaaai2024_index/","tags":null,"title":"AAAI2024 Index"},{"categories":null,"contents":"Welcome to the personal site of Houjun \u0026ldquo;Jack\u0026rdquo; Liu.\nI\u0026rsquo;m on the blaggosphere as u/jemoka and @jemoka.\nWho\u0026rsquo;s this guy? I am a human interested in linguistic analysis, L2 learning, and user interfaces. 
AGI \u0026amp; Emacs are cool. I run Shabang, do do research in NLP and education, and am working for TalkBank on the intersection between speech and language. I\u0026rsquo;m currently doing my undergrad at Stanford, where I write some code for Stanza, a NLP package for many human languages, and a rover that we are sending to Antarctica.\nNeed to catch me? Email me at houjun@jemoka.com. Please do email me, I actually check.\nRecent Projects Take a look at my GitHub profile for programming projects. For larger scale things, take a look at the Projects Index on this site.\nNotes This site also contains the vast majority of my course notes. It is a organized in a zettlekasten format. To begin exploring, why don\u0026rsquo;t you check out Nueva Courses Index and Stanford UG Courses Index.\njklsnt Some friends and I started a small collection of fun internets that we made. Check it out!.\nHow do I know you are you? Good question! gpg --locate-keys houjun@jemoka.com. Note that GPG don\u0026rsquo;t actually check fingerprints you received so do that yourself (CA0D6B9C1EA1CD08F0AC1802E7EDDE691807A0C6).\nBugga Bugga Bontehu? Sometimes I use this domain as a downlink to fastcalculator to friends and coworkers. 
To achieve this, here are two links you could click on that I don\u0026rsquo;t always promise do anything: oliver and socks.\n","html":"\u003cp\u003eWelcome to the personal site of \u003cstrong\u003e\u003cstrong\u003eHoujun \u0026ldquo;Jack\u0026rdquo; Liu\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eI\u0026rsquo;m on the blaggosphere as \u003ca href=\"https://www.reddit.com/user/Jemoka/\"\u003eu/jemoka\u003c/a\u003e and \u003ca href=\"https://github.com/Jemoka/\"\u003e@jemoka\u003c/a\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"who-s-this-guy\"\u003eWho\u0026rsquo;s this guy?\u003c/h2\u003e\n\u003cp\u003eI am a \u003ca href=\"https://avatars.githubusercontent.com/u/28765741?v=4\"\u003ehuman\u003c/a\u003e interested in \u003ca href=\"http://pubs.asha.org/doi/10.1044/2023_JSLHR-22-00642\"\u003elinguistic analysis\u003c/a\u003e, \u003ca href=\"https://en.wikipedia.org/wiki/Second-language_acquisition\"\u003eL2 learning\u003c/a\u003e, and \u003ca href=\"https://www.shabang.io\"\u003euser interfaces\u003c/a\u003e. \u003ca href=\"https://en.wikipedia.org/wiki/Artificial_general_intelligence\"\u003eAGI\u003c/a\u003e \u0026amp; \u003ca href=\"https://github.com/Jemoka/.emacs.d\"\u003eEmacs\u003c/a\u003e are cool. I run \u003ca href=\"https://www.shabang.io\"\u003eShabang\u003c/a\u003e, do \u003ca href=\"/posts/kbhresearch_index/\"\u003edo research in NLP and education\u003c/a\u003e, and am working for \u003ca href=\"https://www.talkbank.org/\"\u003eTalkBank\u003c/a\u003e on the intersection between speech and language. I\u0026rsquo;m currently doing my undergrad at Stanford, where I write some code for \u003ca href=\"https://github.com/stanfordnlp/stanza\"\u003eStanza\u003c/a\u003e, a NLP package for many human languages, and a \u003ca href=\"https://github.com/stanford-ssi\"\u003erover that we are sending to Antarctica\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNeed to catch me? Email me at \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e. 
Please do email me, I actually check.\u003c/p\u003e\n\u003ch2 id=\"recent-projects\"\u003eRecent Projects\u003c/h2\u003e\n\u003cp\u003eTake a look at my \u003ca href=\"https://github.com/Jemoka/\"\u003eGitHub profile\u003c/a\u003e for programming projects. For larger scale things, take a look at the \u003ca href=\"/posts/kbhprojects/\"\u003eProjects Index\u003c/a\u003e on this site.\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eThis site also contains the vast majority of my course notes. It is a organized in a \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e format. To begin exploring, why don\u0026rsquo;t you check out \u003ca href=\"/posts/kbhnueva_courses_index/\"\u003eNueva Courses Index\u003c/a\u003e and \u003ca href=\"/posts/kbhstanford_courses_index/\"\u003eStanford UG Courses Index\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"jklsnt\"\u003ejklsnt\u003c/h2\u003e\n\u003cp\u003eSome friends and I started a small collection of fun internets that we made. \u003ca href=\"https://www.jklsnt.com/\"\u003eCheck it out!\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"how-do-i-know-you-are-you\"\u003eHow do I know you are you?\u003c/h2\u003e\n\u003cp\u003eGood question! \u003ccode\u003egpg --locate-keys houjun@jemoka.com\u003c/code\u003e. Note that GPG don\u0026rsquo;t actually check fingerprints you received so do that yourself (\u003ccode\u003eCA0D6B9C1EA1CD08F0AC1802E7EDDE691807A0C6\u003c/code\u003e).\u003c/p\u003e\n\u003ch2 id=\"bugga-bugga-bontehu\"\u003eBugga Bugga Bontehu?\u003c/h2\u003e\n\u003cp\u003eSometimes I use this domain as a downlink to fastcalculator to friends and coworkers. 
To achieve this, here are two links you could click on that I don\u0026rsquo;t always promise do anything: \u003ca href=\"https://oliver.jemoka.com/\"\u003eoliver\u003c/a\u003e and \u003ca href=\"https://socks.jemoka.com/\"\u003esocks\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhindex/","tags":null,"title":"About"},{"categories":null,"contents":"Here\u0026rsquo;s a fun implementation of absolute value.\nlong abs_value(long num) { long sign = num \u0026gt;\u0026gt; (sizeof(long)*CHAR_BIT - 1); // so you only get all 1s or all 0s return (num ^ sign) - sign; // sign is either -1 or 0. So, if num is non-negative // num^sign is not going to do anything (as 0^0 = 0, 0^1 = 1). // If num negative, num^sign is going to flip the bit AND subtract // negative on (i.e. add one) } ","html":"\u003cp\u003eHere\u0026rsquo;s a fun implementation of absolute value.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eabs_value\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esign\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u0026gt;\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elong\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCHAR_BIT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// so you only get all 1s or all 0s\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esign\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esign\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sign is either -1 or 0. 
So, if num is non-negative\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// num^sign is not going to do anything (as 0^0 = 0, 0^1 = 1).\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// If num negative, num^sign is going to flip the bit AND subtract\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// negative on (i.e. add one)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhabsolute_value_function/","tags":null,"title":"Absolute Value Function"},{"categories":null,"contents":"To determine\n","html":"\u003cp\u003eTo determine\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaccounting_price/","tags":null,"title":"accounting price"},{"categories":null,"contents":"Capecitabmine =\u0026gt; 5-Fluoropyrimidine =\u0026gt; Cancer cell death.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcapecitabmine/\"\u003eCapecitabmine\u003c/a\u003e =\u0026gt; 5-\u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e =\u0026gt; Cancer cell death.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaction_of_capecitabmine/","tags":null,"title":"action of 
Capecitabmine"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaction_research/","tags":null,"title":"action research"},{"categories":null,"contents":"Quality of taking a particular value at a function\u0026mdash;\u0026ldquo;expected discounted return when following a policy from \\(S\\) and taking \\(a\\)\u0026rdquo;:\n\\begin{equation} Q(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;) \\end{equation}\nwhere, \\(T\\) is the transition probability from \\(s\\) to \\(s\u0026rsquo;\\) given action \\(a\\).\nvalue function Therefore, the utility of being in a state (called the value function) is:\n\\begin{equation} U(s) = \\max_{a} Q(s,a) \\end{equation}\n\u0026ldquo;the utility that gains the best action-value\u0026rdquo;\nvalue-function policy A value-function policy is a policy that maximizes the action-value\n\\begin{equation} \\pi(s) = \\arg\\max_{a} Q(s,a) \\end{equation}\n\u0026ldquo;the policy that takes the best action to maximize action-value\u0026rdquo;\nwe call this \\(\\pi\\) \u0026ldquo;greedy policy with respect to \\(U\\)\u0026rdquo;\nadvantage see advantage function\n","html":"\u003cp\u003eQuality of taking a particular value at a function\u0026mdash;\u0026ldquo;expected discounted return when following a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from \\(S\\) and taking \\(a\\)\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T\\) is the transition probability from \\(s\\) to \\(s\u0026rsquo;\\) given action \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"value-function--kbhaction-value-function-dot-md\"\u003e\u003ca href=\"/posts/kbhaction_value_function/\"\u003evalue function\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eTherefore, the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of being in a 
state (called the \u003ca href=\"/posts/kbhaction_value_function/\"\u003evalue function\u003c/a\u003e) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) = \\max_{a} Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e that gains the best \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"value-function-policy\"\u003evalue-function policy\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#value-function-policy\"\u003evalue-function policy\u003c/a\u003e is a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that maximizes the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(s) = \\arg\\max_{a} Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that takes the best action to maximize \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ewe call this \\(\\pi\\) \u0026ldquo;\u003ca href=\"#value-function-policy\"\u003egreedy policy\u003c/a\u003e with respect to \\(U\\)\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"advantage\"\u003eadvantage\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhadvantage_function/\"\u003eadvantage function\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaction_value_function/","tags":null,"title":"action-value function"},{"categories":null,"contents":"Comes from doi.org/10.3389/fcomp.2020.00001\nADR is a vectorization/encoding technique whereby time-series data is segmented, clustered via solf-organizing maps, and the centroids of the clusters are used as the encoding\n","html":"\u003cp\u003eComes from \u003ca 
href=\"https://www.frontiersin.org/articles/10.3389/fcomp.2020.00001/full\"\u003edoi.org/10.3389/fcomp.2020.00001\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e is a vectorization/encoding technique whereby time-series data is segmented, clustered via solf-organizing maps, and the centroids of the clusters are used as the encoding\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactive_data_representation/","tags":null,"title":"Active Data Representation"},{"categories":null,"contents":"create a space of molecules of its ocnstructions, and use active learning to search through it.\n","html":"\u003cp\u003ecreate a space of molecules of its ocnstructions, and use active learning to search through it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactive_learning_molecule_iteration/","tags":null,"title":"Active Learning Molecule Iteration"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhactive_listening/","tags":null,"title":"active listening"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhactive_recall/","tags":null,"title":"active recall"},{"categories":null,"contents":"Create an approximation of the value function \\(U_{\\phi}\\) using Approximate Value Function, and use Policy Gradient to optimize an monte-carlo tree search policy\n","html":"\u003cp\u003eCreate an approximation of the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e \\(U_{\\phi}\\) using \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eApproximate Value Function\u003c/a\u003e, and use \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e to optimize an \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e 
policy\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactor_critic/","tags":null,"title":"Actor-Critic"},{"categories":null,"contents":"How do you sample particle filters? This doesn\u0026rsquo;t work for a continuous action space.\nContributions Uses KLD sampling\u0026mdash;adaptive sampling of particple filters \u0026ldquo;belief packing\u0026rdquo;\u0026mdash;pack similar beliefs together, making observation tree smaller KLD Sampling KLD Sampling uses KL Divergence to approximate difference between two probability distributions:\n\\begin{equation} N \\approx \\frac{k-1}{2\\xi} \\qty(1- \\frac{2}{9(k-1)} + \\sqrt{\\frac{2}{9(k-1)}} z_{1-\\eta})^{3} \\end{equation}\n\u0026ldquo;Propagation\u0026rdquo; We want to get a set of sampled observations from belief + action.\nBelief Packing L1 norm between beliefs. If its too small consider them the same beliefs.\n","html":"\u003cp\u003eHow do you sample particle filters? This doesn\u0026rsquo;t work for a continuous action space.\u003c/p\u003e\n\u003ch2 id=\"contributions\"\u003eContributions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUses KLD sampling\u0026mdash;adaptive sampling of particple filters\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;belief packing\u0026rdquo;\u0026mdash;pack similar beliefs together, making observation tree smaller\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"kld-sampling\"\u003eKLD Sampling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#kld-sampling\"\u003eKLD Sampling\u003c/a\u003e uses \u003ca href=\"/posts/kbhkl_divergence/\"\u003eKL Divergence\u003c/a\u003e to approximate difference between two probability distributions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN \\approx \\frac{k-1}{2\\xi} \\qty(1- \\frac{2}{9(k-1)} + \\sqrt{\\frac{2}{9(k-1)}} z_{1-\\eta})^{3}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"propagation\"\u003e\u0026ldquo;Propagation\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eWe want to get a set of sampled observations from belief + 
action.\u003c/p\u003e\n\u003ch2 id=\"belief-packing\"\u003eBelief Packing\u003c/h2\u003e\n\u003cp\u003eL1 norm between beliefs. If its too small consider them the same beliefs.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadaops/","tags":null,"title":"AdaOPS"},{"categories":null,"contents":"Operation that adds elements in a set\nconstituents A set \\(V\\) Each non-necessarily-distinct elements \\(u,v \\in V\\) requirements addition on a set \\(V\\) is defined by a function that assigned an element named \\(u+v \\in V\\) (its closed), \\(\\forall u,v\\in V\\)\nadditional information See also addition in \\(\\mathbb{F}^n\\)\n","html":"\u003cp\u003eOperation that adds elements in a set\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA set \\(V\\)\u003c/li\u003e\n\u003cli\u003eEach non-necessarily-distinct elements \\(u,v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e on a set \\(V\\) is defined by a function that assigned an element named \\(u+v \\in V\\) (its \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e), \\(\\forall u,v\\in V\\)\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadding/","tags":null,"title":"adding"},{"categories":null,"contents":"The additive identity allows another number to retain its identity after adding. 
That is: there exists an element \\(0\\) such that \\(v+0=v\\) for whatever structure \\(v\\) and addition \\(+\\) you are working with.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e allows another \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e to retain its identity after \u003ca href=\"/posts/kbhadding/\"\u003eadding\u003c/a\u003e. That is: there exists an element \\(0\\) such that \\(v+0=v\\) for whatever structure \\(v\\) and addition \\(+\\) you are working with.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadditive_identity/","tags":null,"title":"additive identity"},{"categories":null,"contents":"Assume for the sake of contradiction \\(\\exists\\ 0, 0\u0026rsquo;\\) both being additive identities in vector space \\(V\\).\nTherefore:\n\\begin{equation} 0+0\u0026rsquo; = 0\u0026rsquo; +0 \\end{equation}\ncommutativity.\nTherefore:\n\\begin{equation} 0+0\u0026rsquo; = 0 = 0\u0026rsquo;+0 = 0' \\end{equation}\ndefn. of identity.\nHence: \\(0=0\u0026rsquo;\\), \\(\\blacksquare\\).\n","html":"\u003cp\u003eAssume for the sake of contradiction \\(\\exists\\ 0, 0\u0026rsquo;\\) both being additive identities in vector space \\(V\\).\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+0\u0026rsquo; = 0\u0026rsquo; +0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+0\u0026rsquo; = 0 = 0\u0026rsquo;+0 = 0'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. 
of identity.\u003c/p\u003e\n\u003cp\u003eHence: \\(0=0\u0026rsquo;\\), \\(\\blacksquare\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadditive_identity_is_unique_in_a_vector_space/","tags":null,"title":"additive identity is unique in a vector space"},{"categories":null,"contents":"Take a vector \\(v \\in V\\) and additive inverses \\(a,b \\in V\\).\n\\begin{equation} a+0 = a \\end{equation}\ndefn. of additive identity\n\\begin{equation} a+(v+b) = a \\end{equation}\ndefn. of additive inverse\n\\begin{equation} (a+v)+b = a \\end{equation}\nassociativity\n\\begin{equation} 0+b = a \\end{equation}\ndefn. of additive inverse\n\\begin{equation} b=a\\ \\blacksquare \\end{equation}\n","html":"\u003cp\u003eTake a vector \\(v \\in V\\) and additive inverses \\(a,b \\in V\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+0 = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. of additive identity\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na+(v+b) = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. of additive inverse\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(a+v)+b = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eassociativity\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+b = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003edefn. of additive inverse\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb=a\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadditive_inverse_is_unique_in_a_vector_space/","tags":null,"title":"additive inverse is unique in a vector space"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhadhd/","tags":null,"title":"ADHD"},{"categories":null,"contents":"adMe: absorbtion, distribution, metabolism, excretion.\nPharmacology treatment of diseases. The microbiome regulates metabolism.\n","html":"\u003cp\u003eadMe: absorbtion, distribution, metabolism, excretion.\u003c/p\u003e\n\u003cp\u003ePharmacology treatment of diseases. 
The \u003ca href=\"\"\u003emicrobiome\u003c/a\u003e regulates metabolism.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadme/","tags":null,"title":"adMe"},{"categories":null,"contents":"ADReSS Challenge is a Alzheimer\u0026rsquo;s Dementia Recognition challenge from the data available on DementiaBank.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e is a Alzheimer\u0026rsquo;s Dementia Recognition challenge from the data available on \u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadress_challenge/","tags":null,"title":"ADReSS Challenge"},{"categories":null,"contents":"The ADReSS Literature Survey is a literature survey for the results published during the ADReSS Challenge.\nAntonsson 2021: disfluency + SVF features trained on SVM: lexical \u0026gt; narrative qual. Chlasta 2021: features extracted from VGGish on SVM; also trained new CNN from .wav. Sadeghian 2021: Used GA for feature sel., achieved 94% w/ MMSE alone; dev\u0026rsquo;d ASR tool. Martinc 2021: CBOW (text) + ADR (sound) late fusion\u0026rsquo;d to a BERT, ablated for features. Meghanani 2021: spontaneous speech transcripts with fastText and CNN; 83.33% acc. Yuan 2021: ERNIE on transcripts with pause encoding; 89.6% acc. Jonell 2021: Developed a kitchen sink of diag. tools and correlated it with biomarkers. Laguarta 2021: multimodel (OVBM) to embed auditory info + biomarkers for clsf. Shah 2021: late fusion of n-gram and OpenSMILE on std. classifiers. Lindsay 2021: Cross-linguistic markers shared for AD patients between English and French. Zhu 2021: late fusion of CTP task for AD clsf. w/ transf., mobilenet, yamnet, mockingjay. Guo 2021: WLS data to augment CTP from ADReSS Challenge and trained it on a BERT. Balagopalan 2021: lexo. and synt. features trained on a BERT and other models. 
Mahajan 2021: a bimodal model on speech/text with GRU on speech and CNN-LSTM on text. Parvin 2020: excercize scheme effects on theta/alpha ratio and Brain wave frequency. Luz 2021: review paper presenting the ADReSSo challenge and current baselines. From Meghanani 2021, a review:\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhadress_literature_survey/\"\u003eADReSS Literature Survey\u003c/a\u003e is a literature survey for the results published during the \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e: disfluency + \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e features trained on SVM: lexical \u0026gt; narrative qual.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchlasta_2021/\"\u003eChlasta 2021\u003c/a\u003e: features extracted from \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e on SVM; also trained new CNN from .wav.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsadeghian_2021/\"\u003eSadeghian 2021\u003c/a\u003e: Used \u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003eGA\u003c/a\u003e for feature sel., achieved 94% w/ \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e alone; dev\u0026rsquo;d \u003ca href=\"/posts/kbhasr/\"\u003eASR\u003c/a\u003e tool.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e: CBOW (text) + \u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e (sound) \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u0026rsquo;d\u003c/a\u003e to a BERT, ablated for features.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmeghanani_2021/\"\u003eMeghanani 2021\u003c/a\u003e: spontaneous speech transcripts with fastText and CNN; 83.33% acc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 
2021\u003c/a\u003e: ERNIE on transcripts with pause encoding; 89.6% acc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjonell_2021/\"\u003eJonell 2021\u003c/a\u003e: Developed a kitchen sink of diag. tools and correlated it with biomarkers.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlaguarta_2021/\"\u003eLaguarta 2021\u003c/a\u003e: multimodel (\u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOVBM\u003c/a\u003e) to embed auditory info + biomarkers for clsf.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhshah_2021/\"\u003eShah 2021\u003c/a\u003e: \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of n-gram and \u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e on std. classifiers.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlindsay_2021/\"\u003eLindsay 2021\u003c/a\u003e: Cross-linguistic markers shared for AD patients between English and French.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzhu_2021/\"\u003eZhu 2021\u003c/a\u003e: \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task for AD clsf. w/ transf., mobilenet, yamnet, mockingjay.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhguo_2021/\"\u003eGuo 2021\u003c/a\u003e: WLS data to augment \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e from \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e and trained it on a BERT.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbalagopalan_2021/\"\u003eBalagopalan 2021\u003c/a\u003e: lexo. and synt. 
features trained on a BERT and other models.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmahajan_2021/\"\u003eMahajan 2021\u003c/a\u003e: a bimodal model on speech/text with GRU on speech and CNN-LSTM on text.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparvin_2020/\"\u003eParvin 2020\u003c/a\u003e: excercize scheme effects on \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e and Brain wave frequency.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhluz_2021/\"\u003eLuz 2021\u003c/a\u003e: review paper presenting the ADReSSo challenge and current baselines.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFrom \u003ca href=\"/posts/kbhmeghanani_2021/\"\u003eMeghanani 2021\u003c/a\u003e, a review:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-32-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhadress_literature_survey/","tags":["index"],"title":"ADReSS Literature Survey Index"},{"categories":null,"contents":"an advantage function is a method for scoring a policy based on how much additional value it provides compared to the greedy policy:\n\\begin{align} A(s,a) \u0026amp;= Q(s,a) - U(s) \\\\ \u0026amp;= Q(s,a) - \\max_{a}Q(s,a) \\end{align}\nthat is, how much does your policy\u0026rsquo;s action-value function differ from that of choosing the action that maximizes the utility.\nFor a greedy policy that just optimizes this exact metric, \\(A =0\\).\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhadvantage_function/\"\u003eadvantage function\u003c/a\u003e is a method for scoring a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e based on how much additional value it provides compared to the \u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003egreedy policy\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nA(s,a) \u0026amp;= Q(s,a) - U(s) \\\\\n\u0026amp;= Q(s,a) - 
\\max_{a}Q(s,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethat is, how much does your policy\u0026rsquo;s \u003ca href=\"/posts/kbhpolicy_evaluation/#action-value-function\"\u003eaction-value function\u003c/a\u003e differ from that of choosing the action that maximizes the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor a \u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003egreedy policy\u003c/a\u003e that just optimizes this exact metric, \\(A =0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhadvantage_function/","tags":null,"title":"advantage function"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhadvertising/","tags":null,"title":"advertising"},{"categories":null,"contents":"an affine subset of \\(V\\) is a subset of \\(V\\) that is the sum of a vector and one of its subspace; that is, an affine subset of \\(V\\) is a subset of \\(V\\) of the form \\(v+U\\) for \\(v \\in V\\) and subspace \\(U \\subset V\\).\nfor \\(v \\in V\\) and \\(U \\subset V\\), an affine subset \\(v+U\\) is said to be parallel to \\(U\\).\nthat is, an affine subset for \\(U \\subset V\\) and \\(v \\in V\\):\n\\begin{equation} v + U = \\{v+u : u \\in U\\} \\end{equation}\nadditional information two affine subsets parallel to \\(U\\) are either equal or disjoint Suppose \\(U\\) is a subspace of \\(V\\); and \\(v,w \\in V\\), then, if one of the following is true all of them are true:\n\\(v-w \\in U\\) \\(v+U = w+U\\) \\((v+U) \\cap (w+U) \\neq \\emptyset\\) \\(1 \\implies 2\\) Given \\(v-w \\in U\\)\u0026hellip;.\nFor an element in \\(v+U\\), we have that \\(v+u = (w-w)+v+u = w+((v-w)+u) \\in w + U\\). This is because \\(U\\) is closed so adding \\(v-w \\in U\\) and \\(u\\) will remain being in \\(U\\). 
\\(w-w=0\\) just by everything being in \\(V\\).\nWe now have \\(v+u \\in w+U\\ \\forall u \\in U\\); we now can reverse the argument to argue in a similar fashion that \\(w+u \\in v+U\\ \\forall u \\in U\\). So, we have that \\(v+U \\subset w+U\\) and \\(w+U \\subset v+U\\). So \\(v+U = w+U\\), as desired.\n\\(2 \\implies 3\\) By definition of \\(v+U=w+U\\) as long as \\(v+U\\) and \\(w+U\\) is not empty sets, which they can\u0026rsquo;t be because \\(U\\) is a vector space so guaranteed nonempty.\n\\(3\\implies 1\\) Given \\((v+U) \\cap (w+U) \\neq \\emptyset\\), we have that there exists some \\(u_1, u_2 \\in U\\) such that \\(v+u_1 = w+u_2\\). Because everything here is in \\(V\\), we can add their respective inverses (\u0026ldquo;move them around\u0026rdquo;) such that: \\(v-w = u_2-u_1\\). Therefore \\(u_2-u_1 \\in U \\implies v-w \\in U\\).\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e of \\(V\\) is a subset of \\(V\\) that is the \u003ca href=\"/posts/kbhsum_of_vector_and_subspace/\"\u003esum of a vector and one of its subspace\u003c/a\u003e; that is, an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e of \\(V\\) is a subset of \\(V\\) of the form \\(v+U\\) for \\(v \\in V\\) and \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U \\subset V\\).\u003c/p\u003e\n\u003cp\u003efor \\(v \\in V\\) and \\(U \\subset V\\), an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e \\(v+U\\) is said to be \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e to \\(U\\).\u003c/p\u003e\n\u003cp\u003ethat is, an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e for \\(U \\subset V\\) and \\(v \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv + U = \\{v+u : u \\in U\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional 
information\u003c/h2\u003e\n\u003ch3 id=\"two-affine-subset--kbhparallel-linear-algebra-dot-md--s-parallel--kbhparallel-linear-algebra-dot-md--to-u-are-either-equal-or-disjoint\"\u003etwo \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e to \\(U\\) are either equal or disjoint\u003c/h3\u003e\n\u003cp\u003eSuppose \\(U\\) is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\); and \\(v,w \\in V\\), then, if one of the following is true all of them are true:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(v-w \\in U\\)\u003c/li\u003e\n\u003cli\u003e\\(v+U = w+U\\)\u003c/li\u003e\n\u003cli\u003e\\((v+U) \\cap (w+U) \\neq \\emptyset\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"1-implies-2\"\u003e\\(1 \\implies 2\\)\u003c/h4\u003e\n\u003cp\u003eGiven \\(v-w \\in U\\)\u0026hellip;.\u003c/p\u003e\n\u003cp\u003eFor an element in \\(v+U\\), we have that \\(v+u = (w-w)+v+u = w+((v-w)+u) \\in w + U\\). This is because \\(U\\) is closed so adding \\(v-w \\in U\\) and \\(u\\) will remain being in \\(U\\). \\(w-w=0\\) just by everything being in \\(V\\).\u003c/p\u003e\n\u003cp\u003eWe now have \\(v+u \\in w+U\\ \\forall u \\in U\\); we now can reverse the argument to argue in a similar fashion that \\(w+u \\in v+U\\ \\forall u \\in U\\). So, we have that \\(v+U \\subset w+U\\) and \\(w+U \\subset v+U\\). 
So \\(v+U = w+U\\), as desired.\u003c/p\u003e\n\u003ch4 id=\"2-implies-3\"\u003e\\(2 \\implies 3\\)\u003c/h4\u003e\n\u003cp\u003eBy definition of \\(v+U=w+U\\) as long as \\(v+U\\) and \\(w+U\\) is not empty sets, which they can\u0026rsquo;t be because \\(U\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e so guaranteed nonempty.\u003c/p\u003e\n\u003ch4 id=\"3-implies-1\"\u003e\\(3\\implies 1\\)\u003c/h4\u003e\n\u003cp\u003eGiven \\((v+U) \\cap (w+U) \\neq \\emptyset\\), we have that there exists some \\(u_1, u_2 \\in U\\) such that \\(v+u_1 = w+u_2\\). Because everything here is in \\(V\\), we can add their respective inverses (\u0026ldquo;move them around\u0026rdquo;) such that: \\(v-w = u_2-u_1\\). Therefore \\(u_2-u_1 \\in U \\implies v-w \\in U\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparallel_linear_algebra/","tags":null,"title":"affine subset"},{"categories":null,"contents":"In math, an affine transformation is a transformation that preserves lines and parallelism.\nFor instance, here is an affine transformation:\n\\begin{equation} U\u0026rsquo;(S) = mU(s) + b \\end{equation}\nwhere \\(m \u0026gt; 0\\), and \\(b\\) is unconstrained.\nhttps://en.wikipedia.org/wiki/Affine_transformation\n","html":"\u003cp\u003eIn math, an affine transformation is a transformation that preserves lines and parallelism.\u003c/p\u003e\n\u003cp\u003eFor instance, here is an \u003ca href=\"/posts/kbhaffine_transformation/\"\u003eaffine transformation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU\u0026rsquo;(S) = mU(s) + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(m \u0026gt; 0\\), and \\(b\\) is unconstrained.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Affine_transformation\"\u003ehttps://en.wikipedia.org/wiki/Affine_transformation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaffine_transformation/","tags":null,"title":"affine 
transformation"},{"categories":null,"contents":"An agent is an entity that act upon the observations of its environment.\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e is an entity that act upon the observations of its environment.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhagent/","tags":null,"title":"agent"},{"categories":null,"contents":"Agricultural Adjustment Administration is a part of the New Deal programs to support the agricultural sector and maintain supply. They regulated production of seven different crops to group increase farming income. It is very far-reaching of other parts of the economy.\nIt was ruled unconstitutional in 1936.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhagricultural_adjustment_administration/\"\u003eAgricultural Adjustment Administration\u003c/a\u003e is a part of the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e programs to support the agricultural sector and maintain supply. They regulated production of seven different crops to group increase farming income. 
It is very far-reaching of other parts of the economy.\u003c/p\u003e\n\u003cp\u003eIt was ruled unconstitutional in 1936.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhagricultural_adjustment_administration/","tags":null,"title":"Agricultural Adjustment Administration"},{"categories":null,"contents":"AgRP is a type of neurons that stimulates food intake.\nInhibit metacortin Activate NPY Release GABA Diet-induced obesity blunts AgRP response, and so, because AgRP plays a part in thermoregulation, diet-inducsed obesity responds less to temperature changes.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e is a type of neurons that stimulates food intake.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInhibit metacortin\u003c/li\u003e\n\u003cli\u003eActivate \u003ca href=\"\"\u003eNPY\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eRelease \u003ca href=\"\"\u003eGABA\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDiet-induced \u003ca href=\"\"\u003eobesity\u003c/a\u003e blunts \u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e response, and so, because \u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e plays a part in \u003ca href=\"/posts/kbhthermoregulation/\"\u003ethermoregulation\u003c/a\u003e, diet-inducsed \u003ca href=\"\"\u003eobesity\u003c/a\u003e responds less to temperature changes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhagrp/","tags":null,"title":"AgRP"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhai/","tags":null,"title":"AI"},{"categories":null,"contents":"AI Ethics is the Ethics of training AI models.\n","html":"\u003cp\u003eAI Ethics is the Ethics of training \u003ca href=\"/posts/kbhai/\"\u003eAI\u003c/a\u003e models.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_ethics/","tags":null,"title":"AI Ethics"},{"categories":null,"contents":"Iteration of Healthcare System\ndigital structure =\u0026gt; 
configuration =\u0026gt; activities or processes =\u0026gt; outcomes =\u0026gt; more structures\nEHR decision support systems Technology in general is becoming safer over time =\u0026gt; ideally, some form of high level safety is achieved.\nimportant: define a shared vocabulary between technologists and clinicians\nTechnology-Induced Errors Though EHS can serve to reduce medical errors, it can work in increase errors as well.\nbias ignorance of environmental factors Error reporting + incident reporting Resolution get rid of each problem help establish well-configured systems which could promote safety patient simulation with clinician + technology to see how it fits into process of care what could that look like from a home health-care perspective ","html":"\u003cp\u003eIteration of Healthcare System\u003c/p\u003e\n\u003cp\u003edigital structure =\u0026gt; configuration =\u0026gt; activities or processes =\u0026gt; outcomes =\u0026gt; more structures\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eEHR\u003c/li\u003e\n\u003cli\u003edecision support systems\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTechnology in general is becoming safer over time =\u0026gt; ideally, some form of high level safety is achieved.\u003c/p\u003e\n\u003cp\u003eimportant: \u003cstrong\u003edefine a shared vocabulary between technologists and clinicians\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"technology-induced-errors\"\u003eTechnology-Induced Errors\u003c/h2\u003e\n\u003cp\u003eThough EHS can serve to \u003cstrong\u003ereduce medical errors\u003c/strong\u003e, it can work in \u003cstrong\u003eincrease errors\u003c/strong\u003e as well.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebias\u003c/li\u003e\n\u003cli\u003eignorance of environmental factors\u003c/li\u003e\n\u003cli\u003eError reporting + incident reporting\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"resolution\"\u003eResolution\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eget rid of each 
problem\u003c/li\u003e\n\u003cli\u003ehelp establish well-configured systems which could promote safety\u003c/li\u003e\n\u003cli\u003epatient simulation with clinician + technology to see how it fits into process of care\u003c/li\u003e\n\u003cli\u003ewhat could that look like from a home health-care perspective\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_healthcare_safety/","tags":null,"title":"AI Healthcare Safety"},{"categories":null,"contents":"The BIG NEW THING in AI research. Because normal results for seq2sseq is already doing very well. Slightly more realistic models of language acquisition.\n\u0026ldquo;Linguistics is not just about human languages\u0026rdquo;: humans, animals, and machines.\nBIG AGI What: question is the difference between a dumb human and GPT?\nSpeech is Based for Interoperability continuous data (as opposed to text) much less complex with vision: vision is more complex speech is a nice controllable system (????) Models of Language Acquisition https://arxiv.org/pdf/2309.07861.pdf\nApproximation of spoken language: GAN generates ElectroMagnetic Articulagraphy; then, a pretrained model turns that into a spoken language.\nThen, a discriminator then perform discrimination; and a decoder decodes the speech and feeds it back into the gan with a reconstruction loss.\nYou can take a latent representation and stretch it real hard.\nYou can also feed the audio into \\(Q\\), and decode an latent encoding, and then performs changes.\nPer layer, you can also add the activations of all filters outputs together to get a pattern of activation (frequency vs. number of samples) to figure out what is being encoded. If you see high activations at vowels, it\u0026rsquo;d be values, etc.\nearlier layers correspond to brain stem layer layers correspond to acoustic envelope questions whale still has no semantics though? 
reconstruction loss between brain and intermediate layers ","html":"\u003cp\u003eThe BIG NEW THING in AI research. Because normal results for seq2sseq is already doing very well. Slightly more realistic models of language acquisition.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Linguistics is not just about human languages\u0026rdquo;: humans, animals, and machines.\u003c/p\u003e\n\u003cp\u003eBIG AGI What: question is the difference between a dumb human and GPT?\u003c/p\u003e\n\u003ch2 id=\"speech-is-based-for-interoperability\"\u003eSpeech is Based for Interoperability\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003econtinuous data (as opposed to text)\u003c/li\u003e\n\u003cli\u003emuch less complex with vision: vision is more complex\u003c/li\u003e\n\u003cli\u003espeech is a nice controllable system (????)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"models-of-language-acquisition\"\u003eModels of Language Acquisition\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://arxiv.org/pdf/2309.07861.pdf\"\u003ehttps://arxiv.org/pdf/2309.07861.pdf\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-26_11-13-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eApproximation of spoken language: GAN generates ElectroMagnetic Articulagraphy; then, a pretrained model turns that into a spoken language.\u003c/p\u003e\n\u003cp\u003eThen, a discriminator then perform discrimination; and a decoder decodes the speech and feeds it back into the gan with a reconstruction loss.\u003c/p\u003e\n\u003cp\u003eYou can take a latent representation and stretch it real hard.\u003c/p\u003e\n\u003cp\u003eYou can also feed the audio into \\(Q\\), and decode an latent encoding, and then performs changes.\u003c/p\u003e\n\u003cp\u003ePer layer, you can also add the activations of all filters outputs together to get a pattern of activation (frequency vs. number of samples) to figure out what is being encoded. 
If you see high activations at vowels, it\u0026rsquo;d be values, etc.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eearlier layers correspond to brain stem\u003c/li\u003e\n\u003cli\u003elayer layers correspond to acoustic envelope\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003equestions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ewhale still has no semantics though?\u003c/li\u003e\n\u003cli\u003ereconstruction loss between brain and intermediate layers\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_intepretability/","tags":null,"title":"AI Intepretability"},{"categories":null,"contents":"A lecture hosted by Cynthia Lee.\n\u0026ldquo;AI: how it works \u0026amp; why its often so biased\u0026rdquo;\nDefining Artificial Intelligence.\n","html":"\u003cp\u003eA lecture hosted by \u003ca href=\"/posts/kbhcynthia_lee/\"\u003eCynthia Lee\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;AI: how it works \u0026amp; why its often so biased\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eDefining \u003ca href=\"/posts/kbhartificial_intelligence/\"\u003eArtificial Intelligence\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_master_class/","tags":null,"title":"AI Master Class"},{"categories":null,"contents":"AI/Clinical Decision Support\nworkload measurement clinical wellness, etc. perioperative outcomes Big problem: integration is impossible; there\u0026rsquo;s lots of models. \u0026ldquo;Researched models are rarely implemented; implemented models are rarely researched\u0026rdquo;. Epic doesn\u0026rsquo;t stand behind its models.\nImplementation of AI based mechanisms though \u0026ldquo;saves time\u0026rdquo; on paper, results in more patient throughput and patient burn. 
At this point: harder question\u0026mdash;not whether you can make a model, but how do you govren their use and actually put them into implementation.\nMPOG: multi-centre perioperative outcomes group\n","html":"\u003cp\u003e\u003cstrong\u003eAI/Clinical Decision Support\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eworkload measurement\u003c/li\u003e\n\u003cli\u003eclinical wellness, etc.\u003c/li\u003e\n\u003cli\u003eperioperative outcomes\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBig problem: integration is impossible; there\u0026rsquo;s lots of models. \u0026ldquo;Researched models are \u003cstrong\u003erarely\u003c/strong\u003e implemented; implemented models are \u003cstrong\u003erarely\u003c/strong\u003e researched\u0026rdquo;. Epic doesn\u0026rsquo;t stand behind its models.\u003c/p\u003e\n\u003cp\u003eImplementation of AI based mechanisms though \u0026ldquo;saves time\u0026rdquo; on paper, results in more patient throughput and patient burn. At this point: harder question\u0026mdash;not whether you can make a model, but how do you govren their use and actually put them into implementation.\u003c/p\u003e\n\u003cp\u003eMPOG: multi-centre perioperative outcomes group\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhai_medicine/","tags":null,"title":"AI Medicine"},{"categories":null,"contents":"AIBridge is an introductory AI bootcamp developed and taught by Prof. Xin Liu, yours truly, and Samuel Ren in collaboration with AIFS.\nAIBRidge Notes Pause [more] to allow some time to see if people follow did y\u0026rsquo;all not introduce pandas? Closest to doing this without try/except:\nslide 49: what is conc? is this too much recap time? Haven\u0026rsquo;t we been recapping for a long while already? 
probably good to mention what is /content/iris.data, also, just opening from ./iris.data should work and will be probably more ergonomic read function confusion .read() =\u0026gt; str .readlines() =\u0026gt; [str] the pauses feel a tad ackward? speak up! SSE squares and lines need to be darker: increase opacity 39 \u0026ldquo;very common metric\u0026rdquo; \u0026mdash; not a metric motivate confidence value better; the \u0026ldquo;middle\u0026rdquo; question makes sense I think its actually probably good to explain cross-entropy in the future (i.e. its not a lot of fancy math + I think it provides a lot of intuition w.r.t. one-hot encoding, probablitiy distributions, etc.) Problem with how I made the old slides: multi-Class classification (1va, ava, etc.) needs better motivation before, otherwise throwing three classes on the screen is a tad confusing motivate that the whole random.seed business is so that the whole class can compare answers more effectively LogReg = LogisticRegression(), typically, name instance variables as lower snake case; so maybe call it my_log_reg or something ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhaibridge/\"\u003eAIBridge\u003c/a\u003e is an introductory AI bootcamp developed and taught by \u003ca href=\"/posts/kbhprof_xin_liu/\"\u003eProf. Xin Liu\u003c/a\u003e, yours truly, and Samuel Ren in collaboration with \u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"aibridge-notes\"\u003eAIBRidge Notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePause [more] to allow some time to see if people follow\u003c/li\u003e\n\u003cli\u003edid y\u0026rsquo;all not introduce pandas?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eClosest to doing this without try/except:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eslide 49: what is conc?\u003c/li\u003e\n\u003cli\u003eis this too much recap time? 
Haven\u0026rsquo;t we been recapping for a long while already?\u003c/li\u003e\n\u003cli\u003eprobably good to mention what is \u003ccode\u003e/content/iris.data\u003c/code\u003e, also, just opening from \u003ccode\u003e./iris.data\u003c/code\u003e should work and will be probably more ergonomic\u003c/li\u003e\n\u003cli\u003eread function confusion\n\u003cul\u003e\n\u003cli\u003e.read() =\u0026gt; str\u003c/li\u003e\n\u003cli\u003e.readlines() =\u0026gt; [str]\u003c/li\u003e\n\u003cli\u003ethe pauses feel a tad ackward?\u003c/li\u003e\n\u003cli\u003espeak up!\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cul\u003e\n\u003cli\u003eSSE squares and lines need to be darker: increase opacity 39\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;very common metric\u0026rdquo; \u0026mdash; not a metric\u003c/li\u003e\n\u003cli\u003emotivate confidence value better; the \u0026ldquo;middle\u0026rdquo; question makes sense\u003c/li\u003e\n\u003cli\u003eI think its actually probably good to explain cross-entropy in the future\n\u003cul\u003e\n\u003cli\u003e(i.e. its not a lot of fancy math + I think it provides a lot of intuition w.r.t. one-hot encoding, probablitiy distributions, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eProblem with how I made the old slides: multi-Class classification (1va, ava, etc.) 
needs better motivation before, otherwise throwing three classes on the screen is a tad confusing\u003c/li\u003e\n\u003cli\u003emotivate that the whole \u003ccode\u003erandom.seed\u003c/code\u003e business is so that the whole class can compare answers more effectively\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eLogReg = LogisticRegression()\u003c/code\u003e, typically, name instance variables as lower snake case; so maybe call it \u003ccode\u003emy_log_reg\u003c/code\u003e or something\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge/","tags":null,"title":"AIBridge"},{"categories":null,"contents":" Welcome to the AIBridge Course homepage.\nThe purpose of AIBridge is to bridge the gap between computer science and other disciplines. To many, working with AI might seem like an unreachable objective. However, in reality, one week is enough to get started. AIBridge will provide basic programming capability in Python and knowledge of object-oriented programming as well as the concepts behind machine learning and how to implement it using a popular toolbox, Scikit-Learn. Students work to complete a personally-defined project using techniques in AI, with data from their own research or with problems supplied by the Course. This one week course will be hosted in-person at UC Davis and will target mainly undergraduate and non-technical graduate students.\nThe course is taught by Prof. Xin Liu in collaboration with Houjun \u0026ldquo;Jack\u0026rdquo; Liu, Samuel Ren, and Albara Ah Ramli.\nEvergreen Resources Python Tutorial: W3 Schools Python Documentation: Python.org SciKit Documentation: scikit-learn.org Iris Dataset: UCI DB, or, for better user experience, scikit Wine Dataset: UCI DB Class Discord: Invite Data-Loading Cheat-Sheet: Colab When in doubt\u0026hellip;\nGoogle it! Try it! 
Andrew Ng\u0026rsquo;s Machine Learning Suite of Courses DONE Day 1: Python Basics On Monday, 06/27/2022, we covered the basics of Python so that we are all up to speed to perform basic ML with the Scikit Learn toolkit.\nIntroductory Remarks: Slides Lecture on Python Basics: Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Morning Lecture Notebook, Morning Lab Notebook, Afternoon Lecture Notebook, Afternoon Lab Notebook Day 1 feedback survey: Link\nDONE Day 2: OOP + Linear Models Today, we are going to cover the basic intuition and terminology behind Object Oriented Programming, as well as introduce two simple, linear approaches to Machine Learning tasks: linear regression and logistic regression.\nLecture on OOP and more on functions (morning): Slides Lecture on Linear and Logistic Regression (afternoon): Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Morning Lecture Notebook, Morning Lab Notebook, Afternoon Lab Notebook Day 2 feedback survey: Link\nDONE Day 3: Data + Classifier Today, we are going to cover data cleaning, and three more classifiers!\nLecture on data cleaning and pandas (morning): Slides Lecture on three classification algorithms (afternoon): Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Morning Lab Notebook, Afternoon Lab Notebook Day 3 feedback survey: Link\nDONE Day 4: Operations and Clustering Today, we are going to work on the validation operations tools, and talk about clustering\nLecture on training and data operations (morning): Slides Lecture on clustering and feature operations (afternoon): Slides Lab Exercises: Morning Lab Notes, Afternoon Lab Notes Colab Notebooks: Afternoon Notebook Day 4 feedback survey: Link\nDay 5: Closing Thoughts Today, we are going to tie some loose ends with missing data, error analysis, semi supervised learning, cross validation, and ethics.\nClosing thoughts lecture (morning): Slides Final Project: AIBridge Final 
Project\nDay 5/Bootcamp feedback survey: Link\nOther Links and Resources Tools we use: AIBridge Packages and Tools Cleaned Wine Dataset (try cleaning it yourself before using!): Google Drive Iris Data with Temperature (don\u0026rsquo;t use without instructions, though!): Google Drive ","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-26_20-07-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWelcome to the AIBridge Course homepage.\u003c/p\u003e\n\u003cp\u003eThe purpose of AIBridge is to bridge the gap between computer science and other disciplines. To many, working with AI might seem like an unreachable objective. However, in reality, one week is enough to get started. AIBridge will provide basic programming capability in Python and knowledge of object-oriented programming as well as the concepts behind machine learning and how to implement it using a popular toolbox, Scikit-Learn. Students work to complete a personally-defined project using techniques in AI, with data from their own research or with problems supplied by the Course. This one week course will be hosted in-person at UC Davis and will target mainly undergraduate and non-technical graduate students.\u003c/p\u003e\n\u003cp\u003eThe course is taught by Prof. 
Xin Liu in collaboration with Houjun \u0026ldquo;Jack\u0026rdquo; Liu, Samuel Ren, and Albara Ah Ramli.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"evergreen-resources\"\u003eEvergreen Resources\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePython Tutorial: \u003ca href=\"https://www.w3schools.com/python/\"\u003eW3 Schools\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ePython Documentation: \u003ca href=\"https://docs.python.org/3/\"\u003ePython.org\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eSciKit Documentation: \u003ca href=\"https://scikit-learn.org/stable/getting_started.html\"\u003escikit-learn.org\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIris Dataset: \u003ca href=\"https://archive.ics.uci.edu/ml/datasets/iris\"\u003eUCI DB\u003c/a\u003e, or, for better user experience, \u003ca href=\"https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html\"\u003escikit\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWine Dataset: \u003ca href=\"https://archive.ics.uci.edu/ml/datasets/wine+quality\"\u003eUCI DB\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eClass Discord: \u003ca href=\"https://discord.gg/DNj7masa\"\u003eInvite\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eData-Loading Cheat-Sheet: \u003ca href=\"https://colab.research.google.com/drive/1VlnKSUgefcSUBPLgAvOYHvBjB9bjcUQh?usp=sharing\"\u003eColab\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhen in doubt\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGoogle it! 
Try it!\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.coursera.org/specializations/machine-learning-introduction#courses\"\u003eAndrew Ng\u0026rsquo;s Machine Learning Suite of Courses\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"day-1-python-basics\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 1: Python Basics\u003c/h2\u003e\n\u003cp\u003eOn Monday, 06/27/2022, we covered the basics of Python so that we are all up to speed to perform basic ML with the Scikit Learn toolkit.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIntroductory Remarks: \u003ca href=\"https://drive.google.com/file/d/1XPkB9GL6rG2F5s5ydTsJOMg33y87HEBB/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on Python Basics: \u003ca href=\"https://drive.google.com/file/d/1udI-c1roIS7Fb1cgGQOzRc7a6dfYZWu8/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"https://drive.google.com/file/d/1oomPZGg9NUgDhi6S_RuTH60Vzlv5kD8z/view?usp=sharing\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1nG_hQ02GDpHpIlwJ6VOGRqU8obv4dCx_/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1EKSvewySaceQqSzy_sNJTWeWuEjE-T1n?usp=sharing\"\u003eMorning Lecture Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1jo5MMQsfkQ3IQ0pYI9G0pgp5bea6lUnZ?usp=sharing\"\u003eMorning Lab Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1FuFlG5UnP3H0dgFyvBG9kb21deW2UHIU#scrollTo=rTxx-vWi-qct\"\u003eAfternoon Lecture Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1HxWScbDZ0AuBrIZ0N2QDlrVzJBXORFi_#scrollTo=TkGfQYJmI3j1\"\u003eAfternoon Lab Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 1 feedback 
survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/KAdWJLDM9saTZCrT8\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-2-oop-plus-linear-models\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 2: OOP + Linear Models\u003c/h2\u003e\n\u003cp\u003eToday, we are going to cover the basic intuition and terminology behind Object Oriented Programming, as well as introduce two simple, linear approaches to Machine Learning tasks: linear regression and logistic regression.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLecture on OOP and more on functions (morning): \u003ca href=\"https://drive.google.com/file/d/1udI-c1roIS7Fb1cgGQOzRc7a6dfYZWu8/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on Linear and Logistic Regression (afternoon): \u003ca href=\"https://drive.google.com/file/d/1HXn7aat_bGzUh3vpQ7vQxNQvp6GrIi-6/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"https://drive.google.com/file/d/1nidC7fOeHMWnD_QZcSasiqRxUOEx-9Cx/view?usp=sharing\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1-PD2ZRbxyZN3kclo4FPi-cbx-wBh5cbn/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1KFotnZcEKyiRjY5fRKwjLLUbaYzc6Ogi?usp=sharing\"\u003eMorning Lecture Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1gMAZPZs3y532sb3fdeXVqKNjOz-Ri8wa?usp=sharing\"\u003eMorning Lab Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/18f3vNcDg2WKRuip31TCPRHN_t7Fy07Q9?usp=sharing\"\u003eAfternoon Lab Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 2 feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca 
href=\"https://forms.gle/VtHtozjqsB9Y113F9\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-3-data-plus-classifier\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 3: Data + Classifier\u003c/h2\u003e\n\u003cp\u003eToday, we are going to cover data cleaning, and three more classifiers!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLecture on data cleaning and pandas (morning): \u003ca href=\"https://drive.google.com/file/d/1pMHtQo1iITFSMPRls2K7gc1JGd-LK2Nv/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on three classification algorithms (afternoon): \u003ca href=\"https://drive.google.com/file/d/16Vjr3sXnoBTv_2vaa7cEz_t9qn_3QsrC/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"https://drive.google.com/file/d/16ady6_tt96YgiraSzxtZ7CBdASy3SSOu/view?usp=sharing\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1yHaSL73Tki_WULN0k85RY3wmyV3tlpLd/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1i_OfqkrdfNU-fiIbz0bmrKMLf_sZV4xc?usp=sharing\"\u003eMorning Lab Notebook\u003c/a\u003e, \u003ca href=\"https://colab.research.google.com/drive/1aKjJVnmermrw5ysPQWHX5yUJHfVcR8FJ?usp=sharing\"\u003eAfternoon Lab Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 3 feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/GybrD48kDkQbdcMi7\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-4-operations-and-clustering\"\u003e\u003cspan class=\"org-todo done DONE\"\u003eDONE\u003c/span\u003e Day 4: Operations and Clustering\u003c/h2\u003e\n\u003cp\u003eToday, we are going to work on the validation operations tools, and talk about clustering\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLecture on training and data 
operations (morning): \u003ca href=\"https://drive.google.com/file/d/13CXp1pcXLjyAKTGq2ifimVbSeRsDnlGa/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLecture on clustering and feature operations (afternoon): \u003ca href=\"https://drive.google.com/file/d/147eyCXJKx2tTEX_wzY-6L8jPbsZwNKZ2/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eLab Exercises: \u003ca href=\"/posts/kbhaibridge_iris_variance_worksheet/\"\u003eMorning Lab Notes\u003c/a\u003e, \u003ca href=\"https://drive.google.com/file/d/1I61UAf1VnziMs7N7sTinXm-QXWzPKbyA/view?usp=sharing\"\u003eAfternoon Lab Notes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eColab Notebooks: \u003ca href=\"https://colab.research.google.com/drive/1zSGk2e3vFzFliNiSLCs-HxOGm7e-caLC?usp=sharing\"\u003eAfternoon Notebook\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 4 feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/F7sGtFsJryeV3SEJ8\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"day-5-closing-thoughts\"\u003eDay 5: Closing Thoughts\u003c/h2\u003e\n\u003cp\u003eToday, we are going to tie some loose ends with missing data, error analysis, semi supervised learning, cross validation, and ethics.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eClosing thoughts lecture (morning): \u003ca href=\"https://drive.google.com/file/d/1-a6VSDlJRdUb9MPw1d6EMajld3Pnd86N/view?usp=sharing\"\u003eSlides\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eFinal Project\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"/posts/kbhaibridge_final_project/\"\u003eAIBridge Final Project\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eDay 5/Bootcamp feedback survey\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"https://forms.gle/qCA34bWjfFXxeAjZ8\"\u003eLink\u003c/a\u003e\u003c/p\u003e\n\u003ch2 
id=\"other-links-and-resources\"\u003eOther Links and Resources\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTools we use: \u003ca href=\"/posts/kbhaibridge_packages/\"\u003eAIBridge Packages and Tools\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eCleaned Wine Dataset (try cleaning it yourself before using!): \u003ca href=\"https://drive.google.com/file/d/1K54C6QOZ2xlGJls59RRCLXr4OOa-8D1l/view?usp=sharing\"\u003eGoogle Drive\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIris Data with Temperature (don\u0026rsquo;t use without instructions, though!): \u003ca href=\"https://drive.google.com/file/d/1WgruhndN1M1md4vgS87Ho9WS3wAshROP/view?usp=sharing\"\u003eGoogle Drive\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_course_website/","tags":null,"title":"AIBridge Course Website"},{"categories":null,"contents":"Part 1: ML Training Practice One of the things that makes a very good Sommelier is their ability to figure out as much details about a wine as possible with very little information.\nYou are tasked with making a Sommelier program that is able to figure both the type and quality of wine from available chemical information. Also, you have a \u0026ldquo;flavor-ater\u0026rdquo; machine that makes a linear combination of multiple chemical features together (similar to PCA), which is counted as one chemical feature after combination.\nA good Sommelier uses as little information as possible to deduce the quality and type. So, what is the best model(s) you can build for predicting quality and type of wine based on the least amount of features? What features should you choose?\nGood luck!\nPart 2: ML Project Walk-through Create your own machine learning experiement! Begin with a problem in your field; go through the available/your own data, determine what type of problem it is, and discuss why machine learning could be a good solution for the problem. 
Research/quantify the baselines in the field for the task (remembering our discussion on ML validation methods), and determine a list of possible features of your data.\nIf we were to help collect data together, how can we best collect a representative sample? How expensive (resources, monetary, or temporal) would it be? What are some ethical issues?\nSelect the features in the data available to you that would be most relavent (this time you are not trying to minimize the features, but select the most appropriate ones), and the model/training mechanism you think would be most appropriate.\nFinally, present your thinking! Share with us a few (1-3) slides on Friday afternoon. If you have additional time, possibly train the model on baseline data!\n","html":"\u003ch2 id=\"part-1-ml-training-practice\"\u003ePart 1: ML Training Practice\u003c/h2\u003e\n\u003cp\u003eOne of the things that makes a very good Sommelier is their ability to figure out as much details about a wine as possible with very little information.\u003c/p\u003e\n\u003cp\u003eYou are tasked with making a Sommelier program that is able to figure both the type and quality of wine from available chemical information. Also, you have a \u0026ldquo;flavor-ater\u0026rdquo; machine that makes a linear combination of multiple chemical features together (similar to PCA), which is counted as one chemical feature after combination.\u003c/p\u003e\n\u003cp\u003eA good Sommelier uses as little information as possible to deduce the quality and type. So, what is the best model(s) you can build for predicting quality and type of wine based on the least amount of features? What features should you choose?\u003c/p\u003e\n\u003cp\u003eGood luck!\u003c/p\u003e\n\u003ch2 id=\"part-2-ml-project-walk-through\"\u003ePart 2: ML Project Walk-through\u003c/h2\u003e\n\u003cp\u003eCreate your own machine learning experiement! 
Begin with a problem in your field; go through the available/your own data, determine what type of problem it is, and discuss why machine learning could be a good solution for the problem. Research/quantify the baselines in the field for the task (remembering our discussion on ML validation methods), and determine a list of possible features of your data.\u003c/p\u003e\n\u003cp\u003eIf we were to help collect data together, how can we best collect a representative sample? How expensive (resources, monetary, or temporal) would it be? What are some ethical issues?\u003c/p\u003e\n\u003cp\u003eSelect the features in the data available to you that would be most relavent (this time you are not trying to minimize the features, but select the most appropriate ones), and the model/training mechanism you think would be most appropriate.\u003c/p\u003e\n\u003cp\u003eFinally, present your thinking! Share with us a few (1-3) slides on Friday afternoon. If you have additional time, possibly train the model on baseline data!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_final_project/","tags":null,"title":"AIBridge Final Project"},{"categories":null,"contents":"SPOILER ALERT for future labs!! Don\u0026rsquo;t scroll down!\nWe are going to create a copy of the iris dataset with a random variance.\nimport sklearn from sklearn.datasets import load_iris Let\u0026rsquo;s load the iris dataset:\nx,y = load_iris(return_X_y=True) Because we need to generate a lot of random data, let\u0026rsquo;s import random\nimport random Put this in a df\nimport pandas as pd df = pd.DataFrame(x) df 0 1 2 3 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 .. ... ... ... ... 
145 6.7 3.0 5.2 2.3 146 6.3 2.5 5.0 1.9 147 6.5 3.0 5.2 2.0 148 6.2 3.4 5.4 2.3 149 5.9 3.0 5.1 1.8 [150 rows x 4 columns] Let\u0026rsquo;s make 150 random numbers with pretty low variance:\nrandom_ns = [random.uniform(65,65.2) for _ in range(0, 150)] random_series = pd.Series(random_ns) random_series 0 65.127515 1 65.034572 2 65.123271 3 65.043985 4 65.145743 ... 145 65.036410 146 65.157172 147 65.034925 148 65.037373 149 65.042466 Length: 150, dtype: float64 Excellent. Now let\u0026rsquo;s put the two things together!\ndf[\u0026#34;temp\u0026#34;] = random_series df 0 1 2 3 temp 0 5.1 3.5 1.4 0.2 65.127515 1 4.9 3.0 1.4 0.2 65.034572 2 4.7 3.2 1.3 0.2 65.123271 3 4.6 3.1 1.5 0.2 65.043985 4 5.0 3.6 1.4 0.2 65.145743 .. ... ... ... ... ... 145 6.7 3.0 5.2 2.3 65.036410 146 6.3 2.5 5.0 1.9 65.157172 147 6.5 3.0 5.2 2.0 65.034925 148 6.2 3.4 5.4 2.3 65.037373 149 5.9 3.0 5.1 1.8 65.042466 [150 rows x 5 columns] And, while we are at it, let\u0026rsquo;s make new labels\nnames = pd.Series([\u0026#34;sepal length\u0026#34;, \u0026#34;sepal width\u0026#34;, \u0026#34;pedal length\u0026#34;, \u0026#34;pedal width\u0026#34;, \u0026#34;temp\u0026#34;]) df.columns = names df sepal length sepal width pedal length pedal width temp 0 5.1 3.5 1.4 0.2 65.127515 1 4.9 3.0 1.4 0.2 65.034572 2 4.7 3.2 1.3 0.2 65.123271 3 4.6 3.1 1.5 0.2 65.043985 4 5.0 3.6 1.4 0.2 65.145743 .. ... ... ... ... ... 145 6.7 3.0 5.2 2.3 65.036410 146 6.3 2.5 5.0 1.9 65.157172 147 6.5 3.0 5.2 2.0 65.034925 148 6.2 3.4 5.4 2.3 65.037373 149 5.9 3.0 5.1 1.8 65.042466 [150 rows x 5 columns] Excellent. Let\u0026rsquo;s finally get the flower results.\ndf[\u0026#34;species\u0026#34;] = y df sepal length sepal width pedal length pedal width temp species 0 5.1 3.5 1.4 0.2 65.127515 0 1 4.9 3.0 1.4 0.2 65.034572 0 2 4.7 3.2 1.3 0.2 65.123271 0 3 4.6 3.1 1.5 0.2 65.043985 0 4 5.0 3.6 1.4 0.2 65.145743 0 .. ... ... ... ... ... ... 
145 6.7 3.0 5.2 2.3 65.036410 2 146 6.3 2.5 5.0 1.9 65.157172 2 147 6.5 3.0 5.2 2.0 65.034925 2 148 6.2 3.4 5.4 2.3 65.037373 2 149 5.9 3.0 5.1 1.8 65.042466 2 [150 rows x 6 columns] And dump it to a CSV.\ndf.to_csv(\u0026#34;./iris_variance.csv\u0026#34;, index=False) Let\u0026rsquo;s select for the input data again:\nX = df.iloc[:,0:5] y = df.iloc[:,5] X sepal length sepal width pedal length pedal width temp 0 5.1 3.5 1.4 0.2 65.127515 1 4.9 3.0 1.4 0.2 65.034572 2 4.7 3.2 1.3 0.2 65.123271 3 4.6 3.1 1.5 0.2 65.043985 4 5.0 3.6 1.4 0.2 65.145743 .. ... ... ... ... ... 145 6.7 3.0 5.2 2.3 65.036410 146 6.3 2.5 5.0 1.9 65.157172 147 6.5 3.0 5.2 2.0 65.034925 148 6.2 3.4 5.4 2.3 65.037373 149 5.9 3.0 5.1 1.8 65.042466 [150 rows x 5 columns] And use the variance threshold tool:\nfrom sklearn.feature_selection import VarianceThreshold sel = VarianceThreshold(0.1) sel.fit_transform(X) 5.1 3.5 1.4 0.2 4.9 3 1.4 0.2 4.7 3.2 1.3 0.2 4.6 3.1 1.5 0.2 5 3.6 1.4 0.2 5.4 3.9 1.7 0.4 4.6 3.4 1.4 0.3 \u0026hellip;\nAs we expected.\nAnd let\u0026rsquo;s use the select k best tool:\nfrom sklearn.feature_selection import SelectKBest, chi2 sel = SelectKBest(chi2, k=4) res = sel.fit_transform(X, y) res 5.1 3.5 1.4 0.2 4.9 3 1.4 0.2 4.7 3.2 1.3 0.2 4.6 3.1 1.5 0.2 5 3.6 1.4 0.2 5.4 3.9 1.7 0.4 4.6 3.4 1.4 0.3 5 3.4 1.5 0.2 \u0026hellip;\nAlso, as we expected. Got rid of temp.\n","html":"\u003cp\u003eSPOILER ALERT for future labs!! 
Don\u0026rsquo;t scroll down!\u003c/p\u003e\n\u003cp\u003eWe are going to create a copy of the iris dataset with a random variance.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s load the iris dataset:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBecause we need to generate a lot of random data, let\u0026rsquo;s import random\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ePut this in a df\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0 1 2 3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s make 150 random numbers with pretty low variance:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erandom_ns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003euniform\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e65\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e65.2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e150\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erandom_series\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom_ns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erandom_series\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Now let\u0026rsquo;s put the two things together!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;temp\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erandom_series\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0 1 2 3 
temp\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, while we are at it, let\u0026rsquo;s make new labels\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003enames\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;sepal length\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;sepal width\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;pedal length\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;pedal width\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;temp\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enames\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length sepal width pedal length pedal width 
temp\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
Let\u0026rsquo;s finally get the flower results.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;species\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length sepal width pedal length pedal width temp species\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd dump it to a CSV.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./iris_variance.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s select for the input data 
again:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length sepal width pedal length pedal width 
temp\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2 65.127515\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2 65.034572\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2 65.123271\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2 65.043985\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2 65.145743\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3 65.036410\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9 65.157172\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0 65.034925\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3 65.037373\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8 65.042466\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd use the variance threshold tool:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.feature_selection\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eVarianceThreshold\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eVarianceThreshold\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit_transform\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e5.1\u003c/th\u003e\n\u003cth\u003e3.5\u003c/th\u003e\n\u003cth\u003e1.4\u003c/th\u003e\n\u003cth\u003e0.2\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.9\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.7\u003c/td\u003e\n\u003ctd\u003e3.2\u003c/td\u003e\n\u003ctd\u003e1.3\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.1\u003c/td\u003e\n\u003ctd\u003e1.5\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e3.6\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.4\u003c/td\u003e\n\u003ctd\u003e3.9\u003c/td\u003e\n\u003ctd\u003e1.7\u003c/td\u003e\n\u003ctd\u003e0.4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.4\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eAs we expected.\u003c/p\u003e\n\u003cp\u003eAnd let\u0026rsquo;s use the select k best tool:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003esklearn.feature_selection\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echi2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echi2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit_transform\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eres\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e5.1\u003c/th\u003e\n\u003cth\u003e3.5\u003c/th\u003e\n\u003cth\u003e1.4\u003c/th\u003e\n\u003cth\u003e0.2\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.9\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.7\u003c/td\u003e\n\u003ctd\u003e3.2\u003c/td\u003e\n\u003ctd\u003e1.3\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.1\u003c/td\u003e\n\u003ctd\u003e1.5\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e3.6\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.4\u003c/td\u003e\n\u003ctd\u003e3.9\u003c/td\u003e\n\u003ctd\u003e1.7\u003c/td\u003e\n\u003ctd\u003e0.4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.6\u003c/td\u003e\n\u003ctd\u003e3.4\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.3\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e3.4\u003c/td\u003e\n\u003ctd\u003e1.5\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eAlso, as we expected. Got rid of temp.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_iris_variance_worksheet/","tags":null,"title":"AIBridge Iris Variance Worksheet"},{"categories":null,"contents":"This is usually not needed if you are using Google Colab. 
If you are following the instructions provided during our lecture series, please disregard this page.\nHowever, students have expressed interest in working with their own system\u0026rsquo;s copy of Jupyter or local installation. We therefore provide a set of very tenuous instructions for installing the tools used in our session using vanilla C-Python (i.e. not anaconda/conda/miniconda.)\nPython Our tools target Python 3.8+. Use your system\u0026rsquo;s package manager to install Python at least version 3.8, or use Python Foundation\u0026rsquo;s universal installers.\nPackages Python sometimes ships pip, its packaging utility separately. Refer to your own distribution\u0026rsquo;s installation instructions if none of pip or pip3 or python -m pip or python -m pip.\nOnce your copy of pip has been identified, let\u0026rsquo;s move on to\u0026hellip;\nInstalling Packages Here are the packages we will need for our sessions:\nscikit-learn pandas numpy Along with its respective dependencies. Here\u0026rsquo;s a one-liner:\npython3 -m pip install scikit-learn pandas numpy Good luck!\n","html":"\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThis is usually not needed if you are using \u003ca href=\"https://colab.research.google.com/\"\u003eGoogle Colab\u003c/a\u003e.\u003c/strong\u003e\u003c/strong\u003e If you are following the instructions provided during our lecture series, please disregard this page.\u003c/p\u003e\n\u003cp\u003eHowever, students have expressed interest in working with their own system\u0026rsquo;s copy of Jupyter or local installation. We therefore provide a set of very tenuous instructions for installing the tools used in our session using \u003cem\u003evanilla C-Python\u003c/em\u003e (i.e. not anaconda/conda/miniconda.)\u003c/p\u003e\n\u003ch2 id=\"python\"\u003ePython\u003c/h2\u003e\n\u003cp\u003eOur tools target Python 3.8+. 
Use your system\u0026rsquo;s package manager to install Python at least version 3.8, or use \u003ca href=\"https://www.python.org/downloads/\"\u003ePython Foundation\u0026rsquo;s\u003c/a\u003e universal installers.\u003c/p\u003e\n\u003ch2 id=\"packages\"\u003ePackages\u003c/h2\u003e\n\u003cp\u003ePython sometimes ships \u003ccode\u003epip\u003c/code\u003e, its packaging utility separately. Refer to your own distribution\u0026rsquo;s installation instructions if none of \u003ccode\u003epip\u003c/code\u003e or \u003ccode\u003epip3\u003c/code\u003e or \u003ccode\u003epython -m pip\u003c/code\u003e or \u003ccode\u003epython -m pip\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eOnce your copy of pip has been identified, let\u0026rsquo;s move on to\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"installing-packages\"\u003eInstalling Packages\u003c/h2\u003e\n\u003cp\u003eHere are the packages we will need for our sessions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003escikit-learn\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003epandas\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003enumpy\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlong with its respective dependencies. Here\u0026rsquo;s a one-liner:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epython3 -m pip install scikit-learn pandas numpy\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGood luck!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_packages/","tags":null,"title":"AIBridge Packages and Tools"},{"categories":null,"contents":"Rewa Rai Nitin Lab, Dept. 
of Food Sci + Tech - Davis\nWine Classification Task Whole data:\nDecision Tree: 98.46% Random Forest: 99.84% Gaussian NB: 97.08% Regression Task Feature selection with 2 best features actually improved.\nTalkthrough Detecting berry infection by leaf classification. Use FTIR spectroscopy as a means of infection classification.\nTana Hernandez PHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\nTalkthrough Given input for reaction, predict resulting gell strength from protein+carbo+lactic acid.\nGoal to figure out what features are o predict gell formation. Use feature extraction to reduce the need of doing.\nWet lab task: use high-throughput 96 hole plates to measuring kinetics of absorborance and kinetics. In a single hour, 96 data points can be acquired.\nThen, droplet elements are added to the plates.\nModel: take feature inputs which was selected, classification on gell formation and regression for time for gell.\nJimmy Nguyen PHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\nTalk through Need: creating plant-based products which just feels and tastes like actual meet based food.\nTask: given molecular information, classify taste based on like-product and unlike\nLuyao Ma Postdoc Researcher, Nitin Lab, Dept. of Food Sci + Tech - Davis\nTalk thought Problem: lots of antimicrobian resistance in food: on track for 10 million deaths due to antimicrobial resistance. This is caused by antibiotics given to animals, which then is given indirectly to humans. Humans gut bactorials became more more resistant to antibiotics due to antimicrobial bacterial deveolping in animal guts.\nCurrent surveilance systems for antibiotic bacteria: require centralized lab for analysis, data collection is slow, and data integration is very slow (2ish years to publish final results), protocol also changes.\nGoal: rapid in field automatic detection scheme\nExpose wells of bacterial to detect color intensity\n? 
PHD Student, USDA\nWine Naive bayes (6 RFE features); XE Boost Random Forest + Search with 9 features\nTalkthrough Dietary data Random calls Interested in gut miocrobiome influences. Goal: which factors to predict CAZyme dyvirsetiy?\nRandom forest regression Need for prediction for which features: use Shapley Addadtive for result intepretation.\nYue Wine OH WOWO\nReg:\n99.98 train, 59.788 test.\nBalanced dataset Sequential feature selection PCA -\u0026gt; 3 features Random Forest Something else: ExhaustiveFeatureSelector\nClsf:\nstill 4 features.\nTalkthrough Deep learning, CV applications.\nNutrition product validation so far is entirely manual; current work in bias are mostly political, so finding a ground truth is difficult.\nSupervised is probability difficult; getting the data and cluster.\nSriya Sunil PhD Food Science, Cornell\nWine Decision tree classifier; resulted in 7 features.\n99.97% train, 97.08% test.\nSupport Vector Regression; resulted in 7 features as well.\n39.25% train, 32.79% test.\nTalkthrough Microbial growth on baby spinach. Features: initial counts, prevalence of bacteria, growth of bacteria.\nOutput regression to time to spoilage\n","html":"\u003ch2 id=\"rewa-rai\"\u003eRewa Rai\u003c/h2\u003e\n\u003cp\u003eNitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003ch4 id=\"classification-task\"\u003eClassification Task\u003c/h4\u003e\n\u003cp\u003eWhole data:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDecision Tree: 98.46%\u003c/li\u003e\n\u003cli\u003eRandom Forest: 99.84%\u003c/li\u003e\n\u003cli\u003eGaussian NB: 97.08%\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"regression-task\"\u003eRegression Task\u003c/h4\u003e\n\u003cp\u003eFeature selection with 2 best features actually improved.\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eDetecting berry infection by leaf classification. 
Use FTIR spectroscopy as a means of infection classification.\u003c/p\u003e\n\u003ch2 id=\"tana-hernandez\"\u003eTana Hernandez\u003c/h2\u003e\n\u003cp\u003ePHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eGiven input for reaction, predict resulting gell strength from protein+carbo+lactic acid.\u003c/p\u003e\n\u003cp\u003eGoal to figure out what features are o predict gell formation. Use feature extraction to reduce the need of doing.\u003c/p\u003e\n\u003cp\u003eWet lab task: use high-throughput 96 hole plates to measuring kinetics of absorborance and kinetics. In a single hour, 96 data points can be acquired.\u003c/p\u003e\n\u003cp\u003eThen, droplet elements are added to the plates.\u003c/p\u003e\n\u003cp\u003eModel: take feature inputs which was selected, classification on gell formation and regression for time for gell.\u003c/p\u003e\n\u003ch2 id=\"jimmy-nguyen\"\u003eJimmy Nguyen\u003c/h2\u003e\n\u003cp\u003ePHD Student, Nitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"talk-through\"\u003eTalk through\u003c/h3\u003e\n\u003cp\u003eNeed: creating plant-based products which just feels and tastes like actual meet based food.\u003c/p\u003e\n\u003cp\u003eTask: given molecular information, classify taste based on like-product and unlike\u003c/p\u003e\n\u003ch2 id=\"luyao-ma\"\u003eLuyao Ma\u003c/h2\u003e\n\u003cp\u003ePostdoc Researcher, Nitin Lab, Dept. of Food Sci + Tech - Davis\u003c/p\u003e\n\u003ch3 id=\"talk-thought\"\u003eTalk thought\u003c/h3\u003e\n\u003cp\u003eProblem: lots of antimicrobian resistance in food: on track for 10 million deaths due to antimicrobial resistance. This is caused by antibiotics given to animals, which then is given indirectly to humans. 
Humans gut bactorials became more more resistant to antibiotics due to antimicrobial bacterial deveolping in animal guts.\u003c/p\u003e\n\u003cp\u003eCurrent surveilance systems for antibiotic bacteria: require centralized lab for analysis, data collection is slow, and data integration is very slow (2ish years to publish final results), protocol also changes.\u003c/p\u003e\n\u003cp\u003eGoal: rapid in field automatic detection scheme\u003c/p\u003e\n\u003cp\u003eExpose wells of bacterial to detect color intensity\u003c/p\u003e\n\u003ch2 id=\"d1457b\"\u003e?\u003c/h2\u003e\n\u003cp\u003ePHD Student, USDA\u003c/p\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003cp\u003eNaive bayes (6 RFE features); XE Boost Random Forest + Search with 9 features\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDietary data\u003c/li\u003e\n\u003cli\u003eRandom calls\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eInterested in gut miocrobiome influences. 
Goal: which factors to predict CAZyme dyvirsetiy?\u003c/p\u003e\n\u003cp\u003eRandom forest regression\nNeed for prediction for which features: use Shapley Addadtive for result intepretation.\u003c/p\u003e\n\u003ch2 id=\"yue\"\u003eYue\u003c/h2\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003cp\u003eOH WOWO\u003c/p\u003e\n\u003cp\u003eReg:\u003c/p\u003e\n\u003cp\u003e99.98 train, 59.788 test.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBalanced dataset\u003c/li\u003e\n\u003cli\u003eSequential feature selection\u003c/li\u003e\n\u003cli\u003ePCA -\u0026gt; 3 features\u003c/li\u003e\n\u003cli\u003eRandom Forest\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSomething else: \u003ccode\u003eExhaustiveFeatureSelector\u003c/code\u003e\u003c/p\u003e\n\u003cp\u003eClsf:\u003c/p\u003e\n\u003cp\u003estill 4 features.\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eDeep learning, CV applications.\u003c/p\u003e\n\u003cp\u003eNutrition product validation so far is entirely manual; current work in bias are mostly political, so finding a ground truth is difficult.\u003c/p\u003e\n\u003cp\u003eSupervised is probability difficult; getting the data and cluster.\u003c/p\u003e\n\u003ch2 id=\"sriya-sunil\"\u003eSriya Sunil\u003c/h2\u003e\n\u003cp\u003ePhD Food Science, Cornell\u003c/p\u003e\n\u003ch3 id=\"wine\"\u003eWine\u003c/h3\u003e\n\u003cp\u003eDecision tree classifier; resulted in 7 features.\u003c/p\u003e\n\u003cp\u003e99.97% train, 97.08% test.\u003c/p\u003e\n\u003cp\u003eSupport Vector Regression; resulted in 7 features as well.\u003c/p\u003e\n\u003cp\u003e39.25% train, 32.79% test.\u003c/p\u003e\n\u003ch3 id=\"talkthrough\"\u003eTalkthrough\u003c/h3\u003e\n\u003cp\u003eMicrobial growth on baby spinach. 
Features: initial counts, prevalence of bacteria, growth of bacteria.\u003c/p\u003e\n\u003cp\u003eOutput regression to time to spoilage\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridge_student_presentations/","tags":null,"title":"AIBridge Student Presentations"},{"categories":null,"contents":"Welcome to the Day-2 Afternoon Lab! We are super excited to work through tasks in linear regression and logistic regression, as well as familiarize you with the Iris dataset.\nIris Dataset Let\u0026rsquo;s load the Iris dataset! Begin by importing the load_iris tool from sklearn. This is an easy loader scheme for the iris dataset.\nfrom sklearn.datasets import load_iris Then, we simply execute the following to load the data.\nx,y = load_iris(return_X_y=True) We use the return_X_y argument here so that, instead of dumping a large CSV, we get the neat-cleaned input and output values.\nLet\u0026rsquo;s inspect this data a little.\nx[0] 5.1 3.5 1.4 0.2 We can see that each sample of the data is a vector in \\(\\mathbb{R}^4\\). They correspond to four attributes:\nseptal length septal width pedal length pedal width What\u0026rsquo;s the output?\ny[0] 0 We can actually see all the possible values of the output by putting it into a set.\nset(y) 0 1 2 There are three different classes of outputs.\nIris Setosa Iris Versicolour Iris Virginica Excellent. So we can see that we have a dataset of four possible inputs and one possible output. Let\u0026rsquo;s see what we can do with it.\nLogistic Regression The simplest thing we can do is a logistic regression. We have a there categories for output and a lot of data for input. Let\u0026rsquo;s figure out if we can predict the output from the input!\nLet\u0026rsquo;s import logistic regression tool first, and instantiate it.\nfrom sklearn.linear_model import LogisticRegression reg = LogisticRegression() We will \u0026ldquo;fit\u0026rdquo; the data to the model: adjusting the model to best represent the data. 
Our data has 150 samples, so let\u0026rsquo;s fit the data on 140 of them.\ntesting_samples_x = x[-5:] testing_samples_y = y[-5:] x = x[:-5] y = y[:-5] Wonderful. Let\u0026rsquo;s fit the data onto the model.\nreg = reg.fit(x,y) Let\u0026rsquo;s go ahead and run the model on our 10 testing samples!\npredicted_y = reg.predict(testing_samples_x) predicted_y 2 2 2 2 2 And, let\u0026rsquo;s figure out what our actual results say:\ntesting_samples_y 2 2 2 2 2 Woah! That\u0026rsquo;s excellent.\nLinear Regression Instead of predicting the output classes, we can predict some values from the output. How about if we used septal length, width, and pedal length to predict petal width? The output now is a number, not some classes, which calls for linear regression!\nLet\u0026rsquo;s import linear regression tool first, and instantiate it.\nfrom sklearn.linear_model import LinearRegression reg = LinearRegression() We will \u0026ldquo;fit\u0026rdquo; the data to the model again. As we have cleaned out the testing_samples, we simply need to split out the fourth column for the new x and y:\nnew_x = x[:,:3] new_y = x[:,3] new_testing_samples_y = testing_samples_x[:,3] new_testing_samples_x = testing_samples_x[:,:3] Taking now our newly parsed data, let\u0026rsquo;s fit it to a linear model.\nreg = reg.fit(new_x,new_y) Let\u0026rsquo;s go ahead and run the model on our 10 testing samples!\nnew_predicted_y = reg.predict(new_testing_samples_x) new_predicted_y 1.7500734 1.61927061 1.79218767 2.04824364 1.86638164 And, let\u0026rsquo;s figure out what our actual results say:\nnew_testing_samples_y 2.3 1.9 2 2.3 1.8 Close on some samples, not quite there on others. How good does our model actually do? We can use .score() to figure out the \\(r^2\\) value of our line on some data.\nreg.score(new_x, new_y) 0.9405617534915884 Evidently, it seems like about \\(94\\%\\) of the variation in our output data can be explained by the input features. 
This means that the relationship between septals are not exactly a linear pattern!\nNow you try Download the wine quality dataset Predict the quality of wine given its chemical metrics Predict if its red or white wine given its chemical metrics Vary the amount of data used to .fit the model, how does that influence the results? Vary the amount in each \u0026ldquo;class\u0026rdquo; (red wine, white wine) to fit the model, how much does that influence the results. ","html":"\u003cp\u003eWelcome to the Day-2 Afternoon Lab! We are super excited to work through tasks in linear regression and logistic regression, as well as familiarize you with the Iris dataset.\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s load the Iris dataset! Begin by importing the \u003ccode\u003eload_iris\u003c/code\u003e tool from \u003ccode\u003esklearn\u003c/code\u003e. This is an easy loader scheme for the iris dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, we simply execute the following to load the data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the \u003ccode\u003ereturn_X_y\u003c/code\u003e argument here so that, instead of dumping a large \u003ccode\u003eCSV\u003c/code\u003e, we get the neat-cleaned input and output values.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s inspect this data a little.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.1\u003c/td\u003e\n\u003ctd\u003e3.5\u003c/td\u003e\n\u003ctd\u003e1.4\u003c/td\u003e\n\u003ctd\u003e0.2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe can see 
that each \u003ccode\u003esample\u003c/code\u003e of the data is a vector in \\(\\mathbb{R}^4\\). They correspond to four attributes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eseptal length\u003c/li\u003e\n\u003cli\u003eseptal width\u003c/li\u003e\n\u003cli\u003epedal length\u003c/li\u003e\n\u003cli\u003epedal width\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat\u0026rsquo;s the output?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can actually see all the possible values of the output by putting it into a set.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eThere are three different \u003ccode\u003eclasses\u003c/code\u003e of outputs.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIris Setosa\u003c/li\u003e\n\u003cli\u003eIris Versicolour\u003c/li\u003e\n\u003cli\u003eIris Virginica\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eExcellent. So we can see that we have a dataset of four possible inputs and one possible output. Let\u0026rsquo;s see what we can do with it.\u003c/p\u003e\n\u003ch2 id=\"logistic-regression\"\u003eLogistic Regression\u003c/h2\u003e\n\u003cp\u003eThe simplest thing we can do is a logistic regression. We have a there \u003cem\u003ecategories\u003c/em\u003e for output and a lot of data for input. 
Let\u0026rsquo;s figure out if we can predict the output from the input!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s import logistic regression tool first, and instantiate it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.linear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLogisticRegression\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLogisticRegression\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will \u0026ldquo;fit\u0026rdquo; the data to the model: adjusting the model to best represent the data. 
Our data has 150 samples, so let\u0026rsquo;s fit the data on 140 of them.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etesting_samples_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful. Let\u0026rsquo;s fit the data onto the model.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s go ahead and run the model on our 10 testing samples!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epredicted_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003epredict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epredicted_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAnd, let\u0026rsquo;s figure out what our actual results say:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003etesting_samples_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWoah! That\u0026rsquo;s excellent.\u003c/p\u003e\n\u003ch2 id=\"linear-regression\"\u003eLinear Regression\u003c/h2\u003e\n\u003cp\u003eInstead of predicting the output \u003cem\u003eclasses\u003c/em\u003e, we can predict some values from the output. How about if we used septal length, width, and pedal length to predict petal width? The output now is a number, not some classes, which calls for linear regression!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s import linear regression tool first, and instantiate it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.linear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLinearRegression\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eLinearRegression\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will \u0026ldquo;fit\u0026rdquo; the data to the model again. As we have cleaned out the \u003ccode\u003etesting_samples\u003c/code\u003e, we simply need to split out the fourth column for the new x and y:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_testing_samples_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_testing_samples_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etesting_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTaking now our newly parsed data, let\u0026rsquo;s fit it to a linear model.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_y\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s go ahead and run the model on our 10 testing samples!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_predicted_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epredict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_testing_samples_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enew_predicted_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e1.7500734\u003c/td\u003e\n\u003ctd\u003e1.61927061\u003c/td\u003e\n\u003ctd\u003e1.79218767\u003c/td\u003e\n\u003ctd\u003e2.04824364\u003c/td\u003e\n\u003ctd\u003e1.86638164\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAnd, let\u0026rsquo;s figure out what our actual results say:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003enew_testing_samples_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2.3\u003c/td\u003e\n\u003ctd\u003e1.9\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2.3\u003c/td\u003e\n\u003ctd\u003e1.8\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eClose on some samples, not quite there on others. How good does our model actually do? We can use \u003ccode\u003e.score()\u003c/code\u003e to figure out the \\(r^2\\) value of our line on some data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enew_x\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enew_y\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e0.9405617534915884\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, it seems like about \\(94\\%\\) of the variation in our output data can be explained by the input features. This means that the relationship between septals are not \u003cem\u003eexactly\u003c/em\u003e a linear pattern!\u003c/p\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow you try\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDownload the \u003ca href=\"https://archive.ics.uci.edu/ml/datasets/Wine+Quality\"\u003ewine quality dataset\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ePredict the quality of wine given its chemical metrics\u003c/li\u003e\n\u003cli\u003ePredict if its red or white wine given its chemical metrics\u003c/li\u003e\n\u003cli\u003eVary the amount of data used to .fit the model, how does that influence the results?\u003c/li\u003e\n\u003cli\u003eVary the amount in each \u0026ldquo;class\u0026rdquo; (red wine, white wine) to fit the model, how much does that influence the results.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d1aft/","tags":null,"title":"AIBridgeLab D2Aft"},{"categories":null,"contents":"Woah! We talked about a lot of different ways of doing classification today! Let\u0026rsquo;s see what we can do about this for the Iris dataset!\nIris Dataset Let\u0026rsquo;s load the Iris dataset! Begin by importing the load_iris tool from sklearn. This is an easy loader scheme for the iris dataset.\nfrom sklearn.datasets import load_iris Then, we simply execute the following to load the data.\nx,y = load_iris(return_X_y=True) We use the return_X_y argument here so that, instead of dumping a large CSV, we get the neat-cleaned input and output values.\nA reminder that there is three possible flowers that we can sort by.\nDecision Trees Scikit learn has great facilities for using decision trees for classification! 
Let\u0026rsquo;s use some of them by fitting to the Iris dataset.\nLet us begin by importing the SciKit learn tree system:\nfrom sklearn.tree import DecisionTreeClassifier We will fit and instantiate this classifier and fit it to the data exactly!\nclf = DecisionTreeClassifier() clf = clf.fit(x,y) One cool thing about decision trees is that we can actually see what its doing! by looking at the series of splits and decisions. This is a function provided by tree too.\n# We first import the plotting utility from matplotlib import matplotlib.pyplot as plt # as well as the tree plotting tool from sklearn.tree import plot_tree # We call the tree plot tool, which puts it on teh matplotlib graph for side effects plot_tree(clf) # And we save the figure plt.savefig(\u0026#34;tree.png\u0026#34;) Cool! As you can see, by the end of the entire graph, the gini impurity of each node has been sorted to 0.\nApparently, if the third feature (pedal length) is smaller that 2.45, it is definitely the first type of flower!\nCan you explain the rest of the divisions?\nThere are some arguments available in .fit of a DecisionTreeClassifier which controls for when splitting ends; for instance, max_depth controls the maximum depth by which the tree can go.\nExtra Addition! Random Forests. If you recall, we make the initial splitting decisions fairly randomly, and simply select the one with the lowest Ginni impurity. Of course, this makes the selection of the initial sets of splits very important.\nWhat if, instead of needing to make a decision about that now, we can just deal with it later? Well, that\u0026rsquo;s where the addition of Random Forests come in.\nAs the name suggests, instead of having one great tree that does a \u0026ldquo;pretty good\u0026rdquo; job, we can have a lot of trees acting in ensemble! 
We can randomly start a bunch of random trees, and pick the selection that most would correspond with.\nRandom forests come from the ensemble package from sklearn; we can use it fairly simply:\nfrom sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() Wonderful! I bet you can guess what the syntax is. Instead of fitting on the whole dataset, though, we will fit on the first 145 items.\nclf = clf.fit(x[:-5],y[:-5]) We can go ahead and run predict on some samples, just to see how it does on data it has not already seen before!\nclf.score(x[-5:], y[-5:]) 1.0 As you can see, it still does pretty well!\nSVM Let\u0026rsquo;s put another classification technique we learned today to use! Support Vector Machines. The entire syntax to manipulate support vector machines is very simple; at this point, you can probably guess it in yours sleep :)\nLet\u0026rsquo;s import a SVM:\nfrom sklearn import svm Great. Now, we will instantiate it and fit it onto the data. SVC is the support-vector machine classifier.\nclf = svm.SVC() clf.fit(x,y) Excellent, now, let\u0026rsquo;s score our predictions:\nclf.score(x,y) 0.9733333333333334 As you can see, our data is not entirely linear! Fitting our entire dataset onto a linear SVM didn\u0026rsquo;t score perfectly, which means that the model is not complex enough to support our problem.\nScikit\u0026rsquo;s support vector machine supports lots of nonlinearity function; this is set by the argument kernel. For instance, if we wanted a nonlinear, exponential function kernel (where nonlinear function \\(f(x,x\u0026rsquo;)= e^{-\\gamma||\\big\u0026lt;x,x\u0026rsquo;\\big\u0026gt;||^2}\\)), we can say:\nclf = svm.SVC(kernel=\u0026#34;rbf\u0026#34;) clf.fit(x,y) clf.score(x,y) 0.9733333333333334 Looks like our results are fairly similar, though.\nNaive Bayes One last one! Its Bayes time. 
Let\u0026rsquo;s first take a look at how an Naive Bayes implementation can be done via Scikit learn.\nOne of the things that the Scikit Learn Naive Bayes estimator does differently than the one that we learned via probabilities is that it assumes that\u0026mdash;instead of a uniform distribution (and therefore \u0026ldquo;chance of occurrence\u0026rdquo; is just occurrence divided by count), our samples are normally distributed. Therefore, we have that\n\\begin{equation} P(x_i | y) = \\frac{1}{\\sqrt{2\\pi{\\sigma^2}_y}}e^{\\left(-\\frac{(x_i-\\mu_y)^2}{2{\\sigma^2}_y}\\right)} \\end{equation}\nWe can instantiate such a model with the same exact syntax.\nfrom sklearn.naive_bayes import GaussianNB clf = GaussianNB() clf = clf.fit(x,y) Let\u0026rsquo;s see how it does!\nclf.score(x,y) 0.96 Same thing as before, it seems simple probabilities can\u0026rsquo;t model our relationship super well. However, this is still a fairly accurate and powerful classifier.\nNow you try! Try all three classifiers on the Wine dataset for red-white divide! Which one does better on generalizing to data you haven\u0026rsquo;t seen before? Explain the results of the decision trees trained on the Wine data by plotting it. Is there anything interesting that the tree used as a heuristic that came up? The probabilistic, uniform Naive-Bayes is fairly simple to implement write if we are using the traditional version of the Bayes theorem. Can you use Pandas to implement one yourself? ","html":"\u003cp\u003eWoah! We talked about a lot of different ways of doing classification today! Let\u0026rsquo;s see what we can do about this for the Iris dataset!\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s load the Iris dataset! Begin by importing the \u003ccode\u003eload_iris\u003c/code\u003e tool from \u003ccode\u003esklearn\u003c/code\u003e. 
This is an easy loader scheme for the iris dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, we simply execute the following to load the data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the \u003ccode\u003ereturn_X_y\u003c/code\u003e argument here so that, instead of dumping a large \u003ccode\u003eCSV\u003c/code\u003e, we get the neat-cleaned input and output values.\u003c/p\u003e\n\u003cp\u003eA reminder that there is three possible flowers that 
we can sort by.\u003c/p\u003e\n\u003ch2 id=\"decision-trees\"\u003eDecision Trees\u003c/h2\u003e\n\u003cp\u003eScikit learn has great facilities for using decision trees for classification! Let\u0026rsquo;s use some of them by fitting to the Iris dataset.\u003c/p\u003e\n\u003cp\u003eLet us begin by importing the SciKit learn tree system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.tree\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDecisionTreeClassifier\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will fit and instantiate this classifier and fit it to the data exactly!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDecisionTreeClassifier\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOne cool thing about decision trees is that we can actually see what its \u003cem\u003edoing!\u003c/em\u003e by looking at the series of splits and decisions. This is a function provided by tree too.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# We first import the plotting utility from matplotlib\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# as well as the tree plotting tool\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.tree\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot_tree\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# We call the tree plot tool, which puts it on teh matplotlib graph for side effects\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplot_tree\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# And we save the figure\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esavefig\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;tree.png\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/tree.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCool! 
As you can see, by the end of the entire graph, the gini impurity of each node has been sorted to 0.\u003c/p\u003e\n\u003cp\u003eApparently, if the third feature (pedal length) is smaller that 2.45, it is definitely the first type of flower!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_11-46-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCan you explain the rest of the divisions?\u003c/p\u003e\n\u003cp\u003eThere are some arguments available in \u003ccode\u003e.fit\u003c/code\u003e of a \u003ccode\u003eDecisionTreeClassifier\u003c/code\u003e which controls for when splitting ends; for instance, \u003ccode\u003emax_depth\u003c/code\u003e controls the maximum depth by which the tree can go.\u003c/p\u003e\n\u003ch2 id=\"extra-addition-random-forests-dot\"\u003eExtra Addition! Random Forests.\u003c/h2\u003e\n\u003cp\u003eIf you recall, we make the initial splitting decisions fairly randomly, and simply select the one with the lowest Ginni impurity. Of course, this makes the selection of the initial sets of splits very important.\u003c/p\u003e\n\u003cp\u003eWhat if, instead of needing to make a decision about that now, we can just deal with it later? 
Well, that\u0026rsquo;s where the addition of Random Forests come in.\u003c/p\u003e\n\u003cp\u003eAs the name suggests, instead of having one great tree that does a \u0026ldquo;pretty good\u0026rdquo; job, we can have a lot of trees acting in \u003cem\u003eensemble!\u003c/em\u003e We can randomly start a bunch of random trees, and pick the selection that most would correspond with.\u003c/p\u003e\n\u003cp\u003eRandom forests come from the ensemble package from \u003ccode\u003esklearn\u003c/code\u003e; we can use it fairly simply:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.ensemble\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful! I bet you can guess what the syntax is. 
Instead of fitting on the whole dataset, though, we will fit on the first 145 items.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can go ahead and run predict on some samples, just to see how it does on data it has not already seen before!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1.0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAs you can see, it still does pretty well!\u003c/p\u003e\n\u003ch2 id=\"svm\"\u003eSVM\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s put another classification technique we learned today to use! Support Vector Machines. 
The entire syntax to manipulate support vector machines is very simple; at this point, you can probably guess it in yours sleep :)\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s import a SVM:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esvm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat. Now, we will instantiate it and fit it onto the data. \u003ccode\u003eSVC\u003c/code\u003e is the support-vector machine classifier.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esvm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent, now, let\u0026rsquo;s score our predictions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.9733333333333334\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAs you can see, our data is not entirely linear! Fitting our entire dataset onto a linear SVM didn\u0026rsquo;t score perfectly, which means that the model is not complex enough to support our problem.\u003c/p\u003e\n\u003cp\u003eScikit\u0026rsquo;s support vector machine supports lots of nonlinearity function; this is set by the argument \u003ccode\u003ekernel\u003c/code\u003e. 
For instance, if we wanted a nonlinear, exponential function kernel (where nonlinear function \\(f(x,x\u0026rsquo;)= e^{-\\gamma||\\big\u0026lt;x,x\u0026rsquo;\\big\u0026gt;||^2}\\)), we can say:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esvm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekernel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;rbf\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.9733333333333334\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLooks like our results are fairly similar, though.\u003c/p\u003e\n\u003ch2 id=\"naive-bayes\"\u003eNaive Bayes\u003c/h2\u003e\n\u003cp\u003eOne last one! Its Bayes time. Let\u0026rsquo;s first take a look at how an Naive Bayes implementation can be done via Scikit learn.\u003c/p\u003e\n\u003cp\u003eOne of the things that the Scikit Learn Naive Bayes estimator does differently than the one that we learned via probabilities is that it assumes that\u0026mdash;instead of a uniform distribution (and therefore \u0026ldquo;chance of occurrence\u0026rdquo; is just occurrence divided by count), our samples are normally distributed. 
Therefore, we have that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_i | y) = \\frac{1}{\\sqrt{2\\pi{\\sigma^2}_y}}e^{\\left(-\\frac{(x_i-\\mu_y)^2}{2{\\sigma^2}_y}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can instantiate such a model with the same exact syntax.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.naive_bayes\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eGaussianNB\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eGaussianNB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s see how it does!\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.96\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSame thing as before, it seems simple probabilities can\u0026rsquo;t model our relationship super well. However, this is still a fairly accurate and powerful classifier.\u003c/p\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow you try!\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTry all three classifiers on the Wine dataset for red-white divide! Which one does better on generalizing to data you haven\u0026rsquo;t seen before?\u003c/li\u003e\n\u003cli\u003eExplain the results of the decision trees trained on the Wine data by plotting it. Is there anything interesting that the tree used as a heuristic that came up?\u003c/li\u003e\n\u003cli\u003eThe \u003cem\u003eprobabilistic\u003c/em\u003e, uniform Naive-Bayes is fairly simple to implement write if we are using the traditional version of the Bayes theorem. 
Can you use Pandas to implement one yourself?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d3_d4/","tags":null,"title":"AIBridgeLab D3/D4"},{"categories":null,"contents":"Welcome to the Day-3 Morning Lab! We are glad for you to join us. Today, we are learning about how Pandas, a data manipulation tool, works, and working on cleaning some data of your own!\nIris Dataset We are going to lead the Iris dataset from sklearn again. This time, however, we will load the full dataset and parse it ourselves (instead of using return_X_y.)\nLet\u0026rsquo;s begin by importing the Iris dataset, as we expect.\nfrom sklearn.datasets import load_iris And, load the dataset to see what it looks like.\niris = load_iris() iris.keys() dict_keys([\u0026#39;data\u0026#39;, \u0026#39;target\u0026#39;, \u0026#39;frame\u0026#39;, \u0026#39;target_names\u0026#39;, \u0026#39;DESCR\u0026#39;, \u0026#39;feature_names\u0026#39;, \u0026#39;filename\u0026#39;, \u0026#39;data_module\u0026#39;]) We have a pretty large dictionary full of information! Let\u0026rsquo;s pull out data (our input data), target (our output data), and feature_names, the names of our feature.\niris_in = iris[\u0026#34;data\u0026#34;] iris_out = iris[\u0026#34;target\u0026#34;] iris_names = iris[\u0026#34;feature_names\u0026#34;] Data Manipulation pandas is a very helpful utility that allow us to see into data more conveniently. The object that we are usually working with, when using pandas, is called a DataFrame. We can actually create a DataFrame pretty easily. Let\u0026rsquo;s first import pandas\nimport pandas as pd Loading Data We have aliased it as pd so that its easier to type. Awesome! Let\u0026rsquo;s make a DataFrame.\ndf = pd.DataFrame(iris_in) df 0 1 2 3 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 .. ... ... ... ... 
145 6.7 3.0 5.2 2.3 146 6.3 2.5 5.0 1.9 147 6.5 3.0 5.2 2.0 148 6.2 3.4 5.4 2.3 149 5.9 3.0 5.1 1.8 [150 rows x 4 columns] Nice! We have our input data contained in a data frame and nicely printed in a table; cool! However, the column names 1, 2, 3, 4 aren\u0026rsquo;t exactly the most useful labels for us. Instead, then, let\u0026rsquo;s change the column headers to:\niris_names sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) How? We can both get and set the columns via df.columns:\ndf.columns = iris_names Let\u0026rsquo;s look at the DataFrame again!\ndf sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 .. ... ... ... ... 145 6.7 3.0 5.2 2.3 146 6.3 2.5 5.0 1.9 147 6.5 3.0 5.2 2.0 148 6.2 3.4 5.4 2.3 149 5.9 3.0 5.1 1.8 [150 rows x 4 columns] Excellent! Now our data frame looks much more reasonable.\nWranging Data How do we manipulate the data around? Well, we can index this data by both columns and rows.\nIndexing by columns first is very easy. Pandas tables are, by default, \u0026ldquo;column-major\u0026rdquo;. This means that we can just index the columns just like a list!\ndf[\u0026#34;petal width (cm)\u0026#34;] 0 0.2 1 0.2 2 0.2 3 0.2 4 0.2 ... 145 2.3 146 1.9 147 2.0 148 2.3 149 1.8 Name: petal width (cm), Length: 150, dtype: float64 Nice! I want to know introduce the idea of a \u0026ldquo;cursor\u0026rdquo;. A \u0026ldquo;cursor\u0026rdquo; is used to index this high-dimensional data; think about it as the way to turn this table into something like an indexable 1-D list.\nThe simplest cursor is .loc (\u0026ldquo;locator.\u0026rdquo;)\nUnlike list indexing directly, .loc is \u0026ldquo;row-major:\u0026rdquo; the first index selects rows instead of columns.\ndf.loc[0] sepal length (cm) 5.1 sepal width (cm) 3.5 petal length (cm) 1.4 petal width (cm) 0.2 Name: 0, dtype: float64 Nice! 
You can see that .loc turned our table into a list, with each \u0026ldquo;sample\u0026rdquo; of the data more clearly represented by indexing it like a list.\nWhat if, then, we want to select the \u0026ldquo;pedal width\u0026rdquo; value inside this sample? We just select the first index, a comma, then select the second index.\ndf.loc[0, \u0026#34;petal width (cm)\u0026#34;] 0.2 Excellent! We can see, because we changed the header columns to be strings, we have to index them like strings.\nWhat if, instead of the first row, we want to get\u0026hellip; say, the first, fifth, and sixth rows? Unlike traditional lists, Pandas\u0026rsquo; cursors can be indexed by a list.\nSo this:\ndf.loc[0] sepal length (cm) 5.1 sepal width (cm) 3.5 petal length (cm) 1.4 petal width (cm) 0.2 Name: 0, dtype: float64 turns into\ndf.loc[[0,2,8,9]] sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 2 4.7 3.2 1.3 0.2 8 4.4 2.9 1.4 0.2 9 4.9 3.1 1.5 0.1 This would give us the 0th, 2nd, 8th, and 9th row!\nThis is all good, but, it\u0026rsquo;s kind of annoying to type the column names (like \u0026ldquo;petal width (cm)\u0026rdquo;) every time! No worries, we can address this.\niloc is a variant of loc which uses integer indexes. For row indexing, the syntax remains exactly the same; iloc, however, converts all column indexes to integers sequentially. Therefore:\ndf.loc[0, \u0026#34;petal width (cm)\u0026#34;] becomes\ndf.iloc[0, 3] 0.2 Nice! Isn\u0026rsquo;t that convenient.\nSome statistics The main gist of the lab here is to manipulate the input data a little. Pandas provides many helpful utilities to help us with that. For instance, let\u0026rsquo;s take a single feature in the data, say, the pedal with:\npwidth = df[\u0026#34;petal width (cm)\u0026#34;] # same pwidth = df.iloc[:,3], where : returns everything in the row dimention pwidth 0 0.2 1 0.2 2 0.2 3 0.2 4 0.2 ... 
145 2.3 146 1.9 147 2.0 148 2.3 149 1.8 Name: petal width (cm), Length: 150, dtype: float64 We can now find out how distributed this data is, to glean some info about normalization! The most basic is for us to find the mean width of the petals:\npwidth.mean() 1.1993333333333336 Awesome! We can calculate the standard by applying this constant to that entire row. The syntax works just like how you expect\u0026mdash;subtracting a scalar from the whole column just subtracts that constant from every element\u0026mdash;without any fuss:\n(((pwidth-pwidth.mean())**2).sum()/len(pwidth))**0.5 0.7596926279021594 Cool! In the scheme of things, that\u0026rsquo;s actually a pretty good. However, if it was not, we could normalize the data!\nLet\u0026rsquo;s first get the norm of the vector\npwidth_norm = sum(pwidth**2)**0.5 pwidth_norm 17.38763928772391 And, let\u0026rsquo;s normalize our vector by this norm!\npwidth_normd = pwidth/pwidth_norm pwidth_normd 0 0.011502 1 0.011502 2 0.011502 3 0.011502 4 0.011502 ... 145 0.132278 146 0.109273 147 0.115024 148 0.132278 149 0.103522 Name: petal width (cm), Length: 150, dtype: float64 Excellent. Let\u0026rsquo;s find out its standard deviation again! This time we will use .std() instead.\npwidth_normd.std() 0.04383790440709825 Much better.\nNow you try Load the wine dataset into a DataFrame and manipulate it. Feed slices back into our functions yesterday! Can you make the subsets of the data you made yesterday via the .iloc notation to make slicing easier? Can you quantify the accuracy, precision, and recall on a shuffled version of the wine dataset and logistic regression? seed=0 Is there any columns that need normalisation? Any outliers (2 std. dev away)? Why/why not? Create a balanced version of the wine dataset between red and white classes. Does fitting this normalized version into our model makes training results better? ","html":"\u003cp\u003eWelcome to the Day-3 Morning Lab! We are glad for you to join us. 
Today, we are learning about how Pandas, a data manipulation tool, works, and working on cleaning some data of your own!\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eWe are going to lead the Iris dataset from \u003ccode\u003esklearn\u003c/code\u003e again. This time, however, we will load the full dataset and parse it ourselves (instead of using \u003ccode\u003ereturn_X_y\u003c/code\u003e.)\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by importing the Iris dataset, as we expect.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, load the dataset to see what it looks like.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ekeys\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edict_keys([\u0026#39;data\u0026#39;, \u0026#39;target\u0026#39;, \u0026#39;frame\u0026#39;, \u0026#39;target_names\u0026#39;, \u0026#39;DESCR\u0026#39;, \u0026#39;feature_names\u0026#39;, \u0026#39;filename\u0026#39;, \u0026#39;data_module\u0026#39;])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe have a pretty large dictionary full of information! Let\u0026rsquo;s pull out \u003ccode\u003edata\u003c/code\u003e (our input data), \u003ccode\u003etarget\u003c/code\u003e (our output data), and \u003ccode\u003efeature_names\u003c/code\u003e, the names of our feature.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_in\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;data\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_out\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_names\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;feature_names\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"data-manipulation\"\u003eData Manipulation\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-15_15-52-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ccode\u003epandas\u003c/code\u003e is a very helpful utility that allow us to see into data more conveniently. The object that we are usually working with, when using pandas, is called a \u003ccode\u003eDataFrame\u003c/code\u003e. We can actually create a \u003ccode\u003eDataFrame\u003c/code\u003e pretty easily. 
Let\u0026rsquo;s first import \u003ccode\u003epandas\u003c/code\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"loading-data\"\u003eLoading Data\u003c/h3\u003e\n\u003cp\u003eWe have aliased it as \u003ccode\u003epd\u003c/code\u003e so that its easier to type. Awesome! Let\u0026rsquo;s make a DataFrame.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiris_in\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0 1 2 3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! We have our input data contained in a data frame and nicely printed in a table; cool! 
However, the column names \u003ccode\u003e1\u003c/code\u003e, \u003ccode\u003e2\u003c/code\u003e, \u003ccode\u003e3\u003c/code\u003e, \u003ccode\u003e4\u003c/code\u003e aren\u0026rsquo;t exactly the most useful labels for us. Instead, then, let\u0026rsquo;s change the column headers to:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris_names\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003esepal length (cm)\u003c/td\u003e\n\u003ctd\u003esepal width (cm)\u003c/td\u003e\n\u003ctd\u003epetal length (cm)\u003c/td\u003e\n\u003ctd\u003epetal width (cm)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eHow? 
We can both get and set the columns via \u003ccode\u003edf.columns\u003c/code\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris_names\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s look at the \u003ccode\u003eDataFrame\u003c/code\u003e again!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 1.5 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 5.2 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 5.0 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 5.2 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 5.4 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 5.1 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent! Now our data frame looks much more reasonable.\u003c/p\u003e\n\u003ch2 id=\"wranging-data\"\u003eWranging Data\u003c/h2\u003e\n\u003cp\u003eHow do we manipulate the data around? Well, we can index this data by both columns and rows.\u003c/p\u003e\n\u003cp\u003eIndexing by columns first is very easy. Pandas tables are, by default, \u0026ldquo;column-major\u0026rdquo;. 
This means that we can just index the columns just like a list!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eName: petal width (cm), Length: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! I want to know introduce the idea of a \u0026ldquo;cursor\u0026rdquo;. A \u0026ldquo;cursor\u0026rdquo; is used to index this high-dimensional data; think about it as the way to turn this table into something like an indexable 1-D list.\u003c/p\u003e\n\u003cp\u003eThe simplest cursor is \u003ccode\u003e.loc\u003c/code\u003e (\u0026ldquo;locator.\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eUnlike list indexing directly, \u003ccode\u003e.loc\u003c/code\u003e is \u0026ldquo;row-major:\u0026rdquo; the first index selects \u003cem\u003erows\u003c/em\u003e instead of columns.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal length (cm) 5.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal width (cm) 3.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal length 
(cm) 1.4\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal width (cm) 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: 0, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! You can see that \u003ccode\u003e.loc\u003c/code\u003e turned our table into a list, with each \u0026ldquo;sample\u0026rdquo; of the data more clearly represented by indexing it like a list.\u003c/p\u003e\n\u003cp\u003eWhat if, then, we want to select the \u0026ldquo;pedal width\u0026rdquo; value inside this sample? We just select the first index, a comma, then select the second index.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.2\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent! 
We can see, because we changed the header columns to be strings, we have to index them like strings.\u003c/p\u003e\n\u003cp\u003eWhat if, instead of the first row, we want to get\u0026hellip; say, the first, fifth, and sixth rows? Unlike traditional lists, Pandas\u0026rsquo; cursors can be \u003cem\u003eindexed by a list\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eSo this:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal length (cm) 5.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003esepal width (cm) 3.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal length (cm) 1.4\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epetal width (cm) 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: 0, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eturns into\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 1.3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e8 4.4 2.9 1.4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e9 4.9 3.1 1.5 0.1\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis would give us the 0th, 2nd, 8th, and 9th row!\u003c/p\u003e\n\u003cp\u003eThis is all 
good, but, it\u0026rsquo;s kind of annoying to type the column names (like \u0026ldquo;petal width (cm)\u0026rdquo;) every time! No worries, we can address this.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003eiloc\u003c/code\u003e is a variant of \u003ccode\u003eloc\u003c/code\u003e which uses integer indexes. For row indexing, the syntax remains exactly the same; \u003ccode\u003eiloc\u003c/code\u003e, however, converts all column indexes to integers sequentially. Therefore:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ebecomes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.2\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice! Isn\u0026rsquo;t that convenient.\u003c/p\u003e\n\u003ch2 id=\"some-statistics\"\u003eSome statistics\u003c/h2\u003e\n\u003cp\u003eThe main gist of the lab here is to manipulate the input data a little. Pandas provides many helpful utilities to help us with that. For instance, let\u0026rsquo;s take a single feature in the data, say, the pedal with:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;petal width (cm)\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# same pwidth = df.iloc[:,3], where : returns everything in the row dimention\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003epwidth\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 1.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 2.0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 2.3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 1.8\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: petal width (cm), Length: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can now find out how distributed this data is, to glean some info about normalization! 
The most basic is for us to find the mean width of the petals:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1.1993333333333336\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome! We can calculate the standard by applying this constant to that entire row. 
The syntax works just like how you expect\u0026mdash;subtracting a scalar from the whole column just subtracts that constant from every element\u0026mdash;without any fuss:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e0.7596926279021594\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eCool! In the scheme of things, that\u0026rsquo;s actually a pretty good. However, if it was not, we could normalize the data!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first get the norm of the vector\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_norm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_norm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e17.38763928772391\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, let\u0026rsquo;s normalize our vector by this norm!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_normd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epwidth\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epwidth_norm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_normd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.011502\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 0.132278\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 0.109273\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 0.115024\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 
0.132278\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 0.103522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: petal width (cm), Length: 150, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let\u0026rsquo;s find out its standard deviation again! This time we will use \u003ccode\u003e.std()\u003c/code\u003e instead.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epwidth_normd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.04383790440709825\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMuch better.\u003c/p\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow you try\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLoad the wine dataset into a DataFrame and manipulate it.\u003c/li\u003e\n\u003cli\u003eFeed slices back into our functions yesterday! 
Can you make the subsets of the data you made yesterday via the \u003ccode\u003e.iloc\u003c/code\u003e notation to make slicing easier?\u003c/li\u003e\n\u003cli\u003eCan you quantify the accuracy, precision, and recall on a shuffled version of the wine dataset and logistic regression? \u003ccode\u003eseed=0\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eIs there any columns that need normalisation? Any outliers (2 std. dev away)? Why/why not?\u003c/li\u003e\n\u003cli\u003eCreate a balanced version of the wine dataset between red and white classes. Does fitting this normalized version into our model makes training results better?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d2aft/","tags":null,"title":"AIBridgeLab D3Morning"},{"categories":null,"contents":"Let\u0026rsquo;s run some clustering algorithms! We are still going to use the Iris data, because we are super familiar with it already. Loading it works the exactly in the same way; I will not repeat the notes but just copy the code and description from before here for your reference\nIris Dataset Let\u0026rsquo;s load the Iris dataset! Begin by importing the load_iris tool from sklearn. This is an easy loader scheme for the iris dataset.\nfrom sklearn.datasets import load_iris Then, we simply execute the following to load the data.\nx,y = load_iris(return_X_y=True) We use the return_X_y argument here so that, instead of dumping a large CSV, we get the neat-cleaned input and output values.\nk-means clustering The basics of k-means clustering works exactly the same as before, except this time we have to specify and get a few more parameters. Let\u0026rsquo;s begin by importing k-means and getting some clusters together!\nfrom sklearn.cluster import KMeans Let\u0026rsquo;s instantiate the KMeans cluster with 3 clusters, which is the number of classes there is.\nkmeans = KMeans(n_clusters=3) kmeans = kmeans.fit(x) Great! 
Let\u0026rsquo;s take a look at how it sorted all of our samples\nkmeans.labels_ 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2 2 2 2 0 2 2 2 2 2 2 0 0 2 2 2 2 0 2 0 2 0 2 2 0 0 2 2 2 2 2 0 2 2 2 2 0 2 2 2 0 2 2 2 0 2 2 0 Let\u0026rsquo;s plot our results.\nimport matplotlib.pyplot as plt We then need to define some colours.\ncolors=[\u0026#34;red\u0026#34;, \u0026#34;green\u0026#34;, \u0026#34;blue\u0026#34;] Recall from yesterday that we realized that inner Septal/Pedal differences are not as variable as intra Septal/Pedal differences. So, we will plot the first and third columns next to each other, and use labels_ for coloring.\n# for each element for indx, element in enumerate(x): # add a scatter point plt.scatter(element[0], element[1], color=colors[kmeans.labels_[indx]]) # save our figure plt.savefig(\u0026#34;scatter.png\u0026#34;) Nice. These look like the main groups are captured!\nLet\u0026rsquo;s compare that to intended classes\ny 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 There are obviously some clustering mistakes. Woah! Without prompting with answers, our model was able to figure out much of the general clusters at which our data exists. Nice.\nWe can also see the \u0026ldquo;average\u0026rdquo;/\u0026ldquo;center\u0026rdquo; for each of the clusters:\nkmeans.cluster_centers_ 5.9016129 2.7483871 4.39354839 1.43387097 5.006 3.428 1.462 0.246 6.85 3.07368421 5.74210526 2.07105263 Nice! 
These are what our model thinks are the centers of each group.\nPrinciple Component Analysis Let\u0026rsquo;s try reducing the dimentionality of our data by one, so that we only have three dimensions. We do this, by, again, begin importing PCA.\nfrom sklearn.decomposition import PCA When we are instantiating, we need to create a PCA instance with a keyword n_components, which is the number of dimensions (\u0026ldquo;component vectors\u0026rdquo;) we want to keep.\npca = PCA(n_components=3) Great, let\u0026rsquo;s fit our data to this PCA.\npca.fit(x) Wonderful. singular_values_ is how we can get out of the PCA\u0026rsquo;d change of basis results:\ncob = pca.components_ cob 0.36138659 -0.08452251 0.85667061 0.3582892 0.65658877 0.73016143 -0.17337266 -0.07548102 -0.58202985 0.59791083 0.07623608 0.54583143 So, we can then take a change of basis matrix and apply it to some samples!\ncob@(x[0]) 2.81823951 5.64634982 -0.65976754 What\u0026rsquo;s @? Well\u0026hellip; Unfortunately, Python has different operator for matrix-operations (\u0026ldquo;dot\u0026rdquo;); otherwise, it will perform element-wise operations.\nWe can actually also see the \\(R^2\\) values on each of the axis: the variance explained by each of the dimensions.\npca.explained_variance_ 4.22824171 0.24267075 0.0782095 Nice! As you can see, much of the variance is contained in our first dimension here.\n","html":"\u003cp\u003eLet\u0026rsquo;s run some clustering algorithms! We are still going to use the Iris data, because we are super familiar with it already. Loading it works the exactly in the same way; I will not repeat the notes but just copy the code and description from before here for your reference\u003c/p\u003e\n\u003ch2 id=\"iris-dataset\"\u003eIris Dataset\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s load the Iris dataset! Begin by importing the \u003ccode\u003eload_iris\u003c/code\u003e tool from \u003ccode\u003esklearn\u003c/code\u003e. 
This is an easy loader scheme for the iris dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.datasets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, we simply execute the following to load the data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturn_X_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the \u003ccode\u003ereturn_X_y\u003c/code\u003e argument here so that, instead of dumping a large \u003ccode\u003eCSV\u003c/code\u003e, we get the neat-cleaned input and output values.\u003c/p\u003e\n\u003ch2 id=\"k-means-clustering\"\u003ek-means 
clustering\u003c/h2\u003e\n\u003cp\u003eThe basics of k-means clustering works exactly the same as before, except this time we have to specify and get a few more parameters. Let\u0026rsquo;s begin by importing k-means and getting some clusters together!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.cluster\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s instantiate the KMeans cluster with 3 clusters, which is the number of classes there is.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_clusters\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat! Let\u0026rsquo;s take a look at how it sorted all of our samples\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elabels_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u
003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u
003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u0
03e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u0
03e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u0
03e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eLet\u0026rsquo;s plot our results.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe then need to define some colours.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecolors\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;red\u0026#34;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;green\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;blue\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall from yesterday that we realized that inner Septal/Pedal differences are not as variable as intra Septal/Pedal differences. So, we will plot the first and third columns next to each other, and use \u003ccode\u003elabels_\u003c/code\u003e for coloring.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for each element\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eindx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eelement\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eenumerate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# add a scatter point\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eelement\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eelement\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolors\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elabels_\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# save our figure\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esavefig\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;scatter.png\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/scatter.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNice. These look like the main groups are captured!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s compare that to intended classes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u00
3e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003
e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\
u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n
\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\
u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003ctd\u003e2\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eThere are obviously some clustering mistakes. Woah! Without prompting with answers, our model was able to figure out much of the general clusters at which our data exists. Nice.\u003c/p\u003e\n\u003cp\u003eWe can also see the \u0026ldquo;average\u0026rdquo;/\u0026ldquo;center\u0026rdquo; for each of the clusters:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ekmeans\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecluster_centers_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e5.9016129\u003c/th\u003e\n\u003cth\u003e2.7483871\u003c/th\u003e\n\u003cth\u003e4.39354839\u003c/th\u003e\n\u003cth\u003e1.43387097\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e5.006\u003c/td\u003e\n\u003ctd\u003e3.428\u003c/td\u003e\n\u003ctd\u003e1.462\u003c/td\u003e\n\u003ctd\u003e0.246\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e6.85\u003c/td\u003e\n\u003ctd\u003e3.07368421\u003c/td\u003e\n\u003ctd\u003e5.74210526\u003c/td\u003e\n\u003ctd\u003e2.07105263\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNice! 
These are what our model thinks are the centers of each group.\u003c/p\u003e\n\u003ch2 id=\"principle-component-analysis\"\u003ePrinciple Component Analysis\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s try reducing the dimentionality of our data by one, so that we only have three dimensions. We do this, by, again, begin importing PCA.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.decomposition\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ePCA\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhen we are instantiating, we need to create a PCA instance with a keyword \u003ccode\u003en_components\u003c/code\u003e, which is the number of dimensions (\u0026ldquo;component vectors\u0026rdquo;) we want to keep.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ePCA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_components\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat, let\u0026rsquo;s fit our data to this PCA.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful. \u003ccode\u003esingular_values_\u003c/code\u003e is how we can get out of the PCA\u0026rsquo;d change of basis results:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecob\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecomponents_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ecob\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e0.36138659\u003c/th\u003e\n\u003cth\u003e-0.08452251\u003c/th\u003e\n\u003cth\u003e0.85667061\u003c/th\u003e\n\u003cth\u003e0.3582892\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.65658877\u003c/td\u003e\n\u003ctd\u003e0.73016143\u003c/td\u003e\n\u003ctd\u003e-0.17337266\u003c/td\u003e\n\u003ctd\u003e-0.07548102\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e-0.58202985\u003c/td\u003e\n\u003ctd\u003e0.59791083\u003c/td\u003e\n\u003ctd\u003e0.07623608\u003c/td\u003e\n\u003ctd\u003e0.54583143\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eSo, we can then take a change of basis matrix and apply it to some samples!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecob\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e@\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e2.81823951\u003c/td\u003e\n\u003ctd\u003e5.64634982\u003c/td\u003e\n\u003ctd\u003e-0.65976754\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWhat\u0026rsquo;s \u003ccode\u003e@\u003c/code\u003e? Well\u0026hellip; Unfortunately, Python has different operator for matrix-operations (\u0026ldquo;dot\u0026rdquo;); otherwise, it will perform element-wise operations.\u003c/p\u003e\n\u003cp\u003eWe can actually also see the \\(R^2\\) values on each of the axis: the variance explained by each of the dimensions.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epca\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexplained_variance_\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4.22824171\u003c/td\u003e\n\u003ctd\u003e0.24267075\u003c/td\u003e\n\u003ctd\u003e0.0782095\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNice! 
As you can see, much of the variance is contained in our first dimension here.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaibridgelab_d4aft/","tags":null,"title":"AIBridgeLab D4Aft"},{"categories":null,"contents":"AIFS is a food systems institute at UC Davis.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e is a food systems institute at \u003ca href=\"\"\u003eUC Davis\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaifs/","tags":null,"title":"AIFS"},{"categories":null,"contents":"I am honestly not entirely sure why or what state of mind I was in circa 2017 to write, edit, and act! in this video, but I did.\nThis is an adaption of a Greek-Style story which someone else wrote, I don\u0026rsquo;t know who.\nVideo produced mostly by myself in front of a green screen, with help from my lovely mother as well as a very nice teacher named Joseph O\u0026rsquo;Brian.\nhttps://youtu.be/b1YxOkcwtgw\nBe prepared. 前方高能\n","html":"\u003cp\u003eI am honestly \u003cem\u003enot entirely sure\u003c/em\u003e why or what state of mind I was in circa 2017 to write, edit, and \u003cstrong\u003eact!\u003c/strong\u003e in this video, but I did.\u003c/p\u003e\n\u003cp\u003eThis is an adaption of a Greek-Style story which someone else wrote, I don\u0026rsquo;t know who.\u003c/p\u003e\n\u003cp\u003eVideo produced mostly by myself in front of a green screen, with help from my lovely mother as well as a very nice teacher named Joseph O\u0026rsquo;Brian.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://youtu.be/b1YxOkcwtgw\"\u003ehttps://youtu.be/b1YxOkcwtgw\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eBe prepared. 
前方高能\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhair_a_greek_style_myth/","tags":null,"title":"Air: A Greek Style Myth"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhalexis_ohanian/","tags":null,"title":"Alexis Ohanian"},{"categories":null,"contents":"algebra is the study of\u0026hellip;\nsymbols/variables transformations/operations: \u0026ldquo;add\u0026rdquo;, \u0026ldquo;multiply\u0026rdquo; simple functions abstraction substitution ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e is the study of\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esymbols/variables\u003c/li\u003e\n\u003cli\u003etransformations/operations: \u0026ldquo;add\u0026rdquo;, \u0026ldquo;multiply\u0026rdquo;\u003c/li\u003e\n\u003cli\u003esimple functions\u003c/li\u003e\n\u003cli\u003eabstraction\u003c/li\u003e\n\u003cli\u003esubstitution\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalgebra/","tags":null,"title":"algebra"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhalgebreic_equation/","tags":null,"title":"algebreic equation"},{"categories":null,"contents":"The algebreic multiplicity for a given eigenvalue is the multiplicity for which the linear factor containing it shows up in the characteristic polynomial.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhalgebreic_multiplicity/\"\u003ealgebreic multiplicity\u003c/a\u003e for a given \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e is the multiplicity for which the linear factor containing it shows up in the \u003ca href=\"/posts/kbhcharacteristic_polynomial/\"\u003echaracteristic polynomial\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalgebreic_multiplicity/","tags":null,"title":"algebreic multiplicity"},{"categories":null,"contents":" Startups Kwotes \u0026ldquo;Working with big tech is a job closer 
to big tech.\u0026rdquo;\n","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstartup/\"\u003eStartups\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"kwotes\"\u003eKwotes\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Working with big tech is a job closer to big tech.\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhali_partovi/","tags":null,"title":"Ali Partovi"},{"categories":null,"contents":"Begin with a new installation of MFA, and head to the directory. First run validate with the original dictionary.\nmfa validate ~/Downloads/tb/my_corpus english_us_arpa english_us_arpa We see that there is in deed an section of corpus that is out-of-vocab.\nINFO - 11 OOV word types INFO - 18 total OOV tokens Therefore, we will generate a new dictionary based on the existing dictionary of english_us_arpa.\nFirst download the english_us_arpa model\nmfa model download g2p english_us_arpa Then, perform the actual dictionary generation:\nmfa g2p english_us_arpa ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt There is a chance this command fails with\nThere was an issue importing Pynini, please ensure that it is installed. If you are on Windows, please use the Windows Subsystem for Linux to use g2p functionality. If so, install pynini\nconda add pynini Finally, run the mfa g2p command above to generate pronunciations.\nYou should end up with a file named new_dict.txt, which should include missing words.\nFinally, perform alignment with this new dictionary.\nmfa align ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt english_us_arpa ~/Downloads/tb/my_corpus_output Notice here the second argument of mfa align is no longer english_us_arpa, our base dictionary. Instead, it is our custom dictionary.\n","html":"\u003cp\u003eBegin with a new installation of MFA, and head to the directory. 
First run validate with the original dictionary.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa validate ~/Downloads/tb/my_corpus english_us_arpa english_us_arpa\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe see that there is in deed an section of corpus that is out-of-vocab.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eINFO - 11 OOV word types\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eINFO - 18 total OOV tokens\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTherefore, we will generate a new dictionary based on the existing dictionary of \u003ccode\u003eenglish_us_arpa\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eFirst download the english_us_arpa model\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa model download g2p english_us_arpa\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, perform the actual dictionary generation:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" 
data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa g2p english_us_arpa ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThere is a chance this command fails with\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eThere was an issue importing Pynini, please ensure that it is installed. If you are on Windows, please use the Windows Subsystem for Linux to use g2p functionality.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eIf so, install pynini\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econda add pynini\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, run the \u003ccode\u003emfa g2p\u003c/code\u003e command above to generate pronunciations.\u003c/p\u003e\n\u003cp\u003eYou should end up with a file named \u003ccode\u003enew_dict.txt\u003c/code\u003e, which should include missing words.\u003c/p\u003e\n\u003cp\u003eFinally, perform alignment with this new dictionary.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-shell\" data-lang=\"shell\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emfa align ~/Downloads/tb/my_corpus ~/Downloads/tb/my_corpus/new_dict.txt english_us_arpa 
~/Downloads/tb/my_corpus_output\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNotice here the second argument of \u003ccode\u003emfa align\u003c/code\u003e is no longer \u003ccode\u003eenglish_us_arpa\u003c/code\u003e, our base dictionary. Instead, it is our custom dictionary.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalign_with_new_vocab/","tags":null,"title":"Align with New Vocab"},{"categories":null,"contents":" Want to interview more severe ashma Want to find someone younger Difference between marketing and purchaser.\nTaking to people Spoke with Matt. Talked with more details with prototyping and how they can build a unique product.\nHave not gotten back to him yet.\n","html":"\u003cul\u003e\n\u003cli\u003eWant to interview more severe ashma\u003c/li\u003e\n\u003cli\u003eWant to find someone younger\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDifference between marketing and purchaser.\u003c/p\u003e\n\u003ch2 id=\"taking-to-people\"\u003eTaking to people\u003c/h2\u003e\n\u003cp\u003eSpoke with Matt. 
Talked with more details with prototyping and how they can build a unique product.\u003c/p\u003e\n\u003cp\u003eHave not gotten back to him yet.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalivio_april_checkin/","tags":null,"title":"Alivio April Checkin"},{"categories":null,"contents":"Recall, from conditional plan evaluation, we had that:\n\\begin{equation} U^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) \\end{equation}\nlet\u0026rsquo;s write it as:\n\\begin{equation} U^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) = {\\alpha_{\\pi}}^{\\top} b \\end{equation}\nwhere \\(\\U_{\\pi}(s)\\) is the conditional plan evaluation starting at each of the initial states.\n\\begin{equation} \\alpha_{\\pi} = \\qty[ U^{\\pi}(s_1), U^{\\pi}(s_2) ] \\end{equation}\nYou will notice, then the utility of \\(b\\) is linear on \\(b\\) for different policies \\(\\alpha_{\\pi}\\):\nAt every belief \\(b\\), there is a policy which has the highest \\(U(b)\\) at that \\(b\\) given be the alpha vector formulation.\nAdditional Information top action you can just represent a policy out of alpha vectors by taking the top (root) action of the conditional plan with the alpha vector on top.\noptimal value function for POMDP with alpha vector Recall:\n\\begin{equation} U^{*}(b) = \\max_{\\pi} U^{\\pi}(b) = \\max_{\\pi} \\alpha_{\\pi}^{\\top}b \\end{equation}\nNOTE! 
This function (look at the chart above from \\(b\\) to \\(u\\)) is:\npiecewise linear convex (because the \u0026ldquo;best\u0026rdquo; (highest) line) is always curving up and so, for a policy instantiated by a bunch of alpha vectors \\(\\Gamma\\), we have:\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b \\end{equation}\nTo actually extract a policy out of this set of vectors \\(\\Gamma\\), we turn to one-step lookahead in POMDP\none-step lookahead in POMDP Say you want to extract a policy out of a bunch of alpha vectors.\nLet \\(\\alpha \\in \\Gamma\\), a set of alpha vectors.\n\\begin{equation} \\pi^{\\Gamma}(b) = \\arg\\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o)))] \\end{equation}\nwhere:\n\\begin{equation} R(b,a) = \\sum_{s}^{} R(s,a)b(s) \\end{equation}\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\nand\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b \\end{equation}\nalpha vector pruning Say we had as set of alpha vectors \\(\\Gamma\\):\n\\(\\alpha_{3}\\) isn\u0026rsquo;t all that useful here. 
So we ask:\n\u0026ldquo;Is alpha dominated by some \\(\\alpha_{i}\\) everywhere?\u0026rdquo;\nWe formulate this question in terms of a linear program:\n\\begin{equation} \\max_{\\delta, b} \\delta \\end{equation}\nwhere \\(\\delta\\) is the gap between \\(\\alpha\\) and the utility o\nsubject to:\n\\begin{align} \u0026amp;1^{\\top} b = 1\\ \\text{(b adds up to 1)} \\\\ \u0026amp; b\\geq 0 \\\\ \u0026amp; \\alpha^{\\top} b \\geq \\alpha\u0026rsquo;^{\\top} b + \\delta, \\forall \\alpha\u0026rsquo; \\in \\Gamma \\end{align}\nif \\(\\delta \u0026lt; 0\\), then we can prune \\(\\alpha\\) because it had been dominated.\nif each value on the top of the set\n","html":"\u003cp\u003eRecall, from \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e, we had that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet\u0026rsquo;s write it as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) = {\\alpha_{\\pi}}^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\U_{\\pi}(s)\\) is the \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e starting at each of the initial states.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{\\pi} = \\qty[ U^{\\pi}(s_1), U^{\\pi}(s_2) ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will notice, then the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of \\(b\\) is linear on \\(b\\) for different policies \\(\\alpha_{\\pi}\\):\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-16_09-23-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAt every belief \\(b\\), there is a policy which has the highest \\(U(b)\\) at that \\(b\\) given be the 
\u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e formulation.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"top-action\"\u003etop action\u003c/h3\u003e\n\u003cp\u003eyou can just represent a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es by taking the top (root) action of the \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e with the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e on top.\u003c/p\u003e\n\u003ch3 id=\"optimal-value-function-for-pomdp--kbhconditional-plan-dot-md--with-alpha-vector--kbhalpha-vector-dot-md\"\u003e\u003ca href=\"/posts/kbhconditional_plan/#id-9ccda204-0967-44c8-a801-c92d0df154b5-optimal-value-function-for-id-130d5294-0274-422b-b395-7d6f7f75be7d-pomdp\"\u003eoptimal value function for POMDP\u003c/a\u003e with \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}(b) = \\max_{\\pi} U^{\\pi}(b) = \\max_{\\pi} \\alpha_{\\pi}^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNOTE! 
This function (look at the chart above from \\(b\\) to \\(u\\)) is:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003epiecewise linear\u003c/li\u003e\n\u003cli\u003econvex (because the \u0026ldquo;best\u0026rdquo; (highest) line) is always curving up\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eand so, for a policy instantiated by a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es \\(\\Gamma\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo actually extract a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of this set of vectors \\(\\Gamma\\), we turn to \u003ca href=\"#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/h3\u003e\n\u003cp\u003eSay you want to extract a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eLet \\(\\alpha \\in \\Gamma\\), a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi^{\\Gamma}(b) = \\arg\\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o)))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(b,a) = \\sum_{s}^{} R(s,a)b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003ch3 
id=\"alpha-vector--kbhalpha-vector-dot-md--pruning\"\u003e\u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e pruning\u003c/h3\u003e\n\u003cp\u003eSay we had as set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es \\(\\Gamma\\):\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-16_09-40-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\(\\alpha_{3}\\) isn\u0026rsquo;t all that useful here. So we ask:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Is alpha dominated by some \\(\\alpha_{i}\\) everywhere?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe formulate this question in terms of a linear program:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{\\delta, b} \\delta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\delta\\) is the gap between \\(\\alpha\\) and the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e o\u003c/p\u003e\n\u003cp\u003esubject to:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;1^{\\top} b = 1\\ \\text{(b adds up to 1)} \\\\\n\u0026amp; b\\geq 0 \\\\\n\u0026amp; \\alpha^{\\top} b \\geq \\alpha\u0026rsquo;^{\\top} b + \\delta, \\forall \\alpha\u0026rsquo; \\in \\Gamma\n\\end{align}\u003c/p\u003e\n\u003cp\u003eif \\(\\delta \u0026lt; 0\\), then we can prune \\(\\alpha\\) because it had been dominated.\u003c/p\u003e\n\u003cp\u003eif each value on the top of the set\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalpha_vector/","tags":null,"title":"alpha vector"},{"categories":null,"contents":"Alternating Least Squares is a method to Factoring a matrix into two components:\n\\begin{equation} \\mathcal{M}( R) \\approx \\mathcal{M}(U) \\cdot \\mathcal{M}(P) \\end{equation}\nwhere, we want to come up matricies \\(U\\) and \\(P\\) with a certain side length \\(k\\) that we exdogenously come up with\nTo perform Alternating Least Squares, we fix the values of either \\(U\\) or \\(P\\), then perform the least-squares optimization 
on\n(This is proven best-fit for \u0026ldquo;non-pathological matricies\u0026rdquo;)\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhalternating_least_squares/\"\u003eAlternating Least Squares\u003c/a\u003e is a method to \u003ca href=\"/posts/kbhthoughts_on_axler_4/#factoring\"\u003eFactoring\u003c/a\u003e a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e into two components:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{M}( R) \\approx \\mathcal{M}(U) \\cdot \\mathcal{M}(P)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, we want to come up matricies \\(U\\) and \\(P\\) with a certain side length \\(k\\) that we exdogenously come up with\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-08-01_11-10-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003chr\u003e\n\u003cp\u003eTo perform \u003ca href=\"/posts/kbhalternating_least_squares/\"\u003eAlternating Least Squares\u003c/a\u003e, we fix the values of either \\(U\\) or \\(P\\), then perform the least-squares optimization on\u003c/p\u003e\n\u003cp\u003e(This is proven best-fit for \u0026ldquo;\u003ca href=\"/posts/kbhnon_pathological_matricies/\"\u003enon-pathological matricies\u003c/a\u003e\u0026rdquo;)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhalternating_least_squares/","tags":null,"title":"Alternating Least Squares"},{"categories":null,"contents":"Problem: current ambulance routing don\u0026rsquo;t optimize significantly on the contextual cases for stroke patients\nStroke hospitals: PSC is smaller than a CSC.\nPrevious work Routing methods\u0026mdash;\nroute all patient to nearest PSC, which is worse than route high risk patient to CSC, which is worse than always route to CSC This is counter-intuitive. 
How do we solve, given a stroke condition, available PSC/CSC locations, traffic, etc., for where and how to route a patient?\nAmbulance MDP formulation \\(S\\): (location, symptom onset, known stroke type, stroke type) \\(A\\): route to clinic, route to [specific] PSC, route to [specific] CSC will never be downrouted (for instance, if you are at a PSC you will always either stay or go to CSC) \\(T(s\u0026rsquo;|s,a)\\): location changes distance \\(R(s,a)\\): \u0026ldquo;probability of patient outcome\u0026rdquo; \\(P(success|time)\\) (Holodinsky, et. al. 2018) if stroke type is unknown, its a weighted average Solving Forward Search, depth of 2: patient will either get transported or bounced and transported.\nResults status quo: people near Stanford hospital/ChanZuck are better MDP: smoother gradient ","html":"\u003cp\u003eProblem: \u003cstrong\u003ecurrent ambulance routing don\u0026rsquo;t optimize significantly on the contextual cases for stroke patients\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eStroke hospitals: PSC is smaller than a CSC.\u003c/p\u003e\n\u003ch2 id=\"previous-work\"\u003ePrevious work\u003c/h2\u003e\n\u003cp\u003eRouting methods\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eroute all patient to nearest PSC, which is worse than\u003c/li\u003e\n\u003cli\u003eroute high risk patient to CSC, which is worse than\u003c/li\u003e\n\u003cli\u003ealways route to CSC\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is counter-intuitive. 
How do we solve, given a stroke condition, available PSC/CSC locations, traffic, etc., for where and how to route a patient?\u003c/p\u003e\n\u003ch2 id=\"ambulance-mdp-formulation\"\u003eAmbulance MDP formulation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S\\): (location, symptom onset, known stroke type, stroke type)\u003c/li\u003e\n\u003cli\u003e\\(A\\):\n\u003cul\u003e\n\u003cli\u003eroute to clinic, route to [specific] PSC, route to [specific] CSC\u003c/li\u003e\n\u003cli\u003ewill never be downrouted (for instance, if you are at a PSC you will always either stay or go to CSC)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\\(T(s\u0026rsquo;|s,a)\\):\n\u003cul\u003e\n\u003cli\u003elocation changes\u003c/li\u003e\n\u003cli\u003edistance\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\\(R(s,a)\\):\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;probability of patient outcome\u0026rdquo; \\(P(success|time)\\) (Holodinsky, et. al. 2018)\u003c/li\u003e\n\u003cli\u003eif stroke type is unknown, its a weighted average\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, depth of 2: patient will either get transported or bounced and transported.\u003c/p\u003e\n\u003ch2 id=\"results\"\u003eResults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003estatus quo: people near Stanford hospital/ChanZuck are better\u003c/li\u003e\n\u003cli\u003eMDP: smoother gradient\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhabulance_trajectories/","tags":null,"title":"Ambulance Trajectories"},{"categories":null,"contents":"Hello! Welcome to the series of guided code-along labs to introduce you to the basis of using the PyTorch library and its friends to create a neural network! 
We will dive deeply into Torch, focusing on how practically it can be used to build Neural Networks, as well as taking sideroads into how it works under the hood.\nGetting Started To get started, let\u0026rsquo;s open a colab and import Torch!\nimport torch import torch.nn as nn The top line here import PyTorch generally, and the bottom line imports the Neural Network libraries. We will need both for today and into the future!\nTensors and AutoGrad The most basic element we will be working with in Torch is something called a tensor. A tensor is a variable, which holds either a single number (scalar, or a single neuron) or a list of numbers (vector, or a layer of neurons), that can change. We will see what that means in a sec.\nYour First Tensors Everything that you are going to put through to PyTorch needs to be in a tensor. Therefore, we will need to get good at making them! As we discussed, a tensor can hold an number (scalar), a list (vector) or a (matrix).\nHere are a bunch of them!\nscalar_tensor = torch.tensor(2.2) vector_tensor = torch.tensor([1,3,4]) matrix_tensor = torch.tensor([[3,1,4],[1,7,4]]) You can perform operations on these tensors, like adding them together:\ntorch.tensor(2.2) + torch.tensor(5.1) tensor(7.3000) Vector and Matrix tensors work like NumPy arrays. You can add them pairwise:\ntorch.tensor([[3,1,4],[1,7,4]]) + torch.tensor([[0,2,1],[3,3,4]]) tensor([[ 3, 3, 5], [ 4, 10, 8]]) Connecting Tensors A single number can\u0026rsquo;t be a neural network! 
([citation needed]) So, to be able to actually build networks, we have to connect tensors together.\nSo, let\u0026rsquo;s create two tensors, each holding a neuron, and connect them together!\nHere are two lovely scalar tensors:\nvar_1 = torch.tensor(3.0, requires_grad=True) var_2 = torch.tensor(4.0, requires_grad=True) var_1, var_2 (tensor(3., requires_grad=True), tensor(4., requires_grad=True)) We initialized two numbers, 3, which we named var_1, and 4, which we named var_2.\nThe value requires_grad here tells PyTorch that these values can change, which we need it to do\u0026hellip; very shortly!\nFirst, though, let\u0026rsquo;s create a latent variable. A \u0026ldquo;latent\u0026rdquo; value is a value that is the result of operations on other non-latent tensors\u0026mdash;connecting the activation of some neurons together with a new one. For instance, if I multiplied our two tensors together, we can create our very own latent tensor.\nmy_latent_value = var_1*var_2 my_latent_value tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) Evidently, \\(3 \\cdot 4 = 12\\).\nAutograd Now! The beauty of PyTorch is that we can tell it to set any particular latent variable to \\(0\\) (Why only \\(0\\), and \\(0\\) specifically? Calculus; turns out this limitation doesn\u0026rsquo;t matter at all, as we will see), and it can update all of its constituent tensors with required_grad \u0026ldquo;True\u0026rdquo; such that the latent variable we told PyTorch to set to \\(0\\) indeed becomes \\(0\\)!\nThis process is called \u0026ldquo;automatic gradient calculation\u0026rdquo; and \u0026ldquo;backpropagation.\u0026rdquo; (Big asterisks throughout, but bear with us. Find Matt/Jack if you want more.)\nTo do this, we will leverage the help of a special optimization algorithm called stochastic gradient descent.\nLet\u0026rsquo;s get a box of this stuff first:\nfrom torch.optim import SGD SGD \u0026lt;class \u0026#39;torch.optim.sgd.SGD\u0026#39;\u0026gt; Excellent. 
By the way, from the torch.optim package, there\u0026rsquo;s tonnes (like at least 20) different \u0026ldquo;optimizer\u0026rdquo; algorithms that all do the same thing (\u0026ldquo;take this latent variable to \\(0\\) by updating its constituents\u0026rdquo;) but do them in important different ways. We will explore some of them through this semester, and others you can Google for yourself by looking up \u0026ldquo;PyTorch optimizers\u0026rdquo;.\nOk, to get this SGD thing up and spinning, we have to tell it every tensor it gets to play with in a list. For us, let\u0026rsquo;s ask PyTorch SGD to update var_1 and var_2 such that my_latent_value (which, remember, is var1 times var2) becomes a new value.\nAside: learning rate\nNow, if you recall the neural network simulation, our model does not reach the desired outcome immediately. It does so in steps. The size of these steps are called the learning rate; the LARGER these steps are, the quicker you will get close to your desired solution, but where you end up getting maybe farther away from the actual solution; and vise versa.\nThink about the learning rate as a hoppy frog: a frog that can hop a yard at a time (\u0026ldquo;high learning rate\u0026rdquo;) can probably hit a target a mile away much quicker, but will have a hard time actually hitting the foot-wide target precisely; a frog that can hop an inch at a time (\u0026ldquo;low learning rate\u0026rdquo;) can probably hit a target a mile away\u0026hellip;. years from now, but will definitely be precisely hitting the foot-wide target when it finally gets there.\nSo what does \u0026ldquo;high\u0026rdquo; and \u0026ldquo;low\u0026rdquo; mean? Usually, we adjust learning rate by considering the number of decimal places it has. \\(1\\) is considered a high learning rate, \\(1 \\times 10^{-3} = 0.001\\) as medium-ish learning rate, and \\(1 \\times 10^{-5}=0.00001\\) as a small one. 
There are, however, no hard and fast rules about this and it is subjcet to experimentation.\nSo, choose also an appropriate learning rate for our optimizer. I would usually start with \\(3 \\times 10^{-3}\\) and go from there. In Python, we write that as 3e-3.\nSo, let\u0026rsquo;s make a SGD, and give it var_1 and var_2 to play with, and set the learning rate to 3e-3:\nmy_sgd = SGD([var_1, var_2], lr=3e-3) my_sgd SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 0.003 maximize: False momentum: 0 nesterov: False weight_decay: 0 ) Wonderful. Don\u0026rsquo;t worry much about how many of these means for now; however, we will see it in action shortly.\nNow! Recall that we allowed my_sgd to mess with var_1 and var_2 to change the value of my_latent_value (the product of var_1 and var_2).\nCurrent, var_1 and var_2 carries the values of:\nvar_1, var_2 (tensor(3., requires_grad=True), tensor(4., requires_grad=True)) And, of course, their product my_latent_value carries the value of:\nmy_latent_value tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) What if we want my_latent_value to be\u0026hellip; \\(15\\)? That sounds like a good number. Let\u0026rsquo;s ask our SGD algorithm to update var_1 and var_2 such that my_latent_value will be \\(15\\)!\nWaaait. I mentioned that the optimizers can only take things to \\(0\\). How could it take my_latent_value to \\(15\\) then? Recall! I said SGD takes a latent variable to \\(0\\). So, we can just build another latent variable such that, when my_latent_value is \\(15\\), our new latent variable will be \\(0\\), and then ask SGD optimize on that!\nWhat could that be\u0026hellip; Well, the squared difference between \\(15\\) and my_latent_value is a good one. 
If my_latent_value is \\(15\\), the squared difference between it and \\(15\\) will be \\(0\\), as desired!\nSo, similar to what we explored last semester, we use sum of squared difference as our loss because it will be able to account for errors of fit in both directions: a \\(-4\\) difference in predicted and actual output is just as bad as a \\(+4\\) difference.\nTurns out, the \u0026ldquo;objective\u0026rdquo; for SGD optimization, the thing that we ask SGD to take to \\(0\\) on our behalf by updating the parameters we allowed it to update (again, they are var_1 and var_2 in our case here), is indeed the loss value of our model. Sum of squared errors is, therefore, called our loss function for this toy problem.\nSo let\u0026rsquo;s do it! Let\u0026rsquo;s create a tensor our loss:\nloss = (15-my_latent_value)**2 loss tensor(9., grad_fn=\u0026lt;PowBackward0\u0026gt;) Nice. So our loss is at \\(3\\) right now; when my_latent_value is correctly at \\(15\\), our loss will be at \\(0\\)! So, to get my_latent_value to \\(15\\), we will ask SGD to take loss to \\(0\\).\nTo do this, there are three steps. COMMIT THIS TO MEMORY, as it will be basis of literally everything else in the future.\nBackpropagate: \u0026ldquo;please tell SGD to take this variable to \\(0\\), and mark the correct tensors to change\u0026rdquo; Optimize: \u0026ldquo;SGD, please update the marked tensors such that the variable I asked you to take to \\(0\\) is closer to \\(0\\)\u0026rdquo; Reset: \u0026ldquo;SGD, please get ready for step 1 again by unmarking everything that you have changed\u0026rdquo; Again! Is it commited to memory yet?\nBackprop Optimize Reset I am stressing this here because a lot of people 1) miss one of these steps 2) do them out of order. Doing these in any other order will cause your desired result to not work. Why? Think about what each step does, and think about doing them out of order.\nOne more time for good luck:\nBackprop! Optimize! Reset! 
Let\u0026rsquo;s do it.\nBackprop! Backpropergation marks the correct loss value to minimize (optimze towards being \\(0\\)), and marks all tensors with requires_grad set to True which make up the value of that loss value for update.\nSecretly, this steps takes the partial derivative of our loss against each of the tensors we marked requires_grad, allowing SGD to \u0026ldquo;slide down the gradient\u0026rdquo; based on those partial derivatives. Don\u0026rsquo;t worry if you didn\u0026rsquo;t get that sentence.\nTo do this, we call .backward() on the loss we want to take to \\(0\\):\nloss.backward() None This call will produce nothing. And that\u0026rsquo;s OK, because here comes\u0026hellip;\nOptimize! The next step is tell SGD to update all of the tensors marked for update in the previous step to get loss closer to \\(0\\). To do this, we simply:\nmy_sgd.step() None This call will produce nothing. But, if you check now, the tensors should updated.\nAlthough\u0026hellip; You should\u0026rsquo;t check! Because we have one more step left:\nReset! my_sgd.zero_grad() None I cannot stress this enough. People often stop at the previous step because \u0026ldquo;ooo look my tensors updated!!!\u0026rdquo; and forget to do this step. THIS IS BAD. We won\u0026rsquo;t go into why for now, but basically not resetting the update mark results in a tensor being updated twice, then thrice, etc. each time you call .step(), which will cause double-updates, which will cause you to overshoot (handwavy, but roughly), which is bad.\nooo look my tensors updated!!! var_1, var_2 (tensor(3.0720, requires_grad=True), tensor(4.0540, requires_grad=True)) WOAH! Look at that! Without us telling SGD, it figured out that var_1 and var_2 both need to be BIGGER for my_latent_value, the product of var_1 and var_2 to change from \\(12\\) to \\(15\\). Yet, the product of \\(3.0720\\) and \\(4.0540\\) is hardly close to \\(15\\).\nWhy? Because our step size. It was tiny! 
To get my_latent_value to be properly \\(15\\), we have to do the cycle of 1) calculating new latent value 2) calculating new loss 3) backprop, optimize, reset, a LOT of times.\nNow do that a lot of times. for _ in range(100): my_latent_value = var_1*var_2 loss = (15-my_latent_value)**2 loss.backward() # BACKPROP! my_sgd.step() # OPTIMIZE! my_sgd.zero_grad() # RESET! var_1, var_2 (tensor(3.4505, requires_grad=True), tensor(4.3472, requires_grad=True)) Weird solution, but we got there! The product of these two values is indeed very close to \\(15\\)! Give yourself a pat on the back.\nSo why the heck are we doing all this So why did we go through all the effort of like 25 lines of code to get two numbers to multiply to \\(15\\)? If you think about Neural Networks as a process of function fitting, we are essentially asking our very basic \u0026ldquo;network\u0026rdquo; (as indeed, the chain of tensors to build up to our latent value, then to compute our loss, is a network!) to achieve a measurable task (\u0026ldquo;take the product of these numbers to \\(15\\)\u0026rdquo;). Though the relationships we will be modeling in this class will be more complex than literal multiplication, it will be just using more fancy mechanics of doing the same thing\u0026mdash;taking tensors values which was undesirable, and moving them to more desirable values to model our relationship.\ny=mx+b and your first neural network \u0026ldquo;module\u0026rdquo; nn.Linear The power of neural networks actually comes when a BUNCH of numbers gets multiplied together, all at once! using\u0026hellip; VECTORS and MATRICIES! Don\u0026rsquo;t remember what they are? Ask your friendly neighborhood Matt/Jack.\nRecall, a matrix is how you can transform a vector from one space to another. Turns out, the brunt of everything you will be doing involves asking SGD to move a bunch of matricies around (like we did before!) 
such that our input vector(s) gets mapped to the right place.\nA matrix, in neural network world, is referred to as a linear layer. It holds a whole series of neurons, taking every single value of the input into account to producing a whole set of output. Because of this property, it is considered a fully connected layer.\nLet\u0026rsquo;s create such a fully-connected layer (matrix) in PyTorch! When you ask PyTorch to make a matrix for you, you use the nn sublibrary which we imported before. Furthermore, and this is confusing for many people who have worked with matricies before, you specify the input dimension first.\nmy_matrix_var_1 = nn.Linear(3, 2) my_matrix_var_1 Linear(in_features=3, out_features=2, bias=True) my_matrix_var_1 is a linear map from three dimensions to two dimensions; it will take a vector of three things as input and spit out a vector of two.\nNote! Although my_matrix_var_1 is a tensor under the hood just like var_1, we 1) didn\u0026rsquo;t have to set default values for it 2) didn\u0026rsquo;t have to mark it as requires_grad. This is because, unlike a raw Tensor which often does not require to be changed (such as, for instance, the input value, which you can\u0026rsquo;t change), a matrix is basically ALWAYS a tensor that encodes the weights of a model we are working with\u0026mdash;so it is always going to be something that we will ask SGD to change on our behalf.\nSo, since you are asking SGD to change it anyways, PyTorch just filled a bunch of random numbers in for you and set requires_grad on for you to my_matrix_var_1. If you want to see the actual underlying tensor, you can:\nmy_matrix_var_1.weight Parameter containing: tensor([[-0.2634, 0.3729, 0.5019], [ 0.2796, 0.5425, -0.4337]], requires_grad=True) As you can see, we have indeed what we expect: a tensor containing a \\(2\\times 3\\) matrix with requires_grad on filled with random values.\nHow do we actually optimize over this tensor? 
You can do all the shenanigans we did before and pass my_matrix_var_1 to SGD, but this will quickly get unwieldy as you have more parameters. Remember how we had to give SVG a list of EVERYTHING it had to keep track of? var_1 and var_2 was simple enough, but what if we had to do var_1.weight, var_2.weight, var_3.weight\u0026hellip; \u0026hellip; \u0026hellip; ad nausium for every parameter we use on our large graph? GPT3 has 1.5 billion parameters. Do you really want to type that?\nNo.\nThere is, of course, a better way.\nnn.Module This, by the way, is the standard of how a Neural Network is properly built from now on until the industry moves on from PyTorch. You will want to remember this.\nLet\u0026rsquo;s replicate the example of our previous 3=\u0026gt;2 dimensional linear map, but with a whole lot more code.\nclass MyNetwork(nn.Module): def __init__(self): # important: runs early calls to make sure that # the module is correct super().__init__() # we declare our layers. we will use them below self.m1 = nn.Linear(3,2) # this is a special function that you don\u0026#39;t actually call # manually, but as you use this module Torch will call # on your behalf. It passes the input through to the layers # of your network. def forward(self, x): # we want to pass whatever input we get, named x # through to every layer. right now there is only # one fully-connected layer x = self.m1(x) return x What this does, behind the scenes, is to wrap our matrix and all of its parameters into one giant module. (NOTE! This is PyTorch-specific language. Unlike all other vocab before, this term is specific to PyTorch.) A module is an operation on tensors which can retain gradients (i.e. it can change, i.e. requires_grad=True).\nLet\u0026rsquo;s see it in action. Recall that our matrix takes a vector of 3 things as input, and spits out a vector of 2 things. 
So let\u0026rsquo;s make a vector of three things:\nthree_vector = torch.tensor([1.,2.,3.]) three_vector tensor([1., 2., 3.]) By the way, notice the period I\u0026rsquo;m putting after numbers here? That\u0026rsquo;s a shorthand for .0. So 3.0 = 3.. I want to take this opportunity to remind you that the tensor operations all take FLOATING POINT tensors as input, because the matrices themselves as initialized with random floating points.\nLet\u0026rsquo;s get an instance of the new MyNetwork module.\nmy_network = MyNetwork() my_network MyNetwork( (m1): Linear(in_features=3, out_features=2, bias=True) ) And apply this operation we designed to our three-vector!\nmy_network(three_vector) tensor([0.3850, 1.4120], grad_fn=\u0026lt;AddBackward0\u0026gt;) Woah! It mapped our vector tensor in three dimensions to a vector tensor in two!\nThe above code, by the way, is how we actually use our model to run predictions: my_network is transforming the input vector to the desired output vector.\nCool. This may not seem all that amazing to you\u0026hellip; yet. But, remember, we can encode any number of matrix operations in our forward() function above. Let\u0026rsquo;s design another module that uses two matricies\u0026mdash;or two fully-connected layers, or layers for short (when we don\u0026rsquo;t specify what kind of layer it is, it is fully connected)\u0026mdash;to perform a transformation.\nWe will transform a vector from 3 dimensions to 2 dimensions, then from 2 dimensions to 5 dimensions:\nclass MyNetwork(nn.Module): def __init__(self): # important: runs early calls to make sure that # the module is correct super().__init__() # we declare our layers. we will use them below self.m1 = nn.Linear(3,2) self.m2 = nn.Linear(2,5) # this is a special function that you don\u0026#39;t actually call # manually, but as you use this module Torch will call # on your behalf. It passes the input through to the layers # of your network. 
def forward(self, x): # we want to pass whatever input we get, named x # through to every layer. right now there is only # one fully-connected layer x = self.m1(x) x = self.m2(x) return x Of course, this network topology is kind of randomly tossed into the network.\nDoing everything else we did before again, we should end up a vector in 5 dimensions, having been transformed twice behind the scenes!\nmy_network = MyNetwork() my_network MyNetwork( (m1): Linear(in_features=3, out_features=2, bias=True) (m2): Linear(in_features=2, out_features=5, bias=True) ) And apply this operation we designed to our three-vector!\nmy_network(three_vector) tensor([ 0.8241, -0.1014, 0.2940, -0.2019, 0.6749], grad_fn=\u0026lt;AddBackward0\u0026gt;) Nice.\nAnd here\u0026rsquo;s the magical thing: when we are asking SGD to optimize this network, instead of needing to pass every darn parameter used in this network into SVG, we can just pass in:\nmy_network.parameters() \u0026lt;generator object Module.parameters at 0x115214270\u0026gt; This is actually a list of every single tensor that has requires_grad=True that we secretly created. No more typing out a list of every parameter to SGD like we did with var_1 and var_2! We will see this in action shortly.\nHow to Train Your Dragon Neural Network Note, the MyNetwork transformation is currently kind of useless. We know it maps the vector [1,2,3] to some arbitrary numbers above (i.e. 0.8241 an such). That\u0026rsquo;s quite lame.\nWe want our network to model some relationship between numbers, that\u0026rsquo;s why we are here. 
Let\u0026rsquo;s, arbitrarily and for fun, ask SGD to update my_network such that it will return [1,2,3,4,5] given [1,2,3].\nBy the way, from here on, I will use MyNetwork to refer to the model 3=\u0026gt;2=\u0026gt;5 network we made above generally, and my_network the specific instantiation of MyNetwork whose parameters we will ask SGD to update.\nLet\u0026rsquo;s get a clean copy of MyNetwork first:\nmy_network = MyNetwork() my_network MyNetwork( (m1): Linear(in_features=3, out_features=2, bias=True) (m2): Linear(in_features=2, out_features=5, bias=True) ) And, let\u0026rsquo;s create a static (i.e. SGD cannot change it) input and output vector pair which we will pass into our operation:\nmy_input = torch.tensor([1.,2.,3.]) my_desired_output = torch.tensor([1.,2.,3.,4.,5.]) my_input,my_desired_output (tensor([1., 2., 3.]), tensor([1., 2., 3., 4., 5.])) We will pass our input through the my_network operation, and figure out what our inputs currently map to:\nmy_network_output = my_network(my_input) my_network_output tensor([-1.4672, -0.7089, -0.2645, -0.0598, 0.1239], grad_fn=\u0026lt;AddBackward0\u0026gt;) Ah, clearly not [1,2,3,4,5]. Recall we want these values to be the same as my_output, which they isn\u0026rsquo;t doing right now. Let\u0026rsquo;s fix that.\nCan you guess what loss function we will use? \u0026hellip; That\u0026rsquo;s right, the same exact thing as before! Squaring the difference.\nloss = (my_network_output-my_desired_output)**2 loss tensor([ 6.0869, 7.3380, 10.6571, 16.4821, 23.7766], grad_fn=\u0026lt;PowBackward0\u0026gt;) Waiiiit. There\u0026rsquo;s a problem. Remember, SGD can take a single latent value to \\(0\\). That\u0026rsquo;s a whole lotta latent values in a vector! Which one will it take to \\(0\\)? Stop to think about this for a bit: we want to take all of these values to \\(0\\), but we can take only a single value to \\(0\\) with SGD. 
How can we do it?\nTo do this, we just\u0026hellip; add the values up using the torch.sum function!\nloss = torch.sum((my_network_output-my_desired_output)**2) loss tensor(64.3406, grad_fn=\u0026lt;SumBackward0\u0026gt;) Nice. We now have something to optimize against, let\u0026rsquo;s actually create our optimizer! Remember that, instead of passing in every single parameter we want PyTorch to change manually, we just pass in my_network.parameters() and PyTorch will scan for every single parameter that lives in MyNetwork and give it all to SGD:\nmy_sgd = SGD(my_network.parameters(), lr=1e-6) my_sgd SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 1e-06 maximize: False momentum: 0 nesterov: False weight_decay: 0 ) Just for running this model, we are going to run our network with more steps (\\(50,000\\)), but with smaller step sizes (\\(1 \\times 10^{-6}\\)). We will not worry about it too much for now, and dive into discussing it further for network parameter tuning.\nSo, let\u0026rsquo;s make the actual training loop now that will take the latent variable named my_network_output, created by applying my_network on my_input, to take on the value of my_desired_output! Can you do it without looking? This will be almost the same as our first training loop, except we are asking our network to calculate the current latent output (instead of computing it from scratch each time.)\nfor _ in range(50000): # calculate new latent variable my_network_output = my_network(my_input) # calculate loss loss = torch.sum((my_network_output-my_desired_output)**2) # Backprop! loss.backward() # Optimize! my_sgd.step() # Reset! my_sgd.zero_grad() my_network(my_input) tensor([-0.9814, 0.4252, 1.8085, 2.7022, 3.5517], grad_fn=\u0026lt;AddBackward0\u0026gt;) Not great! But\u0026mdash;we are both ordered correctly and \u0026mdash; if you just kept running this loop, we will eventually converge (arrive at) the right answer! 
For kicks, let\u0026rsquo;s run it \\(50000\\) more times:\nfor _ in range(50000): # calculate new latent variable my_network_output = my_network(my_input) # calculate loss loss = torch.sum((my_network_output-my_desired_output)**2) # Backprop! loss.backward() # Optimize! my_sgd.step() # Reset! my_sgd.zero_grad() my_network(my_input) tensor([0.9975, 1.9986, 3.0006, 4.0026, 5.0052], grad_fn=\u0026lt;AddBackward0\u0026gt;) Would you look at that! What did I promise you :)\nYour network learned something! Specifically, the skill of mapping \\([1,2,3]\\) to \\([1,2,3,4,5]\\)! Congrats!\nChallenge Now that you know how to get the network to map a specific vector in three dimensions to a specific place in five dimensions, can you do that more generally? Can you generate and give your own network enough examples such that it will learn to do that for ALL vectors in three dimensions?\nSpecifically, generate a training set of in python and train your neural network now to perform the following operation:\nGiven a vector \\([a,b,c]\\), return \\([a,b,c,c+1,c+2]\\), for every integer \\([a,b,c]\\).\nHint: pass in many examples for correct behavior sequentially during each of your training loops, calculating loss and running the optimization step (i.e. back! optimize! reset!) after each example you give.\n","html":"\u003cp\u003eHello! Welcome to the series of guided code-along labs to introduce you to the basis of using the PyTorch library and its friends to create a neural network! 
We will dive deeply into Torch, focusing on how practically it can be used to build Neural Networks, as well as taking sideroads into how it works under the hood.\u003c/p\u003e\n\u003ch2 id=\"getting-started\"\u003eGetting Started\u003c/h2\u003e\n\u003cp\u003eTo get started, let\u0026rsquo;s open a \u003ca href=\"https://colab.research.google.com/\"\u003ecolab\u003c/a\u003e and import Torch!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe top line here import PyTorch generally, and the bottom line imports the Neural Network libraries. We will need both for today and into the future!\u003c/p\u003e\n\u003ch2 id=\"tensors-and-autograd\"\u003eTensors and AutoGrad\u003c/h2\u003e\n\u003cp\u003eThe most basic element we will be working with in Torch is something called a \u003cstrong\u003etensor\u003c/strong\u003e. A tensor is a \u003cstrong\u003evariable\u003c/strong\u003e, which holds either a single number (\u003cstrong\u003escalar\u003c/strong\u003e, or a single \u003cstrong\u003eneuron\u003c/strong\u003e) or a list of numbers (\u003cstrong\u003evector\u003c/strong\u003e, or a \u003cstrong\u003elayer\u003c/strong\u003e of neurons), that \u003cem\u003ecan change\u003c/em\u003e. 
We will see what that means in a sec.\u003c/p\u003e\n\u003ch3 id=\"your-first-tensors\"\u003eYour First Tensors\u003c/h3\u003e\n\u003cp\u003eEverything that you are going to put through to PyTorch needs to be in a tensor. Therefore, we will need to get good at making them! As we discussed, a tensor can hold an number (scalar), a list (vector) or a (matrix).\u003c/p\u003e\n\u003cp\u003eHere are a bunch of them!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003escalar_tensor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evector_tensor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ematrix_tensor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e7\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can perform operations on these tensors, like adding them together:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(7.3000)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eVector and Matrix tensors work like NumPy arrays. You can add them pairwise:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e7\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[ 3, 3, 5],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 4, 10, 8]])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"connecting-tensors\"\u003eConnecting Tensors\u003c/h3\u003e\n\u003cp\u003eA single number can\u0026rsquo;t be a neural network! 
([citation needed]) So, to be able to actually build networks, we have to connect tensors together.\u003c/p\u003e\n\u003cp\u003eSo, let\u0026rsquo;s create two tensors, each holding a neuron, and connect them together!\u003c/p\u003e\n\u003cp\u003eHere are two lovely scalar tensors:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3., requires_grad=True), tensor(4., requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe initialized two numbers, \u003ccode\u003e3\u003c/code\u003e, which we named \u003ccode\u003evar_1\u003c/code\u003e, and \u003ccode\u003e4\u003c/code\u003e, which we named \u003ccode\u003evar_2\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eThe value \u003ccode\u003erequires_grad\u003c/code\u003e here tells PyTorch that these values can change, which we need it to do\u0026hellip; very shortly!\u003c/p\u003e\n\u003cp\u003eFirst, though, let\u0026rsquo;s create a \u003cstrong\u003elatent\u003c/strong\u003e variable. A \u0026ldquo;latent\u0026rdquo; value is a value that is the \u003cem\u003eresult\u003c/em\u003e of operations on other non-latent tensors\u0026mdash;connecting the activation of some neurons together with a new one. 
For instance, if I multiplied our two tensors together, we can create our very own latent tensor.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, \\(3 \\cdot 4 = 12\\).\u003c/p\u003e\n\u003ch3 id=\"autograd\"\u003eAutograd\u003c/h3\u003e\n\u003cp\u003eNow! The beauty of PyTorch is that we can tell it to set any particular latent variable to \\(0\\) (Why only \\(0\\), and \\(0\\) specifically? 
Calculus; turns out this limitation doesn\u0026rsquo;t matter at all, as we will see), and it can update all of its constituent tensors with \u003ccode\u003erequired_grad\u003c/code\u003e \u0026ldquo;True\u0026rdquo; such that the latent variable we told PyTorch to set to \\(0\\) indeed becomes \\(0\\)!\u003c/p\u003e\n\u003cp\u003eThis process is called \u0026ldquo;automatic gradient calculation\u0026rdquo; and \u0026ldquo;backpropagation.\u0026rdquo; (Big asterisks throughout, but bear with us. Find Matt/Jack if you want more.)\u003c/p\u003e\n\u003cp\u003eTo do this, we will leverage the help of a special optimization algorithm called \u003cstrong\u003estochastic gradient descent\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get a box of this stuff first:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.optim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;class 
\u0026#39;torch.optim.sgd.SGD\u0026#39;\u0026gt;\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. By the way, from the \u003ccode\u003etorch.optim\u003c/code\u003e package, there\u0026rsquo;s tonnes (like at least 20) different \u0026ldquo;optimizer\u0026rdquo; algorithms that all do the same thing (\u0026ldquo;take this latent variable to \\(0\\) by updating its constituents\u0026rdquo;) but do them in important different ways. We will explore some of them through this semester, and others you can Google for yourself by looking up \u0026ldquo;PyTorch optimizers\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eOk, to get this SGD thing up and spinning, we have to tell it every tensor it gets to play with in a list. For us, let\u0026rsquo;s ask PyTorch SGD to update \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e such that \u003ccode\u003emy_latent_value\u003c/code\u003e (which, remember, is var1 times var2) becomes a new value.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003elearning rate\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eNow, if you recall the neural network simulation, our model does not reach the desired outcome immediately. It does so in \u003cem\u003esteps\u003c/em\u003e. 
The size of these steps are called the \u003cstrong\u003elearning rate\u003c/strong\u003e; the LARGER these steps are, the quicker you will get \u003cem\u003eclose\u003c/em\u003e to your desired solution, but where you end up getting maybe farther away from the actual solution; and vise versa.\u003c/p\u003e\n\u003cp\u003eThink about the learning rate as a hoppy frog: a frog that can hop a yard at a time (\u0026ldquo;high learning rate\u0026rdquo;) can probably hit a target a mile away much quicker, but will have a hard time actually hitting the foot-wide target precisely; a frog that can hop an inch at a time (\u0026ldquo;low learning rate\u0026rdquo;) can probably hit a target a mile away\u0026hellip;. years from now, but will definitely be precisely hitting the foot-wide target when it finally gets there.\u003c/p\u003e\n\u003cp\u003eSo what does \u0026ldquo;high\u0026rdquo; and \u0026ldquo;low\u0026rdquo; mean? Usually, we adjust learning rate by considering the number of decimal places it has. \\(1\\) is considered a high learning rate, \\(1 \\times 10^{-3} = 0.001\\) as medium-ish learning rate, and \\(1 \\times 10^{-5}=0.00001\\) as a small one. There are, however, no hard and fast rules about this and it is subjcet to experimentation.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eSo, choose also an appropriate \u003cstrong\u003elearning rate\u003c/strong\u003e for our optimizer. I would usually start with \\(3 \\times 10^{-3}\\) and go from there. 
In Python, we write that as \u003ccode\u003e3e-3\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eSo, let\u0026rsquo;s make a SGD, and give it \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e to play with, and set the learning rate to \u003ccode\u003e3e-3\u003c/code\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3e-3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 0.003\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWonderful. Don\u0026rsquo;t worry much about how many of these means for now; however, we will see it in action shortly.\u003c/p\u003e\n\u003cp\u003eNow! 
Recall that we allowed \u003ccode\u003emy_sgd\u003c/code\u003e to mess with \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e to change the value of \u003ccode\u003emy_latent_value\u003c/code\u003e (the product of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e).\u003c/p\u003e\n\u003cp\u003eCurrent, \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e carries the values of:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3., requires_grad=True), tensor(4., requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, of course, their product \u003ccode\u003emy_latent_value\u003c/code\u003e carries the value of:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhat if we want \u003ccode\u003emy_latent_value\u003c/code\u003e to be\u0026hellip; \\(15\\)? That sounds like a good number. Let\u0026rsquo;s ask our SGD algorithm to update \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e such that \u003ccode\u003emy_latent_value\u003c/code\u003e will be \\(15\\)!\u003c/p\u003e\n\u003cp\u003eWaaait. I mentioned that the optimizers can only take things to \\(0\\). How could it take \u003ccode\u003emy_latent_value\u003c/code\u003e to \\(15\\) then? Recall! I said SGD takes \u003cem\u003ea\u003c/em\u003e latent variable to \\(0\\). So, we can just build another latent variable such that, when \u003ccode\u003emy_latent_value\u003c/code\u003e is \\(15\\), our new latent variable will be \\(0\\), and then ask SGD optimize on that!\u003c/p\u003e\n\u003cp\u003eWhat could that be\u0026hellip; Well, the \u003cem\u003esquared difference\u003c/em\u003e between \\(15\\) and \u003ccode\u003emy_latent_value\u003c/code\u003e is a good one. 
If \u003ccode\u003emy_latent_value\u003c/code\u003e is \\(15\\), the \u003cem\u003esquared difference\u003c/em\u003e between it and \\(15\\) will be \\(0\\), as desired!\u003c/p\u003e\n\u003cp\u003eSo, similar to what we explored last semester, we use \u003cstrong\u003esum of squared difference\u003c/strong\u003e as our \u003cstrong\u003eloss\u003c/strong\u003e because it will be able to account for errors of fit in both directions: a \\(-4\\) difference in predicted and actual output is just as bad as a \\(+4\\) difference.\u003c/p\u003e\n\u003cp\u003eTurns out, the \u0026ldquo;objective\u0026rdquo; for SGD optimization, the thing that we ask SGD to take to \\(0\\) on our behalf by updating the parameters we allowed it to update (again, they are \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e in our case here), is indeed the \u003cstrong\u003eloss\u003c/strong\u003e value of our model. \u003cstrong\u003eSum of squared errors\u003c/strong\u003e is, therefore, called our \u003cstrong\u003eloss function\u003c/strong\u003e for this toy problem.\u003c/p\u003e\n\u003cp\u003eSo let\u0026rsquo;s do it! 
Let\u0026rsquo;s create a tensor for our loss:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e15\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(9., grad_fn=\u0026lt;PowBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice. So our loss is at \\(9\\) right now; when \u003ccode\u003emy_latent_value\u003c/code\u003e is correctly at \\(15\\), our loss will be at \\(0\\)! So, to get \u003ccode\u003emy_latent_value\u003c/code\u003e to \\(15\\), we will ask SGD to take \u003ccode\u003eloss\u003c/code\u003e to \\(0\\).\u003c/p\u003e\n\u003cp\u003eTo do this, there are three steps. 
\u003cstrong\u003eCOMMIT THIS TO MEMORY\u003c/strong\u003e, as it will be basis of literally everything else in the future.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBackpropagate: \u0026ldquo;please tell SGD to take this variable to \\(0\\), and mark the correct tensors to change\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eOptimize: \u0026ldquo;SGD, please update the marked tensors such that the variable I asked you to take to \\(0\\) is closer to \\(0\\)\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eReset: \u0026ldquo;SGD, please get ready for step 1 again by unmarking everything that you have changed\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAgain! Is it commited to memory yet?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBackprop\u003c/li\u003e\n\u003cli\u003eOptimize\u003c/li\u003e\n\u003cli\u003eReset\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI am stressing this here because a \u003cem\u003elot\u003c/em\u003e of people 1) miss one of these steps 2) do them out of order. Doing these in any other order will cause your desired result to not work. Why? 
Think about what each step does, and think about doing them out of order.\u003c/p\u003e\n\u003cp\u003eOne more time for good luck:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBackprop!\u003c/li\u003e\n\u003cli\u003eOptimize!\u003c/li\u003e\n\u003cli\u003eReset!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eLet\u0026rsquo;s do it.\u003c/p\u003e\n\u003ch4 id=\"backprop\"\u003eBackprop!\u003c/h4\u003e\n\u003cp\u003eBackpropagation marks the correct loss value to minimize (optimize towards being \\(0\\)), and marks all tensors with \u003ccode\u003erequires_grad\u003c/code\u003e set to True which make up the value of that loss value for update.\u003c/p\u003e\n\u003cp\u003eSecretly, this step takes the \u003cstrong\u003epartial derivative\u003c/strong\u003e of our loss against each of the tensors we marked \u003ccode\u003erequires_grad\u003c/code\u003e, allowing SGD to \u0026ldquo;slide down the gradient\u0026rdquo; based on those partial derivatives. Don\u0026rsquo;t worry if you didn\u0026rsquo;t get that sentence.\u003c/p\u003e\n\u003cp\u003eTo do this, we call \u003ccode\u003e.backward()\u003c/code\u003e on the loss we want to take to \\(0\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis call will produce nothing. And that\u0026rsquo;s OK, because here comes\u0026hellip;\u003c/p\u003e\n\u003ch4 id=\"optimize\"\u003eOptimize!\u003c/h4\u003e\n\u003cp\u003eThe next step is to tell SGD to update all of the tensors marked for update in the previous step to get \u003ccode\u003eloss\u003c/code\u003e closer to \\(0\\). To do this, we simply:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis call will produce nothing. But, if you check now, the tensors should be updated.\u003c/p\u003e\n\u003cp\u003eAlthough\u0026hellip; You shouldn\u0026rsquo;t check! 
Because we have one more step left:\u003c/p\u003e\n\u003ch4 id=\"reset\"\u003eReset!\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eI cannot stress this enough. People often stop at the previous step because \u0026ldquo;ooo look my tensors updated!!!\u0026rdquo; and forget to do this step. THIS IS BAD. We won\u0026rsquo;t go into why for now, but basically not resetting the update mark results in a tensor being updated twice, then thrice, etc. 
each time you call \u003ccode\u003e.step()\u003c/code\u003e, which will cause double-updates, which will cause you to overshoot (handwavy, but roughly), which is bad.\u003c/p\u003e\n\u003ch4 id=\"ooo-look-my-tensors-updated\"\u003eooo look my tensors updated!!!\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3.0720, requires_grad=True), tensor(4.0540, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWOAH! Look at that! Without us telling SGD, it figured out that \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e both need to be BIGGER for \u003ccode\u003emy_latent_value\u003c/code\u003e, the product of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e to change from \\(12\\) to \\(15\\). Yet, the product of \\(3.0720\\) and \\(4.0540\\) is hardly close to \\(15\\).\u003c/p\u003e\n\u003cp\u003eWhy? Because our step size. 
It was \u003cem\u003etiny!\u003c/em\u003e To get \u003ccode\u003emy_latent_value\u003c/code\u003e to be properly \\(15\\), we have to do the cycle of 1) calculating new latent value 2) calculating new loss 3) backprop, optimize, reset, a LOT of times.\u003c/p\u003e\n\u003ch3 id=\"now-do-that-a-lot-of-times-dot\"\u003eNow do that a lot of times.\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e15\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_latent_value\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# BACKPROP!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# OPTIMIZE!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# RESET!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3.4505, requires_grad=True), tensor(4.3472, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWeird solution, but we got there! The product of these two values is indeed very close to \\(15\\)! Give yourself a pat on the back.\u003c/p\u003e\n\u003ch3 id=\"so-why-the-heck-are-we-doing-all-this\"\u003eSo why the heck are we doing all this\u003c/h3\u003e\n\u003cp\u003eSo why did we go through all the effort of like 25 lines of code to get two numbers to multiply to \\(15\\)? If you think about Neural Networks as a process of \u003cem\u003efunction fitting\u003c/em\u003e, we are essentially asking our very basic \u0026ldquo;network\u0026rdquo; (as indeed, the chain of tensors to build up to our latent value, then to compute our loss, \u003cem\u003eis\u003c/em\u003e a network!) to achieve a measurable task (\u0026ldquo;take the product of these numbers to \\(15\\)\u0026rdquo;). Though the relationships we will be modeling in this class will be more complex than literal multiplication, it will be just using more fancy mechanics of doing the same thing\u0026mdash;taking tensors values which was undesirable, and moving them to more desirable values to model our relationship.\u003c/p\u003e\n\u003ch2 id=\"y-mx-plus-b-and-your-first-neural-network-module\"\u003ey=mx+b and your first neural network \u0026ldquo;module\u0026rdquo;\u003c/h2\u003e\n\u003ch3 id=\"nn-dot-linear\"\u003e\u003ccode\u003enn.Linear\u003c/code\u003e\u003c/h3\u003e\n\u003cp\u003eThe power of neural networks actually comes when a BUNCH of numbers gets multiplied together, all at once! using\u0026hellip; VECTORS and MATRICIES! Don\u0026rsquo;t remember what they are? 
Ask your friendly neighborhood Matt/Jack.\u003c/p\u003e\n\u003cp\u003eRecall, a \u003cstrong\u003ematrix\u003c/strong\u003e is how you can transform a \u003cstrong\u003evector\u003c/strong\u003e from one space to another. Turns out, the brunt of everything you will be doing involves asking SGD to move a bunch of matricies around (like we did before!) such that our input vector(s) gets mapped to the right place.\u003c/p\u003e\n\u003cp\u003eA \u003cstrong\u003ematrix\u003c/strong\u003e, in neural network world, is referred to as a \u003cstrong\u003elinear layer\u003c/strong\u003e. It holds a whole \u003cem\u003eseries\u003c/em\u003e of neurons, taking every single value of the input into account to producing a whole set of output. Because of this property, it is considered a \u003cstrong\u003efully connected layer\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s create such a fully-connected layer (matrix) in PyTorch! When you ask PyTorch to make a matrix for you, you use the \u003ccode\u003enn\u003c/code\u003e sublibrary which we imported before. 
Furthermore, and this is confusing for many people who have worked with matricies before, you specify the \u003cstrong\u003einput dimension first\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_matrix_var_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_matrix_var_1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLinear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ccode\u003emy_matrix_var_1\u003c/code\u003e is a linear map from three dimensions to two dimensions; it will take a vector of three things as input and spit out a vector of two.\u003c/p\u003e\n\u003cp\u003eNote! 
Although \u003ccode\u003emy_matrix_var_1\u003c/code\u003e \u003cem\u003eis\u003c/em\u003e a tensor under the hood just like \u003ccode\u003evar_1\u003c/code\u003e, we 1) didn\u0026rsquo;t have to set default values for it 2) didn\u0026rsquo;t have to mark it as \u003ccode\u003erequires_grad\u003c/code\u003e. This is because, unlike a raw Tensor which often does not require to be changed (such as, for instance, the input value, which you can\u0026rsquo;t change), a matrix is basically ALWAYS a tensor that encodes the \u003cstrong\u003eweights\u003c/strong\u003e of a model we are working with\u0026mdash;so it is always going to be something that we will ask SGD to change on our behalf.\u003c/p\u003e\n\u003cp\u003eSo, since you are asking SGD to change it anyways, PyTorch just filled a bunch of random numbers in for you and set \u003ccode\u003erequires_grad\u003c/code\u003e on for you to \u003ccode\u003emy_matrix_var_1\u003c/code\u003e. If you want to see the actual underlying tensor, you can:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_matrix_var_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eweight\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter containing:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[-0.2634, 0.3729, 
0.5019],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.2796, 0.5425, -0.4337]], requires_grad=True)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAs you can see, we have indeed what we expect: a tensor containing a \\(2\\times 3\\) matrix with \u003ccode\u003erequires_grad\u003c/code\u003e on, filled with random values.\u003c/p\u003e\n\u003cp\u003eHow do we actually optimize over this tensor? You can do all the shenanigans we did before and pass \u003ccode\u003emy_matrix_var_1\u003c/code\u003e to SGD, but this will \u003cem\u003equickly\u003c/em\u003e get unwieldy as you have more parameters. Remember how we had to give SGD a list of EVERYTHING it had to keep track of? \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e were simple enough, but what if we had to do \u003ccode\u003evar_1.weight\u003c/code\u003e, \u003ccode\u003evar_2.weight\u003c/code\u003e, \u003ccode\u003evar_3.weight\u003c/code\u003e\u0026hellip; \u0026hellip; \u0026hellip; \u003cem\u003ead nauseam\u003c/em\u003e for every parameter we use on our large graph? GPT-2 has 1.5 billion parameters. Do you really want to type that?\u003c/p\u003e\n\u003cp\u003eNo.\u003c/p\u003e\n\u003cp\u003eThere is, of course, a better way.\u003c/p\u003e\n\u003ch3 id=\"nn-dot-module\"\u003e\u003ccode\u003enn.Module\u003c/code\u003e\u003c/h3\u003e\n\u003cp\u003eThis, by the way, is the standard of how a Neural Network is properly built from now on until the industry moves on from PyTorch. 
You will want to remember this.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s replicate the example of our previous 3=\u0026gt;2 dimensional linear map, but with a whole lot more code.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eModule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# important: runs early calls to make sure that\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is correct\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esuper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we declare our layers. we will use them below\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a special function that you don\u0026#39;t actually call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# manually, but as you use this module Torch will call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# on your behalf. 
It passes the input through to the layers\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# of your network.\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eforward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to pass whatever input we get, named x\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# through to every layer. 
right now there is only\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# one fully-connected layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhat this does, behind the scenes, is to wrap our matrix and all of its parameters into one giant \u003cstrong\u003emodule\u003c/strong\u003e. (NOTE! This is PyTorch-specific language. Unlike all other vocab before, this term is specific to PyTorch.) A module is an operation on tensors which can retain gradients (i.e. it can change, i.e. \u003ccode\u003erequires_grad=True\u003c/code\u003e).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s see it in action. Recall that our matrix takes a vector of 3 things as input, and spits out a vector of 2 things. 
So let\u0026rsquo;s make a vector of three things:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ethree_vector\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ethree_vector\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([1., 2., 3.])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBy the way, notice the period I\u0026rsquo;m putting after numbers here? That\u0026rsquo;s a shorthand for \u003ccode\u003e.0\u003c/code\u003e. So \u003ccode\u003e3.0 = 3.\u003c/code\u003e. 
I want to take this opportunity to remind you that the tensor operations all take FLOATING POINT tensors as input, because the matrices themselves as initialized with random floating points.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get an instance of the new \u003ccode\u003eMyNetwork\u003c/code\u003e module.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMyNetwork(\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m1): Linear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd apply this operation we designed to our three-vector!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ethree_vector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([0.3850, 1.4120], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWoah! It mapped our vector tensor in three dimensions to a vector tensor in two!\u003c/p\u003e\n\u003cp\u003eThe above code, by the way, is how we actually use our model to run \u003cstrong\u003epredictions\u003c/strong\u003e: \u003ccode\u003emy_network\u003c/code\u003e is \u003cem\u003etransforming\u003c/em\u003e the input vector to the desired output vector.\u003c/p\u003e\n\u003cp\u003eCool. This may not seem all that amazing to you\u0026hellip; yet. But, remember, we can encode \u003cem\u003eany number\u003c/em\u003e of matrix operations in our \u003ccode\u003eforward()\u003c/code\u003e function above. 
Let\u0026rsquo;s design another module that uses two matricies\u0026mdash;or two \u003cstrong\u003efully-connected layers\u003c/strong\u003e, or \u003cstrong\u003elayers\u003c/strong\u003e for short (when we don\u0026rsquo;t specify what kind of layer it is, it is fully connected)\u0026mdash;to perform a transformation.\u003c/p\u003e\n\u003cp\u003eWe will transform a vector from 3 dimensions to 2 dimensions, then from 2 dimensions to 5 dimensions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eModule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# important: runs early calls to make sure that\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is correct\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esuper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we declare our layers. we will use them below\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a special function that you don\u0026#39;t actually call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# manually, but as you use this module Torch will call\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# on your behalf. It passes the input through to the layers\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# of your network.\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eforward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to pass whatever input we get, named x\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# through to every layer. 
right now there is only\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# one fully-connected layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOf course, this network topology is kind of randomly tossed into the network.\u003c/p\u003e\n\u003cp\u003eDoing everything else we did before again, we should end up a vector in 5 dimensions, having been transformed twice behind the scenes!\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMyNetwork(\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m1): Linear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m2): Linear(in_features=2, out_features=5, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd apply this operation we designed to our three-vector!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ethree_vector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([ 0.8241, -0.1014, 0.2940, -0.2019, 0.6749], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice.\u003c/p\u003e\n\u003cp\u003eAnd here\u0026rsquo;s the magical thing: when we are asking SGD to optimize this network, instead of needing to pass every darn parameter used in this network into SVG, we can just pass in:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;generator object Module.parameters at 0x115214270\u0026gt;\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis is actually a list of every single 
\u003ccode\u003etensor\u003c/code\u003e that has \u003ccode\u003erequires_grad=True\u003c/code\u003e that we secretly created. No more typing out a list of every parameter to SGD like we did with \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e! We will see this in action shortly.\u003c/p\u003e\n\u003ch3 id=\"how-to-train-your-neural-network\"\u003eHow to Train Your \u003cdel\u003eDragon\u003c/del\u003e Neural Network\u003c/h3\u003e\n\u003cp\u003eNote, the \u003ccode\u003eMyNetwork\u003c/code\u003e transformation is currently kind of useless. We know it maps the vector \u003ccode\u003e[1,2,3]\u003c/code\u003e to some arbitrary numbers above (i.e. \u003ccode\u003e0.8241\u003c/code\u003e an such). That\u0026rsquo;s quite lame.\u003c/p\u003e\n\u003cp\u003eWe want our network to model some relationship between numbers, that\u0026rsquo;s why we are here. Let\u0026rsquo;s, arbitrarily and for fun, ask SGD to update \u003ccode\u003emy_network\u003c/code\u003e such that it will return \u003ccode\u003e[1,2,3,4,5]\u003c/code\u003e given \u003ccode\u003e[1,2,3]\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eBy the way, from here on, I will use \u003ccode\u003eMyNetwork\u003c/code\u003e to refer to the model 3=\u0026gt;2=\u0026gt;5 network we made above generally, and \u003ccode\u003emy_network\u003c/code\u003e the specific \u003cem\u003einstantiation\u003c/em\u003e of \u003ccode\u003eMyNetwork\u003c/code\u003e whose parameters we will ask SGD to update.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get a clean copy of \u003ccode\u003eMyNetwork\u003c/code\u003e first:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMyNetwork(\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m1): Linear(in_features=3, out_features=2, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e (m2): Linear(in_features=2, out_features=5, bias=True)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, let\u0026rsquo;s create a \u003cem\u003estatic\u003c/em\u003e (i.e. 
SGD cannot change it) input and output vector pair which we will pass into our operation:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor([1., 2., 3.]), tensor([1., 2., 3., 4., 5.]))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will pass our input through the \u003ccode\u003emy_network\u003c/code\u003e operation, and figure out what our inputs currently map to:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([-1.4672, -0.7089, -0.2645, -0.0598, 0.1239], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAh, clearly not \u003ccode\u003e[1,2,3,4,5]\u003c/code\u003e. Recall we want these values to be the same as \u003ccode\u003emy_output\u003c/code\u003e, which they isn\u0026rsquo;t doing right now. Let\u0026rsquo;s fix that.\u003c/p\u003e\n\u003cp\u003eCan you guess what loss function we will use? \u0026hellip; That\u0026rsquo;s right, the same exact thing as before! Squaring the difference.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eloss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([ 6.0869, 7.3380, 10.6571, 16.4821, 23.7766], grad_fn=\u0026lt;PowBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWaiiiit. There\u0026rsquo;s a problem. Remember, SGD can take a single latent value to \\(0\\). That\u0026rsquo;s a whole lotta latent values in a vector! Which one will it take to \\(0\\)? Stop to think about this for a bit: we \u003cem\u003ewant\u003c/em\u003e to take all of these values to \\(0\\), but we can take only a single value to \\(0\\) with SGD. How can we do it?\u003c/p\u003e\n\u003cp\u003eTo do this, we just\u0026hellip; add the values up using the \u003ccode\u003etorch.sum\u003c/code\u003e function!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(64.3406, grad_fn=\u0026lt;SumBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice. We now have something to optimize against, let\u0026rsquo;s actually create our optimizer! Remember that, instead of passing in every single parameter we want PyTorch to change manually, we just pass in \u003ccode\u003emy_network.parameters()\u003c/code\u003e and PyTorch will scan for every single parameter that lives in \u003ccode\u003eMyNetwork\u003c/code\u003e and give it all to SGD:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 1e-06\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eJust for running this model, we are going to run our 
network with more steps (\\(50,000\\)), but with smaller step sizes (\\(1 \\times 10^{-6}\\)). We will not worry about it too much for now, and dive into discussing it further for network parameter tuning.\u003c/p\u003e\n\u003cp\u003eSo, let\u0026rsquo;s make the actual training loop now that will take the latent variable named \u003ccode\u003emy_network_output\u003c/code\u003e, created by applying \u003ccode\u003emy_network\u003c/code\u003e on \u003ccode\u003emy_input\u003c/code\u003e, to take on the value of \u003ccode\u003emy_desired_output\u003c/code\u003e! Can you do it without looking? This will be \u003cem\u003ealmost\u003c/em\u003e the same as our first training loop, except we are asking our network to calculate the current latent output (instead of computing it from scratch each time.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e50000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new latent variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Backprop!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Optimize!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Reset!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([-0.9814, 0.4252, 1.8085, 2.7022, 3.5517], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNot great! But\u0026mdash;we are both \u003cem\u003eordered\u003c/em\u003e correctly and \u0026mdash; if you just kept running this loop, we will eventually \u003cstrong\u003econverge\u003c/strong\u003e (arrive at) the right answer! 
For kicks, let\u0026rsquo;s run it \\(50000\\) more times:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e50000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new latent variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network_output\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_desired_output\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Backprop!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eloss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Optimize!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Reset!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emy_sgd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_input\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([0.9975, 1.9986, 3.0006, 4.0026, 5.0052], grad_fn=\u0026lt;AddBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWould you look at that! What did I promise you :)\u003c/p\u003e\n\u003cp\u003eYour network \u003cem\u003elearned\u003c/em\u003e something! Specifically, the skill of mapping \\([1,2,3]\\) to \\([1,2,3,4,5]\\)! Congrats!\u003c/p\u003e\n\u003ch2 id=\"challenge\"\u003eChallenge\u003c/h2\u003e\n\u003cp\u003eNow that you know how to get the network to map a specific vector in three dimensions to a specific place in five dimensions, can you do that more generally? 
Can you generate and give your own network enough examples such that it will learn to do that for ALL vectors in three dimensions?\u003c/p\u003e\n\u003cp\u003eSpecifically, generate a training set of in python and train your neural network now to perform the following operation:\u003c/p\u003e\n\u003cp\u003eGiven a vector \\([a,b,c]\\), return \\([a,b,c,c+1,c+2]\\), for every integer \\([a,b,c]\\).\u003c/p\u003e\n\u003cp\u003eHint: pass in many examples for correct behavior sequentially during each of your training loops, calculating loss and running the \u003cstrong\u003eoptimization step\u003c/strong\u003e (i.e. back! optimize! reset!) after each example you give.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_dipping_into_pytorch/","tags":["writing","aml"],"title":"AML: Dipping into PyTorch"},{"categories":null,"contents":"You are no doubt familiar with the Iris dataset: a dataset containing flower pedal shapes and their corresponding sub-type of Iris flower: Setosa, Versicolour, and Virginica.\nWe are going to take those pedal measurements, and predict the type of Iris we are looking at!\nLet\u0026rsquo;s get the Iris dataset first. Turns out, Scikit Learn (your old friend from last semester) ships a copy of the Iris dataset with itself. So, we will load the dataset from it.\nLet\u0026rsquo;s first import what we need:\nimport torch import torch.nn as nn import sklearn from sklearn import datasets import pandas as pd Excellent. To load the built-in Iris dataset from sklearn, we can:\n# load iris iris = datasets.load_iris() # put input features into a dataframe df = pd.DataFrame(data=iris.data, columns=iris.feature_names) # add targets column from iris data df[\u0026#34;target\u0026#34;] = iris.target df sepal length (cm) sepal width (cm) ... petal width (cm) target 0 5.1 3.5 ... 0.2 0 1 4.9 3.0 ... 0.2 0 2 4.7 3.2 ... 0.2 0 3 4.6 3.1 ... 0.2 0 4 5.0 3.6 ... 0.2 0 .. ... ... ... ... ... 145 6.7 3.0 ... 2.3 2 146 6.3 2.5 ... 
1.9 2 147 6.5 3.0 ... 2.0 2 148 6.2 3.4 ... 2.3 2 149 5.9 3.0 ... 1.8 2 [150 rows x 5 columns] You can imagine that this dataset could have been loaded from a CSV, etc.\nJust to recap, here are the columns of this dataset:\ndf.columns Index([\u0026#39;sepal length (cm)\u0026#39;, \u0026#39;sepal width (cm)\u0026#39;, \u0026#39;petal length (cm)\u0026#39;, \u0026#39;petal width (cm)\u0026#39;, \u0026#39;target\u0026#39;], dtype=\u0026#39;object\u0026#39;) Now, pause. Let\u0026rsquo;s think about two questions from last semester:\nWhat type of ML problem is this? (Classification? Regression? Clustering?) Before any engineering: How many input features are there? How many output features? \u0026hellip;\n\u0026hellip;\nWhat type of ML problem is this? Classification Before any engineering: 4 input features, 1 output feature Awesome. Let\u0026rsquo;s inspect this dataset again:\ndf sepal length (cm) sepal width (cm) ... petal width (cm) target 0 5.1 3.5 ... 0.2 0 1 4.9 3.0 ... 0.2 0 2 4.7 3.2 ... 0.2 0 3 4.6 3.1 ... 0.2 0 4 5.0 3.6 ... 0.2 0 .. ... ... ... ... ... 145 6.7 3.0 ... 2.3 2 146 6.3 2.5 ... 1.9 2 147 6.5 3.0 ... 2.0 2 148 6.2 3.4 ... 2.3 2 149 5.9 3.0 ... 1.8 2 [150 rows x 5 columns] You will notice that the targets are not shuffled. If we fit this into our neural network, it will overfit\u0026mdash;memorize output without generalization\u0026mdash;to one target, then to another, etc.\nSo first, let\u0026rsquo;s shuffle this table. To do so, we will simply ask Pandas to resample \\(100\\%\\) of the dataset; it will do this sampling randomly:\ndf = df.sample(frac=1) df sepal length (cm) sepal width (cm) ... petal width (cm) target 49 5.0 3.3 ... 0.2 0 93 5.0 2.3 ... 1.0 1 50 7.0 3.2 ... 1.4 1 145 6.7 3.0 ... 2.3 2 14 5.8 4.0 ... 0.2 0 .. ... ... ... ... ... 48 5.3 3.7 ... 0.2 0 91 6.1 3.0 ... 1.4 1 45 4.8 3.0 ... 0.3 0 131 7.9 3.8 ... 2.0 2 5 5.4 3.9 ... 0.4 0 [150 rows x 5 columns] You will note, however, that the indicies are reshuffled as well! 
This is actually Pandas being helpful\u0026mdash;allowing us to unshuffle the dataset if needed. But, we actually have no need to do this.\n","html":"\u003cp\u003eYou are no doubt familiar with the Iris dataset: a dataset containing flower pedal shapes and their corresponding sub-type of Iris flower: Setosa, Versicolour, and Virginica.\u003c/p\u003e\n\u003cp\u003eWe are going to take those pedal measurements, and predict the type of Iris we are looking at!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s get the Iris dataset first. Turns out, Scikit Learn (your old friend from last semester) ships a copy of the Iris dataset with itself. So, we will load the dataset from it.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first import what we need:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003esklearn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatasets\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. To load the built-in Iris dataset from sklearn, we can:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# load iris\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatasets\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eload_iris\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# put input features into a dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efeature_names\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# add targets column from iris data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiris\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) ... petal width (cm) target\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 ... 1.9 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 ... 2.0 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 ... 
1.8 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can imagine that this dataset could have been loaded from a CSV, etc.\u003c/p\u003e\n\u003cp\u003eJust to recap, here are the columns of this dataset:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eIndex([\u0026#39;sepal length (cm)\u0026#39;, \u0026#39;sepal width (cm)\u0026#39;, \u0026#39;petal length (cm)\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;petal width (cm)\u0026#39;, \u0026#39;target\u0026#39;],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dtype=\u0026#39;object\u0026#39;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, pause. Let\u0026rsquo;s think about two questions from last semester:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eWhat type of ML problem is this? (Classification? Regression? 
Clustering?)\u003c/li\u003e\n\u003cli\u003eBefore any engineering: How many \u003cstrong\u003einput\u003c/strong\u003e features are there? How many \u003cstrong\u003eoutput\u003c/strong\u003e features?\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eWhat type of ML problem is this? \u003cem\u003eClassification\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eBefore any engineering: \u003cem\u003e4 input features\u003c/em\u003e, \u003cem\u003e1 output feature\u003c/em\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAwesome. Let\u0026rsquo;s inspect this dataset again:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) ... petal width (cm) target\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 5.1 3.5 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 4.9 3.0 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 4.7 3.2 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 4.6 3.1 ... 
0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 5.0 3.6 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e146 6.3 2.5 ... 1.9 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e147 6.5 3.0 ... 2.0 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e148 6.2 3.4 ... 2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e149 5.9 3.0 ... 1.8 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou will notice that the targets are \u003cem\u003enot shuffled\u003c/em\u003e. If we fit this into our neural network, it will \u003cstrong\u003eoverfit\u003c/strong\u003e\u0026mdash;memorize output without generalization\u0026mdash;to one target, then to another, etc.\u003c/p\u003e\n\u003cp\u003eSo first, let\u0026rsquo;s shuffle this table. 
To do so, we will simply ask Pandas to resample \\(100\\%\\) of the dataset; it will do this sampling randomly:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esample\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efrac\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e sepal length (cm) sepal width (cm) ... petal width (cm) target\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e49 5.0 3.3 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e93 5.0 2.3 ... 1.0 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e50 7.0 3.2 ... 1.4 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e145 6.7 3.0 ... 
2.3 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e14 5.8 4.0 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e48 5.3 3.7 ... 0.2 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e91 6.1 3.0 ... 1.4 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e45 4.8 3.0 ... 0.3 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e131 7.9 3.8 ... 2.0 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 5.4 3.9 ... 0.4 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[150 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou will note, however, that the indicies are reshuffled as well! This is actually Pandas being helpful\u0026mdash;allowing us to unshuffle the dataset if needed. But, we actually have no need to do this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_iris_strikes_bath/","tags":null,"title":"AML: Iris Strikes Back"},{"categories":null,"contents":"Hello everyone! It\u0026rsquo;s April, which means we are ready again for a new unit. Let\u0026rsquo;s dive in.\nYou know what\u0026rsquo;s better than one neural network? TWO!!! Multi-modal approaches\u0026mdash;making two neural networks interact for a certain result\u0026mdash;dominate many of the current edge of neural network research. 
In this unit, we are going to introduce one such approach, Generative Adversarial Networks (GAN), but leave you with some food for thought for other possibilities for what training multiple networks together can do.\nBe aware that this unit will begin our more theory-focused discussions, and will leave more of the implementation up to your own explorations or a later fuller example. If you don\u0026rsquo;t understand the math or the theory, please do flag us down in class or out to get things clarified.\nMotivation Although we will provide motivations for the architecture of a GAN in a bit, let\u0026rsquo;s first provide a problem to ground ourselves.\nSay we want to build a neural network to generate pictures of mountain goats. How would you do that?\nYou can\u0026rsquo;t build a supervised model exactly: what\u0026rsquo;s the input, and what are the labels? No clear answer. Even if you have labels, you\u0026rsquo;d have infinitely many possible such mountain goats; how do you generate labels for all of those?\nTo help us in solving this problem, let us make a few related claims that may seem unmotivated for now:\nIt is easy to find images of mountain goats [citation needed] It is eas(ier) to train a model to classify if an image is a mountain goat or not It is easy to generate random noise We want more pictures of mountain goats because they are cool It maybe unclear how 1-3 help us solve the mountain-goat generation problem; to explain why they are all crucial, we have to first understand some hand wavy game theory.\n(Better) Motivation It\u0026rsquo;s storytime!\nAl Capone and Eliot Ness are playing a game. Al is trying to create counterfeit Franklins, and Eliot is trying to catch them out of circulation.\nAl first uses his HP Inkjet printer to print the currency. Eliot quickly wises up and uses a microscope to observe whether or not a piece of money in question is printed by ink or via color pigmented paper. 
Not wishing to foil his plan, Al asks his lab to develop new color pigmentation technology\u0026mdash;just like the US currency does!\nYet, Eliot outsmarts Al again\u0026mdash;he uses a spectrophotometer to analyze whether or not the money in question is made using paper or on cotton like the actual US currency. Seeing this, an angry Al purchases a tonne of cotton and starts printing his counterfeits on cotton.\nWanting to satisfy Jack\u0026rsquo;s uselessly long analogy, Doctor Strange comes and freezes time for everyone except Al and Eliot (and their respective teams). As the true US currency technology remains the same, Eliot and Al continue to play this game: both developing better technologies to make or catch counterfeits.\nAfter a billion years, Doctor Strange gets bored and looked into his frozen world. What does he see?\nAl Capone built an exact replica of the US Mint.\nWhy? Each time Al gets caught out by Eliot, Al learns one more aspect of how his counterfeit differs from actual US currency. In effect, he\u0026rsquo;s also learning one new detail of how the US currency is made. 
Therefore, if he keeps patching these tiny differences that Eliot helpfully pointed out for him for the span of a billion years, what Al will be producing will eventually be indistinguishable from US dollars as Eliot will be out of things to point out!\nAt this point, the Capone-Ness system has reached what we call Nash equilibrium: neither Eliot nor Al have a better move to make\u0026mdash;Eliot no longer has anything more he can possibly do to catch counterfeits as Al\u0026rsquo;s money is identical to US currency, and Al can no longer change his formula for money-making as any deviation will create another factor Eliot can latch onto.\nGANs A Generative Adversarial Network (GAN) is a multi-modal generation model.\nIt is made out of two interacting neural networks:\ngenerator \\(G(x)\\): Al Capone discriminator \\(D(x)\\): Eliot Ness Specifically, the generator is an unsupervised model trained on the task of generating the targets (\u0026ldquo;images of mountain goats\u0026rdquo;) from random noise, while the discriminator is a self-supervised model trained on the task of classifying whether or not something is actually the target (\u0026ldquo;actual images of mountain goats\u0026rdquo;) or the output of the generator.\nThe two models converge in tandem, in a similar fashion to the story discribed above.\nDiscriminator \\(D(x)\\) The discriminator \\(D(x)\\) is perhaps the more easily understandable model out of the two. 
It is a self-supervised model designed with the task of discriminating whether or not a particular input came from the actual world (\u0026ldquo;goat images\u0026rdquo;) or was the output of the generator.\nSpecifically, the discriminator is a neural network with any middle layers you\u0026rsquo;d like that takes the output of the generator or real images as input, and produces a single sigmoid activated feature (between 0-1) where \\(0\\) represents \u0026ldquo;definitely produced by generator\u0026rdquo; and \\(1\\) represents \u0026ldquo;definitely real world.\u0026rdquo;\nGenerator \\(G(x)\\) The generator \\(G(x)\\) is a model that takes a random tensor as input and attempts to produce a generated sample (\u0026ldquo;a picture of a goat\u0026rdquo;). As with the discriminator, it can have any middle layers you\u0026rsquo;d like but has to produce a tensor with the same shape and activation of an actual sample. For instance, if you are trying to produce images, the output of your generator has to be of shape \\((channels, x, y)\\) activated with sigmoid for brightness; if you are trying to produce single scalars, then the generator has to produce only value, etc.\nIt is perhaps very mystifying how we would ever build a magical box that takes a random tensor and turn it into a pretend image; looking at the loss functions (i.e. training objectives) of these two networks may perhaps help clarify this.\nLoss Functions Before we begin, I want to quickly reiterate something which will be crucial to your mental framework of the loss functions: THEY ARE NOT METRICS. 
The value of the loss functions\u0026mdash;especially these ones\u0026mdash;are now completely devoid of physical meaning; instead, the trend of the loss functions (\u0026ldquo;value goes down means model is doing better\u0026rdquo;) is what matters.\nWe are introducing the simplest form of GAN loss functions by Goodfellow, et al called \u0026ldquo;non-saturating loss.\u0026rdquo; There are better ones, but these ones are mathematically elegant and works most of the time\u0026mdash;and are the \u0026ldquo;base case\u0026rdquo; loss functions which other models improve on.\nDiscriminator Loss \\begin{equation} L_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i}))) \\end{equation}\nwhere, \\(\\bold{x}_{i}\\) is a tensor representing a real sample (for instance, again, an actual grid of pixels for a mountain goat image), and \\(\\bold{z}_{i}\\) is a tensor containing random noise.\nWoof. This is quite a scary loss function; let\u0026rsquo;s break it up into pieces.\n\\(-\\log D(\\bold{x}_{i})\\): \\(\\bold{x}_{i}\\) is a real sample, so we expect \\(D\\) to produce \\(1\\). Any value below \\(1\\) (i.e. the discriminator thinking a real image is generated) will produce negative values of increasingly larger magnitude as \\(D(\\bold{x}_{i})\\) approaches \\(0\\). If the discriminator produces \\(1\\) correctly, \\(\\log 1 = 0\\) and we indeed have converged.\n\\(-\\log (1- D(G(\\bold{z}_{i})))\\): on the flip side, we expect the generator to consider the output of the generator (i.e. \\(D(G(\\bold{z}_{i}))\\)) to be generated and produce \\(0\\). Therefore, we expect the same scheme as before but flipped (\\(1-D(G(\\bold{z}_{i})\\))\u0026mdash;if \\(D(G(\\bold{z}))\\) produces \\(1\\) (\u0026ldquo;the discriminator is fooled\u0026rdquo;), \\(1-D(G(\\bold{z}))\\) will produce \\(0\\) and the loss will be very high. 
Vise versa: if \\(D(G(\\bold{z}))\\) produces \\(0\\) (\u0026ldquo;the discriminator picked out the fake\u0026rdquo;), the loss will be \\(0\\).\nAdding the two values encourages our discriminator to both classify real samples as real \\(1\\), and generated samples as fake \\(0\\).\nGenerator Loss \\begin{equation} L_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i}))) \\end{equation}\nThe sharp-eyed among you may realize that this is just the right term from the above expression without the \\(1-\\) negation. Indeed, the training target for the generator is very simple: \u0026ldquo;did I fool the discriminator\u0026rdquo;: if \\(D\\) produces a large (close to \\(1\\)) output on the generated result\u0026mdash;indicating that it is indeed \u0026ldquo;fooled\u0026rdquo;\u0026mdash;our \\(log\\) will approach \\(0\\); whereas, if \\(D\\) produces a small (close to \\(0\\)) output on the generated result\u0026mdash;indicating that it correctly spotted the fake\u0026mdash;our \\(log\\) will produce a very negative value which creates high loss.\nThe GAN Training Loop Loss functions in place, we are almost ready to make the model. The thing that\u0026rsquo;s tricky about training a GAN is that we have to ensure that both the discriminator and generator are converging at the same exact time: ensuring that neither Capone nor Ness has dramatically better technology than the other. This requires a little bit of finesse on your part in terms of the training loop. Plus, our loss functions here are quite special, so their definitions will also need a little wrangling.\nAt this point, though, I hope we are all pretty confident in how to structure the basics of a ML model. Instead of going over that again, let\u0026rsquo;s go over some of the differences in Python pseudo-code (code that doesn\u0026rsquo;t run, but to illustrate how you would write it)\u0026mdash;specially in four focus areas.\nDataprep Just a short note here on GAN data prep. 
What\u0026rsquo;s the special thing about GANs? They are self-supervised\u0026mdash;meaning they make their own labels. Instead, all you need to provide is plenty of examples of the thing you want your model to generate.\nAs such, your batch wouldn\u0026rsquo;t contain x_data, y_data, etc. Instead, your dataset code should look something of the flavor:\nimage_grid = example_data_for_the_gan_numpy() dataset = TensorDataset(torch.tensor(image_grid).float()) # only one argument! dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True) You will notice that the TensorDataset here took only one argument as input, as opposed to the usual 2: this is, as we discussed before, as product of the fact that our GAN only needs examples of the thing you want it to generate\u0026mdash;no labels needed (or possible!)\nNetwork Construction Of course, a GAN consists of two different networks. Though the network construction is mostly arbitrary, there are some general constraints:\ngenerator input shape: arbitrary, but takes exclusively random values as input; ideally you want this to be the same number of dimensions as the output output shape: the output shape of your network has to be the shape of one sample of the real data as the generator should generate something that looks like real data output activation: whatever makes sense for the real data: if probabilities, then softmax; if images, then sigmoid (as normalized brightness), etc. discriminator input shape: the output shape of the generator, or the shape of one real sample of data. (Thinking Break: WHY? as usual, pause and chat) output shape: (batch_size, 1). 
We want to output a scalar between \\(0\\) (\u0026ldquo;probably fake\u0026rdquo;) and \\(1\\) (\u0026ldquo;probably real\u0026rdquo;) for every sample output activation: sigmoid to get those values actually between \\(0\\) and \\(1\\) Network Initialization Because the generator and discriminator are two different networks, they require different optimizers!\nSo, we have to go about making them. This is fortunately pretty direct:\n# initialize networks gen = GeneratorNetwork() disc = DiscriminatorNetwork() # initalize *two seperate optimizers* gen_optim = Adam(gen.parameters(), lr=LR1) disc_optim = Adam(disc.parameters(), lr=LR2) Nothing out of the ordinary here, but a worthy reminder that you need 2. This will become important shortly.\nTraining Loop This is the main event, and probably the bit that most people trip up the most: the training loop. Let\u0026rsquo;s see a pseudocode implementation of one, and we will discuss how its structured.\nNote that we will be making some adjustments to our tried-and-true backprop logic.\nfor _ in range(EPOCHS): for batch in iter(dataloader): # train generator first disc_score = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE))) # compute + backprop generator loss generator_loss = (-torch.log(disc_score)) generator_loss.backward() # disconnect discriminator gradients disc_optim.zero_grad() # step and clear gen_optim.step() gen_optim.zero_grad() # now, train discriminator disc_score_false = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE)).detach()) disc_score_true = disc(batch) # compute + backprop discriminator loss discriminator_loss = (-torch.log(disc_score_true)-torch.log(1-disc_score_false)) discriminator_loss.backward() # step and clear disc_optim.step() disc_optim.zero_grad() Woweee. Much to talk about. 
Let\u0026rsquo;s break it down.\nScoring on fake sample We first generate a fake sample from the generator by first passing it random noise from torch.rand, then passing its output to the discriminator to get a group of scores.\ndisc_score = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE))) Calculating the generator loss Next up, we will calculate the generator loss on the score that the discriminator gave for that fake sample we generated earlier.\nRecall that:\n\\begin{equation} L_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i}))) \\end{equation}\nand hence:\ngenerator_loss = (-torch.log(disc_score)) Thinking break!: why does implementing (-torch.log(disc_score)) accomplish the same thing as taking \\(-\\log (D(G(\\bold{z}_{i})))\\)? Specifically, how is disc_score calculated in our example?\nThe generator backprop step For all that drilling we did of BACKPROP! STEP! RESET!, the next step may feel sacrilegious:\ngenerator_loss.backward() # disconnect discriminator gradients disc_optim.zero_grad() # step and clear gen_optim.step() gen_optim.zero_grad() What is happening here? Let\u0026rsquo;s take it one step at a time.\nFirst, we call generator_loss.backward() to backprop the loss; nothing wrong here. But then, against all odds, we call .zero_grad() on the discriminator optimizer. What gives?\nRecall that, in this case, we are training the generator; as the loss-function literally asks the discriminator to be wrong, we mustn\u0026rsquo;t be updating the discriminator using the gradients computed against this function; instead, we simply want the generator to be updated to better fool the discriminator.\nTherefore, we immediately zero out all the gradients on the discriminator to prevent this step from updating the discriminator with the \u0026ldquo;fooling\u0026rdquo; loss function; and proceed to update the generator weights as usual.\nScoring on detached fake sample and real sample Next up, training the discriminator. 
We first obtain scores from the discriminator for a real sample and a fake sample separately:\ndisc_score_false = disc(gen(torch.rand(BATCH_SIZE,YOUR,INPUT,SHAPE,HERE)).detach()) disc_score_true = disc(batch) You should notice that the code here for obtaining the fake sample is almost identical to the one before; except, we are calling this .detach() against the generator output. This is very functionally similar to the \u0026ldquo;calling .zero_grad() immediately\u0026rdquo; move we made earlier; called .detach() asks PyTorch to treat whatever tensor there as a constant, and not propagate gradients any more backwards into the generator, which in this case we do not want to change as we are optimizing the discriminator.\nCalculating the discriminator loss With all the pieces in place, this is again just a very directly implementation of:\n\\begin{equation} L_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i}))) \\end{equation}\nin code.\ndiscriminator_loss = (-torch.log(disc_score_true)-torch.log(1-disc_score_false)) Normal backprop Because we ran .detach() before on the generator output, the generator is treated as a constant through this second loss function; as such, our backpropegation step will normally update the discriminator\u0026rsquo;s weights without any fuss. We therefore go back to our tried-and-true formula:\ndiscriminator_loss.backward() disc_optim.step() disc_optim.zero_grad() Tada! That\u0026rsquo;s it; the GAN training loop.\nFinal Thoughts and Unit Challenge Sorry for the very theoretically dense unit; please don\u0026rsquo;t hesitate to flag us down if any questions take place. 
To leave you, here are a few final tips and tricks for making GANs.\nIf your model doesn\u0026rsquo;t work, try pretraining the discriminator: letting Eliot Ness get a bit of a head start by training the discriminator to recognize noise from real images; to do this, just don\u0026rsquo;t run the code that updates the generator weights. GANs are known to perform something called mode collapse: whereby, instead of reaching Nash equilibrium, one of the two networks crash while the other one completely converges. One attempt to solve this is something called Wassterstein Loss, which is discussed here (https://developers.google.com/machine-learning/gan/loss#wasserstein-loss). One important note, however, is that using this loss function makes your network technically not a GAN anymore (as the discriminator will not be actually usefully discriminating, instead acting as a \u0026ldquo;critic\u0026rdquo; for the generator only producing non-interpretable scores), but it has shown improved performance for the generator only. GANs are notoriously hard to make work. See this whole page from Google (https://developers.google.com/machine-learning/gan/loss) about the various ways GANs can fail and possible strategies to remedy them. Do not be scared if your model doesn\u0026rsquo;t work immediately or even after copious tuning. Ok, onto the challenge: make a GAN! There are two variants of this:\nEasier \u0026mdash; use a pair of dense neural networks to make a GAN to generate valid series of \\(5\\) numbers which we explored in the beginning of this class \\([a,b,c,c+1,c+2]\\) Harder \u0026mdash; use a pair of convolutional neural networks to make a GAN to generate these nice pictures of pets (https://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz). Sorry that this is not mountain goats: unfortunately, a dataset large enough is not available for this task :/ Good luck, and have fun!\n","html":"\u003cp\u003eHello everyone! 
It\u0026rsquo;s April, which means we are ready again for a new unit. Let\u0026rsquo;s dive in.\u003c/p\u003e\n\u003cp\u003eYou know what\u0026rsquo;s better than one neural network? TWO!!! Multi-modal approaches\u0026mdash;making two neural networks interact for a certain result\u0026mdash;dominate many of the current edge of neural network research. In this unit, we are going to introduce one such approach, \u003cstrong\u003eGenerative Adversarial Networks\u003c/strong\u003e (\u003cstrong\u003eGAN\u003c/strong\u003e), but leave you with some food for thought for other possibilities for what training multiple networks together can do.\u003c/p\u003e\n\u003cp\u003eBe aware that this unit will begin our more theory-focused discussions, and will leave more of the implementation up to your own explorations or a later fuller example. If you don\u0026rsquo;t understand the math or the theory, please do flag us down in class or out to get things clarified.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eAlthough we will provide motivations for the architecture of a \u003cstrong\u003eGAN\u003c/strong\u003e in a bit, let\u0026rsquo;s first provide a problem to ground ourselves.\u003c/p\u003e\n\u003cp\u003eSay we want to build a neural network to generate pictures of mountain goats. How would you do that?\u003c/p\u003e\n\u003cp\u003eYou can\u0026rsquo;t build a supervised model exactly: what\u0026rsquo;s the input, and what are the labels? No clear answer. 
Even if you have labels, you\u0026rsquo;d have infinitely many possible such mountain goats; how do you generate labels for all of those?\u003c/p\u003e\n\u003cp\u003eTo help us in solving this problem, let us make a few related claims that may seem unmotivated for now:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eIt is easy to find images of mountain goats \u003ccode\u003e[citation needed]\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eIt is eas(ier) to train a model to classify if an image is a mountain goat or not\u003c/li\u003e\n\u003cli\u003eIt is easy to generate random noise\u003c/li\u003e\n\u003cli\u003eWe want more pictures of mountain goats because they are cool\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIt maybe unclear how \u003ccode\u003e1-3\u003c/code\u003e help us solve the mountain-goat generation problem; to explain why they are all crucial, we have to first understand some hand wavy game theory.\u003c/p\u003e\n\u003ch2 id=\"better--motivation\"\u003e(Better) Motivation\u003c/h2\u003e\n\u003cp\u003eIt\u0026rsquo;s storytime!\u003c/p\u003e\n\u003cp\u003eAl Capone and Eliot Ness are playing a game. Al is trying to create counterfeit Franklins, and Eliot is trying to catch them out of circulation.\u003c/p\u003e\n\u003cp\u003eAl first uses his HP Inkjet printer to print the currency. Eliot quickly wises up and uses a microscope to observe whether or not a piece of money in question is printed by ink or via color pigmented paper. Not wishing to foil his plan, Al asks his lab to develop new color pigmentation technology\u0026mdash;just like the US currency does!\u003c/p\u003e\n\u003cp\u003eYet, Eliot outsmarts Al again\u0026mdash;he uses a spectrophotometer to analyze whether or not the money in question is made using paper or on cotton like the actual US currency. 
Seeing this, an angry Al purchases a tonne of cotton and starts printing his counterfeits on cotton.\u003c/p\u003e\n\u003cp\u003eWanting to satisfy Jack\u0026rsquo;s uselessly long analogy, Doctor Strange comes and freezes time for everyone except Al and Eliot (and their respective teams). As the true US currency technology remains the same, Eliot and Al continue to play this game: both developing better technologies to make or catch counterfeits.\u003c/p\u003e\n\u003cp\u003eAfter a billion years, Doctor Strange gets bored and looked into his frozen world. What does he see?\u003c/p\u003e\n\u003cp\u003eAl Capone built an exact replica of the US Mint.\u003c/p\u003e\n\u003cp\u003eWhy? Each time Al gets caught out by Eliot, Al learns one more aspect of how his counterfeit differs from actual US currency. In effect, he\u0026rsquo;s also learning one new detail of how the US currency is made. Therefore, if he keeps patching these tiny differences that Eliot helpfully pointed out for him for the span of a billion years, what Al will be producing will eventually be indistinguishable from US dollars as Eliot will be out of things to point out!\u003c/p\u003e\n\u003cp\u003eAt this point, the Capone-Ness system has reached what we call \u003cstrong\u003eNash equilibrium\u003c/strong\u003e: neither Eliot nor Al have a better move to make\u0026mdash;Eliot no longer has anything more he can possibly do to catch counterfeits as Al\u0026rsquo;s money is identical to US currency, and Al can no longer change his formula for money-making as any deviation will create another factor Eliot can latch onto.\u003c/p\u003e\n\u003ch2 id=\"gans\"\u003eGANs\u003c/h2\u003e\n\u003cp\u003eA \u003cstrong\u003eGenerative Adversarial Network\u003c/strong\u003e (\u003cstrong\u003eGAN\u003c/strong\u003e) is a multi-modal generation model.\u003c/p\u003e\n\u003cp\u003eIt is made out of two interacting neural 
networks:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003egenerator\u003c/strong\u003e \\(G(x)\\): Al Capone\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ediscriminator\u003c/strong\u003e \\(D(x)\\): Eliot Ness\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSpecifically, the \u003cstrong\u003egenerator\u003c/strong\u003e is an unsupervised model trained on the task of generating the targets (\u0026ldquo;images of mountain goats\u0026rdquo;) from random noise, while the \u003cstrong\u003ediscriminator\u003c/strong\u003e is a \u003cstrong\u003eself-supervised model\u003c/strong\u003e trained on the task of classifying whether or not something is actually the target (\u0026ldquo;actual images of mountain goats\u0026rdquo;) or the output of the generator.\u003c/p\u003e\n\u003cp\u003eThe two models converge in tandem, in a similar fashion to the story discribed above.\u003c/p\u003e\n\u003ch3 id=\"discriminator-d--x\"\u003eDiscriminator \\(D(x)\\)\u003c/h3\u003e\n\u003cp\u003eThe \u003cstrong\u003ediscriminator\u003c/strong\u003e \\(D(x)\\) is perhaps the more easily understandable model out of the two. 
It is a \u003cstrong\u003eself-supervised model\u003c/strong\u003e designed with the task of discriminating whether or not a particular input came from the actual world (\u0026ldquo;goat images\u0026rdquo;) or was the output of the \u003cstrong\u003egenerator\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eSpecifically, the \u003cstrong\u003ediscriminator\u003c/strong\u003e is a neural network with any middle layers you\u0026rsquo;d like that takes the output of the \u003cstrong\u003egenerator\u003c/strong\u003e \u003cem\u003eor\u003c/em\u003e real images as input, and produces a single \u003ccode\u003esigmoid\u003c/code\u003e activated feature (between 0-1) where \\(0\\) represents \u0026ldquo;definitely produced by \u003cstrong\u003egenerator\u003c/strong\u003e\u0026rdquo; and \\(1\\) represents \u0026ldquo;definitely real world.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"generator-g--x\"\u003eGenerator \\(G(x)\\)\u003c/h3\u003e\n\u003cp\u003eThe \u003cstrong\u003egenerator\u003c/strong\u003e \\(G(x)\\) is a model that takes a \u003cem\u003erandom tensor\u003c/em\u003e as input and attempts to produce a generated sample (\u0026ldquo;a picture of a goat\u0026rdquo;). As with the discriminator, it can have any middle layers you\u0026rsquo;d like but has to produce a tensor with the same shape and activation of an actual sample. For instance, if you are trying to produce images, the output of your \u003cstrong\u003egenerator\u003c/strong\u003e has to be of shape \\((channels, x, y)\\) activated with \u003ccode\u003esigmoid\u003c/code\u003e for brightness; if you are trying to produce single scalars, then the \u003cstrong\u003egenerator\u003c/strong\u003e has to produce only value, etc.\u003c/p\u003e\n\u003cp\u003eIt is perhaps very mystifying how we would ever build a magical box that takes a random tensor and turn it into a pretend image; looking at the loss functions (i.e. 
training objectives) of these two networks may perhaps help clarify this.\u003c/p\u003e\n\u003ch3 id=\"loss-functions\"\u003eLoss Functions\u003c/h3\u003e\n\u003cp\u003eBefore we begin, I want to quickly reiterate something which will be crucial to your mental framework of the loss functions: \u003cstrong\u003eTHEY ARE NOT METRICS\u003c/strong\u003e. The \u003cem\u003evalue\u003c/em\u003e of the loss functions\u0026mdash;especially these ones\u0026mdash;are now completely devoid of physical meaning; instead, the \u003cem\u003etrend\u003c/em\u003e of the loss functions (\u0026ldquo;value goes down means model is doing better\u0026rdquo;) is what matters.\u003c/p\u003e\n\u003cp\u003eWe are introducing the simplest form of \u003cstrong\u003eGAN\u003c/strong\u003e loss functions by \u003ca href=\"https://arxiv.org/abs/1406.2661\"\u003eGoodfellow, et al\u003c/a\u003e called \u0026ldquo;non-saturating loss.\u0026rdquo; There are better ones, but these ones are mathematically elegant and works most of the time\u0026mdash;and are the \u0026ldquo;base case\u0026rdquo; loss functions which other models improve on.\u003c/p\u003e\n\u003ch4 id=\"discriminator-loss\"\u003eDiscriminator Loss\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nL_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\bold{x}_{i}\\) is a tensor representing a real sample (for instance, again, an actual grid of pixels for a mountain goat image), and \\(\\bold{z}_{i}\\) is a tensor containing random noise.\u003c/p\u003e\n\u003cp\u003eWoof. This is quite a scary loss function; let\u0026rsquo;s break it up into pieces.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\\(-\\log D(\\bold{x}_{i})\\): \\(\\bold{x}_{i}\\) is a real sample, so we expect \\(D\\) to produce \\(1\\). Any value below \\(1\\) (i.e. 
the \u003cstrong\u003ediscriminator\u003c/strong\u003e thinking a real image is generated) will produce negative values of increasingly larger magnitude as \\(D(\\bold{x}_{i})\\) approaches \\(0\\). If the discriminator produces \\(1\\) correctly, \\(\\log 1 = 0\\) and we indeed have converged.\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\\(-\\log (1- D(G(\\bold{z}_{i})))\\): on the flip side, we expect the generator to consider the output of the generator (i.e. \\(D(G(\\bold{z}_{i}))\\)) to be generated and produce \\(0\\). Therefore, we expect the same scheme as before but flipped (\\(1-D(G(\\bold{z}_{i})\\))\u0026mdash;if \\(D(G(\\bold{z}))\\) produces \\(1\\) (\u0026ldquo;the discriminator is fooled\u0026rdquo;), \\(1-D(G(\\bold{z}))\\) will produce \\(0\\) and the loss will be very high. Vise versa: if \\(D(G(\\bold{z}))\\) produces \\(0\\) (\u0026ldquo;the discriminator picked out the fake\u0026rdquo;), the loss will be \\(0\\).\u003c/p\u003e\n\u003cp\u003eAdding the two values encourages our discriminator to both classify real samples as real \\(1\\), and generated samples as fake \\(0\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"generator-loss\"\u003eGenerator Loss\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nL_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe sharp-eyed among you may realize that this is just the right term from the above expression without the \\(1-\\) negation. 
Indeed, the training target for the \u003cstrong\u003egenerator\u003c/strong\u003e is very simple: \u0026ldquo;did I fool the discriminator\u0026rdquo;: if \\(D\\) produces a large (close to \\(1\\)) output on the generated result\u0026mdash;indicating that it is indeed \u0026ldquo;fooled\u0026rdquo;\u0026mdash;our \\(log\\) will approach \\(0\\); whereas, if \\(D\\) produces a small (close to \\(0\\)) output on the generated result\u0026mdash;indicating that it correctly spotted the fake\u0026mdash;our \\(log\\) will produce a very negative value which creates high loss.\u003c/p\u003e\n\u003ch2 id=\"the-gan-training-loop\"\u003eThe GAN Training Loop\u003c/h2\u003e\n\u003cp\u003eLoss functions in place, we are almost ready to make the model. The thing that\u0026rsquo;s tricky about training a GAN is that we have to ensure that \u003cem\u003eboth\u003c/em\u003e the \u003cstrong\u003ediscriminator\u003c/strong\u003e and \u003cstrong\u003egenerator\u003c/strong\u003e are converging at the same exact time: ensuring that neither Capone nor Ness has \u003cem\u003edramatically\u003c/em\u003e better technology than the other. This requires a little bit of finesse on your part in terms of the training loop. Plus, our loss functions here are quite special, so their definitions will also need a little wrangling.\u003c/p\u003e\n\u003cp\u003eAt this point, though, I hope we are all pretty confident in how to structure the basics of a ML model. Instead of going over that again, let\u0026rsquo;s go over some of the differences in Python pseudo-code (code that doesn\u0026rsquo;t run, but to illustrate how you would write it)\u0026mdash;specially in four focus areas.\u003c/p\u003e\n\u003ch3 id=\"dataprep\"\u003eDataprep\u003c/h3\u003e\n\u003cp\u003eJust a short note here on GAN data prep. What\u0026rsquo;s the special thing about GANs? They are \u003cstrong\u003eself-supervised\u003c/strong\u003e\u0026mdash;meaning they make their own labels. 
Instead, all you need to provide is plenty of examples of the thing you want your model to generate.\u003c/p\u003e\n\u003cp\u003eAs such, your batch wouldn\u0026rsquo;t contain \u003ccode\u003ex_data\u003c/code\u003e, \u003ccode\u003ey_data\u003c/code\u003e, etc. Instead, your dataset code should look something of the flavor:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimage_grid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexample_data_for_the_gan_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edataset\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eTensorDataset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimage_grid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# only one argument!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDataLoader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edataset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebatch_size\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eshuffle\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou will notice that the \u003ccode\u003eTensorDataset\u003c/code\u003e here took only \u003cem\u003eone\u003c/em\u003e argument as input, as opposed to the usual 2: this is, as we discussed before, as product of the fact that our GAN only needs examples of the thing you want it to generate\u0026mdash;no labels needed (or possible!)\u003c/p\u003e\n\u003ch3 id=\"network-construction\"\u003eNetwork Construction\u003c/h3\u003e\n\u003cp\u003eOf course, a GAN consists of two different networks. 
Though the network construction is mostly arbitrary, there are some general constraints:\u003c/p\u003e\n\u003ch4 id=\"generator\"\u003egenerator\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003einput shape\u003c/strong\u003e: arbitrary, but takes exclusively random values as input; ideally you want this to be the same number of dimensions as the output\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput shape\u003c/strong\u003e: the \u003cem\u003eoutput shape\u003c/em\u003e of your network has to be the shape of one sample of the real data as the generator should generate something that looks like real data\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput activation\u003c/strong\u003e: whatever makes sense for the real data: if probabilities, then \u003ccode\u003esoftmax\u003c/code\u003e; if images, then \u003ccode\u003esigmoid\u003c/code\u003e (as normalized brightness), etc.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"discriminator\"\u003ediscriminator\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003einput shape\u003c/strong\u003e: the \u003cem\u003eoutput shape\u003c/em\u003e of the generator, or the shape of one real sample of data. (\u003cem\u003eThinking Break\u003c/em\u003e: WHY? as usual, pause and chat)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput shape\u003c/strong\u003e: \u003ccode\u003e(batch_size, 1)\u003c/code\u003e. 
We want to output a scalar between \\(0\\) (\u0026ldquo;probably fake\u0026rdquo;) and \\(1\\) (\u0026ldquo;probably real\u0026rdquo;) for every sample\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoutput activation\u003c/strong\u003e: \u003ccode\u003esigmoid\u003c/code\u003e to get those values actually between \\(0\\) and \\(1\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"network-initialization\"\u003eNetwork Initialization\u003c/h3\u003e\n\u003cp\u003eBecause the generator and discriminator are two different networks, they require different optimizers!\u003c/p\u003e\n\u003cp\u003eSo, we have to go about making them. This is fortunately pretty direct:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# initialize networks\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eGeneratorNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eDiscriminatorNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# initalize *two seperate 
optimizers*\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eAdam\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLR1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eAdam\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLR2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNothing out of the ordinary here, but a worthy reminder that you need 2. 
This will become important shortly.\u003c/p\u003e\n\u003ch3 id=\"training-loop\"\u003eTraining Loop\u003c/h3\u003e\n\u003cp\u003eThis is the main event, and probably the bit that most people trip up the most: the training loop. Let\u0026rsquo;s see a pseudocode implementation of one, and we will discuss how its structured.\u003c/p\u003e\n\u003cp\u003e\u003cem\u003eNote that we will be making some adjustments to our tried-and-true backprop logic.\u003c/em\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEPOCHS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebatch\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eiter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# train generator first\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e 
\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# compute + backprop generator loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# disconnect discriminator gradients\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# step and clear\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# now, train 
discriminator\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebatch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# compute + backprop discriminator loss\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# step and clear\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWoweee. Much to talk about. Let\u0026rsquo;s break it down.\u003c/p\u003e\n\u003ch4 id=\"scoring-on-fake-sample\"\u003eScoring on fake sample\u003c/h4\u003e\n\u003cp\u003eWe first generate a fake sample from the generator by first passing it random noise from \u003ccode\u003etorch.rand\u003c/code\u003e, then passing its output to the discriminator to get a group of scores.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"calculating-the-generator-loss\"\u003eCalculating the generator loss\u003c/h4\u003e\n\u003cp\u003eNext up, we will calculate the generator loss on the score that the discriminator gave for that fake sample we generated earlier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL_{g}(\\bold{z}_{i}) = -\\log (D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand hence:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cspan class=\"underline\"\u003eThinking 
break!\u003c/span\u003e: why does implementing \u003ccode\u003e(-torch.log(disc_score))\u003c/code\u003e accomplish the same thing as taking \\(-\\log (D(G(\\bold{z}_{i})))\\)? Specifically, how is \u003ccode\u003edisc_score\u003c/code\u003e calculated in our example?\u003c/p\u003e\n\u003ch4 id=\"the-generator-backprop-step\"\u003eThe generator backprop step\u003c/h4\u003e\n\u003cp\u003eFor all that drilling we did of BACKPROP! STEP! RESET!, the next step may feel sacrilegious:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egenerator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# disconnect discriminator gradients\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# step and clear\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003egen_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cem\u003eWhat is happening here?\u003c/em\u003e Let\u0026rsquo;s take it one step at a time.\u003c/p\u003e\n\u003cp\u003eFirst, we call \u003ccode\u003egenerator_loss.backward()\u003c/code\u003e to backprop the loss; nothing wrong here. But then, against all odds, we call \u003ccode\u003e.zero_grad()\u003c/code\u003e on the \u003cstrong\u003ediscriminator\u003c/strong\u003e optimizer. What gives?\u003c/p\u003e\n\u003cp\u003eRecall that, in this case, we are training the \u003cstrong\u003egenerator\u003c/strong\u003e; as the loss-function literally asks the \u003cstrong\u003ediscriminator\u003c/strong\u003e to be wrong, we mustn\u0026rsquo;t be updating the discriminator using the gradients computed against this function; instead, we simply want the generator to be updated to better fool the \u003cstrong\u003ediscriminator\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore, we immediately zero out all the gradients on the \u003cstrong\u003ediscriminator\u003c/strong\u003e to prevent this step from updating the \u003cstrong\u003ediscriminator\u003c/strong\u003e with the \u0026ldquo;fooling\u0026rdquo; loss function; and proceed to update the \u003cstrong\u003egenerator\u003c/strong\u003e weights as usual.\u003c/p\u003e\n\u003ch4 id=\"scoring-on-detached-fake-sample-and-real-sample\"\u003eScoring on detached fake sample and real sample\u003c/h4\u003e\n\u003cp\u003eNext up, training the \u003cstrong\u003ediscriminator\u003c/strong\u003e. 
We first obtain scores from the discriminator for a real sample and a fake sample separately:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBATCH_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYOUR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINPUT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSHAPE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eHERE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003edisc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebatch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou should notice that the code here for obtaining the fake sample is almost identical to the one before; except, we are calling this \u003ccode\u003e.detach()\u003c/code\u003e against the generator output. This is very functionally similar to the \u0026ldquo;calling \u003ccode\u003e.zero_grad()\u003c/code\u003e immediately\u0026rdquo; move we made earlier; called \u003ccode\u003e.detach()\u003c/code\u003e asks PyTorch to treat whatever tensor there as a constant, and not propagate gradients any more backwards into the \u003cstrong\u003egenerator\u003c/strong\u003e, which in this case we do not want to change as we are optimizing the \u003cstrong\u003ediscriminator\u003c/strong\u003e.\u003c/p\u003e\n\u003ch4 id=\"calculating-the-discriminator-loss\"\u003eCalculating the discriminator loss\u003c/h4\u003e\n\u003cp\u003eWith all the pieces in place, this is again just a very directly implementation of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL_{d} (\\bold{x}_{i}, \\bold{z}_{i}) = -\\log D(\\bold{x}_{i}) - \\log (1- D(G(\\bold{z}_{i})))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein code.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_true\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisc_score_false\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"normal-backprop\"\u003eNormal backprop\u003c/h4\u003e\n\u003cp\u003eBecause we ran \u003ccode\u003e.detach()\u003c/code\u003e before on the \u003cstrong\u003egenerator\u003c/strong\u003e output, the \u003cstrong\u003egenerator\u003c/strong\u003e is treated as a constant through this second loss function; as such, our backpropegation step will normally update the \u003cstrong\u003ediscriminator\u003c/strong\u003e\u0026rsquo;s weights without any fuss. 
We therefore go back to our tried-and-true formula:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ediscriminator_loss\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisc_optim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTada! That\u0026rsquo;s it; the GAN training loop.\u003c/p\u003e\n\u003ch2 id=\"final-thoughts-and-unit-challenge\"\u003eFinal Thoughts and Unit Challenge\u003c/h2\u003e\n\u003cp\u003eSorry for the very theoretically dense unit; please don\u0026rsquo;t hesitate to flag us down if any questions take place. 
To leave you, here are a few final tips and tricks for making GANs.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eIf your model doesn\u0026rsquo;t work, try \u003cstrong\u003epretraining\u003c/strong\u003e the \u003cstrong\u003ediscriminator\u003c/strong\u003e: letting Eliot Ness get a bit of a head start by training the discriminator to recognize noise from real images; to do this, just don\u0026rsquo;t run the code that updates the generator weights.\u003c/li\u003e\n\u003cli\u003eGANs are known to perform something called \u003cstrong\u003emode collapse\u003c/strong\u003e: whereby, instead of reaching \u003cstrong\u003eNash equilibrium\u003c/strong\u003e, one of the two networks crash while the other one completely converges. One attempt to solve this is something called \u003cstrong\u003eWassterstein Loss\u003c/strong\u003e, which is \u003ca href=\"https://developers.google.com/machine-learning/gan/loss#wasserstein-loss\"\u003ediscussed here\u003c/a\u003e (\u003ca href=\"https://developers.google.com/machine-learning/gan/loss#wasserstein-loss\"\u003ehttps://developers.google.com/machine-learning/gan/loss#wasserstein-loss\u003c/a\u003e). One important note, however, is that using this loss function makes your network \u003cem\u003etechnically\u003c/em\u003e not a GAN anymore (as the \u003cstrong\u003ediscriminator\u003c/strong\u003e will not be actually usefully discriminating, instead acting as a \u0026ldquo;\u003cstrong\u003ecritic\u003c/strong\u003e\u0026rdquo; for the generator only producing non-interpretable scores), but it has shown improved performance for the \u003cstrong\u003egenerator\u003c/strong\u003e only.\u003c/li\u003e\n\u003cli\u003eGANs are notoriously hard to make work. 
\u003ca href=\"https://developers.google.com/machine-learning/gan/problems\"\u003eSee this whole page from Google\u003c/a\u003e (\u003ca href=\"https://developers.google.com/machine-learning/gan/loss\"\u003ehttps://developers.google.com/machine-learning/gan/loss\u003c/a\u003e) about the various ways GANs can fail and possible strategies to remedy them. \u003cstrong\u003eDo not\u003c/strong\u003e be scared if your model doesn\u0026rsquo;t work immediately or even after copious tuning.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eOk, onto the challenge: make a GAN! There are two variants of this:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eEasier \u0026mdash; use a pair of \u003cstrong\u003edense neural networks\u003c/strong\u003e to make a GAN to generate valid series of \\(5\\) numbers which we explored in the beginning of this class \\([a,b,c,c+1,c+2]\\)\u003c/li\u003e\n\u003cli\u003eHarder \u0026mdash; use a pair of \u003cstrong\u003econvolutional neural networks\u003c/strong\u003e to make a GAN to generate \u003ca href=\"https://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz\"\u003ethese nice pictures of pets\u003c/a\u003e (\u003ca href=\"https://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz\"\u003ehttps://thor.robots.ox.ac.uk/~vgg/data/pets/images.tar.gz\u003c/a\u003e). Sorry that this is not mountain goats: unfortunately, a dataset large enough is not available for this task :/\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eGood luck, and have fun!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_it_takes_two/","tags":["writing","aml"],"title":"AML: It Takes Two"},{"categories":null,"contents":"Woof. As I begin to write this I should add that this unit is going to be conceptually dense. 
Though we are teaching one particular algorithm (incidentally, named, REINFORCE), the world of reinforcement learning is build by one, if not many, very advanced treatments in maths.\nSo if anything, I would focus on getting the conceptual flavor of how these problems are formulated and discuses. If you can be along for the mathematical and algorithmic journey, then even better \u0026mdash; but by no means required or expected\u0026hellip; There\u0026rsquo;s still lots for all of us to learn together.\nSpeaking of college level classes, I loved the detailed and accessible overview of Reinforcement Learning methods by Professors Charles Isbell and Michael Littlman from Georgia Tech CoC. If you find yourself gravitating towards the topic of this unit, go check them out:\nhttps://omscs.gatech.edu/cs-7642-reinforcement-learning-course-videos\nOk. Let\u0026rsquo;s dive in.\nMotivation We are used to a clear, differentiable loss function. One particular exercise in class we do a lot is to shout out a problem, and think about its loss function:\n\u0026ldquo;classifying Pokemon!\u0026rdquo; \u0026hellip; \u0026ldquo;cross entropy!\u0026rdquo; \u0026ldquo;generating stock price!\u0026rdquo; \u0026hellip; \u0026ldquo;MSE!\u0026rdquo; \u0026ldquo;making pictures of rice!\u0026rdquo; \u0026hellip; \u0026ldquo;GAN non-saturating loss!\u0026rdquo; and so on. 
Regardless of the classification/regression difference, you will note that these functions are all of the shape:\n\\begin{align} \u0026amp;f(\\hat{y}, y) = \\text{single float value} \\end{align}\nMeaning, it takes two vectors\u0026mdash;the output (\u0026ldquo;prediction\u0026rdquo;, \\(\\hat{y}\\)) of the network, and the desired output (\u0026ldquo;target\u0026rdquo;, \\(y\\)) in your training data, and produces (sometimes with much mathematical gymnastics) a single scalar value representing which we try to optimize to be lower.\nNote that, regardless of supervised learning (like Pokemon classification; we have input, desired targets, and actual output) or unsupervised learning (like GAN rice generation; we have only the desired targets and actual output), we have the desired targets in hand. We know what the model is supposed to do (i.e. have many examples of correct behavior), and are just teaching the model to do so one way or other.\nBut what if\u0026hellip;. we don\u0026rsquo;t know the correct behavior of the model? Can you brainstorm some tasks that would very well might want to automate using ML, but can\u0026rsquo;t provide precise labels for the desired action?\n\u0026hellip;\nTake, for instance, the task of teaching this poor stick figure how to stand up:\nFigure 1: aw so sad\nyou are given a list of forces currently hitting the figure, and you are to produce a list of forces the figure\u0026rsquo;s limbs should produce.\nOf course you can\u0026rsquo;t know precisely the labels at every given moment: there are no \u0026ldquo;best\u0026rdquo; or, arguably, even a \u0026ldquo;correct\u0026rdquo; strategy for standing the figure up. There\u0026rsquo;s no labels which you can use to even begin to approach this task!\nWhat to do?\nIn come Reinforcement Learning (RL)\nOk, this is where the math will begin. 
I encourage you to take a piece of paper and start writing down each symbol we define together, and refer to that piece of paper copiously to understand the expressions.\nIf you want to learn this more, the conceptual basis we are working with is called policy gradient, specifically the REINFORCE algorithm. This is not even close to being the only way to approach the Reinforcement Learning task; but its one fairly interesting and successful approach.\nThe Environment: Agent, State, Action, and Policy Three variables underlie the basics of Reinforcement Learning:\nstate \\(s_{t}\\): the \u0026ldquo;situation\u0026rdquo; of the environment, what can be \u0026ldquo;observed\u0026rdquo;; for our example above, this looks like the forces on each limb of our humanoid. action \\(a\\): a certain perturbation one can do to the agent which will influence its state; for our example, this looks like moving (\u0026ldquo;translating\u0026rdquo;/\u0026ldquo;applying force on\u0026rdquo;) one or many limbs. policy \\(\\pi\\): the policy is a function which takes the state as input, and produces a probability distribution (think \u0026ldquo;softmax\u0026rdquo;) over all the actions one could choose. We will talk extensively about this shortly. agent: a general term describing the actual thing being controlled; for instance, our humanoid. episode: an entire group of states, starting at the beginning and continuing for instance, for a fixed number of states or until a certain end is reached (for instance, for the humanoid walking task, when it falls). IMPORTANT NOTE: policy is as function \\(\\pi(s_{t})\\), literally a function named pi. It has nothing to do with the ratio between the radius and circumference of a circle. 
It\u0026rsquo;s just called pi\u0026hellip; Unfortunately, we are working to stick to the language used by current literature, but sometimes their symbol choice is rather deranged.\nReward In lieu of a loss function, Reinforcement Learning is a class of models that learn from a numerical signal called reward. The reward function typically looks like this:\n\\begin{equation} r_{t}(s_{t},a_{t}) = \\text{single float value}\\ (-\\infty, +\\infty) \\end{equation}\nInstead of calculating the difference between the desired and actual output of the model, the reward signal scores how good taking a certain action is in an environment. It takes two vectors as input: the state and an action on the state, to produce a certain score.\nUnlike what we are used to with the loss, this reward value is not differentiable w.r.t. the parameters of the network! The action is a sample from the distribution; so this score can be generated however you\u0026rsquo;d like. Furthermore, unlike what we are used to with loss, a higher reward value means a better action.\nCumulative Discounted Reward Note again the expression for that reward statement:\n\\begin{equation} r_{t}(s_{t}, a_{t}) \\end{equation}\neach of these variables are parameterized by this subscript $t$\u0026mdash;meaning reward is calculated per time! This actually presents us a problem to describe the overall behavior of our agent. Pause and think why this may be.\n\u0026hellip;\nFor instance, the act of \u0026ldquo;standing up\u0026rdquo; often requires multiple steps; many of which honestly don\u0026rsquo;t contribute at all to the act of standing up until many steps later! 
For instance, the act of propping up one\u0026rsquo;s hands to the ground\u0026mdash;which actually lowers your center of gravity, and hence naively should get a negative reward\u0026mdash;is actually critical in being able to stand up well.\nIf we train a model (somehow, ignoring the details for now) to maximize \\(r_{t}\\), then we will get an instant gratification machine: meaning, its pretty useless for any just a tad-bit complex task!\nTo deal with this, we need to introduce the idea of a trajectory (\\(\\tau\\)). A trajectory is a list of state-action pairs generated by the same exact policy (i.e. no learning at all) just playing a game out to completion\u0026mdash;i.e. until the end of the episode.\nThat is:\n\\begin{equation} \\tau = [(s_{0}, \\pi(s_{0})), \\dots, (s_{n}, \\pi(s_{n}))] = [(s_{0}, a_{0}), \\dots, (s_{n}, a_{n})] \\end{equation}\nWe then define a new-and-improved reward function \\(R_{t}(\\tau)\\) which models not just how good our policy is right now, but how good WILL our policy be given these set of actions.\nSpecifically, at every timestamp:\n\\begin{equation} R_{t}(\\tau) = r_{t}(s_{t}, a_{t}) + \\gamma r_{t+1}(s_{t+1}, a_{t+1})+ \\gamma^{2} r_{t+2}(s_{t+2}, a_{t+2}) + \\dots \\end{equation}\nwhere, \\(0 \\leq \\gamma \\leq 1\\) is a hyperparameter called a discount factor controlling how much more the current reward matters.\nWoof, the math here looks a bit scary; let\u0026rsquo;s break it down. 
We are defining a function $Rt(τ)$\u0026mdash;taking \\(\\tau\\) as input, meaning this function actually knows all of the model\u0026rsquo;s future behavior as well as current ones; each term of this function \\(R\\) multiplies \\(\\gamma\\) a certain number of times to the instantaneous reward at that point.\nThis function, then, essentially adds up all the future reward taking the current action will eventually lead to\u0026mdash;\u0026ldquo;how much reward does choosing this path afford you\u0026rdquo;\u0026mdash;discounting rewards earned in the future with a certain factor \\(\\gamma\\) because those are subject to change based on your agent\u0026rsquo;s future decisions. Things that are more the future gets discounted harder, by \\(\\gamma^{n}\\).\nThis expression for \\(R_{t}(\\tau)\\) is called the cumulative discounted reward, or \u0026ldquo;the reward\u0026rdquo; for short. When we refer to the reward in the rest of this write-up, this is probably the expression you are looking for.\nPolicy Gradient Theorem The policy gradient theorem is unfortunately not going to be very well motivated in the time that we have together. 
If you are curious, the proof, and some more discussion, can be found here or in my notes here.\nFor now, let\u0026rsquo;s just skip to the result\u0026hellip; The loss function objective \\(J\\) which we can use to optimize a neural network, given a set of non-connected reward signals and a policy to optimize \\(\\pi\\), is:\n\\begin{equation} -\\sum_{t=0} \\log \\pi_{\\theta} (a_{t} | s_{t}) R_{t}(\\tau) \\end{equation}\nwhere, \\(\\theta\\) are the weights to policy \\(\\pi\\), and the rest are usual symbols defined above.\nLet\u0026rsquo;s break it down.\nThe rest of this is a summation over all time of the trajectory; meaning you have to first generate the entire trajectory \\(\\tau\\) and then add this value per slice:\n\\(\\pi_{\\theta}(a_{t}|s_{t})\\): this is the probability (often called \u0026ldquo;confidence\u0026rdquo;) of the model to take action \\(a_{t}\\) at state \\(s_{t}\\); for a discrete set of actions (i.e. choosing/classification), we already know how to do this: torch.argmax. The code example below/in class explores how to do this for a continuous sample. \\(\\log \\pi_{t}(a_{t}|s_{t})\\): we want to take the log of this confidence score the model produced: bigger \u0026ldquo;confident\u0026rdquo; number, smaller magnitude log, smaller error \\(R_{t}(\\tau)\\) the Cumulative Discounted Reward from that timestamp on, as we discussed before The sharp-eyed among you may notice that this function is very similarly shaped as cross-entropy: except you swap out the ground truth \\(y\\) for the cumulative reward \\(R_{t}(\\tau)\\). Indeed that is the case! In fact, many of the same motivations apply to both functions.\nREINFORCE Loss function in hand, it\u0026rsquo;s time to actually perform the optimization. 
There\u0026rsquo;s three main steps to actually perform the REINFORCE algorithm optimization:\nPlay the game: generating entire episode worth of \\(s_{t}, a_{t}, r_{t}\\) using the same exact unoptimized policy \\(\\pi\\), storing a full trajectory \\(\\tau\\) Calculate the reward: calculate, using the discounting equation above, \\(R_{t}\\) from each \\(r_{t}\\). Remember that each \\(R_{t}\\) is a reward comprised of the current reward, plus \\(\\gamma\\) raised to a certain power to discount the future rewards \\(r_{t+n}\\). Replay and backprop: Compute the actual error above for each timeframe, backpropagating them all but don\u0026rsquo;t change the weights (i.e. call .backward() but not .step()) Change the weights all at once: call .step() and make our model better! As we have no fixed-length data, there are no epochs to this setup; we will instead specify a number of times we want to run the above steps\u0026mdash;meaning we have a number of episodes you can tune while training the model.\nNext Steps Apart from the bunch of theory here, there still remain a lot of practical questions in how to make all this happen in PyTorch. We hope to discuss this together in class, and explore the wonderful set of tools Gym\u0026mdash;a RL state/reward calculation library\u0026mdash;can do for us!\nTo get started on this discussion, here\u0026rsquo;s one implementation of the humanoid-standup task we can be working from: https://github.com/Jemoka/demo_amlmod4rl/blob/master/main.py\n","html":"\u003cp\u003eWoof. As I begin to write this I should add that \u003cstrong\u003ethis unit is going to be conceptually dense\u003c/strong\u003e. Though we are teaching one particular algorithm (incidentally, named, REINFORCE), the world of reinforcement learning is built by one, if not many, very advanced treatments in maths.\u003c/p\u003e\n\u003cp\u003eSo if anything, I would focus on getting the conceptual flavor of how these problems are formulated and discussed. 
If you can be along for the mathematical and algorithmic journey, then even better \u0026mdash; but by no means required or expected\u0026hellip; There\u0026rsquo;s still lots for all of us to learn together.\u003c/p\u003e\n\u003cp\u003eSpeaking of college level classes, I \u003cem\u003eloved\u003c/em\u003e the detailed and accessible overview of Reinforcement Learning methods by Professors Charles Isbell and Michael Littlman from Georgia Tech CoC. If you find yourself gravitating towards the topic of this unit, go check them out:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://omscs.gatech.edu/cs-7642-reinforcement-learning-course-videos\"\u003ehttps://omscs.gatech.edu/cs-7642-reinforcement-learning-course-videos\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eOk. Let\u0026rsquo;s dive in.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eWe are used to a clear, \u003cstrong\u003edifferentiable\u003c/strong\u003e loss function. One particular exercise in class we do a lot is to shout out a problem, and think about its loss function:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;classifying Pokemon!\u0026rdquo; \u0026hellip; \u0026ldquo;cross entropy!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;generating stock price!\u0026rdquo; \u0026hellip; \u0026ldquo;MSE!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;making pictures of rice!\u0026rdquo; \u0026hellip; \u0026ldquo;GAN non-saturating loss!\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand so on. 
Regardless of the classification/regression difference, you will note that these functions are all of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;f(\\hat{y}, y) = \\text{single float value}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eMeaning, it takes \u003cstrong\u003etwo vectors\u003c/strong\u003e\u0026mdash;the \u003cem\u003eoutput\u003c/em\u003e (\u0026ldquo;prediction\u0026rdquo;, \\(\\hat{y}\\)) of the network, and the \u003cem\u003edesired output\u003c/em\u003e (\u0026ldquo;target\u0026rdquo;, \\(y\\)) in your training data, and produces (sometimes with much mathematical gymnastics) a single scalar value representing which we try to optimize to be lower.\u003c/p\u003e\n\u003cp\u003eNote that, regardless of \u003cstrong\u003esupervised learning\u003c/strong\u003e (like Pokemon classification; we have input, desired targets, and actual output) or \u003cstrong\u003eunsupervised learning\u003c/strong\u003e (like GAN rice generation; we have only the desired targets and actual output), we \u003cem\u003ehave the desired targets\u003c/em\u003e in hand. We \u003cem\u003eknow\u003c/em\u003e what the model is supposed to do (i.e. have many examples of correct behavior), and are just teaching the model to do so one way or other.\u003c/p\u003e\n\u003cp\u003eBut what if\u0026hellip;. we \u003cem\u003edon\u0026rsquo;t\u003c/em\u003e know the correct behavior of the model? 
Can you brainstorm some tasks that would very well might want to automate using ML, but can\u0026rsquo;t provide precise labels for the desired action?\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eTake, for instance, the task of \u003ca href=\"https://gymnasium.farama.org/environments/mujoco/humanoid_standup/\"\u003eteaching this poor stick figure how to stand up\u003c/a\u003e:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-30_12-37-54_screenshot.png\"\n alt=\"Figure 1: aw so sad\"\u003e\u003cfigcaption\u003e\n \u003cp\u003e\u003cspan class=\"figure-number\"\u003eFigure 1: \u003c/span\u003eaw so sad\u003c/p\u003e\n \u003c/figcaption\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eyou are given a list of forces currently hitting the figure, and you are to produce a list of forces the figure\u0026rsquo;s limbs should produce.\u003c/p\u003e\n\u003cp\u003eOf course you can\u0026rsquo;t know precisely the labels at every given moment: there are no \u0026ldquo;best\u0026rdquo; or, arguably, even a \u0026ldquo;correct\u0026rdquo; strategy for standing the figure up. There\u0026rsquo;s no labels which you can use to even begin to approach this task!\u003c/p\u003e\n\u003cp\u003eWhat to do?\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eIn come Reinforcement Learning (RL)\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eOk, this is where the math will begin. I encourage you to take a piece of paper and start writing down each symbol we define together, and refer to that piece of paper copiously to understand the expressions.\u003c/p\u003e\n\u003cp\u003eIf you want to learn this more, the conceptual basis we are working with is called \u003cstrong\u003epolicy gradient\u003c/strong\u003e, specifically the \u003cstrong\u003eREINFORCE\u003c/strong\u003e algorithm. 
This is \u003cem\u003enot even close\u003c/em\u003e to being the only way to approach the Reinforcement Learning task; but its one fairly interesting and successful approach.\u003c/p\u003e\n\u003ch2 id=\"the-environment-agent-state-action-and-policy\"\u003eThe Environment: Agent, State, Action, and Policy\u003c/h2\u003e\n\u003cp\u003eThree variables underlie the basics of \u003cstrong\u003eReinforcement Learning\u003c/strong\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003estate\u003c/strong\u003e \\(s_{t}\\): the \u0026ldquo;situation\u0026rdquo; of the \u003cstrong\u003eenvironment\u003c/strong\u003e, what can be \u0026ldquo;observed\u0026rdquo;; for our example above, this looks like the forces on each limb of our humanoid.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eaction\u003c/strong\u003e \\(a\\): a certain perturbation one can do to the agent which will influence its \u003cstrong\u003estate\u003c/strong\u003e; for our example, this looks like moving (\u0026ldquo;translating\u0026rdquo;/\u0026ldquo;applying force on\u0026rdquo;) one or many limbs.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003epolicy\u003c/strong\u003e \\(\\pi\\): the \u003cstrong\u003epolicy\u003c/strong\u003e is a function which takes the \u003cstrong\u003estate\u003c/strong\u003e as input, and produces a probability distribution (think \u0026ldquo;softmax\u0026rdquo;) over all the \u003cstrong\u003eactions\u003c/strong\u003e one could choose. 
We will talk extensively about this shortly.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eagent\u003c/strong\u003e: a general term describing the actual thing being controlled; for instance, our humanoid.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eepisode\u003c/strong\u003e: an entire group of \u003cstrong\u003estates\u003c/strong\u003e, starting at the beginning and continuing for instance, for a fixed number of \u003cstrong\u003estates\u003c/strong\u003e or until a certain end is reached (for instance, for the humanoid walking task, when it falls).\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eIMPORTANT NOTE: policy is as function \\(\\pi(s_{t})\\), literally a \u003cem\u003efunction named pi\u003c/em\u003e. It has nothing to do with the ratio between the radius and circumference of a circle. Its \u003cem\u003ejust called pi\u0026hellip;\u003c/em\u003e Unfortunately, we are working to stick to the language used by current literature, but sometimes their symbol choice is rather deranged.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"reward\"\u003eReward\u003c/h2\u003e\n\u003cp\u003eIn lieu of a loss function, \u003cstrong\u003eReinforcement Learning\u003c/strong\u003e is a class of models that learn from a numerical signal called \u003cstrong\u003ereward\u003c/strong\u003e. The reward function typically looks like this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{t}(s_{t},a_{t}) = \\text{single float value}\\ (-\\infty, +\\infty)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eInstead of calculating the difference between the desired and actual output of the model, the \u003cstrong\u003ereward\u003c/strong\u003e signal scores \u003cem\u003ehow good taking a certain action is\u003c/em\u003e in an environment. 
It takes two vectors as input: the \u003cstrong\u003estate\u003c/strong\u003e and an \u003cstrong\u003eaction\u003c/strong\u003e on the state, to produce a certain score.\u003c/p\u003e\n\u003cp\u003eUnlike what we are used to with the loss, this \u003cstrong\u003ereward\u003c/strong\u003e value is \u003cem\u003enot\u003c/em\u003e differentiable w.r.t. the parameters of the network! The action is a \u003cem\u003esample\u003c/em\u003e from the distribution; so this score can be generated however you\u0026rsquo;d like. Furthermore, unlike what we are used to with \u003cstrong\u003eloss\u003c/strong\u003e, a \u003cstrong\u003ehigher\u003c/strong\u003e \u003cstrong\u003ereward\u003c/strong\u003e value means a better action.\u003c/p\u003e\n\u003ch2 id=\"cumulative-discounted-reward\"\u003eCumulative Discounted Reward\u003c/h2\u003e\n\u003cp\u003eNote again the expression for that reward statement:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{t}(s_{t}, a_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeach of these variables are parameterized by this subscript $t$\u0026mdash;meaning reward is calculated \u003cem\u003eper time!\u003c/em\u003e This actually presents us a problem to describe the \u003cem\u003eoverall\u003c/em\u003e behavior of our agent. Pause and think why this may be.\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eFor instance, the act of \u0026ldquo;standing up\u0026rdquo; often require multiple steps; many of which honestly doesn\u0026rsquo;t contribute at all to the act of standing up until many steps later! 
For instance, the act of propping up one\u0026rsquo;s hands to the ground\u0026mdash;which actually \u003cem\u003elowers\u003c/em\u003e your center of gravity, and hence naively should get a negative reward\u0026mdash;is actually critical in being able to stand up well.\u003c/p\u003e\n\u003cp\u003eIf we train a model (somehow, ignoring the details for now) to maximize \\(r_{t}\\), then we will get an \u003cem\u003einstant gratification machine\u003c/em\u003e: meaning, its pretty useless for any just a tad-bit complex task!\u003c/p\u003e\n\u003cp\u003eTo deal with this, we need to introduce the idea of a \u003cstrong\u003etrajectory\u003c/strong\u003e (\\(\\tau\\)). A \u003cstrong\u003etrajectory\u003c/strong\u003e is a list of state-action pairs generated by the same exact policy (i.e. no learning at all) just playing a game out to completion\u0026mdash;i.e. until the end of the \u003cstrong\u003eepisode\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tau = [(s_{0}, \\pi(s_{0})), \\dots, (s_{n}, \\pi(s_{n}))] = [(s_{0}, a_{0}), \\dots, (s_{n}, a_{n})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe then define a new-and-improved reward function \\(R_{t}(\\tau)\\) which models not just \u003cem\u003ehow good our policy is right now\u003c/em\u003e, but \u003cem\u003ehow good WILL our policy be given these set of actions\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eSpecifically, at every timestamp:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_{t}(\\tau) = r_{t}(s_{t}, a_{t}) + \\gamma r_{t+1}(s_{t+1}, a_{t+1})+ \\gamma^{2} r_{t+2}(s_{t+2}, a_{t+2}) + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(0 \\leq \\gamma \\leq 1\\) is a hyperparameter called a \u003cstrong\u003ediscount factor\u003c/strong\u003e controlling how much more the current reward matters.\u003c/p\u003e\n\u003cp\u003eWoof, the math here looks a bit scary; let\u0026rsquo;s break it down. 
We are defining a function $R\u003csub\u003et\u003c/sub\u003e(τ)$\u0026mdash;taking \\(\\tau\\) as input, meaning this function actually knows all of the model\u0026rsquo;s future behavior as well as current ones; each term of this function \\(R\\) multiplies \\(\\gamma\\) a certain number of times to the instantaneous reward at that point.\u003c/p\u003e\n\u003cp\u003eThis function, then, essentially adds up all the \u003cem\u003efuture reward taking the current action will eventually lead to\u003c/em\u003e\u0026mdash;\u0026ldquo;how much reward does choosing this path afford you\u0026rdquo;\u0026mdash;discounting rewards earned in the future with a certain factor \\(\\gamma\\) because those are subject to change based on your \u003cstrong\u003eagent\u003c/strong\u003e\u0026rsquo;s future decisions. Things that are more the future gets discounted harder, by \\(\\gamma^{n}\\).\u003c/p\u003e\n\u003cp\u003eThis expression for \\(R_{t}(\\tau)\\) is called the \u003cstrong\u003ecumulative discounted reward\u003c/strong\u003e, or \u0026ldquo;the reward\u0026rdquo; for short. When we refer to the reward in the rest of this write-up, this is probably the expression you are looking for.\u003c/p\u003e\n\u003ch2 id=\"policy-gradient-theorem\"\u003ePolicy Gradient Theorem\u003c/h2\u003e\n\u003cp\u003eThe policy gradient theorem is unfortunately not going to be very well motivated in the time that we have together. 
If you are curious, the proof, and some more discussion, can be found \u003ca href=\"https://lilianweng.github.io/posts/2018-04-08-policy-gradient/#proof-of-policy-gradient-theorem\"\u003ehere\u003c/a\u003e or in my notes \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor now, let\u0026rsquo;s just skip to the result\u0026hellip; The loss function objective \\(J\\) with which we can use to optimize a neural network, given a set of \u003cem\u003enon-connected\u003c/em\u003e \u003cstrong\u003ereward\u003c/strong\u003e signals and a policy to optimize \\(\\pi\\), is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\sum_{t=0} \\log \\pi_{\\theta} (a_{t} | s_{t}) R_{t}(\\tau)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\theta\\) are the \u003cstrong\u003eweights\u003c/strong\u003e to \u003cstrong\u003epolicy\u003c/strong\u003e \\(\\pi\\), and the rest are usual symbols defined above.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s break it down.\u003c/p\u003e\n\u003cp\u003eThe rest of this is a summation over all time of the trajectory; meaning you have to first generate the entire trajectory \\(\\tau\\) first and then add this value per slice:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\pi_{\\theta}(a_{t}|s_{t})\\): this is the \u003cem\u003eprobability\u003c/em\u003e (often called \u0026ldquo;confidence\u0026rdquo;) of the model to take action \\(a_{t}\\) at state \\(s_{t}\\); for a discrete set of actions (i.e. choosing/classification), we already know how to do this: \u003ccode\u003etorch.argmax\u003c/code\u003e. 
The code example below/in class explores how to do this for a continuous sample.\u003c/li\u003e\n\u003cli\u003e\\(\\log \\pi_{t}(a_{t}|s_{t})\\): we want to take the log of this confidence score the model produced: bigger \u0026ldquo;confident\u0026rdquo; number, smaller magnitude log, smaller error\u003c/li\u003e\n\u003cli\u003e\\(R_{t}(\\tau)\\) the \u003cstrong\u003eCumulative Discounted Reward\u003c/strong\u003e from that timestamp on, as we discussed before\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe sharp-eyed among you may notice that this function is very similarly shaped as cross-entropy: except you swap out the ground truth \\(y\\) for the cumulative reward \\(R_{t}(\\tau)\\). Indeed that is the case! In fact, much of the similar motivations apply both functions.\u003c/p\u003e\n\u003ch2 id=\"reinforce\"\u003eREINFORCE\u003c/h2\u003e\n\u003cp\u003eLoss function in hand, its time to actual perform the actual optimization. There\u0026rsquo;s three main steps to actually perform the REINFORCE algorithm optimization:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003ePlay the game\u003c/strong\u003e: generating \u003cem\u003eentire \u003cstrong\u003eepisode\u003c/strong\u003e\u003c/em\u003e worth of \\(s_{t}, a_{t}, r_{t}\\) using the same exact unoptimized policy \\(\\pi\\), storing a full trajectory \\(\\tau\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eCalculate the reward\u003c/strong\u003e: calculate, using the discounting equation above, \\(R_{t}\\) from each \\(r_{t}\\). Remember that each \\(R_{t}\\) is a reward comprised of the current reward, plus \\(\\gamma\\) raised to a certain power to discount the future rewards \\(r_{t+n}\\).\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eReplay and backprop\u003c/strong\u003e: Compute the actual error above for each timeframe, backpropegating them all but \u003cem\u003edon\u0026rsquo;t change the weights\u003c/em\u003e (i.e. 
call \u003ccode\u003e.backward()\u003c/code\u003e but not \u003ccode\u003e.step()\u003c/code\u003e)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eChange the weights all at once\u003c/strong\u003e: call \u003ccode\u003e.step()\u003c/code\u003e and make our model better!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAs we have no fixed-length data, there are no \u003cstrong\u003eepochs\u003c/strong\u003e to this setup.; we will instead specify a number of times we want to run the above steps\u0026mdash;meaning we have a number of \u003cstrong\u003eepisodes\u003c/strong\u003e you can tune while training the model.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eApart from the bunch of theory here, there still remain a lot of practical questions in how to make all this happen in PyTorch. We hope to discuss this together in class, and explore the wonderful set of tools Gym\u0026mdash;a RL state/reward calculation library\u0026mdash;can do for us!\u003c/p\u003e\n\u003cp\u003eTo get started on this discussion, here\u0026rsquo;s one implementation of the humanoid-standup task we can be working from: \u003ca href=\"https://github.com/Jemoka/demo_amlmod4rl/blob/master/main.py\"\u003ehttps://github.com/Jemoka/demo_amlmod4rl/blob/master/main.py\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_reinforce/","tags":["writing","aml"],"title":"AML: REINFORCE(ment learning)"},{"categories":null,"contents":"Welcome back! I think, over the last few days, we have been hyping up convolutional neural networks enough such that you are probably ready to dive right in. So\u0026hellip; Let\u0026rsquo;s, uh, motivate it first!\nWhy do we use a CNN? Let\u0026rsquo;s think of a toy problem to play with. 
Given a pattern made using two colours (let\u0026rsquo;s name them a and b, or perhaps black and white), let\u0026rsquo;s classify whether it is the \u0026ldquo;zebra\u0026rdquo; pattern or the \u0026ldquo;checkerboard\u0026rdquo; pattern.\nZebra\u0026mdash;aligned stripes:\na b a b a b a b a b a b a b a b Checkerboard\u0026mdash;alternating stripes:\na b a b b a b a a b a b b a b a We are already familiar with one neural-network architecture: stacked linear layers, also known as deep neural networks. If we are trying to process these two input samples for a linear layer, what would we do?\n\u0026hellip;\nWell, we would take each of the figures, and flatten it into a long row. Then, feed it into a layer of \\(4 \\times 4 = 16\\) input neurons.\nWhat would that look like? Well; let \\(a=0, b=1\\):\nzebra_sample = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] chessboard_sample = [0,1,0,1,1,0,1,0,0,1,0,1,1,0,1,0] Without looking very closely, those two very different patterns seem to yield pretty similar input samples! A dense neural network needs to fit very well to notice the numerical trick of checking if two \\(1\\) or two \\(0\\) are next. That\u0026rsquo;s not good\u0026hellip; A human can spot the difference in the original, 2D figure very obviously!\nENTER CNNs\nWhat is a CNN? The take-home-message from the previous simple example is that 2D structures lose information when they are flattened. So, CNNs\u0026mdash;ultimately\u0026mdash;offer a way to process this 2D structural information with a neural network without flattening immediately. 
Generally, a CNN takes the following structure:\n2D-input fed into the model convolutional layers process small sections of the 2D input, projecting each section another section on a larger, 2D hidden grid; think about this as upsampling in images a pooling layer takes sections of the larger 2D grid of neurons then process each section into one value (usually by taking their maximum or average); think about this as downsampling in images Repeat steps 2-3 a flatten layer takes the now processed 2D grid and flattens it in the usual manner into a 1D tensor process the now information-rich, flat hidden representation as usual with a dense neural-network I would pause and ponder the above outline a little bit; but, no worries if this does immediately make sense; hopefully, as the layers are introduced progressively, what they do on various inputs will start to make more sense.\nI promise we will get to the actual layers soon, but before then, we have some vocabulary terms to go over.\nVocab Time! kernel Everything in the CNN world rests upon the idea of a kernel. A kernel is a sub-sample of the input of a certain fixed size (you can choose the size). Take our original checkerboard input:\na b a b b a b a a b a b b a b a An example \\((2 \\times 2)\\) kernel on this input could be:\na b b a that is:\na b a b b a b a a b a b b a b a Tada! A \\((2 \\times 2)\\) kernel is simply a \\((2\\times 2)\\) sample of the input.\nconvol-* and stride Convolving, convolutional, convoluting\u0026hellip; What does that mean? For a kernel size (i.e. 
dimensions of the kernel) that\u0026rsquo;s smaller than the size of the entire 2D input\u0026mdash;which, if you want your CNN to perform better than a DNN, it has to be (challenge question: why?)\u0026mdash;you need to move it around the input to capture the entirety of the input sample.\nThat movement is called convolution\nHere\u0026rsquo;s a \\((2\\times 2)\\) kernel!\na b a b b a b a a b a b b a b a and\u0026hellip; here it is convolving to the right!\na b a b b a b a a b a b b a b a Look! A moving kernel. That\u0026rsquo;s convolution.\nNow, how much the kernel moves at each step is called the stride, or the stride size. For a 2D input sample, this is usually specified as a 2D tuple: \\((x,y)\\), with \\(x\\) representing how much the kernel moves per step in the \\(x\\) direction, and \\(y\\) representing how much the kernel moves per step in the \\(y\\) direction.\nfilter So far we are doing nothing with the kernel: we are just taking convolving sub-samples, and doing a grand total of nothing with the array of subsamples. Filters are responsible of doing the actual processing.\nEach time a kernel is sampled, it is sent through a weight-matrix (just like what is stuck between two linear layers) which is called a filter. The output of this matrix is then reassembled into a 2D array after the sample kernel from each convolution is passed through the same filter, ready for more processing!\nchannel Here\u0026rsquo;s a little-known secret about the world: humans see colors! The toy example conveniently ignored this important fact: each pixel was simply a number, which\u0026mdash;in the real world\u0026mdash;would represent only one hue (think shades of gray). That\u0026rsquo;s an alright assumption to make if we are only encoding checkerboards or zebras, but not great if we want to recognize anything complicated. 
How would we represent colors in our input?\nMultiple channels to the rescue!\nA \u0026ldquo;2D\u0026rdquo; sample actually contains three dimensions: (channel_size, height, width). Namely, each convolutional layer actually takes multiple of those grids we discussed above as input, each representing the saturation of a specific color at each pixel. Those separate grids representing the same input are called channels.\nA conventional \u0026ldquo;image\u0026rdquo;, then, is actually three samples masquerading as one:\na grid of the concentrations of the red channel a grid of the concentrations of the green channel a grid of the concentrations of the blue channel Say, a sample image is square and has side-length \\(20\\). Can you guess the actual dimensions of one \u0026ldquo;sample\u0026rdquo; tensor?\n\u0026hellip;\n\\((3, 20,20)\\): three channels RGB, height, width.\nLet\u0026rsquo;s get convolving Throughout this workbook, we are never actually going to build a neural network. You already know how to do that! In this section, let\u0026rsquo;s go through each of the layers discussed above that a CNN consists of, and we will leave you with the task of putting them together in the workbook challenge. Don\u0026rsquo;t worry, we will be here to help you through that process.\nEither way, however, let\u0026rsquo;s get PyTorch going:\nimport torch import torch.nn as nn sampling images We went through all this talk about images, but we never actually dealt with one. So, before we can actually do anything with CNNs, let\u0026rsquo;s see how we can actually turn an image into the numbered pixel-grid we discussed in the toy example above.\nTo do this, we will use the Python Imaging Library (PIL), whose current implementation is cutely named \u0026ldquo;Pillow\u0026rdquo;. 
If the following line does not work because you are not running Colab, run pip install pillow on your machine and you will be off to the races.\nfrom PIL import Image Let\u0026rsquo;s open an example image!\nimg = Image.open(\u0026#34;./beesnees.png\u0026#34;) img \u0026lt;PIL.PngImagePlugin.PngImageFile image mode=RGBA size=938x1436 at 0x12B293FD0\u0026gt; Nice. We just opened a local image on my computer, of size \\(938 \\times 1436\\), named \u0026ldquo;beesnees.png\u0026rdquo;.\nAside: loading images\nWhat? You don\u0026rsquo;t just conveniently have a file named \u0026ldquo;beesnees\u0026rdquo; located on your Colab instance? Well\u0026hellip; Let\u0026rsquo;s load it.\nLocate on the left side of your Colab window the left sidebar, where the fourth icon down is the \u0026ldquo;file\u0026rdquo; folder icon. Tap on that, and\u0026mdash;in the \u0026ldquo;files\u0026rdquo; pane that opens\u0026mdash;tap on the first of the four icons, shaped like a page with an arrow, below the word \u0026ldquo;files\u0026rdquo;. Select your file, and you are off to the races.\nOh, and here\u0026rsquo;s beesnees.jpg\nAnyways, now that we have an image, what can we do with it? Well, for starters, we can ask numpy to make an array out of it.\nimport numpy as np arr = np.array(img) arr [[[ 10 10 8 255] [ 10 10 8 255] [ 10 10 8 255] ... [ 7 7 5 255] [ 7 7 5 255] [ 7 7 5 255]] ... [[ 3 3 3 255] [ 3 3 3 255] [ 3 3 3 255] ... [ 12 16 21 255] [ 12 16 21 255] [ 12 16 21 255]]] Interested in what shape it is?\narr.shape (1436, 938, 4) Hmmm\u0026hellip; We know that it is a \\((1436 \\times 938)\\) image, and apparently numpy encoded each of the pixels with all the channels instead of separating them into one channel per grids. That\u0026rsquo;s all fine and good, but why are there \\(4\\) channels for an RGB image?\nTurns out, png images by default are RGB*A*\u0026mdash;the last channel being transparency. 
Staring at the array above, we can see that every single pixel\u0026rsquo;s fourth channel is \\(255\\), meaning this image is not transparent anywhere.\nTransparency info is not super useful info for most models, so let\u0026rsquo;s slice it apart: leaving us with \\((1436 \\times 938 \\times 3)\\):\narr = arr[:,:,:-1] arr.shape (1436, 938, 3) Aside: woah! what is that syntax?\nIn the example code above, we used array slicing notation: arr[:,:,:-1]. This is an extension on Python list slicing only valid on Numpy arrays and PyTorch tensors. To come up with your own, here are the rules:\nseparate each dimension with a comma, from outer to inner on each dimension, slice the dimension normally with Python slice syntax, remembering that it has a fencepost problem on the end index: [startIndex]:[endIndex+1]; recall that negative numbers loop around (i.e. -1 means the end of the list) recall that, if you want to start from the beginning or end at the end of an array, you can leave that side blank: :5 means \u0026ldquo;give me the first 5 elements 0,1,2,3,4\u0026rdquo; if you want to keep the entirety of a dimension, type an colon and move on So, decoding what we just typed: arr[:,:,:-1]:\narr (the Numpy array we are slicing) [ (slice!) :, (keep all of the H dimension) :, (keep all of the W dimension) :-1 (keep everything in the channels dimension except until the last element, hence removing that) ] (end slice) One more change before moving on. The arr matrix right now has shape \\(H \\times W \\times C\\), where \\(C\\) is the number of channels. However, recall that PyTorch (more reasonably, in my opinion), expects \\(C \\times H \\times W\\): where channels are the first dimension to show that each channel is an independent grid of pixels representing that color.\nSo, we need to swap the inner and outer dimensions of our array to separate the three channels into grids of \\(H \\times W\\).\narr = arr.swapaxes(0,2) arr.shape (3, 938, 1436) Excellent. 
We have now swapped the axes of the image such that each channel is by itself. Although\u0026hellip; we also messed up the orientation of the image: it is now \\(938 \\times 1436\\) instead of \\(1436 \\times 938\\). Turns out, this does not matter much\u0026mdash;your machine learning model does not care about what orientation things are as long as they are consistent between the images and labels (challengeish question: why is that?).\nUsually, when we deal with image inputs, we end up with color values between \\(0\\) and \\(255\\). Yet, as you probably saw already, neural networks are exceptionally bad at dealing with large integers such as \\(255\\). As such, we will squish the input into a matrix of small numbers by just reporting pixel values in terms of \u0026ldquo;brightness\u0026rdquo;, also known as \u0026ldquo;percentage until 255\u0026rdquo;, which would nicely normalize the input.\nbrightness_arr = arr/255 brightness_arr [[[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471] ... [0.02745098 0.02745098 0.02745098 ... 0.04705882 0.04705882 0.04705882]] [[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471] ... [0.02745098 0.02745098 0.02745098 ... 0.0627451 0.0627451 0.0627451 ]] [[0.03137255 0.03137255 0.03137255 ... 0.01176471 0.01176471 0.01176471] ... [0.01960784 0.01960784 0.01960784 ... 0.08235294 0.08235294 0.08235294]]] One last step before we go forward: arr is a numpy array, which can\u0026rsquo;t be fed through Torch-accelerated objects. 
To get it to do things in a neural network, we need it to be a tensor:\nimg_tsr = torch.tensor(brightness_arr).float() img_tsr tensor([[[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118], ..., [0.0275, 0.0275, 0.0275, ..., 0.0471, 0.0471, 0.0471]], [[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118], ..., [0.0275, 0.0275, 0.0275, ..., 0.0627, 0.0627, 0.0627]], [[0.0314, 0.0314, 0.0314, ..., 0.0118, 0.0118, 0.0118], ..., [0.0196, 0.0196, 0.0196, ..., 0.0824, 0.0824, 0.0824]]]) For kicks, let\u0026rsquo;s also put it in a batch of \\(1\\) (as, recall, layers take an array of samples for input, so we just need to create a batch containing one element). .unsqueeze(dim) does this on a tensor; it just surrounds the desired dimension with another set of \u0026ldquo;brackets\u0026rdquo; (i.e. add a dimension of \\(1\\)).\nimg_tsr = img_tsr.unsqueeze(0) img_tsr.shape torch.Size([1, 3, 938, 1436]) Machine learning now awaits us\u0026hellip;\nconvolutional layer Finally, it is convolutioning time. The main object we will be using will be Conv2d, the 2D convolutional layer. If you care about the behind-the-scenes math, here it is: basically, it is the bias per channel, plus the weight of that channel (filter) times each kernel of that channel, cross-correlated across convolutions.\nTo instantiate a convolutional layer, here are what you need to figure out:\ninput channels: 3 for RGB, if you are not convolving across images, or your image is sepia or B\u0026amp;W, etc., your mileage will vary output channels: the output dimension of your weight matrix, what your convolving kernels get projected to kernel size: the width of your kernel, it is usually square so one number suffices stride size: how much your kernel should move per convolution (i.e. 
stride), default is \\((1\\times 1)\\) So, here goes; I gave an arbitrary hidden size of \\(5\\) and a kernel size of \\(4 \\times 4\\); we will talk about recommended topologies later.\ntest_convolution_layer = nn.Conv2d(in_channels=3, out_channels=5, kernel_size=4, stride=(1,1)) test_convolution_layer Conv2d(3, 5, kernel_size=(4, 4), stride=(1, 1)) Now, passing our input through our single layer:\nnet = test_convolution_layer(img_tsr) net tensor([[[[-0.0296, -0.0288, -0.0296, ..., -0.0249, -0.0249, -0.0249], [-0.0296, -0.0288, -0.0300, ..., -0.0249, -0.0249, -0.0249], [-0.0290, -0.0282, -0.0293, ..., -0.0249, -0.0249, -0.0249], ..., [ 0.0713, 0.0713, 0.0713, ..., -0.0086, -0.0080, -0.0093], [ 0.0713, 0.0713, 0.0713, ..., -0.0089, -0.0087, -0.0102], [ 0.0713, 0.0713, 0.0713, ..., -0.0113, -0.0113, -0.0113]]]], grad_fn=\u0026lt;ConvolutionBackward0\u0026gt;) Now, let\u0026rsquo;s take a look at the shape of this nice output:\nnet.shape torch.Size([1, 5, 935, 1433]) Look! Each of our kernels got projected from the \\(3\\) input channels into the \\(5\\) output channels; as our convolutions has stride size \\((1 \\times 1)\\), our kernel moves across our image and the filter takes each kernel and spits out a vector of length \\(5\\) at each step, resulting in \\((935 \\times 1433)\\) such steps and hence an output of \\((5 \\times 935 \\times 1433)\\).\nThinking break: kernel sizes and steps!\nNow, recall that our input has size \\((938 \\times 1436)\\), and yet our output has \\((935 \\times 1433)\\) as the last two dimensions, meaning the kernel only took \\(935\\) steps in the first dimension and \\(1433\\) steps in the second. What happened? Our step size is \\(1\\), so shouldn\u0026rsquo;t the steps span across the whole image?\nMess with the stride size and kernel size and figure this out. 
Shout a compelling answer in class before moving on.\nUnderstanding this will be critical to demonstrate your intuition of CNNs, pleeeese don\u0026rsquo;t move on until you think you have a good answer.\nLet\u0026rsquo;s think about this in context of a larger network. Our convolutional layer just took our input image, and processed it such that it ended up with a more, for the lack of a better word, \u0026ldquo;nuanced\u0026rdquo; 2D representation of our image. Instead of RGB being our three channels, our five channels will, after good training, contain more complex information such as \u0026ldquo;edges\u0026rdquo; or \u0026ldquo;blocks\u0026rdquo; that downstream networks can process.\nmaxpool/avgpool layer In traditional convolutional networks, each kernel is processed with a filter, projecting its channels into some larger space with its weights. What if instead, we took each kernel stride and squished it down into a single number?\nThat process is called pooling; correctly sequenced pooling layers acts as \u0026ldquo;information extraction\u0026rdquo; layers. Think of it as layers that asks question to the tune of \u0026ldquo;is anything in this sub-area of the image very bright?\u0026rdquo; or \u0026ldquo;what is the average color of this sub-area of the image?\u0026rdquo;: giving us actually more actionable information about the area than the pixels themselves.\nThere are two common pooling algorithms:\nMaxPool: take a kernel, squish it into one vector with one number each channel representing the maximum of the kernel in that channel AvgPool: take a kernel, squish it into one vector with one number each channel representing the average of the kernel in that channel To instantiate such a pooling layer, you need to figure out:\nkernel size: the width of your squishification kernel stride size: how much your squishing kernel should move per convolution (i.e. 
stride), default is \\((1\\times 1)\\) So, instantiating a MaxPool layer:\ntest_pooling_layer = nn.MaxPool2d(kernel_size=4, stride=(1,1)) test_pooling_layer MaxPool2d(kernel_size=4, stride=(1, 1), padding=0, dilation=1, ceil_mode=False) Now, applying our layer:\nnet = test_pooling_layer(net) net tensor([[[[-0.0277, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249], [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249], [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249], ..., [ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041], [ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041], [ 0.0713, 0.0713, 0.0713, ..., -0.0083, -0.0064, -0.0061]]]], grad_fn=\u0026lt;MaxPool2DWithIndicesBackward0\u0026gt;) net.shape torch.Size([1, 5, 932, 1430]) Thinking break: kernel sizes and steps, again!\nA very similar question as before. Why is it that, while pooling the input kernels (i.e. squishing every kernel of \\(4 \\times 4 = 16\\) pixels into one value), our side length didn\u0026rsquo;t, say, get divided by \\(4\\)? Why is it that our image is still almost as large?\nI will leave you to infer the calling convention of nn.AvgPool2d.\nGreat, one more layer before we are off to the races.\nflatten This one is really simple. All the 2D convolution work we did before is fine and good, but eventually we need to fall back on dense neural-networks to do the work of, for example, classification. Eventually, we need to flatten these tensors.\nFortunately, PyTorch has a layer for that (it seems like when in doubt, this is usually true\u0026hellip;)\nflat = nn.Flatten() flat(net).shape torch.Size([1, 6663800]) Nice and flat, just the way we expect for a dense-neural-network.\nAside: woah! that\u0026rsquo;s awfully large!\nThe natural next layer to this would be something like\nnn.Linear(6663800, 256) which would result in us using a matrix to project this GIGANTIC! processed input into a comparatively tiny output dimension. 
The whole point, as we discussed, of CNNs is to prevent the need to flatten an image right up front into giant, hard-to-process input vectors. How is this output serving that need?\nWe will discuss strategies of projecting large samples downwards very shortly, but even if we didn\u0026rsquo;t, this large input vector is not at all the same thing as just flattening the raw input vector: it has been processed with many filters and a pooling layer already, which means that the information contained in it is probably much more readily accessible for a neural network.\nA Typical CNN It is always very hard to name what exact architecture will work for a problem. However, these guidelines can help you architect a good CNN:\nStart with convolutional layers that have a tiny kernel, but project the input into a large number of channels (i.e. hyper-dimensional filters); good candidates look like \\(32\\) output channels, but with kernels of \\(2\\times 2\\). Think of these as the \u0026ldquo;edge detection\u0026rdquo;, \u0026ldquo;face detection\u0026rdquo;, etc. layers as Grant outlined in his video we saw at the beginning of the class. Gradually decrease the number of output channels (filters), but increase your kernel size; good candidates look like \\(2\\) output channels, but with kernels of \\(32 \\times 32\\). Think of these as the \u0026ldquo;structural\u0026rdquo; layers that detect large structures like \u0026ldquo;loops\u0026rdquo; or \u0026ldquo;shadows\u0026rdquo;, etc. Put a pooling layer (which algorithm is to taste of the problem) between 3-5 convolutional layers If you want to end up with a smaller sample, try taking larger strides. Always try to keep your kernel size larger than your stride size, or you will end up missing values in the data (thinking break: why?)\nChallenge + First Project Now that you have learned three new layers, it\u0026rsquo;s time to see them in action. Build a neural network to classify tiny images! 
Use at least one convolutional layer, one pooling layer, and the typical architecture of the network that we discussed last time. No need to do a full write-up, just the model, explanation and the associated colab is fine.\nData: https://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj\n","html":"\u003cp\u003eWelcome back! I think, over the last few days, we have been hyping up convolutional neural networks enough such that you are probably ready to dive right in. So\u0026hellip; Let\u0026rsquo;s, uh, motivate it first!\u003c/p\u003e\n\u003ch2 id=\"why-do-we-use-a-cnn\"\u003eWhy do we use a CNN?\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s think of a toy problem to play with. Given a pattern made using two colours (let\u0026rsquo;s name them a and b, or perhaps black and white), let\u0026rsquo;s classify whether it is the \u0026ldquo;zebra\u0026rdquo; pattern\u0026quot; or the \u0026ldquo;checkerboard\u0026rdquo; pattern.\u003c/p\u003e\n\u003cp\u003eZebra\u0026mdash;aligned stripes:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eCheckerboard\u0026mdash;alternating 
stripes:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe are already familiar with one neural-network architecture: stacked \u003cstrong\u003elinear\u003c/strong\u003e layers, also known as \u003cstrong\u003edeep neural networks\u003c/strong\u003e. If we are trying to process these two input samples for a linear layer, what would we do?\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003eWell, we would take each of the figures, and flatten it into a long row. Then, feed it into a layer of \\(4 \\times 4 = 16\\) input neurons.\u003c/p\u003e\n\u003cp\u003eWhat would that look like? 
Well; let \\(a=0, b=1\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ezebra_sample\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echessboard_sample\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWithout looking very closely, those two \u003cem\u003every\u003c/em\u003e different patterns seem to yield pretty similar input samples! A \u003cstrong\u003edense neural network\u003c/strong\u003e need to fit very well to notice the numerical trick of checking if two \\(1\\) or two \\(0\\) are next. That\u0026rsquo;s not good\u0026hellip; A human can spot the difference in the original, 2D figure very obviously!\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eENTER CNNs\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"what-is-a-cnn\"\u003eWhat is a CNN?\u003c/h2\u003e\n\u003cp\u003eThe take-home-message from the previous simple example is that \u003cem\u003e2D structures loose information when they are flattened\u003c/em\u003e. So, \u003cstrong\u003eCNNs\u003c/strong\u003e\u0026mdash;ultimately\u0026mdash;offer a way to process this 2D structural information with a \u003cstrong\u003eneural network\u003c/strong\u003e without flattening immediately. 
Generally, a \u003cstrong\u003eCNN\u003c/strong\u003e takes the following structure:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e2D-input fed into the model\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econvolutional layers\u003c/strong\u003e process small sections of the 2D input, projecting each section another section on a larger, 2D hidden grid; think about this as \u003cstrong\u003eupsampling\u003c/strong\u003e in images\u003c/li\u003e\n\u003cli\u003ea \u003cstrong\u003epooling layer\u003c/strong\u003e takes sections of the larger 2D grid of neurons then process each section into one value (usually by taking their maximum or average); think about this as \u003cstrong\u003edownsampling\u003c/strong\u003e in images\u003c/li\u003e\n\u003cli\u003eRepeat steps 2-3\u003c/li\u003e\n\u003cli\u003ea \u003cstrong\u003eflatten\u003c/strong\u003e layer takes the now \u003cem\u003eprocessed\u003c/em\u003e 2D grid and flattens it in the usual manner into a 1D tensor\u003c/li\u003e\n\u003cli\u003eprocess the now information-rich, flat hidden representation as usual with a \u003cstrong\u003edense neural-network\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI would pause and ponder the above outline a little bit; but, no worries if this does immediately make sense; hopefully, as the layers are introduced progressively, what they do on various inputs will start to make more sense.\u003c/p\u003e\n\u003cp\u003eI promise we will get to the actual layers soon, but before then, we have some vocabulary terms to go over.\u003c/p\u003e\n\u003ch2 id=\"vocab-time\"\u003eVocab Time!\u003c/h2\u003e\n\u003ch3 id=\"kernel\"\u003ekernel\u003c/h3\u003e\n\u003cp\u003eEverything in the \u003cstrong\u003eCNN\u003c/strong\u003e world rests upon the idea of a \u003cstrong\u003ekernel\u003c/strong\u003e. A \u003cstrong\u003ekernel\u003c/strong\u003e is a sub-sample of the input of a certain fixed size (you can choose the size). 
Take our original checkerboard input:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAn example \\((2 \\times 2)\\) \u003cstrong\u003ekernel\u003c/strong\u003e on this input could be:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ethat 
is:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eTada! A \\((2 \\times 2)\\) \u003cstrong\u003ekernel\u003c/strong\u003e is simply a \\((2\\times 2)\\) sample of the input.\u003c/p\u003e\n\u003ch3 id=\"convol-and-stride\"\u003econvol-* and stride\u003c/h3\u003e\n\u003cp\u003eConvolving, convolutional, convoluting\u0026hellip; What does that mean? For a \u003cstrong\u003ekernel size\u003c/strong\u003e (i.e. 
dimensions of the \u003cstrong\u003ekernel\u003c/strong\u003e) that\u0026rsquo;s smaller than the size of the entire 2D input\u0026mdash;which, if you want your \u003cstrong\u003eCNN\u003c/strong\u003e to perform better than a \u003cstrong\u003eDNN\u003c/strong\u003e, it has to be (challenge question: why?)\u0026mdash;you need to move it around the input to capture the entirety of the input sample.\u003c/p\u003e\n\u003cp\u003eThat movement is called \u003cstrong\u003econvolution\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a \\((2\\times 2)\\) \u003cstrong\u003ekernel\u003c/strong\u003e!\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eand\u0026hellip; here it is \u003cstrong\u003econvolving\u003c/strong\u003e to the 
right!\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ea\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/th\u003e\n\u003cth\u003eb\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cstrong\u003eb\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003ctd\u003eb\u003c/td\u003e\n\u003ctd\u003ea\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eLook! A moving \u003cstrong\u003ekernel\u003c/strong\u003e. That\u0026rsquo;s \u003cstrong\u003econvolution\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eNow, how \u003cem\u003emuch\u003c/em\u003e the \u003cstrong\u003ekernel\u003c/strong\u003e moves at each step is called the \u003cstrong\u003estride\u003c/strong\u003e, or the \u003cstrong\u003estride size\u003c/strong\u003e. For a 2D input sample, this is usually specified as a 2D tuple: \\((x,y)\\), with \\(x\\) representing how much the kernel moves per step in the \\(x\\) direction, and \\(y\\) representing how much the kernel moves per step in the \\(y\\) direction.\u003c/p\u003e\n\u003ch3 id=\"filter\"\u003efilter\u003c/h3\u003e\n\u003cp\u003eSo far we are doing nothing with the \u003cstrong\u003ekernel\u003c/strong\u003e: we are just taking \u003cstrong\u003econvolving\u003c/strong\u003e sub-samples, and doing a grand total of nothing with the array of subsamples. 
\u003cstrong\u003eFilters\u003c/strong\u003e are responsible of doing the actual processing.\u003c/p\u003e\n\u003cp\u003eEach time a \u003cstrong\u003ekernel\u003c/strong\u003e is sampled, it is sent through a weight-matrix (just like what is stuck between two \u003cstrong\u003elinear\u003c/strong\u003e layers) which is called a \u003cstrong\u003efilter\u003c/strong\u003e. The output of this matrix is then reassembled into a 2D array after the sample kernel from each \u003cstrong\u003econvolution\u003c/strong\u003e is passed through the same filter, ready for more processing!\u003c/p\u003e\n\u003ch3 id=\"channel\"\u003echannel\u003c/h3\u003e\n\u003cp\u003eHere\u0026rsquo;s a little-known secret about the world: humans see colors! The toy example conveniently ignored this important fact: each pixel was simply a number, which\u0026mdash;in the real world\u0026mdash;would represent only one hue (think shades of gray). That\u0026rsquo;s an alright assumption to make if we are only encoding checkerboards or zebras, but not great if we want to recognize anything complicated. How would we represent colors in our input?\u003c/p\u003e\n\u003cp\u003e\u003cem\u003eMultiple \u003cstrong\u003echannels\u003c/strong\u003e to the rescue!\u003c/em\u003e\u003c/p\u003e\n\u003cp\u003eA \u0026ldquo;2D\u0026rdquo; sample actually contains three dimensions: \u003ccode\u003e(channel_size, height, width)\u003c/code\u003e. Namely, each \u003cstrong\u003econvolutional\u003c/strong\u003e layer actually take multiple of those grids we discussed above as input, each representing the saturation of a specific color at each pixel. 
Those separate grids representing the same input are called \u003cstrong\u003echannels\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eA conventional \u0026ldquo;image\u0026rdquo;, then, is actually three samples masquerading as one:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ea grid of the concentrations of the red \u003cstrong\u003echannel\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ea grid of the concentrations of the green \u003cstrong\u003echannel\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ea grid of the concentrations of the blue \u003cstrong\u003echannel\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSay, a sample image is square and has side-length \\(20\\). Can you guess the actual dimensions of one \u0026ldquo;sample\u0026rdquo; tensor?\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\\((3, 20,20)\\): three channels RGB, height, width.\u003c/p\u003e\n\u003ch2 id=\"let-s-get-convolving\"\u003eLet\u0026rsquo;s get convolving\u003c/h2\u003e\n\u003cp\u003eThroughout this workbook, we are never actually going to build a neural network. You already know how to do that! In this section, let\u0026rsquo;s go through each of the \u003cem\u003elayers\u003c/em\u003e discussed above that a CNN consists of, and we will leave you with the task of putting them together in the workbook challenge. 
Don\u0026rsquo;t worry, we will be here to help you through that process.\u003c/p\u003e\n\u003cp\u003eEither way, however, let\u0026rsquo;s get PyTorch going:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"sampling-images\"\u003esampling images\u003c/h3\u003e\n\u003cp\u003eWe went through all this talk about images, but we never actually dealt with one. So, before we can actually do anything with CNNs, let\u0026rsquo;s see how we can actually turn an image into the numbered pixel-grid we discussed in the toy example above.\u003c/p\u003e\n\u003cp\u003eTo do this, we will use the PythonImageLibrary (PIL), whose currently implementation is cutely named \u0026ldquo;Pillow\u0026rdquo;. 
If the following line does not work because you are not running Colab, run \u003ccode\u003epip install pillow\u003c/code\u003e on your machine and you will be off to the races.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ePIL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eImage\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s open an example image!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eImage\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eopen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./beesnees.png\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;PIL.PngImagePlugin.PngImageFile image mode=RGBA size=938x1436 at 0x12B293FD0\u0026gt;\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice. We just opened a local image on my computer, of size \\(938 \\times 1436\\), named \u0026ldquo;beesnees.png\u0026rdquo;.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003eloading images\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWhat? You don\u0026rsquo;t just conveniently have a file named \u0026ldquo;beesnees\u0026rdquo; located on your Colab instance? Well\u0026hellip; Let\u0026rsquo;s load it.\u003c/p\u003e\n\u003cp\u003eLocate on the left side of your Colab window the left sidebar, where the fourth icon down is the \u0026ldquo;file\u0026rdquo; folder icon. Tap on that, and\u0026mdash;in the \u0026ldquo;files\u0026rdquo; pane that opens\u0026mdash;tap on the first of the four icons, shaped like a page with an arrow, below the word \u0026ldquo;files\u0026rdquo;. Select your file, and you are off to the races.\u003c/p\u003e\n\u003cp\u003eOh, and here\u0026rsquo;s \u003ca href=\"https://haha.business/business.jpg\"\u003ebeesnees.jpg\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAnyways, now that we have an image, what can we do with it? 
Well, for starters, we can ask \u003ccode\u003enumpy\u003c/code\u003e to make an array out of it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003earray\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[[ 10 10 8 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 10 10 8 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 10 10 8 
255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7 7 5 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7 7 5 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7 7 5 255]]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[ 3 3 3 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 3 3 3 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 3 3 3 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 12 16 21 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 12 16 21 255]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 12 16 21 255]]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eInterested in what shape it is?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1436, 938, 4)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eHmmm\u0026hellip; We know that it is a \\((1436 \\times 938)\\) image, and apparently \u003ccode\u003enumpy\u003c/code\u003e encoded each of the pixels with all the channels instead of separating them into one channel per grids. That\u0026rsquo;s all fine and good, but why are there \\(4\\) channels for an RGB image?\u003c/p\u003e\n\u003cp\u003eTurns out, \u003ccode\u003epng\u003c/code\u003e images by default are RGB*A*\u0026mdash;the last channel being transparency. Staring at the array above, we can see that every single pixel\u0026rsquo;s fourth channel is \\(255\\), meaning this image is not transparent anywhere.\u003c/p\u003e\n\u003cp\u003eTransparency info is not super useful info for most models, so let\u0026rsquo;s slice it apart: leaving us with \\((1436 \\times 938 \\times 3)\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,:,:\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1436, 938, 3)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003ewoah! what is that syntax?\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eIn the example code above, we used \u003cstrong\u003earray slicing notation\u003c/strong\u003e: \u003ccode\u003earr[:,:,:-1]\u003c/code\u003e. This is an extension on Python list slicing only valid on Numpy arrays and PyTorch tensors. To come up with your own, here are the rules:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eseparate each dimension with a comma, from outer to inner\u003c/li\u003e\n\u003cli\u003eon each dimension, slice the dimension normally with Python slice syntax, remembering that it has a fencepost problem on the end index: \u003ccode\u003e[startIndex]:[endIndex+1]\u003c/code\u003e; recall that negative numbers loop around (i.e. 
\u003ccode\u003e-1\u003c/code\u003e means the end of the list)\u003c/li\u003e\n\u003cli\u003erecall that, if you want to start from the beginning or end at the end of an array, you can leave that side blank: \u003ccode\u003e:5\u003c/code\u003e means \u0026ldquo;give me the first 5 elements 0,1,2,3,4\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eif you want to keep the entirety of a dimension, type an colon and move on\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, decoding what we just typed: \u003ccode\u003earr[:,:,:-1]\u003c/code\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003earr\u003c/code\u003e (the Numpy array we are slicing)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e[\u003c/code\u003e (slice!)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e:,\u003c/code\u003e (keep all of the H dimension)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e:,\u003c/code\u003e (keep all of the W dimension)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e:-1\u003c/code\u003e (keep everything in the channels dimension except until the last element, hence removing that)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e]\u003c/code\u003e (end slice)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eOne more change before moving on. The \u003ccode\u003earr\u003c/code\u003e matrix right now has shape \\(H \\times W \\times C\\), where \\(C\\) is the number of channels. 
However, recall that PyTorch (more reasonably, in my opinion), expects \\(C \\times H \\times W\\): where channels are the first dimension to show that each channel is an independent grid of pixels representing that color.\u003c/p\u003e\n\u003cp\u003eSo, we need to swap the inner and outer dimensions of our array to separate the three \u003cstrong\u003echannels\u003c/strong\u003e into grids of \\(H \\times W\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eswapaxes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(3, 938, 
1436)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. We have now swapped the axes of the image such that each \u003cstrong\u003echannel\u003c/strong\u003e is by itself. Although\u0026hellip; we also messed up the orientation of the image: it is now \\(938 \\times 1436\\) instead of \\(1436 \\times 938\\). Turns out, this does not matter much\u0026mdash;your machine learning model does not care about what orientation things are \u003cem\u003eas long as they are consistent between the images and labels\u003c/em\u003e (challengeish question: why is that?).\u003c/p\u003e\n\u003cp\u003eUsually, when we deal with image inputs, we end up with color values between \\(0\\) and \\(255\\). Yet, as you probably saw already, neural networks are exceptionally bad at dealing with large integers such as \\(255\\). As such, we will squish the input into a matrix of small numbers by just reporting pixel values in terms of \u0026ldquo;brightness\u0026rdquo;, also known as \u0026ldquo;percentage until 255\u0026rdquo;, which would nicely normalize the input\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebrightness_arr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e255\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebrightness_arr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.02745098 0.02745098 0.02745098 ... 0.04705882 0.04705882 0.04705882]]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.03921569 0.03921569 0.03921569 ... 0.01176471 0.01176471 0.01176471]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.02745098 0.02745098 0.02745098 ... 0.0627451 0.0627451 0.0627451 ]]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.03137255 0.03137255 0.03137255 ... 0.01176471 0.01176471 0.01176471]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.01960784 0.01960784 0.01960784 ... 0.08235294 0.08235294 0.08235294]]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOne last step before we go forward: \u003ccode\u003earr\u003c/code\u003e in as numpy array, which can\u0026rsquo;t be fed through Torch-accelerated objects. 
To get it to do things in a neural network, we need it to be a \u003cstrong\u003etensor\u003c/strong\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebrightness_arr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.0275, 0.0275, 0.0275, ..., 0.0471, 0.0471, 0.0471]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.0392, 0.0392, 0.0392, ..., 0.0118, 0.0118, 0.0118],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.0275, 0.0275, 0.0275, ..., 0.0627, 0.0627, 0.0627]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [[0.0314, 0.0314, 0.0314, ..., 0.0118, 0.0118, 0.0118],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0.0196, 0.0196, 0.0196, ..., 0.0824, 0.0824, 0.0824]]])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFor kicks, let\u0026rsquo;s also put it in a batch of \\(1\\) (as, recall, layers take an \u003cem\u003earray\u003c/em\u003e of samples for input, so we just need to create a batch containing one element). \u003ccode\u003e.unsqueeze(dim)\u003c/code\u003e does this on a tensor; it just surrounds the desired dimension with another set of \u0026ldquo;brackets\u0026rdquo; (i.e. 
add a dimension of \\(1\\)).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunsqueeze\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 3, 938, 1436])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMachine learning now awaits us\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"convolutional-layer\"\u003econvolutional layer\u003c/h3\u003e\n\u003cp\u003eFinally, it is convolutioning time. The main object we will be using will be \u003ccode\u003eConv2d\u003c/code\u003e, the 2D convolutional layer. 
If you care about the behind-the-scenes math, \u003ca href=\"https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\"\u003ehere it is\u003c/a\u003e: basically, it is the bias per channel, plus the weight of that channel (\u003cstrong\u003efilter\u003c/strong\u003e) times each \u003cstrong\u003ekernel\u003c/strong\u003e of that channel, cross-correlated across \u003cstrong\u003econvolutions\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTo instantiate a convolutional layer, here are what you need to figure out:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003einput channels: \u003cem\u003e3\u003c/em\u003e for RGB, if you are not convolving across images, or your image is sepia or B\u0026amp;W, etc., your mileage will vary\u003c/li\u003e\n\u003cli\u003eoutput channels: the output dimension of your weight matrix, what your \u003cstrong\u003econvolving\u003c/strong\u003e \u003cstrong\u003ekernels\u003c/strong\u003e get projected to\u003c/li\u003e\n\u003cli\u003ekernel size: the width of your \u003cstrong\u003ekernel\u003c/strong\u003e, it is usually square so one number suffices\u003c/li\u003e\n\u003cli\u003estride size: how much your \u003cstrong\u003ekernel\u003c/strong\u003e should move per \u003cstrong\u003econvolution\u003c/strong\u003e (i.e. 
\u003cstrong\u003estride\u003c/strong\u003e), default is \\((1\\times 1)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, here goes; I gave an arbitrary hidden size of \\(5\\) and a kernel size of \\(4 \\times 4\\); we will talk about recommended topologies later.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_convolution_layer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eConv2d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ein_channels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eout_channels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ekernel_size\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estride\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_convolution_layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eConv2d(3, 5, kernel_size=(4, 4), stride=(1, 1))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, passing our input through our single layer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_convolution_layer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimg_tsr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[[[-0.0296, -0.0288, -0.0296, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0296, -0.0288, -0.0300, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0290, -0.0282, -0.0293, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0086, -0.0080, -0.0093],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0089, -0.0087, -0.0102],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0113, -0.0113, -0.0113]]]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e grad_fn=\u0026lt;ConvolutionBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, let\u0026rsquo;s take a look at the shape of this nice output:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 5, 935, 1433])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLook! Each of our kernels got projected from the \\(3\\) input \u003cstrong\u003echannels\u003c/strong\u003e into the \\(5\\) output \u003cstrong\u003echannels\u003c/strong\u003e; as our \u003cstrong\u003econvolutions\u003c/strong\u003e has \u003cstrong\u003estride size\u003c/strong\u003e \\((1 \\times 1)\\), our \u003cstrong\u003ekernel\u003c/strong\u003e moves across our image and the \u003cstrong\u003efilter\u003c/strong\u003e takes each kernel and spits out a vector of length \\(5\\) at each step, resulting in \\((935 \\times 1433)\\) such steps and hence an output of \\((5 \\times 935 \\times 1433)\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThinking break: \u003cstrong\u003ekernel sizes and steps!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eNow, recall that our input has size \\((938 \\times 1436)\\), and yet our output has \\((935 \\times 1433)\\) as the last two dimensions, meaning the kernel only took \\(935\\) steps in the first dimension and \\(1433\\) steps in the second. What happened? Our step size is \\(1\\), so shouldn\u0026rsquo;t the steps span across the whole image?\u003c/p\u003e\n\u003cp\u003eMess with the stride size and kernel size and figure this out. 
Shout a compelling answer in class before moving on.\u003c/p\u003e\n\u003cp\u003eUnderstanding this will be \u003cem\u003ecritical\u003c/em\u003e to demonstrate your intuition of CNNs, \u003cem\u003epleeeese don\u0026rsquo;t move on until you think you have a good answer\u003c/em\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet\u0026rsquo;s think about this in context of a larger network. Our convolutional layer just took our input image, and processed it such that it ended up with a more, for the lack of a better word, \u0026ldquo;nuanced\u0026rdquo; 2D representation of our image. Instead of RGB being our three \u003cstrong\u003echannels\u003c/strong\u003e, our five \u003cstrong\u003echannels\u003c/strong\u003e will, after good training, contain more complex information such as \u0026ldquo;edges\u0026rdquo; or \u0026ldquo;blocks\u0026rdquo; that downstream networks can process.\u003c/p\u003e\n\u003ch3 id=\"maxpool-avgpool-layer\"\u003emaxpool/avgpool layer\u003c/h3\u003e\n\u003cp\u003eIn traditional convolutional networks, each \u003cstrong\u003ekernel\u003c/strong\u003e is processed with a \u003cstrong\u003efilter\u003c/strong\u003e, projecting its channels into some larger space with its weights. What if instead, we took each \u003cstrong\u003ekernel\u003c/strong\u003e stride and squished it down into a single \u003cstrong\u003enumber\u003c/strong\u003e?\u003c/p\u003e\n\u003cp\u003eThat process is called \u003cstrong\u003epooling\u003c/strong\u003e; correctly sequenced pooling layers acts as \u0026ldquo;information extraction\u0026rdquo; layers. 
Think of it as layers that asks question to the tune of \u0026ldquo;is anything in this sub-area of the image very bright?\u0026rdquo; or \u0026ldquo;what is the average color of this sub-area of the image?\u0026rdquo;: giving us actually \u003cem\u003emore\u003c/em\u003e actionable information about the area than the pixels themselves.\u003c/p\u003e\n\u003cp\u003eThere are two common pooling algorithms:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eMaxPool: take a \u003cstrong\u003ekernel\u003c/strong\u003e, squish it into one vector with one number each channel representing the maximum of the kernel in that channel\u003c/li\u003e\n\u003cli\u003eAvgPool: take a \u003cstrong\u003ekernel\u003c/strong\u003e, squish it into one vector with one number each channel representing the average of the kernel in that channel\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTo instantiate such a pooling layer, you need to figure out:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ekernel size: the width of your squishification \u003cstrong\u003ekernel\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003estride size: how much your squishing \u003cstrong\u003ekernel\u003c/strong\u003e should move per \u003cstrong\u003econvolution\u003c/strong\u003e (i.e. 
\u003cstrong\u003estride\u003c/strong\u003e), default is \\((1\\times 1)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, instantiating a MaxPool layer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_pooling_layer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMaxPool2d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekernel_size\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estride\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_pooling_layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" 
data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMaxPool2d(kernel_size=4, stride=(1, 1), padding=0, dilation=1, ceil_mode=False)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, applying our layer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_pooling_layer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[[[-0.0277, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.0271, -0.0277, -0.0279, ..., -0.0249, -0.0249, -0.0249],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
[ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0064, -0.0041, -0.0041],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 0.0713, 0.0713, 0.0713, ..., -0.0083, -0.0064, -0.0061]]]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e grad_fn=\u0026lt;MaxPool2DWithIndicesBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 5, 932, 1430])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cp\u003eThinking break: \u003cstrong\u003ekernel sizes and steps, again!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eA very similar question as before. Why is it that, while pooling the input kernels (i.e. squishing every kernel of \\(4 \\times 4 = 16\\) pixels into one value), our side length didn\u0026rsquo;t, say, get divided by \\(4\\)? 
Why it is that our image is still almost as large?\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eI will leave you to infer the calling convention of \u003ccode\u003enn.AvgPool2d\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eGreat, one more layer before we are off to the races.\u003c/p\u003e\n\u003ch3 id=\"flatten\"\u003eflatten\u003c/h3\u003e\n\u003cp\u003eThis one is \u003cem\u003ereally\u003c/em\u003e simple. All the 2D convolution work we did before is fine and good, but eventually we need to fall back on dense neural-networks to do the work of, for example, classification. \u003cem\u003eEventually\u003c/em\u003e, we need to flatten these tensors.\u003c/p\u003e\n\u003cp\u003eFortunately, PyTorch has a layer for that (it seems like when it doubt, this is usually true\u0026hellip;)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eflat\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFlatten\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eflat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enet\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etorch.Size([1, 6663800])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNice and flat, just the way we expect for a \u003cstrong\u003edense-neural-network\u003c/strong\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: \u003cstrong\u003ewoah! that\u0026rsquo;s awfully large!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThe natural next layer to this would be something like\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDense\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e6663800\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e256\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich would result in us using a matrix to project this \u003cstrong\u003eGIGANTIC!\u003c/strong\u003e processed input into a comparatively \u003cem\u003etiny\u003c/em\u003e output dimension. The whole point, as we discussed, of CNNs is to prevent the need to flatten an image right up front into giant, hard-to-process input vectors. 
How is this output serving that need?\u003c/p\u003e\n\u003cp\u003eWe will discuss strategies of projecting large samples downwards very shortly, but even if we didn\u0026rsquo;t, this large input vector is not at all the same thing as just flattening the raw input vector: it has been processed with many filters and a pooling layer already, which means that the information contained in it is probably much more readily accessible for a neural network.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"a-typical-cnn\"\u003eA Typical CNN\u003c/h2\u003e\n\u003cp\u003eIt is always very hard to name what exact architecture will work for a problem. However, these guidelines can help you architect a good CNN:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eStart with convolutional layers that has a \u003cem\u003etiny\u003c/em\u003e \u003cstrong\u003ekernel\u003c/strong\u003e, but projects the input into a \u003cem\u003elarge\u003c/em\u003e amount of channels (i.e. hyper-dimensional \u003cstrong\u003efilters\u003c/strong\u003e); good candidates looks like \\(32\\) output channels, but with kernels of \\(2\\times 2\\). Think of these as the \u0026ldquo;edge detection\u0026rdquo;, \u0026ldquo;face detection\u0026rdquo;, etc. layers as Grant outlined in his video we saw at the beginning of the class.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eGradually\u003c/strong\u003e \u003cem\u003edecrease\u003c/em\u003e the number of output channels (\u003cstrong\u003efilters\u003c/strong\u003e), but \u003cem\u003eincrease\u003c/em\u003e your \u003cstrong\u003ekernel\u003c/strong\u003e size; good candidates look like \\(2\\) output channels, but with kernels of \\(32 \\times 32\\). 
Think of these as the \u0026ldquo;structural\u0026rdquo; layers that detect large structures like \u0026ldquo;loops\u0026rdquo; or \u0026ldquo;shadows\u0026rdquo;, etc.\u003c/li\u003e\n\u003cli\u003ePut a pooling layer (which algorithm is to taste of the problem) between 3-5 convolutional layers\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf you want to end up with a smaller sample, try taking larger \u003cstrong\u003estrides\u003c/strong\u003e. Always try to keep your \u003cstrong\u003ekernel size\u003c/strong\u003e larger then your \u003cstrong\u003estride size\u003c/strong\u003e, or you will end up missing values in the data (thinking break: why?)\u003c/p\u003e\n\u003ch2 id=\"challenge-plus-first-project\"\u003eChallenge + First Project\u003c/h2\u003e\n\u003cp\u003eNow that you have learned three new layers, its time to see them in action. Build a neural network to \u003ca href=\"https://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj?usp=sharing\"\u003eclassify tiny images\u003c/a\u003e! Use at least one \u003cstrong\u003econvolutional\u003c/strong\u003e layer, one \u003cstrong\u003epooling\u003c/strong\u003e layer, and the typical architecture of the network that we discussed last time. No need to do a full write-up, just the model, explanation and the associated colab is fine.\u003c/p\u003e\n\u003cp\u003eData: \u003ca href=\"https://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj\"\u003ehttps://drive.google.com/drive/folders/1U7RUybsCGpZCTYejnGGzSB_6HnQetmlj\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_time_to_convolve/","tags":["writing","aml"],"title":"AML: Time to Convolve"},{"categories":null,"contents":"Hello y\u0026rsquo;all! This quick post about\u0026hellip; writing your first \u0026ldquo;article\u0026rdquo; (ahem, MA) for this class. 
To me, the most rewarding part of our journey together is to be able to support everyone through writing very presentable reports\u0026mdash;even if it is on old or simple problems\u0026mdash;but in the format from which you can easily jump off and write a fully-blown scientific article in the future.\nexemplar we discussed (Kameswari, Sravani, and Mamidi 2020)\nI should add that, despite the word being used, this is by no means the only way that you can write a wonderful scientific article. It just had the right structure for us to go over in class :)\noverall goals We want to make reports that are clear (easily understandable for audience), concise (only uses words when needed, prioritizing figures and intuition), and precise (when making claims, they are supported clearly with data). And so, the following sections focus mostly on improving those three criteria.\ndiscussion per section Let\u0026rsquo;s now switch to bullet-point format, going over some ideas of how to make strong sections:\nabstract Guiding Questions: What is your study about? What did you do? How well did it work?\nmore jargon here is OK, but should be immediately summative of your whole study only use enough jargon to make it concise: this section should be crystal clear for everyone of your academic peers (i.e. anyone in our class/familiar with ML) it should be easily skimmable intro/motivation/background Guiding Questions: Why are you making big picture choices you made? Did anyone do it before you? How did they do it? What changes are you making (for this class, \u0026ldquo;none\u0026rdquo; is acceptable)?\nimmediately, you should state what you are trying to do and why its worthwhile of your reader\u0026rsquo;s time keep the goal scope small: not \u0026ldquo;cancer is an sickness that affects a lot of people\u0026rdquo;, but \u0026ldquo;this specific gene is correlated with cancer prognosis\u0026rdquo; justify why you are using AML tools! 
if the relationship you are modeling is a line, deep learning is way overkill summarize previous work: if anyone did it before you, what approach did they do; why? Are you/why are you doing anything difference (i.e. why do you believe methods to be not as good?) methods Guiding Questions: How did you do your study?\ngive as much information for a peer (i.e. anyone in our class/familiar with ML) to be able to reproduce your entire study good to think about\u0026hellip; data sourcing detailed notes on data preparation and feature engineering (bonus points for code) model selection + motivation (should be broadly given in the prev. section already) model implementation (layer counts, activations, seeds) (hyper)parameters (LR, batch size, epoch, optimizer momentum/beta, layer seeds) \u0026mdash; saying \u0026ldquo;default\u0026rdquo; here is fine but be specific about what you are leaving to default training environment (hardware, library versions, time it took, etc.) Also, from a class standpoint, we want to see your hard work in actually practicing the skills we are learning!\nresults/data Guiding Questions: Why should your peer believe you did what you said you did?\nmotivate validation metrics used (i.e. why is success in this validation metric a measurement by proxy of success in the stated problem in the intro?) report the clear details the withholding scheme used\u0026mdash;simple train/val split? k-fold? leave-one-out? present in graphical or tabular form! 
your clear key takeaways; in general, keep words in this section to a minimum \u0026ldquo;these distributions look visually different!\u0026rdquo; \u0026ldquo;these lines are definitely parallel!\u0026rdquo; \u0026ldquo;this number is definitely larger than this other number!\u0026rdquo; During the process of \u0026ldquo;NTJ\u0026rdquo;, a paper reading methodology taught by the XRT lab, the skill of jumping abstract =\u0026gt; data (\u0026ldquo;figures\u0026rdquo;) =\u0026gt; takeaways (\u0026ldquo;novelty\u0026rdquo;) is greatly emphasized. Usually, the best papers will represent their key takeaways clearly and graphically in this section, so that the reader only need to go into the methods section strictly when needed to reproduce or clarify questions.\nconclusion/discussion Guiding Questions: Summarize.\nIt is often good to include future work here as well as well as fascinating extensions of your choosing. This section differs from the abstract in both the inclusion of future work, as well as its audience: while the abstract need only to be crystal clear for your peers, the conclusion should be clear to everyone in the field \u0026mdash; so redefinition of paper-specific jargon, etc.\nethics Guiding Questions: Where did your data come from; why is its collection and processing (they are independent permissions!) legal and ethical? Why are you not breaking the world?\nSee: this Medium article for more!\nfrom the experts NIPS (leading ML conference) rubric for a paper (jump to \u0026ldquo;Review Content\u0026rdquo; section)\nKameswari, Lalitha, Dama Sravani, and Radhika Mamidi. 2020. “Enhancing Bias Detection in Political News Using Pragmatic Presupposition.” In Proceedings of the Eighth International Workshop on Natural Language Processing for Social Media, nil. doi:10.18653/v1/2020.socialnlp-1.1. ","html":"\u003cp\u003eHello y\u0026rsquo;all! This quick post about\u0026hellip; writing your first \u0026ldquo;article\u0026rdquo; (ahem, MA) for this class. 
To me, the most rewarding part of our journey together is to be able to support everyone through writing very presentable reports\u0026mdash;even if it is on old or simple problems\u0026mdash;but in the format from which you can easily jump off and write a fully-blown scientific article in the future.\u003c/p\u003e\n\u003ch2 id=\"exemplar-we-discussed\"\u003eexemplar we discussed\u003c/h2\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eKameswari, Sravani, and Mamidi 2020\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eI should add that, despite the word being used, this is by \u003cem\u003eno means\u003c/em\u003e the only way that you can write a wonderful scientific article. It just had the right structure for us to go over in class :)\u003c/p\u003e\n\u003ch2 id=\"overall-goals\"\u003eoverall goals\u003c/h2\u003e\n\u003cp\u003eWe want to make reports that are \u003cem\u003eclear\u003c/em\u003e (easily understandable for audience), \u003cem\u003econcise\u003c/em\u003e (only uses words when needed, prioritizing figures and intuition), and \u003cem\u003eprecise\u003c/em\u003e (when making claims, they are supported clearly with data). And so, the following sections focus mostly on improving those three criteria.\u003c/p\u003e\n\u003ch2 id=\"discussion-per-section\"\u003ediscussion per section\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s now switch to bullet-point format, going over some ideas of how to make strong sections:\u003c/p\u003e\n\u003ch3 id=\"abstract\"\u003eabstract\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: What is your study about? What did you do? 
How well did it work?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emore jargon here is OK, but should be \u003cstrong\u003eimmediately\u003c/strong\u003e summative of your whole study\u003c/li\u003e\n\u003cli\u003eonly use enough jargon to make it concise: this section should be \u003cstrong\u003ecrystal clear\u003c/strong\u003e for everyone of your academic peers (i.e. anyone in our class/familiar with ML)\u003c/li\u003e\n\u003cli\u003eit should be easily skimmable\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"intro-motivation-background\"\u003eintro/motivation/background\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Why are you making big picture choices you made? Did anyone do it before you? How did they do it? What changes are you making (for this class, \u0026ldquo;none\u0026rdquo; is acceptable)?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eimmediately, you should state what you are trying to do and why its worthwhile of your reader\u0026rsquo;s time\u003c/li\u003e\n\u003cli\u003ekeep the goal scope small: not \u0026ldquo;cancer is an sickness that affects a lot of people\u0026rdquo;, but \u0026ldquo;this specific gene is correlated with cancer prognosis\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ejustify why you are using AML tools!\u003c/strong\u003e if the relationship you are modeling is a line, deep learning is way overkill\u003c/li\u003e\n\u003cli\u003esummarize previous work: if anyone did it before you, what approach did they do; why? Are you/why are you doing anything difference (i.e. why do you believe methods to be not as good?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"methods\"\u003emethods\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: How did you do your study?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003egive as much information for a peer (i.e. 
anyone in our class/familiar with ML) to be able to \u003cstrong\u003ereproduce your entire study\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003egood to think about\u0026hellip;\n\u003cul\u003e\n\u003cli\u003edata sourcing\u003c/li\u003e\n\u003cli\u003e\u003cem\u003edetailed\u003c/em\u003e notes on data preparation and feature engineering (bonus points for code)\u003c/li\u003e\n\u003cli\u003emodel selection + motivation (should be broadly given in the prev. section already)\u003c/li\u003e\n\u003cli\u003emodel implementation (layer counts, activations, seeds)\u003c/li\u003e\n\u003cli\u003e(hyper)parameters (LR, batch size, epoch, optimizer momentum/beta, layer seeds) \u0026mdash; saying \u0026ldquo;default\u0026rdquo; here is fine but be specific about what you are leaving to default\u003c/li\u003e\n\u003cli\u003etraining environment (hardware, library versions, time it took, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlso, from a class standpoint, we want to see your hard work in actually practicing the skills we are learning!\u003c/p\u003e\n\u003ch3 id=\"results-data\"\u003eresults/data\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Why should your peer believe you did what you said you did?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emotivate validation metrics used (i.e. why is success in this validation metric a measurement by proxy of success in the stated problem in the intro?)\u003c/li\u003e\n\u003cli\u003ereport the clear details the withholding scheme used\u0026mdash;simple train/val split? k-fold? 
leave-one-out?\u003c/li\u003e\n\u003cli\u003epresent in \u003cstrong\u003egraphical or tabular form!\u003c/strong\u003e your clear key takeaways; in general, keep words in this section to a minimum\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;these distributions look visually different!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;these lines are definitely parallel!\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;this number is definitely larger than this other number!\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDuring the process of \u0026ldquo;NTJ\u0026rdquo;, a paper reading methodology taught by the XRT lab, the skill of jumping abstract =\u0026gt; data (\u0026ldquo;figures\u0026rdquo;) =\u0026gt; takeaways (\u0026ldquo;novelty\u0026rdquo;) is greatly emphasized. Usually, the best papers will represent their key takeaways clearly and graphically in this section, so that the reader only need to go into the methods section strictly when needed to reproduce or clarify questions.\u003c/p\u003e\n\u003ch3 id=\"conclusion-discussion\"\u003econclusion/discussion\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Summarize.\u003c/p\u003e\n\u003cp\u003eIt is often good to include future work here as well as well as fascinating extensions of your choosing. This section differs from the abstract in both the inclusion of future work, as well as its audience: while the abstract need only to be crystal clear for your peers, the conclusion should be clear to everyone in the \u003cem\u003efield\u003c/em\u003e \u0026mdash; so redefinition of paper-specific jargon, etc.\u003c/p\u003e\n\u003ch3 id=\"ethics\"\u003eethics\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eGuiding Questions\u003c/em\u003e: Where did your data come from; why is its collection and \u003cstrong\u003eprocessing\u003c/strong\u003e (they are independent permissions!) legal and ethical? 
Why are you not breaking the world?\u003c/p\u003e\n\u003cp\u003eSee: \u003ca href=\"https://medium.com/@GovAI/a-guide-to-writing-the-neurips-impact-statement-4293b723f832\"\u003ethis Medium article\u003c/a\u003e for more!\u003c/p\u003e\n\u003ch2 id=\"from-the-experts\"\u003efrom the experts\u003c/h2\u003e\n\u003cp\u003eNIPS (leading ML conference) rubric for a paper (\u003ca href=\"https://nips.cc/Conferences/2020/PaperInformation/ReviewerGuidelines\"\u003ejump to \u0026ldquo;Review Content\u0026rdquo; section\u003c/a\u003e)\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eKameswari, Lalitha, Dama Sravani, and Radhika Mamidi. 2020. “Enhancing Bias Detection in Political News Using Pragmatic Presupposition.” In \u003ci\u003eProceedings of the Eighth International Workshop on Natural Language Processing for Social Media\u003c/i\u003e, nil. doi:\u003ca href=\"https://doi.org/10.18653/v1/2020.socialnlp-1.1\"\u003e10.18653/v1/2020.socialnlp-1.1\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaml_your_first_article/","tags":null,"title":"AML: Your First Article"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhanatomy/","tags":null,"title":"anatomy"},{"categories":null,"contents":"anatomy learning is the learning of anatomy.\nAnatomy information acquired prior to medical school has a positive correlation in medical school outcomes. 
Also leveraging anatomy information.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhanatomy_learning/\"\u003eanatomy learning\u003c/a\u003e is the learning of \u003ca href=\"/posts/kbhanatomy/\"\u003eanatomy\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhanatomy/\"\u003eAnatomy\u003c/a\u003e information acquired prior to medical school has a positive correlation in medical school outcomes. Also leveraging anatomy information.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanatomy_learning/","tags":null,"title":"anatomy learning"},{"categories":null,"contents":"ANCA-AE is a tool to use deep learning to take tedrehedral tessilated results + Finite Difference Method + [magical machine learning] to figure the\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhanca_ae/\"\u003eANCA-AE\u003c/a\u003e is a tool to use deep learning to take tedrehedral tessilated results + \u003ca href=\"/posts/kbhfinite_difference_method/\"\u003eFinite Difference Method\u003c/a\u003e + [magical machine learning] to figure the\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanca_ae/","tags":null,"title":"ANCA-AE"},{"categories":null,"contents":"Angelman Syndrome is a syndrome is ~1 in 15000, clinically recognizable, developmental delay syndrome.\ncause of Angelman Syndrome Angelman Syndrome is primarily caused by the UBE3A and the ubiquitin proteasome system. 
Poly-ubiquitin chain asks to discard cells.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e is a syndrome is ~1 in 15000, clinically recognizable, developmental delay syndrome.\u003c/p\u003e\n\u003ch2 id=\"cause-of-angelman-syndrome--kbhangelman-syndrome-dot-md\"\u003ecause of \u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e is primarily caused by the \u003ca href=\"\"\u003eUBE3A\u003c/a\u003e and the \u003ca href=\"\"\u003eubiquitin proteasome system.\u003c/a\u003e Poly-\u003ca href=\"\"\u003eubiquitin\u003c/a\u003e chain asks to discard cells.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhangelman_syndrome/","tags":null,"title":"Angelman Syndrome"},{"categories":null,"contents":"Need-finding conversation Main idea: testing?\u0026mdash;pregnancy testing and COVID testing\ntalking to longer-scope challenges in visually impaired community Navigation; transportation Cannot see markers on smaller steps; trying to find an uber drive and cannot reorient ","html":"\u003ch2 id=\"need-finding-conversation\"\u003eNeed-finding conversation\u003c/h2\u003e\n\u003cp\u003eMain idea: testing?\u0026mdash;pregnancy testing and COVID testing\u003c/p\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003etalking to longer-scope challenges in visually impaired community\u003c/li\u003e\n\u003cli\u003eNavigation; transportation\n\u003cul\u003e\n\u003cli\u003eCannot see markers on smaller steps; trying to find an uber drive and cannot reorient\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanna_s_team_checkin/","tags":null,"title":"Anna's Team 
Checkin"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhanotehuaoeu/","tags":null,"title":"anotehuaoeu"},{"categories":null,"contents":"Anoushka is a student at Nueva, also the host of Project80, among other things.\n","html":"\u003cp\u003eAnoushka is a student at Nueva, also the host of \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e, among other things.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhanoushka_krishnan/","tags":null,"title":"Anoushka Krishnan"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhanthony_badger/","tags":null,"title":"Anthony Badger"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2020.607449\nOne-Liner oral lexical retrieval works better than qualitative narrative analysis to classify dementia; and semantic fluency + Disfluency features chucked on an SVM returns pretty good results.\nNovelty Tried two different assays of measuring linguistic ability: oral lexical retrieval metrics, and qualitative discourse features analysis of speech.\nNotable Methods Subjects divided into three groups\nGreat cog. decline Impaired but stable Healthy controls Administered BNT and SVF tests as baseline\nKey Figs Table 3 This figure tells us that the percentages of unrelated utterances was a statistically significant metric to figure differences between the three experimental groups.\n(CD, CS, HC: cognitive decline, cognitively stable (but declining normally), healthy control)\n(no other items are bolded)\nTable 4 This figure tells us the disfluency features analyzed. 
None of them were independently statistically significant.\nTable 5 This figure tells us that analyzing Semantic Verbal Fluency, plus the information of disfluency, trained on an SVM, actually shows \u0026gt;90% recall value?\nNew Concepts Discourse-Completion Task oral lexical retrieval discourse features modalization Semantic Verbal Fluency Boston Naming Test ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2020.607449\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e works better than qualitative narrative analysis to classify dementia; and semantic fluency + Disfluency features chucked on an SVM returns pretty good results.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eTried two different assays of measuring linguistic ability: \u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e metrics, and qualitative \u003ca href=\"/posts/kbhdiscourse_features/\"\u003ediscourse features\u003c/a\u003e analysis of speech.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSubjects divided into three groups\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGreat cog. 
decline\u003c/li\u003e\n\u003cli\u003eImpaired but stable\u003c/li\u003e\n\u003cli\u003eHealthy controls\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAdministered \u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBNT\u003c/a\u003e and \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e tests as baseline\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-23-08_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"table-3\"\u003eTable 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-02-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us that the percentages of unrelated utterances was a \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003estatistically significant\u003c/a\u003e metric to figure differences between the three experimental groups.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-02-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(\u003cstrong\u003e\u003cstrong\u003eCD\u003c/strong\u003e\u003c/strong\u003e, \u003cstrong\u003e\u003cstrong\u003eCS\u003c/strong\u003e\u003c/strong\u003e, \u003cstrong\u003e\u003cstrong\u003eHC\u003c/strong\u003e\u003c/strong\u003e: cognitive decline, cognitively stable (but declining normally), healthy control)\u003c/p\u003e\n\u003cp\u003e(no other items are bolded)\u003c/p\u003e\n\u003ch3 id=\"table-4\"\u003eTable 4\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-15-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the disfluency features analyzed. 
None of them were independently \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003estatistically significant\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"table-5\"\u003eTable 5\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-17-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us that analyzing \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e, plus the information of disfluency, trained on an SVM, actually shows \u0026gt;90% recall value?\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscourse_features/\"\u003ediscourse features\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodalization/\"\u003emodalization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBoston Naming Test\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhantonsson_2021/","tags":["ntj"],"title":"Antonsson 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhany_name_here/","tags":null,"title":"any name here"},{"categories":null,"contents":"Big picture: combining off-line and on-line approaches maybe the best way to tackle large POMDPs.\nTry planning:\nonly where we are only where we can reach Take into account three factors:\nuncertainty in the value function reachability from the current belief actions that are likely optimal It allows policy improvement on any base 
policy.\nSetup Discrete POMDPs:\n\\(L\\), lower bound \\(U\\), upper-bound \\(b_0\\): current belief Two main phases: the algorithm\nPlanning Phase at each belief point, choose a particular next node to expand (using the scheme below to score the nodes) expand that next node that are chosen propagate the value of the belief upwards through POMDP Bellman Backup up through the tree Best Node Selection We select the best node by three metrics:\nUncertainty: \\(\\epsilon(b) = U(b)-L(b)\\) we want small gap between upper and lower bound Optimality in actions: AEMS1: \\(p(a|b) = \\frac{U(a,b)-L(b)}{U(a^{*}, b)-L(b)}\\) (\u0026ldquo;what\u0026rsquo;s the relative optimality of our action, compared to best action\u0026rdquo;) AEMS2: \\(p(a|b)\\) = \\(1\\) if \\(a=A^{*}\\), \\(0\\) otherwise. (\u0026ldquo;just take best action\u0026rdquo;) Reachability: \\(p(b) = \\prod_{i=0}^{d} P(o^{(i)} | b^{(i)}, a^{(i)}) p(a^{(i)}|b^{(i)}})\\), where small \\(p\\) is either AIMS 1 or 2 above, where \\(a\\) comes from the best action conditional plan that came so far Combining the metrics gives:\n\\begin{equation} E(b) = \\gamma P(b) \\epsilon(b) \\end{equation}\nExecution execute the best action at \\(b_0\\) Perceive a new observation \\(b_0 \\leftarrow update(b_0,a,o)\\) ","html":"\u003cp\u003eBig picture: \u003cstrong\u003ecombining off-line and on-line approaches\u003c/strong\u003e maybe the best way to tackle large POMDPs.\u003c/p\u003e\n\u003cp\u003eTry planning:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eonly where we are\u003c/li\u003e\n\u003cli\u003eonly where we can reach\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTake into account three factors:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003euncertainty in the value function\u003c/li\u003e\n\u003cli\u003ereachability from the current belief\u003c/li\u003e\n\u003cli\u003eactions that are likely optimal\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIt allows policy improvement on any base policy.\u003c/p\u003e\n\u003ch2 
id=\"setup\"\u003eSetup\u003c/h2\u003e\n\u003cp\u003eDiscrete POMDPs:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(L\\), lower bound\u003c/li\u003e\n\u003cli\u003e\\(U\\), upper-bound\u003c/li\u003e\n\u003cli\u003e\\(b_0\\): current belief\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTwo main phases: the algorithm\u003c/p\u003e\n\u003ch3 id=\"planning-phase\"\u003ePlanning Phase\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eat each belief point, choose a particular next node to expand (using the scheme below to score the nodes)\u003c/li\u003e\n\u003cli\u003eexpand that next node that are chosen\u003c/li\u003e\n\u003cli\u003epropagate the value of the belief upwards through \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Backup\u003c/a\u003e up through the tree\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"best-node-selection\"\u003eBest Node Selection\u003c/h4\u003e\n\u003cp\u003eWe select the best node by three metrics:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eUncertainty: \\(\\epsilon(b) = U(b)-L(b)\\) we want small gap between upper and lower bound\u003c/li\u003e\n\u003cli\u003eOptimality in actions:\n\u003cul\u003e\n\u003cli\u003eAEMS1: \\(p(a|b) = \\frac{U(a,b)-L(b)}{U(a^{*}, b)-L(b)}\\) (\u0026ldquo;what\u0026rsquo;s the relative optimality of our action, compared to best action\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eAEMS2: \\(p(a|b)\\) = \\(1\\) if \\(a=A^{*}\\), \\(0\\) otherwise. 
(\u0026ldquo;just take best action\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eReachability: \\(p(b) = \\prod_{i=0}^{d} P(o^{(i)} | b^{(i)}, a^{(i)}) p(a^{(i)}|b^{(i)}})\\), where small \\(p\\) is either AIMS 1 or 2 above, where \\(a\\) comes from the best action conditional plan that came so far\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eCombining the metrics gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE(b) = \\gamma P(b) \\epsilon(b)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"execution\"\u003eExecution\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eexecute the best action at \\(b_0\\)\u003c/li\u003e\n\u003cli\u003ePerceive a new observation\u003c/li\u003e\n\u003cli\u003e\\(b_0 \\leftarrow update(b_0,a,o)\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaems/","tags":null,"title":"Anytime Error Minimization Search"},{"categories":null,"contents":"eansoetuhaosneu\n","html":"\u003cp\u003eeansoetuhaosneu\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaosneuhasoneuh/","tags":null,"title":"aosneuhasoneuh"},{"categories":null,"contents":"Other Factoids charged flux Chapters coulomb\u0026rsquo;s law superposition electric field Gauss\u0026rsquo; Law electric potential current Ohm\u0026rsquo;s Law resistor kirchoff\u0026rsquo;s laws Capacitor Dynamic RC Circuits magnetism faraday\u0026rsquo;s law Things to do AP Phys C EM Things to Do\n","html":"\u003ch2 id=\"other-factoids\"\u003eOther Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcharged/\"\u003echarged\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhflux/\"\u003eflux\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"chapters\"\u003eChapters\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003ecoulomb\u0026rsquo;s law\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhcoulomb_s_law/#superposition\"\u003esuperposition\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhohm_s_law/\"\u003eOhm\u0026rsquo;s Law\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhresistors/\"\u003eresistor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhkirchoff_s_laws/\"\u003ekirchoff\u0026rsquo;s laws\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcapacitor/\"\u003eCapacitor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdynamic_rc_circuts/\"\u003eDynamic RC Circuits\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmagnetism/\"\u003emagnetism\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfaraday_s_law/\"\u003efaraday\u0026rsquo;s law\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things-to-do\"\u003eThings to do\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhap_phys_c_em_things_to_do/\"\u003eAP Phys C EM Things to Do\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhap_phys_c_em_index/","tags":["index"],"title":"AP Phys C EM Index"},{"categories":null,"contents":" Review all the names of units, and their SI conversions Review all eqns of time constants \u0026ldquo;Amperian Loop\u0026rdquo; ","html":"\u003cul\u003e\n\u003cli\u003eReview all the names of units, and their SI conversions\u003c/li\u003e\n\u003cli\u003eReview all eqns of time constants\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Amperian 
Loop\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-07_18-06-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhap_phys_c_em_things_to_do/","tags":null,"title":"AP Phys C EM Things to Do"},{"categories":null,"contents":"AP Phys C Mech is an examination held by the CollegeBoard in mechanics.\nThings to Study Permittivity of free space Impulse Springs! In general. Perhaps review old notes. How to be faster? Kepler\u0026rsquo;s Laws of Planetary Motion\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhap_phys_c_mech_index/\"\u003eAP Phys C Mech\u003c/a\u003e is an examination held by the \u003ca href=\"/posts/kbhcollegeboard/\"\u003eCollegeBoard\u003c/a\u003e in mechanics.\u003c/p\u003e\n\u003ch2 id=\"things-to-study\"\u003eThings to Study\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePermittivity of free space\u003c/li\u003e\n\u003cli\u003eImpulse\u003c/li\u003e\n\u003cli\u003eSprings! In general. 
Perhaps \u003ca href=\"https://www.notion.so/shabangsystems/013cd5fdedda491b86eb45eb139813a5?v=0449600634bd485fbf0f6f7b8a0833a3\"\u003ereview old notes\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eHow to be faster?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhkepler_s_laws_of_planetary_motion/\"\u003eKepler\u0026rsquo;s Laws of Planetary Motion\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhap_phys_c_mech_index/","tags":["index"],"title":"AP Phys C Mech Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhap_physi/","tags":null,"title":"ap physi"},{"categories":null,"contents":"AP Statistics is an examination by the CollegeBoard.\nSee also crap to remember for AP Stats\nNon-Focus Mistakes file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf Interpretation of regression outputs Backlog Chi-square file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf file:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf Notes confidence interval hypothesis testing t-statistics chi-square data inference binomial distribution ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e is an examination by the \u003ca href=\"/posts/kbhcollegeboard/\"\u003eCollegeBoard\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhcrap_to_remember_for_ap_stats/\"\u003ecrap to remember for AP Stats\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"non-focus-mistakes\"\u003eNon-Focus Mistakes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The 
Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003eInterpretation of regression outputs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"backlog\"\u003eBacklog\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eChi-square\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003cli\u003efile:///Users/houliu/Documents/School Work/The Bible/APStats/APStats5Steps.pdf\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconfidence_interval/\"\u003econfidence interval\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_statistics/\"\u003et-statistics\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdata_inference/\"\u003edata inference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapstats/","tags":["index"],"title":"AP Statistics Index"},{"categories":null,"contents":"Show that:\n\\begin{equation} \\dv t e^{tA} = e^{tA}A \\end{equation}\nWe can apply the result we shown in eigenvalue:\n\\begin{equation} \\dv t \\qty(e^{tA}) = \\dv t \\qty(I + \\sum_{k=1}^{\\infty} \\frac{t^{k}}{k!}A^{k}) = \\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A \\end{equation}\nWe do this separation because \\(k=0\\) would\u0026rsquo;t make sense to raise \\(A\\) (\\(k-1=-1\\)) to as we are unsure about the invertability of \\(A\\). Obviously \\(\\frac{1}{k!}k = \\frac{1}{(k-1)!}\\). 
Therefore, we can shift our index back yet again:\n\\begin{equation} \\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A = \\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A \\end{equation}\nAwesome. So now we have the taylor series in \\(e^{tA}\\) back, times \\(A\\).\nSo therefore:\n\\begin{equation} \\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A = e^{tA}A \\end{equation}\nBe forewarned:\n\\begin{equation} e^{A}e^{B} \\neq e^{A+B} \\end{equation}\nmostly because matrix multiplication is not commutative..\n","html":"\u003cp\u003eShow that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t e^{tA} = e^{tA}A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can apply the result we shown in \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\qty(e^{tA}) = \\dv t \\qty(I + \\sum_{k=1}^{\\infty} \\frac{t^{k}}{k!}A^{k}) = \\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe do this separation because \\(k=0\\) would\u0026rsquo;t make sense to raise \\(A\\) (\\(k-1=-1\\)) to as we are unsure about the \u003ca href=\"/posts/kbhmatricies/#invertability\"\u003einvertability\u003c/a\u003e of \\(A\\). Obviously \\(\\frac{1}{k!}k = \\frac{1}{(k-1)!}\\). Therefore, we can shift our index back yet again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty(\\sum_{k=1}^{\\infty }\\frac{1}{k!}kt^{k-1}A^{k-1})A = \\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAwesome. 
So now we have the taylor series in \\(e^{tA}\\) back, times \\(A\\).\u003c/p\u003e\n\u003cp\u003eSo therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty(\\sum_{j=0}^{\\infty }\\frac{1}{j!}t^{j}A^{j})A = e^{tA}A\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eBe forewarned:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{A}e^{B} \\neq e^{A+B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emostly because matrix multiplication is not commutative..\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapplying_eigenspace/","tags":null,"title":"applying eigenspace"},{"categories":null,"contents":"Direct Sampling Direct Sampling is an approximate inference method where we pull samples from the given joint probability distribution.\nExample Suppose we are interested in:\nwhere we dare \\(P(B^{1}|D^{1},C^{1})\\).\nStep 1: sort We obtain a topological sort of this network:\n\\begin{equation} B, S, E, D, C \\end{equation}\nStep 2: sample from \\(B,S\\) We sample \\(B\\). We sampled that \\(B=1\\) today. We sample \\(S\\). We sampled that \\(S=0\\) today. Step 3: sample from \\(E\\) We sample \\(E\\) GIVEN what we already sampled, that \\(B=1, S=0\\), we sampled that that \\(E = 1\\) Step 4: sample from \\(D, C\\) We sample \\(D\\) given that \\(E=1\\) as we sampled. We sample \\(C\\) given that \\(E=1\\) as we sampled. Repeat Repeat steps 2-4\nStep n: Analyze B S E D C 1 0 1 0 1 0 1 1 0 0 1 1 1 1 0 0 0 1 1 0 1 0 1 1 1 We desire to know \\(P(b^{1}|d^{1}, c^{1})\\). Looks like, given this table, it would be \\(100\\%\\).\nLikelihood Weighted Sampling Likelihood Weighted Sampling is a sampling approach whereby you force values that you wont, and then weight the results by the chance of it happening.\nThis is super useful when our envidence is unlikely.\nExample Suppose again you are interested in \\(P(b^{1}|d^{1}, c^{1})\\). 
In this case, we only sample \\(B,S,E\\):\nB S E 0 1 0 1 0 1 Now, for each of these results, we the compute the chance of our priors happening given the samples.\nRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})\\) Row 2: \\(p(d^{1}|e^{1})p(c^{1}|e^{1})\\) Let\u0026rsquo;s say:\nRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})=0.3\\) Row 2: \\(p(d^{1}|e^{1})p(c^{1}|e^{1})=0.9\\) Finally, to compute \\(p(b^{1}|d^{1}c^{1})\\):\n\\begin{equation} \\frac{0.9}{0.9+0.3} \\end{equation}\nbecause only row \\(2\\) fit with our expectations.\n","html":"\u003ch2 id=\"direct-sampling--kbhdirect-sampling-dot-md\"\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e is an \u003ca href=\"/posts/kbhapproximate_inference/\"\u003eapproximate inference\u003c/a\u003e method where we pull samples from the given \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eSuppose we are interested in:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_09-19-51_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere we dare \\(P(B^{1}|D^{1},C^{1})\\).\u003c/p\u003e\n\u003ch4 id=\"step-1-sort\"\u003eStep 1: sort\u003c/h4\u003e\n\u003cp\u003eWe obtain a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e of this network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB, S, E, D, C\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"step-2-sample-from-b-s\"\u003eStep 2: sample from \\(B,S\\)\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eWe sample \\(B\\). We sampled that \\(B=1\\) today.\u003c/li\u003e\n\u003cli\u003eWe sample \\(S\\). 
We sampled that \\(S=0\\) today.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"step-3-sample-from-e\"\u003eStep 3: sample from \\(E\\)\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eWe sample \\(E\\) \u003cstrong\u003eGIVEN\u003c/strong\u003e what we already sampled, that \\(B=1, S=0\\), we sampled that that \\(E = 1\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"step-4-sample-from-d-c\"\u003eStep 4: sample from \\(D, C\\)\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eWe sample \\(D\\) given that \\(E=1\\) as we sampled.\u003c/li\u003e\n\u003cli\u003eWe sample \\(C\\) given that \\(E=1\\) as we sampled.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"repeat\"\u003eRepeat\u003c/h4\u003e\n\u003cp\u003eRepeat steps 2-4\u003c/p\u003e\n\u003ch4 id=\"step-n-analyze\"\u003eStep n: Analyze\u003c/h4\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eB\u003c/th\u003e\n\u003cth\u003eS\u003c/th\u003e\n\u003cth\u003eE\u003c/th\u003e\n\u003cth\u003eD\u003c/th\u003e\n\u003cth\u003eC\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u0
03ctd\u003e1\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe desire to know \\(P(b^{1}|d^{1}, c^{1})\\). Looks like, given this table, it would be \\(100\\%\\).\u003c/p\u003e\n\u003ch2 id=\"likelihood-weighted-sampling--kbhdirect-sampling-dot-md\"\u003e\u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e is a sampling approach whereby you force values that you wont, and then weight the results by the chance of it happening.\u003c/p\u003e\n\u003cp\u003eThis is \u003cstrong\u003esuper useful\u003c/strong\u003e when our envidence is unlikely.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eSuppose again you are interested in \\(P(b^{1}|d^{1}, c^{1})\\). In this case, we only sample \\(B,S,E\\):\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eB\u003c/th\u003e\n\u003cth\u003eS\u003c/th\u003e\n\u003cth\u003eE\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNow, for each of these results, we the compute the chance of our priors happening given the samples.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})\\)\u003c/li\u003e\n\u003cli\u003eRow 2: \\(p(d^{1}|e^{1})p(c^{1}|e^{1})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet\u0026rsquo;s say:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRow 1: \\(p(d^{1}|e^{0})p(c^{1}|e^{0})=0.3\\)\u003c/li\u003e\n\u003cli\u003eRow 2: 
\\(p(d^{1}|e^{1})p(c^{1}|e^{1})=0.9\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFinally, to compute \\(p(b^{1}|d^{1}c^{1})\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{0.9}{0.9+0.3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause only row \\(2\\) fit with our expectations.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapproximate_inference/","tags":null,"title":"approximate inference"},{"categories":null,"contents":"How do we deal with Markov Decision Process solution with continuous state space?\nLet there be a value function parameterized on \\(\\theta\\):\n\\begin{equation} U_{\\theta}(s) \\end{equation}\nLet us find the value-function policy of this utility:\n\\begin{equation} \\pi(s) = \\arg\\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U_{\\theta}(s\u0026rsquo;)) \\end{equation}\nWe now create a finite sampling of our state space, which maybe infinitely large (for instance, continuous):\n\\begin{equation} S \\in \\mathcal{S} \\end{equation}\nwhere, \\(S\\) is a set of discrete states \\(\\{s_1, \\dots, s_{m}\\}\\).\nNow, what next?\ngenerally: Loop until convergence:\nInitialize \\(u_{\\theta}\\) For all \\(s_{i} \\in S\\), let \\(u_{i} = \\max_{a} R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) u_{\\theta}(s\u0026rsquo;)\\), the utility at those discrete state samples \\(s_{i}\\) Then, fit a \\(\\theta\\) so that \\(U_{\\theta}(s_{i})\\) is close to \\(u_{i}\\) to get \\(T\\): get a finite sampling of next states, or fit a function to it.\nBUT: Convergence is not guaranteed.\nThere are two main specific approaches to achieve this:\nglobal approximation linreg a best-fit line of state value vs. utility value polynomial fit a best-fit line, whereby \\(U_{\\theta}(s) = \\theta^{T}\\beta(s)\\), where each \\(\\beta_{j}(s)=s^{j-1}\\). 
a frigin neural network (train a model with parameters \\(\\theta\\) which produces the utility calculations for you \\(M_{\\theta}(s) = U_{\\theta}(s)\\)) local approximation make a sampling in your continuous state space to discretized it do any utility function thing you\u0026rsquo;d like (policy evaluation or value iteration) to get some set of \\(\\theta_{i}\\), which is the utility for being in each sampled discrete state \\(s_{i}\\) whenever you need to calculate \\(U(s)\\) of a particular state\u0026hellip; linearly interpolate k nearest neighbor kernel smoothing ","html":"\u003cp\u003eHow do we deal with \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e solution with \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state space?\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet there be a \u003ca href=\"/posts/kbhaction_value_function/\"\u003evalue function\u003c/a\u003e \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003eized on \\(\\theta\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{\\theta}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us find the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e of this utility:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(s) = \\arg\\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U_{\\theta}(s\u0026rsquo;))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now create a finite sampling of our state space, which maybe infinitely large (for instance, \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS \\in \\mathcal{S}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(S\\) is a set of discrete states \\(\\{s_1, \\dots, s_{m}\\}\\).\u003c/p\u003e\n\u003cp\u003eNow, what next?\u003c/p\u003e\n\u003ch2 
id=\"generally\"\u003egenerally:\u003c/h2\u003e\n\u003cp\u003eLoop until convergence:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInitialize \\(u_{\\theta}\\)\u003c/li\u003e\n\u003cli\u003eFor all \\(s_{i} \\in S\\), let \\(u_{i} = \\max_{a} R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) u_{\\theta}(s\u0026rsquo;)\\), the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e at those discrete state samples \\(s_{i}\\)\u003c/li\u003e\n\u003cli\u003eThen, fit a \\(\\theta\\) so that \\(U_{\\theta}(s_{i})\\) is close to \\(u_{i}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eto get \\(T\\)\u003c/strong\u003e: get a finite sampling of next states, or fit a function to it.\u003c/p\u003e\n\u003cp\u003eBUT: \u003cstrong\u003eConvergence is not guaranteed.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThere are two main specific approaches to achieve this:\u003c/p\u003e\n\u003ch2 id=\"global-approximation\"\u003eglobal approximation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elinreg a best-fit line of state value vs. 
\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e value\n\u003cul\u003e\n\u003cli\u003epolynomial fit a best-fit line, whereby \\(U_{\\theta}(s) = \\theta^{T}\\beta(s)\\), where each \\(\\beta_{j}(s)=s^{j-1}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ea frigin neural network (train a model with parameters \\(\\theta\\) which produces the utility calculations for you \\(M_{\\theta}(s) = U_{\\theta}(s)\\))\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"local-approximation\"\u003elocal approximation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emake a sampling in your \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state space to discretized it\u003c/li\u003e\n\u003cli\u003edo any \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e thing you\u0026rsquo;d like (\u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e) to get some set of \\(\\theta_{i}\\), which is the utility for being in each sampled discrete state \\(s_{i}\\)\u003c/li\u003e\n\u003cli\u003ewhenever you need to calculate \\(U(s)\\) of a particular state\u0026hellip;\n\u003cul\u003e\n\u003cli\u003elinearly interpolate\u003c/li\u003e\n\u003cli\u003ek nearest neighbor\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel smoothing\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapproximate_value_function/","tags":null,"title":"Approximate Value Function"},{"categories":null,"contents":"If we take entangled qubits, and separate them real far away, their behavior would be the same even despite it will take longer for light to travel.\n","html":"\u003cp\u003eIf we take \u003ca href=\"/posts/kbhentangled/\"\u003eentangled\u003c/a\u003e \u003ca 
href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003es, and separate them real far away, their behavior would be the same even despite it will take longer for light to travel.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhapr_paradox/","tags":null,"title":"APR Paradox"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhaps/","tags":null,"title":"APS"},{"categories":null,"contents":"Background In the 60s, economists that the pricing of options were independent of pricing of underlying assets. Nowadays, we can see that, if the underlying assets were obeying of a Brownian Motion, there is no additional degree of freedom that options can bring: that knowing the stocks will tell you exactly through a DiffEQ how the option will evolve.\nThe idea, then, is that you can replicate options: by dynamically buying and selling pairs of securities in the same way as the option, your new portfolio can track the option exactly.\nOf course, there is a certain amount of volatility associated with Brownian Motion markets.\nUnfortunately, there is no one fixed volatility which can be used to model all options; you can fit a volatility given all strike prices\u0026mdash;creating an implied volatility surface.\nOtherwise, you can also model volatility as a random variable, a stochastic process modeled by stochastic volatility.\nReading pg 350-352: diffusion are described by stochastic differential equations Option Pricing A Vanilla Call Given some current price \\(S\\), option price \\(K\\), time to maturity \\(T\\); the payoff increases linearly after the option matures. How much should the option be changed for the right to buy the option after \\(T\\) days?\nWe can use the option info to calculate the implied volatility.\n","html":"\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eIn the 60s, economists that the pricing of options were independent of pricing of underlying assets. 
Nowadays, we can see that, if the underlying assets were obeying of a Brownian Motion, there is no additional degree of freedom that options can bring: that knowing the stocks will tell you exactly through a \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDiffEQ\u003c/a\u003e how the option will evolve.\u003c/p\u003e\n\u003cp\u003eThe idea, then, is that you can replicate options: by dynamically buying and selling pairs of securities in the same way as the option, your new portfolio can track the option exactly.\u003c/p\u003e\n\u003cp\u003eOf course, there is a certain amount of volatility associated with \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e markets.\u003c/p\u003e\n\u003cp\u003eUnfortunately, there is no one fixed volatility which can be used to model all options; you can fit a volatility given all strike prices\u0026mdash;creating an implied \u003cstrong\u003evolatility surface\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eOtherwise, you can also model volatility as a random variable, a stochastic process modeled by \u003cstrong\u003estochastic volatility\u003c/strong\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"reading\"\u003eReading\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003epg 350-352: diffusion are described by stochastic differential equations\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"option-pricing\"\u003eOption Pricing\u003c/h2\u003e\n\u003ch3 id=\"a-vanilla-call\"\u003eA Vanilla Call\u003c/h3\u003e\n\u003cp\u003eGiven some current price \\(S\\), option price \\(K\\), time to maturity \\(T\\); the payoff increases linearly after the option matures. 
How much should the option be changed for the right to buy the option after \\(T\\) days?\u003c/p\u003e\n\u003cp\u003eWe can use the option info to calculate the implied volatility.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbharbitrage_pricing/","tags":null,"title":"Arbitrage Pricing"},{"categories":null,"contents":"function that returns the input that maximizes the expression.\nfinding argmax direct optimization Typical maximization system. Take derivative, set it to 0, solve, plug in, solve. THis is pretty bad during times are not differentiable.\ngradient ascent We take steps following the direction\n\\begin{equation} \\theta_{1j} = \\theta_{0j} + \\eta \\pdv{LL(\\theta_{0})}{\\theta_{0j}} \\end{equation}\nadditional information argmax of log see argmax of log\n","html":"\u003cp\u003efunction that returns the input that maximizes the expression.\u003c/p\u003e\n\u003ch2 id=\"finding-argmax\"\u003efinding argmax\u003c/h2\u003e\n\u003ch3 id=\"direct-optimization--kbhoptimization-dot-md\"\u003edirect \u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eTypical maximization system. Take derivative, set it to 0, solve, plug in, solve. 
THis is pretty bad during times are not differentiable.\u003c/p\u003e\n\u003ch3 id=\"gradient-ascent\"\u003egradient ascent\u003c/h3\u003e\n\u003cp\u003eWe take steps following the direction\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{1j} = \\theta_{0j} + \\eta \\pdv{LL(\\theta_{0})}{\\theta_{0j}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"argmax-of-log--kbhmaximum-likelihood-parameter-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/#argmax-of-log\"\u003eargmax of log\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/#argmax-of-log\"\u003eargmax of log\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhargmax/","tags":null,"title":"argmax"},{"categories":null,"contents":" When you make an array, you are making space for each element When you create a pointer, you are making space for 64 bit address arrays \u0026ldquo;decay to pointers\u0026rdquo;: when you identify an array by name, you are sharing the location of the leading element \u0026amp;arr gets an address to the FIRST element \u0026mdash; don\u0026rsquo;t do this, \u0026amp;ptr gets the pointers\u0026rsquo; address Array is a special type that represent a segment of contiguously allocated memory. You can\u0026rsquo;t reassign an array to be equal to a new array.\nint nums[] = {1, 2, 3}; int nums2[] = {4, 5, 6}; nums and nums2 has no memory set aside. Calling sizeof() on an array gets the length of the array; calling sizeof() on the pointer it decays to will get the word size. Therefore, when we pass an array to a function will require you giving the size to the function as well.\nTo get a pointer to the beginning of an array, write \u0026amp;str[0]. 
NEVER EVER write \u0026amp;str, even if its the same thing: the latter sounds like you are getting the address of an array which doesn\u0026rsquo;t exist.\nPointer Arithmetic If you add/subtract values to a pointer, the value you add/subtract gets scaled by the size of type so you can always add/subtract as needed.\n","html":"\u003cul\u003e\n\u003cli\u003eWhen you make an \u003ca href=\"/posts/kbharray/\"\u003earray\u003c/a\u003e, you are making space for each element\u003c/li\u003e\n\u003cli\u003eWhen you create a pointer, you are making space for 64 bit address\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharray/\"\u003earray\u003c/a\u003es \u0026ldquo;decay to pointers\u0026rdquo;: when you identify an array by name, you are sharing the location of the leading element\u003c/li\u003e\n\u003cli\u003e\u0026amp;arr gets an address to the FIRST element \u0026mdash; don\u0026rsquo;t do this, \u0026amp;ptr gets the pointers\u0026rsquo; address\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eArray is a special type that represent a segment of contiguously allocated memory. 
You can\u0026rsquo;t reassign an array to be equal to a new array.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enums\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enums2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ccode\u003enums\u003c/code\u003e and \u003ccode\u003enums2\u003c/code\u003e has no memory set aside. 
Calling \u003ccode\u003esizeof()\u003c/code\u003e on an array gets the length of the array; calling \u003ccode\u003esizeof()\u003c/code\u003e on the pointer it decays to will get the word size. Therefore, when we pass an array to a function will require you giving the size to the function as well.\u003c/p\u003e\n\u003cp\u003eTo get a pointer to the beginning of an array, write \u003ccode\u003e\u0026amp;str[0]\u003c/code\u003e. NEVER EVER write \u003ccode\u003e\u0026amp;str\u003c/code\u003e, even if its the same thing: the latter sounds like you are getting the address of an array which doesn\u0026rsquo;t exist.\u003c/p\u003e\n\u003ch2 id=\"pointer-arithmetic\"\u003ePointer Arithmetic\u003c/h2\u003e\n\u003cp\u003eIf you add/subtract values to a pointer, the value you add/subtract gets scaled by the size of type so you can always add/subtract as needed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbharray/","tags":null,"title":"array"},{"categories":null,"contents":"Require: analyze movie + quote [story + bellows]\nineffability of language vs. 
Sapire-Wolf\nForeignizing Time in Heptopod B\nLouise\u0026rsquo;s ability to re-express her temporally-independent thoughts in English after learning Heptopod B represents a successful act foreignization of Heptopod thought for an English L1 audience despite this audience\u0026rsquo;s supposed limitations in understanding temporally-independent concepts according to the Sapir-Wholf Hypothesis.\nHeptopod B does not have temporality RUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo; \u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw it.\u0026rdquo; So it seems like quintessential Sapir-Wholf: time hard to express with Heptopod, and so their way of thinking work around it.\n\u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo; and there are evidence for it\u0026hellip; Louis got this ability and then can think into the future.\n\u0026ldquo;It worked better when I didn\u0026rsquo;t think about it too much. \u0026hellip; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. 
I was developing a faculty like that of the heptapods.\u0026rdquo; Louise ditches temporality in her ENGLISH as well And yet, Louis very effectively translated HB into English by abusing English grammar.\n\u0026ldquo;you will be twenty-five then\u0026rdquo;\nLOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo;\nLOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo;\nSo this lack of temporality not ineffable: \u0026ldquo;translation is the enemy of the ineffable. One causes the other to cease to exist.\u0026rdquo;\nnobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze.\nSo despite English grammar\u0026rsquo;s temporality, it can be adapted.\nThis process of translation can be explained effectively in English, even if the original event is not:\nDR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo; Moving though over to another language is actually essentially just forenizing So: non-temporality vis a vi Heptopod B can be expressed in any language that you desire, except for the fact that it will be forenizing and hence require understanding of their culture. It is not that L1 itself limits understanding of L2 culture vis a vi S-W; instead, L2 culture needed to be understood correctly to foreignize for L1 audience. Louis does this.\nlouis is forenizing \u0026ldquo;As a result, the “foreign-soundingness” of a translation seeking to give the reader a glimpse of the authentic quality of the source can only reproduce and reinforce what the receiving culture already imagines the foreign to be. 
\u0026hellip; Foreign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship\u0026rdquo; Louise establishes this relationship LOUISE: \u0026ldquo;Language is the foundation of civilization. \u0026ldquo;It is the glue that holds a people together, and it is the first weapon drawn in a conflict.\u0026rdquo; \u0026ldquo;The only way to learn an unknown language is to interact with a native speaker, and by that I mean asking questions, holding a conversation \u0026hellip; without that, it\u0026rsquo;s simply not possible.\u0026rdquo; We get to hear it too: film VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; the music is forenizing\nStuff from Chaing \u0026ldquo;you will be twenty-five then\u0026rdquo; \u0026ldquo;Over time, the sentences I wrote grew shapelier, more cohesive. I had reached the point where it worked better when I didn\u0026rsquo;t think about it too much. Instead of carefully trying to design a sentence before writing, I could simply begin putting down strokes immediately; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. I was developing a faculty like that of the heptapods.\u0026rdquo; \u0026ldquo;I could understand that: the physical attributes that humans found intuitive, like kinetic energy or acceleration, were all properties of an objectat a given moment in time.\u0026rdquo; \u0026ldquo;And these were conducive to a teleological interpretation of events: by viewing events over a period of time, one recognized that there was a requirement that had to be satisfied, a goal of minimizing or maximizing. And one had to know the initial and final states to meet that goal; one needed knowledge of the effects before the causes could be initiated. 
I was growing to understand that, too.\u0026rdquo; \u0026ldquo;Gary once told me that the fundamental laws of physics were time-symmetric, that there was no physical difference between past and future.\u0026rdquo; \u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw it.\u0026rdquo; Stuff from Bellows \u0026ldquo;A reader who says that poetry is what has been lost in translation is also claiming to be simultaneously in full possession of the original (which is poetry) and of the translation (which is not). Otherwise there would be no knowing if anything has been lost, let alone knowing that it was poetry.\u0026rdquo; \u0026ldquo;Because if the inhabitants of the distant planet did have a language, and if the space crew had learned it, then it must be possible for them to say what the aliens had said. Must, not should: radically untranslatable sounds do not make a language simply because we could not know it was a language unless we could translate it, even if only roughly\u0026rdquo; \u0026ldquo;The tonal and rhythmic patterns of whale song are of such complexity as to make it quite impossible to believe that what we can hear (and pick up on instruments more sensitive than human ears) is just random noise.\u0026rdquo; nobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze. Translation is the enemy of the ineffable. One causes the other to cease to exist. Foreign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship. But there are significant differences between cultures and languages in how people do things with words. 
\u0026ldquo;Different languages, [Wilhelm] von Humboldt saw, were different worlds, and the great diversity of natural languages on the planet should be seen as a treasure house of tools for thinking in other ways.\u0026rdquo; Wilhelm von Humboldt, elder brother of the great explorer Alexander \u0026ldquo;The evidence itself brought [Sapir] to see that any attempt to match the grammar of a language with the culture of its speakers or their ethnic origins was completely impossible. “Language,” “culture,” and “race” were independent variables.\u0026rdquo; \u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo; \u0026ldquo;It means that speakers of what Sapir called “Average West European” are poorly equipped to engage in Hopi thought. To expand our minds and to become more fully civilized members of the human race, we should learn as many different languages as we can.\u0026rdquo; Movie Script Film VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; all caps meaning LOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo; DR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo; RUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo; LOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo; \u0026ldquo;This is the same scene as the first. 
Shot for shot.\u0026rdquo; \u0026ldquo;And now we stay here a moment longer than the opening scene, and see that while Louise is smiling, a tear slips down her cheek.\u0026rdquo; INT. LAKE HOUSE - LOUISE\u0026rsquo;S STUDY - NIGHT (FLASHBACK) Childhood Flashback \u0026ldquo;memory is a strange thing\u0026rdquo; \u0026ldquo;we are so bound by time, by its order\u0026rdquo;\u0026mdash;progression of time backed into the inherentness of language the story opens with an emotional connection to the audience for imprinting Lecture Scene\u0026mdash;Alien Invasion \u0026ldquo;Authority assess the object\u0026rdquo; emphasis a weird place Shadow framing of the events question: why is she so worried Second time with plan down from blackness Army Scene + Fly Away Scene \u0026ldquo;Fluttering\u0026rdquo; reproduced Army attempts to replicate the results Sanskrit word for \u0026ldquo;war\u0026rdquo; and its translation\u0026mdash;\u0026ldquo;Louis: desire for more cows; Cal: argument\u0026rdquo; Film environment from the back, filming forward Helicopter Scene Louis: communication as first priority vs. Army: understanding as first priority Approach Scene Music pitched the \u0026ldquo;alien\u0026rdquo; flutters tonally Kind of emotional communication with the audience, the Dalian sound + high highs EQs is a foreignization technique taller than it was in the movie Entry Scene Driving: perspective questions \u0026ldquo;every 18 hours\u0026rdquo; continuous panning from black downwards Sargent\u0026rsquo;s differing eye length Difference in gravity and perspectives? 
Camera angle trickery, panning down is no longer the same direction \u0026ldquo;light at the end of the tunnel\u0026rdquo; again, music + dissonant sounds create foreignization in the audience \u0026ldquo;they arrive\u0026rdquo;\nCommunication scene They are very animal-like as portrayed in the film \u0026ldquo;Visual Aid\u0026rdquo; scenes Pairwise matching \u0026ldquo;A more advanced race\u0026rdquo; Rosetta stone behavior Taking of headgear: contrast between small vs. large (size differences Increasing breathing during a moment of transition Introductory scene not sure if they have names making an assumption what is happening to Louise\u0026rsquo;s thoughts \u0026ldquo;As Banks studies the language, she starts to have flashback-like visions of her daughter. \u0026quot; Panic worried Scene Fear of the unknown Thoughts flashing back: symbols muting sounds Flashing back being real: Thoughts having panicked sensation of time Repeating single syllable\u0026mdash;foreignization \u0026ldquo;unline speech, a logogram is free of time\u0026rdquo; Voiceover Scene No directionality: complex understanding The repeated vocalizations help highlight the distantness Dialogue Between the two \u0026ldquo;Sapire-Wolf Hypothesis\u0026rdquo;: being used incorrectly; hallucinating Louis' China mobilizing forces Majiong is a form of mobiling forces Final Message Group \u0026ldquo;offer\u0026rdquo; vs \u0026ldquo;use\u0026rdquo; \u0026mdash; the US understands it as \u0026ldquo;offer\u0026rdquo; and China understands it as \u0026ldquo;use\u0026rdquo; contextual intepretation varies how the use of linguistics language as something contextually dependent large amounts of communication can be packed very densely Non-Zero Sum Game the ask to work together need to be interpreted differently different parts of time fold together to become hole: \u0026ldquo;non zero sum game\u0026rdquo; The alien speech is being subtitled! 
Final communication Result becomes \u0026ldquo;objective\u0026rdquo;: i.e. there is a direct understanding of the aliens, suddenly Also, palendromic names: \u0026ldquo;Hannah\u0026rdquo; for daughter, not translatable Seeing into the future Seeing into Time Being able to understand heptopod + time properly means that they are able to understand time Gave private number in the future allow you to see the past: General Shang can see into the future Why is the banquet colored yellow Finale Repeat of the opener scene: pan down as a trope that cycles from the beginning Ian is in Louise\u0026rsquo;s house! The house+baby scenes (which is different from baby nature scenes) is lit orange in the same way as the banquet scene whereas the hospital scene and the house scene were lit blue ","html":"\u003cp\u003eRequire: analyze movie + quote [story + bellows]\u003c/p\u003e\n\u003cp\u003eineffability of language vs. Sapire-Wolf\u003c/p\u003e\n\u003cp\u003eForeignizing Time in Heptopod B\u003c/p\u003e\n\u003cp\u003eLouise\u0026rsquo;s ability to re-express her temporally-independent thoughts in English after learning Heptopod B represents a successful act \u003cem\u003eforeignization\u003c/em\u003e of Heptopod thought for an English L1 audience despite this audience\u0026rsquo;s supposed limitations in understanding temporally-independent concepts according to the Sapir-Wholf Hypothesis.\u003c/p\u003e\n\u003ch2 id=\"heptopod-b-does-not-have-temporality\"\u003eHeptopod B does not have temporality\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw 
it.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo it seems like quintessential Sapir-Wholf: time hard to express with Heptopod, and so their way of thinking work around it.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand there are evidence for it\u0026hellip; Louis got this ability and then can think into the future.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;It worked better when I didn\u0026rsquo;t think about it too much. \u0026hellip; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. I was developing a faculty like that of the heptapods.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"louise-ditches-temporality-in-her-english-as-well\"\u003eLouise ditches temporality in her ENGLISH as well\u003c/h2\u003e\n\u003cp\u003eAnd yet, Louis very effectively translated HB into English by abusing English grammar.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;you will be twenty-five then\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSo this lack of temporality not ineffable: \u0026ldquo;translation is the enemy of the ineffable. 
One causes the other to cease to exist.\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003enobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo despite English grammar\u0026rsquo;s temporality, it can be adapted.\u003c/p\u003e\n\u003cp\u003eThis process of translation can be explained effectively in English, even if the original event is not:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"moving-though-over-to-another-language-is-actually-essentially-just-forenizing\"\u003eMoving though over to another language is actually essentially just forenizing\u003c/h2\u003e\n\u003cp\u003eSo: non-temporality vis a vi Heptopod B can be expressed in any language that you desire, except for the fact that it will be forenizing and hence require understanding of their \u003cem\u003eculture\u003c/em\u003e. It is \u003cstrong\u003enot\u003c/strong\u003e that L1 itself limits understanding of L2 culture vis a vi S-W; instead, L2 culture needed to be understood correctly to foreignize for L1 audience. Louis does this.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003elouis is forenizing\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;As a result, the “foreign-soundingness” of a translation seeking to give the reader a glimpse of the authentic quality of the source can only reproduce and reinforce what the receiving culture already imagines the foreign to be. 
\u0026hellip; Foreign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eLouise establishes this relationship\n\u003cul\u003e\n\u003cli\u003eLOUISE: \u0026ldquo;Language is the foundation of civilization. \u0026ldquo;It is the glue that holds a people together, and it is the first weapon drawn in a conflict.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The only way to learn an unknown language is to interact with a native speaker, and by that I mean asking questions, holding a conversation \u0026hellip; without that, it\u0026rsquo;s simply not possible.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe get to hear it too: film VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; the \u003cstrong\u003emusic\u003c/strong\u003e is forenizing\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"stuff-from-chaing\"\u003eStuff from Chaing\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;you will be twenty-five then\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Over time, the sentences I wrote grew shapelier, more cohesive. I had reached the point where it worked better when I didn\u0026rsquo;t think about it too much. Instead of carefully trying to design a sentence before writing, I could simply begin putting down strokes immediately; my initial strokes almost always turned out to be compatible with an elegant rendition of what I was trying to say. 
I was developing a faculty like that of the heptapods.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I could understand that: the physical attributes that humans found intuitive, like kinetic energy or acceleration, were all properties of an objectat a given moment in time.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;And these were conducive to a teleological interpretation of events: by viewing events over a period of time, one recognized that there was a requirement that had to be satisfied, a goal of minimizing or maximizing. And one had to know the initial and final states to meet that goal; one needed knowledge of the effects before the causes could be initiated. I was growing to understand that, too.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Gary once told me that the fundamental laws of physics were time-symmetric, that there was no physical difference between past and future.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Explain it by saying that light minimized the time needed to travel to its destination, and one saw the world as the heptapods saw it.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stuff-from-bellows\"\u003eStuff from Bellows\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;A reader who says that poetry is what has been lost in translation is also claiming to be simultaneously in full possession of the original (which is poetry) and of the translation (which is not). Otherwise there would be no knowing if anything has been lost, let alone knowing that it was poetry.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Because if the inhabitants of the distant planet did have a language, and if the space crew had learned it, then it must be possible for them to say what the aliens had said. 
Must, not should: radically untranslatable sounds do not make a language simply because we could not know it was a language unless we could translate it, even if only roughly\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The tonal and rhythmic patterns of whale song are of such complexity as to make it quite impossible to believe that what we can hear (and pick up on instruments more sensitive than human ears) is just random noise.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003enobody knows how to translate “animal signals” into human speech or vice versa. When and if we ever can translate nonhuman noises into human speech, species-related ineffabilities will evaporate like the morning haze.\u003c/li\u003e\n\u003cli\u003eTranslation is the enemy of the ineffable. One causes the other to cease to exist.\u003c/li\u003e\n\u003cli\u003eForeign-soundingness is therefore only a real option for a translator when working from a language with which the receiving language and its culture have an established relationship.\u003c/li\u003e\n\u003cli\u003eBut there are significant differences between cultures and languages in how people do things with words.\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Different languages, [Wilhelm] von Humboldt saw, were different worlds, and the great diversity of natural languages on the planet should be seen as a treasure house of tools for thinking in other ways.\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003eWilhelm von Humboldt, elder brother of the great explorer Alexander\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The evidence itself brought [Sapir] to see that any attempt to match the grammar of a language with the culture of its speakers or their ethnic origins was completely impossible. 
“Language,” “culture,” and “race” were independent variables.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Sapir’s point was that instead of saying that Latin and Greek are well suited to abstract thought, we should say, rather, that abstract thought is well suited to Greek and Latin and view the particular kinds of philosophical discourse that were developed by their speakers as a consequence of the grammar they used\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;It means that speakers of what Sapir called “Average West European” are poorly equipped to engage in Hopi thought. To expand our minds and to become more fully civilized members of the human race, we should learn as many different languages as we can.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"movie-script\"\u003eMovie Script\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFilm VFX forenization; \u0026ldquo;in the pitch of the low tone reverberating inside. \u0026hellip; another metallic ROAR. Followed by a distant HIGH PITCH.\u0026rdquo; all caps meaning\u003c/li\u003e\n\u003cli\u003eLOUISE \u0026ldquo;We are so bound by time; by its order.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eDR. KETTLER \u0026ldquo;Not everyone is wired for what you\u0026rsquo;re about to do. Our brains aren\u0026rsquo;t always able to process experiences like this.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eRUSSIAN SCIENTIST: \u0026ldquo;Their final words translate to, \u0026ldquo;There is no time, many become one.\u0026rdquo; I fear we have all been given weapons because we answered the timeline wrong, please, if you - -\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eLOUISE: \u0026ldquo;I forgot how good it feels to be held by you\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;This is the same scene as the first. 
Shot for shot.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;And now we stay here a moment longer than the opening scene, and see that while Louise is smiling, a tear slips down her cheek.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eINT. LAKE HOUSE - LOUISE\u0026rsquo;S STUDY - NIGHT (FLASHBACK)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"childhood-flashback\"\u003eChildhood Flashback\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;memory is a strange thing\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;we are so bound by time, by its order\u0026rdquo;\u0026mdash;progression of time backed into the inherentness of language\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ethe story opens with an emotional connection to the audience for imprinting\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"lecture-scene-alien-invasion\"\u003eLecture Scene\u0026mdash;Alien Invasion\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;\u003cstrong\u003eAuthority\u003c/strong\u003e assess the object\u0026rdquo; emphasis a weird place\u003c/li\u003e\n\u003cli\u003eShadow framing of the events\u003c/li\u003e\n\u003cli\u003equestion: why is \u003cem\u003eshe\u003c/em\u003e so worried\u003c/li\u003e\n\u003cli\u003eSecond time with plan down from blackness\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"army-scene-plus-fly-away-scene\"\u003eArmy Scene + Fly Away Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Fluttering\u0026rdquo; reproduced\n\u003cul\u003e\n\u003cli\u003eArmy attempts to replicate the results\u003c/li\u003e\n\u003cli\u003eSanskrit word for \u0026ldquo;war\u0026rdquo; and its translation\u0026mdash;\u0026ldquo;Louis: desire for more cows; Cal: argument\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eFilm environment from the back, filming forward\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"helicopter-scene\"\u003eHelicopter 
Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLouis: communication as first priority vs. Army: understanding as first priority\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"approach-scene\"\u003eApproach Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eMusic pitched the \u0026ldquo;alien\u0026rdquo; flutters tonally\n\u003cul\u003e\n\u003cli\u003eKind of emotional communication with the audience, the Dalian sound + high highs EQs is a \u003ca href=\"/posts/kbhtranslation_theory/#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e technique\u003c/li\u003e\n\u003cli\u003e\u003cem\u003etaller\u003c/em\u003e than it was in the movie\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"entry-scene\"\u003eEntry Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDriving: perspective questions\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;every 18 hours\u0026rdquo;\u003c/li\u003e\n\u003cli\u003econtinuous panning from black \u003cstrong\u003edownwards\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eSargent\u0026rsquo;s differing eye length\u003c/li\u003e\n\u003cli\u003eDifference in gravity and perspectives?\u003c/li\u003e\n\u003cli\u003eCamera angle trickery, panning \u003cstrong\u003edown\u003c/strong\u003e is no longer the same direction\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;light at the end of the tunnel\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eagain, music + dissonant sounds create \u003ca href=\"/posts/kbhtranslation_theory/#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e in the audience\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;they arrive\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"communication-scene\"\u003eCommunication scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThey are very animal-like as portrayed in the film\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"visual-aid-scenes\"\u003e\u0026ldquo;Visual Aid\u0026rdquo; scenes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePairwise 
matching\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;A more advanced race\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eRosetta stone behavior\u003c/li\u003e\n\u003cli\u003eTaking of headgear: contrast between small vs. large (size differences\u003c/li\u003e\n\u003cli\u003eIncreasing breathing during a moment of transition\u003c/li\u003e\n\u003cli\u003eIntroductory scene\n\u003cul\u003e\n\u003cli\u003enot sure if they have names\u003c/li\u003e\n\u003cli\u003emaking an assumption\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat is happening to Louise\u0026rsquo;s thoughts\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;As Banks studies the language, she starts to have flashback-like visions of her daughter. \u0026quot;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"panic-worried-scene\"\u003ePanic worried Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFear of the unknown\u003c/li\u003e\n\u003cli\u003eThoughts flashing back: symbols muting sounds\n\u003cul\u003e\n\u003cli\u003eFlashing back being real: Thoughts having panicked sensation of time\u003c/li\u003e\n\u003cli\u003eRepeating single syllable\u0026mdash;foreignization\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;unline speech, a logogram is free of time\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"voiceover-scene\"\u003eVoiceover Scene\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNo directionality: complex understanding\u003c/li\u003e\n\u003cli\u003eThe repeated vocalizations help highlight the distantness\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dialogue-between-the-two\"\u003eDialogue Between the two\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Sapire-Wolf Hypothesis\u0026rdquo;: being used incorrectly; hallucinating Louis'\n\u003cul\u003e\n\u003cli\u003eChina mobilizing forces\u003c/li\u003e\n\u003cli\u003eMajiong is a form of mobiling 
forces\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-message-group\"\u003eFinal Message Group\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;offer\u0026rdquo; vs \u0026ldquo;use\u0026rdquo; \u0026mdash; the \u003cstrong\u003eUS\u003c/strong\u003e understands it as \u0026ldquo;offer\u0026rdquo; and \u003cstrong\u003eChina\u003c/strong\u003e understands it as \u0026ldquo;use\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003econtextual intepretation varies how the use of linguistics\u003c/li\u003e\n\u003cli\u003elanguage as something contextually dependent\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003elarge amounts of communication can be packed very densely\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"non-zero-sum-game\"\u003eNon-Zero Sum Game\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe ask to work together need to be interpreted differently\u003c/li\u003e\n\u003cli\u003edifferent parts of time fold together to become hole: \u0026ldquo;non zero sum game\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eThe alien speech is being subtitled!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-communication\"\u003eFinal communication\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eResult becomes \u0026ldquo;objective\u0026rdquo;: i.e. 
there is a direct understanding of the aliens, suddenly\u003c/li\u003e\n\u003cli\u003eAlso, palendromic names: \u0026ldquo;Hannah\u0026rdquo; for daughter, not translatable\u003c/li\u003e\n\u003cli\u003eSeeing into the future\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"seeing-into-time\"\u003eSeeing into Time\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eBeing able to understand heptopod + time properly means that they are able to understand time\u003c/li\u003e\n\u003cli\u003eGave private number in the future allow you to see the past: General Shang can see into the future\u003c/li\u003e\n\u003cli\u003eWhy is the banquet colored yellow\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"finale\"\u003eFinale\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRepeat of the opener scene: pan down as a trope that cycles from the beginning\u003c/li\u003e\n\u003cli\u003eIan is in Louise\u0026rsquo;s house!\u003c/li\u003e\n\u003cli\u003eThe house+baby scenes (which is different from baby nature scenes) is lit orange in the same way as the banquet scene\n\u003cul\u003e\n\u003cli\u003ewhereas the hospital scene and the house scene were lit blue\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbharrival_movie/","tags":null,"title":"Arrival Movie"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbharthur_m_schlesinger/","tags":null,"title":"Arthur M. Schlesinger"},{"categories":null,"contents":"Artificial Intelligence is defined as the act of parameter estimation.\nToy example for Linear Regression Imagine if we want to predict the price of a choclate bar We feed in a bunch of bar weight vs. 
chocolate price data; that\u0026rsquo;s the training data Then, we come up with a model of the training data We then throw away the training data Therefore, we can think of the model as a COMPRESSION of the training data; estimating parameters.\nBias in AI It is the ability of AI as a TOOL that causes it a very big UNDOING.\nBlueberry Muffin vs. Chuwawa Its quite difficult to come with codified rules; yet, we can just ask for it to compressive\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhartificial_intelligence/\"\u003eArtificial Intelligence\u003c/a\u003e is defined as the act of \u003cstrong\u003eparameter estimation\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"toy-example-for-linear-regression\"\u003eToy example for Linear Regression\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eImagine if we want to predict the price of a choclate bar\u003c/li\u003e\n\u003cli\u003eWe feed in a bunch of bar weight vs. chocolate price data; that\u0026rsquo;s the \u003cstrong\u003etraining data\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eThen, we come up with a \u003cstrong\u003emodel\u003c/strong\u003e of the \u003cstrong\u003etraining data\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eWe then throw away the \u003cstrong\u003etraining data\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore, we can think of the \u003cstrong\u003emodel\u003c/strong\u003e as a \u003cstrong\u003eCOMPRESSION\u003c/strong\u003e of the training data; \u003cstrong\u003eestimating\u003c/strong\u003e parameters.\u003c/p\u003e\n\u003ch2 id=\"bias-in-ai\"\u003eBias in AI\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eIt is the ability of AI as a TOOL that causes it a very big UNDOING\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"blueberry-muffin-vs-dot-chuwawa\"\u003eBlueberry Muffin vs. 
Chuwawa\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-29_09-46-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIts quite difficult to come with codified rules; yet, we can just ask for it to compressive\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhartificial_intelligence/","tags":null,"title":"Artificial Intelligence"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhasbmb/","tags":null,"title":"ASBMB"},{"categories":null,"contents":"2023 annual meeting of ASBMB.\nTalks Molecular Engineering\nChristopher Barnes, Stanford SARS-COV2 Structural Analysis 10.1126/sciimmunol.ade0958 Emma J. Chory, Duke Robotics-Assisted Directed Evolution 10.1038/s41592-021-01348-4 Daniel-Adriano Silva, Monod De novo biosensors 10.1038/s41586-021-03258-z Structure Determination and Machine Learning\nSonya Hanson, Flatiron cyro-EM + ensemble reweighting 10.1073/pnas.1419276111 Celia Schiffer, UMass Med Drug Resistance Analysis 10.7554/eLife.77433 Arvind Ramanathan, Argonne Lab Models of Interaction Analysis ?, see GenSLMs Jason K Perry, Gilead Structure of COVID Replication Protein Structure and Biophysics\nErik Yukl Zinc ABC Transporters ? Wentao Li Calpains \u0026hellip; AFIB Drug Design w/ AI\nAlpha Lee, PostEra ML COVID Drug Discovery 10.1101/2020.10.29.339317 David Baker, UW De Novo Protein Design RFDiffusion, RoseTTAFold2 Relay Therapeutics Active Learning Molecule Iteration Rommie E Amary, UCSD Immunogen Design Things to Google diffusion maps and https://www.charmm-gui.org/ GenSLMs: LLMs, but genome sequence representation learning RFDiffusion \u0026ldquo;quantum chemistry\u0026rdquo; https://www.biosolveit.de/infiniSee what tool do these people use to make these? 
","html":"\u003cp\u003e2023 annual meeting of \u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"talks\"\u003eTalks\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eMolecular Engineering\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eChristopher Barnes, \u003ca href=\"/posts/kbhstanford/\"\u003eStanford\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhsars_cov2_structural_analysis/\"\u003eSARS-COV2 Structural Analysis\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e10.1126/sciimmunol.ade0958\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eEmma J. Chory, Duke\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrobotics_assisted_directed_evolution/\"\u003eRobotics-Assisted Directed Evolution\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e10.1038/s41592-021-01348-4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eDaniel-Adriano Silva, Monod\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhde_novo_biosensors/\"\u003eDe novo biosensors\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e10.1038/s41586-021-03258-z\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eStructure Determination and Machine Learning\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSonya Hanson, Flatiron\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e + \u003ca href=\"/posts/kbhcyro_em/#ensemble-reweighting\"\u003eensemble reweighting\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e10.1073/pnas.1419276111\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCelia Schiffer, UMass Med\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmolecular_drug_resistance/\"\u003eDrug Resistance 
Analysis\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e10.7554/eLife.77433\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eArvind Ramanathan, Argonne Lab\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfundational_models_of_interaction_analysis/\"\u003eModels of Interaction Analysis\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e?, see \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJason K Perry, Gilead\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhstructure_of_covid_replication/\"\u003eStructure of COVID Replication\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eProtein Structure and Biophysics\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eErik Yukl\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhzinc_abc_transporters/\"\u003eZinc ABC Transporters\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e?\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eWentao Li\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcalpains_afib/\"\u003eCalpains \u0026hellip; AFIB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eDrug Design w/ AI\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eAlpha Lee, PostEra\u003c/th\u003e\n\u003cth\u003e\u003ca href=\"/posts/kbhml_drug_discovery/\"\u003eML COVID Drug Discovery\u003c/a\u003e\u003c/th\u003e\n\u003cth\u003e10.1101/2020.10.29.339317\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eDavid Baker, UW\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhde_novo_protein_design/\"\u003eDe Novo Protein 
Design\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e, \u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eRelay Therapeutics\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhactive_learning_molecule_iteration/\"\u003eActive Learning Molecule Iteration\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eRommie E Amary, UCSD\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhimmunogen_design/\"\u003eImmunogen Design\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"things-to-google\"\u003eThings to Google\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiffusion_map/\"\u003ediffusion map\u003c/a\u003es and\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.charmm-gui.org/\"\u003ehttps://www.charmm-gui.org/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e: LLMs, but genome sequence\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrepresentation_learning/\"\u003erepresentation learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;quantum chemistry\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.biosolveit.de/infiniSee\"\u003ehttps://www.biosolveit.de/infiniSee\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewhat tool do these people use to make these?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasbmb2023_index/","tags":null,"title":"ASBMB2023 Index"},{"categories":null,"contents":"ASCII represents each char as an integer, its \u0026ldquo;ascii value\u0026rdquo;.\nUppercase letters are sequentially numbered Lowercase 
letters are sequentially numbered Digits are sequentially numbered Lowercase letters are 32 more than their uppercases (which means its a single bit flip) char upper = \u0026#39;A\u0026#39;; // 65 char lower = \u0026#39;a\u0026#39;; // 97 char zero = \u0026#39;0\u0026#39;; // 48 ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhascii/\"\u003eASCII\u003c/a\u003e represents each \u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e as an \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e, its \u0026ldquo;ascii value\u0026rdquo;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eUppercase letters are sequentially numbered\u003c/li\u003e\n\u003cli\u003eLowercase letters are sequentially numbered\u003c/li\u003e\n\u003cli\u003eDigits are sequentially numbered\u003c/li\u003e\n\u003cli\u003eLowercase letters are 32 more than their uppercases (which means its a single bit flip)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eupper\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;A\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// 65\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elower\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#39;a\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// 97\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;0\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// 48\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhascii/","tags":null,"title":"ASCII"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhasee_prism/","tags":null,"title":"ASEE Prism"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhasip/","tags":null,"title":"ASIP"},{"categories":null,"contents":"ASR are tech that helps make transcripts from speech\n","html":"\u003cp\u003eASR are tech that helps make transcripts from speech\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasr/","tags":null,"title":"ASR"},{"categories":null,"contents":"After a brief survey of current literature, it appears that no standardized benchmarks for ASR on clinical data exists that are widely used. 
Given the vast resources available from the TalkBank corpus, it is not infeasible to build such a corpus and evaluate the performance of a few commercial ASR systems in its ability to perform such a task.\nDespite there not being a single baseline that works to benchmark ASR on clinical datasets, a few different subsets of efforts exists on each component of this front.\nEvaluation Datasets As perhaps an exemplar to the lack of standardization in ASR performance evaluation, the Whisper ASR model ((NO_ITEM_DATA:radford2022robust)) was not evaluated on one particular benchmark but instead a series of multi-domain benchmarks.\nThis is perhaps for good reason, recent results (discuses below) show that single-domain benchmarks do not describe performance well across other domains, or even other usage methods. Therefore, the battery of different tests done by (Radford et al. 2022) could be essentially thought of as a single battery of multi-usage tests that covers a good span of recent-standard ASR performance tests; among them:\nStandard Datasets CORAAL: a dataset of high-quality lower-fidelity recordings of African-American vernacular of varying degrees in conversation (incl. cross talk, etc.) ((Farrington and Kendall 2021)) EARNINGS: a set of benchmark datasets of earnings calls within various financial industries ((Rio et al. 2021)) TED-LIUM 3: a dataset of high-fidelity recordings of full-length TED talks ((Hernandez et al. 2018)) In addition to the three evaluation datasets provided, the entire model was also trained on (Panayotov et al. 2015), a gold-standard corpus of open-source high-fidelity recordings of audiobooks.\n\u0026ldquo;Home brew\u0026rdquo; Benchmarks In addition to the three published, standard datasets above, Radford et al. 
also used a series of self-selected datasets of varying quality.\nRev16: Rev.AI\u0026rsquo;s clean podcast transcription dataset https://www.rev.ai/blog/podcast-transcription-benchmark-part-1/ Meanwhile: Recordings of Stephen Colbert\u0026rsquo;s Meanwhile segments Kinkaid46: \u0026hellip;.apparently a selection of YouTube videos from this guy\u0026rsquo;s blog post: https://medium.com/descript/which-automatic-transcription-service-is-the-most-accurate-2018-2e859b23ed19 Evaluation Metrics Most benchmarks still report results in terms of word-error-rate (WER) and the standard lexical distance metric of BLEU ((Papineni et al. 2001)). These are two generally well-accepted ways of reporting ASR performance, and for most of the datasets cited above suffice.\nHowever, some very recent results ((Shor et al. 2023)) indicate that BLEU and WER themselves do not capture a good view for what would be clinically relevant data. Some ASR mistakes (such as that on the investigator, or that which doesn\u0026rsquo;t relate to the disfluency being observed) matter a lot less than others (errors on the participant, esp. missing filled pauses, wrongly coded utterances etc.). The work by Shor et al. presents also an alternative metric to quantify such errors: essentially training a BERT model ((Devlin et al. 2018)) to perform the classification task of \u0026ldquo;clinician preference\u0026rdquo; (i.e. \u0026ldquo;predict which of these errors would be less problematic to a clinician\u0026rdquo;), then using the results of that model to evaluate the ASR performance.\nThis last method is likely overkill. However, it is useful to discuss if richer information\u0026ndash;such as special binning for missing clinically significant markers, like filled pauses\u0026mdash;in addition to simple BLEU and WER will be useful as we develop our own benchmarks.\nDiscussion (Szymański et al. 2020) (sec. 
3, \u0026ldquo;Call to Action) offers some guidance with regards to the design of robust ASR benchmarks. Among which:\nHigher quality annotations, like morphology information we provide with %mor, to help aid language model training Broader range of human diects and variations covered Performance across many recording domains (various processing of audio signals, properties of the signal itself, etc.) Though TalkBank contains a wealth of data, individual corpuses often have little variation, which Szymańki et. al. shows cause degraded performance. Therefore, it is useful to create a benchmark that strives across multiple problem domains and recording schemes to be able to provide a reproducible and more accurate benchmark of a given model.\nSzymańki et. al. also brought up another issue through their paper: \u0026ldquo;due to legal constraints \u0026hellip; we are not able to provide the community with neither the benchmark data nor the detailed information about evaluated systems.\u0026rdquo; ((Szymański et al. 2020), section 2.) The anonymization of benchmarked models seen in both Szymańki et. al. and Radford et. al. may point to a certain legal barrier in specifically benchmarking existing, commercial ASR models.\nDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” arXiv Preprint arXiv:1810.04805. Farrington, Charlie, and Tyler Kendall. 2021. “The Corpus of Regional African American Language.” doi:10.7264/1AD5-6T35. Hernandez, François, Vincent Nguyen, Sahar Ghannay, Natalia Tomashenko, and Yannick Estève. 2018. “TED-LIUM 3: Twice as Much Data and Corpus Repartition for Experiments on Speaker Adaptation,” 11096:198–208. doi:10.1007/978-3-319-99579-3_21. Panayotov, Vassil, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. 2015. 
“Librispeech: An ASR Corpus Based on Public Domain Audio Books.” In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 5206–10. South Brisbane, Queensland, Australia: IEEE. doi:10.1109/ICASSP.2015.7178964. Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2001. “BLEU: A Method for Automatic Evaluation of Machine Translation.” In Proceedings of the 40th Annual Meeting on Association for Computational Linguistics - ACL ’02, 311. Philadelphia, Pennsylvania: Association for Computational Linguistics. doi:10.3115/1073083.1073135. Radford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. “Robust Speech Recognition via Large-Scale Weak Supervision.” arXiv Preprint arXiv:2212.04356. Rio, M., Natalie Delworth, R. Westerman, Michelle Huang, Nishchal Bhandari, Joseph Palakapilly, Quinten McNamara, Joshua Dong, Piotr Żelasko, and Miguel Jette. 2021. “Earnings-21: A Practical Benchmark for ASR in the Wild.” ArXiv. doi:10.21437/Interspeech.2021-1915. Shor, Joel, Ruyue Agnes Bi, Subhashini Venugopalan, Steven Ibara, Roman Goldenberg, and Ehud Rivlin. 2023. “Clinical BERTScore: An Improved Measure of Automatic Speech Recognition Performance in Clinical Settings.” arXiv. http://arxiv.org/abs/2303.05737. Szymański, Piotr, Piotr Żelasko, Mikolaj Morzy, Adrian Szymczak, Marzena Żyła-Hoppe, Joanna Banaszczak, Lukasz Augustyniak, Jan Mizgajski, and Yishay Carmiel. 2020. “WER We Are and WER We Think We Are.” Findings of the Association for Computational Linguistics: EMNLP 2020, 3290–95. doi:10.18653/v1/2020.findings-emnlp.295. NO_ITEM_DATA:radford2022robust) ","html":"\u003cp\u003eAfter a brief survey of current literature, it appears that no standardized benchmarks for ASR on clinical data exists that are widely used. 
Given the vast resources available from the TalkBank corpus, it is not infeasible to build such a corpus and evaluate the performance of a few commercial ASR systems in its ability to perform such a task.\u003c/p\u003e\n\u003cp\u003eDespite there not being a single baseline that works to benchmark ASR on clinical datasets, a few different subsets of efforts exists on each component of this front.\u003c/p\u003e\n\u003ch2 id=\"evaluation-datasets\"\u003eEvaluation Datasets\u003c/h2\u003e\n\u003cp\u003eAs perhaps an exemplar to the lack of standardization in ASR performance evaluation, the Whisper ASR model ((NO_ITEM_DATA:radford2022robust)) was not evaluated on one particular benchmark but instead a series of multi-domain benchmarks.\u003c/p\u003e\n\u003cp\u003eThis is perhaps for good reason, recent results (discuses below) show that single-domain benchmarks do not describe performance well across other domains, or even other usage methods. Therefore, the battery of different tests done by (\u003ca href=\"#citeproc_bib_item_6\"\u003eRadford et al. 2022\u003c/a\u003e) could be essentially thought of as a single battery of multi-usage tests that covers a good span of recent-standard ASR performance tests; among them:\u003c/p\u003e\n\u003ch3 id=\"standard-datasets\"\u003eStandard Datasets\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCORAAL: a dataset of high-quality \u003cstrong\u003elower-fidelity recordings\u003c/strong\u003e of African-American vernacular of varying degrees in conversation (incl. cross talk, etc.) ((\u003ca href=\"#citeproc_bib_item_2\"\u003eFarrington and Kendall 2021\u003c/a\u003e))\u003c/li\u003e\n\u003cli\u003eEARNINGS: a set of benchmark datasets of \u003cstrong\u003eearnings calls\u003c/strong\u003e within various financial industries ((\u003ca href=\"#citeproc_bib_item_7\"\u003eRio et al. 
2021\u003c/a\u003e))\u003c/li\u003e\n\u003cli\u003eTED-LIUM 3: a dataset of \u003cstrong\u003ehigh-fidelity recordings\u003c/strong\u003e of full-length TED talks ((\u003ca href=\"#citeproc_bib_item_3\"\u003eHernandez et al. 2018\u003c/a\u003e))\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn addition to the three evaluation datasets provided, the entire model was also trained on (\u003ca href=\"#citeproc_bib_item_4\"\u003ePanayotov et al. 2015\u003c/a\u003e), a gold-standard corpus of open-source \u003cstrong\u003ehigh-fidelity recordings\u003c/strong\u003e of audiobooks.\u003c/p\u003e\n\u003ch3 id=\"home-brew-benchmarks\"\u003e\u0026ldquo;Home brew\u0026rdquo; Benchmarks\u003c/h3\u003e\n\u003cp\u003eIn addition to the three published, standard datasets above, Radford et al. also used a series of self-selected datasets of varying quality.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eRev16: Rev.AI\u0026rsquo;s \u003cstrong\u003eclean podcast\u003c/strong\u003e transcription dataset \u003ca href=\"https://www.rev.ai/blog/podcast-transcription-benchmark-part-1/\"\u003ehttps://www.rev.ai/blog/podcast-transcription-benchmark-part-1/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eMeanwhile: Recordings of Stephen Colbert\u0026rsquo;s Meanwhile segments\u003c/li\u003e\n\u003cli\u003eKinkaid46: \u0026hellip;.apparently a selection of YouTube videos from this guy\u0026rsquo;s blog post: \u003ca href=\"https://medium.com/descript/which-automatic-transcription-service-is-the-most-accurate-2018-2e859b23ed19\"\u003ehttps://medium.com/descript/which-automatic-transcription-service-is-the-most-accurate-2018-2e859b23ed19\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"evaluation-metrics\"\u003eEvaluation Metrics\u003c/h2\u003e\n\u003cp\u003eMost benchmarks still report results in terms of word-error-rate (WER) and the standard lexical distance metric of BLEU ((\u003ca href=\"#citeproc_bib_item_5\"\u003ePapineni et al. 2001\u003c/a\u003e)). 
These are two generally well-accepted ways of reporting ASR performance, and for most of the datasets cited above suffice.\u003c/p\u003e\n\u003cp\u003eHowever, some very recent results ((\u003ca href=\"#citeproc_bib_item_8\"\u003eShor et al. 2023\u003c/a\u003e)) indicate that BLEU and WER themselves do not capture a good view for what would be clinically relevant data. Some ASR mistakes (such as that on the investigator, or that which doesn\u0026rsquo;t relate to the disfluency being observed) matter a lot less than others (errors on the participant, esp. missing filled pauses, wrongly coded utterances etc.). The work by Shor et al. presents also an alternative metric to quantify such errors: essentially training a BERT model ((\u003ca href=\"#citeproc_bib_item_1\"\u003eDevlin et al. 2018\u003c/a\u003e)) to perform the classification task of \u0026ldquo;clinician preference\u0026rdquo; (i.e. \u0026ldquo;predict which of these errors would be less problematic to a clinician\u0026rdquo;), then using the results of that model to evaluate the ASR performance.\u003c/p\u003e\n\u003cp\u003eThis last method is likely overkill. However, it is useful to discuss if richer information\u0026ndash;such as special binning for missing clinically significant markers, like filled pauses\u0026mdash;in addition to simple BLEU and WER will be useful as we develop our own benchmarks.\u003c/p\u003e\n\u003ch2 id=\"discussion\"\u003eDiscussion\u003c/h2\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_9\"\u003eSzymański et al. 2020\u003c/a\u003e) (sec. 3, \u0026ldquo;Call to Action) offers some guidance with regards to the design of robust ASR benchmarks. 
Among which:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eHigher quality annotations, like morphology information we provide with %mor, to help aid language model training\u003c/li\u003e\n\u003cli\u003eBroader range of human diects and variations covered\u003c/li\u003e\n\u003cli\u003ePerformance across many recording domains (various processing of audio signals, properties of the signal itself, etc.)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThough TalkBank contains a wealth of data, individual corpuses often have little variation, which Szymańki et. al. shows cause degraded performance. Therefore, it is useful to create a benchmark that strives across multiple problem domains and recording schemes to be able to provide a reproducible and more accurate benchmark of a given model.\u003c/p\u003e\n\u003cp\u003eSzymańki et. al. also brought up another issue through their paper: \u0026ldquo;due to legal constraints \u0026hellip; we are not able to provide the community with neither the benchmark data nor the detailed information about evaluated systems.\u0026rdquo; ((\u003ca href=\"#citeproc_bib_item_9\"\u003eSzymański et al. 2020\u003c/a\u003e), section 2.) The anonymization of benchmarked models seen in both Szymańki et. al. and Radford et. al. may point to a certain legal barrier in specifically benchmarking existing, commercial ASR models.\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” \u003ci\u003earXiv Preprint arXiv:1810.04805\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eFarrington, Charlie, and Tyler Kendall. 2021. 
“The Corpus of Regional African American Language.” doi:\u003ca href=\"https://doi.org/10.7264/1AD5-6T35\"\u003e10.7264/1AD5-6T35\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eHernandez, François, Vincent Nguyen, Sahar Ghannay, Natalia Tomashenko, and Yannick Estève. 2018. “TED-LIUM 3: Twice as Much Data and Corpus Repartition for Experiments on Speaker Adaptation,” 11096:198–208. doi:\u003ca href=\"https://doi.org/10.1007/978-3-319-99579-3_21\"\u003e10.1007/978-3-319-99579-3_21\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003ePanayotov, Vassil, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. 2015. “Librispeech: An ASR Corpus Based on Public Domain Audio Books.” In \u003ci\u003e2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)\u003c/i\u003e, 5206–10. South Brisbane, Queensland, Australia: IEEE. doi:\u003ca href=\"https://doi.org/10.1109/ICASSP.2015.7178964\"\u003e10.1109/ICASSP.2015.7178964\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003ePapineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2001. “BLEU: A Method for Automatic Evaluation of Machine Translation.” In \u003ci\u003eProceedings of the 40th Annual Meeting on Association for Computational Linguistics - ACL ’02\u003c/i\u003e, 311. Philadelphia, Pennsylvania: Association for Computational Linguistics. doi:\u003ca href=\"https://doi.org/10.3115/1073083.1073135\"\u003e10.3115/1073083.1073135\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eRadford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. 
“Robust Speech Recognition via Large-Scale Weak Supervision.” \u003ci\u003earXiv Preprint arXiv:2212.04356\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eRio, M., Natalie Delworth, R. Westerman, Michelle Huang, Nishchal Bhandari, Joseph Palakapilly, Quinten McNamara, Joshua Dong, Piotr Żelasko, and Miguel Jette. 2021. “Earnings-21: A Practical Benchmark for ASR in the Wild.” \u003ci\u003eArXiv\u003c/i\u003e. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2021-1915\"\u003e10.21437/Interspeech.2021-1915\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eShor, Joel, Ruyue Agnes Bi, Subhashini Venugopalan, Steven Ibara, Roman Goldenberg, and Ehud Rivlin. 2023. “Clinical BERTScore: An Improved Measure of Automatic Speech Recognition Performance in Clinical Settings.” arXiv. \u003ca href=\"http://arxiv.org/abs/2303.05737\"\u003ehttp://arxiv.org/abs/2303.05737\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003eSzymański, Piotr, Piotr Żelasko, Mikolaj Morzy, Adrian Szymczak, Marzena Żyła-Hoppe, Joanna Banaszczak, Lukasz Augustyniak, Jan Mizgajski, and Yishay Carmiel. 2020. “WER We Are and WER We Think We Are.” \u003ci\u003eFindings of the Association for Computational Linguistics: EMNLP 2020\u003c/i\u003e, 3290–95. 
doi:\u003ca href=\"https://doi.org/10.18653/v1/2020.findings-emnlp.295\"\u003e10.18653/v1/2020.findings-emnlp.295\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003eNO_ITEM_DATA:radford2022robust)\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasr_disordered_speech/","tags":null,"title":"ASR on Disordered Speech"},{"categories":null,"contents":"associative means that operations can be grouped in any way as long as order is preserved.\nThat is:\n\\begin{equation} (AB)C = A(BC) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e means that \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es can be grouped in any way as long as order is preserved.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(AB)C = A(BC)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhassociative/","tags":null,"title":"associative"},{"categories":null,"contents":"~ Given functions \\(f(n)\\) and \\(g(n)\\), if:\n\\begin{equation} \\lim_{n\\to \\infty} \\left(\\frac{f(n)}{g(n)}\\right) = 1 \\end{equation}\nwe say that \\(f \\sim g\\).\nThat \u0026ndash; the relationship between \\(f\\) and \\(g\\) grows in a similar fashion as \\(n\\) increases. For instance:\n\\(f(n) = n+1\\) \\(g(n) = n+2\\) Therefore:\n\\begin{equation} f\\sim g = \\lim_{n\\to \\infty} \\frac{f(n)}{g(n)} = \\lim_{n\\to \\infty} \\frac{n+1}{n+2} = 1 \\end{equation}\nThe \\(\\sim\\) operator is commutative (\\(f \\sim g \\Rightarrow g\\sim f\\)) and transitive (\\(f\\sim g, g\\sim h \\Rightarrow f \\sim h\\)).\no(n) Given two functions \\(f(n)\\), \\(g(n)\\), if their relationship shows:\n\\begin{equation} \\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} = 0 \\end{equation}\nwe can write it as\n\\begin{equation} f = o(g) \\end{equation}\nThis tells us that if \\(n\\) becomes very large, \\(g\\) becomes much larger than \\(f\\). 
\\(f\\) does not grow nearly as fast as \\(g\\).\nThe operation is not commutative, but is transitive (\\(f = o(g), g = o(h) \\Rightarrow f = o(h)\\))\nO(n) Given two functions \\(f(n)\\), \\(g(n)\\).\n\\begin{equation} \\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} \u0026lt; \\infty \\end{equation}\nthat the relationship between \\(f(n)\\) and \\(g(n)\\) is countable as \\(n\\) trends to infinity.\nWe can also say that, given \\(n\\), \\(n_0\\), and some \\(c\\) which \\(\\forall n, n \u0026gt; n_0\\), there is:\n\\begin{equation} |f(n)| \u0026lt; |cg(n)| \\end{equation}\nThis tells us that \\(f(n)\\) does not grow much much faster than \\(g(n)\\).\nTherefore:\nIf \\(f \\sim g\\), \\(f = O(g)\\) (as they grow together, \\(f\\) is not much faster) If \\(f = o(g)\\), \\(f=O(g)\\) (as \\(f\\) does not grow at all, \\(f\\) is not faster) \\(\\theta\\)(n) \\(f=\\theta(g)\\) IFF \\(f=O(g)\\) and \\(g=O(f)\\), its essentially \\(\\sim\\) but without the strict requirement of a 1:1 ratio.\n\\(\\omega\\)(n) and \\(\\Omega\\)(n) The inverses of \\(O\\) and \\(o\\):\n\\(f(n) = O(g(n)) \\Rightarrow g(n) = \\omega(f(n))\\) \\(f(n) = o(g(n)) \\Rightarrow g(n) = \\Omega(f(n))\\) ","html":"\u003ch2 id=\"4c761f\"\u003e~\u003c/h2\u003e\n\u003cp\u003eGiven functions \\(f(n)\\) and \\(g(n)\\), if:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n\\to \\infty} \\left(\\frac{f(n)}{g(n)}\\right) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe say that \\(f \\sim g\\).\u003c/p\u003e\n\u003cp\u003eThat \u0026ndash; the relationship between \\(f\\) and \\(g\\) grows in a similar fashion as \\(n\\) increases. 
For instance:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(f(n) = n+1\\)\u003c/li\u003e\n\u003cli\u003e\\(g(n) = n+2\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf\\sim g = \\lim_{n\\to \\infty} \\frac{f(n)}{g(n)} = \\lim_{n\\to \\infty} \\frac{n+1}{n+2} = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe \\(\\sim\\) operator is \u003cem\u003ecommutative\u003c/em\u003e (\\(f \\sim g \\Rightarrow g\\sim f\\)) and \u003cem\u003etransitive\u003c/em\u003e (\\(f\\sim g, g\\sim h \\Rightarrow f \\sim h\\)).\u003c/p\u003e\n\u003ch2 id=\"o--n\"\u003eo(n)\u003c/h2\u003e\n\u003cp\u003eGiven two functions \\(f(n)\\), \\(g(n)\\), if their relationship shows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write it as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf = o(g)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that if \\(n\\) becomes very large, \\(g\\) becomes much larger than \\(f\\). 
\\(f\\) does not grow nearly as fast as \\(g\\).\u003c/p\u003e\n\u003cp\u003eThe operation is \u003cem\u003enot\u003c/em\u003e commutative, but is \u003cem\u003etransitive\u003c/em\u003e (\\(f = o(g), g = o(h) \\Rightarrow f = o(h)\\))\u003c/p\u003e\n\u003ch2 id=\"o--n\"\u003eO(n)\u003c/h2\u003e\n\u003cp\u003eGiven two functions \\(f(n)\\), \\(g(n)\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\frac{f(n)}{g(n)} \u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat the relationship between \\(f(n)\\) and \\(g(n)\\) is countable as \\(n\\) trends to infinity.\u003c/p\u003e\n\u003cp\u003eWe can also say that, given \\(n\\), \\(n_0\\), and some \\(c\\) which \\(\\forall n, n \u0026gt; n_0\\), there is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|f(n)| \u0026lt; |cg(n)|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that \\(f(n)\\) does not grow much much faster than \\(g(n)\\).\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIf \\(f \\sim g\\), \\(f = O(g)\\) (as they grow together, \\(f\\) is not much faster)\u003c/li\u003e\n\u003cli\u003eIf \\(f = o(g)\\), \\(f=O(g)\\) (as \\(f\\) does not grow at all, \\(f\\) is not faster)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"theta--n\"\u003e\\(\\theta\\)(n)\u003c/h2\u003e\n\u003cp\u003e\\(f=\\theta(g)\\) IFF \\(f=O(g)\\) and \\(g=O(f)\\), its essentially \\(\\sim\\) but without the strict requirement of a 1:1 ratio.\u003c/p\u003e\n\u003ch2 id=\"omega--n--and-omega--n\"\u003e\\(\\omega\\)(n) and \\(\\Omega\\)(n)\u003c/h2\u003e\n\u003cp\u003eThe inverses of \\(O\\) and \\(o\\):\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(f(n) = O(g(n)) \\Rightarrow g(n) = \\omega(f(n))\\)\u003c/li\u003e\n\u003cli\u003e\\(f(n) = o(g(n)) \\Rightarrow g(n) = \\Omega(f(n))\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhasymtotic_analysis/","tags":null,"title":"asymtotic 
analysis"},{"categories":null,"contents":"You can use atoms as many different types of qubits.\nmanipulating physical qubits To make physical qubits go to different states, we will again use something in the ancillary states. Rotating it to \\(z\\) \u0026mdash; leverage one lazer to make it fall; \\(rx\\), \\(ry\\), we leverage combinations of two light.\nvarious qubit implementations Implementations of physical qubits\nType Superconductor Ions Atoms Company Google, IBM, Rigetti IonQ, Honeywell Atom Computing, QuEra Nature Artifical Natural Natural Calibration Individual calibration Naturally calibrated Naturally calibrated Coherence Time Short Long Long Connectivity Adjacent connectivity All-to-all More than adjacent Scalability Compatible with existing tech Not easily scalable Potentially scalable Speed Fast gates Kinda fast Untested possible uses for qubits Here are some possible uses for physical qubits\nTraveling salesman Research + simulations Cryptography ","html":"\u003cp\u003eYou can use atoms as many different types of \u003ca href=\"/posts/kbhqubits/\"\u003equbits\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"manipulating-physical-qubits\"\u003emanipulating physical qubits\u003c/h2\u003e\n\u003cp\u003eTo make \u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e go to different states, we will again use something in the ancillary states. 
Rotating it to \\(z\\) \u0026mdash; leverage one lazer to make it fall; \\(rx\\), \\(ry\\), we leverage combinations of two light.\u003c/p\u003e\n\u003ch2 id=\"various-qubit-implementations\"\u003evarious qubit implementations\u003c/h2\u003e\n\u003cp\u003eImplementations of \u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eType\u003c/th\u003e\n\u003cth\u003eSuperconductor\u003c/th\u003e\n\u003cth\u003eIons\u003c/th\u003e\n\u003cth\u003eAtoms\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCompany\u003c/td\u003e\n\u003ctd\u003eGoogle, IBM, Rigetti\u003c/td\u003e\n\u003ctd\u003eIonQ, Honeywell\u003c/td\u003e\n\u003ctd\u003eAtom Computing, QuEra\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eNature\u003c/td\u003e\n\u003ctd\u003eArtifical\u003c/td\u003e\n\u003ctd\u003eNatural\u003c/td\u003e\n\u003ctd\u003eNatural\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eCalibration\u003c/td\u003e\n\u003ctd\u003eIndividual calibration\u003c/td\u003e\n\u003ctd\u003eNaturally calibrated\u003c/td\u003e\n\u003ctd\u003eNaturally calibrated\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcoherence_time/\"\u003eCoherence Time\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eShort\u003c/td\u003e\n\u003ctd\u003eLong\u003c/td\u003e\n\u003ctd\u003eLong\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eConnectivity\u003c/td\u003e\n\u003ctd\u003eAdjacent connectivity\u003c/td\u003e\n\u003ctd\u003eAll-to-all\u003c/td\u003e\n\u003ctd\u003eMore than adjacent\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eScalability\u003c/td\u003e\n\u003ctd\u003eCompatible with existing tech\u003c/td\u003e\n\u003ctd\u003eNot easily scalable\u003c/td\u003e\n\u003ctd\u003ePotentially 
scalable\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSpeed\u003c/td\u003e\n\u003ctd\u003eFast gates\u003c/td\u003e\n\u003ctd\u003eKinda fast\u003c/td\u003e\n\u003ctd\u003eUntested\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"possible-uses-for-qubits\"\u003epossible uses for qubits\u003c/h2\u003e\n\u003cp\u003eHere are some possible uses for \u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTraveling salesman\u003c/li\u003e\n\u003cli\u003eResearch + simulations\u003c/li\u003e\n\u003cli\u003eCryptography\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhatoms_as_qubits/","tags":null,"title":"atoms as qubits"},{"categories":null,"contents":"AFIB is a heart conditinos, which is augmented during heart surgery.\n\u0026ldquo;Endogeneous extrolluculor proteases damage to Kv1.5 in the atria contributes to AFIB\u0026rdquo;\nWBC\u0026rsquo;s secretion of proteases, such as calpain when it is inflamed ","html":"\u003cp\u003eAFIB is a heart conditinos, which is augmented during heart surgery.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Endogeneous extrolluculor proteases damage to Kv1.5 in the atria contributes to AFIB\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWBC\u0026rsquo;s secretion of proteases, such as calpain when it is inflamed\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhafib/","tags":null,"title":"Atrial Fibrillation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhauthoritarianism/","tags":null,"title":"Authoritarianism"},{"categories":null,"contents":"autism is a spectrum disorder that are caused by both environmental and genetic factors.\nKey Question: how can different chromatin regulators lead to the same set of symptoms named \u0026ldquo;autism\u0026rdquo;.\nautism gene signature The gene signature of autism can be 
measured in clean and quantitative assays.\n","html":"\u003cp\u003eautism is a spectrum disorder that are caused by both environmental and genetic factors.\u003c/p\u003e\n\u003cp\u003eKey Question: how can different \u003ca href=\"/posts/kbhchromatin/\"\u003echromatin\u003c/a\u003e regulators lead to the same set of symptoms named \u0026ldquo;\u003ca href=\"/posts/kbhautism/\"\u003eautism\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"autism--kbhautism-dot-md--gene-signature\"\u003e\u003ca href=\"/posts/kbhautism/\"\u003eautism\u003c/a\u003e gene signature\u003c/h2\u003e\n\u003cp\u003eThe gene signature of autism can be measured in clean and quantitative assays.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhautism/","tags":null,"title":"autism"},{"categories":null,"contents":"an First Order ODE is \u0026ldquo;autonomous\u0026rdquo; when:\n\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nfor some \\(f\\) of one variables. Meaning, it only depends on the independent variable \\(t\\) through the use of \\(y(t)\\) in context.\nThis is a special class of seperable diffequ.\nautonomous ODEs level off at stationary curves for autonomous ODEs can never level off at non-stationary points. Otherwise, that would be a stationary point.\nSee stability (ODEs)\ntime-invariant expressions For forms by which:\n\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nas in, the expression is time invariant.\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODE\u003c/a\u003e is \u0026ldquo;autonomous\u0026rdquo; when:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some \\(f\\) of one variables. 
Meaning, it only depends on the independent variable \\(t\\) through the use of \\(y(t)\\) in context.\u003c/p\u003e\n\u003cp\u003eThis is a special class of \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable diffequ\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"autonomous-odes-level-off-at-stationary-curves\"\u003eautonomous ODEs level off at stationary curves\u003c/h2\u003e\n\u003cp\u003efor autonomous ODEs can never level off at non-stationary points. Otherwise, that would be a stationary point.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhstability/\"\u003estability (ODEs)\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"time-invariant-expressions\"\u003etime-invariant expressions\u003c/h2\u003e\n\u003cp\u003eFor forms by which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas in, the expression is \u003cstrong\u003etime invariant\u003c/strong\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhautonomous_odes/","tags":null,"title":"autonomous ODEs"},{"categories":null,"contents":"Key sequence In this chapter, we defined complex numbers, their definition, their closeness under addition and multiplication, and their properties These properties make them a field: namely, they have, associativity, commutativity, identities, inverses, and distribution. notably, they are different from a group by having 1) two operations 2) additionally, commutativity and distributivity. We then defined \\(\\mathbb{F}^n\\), defined addition, additive inverse, and zero. These combined (with some algebra) shows that \\(\\mathbb{F}^n\\) under addition is a commutative group. Lastly, we show that there is this magical thing called scalar multiplication in \\(\\mathbb{F}^n\\) and that its associative, distributive, and has an identity. Technically scalar multiplication in \\(\\mathbb{F}^n\\) commutes too but extremely wonkily so we don\u0026rsquo;t really think about it. 
New Definitions complex number addition and multiplication of complex numbers subtraction and division of complex numbers field: \\(\\mathbb{F}\\) is \\(\\mathbb{R}\\) or \\(\\mathbb{C}\\) power list \\(\\mathbb{F}^n\\): F^n coordinate addition in \\(\\mathbb{F}^n\\) additive inverse of \\(\\mathbb{F}^n\\) \\(0\\): zero scalar multiplication in \\(\\mathbb{F}^n\\) Results and Their Proofs properties of complex arithmetic commutativity associativity identities additive inverse multiplicative inverse distributive property properties of \\(\\mathbb{F}^n\\) addition in \\(\\mathbb{F}^n\\) is associative addition in \\(\\mathbb{F}^n\\) is commutative addition in \\(\\mathbb{F}^n\\) has an identity (zero) addition in \\(\\mathbb{F}^n\\) has an inverse scalar multiplication in \\(\\mathbb{F}^n\\) is associative scalar multiplication in \\(\\mathbb{F}^n\\) has an identity (one) scalar multiplication in \\(\\mathbb{F}^n\\) is distributive Question for Jana No demonstration in exercises or book that scalar multiplication is commutative, why? 
Interesting Factoids You can take a field, look at an operation, and take that (minus the other op\u0026rsquo;s identity), and call it a group (groups (vector spaces (fields ))) ","html":"\u003ch2 id=\"key-sequence\"\u003eKey sequence\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eIn this chapter, we defined \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es, their definition, their closeness under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e, and their \u003ca href=\"/posts/kbhcomplex_number/#requirements\"\u003eproperties\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThese properties make them a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e: namely, they have, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e, \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003es, and distribution.\u003c/li\u003e\n\u003cli\u003enotably, they are different from a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e by having 1) two operations 2) additionally, \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e and distributivity. 
We then defined \\(\\mathbb{F}^n\\), defined \u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n\"\u003eaddition\u003c/a\u003e, \u003ca href=\"/posts/kbhlists_over_fields/#additive-inverse-of-mathbb-f-n\"\u003eadditive inverse\u003c/a\u003e, and \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eThese combined (with some \u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e) shows that \\(\\mathbb{F}^n\\) under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is a \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutative\u003c/a\u003e \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eLastly, we show that there is this magical thing called \u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e and that its \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, distributive, and has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e. 
Technically \u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e commutes too but extremely wonkily so we don\u0026rsquo;t really think about it.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/#addition-and-multiplication-of-id-7c982e7e-b8be-4053-a71e-fc0dba7a5da5-complex-number-s\"\u003eaddition and multiplication of complex numbers\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/#subtraction-and-division-of-id-7c982e7e-b8be-4053-a71e-fc0dba7a5da5-complex-number-s\"\u003esubtraction and division of complex numbers\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e: \\(\\mathbb{F}\\) is \\(\\mathbb{R}\\) or \\(\\mathbb{C}\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpower_math/\"\u003epower\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{F}^n\\): \u003ca href=\"/posts/kbhlists_over_fields/\"\u003eF^n\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/\"\u003ecoordinate\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#additive-inverse-of-mathbb-f-n\"\u003eadditive inverse of \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(0\\): \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/#requirements\"\u003eproperties of complex arithmetic\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ecommutativity\u003c/li\u003e\n\u003cli\u003eassociativity\u003c/li\u003e\n\u003cli\u003eidentities\u003c/li\u003e\n\u003cli\u003eadditive inverse\u003c/li\u003e\n\u003cli\u003emultiplicative inverse\u003c/li\u003e\n\u003cli\u003edistributive property\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eproperties of \\(\\mathbb{F}^n\\)\n\u003cul\u003e\n\u003cli\u003eaddition in \\(\\mathbb{F}^n\\) is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#addition-in-mathbb-f-n-is-commutative\"\u003eaddition in \\(\\mathbb{F}^n\\) is commutative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e in \\(\\mathbb{F}^n\\) has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e (\u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#additive-inverse-of-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\) has an inverse\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003escalar multiplication in \\(\\mathbb{F}^n\\) has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e (one)\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e is distributive\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"question-for-jana\"\u003eQuestion for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cinput disabled=\"\" type=\"checkbox\"\u003e No demonstration in exercises or book that scalar \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e is commutative, why?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eYou can take a field, look at an operation, and take that (minus the other op\u0026rsquo;s identity), and call it a group\u003c/li\u003e\n\u003cli\u003e(groups (vector spaces (fields )))\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_a/","tags":null,"title":"Axler 1.A"},{"categories":null,"contents":"Key Sequence \\(\\mathbb{F}^{n}\\) not being a field kinda sucks, so we made an object called a \u0026ldquo;vector space\u0026rdquo; which essentially does everything a field does except without necessitating a multiplicative inverse Formally, a vector space is closed over addition and have a scalar multiplication. Its addition is commutative, both addition and scalar multiplication is associative, and distributivity holds. There is an additive identity, additive inverse, and multiplicative identity. We defined something called \\(\\mathbb{F}^{S}\\), which is the set of functions from a set \\(S\\) to \\(\\mathbb{F}\\). Turns out, \\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\) and we can secretly treat \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) as special cases of \\(\\mathbb{F}^{s}\\). We established that identity and inverse are unique additively in vector spaces. Lastly, we proved some expressions we already know: \\(0v=0\\), \\(-1v=-v\\). 
New Definitions addition and scalar multiplication vector space and vectors vector space \u0026ldquo;over\u0026rdquo; fields \\(V\\) denotes a vector space over \\(\\mathbb{F}\\) \\(-v\\) is defined as the additive inverse of \\(v \\in V\\) Results and Their Proofs \\(\\mathbb{F}^{\\infty}\\) is a Vector Space over \\(\\mathbb{F}\\) \\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\) All vector spaces \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) are just special cases \\(\\mathbb{F}^{S}\\): you can think about those as a mapping from coordinates \\((1,2,3, \\dots )\\) to their actual values in the \u0026ldquo;vector\u0026rdquo; additive identity is unique in a vector space additive inverse is unique in a vector space \\(0v=0\\), both ways (for zero scalars and vectors) \\(-1v=-v\\) Questions for Jana The way Axler presented the idea of \u0026ldquo;over\u0026rdquo; is a tad weird; is it really only scalar multiplication which hinders vector spaces without \\(\\mathbb{F}\\)? In other words, do the sets that form vector spaces, apart from the \\(\\lambda\\) used for scalar multiplication, need anything to do with the \\(\\mathbb{F}\\) they are \u0026ldquo;over\u0026rdquo;? The name of the field and what its over do not have to be the same\u0026mdash;\u0026ldquo;vector space \\(\\mathbb{C}^2\\) over \\(\\{0,1\\}\\)\u0026rdquo; is a perfectly valid statement If lists have finite length \\(n\\), then what are the elements of \\(\\mathbb{F}^{\\infty}\\) called? \u0026ldquo;we could think about \\(\\mathbb{F}^{\\infty}\\), but we aren\u0026rsquo;t gonna.\u0026rdquo; Why is \\(1v=v\\) an axiom, whereas we say that some \\(0\\) exists? 
because we know 1 already, and you can follow the behavor of scalar multiplication what\u0026rsquo;s that thing called again in proofs where you just steal the property of a constituent element?: inherits Interesting Factoids The simplest vector space is \\(\\{0\\}\\) ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathbb{F}^{n}\\) not being a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e kinda sucks, so we made an object called a \u0026ldquo;\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u0026rdquo; which essentially does everything a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e does except without necessitating a \u003ca href=\"/posts/kbhinverses/\"\u003emultiplicative inverse\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eFormally, a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e over \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and have a \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e. Its \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutative\u003c/a\u003e, both \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, and \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e holds. 
There is an \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e, \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e, and \u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003eWe defined something called \\(\\mathbb{F}^{S}\\), which is the set of functions from a set \\(S\\) to \\(\\mathbb{F}\\). Turns out, \u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003e\\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\)\u003c/a\u003e and we can secretly treat \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) as special cases of \\(\\mathbb{F}^{s}\\).\u003c/li\u003e\n\u003cli\u003eWe established that \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e and \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e are unique additively in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es.\u003c/li\u003e\n\u003cli\u003eLastly, we proved some expressions we already know: \\(0v=0\\), \\(-1v=-v\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e and \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003evector space \u0026ldquo;over\u0026rdquo; fields\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(V\\) denotes a vector space over \\(\\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\\(-v\\) is defined as the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e of \\(v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinfty_is_a_vector_space_over_f/\"\u003e\\(\\mathbb{F}^{\\infty}\\) is a Vector Space over \\(\\mathbb{F}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003e\\(\\mathbb{F}^{S}\\) is a Vector Space Over \\(\\mathbb{F}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAll \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{\\infty}\\) are just special cases \\(\\mathbb{F}^{S}\\): you can think about those as a mapping from coordinates \\((1,2,3, \\dots )\\) to their actual values in the \u0026ldquo;vector\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity_is_unique_in_a_vector_space/\"\u003eadditive identity is unique in a vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003eadditive inverse is unique in a vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzero_times_vector/\"\u003e\\(0v=0\\)\u003c/a\u003e, both ways (for zero scalars and vectors)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbh1v_1/\"\u003e\\(-1v=-v\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eThe way Axler presented the idea of \u0026ldquo;over\u0026rdquo; is a tad weird; is it really only scalar multiplication which hinders vector spaces without \\(\\mathbb{F}\\)? 
In other words, do the sets that form vector spaces, apart from the \\(\\lambda\\) used for scalar multiplication, need anything to do with the \\(\\mathbb{F}\\) they are \u0026ldquo;over\u0026rdquo;?\u003c/del\u003e The \u003cstrong\u003ename\u003c/strong\u003e of the field and what its \u003cstrong\u003eover\u003c/strong\u003e do not have to be the same\u0026mdash;\u0026ldquo;vector space \\(\\mathbb{C}^2\\) over \\(\\{0,1\\}\\)\u0026rdquo; is a perfectly valid statement\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eIf lists have finite length \\(n\\), then what are the elements of \\(\\mathbb{F}^{\\infty}\\) called?\u003c/del\u003e \u0026ldquo;we could think about \\(\\mathbb{F}^{\\infty}\\), but we aren\u0026rsquo;t gonna.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eWhy is \\(1v=v\\) an axiom, whereas we say that \u003cem\u003esome\u003c/em\u003e \\(0\\) exists?\u003c/del\u003e because we know 1 already, and you can follow the behavor of scalar multiplication\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ewhat\u0026rsquo;s that thing called again in proofs where you just steal the property of a constituent element?\u003c/del\u003e: inherits\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe simplest vector space is \\(\\{0\\}\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_1_b/","tags":null,"title":"Axler 1.B"},{"categories":null,"contents":"Key Sequence we defined subspace and how to check for them we want to operate on subsets, so we defined the sum of subsets we saw that the sum of subspaces are the smallest containing subspace and finally, we defined direct sums and how to prove them New Definitions subspace sum of subsets direct sum Results and Their Proofs checking for subspaces simplified check for subspace sum of subspaces is the smallest subspace with both subspaces creating direct sums a sum of subsets is a 
direct sum IFF there is only one way to write \\(0\\) a sum of subsets is only a direct sum IFF their intersection is the set containing \\(0\\) Questions for Jana Does the additive identity have be the same between different subspaces of the same vector space? yes, otherwise the larger vector space has two additive identities. Does the addition and multiplication operations in a subspace have to be the same as its constituent vector space? by definition Why are direct sums defined on sub-spaces and not sum of subsets? because the union is usually not a subspace so we use sums and keep it in subspaces ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e and how to check for them\u003c/li\u003e\n\u003cli\u003ewe want to operate on subsets, so we defined the \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe saw that the \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subspaces\u003c/a\u003e are the \u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esmallest containing subspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand finally, we defined \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es and how to prove them\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003echecking for 
subspaces\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/#simplified-check-for-subspace\"\u003esimplified check for subspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esum of subspaces is the smallest subspace with both subspaces\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ecreating direct sums\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-there-is-only-one-way-to-write-0\"\u003ea sum of subsets is a direct sum IFF there is only one way to write \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-only-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-their-intersection-is-set-containing-0\"\u003ea sum of subsets is only a direct sum IFF their intersection is the set containing \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eDoes the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e have be the same between different \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of the same \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e?\u003c/del\u003e yes, otherwise the larger \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e has two \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identities\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eDoes the \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca 
href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e operations in a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e have to be the same as its constituent \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e?\u003c/del\u003e by definition\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eWhy are \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es defined on sub-\u003cstrong\u003e\u003cstrong\u003espaces\u003c/strong\u003e\u003c/strong\u003e and not \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e?\u003c/del\u003e because the union is usually not a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e so we use sums and keep it in subspaces\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_1_c/","tags":null,"title":"Axler 1.C"},{"categories":null,"contents":"3: Show that the set of differential real-valued functions \\(f\\) on the interval \\((-4,4)\\) such that \\(f\u0026rsquo;(-1)=3f(2)\\) is a subspace of \\(\\mathbb{R}^{(-4,4)}\\)\n4: Suppose \\(b \\in R\\). Show that the set of continuous real-valued functions \\(f\\) on the interval \\([0,1]\\) such that \\(\\int_{0}^{1}f=b\\) is a subspace of \\(\\mathbb{R}^{[0,1]}\\) IFF \\(b=0\\)\nAdditive Identity:\nassume \\(\\int_{0}^{1}f=b\\) is a subspace\n","html":"\u003cp\u003e3: Show that the set of differential real-valued functions \\(f\\) on the interval \\((-4,4)\\) such that \\(f\u0026rsquo;(-1)=3f(2)\\) is a subspace of \\(\\mathbb{R}^{(-4,4)}\\)\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e4: Suppose \\(b \\in R\\). 
Show that the set of continuous real-valued functions \\(f\\) on the interval \\([0,1]\\) such that \\(\\int_{0}^{1}f=b\\) is a subspace of \\(\\mathbb{R}^{[0,1]}\\) IFF \\(b=0\\)\u003c/p\u003e\n\u003cp\u003eAdditive Identity:\u003c/p\u003e\n\u003cp\u003eassume \\(\\int_{0}^{1}f=b\\) is a subspace\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_1_c_excercises/","tags":null,"title":"Axler 1.C Exercises"},{"categories":null,"contents":"Key Sequence we defined the combination of a list of vectors as a linear combination and defined set of all linear combination of vectors to be called a span we defined the idea of a finite-dimensional vector space vis a vi spanning we took a god-forsaken divergence into polynomials that will surely not come back and bite us in chapter 4 we defined linear independence + linear dependence and, from those definition, proved the actual usecase of these concepts which is the Linear Dependence Lemma we apply the Linear Dependence Lemma to show that length of linearly-independent list \\(\\leq\\) length of spanning list as well as that finite-dimensional vector spaces make finite subspaces. 
Both of these proofs work by making linearly independent lists\u0026mdash;the former by taking a spanning list and making it smaller and smaller, and the latter by taking a linearly independent list and making it bigger and bigger New Definitions linear combination span + \u0026ldquo;spans\u0026rdquo; finite-dimensional vector space infinite-demensional vector space finite-dimensional subspaces polynomial \\(\\mathcal{P}(\\mathbb{F})\\) \\(\\mathcal{P}_{m}(\\mathbb{F})\\) degree of a polynomial \\(\\deg p\\) linear independence and linear dependence Linear Dependence Lemma Results and Their Proofs span is the smallest subspace containing all vectors in the list \\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\) the world famous Linear Dependence Lemma and its fun issue length of linearly-independent list \\(\\leq\\) length of spanning list subspaces of inite-dimensional vector spaces is finite dimensional Questions for Jana obviously polynomials are non-linear structures; under what conditions make them nice to work with in linear algebra? what is the \u0026ldquo;obvious way\u0026rdquo; to change Linear Dependence Lemma\u0026rsquo;s part \\(b\\) to make \\(v_1=0\\) work? for the finite-dimensional subspaces proof, though we know that the process terminates, how do we know that it terminates at a spanning list of \\(U\\) and not just a linearly independent list in \\(U\\)? direct sum and linear independence related; how exactly? Interesting Factoids I just ate an entire Chinese new-year worth of food while typing this up. 
That\u0026rsquo;s worth something right\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined the combination of a list of vectors as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e and defined set of all \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es to be called a \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe defined the idea of a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e vis a vi \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe took a god-forsaken divergence into \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es that will surely not come back and bite us in chapter 4\u003c/li\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e + \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinear dependence\u003c/a\u003e and, from those definition, proved the actual usecase of these concepts which is the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe apply the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e to show that \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e as well as that \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional vector spaces make finite subspaces\u003c/a\u003e. 
Both of these proofs work by making \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e lists\u0026mdash;the former by taking a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list and making it smaller and smaller, and the latter by taking a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list and making it bigger and bigger\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e + \u0026ldquo;\u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#mathcal-p--mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#mathcal-p-m--mathbb-f\"\u003e\\(\\mathcal{P}_{m}(\\mathbb{F})\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#degree-of-a-polynomial-deg-p\"\u003edegree of a polynomial \\(\\deg p\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e and \u003ca 
href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinear dependence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspan/#span-is-the-smallest-subspace-containing-all-vectors-in-the-list\"\u003espan is the smallest subspace containing all vectors in the list\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/#mathcal-p--mathbb-f--is-a-vector-space-over-mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethe world famous \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e and its fun \u003ca href=\"/posts/kbhlinear_dependence_lemma/#issue\"\u003eissue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003esubspaces of inite-dimensional vector spaces is finite dimensional\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eobviously \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es are non-linear structures; under what conditions make them nice to work with in linear algebra?\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ewhat is the \u0026ldquo;obvious way\u0026rdquo; to change \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u0026rsquo;s part \\(b\\) to make 
\\(v_1=0\\) work?\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003efor the \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e proof, though we know that the process terminates, how do we know that it terminates at a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(U\\) and not just a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(U\\)?\u003c/li\u003e\n\u003cli\u003edirect sum and linear independence related; how exactly?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eI just ate an entire Chinese new-year worth of food while typing this up. That\u0026rsquo;s worth \u003cem\u003esomething\u003c/em\u003e right\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_2_a/","tags":null,"title":"Axler 2.A"},{"categories":null,"contents":"Key Sequence we defined basis of a vector space\u0026mdash;a linearly independent spanning list of that vector space\u0026mdash;and shown that to be a basis one has to be able to write a write an unique spanning list we show that you can chop a spanning list of a space down to a basis or build a linearly independent list up to a basis because of this, you can make a spanning list of finite-dimensional vector spaces and chop it down to a basis: so every finite-dimensional vector space has a basis lastly, we can use the fact that you can grow list to basis to show that every subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\) New Definitions basis and criteria for basis\nI mean its a chapter on bases not sure what you are expecting.\nResults and Their Proofs a list is a basis if you can write every memeber of their span uniquely every finite-dimensional vector space has a basis dualing basis constructions all spanning lists contains a basis of which you are spanning a linearly independent list 
expends to a basis every subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\) Questions for Jana Is the subspace direct sum proof a unique relationship? That is, is every complement \\(W\\) for each \\(U \\subset V\\) unique? ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u0026mdash;a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of that \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u0026mdash;and shown that to \u003ca href=\"/posts/kbhbasis/#criteria-for-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ebe a basis\u003c/a\u003e one has to be able to write a write an unique \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list\u003c/li\u003e\n\u003cli\u003ewe show that you can \u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003echop a spanning list of a space down to a basis\u003c/a\u003e or \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ebuild a linearly independent list up to a basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebecause of this, you can make a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector spaces\u003c/a\u003e and \u003ca 
href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003echop it down to a basis\u003c/a\u003e: so every \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e has a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elastly, we can use the fact that you can grow \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e to \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e to show that \u003ca href=\"/posts/kbhdirect_sum/#every-id-345c37fa-5d4c-44e9-ad03-2fe7e5a37224-subspace-of-v-is-a-part-of-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-equaling-to-v\"\u003eevery subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e and \u003ca href=\"/posts/kbhbasis/#criteria-for-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ecriteria for basis\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eI mean its a chapter on \u003ca href=\"/posts/kbhbasis/\"\u003ebases\u003c/a\u003e not sure what you are expecting.\u003c/p\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis/#criteria-for-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea list is a basis if you can write every memeber of their span uniquely\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery finite-dimensional vector space has a basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edualing basis 
constructions\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea linearly independent list expends to a basis\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sum/#every-id-345c37fa-5d4c-44e9-ad03-2fe7e5a37224-subspace-of-v-is-a-part-of-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-equaling-to-v\"\u003eevery subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIs the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhproof/\"\u003eproof\u003c/a\u003e a unique relationship? That is, is every complement \\(W\\) for each \\(U \\subset V\\) unique?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_2_b/","tags":null,"title":"Axler 2.B"},{"categories":null,"contents":"Key Sequence Because Length of Basis Doesn\u0026rsquo;t Depend on Basis, we defined dimension as the same, shared length of basis in a vector space We shown that lists of the right length (i.e. 
dim that space) that is either spanning or linearly independent must be a basis\u0026mdash;\u0026ldquo;half is good enough\u0026rdquo; theorems we also shown that \\(dim(U_1+U_2) = dim(U_1)+dim(U_2) - dim(U_1 \\cap U_2)\\): dimension of sums New Definitions dimension Results and Their Proofs Length of Basis Doesn\u0026rsquo;t Depend on Basis lists of right length are basis linearly independent list of length dim V are a basis of V spanning list of length of dim V are a basis of V dimension of sums Questions for Jana Example 2.41: why is it that \\(\\dim U \\neq 4\\)? We only know that \\(\\dim \\mathcal{P}_{3}(\\mathbb{R}) = 4\\), and \\(\\dim U \\leq 4\\). Is it because \\(U\\) (i.e. basis of \\(U\\) doesn\u0026rsquo;t span the polynomial) is strictly a subset of \\(\\mathcal{P}_{3}(\\mathbb{R})\\), so there must be some extension needed? because we know that \\(U\\) isn\u0026rsquo;t all of \\(\\mathcal{P}_{3}\\). Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eBecause \u003ca href=\"/posts/kbhlength_of_basis_doesn_t_depend_on_basis/\"\u003eLength of Basis Doesn\u0026rsquo;t Depend on Basis\u003c/a\u003e, we defined \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e as the same, shared length of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWe shown that lists of the right length (i.e. 
dim that space) that is \u003cem\u003eeither\u003c/em\u003e \u003ca href=\"/posts/kbhdimension/#spanning-list-of-length-of-dim-v-are-a-basis-of-v\"\u003espanning\u003c/a\u003e or \u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent\u003c/a\u003e must be a basis\u0026mdash;\u0026ldquo;half is good enough\u0026rdquo; theorems\u003c/li\u003e\n\u003cli\u003ewe also shown that \\(dim(U_1+U_2) = dim(U_1)+dim(U_2) - dim(U_1 \\cap U_2)\\): \u003ca href=\"/posts/kbhsum_of_subsets/#id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-of-sums\"\u003edimension of sums\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlength_of_basis_doesn_t_depend_on_basis/\"\u003eLength of Basis Doesn\u0026rsquo;t Depend on Basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elists of right length are \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdimension/#spanning-list-of-length-of-dim-v-are-a-basis-of-v\"\u003espanning list of length of dim V are a basis of V\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-of-sums\"\u003edimension of sums\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for 
Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003eExample 2.41: why is it that \\(\\dim U \\neq 4\\)? We only know that \\(\\dim \\mathcal{P}_{3}(\\mathbb{R}) = 4\\), and \\(\\dim U \\leq 4\\). Is it because \\(U\\) (i.e. \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U\\) doesn\u0026rsquo;t \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e the polynomial) is strictly a subset of \\(\\mathcal{P}_{3}(\\mathbb{R})\\), so there must be \u003cem\u003esome\u003c/em\u003e extension needed?\u003c/del\u003e because we know that \\(U\\) isn\u0026rsquo;t all of \\(\\mathcal{P}_{3}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_2_c/","tags":null,"title":"Axler 2.C"},{"categories":null,"contents":"OMGOMGOMG its Linear Maps time! \u0026ldquo;One of the key definitions in linear algebra.\u0026rdquo;\nKey Sequence We define these new-fangled functions called Linear Maps, which obey \\(T(u+v) = Tu+Tv\\) and \\(T(\\lambda v) = \\lambda Tv\\) We show that the set of all linear maps between two vector spaces \\(V,W\\) is denoted \\(\\mathcal{L}(V,W)\\); and, in fact, by defining addition and scalar multiplication of Linear Maps in the way you\u0026rsquo;d expect, \\(\\mathcal{L}(V,W)\\) is a vector space! this also means that we can use effectively the \\(0v=0\\) proof to show that linear maps take \\(0\\) to \\(0\\) we show that Linear Maps can be defined uniquely by where it takes the basis of a vector space; in fact, there exists a Linear Map to take the basis anywhere you want to go! 
though this doesn\u0026rsquo;t usually make sense, we call the \u0026ldquo;composition\u0026rdquo; operation on Linear Maps their \u0026ldquo;product\u0026rdquo; and show that this product is associative, distributive, and has an identity New Definitions Linear Map \u0026mdash; additivity (adding \u0026ldquo;distributes\u0026rdquo;) and homogeneity (scalar multiplication \u0026ldquo;factors\u0026rdquo;) \\(\\mathcal{L}(V,W)\\) any polynomial map from Fn to Fm is a linear map addition and scalar multiplication on \\(\\mathcal{L}(V,W)\\); and, as a bonus, \\(\\mathcal{L}(V,W)\\) a vector space! naturally (almost by the same \\(0v=0\\) proof), linear maps take \\(0\\) to \\(0\\) Product of Linear Maps is just composition. These operations are: associative distributive has an identity Results and Their Proofs technically a result: any polynomial map from Fn to Fm is a linear map basis of domain of linear maps uniquely determines them Questions for Jana why does the second part of the basis of domain proof make it unique? ","html":"\u003cp\u003eOMGOMGOMG its \u003cem\u003eLinear Maps\u003c/em\u003e time! 
\u0026ldquo;One of the key definitions in linear algebra.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWe define these new-fangled functions called \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Maps\u003c/a\u003e, which obey \\(T(u+v) = Tu+Tv\\) and \\(T(\\lambda v) = \\lambda Tv\\)\u003c/li\u003e\n\u003cli\u003eWe show that the set of all linear maps between two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(V,W\\) is denoted \\(\\mathcal{L}(V,W)\\); and, in fact, by defining \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es in the way you\u0026rsquo;d expect, \\(\\mathcal{L}(V,W)\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e!\n\u003cul\u003e\n\u003cli\u003ethis also means that we can use effectively the \\(0v=0\\) proof to show that \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe show that \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es can be defined uniquely by \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ewhere it takes the basis of a vector space\u003c/a\u003e; in fact, there exists a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e to take the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e \u003cem\u003eanywhere\u003c/em\u003e you want to go!\u003c/li\u003e\n\u003cli\u003ethough this doesn\u0026rsquo;t usually make sense, we call the \u0026ldquo;composition\u0026rdquo; operation on \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es their \u0026ldquo;product\u0026rdquo; and show that this product is \u003ca 
href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributive\u003c/a\u003e, and has an \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \u0026mdash; additivity (adding \u0026ldquo;distributes\u0026rdquo;) and homogeneity (scalar multiplication \u0026ldquo;factors\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#mathcal-l--v-w\"\u003e\\(\\mathcal{L}(V,W)\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#any-map-from-mathbb-f-n-to-mathbb-f-m\"\u003eany polynomial map from Fn to Fm is a linear map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#addition-and-scalar-multiplication-on-mathcal-l--v-w\"\u003eaddition and scalar multiplication on \\(\\mathcal{L}(V,W)\\)\u003c/a\u003e; and, as a bonus, \\(\\mathcal{L}(V,W)\\) a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e!\u003c/li\u003e\n\u003cli\u003enaturally (almost by the same \\(0v=0\\) proof), \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_linear_maps/\"\u003eProduct of Linear Maps\u003c/a\u003e is just composition. 
These operations are:\n\u003cul\u003e\n\u003cli\u003eassociative\u003c/li\u003e\n\u003cli\u003edistributive\u003c/li\u003e\n\u003cli\u003ehas an identity\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etechnically a result: \u003ca href=\"/posts/kbhlinear_map/#any-map-from-mathbb-f-n-to-mathbb-f-m\"\u003eany polynomial map from Fn to Fm is a linear map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain of linear maps uniquely determines them\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy does the second part of the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e proof make it unique?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_a/","tags":null,"title":"Axler 3.A"},{"categories":null,"contents":"Key Sequence we defined the null space and injectivity from that, we showed that injectivity IFF implies that null space is \\(\\{0\\}\\), essentially because if \\(T0=0\\) already, there cannot be another one that also is taken to \\(0\\) in an injective function we defined range and surjectivity we showed that these concepts are strongly related by the fundamental theorem of linear maps: if \\(T \\in \\mathcal{L}(V,W)\\), then \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\) from the fundamental theorem, we showed the somewhat intuitive pair about the sizes of maps: map to smaller space is not injective, map to bigger space is not surjective we then applied that result to show results about homogeneous systems homogenous system with more variables than equations has nonzero solutions inhomogenous system with more equations than variables has no solutions for an arbitrary set of constants New Definitions 
null space injectivity range surjectivity homogeneous system Results and Their Proofs the null space is a subspace of the domain injectivity IFF implies that null space is \\(\\{0\\}\\) the fundamental theorem of linear maps \u0026ldquo;sizes\u0026rdquo; of maps map to smaller space is not injective map to bigger space is not surjective solving systems of equations: homogenous system with more variables than equations has nonzero solutions inhomogenous system with more equations than variables has no solutions for an arbitrary set of constants Questions for Jana \u0026ldquo;To prove the inclusion in the other direction, suppose v 2 null T.\u0026rdquo; for 3.16; what is the first direction? maybe nothing maps to \\(0\\) ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e and \u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003efrom that, we showed that \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity IFF implies that null space is \\(\\{0\\}\\)\u003c/a\u003e, essentially because if \\(T0=0\\) already, there cannot be another one that also is taken to \\(0\\) in an \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e function\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe showed that these concepts are strongly related by the \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003efundamental theorem of linear maps\u003c/a\u003e: if \\(T \\in \\mathcal{L}(V,W)\\), then \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\)\u003c/li\u003e\n\u003cli\u003efrom the fundamental theorem, we 
showed the somewhat intuitive pair about the sizes of maps: \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e, \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then applied that result to show results about \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous\u003c/a\u003e systems\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-variables-than-equations-has-nonzero-solutions\"\u003ehomogenous system with more variables than equations has nonzero solutions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#in-id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-equations-than-variables-has-no-solutions-for-an-arbitrary-set-of-constants\"\u003einhomogenous system with more equations than variables has no solutions for an arbitrary set of constants\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous system\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their 
Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnull_space/#the-null-space-is-a-id-345c37fa-5d4c-44e9-ad03-2fe7e5a37224-subspace-of-the-domain\"\u003ethe null space is a subspace of the domain\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity IFF implies that null space is \\(\\{0\\}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003efundamental theorem of linear maps\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;sizes\u0026rdquo; of maps\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003esolving systems of equations:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-variables-than-equations-has-nonzero-solutions\"\u003ehomogenous system with more variables than equations has nonzero solutions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhomogeneity/#in-id-f57b638c-b8c9-4c88-b02f-9cd0ed47c51e-homogenous-system-with-more-equations-than-variables-has-no-solutions-for-an-arbitrary-set-of-constants\"\u003einhomogenous system with more equations than variables has no solutions for an arbitrary set of constants\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for 
Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003e\u0026ldquo;To prove the inclusion in the other direction, suppose v 2 null T.\u0026rdquo; for 3.16; what is the \u003cem\u003efirst\u003c/em\u003e direction?\u003c/del\u003e maybe nothing maps to \\(0\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_b/","tags":null,"title":"Axler 3.B"},{"categories":null,"contents":"matricies!!!!\nKey Sequence matricies exist, you can add them, scalarly multiply them, and actually multiply them they can represent Linear Maps by showing where they take basis unsurprisingly, the set of matricies of a shape is a vector space New Definitions matricies matrix of Linear Map matrix addition and scalar multiplications matrix multiplication \\(\\mathbb{F}^{m,n}\\) Results and Their Proofs sums and scalar multiplication of matricies, and why they work to represent Linear Maps \\(\\mathbb{F}^{m,n}\\) is a vector space Interesting Factoids its literally matricies\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e!!!!\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e exist, you can \u003ca href=\"/posts/kbhmatricies/#sums-and-scalar-multiplication-of-id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies\"\u003eadd them, scalarly multiply them\u003c/a\u003e, and \u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003eactually multiply them\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethey \u003ca href=\"/posts/kbhmatricies/#matrix-of-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map\"\u003ecan represent Linear Maps by showing where they take basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eunsurprisingly, the \u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003eset of matricies of a shape is a vector space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#matrix-of-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map\"\u003ematrix of Linear Map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#sums-and-scalar-multiplication-of-id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies\"\u003ematrix addition and scalar multiplications\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#sums-and-scalar-multiplication-of-id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies\"\u003esums and scalar multiplication of matricies\u003c/a\u003e, and why they work to represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\) is a vector space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eits literally \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_c/","tags":null,"title":"Axler 3.C"},{"categories":null,"contents":"isomorphisms. 
Somebody\u0026rsquo;s new favourite word since last year.\nKey Sequence we showed that a linear map\u0026rsquo;s inverse is unique, and so named the inverse \\(T^{-1}\\) we then showed an important result, that injectivity and surjectivity implies invertability this property allowed us to use invertable maps to define isomorphic spaces, naming the invertable map between them as the isomorphism we see that having the same dimension is enough to show invertability (IFF), because we can use basis of domain to map the basis of one space to another we then use that property to establish that matricies and linear maps have an isomorphism between them: namely, the matrixify operator \\(\\mathcal{M}\\). this isomorphism allow us to show that the dimension of a set of Linear Maps is the product of the dimensions of their domain and codomain (that \\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)) We then, for some unknown reason, decided that right this second we gotta define matrix of a vector, and that linear map applications are like matrix multiplication because of it. 
Not sure how this relates finally, we defined a Linear Map from a space to itself as an operator we finally show an important result that, despite not being true for infinite-demensional vector space, injectivity is surjectivity in finite-dimensional operators New Definitions invertability isomorphism + isomorphic vector spaces matrix of a vector operator Results and Their Proofs linear map inverse is unique injectivity and surjectivity implies invertability two vector spaces are isomorphic IFF they have the same dimension matricies and Linear Maps from the right dimensions are isomorphic \\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\) \\(\\mathcal{M}(T)_{.,k} = \\mathcal{M}(Tv_{k})\\), a result of how everything is defined (see matrix of a vector) \u0026ldquo;each column of a matrix represents where each of the basis of the input gets taken to\u0026rdquo; So applying a vector to a matrix shows the linear combination of what where the basis sent linear maps are like matrix multiplication injectivity is surjectivity in finite-dimensional operators Questions for Jana why doesn\u0026rsquo;t axler just say the \u0026ldquo;basis of domain\u0026rdquo; directly (i.e. he did a lin comb instead) for the second direction for the two vector spaces are isomorphic IFF they have the same dimension proof? because the next steps for spanning (surjectivity) and linear independence (injectivity) is made more obvious clarify the matricies and Linear Maps from the right dimensions are isomorphic proof what is the \u0026ldquo;multiplication by \\(x^{2}\\)\u0026rdquo; operator? literally multiplying by \\(x^{2}\\) how does the matrix of a vector detour relate to the content before and after? 
I suppose an isomorphism exists but it isn\u0026rsquo;t explicitly used in the linear maps are like matrix multiplication proof, which is the whole point because we needed to close the loop of being able to linear algebra with matricies completely, which we didn\u0026rsquo;t know without the isomorphism between matricies and maps Interesting Factoids ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003es. Somebody\u0026rsquo;s new favourite word since last year.\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe showed that a \u003ca href=\"/posts/kbhinvertability/#linear-map-inverse-is-unique\"\u003elinear map\u0026rsquo;s inverse is unique\u003c/a\u003e, and so named the inverse \\(T^{-1}\\)\u003c/li\u003e\n\u003cli\u003ewe then showed an important result, that \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethis property allowed us to use \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e maps to define \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e spaces, naming the \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e map between them as the \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ewe see that \u003ca href=\"/posts/kbhisomorphism/#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003ehaving the same dimension is enough to show invertability (IFF)\u003c/a\u003e, because we can use \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e to map the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of one space to 
another\u003c/li\u003e\n\u003cli\u003ewe then use that property to establish that \u003ca href=\"/posts/kbhisomorphism/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies-and-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map-s-from-the-right-id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-s-are-id-3f5ba3a5-15d4-4b58-99de-09eb1e4713cb-isomorphic\"\u003ematricies and linear maps have an isomorphism between them\u003c/a\u003e: namely, the matrixify operator \\(\\mathcal{M}\\).\u003c/li\u003e\n\u003cli\u003ethis \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e allow us to show that the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of a set of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es is the product of the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003es of their domain and codomain (that \u003ca href=\"/posts/kbhisomorphism/#dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWe then, for some unknown reason, decided that right this second we gotta define \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e, and that \u003ca href=\"/posts/kbhmatrix_multiplication/#linear-maps-are-like-matrix-multiplication\"\u003elinear map applications are like matrix multiplication\u003c/a\u003e because of it. 
Not sure how this relates\u003c/li\u003e\n\u003cli\u003efinally, we defined a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e from a space to itself as an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ewe finally show an important result that, despite not being true for \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e, \u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e + \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinvertability/#linear-map-inverse-is-unique\"\u003elinear map inverse is unique\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies-and-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map-s-from-the-right-id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-s-are-id-3f5ba3a5-15d4-4b58-99de-09eb1e4713cb-isomorphic\"\u003ematricies and Linear Maps from the right dimensions are isomorphic\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhisomorphism/#dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{M}(T)_{.,k} = \\mathcal{M}(Tv_{k})\\), a result of how everything is defined (see \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e)\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;each column of a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e represents where each of the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the input gets taken to\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eSo applying a vector to a matrix shows the linear combination of what where the basis sent\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/#linear-maps-are-like-matrix-multiplication\"\u003elinear maps are like matrix multiplication\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003ewhy doesn\u0026rsquo;t axler just say the \u0026ldquo;\u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e\u0026rdquo; directly (i.e. he did a lin comb instead) for the second direction for the \u003ca href=\"/posts/kbhisomorphism/#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/a\u003e proof?\u003c/del\u003e because the next steps for \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e (\u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e) and \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e (\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e) is made more obvious\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eclarify the \u003ca href=\"/posts/kbhisomorphism/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matricies-and-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map-s-from-the-right-id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-s-are-id-3f5ba3a5-15d4-4b58-99de-09eb1e4713cb-isomorphic\"\u003ematricies and Linear Maps from the right dimensions are isomorphic\u003c/a\u003e proof\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ewhat is the \u0026ldquo;multiplication by \\(x^{2}\\)\u0026rdquo; \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e?\u003c/del\u003e literally multiplying by \\(x^{2}\\)\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003ehow does the \u003ca 
href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e detour relate to the content before and after? I suppose an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e exists but it isn\u0026rsquo;t explicitly used in the \u003ca href=\"/posts/kbhmatrix_multiplication/#linear-maps-are-like-matrix-multiplication\"\u003elinear maps are like matrix multiplication\u003c/a\u003e proof, which is the whole point\u003c/del\u003e because we needed to close the loop of being able to linear algebra with \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e completely, which we didn\u0026rsquo;t know without the \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between matricies and maps\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_d/","tags":null,"title":"Axler 3.D"},{"categories":null,"contents":"No idea why this is so long!!!\nKey Sequence Firehose of a chapter.\nWe first began an unrelated exploration in Product of Vector Spaces (\u0026ldquo;tuples\u0026rdquo;): we show that the Product of Vector Spaces is a vector space because you can build a list out of zeroing every element except each one on each basis of each element of the tuple sequentially, we learned that the dimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension. 
we defined the product-to-sum map \\(\\Gamma\\) \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective and, as a result, \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) We then tackled the fun part of this chapter, which is affine subsets, parallel structures, quotient spaces, quotient map (affine subsetification maps) we learned an important and useful result that two affine subsets parallel to \\(U\\) are either equal or disjoint (\\(v-w \\in U\\) means \\(v+U = w+U\\) means \\(v+U \\cap w+U \\neq \\emptyset\\), means the first thing) we defined the operations on quotient space, and showed that quotient space operations behave uniformly on equivalent affine subsets. This, and the usual closer proof, demonstrates that quotient spaces is a vector space with the help of the affine subsetification map (the quotient map \\(\\pi\\)), we show that the dimension of a quotient space is the difference between dimensions of its constituents essentially by invoking rank-nullity theorem after knowing the fact that \\(null\\ \\pi = U\\) (because \\(u+U\\) is an affine subset that has not been shifted (think about a line moving along itself\u0026hellip; it doesn\u0026rsquo;t move)) Then, and I\u0026rsquo;m not quite sure why, we defined \\(\\widetilde{T}: V / null\\ T \\to W\\), for some \\(T: V\\to W\\), defined as \\(\\widetilde{T}(v+null\\ T) = Tv\\). We show that the map is Linear, injective, its range is \\(range\\ T\\), and so it forms an isomorphism between \\(V / null\\ T\\) and \\(range\\ T\\). 
Here\u0026rsquo;s something: products and quotients, the intuition\nNew Definitions Product of Vector Spaces operations on Product of Vector Spaces product summation map \\(\\Gamma\\) sum of vector and subspace parallel + affine subset quotient space operations on the quotient space quotient map \\(\\widetilde{T}\\) Results and Their Proofs Product of Vector Spaces is a vector space dimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension Results relating to \\(\\Gamma\\) \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) results relating to affine subsets and quotient spaces two affine subsets parallel to \\(U\\) are either equal or disjoint quotient space operations behave uniformly on equivalent affine subsets quotient space is a vector space: bleh just prove it yourself. additive identity is \\(0+U\\) and additive inverse is \\(-v + U\\). dimension of a quotient space is the difference between dimensions of its constituents results relating to \\(\\widetilde{T}\\) \\(\\widetilde{T}\\) is well defined properties of \\(\\widetilde{T}\\) it is linear it is injective its range is the range of \\(range\\ T\\) it is an isomorphism between \\(V / null\\ T\\) and \\(range\\ T\\) Questions for Jana what\u0026rsquo;s the point of learning about \\(\\widetilde{T}\\)? how are Product of Vector Spaces and quotient space opposites of each other?: products and quotients, the intuition Interesting Factoids Happy Lunar New Year! 
Also, let\u0026rsquo;s hope this is not a trend:\n","html":"\u003cp\u003eNo idea why this is so long!!!\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cp\u003eFirehose of a chapter.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWe first began an unrelated exploration in \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es (\u0026ldquo;tuples\u0026rdquo;):\n\u003cul\u003e\n\u003cli\u003ewe show that the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebecause you can build a list out of zeroing every element except each one on each \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each element of the tuple sequentially, we learned that the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/#dimension-of-the-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension\u003c/a\u003e.\u003c/li\u003e\n\u003cli\u003ewe defined the product-to-sum map \\(\\Gamma\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-gamma-is-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand, as a result, \u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim 
U_{m}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWe then tackled the fun part of this chapter, which is \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es, \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e structures, \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003es, \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e (\u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003eification maps)\n\u003cul\u003e\n\u003cli\u003ewe learned an important and useful result that \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e (\\(v-w \\in U\\) means \\(v+U = w+U\\) means \\(v+U \\cap w+U \\neq \\emptyset\\), means the first thing)\u003c/li\u003e\n\u003cli\u003ewe defined the \u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on quotient space\u003c/a\u003e, and showed that \u003ca href=\"/posts/kbhquotient_space/#id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space-operations-behave-uniformly-on-equivalent-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s\"\u003equotient space operations behave uniformly on equivalent affine subsets\u003c/a\u003e. 
This, and the usual closer proof, demonstrates that \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003es is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewith the help of the \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003eification map (the \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e \\(\\pi\\)), we show that the \u003ca href=\"/posts/kbhquotient_space/#dimension-of-a-quotient-space-is-the-difference-between-dimensions-of-its-constituents\"\u003edimension of a quotient space is the difference between dimensions of its constituents\u003c/a\u003e essentially by invoking \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e after knowing the fact that \\(null\\ \\pi = U\\) (because \\(u+U\\) is an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e that has not been shifted (think about a line moving along itself\u0026hellip; it doesn\u0026rsquo;t move))\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThen, and I\u0026rsquo;m not quite sure why, we defined \\(\\widetilde{T}: V / null\\ T \\to W\\), for some \\(T: V\\to W\\), defined as \\(\\widetilde{T}(v+null\\ T) = Tv\\).\n\u003cul\u003e\n\u003cli\u003eWe show that the map is \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear\u003c/a\u003e, \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, its range is \\(range\\ T\\), and so it forms an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between \\(V / null\\ T\\) and \\(range\\ T\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHere\u0026rsquo;s something: \u003ca href=\"/posts/kbhproducts_and_quotients_the_intuition/\"\u003eproducts and quotients, the intuition\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew 
Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Spaces\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/#id-9700ea39-282d-48ef-a959-a416eee0d3ec-operation-s-on-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s\"\u003eoperations on Product of Vector Spaces\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/\"\u003eproduct summation map\u003c/a\u003e \\(\\Gamma\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_vector_and_subspace/\"\u003esum of vector and subspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e + \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on the quotient space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_twiddle/\"\u003e\\(\\widetilde{T}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/#id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-a-id-123d705f-7ede-44bf-882a-04c2f123f7fc-vector-space\"\u003eProduct of Vector Spaces is a vector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhproduct_of_vector_spaces/#dimension-of-the-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eResults relating to \\(\\Gamma\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-gamma-is-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eresults relating to \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es and \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/#id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space-operations-behave-uniformly-on-equivalent-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s\"\u003equotient space operations behave uniformly on equivalent affine subsets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e: bleh just prove it yourself. \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e is \\(0+U\\) and \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e is \\(-v + U\\).\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_space/#dimension-of-a-quotient-space-is-the-difference-between-dimensions-of-its-constituents\"\u003edimension of a quotient space is the difference between dimensions of its constituents\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eresults relating to \\(\\widetilde{T}\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_twiddle/#widetilde-t-is-well-defined\"\u003e\\(\\widetilde{T}\\) is well defined\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_twiddle/#properties-of-widetilde-t\"\u003eproperties of \\(\\widetilde{T}\\)\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eit is linear\u003c/li\u003e\n\u003cli\u003eit is injective\u003c/li\u003e\n\u003cli\u003eits range is the range of \\(range\\ T\\)\u003c/li\u003e\n\u003cli\u003eit is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between \\(V / null\\ T\\) and \\(range\\ T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhat\u0026rsquo;s the point of learning about \\(\\widetilde{T}\\)?\u003c/li\u003e\n\u003cli\u003ehow are \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es and \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e opposites of each other?: \u003ca href=\"/posts/kbhproducts_and_quotients_the_intuition/\"\u003eproducts and quotients, the 
intuition\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eHappy Lunar New Year! Also, let\u0026rsquo;s hope this is not a trend:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-21_00-33-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_e/","tags":null,"title":"Axler 3.E"},{"categories":null,"contents":"Because duality is fun and I\u0026rsquo;m bored and houjun-being-obtuse.\nKey Sequence New Definitions linear functional dual space Results and Their Proofs dimension of dual space is equivalent to the original space Questions for Jana Interesting Factoids Hello from onboard NH107! Or perhaps my next connecting flight, or from China.\n","html":"\u003cp\u003eBecause duality is fun and I\u0026rsquo;m bored and \u003ccode\u003ehoujun-being-obtuse\u003c/code\u003e.\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdual_space/\"\u003edual space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdual_space/#dimension-of-dual-space-is-equivalent-to-the-original-space\"\u003edimension of dual space is equivalent to the original space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eHello from onboard NH107! 
Or perhaps my next connecting flight, or from China.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_3_f/","tags":null,"title":"Axler 3.F"},{"categories":null,"contents":"EIGENSTUFF and OPERATORS! Invariant subspaces are nice.\nSometimes, if we can break the domain of a linear map down to its eigenvalues, we can understand what its doing on a component-wise level.\nKey Sequence we defined an invariant subspace, and gave a name to 1-D invariant subspaces: the span of eigenvectors we showed some properties of eigenvalues and showed that a list of eigenvectors are linearly independent a correlate of this is that operators on finite dimensional V has at most dim V eigenvalues finally, we defined map restriction operator and quotient operator, and showed that they were well-defined New Definitions invariant subspace conditions for nontrivial invariant subspace eigenvalues + eigenvectors + eigenspace two new operators: map restriction operator and quotient operator Results and Their Proofs properties of eigenvalues list of eigenvectors are linearly independent eigenspaces are disjoint operators on finite dimensional V has at most dim V eigenvalues quotient operator is well-defined Questions for Jana Interesting Factoids \u0026ldquo;eigenvalue\u0026rdquo; is sometimes called the \u0026ldquo;characterizing value\u0026rdquo; of a map\nfinding eigenvalues with actual numbers natural choordinates of a map ","html":"\u003cp\u003eEIGENSTUFF and \u003ca href=\"/posts/kbhoperator/\"\u003eOPERATOR\u003c/a\u003eS! 
Invariant subspaces are nice.\u003c/p\u003e\n\u003cp\u003eSometimes, if we can break the domain of a linear map down to its eigenvalues, we can understand what its doing on a component-wise level.\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e, and gave a name to 1-D \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es: the span of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003ewe showed some \u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e and showed that a \u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003elist of eigenvectors are linearly independent\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ea correlate of this is that \u003ca href=\"/posts/kbheigenvalue/#operators-on-finite-dimensional-v-has-at-most-dim-v-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eoperators on finite dimensional V has at most dim V eigenvalues\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003efinally, we defined \u003ca href=\"/posts/kbhmap_restriction_operator/\"\u003emap restriction operator\u003c/a\u003e and \u003ca href=\"/posts/kbhquotient_operator/\"\u003equotient operator\u003c/a\u003e, and showed that they were well-defined\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003econditions for \u003ca href=\"/posts/kbhinvariant_subspace/#nontrivial-id-731fad15-1ec3-4619-8532-1290fefd3b89-invariant-subspace\"\u003enontrivial 
invariant subspace\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es + \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es + \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003etwo new operators: \u003ca href=\"/posts/kbhmap_restriction_operator/\"\u003emap restriction operator\u003c/a\u003e and \u003ca href=\"/posts/kbhquotient_operator/\"\u003equotient operator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003elist of eigenvectors are linearly independent\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003eeigenspaces are disjoint\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#operators-on-finite-dimensional-v-has-at-most-dim-v-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eoperators on finite dimensional V has at most dim V eigenvalues\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_operator/#id-84dca125-e64f-48d7-b71e-858ad5c3db6c-quotient-operator-is-well-defined\"\u003equotient operator is well-defined\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cpre tabindex=\"0\"\u003e\u003ccode class=\"language-Doesn't\" 
data-lang=\"Doesn't\"\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e\u0026rdquo; is sometimes called the \u0026ldquo;characterizing value\u0026rdquo; of a map\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#finding-eigenvalues-with-actual-numbers\"\u003efinding eigenvalues with actual numbers\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#natural-choordinates-of-a-map\"\u003enatural choordinates of a map\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_5_a/","tags":null,"title":"Axler 5.A"},{"categories":null,"contents":"Key Sequence we began the chapter defining \\(T^m\\) (reminding ourselves the usual rules of \\(T^{m+n} = T^{m}T^{n}\\), \\((T^{m})^{n} = T^{mn}\\), and, for invertible maps, \\(T^{-m} = (T^{-1})^{m}\\)) and \\(p(T)\\), wrapping copies of \\(T\\) into coefficients of a polynomial, and from those definitions showed that polynomial of operator is commutative we then used those results + fundamental theorem of algebra to show that operators on complex vector spaces have an eigenvalue that previous, important result in hand, we then dove into upper-triangular matricies specifically, we learned the properties of upper-triangular matrix, that if \\(v_1 \u0026hellip; v_{n}\\) is a basis of \\(V\\) then \\(\\mathcal{M}(T)\\) is upper-triangular if \\(Tv_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for all \\(j \\leq n\\); and, equivalently, \\(T\\) in invariant under the span of \\(v_{j}\\) using that result, we show that every complex operator has an upper-triangular matrix using some neat tricks of algebra, we then establish that operator is only invertible if diagonal of its upper-triangular matrix is nonzero, which seems awfully unmotivated 
until you learn that\u0026hellip; eigenvalues of a map are the entries of the diagonal of its upper-triangular matrix, and that basically is a direct correlary from the upper-triangular matrix of \\(T-\\lambda I\\) New Definitions \\(T^m\\) \\(p(T)\\) technically also product of polynomials matrix of an operator diagonal of a matrix upper-triangular matrix Results and Their Proofs \\(p(z) \\to p(T)\\) is a linear function polynomial of operator is commutative operators on complex vector spaces have an eigenvalue properties of upper-triangular matrix every complex operator has an upper-triangular matrix operator is only invertible if diagonal of its upper-triangular matrix is nonzero eigenvalues of a map are the entries of the diagonal of its upper-triangular matrix Questions for Jana why define the matrix of an operator again?? just to stress that its square for the second flavor of the proof that every complex operator has an upper-triangular matrix, why is \\(v_1 \u0026hellip; v_{j}\\) a basis of \\(V\\)? Interesting Factoids Its 12:18AM and I read this chapter for 5 hours. I also just got jumpscared by my phone notification. 
What\u0026rsquo;s happening?\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe began the chapter defining \u003ca href=\"/posts/kbhraising_operators_to_powers/\"\u003e\\(T^m\\)\u003c/a\u003e (reminding ourselves the usual rules of \\(T^{m+n} = T^{m}T^{n}\\), \\((T^{m})^{n} = T^{mn}\\), and, for invertible maps, \\(T^{-m} = (T^{-1})^{m}\\)) and \u003ca href=\"/posts/kbhpolynomial_operator/\"\u003e\\(p(T)\\)\u003c/a\u003e, wrapping copies of \\(T\\) into coefficients of a polynomial, and from those definitions showed that \u003ca href=\"/posts/kbhpolynomial_operator/#id-fbaf420a-6345-417b-8016-a976e7b155be-polynomial-of-operator-is-commutative\"\u003epolynomial of operator is commutative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then used those results + \u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e to show that \u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ethat previous, important result in hand, we then dove into \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matricies\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003especifically, we learned the \u003ca href=\"/posts/kbhupper_triangular_matrix/#properties-of-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eproperties of upper-triangular matrix\u003c/a\u003e, that if \\(v_1 \u0026hellip; v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) then \\(\\mathcal{M}(T)\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e if \\(Tv_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for all \\(j \\leq n\\); and, equivalently, \\(T\\) in \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of 
\\(v_{j}\\)\u003c/li\u003e\n\u003cli\u003eusing that result, we show that \u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eusing some neat tricks of algebra, we then establish that \u003ca href=\"/posts/kbhupper_triangular_matrix/#operator-is-only-invertible-if-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix-is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e, which seems awfully unmotivated until you learn that\u0026hellip;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#eigenvalues-of-a-map-are-the-entries-of-the-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eeigenvalues of a map are the entries of the diagonal of its upper-triangular matrix\u003c/a\u003e, and that basically is a direct correlary from the \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e of \\(T-\\lambda I\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhraising_operators_to_powers/\"\u003e\\(T^m\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial_operator/\"\u003e\\(p(T)\\)\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003etechnically also \u003ca href=\"\"\u003eproduct of polynomials\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of a matrix\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial_operator/#p--z--to-p--t--is-a-linear-id-d782b5f7-29b5-4f70-a058-f15c0162cbef-function\"\u003e\\(p(z) \\to p(T)\\) is a linear function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial_operator/#id-fbaf420a-6345-417b-8016-a976e7b155be-polynomial-of-operator-is-commutative\"\u003epolynomial of operator is commutative\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#properties-of-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eproperties of upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#operator-is-only-invertible-if-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix-is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhupper_triangular_matrix/#eigenvalues-of-a-map-are-the-entries-of-the-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eeigenvalues of a map are the entries of the diagonal of its upper-triangular matrix\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003ewhy define the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e again??\u003c/del\u003e just to stress that its square\u003c/li\u003e\n\u003cli\u003efor the second flavor of the proof that \u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e, why is \\(v_1 \u0026hellip; v_{j}\\) a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\)?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eIts 12:18AM and I read this chapter for 5 hours. I also just got jumpscared by my phone notification. 
What\u0026rsquo;s happening?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_5_b/","tags":null,"title":"Axler 5.B"},{"categories":null,"contents":"Key Sequence we defined an eigenspace, which is the space of all eigenvalues of a distinct eigenvector, and show that they form a direct sum to the whole space and, as a correlate to how direct sums are kind of like disjoint sets, we have the perhaps expected result of dimension, that the sum of the eigenspace\u0026rsquo; dimensions must be smaller than or equal than that of \\(V\\) we defined a Diagonal Matrix, which by its structure + calculation can be shown to require that it is formed by a basis of eigenvalues and from there, and the properties of eigenspaces above, we deduce some conditions equal to diagonalizability a direct correlary of the last point (perhaps more straightforwardly intuited by just lining eigenvalue up diagonally in a matrix) is that enough eigenvalues implies diagonalizability New Definitions diagonal matrix properties of diagonal matrices eigenspace Results and Their Proofs eigenspaces are a direct sum dimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space conditions equal to diagonalizability enough eigenvalues implies diagonalizability Questions for Jana for diagonalizability, shouldn\u0026rsquo;t \\(n\\) be \\(m\\) on item 3? 
Interesting Factoids Short!\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined an \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e, which is the space of all \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of a distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e, and show that\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003ethey form a direct sum\u003c/a\u003e to the whole space\u003c/li\u003e\n\u003cli\u003eand, as a correlate to how \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es are kind of like disjoint sets, we have the \u003ca href=\"/posts/kbheigenspace/#dimension-of-sum-of-eigenspaces-is-smaller-than-or-equal-to-the-dimension-of-the-whole-space\"\u003eperhaps expected result of dimension\u003c/a\u003e, that the sum of the \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e\u0026rsquo; dimensions must be smaller than or equal than that of \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe defined a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003eDiagonal Matrix\u003c/a\u003e, which by its structure + calculation can be shown to require that it is formed by a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003eand from there, and the properties of \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es above, we deduce \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003esome conditions equal to diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ea direct correlary of the last point (perhaps more straightforwardly intuited by just lining \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e up diagonally in a matrix) is that \u003ca 
href=\"/posts/kbhdiagonal_matrix/#enough-eigenvalues-implies-diagonalizability\"\u003eenough eigenvalues implies diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal matrix\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003eproperties of diagonal matrices\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003eeigenspaces are a direct sum\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenspace/#dimension-of-sum-of-eigenspaces-is-smaller-than-or-equal-to-the-dimension-of-the-whole-space\"\u003edimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003econditions equal to diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#enough-eigenvalues-implies-diagonalizability\"\u003eenough eigenvalues implies diagonalizability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizability\u003c/a\u003e, shouldn\u0026rsquo;t \\(n\\) be \\(m\\) on item 3?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting 
Factoids\u003c/h2\u003e\n\u003cp\u003eShort!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_5_c/","tags":null,"title":"Axler 5.C"},{"categories":null,"contents":"Hear ye, hear ye! Length and angles are a thing now!!\nKey Sequence We remembered how dot products work, then proceeded to generalize them into inner products\u0026mdash;this is needed because complex numbers don\u0026rsquo;t behave well when squared so we need to add in special guardrails We then learned that dot products is just an instantiation of Euclidean Inner Product, which itself is simply one of many inner products. A vector space that has a well-defined inner product is now called an Inner Product Space Along with revisiting our definition of dot products to include complexes, we changed our definition of norm to be in terms of inner products \\(\\sqrt{\\langle v,v \\rangle}\\) to help support complex vector spaces better; we then also redefined orthogonality and showed a few results regarding them Then, we did a bunch of analysis-y work to understand some properties of norms and inner products: Pythagorean Theorem, Cauchy-Schwartz Inequality, triangle inequality and parallelogram equality. New Definitions dot product and the more generally important\u0026hellip; inner product!, Euclidean Inner Product, Inner Product Space norm orthogonal and now, a cornucopia of analysis Pythagorean Theorem Cauchy-Schwartz Inequality triangle inequality parallelogram equality Results and Their Proofs properties of the dot product properties of inner product properties of the norm orthogonality and \\(0\\) Questions for Jana How much of the analysis-y proof work do we have to remember for the analysis-y results? Interesting Factoids ","html":"\u003cp\u003eHear ye, hear ye! 
Length and angles are a thing now!!\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWe remembered how \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es work, then proceeded to generalize them into \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es\u0026mdash;this is needed because \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es don\u0026rsquo;t behave well when squared so we need to add in special guardrails\u003c/li\u003e\n\u003cli\u003eWe then learned that \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es is just an instantiation of \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e, which itself is simply one of many \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es. A \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e that has a well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is now called an \u003ca href=\"/posts/kbhinner_product/#inner-product-space\"\u003eInner Product Space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAlong with revisiting our definition of \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es to include complexes, we changed our definition of \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e to be in terms of \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es \\(\\sqrt{\\langle v,v \\rangle}\\) to help support \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003es better; we then also redefined \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonality\u003c/a\u003e and showed a few results regarding them\u003c/li\u003e\n\u003cli\u003eThen, we did a bunch of analysis-y work to understand some properties of norms and \u003ca 
href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es: \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003ePythagorean Theorem\u003c/a\u003e, \u003ca href=\"/posts/kbhcornucopia_of_analysis/#cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/a\u003e, \u003ca href=\"/posts/kbhcornucopia_of_analysis/#triangle-inequality--vectors\"\u003etriangle inequality\u003c/a\u003e and \u003ca href=\"/posts/kbhcornucopia_of_analysis/#parallelogram-equality\"\u003eparallelogram equality\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e and the more generally important\u0026hellip;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e!, \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e, \u003ca href=\"/posts/kbhinner_product/#inner-product-space\"\u003eInner Product Space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand now, a cornucopia of analysis\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003ePythagorean Theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#triangle-inequality--vectors\"\u003etriangle inequality\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcornucopia_of_analysis/#parallelogram-equality\"\u003eparallelogram 
equality\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdot_product/#properties-of-the-dot-product\"\u003eproperties of the dot product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinner_product/#properties-of-inner-product\"\u003eproperties of inner product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnorm/#properties-of-the-norm\"\u003eproperties of the norm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthogonal/#orthogonality-and-0\"\u003eorthogonality and \\(0\\)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHow much of the analysis-y proof work do we have to remember for the analysis-y results?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_6_a/","tags":null,"title":"Axler 6.A"},{"categories":null,"contents":"OMG its Gram-Schmidtting\nKey Sequence we defined lists of vectors that all have norm 1 and are all orthogonal to each other as orthonormal; we showed orthonormal list is linearly independent by hijacking pythagoras of course, once we have a finitely long linearly independent thing we must be able to build a basis. The nice thing about such an orthonormal basis is that for every vector we know precisely what its coefficients have to be! Specifically, \\(a_{j} = \\langle v, e_{j} \\rangle\\). That\u0026rsquo;s cool. What we really want, though, is to be able to get an orthonormal basis from a regular basis, which we can do via Gram-Schmidt. 
In fact, this gives us some useful correlaries regarding the existance of orthonormal basis (just Gram-Schmidt a normal one), or extending a orthonormal list to a basis, etc. There are also important implications (still along the veins of \u0026ldquo;just Gram-Schmidt it!\u0026rdquo;) for upper-traingular matricies as well We also learned, as a result of orthonormal basis, any finite-dimensional linear functional (Linear Maps to scalars) can be represented as an inner product via the Riesz Representation Theorem, which is honestly kinda epic. New Definitions orthonormal + orthonormal basis Gram-Schmidt (i.e. orthonormalization) linear functional and Riesz Representation Theorem Results and Their Proofs Norm of an Orthogonal Linear Combination An orthonormal list is linearly independent An orthonormal list of the right length is a basis Writing a vector as a linear combination of orthonormal basis Corollaries of Gram-Schmidt Every Inner Product Space has an orthonormal basis Orthonormal list extended to orthonormal basis Orthonormal upper-triangular matrix basis exists if normal upper-triangular exists Schur\u0026rsquo;s Theorem Riesz Representation Theorem Questions for Jana Interesting Factoids ","html":"\u003cp\u003eOMG its \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidtting\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined lists of vectors that all have \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1 and are all \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to each other as \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e; we showed \u003ca href=\"/posts/kbhorthonormal/#orthonormal-list-is-linearly-independent\"\u003eorthonormal list is linearly independent\u003c/a\u003e by hijacking \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003epythagoras\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eof 
course, once we have a finitely long \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e thing we must be able to \u003ca href=\"/posts/kbhorthonormal_basis/#orthonormal-list-of-the-right-length-is-a-basis\"\u003ebuild a basis\u003c/a\u003e. The nice thing about such an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e is that for every vector \u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003ewe know precisely what its coefficients have to be\u003c/a\u003e! Specifically, \\(a_{j} = \\langle v, e_{j} \\rangle\\). That\u0026rsquo;s cool.\u003c/li\u003e\n\u003cli\u003eWhat we really want, though, is to be able to get an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e from a regular \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, which we can do via \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e. In fact, this gives us some useful correlaries regarding the existance of \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e (just \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e a normal one), or extending a \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e list to a basis, etc. 
There are also important implications (still along the veins of \u0026ldquo;just \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e it!\u0026rdquo;) for \u003ca href=\"/posts/kbhgram_schmidt/#schur-s-theorem\"\u003eupper-traingular matricies\u003c/a\u003e as well\u003c/li\u003e\n\u003cli\u003eWe also learned, as a result of \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e, any finite-dimensional \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e (\u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es to scalars) can be represented as an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e via the \u003ca href=\"/posts/kbhlinear_functional/#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e, which is honestly kinda epic.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e + \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e (i.e. 
\u003ca href=\"/posts/kbhgram_schmidt/\"\u003eorthonormalization\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e and \u003ca href=\"/posts/kbhlinear_functional/#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal/#norm-of-an-orthogonal-linear-combination\"\u003eNorm of an Orthogonal Linear Combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal/#orthonormal-list-is-linearly-independent\"\u003eAn orthonormal list is linearly independent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal_basis/#orthonormal-list-of-the-right-length-is-a-basis\"\u003eAn orthonormal list of the right length is a basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003eWriting a vector as a linear combination of orthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eCorollaries of \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#every-id-4a788e29-a3e9-4c13-8c97-08746878966e-inner-product-space-has-an-id-2a1eecb2-f23a-469f-a860-b561a9197906-orthonormal-basis\"\u003eEvery Inner Product Space has an orthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#orthonormal-list-extended-to-id-2a1eecb2-f23a-469f-a860-b561a9197906-orthonormal-basis\"\u003eOrthonormal list extended to orthonormal basis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#orthonormal-upper-triangular-matrix-basis-exists-if-normal-upper-triangular-exists\"\u003eOrthonormal 
upper-triangular matrix basis exists if normal upper-triangular exists\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgram_schmidt/#schur-s-theorem\"\u003eSchur\u0026rsquo;s Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_functional/#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_6_b/","tags":null,"title":"Axler 6.B"},{"categories":null,"contents":"This is not actually like a proper review of a chapter, instead, it is an opinionated review of what I think Jana thinks Axler thinks is important about 7.A.\nNote that all of the \u0026ldquo;proofy things\u0026rdquo; in this section are poofy because problems with putting trips prior to the end of the year.\nHere\u0026rsquo;s an outline:\nWe defined the adjoint We learned some properties of the adjoint; importantly, that \\((A+B)^{*} = A^{*} + B^{*}\\), \\((AB)^{*} = B^{*} A^{*}\\), \\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\); a correlary is that \\(M^{*}M\\) is self-adjoint We defined normal, self-adjoint, and unitary With those definitions, we showed that eigenvalues of self-adjoint matricies are real Then, we created two mildly interesting intermediate results Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix The latter of which shows that Eigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal This, all, builds up to the result of the Complex Spectral Theorem, which you should know adjoint Suppose \\(T \\in \\mathcal{L}(V,W)\\), we 
define the adjoint as a \\(T^{*} \\in \\mathcal{L}(W,V)\\) that:\n\\begin{equation} \\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle \\end{equation}\nthe usual verifications could be made (setting \\(w = w_1+w_2\\), then applying inner product additivity, etc.) to show that \\(T^{*}\\) is a linear map.\nproperties of the adjoint \\((S+T)^{*} = S^{*} + T^{*}\\) \\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\) \\((T^{*})^{*} = T\\) \\(I^{*} = I\\) \u0026ldquo;identity is self adjoint\u0026rdquo; and, importantly, \\((ST)^{*} = T^{*} S^{*}\\) All of these results are findable by chonking through the expressions for inner product properties.\nadjoint is the conjugate transpose For complex-valued matricies, and the Euclidean Inner Product, the adjoint is the conjugate transpose of the matrix.\nTo show this ham-fistedly, first convince yourself that the property of \\((ST)^{*} = T^{*} S^{*}\\) holds for the act of conjugate transpose of matricies as well. Now, we will show how we can get to the adjoint definition from that result:\nConsider:\n\\begin{equation} \\langle v,w \\rangle \\end{equation}\nwe can represent this as the product of two \u0026ldquo;matricies\u0026rdquo;: an \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(v\\), and a \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(w\\); specifically:\n\\begin{equation} \\langle v,w \\rangle = v^{*} w \\end{equation}\nwhere \\(v^{*}\\) is the conjugate-transpose of \\(v\\) (the dagger is nonstandard notation, to distinguish from \\(*\\) the adjoint defined above which we haven\u0026rsquo;t shown yet). This is by definition of how matricies multiply and how the Euclidean Inner Product works.\nSo then consider the same for the above:\n\\begin{equation} \\langle Tv,w \\rangle = (Tv)^{*}w = v^{*} T^{*}w = \\langle v, T^{*}w \\rangle \\end{equation}\nAxler gives an arguably better proof involving representing the matricies w.r.t. 
the orthonomal bases, and then showing that the inner products just swapped slots:\nBuncha matrix adjectives And given we now know what the adjoint is, we can make some definitions:\nself-adjoint \\begin{equation} A = A^{*} \\end{equation}\nwow.\nNamely:\n\\begin{equation} \\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle = \\langle v, Tw \\rangle \\end{equation}\nnormal \\begin{equation} A A^{*} = A^{*} A \\end{equation}\nAs in, if the operator commutes with its own adjoint.\nunitary \\begin{equation} A^{*} = A^{-1} \\end{equation}\nor, that \\(A\\) has orthonormal columns: an unitary operator is invertible, and the inverse of its matrix representation is its transpose\nEigenvalues of self-adjoint matricies are real So, if we have:\n\\begin{equation} Tv = \\lambda v \\end{equation}\nwe want to show that \\(\\lambda\\) is real. To do this, we can show that \\(\\lambda = \\bar{\\lambda}\\) which would mean the \\(b\\) component is \\(0\\).\nNow, recall that self-adjoint means \\(\\langle Tv,w \\rangle = \\langle v, Tw \\rangle\\).\nConstruct now: \\(\\lambda \\|v\\|^{2}\\) \u0026mdash;\n\\begin{equation} \\lambda \\|v\\|^{2} = \\lambda \\langle v,v \\rangle = \\langle \\lambda v,v \\rangle = \\langle Tv, v \\rangle = \\langle v, Tv \\rangle = \\langle v, \\lambda v \\rangle = \\bar{\\lambda} \\langle v,v \\rangle = \\bar{\\lambda} \\|v\\|^{2} \\end{equation}\nSo we have \\(\\lambda \\|v\\|^{2} = \\bar{\\lambda} \\|v\\|^{2}\\), which means \\(\\lambda = \\bar{\\lambda}\\), as desired.\nTwo less important intermediate results \u0026hellip;that we just trust Axler\u0026rsquo;s word + our intuition for:\n7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix 7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix Why is it different? 
ASK JANA IDK\nEigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal Prove depended on the minor results from before\nand then voodo whitchcraft.\nComplex Spectral Theorem On \\(\\mathbb{C}\\), and with \\(T \\in \\mathcal{L}(V)\\), the following statements are equivalent:\n\\(T\\) is normal \\(T\\) has an orthonormal basis of eigenvectors and so \\(T\\) is diagonalizable w.r.t. that orthonormal basis of eigenvectors This proof depends on Schur\u0026rsquo;s Theorem.\nThe real number version requires that \\(T\\) is self-adjoint.\nThings to ask jana why is 7.14 and 7.16 different ","html":"\u003cp\u003eThis is not actually like a proper review of a chapter, instead, it is an \u003cstrong\u003eopinionated review\u003c/strong\u003e of what I think Jana thinks Axler thinks is important about 7.A.\u003c/p\u003e\n\u003cp\u003eNote that all of the \u0026ldquo;proofy things\u0026rdquo; in this section are poofy because problems with putting trips prior to the end of the year.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s an outline:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWe defined the \u003ca href=\"#adjoint\"\u003eadjoint\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eWe learned some \u003ca href=\"#properties-of-the-adjoint\"\u003eproperties of the adjoint\u003c/a\u003e; importantly, that \\((A+B)^{*} = A^{*} + B^{*}\\), \\((AB)^{*} = B^{*} A^{*}\\), \\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\); a correlary is that \\(M^{*}M\\) is \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWe defined \u003ca href=\"#buncha-matrix-adjectives\"\u003enormal, self-adjoint, and unitary\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWith those definitions, we showed that \u003ca href=\"#eigenvalues-of-self-adjoint--org71f113d--matricies-are-real\"\u003eeigenvalues of self-adjoint matricies are 
real\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThen, we created two mildly interesting intermediate results\n\u003cul\u003e\n\u003cli\u003eOver \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/li\u003e\n\u003cli\u003eOver \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) \u003cstrong\u003eand \\(T\\) is self-adjoint\u003c/strong\u003e, then \\(T\\) is the zero matrix\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThe latter of which shows that \u003ca href=\"#eigenvectors-of-t-corresponding-to-distinct-eigenvalues-are-orthogonal-if-t-in-mathcal-l--v--is-normal\"\u003eEigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThis, all, builds up to the result of the \u003cstrong\u003e\u003ca href=\"#complex-spectral-theorem\"\u003eComplex Spectral Theorem\u003c/a\u003e\u003c/strong\u003e, which you should know\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"adjoint\"\u003eadjoint\u003c/h2\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\), we define the \u003cstrong\u003eadjoint\u003c/strong\u003e as a \\(T^{*} \\in \\mathcal{L}(W,V)\\) that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe usual verifications could be made (setting \\(w = w_1+w_2\\), then applying \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e additivity, etc.) 
to show that \\(T^{*}\\) is a linear map.\u003c/p\u003e\n\u003ch3 id=\"properties-of-the-adjoint\"\u003eproperties of the adjoint\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\((S+T)^{*} = S^{*} + T^{*}\\)\u003c/li\u003e\n\u003cli\u003e\\((\\lambda T)^{*} = \\bar{\\lambda}T^{*}\\)\u003c/li\u003e\n\u003cli\u003e\\((T^{*})^{*} = T\\)\u003c/li\u003e\n\u003cli\u003e\\(I^{*} = I\\) \u0026ldquo;identity is self adjoint\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eand, importantly, \\((ST)^{*} = T^{*} S^{*}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAll of these results are findable by chonking through the expressions for inner product properties.\u003c/p\u003e\n\u003ch3 id=\"adjoint-is-the-conjugate-transpose\"\u003eadjoint is the conjugate transpose\u003c/h3\u003e\n\u003cp\u003eFor complex-valued \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e, and the \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e, the adjoint is the conjugate transpose of the matrix.\u003c/p\u003e\n\u003cp\u003eTo show this ham-fistedly, first convince yourself that the property of \\((ST)^{*} = T^{*} S^{*}\\) holds for the act of conjugate transpose of matricies as well. 
Now, we will show how we can get to the adjoint definition from that result:\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v,w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can represent this as the product of two \u0026ldquo;matricies\u0026rdquo;: an \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(v\\), and a \\(n \\times 1\\) \u0026ldquo;matrix\u0026rdquo; named \\(w\\); specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v,w \\rangle = v^{*} w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(v^{*}\\) is the conjugate-transpose of \\(v\\) (the dagger is nonstandard notation, to distinguish from \\(*\\) the adjoint defined above which we haven\u0026rsquo;t shown yet). This is by definition of how matricies multiply and how the \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e works.\u003c/p\u003e\n\u003cp\u003eSo then consider the same for the above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv,w \\rangle = (Tv)^{*}w = v^{*} T^{*}w = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAxler gives an arguably better proof involving representing the matricies w.r.t. 
the orthonomal bases, and then showing that the inner products just swapped slots:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-10_20-53-51_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"buncha-matrix-adjectives\"\u003eBuncha matrix adjectives\u003c/h2\u003e\n\u003cp\u003eAnd given we now know what the \u003ca href=\"#adjoint\"\u003eadjoint\u003c/a\u003e is, we can make some definitions:\u003c/p\u003e\n\u003ch3 id=\"self-adjoint\"\u003eself-adjoint\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nA = A^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewow.\u003c/p\u003e\n\u003cp\u003eNamely:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle = \\langle v, Tw \\rangle\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"normal\"\u003enormal\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nA A^{*} = A^{*} A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs in, if the operator commutes with its own adjoint.\u003c/p\u003e\n\u003ch3 id=\"unitary\"\u003eunitary\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nA^{*} = A^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor, that \\(A\\) has orthonormal columns: \u003ca href=\"/posts/kbhnus_math530_matrix_adjectives/#an-unitary-operator-is-invertible-and-the-inverse-of-its-matrix-representation-is-its-transpose\"\u003ean unitary operator is invertible, and the inverse of its matrix representation is its transpose\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"eigenvalues-of-self-adjoint--org71f113d--matricies-are-real\"\u003eEigenvalues of \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e matricies are real\u003c/h2\u003e\n\u003cp\u003eSo, if we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe want to show that \\(\\lambda\\) is real. 
To do this, we can show that \\(\\lambda = \\bar{\\lambda}\\) which would mean the \\(b\\) component is \\(0\\).\u003c/p\u003e\n\u003cp\u003eNow, recall that \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e means \\(\\langle Tv,w \\rangle = \\langle v, Tw \\rangle\\).\u003c/p\u003e\n\u003cp\u003eConstruct now: \\(\\lambda \\|v\\|^{2}\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\|v\\|^{2} = \\lambda \\langle v,v \\rangle = \\langle \\lambda v,v \\rangle = \\langle Tv, v \\rangle = \\langle v, Tv \\rangle = \\langle v, \\lambda v \\rangle = \\bar{\\lambda} \\langle v,v \\rangle = \\bar{\\lambda} \\|v\\|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we have \\(\\lambda \\|v\\|^{2} = \\bar{\\lambda} \\|v\\|^{2}\\), which means \\(\\lambda = \\bar{\\lambda}\\), as desired.\u003c/p\u003e\n\u003ch2 id=\"two-less-important-intermediate-results\"\u003eTwo less important intermediate results\u003c/h2\u003e\n\u003cp\u003e\u0026hellip;that we just trust Axler\u0026rsquo;s word + our intuition for:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/li\u003e\n\u003cli\u003e7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) \u003cstrong\u003eand \\(T\\) is self-adjoint\u003c/strong\u003e, then \\(T\\) is the zero matrix\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhy is it different? 
\u003cstrong\u003e\u003cstrong\u003eASK JANA IDK\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"eigenvectors-of-t-corresponding-to-distinct-eigenvalues-are-orthogonal-if-t-in-mathcal-l--v--is-normal\"\u003eEigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal\u003c/h2\u003e\n\u003cp\u003eProve depended on the minor results from before\u003c/p\u003e\n\u003cp\u003eand then voodo whitchcraft.\u003c/p\u003e\n\u003ch2 id=\"complex-spectral-theorem\"\u003eComplex Spectral Theorem\u003c/h2\u003e\n\u003cp\u003eOn \\(\\mathbb{C}\\), and with \\(T \\in \\mathcal{L}(V)\\), the following statements are equivalent:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(T\\) is normal\u003c/li\u003e\n\u003cli\u003e\\(T\\) has an orthonormal basis of eigenvectors\u003c/li\u003e\n\u003cli\u003eand so \\(T\\) is diagonalizable w.r.t. that orthonormal \u003cstrong\u003ebasis of eigenvectors\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis proof depends on \u003ca href=\"/posts/kbhgram_schmidt/#schur-s-theorem\"\u003eSchur\u0026rsquo;s Theorem\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe real number version requires that \\(T\\) is \u003ca href=\"#self-adjoint\"\u003eself-adjoint\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"things-to-ask-jana\"\u003eThings to ask jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy is 7.14 and 7.16 different\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhaxler_7_a/","tags":null,"title":"Axler 7.A"},{"categories":null,"contents":"AAAAA I want a good backpack.\nrequirements\nexplicit laptop compartment (whether intentional or not; water bladder component that fits a laptop is fine) earbags (those fannypack things on the side of the bottom belt); needs to be large (i.e. 
enough to fit an iphone 5) raincover at least 3 compartments, ideally one with a pen holder and key ring, and the outermost being very accessible (think mesh bag) basically I want an exact replica of the columbia silver ridge 30L from 2012 which they don\u0026rsquo;t sell anymore; the new one breaks 4) slightly and is also $150 and I got mine for ilke $60-70 (it was like 300-350 rmb) max in 2012\nhttps://www.amazon.com/FENGDONG-Waterproof-Lightweight-Outdoor-Backpack/dp/B07T9KJ87Y/ref=sr_1_8?keywords=Ergonomic%2BBackpack%2Bwith%2BWaist%2BBelt\u0026amp;qid=1681186885\u0026amp;sr=8-8\u0026amp;th=1 https://www.thenorthface.com/en-us/bags-and-gear/technical-packs/hike-c224525/basin-36-backpack-pNF0A52CX?color=KX7 https://www.patagonia.com/product/altvia-pack-36-liters/195699592506.html or just https://www.ebay.com/itm/314416731511?hash=item4934b29577:g:~qEAAOSwxPFkGep5\u0026amp;amdata=enc%3AAQAHAAAA8KUu6vJvKh7jVrgZg0FOdwH29YdndgW0Z%2FMxdEOH15jv1RGQN6DetZ4Lt8NqSwRFCH3wCCJe6cBmKvSysTNf2QUIYAv%2FKaRUGkeKX4XTkMwQRtAP67yRMdZJEu3GRcuLgjv7QRmPS6BmydT1NUBi4ULiHVupVFYMv9ei%2BJK2JGjf53DJJ7JqFjftWdQ%2F6mfoEdjsmZ5LZbMKP0XFCtyt6OojQKE63GcQt16lXGdfjlZuGpHJ6znv%2BhAHiHyV9FlguEi1v3hHSn1zx9qmfzriXvh1vbqlguRc2uiNVz3dJ52A7WYzvvWVkHZ1Vo8SekjByQ%3D%3D%7Ctkp%3ABk9SR97_n-ntYQ ","html":"\u003cp\u003eAAAAA I want a good backpack.\u003c/p\u003e\n\u003cp\u003erequirements\u003c/p\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003eexplicit laptop compartment (whether intentional or not; water bladder component that fits a laptop is fine)\u003c/li\u003e\n\u003cli\u003eearbags (those fannypack things on the side of the bottom belt); needs to be large (i.e. 
enough to fit an iphone 5)\u003c/li\u003e\n\u003cli\u003eraincover\u003c/li\u003e\n\u003cli\u003eat least 3 compartments, ideally one with a pen holder and key ring, and the outermost being very accessible (think mesh bag)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ebasically I want an exact replica of the columbia silver ridge 30L from 2012 which they don\u0026rsquo;t sell anymore; the new one breaks 4) slightly and is also $150 and I got mine for ilke $60-70 (it was like 300-350 rmb) max in 2012\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://www.amazon.com/FENGDONG-Waterproof-Lightweight-Outdoor-Backpack/dp/B07T9KJ87Y/ref=sr_1_8?keywords=Ergonomic%2BBackpack%2Bwith%2BWaist%2BBelt\u0026amp;qid=1681186885\u0026amp;sr=8-8\u0026amp;th=1\"\u003ehttps://www.amazon.com/FENGDONG-Waterproof-Lightweight-Outdoor-Backpack/dp/B07T9KJ87Y/ref=sr_1_8?keywords=Ergonomic%2BBackpack%2Bwith%2BWaist%2BBelt\u0026amp;qid=1681186885\u0026amp;sr=8-8\u0026amp;th=1\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.thenorthface.com/en-us/bags-and-gear/technical-packs/hike-c224525/basin-36-backpack-pNF0A52CX?color=KX7\"\u003ehttps://www.thenorthface.com/en-us/bags-and-gear/technical-packs/hike-c224525/basin-36-backpack-pNF0A52CX?color=KX7\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.patagonia.com/product/altvia-pack-36-liters/195699592506.html\"\u003ehttps://www.patagonia.com/product/altvia-pack-36-liters/195699592506.html\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eor just \u003ca 
href=\"https://www.ebay.com/itm/314416731511?hash=item4934b29577:g:~qEAAOSwxPFkGep5\u0026amp;amdata=enc%3AAQAHAAAA8KUu6vJvKh7jVrgZg0FOdwH29YdndgW0Z%2FMxdEOH15jv1RGQN6DetZ4Lt8NqSwRFCH3wCCJe6cBmKvSysTNf2QUIYAv%2FKaRUGkeKX4XTkMwQRtAP67yRMdZJEu3GRcuLgjv7QRmPS6BmydT1NUBi4ULiHVupVFYMv9ei%2BJK2JGjf53DJJ7JqFjftWdQ%2F6mfoEdjsmZ5LZbMKP0XFCtyt6OojQKE63GcQt16lXGdfjlZuGpHJ6znv%2BhAHiHyV9FlguEi1v3hHSn1zx9qmfzriXvh1vbqlguRc2uiNVz3dJ52A7WYzvvWVkHZ1Vo8SekjByQ%3D%3D%7Ctkp%3ABk9SR97_n-ntYQ\"\u003ehttps://www.ebay.com/itm/314416731511?hash=item4934b29577:g:~qEAAOSwxPFkGep5\u0026amp;amdata=enc%3AAQAHAAAA8KUu6vJvKh7jVrgZg0FOdwH29YdndgW0Z%2FMxdEOH15jv1RGQN6DetZ4Lt8NqSwRFCH3wCCJe6cBmKvSysTNf2QUIYAv%2FKaRUGkeKX4XTkMwQRtAP67yRMdZJEu3GRcuLgjv7QRmPS6BmydT1NUBi4ULiHVupVFYMv9ei%2BJK2JGjf53DJJ7JqFjftWdQ%2F6mfoEdjsmZ5LZbMKP0XFCtyt6OojQKE63GcQt16lXGdfjlZuGpHJ6znv%2BhAHiHyV9FlguEi1v3hHSn1zx9qmfzriXvh1vbqlguRc2uiNVz3dJ52A7WYzvvWVkHZ1Vo8SekjByQ%3D%3D%7Ctkp%3ABk9SR97_n-ntYQ\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbackpacks/","tags":null,"title":"Backpacks"},{"categories":null,"contents":"we need to keep two sequences aligned; so in addition to minimum edit distance we need to know how to transform one sequence into another.\nTo do this, we keep a pointer of what cell we came from.\nThis is similar to edit distance with DP, but we keep a pointer of each cell of the action: point DOWN (less j) if inserting, point LEFT (less i) if deleting, and point diagonally if substituting.\nFor two strings, let\u0026rsquo;s define:\n\\(X\\) of length \\(n\\) \\(Y\\) of length \\(m\\) we define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\nLet:\n\\(D(i,0) = i\\) \\(D(0,j) = j\\) for i in range(1,M): for j in range(1,N): # deletion: ignoring one char of previous string d1 = D(i-1,j) + 1 # (cost) # insertion: insertion into string before using rest of j d2 = D(i,j-1) + 1 # (cost) # keep same if char is same or substitute 
current d3 = D(i-1,j-1) + (0 if X[i] == Y[j] else 2) # cache D(i,j) = min(d1, d2, d3) ","html":"\u003cp\u003ewe need to keep two sequences aligned; so in addition to \u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e we need to know how to transform one sequence into another.\u003c/p\u003e\n\u003cp\u003eTo do this, we keep a pointer of what cell we came from.\u003c/p\u003e\n\u003cp\u003eThis is similar to \u003ca href=\"/posts/kbhedit_distance_with_dp/\"\u003eedit distance with DP\u003c/a\u003e, but we keep a pointer of each cell of the action: point DOWN (less j) if inserting, point LEFT (less i) if deleting, and point diagonally if substituting.\u003c/p\u003e\n\u003cp\u003eFor two strings, let\u0026rsquo;s define:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) of length \\(n\\)\u003c/li\u003e\n\u003cli\u003e\\(Y\\) of length \\(m\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(D(i,0) = i\\)\u003c/li\u003e\n\u003cli\u003e\\(D(0,j) = j\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# deletion: ignoring one char of previous string\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# insertion: insertion into string before using rest of j\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# keep same if char is same or substitute current\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbacktracing/","tags":null,"title":"backtracing"},{"categories":null,"contents":"Bag of Words is an order-free representation of a corpus. Specifically, each word has a count which we assign to each word without any other information about ordering, etc.\nWith the Bayes Theorem, we have:\n\\begin{equation} C_{MAP} = \\arg\\max_{c \\in C} P(d|c)P( c) \\end{equation}\nwhere \\(d\\) is the document, and \\(c\\) is the class.\nSo, given a document is a set of a bunch of words:\n\\begin{equation} C_{MAP} = \\arg\\max_{c\\in C} P(x_1, \\dots, x_{n}|c)P( c) \\end{equation}\nNaive Bayes for Text Classification Assumptions of Bag of Words for Naive Bayes\n\\(P( c)\\) The right side is just relative frequencies (count of freq divided by count of class).\n\\(P(x_1, \u0026hellip;, x_{n})\\) Naive Bayes assumption (each word\u0026rsquo;s position doesn\u0026rsquo;t matter) Bag of Words assumption (assume position doesn\u0026rsquo;t matter) So we have:\n\\begin{equation} C_{NB} = \\arg\\max_{c\\in C} P(c_{j}) \\prod_{x\\in X} P(x|c) \\end{equation}\nWe eventually use logs to prevent underflow:\n\\begin{equation} C_{NB} = \\arg\\max_{c\\in C}\\log P(c_{j}) +\\sum_{x\\in X} \\log P(x|c) \\end{equation}\nParameter Estimation Because we don\u0026rsquo;t know new words completely decimating probability, we want to establish a Beta Distribution prior which smoothes the outcomes, meaning:\n\\begin{equation} P(w_{k}|c_{j}) = \\frac{n_{k} + \\alpha }{n + \\alpha |V|} \\end{equation}\nwhere \\(n_{k}\\) is the number of occurrences of word \\(k\\) in class \\(C\\), and \\(n\\) is the number of words in total that occurs in class \\(C\\).\nUnknown Words We ignore them. 
Because knowing a class has lots of unknown words don\u0026rsquo;t help.\nBinary Naive Bayes There is another version, which simply clip all the word count \\(n_{k}\\) to \\(1\\) for both train and test. You do this by de-duplicating the entire corpus by DOCUMENT (i.e. if a word appears twice in the same document, we count it only once).\nBenefits Doesn\u0026rsquo;t have significant fragmentation problems (i.e. many important features clotting up decision) Robust to irrelevant features (which cancel each other out) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e is an order-free representation of a \u003ca href=\"/posts/kbhcorpus/\"\u003ecorpus\u003c/a\u003e. Specifically, each word has a count which we assign to each word without any other information about ordering, etc.\u003c/p\u003e\n\u003cp\u003eWith the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{MAP} = \\arg\\max_{c \\in C} P(d|c)P( c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is the document, and \\(c\\) is the class.\u003c/p\u003e\n\u003cp\u003eSo, given a document is a set of a bunch of words:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{MAP} = \\arg\\max_{c\\in C} P(x_1, \\dots, x_{n}|c)P( c)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"naive-bayes-for-text-classification\"\u003eNaive Bayes for Text Classification\u003c/h2\u003e\n\u003cp\u003eAssumptions of \u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e for \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"p--c\"\u003e\\(P( c)\\)\u003c/h3\u003e\n\u003cp\u003eThe right side is just relative frequencies (count of freq divided by count of class).\u003c/p\u003e\n\u003ch3 id=\"p--x-1-dot-dot-dot-x-n\"\u003e\\(P(x_1, \u0026hellip;, x_{n})\\)\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhnaive_bayes/#id-6cdf74a2-2451-47d1-8a62-62aa6dff62c6-naive-bayes-assumption\"\u003eNaive Bayes assumption\u003c/a\u003e (each word\u0026rsquo;s position doesn\u0026rsquo;t matter)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e assumption (assume position doesn\u0026rsquo;t matter)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{NB} = \\arg\\max_{c\\in C} P(c_{j}) \\prod_{x\\in X} P(x|c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe eventually use logs to prevent underflow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{NB} = \\arg\\max_{c\\in C}\\log P(c_{j}) +\\sum_{x\\in X} \\log P(x|c)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"parameter-estimation\"\u003eParameter Estimation\u003c/h3\u003e\n\u003cp\u003eBecause we don\u0026rsquo;t know new words completely decimating probability, we want to establish a \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e prior which smoothes the outcomes, meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{k}|c_{j}) = \\frac{n_{k} + \\alpha }{n + \\alpha |V|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(n_{k}\\) is the number of occurrences of word \\(k\\) in class \\(C\\), and \\(n\\) is the number of words in total that occurs in class \\(C\\).\u003c/p\u003e\n\u003ch3 id=\"unknown-words\"\u003eUnknown Words\u003c/h3\u003e\n\u003cp\u003eWe ignore them. Because knowing a class has lots of unknown words don\u0026rsquo;t help.\u003c/p\u003e\n\u003ch3 id=\"binary-naive-bayes\"\u003eBinary Naive Bayes\u003c/h3\u003e\n\u003cp\u003eThere is another version, which simply clip all the word count \\(n_{k}\\) to \\(1\\) for both train and test. You do this by de-duplicating the entire corpus by \u003cstrong\u003eDOCUMENT\u003c/strong\u003e (i.e. 
if a word appears twice in the same document, we count it only once).\u003c/p\u003e\n\u003ch3 id=\"benefits\"\u003eBenefits\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDoesn\u0026rsquo;t have significant \u003cstrong\u003efragmentation\u003c/strong\u003e problems (i.e. many important features clotting up decision)\u003c/li\u003e\n\u003cli\u003eRobust to irrelevant features (which cancel each other out)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbag_of_words/","tags":null,"title":"Bag of Words"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.635945\nOne-Liner extracted lexicographic and syntactical features from ADReSS Challenge data and trained it on various models, with BERT performing the best.\nNovelty ???????\nSeems like results here are a strict subset of Zhu 2021. Same sets of dataprep of Antonsson 2021 but trained on a BERT now. Seem to do worse than Antonsson 2021 too.\nNotable Methods Essentially Antonsson 2021\nAlso performed MMSE score regression. Key Figs Table 7 training result This figure shows us that the results attained by training on extracted feature is past the state-of-the-art at the time.\nTable 4 These tables tells us the feature extracted\n","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.635945\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eextracted lexicographic and syntactical features from \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e data and trained it on various models, with BERT performing the best.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003e???????\u003c/p\u003e\n\u003cp\u003eSeems like results here are a strict subset of \u003ca href=\"/posts/kbhzhu_2021/\"\u003eZhu 2021\u003c/a\u003e. Same sets of dataprep of \u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e but trained on a BERT now. 
Seem to do worse than \u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e too.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eEssentially \u003ca href=\"/posts/kbhantonsson_2021/\"\u003eAntonsson 2021\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAlso performed \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e score regression.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"table-7-training-result\"\u003eTable 7 training result\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-47-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure shows us that the results attained by training on extracted feature is past the state-of-the-art at the time.\u003c/p\u003e\n\u003ch3 id=\"table-4\"\u003eTable 4\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-48-55_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-48-59_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThese tables tells us the feature extracted\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbalagopalan_2021/","tags":["ntj"],"title":"Balagopalan 2021"},{"categories":null,"contents":"A basis is a list of vectors in \\(V\\) that spans \\(V\\) and is linearly independent\nconstituents a LIST! of vectors in vector space \\(V\\) requirements the list is\u0026hellip; linear independent spans \\(V\\) additional information criteria for basis A list \\(v_1, \\dots v_{n}\\) of vectors in \\(V\\) is a basis of \\(V\\) IFF every \\(v \\in V\\) can be written uniquely as:\n\\begin{equation} v = a_1v_1+ \\dots + a_{n}v_{n} \\end{equation}\nwhere \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\).\nforward direction Suppose we have \\(v_1, \\dots, v_{n}\\) as the basis in \\(V\\). 
We desire that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\).\nBy definition, they span \\(V\\) and are linear independent in \\(V\\).\nBecause of the spanning quality, there exists at least one set of \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\) such that we can write:\n\\begin{equation} v \\in V = a_1v_1+ \\dots + a_{n}v_{n} \\end{equation}\nSuppose now that we have another representation of \\(v\\) via scalars \\(c_1, \\dots, c_{n}\\) and our same list of vectors:\n\\begin{equation} v \\in V =^{?} c_1v_1+ \\dots + c_{n}v_{n} \\end{equation}\nSubtracting the two expressions, we have that:\n\\begin{equation} 0 = (a_1-c_1)v_1 + \\dots +(a_{n}-c_{n}) v_{n} \\end{equation}\nBy definition that \\(v_1 \\dots v_{n}\\) is linearly independent, we have that \\(a_j-c_j=0 \\implies a_{j}=c_{j}\\). Therefore, there is only one unique representation for \\(v\\) as a linear combination of vectors \\(v_1, \\dots v_{n}\\).\n(to be honest, we could have just applied that as the definition of linear independence that the scalars in a linear combo of linearly independent list is unique but this is the more careful definition.)\nbackward direction Suppose we have a list \\(v_1, \\dots v_{n}\\) which uniquely constructs each \\(v \\in V\\). We desire that \\(v_1, \\dots v_{n}\\) is a basis in \\(V\\). Given a linear combination thereof can construct all \\(v \\in V\\), we can say that \\(v_1, \\dots v_{n}\\) spans \\(V\\).\nAs \\(V\\) is a vector space, we have \\(0 \\in V\\). Therefore, there exists some scalars \\(a_1, \\dots a_{n}\\) for which:\n\\begin{equation} 0 = a_1v_1 + \\dots +a_{n}v_{n} \\end{equation}\n(as we already established \\(v_1, \\dots, v_{n}\\) spans \\(V\\) and \\(0 \\in V\\))\nOf course, we are given that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\). 
As the trivial solution does exist: that \\(a_1 = \\dots = a_{n} = 0\\), it is the only solution.\nBy definition of linear independence, then, \\(v_1, \\dots v_{n}\\) is linearly independent. Having constructed that \\(v_1, \\dots v_{n}\\) is both a spanning set in \\(V\\) and are linearly independent, we have that they are a basis of \\(V\\). \\(\\blacksquare\\)\nDualing Basis Construction These are two results that says: \u0026ldquo;you can build up a linearly independent list to a basis or you can pluck away a spanning list to a basis\u0026rdquo;.\nall spanning lists contains a basis of which you are spanning Every spanning list in \\(V\\) contains the basis (and possibly some more) in \\(V\\).\nRead: \u0026ldquo;apply Linear Dependence Lemma your way to success\u0026rdquo;.\nBegin with a spanning list \\(v_1, \\dots v_{m}\\) of \\(V\\). We run a for loop for the list.\nStep 0:\nIf \\(v_1=0\\) (i.e. \\(v_1 \\in span(\\{\\})\\)), delete \\(v_1\\). Otherwise, do nothing.\nStep \\(j\\):\nIf \\(v_{j}\\) is in \\(span(v_1, \\dots v_{j-1})\\), \\(v_{j}\\) satisfies the Linear Dependence Lemma\u0026rsquo;s first condition, and therefore naturally satisfies the second condition (removal from list keeps the same span because \\(v_{j}\\) can just be rewritten from \\(v_1, \\dots v_{j-1}\\)).\nSo we remove \\(v_{j}\\) if it is indeed in the span of the previous vectors. By the Linear Dependence Lemma, the new list spans the same space the old list.\nConclusion\nBy the end of this process, no vectors left in the list will satisfy the Linear Dependence Lemma (read: we got rid of all of them.) Therefore, the list is linearly independent. However, every step of the way the Linear Dependence Lemma ensures that the new list spans the same space; therefore, the new list still spans \\(V\\). 
Having constructed a linearly independent list that spans \\(V\\), we declare the new list as a basis of \\(V\\).\nAs all we did was pluck vectors out of the old list, the new list is a sublist of the old list. This means that the spanning list (old list) contains the new list, which is a basis. \\(\\blacksquare\\)\na linearly independent list expends to a basis Every linearly independent list of vectors in finite-dimensional vector spaces can be extended to a basis.\nRecall first that every finite-dimensional vector space has a basis.\nLet\u0026rsquo;s begin with a linearly independent list in \\(V\\) \\(u_1, \\dots u_{m}\\). Let\u0026rsquo;s recruit also a basis of \\(V\\): \\(w_{1}, \\dots w_{m}\\).\nNaturally: \\(u_1, \\dots u_{m}, w_1, \\dots w_{m}\\) spans \\(V\\) (as the \\(w\\) vectors already span \\(V\\)). We will now apply the fact that all spanning lists contains a basis of which you are spanning (the order of \\(u\\) vectors first and \\(w\\) vectors second ensuring that you try to remove the \\(w\\), and, as \\(u\\) are linearly independent, none of them will be removed) to get back a basis in \\(V\\) consisting of all \\(u\\) and some \\(w\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eA basis is a list of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(V\\) that \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\) and is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea LIST! 
of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe list is\u0026hellip;\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"criteria-for-basis--kbhbasis-dot-md\"\u003ecriteria for \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eA list \\(v_1, \\dots v_{n}\\) of vectors in \\(V\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e every \\(v \\in V\\) can be written uniquely as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1v_1+ \\dots + a_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\).\u003c/p\u003e\n\u003ch4 id=\"forward-direction\"\u003eforward direction\u003c/h4\u003e\n\u003cp\u003eSuppose we have \\(v_1, \\dots, v_{n}\\) as the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\). 
We desire that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eBy definition, they \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e \\(V\\) and are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independent\u003c/a\u003e in \\(V\\).\u003c/p\u003e\n\u003cp\u003eBecause of the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e quality, there exists \u003cem\u003eat least\u003c/em\u003e one set of \\(a_1, \\dots, a_{n} \\in \\mathbb{F}\\) such that we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv \\in V = a_1v_1+ \\dots + a_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSuppose now that we have another representation of \\(v\\) via scalars \\(c_1, \\dots, c_{n}\\) and our same list of vectors:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv \\in V =^{?} c_1v_1+ \\dots + c_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubtracting the two expressions, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (a_1-c_1)v_1 + \\dots +(a_{n}-c_{n}) v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy definition that \\(v_1 \\dots v_{n}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, we have that \\(a_j-c_j=0 \\implies a_{j}=c_{j}\\). Therefore, there is only one unique representation for \\(v\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of vectors \\(v_1, \\dots v_{n}\\).\u003c/p\u003e\n\u003cp\u003e(to be honest, we could have just applied that as the definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e that the scalars in a linear combo of linearly independent list is unique but this is the more careful definition.)\u003c/p\u003e\n\u003ch4 id=\"backward-direction\"\u003ebackward direction\u003c/h4\u003e\n\u003cp\u003eSuppose we have a list \\(v_1, \\dots v_{n}\\) which uniquely constructs each \\(v \\in V\\). 
We desire that \\(v_1, \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\). Given a linear combination thereof can construct all \\(v \\in V\\), we can say that \\(v_1, \\dots v_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs \\(V\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e, we have \\(0 \\in V\\). Therefore, there exists some scalars \\(a_1, \\dots a_{n}\\) for which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_1v_1 + \\dots +a_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(as we already established \\(v_1, \\dots, v_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\) and \\(0 \\in V\\))\u003c/p\u003e\n\u003cp\u003eOf course, we are given that \\(v_1, \\dots v_{n}\\) uniquely constructs each \\(v \\in V\\). As the trivial solution \u003cem\u003edoes\u003c/em\u003e exist: that \\(a_1 = \\dots = a_{n} = 0\\), it is the only solution.\u003c/p\u003e\n\u003cp\u003eBy definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e, then, \\(v_1, \\dots v_{n}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. Having constructed that \\(v_1, \\dots v_{n}\\) is both a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e set in \\(V\\) and are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, we have that they are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"dualing-basis-construction\"\u003eDualing Basis Construction\u003c/h3\u003e\n\u003cp\u003eThese are two results that says: \u0026ldquo;you can build up a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e or you can pluck away a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch4 id=\"all-spanning--kbhspan-dot-md--lists-contains-a-basis--kbhbasis-dot-md--of-which-you-are-spanning--kbhspan-dot-md\"\u003eall \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e lists contains a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of which you are \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list in \\(V\\) contains the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e (and possibly some more) in \\(V\\).\u003c/p\u003e\n\u003cp\u003eRead: \u0026ldquo;apply \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e your way to success\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eBegin with a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list \\(v_1, \\dots v_{m}\\) of \\(V\\). We run a for loop for the list.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eStep 0:\u003c/p\u003e\n\u003cp\u003eIf \\(v_1=0\\) (i.e. \\(v_1 \\in span(\\{\\})\\)), delete \\(v_1\\). 
Otherwise, do nothing.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eStep \\(j\\):\u003c/p\u003e\n\u003cp\u003eIf \\(v_{j}\\) is in \\(span(v_1, \\dots v_{j-1})\\), \\(v_{j}\\) satisfies the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e\u0026rsquo;s first condition, and therefore naturally satisfies the second condition (removal from list keeps the same \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e because \\(v_{j}\\) can just be rewritten from \\(v_1, \\dots v_{j-1}\\)).\u003c/p\u003e\n\u003cp\u003eSo we remove \\(v_{j}\\) if it is indeed in the span of the previous vectors. By the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, the new list \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e the same space the old list.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eConclusion\u003c/p\u003e\n\u003cp\u003eBy the end of this process, no vectors left in the list will satisfy the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e (read: we got rid of all of them.) Therefore, the list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. However, every step of the way the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e ensures that the new list \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e the same space; therefore, the new list still \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). 
Having constructed a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list that \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\), we declare the new list as a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs all we did was pluck vectors out of the old list, the new list is a sublist of the old list. This means that the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list (old list) contains the new list, which is a basis. \\(\\blacksquare\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"a-linearly-independent--kbhlinear-independence-dot-md--list-expends-to-a-basis--kbhbasis-dot-md\"\u003ea \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list expends to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003es can be extended to a basis.\u003c/p\u003e\n\u003cp\u003eRecall first that \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery finite-dimensional vector space has a basis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin with a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\) \\(u_1, \\dots u_{m}\\). 
Let\u0026rsquo;s recruit also a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\): \\(w_{1}, \\dots w_{m}\\).\u003c/p\u003e\n\u003cp\u003eNaturally: \\(u_1, \\dots u_{m}, w_1, \\dots w_{m}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\) (as the \\(w\\) vectors already \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e \\(V\\)). We will now apply the fact that \u003ca href=\"#all-spanning--kbhspan-dot-md--lists-contains-a-basis--kbhbasis-dot-md--of-which-you-are-spanning--kbhspan-dot-md\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e (the order of \\(u\\) vectors first and \\(w\\) vectors second ensuring that you try to remove the \\(w\\), and, as \\(u\\) are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, none of them will be removed) to get back a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\) consisting of all \\(u\\) and some \\(w\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbasis/","tags":null,"title":"basis"},{"categories":null,"contents":"Suppose \\(v_1, \\dots v_{n} \\in V\\) is a basis of some vector space \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) is just a good\u0026rsquo;ol list of length \\(n= \\dim V\\) in \\(W\\).\nThere exists a unique linear map \\(T \\in \\mathcal{L}(V,W)\\) such that\u0026hellip;\n\\begin{equation} Tv_{j} = w_{j} \\end{equation}\nfor each \\(j = 1, \\dots n\\)\nIntuition The layperson\u0026rsquo;s explanation of this result: 1) that, for everywhere you want to take the basis of one space, there\u0026rsquo;s always a unique linear map to take you there. 
2) that, a linear map is determined uniquely by what it does to the basis of its domain.\nProof We have two vector spaces, \\(V\\) and \\(W\\); \\(v_1, \\dots v_{n} \\in V\\) forms a basis of \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) are just some vectors in \\(W\\).\nDefinition We define some \\(T: V \\to W\\) as follows:\n\\begin{equation} T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n} \\end{equation}\nwhere, \\(c_1, \\dots c_{n} \\in \\mathbb{F}\\). Note that the actual values of \\(c\\) doesn\u0026rsquo;t actually matter here.\nExistence We now show that the \\(T\\) defined above has the property of mapping \\(Tv_{j} \\to w_{j}\\).\nAs the basis \\(v_1, \\dots v_{n}\\) is a spanning list of \\(V\\), some \\(T\\) that takes an arbitrary linear combination of \\(v\\) as input does indeed have domain \\(V\\). Due to addition\u0026rsquo;s closure, a linear combination of \\(w\\) is \\(\\in W\\). This makes \\(T\\) at least a function from \\(V \\to W\\).\nOf course, by taking all \\(c_{i}\\) to \\(0\\) except for the index \\(c_{j}\\) you are interested in to \\(1\\), you can show that this \\(T\\) takes \\(v_{j}\\) to \\(w_{j}\\).\nWe now show that \\(T\\) is a Linear Map. This part proof is just route algebra so I won\u0026rsquo;t type it again.\nUniqueness Suppose there is a Linear Map that has the desired property: that \\(T \\in \\mathcal{L}(V,W)\\) and that \\(Tv_{j}=w_{j}, \\forall j=1, \\dots n\\). For any scalar \\(c_{j}\\), the homogeneity of \\(T\\) indicates that this same \\(T\\) has to take \\(T(c_{j}v_{j}) = c_{j}Tv_{j} = c_{j}w_{j}\\).\nNow, the additivity of \\(T\\) also indicates that we can string these \\(c_{j} v_{j}\\) together in the same \\(T\\); that:\ngiven \\(T(c_{j}v_{j}) = c_{j}w_{j}\\), we can just string it all together to get \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1w_1+ \\dots + c_{n}w_{n}\\).\nThis means that there is only one \\(T\\) that behaves in the way that we desire, on the span of \\(v_1 \\dots v_{n}\\). 
Those vectors being the basis, their span is just the domain \\(V\\). This makes \\(T\\) uniquely determined on \\(V\\) as we were able to construct the original given map simply by following the rules of the Linear Map.\n","html":"\u003cp\u003eSuppose \\(v_1, \\dots v_{n} \\in V\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of some \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) is just a good\u0026rsquo;ol list of length \\(n= \\dim V\\) in \\(W\\).\u003c/p\u003e\n\u003cp\u003eThere exists a unique linear map \\(T \\in \\mathcal{L}(V,W)\\) such that\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv_{j} = w_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor each \\(j = 1, \\dots n\\)\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eThe layperson\u0026rsquo;s explanation of this result: 1) that, for everywhere you want to take the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of one space, there\u0026rsquo;s always a unique linear map to take you there. 2) that, a linear map is determined uniquely by what it does to the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of its domain\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003cp\u003eWe have two vector spaces, \\(V\\) and \\(W\\); \\(v_1, \\dots v_{n} \\in V\\) forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\); \\(w_1, \\dots w_{n} \\in W\\) are just some \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(W\\).\u003c/p\u003e\n\u003ch3 id=\"definition\"\u003eDefinition\u003c/h3\u003e\n\u003cp\u003eWe define some \\(T: V \\to W\\) as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(c_1, \\dots c_{n} \\in \\mathbb{F}\\). 
Note that the actual \u003cem\u003evalues\u003c/em\u003e of \\(c\\) doesn\u0026rsquo;t actually matter here.\u003c/p\u003e\n\u003ch3 id=\"existence\"\u003eExistence\u003c/h3\u003e\n\u003cp\u003eWe now show that the \\(T\\) defined above has the property of mapping \\(Tv_{j} \\to w_{j}\\).\u003c/p\u003e\n\u003cp\u003eAs the basis \\(v_1, \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\), some \\(T\\) that takes an arbitrary \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v\\) as input does indeed have domain \\(V\\). Due to \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e\u0026rsquo;s closure, a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(w\\) is \\(\\in W\\). This makes \\(T\\) at least a function from \\(V \\to W\\).\u003c/p\u003e\n\u003cp\u003eOf course, by taking all \\(c_{i}\\) to \\(0\\) except for the index \\(c_{j}\\) you are interested in to \\(1\\), you can show that this \\(T\\) takes \\(v_{j}\\) to \\(w_{j}\\).\u003c/p\u003e\n\u003cp\u003eWe now show that \\(T\\) is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e. This part proof is just route algebra so I won\u0026rsquo;t type it again.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-02_22-26-02_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"uniqueness\"\u003eUniqueness\u003c/h3\u003e\n\u003cp\u003eSuppose there is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e that has the desired property: that \\(T \\in \\mathcal{L}(V,W)\\) and that \\(Tv_{j}=w_{j}, \\forall j=1, \\dots n\\). 
For any scalar \\(c_{j}\\), the homogeneity of \\(T\\) indicates that this same \\(T\\) has to take \\(T(c_{j}v_{j}) = c_{j}Tv_{j} = c_{j}w_{j}\\).\u003c/p\u003e\n\u003cp\u003eNow, the additivity of \\(T\\) also indicates that we can string these \\(c_{j} v_{j}\\) together in the same \\(T\\); that:\u003c/p\u003e\n\u003cp\u003egiven \\(T(c_{j}v_{j}) = c_{j}w_{j}\\), we can just string it all together to get \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1w_1+ \\dots + c_{n}w_{n}\\).\u003c/p\u003e\n\u003cp\u003eThis means that there is only one \\(T\\) that behaves in the way that we desire, on the span of \\(v_1 \\dots v_{n}\\). Those vectors being the basis, their span is just the domain \\(V\\). This makes \\(T\\) uniquely determined on \\(V\\) as we were able to construct the original given map simply by following the rules of the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbasis_of_domain/","tags":null,"title":"basis of domain"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhbatchalign/","tags":null,"title":"batchalign"},{"categories":null,"contents":"Things to include Rev How to handle interspersed results Utterance segmentation Why --prealigned and the overall performance of MFA Beginning/End Bullet and why we throw away Rev\u0026rsquo;s output fixbullets and manual utterance segmentation \u0026amp;*INV= interspersed comments ","html":"\u003ch2 id=\"things-to-include\"\u003eThings to include\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRev\u003c/li\u003e\n\u003cli\u003eHow to handle interspersed results\u003c/li\u003e\n\u003cli\u003eUtterance segmentation\u003c/li\u003e\n\u003cli\u003eWhy \u003ccode\u003e--prealigned\u003c/code\u003e and the overall performance of MFA\u003c/li\u003e\n\u003cli\u003eBeginning/End Bullet and why we throw away Rev\u0026rsquo;s output\u003c/li\u003e\n\u003cli\u003efixbullets and manual utterance 
segmentation\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e\u0026amp;*INV=\u003c/code\u003e interspersed comments\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbatchalign_paper_outline/","tags":null,"title":"Batchalign Paper Outline"},{"categories":null,"contents":"For some Baysian Network situation, you will note that there\u0026rsquo;s some bodge of values below:\n\\begin{equation} P(A|M) = \\frac{P(M|A)P(A)}{P(M)} \\end{equation}\nif we are only interested in a function in terms of different values of \\(a\\), \\(P(M)\\) is not that interesting. Therefore, we can just calculate \\(A\\) for all \\(a\\), and then normalize it to sum to 1:\n\\begin{equation} P(A|M) \\propto P(M|A)P(A) \\end{equation}\nand then, after calculating each \\(P(M|A)P(A)\\) , we just ensure that each thing sums to one.\n","html":"\u003cp\u003eFor some \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e situation, you will note that there\u0026rsquo;s some bodge of values below:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A|M) = \\frac{P(M|A)P(A)}{P(M)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we are only interested in a function in terms of different values of \\(a\\), \\(P(M)\\) is not that interesting. 
Therefore, we can just calculate \\(A\\) for all \\(a\\), and then normalize it to sum to 1:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A|M) \\propto P(M|A)P(A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, after calculating each \\(P(M|A)P(A)\\) , we just ensure that each thing sums to one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbayes_normalization_constant/","tags":null,"title":"Bayes Normalization Constant"},{"categories":null,"contents":"\\begin{align} p(x\\mid y) = \\frac{p(y \\mid x) p(x)}{p(y)} \\end{align}\nthis is a direct result of the probability chain rule.\nTypically, we name \\(p(y|x)\\) the \u0026ldquo;likelihood\u0026rdquo;, \\(p(x)\\) the \u0026ldquo;prior\u0026rdquo;.\nBetter normalization What if you don\u0026rsquo;t fully know \\(p(y)\\), say it was parameterized over \\(x\\)?\n\\begin{align} p(x|y) \u0026amp;= \\frac{p(y|x) \\cdot p(x)}{p(y)} \\\\ \u0026amp;= \\frac{p(y|x) \\cdot p(x)}{\\sum_{X_{i}} p(y|X_{i})} \\end{align}\njust apply law of total probability! taad\n","html":"\u003cp\u003e\\begin{align}\np(x\\mid y) = \\frac{p(y \\mid x) p(x)}{p(y)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethis is a direct result of the \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTypically, we name \\(p(y|x)\\) the \u0026ldquo;likelihood\u0026rdquo;, \\(p(x)\\) the \u0026ldquo;prior\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"better-normalization\"\u003eBetter normalization\u003c/h2\u003e\n\u003cp\u003eWhat if you don\u0026rsquo;t fully know \\(p(y)\\), say it was parameterized over \\(x\\)?\u003c/p\u003e\n\u003cp\u003e\\begin{align}\np(x|y) \u0026amp;= \\frac{p(y|x) \\cdot p(x)}{p(y)} \\\\\n\u0026amp;= \\frac{p(y|x) \\cdot p(x)}{\\sum_{X_{i}} p(y|X_{i})}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ejust apply \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e! 
taad\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbayes_theorem/","tags":null,"title":"Bayes Theorem"},{"categories":null,"contents":"\\begin{equation} P(B=b | D=d) = P(D=d|B=b) P(B=b) k \\end{equation}\nwhere, \\(P(B=b | D=d)\\) is your \u0026ldquo;posterior\u0026rdquo;; \\(P(D=d|B=b)\\) is your likelyhood; and \\(P(B=b)\\) is your prior.\n","html":"\u003cp\u003e\\begin{equation}\nP(B=b | D=d) = P(D=d|B=b) P(B=b) k\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(P(B=b | D=d)\\) is your \u0026ldquo;posterior\u0026rdquo;; \\(P(D=d|B=b)\\) is your likelyhood; and \\(P(B=b)\\) is your prior.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbayes_theorem_over_random_variable/","tags":null,"title":"Bayes Theorem Over Random Variable"},{"categories":null,"contents":"A Baysian Network is composed of:\na directed, acyclic graph a set of conditional probabilities acting as factors. You generally want arrows to go in the direction of causality.\nVia the chain rule of Bayes nets, we can write this equivalently as:\n\\begin{equation} (P(B) \\cdot P(S)) \\cdot P(E \\mid B,S) \\cdot P(D \\mid E) \\cdot P(C \\mid E) \\end{equation}\ngenerally, for \\(n\\) different variables,\n\\begin{equation} \\prod_{i=1}^{n} p(X_{i} \\mid pa(x_{i})) \\end{equation}\nwhere, \\(pa(x_{i})\\) are the parent values of \\(x_{i}\\).\nconditional independence \\(X\\) and \\(Y\\) are conditionally independent given \\(Z\\) IFF:\n\\begin{equation} P(X, Y|Z) = P(X|Z) \\cdot P(Y|Z) \\end{equation}\n(\u0026ldquo;two variables are conditionally independent if they exhibit independence conditioned on \\(Z\\)\u0026rdquo;)\nthis is equivalent to saying:\n\\begin{equation} P(X|Z) = P(X|Y,Z) \\end{equation}\n(\u0026ldquo;two variables are conditionally independent if the inclusion of the evidence of another set into the condition doesn\u0026rsquo;t influence the outcome if they are both conditioned on \\(Z\\)\u0026rdquo;)\nWe write:\n\\begin{equation} X \\perp Y \\mid Z 
\\end{equation}\nThe network above has an important property: conditions \\(B\\) and \\(S\\) are independent; and conditions \\(D\\) and \\(C\\) are independent. Though they all depended on \\(E\\), each pair is conditionally independent.\nchecking for conditional independence \\((A \\perp B \\mid C)\\) IFF ALL undirected paths from \\(A\\) to \\(B\\) on a Baysian Network exhibits d seperation, whose conditions are below:\nA path is d-seperated by \\(C\\), the set of evidence if ANY of the following:\nthe path contains a chain of nodes: \\(X \\to Y \\to Z\\) where \\(Y \\in C\\) the path contains a fork: \\(X \\leftarrow C \\to Z\\), where \\(Y \\in C\\) the path contains a inverted fork: \\(X \\to Y \\leftarrow Z\\), where \\(Y\\) is not in \\(C\\) and no descendent of \\(Y\\) is in \\(C\\). Note that \\(C\\) can be empty. This is why, \\(B,S\\) is conditionally independent on nothing on that graph above, so they are just actually independent.\nIf the structure does not imply conditional independence, it does NOT mean that the structure is conditionally dependent. It could still be conditionally independent. 
end{equation}\nmarkov blanket the markov blanket of node \\(X\\) is the minimal set of nodes on a Baysian Network which renders \\(X\\) conditionally independent from all other nodes not in the blanket.\nIt includes, at most:\nnode\u0026rsquo;s parenst node\u0026rsquo;s chlidren other parents of node\u0026rsquo;s children Check that you need all of these values: frequently, you don\u0026rsquo;t\u0026mdash;simply selecting a subset of this often d-seperates the node from everyone else.\nparameter learning in Baysian Network Let:\n\\(x_{1:n}\\) be variables \\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took \\(G\\) is the graph \\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\)) \\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\) \\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator) What we want to learn from the graph, is:\n\\begin{equation} P(x_{i}=k | \\pi_{i,j}) = \\theta_{i,j,k} \\end{equation}\n\u0026ldquo;what\u0026rsquo;s the probability that \\(x_{i}\\) takes on value \\(k\\), given the state of \\(x_{i}\\)\u0026rsquo;s parents are \\(\\pi_{i,j}\\) right now?\u0026rdquo;\nLet us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\). 
This is usually represented programmatically as a set of matrices:\nTo learn the parameter as desired, we use:\n\\begin{equation} MLE\\ \\hat{\\theta}_{i,j,k} = \\frac{m_{i,j,k}}{\\sum_{k\u0026rsquo;} m_{i,j,k\u0026rsquo;}} \\end{equation}\nIn that: we want to sum up all possible value \\(x_{i}\\) takes on, and check how many times it takes on a certain value, given the conditions are the same.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e is composed of:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ea directed, acyclic graph\u003c/li\u003e\n\u003cli\u003ea set of \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probabilities\u003c/a\u003e acting as \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYou generally want arrows to go in the direction of causality.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-28_10-20-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eVia the chain rule of Bayes nets, we can write this equivalently as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(P(B) \\cdot P(S)) \\cdot P(E \\mid B,S) \\cdot P(D \\mid E) \\cdot P(C \\mid E)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egenerally, for \\(n\\) different variables,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\prod_{i=1}^{n} p(X_{i} \\mid pa(x_{i}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(pa(x_{i})\\) are the parent values of \\(x_{i}\\).\u003c/p\u003e\n\u003ch2 id=\"conditional-independence\"\u003econditional independence\u003c/h2\u003e\n\u003cp\u003e\\(X\\) and \\(Y\\) are \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e given \\(Z\\) IFF:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X, Y|Z) = P(X|Z) \\cdot P(Y|Z)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;two variables are \u003ca 
href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e if they exhibit \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e conditioned on \\(Z\\)\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003ethis is equivalent to saying:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X|Z) = P(X|Y,Z)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;two variables are \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e if the inclusion of the evidence of another set into the condition doesn\u0026rsquo;t influence the outcome if they are both conditioned on \\(Z\\)\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eWe write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\perp Y \\mid Z\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe network above has an important property: conditions \\(B\\) and \\(S\\) are independent; and conditions \\(D\\) and \\(C\\) are independent. Though they all depended on \\(E\\), each pair is \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"checking-for-conditional-independence\"\u003echecking for conditional independence\u003c/h3\u003e\n\u003cp\u003e\\((A \\perp B \\mid C)\\) IFF ALL undirected paths from \\(A\\) to \\(B\\) on a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e exhibits \u003ca href=\"#checking-for-conditional-independence\"\u003ed seperation\u003c/a\u003e, whose conditions are below:\u003c/p\u003e\n\u003cp\u003eA path is d-seperated by \\(C\\), the set of evidence if ANY of the following:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe path contains a chain of nodes: \\(X \\to Y \\to Z\\) where \\(Y \\in C\\)\u003c/li\u003e\n\u003cli\u003ethe path contains a fork: \\(X \\leftarrow C \\to Z\\), where \\(Y \\in C\\)\u003c/li\u003e\n\u003cli\u003ethe path contains a \u003ca href=\"#checking-for-conditional-independence\"\u003einverted 
fork\u003c/a\u003e: \\(X \\to Y \\leftarrow Z\\), where \\(Y\\) is \u003cstrong\u003enot\u003c/strong\u003e in \\(C\\) and no descendent of \\(Y\\) is in \\(C\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eNote that \\(C\\) can be empty. This is why, \\(B,S\\) is \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e on \u003cstrong\u003enothing\u003c/strong\u003e on that graph above, so they are just actually independent.\u003c/p\u003e\n\u003cp\u003eIf the structure does not imply \u003ca href=\"#conditional-independence\"\u003econditional independence\u003c/a\u003e, it does \u003cstrong\u003eNOT\u003c/strong\u003e mean that the structure is conditionally dependent. It could still be \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e.\nend{equation}\u003c/p\u003e\n\u003ch4 id=\"markov-blanket\"\u003emarkov blanket\u003c/h4\u003e\n\u003cp\u003ethe \u003ca href=\"#markov-blanket\"\u003emarkov blanket\u003c/a\u003e of node \\(X\\) is the minimal set of nodes on a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e which renders \\(X\\) \u003ca href=\"#conditional-independence\"\u003econditionally independent\u003c/a\u003e from all other nodes not in the blanket.\u003c/p\u003e\n\u003cp\u003eIt includes, at most:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003enode\u0026rsquo;s parenst\u003c/li\u003e\n\u003cli\u003enode\u0026rsquo;s chlidren\u003c/li\u003e\n\u003cli\u003eother parents of node\u0026rsquo;s children\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCheck that you need all of these values: frequently, you don\u0026rsquo;t\u0026mdash;simply selecting a subset of this often d-seperates the node from everyone else.\u003c/p\u003e\n\u003ch2 id=\"parameter-learning--kbhparameter-learning-dot-md--in-baysian-network--kbhbaysian-network-dot-md\"\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e in \u003ca 
href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x_{1:n}\\) be variables\u003c/li\u003e\n\u003cli\u003e\\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took\u003c/li\u003e\n\u003cli\u003e\\(G\\) is the graph\u003c/li\u003e\n\u003cli\u003e\\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\))\u003c/li\u003e\n\u003cli\u003e\\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\)\u003c/li\u003e\n\u003cli\u003e\\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat we want to learn from the graph, is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_{i}=k | \\pi_{i,j}) = \\theta_{i,j,k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability that \\(x_{i}\\) takes on value \\(k\\), given the state of \\(x_{i}\\)\u0026rsquo;s parents are \\(\\pi_{i,j}\\) right now?\u0026rdquo;\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\). 
This is usually represented programmatically as a set of matrices:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-10_09-47-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eTo learn the parameter as desired, we use:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nMLE\\ \\hat{\\theta}_{i,j,k} = \\frac{m_{i,j,k}}{\\sum_{k\u0026rsquo;} m_{i,j,k\u0026rsquo;}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn that: we want to sum up all possible value \\(x_{i}\\) takes on, and check how many times it takes on a certain value, given the conditions are the same.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbaysian_network/","tags":null,"title":"Baysian Network"},{"categories":null,"contents":"Representing conditional dependencies.\nSemiparametric Expert Bayes Net We use a Semiparametric Expert Bayes Net to learn the structure of the dynamics\u0026hellip;. of medicine somewhere?\nAtienza et al. 2022\nlearns semiparemetic relations in expert basian networks uses gaussian rocesses for modeling no-linear performances + horseshoe regularization 2401.16419\nResults UCI Liver Disorder Dataste\nwhat is the oracle graph? what specific dynamics did the model learn? ","html":"\u003cp\u003eRepresenting conditional dependencies.\u003c/p\u003e\n\u003ch2 id=\"semiparametric-expert-bayes-net\"\u003eSemiparametric Expert Bayes Net\u003c/h2\u003e\n\u003cp\u003eWe use a \u003ca href=\"#semiparametric-expert-bayes-net\"\u003eSemiparametric Expert Bayes Net\u003c/a\u003e to learn the structure of the dynamics\u0026hellip;. of medicine somewhere?\u003c/p\u003e\n\u003cp\u003eAtienza et al. 
2022\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003elearns semiparemetic relations in expert basian networks\u003c/li\u003e\n\u003cli\u003euses gaussian rocesses for modeling no-linear performances + horseshoe regularization\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e2401.16419\u003c/p\u003e\n\u003ch2 id=\"results\"\u003eResults\u003c/h2\u003e\n\u003cp\u003eUCI Liver Disorder Dataste\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhat is the oracle graph?\u003c/li\u003e\n\u003cli\u003ewhat specific dynamics did the model learn?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbaysian_networks_for_healthcare/","tags":null,"title":"Baysian Networks for Healthcare"},{"categories":null,"contents":"We treat this as an inference problem in Naive Bayes: observations are independent from each other.\nInstead of trying to compute a \\(\\theta\\) that works for Maximum Likelihood Parameter Learning, what we instead do is try to understand what \\(\\theta\\) can be in terms of a distribution.\nThat is, we want to get some:\n\u0026ldquo;for each value of \\(\\theta\\), what\u0026rsquo;s the chance that that is the actual value\u0026rdquo;\nTo do this, we desire:\n\\begin{equation} p(\\theta | D) \\end{equation}\n\u0026ldquo;what\u0026rsquo;s the probability of theta being at a certain value given the observations we had.\u0026rdquo;\nAnd to obtain the actual the actual value, we calculate the expectation of this distribution:\n\\begin{equation} \\hat{\\theta} = \\mathbb{E}[\\theta] = \\int \\theta p(\\theta | D) \\dd{\\theta} \\end{equation}\nIf its not possible to obtain such an expected value, we then calculate just the mode of the distribution (like where the peak probability of \\(\\theta\\) is) by:\n\\begin{equation} \\hat{\\theta} = \\arg\\max_{\\theta} p(\\theta | D) \\end{equation}\nBayesian Parameter Learning on Binary Distributions We are working in a Naive Bayes environment, where we assume that \\(o_{1:m}\\) are conditionally 
independent. Then, we essentially consider each class as carrying some parameter \\(\\theta\\) which contains the possibility of that class happening.\nUsing the same steps as inference with Naive Bayes and some algebra:\n\\begin{equation} p(\\theta | o_{1:m}) \\propto p(\\theta, o_{1:m}) \\end{equation}\nNow, we would like to normalize this function for \\(\\theta \\in [0,1]\\), so, we get:\n\\begin{equation} \\int_{0}^{1} \\theta^{n}(1-\\theta)^{m-n}\\dd{\\theta} = \\frac{\\Gamma(n+1) \\Gamma(m-n+1)}{\\Gamma(m+2)} \\end{equation}\nwhere, \\(\\Gamma\\) is a real valued factorial generalization, and this entire integral is often called the \u0026ldquo;Beta Function\u0026rdquo;\nNormalizing the output, we have that:\n\\begin{align} p(\\theta | o_{1:m}) \u0026amp;\\propto p(\\theta, o_{1:m}) \\\\ \u0026amp;= \\frac{\\Gamma(m+2)}{\\Gamma(n+1) \\Gamma(m-n+1)} \\theta^{n} (1-\\theta)^{m-n} \\\\ \u0026amp;= Beta(\\theta | n+1, m-n +1) \\end{align}\nwhere \\(m\\) is the sample size and \\(n\\) is the number of events in the sample space.\nBeta Distribution Suppose you had a non-uniform prior:\nPrior: \\(Beta(\\alpha, \\beta)\\) Observe: \\(m_1\\) positive outcomes, \\(m_2\\) negative outcomes Posterior: \\(Beta(\\alpha+m_1, \\beta+m_2)\\) That is: for binary outcomes, the beta distribution can be updated without doing any math.\nFor instance, say we had:\n\\begin{equation} \\theta_{t} = Beta(\\alpha, \\beta) \\end{equation}\nand we observed that \\(o_{i} = 1\\), then:\n\\begin{equation} \\theta_{t+1} = Beta(\\alpha+1, \\beta) \\end{equation}\ninstead, if we observed that \\(o_{i} = 0\\), then:\n\\begin{equation} \\theta_{t+1} = Beta(\\alpha, \\beta+1) \\end{equation}\nEssentially: MAGNITUDE of beta distribution governs how small the spread is (higher magnitude smaller spread), and the balance between the two values represents how much skew there is.\nBeta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has mean:\n\\begin{equation} 
\\frac{\\alpha}{\\alpha + \\beta} \\end{equation}\nand variance:\n\\begin{equation} \\frac{ab}{(a+b)^{2}(a+b+1)} \\end{equation}\nand has mode:\n\\begin{equation} \\frac{\\alpha -1 }{\\alpha + \\beta -2} \\end{equation}\nwhen \\(\\alpha \u0026gt; 1\\) and \\(\\beta \u0026gt; 1\\).\nThis means that, at \\(beta(1,1)\\), we have a inform distribution\nLaplace Smoothing Laplace Smoothing is a prior where:\n\\begin{equation} prior\\ X \\sim Beta(2,2) \\end{equation}\nso you just add \\(2\\) to each of our output pseudo counts.\nsee also Laplace prior, where you use Laplace Smoothing for your prior\nTotal Probability in beta distributions Recall, for total probability, beta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has expectation:\n\\begin{equation} \\frac{\\alpha}{\\alpha + \\beta} \\end{equation}\nand has mode:\n\\begin{equation} \\frac{\\alpha -1 }{\\alpha + \\beta -2} \\end{equation}\nChoosing a prior do it with only the problem and no knowledge of the data uniform typically works well, but if you have any reason why it won\u0026rsquo;t be uniform (say coin flip), you should count accordingly such as making the distribution more normal with \\(Beta(1,1)\\) Dirichlet Distribution We can generalize the Bayesian Parameter Learning on Binary Distributions with the Dirichlet Distribution.\nFor \\(n\\) parameters \\(\\theta_{1:n}\\) (\\(n-1\\) of which independent, because we know that \\(\\sum \\theta_{i} = 1\\)), where \\(\\theta_{j}\\) is the probability that the \\(j\\) th case of the categorical distribution happening.\nNow:\n\\begin{equation} Dir(\\theta_{1:n} | \\alpha) = \\frac{\\Gamma(\\alpha_{0})}{\\prod_{i=1}^{n} \\Gamma(\\alpha_{i})} \\prod_{i=1}^{n} \\theta_{i}^{\\alpha_{i}-1} \\end{equation}\nwhereby:\n\\begin{equation} \\alpha_{j} = prior + count \\end{equation}\nfor \\(j \\geq 1\\), and\n\\begin{equation} \\alpha_{0} = prior + total_{}count \\end{equation}\nwhereby prior is your initial distribution. 
If its uniform, then all prior equals one.\nThe expectation for each \\(\\theta_{i}\\) happening is:\n\\begin{equation} \\mathbb{E}[\\theta_{i}] = \\frac{a_{i}}{\\sum_{j=1}^{n} \\alpha_{j}} \\end{equation}\nand, with \\(a_{i} \u0026gt; 1\\), the $i$th mode is:\n\\begin{equation} \\frac{a_{i}-1 }{\\sum_{j=1}^{n} a_{j}-n} \\end{equation}\nexpectation of a distribution For Beta Distribution and Dirichlet Distribution, the expectation of their distribution is simply their mean.\nif you say want to know what the probability of \\(P(thing|D)\\), you can integrate over all \\(P(thing|\\theta)\\):\n\\begin{equation} \\int^{1}_{0} P(thing|\\theta)P(\\theta)d\\theta \\end{equation}\nThe first thing is just the actual value of \\(\\theta\\) (because \\(\\theta\\) is literally the probability of \\(thing\\) happening). The second thing is the probability of that \\(\\theta\\) actually happening.\nThis, of course, just add up to the expected value of \\(\\theta\\), which is given above:\n\\begin{equation} \\frac{\\alpha}{\\alpha + \\beta} \\end{equation}\n","html":"\u003cp\u003eWe treat this as an inference problem in \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e: \u003cstrong\u003eobservations are independent from each other\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eInstead of trying to compute a \\(\\theta\\) that works for \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e, what we instead do is try to understand what \\(\\theta\\) can be in terms of a distribution.\u003c/p\u003e\n\u003cp\u003eThat is, we want to get some:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_10-22-12_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u0026ldquo;for each value of \\(\\theta\\), what\u0026rsquo;s the chance that that is the actual value\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eTo do this, we 
desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\theta | D)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability of theta being at a certain value given the observations we had.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eAnd to obtain the actual the actual value, we calculate the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of this distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\mathbb{E}[\\theta] = \\int \\theta p(\\theta | D) \\dd{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf its not possible to obtain such an expected value, we then calculate just the mode of the distribution (like where the peak probability of \\(\\theta\\) is) by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\arg\\max_{\\theta} p(\\theta | D)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"bayesian-parameter-learning-on-binary-distributions\"\u003eBayesian Parameter Learning on Binary Distributions\u003c/h2\u003e\n\u003cp\u003eWe are working in a \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e environment, where we assume that \\(o_{1:m}\\) are \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditionally independent\u003c/a\u003e. 
Then, we essentially consider each class as carrying some parameter \\(\\theta\\) which contains the possibility of that class happening.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_14-55-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eUsing the same steps as \u003ca href=\"/posts/kbhnaive_bayes/#id-76165699-9f9a-4b7e-a081-c8462cece2ee-inference-with-id-6cdf74a2-2451-47d1-8a62-62aa6dff62c6-naive-bayes\"\u003einference with Naive Bayes\u003c/a\u003e and some algebra:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\theta | o_{1:m}) \\propto p(\\theta, o_{1:m})\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_17-20-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNow, we would like to normalize this function for \\(\\theta \\in [0,1]\\), so, we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{1} \\theta^{n}(1-\\theta)^{m-n}\\dd{\\theta} = \\frac{\\Gamma(n+1) \\Gamma(m-n+1)}{\\Gamma(m+2)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\Gamma\\) is a real valued factorial generalization, and this entire integral is often called the \u0026ldquo;\u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBeta Function\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eNormalizing the output, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\np(\\theta | o_{1:m}) \u0026amp;\\propto p(\\theta, o_{1:m}) \\\\\n\u0026amp;= \\frac{\\Gamma(m+2)}{\\Gamma(n+1) \\Gamma(m-n+1)} \\theta^{n} (1-\\theta)^{m-n} \\\\\n\u0026amp;= Beta(\\theta | n+1, m-n +1)\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere \\(m\\) is the sample size and \\(n\\) is the number of events in the sample space.\u003c/p\u003e\n\u003ch3 id=\"beta-distribution\"\u003eBeta Distribution\u003c/h3\u003e\n\u003cp\u003eSuppose you had a non-uniform prior:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ePrior: \\(Beta(\\alpha, \\beta)\\)\u003c/li\u003e\n\u003cli\u003eObserve: \\(m_1\\) positive 
outcomes, \\(m_2\\) negative outcomes\u003c/li\u003e\n\u003cli\u003ePosterior: \\(Beta(\\alpha+m_1, \\beta+m_2)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThat is: for binary outcomes, the beta distribution can be updated without doing any math.\u003c/p\u003e\n\u003cp\u003eFor instance, say we had:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{t} = Beta(\\alpha, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand we observed that \\(o_{i} = 1\\), then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{t+1} = Beta(\\alpha+1, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003einstead, if we observed that \\(o_{i} = 0\\), then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{t+1} = Beta(\\alpha, \\beta+1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEssentially: MAGNITUDE of beta distribution governs how small the spread is (higher magnitude smaller spread), and the balance between the two values represents how much skew there is.\u003c/p\u003e\n\u003cp\u003eBeta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has mean:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha}{\\alpha + \\beta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand variance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{ab}{(a+b)^{2}(a+b+1)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand has mode:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha -1 }{\\alpha + \\beta -2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhen \\(\\alpha \u0026gt; 1\\) and \\(\\beta \u0026gt; 1\\).\u003c/p\u003e\n\u003cp\u003eThis means that, at \\(beta(1,1)\\), we have a inform distribution\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_21-32-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"laplace-smoothing\"\u003eLaplace Smoothing\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#laplace-smoothing\"\u003eLaplace Smoothing\u003c/a\u003e is a prior 
where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nprior\\ X \\sim Beta(2,2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso you just add \\(2\\) to each of our output pseudo counts.\u003c/p\u003e\n\u003cp\u003esee also \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/#map-for-bernoulli-and-binomial-p\"\u003eLaplace prior\u003c/a\u003e, where you use \u003ca href=\"#laplace-smoothing\"\u003eLaplace Smoothing\u003c/a\u003e for your prior\u003c/p\u003e\n\u003ch3 id=\"total-probability-in-beta-distributions\"\u003eTotal Probability in beta distributions\u003c/h3\u003e\n\u003cp\u003eRecall, for total probability, beta is a special distribution which takes parameters \\(\\alpha, \\beta\\),and has \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha}{\\alpha + \\beta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand has mode:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha -1 }{\\alpha + \\beta -2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"choosing-a-prior\"\u003eChoosing a prior\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edo it with only the problem and no knowledge of the data\u003c/li\u003e\n\u003cli\u003euniform typically works well, but if you have any reason why it won\u0026rsquo;t be uniform (say coin flip), you should count accordingly such as making the distribution more normal with \\(Beta(1,1)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dirichlet-distribution\"\u003eDirichlet Distribution\u003c/h2\u003e\n\u003cp\u003eWe can generalize the \u003ca href=\"#bayesian-parameter-learning-on-binary-distributions\"\u003eBayesian Parameter Learning on Binary Distributions\u003c/a\u003e with the \u003ca href=\"#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor \\(n\\) parameters \\(\\theta_{1:n}\\) (\\(n-1\\) of which independent, because we know that \\(\\sum \\theta_{i} = 1\\)), where \\(\\theta_{j}\\) is 
the probability that the \\(j\\) th case of the categorical distribution happening.\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nDir(\\theta_{1:n} | \\alpha) = \\frac{\\Gamma(\\alpha_{0})}{\\prod_{i=1}^{n} \\Gamma(\\alpha_{i})} \\prod_{i=1}^{n} \\theta_{i}^{\\alpha_{i}-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{j} = prior + count\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(j \\geq 1\\), and\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{0} = prior + total_{}count\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby prior is your initial distribution. If its uniform, then all prior equals one.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e for each \\(\\theta_{i}\\) happening is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[\\theta_{i}] = \\frac{a_{i}}{\\sum_{j=1}^{n} \\alpha_{j}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, with \\(a_{i} \u0026gt; 1\\), the $i$th mode is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{a_{i}-1 }{\\sum_{j=1}^{n} a_{j}-n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"expectation--kbhexpectation-dot-md--of-a-distribution\"\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of a distribution\u003c/h2\u003e\n\u003cp\u003eFor \u003ca href=\"#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e and \u003ca href=\"#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e, the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of their distribution is simply their mean.\u003c/p\u003e\n\u003cp\u003eif you say want to know what the probability of \\(P(thing|D)\\), you can integrate over all \\(P(thing|\\theta)\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int^{1}_{0} P(thing|\\theta)P(\\theta)d\\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe first thing is just the 
actual value of \\(\\theta\\) (because \\(\\theta\\) is literally the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \\(thing\\) happening). The second thing is the probability of that \\(\\theta\\) actually happening.\u003c/p\u003e\n\u003cp\u003eThis, of course, just add up to the expected value of \\(\\theta\\), which is given above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha}{\\alpha + \\beta}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbaysian_parameter_learning/","tags":null,"title":"Baysian Parameter Learning"},{"categories":null,"contents":"belief is a probability distribution over your states.\n\u0026ldquo;an informational state decoupled from motivational states\u0026rdquo;\n\\begin{equation} b \\leftarrow update(b,a,o) \\end{equation}\nThere are two main flavours of how to represent beliefs\nparametric: belief distribution is fully represented over all states by a set of parameters (categorical, gaussian, etc.) non-parametric: belief is represented by a non-weighted list of possible locations of where you are; such as a Particle Filter To update parametric beliefs, we can use a discrete state filter (for categorical belief distributions) or a Kalman Filter (for linear Gaussian). To update non-parametric beliefs, we can use a Particle Filter.\nIf we have an parametric belief that\u0026rsquo;s not categorical nor linear Gaussian, we can use Extended Kalman Filter or Unscented Kalman Filter to approximate a belief update.\nbelief update To update belief, we need to initialize it somehow. 
If you have no knowledge of the situation, you want to diffuse your initial distributions because you don\u0026rsquo;t want to be overconfident For non-parametric situations, this may cause logistical problems; so, you may need to make many observations before you can be confident enough to seed a belief observation model \\(O(o|a,s\u0026rsquo;)\\) is a model for what observations we may get if we are in a particular state/action.\nerror model there is some model which is a probability distribution over the state given observation:\nlet orange \\(d\\) be state, the green would be the error model\nfilters filters are how beliefs are updated from observation. \u0026ldquo;we want to perform localization\u0026rdquo;\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e is a \u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distribution\u003c/a\u003e over your states.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;an informational state decoupled from motivational states\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\leftarrow update(b,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThere are two main flavours of how to represent beliefs\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eparametric\u003c/strong\u003e: belief distribution is fully represented over all states by a set of parameters (categorical, \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enon-parametric\u003c/strong\u003e: belief is represented by a non-weighted list of possible locations of where you are; such as a \u003ca href=\"/posts/kbhfilters/#particle-filter\"\u003eParticle Filter\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eTo update \u003cstrong\u003eparametric\u003c/strong\u003e beliefs, we can use a \u003ca href=\"/posts/kbhfilters/#discrete-state-filter\"\u003ediscrete state filter\u003c/a\u003e (for 
categorical belief distributions) or a \u003ca href=\"/posts/kbhfilters/#kalman-filter\"\u003eKalman Filter\u003c/a\u003e (for linear Gaussian). To update \u003cstrong\u003enon-parametric\u003c/strong\u003e beliefs, we can use a \u003ca href=\"/posts/kbhfilters/#particle-filter\"\u003eParticle Filter\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf we have an \u003cstrong\u003eparametric\u003c/strong\u003e belief that\u0026rsquo;s not categorical nor linear Gaussian, we can use \u003ca href=\"/posts/kbhfilters/#extended-kalman-filter--kbhfilters-dot-md\"\u003eExtended Kalman Filter\u003c/a\u003e or \u003ca href=\"/posts/kbhfilters/#unscented-id-6800e7a8-729c-4654-adcc-e0f877079b6a-kalman-filter\"\u003eUnscented Kalman Filter\u003c/a\u003e to approximate a belief update.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"belief-update\"\u003ebelief update\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTo \u003ca href=\"#belief-update\"\u003eupdate belief\u003c/a\u003e, we need to initialize it somehow.\n\u003cul\u003e\n\u003cli\u003eIf you have no knowledge of the situation, you want to \u003cstrong\u003ediffuse\u003c/strong\u003e your initial distributions because you don\u0026rsquo;t want to be overconfident\u003c/li\u003e\n\u003cli\u003eFor non-parametric situations, this may cause logistical problems; so, you may need to make many observations before you can be confident enough to seed a belief\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"observation-model\"\u003eobservation model\u003c/h2\u003e\n\u003cp\u003e\\(O(o|a,s\u0026rsquo;)\\) is a model for what observations we may get if we are in a particular state/action.\u003c/p\u003e\n\u003ch3 id=\"error-model\"\u003eerror model\u003c/h3\u003e\n\u003cp\u003ethere is some model which is a probability distribution over the state given observation:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-11-09_10-01-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003elet orange \\(d\\) be state, the green would be the \u003ca href=\"#error-model\"\u003eerror model\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"filters--kbhfilters-dot-md\"\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e are how \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es are updated from observation. \u0026ldquo;we want to perform localization\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbelief/","tags":null,"title":"belief"},{"categories":null,"contents":"Motivation Imperfect sensors in robot control: partial observations Manipulators face tradeoff between sensing + acting curse of dimensionality and curse of history.\nBelief-Space Planning Perhaps we should plan over all possible distributions of state space, making a belief-state MDP.\nBut: this is a nonlinear, stochastic dynamic. In fact: there maybe stochastic events that affects dynamics.\nBig problem:\ndim(belief) \u0026gt;\u0026gt; dim(state) dim(belief) \u0026gt;\u0026gt; dim(action) Belief iLQR \u0026ldquo;determinize and replan\u0026rdquo;: simplify the dynamics at each step, plan, take action, and replan\ntracks belief via observations simplifies belief state dynamics based on linear MLE When the dynamics is linear, you can use Linear-Quadratic Regulator to solve. This results in a worse policy but will give you a policy.\nPrevious Work \u0026ldquo;just solve most-likely state\u0026rdquo;: doesn\u0026rsquo;t take action to explore and understand the state. 
\u0026ldquo;belief roadmap\u0026rdquo;: not really planning in the belief space itself Approach Belief Update We use Baysian updates for the state probably updates:\n\\begin{equation} P(s_{t+1}) = \\eta P(o_{t+1}|s_{t+1}) \\int_{x} p(_{t+1}|x, a_{t}) P(s) \\end{equation}\nand then the actual beliefs are updated with Extended Kalman Filter.\nImportantly, the Extended Kalman Filter usually requires us to take an expectation of each observation O over all O; instead, we assume that the future states are uniform linearly distributed.\nBelief Update Cost Ideally, we want to lower covariance of the belief vectors in order to be more confident.\nfirst term: reduce large trajectories (verify) second: stabilization Replanning Strategy while b not at goal: # replan at where we are at now (b, a, mean_b) = create_initial_plan(b); for depth d: a_t = solve_lqr_for_plan_at_time(b, a, mean_b) o = environment.step(a_t) b = extended_kalman(b, a, o) if mean(b) \u0026gt; max_allowed_belief_uncertainty: break ","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eImperfect sensors in robot control: \u003cstrong\u003epartial observations\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eManipulators face tradeoff between \u003cstrong\u003esensing\u003c/strong\u003e + \u003cstrong\u003eacting\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcurse_of_dimensionality/\"\u003ecurse of dimensionality\u003c/a\u003e and curse of history.\u003c/p\u003e\n\u003ch2 id=\"belief-space-planning\"\u003eBelief-Space Planning\u003c/h2\u003e\n\u003cp\u003ePerhaps we should plan over all possible distributions of state space, making a \u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBut: this is a \u003cstrong\u003enonlinear\u003c/strong\u003e, \u003cstrong\u003estochastic\u003c/strong\u003e dynamic. 
In fact: there maybe stochastic events that affects dynamics.\u003c/p\u003e\n\u003cp\u003eBig problem:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edim(\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e) \u0026gt;\u0026gt; dim(\u003ca href=\"\"\u003estate\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003edim(\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e) \u0026gt;\u0026gt; dim(\u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"belief-ilqr--kbhilqr-dot-md\"\u003e\u003ca href=\"/posts/kbhilqr/\"\u003eBelief iLQR\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;determinize and replan\u0026rdquo;: simplify the dynamics at each step, plan, take action, and replan\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etracks belief via observations\u003c/li\u003e\n\u003cli\u003esimplifies belief state dynamics based on linear \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhen the dynamics is linear, you can use \u003ca href=\"/posts/kbhlinear_quadratic_regulator/\"\u003eLinear-Quadratic Regulator\u003c/a\u003e to solve. 
This results in a worse policy but will give you a policy.\u003c/p\u003e\n\u003ch3 id=\"previous-work\"\u003ePrevious Work\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;just solve most-likely state\u0026rdquo;: doesn\u0026rsquo;t take action to explore and understand the state.\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;belief roadmap\u0026rdquo;: not really planning in the belief space itself\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"approach\"\u003eApproach\u003c/h3\u003e\n\u003ch4 id=\"belief-update\"\u003eBelief Update\u003c/h4\u003e\n\u003cp\u003eWe use Baysian updates for the state probably updates:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(s_{t+1}) = \\eta P(o_{t+1}|s_{t+1}) \\int_{x} p(_{t+1}|x, a_{t}) P(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then the actual beliefs are updated with \u003ca href=\"/posts/kbhfilters/#extended\"\u003eExtended Kalman Filter\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eImportantly, the \u003ca href=\"/posts/kbhfilters/#extended\"\u003eExtended Kalman Filter\u003c/a\u003e usually requires us to take an expectation of each observation O over all O; instead, we assume that the future states are uniform linearly distributed.\u003c/p\u003e\n\u003ch4 id=\"belief-update-cost\"\u003eBelief Update Cost\u003c/h4\u003e\n\u003cp\u003eIdeally, we want to lower \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of the \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e vectors in order to be more confident.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_09-32-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003efirst term: reduce large trajectories (verify)\u003c/li\u003e\n\u003cli\u003esecond: stabilization\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"replanning-strategy\"\u003eReplanning Strategy\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_09-35-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003enot\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eat\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egoal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# replan at where we are at now\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emean_b\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecreate_initial_plan\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edepth\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea_t\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolve_lqr_for_plan_at_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emean_b\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eo\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eenvironment\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea_t\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eextended_kalman\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emax_allowed_belief_uncertainty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ebreak\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhilqr/","tags":null,"title":"Belief iLQR"},{"categories":null,"contents":"Our belief can be represented as vectors as the probability of us being in each state. If we have that, we can just use our belief vector as our state vector. Now use MDP any solving you\u0026rsquo;d like, keeping in mind that the reward is just the expected reward:\n\\begin{equation} \\mathbb{E}[R(b,a)] = \\sum_{s} R(s,a) b(s) \\end{equation}\nwe can estimate our transition between belief-states like so:\n\\begin{align} T(b\u0026rsquo;|b,a) \u0026amp;= P(b\u0026rsquo;|b,a) \\\\ \u0026amp;= \\sum_{o}^{} P(b\u0026rsquo;|b,a,o) P(o|b,a) \\\\ \u0026amp;= \\sum_{o}^{} P(b\u0026rsquo; = Update(b,a,o)) \\sum_{s\u0026rsquo;}^{}O(o|a,s\u0026rsquo;) \\sum_{s}^{}T(s\u0026rsquo;|s,a)b(s) \\end{align}\n\u0026ldquo;the probability of the next belief being \\(b\u0026rsquo;\\) is equal to how probable it is to get state b\u0026rsquo; from conditions b,a,o, times the probability of getting that particular observation.\u0026rdquo;.\nHowever, this expression is quite unwheldy if your state-space is large. 
Hence, we turn to a technique like conditional plans which foregos considering individual states altogether.\n","html":"\u003cp\u003eOur \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e can be represented as \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es as the probability of us being in each state. If we have that, we can just use our \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e vector as our state vector. Now use \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e any solving you\u0026rsquo;d like, keeping in mind that the reward is just the expected reward:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[R(b,a)] = \\sum_{s} R(s,a) b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can estimate our transition between belief-states like so:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nT(b\u0026rsquo;|b,a) \u0026amp;= P(b\u0026rsquo;|b,a) \\\\\n\u0026amp;= \\sum_{o}^{} P(b\u0026rsquo;|b,a,o) P(o|b,a) \\\\\n\u0026amp;= \\sum_{o}^{} P(b\u0026rsquo; = Update(b,a,o)) \\sum_{s\u0026rsquo;}^{}O(o|a,s\u0026rsquo;) \\sum_{s}^{}T(s\u0026rsquo;|s,a)b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the probability of the next belief being \\(b\u0026rsquo;\\) is equal to how probable it is to get state b\u0026rsquo; from conditions b,a,o, times the probability of getting that particular observation.\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eHowever, this expression is quite unwheldy if your state-space is large. Hence, we turn to a technique like \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es which foregos considering individual states altogether.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbelief_state_mdp/","tags":null,"title":"belief-state MDP"},{"categories":null,"contents":"Bending is what happens when you apply a transverse load to an object and it goes wooosh.\nThat\u0026rsquo;s cool. Now how does it work? 
see Euler-Bernoulli Theory\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbending/\"\u003eBending\u003c/a\u003e is what happens when you apply a \u003ca href=\"/posts/kbhtransverse_loaod/\"\u003etransverse load\u003c/a\u003e to an object and it goes wooosh.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_22-22-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThat\u0026rsquo;s cool. Now how does it work? see \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbending/","tags":null,"title":"bending"},{"categories":null,"contents":"Consider a case where there\u0026rsquo;s only a single binary outcome:\n\u0026ldquo;success\u0026rdquo;, with probability \\(p\\) \u0026ldquo;failure\u0026rdquo;, with probability \\(1-p\\) constituents \\begin{equation} X \\sim Bern(p) \\end{equation}\nrequirements the probability mass function:\n\\begin{equation} P(X=k) = \\begin{cases} p,\\ if\\ k=1\\\\ 1-p,\\ if\\ k=0\\\\ \\end{cases} \\end{equation}\nThis is sadly not Differentiable, which is sad for Maximum Likelihood Parameter Learning. 
Therefore, we write:\n\\begin{equation} P(X=k) = p^{k} (1-p)^{1-k} \\end{equation}\nWhich emulates the behavior of your function at \\(0\\) and \\(1\\) and we kinda don\u0026rsquo;t care any other place.\nWe can use it\nadditional information properties of Bernoulli distribution expected value: \\(p\\) variance: \\(p(1-p)\\) Bernoulli as indicator If there\u0026rsquo;s a series of event whose probability you are given, you can use a Bernoulli to model each one and add/subtract\nMLE for Bernouli \\begin{equation} p_{MLE} = \\frac{m}{n} \\end{equation}\n\\(m\\) is the number of events\n","html":"\u003cp\u003eConsider a case where there\u0026rsquo;s only a single \u003ca href=\"/posts/kbhbinary_number_system/#base-2\"\u003ebinary\u003c/a\u003e outcome:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;success\u0026rdquo;, with probability \\(p\\)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;failure\u0026rdquo;, with probability \\(1-p\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Bern(p)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) =\n\\begin{cases}\np,\\ if\\ k=1\\\\\n1-p,\\ if\\ k=0\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is sadly not \u003ca href=\"/posts/kbhuniqueness_and_existance/#differentiable\"\u003eDifferentiable\u003c/a\u003e, which is sad for \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e. 
Therefore, we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = p^{k} (1-p)^{1-k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich emulates the behavior of your function at \\(0\\) and \\(1\\) and we kinda don\u0026rsquo;t care any other place.\u003c/p\u003e\n\u003cp\u003eWe can use it\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-bernoulli-distribution--kbhbernoulli-random-variable-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/strong\u003e: \\(p\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/strong\u003e: \\(p(1-p)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bernoulli--kbhbernoulli-random-variable-dot-md--as-indicator\"\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli\u003c/a\u003e as indicator\u003c/h3\u003e\n\u003cp\u003eIf there\u0026rsquo;s a series of event whose probability you are given, you can use a \u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli\u003c/a\u003e to model each one and add/subtract\u003c/p\u003e\n\u003ch3 id=\"mle-for-bernouli\"\u003eMLE for Bernouli\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\np_{MLE} = \\frac{m}{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(m\\) is the number of events\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbernoulli_random_variable/","tags":null,"title":"Bernoulli distribution"},{"categories":null,"contents":"\\begin{equation} x^{2}y\u0026rsquo;\u0026rsquo; + xy\u0026rsquo; + (x^{2}-n^{2})y = 0 \\end{equation}\nthis function is very useful, they have no well defined elementary result.\n","html":"\u003cp\u003e\\begin{equation}\nx^{2}y\u0026rsquo;\u0026rsquo; 
+ xy\u0026rsquo; + (x^{2}-n^{2})y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis function is very useful, they have no well defined elementary result.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbessel_s_equation/","tags":null,"title":"Bessel's Equation"},{"categories":null,"contents":"best-action worst-state is a lower bound for alpha vectors:\n\\begin{equation} r_{baws} = \\max_{a} \\sum_{k=1}^{\\infty} \\gamma^{k-1} \\min_{s}R(s,a) \\end{equation}\nThe alpha vector corresponding to this system would be the same \\(r_{baws}\\) at each slot.\nwhich should give us the highest possible reward possible given we always pick the most optimal actions while being stuck in the worst state\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e is a lower bound for \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{baws} = \\max_{a} \\sum_{k=1}^{\\infty} \\gamma^{k-1} \\min_{s}R(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e corresponding to this system would be the same \\(r_{baws}\\) at each slot.\u003c/p\u003e\n\u003cp\u003ewhich should give us the highest possible reward possible given we always pick the most optimal actions while being stuck in the worst state\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhworst_possible_state/","tags":null,"title":"best-action worst-state"},{"categories":null,"contents":"Background recall AlphaZero\nSelection (UCB 1, or DTW, etc.) Expansion (generate possible belief notes) Simulation (if its a brand new node, Rollout, etc.) 
Backpropegation (backpropegate your values up) Key Idea Remove the need for heuristics for MCTS\u0026mdash;removing inductive bias\nApproach We keep the ol\u0026rsquo; neural network:\n\\begin{equation} f_{\\theta}(b_{t}) = (p_{t}, v_{t}) \\end{equation}\nPolicy Evaluation Do \\(n\\) episodes of MCTS, then use cross entropy to improve \\(f\\)\nGround truth policy Action Selection Uses Double Progressive Widening\nImportantly, no need to use a heuristic (or worst yet random Rollouts) for action selection.\nDifference vs. LetsDrive LetsDrive uses DESPOT BetaZero uses MCTS with belief states. ","html":"\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003erecall \u003ca href=\"/posts/kbhlm_alignment/#alphazero\"\u003eAlphaZero\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eSelection (\u003ca href=\"/posts/kbhdirected_exploration/#ucb-1\"\u003eUCB 1\u003c/a\u003e, or \u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDTW\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003eExpansion (generate possible belief notes)\u003c/li\u003e\n\u003cli\u003eSimulation (if its a brand new node, \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003eBackpropegation (backpropegate your values up)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"key-idea\"\u003eKey Idea\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eRemove the need for heuristics for \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e\u0026mdash;removing inductive bias\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003cp\u003eWe keep the ol\u0026rsquo; neural network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf_{\\theta}(b_{t}) = (p_{t}, v_{t})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"policy-evaluation\"\u003ePolicy Evaluation\u003c/h3\u003e\n\u003cp\u003eDo \\(n\\) episodes of \u003ca 
href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e, then use cross entropy to improve \\(f\\)\u003c/p\u003e\n\u003ch3 id=\"ground-truth-policy\"\u003eGround truth policy\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_10-05-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"action-selection\"\u003eAction Selection\u003c/h3\u003e\n\u003cp\u003eUses \u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDouble Progressive Widening\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eImportantly, \u003cstrong\u003eno need to use a heuristic (or worst yet random \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003es) for action selection\u003c/strong\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_10-11-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"difference-vs-dot-letsdrive--kbhletsdrive-dot-md\"\u003eDifference vs. \u003ca href=\"/posts/kbhletsdrive/\"\u003eLetsDrive\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhletsdrive/\"\u003eLetsDrive\u003c/a\u003e uses \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbetazero/\"\u003eBetaZero\u003c/a\u003e uses \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e with \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e states.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbetazero/","tags":null,"title":"BetaZero"},{"categories":null,"contents":"Big Data is a term for datasets large enough that traditional data processing applications are inadequate. i.e. when non-parallel processing is inadequate.\nThat is: \u0026ldquo;Big Data\u0026rdquo; is when Pandas and SQL is inadequate. To handle big data, its very difficult to sequentially go through and process stuff. 
To make it work, you usually have to perform parallel processing under the hood.\nRules of Thumb of Datasets 1000 Genomes (AWS, 260TB) CommonCraw - the entire web (On PSC! 300-800 TB) GDELT - https://www.gdeltproject.org/ a dataset that contains everything that\u0026rsquo;s happening in the world right now in terms of news (small!! 2.5 TB per year; however, there is a LOT of fields: 250 Million fields) Evolution of Big Data Good Ol\u0026rsquo; SQL schemas are too set in stone (\u0026ldquo;not a fit for Agile development\u0026rdquo; \u0026mdash; a research scientist) SQL sharding, when working correctly, is KV Stores And this is why we gave up and made Redis (or Amazon DynamoDB, Riak, Memcached) which keeps only Key/Value information. We just make the key really really complicated to support structures: GET cart:joe:15~4...\nBut the problem with key-value stores isn\u0026rsquo;t good at indexing at all: if we want like to get all of Joe\u0026rsquo;s cart, you can\u0026rsquo;t just GET cart:joe because you can\u0026rsquo;t compare partial hashes.\nDocument Stores [And something something mongo\u0026rsquo;s document stores but something its bad about those too but CMU can\u0026rsquo;t do tech and the speakers died]\nWide Column Stores Google BigTable type thing\nJust have a wide column of arbitrary width with no schema:\nCart Joe 15~4 Cart Robert 15~3 More Things! Chicken 15~2 Etc. No idea how you query this but google does it and CMU\u0026rsquo;s speakers died again good on them.\nGraphs! Neo4j: don\u0026rsquo;t store triples, and have better schemes for encoding. You can then use nice graph query schemes.\nHard to visualize VERY hard to serialize Queries are hard And so: And sooooo. Introducing Spark. 
We don\u0026rsquo;t want to do parallel programming, we want to use traditional databases, so we just make someone else do it on an adapter and just query boring databases with lots of parallel connections.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbig_data/\"\u003eBig Data\u003c/a\u003e is a term for datasets large enough that traditional data processing applications are inadequate. i.e. when non-parallel processing is inadequate.\u003c/p\u003e\n\u003cp\u003eThat is: \u0026ldquo;\u003ca href=\"/posts/kbhbig_data/\"\u003eBig Data\u003c/a\u003e\u0026rdquo; is when Pandas and SQL is inadequate. To handle big data, its very difficult to sequentially go through and process stuff. To make it work, you usually have to perform parallel processing under the hood.\u003c/p\u003e\n\u003ch2 id=\"rules-of-thumb-of-datasets\"\u003eRules of Thumb of Datasets\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e1000 Genomes (AWS, 260TB)\u003c/li\u003e\n\u003cli\u003eCommonCraw - the entire web (On PSC! 300-800 TB)\u003c/li\u003e\n\u003cli\u003eGDELT - \u003ca href=\"https://www.gdeltproject.org/\"\u003ehttps://www.gdeltproject.org/\u003c/a\u003e a dataset that contains everything that\u0026rsquo;s happening in the world right now in terms of news (small!! 
2.5 TB per year; however, there is a LOT of fields: 250 Million fields)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"evolution-of-big-data\"\u003eEvolution of Big Data\u003c/h2\u003e\n\u003ch3 id=\"good-ol-sql\"\u003eGood Ol\u0026rsquo; SQL\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eschemas are too set in stone (\u0026ldquo;not a fit for \u003ca href=\"/posts/kbhsoftware_development_methodologies/#agile\"\u003eAgile\u003c/a\u003e development\u0026rdquo; \u0026mdash; a research scientist)\u003c/li\u003e\n\u003cli\u003eSQL sharding, when working correctly, is\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"kv-stores\"\u003eKV Stores\u003c/h3\u003e\n\u003cp\u003eAnd this is why we gave up and made Redis (or Amazon DynamoDB, Riak, Memcached) which keeps only Key/Value information. We just make the key really really complicated to support structures: \u003ccode\u003eGET cart:joe:15~4...\u003c/code\u003e\u003c/p\u003e\n\u003cp\u003eBut the problem with key-value stores isn\u0026rsquo;t good at indexing at all: if we want like to get all of Joe\u0026rsquo;s cart, you can\u0026rsquo;t just \u003ccode\u003eGET cart:joe\u003c/code\u003e because you can\u0026rsquo;t compare partial hashes.\u003c/p\u003e\n\u003ch3 id=\"document-stores\"\u003eDocument Stores\u003c/h3\u003e\n\u003cp\u003e[And something something mongo\u0026rsquo;s document stores but something its bad about those too but CMU can\u0026rsquo;t do tech and the speakers died]\u003c/p\u003e\n\u003ch3 id=\"wide-column-stores\"\u003eWide Column Stores\u003c/h3\u003e\n\u003cp\u003eGoogle BigTable type thing\u003c/p\u003e\n\u003cp\u003eJust have a wide column of arbitrary width with no 
schema:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eCart\u003c/th\u003e\n\u003cth\u003eJoe\u003c/th\u003e\n\u003cth\u003e15~4\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCart\u003c/td\u003e\n\u003ctd\u003eRobert\u003c/td\u003e\n\u003ctd\u003e15~3\u003c/td\u003e\n\u003ctd\u003eMore Things!\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003eChicken\u003c/td\u003e\n\u003ctd\u003e15~2\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eEtc. No idea how you query this but google does it and CMU\u0026rsquo;s speakers died again good on them.\u003c/p\u003e\n\u003ch3 id=\"graphs\"\u003eGraphs!\u003c/h3\u003e\n\u003cp\u003eNeo4j: don\u0026rsquo;t store triples, and have better schemes for encoding. You can then use nice graph query schemes.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eHard to visualize\u003c/li\u003e\n\u003cli\u003eVERY hard to serialize\u003c/li\u003e\n\u003cli\u003eQueries are hard\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"and-so\"\u003eAnd so:\u003c/h3\u003e\n\u003cp\u003eAnd sooooo. Introducing \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e. 
We don\u0026rsquo;t want to do parallel programming, we want to use traditional databases, so we just make someone else do it on an adapter and just query boring databases with lots of parallel connections.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbig_data/","tags":null,"title":"Big Data"},{"categories":null,"contents":"A binary operation means that you are taking two things in and you are getting one thing out; for instance:\n\\begin{equation} f: (\\mathbb{F},\\mathbb{F}) \\to \\mathbb{F} \\end{equation}\nThis is also closed, but binary operations dons\u0026rsquo;t have to be.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhbinary_operation/\"\u003ebinary operation\u003c/a\u003e means that you are taking two things in and you are getting one thing out; for instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf: (\\mathbb{F},\\mathbb{F}) \\to \\mathbb{F}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is also \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e, but \u003ca href=\"/posts/kbhbinary_operation/\"\u003ebinary operation\u003c/a\u003es dons\u0026rsquo;t have to be.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbinary_operation/","tags":null,"title":"binary operation"},{"categories":null,"contents":"A binomial distribution is a typo of distribution whose contents are:\nBinary Independent Fixed number Same probability: \u0026ldquo;That means: WITH REPLACEMENT\u0026rdquo; Think: \u0026ldquo;what\u0026rsquo;s the probability of \\(n\\) coin flips getting \\(k\\) heads given the head\u0026rsquo;s probability is \\(p\\)\u0026rdquo;.\nconstituents We write:\n\\begin{equation} X \\sim Bin(n,p) \\end{equation}\nwhere, \\(n\\) is the number of trials, \\(p\\) is the probability of success on each trial.\nrequirements Here is the probability mass function:\n\\begin{equation} P(X=k) = {n \\choose k} p^{k}(1-p)^{n-k} \\end{equation}\nadditional information properties of binomial distribution expected value: 
\\(np\\) variance: \\(np(1-p)\\) deriving the expectation The expectation of the binomial distribution is derivable from the fact:\n\\begin{equation} X = \\sum_{i=1}^{n} Y_{i} \\end{equation}\nwhere,\n\\begin{equation} \\begin{cases} X \\sim Bin(n,p) \\\\ Y_{i} \\sim Bern(p) \\end{cases} \\end{equation}\nNow, recall that expected value is linear.\nTherefore, we can write that:\napproximating binomial normal distribution approximation: \\(n \u0026gt; 20\\), variance large \\((np(1-p)) \u0026gt; 10\\), absolute independence; beware of continuity correction poisson distribution approximation: \\(n \u0026gt; 20\\), p small \\(p \u0026lt; 0.05\\) adding binomial distribution For \\(X\\) and \\(Y\\) independent binomial distributions, with equivalent probability:\n\\begin{equation} X \\sim Bin(a, p), Y \\sim Bin(b, p) \\end{equation}\nThen:\n\\begin{equation} X+Y \\sim Bin(a+b, p) \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e is a typo of distribution whose contents are:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBinary\u003c/li\u003e\n\u003cli\u003eIndependent\u003c/li\u003e\n\u003cli\u003eFixed number\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eSame probability\u003c/strong\u003e: \u0026ldquo;That means: WITH REPLACEMENT\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThink: \u0026ldquo;what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \\(n\\) coin flips getting \\(k\\) heads given the head\u0026rsquo;s probability is \\(p\\)\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eWe write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Bin(n,p)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n\\) is the number of trials, \\(p\\) is the probability of success on each trial.\u003c/p\u003e\n\u003ch2 
id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eHere is the \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = {n \\choose k} p^{k}(1-p)^{n-k}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-binomial-distribution--kbhbinomial-distribution-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/strong\u003e: \\(np\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/strong\u003e: \\(np(1-p)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"deriving-the-expectation--kbhexpectation-dot-md\"\u003ederiving the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe expectation of the \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e is derivable from the fact:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = \\sum_{i=1}^{n} Y_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nX \\sim Bin(n,p) \\\\\nY_{i} \\sim Bern(p)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall that \u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e is linear.\u003c/p\u003e\n\u003cp\u003eTherefore, we can write that:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-11_16-46-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"approximating-binomial\"\u003eapproximating binomial\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal 
distribution\u003c/a\u003e approximation: \\(n \u0026gt; 20\\), variance large \\((np(1-p)) \u0026gt; 10\\), absolute independence; beware of \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e approximation: \\(n \u0026gt; 20\\), p small \\(p \u0026lt; 0.05\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"adding-binomial-distribution--kbhbinomial-distribution-dot-md\"\u003eadding \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \\(X\\) and \\(Y\\) independent \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003es, with \u003cstrong\u003eequivalent\u003c/strong\u003e probability:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Bin(a, p), Y \\sim Bin(b, p)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX+Y \\sim Bin(a+b, p)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbinomial_distribution/","tags":null,"title":"binomial distribution"},{"categories":null,"contents":"bioinformatics is a field of biology that deals with biology information. Blending CS, Data, Strategies and of course biology into one thing.\nFirst, let\u0026rsquo;s review genetic information\npossible use for bioinformatics Find the start/stop codons of known gene, and determine the gene and protein length ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbioinformatics/\"\u003ebioinformatics\u003c/a\u003e is a field of \u003ca href=\"\"\u003ebiology\u003c/a\u003e that deals with \u003ca href=\"\"\u003ebiology\u003c/a\u003e information. 
Blending CS, Data, Strategies and of course \u003ca href=\"\"\u003ebiology\u003c/a\u003e into one thing.\u003c/p\u003e\n\u003cp\u003eFirst, let\u0026rsquo;s review \u003ca href=\"\"\u003egenetic information\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"possible-use-for-bioinformatics--kbhbioinformatics-dot-md\"\u003epossible use for \u003ca href=\"/posts/kbhbioinformatics/\"\u003ebioinformatics\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFind the start/stop codons of known gene, and determine the gene and protein length\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbioinformatics/","tags":null,"title":"bioinformatics"},{"categories":null,"contents":"bitmasking is a very helpful to create bit vectors.\n| with a 1-mask is useful to turning things on \u0026amp; with a 0-mask is useful to turning things off (bitvector \u0026amp; not(1-mask)) | is useful for set unions \u0026amp; is useful for intersections of bits ^ is useful for flipping isolated bits: 0 is bit preserving, 1 is bit negating ~ is useful for flipping all bits ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbitmask/\"\u003ebitmask\u003c/a\u003eing is a very helpful to create \u003ca href=\"/posts/kbhbinary_number_system/#bit\"\u003ebit\u003c/a\u003e \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e| with a 1-mask is useful to turning things on\u003c/li\u003e\n\u003cli\u003e\u0026amp; with a 0-mask is useful to turning things off (bitvector \u0026amp; not(1-mask))\u003c/li\u003e\n\u003cli\u003e| is useful for set unions\u003c/li\u003e\n\u003cli\u003e\u0026amp; is useful for intersections of bits\u003c/li\u003e\n\u003cli\u003e^ is useful for flipping isolated bits: 0 is bit preserving, 1 is bit negating\u003c/li\u003e\n\u003cli\u003e~ is useful for flipping all 
bits\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbitmask/","tags":null,"title":"bitmask"},{"categories":null,"contents":"\u0026amp; | ~ ^ \u0026lt;\u0026lt; \u0026gt;\u0026gt;\n\u0026amp; Bitwise level AND\n| Bitwise level OR\n~ Unary bitwise negation\n^ Unary XOR\n\u0026lt;\u0026lt; Shift the number to the left. Fill unused slots with 0.\n\u0026gt;\u0026gt; Shift the number to the right\nfor signed values, we perform an arithmetic right shift: fill the unused slots with the most significant bit from before (\u0026ldquo;fill with 1s\u0026rdquo;) for unsigned values, we perform a logical right shift ","html":"\u003cp\u003e\u0026amp; | ~ ^ \u0026lt;\u0026lt; \u0026gt;\u0026gt;\u003c/p\u003e\n\u003ch2 id=\"and\"\u003e\u0026amp;\u003c/h2\u003e\n\u003cp\u003eBitwise level AND\u003c/p\u003e\n\u003ch2 id=\"b99834\"\u003e|\u003c/h2\u003e\n\u003cp\u003eBitwise level OR\u003c/p\u003e\n\u003ch2 id=\"4c761f\"\u003e~\u003c/h2\u003e\n\u003cp\u003eUnary bitwise negation\u003c/p\u003e\n\u003ch2 id=\"7e6a2a\"\u003e^\u003c/h2\u003e\n\u003cp\u003eUnary XOR\u003c/p\u003e\n\u003ch2 id=\"9c1628\"\u003e\u0026lt;\u0026lt;\u003c/h2\u003e\n\u003cp\u003eShift the number to the left. 
Fill unused slots with 0.\u003c/p\u003e\n\u003ch2 id=\"22a1da\"\u003e\u0026gt;\u0026gt;\u003c/h2\u003e\n\u003cp\u003eShift the number to the right\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003cstrong\u003esigned\u003c/strong\u003e values, we perform an \u003ca href=\"#22a1da\"\u003earithmetic right shift\u003c/a\u003e: fill the unused slots with the most significant bit from before (\u0026ldquo;fill with 1s\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003efor \u003cstrong\u003eunsigned\u003c/strong\u003e values, we perform a \u003ca href=\"#22a1da\"\u003elogical right shift\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbitwise_operations/","tags":null,"title":"bitwise operations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhblack_thursday/","tags":null,"title":"Black Thursday"},{"categories":null,"contents":"People have been trading options for a very long time, but there wasn\u0026rsquo;t a good way of quantify the value of an option.\nThere are two main types of uses for Black-Scholes Formula\nyou can use all variables and determine the value of options you can get the price of options being traded, then compute the $σ$\u0026mdash;the market\u0026rsquo;s estimation of volatility (how much they want the insurance policy that is the options) constituents \\(S_0\\): stock price \\(X\\): exercise price \\(r\\): risk-free interest rate \\(T\\): maturity time \\(\\sigma\\): standard-deviation of log returns\u0026mdash;\u0026ldquo;volatility\u0026rdquo; Black-Scholes Formula for an European \u0026ldquo;Call\u0026rdquo; Option Here is the scary formula:\n\\begin{equation} C_0 = S_0 \\mathcal{N}(d_{1})-Xe^{-rT}\\mathcal{N}(d_{2}) \\end{equation}\nwhere, the variables are defined above, and:\n\\begin{equation} \\begin{cases} d_1 = \\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r+\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}}\\\\ d_2 = 
\\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r-\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}} \\end{cases} \\end{equation}\nand \\(\\mathcal{N}\\) is the area at point under the standard normal distribution.\noh god So let\u0026rsquo;s dissect this a little.\nThe first term:\n\\begin{equation} S_0\\mathcal{N}(d_{1}) \\end{equation}\nis the \u0026ldquo;current\u0026rdquo; stock price, weighted by the probability of you being willing to exercise it.\nand the second term:\n\\begin{equation} Xe^{-rT}\\mathcal{N}(d_{2}) \\end{equation}\nis the \u0026ldquo;price\u0026rdquo; of the exercise (what you need to pay, if exercising the option, to get the stock.)\nThis strike price \\(X\\) is discounted by \\(e^{-rT}\\), which is like a time machine that rolls that strike price back to what it would be today (so that it\u0026rsquo;s comparable to \\(S_0\\).) As \\(r\\) is the risk free interest rate, we are essentially saying: \u0026ldquo;in a perfectly functional market, over the next \\(T\\) days, how will our asset grow?\u0026rdquo;\nThis is again weighted by the probability of you being willing to exercise it\u0026mdash;through modified slightly differently.\nTherefore, subtracting the two terms, we get the actual value of the option\u0026mdash;the money you would gain by exercising it, then immediately selling the stock, weighted by how willing you are actually to excercise it.\nLet\u0026rsquo;s now take a look at those \u0026ldquo;probabilities\u0026rdquo; \\(d_{\\{1,2\\}}\\). These factors essentially provide quantification of the statement that: \u0026ldquo;the higher our current price is ABOVE the excrecise price\u0026mdash;accounting for volatility\u0026mdash;the more willing we are to excercise the option.\u0026rdquo;\nNote then, \\(\\ln\\qty(\\frac{S_{0}}{X})\\) form the top of both expressions. 
That essentially measures how high the current price \\(S_0\\) deviates from the strike price \\(X\\).\nNow, as volatility \\(\\sigma\\) increases, \\(d_1\\) increases and \\(d_2\\) decreases (as \\(\\frac{\\sigma^{2}}{2}\\) is being added in \\(d_1\\) and subtracted in \\(d_2\\)). This is because, as volatility increase, you are less certain about what the actual \u0026ldquo;pay\u0026rdquo; (price) is, but your option\u0026mdash;given its constant strike price\u0026mdash;provides the certainty in gain.\n","html":"\u003cp\u003ePeople have been trading \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es for a very long time, but there wasn\u0026rsquo;t a good way of quantify the value of an \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThere are two main types of uses for \u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eyou can use all variables and determine the value of \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003eyou can get the price of \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es being traded, then compute the $σ$\u0026mdash;the market\u0026rsquo;s estimation of volatility (how much they want the \u003ca href=\"/posts/kbhoptions/#analyze-options-as-insurance\"\u003einsurance policy\u003c/a\u003e that is the options)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S_0\\): stock price\u003c/li\u003e\n\u003cli\u003e\\(X\\): exercise price\u003c/li\u003e\n\u003cli\u003e\\(r\\): risk-free interest rate\u003c/li\u003e\n\u003cli\u003e\\(T\\): maturity time\u003c/li\u003e\n\u003cli\u003e\\(\\sigma\\): standard-deviation of log \u003ca 
href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003es\u0026mdash;\u0026ldquo;volatility\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"black-scholes-formula--kbhblack-scholes-formula-dot-md--for-an-european-call-option--kbhoptions-dot-md\"\u003e\u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e for an \u003ca href=\"/posts/kbhoptions/#american-vs-european-options\"\u003eEuropean \u0026ldquo;Call\u0026rdquo; Option\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eHere is the scary formula:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_0 = S_0 \\mathcal{N}(d_{1})-Xe^{-rT}\\mathcal{N}(d_{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, the variables are defined above, and:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_1 = \\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r+\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}}\\\\\nd_2 = \\frac{\\ln\\qty(\\frac{S_0}{X})+\\qty(r-\\frac{\\sigma^{2}}{2})T}{\\sigma \\sqrt{t}}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(\\mathcal{N}\\) is the area at point under the standard \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"oh-god\"\u003eoh god\u003c/h3\u003e\n\u003cp\u003eSo let\u0026rsquo;s dissect this a little.\u003c/p\u003e\n\u003cp\u003eThe first term:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS_0\\mathcal{N}(d_{1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the \u0026ldquo;current\u0026rdquo; stock price, weighted by the probability of you being willing to exercise it.\u003c/p\u003e\n\u003cp\u003eand the second term:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nXe^{-rT}\\mathcal{N}(d_{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the \u0026ldquo;price\u0026rdquo; of the exercise (what you need to pay, if exercising the option, to get the stock.)\u003c/p\u003e\n\u003cp\u003eThis strike price \\(X\\) is discounted by \\(e^{-rT}\\), 
which is like a time machine that rolls that strike price back to what it would be today (so that it\u0026rsquo;s comparable to \\(S_0\\).) As \\(r\\) is the risk free interest rate, we are essentially saying: \u0026ldquo;in a perfectly functional market, over the next \\(T\\) days, how will our asset grow?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThis is again weighted by the probability of you being willing to exercise it\u0026mdash;through modified slightly differently.\u003c/p\u003e\n\u003cp\u003eTherefore, subtracting the two terms, we get the actual value of the option\u0026mdash;the money you would gain by exercising it, then immediately selling the stock, weighted by how willing you are actually to excercise it.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now take a look at those \u0026ldquo;probabilities\u0026rdquo; \\(d_{\\{1,2\\}}\\). These factors essentially provide quantification of the statement that: \u0026ldquo;the higher our current price is ABOVE the excrecise price\u0026mdash;accounting for volatility\u0026mdash;the more willing we are to excercise the option.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eNote then, \\(\\ln\\qty(\\frac{S_{0}}{X})\\) form the top of both expressions. That essentially measures how high the current price \\(S_0\\) deviates from the strike price \\(X\\).\u003c/p\u003e\n\u003cp\u003eNow, as volatility \\(\\sigma\\) increases, \\(d_1\\) increases and \\(d_2\\) decreases (as \\(\\frac{\\sigma^{2}}{2}\\) is being \u003cem\u003eadded\u003c/em\u003e in \\(d_1\\) and \u003cem\u003esubtracted\u003c/em\u003e in \\(d_2\\)). 
This is because, as volatility increase, you are \u003cem\u003eless\u003c/em\u003e certain about what the actual \u0026ldquo;pay\u0026rdquo; (price) is, but your option\u0026mdash;given its constant strike price\u0026mdash;provides the certainty in gain.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhblack_scholes_formula/","tags":null,"title":"Black-Scholes Formula"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhblb/","tags":null,"title":"BLB"},{"categories":null,"contents":"To evaluate the lower bound:\n\\begin{equation} \\alpha_{a}^{k+1} (s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\alpha_{a}^{k}(s\u0026rsquo;) \\end{equation}\nwe are essentially sticking with an action and do conditional plan evaluation of a policy that do one action into the future\n","html":"\u003cp\u003eTo evaluate the lower bound:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{a}^{k+1} (s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\alpha_{a}^{k}(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe are essentially sticking with an action and do \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e of a policy that do one action into the future\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhblind_lower_bound/","tags":null,"title":"blind lower bound"},{"categories":null,"contents":"The bloch sphere is a sphere encoding all possible probabilities of a qubit shared between two axis, \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\).\nYou will notice that its a unit sphere, in which any magnitude has size \\(1\\). 
Hence, probabilities would result as projected onto each of the directions.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhbloch_sphere/\"\u003ebloch sphere\u003c/a\u003e is a sphere encoding all possible probabilities of a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e shared between two axis, \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-03-19_21-56-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eYou will notice that its a unit sphere, in which any magnitude has size \\(1\\). Hence, probabilities would result as projected onto each of the directions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbloch_sphere/","tags":null,"title":"bloch sphere"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhbluest_eye/","tags":null,"title":"Bluest Eye"},{"categories":null,"contents":"General Information Due Date Topic Important Documents \u0026lt;2022-05-06 Fri\u0026gt; Bluest Eye Essay Bluest Eye Prompt Beauty: discuss Morrison’s treatment of the idea of beauty. From what, where, or whom does this notion come? What effect does it have on the way one perceives the world? On the way others perceive an individual?\nHow does beauty (the acquisition of it, the lack of it, or the presence of it) determine one’s fate in America? Is beauty a necessarily fixed entity or does it fluctuate at the whim of society? How much or to what extent does one’s perception of beauty contribute to one’s sense of self-worth?\nQuotes Bin Beauty Claudia: I had only one desire: to dismember it. To see of what it was made, to discover the dearness, to find the beauty, the desirability that had escaped me, but apparently only me. Pecola: Thrown, in this way, into the binding conviction that only a miracle could relieve her, she would never know her beauty. She would see only what there was to see: the eyes of other people. Maureen: Maureen agreed. 
\u0026ldquo;Ooooo yes. My mother told me that a girl named Audrey, she went to the beauty parlor where we lived before, and asked the lady to fix her hair like Hedy Lamarr’s, and the lady said, \u0026lsquo;Yeah, when you grow some hair like Hedy Lamarr’s.\u0026rsquo;\u0026rdquo; She laughed long and sweet. (post pecola beat-up) Pauline (Polly): Along with the idea of romantic love, she was introduced to another—physical beauty. In equating physical beauty with virtue, she stripped her mind, bound it, and collected self-contempt by the heap. Pauline (Polly) cont\u0026rsquo;d: She was never able, after her education in the movies, to look at a face and not assign it some category in the scale of absolute beauty, and the scale was one she absorbed in full from the silver screen. Pauline (Polly): More and more she neglected her house, her children, her man\u0026mdash;\u0026hellip;the dark edges that made the daily life with the Fishers lighter, more delicate, more lovely \u0026hellip; Here she found beauty, order, cleanliness, and praise. Pauline (Polly): Pauline kept this order, this beauty, for herself, a private world, and never introduced it into her storefront, or to her children. Cholly after Aunt Death: The funeral banquet was a peal of joy after the thunderous beauty of the funeral. It was like a street tragedy with spontaneity tucked softly into the corners of a highly formal structure. Soaphead Church: He thought it was at once the most fantastic and the most logical petition he had ever received. Here was an ugly little girl asking for beauty. A surge of love and understanding swept through him, but was quickly replaced by anger. Claudia (reflecting on Pecola): All of our waste which we dumped on her and which she absorbed. And all of our beauty, which was hers first and which she gave to us. Eyes a: Her eyes are full of sorrow. 
She sings to me: \u0026ldquo;When the deep purple falls over sleepy garden walls, someone thinks of me\u0026hellip;.\u0026rdquo; ** Sub-Claim Synthesis There\u0026rsquo;s always the UCLA Writing Lab.\n","html":"\u003ch2 id=\"general-information\"\u003eGeneral Information\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDue Date\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003eImportant Documents\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-06 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eBluest Eye Essay\u003c/td\u003e\n\u003ctd\u003eBluest Eye\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"prompt\"\u003ePrompt\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eBeauty\u003c/strong\u003e\u003c/strong\u003e: discuss Morrison’s treatment of the idea of beauty. From what, where, or whom does this notion come? What effect does it have on the way one perceives the world? On the way others perceive an individual?\u003c/p\u003e\n\u003cp\u003eHow does beauty (the acquisition of it, the lack of it, or the presence of it) determine one’s fate in America? Is beauty a necessarily fixed entity or does it fluctuate at the whim of society? How much or to what extent does one’s perception of beauty contribute to one’s sense of self-worth?\u003c/p\u003e\n\u003ch2 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h2\u003e\n\u003ch3 id=\"beauty\"\u003eBeauty\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eClaudia\u003c/strong\u003e\u003c/strong\u003e: I had only one desire: to dismember it. 
To see of what it was made, to discover the dearness, to find the beauty, the desirability that had escaped me, but apparently only me.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePecola\u003c/strong\u003e\u003c/strong\u003e: Thrown, in this way, into the binding conviction that only a miracle could relieve her, she would never know her beauty. She would see only what there was to see: the eyes of other people.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eMaureen\u003c/strong\u003e\u003c/strong\u003e: Maureen agreed. \u0026ldquo;Ooooo yes. My mother told me that a girl named Audrey, she went to the beauty parlor where we lived before, and asked the lady to fix her hair like Hedy Lamarr’s, and the lady said, \u0026lsquo;Yeah, when you grow some hair like Hedy Lamarr’s.\u0026rsquo;\u0026rdquo; She laughed long and sweet. (post pecola beat-up)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly): Along with the idea of romantic love, she was introduced to another—physical beauty. 
In equating physical beauty with virtue, she stripped her mind, bound it, and collected self-contempt by the heap.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly) cont\u0026rsquo;d: She was never able, after her education in the movies, to look at a face and not assign it some category in the scale of absolute beauty, and the scale was one she absorbed in full from the silver screen.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly): More and more she neglected her house, her children, her man\u0026mdash;\u0026hellip;the dark edges that made the daily life with the Fishers lighter, more delicate, more lovely \u0026hellip; Here she found beauty, order, cleanliness, and praise.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ePauline\u003c/strong\u003e\u003c/strong\u003e (Polly): Pauline kept this order, this beauty, for herself, a private world, and never introduced it into her storefront, or to her children.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eCholly\u003c/strong\u003e\u003c/strong\u003e after Aunt Death: The funeral banquet was a peal of joy after the thunderous beauty of the funeral. It was like a street tragedy with spontaneity tucked softly into the corners of a highly formal structure.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eSoaphead Church\u003c/strong\u003e\u003c/strong\u003e: He thought it was at once the most fantastic and the most logical petition he had ever received. Here was an ugly little girl asking for beauty. A surge of love and understanding swept through him, but was quickly replaced by anger.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eClaudia\u003c/strong\u003e\u003c/strong\u003e (reflecting on Pecola): All of our waste which we dumped on her and which she absorbed. 
And all of our beauty, which was hers first and which she gave to us.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"eyes\"\u003eEyes\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ea\u003c/strong\u003e\u003c/strong\u003e: Her eyes are full of sorrow. She sings to me: \u0026ldquo;When the deep purple falls over sleepy garden walls, someone thinks of me\u0026hellip;.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e**\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"sub-claim-synthesis\"\u003eSub-Claim Synthesis\u003c/h2\u003e\n\u003chr\u003e\n\u003cp\u003eThere\u0026rsquo;s always the \u003ca href=\"https://wp.ucla.edu/wp-content/uploads/2016/01/UWC_handouts_What-How-So-What-Thesis-revised-5-4-15-RZ.pdf\"\u003eUCLA Writing Lab\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhenglish_bluest_eye/","tags":null,"title":"Bluest Eye Essay Planning"},{"categories":null,"contents":"A secondary source comparison activity for the Bluest Eye\nTony Morrison\u0026rsquo;s Rootedness That, if an action were to be done as in a community, its regarded as safer It is a very personal grief and a personal statement done among people you trust. Done within the context of the community, therefore safe.\nPublic (white-washed) and private image, by necessesity, is separated it\u0026rsquo;s just important that it be private. And then, whatever I do that is public can be done seriously.\nthat people are only defined by the uniqueness they have out of the tribe My single solitary and individual Jifejs like the lives of the tribe; it differs in these specific ways, but it is a balanced life because it is both solitary and representative\nPurpose of the novel is enlightening as well as an art form It should have something in it that enlightens; something in it that opens the door arid points the way. 
Something in it that suggests what the conflicts are, what the problems are.\nThe Novel is a middle class art form The history of the novel as a form began when there was a new class, a middle class, to read it; it was an art form that they needed.\nThat there is already a form of artistry for the lower class, but not middle class The lower classes didn\u0026rsquo;t need novels at that time because they had an art form already they had songs and dances, and ceremony, and gossip, and celebrations.\nnovels of manners tell people of a different world we call 1t the novel of manners, an art form designed to tell peole something they didn\u0026rsquo;t know.\nPortrays quintessential forms of connection How to get married. What a good living was.\nThe African Americans became unexclusive For a long time, the art form that was healing for Black people was music. That music is no longer exclusively ours; we don\u0026rsquo;t have exclusive rights to it.\nThat the story of the novel is told where the reader constructs the story together To construct the dialogue so that it is heard. So that there are no adverbs attached to them: \u0026ldquo;loudly,\u0026rdquo; \u0026ldquo;softly,\u0026rdquo; \u0026ldquo;he said menacingly.'\nThat the artistry is not described as Black but inherently black Black, because it uses the characteristics of Black art\n","html":"\u003cp\u003eA \u003ca href=\"\"\u003esecondary source\u003c/a\u003e comparison activity for the \u003ca href=\"/posts/kbhbluest_eye/\"\u003eBluest Eye\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"tony-morrison-s-rootedness\"\u003eTony Morrison\u0026rsquo;s Rootedness\u003c/h2\u003e\n\u003ch3 id=\"that-if-an-action-were-to-be-done-as-in-a-community-its-regarded-as-safer\"\u003eThat, if an action were to be done as in a community, its regarded as safer\u003c/h3\u003e\n\u003cp\u003eIt is a very personal grief and a personal statement done among people you trust. 
Done within the context of the community, therefore safe.\u003c/p\u003e\n\u003ch3 id=\"public--white-washed--and-private-image-by-necessesity-is-separated\"\u003ePublic (white-washed) and private image, by necessesity, is separated\u003c/h3\u003e\n\u003cp\u003eit\u0026rsquo;s just important that it be private. And then, whatever I do that is public can be done seriously.\u003c/p\u003e\n\u003ch3 id=\"that-people-are-only-defined-by-the-uniqueness-they-have-out-of-the-tribe\"\u003ethat people are only defined by the uniqueness they have out of the tribe\u003c/h3\u003e\n\u003cp\u003eMy single solitary and individual Jifejs like the lives of the tribe; it differs in these specific ways, but it is a balanced life because it is both solitary and representative\u003c/p\u003e\n\u003ch3 id=\"purpose-of-the-novel-is-enlightening-as-well-as-an-art-form\"\u003ePurpose of the novel is enlightening as well as an art form\u003c/h3\u003e\n\u003cp\u003eIt should have something in it that enlightens; something in it that opens the door arid points the way. 
Something in it that suggests what the conflicts are, what the problems are.\u003c/p\u003e\n\u003ch3 id=\"the-novel-is-a-middle-class-art-form\"\u003eThe Novel is a middle class art form\u003c/h3\u003e\n\u003cp\u003eThe history of the novel as a form began when there was a new class, a middle class, to read it; it was an art form that they needed.\u003c/p\u003e\n\u003ch3 id=\"that-there-is-already-a-form-of-artistry-for-the-lower-class-but-not-middle-class\"\u003eThat there is already a form of artistry for the lower class, but not middle class\u003c/h3\u003e\n\u003cp\u003eThe lower classes didn\u0026rsquo;t need novels at that time because they had an art form already they had songs and dances, and ceremony, and gossip, and celebrations.\u003c/p\u003e\n\u003ch3 id=\"novels-of-manners-tell-people-of-a-different-world\"\u003enovels of manners tell people of a different world\u003c/h3\u003e\n\u003cp\u003ewe call 1t the novel of manners, an art form designed to tell peole something they didn\u0026rsquo;t know.\u003c/p\u003e\n\u003ch3 id=\"portrays-quintessential-forms-of-connection\"\u003ePortrays quintessential forms of connection\u003c/h3\u003e\n\u003cp\u003eHow to get married. What a good living was.\u003c/p\u003e\n\u003ch3 id=\"the-african-americans-became-unexclusive\"\u003eThe African Americans became unexclusive\u003c/h3\u003e\n\u003cp\u003eFor a long time, the art form that was healing for Black people was music. That music is no longer exclusively ours; we don\u0026rsquo;t have exclusive rights to it.\u003c/p\u003e\n\u003ch3 id=\"that-the-story-of-the-novel-is-told-where-the-reader-constructs-the-story-together\"\u003eThat the story of the novel is told where the reader constructs the story together\u003c/h3\u003e\n\u003cp\u003eTo construct the dialogue so that it is heard. 
So that there are no adverbs attached to them: \u0026ldquo;loudly,\u0026rdquo; \u0026ldquo;softly,\u0026rdquo; \u0026ldquo;he said menacingly.'\u003c/p\u003e\n\u003ch3 id=\"that-the-artistry-is-not-described-as-black-but-inherently-black\"\u003eThat the artistry is not described as Black but inherently black\u003c/h3\u003e\n\u003cp\u003eBlack, because it uses the characteristics of Black art\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecondary_source_comparison_activity/","tags":null,"title":"Bluest Eye: secondary source comparison activity"},{"categories":null,"contents":"bool does not belong in pure C.\n#include \u0026lt;stdio.h\u0026gt; #include \u0026lt;stdbool.h\u0026gt; // you need to include this to get bools to work. int main(int argc, char *argv[]) { bool test = true; if (test) printf(\u0026#34;its true\\n\u0026#34;) } ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbool/\"\u003ebool\u003c/a\u003e does \u003cstrong\u003enot\u003c/strong\u003e belong in pure C.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;stdio.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;stdbool.h\u0026gt; // you need to include this to get bools to work.\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eargc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[])\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;its true\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhbool/","tags":null,"title":"bool"},{"categories":null,"contents":"bootstrap allows you to know distribution statistics, calculate p-value, etc, with NO statistical testing like t test, etc.\nBig idea: treat your sample space as your population, and sample from it to obtain an estimate of the properties of the sample distribution.\n\\begin{equation} D \\approx \\hat{D} \\end{equation}\nso, to calculate the distribution of any given statistic via a sample:\nestimate the PMF using sample my_statistic_dist = [] (like sample mean, sample variance, etc.) for i in (N \u0026gt;\u0026gt; 10000) take a subsample of len(sample) samples from PMFu my_statistic_dist.append(my_statistic=(=subsample)) (recall it has to be a sampling statistic (like N-1 for sample variance) how you have a distribution of my_statistic We know that taking mean and var re drawn as a statistic of the same random variable, \\(N\\) times. So, central limit theorem holds. 
Therefore, these are normal and you can deal with them.\nIn terms of step 3.1, the subsample of len sample can be given by:\nnp.random.choice(sample_pop, len(sample_pop), replace=True) because we essentilaly want to draw from a weighted distribution of your input sample, WITH REPLACEMENT (otherwise it\u0026rsquo;d be the same exact set of data instead of a sample from it).\np-value from bootstrap p-value is defined as \u0026ldquo;probability of having an difference in sample means (called Effecient Frontier) greater than that observed in samples of the null hypothesis, that the two sames came from the same distribution\u0026rdquo;.\nso:\n\\begin{equation} P(|\\mu_{1} - \\mu_{2}|\u0026gt;x | \\text{null}\\)) \\end{equation}\nWe can simply calculate an effect size distribution via the bootstrapping on the combined population of both distributions, to see what the probability above is where \\(x\\) is the actual effect size we got.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003e allows you to know distribution statistics, calculate \u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003e, etc, with NO \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003eal testing like t test, etc.\u003c/p\u003e\n\u003cp\u003eBig idea: treat your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e as your population, and sample from it to obtain an estimate of the properties of the sample distribution.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD \\approx \\hat{D}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso, to calculate the distribution of any given \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e via a sample:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eestimate the \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e using sample\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003emy_statistic_dist\u003c/code\u003e = [] (like \u003ca 
href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e, \u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e, etc.)\u003c/li\u003e\n\u003cli\u003efor i in (N \u0026gt;\u0026gt; 10000)\n\u003col\u003e\n\u003cli\u003etake a \u003ccode\u003esubsample\u003c/code\u003e of len(sample) samples from \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003eu\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003emy_statistic_dist\u003c/code\u003e.append(\u003ccode\u003emy_statistic=(=subsample\u003c/code\u003e)) (recall it has to be a \u003ca href=\"/posts/kbhrandom_variables/#sampling-statistics\"\u003esampling statistic\u003c/a\u003e (like N-1 for \u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ehow you have a distribution of \u003ccode\u003emy_statistic\u003c/code\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe know that taking mean and var re drawn as a statistic of the same random variable, \\(N\\) times. So, \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e holds. 
Therefore, these are normal and you can deal with them.\u003c/p\u003e\n\u003cp\u003eIn terms of step 3.1, the subsample of len sample can be given by:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echoice\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esample_pop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esample_pop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ebecause we essentilaly want to draw from a weighted distribution of your input sample, WITH REPLACEMENT (otherwise it\u0026rsquo;d be the same exact set of data instead of a sample from it).\u003c/p\u003e\n\u003ch2 id=\"p-value-from-bootstrap\"\u003ep-value from bootstrap\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003e is defined as \u0026ldquo;\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of having an difference in sample means 
(called \u003ca href=\"/posts/kbhcapm/#minimum-variance-boundary\"\u003eEffecient Frontier\u003c/a\u003e) greater than that observed in samples of the \u003ca href=\"/posts/kbhhypothesis_testing/#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e, that the two sames came from the same distribution\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eso:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(|\\mu_{1} - \\mu_{2}|\u0026gt;x | \\text{null}\\))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can simply calculate an \u003ca href=\"#p-value-from-bootstrap\"\u003eeffect size\u003c/a\u003e distribution via the \u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003eping on the combined population of both distributions, to see what the probability above is where \\(x\\) is the actual effect size we got.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhboostrap/","tags":null,"title":"bootstrap"},{"categories":null,"contents":"BNT is a discourse task where subjects are shown 60 pictures decreasing frequency and asked to recall the word.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBNT\u003c/a\u003e is a \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003ediscourse task\u003c/a\u003e where subjects are shown 60 pictures decreasing frequency and asked to recall the word.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhboston_naming_test/","tags":null,"title":"Boston Naming Test"},{"categories":null,"contents":"(Bouton et al. 
2018)\nOne-Liner Uses the single-user avoidance POMDP formulation presented in (Bouton, Cosgun, and Kochenderfer 2017) to extend to multiple road users\nNovelty Uses Single-User Model of Road Navigation to extend general POMDP formulation into multi-pedestrian/multi road user casesroad user cases\nPrevious Work Imagine worst-case scenario always: set upper bound and always imagine it; could cause gridlock if situation never resolves.\nNotable Methods Uses QMDP and SARSOP to perform optimization\nSingle-User Model of Road Navigation See Single-User Model of Road Navigation\nScaling to multiple road users make an aggregate utility which is a function across all the single-user avoidance strategies (i.e. the aggregate utiltiy of mulitlpe road user is the utility of avoiding each individual user) \\(U^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a)\\). this is called utility fusion two possible approaches: either minimum of all the utilities, or the sum of them; the former is more risk averse (we want to hit no one), and latter treats each user is independent. 
further, the number of users in the road is modeled by a belief Evaluation \u0026ldquo;the evaluation models are different to find the optimal policy, and are also higher fidelity\u0026rdquo;\nWe want to evaluate our POMDP on a higher fidelity model to check if the system can generalize to harder environments.\nBaselines: random actions, or hand crafted rules-based policy.\nKey Figs New Concepts Single-User Model of Road Navigation POMDP formulation; we only care about one road user\naction: a finite set of change in acceleration -4m/s2, -2m/s2, 0m/s2, 2m/s2, 4m/s2 states and transitions: poses (position + velocity) of the car and the road user; position are velocities are discretized observation: measured position and velocity of the one other road user with a pm 1 meter variance for crosswalks and pm 2 meter variance for intersection users in non-occluded area will always be detected user in an occluded area will not be detected position and velocity of road users are uncertain pm 1 meter and pm 1 meter / second belief: categorical distribution over states dynamics: physics + kinematics for car; pedestrians have stochastic velocity reward: unit reward for final position, tuned penalty for collision Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_2\"\u003eBouton et al. 
2018\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUses the single-user avoidance POMDP formulation presented in (\u003ca href=\"#citeproc_bib_item_1\"\u003eBouton, Cosgun, and Kochenderfer 2017\u003c/a\u003e) to extend to multiple road users\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eUses \u003ca href=\"#single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/a\u003e to extend general \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e formulation into multi-pedestrian/multi road user casesroad user cases\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_09-18-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"previous-work\"\u003ePrevious Work\u003c/h2\u003e\n\u003cp\u003eImagine worst-case scenario always: set upper bound and always imagine it; could cause gridlock if situation never resolves.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eUses \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e and \u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e to perform optimization\u003c/p\u003e\n\u003ch3 id=\"single-user-model-of-road-navigation--orgea079d6\"\u003e\u003ca href=\"#single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"#single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"scaling-to-multiple-road-users\"\u003eScaling to multiple road users\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003emake an aggregate utility which is a function across all the single-user avoidance strategies (i.e. the aggregate utiltiy of mulitlpe road user is the utility of avoiding each individual user) \\(U^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a)\\). 
this is called \u003ca href=\"/posts/kbhutility_fusion/\"\u003eutility fusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003etwo possible approaches: either minimum of all the utilities, or the sum of them; the former is more risk averse (we want to hit no one), and latter treats each user is independent.\u003c/li\u003e\n\u003cli\u003efurther, the number of users in the road is modeled by a belief\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"evaluation\"\u003eEvaluation\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;the evaluation models are different to find the optimal policy, and are also higher fidelity\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe want to evaluate our \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e on a higher fidelity model to check if the system can generalize to harder environments.\u003c/p\u003e\n\u003cp\u003eBaselines: random actions, or hand crafted rules-based policy.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-09_13-00-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_09-35-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003ch3 id=\"single-user-model-of-road-navigation\"\u003eSingle-User Model of Road Navigation\u003c/h3\u003e\n\u003cp\u003ePOMDP formulation; we only care about \u003cstrong\u003eone road user\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eaction: a finite set of change in acceleration -4m/s2, -2m/s2, 0m/s2, 2m/s2, 4m/s2\u003c/li\u003e\n\u003cli\u003estates and transitions: poses (position + velocity) of the car and the road user; position are velocities are discretized\u003c/li\u003e\n\u003cli\u003eobservation: measured position and velocity of the one other road user with a pm 1 meter variance for crosswalks and pm 2 meter variance for 
intersection\n\u003cul\u003e\n\u003cli\u003eusers in non-occluded area will always be detected\u003c/li\u003e\n\u003cli\u003euser in an occluded area will not be detected\u003c/li\u003e\n\u003cli\u003eposition and velocity of road users are uncertain pm 1 meter and pm 1 meter / second\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ebelief: categorical distribution over states\u003c/li\u003e\n\u003cli\u003edynamics: physics + kinematics for car; pedestrians have stochastic velocity\u003c/li\u003e\n\u003cli\u003ereward: unit reward for final position, tuned penalty for collision\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbouton_2018/","tags":null,"title":"Bouton 2018"},{"categories":null,"contents":"Big idea: keep branching/selecting until a tally hits an upper/lower bound\nIngredients:\n\\(Ulo(s)\\): lower bound function of value function \\(Qhi(s,a)\\): upper bound function of action-value function \\(\\mathcal{P}\\) problem (states, transitions, etc.) \\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower Its Forward Search, but with bounds instead of exponentially looking into every possible next state, we only check the actions in the order of their bounded value. 
We start with the actions with the highest bound (most possible value), and if its already better than the upper bound, we can be done because we know everything else will have lower value as their bounds are lower.\nDefine subroutine branch_and_bound(depth_remaining, utility_lower_bound, q_upper_bound, state).\nif depth_remaining=0; return (action=None, utility=utility_lower_bound(s)) otherwise, let best=(action = None, utility = -infiny) for each possible action at our state, SORTED from highest q_upper_bound(s,a) to lowest if best.utility is higher than the q_upper_bound(s,a) return best (because its not worth to search any other action anymore because anything else would have a lower max bound) get an action-value for our current state where the utility of each next state is the utility given by branch_and_bound(depth_remaining-1, utility_lower_bound, q_upper_bound, next_state) if the action-value is higher than what we have, then we set best (a, action-value) return best This is basically the same thing as Forward Search, but you get the bonus benefit of being able to early-terminate if you bettered your max bounds\n","html":"\u003cp\u003eBig idea: keep branching/selecting until a tally hits an upper/lower bound\u003c/p\u003e\n\u003cp\u003eIngredients:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(Ulo(s)\\): lower bound function of \u003ca href=\"/posts/kbhaction_value_function/#value-function--kbhaction-value-function-dot-md\"\u003evalue function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(Qhi(s,a)\\): upper bound function of \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIts \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but with 
bounds instead of exponentially looking into every possible next state, we only check the actions in the order of their bounded value. We start with the actions with the highest bound (most possible value), and if its already better than the upper bound, we can be done because we know everything else will have lower value as their bounds are lower.\u003c/p\u003e\n\u003cp\u003eDefine subroutine \u003ccode\u003ebranch_and_bound(depth_remaining, utility_lower_bound, q_upper_bound, state)\u003c/code\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif depth_remaining=0; return (action=None, utility=utility_lower_bound(s))\u003c/li\u003e\n\u003cli\u003eotherwise,\n\u003col\u003e\n\u003cli\u003elet \u003ccode\u003ebest=(action = None, utility = -infiny)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003efor each possible action at our state, SORTED from highest \u003ccode\u003eq_upper_bound(s,a)\u003c/code\u003e to lowest\n\u003col\u003e\n\u003cli\u003eif \u003ccode\u003ebest.utility\u003c/code\u003e is higher than the \u003ccode\u003eq_upper_bound(s,a)\u003c/code\u003e return best (because its not worth to search any other action anymore because anything else would have a lower max bound)\u003c/li\u003e\n\u003cli\u003eget an \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e for our current state where the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of each next state is the utility given by \u003ccode\u003ebranch_and_bound(depth_remaining-1, utility_lower_bound, q_upper_bound, next_state)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e is higher than what we have, then we set best (a, action-value)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ereturn best\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is basically the same thing as \u003ca 
href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but you get the bonus benefit of being able to early-terminate if you bettered your max bounds\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbranch_and_bound/","tags":null,"title":"Branch and Bound"},{"categories":null,"contents":"Way of performing action research developed by Victoria Clarke and Virginia Braun in 2006\n","html":"\u003cp\u003eWay of performing \u003ca href=\"/posts/kbhaction_research/\"\u003eaction research\u003c/a\u003e developed by Victoria Clarke and Virginia Braun in 2006\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbraun_and_clarke_thematic_analysis/","tags":null,"title":"Braun and Clarke thematic analysis"},{"categories":null,"contents":"Professor Brian MacWhinney is a professor of psychology, modern languages, and language technology at CMU.\n","html":"\u003cp\u003eProfessor \u003ca href=\"/posts/kbhbrian_macwinney/\"\u003eBrian MacWhinney\u003c/a\u003e is a professor of psychology, modern languages, and language technology at \u003ca href=\"/posts/kbhcmu/\"\u003eCMU\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbrian_macwinney/","tags":null,"title":"Brian MacWhinney"},{"categories":null,"contents":"Brown v. Board of Education is a landmark case in the US. This lead for schools to be integrated, and many children were taken out of school out of protest due to the subsequent integration movement between schools.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbrown_v_board_of_education/\"\u003eBrown v. Board of Education\u003c/a\u003e is a landmark case in the US. This lead for schools to be integrated, and many children were taken out of school out of protest due to the subsequent integration movement between schools.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbrown_v_board_of_education/","tags":null,"title":"Brown v. 
Board of Education"},{"categories":null,"contents":"Brownian Motion is the pattern for measuring the convergence of random walk through continuous timing.\ndiscrete random walk discrete random walk is a tool used to construct Brownian Motion. It is a random walk which only takes on two discrete values at any given time: \\(\\Delta\\) and its additive inverse \\(-\\Delta\\). These two cases take place at probabilities \\(\\pi\\) and \\(1-\\pi\\).\nTherefore, the expected return over each time \\(k\\) is:\n\\begin{equation} \\epsilon_{k} = \\begin{cases} \\Delta, p(\\pi) \\\\ -\\Delta, p(1-\\pi) \\end{cases} \\end{equation}\n(that, at any given time, the expectation of return is either\u0026mdash;with probability $π$\u0026mdash;\\(\\Delta\\), or\u0026ndash;with probability $1-π$\u0026mdash;\\(-\\Delta\\).\nThis makes \\(\\epsilon_{k}\\) independently and identically distributed. The price, then, is formed by:\n\\begin{equation} p_{k} = p_{k-1}+\\epsilon_{k} \\end{equation}\nand therefore the price follows a random walk.\nSuch a discrete random walk can look like this:\nWe can split this time from \\([0,T]\\) into \\(n\\) pieces; making each segment with length \\(h=\\frac{T}{n}\\). Then, we can parcel out:\n\\begin{equation} p_{n}(t) = p_{[\\frac{t}{h}]} = p_{[\\frac{nt}{T}]} \\end{equation}\nDescretized at integer intervals.\nAt this current, discrete moments have expected value \\(E[p_{n}(T)] = n(\\pi -(1-\\pi))\\Delta\\) and variance \\(Var[p_{n}(T)]=4n\\pi (1-\\pi)\\Delta^{2}\\). #why\nNow, if we want to have a continuous version of the descretized interval above, we will maintain the finiteness of \\(p_{n}(T)\\) but take \\(n\\) to \\(\\infty\\). 
To get a continuous random walk needed for Brownian Motion, we adjust \\(\\Delta\\), \\(\\pi\\), and \\(1-\\pi\\) such that the expected value and variance tends towards the normal (as we expect for a random walk); that is, we hope to see that:\n\\begin{equation} \\begin{cases} n(\\pi -(1-\\pi))\\Delta \\to \\mu T \\\\ 4n\\pi (1-\\pi )\\Delta ^{2} \\to \\sigma^{2} T \\end{cases} \\end{equation}\nTo solve for these desired convergences into the normal, we have probabilities \\(\\pi, (1-\\pi), \\Delta\\) such that:\n\\begin{equation} \\begin{cases} \\pi = \\frac{1}{2}\\qty(1+\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\ (1-\\pi) = \\frac{1}{2}\\qty(1-\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\ \\Delta = \\sigma \\sqrt{h} \\end{cases} \\end{equation}\nwhere, \\(h = \\frac{1}{n}\\).\nSo looking at the expression for \\(\\Delta\\), we can see that as \\(n\\) in increases, \\(h =\\frac{1}{n}\\) decreases and therefore \\(\\Delta\\) decreases. In fact, we can see that the change in all three variables track the change in the rate of \\(\\sqrt{h}\\); namely, they vary with O(h).\n\\begin{equation} \\pi = (1-\\pi) = \\frac{1}{2}+\\frac{\\mu \\sqrt{h}}{2\\sigma} = \\frac{1}{2}+O\\qty(\\sqrt{h}) \\end{equation}\nOf course:\n\\begin{equation} \\Delta = O\\qty(\\sqrt{h}) \\end{equation}\nSo, finally, we have the conclusion that:\nas \\(n\\) (number of subdivision pieces of the time domain \\(T\\)) increases, \\(\\frac{1}{n}\\) decreases, \\(O\\qty(\\sqrt{h})\\) decreases with the same proportion. Therefore, as \\(\\lim_{n \\to \\infty}\\) in the continuous-time case, the probability of either positive or negative delta (\\(\\pi\\) and \\(-\\pi\\) trends towards each to \\(\\frac{1}{2}\\)) by the same vein, as \\(\\lim_{n \\to \\infty}\\), \\(\\Delta \\to 0\\) Therefore, this is a cool result: in a continuous-time case of a discrete random walk, the returns (NOT! 
just the expect value, but literal \\(\\Delta\\)) trend towards \\(+0\\) and \\(-0\\) each with \\(\\frac{1}{2}\\) probability.\nactual Brownian motion Given the final results above for the limits of discrete random walk, we can see that the price moment traced from the returns (i.e. \\(p_{k} = p_{k-1}+\\epsilon_{k}\\)) have the properties of normality (\\(p_{n}(T) \\to \\mathcal{N}(\\mu T, \\sigma^{2}T)\\))\nTrue Brownian Motion follows, therefore, three basic properties:\n\\(B_{t}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t\\) For some \\(s\u0026lt;t\\), \\(B_{t}-B_{s}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t-s\\) Distributions \\(B_{j}\\) and \\(B_{t}-B_{s}\\) is independent Standard Brownian Motion Brownian motion that starts at \\(B_0=0\\) is called Standard Brownian Motion\nquadratic variation The quadratic variation of a sequence of values is the expression that:\n\\begin{equation} \\sum_{i=0}^{N-1} (x_{i+1}-x_i)^{2} \\end{equation}\nOn any sequence of values \\(x_0=0,\\dots,x_{N}=1\\) (with defined bounds), the quadratic variation becomes bounded.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e is the pattern for measuring the convergence of \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e through continuous timing.\u003c/p\u003e\n\u003ch2 id=\"discrete-random-walk\"\u003ediscrete random walk\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e is a tool used to construct \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e. It is a \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e which only takes on two discrete values at any given time: \\(\\Delta\\) and its \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e \\(-\\Delta\\). 
These two cases take place at probabilities \\(\\pi\\) and \\(1-\\pi\\).\u003c/p\u003e\n\u003cp\u003eTherefore, the expected return over each time \\(k\\) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon_{k} = \\begin{cases}\n\\Delta, p(\\pi) \\\\\n-\\Delta, p(1-\\pi)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(that, at any given time, the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of return is either\u0026mdash;with probability $π$\u0026mdash;\\(\\Delta\\), or\u0026ndash;with probability $1-π$\u0026mdash;\\(-\\Delta\\).\u003c/p\u003e\n\u003cp\u003eThis makes \\(\\epsilon_{k}\\) independently and identically distributed. The price, then, is formed by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{k} = p_{k-1}+\\epsilon_{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand therefore the price follows a \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuch a \u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e can look like this:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-19_10-53-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe can split this time from \\([0,T]\\) into \\(n\\) pieces; making each segment with length \\(h=\\frac{T}{n}\\). Then, we can parcel out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{n}(t) = p_{[\\frac{t}{h}]} = p_{[\\frac{nt}{T}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDescretized at integer intervals.\u003c/p\u003e\n\u003cp\u003eAt this current, discrete moments have expected value \\(E[p_{n}(T)] = n(\\pi -(1-\\pi))\\Delta\\) and variance \\(Var[p_{n}(T)]=4n\\pi (1-\\pi)\\Delta^{2}\\). 
#why\u003c/p\u003e\n\u003cp\u003eNow, if we want to have a \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e version of the descretized interval above, we will maintain the finiteness of \\(p_{n}(T)\\) but take \\(n\\) to \\(\\infty\\). To get a continuous \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e needed for \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e, we adjust \\(\\Delta\\), \\(\\pi\\), and \\(1-\\pi\\) such that the expected value and variance tends towards the normal (as we expect for a \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e); that is, we hope to see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nn(\\pi -(1-\\pi))\\Delta \\to \\mu T \\\\\n4n\\pi (1-\\pi )\\Delta ^{2} \\to \\sigma^{2} T\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo solve for these desired convergences into the normal, we have probabilities \\(\\pi, (1-\\pi), \\Delta\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\pi = \\frac{1}{2}\\qty(1+\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\\n(1-\\pi) = \\frac{1}{2}\\qty(1-\\frac{\\mu \\sqrt{h}}{\\sigma})\\\\\n\\Delta = \\sigma \\sqrt{h}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(h = \\frac{1}{n}\\).\u003c/p\u003e\n\u003cp\u003eSo looking at the expression for \\(\\Delta\\), we can see that as \\(n\\) in increases, \\(h =\\frac{1}{n}\\) decreases and therefore \\(\\Delta\\) decreases. 
In fact, we can see that the change in all three variables track the change in the rate of \\(\\sqrt{h}\\); namely, they vary with \u003ca href=\"/posts/kbhasymtotic_analysis/#o--n\"\u003eO(h)\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi = (1-\\pi) = \\frac{1}{2}+\\frac{\\mu \\sqrt{h}}{2\\sigma} = \\frac{1}{2}+O\\qty(\\sqrt{h})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta = O\\qty(\\sqrt{h})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, finally, we have the conclusion that:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eas \\(n\\) (number of subdivision pieces of the time domain \\(T\\)) increases, \\(\\frac{1}{n}\\) decreases, \\(O\\qty(\\sqrt{h})\\) decreases with the same proportion. Therefore, as \\(\\lim_{n \\to \\infty}\\) in the continuous-time case, the probability of \u003cem\u003eeither\u003c/em\u003e positive or negative delta (\\(\\pi\\) and \\(-\\pi\\) trends towards each to \\(\\frac{1}{2}\\))\u003c/li\u003e\n\u003cli\u003eby the same vein, as \\(\\lim_{n \\to \\infty}\\), \\(\\Delta \\to 0\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTherefore, this is a cool result: in a continuous-time case of a \u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e, the returns (NOT! just the expect value, but literal \\(\\Delta\\)) trend towards \\(+0\\) and \\(-0\\) each with \\(\\frac{1}{2}\\) probability.\u003c/p\u003e\n\u003ch2 id=\"actual-brownian-motion\"\u003eactual Brownian motion\u003c/h2\u003e\n\u003cp\u003eGiven the final results above for the limits of \u003ca href=\"#discrete-random-walk\"\u003ediscrete random walk\u003c/a\u003e, we can see that the price moment traced from the returns (i.e. 
\\(p_{k} = p_{k-1}+\\epsilon_{k}\\)) have the properties of normality (\\(p_{n}(T) \\to \\mathcal{N}(\\mu T, \\sigma^{2}T)\\))\u003c/p\u003e\n\u003cp\u003eTrue \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e follows, therefore, three basic properties:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(B_{t}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t\\)\u003c/li\u003e\n\u003cli\u003eFor some \\(s\u0026lt;t\\), \\(B_{t}-B_{s}\\) is normally distributed by a mean of \\(0\\), and variance of \\(t-s\\)\u003c/li\u003e\n\u003cli\u003eDistributions \\(B_{j}\\) and \\(B_{t}-B_{s}\\) is independent\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"standard-brownian-motion\"\u003eStandard Brownian Motion\u003c/h2\u003e\n\u003cp\u003eBrownian motion that starts at \\(B_0=0\\) is called \u003ca href=\"#standard-brownian-motion\"\u003eStandard Brownian Motion\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"quadratic-variation\"\u003equadratic variation\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#quadratic-variation\"\u003equadratic variation\u003c/a\u003e of a sequence of values is the expression that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=0}^{N-1} (x_{i+1}-x_i)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOn any sequence of values \\(x_0=0,\\dots,x_{N}=1\\) (with defined bounds), the quadratic variation becomes bounded.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbrownian_motion/","tags":null,"title":"Brownian Motion"},{"categories":null,"contents":"buffer overflow happens when operations like stcpy runs beyond the edge of the allocated buffer. We need to find and fix buffer overflows, which causes people who use o\nbuffer overflow horror stories AOL messanger identifying buffer overflows Think about whether or not what you are going to do will cause buffer overflows. 
There are stuff which you shouldn\u0026rsquo;t do:\nstrcpy: which keeps copying strcat: gets: it keeps taking input forever and forever https://www.acm.org/code-of-ethics \u0026ldquo;Design and implement systems that are robustly and usably secure.\u0026rdquo;\nvalgrind valgrind is a good tool to check whether or not something will buffer overflow.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003e happens when operations like \u003ccode\u003estcpy\u003c/code\u003e runs beyond the edge of the allocated buffer. We need to find and fix \u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003es, which causes people who use o\u003c/p\u003e\n\u003ch2 id=\"buffer-overflow--kbhbuffer-overflow-dot-md--horror-stories\"\u003e\u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003e horror stories\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAOL messanger\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"identifying-buffer-overflow--kbhbuffer-overflow-dot-md--s\"\u003eidentifying \u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eThink about whether or not what you are going to do will cause \u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003es. 
There are stuff which you shouldn\u0026rsquo;t do:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcpy\"\u003estrcpy\u003c/a\u003e: which keeps copying\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcat\"\u003estrcat\u003c/a\u003e:\u003c/li\u003e\n\u003cli\u003egets: it keeps taking input forever and forever\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"https://www.acm.org/code-of-ethics\"\u003ehttps://www.acm.org/code-of-ethics\u003c/a\u003e \u0026ldquo;Design and implement systems that are robustly and usably secure.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"valgrind\"\u003evalgrind\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#valgrind\"\u003evalgrind\u003c/a\u003e is a good tool to check whether or not something will buffer overflow.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbuffer_overflow/","tags":null,"title":"buffer overflow"},{"categories":null,"contents":"\u0026ldquo;How do we build well developed AI systems without a bangin\u0026rsquo; company\u0026rdquo;\nTwo main paradigms transfer learning: (pretrain a model, and) faster convergence, better performance *monolithic models: (pretrain a model, and) just use the pretrained model Problems with monolythic models Continual development of large language models mostly don\u0026rsquo;t exist: no incremental updates To get better improvements, we throw out the old monolythic model Most of the research community can\u0026rsquo;t participate in their development New Alternative Paradigm A very simple routing layer A very large collection of specialist models all from a base model Collaborative model development means that a large amount of contributors can band together to contribute to the development of the models Why Specialist models are cheaper and better to train few shot parameter efficient fine tuning is better liu et al few shot fine-tuning is better than few-shot in-context learning Specialist models can be communicable, 
incremental updates to a base model think: PEFT each of the specialist models can only need to update a small percent of the weights think \u0026ldquo;adapters\u0026rdquo;: parameter efficient updates Routing task2vec: task embedding for meta learning Achille et al efficiently tuned parameters are task embeddings Zhou et al distinction between MoE instead of routing in sub-layer level routing, we are routing at the input level we look at the Novel Tasks (Model Merging) Tasks can be considered as a composition of skills.\neach task can be encoded as a composition of skills we can merge the skills of sub-models Usual updates we take a pretrained model we adapt it to some target task Model Merging Fisher-weight averaging\n\u0026ldquo;Merging models with fisher-weight averaging\u0026rdquo;, Matena et al Merging can be shown as an optimization problem:\n\\begin{equation} argmax_{\\theta} \\sum_{i-1}^{M} \\lambda_{i} \\log p(\\theta \\mid \\mathcal{D}_{i}) \\end{equation}\n\u0026ldquo;a merged model is the set of parameters which would maximize the log-posterior of each model \\(\\mathcal{D}_{i}\\), controlled by \\(\\lambda_{i}\\)\u0026rdquo;\nTask arthmetic\n\u0026ldquo;Editing models with Task Arthmetic\u0026rdquo;, llharco et al \u0026ldquo;Resolving inference when merging models\u0026rdquo; by Yadev et al\nYou can create multi-task models by just doing maff:\n\\begin{equation} \\tau_{1} = \\theta_{finetune_{1}} - \\theta_{pretrain} \\end{equation}\n\\begin{equation} \\tau_{2} = \\theta_{finetune_{2}} - \\theta_{pretrain} \\end{equation}\n\\begin{equation} \\theta_{finetune_{1+2}} = (\\tau_{1} + \\tau_{2}) + \\theta_{pretrain} \\end{equation}\nthis apparently works ok.\nSoft MoE\nSoft merging of experts with adaptive routing, Muqeeth et al\nMoE, but instead of choosing an expert to activate, the router\u0026rsquo;s probability densities will result in a mixture of the experts\u0026rsquo; weights. 
So, mulitple experts can be invoked in a linear way.\nGit-Theta Git-Theta: A Git Extension for Collaborative Development of Machine Learning Models, Kandpal et al\nCommunal and iterative development of model checkpoints. Saves only LoRA\u0026rsquo;d parameters, and removes any weights that didn\u0026rsquo;t change between diffs.\nPetals Petals: Collaborative Inference and Fine-Tuning of Large Models, Borzunov et al.\nDistributed fine-tuning and model inference by using different sub-worker nodes to run different layers of the model.\nhttps://health.petals.dev/\n","html":"\u003cp\u003e\u0026ldquo;How do we build well developed AI systems without a bangin\u0026rsquo; company\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"two-main-paradigms\"\u003eTwo main paradigms\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003etransfer learning\u003c/strong\u003e: (pretrain a model, and) faster convergence, better performance\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e*monolithic models\u003c/strong\u003e: (pretrain a model, and) just use the pretrained model\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"problems-with-monolythic-models\"\u003eProblems with monolythic models\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eContinual development of large language models mostly don\u0026rsquo;t exist: no incremental updates\u003c/li\u003e\n\u003cli\u003eTo get better improvements, we throw out the old monolythic model\u003c/li\u003e\n\u003cli\u003eMost of the research community can\u0026rsquo;t participate in their development\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-alternative-paradigm\"\u003eNew Alternative Paradigm\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA very simple routing layer\u003c/li\u003e\n\u003cli\u003eA very large collection of specialist models all from a base model\u003c/li\u003e\n\u003cli\u003eCollaborative model development means that a large amount of contributors can band together to contribute to the development of the 
models\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSpecialist models are cheaper and better to train\n\u003cul\u003e\n\u003cli\u003efew shot parameter efficient fine tuning is better liu et al\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efew shot fine-tuning is better than few-shot in-context learning\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eSpecialist models can be communicable, incremental updates to a base model\n\u003cul\u003e\n\u003cli\u003ethink: \u003ca href=\"/posts/kbhpeft/\"\u003ePEFT\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eeach of the specialist models can only need to update a small percent of the weights\u003c/li\u003e\n\u003cli\u003ethink \u0026ldquo;adapters\u0026rdquo;: parameter efficient updates\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"routing\"\u003eRouting\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003etask2vec: task embedding for meta learning Achille et al\u003c/li\u003e\n\u003cli\u003eefficiently tuned parameters are task embeddings Zhou et al\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"distinction-between-moe\"\u003edistinction between MoE\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003einstead of routing in sub-layer level routing, we are routing at the \u003cstrong\u003einput level\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ewe look at the\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"novel-tasks--model-merging\"\u003eNovel Tasks (Model Merging)\u003c/h3\u003e\n\u003cp\u003eTasks can be considered as a composition of skills.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eeach task can be encoded as a composition of skills\u003c/li\u003e\n\u003cli\u003ewe can merge the skills of sub-models\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"usual-updates\"\u003eUsual updates\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003ewe take a pretrained model\u003c/li\u003e\n\u003cli\u003ewe adapt it to 
some target task\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"model-merging\"\u003eModel Merging\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eFisher-weight averaging\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u0026ldquo;Merging models with fisher-weight averaging\u0026rdquo;, Matena et al\u003c/strong\u003e\nMerging can be shown as an optimization problem:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nargmax_{\\theta} \\sum_{i-1}^{M} \\lambda_{i} \\log p(\\theta \\mid \\mathcal{D}_{i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a merged model is the set of parameters which would maximize the log-posterior of each model \\(\\mathcal{D}_{i}\\), controlled by \\(\\lambda_{i}\\)\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTask arthmetic\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u0026ldquo;Editing models with Task Arthmetic\u0026rdquo;, llharco et al\u003c/strong\u003e\n\u003cstrong\u003e\u0026ldquo;Resolving inference when merging models\u0026rdquo; by Yadev et al\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eYou can create multi-task models by just doing maff:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tau_{1} = \\theta_{finetune_{1}} - \\theta_{pretrain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tau_{2} = \\theta_{finetune_{2}} - \\theta_{pretrain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{finetune_{1+2}} = (\\tau_{1} + \\tau_{2}) + \\theta_{pretrain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis apparently works ok.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSoft MoE\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eSoft merging of experts with adaptive routing, Muqeeth et 
al\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eMoE, but instead of choosing an expert to activate, the router\u0026rsquo;s probability densities will result in a mixture of the experts\u0026rsquo; weights. So, mulitple experts can be invoked in a linear way.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"git-theta\"\u003eGit-Theta\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eGit-Theta: A Git Extension for Collaborative Development of Machine Learning Models, Kandpal et al\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eCommunal and iterative development of model checkpoints. Saves only LoRA\u0026rsquo;d parameters, and removes any weights that didn\u0026rsquo;t change between diffs.\u003c/p\u003e\n\u003ch2 id=\"petals\"\u003ePetals\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003ePetals: Collaborative Inference and Fine-Tuning of Large Models, Borzunov et al.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eDistributed fine-tuning and model inference by using different sub-worker nodes to run different layers of the model.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://health.petals.dev/\"\u003ehttps://health.petals.dev/\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbuild_a_system_not_a_monolyth/","tags":null,"title":"Build a System, Not a Monolith"},{"categories":null,"contents":"BPE is a common Subword Tokenization scheme.\nTraining choose two symbols that are most frequency adjacent merge those two symbols as one symbol throughout the text repeat to step \\(1\\) until we merge \\(k\\) times v = set(corpus.characters()) for i in range(k): tl, tr = get_most_common_bigram(v) tnew = f\u0026#34;{tl}{tr}\u0026#34; v.push(tnew) corpus.replace((tl,tr), tnew) return v Most commonly, BPE is not ran alone: it usually run inside space separation systems. 
Hence, after each word we usually put a special _ token which delineates end of word.\nHence: \u0026ldquo;pink fluffy unicorn dancing on rainbows\u0026rdquo; becomes\np i n k _ f l u f f y _ u n i c o r n _ d a n c i n g _ o n _ r a i n b o w s Inference During inference time, we apply our stored merges in the order we learned them. As in, if we merged er first during training, we should do that first during inference before merging say n er.\nFrequent subwords often ends up being morphemes.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e is a common \u003ca href=\"/posts/kbhtokenization/#subword-tokenization\"\u003eSubword Tokenization\u003c/a\u003e scheme.\u003c/p\u003e\n\u003ch2 id=\"training\"\u003eTraining\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003echoose two symbols that are most frequency adjacent\u003c/li\u003e\n\u003cli\u003emerge those two symbols as one symbol throughout the text\u003c/li\u003e\n\u003cli\u003erepeat to step \\(1\\) until we merge \\(k\\) times\u003c/li\u003e\n\u003c/ol\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecorpus\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etl\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eget_most_common_bigram\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etnew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etl\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etr\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etnew\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ecorpus\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etl\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etnew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMost commonly, \u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e is not ran alone: it usually run \u003cstrong\u003einside\u003c/strong\u003e space separation systems. Hence, after each word we usually put a special \u003ccode\u003e_\u003c/code\u003e token which delineates end of word.\u003c/p\u003e\n\u003cp\u003eHence: \u0026ldquo;pink fluffy unicorn dancing on rainbows\u0026rdquo; becomes\u003c/p\u003e\n\u003cpre tabindex=\"0\"\u003e\u003ccode class=\"language-nil\" data-lang=\"nil\"\u003ep i n k _ f l u f f y _ u n i c o r n _ d a n c i n g _ o n _ r a i n b o w s\n\u003c/code\u003e\u003c/pre\u003e\u003ch2 id=\"inference\"\u003eInference\u003c/h2\u003e\n\u003cp\u003eDuring inference time, we apply our stored merges \u003cstrong\u003ein the order we learned them\u003c/strong\u003e. 
As in, if we merged \u003ccode\u003eer\u003c/code\u003e first during training, we should do that first during inference before merging say \u003ccode\u003en er\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eFrequent subwords often ends up being \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbpe/","tags":null,"title":"Byte-Pair Encoding"},{"categories":null,"contents":"C was created around 1970s to make Unix tool writing easier. It was meant to the be the simplest, lightest, human readable code on top of assembly.\nC is procedural: you write functions and compose them, instead of defining types C++ is procedural (mostly), but you can have objects: you still write functions, and can build objects and call methods Java is actual oop We use C because its fast, highly efficient. C is popular for systems programming, OSes, networking, etc. Lets you work at a lower level to understand/manipulate the underlying systems.\nprinciples of C small, simple abstractions minimalist aesthetic efficiency and simplicity over safety and high-level abstractions C limitations no advanced features operator overloading default arguments pass by reference classes, objects abstract data types no extensive libs no networking no graphics no safety (\u0026ldquo;people who write C think they don\u0026rsquo;t make mistakes\u0026rdquo;) weak compiler no runtime checks at all ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhc/\"\u003eC\u003c/a\u003e was created around 1970s to make \u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e tool writing easier. 
It was meant to the be the simplest, lightest, human readable code on top of assembly.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eC is procedural\u003c/strong\u003e: you write functions and compose them, instead of defining types\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eC++ is procedural (mostly), but you can have objects\u003c/strong\u003e: you still write functions, and \u003cem\u003ecan\u003c/em\u003e build objects and call methods\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eJava is actual oop\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe use C because its fast, highly efficient. C is popular for systems programming, OSes, networking, etc. Lets you work at a lower level to understand/manipulate the underlying systems.\u003c/p\u003e\n\u003ch2 id=\"principles-of-c\"\u003eprinciples of C\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esmall, simple abstractions\u003c/li\u003e\n\u003cli\u003eminimalist aesthetic\u003c/li\u003e\n\u003cli\u003eefficiency and simplicity over safety and high-level abstractions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"c-limitations\"\u003eC limitations\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eno advanced features\n\u003cul\u003e\n\u003cli\u003eoperator overloading\u003c/li\u003e\n\u003cli\u003edefault arguments\u003c/li\u003e\n\u003cli\u003epass by reference\u003c/li\u003e\n\u003cli\u003eclasses, objects\u003c/li\u003e\n\u003cli\u003eabstract data types\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eno extensive libs\n\u003cul\u003e\n\u003cli\u003eno networking\u003c/li\u003e\n\u003cli\u003eno graphics\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eno safety (\u0026ldquo;people who write C think they don\u0026rsquo;t make mistakes\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003eweak compiler\u003c/li\u003e\n\u003cli\u003eno runtime checks at 
all\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhc/","tags":null,"title":"C"},{"categories":null,"contents":"All caches, uses similar memory addressing scheme (i.e. each address can map to one of each device); each \u0026ldquo;line\u0026rdquo; of cache is usually 64 bytes. This means that each time you grab something from memory, the 64 bytes surrounding that area of memory will be cached. You are most likely to do this because of iterating through an array.\nL1-D cache: on the CPU, used for staging for registers L1-I cache: on the CPU, used for staging for assembly instructions L2 cache: L1 staging L3 cache: L2 staging Main memory: memory cache locality temporal locality \u0026ldquo;Things I have used recently, I\u0026rsquo;m likely to use again soon\u0026rdquo;\nspacial locality \u0026ldquo;Things next to what I got I\u0026rsquo;m likely going to use soon\u0026rdquo;\n","html":"\u003cp\u003eAll caches, uses similar memory addressing scheme (i.e. each address can map to one of each device); each \u0026ldquo;line\u0026rdquo; of cache is usually 64 bytes. This means that each time you grab something from memory, the 64 bytes surrounding that area of memory will be cached. 
You are most likely to do this because of iterating through an array.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eL1-D cache: on the CPU, used for staging for registers\u003c/li\u003e\n\u003cli\u003eL1-I cache: on the CPU, used for staging for assembly instructions\u003c/li\u003e\n\u003cli\u003eL2 cache: L1 staging\u003c/li\u003e\n\u003cli\u003eL3 cache: L2 staging\u003c/li\u003e\n\u003cli\u003eMain memory: memory\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"cache-locality\"\u003ecache locality\u003c/h2\u003e\n\u003ch3 id=\"temporal-locality\"\u003etemporal locality\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Things I have used recently, I\u0026rsquo;m likely to use again soon\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"spacial-locality\"\u003espacial locality\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Things next to what I got I\u0026rsquo;m likely going to use soon\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcaching/","tags":null,"title":"caching"},{"categories":null,"contents":"cal.com is an automating calendar service funded by the VC firm 776.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcal_com/\"\u003ecal.com\u003c/a\u003e is an automating calendar service funded by the VC firm \u003ca href=\"/posts/kbh776/\"\u003e776\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcal_com/","tags":null,"title":"cal.com"},{"categories":null,"contents":" ","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-08_12-59-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-08_12-59-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcalculating_shear_s_modulus/","tags":null,"title":"calculating shear's modulus"},{"categories":null,"contents":"Contraindicated offline POMDP solver.\nContrained belief state MDP Linear Programming belief set generation Approximate POMDP with Contrainst CPOMDPs are Hard Can\u0026rsquo;t do DP with 
pruning: optimal policies may be stochastic Minimax quadratically contained program: computational intractable Contained PBVI struggles with contraint satisfaction CALP Core Idea Recast CPOMDP as a contrained belief-state MDP.\nWe replace our state-space with our belief space:\n\\(S = B\\) \\(s_0 = b_0\\) You essentially assume here that there is some finite belief space.\n","html":"\u003cp\u003e\u003cstrong\u003eContraindicated\u003c/strong\u003e offline POMDP solver.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eContrained belief state MDP\u003c/li\u003e\n\u003cli\u003eLinear Programming\u003c/li\u003e\n\u003cli\u003ebelief set generation\u003c/li\u003e\n\u003cli\u003eApproximate POMDP with Contrainst\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"cpomdps-are-hard\"\u003eCPOMDPs are Hard\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCan\u0026rsquo;t do DP with pruning: optimal policies may be stochastic\u003c/li\u003e\n\u003cli\u003eMinimax quadratically contained program: computational intractable\u003c/li\u003e\n\u003cli\u003eContained \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e struggles with contraint satisfaction\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"calp-core-idea\"\u003eCALP Core Idea\u003c/h2\u003e\n\u003cp\u003eRecast \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003e as a contrained \u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe replace our state-space with our belief space:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S = B\\)\u003c/li\u003e\n\u003cli\u003e\\(s_0 = b_0\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou essentially assume here that there is some finite belief space.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcalp/","tags":null,"title":"CALP"},{"categories":null,"contents":"Calpains something something something AFIB\n","html":"\u003cp\u003eCalpains something something something \u003ca 
href=\"/posts/kbhafib/\"\u003eAFIB\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcalpains_afib/","tags":null,"title":"Calpains … AFIB"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcanciones/","tags":null,"title":"Canciones"},{"categories":null,"contents":"duplicate article creation. see Cantilever Beams\n","html":"\u003cp\u003eduplicate article creation. see \u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever Beams\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcantilever_beam/","tags":null,"title":"cantilever beam"},{"categories":null,"contents":"A Cantilever beam is a rigid structure which is extended horizontally and supported on one end.\nWorking with Cantilever Beams curvature Let\u0026rsquo;s first define a function:\n\\begin{equation} w(x) \\end{equation}\nthis represents the deflection of the beam at point \\(x\\). We will begin by taking its derivative by location:\n\\begin{equation} \\Delta w = \\pdv{w}{x} \\end{equation}\nis the change in deflection over location. \u0026ldquo;How much deviation of the beam from the resting axi is there as you run along it?\u0026rdquo;\nWe now take another derivative:\n\\begin{equation} k = \\pdv[2]{w}{x} \\end{equation}\n\\(k\\) is defined as the \u0026ldquo;Curvature\u0026rdquo; of the beam: the \u0026ldquo;change in the change of bentness.\u0026rdquo; The intuition is essentially this:\na straight, flat beam fixed an one end has \\(\\Delta w=0\\), \\(k=0\\). It does not change from its resting axis, and its rate of change from resting does not change a straight, slanted beam fixed at one end has \\(\\Delta w=C, k=0\\). It changes from its resting axis with a linear rate, and its rate of change from resting does not change. a curved, slanted beam fixed at one end has \\(\\Delta \\omega = f(x), k=C\\). 
It changes from its resting axis non-linearly (hence curving at a function of \\(x\\)), and its rate of change from resting is changing at a constant \\(c\\). flexural rigidity Flexural Rigidity is the \u0026ldquo;force couple\u0026rdquo; (\u0026ldquo;rate\u0026rdquo;) which relates the Curvature of an non-rigid body and how much torque it actually generates given the object\u0026rsquo;s properties.\nRecall first our Elastic Modulus \\(E\\): it is a fraction of \\(\\frac{stress}{strain}\\) measured in Pascals (force per unit area, i.e. \\(\\frac{N}{m^{2}} = \\frac{kg}{ms^{2}}\\)).\nFind also second moment of area \\(I\\): a value in units \\(m^{4}\\) which is the sum (by area) of the squared displacement of each infinitesimal area to the axis of origin.\nAnd we bam! we multiply the two things together, creating a value \\(EI\\) in units \\(Nm^{2}\\).\nbending moment bending moment is the torque from bending. It is expressed usually in \\(M\\). As mentioned in the section about Flexural Rigidity, we can use that value to relate \\(M\\) with the actual Curvature of your object.\nSpecifically, that:\n\\begin{equation} M = -(EI)k = -EI\\pdv[2]{w}{x} \\end{equation}\n\u0026ldquo;bending moment is flexural rigidity times curvature\u0026rdquo; =\u0026gt; \u0026ldquo;[how much force per distance you exert] is the result of [how bendy your thing is] times [how much you bent it].\u0026rdquo;\nThere is a negative in front because if you pull out your lovely little right hand, point your thumb forward (+y), start curling your nice fingers around your nice hand (-z), you will notice that you are wrapping them downwards (the - part of the z) which is rather not positive. If we want \\(\\pdv[2]{w}{x}\\) to be positive (bend up), we will need to chuck a negative in front of it to make both things positive.\nThis relation, while intuitive, is not from first-principles. 
In order to get such a derivation, you read Wikipedia.\nmagic We can take two derivatives by location\u0026mdash;\n\\begin{equation} \\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x) \\end{equation}\nwhere \\(\\mu\\) is the mass density, \\(q(x)\\) is the force applied (in Newtons) by area. this is magic. Will come back to it.\nSolving this? See Finite Difference Method\nActually attempting to solve it Numerical Cantileaver Simulations\nWorking on Deformation ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever\u003c/a\u003e beam is a rigid structure which is extended horizontally and supported on one end.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"working-with-cantilever-beams--kbhcantilever-beams-dot-md\"\u003eWorking with \u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever Beams\u003c/a\u003e\u003c/h2\u003e\n\u003ch3 id=\"curvature\"\u003ecurvature\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s first define a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis represents the deflection of the beam at point \\(x\\). We will begin by taking its derivative by location:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta w = \\pdv{w}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the change in deflection over location. \u0026ldquo;How much deviation of the beam from the resting axi is there as you run along it?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe now take another derivative:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk = \\pdv[2]{w}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(k\\) is defined as the \u0026ldquo;\u003ca href=\"#curvature\"\u003eCurvature\u003c/a\u003e\u0026rdquo; of the beam: the \u0026ldquo;change in the change of bentness.\u0026rdquo; The intuition is essentially this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ea straight, flat beam fixed an one end has \\(\\Delta w=0\\), \\(k=0\\). 
It does \u003cstrong\u003enot change\u003c/strong\u003e from its resting axis, and its rate of change from resting does \u003cstrong\u003e\u003cstrong\u003enot change\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ea straight, slanted beam fixed at one end has \\(\\Delta w=C, k=0\\). It \u003cstrong\u003e\u003cstrong\u003echanges\u003c/strong\u003e\u003c/strong\u003e from its resting axis with a linear rate, and its rate of change from resting does \u003cstrong\u003e\u003cstrong\u003enot change\u003c/strong\u003e\u003c/strong\u003e.\u003c/li\u003e\n\u003cli\u003ea \u003cem\u003ecurved\u003c/em\u003e, slanted beam fixed at one end has \\(\\Delta \\omega = f(x), k=C\\). It \u003cstrong\u003e\u003cstrong\u003echanges\u003c/strong\u003e\u003c/strong\u003e from its resting axis non-linearly (hence curving at a function of \\(x\\)), and its rate of change from resting is \u003cstrong\u003echanging\u003c/strong\u003e at a constant \\(c\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"flexural-rigidity\"\u003eflexural rigidity\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#flexural-rigidity\"\u003eFlexural Rigidity\u003c/a\u003e is the \u0026ldquo;force couple\u0026rdquo; (\u0026ldquo;rate\u0026rdquo;) which relates the \u003ca href=\"#curvature\"\u003eCurvature\u003c/a\u003e of an non-rigid body and how much torque it actually generates given the object\u0026rsquo;s properties.\u003c/p\u003e\n\u003cp\u003eRecall first our \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e \\(E\\): it is a fraction of \\(\\frac{stress}{strain}\\) measured in Pascals (force per unit area, i.e. 
\\(\\frac{N}{m^{2}} = \\frac{kg}{ms^{2}}\\)).\u003c/p\u003e\n\u003cp\u003eFind also \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e \\(I\\): a value in units \\(m^{4}\\) which is the sum (by area) of the squared displacement of each infinitesimal area to the axis of origin.\u003c/p\u003e\n\u003cp\u003eAnd we bam! we multiply the two things together, creating a value \\(EI\\) in units \\(Nm^{2}\\).\u003c/p\u003e\n\u003ch3 id=\"bending-moment\"\u003ebending moment\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#bending-moment\"\u003ebending moment\u003c/a\u003e is the \u003ca href=\"#bending-moment\"\u003etorque from bending\u003c/a\u003e. It is expressed usually in \\(M\\). As mentioned in the section about \u003ca href=\"#flexural-rigidity\"\u003eFlexural Rigidity\u003c/a\u003e, we can use that value to relate \\(M\\) with the actual \u003ca href=\"#curvature\"\u003eCurvature\u003c/a\u003e of your object.\u003c/p\u003e\n\u003cp\u003eSpecifically, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = -(EI)k = -EI\\pdv[2]{w}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"#bending-moment\"\u003ebending moment\u003c/a\u003e is \u003ca href=\"#flexural-rigidity\"\u003eflexural rigidity\u003c/a\u003e times \u003ca href=\"#curvature\"\u003ecurvature\u003c/a\u003e\u0026rdquo; =\u0026gt; \u0026ldquo;[how much force per distance you exert] is the result of [how bendy your thing is] times [how much you bent it].\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThere is a negative in front because if you pull out your lovely little right hand, point your thumb forward (+y), start curling your nice fingers around your nice hand (-z), you will notice that you are wrapping them downwards (the - part of the z) which is rather not positive. 
If we want \\(\\pdv[2]{w}{x}\\) to be positive (bend up), we will need to chuck a negative in front of it to make both things positive.\u003c/p\u003e\n\u003cp\u003eThis relation, while intuitive, is not from first-principles. In order to get such a derivation, \u003ca href=\"https://en.wikipedia.org/wiki/Euler%E2%80%93Bernoulli_beam_theory#Derivation_of_the_bending_equation\"\u003eyou read Wikipedia\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"magic\"\u003emagic\u003c/h3\u003e\n\u003cp\u003eWe can take two derivatives by location\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\mu\\) is the mass density, \\(q(x)\\) is the force applied (in Newtons) by area. this is magic. Will come back to it.\u003c/p\u003e\n\u003ch2 id=\"solving-this\"\u003eSolving this?\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhfinite_difference_method/\"\u003eFinite Difference Method\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"actually-attempting-to-solve-it\"\u003eActually attempting to solve it\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnumerical_cantileaver_simulations/\"\u003eNumerical Cantileaver Simulations\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"working-on-deformation\"\u003eWorking on Deformation\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcantilever_beams/","tags":null,"title":"Cantilever Beams"},{"categories":null,"contents":"The capacitance is the amount of change something can hold; this scales based on how much electric potential is being applied.\nParallel plates \\begin{equation} C = \\frac{\\epsilon_{0} A}{d} \\end{equation}\nwhere, \\(e_0\\) is the permittivity of free space, \\(A\\) the area of the plates, and \\(d\\) their distance.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhcapacitance/\"\u003ecapacitance\u003c/a\u003e is the amount of change something can hold; this scales based on how much 
\u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e is being applied.\u003c/p\u003e\n\u003ch2 id=\"parallel-plates\"\u003eParallel plates\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nC = \\frac{\\epsilon_{0} A}{d}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(e_0\\) is the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e, \\(A\\) the area of the plates, and \\(d\\) their distance.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcapacitance/","tags":null,"title":"capacitance"},{"categories":null,"contents":"A capacitor changes, then resists being charged further. Their rules work opposite to resistors.\ncapacitor in series \\begin{equation} \\frac{1}{C_{eq}} = \\frac{1}{C_1} + \\frac{1}{C_2} + \\frac{1}{C_3} \\end{equation}\nand yet,\ncapacitor in parallel \\begin{equation} C_{eq} = C_1 + C_2 + C_3 \\end{equation}\nenergy stored by a capacitor \\begin{equation} E = \\frac{1}{2} CV^{2} \\end{equation}\nwhere, \\(E\\) is the energy stored, \\(C\\) the capacitance, and \\(V\\) the voltage across the capacitor.\nWhich, subbing the formula below:\n\\begin{equation} U = \\frac{1}{2} \\frac{Q^{2}}{C} \\end{equation}\nvoltage across and max charge stored on a capacitor \\begin{equation} C = \\frac{Q}{V} \\end{equation}\nwhere, \\(Q\\) is the change and \\(V\\) the voltage\n\u0026ldquo;the more change the capacitor can store given a voltage, the higher the capacitance.\u0026rdquo;\n\\begin{equation} Q = CV \\end{equation}\n","html":"\u003cp\u003eA capacitor changes, then resists being charged further. 
Their rules work opposite to \u003ca href=\"/posts/kbhresistors/\"\u003eresistor\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"capacitor-in-series\"\u003ecapacitor in series\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{C_{eq}} = \\frac{1}{C_1} + \\frac{1}{C_2} + \\frac{1}{C_3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand yet,\u003c/p\u003e\n\u003ch2 id=\"capacitor-in-parallel\"\u003ecapacitor in parallel\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nC_{eq} = C_1 + C_2 + C_3\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"energy-stored-by-a-capacitor\"\u003eenergy stored by a capacitor\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{1}{2} CV^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(E\\) is the energy stored, \\(C\\) the capacitance, and \\(V\\) the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e across the capacitor.\u003c/p\u003e\n\u003cp\u003eWhich, subbing the formula below:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU = \\frac{1}{2} \\frac{Q^{2}}{C}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"voltage-across-and-max-charge-stored-on-a-capacitor\"\u003evoltage across and max charge stored on a capacitor\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nC = \\frac{Q}{V}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(Q\\) is the change and \\(V\\) the voltage\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the more change the capacitor can store given a voltage, the higher the capacitance.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ = CV\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcapacitor/","tags":null,"title":"Capacitor"},{"categories":null,"contents":"A cancer drug to synthesize Fluoropyrimidine.\n","html":"\u003cp\u003eA cancer drug to synthesize \u003ca 
href=\"\"\u003eFluoropyrimidine\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcapecitabmine/","tags":null,"title":"Capecitabmine"},{"categories":null,"contents":"CAPM is a method of portfolio selection analysis which focuses on maximizing return given some fixed variance.\nIt deals with optimal Capital Market Line, given here:\n\\begin{equation} E[R_{p}] = r_{f}+\\frac{\\sigma_{p}}{\\sigma_{T}}\\qty(E[R_{T}]-r_{f}) \\end{equation}\nWhich describes \\(E[R_{p}]\\), the expected return of an optimal portfolio in a market, given, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, \\(\\sigma_{p}\\) is the portfolio returns, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\nSharpe Ratio The Sharpe Ratio is a measure of the risk-adjusted performance of an asset\u0026mdash;given the rate of return of some risk-free asset.\nIt is defined as:\n\\begin{equation} S_{a} = \\frac{E[R_{a}-R_{b}]}{\\sigma_{a}} \\end{equation}\nwhere, \\(R_{a}\\) is the raw return of the asset, \\(R_{b}\\) is the risk-free rate of return, and \\(\\sigma_{a}\\) is the standard deviation of the asset \u0026ldquo;excess\u0026rdquo; return (i.e. standard deviation actual return - expected return\u0026mdash;how much extra there is).\nMinimum-Variance Boundary For a given a weighted-average portfolio of stocks their waited averages, and correlations between the stocks you can draw this curvy curve. Let pink dots represent the two securities in your portfolio, and various curves highlighting possible linear combinations thereof\u0026mdash;\nLet\u0026rsquo;s observe the boundary conditions of this curve.\nIf the two stocks are exactly negatively correlated, then the more risk you take the more return you have for one while less return you have for the other (hence, two straight divergent lines.)\nIf you have an exactly correlated portfolio, the two assets will will form a line.\nThe Effecient Frontier is the top half of this curve (i.e. 
higher risk/higher return is not a fun place to be, so that\u0026rsquo;s an inefficient frontier.)\nCapital Market Line The Capital Market Line is a line that uses the Sharpe Ratio of a market as a whole (how the market is performing against the risk-free rate) to analyze the performance of portfolio. It plots the performance of an \u0026ldquo;optimal portfolio\u0026rdquo; in a given market.\nLet\u0026rsquo;s construct first the Sharpe Ratio of a hypothetical market:\n\\begin{equation} \\frac{R_{t}-r_{f}}{\\sigma_{t}} \\end{equation}\nwhere \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\nWe will multiply this value by the standard-deviation of your portfolio to calculate what the market claims should be your expected return. Then, we shift the line by the risk-free rate (as you are expected also to get that rate back in your return.\nSo an \u0026ldquo;effecient\u0026rdquo; portfolio (getting the max expected return per unit risk as measured by the market Sharpe Ratio) should behave like:\n\\begin{equation} E[R_{p}] = r_{f}+\\frac{E[R_{T}]-r_{f}}{\\sigma_{T}}\\sigma_{p} \\end{equation}\nagain, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\nThe one liner is: \u0026ldquo;the return of your portfolio should be the base return by risk-free rate, plus how much excess risk you are taking on (and therefore return you should be getting back by the Sharpe Ratio)\u0026rdquo;\n(how much you are expected to get (i.e. market Sharpe Ratio times your portfolio volatility), shifted back up by the risk-free rate.\nSharpe-Lintner CAPM A linear formulation of CAPM base on market-excess return (i.e. if you want to beat the market, you will have to sustain proportionally the same amount of risk.)\nTangency Portfolio There is a portfolio, which is named the Tangency Portfolio. 
This portfolio is the tangent point between the Capital Market Line and the Effecient Frontier.\nIt represents the point where you can get the highest return given some risk, but also control the risk at the market\u0026rsquo;s Sharpe Ratio.\nBlack\u0026rsquo;s CAPM CAPM depends on a risk-free asset. Black of Black-Scholes Formula fame derived another formulation of CAPM which doesn\u0026rsquo;t dependent on a risk-free asset.\nZero-Beta Portfolio To work with Black\u0026rsquo;s CAPM, we first define \\(0m\\), the Zero-Beta Portfolio (used in the formula as \\(R_{0m}\\), the return of the Zero-Beta Portfolio).\nIt is defined to be the portfolio with the minimum variance of all portfolios not correlated with \\(m\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e is a method of portfolio selection analysis which focuses on \u003cem\u003emaximizing\u003c/em\u003e \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e given some fixed variance.\u003c/p\u003e\n\u003cp\u003eIt deals with optimal \u003ca href=\"#capital-market-line\"\u003eCapital Market Line\u003c/a\u003e, given here:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{p}] = r_{f}+\\frac{\\sigma_{p}}{\\sigma_{T}}\\qty(E[R_{T}]-r_{f})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich describes \\(E[R_{p}]\\), the expected return of an optimal portfolio in a market, given, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, \\(\\sigma_{p}\\) is the portfolio returns, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\u003c/p\u003e\n\u003ch2 id=\"sharpe-ratio\"\u003eSharpe Ratio\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e is a measure of the risk-adjusted performance of an asset\u0026mdash;given the rate of return of some risk-free asset.\u003c/p\u003e\n\u003cp\u003eIt is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS_{a} = 
\\frac{E[R_{a}-R_{b}]}{\\sigma_{a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(R_{a}\\) is the raw \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e of the asset, \\(R_{b}\\) is the risk-free rate of \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e, and \\(\\sigma_{a}\\) is the standard deviation of the asset \u0026ldquo;excess\u0026rdquo; return (i.e. standard deviation actual return - expected return\u0026mdash;how much extra there is).\u003c/p\u003e\n\u003ch2 id=\"minimum-variance-boundary\"\u003eMinimum-Variance Boundary\u003c/h2\u003e\n\u003cp\u003eFor a given a weighted-average portfolio of stocks their waited averages, and correlations between the stocks you can draw this curvy curve. Let pink dots represent the two securities in your portfolio, and various curves highlighting possible linear combinations thereof\u0026mdash;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_16-04-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLet\u0026rsquo;s observe the boundary conditions of this curve.\u003c/p\u003e\n\u003cp\u003eIf the two stocks are exactly negatively correlated, then the more risk you take the more return you have for one while less return you have for the other (hence, two straight divergent lines.)\u003c/p\u003e\n\u003cp\u003eIf you have an exactly correlated portfolio, the two assets will will form a line.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"#minimum-variance-boundary\"\u003eEffecient Frontier\u003c/a\u003e is the top half of this curve (i.e. 
higher risk/higher return is not a fun place to be, so that\u0026rsquo;s an inefficient frontier.)\u003c/p\u003e\n\u003ch2 id=\"capital-market-line\"\u003eCapital Market Line\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#capital-market-line\"\u003eCapital Market Line\u003c/a\u003e is a line that uses the \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e of a \u003cstrong\u003emarket\u003c/strong\u003e as a whole (how the market is performing against the risk-free rate) to analyze the performance of portfolio. It plots the performance of an \u0026ldquo;optimal portfolio\u0026rdquo; in a given market.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s construct first the \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e of a hypothetical market:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{R_{t}-r_{f}}{\\sigma_{t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\u003c/p\u003e\n\u003cp\u003eWe will multiply this value by the standard-deviation of your portfolio to calculate what the market claims should be your expected return. 
Then, we shift the line by the risk-free rate (as you are expected also to get that rate back in your return.\u003c/p\u003e\n\u003cp\u003eSo an \u0026ldquo;effecient\u0026rdquo; portfolio (getting the max expected return per unit risk as measured by the market \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e) should behave like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{p}] = r_{f}+\\frac{E[R_{T}]-r_{f}}{\\sigma_{T}}\\sigma_{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eagain, \\(R_{T}\\) is the market return, \\(r_{f}\\) is the risk-free rate, and \\(\\sigma_{t}\\) is standard-deviation of the market returns.\u003c/p\u003e\n\u003cp\u003eThe one liner is: \u0026ldquo;the return of your portfolio should be the base return by risk-free rate, plus how much excess risk you are taking on (and therefore \u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e you should be getting back by the \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e(how much you are expected to get (i.e. market \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e times your portfolio volatility), shifted back up by the risk-free rate.\u003c/p\u003e\n\u003ch2 id=\"sharpe-lintner-capm\"\u003eSharpe-Lintner CAPM\u003c/h2\u003e\n\u003cp\u003eA linear formulation of CAPM base on market-excess return (i.e. 
if you want to beat the market, you will have to sustain proportionally the same amount of risk.)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_18-44-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_18-45-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"tangency-portfolio\"\u003eTangency Portfolio\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-27_10-35-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThere is a portfolio, which is named the \u003ca href=\"#tangency-portfolio\"\u003eTangency Portfolio\u003c/a\u003e. This portfolio is the tangent point between the \u003ca href=\"#capital-market-line\"\u003eCapital Market Line\u003c/a\u003e and the \u003ca href=\"#minimum-variance-boundary\"\u003eEffecient Frontier\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIt represents the point where you can get the highest return given some risk, but also control the risk at the market\u0026rsquo;s \u003ca href=\"#sharpe-ratio\"\u003eSharpe Ratio\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"black-s-capm\"\u003eBlack\u0026rsquo;s CAPM\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e depends on a risk-free asset. 
Black of \u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e fame derived another formulation of CAPM which doesn\u0026rsquo;t dependent on a risk-free asset.\u003c/p\u003e\n\u003ch3 id=\"zero-beta-portfolio\"\u003eZero-Beta Portfolio\u003c/h3\u003e\n\u003cp\u003eTo work with \u003ca href=\"#black-s-capm\"\u003eBlack\u0026rsquo;s CAPM\u003c/a\u003e, we first define \\(0m\\), the \u003ca href=\"#zero-beta-portfolio\"\u003eZero-Beta Portfolio\u003c/a\u003e (used in the formula as \\(R_{0m}\\), the return of the \u003ca href=\"#zero-beta-portfolio\"\u003eZero-Beta Portfolio\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eIt is defined to be the portfolio with the minimum variance of all portfolios not correlated with \\(m\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_16-52-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcapm/","tags":null,"title":"Capital-Asset Pricing Model"},{"categories":null,"contents":"Pitfalls The bytes remains the same despite copying, so you can get too funky:\nint v = -12345; unsigned int uv = v; printf(\u0026#34;v = %d, uv = %d\\n\u0026#34;, v, uv); This prints \u0026ldquo;v = -12345, uv=4294954951\u0026rdquo;. 
As in: when you copy rvalues, the bit pattern gets copied and not the numerical number itself; so, it will overflow.\nYou can use U to force an signed quantity to be unsigned:\nunsigned int uv = -12345U; sign promotion If you have the nerve of putting a comparing things of different types (don\u0026rsquo;t), then, the signed quantities gets promoted to be unsigned.\nThat is, we get that:\n-1 \u0026lt; 0U is false because the -1 is promoted to an unsigned integer 2\u0026hellip;.7 \u0026gt; -2\u0026hellip;.7 is true because nothing is converted type size promotion If you have the nerve of putting a comparing things of different types (don\u0026rsquo;t), then, the smaller types get promoted to being a bigger types.\ncasting from small unsigned value to larger unsigned value just requires us prepending a buncha zeros as needed casting from a small signed value to larger signed value requires us repeating the left most value to fill out the rest of the variable (-1 = 11111, so bigger -1 = 11111 (repeated) 111) lasting from a large value to a smaller value will cause truncation type size truncation Take, for instance:\nint x = 53191; short sx = x; int y = sx; The short is 2 byte, which means that 2 of the left bytes of the int got dropped.\n","html":"\u003ch2 id=\"pitfalls\"\u003ePitfalls\u003c/h2\u003e\n\u003cp\u003eThe bytes remains the same despite copying, so you can get too funky:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12345\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eunsigned\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003euv\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;v = %d, uv = %d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003euv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis prints \u0026ldquo;v = -12345, uv=4294954951\u0026rdquo;. 
As in: \u003cstrong\u003ewhen you copy rvalues, the \u003ca href=\"/posts/kbhbinary_number_system/#bit\"\u003ebit\u003c/a\u003e pattern gets copied and not the numerical number itself\u003c/strong\u003e; so, it will overflow.\u003c/p\u003e\n\u003cp\u003eYou can use U to force an signed quantity to be unsigned:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eunsigned\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003euv\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12345U\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"sign-promotion\"\u003esign promotion\u003c/h2\u003e\n\u003cp\u003eIf you have the nerve of putting a comparing things of different types (don\u0026rsquo;t), then, the \u003cstrong\u003esigned quantities gets promoted to be unsigned\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThat is, we get that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e-1 \u0026lt; 0U is \u003cstrong\u003efalse\u003c/strong\u003e because the -1 is promoted to an \u003ca href=\"/posts/kbhbinary_number_system/#unsigned-integers\"\u003eunsigned integer\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e2\u0026hellip;.7 \u0026gt; -2\u0026hellip;.7 is \u003cstrong\u003etrue\u003c/strong\u003e because nothing is converted\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"type-size-promotion\"\u003etype size promotion\u003c/h2\u003e\n\u003cp\u003eIf you have the nerve of putting a 
comparing things of different types (don\u0026rsquo;t), then, the \u003cstrong\u003esmaller types get promoted to being a bigger types\u003c/strong\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecasting from small unsigned value to larger unsigned value just requires us prepending a buncha zeros as needed\u003c/li\u003e\n\u003cli\u003ecasting from a small signed value to larger signed value requires us repeating the left most value to fill out the rest of the variable (-1 = 11111, so bigger -1 = 11111 (repeated) 111)\u003c/li\u003e\n\u003cli\u003elasting from a large value to a smaller value will cause \u003cstrong\u003etruncation\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"type-size-truncation\"\u003etype size truncation\u003c/h2\u003e\n\u003cp\u003eTake, for instance:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e53191\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eshort\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esx\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe short is 2 \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003e, which means that 2 of the left \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es of the int got dropped.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcasting/","tags":null,"title":"casting"},{"categories":null,"contents":"categorical grammar is a grammar in the language of categories.\nconstituents \\(A\\), a set of \u0026ldquo;expressions\u0026rdquo; \\(C\\), a set of categories of \u0026ldquo;syntax\u0026rdquo; \\(\\varphi: A \\to Pow( C)\\), assigning each \\(a \\in A\\) to a set of categories \\(c \\subset C\\) \\(G\\): a family of sets of n-place operations where \\(n=1, 2, \\ldots\\) (what does a \u0026ldquo;3-place\u0026rdquo; op mean? idk) \\(R\\): a set of rules encoded as tuples: \\((f; \\{c_1, \\dots c_{k}\\}; c_{k+1})\\), where \\(f\\) is a \\(k\\) place operation, and \\(c_{j} \\in C\\) requirements The operations of this grammar behaves like so:\ngiven a rule \\(r \\in R\\), it tells you that given WLOG an expression in \\(c_{1}, c_2, \\ldots c_{k} \\in C\\) (i.e. 
they were mapped to that set \\(\\varphi\\)), \\(f\\) will map that set of expressions into the same new category \\(c_{k+1}\\).\nadditional information a basic categorical grammar one implementation of a basic categorical grammar is as follows:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcategorical_grammar/\"\u003ecategorical grammar\u003c/a\u003e is a \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e in the language of \u003ca href=\"/posts/kbhcategory/\"\u003ecategories\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A\\), a \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003e of \u0026ldquo;expressions\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(C\\), a set of \u003ca href=\"/posts/kbhcategory/\"\u003ecategories\u003c/a\u003e of \u0026ldquo;syntax\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(\\varphi: A \\to Pow( C)\\), assigning each \\(a \\in A\\) to a \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e of categories \\(c \\subset C\\)\u003c/li\u003e\n\u003cli\u003e\\(G\\): a family of sets of n-place operations where \\(n=1, 2, \\ldots\\) (what does a \u0026ldquo;3-place\u0026rdquo; op mean? idk)\u003c/li\u003e\n\u003cli\u003e\\(R\\): a set of rules encoded as tuples: \\((f; \\{c_1, \\dots c_{k}\\}; c_{k+1})\\), where \\(f\\) is a \\(k\\) place operation, and \\(c_{j} \\in C\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe operations of this grammar behaves like so:\u003c/p\u003e\n\u003cp\u003egiven a rule \\(r \\in R\\), it tells you that given WLOG an expression in \\(c_{1}, c_2, \\ldots c_{k} \\in C\\) (i.e. 
they were mapped to that set \\(\\varphi\\)), \\(f\\) will map that set of expressions into the same new category \\(c_{k+1}\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"a-basic-categorical-grammar\"\u003ea basic categorical grammar\u003c/h3\u003e\n\u003cp\u003eone implementation of a basic \u003ca href=\"/posts/kbhcategorical_grammar/\"\u003ecategorical grammar\u003c/a\u003e is as follows:\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategorical_grammar/","tags":null,"title":"categorical grammar"},{"categories":null,"contents":" categorical grammar ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcategorical_grammar/\"\u003ecategorical grammar\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategorical_grammars_index/","tags":null,"title":"Categorical Grammars Index"},{"categories":null,"contents":"A category is an abstract collection of objects\nconstituents collection of objects, where if \\(X\\) is an object of \\(C\\) we write \\(X \\in C\\) for a pair of objects \\(X, Y \\in C\\), a set of morphisms acting upon the objects which we call the homset additional information requirements there exists the identity morphism; that is, \\(\\forall X \\in C, \\exists I_{X}: X\\to X\\) morphisms are always composable: given \\(f: X\\to Y\\), and \\(g: Y\\to Z\\), exists \\(gf: X \\to Z\\) the identity morphism can compose in either direction: given \\(f: X \\to Y\\), then \\(f I_{x} = f = I_{y} f\\) morphism composition is associative: \\((hg)f=h(gf)\\) ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcategory/\"\u003ecategory\u003c/a\u003e is an abstract collection of \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecollection of \u003ca href=\"/posts/kbhobjects/\"\u003eobjects\u003c/a\u003e, where if 
\\(X\\) is an \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e of \\(C\\) we write \\(X \\in C\\)\u003c/li\u003e\n\u003cli\u003efor a pair of objects \\(X, Y \\in C\\), a set of \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003es acting upon the objects which we call the \u003ca href=\"/posts/kbhhomset/\"\u003ehomset\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethere exists the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e; that is, \\(\\forall X \\in C, \\exists I_{X}: X\\to X\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003es are always composable: given \\(f: X\\to Y\\), and \\(g: Y\\to Z\\), exists \\(gf: X \\to Z\\)\u003c/li\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e can compose in either direction: given \\(f: X \\to Y\\), then \\(f I_{x} = f = I_{y} f\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e composition is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e: \\((hg)f=h(gf)\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategory/","tags":null,"title":"category"},{"categories":null,"contents":"An abstract study of mathematics based on categories, functors, and natural transformations.\n","html":"\u003cp\u003eAn abstract study of mathematics based on \u003ca href=\"/posts/kbhcategory/\"\u003ecategories\u003c/a\u003e, \u003ca href=\"/posts/kbhfunctor/\"\u003efunctors\u003c/a\u003e, and \u003ca href=\"/posts/kbhnatural_transformations/\"\u003enatural 
transformations\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcategory_theory/","tags":null,"title":"category theory"},{"categories":null,"contents":"stock market crash of 1929 At October 24th, 1929, Black Thursday took place, and the stock market crashed. During this time, a record of 13 million shares traded, over $3b of losses. This began a 4 year slide of the global economy.\nCrash theories:\ndemand-driven theory Monetarist theory bank failures of 1929 Banks became irrelevant. Lots of risky loans given out, farmers are taken out huge loans and the banks can\u0026rsquo;t deal.\nother factors economy of credit tariffs ","html":"\u003ch2 id=\"stock-market-crash-of-1929\"\u003estock market crash of 1929\u003c/h2\u003e\n\u003cp\u003eAt October 24th, 1929, \u003ca href=\"/posts/kbhblack_thursday/\"\u003eBlack Thursday\u003c/a\u003e took place, and the stock market crashed. During this time, a record of 13 million shares traded, over $3b of losses. This began a 4 year slide of the global economy.\u003c/p\u003e\n\u003cp\u003eCrash theories:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdemand_driven_theory/\"\u003edemand-driven theory\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bank-failures-of-1929\"\u003ebank failures of 1929\u003c/h2\u003e\n\u003cp\u003eBanks became irrelevant. 
Lots of risky loans given out, farmers are taken out huge loans and the banks can\u0026rsquo;t deal.\u003c/p\u003e\n\u003ch2 id=\"other-factors\"\u003eother factors\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheconomy_of_credit/\"\u003eeconomy of credit\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtariffs/\"\u003etariffs\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcauses_of_the_great_depression/","tags":null,"title":"causes of the Great Depression"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcell/","tags":null,"title":"cell"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcell_free_biocatalysis/","tags":null,"title":"cell-free biocatalysis"},{"categories":null,"contents":"\u0026ldquo;If sample size is large and IID, the sampling distribution is normal. The larger \\(N\\) is, the more normal the resulting shape is.\u0026rdquo;\nWe can use the central limit theorem to estimate the sum of IID random variables:\nLet there be \\(n\\) random variables named \\(X_{j}\\), they are IID, and they have \\(E[x] = \\mu\\), and \\(Var(x) = \\sigma^{2}\\)\nWe have that:\n\\begin{equation} \\sum_{i=1}^{N} X_{n} \\sim N(n\\mu, n \\sigma^{2}), \\text{as}\\ n \\to \\infty \\end{equation}\nThat, as long as you normalize a random variable and have enough of it, you get closer and closer to the normal distribution.\nNotably, for the central limit theorem to hold, the variance has to be finite (that the results vary in a certain finite value \\(\\sigma\\). With that \\(\\sigma\\) value, we can see above that the central limit theorem will eventually converge to the normal. 
THis is useful for the Random Walk Hypothesis.\nREMEMBER THAT IF YOU ARE APPROXIMATIGN DISCRETE THINGS YOU NEED continuity correction!!!\n","html":"\u003cp\u003e\u0026ldquo;If sample size is large and \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, the sampling distribution is normal. The larger \\(N\\) is, the more normal the resulting shape is.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe can use the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e to estimate the sum of \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003eLet there be \\(n\\) \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es named \\(X_{j}\\), they are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, and they have \\(E[x] = \\mu\\), and \\(Var(x) = \\sigma^{2}\\)\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=1}^{N} X_{n} \\sim N(n\\mu, n \\sigma^{2}), \\text{as}\\ n \\to \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat, as long as you normalize a random variable and have enough of it, you get closer and closer to the normal distribution.\u003c/p\u003e\n\u003cp\u003eNotably, for the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e to hold, the variance has to be finite (that the results vary in a certain finite value \\(\\sigma\\). With that \\(\\sigma\\) value, we can see above that the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e will eventually converge to the normal. 
THis is useful for the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003eREMEMBER THAT IF YOU ARE APPROXIMATIGN DISCRETE THINGS YOU NEED \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e!!!\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcentral_limit_theorem/","tags":null,"title":"central limit theorem"},{"categories":null,"contents":" 80% of the human genome is actually transcribed very little \u0026ldquo;junk DNA\u0026rdquo; 40% IncRNA are gene specific ","html":"\u003cul\u003e\n\u003cli\u003e80% of the human genome is actually transcribed\u003c/li\u003e\n\u003cli\u003every little \u0026ldquo;junk DNA\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e40% \u003ca href=\"\"\u003eIncRNA\u003c/a\u003e are gene specific\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchanges_to_central_dogma/","tags":null,"title":"changes to central dogma"},{"categories":null,"contents":"char is a character that represents a glypth:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e is a character that represents a glypth:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-05-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhchar/","tags":null,"title":"char"},{"categories":null,"contents":"The polynomial given by the determinant of:\n\\begin{equation} det(A-\\lambda I) \\end{equation}\nfor some Linear Map \\(A\\). Solutions for \\(\\lambda\\) are the eigenvalues. 
This is because something is an eigenvalue IFF \\((A-\\lambda I)v = 0\\) for some \\(\\lambda, v\\), so we need \\((A-\\lambda I)\\) to be singular.\nCharacteristic polynomial of a 2x2 matrix is given by \\(\\lambda^{2}-tr(A)\\lambda + det(A)\\).\n","html":"\u003cp\u003eThe polynomial given by the determinant of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ndet(A-\\lambda I)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(A\\). Solutions for \\(\\lambda\\) are the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es. This is because something is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e IFF \\((A-\\lambda I)v = 0\\) for some \\(\\lambda, v\\), so we need \\((A-\\lambda I)\\) to be singular.\u003c/p\u003e\n\u003cp\u003eCharacteristic \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e of a 2x2 matrix is given by \\(\\lambda^{2}-tr(A)\\lambda + det(A)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcharacteristic_polynomial/","tags":null,"title":"characteristic polynomial"},{"categories":null,"contents":"an atom is said to be charged when there is an imbalance between its number of protons and electrons.\nadditional information units of charge charge is measured in SI unit \\(C\\), coulomb. However, we are often dealing with \\(e\\), the charge of an electron (as ultimate that\u0026rsquo;s the principle way by which charge moves around). 
\\(e \\approx 1.6 \\times 10^{-19} C\\).\nnet charge can be neither created nor destroyed Unsurprisingly, though you can move electrons around, they will be conserved across a system.\n","html":"\u003cp\u003ean atom is said to be \u003ca href=\"/posts/kbhcharged/\"\u003echarged\u003c/a\u003e when there is an imbalance between its number of \u003ca href=\"/posts/kbhprotons/\"\u003eproton\u003c/a\u003es and \u003ca href=\"/posts/kbhelectron/\"\u003eelectron\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"units-of-charge\"\u003eunits of charge\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e is measured in SI unit \\(C\\), coulomb. However, we are often dealing with \\(e\\), the charge of an \u003ca href=\"/posts/kbhelectron/\"\u003eelectron\u003c/a\u003e (as ultimate that\u0026rsquo;s the principle way by which charge moves around). \\(e \\approx 1.6 \\times 10^{-19} C\\).\u003c/p\u003e\n\u003ch3 id=\"net-charge-can-be-neither-created-nor-destroyed\"\u003enet charge can be neither created nor destroyed\u003c/h3\u003e\n\u003cp\u003eUnsurprisingly, though you can move \u003ca href=\"/posts/kbhelectron/\"\u003eelectron\u003c/a\u003es around, they will be conserved across a system.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcharged/","tags":null,"title":"charged"},{"categories":null,"contents":"Two main Dialogue Systems architectures:\nframe based systems: talk to users + accomplish specific tasks LLM: reasoning as agents Dialogue Systems vs Chatbot Previously, when we say Chatbot we mean task-based systems\nhumans and chat humans tend to think of Dialogue Systems as human-like even if they know its not. 
this makes users more prone to share private information and worry less about its disclosure.\nELIZA see ELIZA\nLLM Chatbots Training Corpus C4: colossal clean crawled corpus\npatent, wikipedia, news\nChatbots EmphaticDialogues SaFeRDialogues Pseudo-conversations: reddit, twitter, weibo Fine-Tuning quality: improving sensible and interesting responses safety: prevention of suggesting harmful actions IFT: perhaps you can add positive data as fine tuning as a part of instruction-finetuning step.\nFiltering: build a filter for whether something is safe/unsafe, etc.\nRetrieval Augmented Generation call search engine get back a retrieved passages shove them into prompt \u0026ldquo;based on this tasks, answer:\u0026rdquo; we can make Chatbots use RAG by adding \u0026ldquo;pseudo-participants\u0026rdquo; to make the chat bots, which the system should add.\nEvaluation task based systems: measure task performance chatbot: enjoyability by humans we evaluate chatbots by asking a human to assign a score, and observer is a third party that assigns a score via a transcript of a conversation.\nparticipants scoring interact with 6 turns, then score:\navoiding repetition interestingness sensemaking fluency listening inquisitiveness humanness engagingness ACUTE-EVAL: choosing who you would like to speak to\nadversarial evaluation train a human/robot classifier, use it, use the inverse of its score at the metric of the chat bot\ntask evaluatino measure overall task success, or measure slot error rate\ndesign system design Don\u0026rsquo;t build Frankenstein: safety (ensure people aren\u0026rsquo;t crashing cars), limiting representation harm (don\u0026rsquo;t demean social groups), privacy\nstudy users and task what are their values? how do they interact?\nbuild simulations wizard of oz study: observe user interaction with a HUMAN pretending to be a chat bot\ntest the design test on users\ninfo leakage accidentally leaking information (microphone, etc.) 
intentionally leaking information due to advertising, etc. ","html":"\u003cp\u003eTwo main \u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e architectures:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eframe based\u003c/strong\u003e systems: talk to users + accomplish specific tasks\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLLM\u003c/strong\u003e: reasoning as agents\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dialogue-systems--kbhchatbot-dot-md--vs-chatbot--kbhchatbot-dot-md\"\u003e\u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e vs \u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ePreviously, when we say \u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003e we mean task-based systems\u003c/p\u003e\n\u003ch2 id=\"humans-and-chat\"\u003ehumans and chat\u003c/h2\u003e\n\u003cp\u003ehumans tend to think of \u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e as human-like even if they know its not. 
this makes users more prone to share private information and worry less about its disclosure.\u003c/p\u003e\n\u003ch2 id=\"eliza--kbheliza-dot-md\"\u003e\u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"llm-chatbots\"\u003eLLM Chatbots\u003c/h2\u003e\n\u003ch3 id=\"training-corpus\"\u003eTraining Corpus\u003c/h3\u003e\n\u003cp\u003eC4: colossal clean crawled corpus\u003c/p\u003e\n\u003cp\u003epatent, wikipedia, news\u003c/p\u003e\n\u003ch3 id=\"chatbots\"\u003eChatbots\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eEmphaticDialogues\u003c/li\u003e\n\u003cli\u003eSaFeRDialogues\u003c/li\u003e\n\u003cli\u003ePseudo-conversations: reddit, twitter, weibo\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"fine-tuning\"\u003eFine-Tuning\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003equality\u003c/strong\u003e: improving sensible and interesting responses\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esafety\u003c/strong\u003e: prevention of suggesting harmful actions\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eIFT\u003c/strong\u003e: perhaps you can add positive data as fine tuning as a part of instruction-finetuning step.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eFiltering\u003c/strong\u003e: build a filter for whether something is safe/unsafe, etc.\u003c/p\u003e\n\u003ch3 id=\"retrieval-augmented-generation\"\u003eRetrieval Augmented Generation\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003ecall search engine\u003c/li\u003e\n\u003cli\u003eget back a retrieved passages\u003c/li\u003e\n\u003cli\u003eshove them into prompt\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;based on this tasks, answer:\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ewe can make \u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003es use \u003ca href=\"#retrieval-augmented-generation\"\u003eRAG\u003c/a\u003e by adding 
\u0026ldquo;pseudo-participants\u0026rdquo; to make the chat bots, which the system should add.\u003c/p\u003e\n\u003ch2 id=\"evaluation\"\u003eEvaluation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003etask based systems\u003c/strong\u003e: measure task performance\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003echatbot\u003c/strong\u003e: enjoyability by humans\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe evaluate chatbots by asking a human to assign a score, and observer is a third party that assigns a score via a transcript of a conversation.\u003c/p\u003e\n\u003ch3 id=\"participants-scoring\"\u003eparticipants scoring\u003c/h3\u003e\n\u003cp\u003einteract with 6 turns, then score:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eavoiding repetition\u003c/li\u003e\n\u003cli\u003einterestingness\u003c/li\u003e\n\u003cli\u003esensemaking\u003c/li\u003e\n\u003cli\u003efluency\u003c/li\u003e\n\u003cli\u003elistening\u003c/li\u003e\n\u003cli\u003einquisitiveness\u003c/li\u003e\n\u003cli\u003ehumanness\u003c/li\u003e\n\u003cli\u003eengagingness\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eACUTE-EVAL: \u003cstrong\u003echoosing who you would like to speak to\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"adversarial-evaluation\"\u003eadversarial evaluation\u003c/h3\u003e\n\u003cp\u003etrain a human/robot classifier, use it, use the inverse of its score at the metric of the chat bot\u003c/p\u003e\n\u003ch3 id=\"task-evaluatino\"\u003etask evaluatino\u003c/h3\u003e\n\u003cp\u003emeasure overall task success, or measure slot error rate\u003c/p\u003e\n\u003ch2 id=\"design-system-design\"\u003edesign system design\u003c/h2\u003e\n\u003cp\u003eDon\u0026rsquo;t build \u003cstrong\u003eFrankenstein\u003c/strong\u003e: safety (ensure people aren\u0026rsquo;t crashing cars), limiting representation harm (don\u0026rsquo;t demean social groups), privacy\u003c/p\u003e\n\u003ch3 id=\"study-users-and-task\"\u003estudy users and task\u003c/h3\u003e\n\u003cp\u003ewhat 
are their values? how do they interact?\u003c/p\u003e\n\u003ch3 id=\"build-simulations\"\u003ebuild simulations\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003ewizard of oz\u003c/strong\u003e study: observe user interaction with a \u003cstrong\u003eHUMAN\u003c/strong\u003e pretending to be a chat bot\u003c/p\u003e\n\u003ch3 id=\"test-the-design\"\u003etest the design\u003c/h3\u003e\n\u003cp\u003etest on users\u003c/p\u003e\n\u003ch3 id=\"info-leakage\"\u003einfo leakage\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eaccidentally leaking information (microphone, etc.)\u003c/li\u003e\n\u003cli\u003eintentionally leaking information due to advertising, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchatbot/","tags":null,"title":"Chatbot"},{"categories":null,"contents":"\\(\\chi^2\\) is a test statistic for hypothesis testing.\nmotivation for chi-square The motivation for chi-square is because t-test (means, \u0026ldquo;is the value significantly different\u0026rdquo;) and z-test (proportion, \u0026ldquo;is the incidence percentage significantly different\u0026rdquo;) all don\u0026rsquo;t really cover categorical data samples: \u0026ldquo;the categories are distributed in this way.\u0026rdquo;\nTake, for instance, if we want to test the following null hypothesis:\nCategory Expected Actual A 25 20 B 25 20 C 25 25 D 25 25 \\(\\alpha = 0.05\\). What do we use to test this??\n(hint: we can\u0026rsquo;t, unless\u0026hellip;)\nEnter chi-square.\nchi-square test chi-square test is a hypothesis test for categorical data. It is responsible to translate differences in distributions into p-values for significance.\nBegin by calculating chi-square after you confirmed that your experiment meets conditions for inference (chi-square test).\nOnce you have that, look it up at a chi-square table to figure the appropriate p-value. 
Then, proceed with normal hypothesis testing.\nBecause of this categorical nature, chi-square test can also be used as a homogeneity test.\nconditions for inference (chi-square test) random sampling expected value for data must be \\(\\geq 5\\) sampling should be \\(\u0026lt;10\\%\\) or independent chi-square test for homogeneity The chi-square test for homogeneity is a test for homogeneity via the chi-square statistic.\nTo do this, we take the probability of a certain outcome happening\u0026mdash;if distributed equally\u0026mdash;and apply it to the samples to compare.\nTake, for instance:\nSubject Right Hand Left Hand Total STEM 30 10 40 Humanities 15 25 40 Equal 15 5 20 Total 60 40 100 We will then figure the expected outcomes:\nRight Left 24 16 24 16 12 8 Awesome! Now, calculate chi-square with each cell of measured outcomes. Calculate degrees of freedom by (num_row-1)*(num_col-1).\nchi-square test for independence The chi-square test for independence is a test designed to accept-reject the null hypothesis of \u0026ldquo;no association between two variables.\u0026rdquo;\nEssentially, you leverage the fact that \u0026ldquo;AND\u0026rdquo; relationships are multiplicative probabilities. 
Therefore, the expected outcomes are simply the multiplied/fraction of sums:\ncalculating chi-square \\begin{equation} \\chi^2 = \\frac{(\\hat{x}_0-x_0)^2}{x_0} +\\frac{(\\hat{x}_1-x_1)^2}{x_1} + \\cdots + \\frac{(\\hat{x}_n-x_n)^2}{x_n} \\end{equation}\nWhere, \\(\\hat{x}_i\\) is the measured value and \\(x_i\\) is the expected value.\n","html":"\u003cp\u003e\\(\\chi^2\\) is a test \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e for \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"motivation-for-chi-square\"\u003emotivation for chi-square\u003c/h2\u003e\n\u003cp\u003eThe motivation for \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e is because \u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e (means, \u0026ldquo;is the value significantly different\u0026rdquo;) and \u003ca href=\"/posts/kbhz_test/\"\u003ez-test\u003c/a\u003e (proportion, \u0026ldquo;is the incidence percentage significantly different\u0026rdquo;) all don\u0026rsquo;t really cover categorical data samples: \u0026ldquo;the categories are distributed in this way.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eTake, for instance, if we want to test the following \u003ca href=\"/posts/kbhhypothesis_testing/#null-hypothesis\"\u003enull 
hypothesis\u003c/a\u003e:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eCategory\u003c/th\u003e\n\u003cth\u003eExpected\u003c/th\u003e\n\u003cth\u003eActual\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eA\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e20\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eB\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e20\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eC\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eD\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\\(\\alpha = 0.05\\). What do we use to test this??\u003c/p\u003e\n\u003cp\u003e(hint: we can\u0026rsquo;t, unless\u0026hellip;)\u003c/p\u003e\n\u003cp\u003eEnter \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"chi-square-test\"\u003echi-square test\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#chi-square-test\"\u003echi-square test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e for categorical data. It is responsible to translate differences in distributions into \u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003es for significance.\u003c/p\u003e\n\u003cp\u003eBegin by \u003ca href=\"#calculating-chi-square\"\u003ecalculating chi-square\u003c/a\u003e after you confirmed that your experiment meets \u003ca href=\"#conditions-for-inference--chi-square-test\"\u003econditions for inference (chi-square test)\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eOnce you have that, look it up at a chi-square table to figure the appropriate \u003ca href=\"/posts/kbhhypothesis_testing/#p-value\"\u003ep-value\u003c/a\u003e. 
Then, proceed with normal \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBecause of this categorical nature, \u003ca href=\"#chi-square-test\"\u003echi-square test\u003c/a\u003e can also be used as a homogeneity test.\u003c/p\u003e\n\u003ch2 id=\"conditions-for-inference--chi-square-test\"\u003econditions for inference (chi-square test)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003erandom sampling\u003c/li\u003e\n\u003cli\u003eexpected value for data must be \\(\\geq 5\\)\u003c/li\u003e\n\u003cli\u003esampling should be \\(\u0026lt;10\\%\\) or independent\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"chi-square-test-for-homogeneity\"\u003echi-square test for homogeneity\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#chi-square-test-for-homogeneity\"\u003echi-square test for homogeneity\u003c/a\u003e is a test for \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e via the \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo do this, we take the probability of a certain outcome happening\u0026mdash;if distributed equally\u0026mdash;and apply it to the samples to compare.\u003c/p\u003e\n\u003cp\u003eTake, for instance:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSubject\u003c/th\u003e\n\u003cth\u003eRight Hand\u003c/th\u003e\n\u003cth\u003eLeft 
Hand\u003c/th\u003e\n\u003cth\u003e\u003cstrong\u003e\u003cstrong\u003eTotal\u003c/strong\u003e\u003c/strong\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSTEM\u003c/td\u003e\n\u003ctd\u003e30\u003c/td\u003e\n\u003ctd\u003e10\u003c/td\u003e\n\u003ctd\u003e40\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eHumanities\u003c/td\u003e\n\u003ctd\u003e15\u003c/td\u003e\n\u003ctd\u003e25\u003c/td\u003e\n\u003ctd\u003e40\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eEqual\u003c/td\u003e\n\u003ctd\u003e15\u003c/td\u003e\n\u003ctd\u003e5\u003c/td\u003e\n\u003ctd\u003e20\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cstrong\u003e\u003cstrong\u003eTotal\u003c/strong\u003e\u003c/strong\u003e\u003c/td\u003e\n\u003ctd\u003e60\u003c/td\u003e\n\u003ctd\u003e40\u003c/td\u003e\n\u003ctd\u003e100\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe will then figure the expected outcomes:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eRight\u003c/th\u003e\n\u003cth\u003eLeft\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e24\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e24\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e12\u003c/td\u003e\n\u003ctd\u003e8\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eAwesome! Now, \u003ca href=\"#calculating-chi-square\"\u003ecalculate chi-square\u003c/a\u003e with each cell of measured outcomes. 
Calculate degrees of freedom by (num_row-1)*(num_col-1).\u003c/p\u003e\n\u003ch2 id=\"chi-square-test-for-independence\"\u003echi-square test for independence\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#chi-square-test-for-independence\"\u003echi-square test for independence\u003c/a\u003e is a test designed to accept-reject the null hypothesis of \u0026ldquo;no association between two variables.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eEssentially, you leverage the fact that \u0026ldquo;AND\u0026rdquo; relationships are multiplicative probabilities. Therefore, the expected outcomes are simply the multiplied/fraction of sums:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-37-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"calculating-chi-square\"\u003ecalculating chi-square\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\chi^2 = \\frac{(\\hat{x}_0-x_0)^2}{x_0} +\\frac{(\\hat{x}_1-x_1)^2}{x_1} + \\cdots + \\frac{(\\hat{x}_n-x_n)^2}{x_n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere, \\(\\hat{x}_i\\) is the measured value and \\(x_i\\) is the expected value.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchi_square/","tags":null,"title":"chi-square"},{"categories":null,"contents":"Chiara Marletto is an physicist working on Quantum mechanics working in D. of Physics, Wolfson College, University of Oxford.\nSubfield: constructor theory. She studies quantum theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhchiara_marletto/\"\u003eChiara Marletto\u003c/a\u003e is an physicist working on Quantum mechanics working in D. of Physics, Wolfson College, University of Oxford.\u003c/p\u003e\n\u003cp\u003eSubfield: \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e. 
She studies \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchiara_marletto/","tags":null,"title":"Chiara Marletto"},{"categories":null,"contents":"I was digging through my OneDrive recently for work, and found this piece of writing.\nThere is naught but a small, dirt-filled puddle in front of this lawn. Yet only here – by the puddle – can Gary find a small, much-needed respite from the neverending work. Of course, without the hours he has committed to the sweatshop, his mother would have died ages ago from colora.\nBut how does it matter now? Rarely now \u0026ndash; once every year \u0026ndash; does he even earn the privilege to exit the heavily-guarded area to visit his mother; and how little time he has during such visits: each visit seems to just be a long walk, a knock, a kiss on the cheek \u0026ndash; then back to the workhouse he goes.\nNo, he must push on. Focusing his tired mind back to the concrete structure in front of him, he sees the supervisor hollering the same old phrase. Back to work! Back to work! Move! Move! Break is over!\nWhat is this break, even? The notable lack of timepieces around the lawn means the important task of timekeeping falls to the supervisors \u0026ndash; who, notably, have an obvious interest in shortening the break. And ‘lo, the breaks are shortened: Gary has always remembered the session as until the bottom of the clock, yet doubtless he will find himself staring at a clock hand pointing to the horizontal upon walking into the building.\nHe can do nothing now: there is one \u0026ndash; and the ultimate \u0026ndash; sanction for not listening to the supervisor, and he wants nothing to do with it: beating. Beating that gets harder, faster, as time progresses is the one, the only, and the final answer to all cases of disobedience. 
Heck, if the supervisor demands time run backwards during breaks, Cronus will listen and obey \u0026ndash; for even he, a god, is probably as scared of these “correctional sessions” as anyone else.\nThere is, then, no time to be wasted. Up towards the factory he walks \u0026ndash; joined by hundreds of others suffering a similar fate, doing the same tedious, repetitive tasks as him. If he hadn\u0026rsquo;t been made dependent \u0026ndash; addicted! in fact \u0026ndash; to the meager wages he received, he could have achieved greatness the world has yet to see.\nBut, alas, towards the factory he walks, steps. Timidly, slowly, shuffling his feet quickly enough so as to not anger the increasingly stressed supervisor. Stressed understandably, perhaps, due to the increasing external talk of organizing such congregations as the “Child Labour Committee”, which Gary himself isn’t sure to what extent he should trust.\nThe quarter-bell strikes. Indeed, his suspicions were correct \u0026ndash; yet superfluous. When all was thought and done, he couldn\u0026rsquo;t possibly have even produced the thought of defying the wishes of the supervisor, let along execute it. But now, he has not even the physical capacity to escape \u0026ndash; the door was locked, and locked means work eternal \u0026ndash; at least until the next meager halt seemingly few decades later.\nSuddenly, a clicking occurs. A machine screeching to a halt, perhaps due to the same overwork and misuse. In walks the supervisor: nevermind that: the work must go on!\nIt is now down to the same routine \u0026ndash; picking the smallest, nimblist of the bunch \u0026ndash; Gary, of course \u0026ndash; to, through great persuasion and threatenings of beatings, climb under the mechanical beast and undo the mess. It’s a dance of oil and gear that Gary has rehearsed many times before, each time dreading the next. 
Yet he still brought himself to perform the task time and time again for it, although dreadful, seems to be heavenly compared to the alternate: getting the “correctional session.”\nDown the cover he goes: a little pulling there, a little dabbing there, and Hark! The machine jumped to a start with a splash of brilliant pink hue, announcing \u0026ndash; celebrating, it seems \u0026ndash; itself as Gary’s final quarters.\nNevermind that: the work must go on!\n","html":"\u003cp\u003eI was digging through my OneDrive recently for work, and found this piece of writing.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThere is naught but a small, dirt-filled puddle in front of this lawn. Yet only here – by the puddle – can Gary find a small, much-needed respite from the neverending work. Of course, without the hours he has committed to the sweatshop, his mother would have died ages ago from colora.\u003c/p\u003e\n\u003cp\u003eBut how does it matter now? Rarely now \u0026ndash; once every year \u0026ndash; does he even earn the privilege to exit the heavily-guarded area to visit his mother; and how little time he has during such visits: each visit seems to just be a long walk, a knock, a kiss on the cheek \u0026ndash; then back to the workhouse he goes.\u003c/p\u003e\n\u003cp\u003eNo, he must push on. Focusing his tired mind back to the concrete structure in front of him, he sees the supervisor hollering the same old phrase. Back to work! Back to work! Move! Move! Break is over!\u003c/p\u003e\n\u003cp\u003eWhat is this break, even? The notable lack of timepieces around the lawn means the important task of timekeeping falls to the supervisors \u0026ndash; who, notably, have an obvious interest in shortening the break. 
And ‘lo, the breaks are shortened: Gary has always remembered the session as until the bottom of the clock, yet doubtless he will find himself staring at a clock hand pointing to the horizontal upon walking into the building.\u003c/p\u003e\n\u003cp\u003eHe can do nothing now: there is one \u0026ndash; and the ultimate \u0026ndash; sanction for not listening to the supervisor, and he wants nothing to do with it: beating. Beating that gets harder, faster, as time progresses is the one, the only, and the final answer to all cases of disobedience. Heck, if the supervisor demands time run backwards during breaks, Cronus will listen and obey \u0026ndash; for even he, a god, is probably as scared of these “correctional sessions” as anyone else.\u003c/p\u003e\n\u003cp\u003eThere is, then, no time to be wasted. Up towards the factory he walks \u0026ndash; joined by hundreds of others suffering a similar fate, doing the same tedious, repetitive tasks as him. If he hadn\u0026rsquo;t been made dependent \u0026ndash; addicted! in fact \u0026ndash; to the meager wages he received, he could have achieved greatness the world has yet to see.\u003c/p\u003e\n\u003cp\u003eBut, alas, towards the factory he walks, steps. Timidly, slowly, shuffling his feet quickly enough so as to not anger the increasingly stressed supervisor. Stressed understandably, perhaps, due to the increasing external talk of organizing such congregations as the  “Child Labour Committee”, which Gary himself isn’t sure to what extent he should trust.\u003c/p\u003e\n\u003cp\u003eThe quarter-bell strikes. Indeed, his suspicions were correct \u0026ndash; yet superfluous. When all was thought and done, he couldn\u0026rsquo;t possibly have even produced the thought of defying the wishes of the supervisor, let along execute it. 
But now, he has not even the physical capacity to escape \u0026ndash; the door was locked, and locked means work eternal \u0026ndash; at least until the next meager halt seemingly few decades later.\u003c/p\u003e\n\u003cp\u003eSuddenly, a clicking occurs. A machine screeching to a halt, perhaps due to the same overwork and misuse. In walks the supervisor: nevermind that: the work must go on!\u003c/p\u003e\n\u003cp\u003eIt is now down to the same routine \u0026ndash; picking the smallest, nimblist of the bunch \u0026ndash; Gary, of course \u0026ndash; to, through great persuasion and threatenings of beatings, climb under the mechanical beast and undo the mess. It’s a dance of oil and gear that Gary has rehearsed many times before, each time dreading the next. Yet he still brought himself to perform the task time and time again for it, although dreadful, seems to be heavenly compared to the alternate: getting the “correctional session.”\u003c/p\u003e\n\u003cp\u003eDown the cover he goes: a little pulling there, a little dabbing there, and Hark! The machine jumped to a start with a splash of brilliant pink hue, announcing \u0026ndash; celebrating, it seems \u0026ndash; itself as Gary’s final quarters.\u003c/p\u003e\n\u003cp\u003eNevermind that: the work must go on!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchild_labour/","tags":null,"title":"Child Labour: A Short Story"},{"categories":null,"contents":"DOI: 10.3389/fpsyg.2020.623237\nOne-Liner (thrice) Used features extracted by VGGish from raw acoustic audio against a SVM, Perceptron, 1NN; got \\(59.1\\%\\) classif. accuracy for dementia Then, trained a CNN on raw wave-forms and got \\(63.6\\%\\) accuracy Then, they fine-tuned a VGGish on the raw wave-forms and didn\u0026rsquo;t report their results and just said \u0026ldquo;we discovered that audio transfer learning with a pretrained VGGish feature extractor performs better\u0026rdquo; Gah! 
Novelty Threw the kitchen sink to process only raw acoustic input, most of it missed; wanted 0 human involvement. It seems like last method is promising.\nNotable Methods fine-tuning VGGish against raw acoustic waveforms to build a classifier via a CNN.\nKey Figs Their fancy network Its just a CNN afaik with much maxpooling; could have used some skipped connections. I wonder if it overfit?\nTheir actual training results Looks generally pretty bad, but a run of their DemCNN seem to have gotten state-of-the-art results. Not sure where transfer training data went.\nNew Concepts VGGish Notes Accuracy question According to this the state of the art at the time from pure audio was 56.6%? For a binary classifier isn\u0026rsquo;t that just doing nothing?\nSo somebody did get better before?\n","html":"\u003cp\u003eDOI: 10.3389/fpsyg.2020.623237\u003c/p\u003e\n\u003ch2 id=\"one-liner--thrice\"\u003eOne-Liner (thrice)\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eUsed features extracted by \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e from raw acoustic audio against a SVM, Perceptron, 1NN; got \\(59.1\\%\\) classif. accuracy for dementia\u003c/li\u003e\n\u003cli\u003eThen, trained a CNN on raw wave-forms and got \\(63.6\\%\\) accuracy\u003c/li\u003e\n\u003cli\u003eThen, they fine-tuned a \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e on the raw wave-forms and didn\u0026rsquo;t report their results and just said \u0026ldquo;we discovered that audio transfer learning with a pretrained VGGish feature extractor performs better\u0026rdquo; Gah!\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eThrew the kitchen sink to process only raw acoustic input, most of it missed; wanted 0 human involvement. 
It seems like last method is promising.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003efine-tuning \u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e against raw acoustic waveforms to build a classifier via a CNN.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"their-fancy-network\"\u003eTheir fancy network\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-37-51_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIts just a CNN afaik with much maxpooling; could have used some skipped connections. I wonder if it overfit?\u003c/p\u003e\n\u003ch3 id=\"their-actual-training-results\"\u003eTheir actual training results\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-38-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLooks generally pretty bad, but a run of their DemCNN seem to have gotten state-of-the-art results. Not sure where transfer training data went.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003ch3 id=\"accuracy-question\"\u003eAccuracy question\u003c/h3\u003e\n\u003cp\u003eAccording to this the state of the art at the time from pure audio was 56.6%? 
For a binary classifier isn\u0026rsquo;t that just doing nothing?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-39-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo somebody did get better before?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchlasta_2021/","tags":["ntj"],"title":"Chlasta 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhchromatin/","tags":null,"title":"chromatin"},{"categories":null,"contents":"civil rights movement starting civil rights moment was kicked off by the Rosa Parks incident, which caused the Montomery Bus Boycott.\nMartin Luther King capitalized the incident to kick start civil rights movement. He employed the method of nonviolence movement.\neducational integration in the civil rights movement K-12 disintegration: Brown v. Board of Education University of Georgia was the first disintegrated university in the south service integration in the civil rights movement Lunch counter boycotts. Nashville became the first desegregated lunch counter.\nSNICK SNICK is a student organization founded by Ella Baker in the civil rights movement that sent students into the most dangerous areas of segregation and leading protests.\nMotown Records Motown Records is an African-American owned Detroit record business\nMalcom X A civil rights movement activist, calling for more violent forms of protest and prosecuting specific white actions. Malcom X and Martin Luther King contradicted each other in methods of active persecution vs. nonviolent integration.\nBloody Sunday Bloody Sunday was a voting rights march from Selma to Montgomery. Peaceful protesters were attacked with nightsticks and tear gas. 
The event was widely televised: transforming the movement as a televised morality play.\nNonviolence helps getting the clergy leaders as a form of leveraging religion in a show of unity.\nBlack Power Movement A new chapter in the civil rights movement which incorporated less of the elements of integration but instead in wanted more sense of self-determination. nonviolence movement, which the Black Power Movement overrided, had ran its course when Martin Luther King was assassinated.\n","html":"\u003ch2 id=\"civil-rights-movement-starting\"\u003ecivil rights movement starting\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights\u003c/a\u003e moment was kicked off by the \u003ca href=\"/posts/kbhrosa_parks/\"\u003eRosa Parks\u003c/a\u003e incident, which caused the \u003ca href=\"/posts/kbhmontomery_bus_boycott/\"\u003eMontomery Bus Boycott.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e capitalized the incident to kick start civil rights movement. He employed the method of \u003ca href=\"/posts/kbhnonviolence_movement/\"\u003enonviolence movement\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"educational-integration-in-the-civil-rights-movement\"\u003eeducational integration in the civil rights movement\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eK-12 disintegration: \u003ca href=\"/posts/kbhbrown_v_board_of_education/\"\u003eBrown v. Board of Education\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuniversity_of_georgia/\"\u003eUniversity of Georgia\u003c/a\u003e was the first disintegrated university in the south\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"service-integration-in-the-civil-rights-movement--kbhcivil-rights-dot-md\"\u003eservice integration in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eLunch counter boycotts. 
Nashville became the first desegregated lunch counter.\u003c/p\u003e\n\u003ch2 id=\"snick\"\u003eSNICK\u003c/h2\u003e\n\u003cp\u003eSNICK is a student organization founded by \u003ca href=\"/posts/kbhella_baker/\"\u003eElla Baker\u003c/a\u003e in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e that sent students into the most dangerous areas of segregation and leading protests.\u003c/p\u003e\n\u003ch2 id=\"motown-records\"\u003eMotown Records\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#motown-records\"\u003eMotown Records\u003c/a\u003e is an African-American owned Detroit record business\u003c/p\u003e\n\u003ch2 id=\"malcom-x\"\u003eMalcom X\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e activist, calling for more violent forms of protest and prosecuting specific white actions. \u003ca href=\"#malcom-x\"\u003eMalcom X\u003c/a\u003e and \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e contradicted each other in methods of active persecution vs. nonviolent integration.\u003c/p\u003e\n\u003ch2 id=\"bloody-sunday\"\u003eBloody Sunday\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#bloody-sunday\"\u003eBloody Sunday\u003c/a\u003e was a voting rights march from Selma to Montgomery. Peaceful protesters were attacked with nightsticks and tear gas. The event was widely televised: transforming the movement as a televised morality play.\u003c/p\u003e\n\u003cp\u003eNonviolence helps getting the clergy leaders as a form of leveraging religion in a show of unity.\u003c/p\u003e\n\u003ch2 id=\"black-power-movement\"\u003eBlack Power Movement\u003c/h2\u003e\n\u003cp\u003eA new chapter in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e which incorporated less of the elements of integration but instead in wanted more sense of self-determination. 
\u003ca href=\"/posts/kbhnonviolence_movement/\"\u003enonviolence movement\u003c/a\u003e, which the \u003ca href=\"#black-power-movement\"\u003eBlack Power Movement\u003c/a\u003e overrided, had ran its course when \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e was assassinated.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcivil_rights/","tags":null,"title":"civil rights movement"},{"categories":null,"contents":"A part of the New Deal programs for unmarried men to go and build American infrastructure outdoors under reasonably harsh conditions. \u0026ldquo;Kind of like boy scouts for adults.\u0026rdquo; It is structured like the military; Black men were segregated and not given leadership roles.\n1933-1942.\n","html":"\u003cp\u003eA part of the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e programs for unmarried men to go and build American infrastructure outdoors under reasonably harsh conditions. \u0026ldquo;Kind of like boy scouts for adults.\u0026rdquo; It is structured like the military; Black men were segregated and not given leadership roles.\u003c/p\u003e\n\u003cp\u003e1933-1942.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcivillian_conservation_corps/","tags":null,"title":"Civillian Conservation Corps"},{"categories":null,"contents":"key question: are there bias and changes in young children + changes in skin color to generate more samples of skin disease.\nprevious work: DermGAN (Ghorbani 2020), this is not pediatric and also a bit deterministic.\nkey problems data is scarce data is not available and lack of data sharing.\ndata is sensitive especially children.\npediatric specificity we want to generate children\u0026rsquo;s skin disease samples, which as vastly out of sample. 
The work is therefore trained on only 1000-2000ish samples.\nmodeling latent diffusion model (LDF) ControlNet (Zhang 2023)\u0026mdash;allows specific conditioning of the generation by exogenous rules data gym cleaning get rid of face crop for specific body part ensure anonymity, etc. patch extraction start in the upper left corner, ensure that the resulting patch has at least n% of diseases available\nfurther, create a mask for where the disease should live\nmodeling train a diffusion model yay\nresults the diffusion model by itself does nothing combined with ControlNet, life is much better and achieve higher than SOTA ","html":"\u003cp\u003e\u003cstrong\u003ekey question\u003c/strong\u003e: are there bias and changes in young children + changes in skin color to generate more samples of skin disease.\u003c/p\u003e\n\u003cp\u003eprevious work: DermGAN (Ghorbani 2020), this is not pediatric and also a bit deterministic.\u003c/p\u003e\n\u003ch2 id=\"key-problems\"\u003ekey problems\u003c/h2\u003e\n\u003ch3 id=\"data-is-scarce\"\u003edata is scarce\u003c/h3\u003e\n\u003cp\u003edata is not available and lack of data sharing.\u003c/p\u003e\n\u003ch3 id=\"data-is-sensitive\"\u003edata is sensitive\u003c/h3\u003e\n\u003cp\u003eespecially children.\u003c/p\u003e\n\u003ch3 id=\"pediatric-specificity\"\u003epediatric specificity\u003c/h3\u003e\n\u003cp\u003ewe want to generate children\u0026rsquo;s skin disease samples, which as vastly out of sample. 
The work is therefore trained on only 1000-2000ish samples.\u003c/p\u003e\n\u003ch2 id=\"modeling\"\u003emodeling\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elatent diffusion model (LDF)\u003c/li\u003e\n\u003cli\u003eControlNet (Zhang 2023)\u0026mdash;allows specific conditioning of the generation by exogenous rules\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"data-gym\"\u003edata gym\u003c/h2\u003e\n\u003ch3 id=\"cleaning\"\u003ecleaning\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eget rid of face\u003c/li\u003e\n\u003cli\u003ecrop for specific body part\u003c/li\u003e\n\u003cli\u003eensure anonymity, etc.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"patch-extraction\"\u003epatch extraction\u003c/h3\u003e\n\u003cp\u003estart in the upper left corner, ensure that the resulting patch has at least n% of diseases available\u003c/p\u003e\n\u003cp\u003efurther, create a mask for where the disease should live\u003c/p\u003e\n\u003ch3 id=\"modeling\"\u003emodeling\u003c/h3\u003e\n\u003cp\u003etrain a diffusion model yay\u003c/p\u003e\n\u003ch2 id=\"results\"\u003eresults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe diffusion model by itself does nothing\u003c/li\u003e\n\u003cli\u003ecombined with ControlNet, life is much better and achieve higher than SOTA\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclinical_skin_disease_imaging/","tags":null,"title":"Clinical Skin Disease Image Generation"},{"categories":null,"contents":"in demand paging, if we have to kick out a page, which one do we kick?\npossible basic approaches random page (this works surprisingly well) throw out the page that\u0026rsquo;s the longest in memory (this is BAD because if a page is there for a long time, its probably accessed a lot) oracle (pick the page whose next accesses is farest in the future\u0026hellip; we can\u0026rsquo;t predict the future) LRU (replace the page that\u0026rsquo;s accessed longest time ago) LRU sounds decently good, but recall that 
\\(2^{36}\\) wall times to store wall time for each page are needed which is bad\nclock algorithm rotate through all pages until we find one that hasn\u0026rsquo;t been referenced since last time\nwe add a reference bit to the page table\u0026mdash;its set to \\(1\\) if the program wrote or read each page, otherwise its set to \\(0\\) when page kick is needed, clock algorithm starts where it left off before and scan through physical pages each page it checks with reference bit 1, it sets the reference bit as 0 if it checked a page and its reference bit is 0, we kick it out (because we\u0026rsquo;ve gone through two ) We now save the position of the hand\u0026mdash;we want to begin checking with the page that hasn\u0026rsquo;t been checked for the longest time.\nIf every page has a reference bit is one, running this algorithm doesn\u0026rsquo;t break because it would set its immediately next bit of memory.\npage replacement model per-process replacement THIS IS NOT USED: we only kick out our own pages (but\u0026hellip; how would we know how many pages we allocate max per process before we start kicking?).\nglobal replacement THIS IS USED: a page fault in one process can kick out a page from another process. 
all pages from all processes in a single pool.\nrecall: demand paging get space:\npick a page to kick out write it to disk mark the old page map entry as \u0026ldquo;not present\u0026rdquo; update the new page map entry for the new process + point to our reused page restore kicked page:\ntrigger page fault check swap (if it isn\u0026rsquo;t present, page fault end) get a new physical page by kicking another page out load the data from disk into that reused page update page map yet again ","html":"\u003cp\u003ein \u003ca href=\"/posts/kbhdemand_paging/\"\u003edemand paging\u003c/a\u003e, if we have to kick out a page, which one do we kick?\u003c/p\u003e\n\u003ch2 id=\"possible-basic-approaches\"\u003epossible basic approaches\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003erandom page (this works surprisingly well)\u003c/li\u003e\n\u003cli\u003ethrow out the page that\u0026rsquo;s the longest in memory (this is BAD because if a page is there for a long time, its probably accessed a lot)\u003c/li\u003e\n\u003cli\u003eoracle (pick the page whose next accesses is farest in the future\u0026hellip; we can\u0026rsquo;t predict the future)\u003c/li\u003e\n\u003cli\u003eLRU (replace the page that\u0026rsquo;s accessed \u003cstrong\u003elongest time ago\u003c/strong\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eLRU\u003c/strong\u003e sounds decently good, but recall that \\(2^{36}\\) wall times to store wall time for each page are needed which is bad\u003c/p\u003e\n\u003ch2 id=\"clock-algorithm\"\u003eclock algorithm\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003erotate through all pages until we find one that hasn\u0026rsquo;t been referenced since last time\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe add a \u003cstrong\u003ereference bit\u003c/strong\u003e to the \u003ca href=\"/posts/kbhvirtual_memory/#paging\"\u003epage table\u003c/a\u003e\u0026mdash;its set to \\(1\\) if the program wrote or read each page, otherwise its set 
to \\(0\\)\u003c/li\u003e\n\u003cli\u003ewhen page kick is needed, clock algorithm starts where it left off before and scan through physical pages\n\u003col\u003e\n\u003cli\u003eeach page it checks with reference bit 1, it sets the \u003cstrong\u003ereference bit\u003c/strong\u003e as 0\u003c/li\u003e\n\u003cli\u003eif it checked a page and its reference bit is 0, we kick it out (because we\u0026rsquo;ve gone through two )\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe now \u003cstrong\u003esave the position of the hand\u003c/strong\u003e\u0026mdash;we want to begin checking with the page that hasn\u0026rsquo;t been checked for the longest time.\u003c/p\u003e\n\u003cp\u003eIf every page has a \u003cstrong\u003ereference bit\u003c/strong\u003e is one, running this algorithm doesn\u0026rsquo;t break because it would set its immediately next bit of memory.\u003c/p\u003e\n\u003ch2 id=\"page-replacement-model\"\u003epage replacement model\u003c/h2\u003e\n\u003ch3 id=\"per-process-replacement\"\u003eper-process replacement\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eTHIS IS NOT USED\u003c/strong\u003e: we only kick out our own pages (but\u0026hellip; how would we know how many pages we allocate max per process before we start kicking?).\u003c/p\u003e\n\u003ch3 id=\"global-replacement\"\u003eglobal replacement\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eTHIS IS USED\u003c/strong\u003e: a page fault in one process can kick out a page from another process. 
all pages from all processes in a single pool.\u003c/p\u003e\n\u003ch2 id=\"recall-demand-paging\"\u003erecall: demand paging\u003c/h2\u003e\n\u003cp\u003eget space:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003epick a page to kick out\u003c/li\u003e\n\u003cli\u003ewrite it to disk\u003c/li\u003e\n\u003cli\u003emark the old page map entry as \u0026ldquo;not present\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eupdate the new page map entry for the new process + point to our reused page\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003erestore kicked page:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etrigger page fault\u003c/li\u003e\n\u003cli\u003echeck swap (if it isn\u0026rsquo;t present, page fault end)\u003c/li\u003e\n\u003cli\u003eget a new physical page by kicking another page out\u003c/li\u003e\n\u003cli\u003eload the data from disk into that reused page\u003c/li\u003e\n\u003cli\u003eupdate page map yet again\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclock_algorthium/","tags":null,"title":"clock algorithm"},{"categories":null,"contents":"to be closed means that the operation of a group applied to an element of a group would produce another element of the group.\n","html":"\u003cp\u003eto be \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e means that the \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e of a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e applied to an element of a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e would produce another element of the \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclosed/","tags":null,"title":"closed"},{"categories":null,"contents":"Exploring CLRS.\n2 insertion sort ","html":"\u003cp\u003eExploring CLRS.\u003c/p\u003e\n\u003ch2 id=\"2\"\u003e2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinsertion_sort/\"\u003einsertion sort\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhclrs_index/","tags":["index"],"title":"CLRS Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhclustering/","tags":null,"title":"clustering"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcmu/","tags":null,"title":"CMU"},{"categories":null,"contents":"A brain signal to help maintain glucose homeostatis\nBrain takes glucose product + glucose uptake to control energy balance in food intake and energy expenditure.\nThe brain takes:\nNeural Behavioral Hormonal responses to maintain glucode uptake.\n","html":"\u003cp\u003eA brain signal to help maintain \u003ca href=\"\"\u003eglucose homeostatis\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eBrain takes glucose product + glucose uptake to control energy balance in food intake and energy expenditure.\u003c/p\u003e\n\u003cp\u003eThe brain takes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eNeural\u003c/li\u003e\n\u003cli\u003eBehavioral\u003c/li\u003e\n\u003cli\u003eHormonal\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eresponses to maintain glucode uptake.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcns_regulation/","tags":null,"title":"CNS regulation"},{"categories":null,"contents":"A Code Review is a systematic study code by others\u0026mdash;like proofreading an essay. There\u0026rsquo;s a few different ways of doing Code Review.\nWhy do we code review? catch bugs, style deviations, design + convention violations security trade-off: having someone who is well-versed in security is useful to know how other people\u0026rsquo;s code work to learn additional skills, languages, frameworks Code Review Methodology Don\u0026rsquo;t do it Very fast! 
None of the benefits of code review Over-the-Shoulder Code Review Over-the-Shoulder Code Review typically is done over someone\u0026rsquo;s shoulder\u0026mdash;author walking the reviewer through code.\nProps Typically catch major + obvious issues Lightweight and fast\u0026mdash;most likely to get done Cons Author explains code as they go; biasing the reviewer Author knows the code, so may gloss over the parts they are familiar with + move at a pace faster than the reviewer can process The author only has two solders: can\u0026rsquo;t involve more than 2 people Usually real-time: the author is waiting for reviewer, which is blocking both author and reviewer\u0026rsquo;s schedules Pair Programming Pair Programming is a two-brains, one keyboard paradigm. The less experienced person types (moves at the pace of the slower person)\nPros Real-time feedback/correction; good for learning new things Writing code + code review at the same time\u0026mdash;total time saved Cons Two people working together are susceptible to bind slots still! Remember that it will take four times as long to do something: this trade-off is worth it! Formal Code Review Tools: Phabricator, Gerrit\nThe process of Formal Code Review is a very formal process.\nAuthor writes and commits code The diff of the commit is sent to the reviewer Reviewer reads through the code at their own pace Reviewer can comment on the entire diff, or on specific lines of code This can involve multiple reviewers This is basically informal code review, but solving the original issues.\nBest Practices Every commit must be code reviewed before pushed to other people The larger the commit, the more reviewers should be on it Reviewer Approval Levels +2: code can be pushed +1: code looks good, someone else should make the call 0: why am I here? 
I dunno -1: this code smells funny, but I\u0026rsquo;m willing to be overruled -2: this code must be changed before being pushed -1/-2: comment should clearly indicate whether they are blocking push.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcode_review/\"\u003eCode Review\u003c/a\u003e is a systematic study code by others\u0026mdash;like proofreading an essay. There\u0026rsquo;s a few different ways of doing \u003ca href=\"/posts/kbhcode_review/\"\u003eCode Review\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"why-do-we-code-review\"\u003eWhy do we code review?\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecatch bugs, style deviations, design + convention violations\u003c/li\u003e\n\u003cli\u003esecurity trade-off: having someone who is well-versed in security is useful\u003c/li\u003e\n\u003cli\u003eto know how other people\u0026rsquo;s code work\u003c/li\u003e\n\u003cli\u003eto learn additional skills, languages, frameworks\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"code-review-methodology\"\u003eCode Review Methodology\u003c/h2\u003e\n\u003ch3 id=\"don-t-do-it\"\u003eDon\u0026rsquo;t do it\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eVery fast!\u003c/li\u003e\n\u003cli\u003eNone of the benefits of code review\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"over-the-shoulder-code-review\"\u003eOver-the-Shoulder Code Review\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#over-the-shoulder-code-review\"\u003eOver-the-Shoulder Code Review\u003c/a\u003e typically is done over someone\u0026rsquo;s shoulder\u0026mdash;author walking the reviewer through code.\u003c/p\u003e\n\u003ch4 id=\"props\"\u003eProps\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eTypically catch major + obvious issues\u003c/li\u003e\n\u003cli\u003eLightweight and fast\u0026mdash;most likely to get done\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"cons\"\u003eCons\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eAuthor explains code as they go; biasing the 
reviewer\u003c/li\u003e\n\u003cli\u003eAuthor knows the code, so may gloss over the parts they are familiar with + move at a pace faster than the reviewer can process\u003c/li\u003e\n\u003cli\u003eThe author only has two solders: can\u0026rsquo;t involve more than 2 people\u003c/li\u003e\n\u003cli\u003eUsually real-time: the author is waiting for reviewer, which is blocking both author and reviewer\u0026rsquo;s schedules\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pair-programming\"\u003ePair Programming\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#pair-programming\"\u003ePair Programming\u003c/a\u003e is a two-brains, one keyboard paradigm. The less experienced person types (moves at the pace of the slower person)\u003c/p\u003e\n\u003ch4 id=\"pros\"\u003ePros\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eReal-time feedback/correction; good for learning new things\u003c/li\u003e\n\u003cli\u003eWriting code + code review at the same time\u0026mdash;total time saved\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"cons\"\u003eCons\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eTwo people working together are susceptible to bind slots still!\u003c/li\u003e\n\u003cli\u003eRemember that it will take four times as long to do something: this trade-off is worth it!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"formal-code-review\"\u003eFormal Code Review\u003c/h3\u003e\n\u003cp\u003eTools: Phabricator, Gerrit\u003c/p\u003e\n\u003cp\u003eThe process of \u003ca href=\"#formal-code-review\"\u003eFormal Code Review\u003c/a\u003e is a very formal process.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAuthor writes and commits code\u003c/li\u003e\n\u003cli\u003eThe diff of the commit is sent to the reviewer\u003c/li\u003e\n\u003cli\u003eReviewer reads through the code at their own pace\u003c/li\u003e\n\u003cli\u003eReviewer can comment on the entire diff, or on specific lines of code\u003c/li\u003e\n\u003cli\u003eThis can involve multiple 
reviewers\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is \u003cem\u003ebasically\u003c/em\u003e informal code review, but solving the original issues.\u003c/p\u003e\n\u003ch4 id=\"best-practices\"\u003eBest Practices\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eEvery commit must be code reviewed before pushed to other people\u003c/li\u003e\n\u003cli\u003eThe larger the commit, the more reviewers should be on it\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"reviewer-approval-levels\"\u003eReviewer Approval Levels\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e+2: code can be pushed\u003c/li\u003e\n\u003cli\u003e+1: code looks good, someone else should make the call\u003c/li\u003e\n\u003cli\u003e0: why am I here? I dunno\u003c/li\u003e\n\u003cli\u003e-1: this code smells funny, but I\u0026rsquo;m willing to be overruled\u003c/li\u003e\n\u003cli\u003e-2: this code must be changed before being pushed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e-1/-2: comment should clearly indicate whether they are blocking push.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcode_review/","tags":null,"title":"Code Review"},{"categories":null,"contents":"The time it takes for a qubit to oscillate between two states between damping down.\n","html":"\u003cp\u003eThe time it takes for a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e to oscillate between two states between damping down.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcoherence_time/","tags":null,"title":"coherence time"},{"categories":null,"contents":"A family wide between SARS-COV2 variances identified specific sites which are maintained across variance of concerns suggesting why specific antibidies targeting them maybe able to render higher neutralizing potential\n","html":"\u003cp\u003eA family wide between \u003ca href=\"/posts/kbhsars_cov2/\"\u003eSARS-COV2\u003c/a\u003e variances identified specific sites which are maintained across variance of concerns suggesting 
why specific antibidies targeting them maybe able to render higher neutralizing potential\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcold_sites/","tags":null,"title":"cold sites"},{"categories":null,"contents":"The cold war is a period of time in which there is blocks of conflict. This is after WWII.\nSee also:\ncold war in vietnam ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e is a period of time in which there is blocks of conflict. This is after \u003ca href=\"\"\u003eWWII\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003ecold war in vietnam\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcold_war/","tags":null,"title":"cold war"},{"categories":null,"contents":"A fact sheet on the progress of the cold war in Vietnam.\nprogression of US escalation in the war, an overview Reading: encyclopedia Britannica\n1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off 1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership 1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region 1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change 1963: Kennedy assisinated 1964: Vietnam situation worsens 1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control 1965: US fighting was effective though unpresistent; viet cong just went in after US leaves 1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on 1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam. Though it failed, strong anti-war sentiments were stirred. 
1969: Anti-War protests pick up force 1970: Ohio National Guard opens fire on unarmed protesters 1973: Peace Pact Signed after the US giving up, essentially 1975: Saigon falls, US evacuates anti-war protest motivation in Vietnam Reading: Protest against the War in Vietnam\nThe first protests rose in the 1950 and picked up force by the late 1960s when LBJ decided not to seek re-election.\nForeign policy is usually hard to change, but the strength of domestic dissent in Vietnam represents an usual shift which drove foreign policy changes.\nRight-wing sentiment: seeing the war as a means of future-proofing the American government from Communistic influences. Left-wing protest More organized than the spontaneous of the right-wing protest Split between moralistic + legalistic interests vs. national interest domestic political influence of the Vietnam War Reading: The War that Killed Trust, Karl Marlantes, 2017\n\u0026ldquo;Of course presidents lie\u0026rdquo;\u0026mdash;that the Vietnam War represented the shift away from genuine truthfulness as a part of American politics Killed 58,000 service-members, and made Americans cynical and distrustful of governmental institutions Systemic Cynicism Johnson\u0026rsquo;s \u0026ldquo;credibility gap\u0026rdquo;: that the president maybe lying. Nowadays this is commonplace, but back then it was quite unusual.\nCLAIM: engendered Cynicism threatened inaction.\nRacial Integration The cold war promised higher degrees of racial integration because of collective service.\nRepeated Touring That, post-draft, the American working class became much more likely to serve \u0026ldquo;voluntarily\u0026rdquo; by being recruited. 
Unlike the draft, which is some ways is universal service, the volunteer system is much more reliant upon th emiddle class.\nsocial impacts of the Vietnam War Reading: The Social Impact of War, Modell and Haggerty, 1991\nWars\u0026rsquo; effects can be treated with a lens of social manifestation The Vietnam war had an impact on the last 20 years of primary war literature draft The draft is the principle mechanism by which people into the war. The system facilitating the draft in the United States, the Selective Service System, is a good case study for such a system in the Vietnam War.\nBy its design, the draft is supposed to be an equitable process (baring gender and age.) However, the Vietnam War reveals that the military services was not straightforwardly distributed: often drafting children of lower socioeconomic status.\nexperience of servicemen in Vietnam Soldiers in the Vietnam War have shown some negative psychological side effects. Solders are shown to be \u0026ldquo;working through\u0026rdquo; the ideas to process, creating a larger effects.\neffects on the economy War veterans generally had higher incomes than non-vets, mostly because they have more income per level of educational attanment.\nhistoriographical school of Vietnam War Reading: James McLeroy, Small Wars Journal, (Army Special Forces Officer in I Corps, Vietnam, in 1968)\nOrthodox treatment Vietnam War as an extension/afterthought of late-20th century cold war history\nVietnam War escalated only because of United States involvement \u0026ldquo;anti-war\u0026rdquo; is not opposition against communistic conquest but opposition against war in itself Revisionist treatment Vietnam War as a calculable implementation of escalator revolutionary strategy modeled after Mao.\nVietnam War is not an insurgency or a civil war, but instead a part of the three-step guerrilla warfare Provocation of the United States is a part of the strategy\u0026mdash;to force them to move out of Vietnam and to encourage the 
communist bloc to provide more support ","html":"\u003cp\u003eA fact sheet on the progress of the \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e in \u003ca href=\"/posts/kbhvietnam/\"\u003eVietnam\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"progression-of-us-escalation-in-the-war-an-overview\"\u003eprogression of US escalation in the war, an overview\u003c/h2\u003e\n\u003cp\u003eReading: encyclopedia Britannica\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off\u003c/li\u003e\n\u003cli\u003e1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership\u003c/li\u003e\n\u003cli\u003e1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region\u003c/li\u003e\n\u003cli\u003e1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change\u003c/li\u003e\n\u003cli\u003e1963: Kennedy assisinated\u003c/li\u003e\n\u003cli\u003e1964: Vietnam situation worsens\u003c/li\u003e\n\u003cli\u003e1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control\u003c/li\u003e\n\u003cli\u003e1965: US fighting was effective though unpresistent; viet cong just went in after US leaves\u003c/li\u003e\n\u003cli\u003e1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on\u003c/li\u003e\n\u003cli\u003e1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam. 
Though it failed, strong anti-war sentiments were stirred.\u003c/li\u003e\n\u003cli\u003e1969: Anti-War protests pick up force\u003c/li\u003e\n\u003cli\u003e1970: Ohio National Guard opens fire on unarmed protesters\u003c/li\u003e\n\u003cli\u003e1973: Peace Pact Signed after the US giving up, essentially\u003c/li\u003e\n\u003cli\u003e1975: Saigon falls, US evacuates\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"anti-war-protest-motivation-in-vietnam\"\u003eanti-war protest motivation in Vietnam\u003c/h2\u003e\n\u003cp\u003eReading: Protest against the War in Vietnam\u003c/p\u003e\n\u003cp\u003eThe first protests rose in the 1950 and picked up force by the late 1960s when LBJ decided not to seek re-election.\u003c/p\u003e\n\u003cp\u003eForeign policy is usually hard to change, but the strength of domestic dissent in Vietnam represents an usual shift which drove foreign policy changes.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRight-wing sentiment: seeing the war as a means of future-proofing the American government from Communistic influences.\u003c/li\u003e\n\u003cli\u003eLeft-wing protest\n\u003cul\u003e\n\u003cli\u003eMore organized than the spontaneous of the right-wing protest\u003c/li\u003e\n\u003cli\u003eSplit between moralistic + legalistic interests vs. 
national interest\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"domestic-political-influence-of-the-vietnam-war--kbhcold-war-in-vietnam-dot-md\"\u003edomestic political influence of the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eReading: The War that Killed Trust, Karl Marlantes, 2017\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Of course presidents lie\u0026rdquo;\u0026mdash;that the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e represented the shift away from genuine truthfulness as a part of American politics\u003c/li\u003e\n\u003cli\u003eKilled 58,000 service-members, and made Americans cynical and distrustful of governmental institutions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"systemic-cynicism\"\u003eSystemic Cynicism\u003c/h3\u003e\n\u003cp\u003eJohnson\u0026rsquo;s \u0026ldquo;credibility gap\u0026rdquo;: that the president maybe lying. Nowadays this is commonplace, but back then it was quite unusual.\u003c/p\u003e\n\u003cp\u003eCLAIM: engendered Cynicism threatened inaction.\u003c/p\u003e\n\u003ch3 id=\"racial-integration\"\u003eRacial Integration\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e promised higher degrees of racial integration because of collective service.\u003c/p\u003e\n\u003ch3 id=\"repeated-touring\"\u003eRepeated Touring\u003c/h3\u003e\n\u003cp\u003eThat, post-draft, the American working class became much more likely to serve \u0026ldquo;voluntarily\u0026rdquo; by being recruited. 
Unlike the draft, which is some ways is universal service, the volunteer system is much more reliant upon th emiddle class.\u003c/p\u003e\n\u003ch2 id=\"social-impacts-of-the-vietnam-war--kbhcold-war-in-vietnam-dot-md\"\u003esocial impacts of the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eReading: The Social Impact of War, Modell and Haggerty, 1991\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWars\u0026rsquo; effects can be treated with a lens of social manifestation\u003c/li\u003e\n\u003cli\u003eThe \u003ca href=\"/posts/kbhvietnam/\"\u003eVietnam\u003c/a\u003e war had an impact on the last 20 years of primary war literature\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"draft\"\u003edraft\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#draft\"\u003edraft\u003c/a\u003e is the principle mechanism by which people into the war. The system facilitating the \u003ca href=\"#draft\"\u003edraft\u003c/a\u003e in the \u003ca href=\"\"\u003eUnited States\u003c/a\u003e, the \u003ca href=\"/posts/kbhselective_service_system/\"\u003eSelective Service System\u003c/a\u003e, is a good case study for such a system in the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBy its design, the \u003ca href=\"#draft\"\u003edraft\u003c/a\u003e is supposed to be an equitable process (baring gender and age.) However, the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e reveals that the military services was not straightforwardly distributed: often \u003ca href=\"#draft\"\u003edrafting\u003c/a\u003e children of lower socioeconomic status.\u003c/p\u003e\n\u003ch3 id=\"experience-of-servicemen-in-vietnam\"\u003eexperience of servicemen in Vietnam\u003c/h3\u003e\n\u003cp\u003eSoldiers in the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e have shown some negative psychological side effects. 
Solders are shown to be \u0026ldquo;working through\u0026rdquo; the ideas to process, creating a larger effects.\u003c/p\u003e\n\u003ch3 id=\"effects-on-the-economy\"\u003eeffects on the economy\u003c/h3\u003e\n\u003cp\u003eWar veterans generally had higher incomes than non-vets, mostly because they have more income per level of educational attanment.\u003c/p\u003e\n\u003ch2 id=\"historiographical-school-of-vietnam-war--kbhcold-war-in-vietnam-dot-md\"\u003ehistoriographical school of \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eReading: James McLeroy, Small Wars Journal, (Army Special Forces Officer in I Corps, Vietnam, in 1968)\u003c/p\u003e\n\u003ch3 id=\"orthodox-treatment\"\u003eOrthodox treatment\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e as an extension/afterthought of late-20th century \u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e history\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e escalated only because of \u003ca href=\"\"\u003eUnited States\u003c/a\u003e involvement\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;anti-war\u0026rdquo; is not opposition against communistic conquest but opposition against war in itself\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"revisionist-treatment\"\u003eRevisionist treatment\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e as a calculable implementation of escalator revolutionary strategy modeled after Mao.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e is not an insurgency or a civil war, but instead a part of the three-step guerrilla warfare\u003c/li\u003e\n\u003cli\u003eProvocation of the \u003ca href=\"\"\u003eUnited States\u003c/a\u003e is a part of the 
strategy\u0026mdash;to force them to move out of Vietnam and to encourage the communist bloc to provide more support\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcold_war_in_vietnam/","tags":null,"title":"cold war in vietnam"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcollectivist_economy/","tags":null,"title":"Collectivist Economy"},{"categories":null,"contents":"College application is the process of applying to an American college.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcollege_application/\"\u003eCollege application\u003c/a\u003e is the process of applying to an American college.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcollege_application/","tags":null,"title":"college application"},{"categories":null,"contents":"COLLEGE101 Readings These links will not be online, as they contain actual notes from my reading.\nReading Date Link W.E.B. DuBois, Of Our Spiritual Strivings \u0026lt;2023-09-24 Sun\u0026gt; Of Our Spiritual Strivings ","html":"\u003ch2 id=\"college101-readings\"\u003eCOLLEGE101 Readings\u003c/h2\u003e\n\u003cp\u003eThese links will not be online, as they contain actual notes from my reading.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eReading\u003c/th\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eW.E.B. 
DuBois, Of Our Spiritual Strivings\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-09-24 Sun\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhof_our_spiritual_strivings/\"\u003eOf Our Spiritual Strivings\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcollege101_index/","tags":null,"title":"COLLEGE101 Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcollegeboard/","tags":null,"title":"CollegeBoard"},{"categories":null,"contents":"collocation extraction is the task of extracting n-grams from text that would co-occur next to each other more often than chance.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcollocation_extractio/\"\u003ecollocation extraction\u003c/a\u003e is the task of extracting n-grams from text that would co-occur next to each other more often than chance.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcollocation_extractio/","tags":null,"title":"collocation extraction"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcolumn_space/","tags":null,"title":"column space"},{"categories":null,"contents":"A combination is a choice task which shows that order does not matter.\n\\begin{equation} \\mqty(n \\\\k) = \\frac{n!}{k!(n-k)!} = n! 
\\times 1 \\times \\frac{1}{k!} \\times \\frac{1}{(n-k)!} \\end{equation}\nThis could be shown as follows: we first permute the group of people \\(n\\) (\\(n!\\)); take the first \\(k\\) of them (only 1 chose); we remove the overcounted order from the \\(k\\) subset chosen (\\(\\frac{1}{k!}\\)),; we remove the overcounted order from the \\(n-k\\) subset (\\(\\frac{1}{(n-k)!}\\)).\nThere are many ways of making this happen in code:\nn_choose_k = math.factorial(n) / (math.factorial(k) * math.factorial(n-k)) n_choose_k = math.comb(n,k) n_choose_k = itertools.combinations(range(n), k) ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003e is a choice task which shows that order does not matter.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(n \\\\k) = \\frac{n!}{k!(n-k)!} = n! \\times 1 \\times \\frac{1}{k!} \\times \\frac{1}{(n-k)!}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis could be shown as follows: we first \u003ca href=\"/posts/kbhpermutation/\"\u003epermute\u003c/a\u003e the group of people \\(n\\) (\\(n!\\)); take the first \\(k\\) of them (only 1 chose); we remove the overcounted order from the \\(k\\) subset chosen (\\(\\frac{1}{k!}\\)),; we remove the overcounted order from the \\(n-k\\) subset (\\(\\frac{1}{(n-k)!}\\)).\u003c/p\u003e\n\u003cp\u003eThere are many ways of making this happen in code:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_choose_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efactorial\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efactorial\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efactorial\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_choose_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emath\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecomb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_choose_k\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eitertools\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecombinations\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhcombination/","tags":null,"title":"combination"},{"categories":null,"contents":" collect(): get all of your data count(): get a count of the elements in the RDD countByValue(): list the times each value appears reduce(func): the reduce part of MapReduce first(), take(n): return some number of elements top(n): return the highest n values in the list ","html":"\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ecollect()\u003c/code\u003e: get all of your data\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecount()\u003c/code\u003e: get a count of the elements in the RDD\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecountByValue()\u003c/code\u003e: list the times each value appears\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ereduce(func)\u003c/code\u003e: the \u003ca href=\"/posts/kbhreduce/\"\u003ereduce\u003c/a\u003e part of \u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003efirst(), take(n)\u003c/code\u003e: return some number of elements\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003etop(n)\u003c/code\u003e: return the highest n values in the 
list\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcommon_spark_actions/","tags":null,"title":"Common Spark Actions"},{"categories":null,"contents":" map(func): apply a function on all functions filter(func): filter based on function flatMap(func): flatten returned lists into one giant list union(rdd): create a union of multiple RDD0 subtract(rdd): subtract RDDs cartesian(rdd): cartesian product of rdd parallelize(list): make an RDD from list Special transformations for Pair RDDs reduceByKey(func): key things groupByKey(func): key things sortByKey(func): key things See also Database \u0026ldquo;Join\u0026rdquo;\n","html":"\u003cul\u003e\n\u003cli\u003e\u003ccode\u003emap(func)\u003c/code\u003e: apply a function on all functions\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003efilter(func)\u003c/code\u003e: filter based on function\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eflatMap(func)\u003c/code\u003e: flatten returned lists into one giant list\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eunion(rdd)\u003c/code\u003e: create a union of multiple RDD0\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003esubtract(rdd)\u003c/code\u003e: subtract RDDs\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecartesian(rdd)\u003c/code\u003e: cartesian product of rdd\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eparallelize(list)\u003c/code\u003e: make an RDD from list\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"special-transformations-for-pair-rdd--kbhrdd-dot-md--s\"\u003eSpecial transformations for \u003ca href=\"/posts/kbhrdd/#pair-rdd\"\u003ePair RDD\u003c/a\u003es\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ereduceByKey(func)\u003c/code\u003e: key things\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003egroupByKey(func)\u003c/code\u003e: key things\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003esortByKey(func)\u003c/code\u003e: key things\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"\"\u003eDatabase 
\u0026ldquo;Join\u0026rdquo;\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcommon_spark_transformations/","tags":null,"title":"Common Spark Transformations"},{"categories":null,"contents":"commutativity means that the same operation can be ran in any order.\nThat is:\n\\begin{equation} ABC = ACB \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e means that the same \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e can be ran in any order.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nABC = ACB\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcommutivity/","tags":null,"title":"commutativity"},{"categories":null,"contents":" return \u0026lt; 0 if first value should come before second value return \u0026gt; 0 if first value should come AFTEr second value 0 if the first and second value are equivalent ","html":"\u003col\u003e\n\u003cli\u003ereturn \u0026lt; 0 if first value should come before second value\u003c/li\u003e\n\u003cli\u003ereturn \u0026gt; 0 if first value should come AFTEr second value\u003c/li\u003e\n\u003cli\u003e0 if the first and second value are equivalent\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomparison_function/","tags":null,"title":"comparison function"},{"categories":null,"contents":"Recall that Euler\u0026rsquo;s Equation exists:\n\\begin{equation} f(x) = e^{i k \\omega x} = \\cos (k\\omega x) + i \\sin(k\\omega x) \\end{equation}\nand, for \\(\\omega = \\frac{2\\pi}{L}\\), this is still \\(L\\) periodic!\nNext up, we make an important note:\n\\begin{equation} e^{ik\\omega x}, e^{-i k \\omega x} \\end{equation}\nis linearly independent over \\(x\\).\ninner product over complex-valued functions recall all of the inner product properties. 
Now, for functions periodic over \\([0,L]\\) (recall we have double this if the function is period over \\([-L, L]\\):\n\\begin{equation} \\langle f, g \\rangle = \\frac{1}{L} \\int_{0}^{L} f(x) \\overline{g(x)} \\dd{x} \\end{equation}\nsimilar to all other inner products, \\(\\langle f,f \\rangle = 0\\) IFF \\(f = 0\\), and \\(\\langle f,g \\rangle = 0\\) implies that \\(f\\) and \\(g\\) are orthogonal.\ncomplex exponentials are orthonormal For \\(L \u0026gt; 0\\), and \\(\\omega = \\frac{2\\pi}{L}\\), consider:\n\\begin{equation} \\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle \\end{equation}\nImportantly, we have the property that:\n\\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 0\\) if \\(k_1 \\neq k_2\\) \\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 1\\) if \\(k_1 = 1\\) ","html":"\u003cp\u003eRecall that \u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e exists:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = e^{i k \\omega x} = \\cos (k\\omega x) + i \\sin(k\\omega x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, for \\(\\omega = \\frac{2\\pi}{L}\\), this is \u003cstrong\u003estill\u003c/strong\u003e \\(L\\) periodic!\u003c/p\u003e\n\u003cp\u003eNext up, we make an important note:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{ik\\omega x}, e^{-i k \\omega x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e over \\(x\\).\u003c/p\u003e\n\u003ch2 id=\"inner-product--kbhinner-product-dot-md--over-complex-valued-functions\"\u003e\u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e over complex-valued functions\u003c/h2\u003e\n\u003cp\u003erecall all of the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e properties. 
Now, for functions periodic over \\([0,L]\\) (recall we have double this if the function is period over \\([-L, L]\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f, g \\rangle = \\frac{1}{L} \\int_{0}^{L} f(x) \\overline{g(x)} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esimilar to all other \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es, \\(\\langle f,f \\rangle = 0\\) IFF \\(f = 0\\), and \\(\\langle f,g \\rangle = 0\\) implies that \\(f\\) and \\(g\\) are orthogonal.\u003c/p\u003e\n\u003ch2 id=\"complex-exponentials-are-orthonormal\"\u003ecomplex exponentials are orthonormal\u003c/h2\u003e\n\u003cp\u003eFor \\(L \u0026gt; 0\\), and \\(\\omega = \\frac{2\\pi}{L}\\), consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eImportantly, we have the property that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 0\\) if \\(k_1 \\neq k_2\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle e^{ik_{1} \\omega x}, e^{ik_{2} \\omega x} \\rangle = 1\\) if \\(k_1 = 1\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomplex_exponential/","tags":null,"title":"Complex Exponential"},{"categories":null,"contents":"A complex number is a type of number. They are usually written as \\(a+bi\\).\nFormally\u0026mdash;\n\\begin{equation} \\mathbb{C} = \\left\\{a+bi\\ \\middle |\\ a,b \\in \\mathbb{R} \\right\\} \\end{equation}\nThis set generates solutions to every single polynomial with unique solutions. Its plane looks like \\(\\mathbb{R}^{2}\\).\nconstituents an order pair of two elements \\((a,b)\\) where \\(a,b\\in \\mathbb{R}\\).\nproperties of complex arithmetic there are 6. 
For all statements below, we assume \\(\\alpha = a+bi\\) and \\(\\beta=c+di\\), \\(\\lambda = e+fi\\), where \\(a,b,c,d,e,f \\in \\mathbb{R}\\) and therefore \\(\\alpha, \\beta,\\lambda \\in \\mathbb{C}\\).\ncommutativity \\(\\alpha + \\beta = \\beta + \\alpha\\) and \\(\\alpha\\beta = \\beta\\alpha\\) for all \\(\\alpha,\\beta \\in \\mathbb{C}\\).\nProof of complex number commutativity We desire \\(\\alpha + \\beta = \\beta + \\alpha\\).\n\\begin{align} \\alpha + \\beta \u0026amp;= (a+bi)+(c+di) \\\\ \u0026amp;=(a+c)+(b+d)i \\\\ \u0026amp;=(c+a)+(d+b)i \\\\ \u0026amp;=(c+di) + (a+bi) \\\\ \u0026amp;=\\beta+\\alpha\\ \\blacksquare \\end{align}\nleveraging the commutativity inside real numbers.\nInsights: combining and splitting\nThis proof has the feature of combining, operating (commuting, here), the splitting.\nassociativity \\((\\alpha +\\beta) + \\lambda = \\alpha + (\\beta +\\lambda)\\) and \\((\\alpha\\beta) \\lambda = (\\alpha \\beta) \\lambda\\)\nProven via the same trick from last time\nidentities \\(\\lambda + 0 = \\lambda\\), \\(\\lambda 1 = \\lambda\\)\nProof of complex number additive identity We desire that \\(\\lambda + 0 = 0\\).\n\\begin{align} \\lambda + 0 \u0026amp;= (e+fi) + (0+0i) \\\\ \u0026amp;= (e+0) + (f+0)i \\\\ \u0026amp;= e+fi\\ \\blacksquare \\end{align}\nmultiplicative identity is proven in the same way\nadditive inverse \\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\)\nProof of complex number additive inverse We desire to claim that \\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\), specifically that there is a unique \\(\\beta\\) which is the additive inverse of every \\(\\alpha\\).\nTake a number \\(\\alpha \\in \\mathbb{C}\\). 
We have that \\(\\alpha\\) would then by definition be some \\((a+bi)\\) where \\(a,b \\in \\mathbb{R}\\).\nTake some \\(\\beta\\) for which \\(\\alpha + \\beta = 0\\); by definition we again have \\(\\beta\\) equals some \\((c+di)\\) where \\(c,d \\in \\mathbb{R}\\).\n\\(\\because \\alpha + \\beta =0\\), \\(\\therefore (a+bi) + (c+di) = 0\\). \\(\\therefore (a+c) + (b+d)i = 0\\) \\(\\therefore a+c = 0, b+d = 0\\) \\(\\therefore c = -a, d = -b\\) We have created a unique definition of \\(c,d\\) and therefore \\(\\beta\\) given any \\(\\alpha\\), implying both uniqueness and existence.\nInsights: construct then generalize\nIn this case, the cool insight is the construct and generalize pattern. We are taking a single case \\(\\alpha\\), manipulating it, and wrote the result we want in terms of the constituents of \\(\\alpha\\). This creates both an existence and uniqueness proof.\nmultiplicative inverse \\(\\forall \\alpha \\in \\mathbb{C}, \\alpha \\neq 0, \\exists!\\ \\beta \\in \\mathbb{C} : \\alpha\\beta =1\\)\nThis is proven exactly in the same way as before.\ndistributive property \\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\ \\forall\\ \\lambda, \\alpha, \\beta \\in \\mathbb{C}\\)\nProof of complex number distributive property We desire to claim that \\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\).\n\\begin{align} \\lambda(\\alpha+\\beta) \u0026amp;= (e+fi)((a+bi)+(c+di))\\\\ \u0026amp;=(e+fi)((a+c)+(b+d)i)\\\\ \u0026amp;=((ea+ec)-(fb+fd))+((eb+ed)+(fa+fc))i\\\\ \u0026amp;=ea+ec-fb-fd+(eb+ed+fa+fc)i\\\\ \u0026amp;=ea-fb+ec-fd+(eb+fa+ed+fc)i\\\\ \u0026amp;=(ea-fb)+(ec-fd)+((eb+fa)+(ed+fc))i\\\\ \u0026amp;=((ea-fb)+(eb+fa)i) + ((ec-fd)+(ed+fc)i)\\\\ \u0026amp;=(e+fi)(a+bi) + (e+fi)(c+di)\\\\ \u0026amp;=\\lambda \\alpha + \\lambda \\beta\\ \\blacksquare \\end{align}\nInsights: try to remember to go backwards\nAt some point in this proof I had to reverse complex addition then multiplication, which actually tripped me up for 
a bit (\u0026ldquo;how does i distribute!!!\u0026rdquo;, etc.) Turns out, there was already a definition for addition and multiplication of complex numbers so we just needed to use that.\nadditional information addition and multiplication of complex numbers \\begin{align} (a+bi) + (c+di) \u0026amp;= (a+c)+(b+d)i \\\\ (a+bi)(c+di) \u0026amp;= (ac-bd)+(ad+bc)i \\end{align}\nwhere, \\(a,b,c,d\\in\\mathbb{R}\\).\nsubtraction and division of complex numbers Let \\(\\alpha, \\beta \\in \\mathbb{C}\\), and \\(-a\\) be the additive inverse of \\(\\alpha\\) and \\(\\frac{1}{\\alpha}\\) be the multiplicative inverse of \\(\\alpha\\).\nsubtraction: \\(\\beta-\\alpha = \\beta + (-\\alpha)\\) division: \\(\\frac{\\beta}{\\alpha} = \\beta\\frac{1}{\\alpha}\\) Simple enough, subtraction and division of complex numbers is just defined by applying the inverses of a number to a different number.\ncomplex numbers form a field See properties of complex arithmetic, how we proved that it satisfies a field.\ncomplex conjugate The complex conjugate of a complex number is defined as\n\\begin{equation} \\bar{z} = \\text{Re}\\ z - (\\text{Im}\\ z)i \\end{equation}\ni.e. taking the complex part to be negative. Say, \\(z = 3+2i\\), then \\(\\bar{z}=3-2i\\).\nabsolute value (complex numbers) The absolute value (complex numbers) of a complex number is:\n\\begin{equation} |z| = \\sqrt{{(\\text{Re}\\ z)^{2} + (\\text{Im}\\ z)^{2}}} \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e is a type of \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e. They are usually written as \\(a+bi\\).\u003c/p\u003e\n\u003cp\u003eFormally\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{C} = \\left\\{a+bi\\ \\middle |\\ a,b \\in \\mathbb{R} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis set generates solutions to every single polynomial with unique solutions. 
Its plane looks like \\(\\mathbb{R}^{2}\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003ean order pair of two elements \\((a,b)\\) where \\(a,b\\in \\mathbb{R}\\).\u003c/p\u003e\n\u003ch2 id=\"properties-of-complex-arithmetic\"\u003eproperties of complex arithmetic\u003c/h2\u003e\n\u003cp\u003ethere are 6. For all statements below, we assume \\(\\alpha = a+bi\\) and \\(\\beta=c+di\\), \\(\\lambda = e+fi\\), where \\(a,b,c,d,e,f \\in \\mathbb{R}\\) and therefore \\(\\alpha, \\beta,\\lambda \\in \\mathbb{C}\\).\u003c/p\u003e\n\u003ch3 id=\"commutativity--kbhcommutivity-dot-md\"\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\(\\alpha + \\beta = \\beta + \\alpha\\) and \\(\\alpha\\beta = \\beta\\alpha\\) for all \\(\\alpha,\\beta \\in \\mathbb{C}\\).\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--commutativity--kbhcommutivity-dot-md\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eWe desire \\(\\alpha + \\beta = \\beta + \\alpha\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\alpha + \\beta \u0026amp;= (a+bi)+(c+di) \\\\\n\u0026amp;=(a+c)+(b+d)i \\\\\n\u0026amp;=(c+a)+(d+b)i \\\\\n\u0026amp;=(c+di) + (a+bi) \\\\\n\u0026amp;=\\beta+\\alpha\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003cp\u003eleveraging the \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e inside \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInsights: combining and splitting\u003c/p\u003e\n\u003cp\u003eThis proof has the feature of combining, operating (commuting, here), the splitting.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"associativity--kbhassociative-dot-md\"\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\((\\alpha +\\beta) + \\lambda = \\alpha + (\\beta +\\lambda)\\) and \\((\\alpha\\beta) \\lambda = (\\alpha \\beta) \\lambda\\)\u003c/p\u003e\n\u003cp\u003eProven via the same trick from last time\u003c/p\u003e\n\u003ch3 id=\"identities--kbhidentity-dot-md\"\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\(\\lambda + 0 = \\lambda\\), \\(\\lambda 1 = \\lambda\\)\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--additive-identity--kbhadditive-identity-dot-md\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eWe desire that \\(\\lambda + 0 = 0\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\lambda + 0 \u0026amp;= (e+fi) + (0+0i) \\\\\n\u0026amp;= (e+0) + (f+0)i \\\\\n\u0026amp;= e+fi\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e is proven in the same way\u003c/p\u003e\n\u003ch3 id=\"additive-inverse--kbhinverses-dot-md\"\u003eadditive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\)\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--additive-inverse--kbhinverses-dot-md\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e additive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eWe desire to claim that \\(\\forall \\alpha \\in \\mathbb{C}, \\exists !\\ \\beta \\in \\mathbb{C}: \\alpha + \\beta = 0\\), specifically that there 
\u003cem\u003eis\u003c/em\u003e a \u003cem\u003eunique\u003c/em\u003e \\(\\beta\\) which is the additive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e of every \\(\\alpha\\).\u003c/p\u003e\n\u003cp\u003eTake a number \\(\\alpha \\in \\mathbb{C}\\). We have that \\(\\alpha\\) would then by definition be some \\((a+bi)\\) where \\(a,b \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003cp\u003eTake some \\(\\beta\\) for which \\(\\alpha + \\beta = 0\\); by definition we again have \\(\\beta\\) equals some \\((c+di)\\) where \\(c,d \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\because \\alpha + \\beta =0\\), \\(\\therefore (a+bi) + (c+di) = 0\\).\u003c/li\u003e\n\u003cli\u003e\\(\\therefore (a+c) + (b+d)i = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\therefore a+c = 0, b+d = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\therefore c = -a, d = -b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe have created a unique definition of \\(c,d\\) and therefore \\(\\beta\\) given any \\(\\alpha\\), implying both uniqueness and existence.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInsights: construct then generalize\u003c/p\u003e\n\u003cp\u003eIn this case, the cool insight is the construct and generalize pattern. We are taking a single case \\(\\alpha\\), manipulating it, and wrote the result we want in terms of the constituents of \\(\\alpha\\). 
This creates both an existence and uniqueness proof.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multiplicative-inverse\"\u003emultiplicative inverse\u003c/h3\u003e\n\u003cp\u003e\\(\\forall \\alpha \\in \\mathbb{C}, \\alpha \\neq 0, \\exists!\\ \\beta \\in \\mathbb{C} : \\alpha\\beta =1\\)\u003c/p\u003e\n\u003cp\u003eThis is proven exactly in the same way as before.\u003c/p\u003e\n\u003ch3 id=\"distributive-property\"\u003edistributive property\u003c/h3\u003e\n\u003cp\u003e\\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\ \\forall\\ \\lambda, \\alpha, \\beta \\in \\mathbb{C}\\)\u003c/p\u003e\n\u003ch4 id=\"proof-of-complex-number--kbhcomplex-number-dot-md--distributive-property\"\u003eProof of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e distributive property\u003c/h4\u003e\n\u003cp\u003eWe desire to claim that \\(\\lambda(\\alpha+\\beta) = \\lambda \\alpha + \\lambda \\beta\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\lambda(\\alpha+\\beta) \u0026amp;= (e+fi)((a+bi)+(c+di))\\\\\n\u0026amp;=(e+fi)((a+c)+(b+d)i)\\\\\n\u0026amp;=((ea+ec)-(fb+fd))+((eb+ed)+(fa+fc))i\\\\\n\u0026amp;=ea+ec-fb-fd+(eb+ed+fa+fc)i\\\\\n\u0026amp;=ea-fb+ec-fd+(eb+fa+ed+fc)i\\\\\n\u0026amp;=(ea-fb)+(ec-fd)+((eb+fa)+(ed+fc))i\\\\\n\u0026amp;=((ea-fb)+(eb+fa)i) + ((ec-fd)+(ed+fc)i)\\\\\n\u0026amp;=(e+fi)(a+bi) + (e+fi)(c+di)\\\\\n\u0026amp;=\\lambda \\alpha + \\lambda \\beta\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInsights: try to remember to go backwards\u003c/p\u003e\n\u003cp\u003eAt some point in this proof I had to reverse complex addition then multiplication, which actually tripped me up for a bit (\u0026ldquo;how does \u003ccode\u003ei\u003c/code\u003e distribute!!!\u0026rdquo;, etc.) 
Turns out, there was already a definition for \u003ca href=\"#addition-and-multiplication-of-complex-number--kbhcomplex-number-dot-md--s\"\u003eaddition and multiplication of complex numbers\u003c/a\u003e so we just needed to use that.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"addition-and-multiplication-of-complex-number--kbhcomplex-number-dot-md--s\"\u003eaddition and multiplication of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n(a+bi) + (c+di) \u0026amp;= (a+c)+(b+d)i \\\\\n(a+bi)(c+di) \u0026amp;= (ac-bd)+(ad+bc)i\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, \\(a,b,c,d\\in\\mathbb{R}\\).\u003c/p\u003e\n\u003ch3 id=\"subtraction-and-division-of-complex-number--kbhcomplex-number-dot-md--s\"\u003esubtraction and division of \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eLet \\(\\alpha, \\beta \\in \\mathbb{C}\\), and \\(-a\\) be the additive inverse of \\(\\alpha\\) and \\(\\frac{1}{\\alpha}\\) be the multiplicative inverse of \\(\\alpha\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003esubtraction\u003c/strong\u003e\u003c/strong\u003e: \\(\\beta-\\alpha = \\beta + (-\\alpha)\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003edivision\u003c/strong\u003e\u003c/strong\u003e: \\(\\frac{\\beta}{\\alpha} = \\beta\\frac{1}{\\alpha}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSimple enough, \u003ca href=\"#subtraction-and-division-of-complex-number--kbhcomplex-number-dot-md--s\"\u003esubtraction and division of complex numbers\u003c/a\u003e is just defined by applying the inverses of a number to a different number.\u003c/p\u003e\n\u003ch3 id=\"complex-number--kbhcomplex-number-dot-md--s-form-a-field--kbhfield-dot-md\"\u003e\u003ca 
href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es form a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"#properties-of-complex-arithmetic\"\u003eproperties of complex arithmetic\u003c/a\u003e, how we proved that it satisfies a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"complex-conjugate\"\u003ecomplex conjugate\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#complex-conjugate\"\u003ecomplex conjugate\u003c/a\u003e of a \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e is defined as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{z} = \\text{Re}\\ z - (\\text{Im}\\ z)i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. taking the complex part to be negative. Say, \\(z = 3+2i\\), then \\(\\bar{z}=3-2i\\).\u003c/p\u003e\n\u003ch3 id=\"absolute-value--complex-number-kbhcomplex-number-dot-md--s\"\u003eabsolute value (\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es)\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#absolute-value--complex-number-kbhcomplex-number-dot-md--s\"\u003eabsolute value (complex numbers)\u003c/a\u003e of a \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|z| = \\sqrt{{(\\text{Re}\\ z)^{2} + (\\text{Im}\\ z)^{2}}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomplex_number/","tags":null,"title":"complex number"},{"categories":null,"contents":"\\begin{equation} \\begin{cases} x_1\u0026rsquo; = 5x_1 - 5x_2 \\\\ x_2\u0026rsquo; = 2x_1 -x_2 \\end{cases} \\end{equation}\nThis gives rise to:\n\\begin{equation} A = \\mqty(5 \u0026amp; -5 \\\\ 2 \u0026amp;-1) \\end{equation}\nSolving the characteristic polynomial gives:\n\\begin{equation} (5-\\lambda)(-1-\\lambda) + 10 = \\lambda^{2} - 4\\lambda +5 \\end{equation}\nTherefore, our solutions are 
imaginary!\n\\begin{equation} \\lambda_{1}, \\lambda_{2} = 2 \\pm i \\end{equation}\nAside: we only need to deal with one\nNotably, anything that satisfies the original polynomial, its conjugates also satisfies:\n\\begin{equation} \\bar{\\lambda^{2}-4\\lambda +5} = 0= {\\bar{\\lambda}}^{2} - 4\\bar{\\lambda} + 5 \\end{equation}\nFurther, for some:\n\\begin{equation} Av = \\lambda v \\end{equation}\nwe have:\n\\begin{equation} A \\bar{v} = \\lambda \\bar{v} \\end{equation}\nmeaning if we just figured the eigenvector of one of the lambdas we are good\nNow, let us consider the case before with \\(\\lambda = 2 +i\\). We therefore have:\n\\begin{equation} \\mqty(3-i \u0026amp; -5 \\\\ 2 \u0026amp; -3-i) \\mqty(a \\\\ b) = \\mqty(0 \\\\ 0) \\end{equation}\nThis gives one particular null space, such as:\n\\begin{equation} v = \\mqty(5 \\\\ 3-i) \\end{equation}\nThis gives rise to:\n\\begin{equation} u\u0026rsquo; = (2+i)u \\end{equation}\nwhich means:\n\\begin{equation} u(t) = ce^{(2+i)t} \\end{equation}\nfinally, resulting in:\n\\begin{equation} x(t) = ce^{(2+i)t} \\mqty(5 \\\\ 3-i) \\end{equation}\nwhich is a particular solution. 
Now, the general solution would tack on a complex conjugate, which doesn\u0026rsquo;t actually add any new information.\nInstead, we can actually use Euler to break this into two, independent, and equally valid solutions:\n\\begin{equation} x(t) = e^{2t} \\qty(\\cos t + i \\sin t) \\qty( \\mqty(5 \\\\3) - i \\mqty(0 \\\\ 1)) \\end{equation}\nfinally, we obtain:\n\\begin{equation} x(t) = e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1)) + i e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1)) \\end{equation}\neach of which individual is a solution:\n\\begin{equation} x_1(t) =e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1)) \\end{equation}\nand:\n\\begin{equation} x_2(t) = e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1)) \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = 5x_1 - 5x_2 \\\\\nx_2\u0026rsquo; = 2x_1 -x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(5 \u0026amp; -5 \\\\ 2 \u0026amp;-1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving the \u003ca href=\"/posts/kbhcharacteristic_polynomial/\"\u003echaracteristic polynomial\u003c/a\u003e gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(5-\\lambda)(-1-\\lambda) + 10 = \\lambda^{2} - 4\\lambda +5\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, our solutions are \u003cstrong\u003eimaginary!\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{1}, \\lambda_{2} = 2 \\pm i\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: we only need to deal with one\u003c/p\u003e\n\u003cp\u003eNotably, anything that satisfies the original polynomial, its conjugates also satisfies:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{\\lambda^{2}-4\\lambda +5} = 0= {\\bar{\\lambda}}^{2} - 4\\bar{\\lambda} + 5\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFurther, for 
some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\bar{v} = \\lambda \\bar{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning if we just figured the eigenvector of one of the lambdas we are good\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNow, let us consider the case before with \\(\\lambda = 2 +i\\). We therefore have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(3-i \u0026amp; -5 \\\\ 2 \u0026amp; -3-i) \\mqty(a \\\\ b) = \\mqty(0 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives one particular null space, such as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = \\mqty(5 \\\\ 3-i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu\u0026rsquo; = (2+i)u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t) = ce^{(2+i)t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efinally, resulting in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = ce^{(2+i)t} \\mqty(5 \\\\ 3-i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a particular solution. 
Now, the general solution would tack on a complex conjugate, which doesn\u0026rsquo;t actually add any new information.\u003c/p\u003e\n\u003cp\u003eInstead, we can actually use Euler to break this into two, independent, and equally valid solutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{2t} \\qty(\\cos t + i \\sin t) \\qty( \\mqty(5 \\\\3) - i \\mqty(0 \\\\ 1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efinally, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1)) + i e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeach of which individual is a solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1(t) =e^{2t} \\qty( \\cos t \\mqty(5 \\\\ 1) + \\sin t \\mqty(0 \\\\1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_2(t) = e^{2t} \\qty( \\sin t \\mqty(5 \\\\ 1) - \\cos t \\mqty(0 \\\\1))\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunderdetermined_ode_system/","tags":null,"title":"Complex ODE System"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcomplex_system/","tags":null,"title":"Complex System"},{"categories":null,"contents":"complexity theory is a theory in algorithms to analyze time classes.\nWe know that \\(O(n\\ log\\ n)\\) is between \\(O(n)\\) and \\(O(n^2)\\) \u0026mdash; so we can roughly call it \u0026ldquo;polynomial time.\u0026rdquo;\nSince the optimal comparison cannot be faster than polynomial time, we say that comparison-based sorting is a polynomial-time algorithm.\nFrom this information, we can come up with two main time classes: \\(P\\) for solutions with known polynomial time, \\(NP\\) for non-deterministic polynomial time.\nThink of it as \\(P\\) is solvable with polynomial time and \\(NP\\) is verifiable with polynomial time.\nThe cool thing about 
\\(NP\\) problems is that solving a subset of them (\u0026quot;\\(NP\\) hard\u0026quot; problems) solves all \\(NP\\) problems.\nreduction (algorithms) reduction is how you can use \\(NP-hard\\) problems to solve all \\(NP\\) problems in complexity theory.\nSay, multiplication:\nsay you have a basic algorithm to add we can perform multiplication by asking our black box addition algorithm to add \\(n\\) times in complexity theory terms, this means addition is \u0026ldquo;at least as hard\u0026rdquo; as multiplication. Because, if we can solve any addition problem, we can solve any multiplication problem. \u0026ldquo;Given this, do that.\u0026rdquo;\nproblem classes (see above)\n\u0026ldquo;Polynomial time\u0026rdquo; \\(P\\) \u0026mdash; problems solvable with polynomial time \u0026ldquo;Non-deterministic polynomial time\u0026rdquo; \\(NP\\) \u0026mdash; problem verifiable with polynomial time \u0026ldquo;Exponential time\u0026rdquo; \\(EXPTIME\\) \u0026mdash; problems that can only be solved in exponential time \u0026ldquo;2 Exponential time\u0026rdquo; \\(2EXPTIME\\) \u0026mdash; class of problems that takes \\(2^{2^n}\\) time to solve Space complexity works in a similar way.\n\\(P\\) and \\(NP\\) are deterministic and non-deterministic in context to a Turing machine.\nFundamentally, \\(P\\) and \\(NP\\) only apply to decision problems\u0026mdash;given a problem, output \u0026ldquo;yes\u0026rdquo; or \u0026ldquo;no.\u0026rdquo; However, this definition can be stretched: sorting is a decision problem, because it can be stated as \u0026ldquo;given an unsorted array, can you verify whether or not an array is sorted\u0026rdquo;\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e is a theory in algorithms to analyze time classes.\u003c/p\u003e\n\u003cp\u003eWe know that \\(O(n\\ log\\ n)\\) is between \\(O(n)\\) and \\(O(n^2)\\) \u0026mdash; so we can roughly call it \u0026ldquo;polynomial 
time.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSince the optimal comparison cannot be faster than polynomial time, we say that comparison-based sorting is a \u003cem\u003epolynomial-time\u003c/em\u003e algorithm.\u003c/p\u003e\n\u003cp\u003eFrom this information, we can come up with two main time classes: \\(P\\) for solutions with known polynomial time, \\(NP\\) for non-deterministic polynomial time.\u003c/p\u003e\n\u003cp\u003eThink of it as \\(P\\) is solvable with polynomial time and \\(NP\\) is verifiable with polynomial time.\u003c/p\u003e\n\u003cp\u003eThe cool thing about \\(NP\\) problems is that solving a subset of them (\u0026quot;\\(NP\\) hard\u0026quot; problems) solves all \\(NP\\) problems.\u003c/p\u003e\n\u003ch2 id=\"reduction--algorithms\"\u003ereduction (algorithms)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#reduction--algorithms\"\u003ereduction\u003c/a\u003e is how you can use \\(NP-hard\\) problems to solve all \\(NP\\) problems in \u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSay, multiplication:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esay you have a basic algorithm to add\u003c/li\u003e\n\u003cli\u003ewe can perform multiplication by asking our black box addition algorithm to add \\(n\\) times\u003c/li\u003e\n\u003cli\u003ein \u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e terms, this means addition is \u0026ldquo;at least as hard\u0026rdquo; as multiplication. 
Because, if we can solve any addition problem, we can solve any multiplication problem.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;Given this, do that.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"problem-classes\"\u003eproblem classes\u003c/h2\u003e\n\u003cp\u003e(see above)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Polynomial time\u0026rdquo; \\(P\\) \u0026mdash; problems solvable with polynomial time\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Non-deterministic polynomial time\u0026rdquo; \\(NP\\) \u0026mdash; problem verifiable with polynomial time\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Exponential time\u0026rdquo; \\(EXPTIME\\) \u0026mdash; problems that can only be solved in exponential time\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;2 Exponential time\u0026rdquo; \\(2EXPTIME\\) \u0026mdash; class of problems that takes \\(2^{2^n}\\) time to solve\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSpace complexity works in a similar way.\u003c/p\u003e\n\u003cp\u003e\\(P\\) and \\(NP\\) are deterministic and non-deterministic \u003cem\u003ein context\u003c/em\u003e to a Turing machine.\u003c/p\u003e\n\u003cp\u003eFundamentally, \\(P\\) and \\(NP\\) only apply to \u003cem\u003edecision problems\u003c/em\u003e\u0026mdash;given a problem, output \u0026ldquo;yes\u0026rdquo; or \u0026ldquo;no.\u0026rdquo; However, this definition can be stretched: sorting is a decision problem, because it can be stated as \u0026ldquo;given an unsorted array, can you verify whether or not an array is sorted\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomplexity_theory/","tags":null,"title":"complexity theory"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcomposite_system/","tags":null,"title":"composite system"},{"categories":null,"contents":"compositional scene representation is the process of trying to represent a certain visual signal into its constituent parts.\nAim: unsupervised 
segmentation + representation\nthe model finds the most intuitive representations of the scene train segmentation and representation together Autoencoding segmentation! Segment =\u0026gt; Represent =\u0026gt; Resegment =\u0026gt; etc.\nGaussian Mixture Model???? over pixels: regularizes by taking KL Divergence between latent and predicted output, to force them to be similar.\nLoss: error in RECONSTRUCTION and KL-Divergence of latent space\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhscene_representation/\"\u003ecompositional scene representation\u003c/a\u003e is the process of trying to represent a certain visual signal into its constituent parts.\u003c/p\u003e\n\u003cp\u003eAim: unsupervised segmentation + representation\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe model finds the most intuitive representations of the scene\u003c/li\u003e\n\u003cli\u003etrain segmentation and representation together\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAutoencoding segmentation! Segment =\u0026gt; Represent =\u0026gt; Resegment =\u0026gt; etc.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgaussian_mixture_model/\"\u003eGaussian Mixture Model\u003c/a\u003e???? 
over pixels: regularizes by taking \u003ca href=\"/posts/kbhkl_divergence/\"\u003eKL Divergence\u003c/a\u003e between latent and predicted output, to force them to be similar.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eLoss: error in RECONSTRUCTION and KL-Divergence of latent space\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscene_representation/","tags":null,"title":"compositional scene representation"},{"categories":null,"contents":"Computational Biology is a the study of biology using computation.\nRather that starting from the properties, start with the end states or what properties it has; instead, we define the initial values based on the edges.\nConstructor theory: https://en.wikipedia.org/wiki/Constructor_theory?\nthe relationship between temperature and occurance of a uniform gas is actually right-skewed; the mean temperature in a uniform closed system will be higher than the median temperature molecules are not static: at best, molecules are static when frozen in place; yet, generally it is not in their nature to stay solidly in place; they just shuffle around but maintain the molecular sturucture If the energy level is higher, it will ignore various troughs\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcomputational_biology_index/\"\u003eComputational Biology\u003c/a\u003e is a the study of biology using computation.\u003c/p\u003e\n\u003cp\u003eRather that starting from the properties, start with the end states or what properties it has; instead, we define the initial values based on the edges.\u003c/p\u003e\n\u003cp\u003eConstructor theory: \u003ca href=\"https://en.wikipedia.org/wiki/Constructor_theory\"\u003ehttps://en.wikipedia.org/wiki/Constructor_theory\u003c/a\u003e?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe relationship between temperature and occurance of a uniform gas is actually right-skewed; the mean temperature in a uniform closed system will be higher than the median 
temperature\u003c/li\u003e\n\u003cli\u003emolecules are not static: at best, molecules are static when frozen in place; yet, generally it is not in their nature to stay solidly in place; they just shuffle around but maintain the molecular sturucture\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf the energy level is higher, it will ignore various troughs\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomputational_biology_index/","tags":null,"title":"Computational Biology Index"},{"categories":null,"contents":"bit A computer is built out of binary gates:\nSo, having voltage into \\(B\\) allows current to pass through between \\(S\\) and \\(D\\), it could be on/off.\nbyte Accumulation of \\(8\\) bits\nComputer memory is a large array of bytes. It is only BYTE ADDRESSABLE: you can\u0026rsquo;t address a bit in isolation.\nbases Generate, each base uses digits \\(0\\) to \\(base-1\\).\nWe prefix 0x to represent hexadecimal, and 0b to represent binary.\nbase 10 base 10 uses digits \\(0-9\\). Each place value represents to a certain power of \\(10\\): \\(10^{n}\\) at each place value \\(n\\), starting at \\(0\\).\nbase 2 base 2 uses digits \\(0-1\\). Each place value represents a certain power of \\(2\\): \\(2^{n}\\) at each place \\(n\\), starting at \\(0\\).\nThe leftmost (largest value) is considered most-significant bit, right most is the least-significant bit.\nconversion from base 10 to base 2 \u0026ldquo;What is \\(6\\) is base 2?\u0026rdquo;\nWhat\u0026rsquo;s the largest power of \\(2 \\leq 6\\)? Well, we have \\(2^{2}\\). Therefore, the first place value is \\(2\\), which is the third digit. now, we subtract the remainder, we now have \\(6-4=2\\) , which is \\(2^{1}\\) min and max of binary The maximum value could be one minus the extra place value. For instance, if you have \\(8\\) digits (i.e. 
7 place values), you would only be able to represent:\n\\begin{equation} 2^{8}-1 = 255 \\end{equation}\nmultiplying and dividing by base It works in the way you expect.\nbase 16 We can use base 16 essentially to divide base 2 numbers into groups of \\(4\\).\nEach quartet of bits can be converted separately\n\u0026ldquo;Which bit is missing\u0026rdquo; The way you can do conversion in your head more simply is to stare at a binary number in groups of \\(4\\), and see which missing bytes are there and subtract that much.\nnumerical representations unsigned integers Positive numbers and 0. A number is either \\(0\\) or some positive integer.\nThe range of is \\(2^{w}-1\\) where \\(w\\) is the number of bits, because we are cramming the entire number from \\(0\\) to \\(2^{w}-1\\).\nsigned integers Negative, positive, and \\(0\\).\na bad system The fact that \\(0\\) is signed is quite bad. And like adding negative numbers to positive number is very hard because you need another processor to figure out what the sign is.\ntwo\u0026rsquo;s complement See two\u0026rsquo;s complement.\nsizes of stuff (bytes)\nint: 4 float: 4 double: 8 char: 1 pointer: 8 (for 64 bit systems) short: 2 long: 8 overflow If you exceed the maximum value of bit representation, it rolls over to becoming negative. If you subtract one, you have to borrow from an imaginary\n","html":"\u003ch2 id=\"bit\"\u003ebit\u003c/h2\u003e\n\u003cp\u003eA computer is built out of binary gates:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-36-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo, having voltage into \\(B\\) allows current to pass through between \\(S\\) and \\(D\\), it could be on/off.\u003c/p\u003e\n\u003ch2 id=\"byte\"\u003ebyte\u003c/h2\u003e\n\u003cp\u003eAccumulation of \\(8\\) \u003ca href=\"#bit\"\u003ebit\u003c/a\u003es\u003c/p\u003e\n\u003cp\u003eComputer memory is a large array of \u003ca href=\"#byte\"\u003ebyte\u003c/a\u003es. 
It is only \u003cstrong\u003eBYTE ADDRESSABLE\u003c/strong\u003e: you can\u0026rsquo;t address a bit in isolation.\u003c/p\u003e\n\u003ch2 id=\"bases\"\u003ebases\u003c/h2\u003e\n\u003cp\u003eGenerate, each base uses digits \\(0\\) to \\(base-1\\).\u003c/p\u003e\n\u003cp\u003eWe prefix \u003ccode\u003e0x\u003c/code\u003e to represent hexadecimal, and \u003ccode\u003e0b\u003c/code\u003e to represent binary.\u003c/p\u003e\n\u003ch3 id=\"base-10\"\u003ebase 10\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#base-10\"\u003ebase 10\u003c/a\u003e uses digits \\(0-9\\). Each place value represents to a certain power of \\(10\\): \\(10^{n}\\) at each place value \\(n\\), starting at \\(0\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-40-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"base-2\"\u003ebase 2\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#base-2\"\u003ebase 2\u003c/a\u003e uses digits \\(0-1\\). Each place value represents a certain power of \\(2\\): \\(2^{n}\\) at each place \\(n\\), starting at \\(0\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-41-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe leftmost (largest value) is considered most-significant bit, right most is the least-significant bit.\u003c/p\u003e\n\u003ch4 id=\"conversion-from-base-10-to-base-2\"\u003econversion from base 10 to base 2\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;What is \\(6\\) is base 2?\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWhat\u0026rsquo;s the largest power of \\(2 \\leq 6\\)? Well, we have \\(2^{2}\\). Therefore, the first place value is \\(2\\), which is the third digit.\u003c/li\u003e\n\u003cli\u003enow, we subtract the remainder, we now have \\(6-4=2\\) , which is \\(2^{1}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"min-and-max-of-binary\"\u003emin and max of binary\u003c/h4\u003e\n\u003cp\u003eThe maximum value could be one minus the extra place value. 
For instance, if you have \\(8\\) digits (i.e. 7 place values), you would only be able to represent:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2^{8}-1 = 255\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"multiplying-and-dividing-by-base\"\u003emultiplying and dividing by base\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-48-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-49-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIt works in the way you expect.\u003c/p\u003e\n\u003ch3 id=\"base-16\"\u003ebase 16\u003c/h3\u003e\n\u003cp\u003eWe can use \u003ca href=\"#base-16\"\u003ebase 16\u003c/a\u003e essentially to divide \u003ca href=\"#base-2\"\u003ebase 2\u003c/a\u003e numbers into groups of \\(4\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_10-54-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eEach quartet of bits can be converted separately\u003c/p\u003e\n\u003ch3 id=\"which-bit-is-missing\"\u003e\u0026ldquo;Which bit is missing\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eThe way you can do conversion in your head more simply is to stare at a binary number in groups of \\(4\\), and see which missing bytes are there and subtract that much.\u003c/p\u003e\n\u003ch2 id=\"numerical-representations\"\u003enumerical representations\u003c/h2\u003e\n\u003ch3 id=\"unsigned-integers\"\u003eunsigned integers\u003c/h3\u003e\n\u003cp\u003ePositive numbers and 0. 
A number is either \\(0\\) or some positive integer.\u003c/p\u003e\n\u003cp\u003eThe range of is \\(2^{w}-1\\) where \\(w\\) is the number of bits, because we are cramming the entire number from \\(0\\) to \\(2^{w}-1\\).\u003c/p\u003e\n\u003ch3 id=\"signed-integers\"\u003esigned integers\u003c/h3\u003e\n\u003cp\u003eNegative, positive, and \\(0\\).\u003c/p\u003e\n\u003ch4 id=\"a-bad-system\"\u003ea bad system\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_11-13-22_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe fact that \\(0\\) is signed is quite bad. And like adding negative numbers to positive number is very hard because you need another processor to figure out what the sign is.\u003c/p\u003e\n\u003ch4 id=\"two-s-complement\"\u003etwo\u0026rsquo;s complement\u003c/h4\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement.\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"sizes-of-stuff\"\u003esizes of stuff\u003c/h3\u003e\n\u003cp\u003e(\u003ca href=\"#byte\"\u003ebyte\u003c/a\u003es)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-49-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eint: 4\u003c/li\u003e\n\u003cli\u003efloat: 4\u003c/li\u003e\n\u003cli\u003edouble: 8\u003c/li\u003e\n\u003cli\u003echar: 1\u003c/li\u003e\n\u003cli\u003epointer: 8 (for 64 bit systems)\u003c/li\u003e\n\u003cli\u003eshort: 2\u003c/li\u003e\n\u003cli\u003elong: 8\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"overflow\"\u003eoverflow\u003c/h2\u003e\n\u003cp\u003eIf you exceed the maximum value of bit representation, it rolls over to becoming negative. 
If you subtract one, you have to borrow from an imaginary\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhbinary_number_system/","tags":null,"title":"computer number system"},{"categories":null,"contents":"Notes on CS 107, C, MIPS, and computational systems.\nLectures SU-CS107 SEP272023 SU-CS107 SEP292023 SU-CS107 OCT022023 SU-CS107 OCT032023 SU-CS107 OCT042023 SU-CS107 OCT062023 SU-CS107 OCT092023 SU-CS107 OCT112023 SU-CS107 OCT132023 SU-CS107 OCT182023 SU-CS107 OCT202023 SU-CS107 OCT232023 SU-CS107 OCT252023 SU-CS107 OCT272023 SU-CS107 NOV102023 SU-CS107 NOV132023 SU-CS107 DEC012023 Worksheets SU-CS107 Midterm Sheet ","html":"\u003cp\u003eNotes on CS 107, C, MIPS, and computational systems.\u003c/p\u003e\n\u003ch2 id=\"lectures\"\u003eLectures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_sep272023/\"\u003eSU-CS107 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_sep292023/\"\u003eSU-CS107 SEP292023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct022023/\"\u003eSU-CS107 OCT022023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct032023/\"\u003eSU-CS107 OCT032023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct042023/\"\u003eSU-CS107 OCT042023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct062023/\"\u003eSU-CS107 OCT062023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct092023/\"\u003eSU-CS107 OCT092023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct112023/\"\u003eSU-CS107 OCT112023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct132023/\"\u003eSU-CS107 OCT132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct182023/\"\u003eSU-CS107 OCT182023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs107_oct2023/\"\u003eSU-CS107 OCT202023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct232023/\"\u003eSU-CS107 OCT232023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct252023/\"\u003eSU-CS107 OCT252023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_oct272023/\"\u003eSU-CS107 OCT272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_nov102023/\"\u003eSU-CS107 NOV102023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_nov132023/\"\u003eSU-CS107 NOV132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_dec012023/\"\u003eSU-CS107 DEC012023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"worksheets\"\u003eWorksheets\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs107_midterm_sheet/\"\u003eSU-CS107 Midterm Sheet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcomputer_systems_index/","tags":["index"],"title":"Computer Systems Index"},{"categories":null,"contents":"conceptual grammar is the proposed universal grammar which connects semantic primes. In theory, this grammar is universal across languages.\nThere are three main categories of conceptual grammars:\nCombinatorics (connecting one idea to another) Account of valancies? #what Propositional complementation (location \u0026ldquo;something that happen in this place\u0026rdquo; ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconceptual_grammar/\"\u003econceptual grammar\u003c/a\u003e is the proposed universal grammar which connects \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es. 
In theory, this grammar is universal across languages.\u003c/p\u003e\n\u003cp\u003eThere are three main categories of \u003ca href=\"/posts/kbhconceptual_grammar/\"\u003econceptual grammar\u003c/a\u003es:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCombinatorics (connecting one idea to another)\u003c/li\u003e\n\u003cli\u003eAccount of valancies? #what\u003c/li\u003e\n\u003cli\u003ePropositional complementation (location \u0026ldquo;something that happen in this place\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconceptual_grammar/","tags":null,"title":"conceptual grammar"},{"categories":null,"contents":"Current automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.Accepted to the 2022 SAI Computing Conference, to be published on Springer Nature\u0026rsquo;s Lecture Notes on Networks and Systems Current automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. 
Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.\n","html":"\u003cp\u003eCurrent automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.Accepted to the 2022 SAI Computing Conference, to be published on Springer Nature\u0026rsquo;s Lecture Notes on Networks and Systems Current automated lexicography (term definition) techniques cannot include contextual or new term information as a part of its synthesis. We propose a novel data harvesting scheme leveraging lead paragraphs in Wikipedia to train automated context-aware lexicographical models. Furthermore, we present ConDef, a fine-tuned BART trained on the harvested data that defines vocabulary terms from a short context. 
ConDef is determined to be highly accurate in context-dependent lexicography as validated on ROUGE-1 and ROUGE-L measures in an 1000-item withheld test set, achieving scores of 46.40% and 43.26% respectively. Furthermore, we demonstrate that ConDef\u0026rsquo;s synthesis serve as good proxies for term definitions by achieving ROUGE-1 measure of 27.79% directly against gold-standard WordNet definitions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcondef_abstract/","tags":null,"title":"ConDef Abstract"},{"categories":null,"contents":"Say you have one continuous variable \\(X\\), and one discrete variable \\(Y\\), and you desire to express the probability of \\(X\\) conditioned upon \\(Y\\) using a gaussian model:\n\\begin{equation} p(x|y) = \\begin{cases} \\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{1} \\\\ \\dots \\\\ \\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{n} \\\\ \\end{cases} \\end{equation}\n","html":"\u003cp\u003eSay you have one continuous variable \\(X\\), and one discrete variable \\(Y\\), and you desire to express the probability of \\(X\\) conditioned upon \\(Y\\) using a \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003egaussian model\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x|y) = \\begin{cases}\n\\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{1} \\\\\n\\dots \\\\\n\\mathcal{N}(x \\mid \\mu_{1}, \\sigma_{1}^{2}), y^{n} \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconditional_gaussian_models/","tags":null,"title":"conditional Gaussian model"},{"categories":null,"contents":"conditional plan is a POMDP representation technique. 
We can represent a conditional plan as a tree.\ntoy problem crying baby POMDP problem:\nactions: feed, ignore reward: if hungry, negative reward state: two states: is the baby hungry or not observation: noisy crying (she maybe crying because she\u0026rsquo;s genuinely hungry or crying just for kicks) formulate a conditional plan we can create a conditional plan by generating a exponential tree based on the observations. This is a policy which tells you what you should do given the sequence of observations you get, with no knowledge of the underlying state.\nWe call this plan \\(\\pi\\) (shock suprise). We define two notations:\n\\(\\pi()\\): the ACTION at the head of this tree (in this case, \u0026ldquo;ignore\u0026rdquo;) \\(\\pi(o)\\): the SUBTREE which is one-level below the first action. For instance, for both observations of the tree above, \\(\\pi(o)()\\) is ignore for both \\(o\\). conditional plan evaluation Assume we have a starting at some given true state \\(s\\). We can evaluate a conditional plan at that state by formulating:\n\\begin{equation} U^{\\pi} (s) = R(s, \\pi()) + \\gamma \\qty[\\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s, \\pi()) \\sum_{o} O(o|\\pi(), s\u0026rsquo;) U^{\\pi(o)}(s\u0026rsquo;)] \\end{equation}\nwhere, \\(\\pi()\\) is the action at the root node of the tree; and \\(\\pi(o)\\) is the subtree for subplan at observation \\(o\\); essentially, at each point where we evaluate \\(U\\), we move the root node forward and recalculate. If we run out of depth, the utility is \\(0\\) and hence the whole right term is \\(0\\).\nOf course this assumes we know what our initial state is. Which is lame. 
So now:\n\\begin{equation} U^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s) \\end{equation}\nwhich will give us the utility of our policy given a belief about wher ewe are.\nso literally take our belief about the probability of us being in each initial state and calculate it for each of our initial states.\noptimal value function for POMDP \\begin{equation} U^{*}(b) = \\max_{\\pi} U^{\\pi}(b) \\end{equation}\nOf course, trying to actually do this is impossible because you have to iterate over all possible policies and then calculate every utility from them.\nThis is practically untenable, because the space of \\(\\pi\\) is wayyy too big. Hence, we turn to alpha vectors.\nSee also optimal value function for POMDP with alpha vector\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e is a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e representation technique. We can represent a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e as a tree.\u003c/p\u003e\n\u003ch2 id=\"toy-problem\"\u003etoy problem\u003c/h2\u003e\n\u003cp\u003ecrying baby \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e problem:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eactions\u003c/strong\u003e: feed, ignore\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ereward\u003c/strong\u003e: if hungry, negative reward\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003estate\u003c/strong\u003e: two states: is the baby hungry or not\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eobservation\u003c/strong\u003e: noisy crying (she maybe crying because she\u0026rsquo;s genuinely hungry or crying just for kicks)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"formulate-a-conditional-plan--kbhconditional-plan-dot-md\"\u003eformulate a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional 
plan\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ewe can create a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e by generating a exponential tree based on the \u003cstrong\u003eobservations\u003c/strong\u003e. This is a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e which tells you what you should do given the sequence of observations you get, with no knowledge of the underlying state.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-14_10-04-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe call this plan \\(\\pi\\) (shock suprise). We define two notations:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\pi()\\): the \u003cstrong\u003e\u003cstrong\u003eACTION\u003c/strong\u003e\u003c/strong\u003e at the head of this tree (in this case, \u0026ldquo;ignore\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003e\\(\\pi(o)\\): the \u003cstrong\u003e\u003cstrong\u003eSUBTREE\u003c/strong\u003e\u003c/strong\u003e which is one-level below the first action. For instance, for both observations of the tree above, \\(\\pi(o)()\\) is ignore for both \\(o\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"conditional-plan--kbhconditional-plan-dot-md--evaluation\"\u003e\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e evaluation\u003c/h2\u003e\n\u003cp\u003eAssume we have a starting at some given true state \\(s\\). 
We can evaluate a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e at that state by formulating:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi} (s) = R(s, \\pi()) + \\gamma \\qty[\\sum_{s\u0026rsquo;} T(s\u0026rsquo;|s, \\pi()) \\sum_{o} O(o|\\pi(), s\u0026rsquo;) U^{\\pi(o)}(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\pi()\\) is the action at the root node of the tree; and \\(\\pi(o)\\) is the subtree for subplan at observation \\(o\\); essentially, at each point where we evaluate \\(U\\), we move the root node forward and recalculate. If we run out of depth, the utility is \\(0\\) and hence the whole right term is \\(0\\).\u003c/p\u003e\n\u003cp\u003eOf course this assumes we know what our initial state is. Which is lame. So now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(b) = \\sum_{s}^{} b(s) U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich will give us the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of our policy given a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e about wher ewe are.\u003c/p\u003e\n\u003cp\u003eso literally take our belief about the probability of us being in each initial state and calculate it for each of our initial states.\u003c/p\u003e\n\u003ch2 id=\"optimal-value-function--kbhpolicy-dot-md--for-pomdp--kbhpartially-observable-markov-decision-process-dot-md\"\u003e\u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal value function\u003c/a\u003e for \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}(b) = \\max_{\\pi} U^{\\pi}(b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, trying to actually do this is impossible because you have to iterate over all possible policies and then calculate every utility from them.\u003c/p\u003e\n\u003cp\u003eThis is practically untenable, because the 
space of \\(\\pi\\) is wayyy too big. Hence, we turn to \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhalpha_vector/#id-a2dee193-65b1-47ed-8dbc-aa362b28b451-optimal-value-function-for-pomdp-with-id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector\"\u003eoptimal value function for POMDP with alpha vector\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconditional_plan/","tags":null,"title":"conditional plan"},{"categories":null,"contents":"There are many condition in the Great Depression caused\nby 1932, 1/4 had no work emigration exceeded immigration decrease in American birth increase of mental illness and suicide people create Hooverviles movies and radio became much more popular ","html":"\u003cp\u003eThere are many condition in the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e caused\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eby 1932, 1/4 had no work\u003c/li\u003e\n\u003cli\u003eemigration exceeded immigration\u003c/li\u003e\n\u003cli\u003edecrease in American birth\u003c/li\u003e\n\u003cli\u003eincrease of mental illness and suicide\u003c/li\u003e\n\u003cli\u003epeople create \u003ca href=\"/posts/kbhhooverviles/\"\u003eHooverviles\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003emovies and radio became much more popular\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconditions_in_the_great_depression/","tags":null,"title":"conditions in the Great Depression"},{"categories":null,"contents":"proportional confidence intervals We will measure a single stastistic from a large population, and call it the point estimate. 
This is usually denoted as \\(\\hat{p}\\).\nGiven a proportion \\(\\hat{p}\\) (\u0026ldquo;95% of sample), the range which would possibly contain it as part of its \\(2\\sigma\\) range is the \\(95\\%\\) confidence interval.\nTherefore, given a \\(\\hat{p}\\) the plausible interval for its confidence is:\n\\begin{equation} \\hat{p} \\pm z^* \\sqrt{\\frac{\\hat{p}(1-\\hat{p})}{n}} \\end{equation}\nwhere, \\(n\\) is the sample size, \\(\\hat{p}\\) is the point estimate, and \\(z*=1.96\\) is the critical value, the z-score denoting \\(95\\%\\) confidence (or any other desired confidence level).\nconditions for proportional confidence interval There are the conditions that make a proportional confidence interval work\ndistribution is normal \\(n\\hat{p}\\) and \\(n(1-\\hat{p})\\) are both \\(\u0026gt;10\\) we are sampling with replacement, or otherwise sampling \\(\u0026lt;10\\%\\) of population (otherwise, we need to apply a finite population correction value confidence intervals The expression is:\n\\begin{equation} \\bar{x} \\pm t^* \\frac{s}{\\sqrt{n}} \\end{equation}\nwhere \\(t*\\) is the \\(t\\) score of the desired power level with the correct degrees of freedom; \\(s\\) the sample standard deviation, \\(n\\) the sample size, and \\(\\har{x}\\) the mean.\n","html":"\u003ch2 id=\"proportional-confidence-intervals\"\u003eproportional confidence intervals\u003c/h2\u003e\n\u003cp\u003eWe will measure a single \u003ca href=\"/posts/kbhstastistic/\"\u003estastistic\u003c/a\u003e from a large population, and call it the \u003ca href=\"/posts/kbhpoint_estimate/\"\u003epoint estimate\u003c/a\u003e. 
This is usually denoted as \\(\\hat{p}\\).\u003c/p\u003e\n\u003cp\u003eGiven a proportion \\(\\hat{p}\\) (\u0026ldquo;95% of sample), the range which would possibly contain it as part of its \\(2\\sigma\\) range is the \\(95\\%\\) confidence interval.\u003c/p\u003e\n\u003cp\u003eTherefore, given a \\(\\hat{p}\\) the plausible interval for its confidence is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{p} \\pm z^* \\sqrt{\\frac{\\hat{p}(1-\\hat{p})}{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n\\) is the sample size, \\(\\hat{p}\\) is the \u003ca href=\"/posts/kbhpoint_estimate/\"\u003epoint estimate\u003c/a\u003e, and \\(z*=1.96\\) is the \u003ca href=\"/posts/kbhcritical_value/\"\u003ecritical value\u003c/a\u003e, the \u003ca href=\"/posts/kbhz_score/\"\u003ez-score\u003c/a\u003e denoting \\(95\\%\\) confidence (or any other desired confidence level).\u003c/p\u003e\n\u003ch2 id=\"conditions-for-proportional-confidence-interval\"\u003econditions for proportional confidence interval\u003c/h2\u003e\n\u003cp\u003eThere are the conditions that make a \u003ca href=\"#proportional-confidence-intervals\"\u003eproportional confidence interval\u003c/a\u003e work\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edistribution is normal\u003c/li\u003e\n\u003cli\u003e\\(n\\hat{p}\\) and \\(n(1-\\hat{p})\\) are both \\(\u0026gt;10\\)\u003c/li\u003e\n\u003cli\u003ewe are sampling with replacement, or otherwise sampling \\(\u0026lt;10\\%\\) of population (otherwise, we need to apply a finite population correction\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"value-confidence-intervals\"\u003evalue confidence intervals\u003c/h2\u003e\n\u003cp\u003eThe expression is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{x} \\pm t^* \\frac{s}{\\sqrt{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(t*\\) is the \\(t\\) score of the desired power level with the correct degrees of freedom; \\(s\\) the sample standard deviation, \\(n\\) the sample size, and 
\\(\\har{x}\\) the mean.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconfidence_interval/","tags":null,"title":"confidence interval"},{"categories":null,"contents":"conjugation is the process of building a similar matricies.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconjugation/\"\u003econjugation\u003c/a\u003e is the process of building a \u003ca href=\"/posts/kbheigenvalue/#similar-matrices\"\u003esimilar\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconjugation/","tags":null,"title":"conjugation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhconnectionism/","tags":null,"title":"connectionism"},{"categories":null,"contents":"constructor theory deals with \u0026ldquo;constructors\u0026rdquo;, a general type of computer.\nconstructor theory can give us a theory of the universal quantum constructor by expanding upon quantum information theory. It allows us to unify quantum and classical information by simply defining operations in terms of counterfactuals exclusively: that a space is entirely defined by what\u0026rsquo;s possible and what\u0026rsquo;s not possible.\nAccording to constructor theory, fundamental laws are not dynamical laws instead are boundary conditions. 
We can take the boundary conditions to form the most general set of initial conditions.\nyou can conjecture a set of laws is fully complete at some point, you will find something that hits the bounds then you revise the theory ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e deals with \u0026ldquo;\u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003es\u0026rdquo;, a general type of computer.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e can give us a theory of the \u003ca href=\"/posts/kbhuniversal_quantum_constructor/\"\u003euniversal quantum constructor\u003c/a\u003e by expanding upon \u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e. It allows us to unify quantum and classical information by simply defining operations in terms of \u003ca href=\"/posts/kbhcounterfactual/\"\u003ecounterfactual\u003c/a\u003es exclusively: that a space is entirely defined by what\u0026rsquo;s possible and what\u0026rsquo;s not possible.\u003c/p\u003e\n\u003cp\u003eAccording to \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003e theory, fundamental laws are not dynamical laws instead are boundary conditions. 
We can take the boundary conditions to form the most general set of initial conditions.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eyou can conjecture a set of laws is fully complete\u003c/li\u003e\n\u003cli\u003eat some point, you will find something that hits the bounds\u003c/li\u003e\n\u003cli\u003ethen you revise the theory\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconstructor_theory/","tags":null,"title":"constructor"},{"categories":null,"contents":"Used: IBM/OS360\ncontiguous allocation puts the files and metadata together, and implements a Explicit Free List Allocator across the file.\nbenefits simple problems external fragmentation: little pockets of data is everywhere editing: hard to grow files ","html":"\u003cp\u003eUsed: IBM/OS360\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcontiguous_allocation/\"\u003econtiguous allocation\u003c/a\u003e puts the files and metadata together, and implements a \u003ca href=\"/posts/kbhheap_allocator/#explicit-free-list-allocator\"\u003eExplicit Free List Allocator\u003c/a\u003e across the file.\u003c/p\u003e\n\u003ch2 id=\"benefits\"\u003ebenefits\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esimple\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"problems\"\u003eproblems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eexternal fragmentation: little pockets of data is everywhere\u003c/li\u003e\n\u003cli\u003eediting: hard to grow files\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontiguous_allocation/","tags":null,"title":"contiguous allocation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcontinuity_correct/","tags":null,"title":"continuity correct"},{"categories":null,"contents":"Because we want to including rounding during continuity correction to account for things discretized to certain values.\nDiscrete Continuous P(X = 6) P( 5.5 \u0026lt;= X \u0026lt;= 6.5) P(X \u0026gt;= 6) P (X \u0026gt;= 
5.5) P(X \u0026gt; 6) P (X \u0026gt;= 6.5) basically \u0026ldquo;less than\n","html":"\u003cp\u003eBecause we want to including rounding during \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e to account for things discretized to certain values.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDiscrete\u003c/th\u003e\n\u003cth\u003eContinuous\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(X = 6)\u003c/td\u003e\n\u003ctd\u003eP( 5.5 \u0026lt;= X \u0026lt;= 6.5)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(X \u0026gt;= 6)\u003c/td\u003e\n\u003ctd\u003eP (X \u0026gt;= 5.5)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(X \u0026gt; 6)\u003c/td\u003e\n\u003ctd\u003eP (X \u0026gt;= 6.5)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ebasically \u0026ldquo;less than\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontinuity_correction/","tags":null,"title":"continuity correction"},{"categories":null,"contents":"This is a continuous distribution for which the probability can be quantified as:\n\\begin{equation} p(x) \\dd{x} \\end{equation}\nYou will note that, at any given exact point, the probability is \\(\\lim_{\\dd{x} \\to 0} p(x)\\dd{x} = 0\\). However, to get the actual probability, we take an integral over some range:\n\\begin{equation} \\int_{-\\infty}^{\\infty} p(x) \\dd{x} = 1 \\end{equation}\nSee also cumulative distribution function which represents the chance of something happening up to a threshold.\n","html":"\u003cp\u003eThis is a continuous distribution for which the probability can be quantified as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that, at any given exact point, the probability is \\(\\lim_{\\dd{x} \\to 0} p(x)\\dd{x} = 0\\). 
However, to get the actual probability, we take an integral over some range:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{-\\infty}^{\\infty} p(x) \\dd{x} = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003ecumulative distribution function\u003c/a\u003e which represents the chance of something happening up to a threshold.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontinuous_distribution/","tags":null,"title":"continuous distribution"},{"categories":null,"contents":"a controller is a that maintains its own state.\nconstituents \\(X\\): a set of nodes (hidden, internal states) \\(\\Psi(a|x)\\): probability of taking an action \\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states requirements Controllers are nice because we:\ndon\u0026rsquo;t have to maintain a belief over time: we need an initial belief, and then we can create beliefs as we\u0026rsquo;d like without much worry controllers can be made shorter than conditional plans additional information finite state controller A finite state controller has a finite amount of hidden internal state.\nConsider the crying baby problem. We will declare two internal state:\n\\begin{equation} x_1, x_2 \\end{equation}\nGiven our observations and our internal states, we can declare transitions and an action probability \\(\\Psi\\):\nWe essentially declare a policy vis a vi your observations. 
It can be a sequence, for instance, if we want to declare a policy whereby if you cry twice then you feed, you can declare:\nfinite state controller evaluation \\begin{equation} U(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ] \\end{equation}\nwhich is a conditional plan evaluation but we know even litle\nand, to construct alpha vectors:\n\\begin{equation} \\alpha_{x} = \\qty[U(x, s_1), \\dots, U(x, s_{n})] \\end{equation}\nwe just make one alpha vector per node. So the entire plan is represented as usual by \\(\\Gamma\\) a set of alpha vectors. And yes you can alpha vector pruning.\n\\begin{align} U(x,b) = b^{\\top} \\alpha_{x} \\end{align}\nnode we want to start at:\n\\begin{equation} X^{*} = \\arg\\max_{x} U(x,b) \\end{equation}\nsolving for \\(\\Psi\\) and \\(\\eta\\) policy iteration: incrementally add nodes and evaluate it nonlinear programming: this can be a nonlinear optimization problem controller gradient ascent ","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e is a that maintains its own state.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\): a set of nodes (hidden, internal states)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(a|x)\\): probability of taking an action\u003c/li\u003e\n\u003cli\u003e\\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eControllers are nice because we:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003edon\u0026rsquo;t have to maintain a belief over time: we need an initial belief, and then we can create beliefs as we\u0026rsquo;d like without much worry\u003c/li\u003e\n\u003cli\u003econtrollers can be made shorter 
than \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"finite-state-controller\"\u003efinite state controller\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#finite-state-controller\"\u003efinite state controller\u003c/a\u003e has a finite amount of hidden internal state.\u003c/p\u003e\n\u003cp\u003eConsider the crying baby problem. We will declare two internal state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1, x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven our observations and our internal states, we can declare transitions and an action probability \\(\\Psi\\):\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-30_09-07-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe essentially declare a policy vis a vi your observations. It can be a sequence, for instance, if we want to declare a policy whereby if you cry twice then you feed, you can declare:\u003c/p\u003e\n\u003ch3 id=\"finite-state-controller-evaluation\"\u003efinite state controller evaluation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nU(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a \u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e but we know even litle\u003c/p\u003e\n\u003cp\u003eand, to construct \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{x} = \\qty[U(x, s_1), \\dots, U(x, s_{n})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe just make one alpha vector per node. 
So the entire plan is represented as usual by \\(\\Gamma\\) a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es. And yes you can \u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU(x,b) = b^{\\top} \\alpha_{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003enode we want to start at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX^{*} = \\arg\\max_{x} U(x,b)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"solving-for-psi-and-eta\"\u003esolving for \\(\\Psi\\) and \\(\\eta\\)\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e: incrementally add nodes and evaluate it\u003c/li\u003e\n\u003cli\u003enonlinear programming: this can be a nonlinear optimization problem\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller_gradient_ascent/\"\u003econtroller gradient ascent\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontroller/","tags":null,"title":"controller"},{"categories":null,"contents":"We aim to solve for a fixed-sized controller based policy using gradient ascent. 
This is the unconstrained variation on PGA.\nRecall that we seek to optimize, for some initial node \\(x^{(1)}\\) and belief-state \\(b\\), we want to find the distribution of actions and transitions \\(\\Psi\\) and \\(\\eta\\), which maximizes the utility we can obtain based on initial state:\n\\begin{equation} \\sum_{s}b(s) U(x^{(1)}, s) \\end{equation}\nRecall that \\(U(x,s)\\) is given by:\n\\begin{equation} U(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ] \\end{equation}\nwhere\n\\(X\\): a set of nodes (hidden, internal states) \\(\\Psi(a|x)\\): probability of taking an action \\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states Let\u0026rsquo;s first develop some tools which can help us linearize the objective equation given above.\nWe can define a transition map (matrix) between any two controller-states (latent + state) as:\n\\begin{equation} T_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta (x\u0026rsquo;|x,a,o) \\end{equation}\nwhere \\(\\bold{T}_{\\theta} \\in \\mathbb{R}^{|X \\times S| \\times |X \\times S|}\\) .\nFurther, we can parameterize reward over \\(R(s,a)\\) for:\n\\begin{equation} R_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a) \\end{equation}\nwhere \\(R_{\\theta}\\in \\mathbb{R}^{|X \\times S|}\\)\n(i.e. 
the reward of being in each controller state is the expected reward over all possible actions at that controller state).\nAnd now, recall the procedure for Bellman Expectation Equation; having formulated the transition and reward at any given controller state \\(X \\times S\\), we can write:\n\\begin{equation} \\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T_{\\theta}}\\bold{u}_{\\theta} \\end{equation}\nnote that this vector \\(\\bold{U} \\in \\mathbb{R}^{|X \\times S}}\\). Therefore, to write out an \u0026ldquo;utility of belief\u0026rdquo; (prev. \\(b^{\\top} U\\) where \\(U \\in \\alpha\\) some alpha vector over states), we have to redefine a:\n\\begin{equation} \\bold{\\beta}_{xs}, \\text{where} \\begin{cases} \\bold{\\beta}_{xs} = b(s), if\\ x = x^{(1)} \\\\ 0 \\end{cases} \\end{equation}\nFinally, then we can rewrite the objective as:\n\\begin{equation} \\beta^{\\top} \\bold{U}_{\\theta} \\end{equation}\nwhere we seek to use gradient ascend to maximize \\(\\bold{U}_{\\theta}\\).\nWriting this out, we have:\n\\begin{equation} \\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T}_{\\theta} \\bold{u}_{\\theta} \\end{equation}\nwhich gives:\n\\begin{equation} \\bold{u}_{\\theta} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta} \\end{equation}\nLet\u0026rsquo;s call \\(\\bold{Z} = (\\bold{I}-\\gamma \\bold{T}_{\\theta})\\), meaning:\n\\begin{equation} \\bold{u}_{\\theta} = \\bold{Z}^{-1} \\bold{r}_{\\theta} \\end{equation}\nFinally, to gradient ascent, we better get the gradient. So\u0026hellip; its CHAIN RULE TIME\nRecall that \\(\\theta\\) at this point refers to both \\(\\eta\\) and \\(\\Psi\\), so we need to take a partial against each of those variables. 
After doing copious calculus in Alg4DM pp 485, we arrive at the update expressions.\n","html":"\u003cp\u003eWe aim to solve for a fixed-sized \u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e based policy using \u003ca href=\"/posts/kbhargmax/#gradient-ascent\"\u003egradient ascent\u003c/a\u003e. This is the \u003cstrong\u003eunconstrained\u003c/strong\u003e variation on \u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eRecall that we seek to optimize, for some initial node \\(x^{(1)}\\) and belief-state \\(b\\), we want to find the distribution of actions and transitions \\(\\Psi\\) and \\(\\eta\\), which maximizes the utility we can obtain based on initial state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{s}b(s) U(x^{(1)}, s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(U(x,s)\\) is given by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(x, s) = \\sum_{a}^{} \\Psi(a|x) \\qty[R(s,a) + \\gamma \\qty(\\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\sum_{o}^{} O(o|a, s\u0026rsquo;) \\sum_{x\u0026rsquo;}^{} \\eta(x\u0026rsquo;|x,a,o) U(x\u0026rsquo;, s\u0026rsquo;)) ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\): a set of nodes (hidden, internal states)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(a|x)\\): probability of taking an action\u003c/li\u003e\n\u003cli\u003e\\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet\u0026rsquo;s first develop some tools which can help us linearize the objective equation given above.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe can define a transition map (matrix) between any two controller-states (latent + state) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta 
(x\u0026rsquo;|x,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bold{T}_{\\theta} \\in \\mathbb{R}^{|X \\times S| \\times |X \\times S|}\\) .\u003c/p\u003e\n\u003cp\u003eFurther, we can parameterize reward over \\(R(s,a)\\) for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(R_{\\theta}\\in \\mathbb{R}^{|X \\times S|}\\)\u003c/p\u003e\n\u003cp\u003e(i.e. the reward of being in each controller state is the expected reward over all possible actions at that controller state).\u003c/p\u003e\n\u003cp\u003eAnd now, recall the procedure for \u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e; having formulated the transition and reward at any given controller state \\(X \\times S\\), we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T_{\\theta}}\\bold{u}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote that this vector \\(\\bold{U} \\in \\mathbb{R}^{|X \\times S}}\\). Therefore, to write out an \u0026ldquo;utility of belief\u0026rdquo; (prev. 
\\(b^{\\top} U\\) where \\(U \\in \\alpha\\) some alpha vector over \u003cstrong\u003estates\u003c/strong\u003e), we have to redefine a:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{\\beta}_{xs}, \\text{where} \\begin{cases}\n\\bold{\\beta}_{xs} = b(s), if\\ x = x^{(1)} \\\\\n0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eFinally, then we can rewrite the objective as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta^{\\top} \\bold{U}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere we seek to use gradient ascend to maximize \\(\\bold{U}_{\\theta}\\).\u003c/p\u003e\n\u003cp\u003eWriting this out, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = \\bold{r}_{\\theta} + \\gamma \\bold{T}_{\\theta} \\bold{u}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s call \\(\\bold{Z} = (\\bold{I}-\\gamma \\bold{T}_{\\theta})\\), meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{u}_{\\theta} = \\bold{Z}^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, to \u003ca href=\"/posts/kbhargmax/#gradient-ascent\"\u003egradient ascent\u003c/a\u003e, we better get the gradient. So\u0026hellip; its \u003cstrong\u003e\u003cstrong\u003eCHAIN RULE TIME\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-13_12-16-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that \\(\\theta\\) at this point refers to both \\(\\eta\\) and \\(\\Psi\\), so we need to take a partial against each of those variables. 
After doing copious calculus in Alg4DM pp 485, we arrive at the update expressions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcontroller_gradient_ascent/","tags":null,"title":"controller gradient ascent"},{"categories":null,"contents":"For \\(f,g : \\mathbb{R} \\to \\mathbb{C}\\), we have:\n\\begin{equation} (f * g)(x) = \\int_{\\mathbb{R}} f(x-y) g(y) \\dd{y} = \\int_{\\mathbb{R}} f(y) g(x-y) \\dd{y} \\end{equation}\nproperties of convolution \\((g * f) (x) = (f * g) (x)\\) \\(\\mathcal{F}(f * g) = \\mathcal{F}(f)\\mathcal{F}(g)\\) \\(\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\\) \\((f * g)\u0026rsquo; = f * g\u0026rsquo; = f\u0026rsquo; * g\\) \\(\\lambda ( f * g ) = (\\lambda f) * g = f * (\\lambda g)\\) =\u0026gt; \u0026ldquo;in a convolution, if ANY ONE of the two functions are Differentiable, both are Differentiable.\u0026rdquo;; think about smoothing a jagged function using a Gaussian.\nexamples rolling average \\begin{align} U_{L}(x) = \\begin{cases} L, |x| \\leq \\frac{1}{2L} \\\\ 0, |x| \u0026gt; \\frac{1}{2L} \\end{cases} \\end{align}\nThe width of the area for which the expression is positive is \\(2L\\), and the height is \\(L\\), so the area (integral) is \\(1\\).\nSo now let\u0026rsquo;s consider:\n\\begin{equation} (f * U_{L})(x) \\end{equation}\nwhich is:\n\\begin{equation} \\int_{\\mathbb{R}} f(x-y) U_{L}(y) \\dd{y} \\end{equation}\nmeaning:\n\\begin{equation} L \\int_{-\\frac{1}{2}L}^{\\frac{1}{2}L} f(x-y) \\dd{y} \\end{equation}\nYou will note that we are sweeping something of window width \\(\\frac{1}{L}\\) over the function, which averages the function \\(f\\) over the window \\(L\\).\nSo convolving with this function essentially smoothes function over a window \\(\\frac{1}{L}\\); as \\(L\\) decreases, we are averaging over a greater interval; vise versa.\nsignal compression Write your signal in terms of its Fourier transform:\n\\begin{equation} f(t) = \\frac{1}{2\\pi} \\int_{-\\infty}^{\\infty} e^{it\\lambda} 
\\hat{f}(\\lambda) \\dd{\\lambda} \\end{equation}\nWe can write:\n\\begin{equation} \\hat{f}(\\lambda) \\cdot 1_{J}(\\lambda) \\end{equation}\nwhose inverse Fourier transform would be:\n\\begin{equation} f(x) * \\mathcal{F}\\qty(1_{J}(\\lambda)) \\end{equation}\nmotivation What if we want the Fourier Transform of \\(\\hat{f}(\\lambda)\\hat{g}(\\lambda)\\) in terms of one expression?\nConsider:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\qty(\\int_{\\mathbb{R}} f(x) e^{-i\\lambda x} \\dd{x}) \\qty(\\int_{\\mathbb{R}} g(y) e^{-i\\lambda y} \\dd{y}) \\end{equation}\nNotice that because neither integral have dependence on the other, we can actually:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\int_{\\mathbb{R}} f(x) g(y) e^{-i\\lambda (x+y)} \\dd{x}\\dd{y} \\end{equation}\nwriting this as a change of variable:\n\\begin{equation} \\begin{cases} u = x+y \\\\ x = u-y \\\\ \\dd{x} = \\dd{u} \\end{cases} \\end{equation}\nwe can write:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) e^{-i\\lambda (u)} \\dd{u})\\dd{y} \\end{equation}\nConsidering they the integrands are isolated and decaying, we can swap them, pulling out also \\(e^{-i\\lambda(u)}\\) because it has no \\(y\\) dependence:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) \\dd{y})e^{-i\\lambda (u)} \\dd{u} \\end{equation}\nNotice! The inner part is a function, and the outer part is a Fourier transform! 
This is similar to a convolution (probability)!\nMeaning:\n\\begin{equation} \\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\mathcal{F}(f * g) = \\mathcal{F}(f) \\mathcal{F}(g) \\end{equation}\nOperating on the inverse, we can obtain a similar result:\n\\begin{equation} \\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g \\end{equation}\n","html":"\u003cp\u003eFor \\(f,g : \\mathbb{R} \\to \\mathbb{C}\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(f * g)(x) = \\int_{\\mathbb{R}} f(x-y) g(y) \\dd{y} = \\int_{\\mathbb{R}} f(y) g(x-y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"properties-of-convolution\"\u003eproperties of convolution\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\((g * f) (x) = (f * g) (x)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{F}(f * g) = \\mathcal{F}(f)\\mathcal{F}(g)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\\)\u003c/li\u003e\n\u003cli\u003e\\((f * g)\u0026rsquo; = f * g\u0026rsquo; = f\u0026rsquo; * g\\)\u003c/li\u003e\n\u003cli\u003e\\(\\lambda ( f * g ) = (\\lambda f) * g = f * (\\lambda g)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e=\u0026gt; \u0026ldquo;in a convolution, if \u003cstrong\u003eANY ONE\u003c/strong\u003e of the two functions are \u003ca href=\"/posts/kbhuniqueness_and_existance/#differentiable\"\u003eDifferentiable\u003c/a\u003e, both are \u003ca href=\"/posts/kbhuniqueness_and_existance/#differentiable\"\u003eDifferentiable\u003c/a\u003e.\u0026rdquo;; think about smoothing a jagged function using a \u003ca href=\"/posts/kbhgaussian/\"\u003eGaussian\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"examples\"\u003eexamples\u003c/h2\u003e\n\u003ch3 id=\"rolling-average\"\u003erolling average\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\nU_{L}(x) = \\begin{cases}\nL, |x| \\leq \\frac{1}{2L} \\\\\n0, |x| \u0026gt; \\frac{1}{2L}\n\\end{cases}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThe width of the area for which the expression is positive is \\(2L\\), and the height is 
\\(L\\), so the area (integral) is \\(1\\).\u003c/p\u003e\n\u003cp\u003eSo now let\u0026rsquo;s consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(f * U_{L})(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\mathbb{R}} f(x-y) U_{L}(y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL \\int_{-\\frac{1}{2}L}^{\\frac{1}{2}L} f(x-y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that we are sweeping something of window width \\(\\frac{1}{L}\\) over the function, which averages the function \\(f\\) over the window \\(L\\).\u003c/p\u003e\n\u003cp\u003eSo convolving with this function essentially smoothes function over a window \\(\\frac{1}{L}\\); as \\(L\\) decreases, we are averaging over a greater interval; vise versa.\u003c/p\u003e\n\u003ch3 id=\"signal-compression\"\u003esignal compression\u003c/h3\u003e\n\u003cp\u003eWrite your signal in terms of its Fourier transform:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(t) = \\frac{1}{2\\pi} \\int_{-\\infty}^{\\infty} e^{it\\lambda} \\hat{f}(\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\cdot 1_{J}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhose inverse Fourier transform would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) * \\mathcal{F}\\qty(1_{J}(\\lambda))\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003emotivation\u003c/h2\u003e\n\u003cp\u003eWhat if we want the \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e of \\(\\hat{f}(\\lambda)\\hat{g}(\\lambda)\\) in terms of one expression?\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\qty(\\int_{\\mathbb{R}} f(x) e^{-i\\lambda x} \\dd{x}) \\qty(\\int_{\\mathbb{R}} g(y) 
e^{-i\\lambda y} \\dd{y})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotice that because neither integral have dependence on the other, we can actually:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\int_{\\mathbb{R}} f(x) g(y) e^{-i\\lambda (x+y)} \\dd{x}\\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewriting this as a change of variable:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nu = x+y \\\\\nx = u-y \\\\\n\\dd{x} = \\dd{u}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) e^{-i\\lambda (u)} \\dd{u})\\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsidering they the integrands are isolated and decaying, we can swap them, pulling out also \\(e^{-i\\lambda(u)}\\) because it has no \\(y\\) dependence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\int_{\\mathbb{R}} \\qty(\\int_{\\mathbb{R}} f(u-y) g(y) \\dd{y})e^{-i\\lambda (u)} \\dd{u}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotice! The inner part is a function, and the outer part is a Fourier transform! 
This is similar to a \u003ca href=\"/posts/kbhrandom_variables/#adding-random-variables\"\u003econvolution (probability)\u003c/a\u003e!\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\hat{g}(\\lambda) = \\mathcal{F}(f * g) = \\mathcal{F}(f) \\mathcal{F}(g)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOperating on the inverse, we can obtain a similar result:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhconvolution/","tags":null,"title":"convolution"},{"categories":null,"contents":"Cookie Theft is a Discourse-Completion Task that involves describing the following picture:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhctp/\"\u003eCookie Theft\u003c/a\u003e is a \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e that involves describing the following picture:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-28-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhctp/","tags":null,"title":"Cookie Theft Picture Description Task"},{"categories":null,"contents":"Pythagorean Theorem \\begin{equation} \\|u + v\\|^{2} = \\|u \\|^{2} + \\|v\\|^{2} \\end{equation}\nif \\(v\\) and \\(u\\) are orthogonal vectors.\nProof:\nAn Useful Orthogonal Decomposition Suppose we have a vector \\(u\\), and another \\(v\\), both belonging to \\(V\\). 
We can decompose \\(u\\) as a sum of two vectors given a choice of \\(v\\): one a scalar multiple of \\(v\\), and another orthogonal to \\(v\\).\nThat is: we can write \\(u = cv + w\\), where \\(c \\in \\mathbb{F}\\) and \\(w \\in V\\), such that \\(\\langle w,v \\rangle = 0\\).\nHere\u0026rsquo;s how:\nFor nonzero \\(v\\)\n\\begin{equation} c = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\end{equation}\nand\n\\begin{equation} w = (u - cv) \\end{equation}\nWe can show \\(\\langle w,v \\rangle=0\\) as follows:\n\\begin{align} \\langle (u-cv), v \\rangle \u0026amp;= \\langle u,v \\rangle - \\langle cv, v \\rangle \\\\ \u0026amp;= \\langle u,v \\rangle - c \\langle v,v \\rangle \\\\ \u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\langle v,v \\rangle \\\\ \u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\|v\\|^{2} \\\\ \u0026amp;= 0 \\end{align}\nCauchy-Schwartz Inequality \\begin{equation} | \\langle u,v \\rangle | \\leq \\|u\\| \\|v\\| \\end{equation}\nand the expression is an equality of each vector \\(u,v\\) is the scalar multiple of the other.\nProof:\nPick some set of \\(v\\) and \\(u\\) and write out the orthogonal decomposition we had outlined above:\n\\begin{equation} u = cv + w \\end{equation}\nNow, recall \\(c = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}}\\). We now apply Pythagorean Theorem:\nNow we just multiply \\(\\|v\\|^{2}\\) to both sides and take square roots.\nIf \\(w = 0\\) (i.e. 
\\(v\\) and \\(w\\) have no othogonal component, and therefore they are scalar multiples), then this would turn into an equality as desired.\ntriangle inequality (vectors) See also triangle inequality (complexes)\n\u0026ldquo;The length of \\(u+v\\) is always less than the length of each \\(u\\) plus \\(v\\); the third side length is always shorter than the sum of both other sides\u0026rsquo; lengths.\u0026rdquo;\n\\begin{equation} \\|u\\| + \\|v\\| \\geq \\|u+v\\| \\end{equation}\nNotably, the two lines between \\(2|\\langle u,v \\rangle|\\) and \\(2 \\|u\\| \\|v\\|\\) holds because of the Cauchy-Schwartz Inequality.\nThis inequality becomes an equality if \\(u\\) and \\(v\\) are a non-negative multiple of the other.\nparallelogram equality The sums of squared side lengths of a parallelogram is equal to the sum of the squares of the length of diagonals:\n\\begin{equation} \\|u + v\\|^{2} + \\|u-v\\|^{2} = 2(\\|u\\|^{2} + \\|v\\|^{2}) \\end{equation}\n","html":"\u003ch2 id=\"pythagorean-theorem\"\u003ePythagorean Theorem\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\|u + v\\|^{2} = \\|u \\|^{2} + \\|v\\|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(v\\) and \\(u\\) are orthogonal vectors.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_22-53-17_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"an-useful-orthogonal-decomposition\"\u003eAn Useful Orthogonal Decomposition\u003c/h2\u003e\n\u003cp\u003eSuppose we have a vector \\(u\\), and another \\(v\\), both belonging to \\(V\\). 
We can decompose \\(u\\) as a sum of two vectors given a choice of \\(v\\): one a scalar multiple of \\(v\\), and another orthogonal to \\(v\\).\u003c/p\u003e\n\u003cp\u003eThat is: we can write \\(u = cv + w\\), where \\(c \\in \\mathbb{F}\\) and \\(w \\in V\\), such that \\(\\langle w,v \\rangle = 0\\).\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s how:\u003c/p\u003e\n\u003cp\u003eFor nonzero \\(v\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw = (u - cv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can show \\(\\langle w,v \\rangle=0\\) as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\langle (u-cv), v \\rangle \u0026amp;= \\langle u,v \\rangle - \\langle cv, v \\rangle \\\\\n\u0026amp;= \\langle u,v \\rangle - c \\langle v,v \\rangle \\\\\n\u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\langle v,v \\rangle \\\\\n\u0026amp;= \\langle u,v \\rangle - \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}} \\|v\\|^{2} \\\\\n\u0026amp;= 0\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n| \\langle u,v \\rangle | \\leq \\|u\\| \\|v\\|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand the expression is an equality of each vector \\(u,v\\) is the scalar multiple of the other.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003ePick some set of \\(v\\) and \\(u\\) and write out the orthogonal decomposition we had outlined above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = cv + w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall \\(c = \\frac{\\langle u,v \\rangle}{\\|v\\|^{2}}\\). 
We now apply \u003ca href=\"#pythagorean-theorem\"\u003ePythagorean Theorem\u003c/a\u003e:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-13-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNow we just multiply \\(\\|v\\|^{2}\\) to both sides and take square roots.\u003c/p\u003e\n\u003cp\u003eIf \\(w = 0\\) (i.e. \\(v\\) and \\(w\\) have no othogonal component, and therefore they are scalar multiples), then this would turn into an equality as desired.\u003c/p\u003e\n\u003ch2 id=\"triangle-inequality--vectors\"\u003etriangle inequality (vectors)\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhthoughts_on_axler_4/#triangle-inequality--complexes\"\u003etriangle inequality (complexes)\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-23-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u0026ldquo;The length of \\(u+v\\) is always less than the length of each \\(u\\) plus \\(v\\); the third side length is always shorter than the sum of both other sides\u0026rsquo; lengths.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|u\\| + \\|v\\| \\geq \\|u+v\\|\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-26-02_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNotably, the two lines between \\(2|\\langle u,v \\rangle|\\) and \\(2 \\|u\\| \\|v\\|\\) holds because of the \u003ca href=\"#cauchy-schwartz-inequality\"\u003eCauchy-Schwartz Inequality\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis inequality becomes an equality if \\(u\\) and \\(v\\) are a \u003cstrong\u003enon-negative\u003c/strong\u003e multiple of the other.\u003c/p\u003e\n\u003ch2 id=\"parallelogram-equality\"\u003eparallelogram equality\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-32-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe sums of squared side lengths of a parallelogram is equal to the sum of 
the squares of the length of diagonals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|u + v\\|^{2} + \\|u-v\\|^{2} = 2(\\|u\\|^{2} + \\|v\\|^{2})\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-08_23-33-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcornucopia_of_analysis/","tags":null,"title":"cornucopia of analysis"},{"categories":null,"contents":"usually we use \\(N\\) to denote the number of tokens, and \\(V\\) the \u0026ldquo;vocab\u0026rdquo; or set of word types.\nCorpora is usually considered in context of:\nspecific writers at specific time for specific varieties of specific languages for a specific function Particularly hard: code switching, gender, demographics, variety, etc.\nHerdan\u0026rsquo;s Law \\begin{equation} |V| = kN^{\\beta} \\end{equation}\nwith \\(\\beta\\) being a constant between \\(0.67 \u0026lt; \\beta \u0026lt; 0.75\\).\nThe vocab size is roughly proportional to the number of tokens.\n","html":"\u003cp\u003eusually we use \\(N\\) to denote the number of \u003ca href=\"/posts/kbhtokenization/\"\u003etoken\u003c/a\u003es, and \\(V\\) the \u0026ldquo;vocab\u0026rdquo; or set of \u003ca href=\"/posts/kbhtokenization/\"\u003eword type\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eCorpora is usually considered in context of:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003especific writers\u003c/li\u003e\n\u003cli\u003eat specific time\u003c/li\u003e\n\u003cli\u003efor specific varieties\u003c/li\u003e\n\u003cli\u003eof specific languages\u003c/li\u003e\n\u003cli\u003efor a specific function\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eParticularly hard: code switching, gender, demographics, variety, etc.\u003c/p\u003e\n\u003ch2 id=\"herdan-s-law\"\u003eHerdan\u0026rsquo;s Law\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n|V| = kN^{\\beta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(\\beta\\) being a constant between \\(0.67 \u0026lt; \\beta 
\u0026lt; 0.75\\).\u003c/p\u003e\n\u003cp\u003eThe vocab size is roughly proportional to the number of tokens.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcorpus/","tags":null,"title":"corpus"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcortex/","tags":null,"title":"cortex"},{"categories":null,"contents":"coulomb\u0026rsquo;s law is a principle that deals with the force that two charged particles exhibit to each other.\nconstituents \\(k\\), Coulomb\u0026rsquo;s Constant, found roughly to be \\(9 \\times 10^{9} \\frac{N m^{2}}{C}\\) \\(q_{1,2}\\), the charge of the two particles you are analyzing \\(r\\), distance between particles requirements \\begin{equation} \\vec{F_{E}} = k \\frac{q_1q_2}{r^{2}} \\end{equation}\nadditional information interpreting signs on \\(F_{e}\\) negative: attraction force between changes (the points have opposite signed charges, and so attract) positive: repulsion force between changes (the point have the same signed change, so repel) alternative formulation of Coulomb\u0026rsquo;s Law The law is often redefined with the language of the premittivity of free space:\n\\begin{equation} \\vec{F_{E}} = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q_1q_2}{r^{2}} \\end{equation}\nsuperposition The net electric force on a test change is simply the sum of the electric forces which other particles exhibit on the test change. 
That is:\n\\begin{equation} F_{on\\ 2} = F_{1 \\to 2} + F_{3 \\to 2} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003ecoulomb\u0026rsquo;s law\u003c/a\u003e is a principle that deals with the force that two charged particles exhibit to each other.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(k\\), \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eCoulomb\u0026rsquo;s Constant\u003c/a\u003e, found roughly to be \\(9 \\times 10^{9} \\frac{N m^{2}}{C}\\)\u003c/li\u003e\n\u003cli\u003e\\(q_{1,2}\\), the \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e of the two particles you are analyzing\u003c/li\u003e\n\u003cli\u003e\\(r\\), distance between particles\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F_{E}} = k \\frac{q_1q_2}{r^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"interpreting-signs-on-f-e\"\u003einterpreting signs on \\(F_{e}\\)\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enegative: attraction force between changes (the points have opposite signed charges, and so attract)\u003c/li\u003e\n\u003cli\u003epositive: repulsion force between changes (the point have the same signed change, so repel)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"alternative-formulation-of-coulomb-s-law\"\u003ealternative formulation of Coulomb\u0026rsquo;s Law\u003c/h3\u003e\n\u003cp\u003eThe law is often \u003ca href=\"/posts/kbhpermittivity_of_free_space/#redefinition-of-id-e9d5f9b8-f44f-4ec2-a3ca-eddf236efe1b-coulomb-s-constant-based-on-id-d9abe64a-7668-490b-a297-af51c8859624-permittivity-of-free-space\"\u003eredefined with the language of the premittivity of free space\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F_{E}} = \\frac{1}{4\\pi \\epsilon_{0}} 
\\frac{q_1q_2}{r^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"superposition\"\u003esuperposition\u003c/h3\u003e\n\u003cp\u003eThe net \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eelectric force\u003c/a\u003e on a test change is simply the sum of the \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eelectric force\u003c/a\u003es which other particles exhibit on the test change. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF_{on\\ 2} = F_{1 \\to 2} + F_{3 \\to 2}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcoulomb_s_law/","tags":null,"title":"Coulomb's Law"},{"categories":null,"contents":"quantum information theory requires manipulating counterfactual information\u0026mdash;not what the current known states are, but what are the next possible states.\nInside physics, there is already a few principles which are counterfactual.\nConservation of energy: a perpetual machine is *impossible Second law: its impossible to convert all heat into useful work Heisenberg\u0026rsquo;s uncertainty: its impossible to copy reliable all states of a qubit With the impossibles, we can make the possible.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e requires manipulating counterfactual information\u0026mdash;not what the current known states are, but what are the \u003cem\u003enext possible states\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eInside \u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e, there is already a few principles which are counterfactual.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eConservation of energy: a perpetual machine is \u003cstrong\u003e*impossible\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eSecond law: its \u003cstrong\u003e\u003cstrong\u003eimpossible\u003c/strong\u003e\u003c/strong\u003e to convert all heat into useful work\u003c/li\u003e\n\u003cli\u003eHeisenberg\u0026rsquo;s uncertainty: its 
\u003cstrong\u003e\u003cstrong\u003eimpossible\u003c/strong\u003e\u003c/strong\u003e to copy reliable all states of a qubit\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWith the impossibles, we can make the possible.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcounterfactual/","tags":null,"title":"counterfactual"},{"categories":null,"contents":"counting asks: \u0026ldquo;how many possible outcomes satisfy an event?\u0026rdquo; You create a \u0026ldquo;generative story\u0026rdquo; to think about how you can count for the total choices.\n(like, 3 times will we roll even in a regular fair dice)\nstep rule of counting ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e asks: \u0026ldquo;how many possible outcomes satisfy an event?\u0026rdquo; You create a \u0026ldquo;generative story\u0026rdquo; to think about how you can count for the total choices.\u003c/p\u003e\n\u003cp\u003e(like, 3 times will we roll even in a regular fair dice)\u003c/p\u003e\n\u003ch2 id=\"step-rule-of-counting\"\u003estep rule of counting\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_16-28-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcounting/","tags":null,"title":"counting"},{"categories":null,"contents":" CS157 Introduction to Logic; Carta average 3.9, \u0026ldquo;Only take if you are actually interested in logic or this is an absolute requirement for your degree that you can\u0026rsquo;t dodge. Class structure is very tricky as your entire grade depends on how you do on 3 exams of 5 questions each.\u0026rdquo; ","html":"\u003cul\u003e\n\u003cli\u003eCS157 Introduction to Logic; Carta average 3.9, \u0026ldquo;Only take if you are actually interested in logic or this is an absolute requirement for your degree that you can\u0026rsquo;t dodge. 
Class structure is very tricky as your entire grade depends on how you do on 3 exams of 5 questions each.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcourses_to_take_for_qnlp/","tags":null,"title":"Courses to Take for QNLP"},{"categories":null,"contents":"\\begin{equation} cov(x,y) = E[(X-E[X])(Y-E[Y])] = E[XY]-E[X]E[Y] \\end{equation}\n(the derivation comes from FOIling the two terms and applying properties of expectation.\nwe want to consider: if a point goes way beyond its expectation, does the corresponding point change for another?\n\\begin{equation} (x-E[x])(y-E[y]) \\end{equation}\nif both points are varying .\nInstead of using this unbounded value, we sometimes use a normalized value named correlation:\n\\begin{equation} \\rho(X,Y) = \\frac{Cov(X,Y)}{\\sqrt{Var(X)Var(Y)}} \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\ncov(x,y) = E[(X-E[X])(Y-E[Y])] = E[XY]-E[X]E[Y]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(the derivation comes from FOIling the two terms and applying \u003ca href=\"/posts/kbhexpectation/#properties-of-id-24e5fb5b-b0b2-4872-adf2-398e91c3ee0e-expectation\"\u003eproperties of expectation\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ewe want to consider: if a point goes way beyond its expectation, does the corresponding point change for another?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x-E[x])(y-E[y])\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif both points are varying .\u003c/p\u003e\n\u003cp\u003eInstead of using this unbounded value, we sometimes use a normalized value named \u003ca href=\"/posts/kbhcovariance/\"\u003ecorrelation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho(X,Y) = \\frac{Cov(X,Y)}{\\sqrt{Var(X)Var(Y)}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcovariance/","tags":null,"title":"covariance"},{"categories":null,"contents":"coveather is a novel consensus algorithm based on the proof of work 
mechanism.\nSee also minimum user base requirements for coveather and Coveather Abstract.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcoveather/\"\u003ecoveather\u003c/a\u003e is a novel consensus algorithm based on the \u003ca href=\"/posts/kbhproof_of_work/\"\u003eproof of work\u003c/a\u003e mechanism.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhminimum_user_base_requirements_for_coveather/\"\u003eminimum user base requirements for coveather\u003c/a\u003e and \u003ca href=\"/posts/kbhcoveather_abstract/\"\u003eCoveather Abstract\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-22_22-22-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhcoveather/","tags":null,"title":"coveather"},{"categories":null,"contents":"Digital Health Passes (DHP), systems of digitally validating quarantine and vaccination status such as the New York IBM Excelsior Pass, demonstrate a lawful means to approach some benefits offered by \u0026ldquo;true elimination\u0026rdquo; treatment strategies-which focus on the complete elimination of cases instead of investing more in controlling the progression of the disease-of COVID-19. Current implementations of DHPs require region-based control and central storage of Protected Health Information (PHI)-creating a challenge to widespread use across different jurisdictions with incompatible data management systems and a lack of standardized patient privacy controls. In this work, a mechanism for decentralized PHI storage and validation is proposed through a novel two-stage handshaking mechanism update to blockchain proof-of-stake consensus. The proposed mechanism, when used to support a DHP, allows individuals to validate their quarantine and testing universally with any jurisdiction while allowing their right of independent movement and the protection of their PHI. 
Implementational details on the protocol are given, and the protocol is shown to withstand a 1% disturbance attack at only 923 participants via a Monte-Carlo simulation: further validating its stability.\n","html":"\u003cp\u003eDigital Health Passes (DHP), systems of digitally validating quarantine and vaccination status such as the New York IBM Excelsior Pass, demonstrate a lawful means to approach some benefits offered by \u0026ldquo;true elimination\u0026rdquo; treatment strategies-which focus on the complete elimination of cases instead of investing more in controlling the progression of the disease-of COVID-19. Current implementations of DHPs require region-based control and central storage of Protected Health Information (PHI)-creating a challenge to widespread use across different jurisdictions with incompatible data management systems and a lack of standardized patient privacy controls. In this work, a mechanism for decentralized PHI storage and validation is proposed through a novel two-stage handshaking mechanism update to blockchain proof-of-stake consensus. The proposed mechanism, when used to support a DHP, allows individuals to validate their quarantine and testing universally with any jurisdiction while allowing their right of independent movement and the protection of their PHI. 
Implementational details on the protocol are given, and the protocol is shown to withstand a 1% disturbance attack at only 923 participants via a Monte-Carlo simulation: further validating its stability.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcoveather_abstract/","tags":null,"title":"Coveather Abstract"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcovid_19/","tags":null,"title":"COVID-19"},{"categories":null,"contents":"A CPOMDP, or Constrained Partially Observable Markov Decision Process, gives two objectives for the system to optimize upon:\nan reward function \\(r(s,a)\\) and a set of constraints \\(c(s,a) \\geq 0\\). Specifically, we formulate it as a POMDP: \\((S,A,\\Omega), T, O ,R\\), with an additional set of constraints \\(\\bold{C}\\) and budgets \\(\\beta\\).\nWhereby, we seek to maximize the infinite-horizon reward \\(\\mathbb{E}_{t} \\qty[R(a_{t}, s_{t})]\\) subject to discounting, subject to:\n\\begin{equation} C_{i}(s,a) \\leq \\beta_{i}, \\forall C_{i},\\beta_{i} \\in \\bold{C}, \\beta \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003e, or \u003ca href=\"/posts/kbhcpomdp/\"\u003eConstrained Partially Observable Markov Decision Process\u003c/a\u003e, gives two objectives for the system to optimize upon:\u003c/p\u003e\n\u003cp\u003ean reward function \\(r(s,a)\\) and a set of constraints \\(c(s,a) \\geq 0\\). 
Specifically, we formulate it as a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e: \\((S,A,\\Omega), T, O ,R\\), with an additional set of constraints \\(\\bold{C}\\) and budgets \\(\\beta\\).\u003c/p\u003e\n\u003cp\u003eWhereby, we seek to maximize the infinite-horizon reward \\(\\mathbb{E}_{t} \\qty[R(a_{t}, s_{t})]\\) subject to discounting, subject to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{i}(s,a) \\leq \\beta_{i}, \\forall C_{i},\\beta_{i} \\in \\bold{C}, \\beta\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcpomdp/","tags":null,"title":"CPOMDP"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcram/","tags":null,"title":"cram"},{"categories":null,"contents":"crap to remember for AP Stats is a cram sheet for the AP Statistics exam.\n95% confidence: \\(z^*=1.96\\)\n\\(r=1\\): perfect positive correlation \\(r=-1\\): perfect negative correlation \\(r=0\\): no correlation S: standard deviation of residuals R-sq: how much of varience in dep. var can be explained by indp. var SE: estimate of standard deviation of the random var. that is slope.\nFor lines:\nNote that p value from regression outputs are two-tailed. So divide by 2 if you want a one-tail result.\nMultiplication changes mean as well as well as standard deviation. Adding changes mean but not standard deviation.\nExpected value of the sum and differences of random variables are just the sums and differences of their expected value. \\(S = X+Y, \\bar{S} = \\bar{X}+\\bar{Y}\\).\nVariance of random variables are just the sum and differences of their variance. \\(S=X+Y,{\\sigma^2}_S = {\\sigma^2}_X+{\\sigma^2}_Y\\).\n#WHAPS\nwhat test what hypothesis and what significance level assumptions and conditions; state! random independent: \\(\\le 10\\%\\) of population. 
t and z special: normal (z tests: \\(np, n(1-p) \\geq 10\\), t tests: \\(n\u0026gt;30\\) or given) chi-square special: \\(\\forall\\ EV \u0026gt; 5\\) p: z-statistic that would XD: Control (control for confounding and bias, placebo, etc.), Randomization (spread uncontrolled variability), Replication (need to have adequate units and ability to be repeated)\n=\u0026gt; Describing a distribution\nCenter: Mean, Median, or Mode? figure by skew Shape: Symmetric vs Skewed? Unimodal vs Bimodal Spread: Range and Inter-Quartile Range Outlier: anything more than 1.5*IQR away Context: what the distribution shows \u0026ldquo;Experimental Unit\u0026rdquo;: a physic entity that\u0026rsquo;s the primary unit of interest in a research objective.\nConditions for binomial distribution:\nBinary Independent Fixed number of trials All trials with same probability Conditions for geometric distrubiton\nBinary Independent Fixed number of successes All trials with same probability state the thing, state the conditions: \u0026ldquo;normal distribution with n= s=\u0026rdquo;, binomial distribution with n= p= etc.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcrap_to_remember_for_ap_stats/\"\u003ecrap to remember for AP Stats\u003c/a\u003e is a cram sheet for the \u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e exam.\u003c/p\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-46-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-47-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-47-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e95% confidence: \\(z^*=1.96\\)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-56-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\\(r=1\\): perfect positive correlation\u003c/li\u003e\n\u003cli\u003e\\(r=-1\\): perfect negative 
correlation\u003c/li\u003e\n\u003cli\u003e\\(r=0\\): no correlation\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-57-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-48-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-48-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_22-49-30_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-19-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-20-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eS: standard deviation of residuals\u003c/li\u003e\n\u003cli\u003eR-sq: how much of varience in dep. var can be explained by indp. var\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-21-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSE: estimate of standard deviation of the random var. that is slope.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-34-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFor lines:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-39-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNote that p value from regression outputs are two-tailed. So divide by 2 if you want a one-tail result.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-20_23-45-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eMultiplication changes mean as well as well as standard deviation. Adding changes mean but not standard deviation.\u003c/p\u003e\n\u003cp\u003eExpected value of the sum and differences of random variables are just the sums and differences of their expected value. 
\\(S = X+Y, \\bar{S} = \\bar{X}+\\bar{Y}\\).\u003c/p\u003e\n\u003cp\u003eVariance of random variables are just the sum and differences of their variance. \\(S=X+Y,{\\sigma^2}_S = {\\sigma^2}_X+{\\sigma^2}_Y\\).\u003c/p\u003e\n\u003cp\u003e#WHAPS\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhat test\u003c/li\u003e\n\u003cli\u003ewhat hypothesis and what \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003esignificance level\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eassumptions and conditions; state!\n\u003cul\u003e\n\u003cli\u003erandom\u003c/li\u003e\n\u003cli\u003eindependent: \\(\\le 10\\%\\) of population.\u003c/li\u003e\n\u003cli\u003et and z special: normal (z tests: \\(np, n(1-p) \\geq 10\\), t tests: \\(n\u0026gt;30\\) or given)\u003c/li\u003e\n\u003cli\u003echi-square special: \\(\\forall\\ EV \u0026gt; 5\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ep: z-statistic that would\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-21_21-28-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eXD: Control (control for confounding and bias, placebo, etc.), Randomization (spread uncontrolled variability), Replication (need to have adequate units and ability to be repeated)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-21_22-30-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e=\u0026gt; Describing a distribution\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCenter: Mean, Median, or Mode? figure by skew\u003c/li\u003e\n\u003cli\u003eShape: Symmetric vs Skewed? 
Unimodal vs Bimodal\u003c/li\u003e\n\u003cli\u003eSpread: Range and Inter-Quartile Range\u003c/li\u003e\n\u003cli\u003eOutlier: anything more than 1.5*IQR away\u003c/li\u003e\n\u003cli\u003eContext: what the distribution shows\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;Experimental Unit\u0026rdquo;: a physic entity that\u0026rsquo;s the primary unit of interest in a research objective.\u003c/p\u003e\n\u003cp\u003eConditions for \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBinary\u003c/li\u003e\n\u003cli\u003eIndependent\u003c/li\u003e\n\u003cli\u003eFixed number of trials\u003c/li\u003e\n\u003cli\u003eAll trials with same probability\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eConditions for geometric distrubiton\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBinary\u003c/li\u003e\n\u003cli\u003eIndependent\u003c/li\u003e\n\u003cli\u003eFixed number of successes\u003c/li\u003e\n\u003cli\u003eAll trials with same probability\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003estate the thing, state the conditions\u003c/strong\u003e\u003c/strong\u003e: \u0026ldquo;normal distribution with n= s=\u0026rdquo;, \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e with n= p= etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcrap_to_remember_for_ap_stats/","tags":null,"title":"crap to remember for AP Stats"},{"categories":null,"contents":"Major Challenges data loss: crashes can happen, and not all data could be saved to disk inconsistency: crashes can happen in the middle of operations crashes could occur when someone of the blocks that have been written to disk, but not others inode and free lists don\u0026rsquo;t agree. Ideally, filesystem operations should be atomic. 
Every operation should happen or not happen at all\u0026mdash;but not halfway.\nCase study: Block Cache Modification\nTradeoffs The overall design tradeoffs between this:\ndurability - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.)) performance - it needs to be fast (which may mean less error checking) consistency - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked) Also the disks themselves can still fail.\nfsck Don\u0026rsquo;t make any changes to filesystem at all. At the system boot time, check filesystem for consistency.\nmain limitation:\ncan\u0026rsquo;t restart filesystem until it completes: this process takes forever restores consistency, but doesn\u0026rsquo;t prevent loss of info restores consistency, but filesystem may still be unusable (core files moved to lost+found) a block could migrate from a password file to some other random file, hence removing info Check whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn\u0026rsquo;t set there isn\u0026rsquo;t a clean shutdown. 
If it wasn\u0026rsquo;t a clean shutdown, identify inconsistencies Scans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations\u0026mdash; block in an inode and in free list Solution: pull the block off of free list\nblock is a part of two inodes Solutions:\ngive to newest randomly pick make a copy remove (generally a bad idea, we don\u0026rsquo;t want to destroy data) inode claims one dirent refers to it, but there are no such dirent put the file under the lost+found folder\nordered writes Example: block is in file and also in the free list.\nThis basically removes the need to wait for fsck on reboot.\nWe can use a certain order of operations to prevent these types of errors from occurring:\nAlways initialize the TARGET before initializing the REFERENCE Initialize inode before initalize directory entry to it Never reuse a resource before NULLIFYING all existing REFERENCES Remove the inode reference before putting a block on the free list Never clear the LAST REFERENCE to a live resource before setting a NEW REFERENCE (\u0026ldquo;its better to have 2 copies instead of none\u0026rdquo;) Make the new directory entry before get rid of the old one Limitations:\nperformance: we need to do operations synchronously if we really want to do caching async, we can track dependencies circular dependencies are possible leak: it could leak resources (reference nullification happens but resource not added) We can run fsck in the background journaling journaling keeps a paper trail of disk appertains in the event of a crash. We have an append-only log on disk that stores disk operations.\nbefore performing an operation, record its info in the log and write that to disk The log will always record what\u0026rsquo;s happening ahead. The actual block updates can eventually be carried out in any order.\nwhat do we log? we only log metadata changes (inodes, moving stuff around, etc.) 
payload operations are not saved structure We typically have a LSN: log serial number, operations, and metadata.\n#+begin_src toml [offset 335050] LSN 18384030 operation = \u0026ldquo;LogBlockAlloc\u0026rdquo; blockno = 1027 zero_on_replay = 0\n[offset 23232] LSN N operation = \u0026ldquo;LogPatch\u0026rdquo; blockno = 8 offset = 137 bytes = 0.04 inode = 52 #+end_arc\n\u0026ldquo;zero-on-replay\u0026rdquo;:\nSpecifies that block number blockno, which was previously free, should now be marked as allocated (0 or false in the freemap). If zero_on_replay is non-zero, it means that the block is being used for metadata\u0026mdash;i.e., as an indirect (or double-indirect) block, or for a directory.\nlimitations checkpoints Its an add-only paper trial. We can truncate the log occasionally at a \u0026ldquo;checkpoint\u0026rdquo;, and truncate the log which is no longer needed.\nmultiple log entries An atomic operations may have multiple log entries corresponding to it (because they have many steps). We need to make sure that the entire operation is replayed or none at all.\nSo, we introduce transactions: each atomic operation will be wrapped into a unit transaction.\nwhere do we start replaying You don\u0026rsquo;t know where exactly you crashed.\nSo, log entries should be idempotent: doing something multiple times should have the same effect of doing them once. To make this happen, we need to cache all the data that\u0026rsquo;s needed to write to the log in the log itself. It cannot have external dependencies.\nSo we just replay the entire log. To save time every so often you trim the logs via checkpoints\nlog entries may take time We can also make log entry writes in the block cache too. This doesn\u0026rsquo;t matter too much: if both the log and the actual data is wiped from the cache, the filesystem is still consistent (we just lost data).\nWhen finally we write stuff to disk, we write the logs first. 
So no problems there.\n","html":"\u003ch2 id=\"major-challenges\"\u003eMajor Challenges\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edata loss\u003c/strong\u003e: crashes can happen, and not all data could be saved to disk\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003einconsistency\u003c/strong\u003e: crashes can happen in the middle of operations\n\u003cul\u003e\n\u003cli\u003ecrashes could occur when someone of the blocks that have been written to disk, but not others\u003c/li\u003e\n\u003cli\u003einode and free lists don\u0026rsquo;t agree.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIdeally, filesystem operations should be \u003cstrong\u003eatomic\u003c/strong\u003e. Every operation should happen or not happen at all\u0026mdash;but not halfway.\u003c/p\u003e\n\u003cp\u003eCase study: \u003ca href=\"/posts/kbhunix_v6_filesystem/#block-cache-modification\"\u003eBlock Cache Modification\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"tradeoffs\"\u003eTradeoffs\u003c/h2\u003e\n\u003cp\u003eThe overall design tradeoffs between this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edurability - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.))\u003c/li\u003e\n\u003cli\u003eperformance - it needs to be fast (which may mean less error checking)\u003c/li\u003e\n\u003cli\u003econsistency - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlso the disks themselves can still fail.\u003c/p\u003e\n\u003ch2 id=\"fsck\"\u003efsck\u003c/h2\u003e\n\u003cp\u003eDon\u0026rsquo;t make any changes to filesystem at all. 
At the system boot time, check filesystem for consistency.\u003c/p\u003e\n\u003cp\u003emain limitation:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecan\u0026rsquo;t restart filesystem until it completes: this process \u003cstrong\u003e\u003cstrong\u003etakes forever\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erestores consistency, but doesn\u0026rsquo;t prevent loss of info\u003c/li\u003e\n\u003cli\u003erestores consistency, but filesystem may still be unusable (core files moved to lost+found)\u003c/li\u003e\n\u003cli\u003ea block could migrate from a password file to some other random file, hence removing info\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003eCheck whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn\u0026rsquo;t set there isn\u0026rsquo;t a clean shutdown.\u003c/li\u003e\n\u003cli\u003eIf it wasn\u0026rsquo;t a clean shutdown, identify inconsistencies\u003c/li\u003e\n\u003cli\u003eScans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations\u0026mdash;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"block-in-an-inode-and-in-free-list\"\u003eblock in an inode and in free list\u003c/h3\u003e\n\u003cp\u003eSolution: pull the block off of free list\u003c/p\u003e\n\u003ch3 id=\"block-is-a-part-of-two-inodes\"\u003eblock is a part of two inodes\u003c/h3\u003e\n\u003cp\u003eSolutions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003egive to newest\u003c/li\u003e\n\u003cli\u003erandomly pick\u003c/li\u003e\n\u003cli\u003emake a copy\u003c/li\u003e\n\u003cli\u003eremove (generally a bad idea, we don\u0026rsquo;t want to destroy data)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"inode-claims-one-dirent-refers-to-it-but-there-are-no-such-dirent\"\u003einode claims one dirent refers to it, but there are no such dirent\u003c/h3\u003e\n\u003cp\u003eput the file under the 
\u003ccode\u003elost+found\u003c/code\u003e folder\u003c/p\u003e\n\u003ch2 id=\"ordered-writes\"\u003eordered writes\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eExample\u003c/strong\u003e\u003c/strong\u003e: block is in file and also in the free list.\u003c/p\u003e\n\u003cp\u003eThis basically removes the need to wait for fsck on reboot.\u003c/p\u003e\n\u003cp\u003eWe can use a certain \u003cstrong\u003eorder\u003c/strong\u003e of operations to prevent these types of errors from occurring:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eAlways initialize the \u003cstrong\u003eTARGET\u003c/strong\u003e before initializing the \u003cstrong\u003eREFERENCE\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003eInitialize inode before initalize directory entry to it\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever reuse a resource before \u003cstrong\u003eNULLIFYING\u003c/strong\u003e all existing \u003cstrong\u003e\u003cstrong\u003eREFERENCES\u003c/strong\u003e\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003eRemove the inode reference before putting a block on the free list\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever clear the \u003cstrong\u003e\u003cstrong\u003eLAST REFERENCE\u003c/strong\u003e\u003c/strong\u003e to a live resource before setting a \u003cstrong\u003e\u003cstrong\u003eNEW REFERENCE\u003c/strong\u003e\u003c/strong\u003e (\u0026ldquo;its better to have 2 copies instead of none\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003eMake the new directory entry before get rid of the old one\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eLimitations:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eperformance\u003c/strong\u003e: we need to do operations synchronously\n\u003cul\u003e\n\u003cli\u003eif we really want to do caching async, we can track dependencies\u003c/li\u003e\n\u003cli\u003ecircular dependencies are 
possible\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eleak\u003c/strong\u003e\u003c/strong\u003e: it could leak resources (reference nullification happens but resource not added)\n\u003cul\u003e\n\u003cli\u003eWe can run fsck in the background\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"journaling\"\u003ejournaling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#journaling\"\u003ejournaling\u003c/a\u003e keeps a paper trail of disk appertains in the event of a crash. We have an append-only log on disk that stores disk operations.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebefore performing an operation, record its info in the log\u003c/li\u003e\n\u003cli\u003eand write that to disk\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe log will always record what\u0026rsquo;s happening ahead. The actual block updates can eventually be carried out in any order.\u003c/p\u003e\n\u003ch3 id=\"what-do-we-log\"\u003ewhat do we log?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe only log \u003cstrong\u003emetadata\u003c/strong\u003e changes (inodes, moving stuff around, etc.)\u003c/li\u003e\n\u003cli\u003epayload operations are not saved\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"structure\"\u003estructure\u003c/h3\u003e\n\u003cp\u003eWe typically have a LSN: log serial number, operations, and metadata.\u003c/p\u003e\n\u003cp\u003e#+begin_src toml\n[offset 335050]\nLSN 18384030\noperation = \u0026ldquo;LogBlockAlloc\u0026rdquo;\nblockno = 1027\nzero_on_replay = 0\u003c/p\u003e\n\u003cp\u003e[offset 23232]\nLSN N\noperation = \u0026ldquo;LogPatch\u0026rdquo;\nblockno = 8\noffset = 137\nbytes = 0.04\ninode = 52\n#+end_arc\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;zero-on-replay\u0026rdquo;:\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003eSpecifies that block number blockno, which was previously free, should now be marked as allocated (0 or false in the freemap). 
If zero_on_replay is non-zero, it means that the block is being used for metadata\u0026mdash;i.e., as an indirect (or double-indirect) block, or for a directory.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"limitations\"\u003elimitations\u003c/h3\u003e\n\u003ch4 id=\"checkpoints\"\u003echeckpoints\u003c/h4\u003e\n\u003cp\u003eIts an add-only paper trial. We can truncate the log occasionally at a \u0026ldquo;checkpoint\u0026rdquo;, and truncate the log which is no longer needed.\u003c/p\u003e\n\u003ch4 id=\"multiple-log-entries\"\u003emultiple log entries\u003c/h4\u003e\n\u003cp\u003eAn atomic operations may have multiple log entries corresponding to it (because they have many steps). We need to make sure that the entire operation is replayed or none at all.\u003c/p\u003e\n\u003cp\u003eSo, we introduce \u003cstrong\u003etransactions\u003c/strong\u003e: each atomic operation will be wrapped into a unit transaction.\u003c/p\u003e\n\u003ch4 id=\"where-do-we-start-replaying\"\u003ewhere do we start replaying\u003c/h4\u003e\n\u003cp\u003eYou don\u0026rsquo;t know where \u003cstrong\u003eexactly\u003c/strong\u003e you crashed.\u003c/p\u003e\n\u003cp\u003eSo, log entries should be \u003cstrong\u003eidempotent\u003c/strong\u003e: doing something multiple times should have the same effect of doing them once. To make this happen, we need to cache all the data that\u0026rsquo;s needed to write to the log in the log itself. It cannot have external dependencies.\u003c/p\u003e\n\u003cp\u003eSo we just replay the entire log. To save time every so often you trim the logs via \u003ca href=\"#checkpoints\"\u003echeckpoints\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"log-entries-may-take-time\"\u003elog entries may take time\u003c/h4\u003e\n\u003cp\u003eWe can also make log entry writes in the block cache too. 
This doesn\u0026rsquo;t matter too much: if both the log and the actual data is wiped from the cache, the filesystem is \u003cstrong\u003estill consistent\u003c/strong\u003e (we just lost data).\u003c/p\u003e\n\u003cp\u003eWhen finally we write stuff to disk, we write the logs first. So no problems there.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcrash_recovery/","tags":null,"title":"crash recovery"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcredit/","tags":null,"title":"credit"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcredit_suisse/","tags":null,"title":"Credit Suisse"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcritical_value/","tags":null,"title":"critical value"},{"categories":null,"contents":"criticized the New Deal from all sides. Senator Huy P. Long claimed to \u0026ldquo;show our wealth.\u0026rdquo; nullification from conservative supreme court, FDR threatened to restructure + hurts his coalition.\nFDR ordered cuts in spending 1938 midterms: Republicans can block programs \u0026mdash; gained control of congress + created ability to gain control ","html":"\u003cp\u003ecriticized the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e from all sides. Senator Huy P. 
Long claimed to \u0026ldquo;show our wealth.\u0026rdquo; nullification from conservative supreme court, FDR threatened to restructure + hurts his coalition.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFDR ordered cuts in spending\u003c/li\u003e\n\u003cli\u003e1938 midterms: Republicans can block programs \u0026mdash; gained control of congress + created ability to gain control\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcriticism_of_the_new_deal/","tags":null,"title":"criticism of the New Deal (See file KBhnew_deal.org)"},{"categories":null,"contents":"Cross Entropy Method is a \u0026ldquo;conditional MLE\u0026rdquo; objective; whereby we try to maximize:\nthe log prob of the true y labels in the training data given the observations Derivation Recall the Bernoulli distribution, and specifically:\n\\begin{equation} P(Y=y) = p^{y} (1-p)^{1-y} \\end{equation}\nMeaning, we want to maximize:\n\\begin{equation} \\log P(y=y) = y \\log p + (1-y)\\log (1-y) \\end{equation}\nspecifically, we\u0026rsquo;d like to minimize:\n\\begin{equation} -[y \\log p + (1-y)\\log (1-y)] \\end{equation}\nIntuition This function should be\nsmaller when the model estimate is close to correct bigger if the model is confused or wrong ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e is a \u0026ldquo;conditional \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u0026rdquo; objective; whereby we try to maximize:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe log prob\u003c/li\u003e\n\u003cli\u003eof the true y labels in the training data\u003c/li\u003e\n\u003cli\u003egiven the observations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"derivation\"\u003eDerivation\u003c/h2\u003e\n\u003cp\u003eRecall the \u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e, and 
specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y) = p^{y} (1-p)^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, we want to maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log P(y=y) = y \\log p + (1-y)\\log (1-y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003especifically, we\u0026rsquo;d like to \u003cstrong\u003eminimize\u003c/strong\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-[y \\log p + (1-y)\\log (1-y)]\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eThis function should be\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esmaller when the model estimate is close to correct\u003c/li\u003e\n\u003cli\u003ebigger if the model is confused or wrong\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcross_entropy_loss/","tags":null,"title":"cross entropy loss"},{"categories":null,"contents":"This method introduces a search distribution instead of discrete points:\n\\begin{equation} p(\\theta | \\psi) \\end{equation}\nWe want to know how parameters \\(\\theta\\) are distributed, given some input parameters \\(\\psi\\) (for instance, we assume parameters are gaussian distributed such as the mean/variance).\nGiven this distribution, we sample \\(m\\) samples of \\(\\theta\\) from the distribution. Those are our starting candidate points. 
We then check its policy for its utility via the Roll-out utility We want to take top \\(k\\) of our best performers, called \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\) Use the set of \\(m_{elite}\\) points, we fit a new distribution parameter \\(\\psi\\) that describes those sample This allows us to bound how many Roll-out utilities we are doing.\nFor each dimension, we should have 10x elite sample points (1d should have 10 samples, 2d should have 20, etc.)\n","html":"\u003cp\u003eThis method introduces a search distribution instead of discrete points:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\theta | \\psi)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to know how parameters \\(\\theta\\) are distributed, given some input parameters \\(\\psi\\) (for instance, we assume parameters are \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e distributed such as the mean/variance).\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eGiven this distribution, we sample \\(m\\) samples of \\(\\theta\\) from the distribution. 
Those are our starting candidate points.\u003c/li\u003e\n\u003cli\u003eWe then check its policy for its \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e via the \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWe want to take top \\(k\\) of our best performers, called \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\)\u003c/li\u003e\n\u003cli\u003eUse the set of \\(m_{elite}\\) points, we fit a new distribution parameter \\(\\psi\\) that describes those sample\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis allows us to bound how many \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utilitie\u003c/a\u003es we are doing.\u003c/p\u003e\n\u003cp\u003eFor each dimension, we should have 10x elite sample points (1d should have 10 samples, 2d should have 20, etc.)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcross_entropy_method/","tags":null,"title":"Cross Entropy Method"},{"categories":null,"contents":"constituents additional information lack of inverse of cross product The cross product doesn\u0026rsquo;t have an inverse\ngeometric interpretation of cross product \\begin{equation} a \\times b = |\\vec{a}| |\\vec{b}| \\sin \\theta n \\end{equation}\nwhere, \\(n\\) is the unit vector in some direction.\nThe length of the resulting vector in the cross product is the area of the parallelogram formed by the two vectors.\n","html":"\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"lack-of-inverse-of-cross-product\"\u003elack of inverse of cross product\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e doesn\u0026rsquo;t have an inverse\u003c/p\u003e\n\u003ch3 id=\"geometric-interpretation-of-cross-product--kbhcross-product-dot-md\"\u003egeometric interpretation 
of \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\na \\times b = |\\vec{a}| |\\vec{b}| \\sin \\theta n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n\\) is the unit vector in some direction.\u003c/p\u003e\n\u003cp\u003eThe length of the resulting vector in the \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e is the area of the parallelogram formed by the two vectors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcross_product/","tags":null,"title":"cross product"},{"categories":null,"contents":"CrossFinder is a darkpool owned by Credit Suisse.\nFeatures:\nNormal darkpooling Routing the transaction out to other exchanges and dark-pools if needed Measuring the latency of each other exchange, etc. ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcrossfinder/\"\u003eCrossFinder\u003c/a\u003e is a \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003e owned by \u003ca href=\"/posts/kbhcredit_suisse/\"\u003eCredit Suisse\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFeatures:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eNormal \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003eing\u003c/li\u003e\n\u003cli\u003eRouting the transaction out to other exchanges and dark-pools if needed\u003c/li\u003e\n\u003cli\u003eMeasuring the latency of each other exchange, etc.\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcrossfinder/","tags":null,"title":"CrossFinder"},{"categories":null,"contents":"cyro-EM is a structure determination system of a solution (Dutta, M. 2018. 
J indian inst sci 98) to analyze a structural population of particles from TEM; the resulting 3-D structures obtained can be analyzed and classified.\n\u0026ldquo;The Resolution Revolution\u0026rdquo;: much better structures to analyze because of high-fidelity cyro-EM\ncyro-EM vs x-ray crystallography cyro-EM can identify heterogeneous motions throughout the structure, instead of averaging out multiple structural combinations; instead of the \u0026ldquo;general\u0026rdquo; structure on average, we can get a collection of various states the particle can be in.\nmanifold embedding manifold embedding is set of methods using diffusion maps to analyze the primary dynamics behavior, not sure what exactly are the methods\nhttps://cryosparc.com/\nensemble reweighting http://arxiv.org/abs/2212.05320\ntake MD =\u0026gt; create \u0026ldquo;fake\u0026rdquo; cyro-EM images [something happens I didn\u0026rsquo;t catch for cyro-em] then, project back to MD. misfolded elements would then be removed CHARMM-GUI\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e is a structure determination system of a \u003cstrong\u003esolution\u003c/strong\u003e (Dutta, M. 2018. 
J indian inst sci 98) to analyze a structural population of particles from TEM; the resulting 3-D structures obtained can be analyzed and classified.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;The Resolution Revolution\u0026rdquo;: much better structures to analyze because of high-fidelity \u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"cyro-em-vs-x-ray-crystallography\"\u003ecyro-EM vs x-ray crystallography\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e can identify heterogeneous motions throughout the structure, instead of averaging out multiple structural combinations; instead of the \u0026ldquo;general\u0026rdquo; structure on average, we can get a collection of various states the particle can be in.\u003c/p\u003e\n\u003ch2 id=\"manifold-embedding\"\u003emanifold embedding\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#manifold-embedding\"\u003emanifold embedding\u003c/a\u003e is set of methods using \u003ca href=\"/posts/kbhdiffusion_map/\"\u003ediffusion map\u003c/a\u003es to analyze the primary dynamics behavior, not sure what exactly are the methods\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://cryosparc.com/\"\u003ehttps://cryosparc.com/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"ensemble-reweighting\"\u003eensemble reweighting\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"http://arxiv.org/abs/2212.05320\"\u003ehttp://arxiv.org/abs/2212.05320\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etake MD =\u0026gt; create \u0026ldquo;fake\u0026rdquo; \u003ca href=\"#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e images\u003c/li\u003e\n\u003cli\u003e[something happens I didn\u0026rsquo;t catch for cyro-em]\u003c/li\u003e\n\u003cli\u003ethen, project back to MD. 
misfolded elements would then be removed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCHARMM-GUI\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcyro_em/","tags":null,"title":"cryo-electron microscopy"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcrystels/","tags":null,"title":"Crystals"},{"categories":null,"contents":"What random variable should I use? Unit 1 core probability, axiom of probability, counting, combination, permutation, DeMorgan\u0026rsquo;s Law.\nSU-CS109 SEP272023 SU-CS109 SEP292023 SU-CS109 OCT022023 SU-CS109 OCT042023 SU-CS109 OCT062023 SU-CS109 OCT112023 Unit 2 random variables\nSU-CS109 OCT092023 SU-CS109 OCT132023 SU-CS109 OCT162023 SU-CS109 OCT182023 Unit 3 inference, General Inference\nSU-CS109 OCT202023 SU-CS109 OCT232023 SU-CS109 OCT252023 SU-CS109 OCT272023 Unit 4 Beta Distribution, central limit theorem, bootstrapping\nSU-CS109 NOV012023 SU-CS109 NOV032023 SU-CS109 NOV102023 Unit 5 Apredezahe de machinas: Naive Bayes, logistic regression, deep learning\nSU-CS109 NOV132023 SU-CS109 NOV292023 SU-CS109 DEC012023 SU-CS109 DEC042023 Notes SU-CS109 Midterm SU-CS109 Midterm Sheet ","html":"\u003ch2 id=\"what-random-variable-should-i-use\"\u003eWhat random variable should I use?\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-16_15-40-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"unit-1\"\u003eUnit 1\u003c/h2\u003e\n\u003cp\u003ecore \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e, \u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e, \u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e, \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003e, \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e, \u003ca href=\"/posts/kbhdemorgan_s_law/\"\u003eDeMorgan\u0026rsquo;s Law\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs109_sep272023/\"\u003eSU-CS109 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_sep292023/\"\u003eSU-CS109 SEP292023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct022023/\"\u003eSU-CS109 OCT022023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct042023/\"\u003eSU-CS109 OCT042023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct062023/\"\u003eSU-CS109 OCT062023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct112023/\"\u003eSU-CS109 OCT112023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-2\"\u003eUnit 2\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variables\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct092023/\"\u003eSU-CS109 OCT092023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_109_oct132023/\"\u003eSU-CS109 OCT132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct162023/\"\u003eSU-CS109 OCT162023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct182023/\"\u003eSU-CS109 OCT182023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-3\"\u003eUnit 3\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e, \u003ca href=\"/posts/kbhgeneral_inference/\"\u003eGeneral Inference\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct202023/\"\u003eSU-CS109 OCT202023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct232023/\"\u003eSU-CS109 OCT232023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_oct252023/\"\u003eSU-CS109 OCT252023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs109_oct272023/\"\u003eSU-CS109 OCT272023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-4\"\u003eUnit 4\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e, \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e, \u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003eping\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov012023/\"\u003eSU-CS109 NOV012023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov032023/\"\u003eSU-CS109 NOV032023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov102023/\"\u003eSU-CS109 NOV102023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"unit-5\"\u003eUnit 5\u003c/h2\u003e\n\u003cp\u003eApredezahe de machinas: \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e, \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e, \u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov132023/\"\u003eSU-CS109 NOV132023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_nov292023/\"\u003eSU-CS109 NOV292023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_dec012023/\"\u003eSU-CS109 DEC012023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_dec042023/\"\u003eSU-CS109 DEC042023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_midterm/\"\u003eSU-CS109 Midterm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs109_midterm_sheet/\"\u003eSU-CS109 Midterm 
Sheet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcs_probability_index/","tags":["index"],"title":"CS Probability Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcs124/","tags":null,"title":"cs124"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcultural_revolution/","tags":null,"title":"Cultural Revolution"},{"categories":null,"contents":"current is defined as the flow of positive charge. Specifically:\n\\begin{equation} I = \\frac{\\Delta Q}{\\Delta t} \\end{equation}\nresistance of a wire (if ever you come across needing to calculate the resistance of the wire from scratch)\n\\begin{equation} R = \\rho \\frac{L}{A} \\end{equation}\nwhere, \\(\\rho\\) is the material resistivity, \\(L\\) the length, and \\(A\\) the cross-sectional area.\nyou rarely need to do this!\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e is defined as the flow of positive \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e. 
Specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = \\frac{\\Delta Q}{\\Delta t}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"resistance-of-a-wire\"\u003eresistance of a wire\u003c/h2\u003e\n\u003cp\u003e(if ever you come across needing to calculate the resistance of the wire from scratch)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR = \\rho \\frac{L}{A}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\rho\\) is the material resistivity, \\(L\\) the length, and \\(A\\) the cross-sectional area.\u003c/p\u003e\n\u003cp\u003eyou rarely need to do this!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcurrent/","tags":null,"title":"current"},{"categories":null,"contents":"The curse of dimensionality is the result of correlatives of the fact that:\nat higher dimensions, most random data points become equidistant from each other \u0026mdash; you can prove this to yourself pythagoras and some math of expectation\nrandom vectors are almost orthogonal unit sphere takes almost no volume in unit square (?) 
","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhcurse_of_dimensionality/\"\u003ecurse of dimensionality\u003c/a\u003e is the result of correlatives of the fact that:\u003c/p\u003e\n\u003cp\u003eat higher dimensions, \u003cstrong\u003emost random data points become equidistant from each other\u003c/strong\u003e \u0026mdash; you can prove this to yourself \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003epythagoras\u003c/a\u003e and some math of \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-31_15-16-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003erandom vectors are almost orthogonal\u003c/li\u003e\n\u003cli\u003eunit sphere takes almost no volume in unit square (?)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcurse_of_dimensionality/","tags":null,"title":"curse of dimensionality"},{"categories":null,"contents":"User Stories but harder and more rigerous: https://www.visual-paradigm.com/guide/customer-experience/what-is-customer-journey-mapping/\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhuser_interviews/#user-story\"\u003eUser Stories\u003c/a\u003e but harder and more rigerous: \u003ca href=\"https://www.visual-paradigm.com/guide/customer-experience/what-is-customer-journey-mapping/\"\u003ehttps://www.visual-paradigm.com/guide/customer-experience/what-is-customer-journey-mapping/\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcustomer_journey_map/","tags":null,"title":"Customer Journey Map"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcynthia_lee/","tags":null,"title":"Cynthia 
Lee"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcyrodrgn/","tags":null,"title":"cyroDRGN"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhd_see/","tags":null,"title":"d-see"},{"categories":null,"contents":"we can also damp the heat equation:\n\\begin{equation} \\pdv{u}{t} + ku = \\pdv[2]{u}{x} \\end{equation}\nwe note that substituting \\(u(t,x) = e^{-kt}w(t,x)\\) into the expression, we yield:\n\\begin{equation} \\pdv{w}{t} = \\pdv[2]{w}{t} \\end{equation}\ntherefore, we simply have to solve the system normally on \\(w\\), then multiply the solution by \\(e^{-kt}\\) to obtain our solution for the damped equation.\n","html":"\u003cp\u003ewe can also damp the heat equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} + ku = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe note that substituting \\(u(t,x) = e^{-kt}w(t,x)\\) into the expression, we yield:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{w}{t} = \\pdv[2]{w}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore, we simply have to solve the system normally on \\(w\\), then multiply the solution by \\(e^{-kt}\\) to obtain our solution for the damped equation.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdamped_heat_equation/","tags":null,"title":"damped heat equation"},{"categories":null,"contents":"darkpools are non-exchange, non-published exchange which doesn\u0026rsquo;t have the same reporting obligations of a stock market. 
The only thing they have to report is the actual filled transactions after 90 seconds.\ndarkpools are used because the order book/bid-ask spread is not leaked, which means large transactions will not be able to influence the market as much.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003es are non-exchange, non-published exchange which doesn\u0026rsquo;t have the same reporting obligations of a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003estock market\u003c/a\u003e. The only thing they have to report is the actual filled transactions after 90 seconds.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003es are used because the order book/bid-ask spread is not leaked, which means large transactions will not be able to influence the market as much.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdarkpool/","tags":null,"title":"darkpool"},{"categories":null,"contents":"For data inference tasks, categorical data\n","html":"\u003cp\u003eFor \u003ca href=\"/posts/kbhdata_inference/\"\u003edata inference\u003c/a\u003e tasks, categorical data\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdata_inference/","tags":null,"title":"data inference"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdcgan/","tags":null,"title":"DCGAN"},{"categories":null,"contents":"Speed up biosensor development? Preso felt like an ad.\nLucCage\nAnd then we throw ML at designing LucCage binding structures. Using LucCage florescence as a reporter tool for detection of the molecule\nwe found LucCage used the platform + ML to engineer binding sites to things we want to bio on we tacked a light on it florescence now you have a lightbulb as an assay ","html":"\u003cp\u003eSpeed up biosensor development? 
Preso felt like an ad.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eAnd then we throw ML at designing \u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e binding structures. Using \u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e florescence as a reporter tool for detection of the molecule\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe found \u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eused the platform + ML to engineer binding sites to things we want to bio on\u003c/li\u003e\n\u003cli\u003ewe tacked a light on it\u003c/li\u003e\n\u003cli\u003eflorescence\u003c/li\u003e\n\u003cli\u003enow you have a lightbulb as an assay\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhde_novo_biosensors/","tags":null,"title":"De novo biosensors"},{"categories":null,"contents":"hype hype hype\nprotein binding with Rosetta RoseTTAFold2 RFDiffusion ","html":"\u003cp\u003ehype hype hype\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrosetta/#protein-binding-with-id-dc7d9d61-130a-435d-bd45-9757aed9555a-rosetta\"\u003eprotein binding with Rosetta\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhde_novo_protein_design/","tags":null,"title":"De Novo Protein Design"},{"categories":null,"contents":"deadlock is when mutexes lock in a circular order:\nthread 1:\nm1.lock(); m2.lock(); thread 2:\nm2.lock(); m3.lock(); We prevent this by locking things in the same order. Which maybe hard: because loops.\nWe need, also, to limit the number of threads competing for a shared resource: imagine all of your threads doing a thing, will it deadlock? 
If so, limit.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003e is when \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003ees lock in a circular order:\u003c/p\u003e\n\u003cp\u003ethread 1:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethread 2:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe prevent this by locking things in the same order. Which maybe hard: because loops.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe need, also, to \u003cstrong\u003elimit the number of threads competing for a shared resource\u003c/strong\u003e: imagine all of your threads doing a thing, will it deadlock? If so, limit.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeadlock/","tags":null,"title":"deadlock"},{"categories":null,"contents":"Key components Task/Objective (\u0026ldquo;Automated Driving to reach destination [here]\u0026rdquo;) Resources (state) (\u0026ldquo;sensors, fuel, etc.\u0026rdquo;) Uncertainties (\u0026ldquo;What in the world is happening\u0026rdquo;) Actions (\u0026ldquo;turn left\u0026rdquo;) In one line: an agent makes decisions via the balance of observation with uncertainty. This is called the observe-act cycle.\nSee also connectionism\nApplications Stock shelving Automated driving Space missions Sports Congestion modeling Online dating Traffic light control decision making methods explicit programming: \u0026ldquo;just code it up\u0026rdquo; \u0026mdash; try this first if you are building something, which should establish a baseline: guess all possible states, and hard code strategies for all of them supervised learning: manually solve representative states, hard code strategies for them, make model interpolate between them optimization: create optimization objective connected to a model of the environment, optimize that objective planning: using model of the environment directly to predict best moves reinforcement learning: make agent interact with environment directly, and optimize its score of success in the environment without a model Method Model Visible? Strategy Hard-Coded? 
explicit programming yes, all states fully known yes supervised learning no, only a sample of it yes, only a sample of it optimization no, except reward no planning yes no reinforcement learning history see decision making history\n","html":"\u003ch2 id=\"key-components\"\u003eKey components\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTask/Objective (\u0026ldquo;Automated Driving to reach destination [here]\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eResources (state) (\u0026ldquo;sensors, fuel, etc.\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eUncertainties (\u0026ldquo;What in the world is happening\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eActions (\u0026ldquo;turn left\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn one line: an \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e makes decisions via the balance of \u003cstrong\u003eobservation\u003c/strong\u003e with \u003cstrong\u003e\u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e\u003c/strong\u003e. 
This is called the \u003ca href=\"/posts/kbhobserve_act_cycle/\"\u003eobserve-act cycle\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhconnectionism/\"\u003econnectionism\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"applications\"\u003eApplications\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eStock shelving\u003c/li\u003e\n\u003cli\u003eAutomated driving\u003c/li\u003e\n\u003cli\u003eSpace missions\u003c/li\u003e\n\u003cli\u003eSports\u003c/li\u003e\n\u003cli\u003eCongestion modeling\u003c/li\u003e\n\u003cli\u003eOnline dating\u003c/li\u003e\n\u003cli\u003eTraffic light control\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"decision-making--kbhdecision-making-dot-md--methods\"\u003e\u003ca href=\"/posts/kbhdecision_making/\"\u003edecision making\u003c/a\u003e methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e: \u0026ldquo;just code it up\u0026rdquo; \u0026mdash; try this first if you are building something, which should establish a \u003cstrong\u003ebaseline\u003c/strong\u003e: guess all possible states, and hard code strategies for all of them\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsupervised_learning/\"\u003esupervised learning\u003c/a\u003e: manually solve representative states, hard code strategies for them, make model interpolate between them\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e: create optimization objective connected to a model of the environment, optimize that objective\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhplanning/\"\u003eplanning\u003c/a\u003e: using model of the environment directly to predict best moves\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e: make agent interact with environment directly, and optimize its score of success in the environment without a 
model\u003c/li\u003e\n\u003c/ul\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eMethod\u003c/th\u003e\n\u003cth\u003eModel Visible?\u003c/th\u003e\n\u003cth\u003eStrategy Hard-Coded?\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eyes, all states fully known\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsupervised_learning/\"\u003esupervised learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eno, only a sample of it\u003c/td\u003e\n\u003ctd\u003eyes, only a sample of it\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eno, except reward\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhplanning/\"\u003eplanning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"history\"\u003ehistory\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhdecision_making_history/\"\u003edecision making history\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_making/","tags":null,"title":"decision making"},{"categories":null,"contents":"Lecture notes taking during CS238, decision making. 
Stanford Intelligence Systems Laboratory (SISL: planning and validation of intelligent systems).\nBig Ideas Themes There\u0026rsquo;s a principled mathematical framework for defining rational behavior There are computational techniques that could lead to better, and perhaps counter-intuitive decisions Successful application depends on your choice of representation and approximation you typically can\u0026rsquo;t solve mathematical models exactly so, we have to rely on good models of approximations The same computational approaches can be applied to different application domains the same set of abstractions can be carried through life send Mykel a note about how these topics about where this stuff is applied These algorithms drive high quality decisions on a tight timeline. You can\u0026rsquo;t fuck up: people die.\nContents Fundamental understanding of mathematical models and solution methods\u0026mdash;ungraded book exercises Three quizzes: one question per chapter chapters 2, 3, 5 Implement and extend key algorithms for learning and decision making Identify an application of the theory of this course and formulate it mathematically (proposal) what are the i/o what are the sensors measurements what are the decisions to be made [one other thing] Course Outline 1-shot: Probabilistic Reasoning models of distributions over many variables using distributions to make inferences utility theory n-shot: Sequential Problems we now 1-shot decision networks into making a series of decisions assume: model of environment is known (no Model Uncertainty), and environment is fully observable (no State Uncertainty) this introduces a Markov Decision Process (MDP) approximation solutions for observing the environment both online and offline Model Uncertainty deal with situations where we don\u0026rsquo;t know what the best action is at any given step i.e.: future rewards, etc. 
introduce reinforcement learning and its challenges Rewards may be received long after important decisions Agents must generalized from limited exploration experience State Uncertainty deal with situations where we don\u0026rsquo;t know what is actually happening: we only have a probabilistic state introduce Partially Observable Markov Decision Process keep a distribution of believes update the distribution of believes make decisions based the distribution Multiagent Systems challenges of Interaction Uncertainty building up interaction complexity simple games: many agents, each with individual rewards, acting to make a single joint action markov games: many agents, many states, multiple outcomes in a stochastic environment; Interaction Uncertainty arises out of unknowns about what other agents will do partially observable markov game: markov games with State Uncertainty decentralized partially observable markov game: POMGs with shared rewards between agents instead of individual rewards Lectures probabilistic reasoning relating to single decisions Baysian Networks, and how to deal with them.\nSU-CS238 SEP262023 SU-CS238 SEP272023 SU-CS238 OCT032023 SU-CS238 OCT052023 SU-CS238 OCT102023 SU-CS238 OCT122023 a chain of reasoning with feedback Markov Decision Process uses policies that are evaluated with policy evaluation via utility, Bellman Equation, value function, etc.\nIf we know the state space fully, we can use policy iteration and value iteration to determine an objectively optimal policy. If we don\u0026rsquo;t (or if the state space is too large), we can try to discretize our state space and appropriate through Approximate Value Functions, or use online planning approaches to compute good policy as we go.\nIf none of those things are feasible (i.e. your state space is too big or complex to be discretized (i.e. 
sampling will cause you to loose the structure of the problem)), you can do some lovely Policy Optimization which will keep you in continuous space while iterating on the policy directly. Some nerds lmao like Policy Gradient methods if your policy is differentiable.\nNow, Policy Optimization methods all require sampling a certain set of trajectories and optimizing over them in order to work. How do we know how much sampling to do before we start optimizing? That\u0026rsquo;s an Exploration and Exploitation question. We can try really hard to collect trajectories, but then we\u0026rsquo;d loose out on collecting intermediate reward.\nSU-CS238 OCT172023 SU-CS238 OCT192023 SU-CS238 OCT242023 SU-CS238 OCT262023 SU-CS238 OCT312023 SU-CS238 NOV022023 POMDP bomp bomp bomp SU-CS238 NOV092023 SU-CS238 NOV142023 SU-CS238 NOV162023 SU-CS238 NOV282023 SU-CS238 NOV302023 Failures? Change the action space Change the reward function Change the transition function Improve the solver Don\u0026rsquo;t worry about it Don\u0026rsquo;t deploy the system Words of Wisdom from Mykel \u0026ldquo;The belief update is central to learning. The point of education is to change your beliefs; look for opportunities to change your belief.\u0026rdquo;\n\u0026ldquo;What\u0026rsquo;s in the action space, how do we maximize it?\u0026rdquo;\nFrom MDPs, \u0026ldquo;we can learn from the past, but the past doesn\u0026rsquo;t influence you.\u0026rdquo;\n\u0026ldquo;Optimism under uncertainty\u0026rdquo;: Exploration and Exploitation \u0026ldquo;you should try things\u0026rdquo;\nWorksheets SU-CS238 Q0Q3 ","html":"\u003cp\u003eLecture notes taking during CS238, decision making. 
Stanford Intelligence Systems Laboratory (SISL: planning and validation of intelligent systems).\u003c/p\u003e\n\u003ch2 id=\"big-ideas\"\u003eBig Ideas\u003c/h2\u003e\n\u003ch3 id=\"themes\"\u003eThemes\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eThere\u0026rsquo;s a principled mathematical framework for defining rational behavior\u003c/li\u003e\n\u003cli\u003eThere are computational techniques that could lead to better, and perhaps counter-intuitive decisions\u003c/li\u003e\n\u003cli\u003eSuccessful application depends on your choice of representation and approximation\n\u003cul\u003e\n\u003cli\u003eyou typically can\u0026rsquo;t solve mathematical models \u003cstrong\u003eexactly\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eso, we have to rely on good models of approximations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThe same computational approaches can be applied to different application domains\n\u003cul\u003e\n\u003cli\u003ethe same set of abstractions can be carried through life\u003c/li\u003e\n\u003cli\u003esend Mykel a note about how these topics about where this stuff is applied\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThese algorithms drive \u003cstrong\u003ehigh quality\u003c/strong\u003e decisions on a \u003cstrong\u003etight timeline\u003c/strong\u003e. 
You can\u0026rsquo;t fuck up: people die.\u003c/p\u003e\n\u003ch3 id=\"contents\"\u003eContents\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFundamental understanding of mathematical models and solution methods\u0026mdash;ungraded book exercises\n\u003cul\u003e\n\u003cli\u003eThree quizzes: one question per chapter\n\u003col\u003e\n\u003cli\u003echapters 2, 3, 5\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eImplement and extend key algorithms for learning and decision making\u003c/li\u003e\n\u003cli\u003eIdentify an application of the theory of this course and formulate it mathematically (proposal)\n\u003cul\u003e\n\u003cli\u003ewhat are the i/o\u003c/li\u003e\n\u003cli\u003ewhat are the sensors measurements\u003c/li\u003e\n\u003cli\u003ewhat are the decisions to be made\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e[one other thing]\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"course-outline\"\u003eCourse Outline\u003c/h2\u003e\n\u003ch3 id=\"1-shot-probabilistic-reasoning\"\u003e1-shot: Probabilistic Reasoning\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003emodels of distributions over many variables\u003c/li\u003e\n\u003cli\u003eusing distributions to make inferences\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility theory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"n-shot-sequential-problems\"\u003en-shot: Sequential Problems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe now 1-shot \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003es into making a series of decisions\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eassume\u003c/strong\u003e: model of environment is known (no \u003ca href=\"\"\u003eModel Uncertainty\u003c/a\u003e), and environment is fully observable (no \u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003ethis introduces a \u003ca 
href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e (MDP)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eapproximation solutions for observing the environment both online and offline\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"model-uncertainty\"\u003eModel Uncertainty\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edeal with situations where we don\u0026rsquo;t know what the best action is at any given step\u003c/li\u003e\n\u003cli\u003ei.e.: future rewards, etc.\u003c/li\u003e\n\u003cli\u003eintroduce \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e and its challenges\n\u003col\u003e\n\u003cli\u003eRewards may be received long after important decisions\u003c/li\u003e\n\u003cli\u003eAgents must generalized from limited exploration experience\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"state-uncertainty\"\u003eState Uncertainty\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edeal with situations where we don\u0026rsquo;t know what is actually happening: we only have a \u003cstrong\u003eprobabilistic\u003c/strong\u003e state\u003c/li\u003e\n\u003cli\u003eintroduce \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePartially Observable Markov Decision Process\u003c/a\u003e\n\u003col\u003e\n\u003cli\u003ekeep a distribution of believes\u003c/li\u003e\n\u003cli\u003eupdate the distribution of believes\u003c/li\u003e\n\u003cli\u003emake decisions based the distribution\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multiagent-systems\"\u003eMultiagent Systems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003echallenges of \u003ca href=\"/posts/kbhinteraction_uncertainty/\"\u003eInteraction Uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebuilding up interaction complexity\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsimple_game/\"\u003esimple game\u003c/a\u003es: 
many \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003es, each with individual rewards, acting to make a single joint action\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_game/\"\u003emarkov game\u003c/a\u003es: many agents, many states, multiple outcomes in a stochastic environment; \u003ca href=\"/posts/kbhinteraction_uncertainty/\"\u003eInteraction Uncertainty\u003c/a\u003e arises out of unknowns about what other agents will do\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_game/\"\u003epartially observable markov game\u003c/a\u003e: \u003ca href=\"/posts/kbhmarkov_game/\"\u003emarkov game\u003c/a\u003es with \u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edecentralized \u003ca href=\"/posts/kbhpartially_observable_markov_game/\"\u003epartially observable markov game\u003c/a\u003e: \u003ca href=\"/posts/kbhpartially_observable_markov_game/\"\u003ePOMG\u003c/a\u003es with shared rewards between \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003es instead of individual rewards\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"lectures\"\u003eLectures\u003c/h2\u003e\n\u003ch3 id=\"probabilistic-reasoning-relating-to-single-decisions\"\u003eprobabilistic reasoning relating to single decisions\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003es, and how to deal with them.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_sep262023/\"\u003eSU-CS238 SEP262023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_sep272023/\"\u003eSU-CS238 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct032023/\"\u003eSU-CS238 OCT032023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct052023/\"\u003eSU-CS238 OCT052023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_cs238_oct102023/\"\u003eSU-CS238 OCT102023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct122023/\"\u003eSU-CS238 OCT122023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"a-chain-of-reasoning-with-feedback\"\u003ea chain of reasoning with feedback\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e uses \u003ca href=\"/posts/kbhpolicy/\"\u003epolicies\u003c/a\u003e that are evaluated with \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e via \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e, \u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Equation\u003c/a\u003e, \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e, etc.\u003c/p\u003e\n\u003cp\u003eIf we know the state space fully, we can use \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e and \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e to determine an objectively \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e. If we don\u0026rsquo;t (or if the state space is too large), we can try to discretize our state space and appropriate through \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eApproximate Value Function\u003c/a\u003es, or use \u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e approaches to compute good policy as we go.\u003c/p\u003e\n\u003cp\u003eIf none of those things are feasible (i.e. your state space is too big or complex to be discretized (i.e. 
sampling will cause you to loose the structure of the problem)), you can do some lovely \u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e which will keep you in continuous space while iterating on the policy directly. Some nerds lmao like \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e methods if your policy is differentiable.\u003c/p\u003e\n\u003cp\u003eNow, \u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e methods all require sampling a certain set of \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003etrajectories\u003c/a\u003e and optimizing over them in order to work. How do we know how much sampling to do before we start optimizing? That\u0026rsquo;s an \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e question. We can try really hard to collect \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003etrajectories\u003c/a\u003e, but then we\u0026rsquo;d loose out on collecting intermediate reward.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct172023/\"\u003eSU-CS238 OCT172023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct192023/\"\u003eSU-CS238 OCT192023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct242023/\"\u003eSU-CS238 OCT242023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct262023/\"\u003eSU-CS238 OCT262023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_oct212023/\"\u003eSU-CS238 OCT312023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov022023/\"\u003eSU-CS238 NOV022023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pomdp--kbhpartially-observable-markov-decision-process-dot-md--bomp-bomp-bomp\"\u003e\u003ca 
href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e bomp bomp bomp\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov092023/\"\u003eSU-CS238 NOV092023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov142023/\"\u003eSU-CS238 NOV142023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov162023/\"\u003eSU-CS238 NOV162023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov282023/\"\u003eSU-CS238 NOV282023\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_nov302023/\"\u003eSU-CS238 NOV302023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"failures\"\u003eFailures?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eChange the action space\u003c/li\u003e\n\u003cli\u003eChange the reward function\u003c/li\u003e\n\u003cli\u003eChange the transition function\u003c/li\u003e\n\u003cli\u003eImprove the solver\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t worry about it\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t deploy the system\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"words-of-wisdom-from-mykel\"\u003eWords of Wisdom from Mykel\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;The belief update is central to learning. 
The point of education is to change your beliefs; look for opportunities to change your belief.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;What\u0026rsquo;s in the action space, how do we maximize it?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eFrom MDPs, \u0026ldquo;we can learn from the past, but the past doesn\u0026rsquo;t influence you.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Optimism under uncertainty\u0026rdquo;: \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e \u0026ldquo;you should try things\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"worksheets\"\u003eWorksheets\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_cs238_q0q3/\"\u003eSU-CS238 Q0Q3\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_making_index/","tags":["index"],"title":"Decision Making Index"},{"categories":null,"contents":"A decision network is a Baysian Network which is used to make decisions based on optimizing utility.\nTo solve a problem, we iterate through all possible decision parameters to find the one that maximizes utility.\nNodes chance nodes: random variables \u0026mdash; some inputs we can observe, some are latent variables we can\u0026rsquo;t observe \u0026mdash; circles action nodes: what we have control over \u0026mdash; squares utility nodes: output, what the results would be; we typically sum utilities together if you have multiple of them \u0026mdash; diamonds Edges conditional edge - arrows to chance nodes: conditional probability edges informational edge - arrows to action nodes: this information is used to inform choice of action functional edge - arrows to utility nodes: computes how the action affects the world Example For \\(U\\), for instance, you can have a factor that loks ilke:\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003e is a \u003ca 
href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e which is used to make decisions based on optimizing \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo solve a problem, we iterate through all possible decision parameters to find the one that maximizes utility.\u003c/p\u003e\n\u003ch2 id=\"nodes\"\u003eNodes\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003echance nodes: random variables \u0026mdash; some inputs we can observe, some are latent variables we can\u0026rsquo;t observe \u0026mdash; circles\u003c/li\u003e\n\u003cli\u003eaction nodes: what we have control over \u0026mdash; squares\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e nodes: output, what the results would be; we typically sum utilities together if you have multiple of them \u0026mdash; diamonds\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"edges\"\u003eEdges\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003econditional edge - arrows to chance nodes: conditional \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e edges\u003c/li\u003e\n\u003cli\u003einformational edge - arrows to action nodes: this information is used to inform choice of action\u003c/li\u003e\n\u003cli\u003efunctional edge - arrows to utility nodes: computes how the action affects the world\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-12_12-27-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFor \\(U\\), for instance, you can have a \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003e that loks ilke:\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_networks/","tags":null,"title":"decision network"},{"categories":null,"contents":"a student approach to learning where learning outcomes are driven by student\u0026rsquo;s own experience to deeply drive educational 
results independenlty\n","html":"\u003cp\u003ea \u003ca href=\"\"\u003estudent approach to learning\u003c/a\u003e where learning outcomes are driven by student\u0026rsquo;s own experience to deeply drive educational results independenlty\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeep_approach/","tags":null,"title":"deep approach"},{"categories":null,"contents":"deep learning is MLE performed with neural networks. A neural network is many logistic regression pieces (sic.?) stack on top of each other.\nWe begin motivating this with trying to solve MNIST with logistic regression. What a time to be alive. After each layer of deep learning, we are going to use a layer of \u0026ldquo;hidden variable\u0026rdquo;, made of singular logistic regressions,\nNotation:\n\\(x\\) is the input, \\(h\\) is the hidden layers, and \\(\\hat{y}\\) is the prediction.\nWe call each weight, at each layer, from \\(x_{i}\\) to \\(h_{j}\\), \\(\\theta_{i,j}^{(h)}\\). At every neuron on each layer, we calculate:\n\\begin{equation} h_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}] \\end{equation}\n\\begin{equation} \\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}] \\end{equation}\nnote! 
we often\nbackpropegation backpropegation is a special case of \u0026ldquo;backwards differentiation\u0026rdquo; to update a computation grap.h\nToy Consider:\n\\begin{equation} L(a,b,c) = c(a+2b) \\end{equation}\nmeaning, we obtain a graph that looks like:\nin three steps, we have:\n\\(d = 2b\\) \\(e = a+d\\) \\(L = e\\cdot e\\) To perform backpropagation, we compute derivatives from right to left, computing first \\(\\pdv{L}{L}= 1\\), then, moving slowly towards the left to obtain \\(\\pdv{L}{c} = \\pdv{L}{L}\\pdv{L}{c}\\), and then \\(\\pdv{L}{e} = \\pdv{L}{L}\\pdv{L}{c}\\) , and then \\(\\pdv{L}{d} = \\pdv{L}{L}\\pdv{L}{e}\\pdv{e}{d}\\) and so forth.\nMotivation deep learning is useful by having good \\(\\theta\\) we can find useful thetas by MLE we MLE by doing optimization to maximize the likelyhood Example For one data point, let us define our neural network:\n\\begin{equation} h_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}] \\end{equation}\n\\begin{equation} \\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}] \\end{equation}\nwe can define our network:\n\\begin{equation} L(\\theta) = P(Y=y|X=x) = (\\hat{y})^{y} (1-\\hat{y})^{1-y} \\end{equation}\nfrom IID datasets, we can multiply the probablities together:\n\\begin{equation} L(\\theta) = \\prod_{i=1}^{n} (\\hat{y_{i}})^{y_{i}} (1-\\hat{y_{i}})^{1-y_{i}} \\end{equation}\nand, to prevent calculus and derivative instability, we take the log:\n\\begin{equation} LL(\\theta) = \\sum_{i=1}^{n}{y_{i}}\\log (\\hat{y_{i}}) \\cdot ( 1-y_{i} )\\log (1-\\hat{y_{i}}) \\end{equation}\nWe want to maximise this, meaning we perform gradient ascent on this statement. 
Recall the chain rule; so we can break each layer down:\n\\begin{equation} \\pdv{LL(\\theta)}{\\theta_{ij}^{h}} = \\pdv{LL(\\theta)}{\\hat{y}} \\pdv{\\hat{y}}{h_{j}} \\pdv{h_{j}}{\\theta_{ij}^{h}} \\end{equation}\nfurthermore, for any summation,\n\\begin{equation} \\dv x \\sum_{i=0}^{} x = \\sum_{i=0}^{}\\dv x x \\end{equation}\nSo we can consider our derivatives with respect to each data point. When going about the second part, recall an important trick:\n\\begin{equation} \\pdv{h_{i}} \\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}] \\end{equation}\nyou will note that, for the inside derivative, much the summation expands\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e is \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e performed with neural networks. A \u003ca href=\"/posts/kbhdeep_learning/\"\u003eneural network\u003c/a\u003e is many \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e pieces (sic.?) stack on top of each other.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe begin motivating this with trying to solve MNIST with \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e. What a time to be alive. After each layer of \u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e, we are going to use a layer of \u0026ldquo;\u003ca href=\"/posts/kbhinference/\"\u003ehidden variable\u003c/a\u003e\u0026rdquo;, made of singular \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003es,\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNotation:\u003c/p\u003e\n\u003cp\u003e\\(x\\) is the input, \\(h\\) is the hidden layers, and \\(\\hat{y}\\) is the prediction.\u003c/p\u003e\n\u003cp\u003eWe call each weight, at each layer, from \\(x_{i}\\) to \\(h_{j}\\), \\(\\theta_{i,j}^{(h)}\\). 
At every neuron on each layer, we calculate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote! we often\u003c/p\u003e\n\u003ch2 id=\"backpropegation\"\u003ebackpropegation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#backpropegation\"\u003ebackpropegation\u003c/a\u003e is a special case of \u0026ldquo;backwards differentiation\u0026rdquo; to update a computation grap.h\u003c/p\u003e\n\u003ch3 id=\"toy\"\u003eToy\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(a,b,c) = c(a+2b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, we obtain a graph that looks like:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_23-46-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ein three steps, we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(d = 2b\\)\u003c/li\u003e\n\u003cli\u003e\\(e = a+d\\)\u003c/li\u003e\n\u003cli\u003e\\(L = e\\cdot e\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTo perform backpropagation, we compute derivatives from right to left, computing first \\(\\pdv{L}{L}= 1\\), then, moving slowly towards the left to obtain \\(\\pdv{L}{c} = \\pdv{L}{L}\\pdv{L}{c}\\), and then \\(\\pdv{L}{e} = \\pdv{L}{L}\\pdv{L}{c}\\) , and then \\(\\pdv{L}{d} = \\pdv{L}{L}\\pdv{L}{e}\\pdv{e}{d}\\) and so forth.\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003eMotivation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003edeep learning is useful by having good \\(\\theta\\)\u003c/li\u003e\n\u003cli\u003ewe can find useful thetas by \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e by doing optimization to maximize 
the \u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eFor one data point, let us define our neural network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{j} = \\sigma\\qty[\\sum_{i}^{} x_{i} \\theta_{i,j}^{(h)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{y} = \\sigma\\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can define our network:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(\\theta) = P(Y=y|X=x) = (\\hat{y})^{y} (1-\\hat{y})^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efrom IID datasets, we can multiply the probablities together:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(\\theta) = \\prod_{i=1}^{n} (\\hat{y_{i}})^{y_{i}} (1-\\hat{y_{i}})^{1-y_{i}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, to prevent calculus and derivative instability, we take the log:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nLL(\\theta) = \\sum_{i=1}^{n}{y_{i}}\\log (\\hat{y_{i}}) \\cdot ( 1-y_{i} )\\log (1-\\hat{y_{i}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to maximise this, meaning we perform \u003ca href=\"/posts/kbhargmax/#gradient-ascent\"\u003egradient ascent\u003c/a\u003e on this statement. Recall the chain rule; so we can break each layer down:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{LL(\\theta)}{\\theta_{ij}^{h}} = \\pdv{LL(\\theta)}{\\hat{y}} \\pdv{\\hat{y}}{h_{j}} \\pdv{h_{j}}{\\theta_{ij}^{h}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efurthermore, for any summation,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv x \\sum_{i=0}^{} x = \\sum_{i=0}^{}\\dv x x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we can consider our derivatives with respect to each data point. 
When going about the second part, recall an important trick:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{h_{i}} \\qty[\\sum_{i}^{} h_{i}\\theta_{i}^{(y)}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that, for the inside derivative, much the summation expands\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeep_learning/","tags":null,"title":"deep learning"},{"categories":null,"contents":"Facts Everybody writes bugs Debugging sucks Defensive Programming Tools + Techniques Use language features Specs, documentations, Test-Driven Development, unit testing Fail fast and loudly Systematic debugging Investing in tools Use Language Features Descriptors: static, final, pub./priv. Type checking: prevent type errors Automatic array bounds checking Memory management Compiler optimization Key idea: know what language features are available, why/when to use them. don\u0026rsquo;t work against the language in circumventing them\nSpecs, Docs., TDD, Unit Tests How should it work: specs How does it work: docs How will I know it works: TDD How do I know it still works: unit tests These all force you to think about your code before!! 
you write it so then you can correct them as soon as possible.\nFailing Fast and Failing Loudly The earlier you recognize there is a problem, the easier it is to fix it Problems not fixed can be lost, covered up, or even relied upon Learn from every failure How do we put this into practice Use asserts, exceptions, logging Fix/diagnose/track every bug, even if you choose not to fix it Add regression tests for every bug + run them regularly Systematic Debugging Systematic Debugging is a framework for debugging software.\nReproduce the bug Reduce the bug to the smallest possible, repeatable test case Faster test cases mean faster iterations in debugging Smaller test cases help eliminate possible causes for error Find the root cause Study data (logs, behavior, etc.), hypothesis, experiment, repeat Change code and data to get more information FIXING SYMPTOM IS NOT ENOUGH Fix the bug Add a regression test, and run all tests Reducing Test Case Start with the data that uncovered the bug Remove pieces of data until the bug no longer occurs Bracketing: create both a test case that fails and similar test cases that pass Binary search: remove/add back half of the data at a time Can work from either end: start with everything and reduce until disappearance, or start with only one line and build until bug Finding the Cause Trace through the program View intermediate results Every iteration of a for loop Input and output of a given function Tools to use assert() printing/logging a debugger binary search Tooling! 
Linter Fuzzer Sanitizer Valgrind DTrace ","html":"\u003ch2 id=\"facts\"\u003eFacts\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eEverybody writes bugs\u003c/li\u003e\n\u003cli\u003eDebugging \u003cem\u003esucks\u003c/em\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"defensive-programming-tools-plus-techniques\"\u003eDefensive Programming Tools + Techniques\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUse language features\u003c/li\u003e\n\u003cli\u003eSpecs, documentations, \u003ca href=\"/posts/kbhtesting/#test-driven-development\"\u003eTest-Driven Development\u003c/a\u003e, unit testing\u003c/li\u003e\n\u003cli\u003eFail fast and loudly\u003c/li\u003e\n\u003cli\u003eSystematic debugging\u003c/li\u003e\n\u003cli\u003eInvesting in tools\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"use-language-features\"\u003eUse Language Features\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDescriptors: static, final, pub./priv.\u003c/li\u003e\n\u003cli\u003eType checking: prevent type errors\u003c/li\u003e\n\u003cli\u003eAutomatic array bounds checking\u003c/li\u003e\n\u003cli\u003eMemory management\u003c/li\u003e\n\u003cli\u003eCompiler optimization\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eKey idea: know what language features are available, why/when to use them. 
\u003cstrong\u003edon\u0026rsquo;t work against the language in circumventing them\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"specs-docs-dot-tdd-unit-tests\"\u003eSpecs, Docs., TDD, Unit Tests\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHow should it work: specs\u003c/li\u003e\n\u003cli\u003eHow does it work: docs\u003c/li\u003e\n\u003cli\u003eHow will I know it works: TDD\u003c/li\u003e\n\u003cli\u003eHow do I know it still works: unit tests\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThese all force you to \u003cem\u003ethink\u003c/em\u003e about your code \u003cem\u003ebefore!!\u003c/em\u003e you write it so then you can correct them as soon as possible.\u003c/p\u003e\n\u003ch2 id=\"failing-fast-and-failing-loudly\"\u003eFailing Fast and Failing Loudly\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eThe earlier you recognize there is a problem, the easier it is to fix it\u003c/li\u003e\n\u003cli\u003eProblems not fixed can be lost, covered up, or even \u003cstrong\u003erelied upon\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eLearn from every failure\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"how-do-we-put-this-into-practice\"\u003eHow do we put this into practice\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eUse asserts, exceptions, logging\u003c/li\u003e\n\u003cli\u003eFix/diagnose/track every bug, even if you choose not to fix it\u003c/li\u003e\n\u003cli\u003eAdd regression tests for every bug + run them regularly\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"systematic-debugging\"\u003eSystematic Debugging\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#systematic-debugging\"\u003eSystematic Debugging\u003c/a\u003e is a framework for debugging software.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eReproduce the bug\u003c/li\u003e\n\u003cli\u003eReduce the bug to the smallest possible, repeatable test case\n\u003col\u003e\n\u003cli\u003eFaster test cases mean faster iterations in debugging\u003c/li\u003e\n\u003cli\u003eSmaller test cases help 
eliminate possible causes for error\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eFind the root cause\n\u003col\u003e\n\u003cli\u003eStudy data (logs, behavior, etc.), hypothesis, experiment, repeat\u003c/li\u003e\n\u003cli\u003eChange code and data to get more information\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eFIXING SYMPTOM IS NOT ENOUGH\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eFix the bug\u003c/li\u003e\n\u003cli\u003eAdd a regression test, and run all tests\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"reducing-test-case\"\u003eReducing Test Case\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStart with the data that uncovered the bug\u003c/li\u003e\n\u003cli\u003eRemove pieces of data until the bug no longer occurs\n\u003cul\u003e\n\u003cli\u003eBracketing: create both a test case that fails and similar test cases that pass\u003c/li\u003e\n\u003cli\u003eBinary search: remove/add back half of the data at a time\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eCan work from either end: start with everything and reduce until disappearance, or start with only one line and build until bug\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"finding-the-cause\"\u003eFinding the Cause\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTrace through the program\u003c/li\u003e\n\u003cli\u003eView intermediate results\n\u003cul\u003e\n\u003cli\u003eEvery iteration of a for loop\u003c/li\u003e\n\u003cli\u003eInput and output of a given function\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eTools to use\n\u003cul\u003e\n\u003cli\u003eassert()\u003c/li\u003e\n\u003cli\u003eprinting/logging\u003c/li\u003e\n\u003cli\u003ea debugger\u003c/li\u003e\n\u003cli\u003ebinary search\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"tooling\"\u003eTooling!\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLinter\u003c/li\u003e\n\u003cli\u003eFuzzer\u003c/li\u003e\n\u003cli\u003eSanitizer\u003c/li\u003e\n\u003cli\u003eValgrind\u003c/li\u003e\n\u003cli\u003eDTrace\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"d41d8c\"\u003e\u003c/h2\u003e\n\u003ch2 id=\"d41d8c\"\u003e\u003c/h2\u003e\n\u003ch2 id=\"d41d8c\"\u003e\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdefensive_programming/","tags":null,"title":"Defensive Programming"},{"categories":null,"contents":"degrees of belief help us quantify how much we believe some event \\(A\\) is more/less plausible than some event \\(B\\).\nLet us take two statements:\n\\(A\\) Taylor gets Nobel Prize in Literature \\(B\\) Han shot first For instance, if we want to express \u0026ldquo;I think its more likely that Taylor gets the prize than Han shot first\u0026rdquo;:\n\\begin{equation} A \\succ B \\end{equation}\naxioms of degrees of belief universal comparability for two statements \\(A, B\\), only three states can exist:\n\\(A \\succ B\\) (A more likely) \\(A \\prec B\\) (B more likely) \\(A \\sim B\\) (equally likely) transitivity if \\(A \\succeq B\\) and \\(B \\succeq C\\), then \\(A \\succeq C\\)\nlanguage of probability using this framework, we can then describe the events in terms of probability\n\\(P(A) \u0026gt; P(B) \\Leftrightarrow A \\succ B\\) \\(P(A) = P(B) \\Leftrightarrow A \\sim B\\) See also axiom of probability\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability_theory/\"\u003edegrees of belief\u003c/a\u003e help us quantify how much we believe some event \\(A\\) is more/less plausible than some event \\(B\\).\u003c/p\u003e\n\u003cp\u003eLet us take two statements:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A\\) Taylor gets Nobel Prize in Literature\u003c/li\u003e\n\u003cli\u003e\\(B\\) Han shot first\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFor instance, if we want to express \u0026ldquo;I think its 
more likely that Taylor gets the prize than Han shot first\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\succ B\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"axioms-of-degrees-of-belief--kbhprobability-theory-dot-md\"\u003eaxioms of \u003ca href=\"/posts/kbhprobability_theory/\"\u003edegrees of belief\u003c/a\u003e\u003c/h2\u003e\n\u003ch3 id=\"universal-comparability\"\u003euniversal comparability\u003c/h3\u003e\n\u003cp\u003efor two statements \\(A, B\\), only three states can exist:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A \\succ B\\) (A more likely)\u003c/li\u003e\n\u003cli\u003e\\(A \\prec B\\) (B more likely)\u003c/li\u003e\n\u003cli\u003e\\(A \\sim B\\) (equally likely)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"transitivity\"\u003etransitivity\u003c/h3\u003e\n\u003cp\u003eif \\(A \\succeq B\\) and \\(B \\succeq C\\), then \\(A \\succeq C\\)\u003c/p\u003e\n\u003ch3 id=\"language-of-probability\"\u003elanguage of probability\u003c/h3\u003e\n\u003cp\u003eusing this framework, we can then describe the events in terms of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P(A) \u0026gt; P(B) \\Leftrightarrow A \\succ B\\)\u003c/li\u003e\n\u003cli\u003e\\(P(A) = P(B) \\Leftrightarrow A \\sim B\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_theory/","tags":null,"title":"degrees of belief"},{"categories":null,"contents":" use efficient page maps too translate virtual to physical addresses kick things off to disk when memory runs out Every process has its own page map.\ndemand paging Key idea: physical representation of virtual memory does not have to be on actual memory.\nif memory fills out, kick a page to disk if the program asks for memory again, kick another page to disk and load 
its memory back Keep in memory the information that\u0026rsquo;s being used, kick the rest to swap space/\u0026quot;paging file\u0026quot;. Ideally: we have a performance of main memory and capacity of disk.\ndemand fetching most modern OSes start with no pages loaded\u0026mdash;load pages only when referenced; this is tempered by the type of page that\u0026rsquo;s needed:\nread only code pages (program code, doesn\u0026rsquo;t change) \u0026mdash; do NOT save to swap; executable will always be there so you can just reload from disk program will expect code pages too contain executable data initialized data pages \u0026mdash; save to swap because contents may have changed frorm initial values program expects them to contain data on load, so we need to load them ahead of time unitialized data pages save to swap no initial content Page Type Need Content on First Load Save to Swap (\u0026ldquo;Swap?\u0026rdquo;) code yes no (read from exe) data yes yes stack/heap no yes We only write to disk if its dirty.\nusing swap to get extra memory pick a page to kick out write kicked page to disk mark the old page entry as not present give the physical address to the new virtual page If we ever ask for the old page back, trigger page fault:\npage fault (to recover a page that\u0026rsquo;s on swap)\ncheck with swap for the data get new physical page (perhaps kicking out another page) load data into page update page map as present and with new address choosing what to swap thrashing downside of demand paging\nWhen the pages being actively used don\u0026rsquo;t fit in memory: you will have to get the page, kick it out immediately, get it back again, etc. This basically make memory as fast as disk; which is really slow.\nSolution: download more RAM. 
\u0026ldquo;buy more memory, or use task manager\u0026rdquo; - nick\npage map A page map, to keep track of something is valid/invalid, we have to store information about EVERY PAGE for EVERY PROCESS.\nEach entry in the page:\nIndex Physical Address Writable Present/Mapped? Last Access Kernel Dirty 0 0x2023 1 0 0 0 0 1 0x0023 1 1 1 0 0 Dirty: the content matters and it needs to be written out.\nThis is, of course, very big if stored densely. Consider 36 bit page numbers, 8 byte entries, it requires \\(2^{36} \\cdot 8 = 512GB\\) worth of space per process. This is sad.\nPRESENT simply means if the segment of memory is MAPPED. ITs possible for a not present index to be in SWAP instead.\npage map tree implementation To resolve this, we have entry for RANGES of virtual pages; there\u0026rsquo;s about \\(4\\) levels. If everything is invalid in a range, we just consider the whole range invalid using one row.\nTherefore, we lazily make space for this tree.\n","html":"\u003col\u003e\n\u003cli\u003euse efficient \u003cstrong\u003epage maps\u003c/strong\u003e too translate virtual to physical addresses\u003c/li\u003e\n\u003cli\u003ekick things off to \u003cstrong\u003edisk\u003c/strong\u003e when memory runs out\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eEvery process has its own \u003ca href=\"#page-map\"\u003epage map\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"demand-paging\"\u003edemand paging\u003c/h2\u003e\n\u003cp\u003eKey idea: physical representation of virtual memory does \u003cstrong\u003enot have to be on actual memory\u003c/strong\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif memory fills out, kick a page to disk\u003c/li\u003e\n\u003cli\u003eif the program asks for memory again, kick another page to disk and load its memory back\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eKeep in memory the information that\u0026rsquo;s being \u003cstrong\u003eused\u003c/strong\u003e, kick the rest to \u003ca href=\"/posts/kbhdemand_paging/\"\u003eswap 
space\u003c/a\u003e/\u0026quot;\u003ca href=\"/posts/kbhdemand_paging/\"\u003epaging file\u003c/a\u003e\u0026quot;. Ideally: we have a performance of main memory and capacity of disk.\u003c/p\u003e\n\u003ch3 id=\"demand-fetching\"\u003edemand fetching\u003c/h3\u003e\n\u003cp\u003emost modern OSes start with \u003cstrong\u003eno pages loaded\u003c/strong\u003e\u0026mdash;load pages only when referenced; this is tempered by the type of page that\u0026rsquo;s needed:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eread only code pages (program code, doesn\u0026rsquo;t change) \u0026mdash;\n\u003cul\u003e\n\u003cli\u003edo \u003cstrong\u003eNOT\u003c/strong\u003e save to swap; executable will always be there so you can just reload from disk\u003c/li\u003e\n\u003cli\u003eprogram will expect code pages too contain executable data\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003einitialized data pages \u0026mdash;\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esave to swap\u003c/strong\u003e because contents may have changed frorm initial values\u003c/li\u003e\n\u003cli\u003eprogram expects them to contain data on load, so we need to load them ahead of time\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eunitialized data pages\n\u003cul\u003e\n\u003cli\u003esave to swap\u003c/li\u003e\n\u003cli\u003eno initial content\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePage Type\u003c/th\u003e\n\u003cth\u003eNeed Content on First Load\u003c/th\u003e\n\u003cth\u003eSave to Swap (\u0026ldquo;Swap?\u0026rdquo;)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ecode\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eno (read from 
exe)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edata\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003estack/heap\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe only write to disk if its \u003cstrong\u003edirty\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"using-swap-to-get-extra-memory\"\u003eusing swap to get extra memory\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003epick a page to kick out\u003c/li\u003e\n\u003cli\u003ewrite kicked page to disk\u003c/li\u003e\n\u003cli\u003emark the old page entry as not present\u003c/li\u003e\n\u003cli\u003egive the physical address to the new virtual page\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf we ever ask for the old page back, trigger \u003ca href=\"#page-fault\"\u003epage fault\u003c/a\u003e:\u003c/p\u003e\n\u003ch3 id=\"page-fault\"\u003epage fault\u003c/h3\u003e\n\u003cp\u003e(to recover a page that\u0026rsquo;s on swap)\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echeck with swap for the data\u003c/li\u003e\n\u003cli\u003eget new physical page (perhaps kicking out another page)\u003c/li\u003e\n\u003cli\u003eload data into page\u003c/li\u003e\n\u003cli\u003eupdate page map as present and with new address\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"choosing-what-to-swap\"\u003echoosing what to swap\u003c/h3\u003e\n\u003ch3 id=\"thrashing\"\u003ethrashing\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003edownside of \u003ca href=\"/posts/kbhdemand_paging/\"\u003edemand paging\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWhen the pages being actively used don\u0026rsquo;t fit in memory: you will have to get the page, kick it out immediately, get it back again, etc. 
This basically make memory as fast as disk; which is really slow.\u003c/p\u003e\n\u003cp\u003eSolution: download more RAM. \u0026ldquo;buy more memory, or use task manager\u0026rdquo; - nick\u003c/p\u003e\n\u003ch2 id=\"page-map\"\u003epage map\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#page-map\"\u003epage map\u003c/a\u003e, to keep track of something is valid/invalid, we have to store information about \u003cstrong\u003eEVERY PAGE\u003c/strong\u003e for \u003cstrong\u003eEVERY PROCESS\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eEach entry in the page:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eIndex\u003c/th\u003e\n\u003cth\u003ePhysical Address\u003c/th\u003e\n\u003cth\u003eWritable\u003c/th\u003e\n\u003cth\u003ePresent/Mapped?\u003c/th\u003e\n\u003cth\u003eLast Access\u003c/th\u003e\n\u003cth\u003eKernel\u003c/th\u003e\n\u003cth\u003eDirty\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0x2023\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0x0023\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003eDirty\u003c/strong\u003e: the content matters and it needs to be written out.\u003c/p\u003e\n\u003cp\u003eThis is, of course, very big if stored densely. Consider 36 bit page numbers, 8 byte entries, it requires \\(2^{36} \\cdot 8 = 512GB\\) worth of space per process. 
This is sad.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ePRESENT\u003c/strong\u003e simply means if the segment of memory is \u003cstrong\u003eMAPPED\u003c/strong\u003e. ITs possible for a not present index to be in SWAP instead.\u003c/p\u003e\n\u003ch3 id=\"page-map-tree-implementation\"\u003epage map tree implementation\u003c/h3\u003e\n\u003cp\u003eTo resolve this, we have entry for RANGES of virtual pages; there\u0026rsquo;s about \\(4\\) levels. If everything is invalid in a range, we just consider the whole range invalid using one row.\u003c/p\u003e\n\u003cp\u003eTherefore, we lazily make space for this tree.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdemand_paging/","tags":null,"title":"demand paging"},{"categories":null,"contents":"demand-driven theory hypothesis that the reason why the Great Depression took place was because people were not buying stocks, etc, and there was no demand.\nSee also: Monetarist theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdemand_driven_theory/\"\u003edemand-driven theory\u003c/a\u003e hypothesis that the reason why the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e took place was because people were not buying stocks, etc, and there was no demand.\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theory.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdemand_driven_theory/","tags":null,"title":"demand-driven theory"},{"categories":null,"contents":"DementiaBank is a shared database of multimedia interactions for the study of communication in dementia. There are a few projects being explored for DementiaBank.\nSee also: ADReSS Literature Survey\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e is a shared database of multimedia interactions for the study of communication in dementia. 
There are a few projects being explored for \u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhadress_literature_survey/\"\u003eADReSS Literature Survey\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdementiabank/","tags":null,"title":"DementiaBank"},{"categories":null,"contents":"Ideas Can we correlate any longitudinal data with NACC?\nData dementia/English/Lanzi: Alyssa Lanzi\u0026rsquo;s new data\ndementia/English/Delaware\nWhat are the standard for acoustic features?\nMotor cortex/frontal control may also be impacted\nVocal tremer\nWhat are the predictors? How automatic can we make it?\n","html":"\u003ch2 id=\"ideas\"\u003eIdeas\u003c/h2\u003e\n\u003cp\u003eCan we correlate any longitudinal data with \u003ca href=\"/posts/kbhnacc/\"\u003eNACC\u003c/a\u003e?\u003c/p\u003e\n\u003ch2 id=\"data\"\u003eData\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edementia/English/Lanzi: Alyssa Lanzi\u0026rsquo;s new data\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edementia/English/Delaware\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhat are the standard for acoustic features?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eMotor cortex/frontal control may also be impacted\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eVocal tremer\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhat are the predictors? How automatic can we make it?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdementiabank_acoustics_brainstoming/","tags":null,"title":"DementiaBank Acoustics Brainstoming"},{"categories":null,"contents":"The DementiaBank Acoustics Project is a working title for an acoustic-only challenge for AD detection. 
This document serves as the lab notebook for this project.\nThis project will attempt to replicate some of the results of Wang 2019 and Martinc 2021, but focusing on minimizing human involvement; we will first work on raw transcript classification with ERNIE (cutting all CHAT annotations), then introduce pause-encoding in a manner similar to Yuan 2021 which is automated by MFA. Goal is to replicate the results of Yuan 2021/or even Martinc 2021 in a completely automated manner.\nBackground Reading I first began by doing a literature survey on the ADReSS Challenge results published in the Frontiers AD special interest group issue.\nProposal And then, we wrote a proposal: DementiaBank Acoustics Project Proposal\nBrainstoming More notes from the meeting: DementiaBank Acoustics Brainstoming\nProtocol Notes July 1st Began by moving a subsample of Pitt\u0026rsquo;s Cookie Theft to pitt-7-1 in the raw data folder Ran flo on all collected samples. Arguments used are the same as that for batchalign, except we filter out the INV tier as we are detecting AD on patient and not investigator: so flo +d +ca +t* -tINV Moved all collected samples (and changed extension to .txt) to the same sub-folder, but in transcripts_nodisfluency July 2nd Created a dataprep script dataprep.py which dumps a pickled copy of cleaned data to transcripts_nodisfluency/pitt-7-1.dat. Created sliding windows of 5 pieces of dialogue concatenated, stored it in transcripts_nodisfluency/pitt-7-1-windowed.dat Used tencent/HuYong\u0026rsquo;s nghuyong/ernie-2.0-en Ernie 2.0 model, the continuous language model from Baidu (Layer:12, Hidden:768, Heads:12) July 4th Finalized training code. Selected base hyperparameters {bs: 8, epochs: 2, lr: 3e-3, length: 60}. Again, we are using Baidu\u0026rsquo;s nghuyong/ernie-2.0-en. 
Started training fastcalculator on 24bc812 train: faithful-frog-3 {bs: 8, epochs: 2, lr: 3e-3, length: 60, pitt-7-1-windowed.dat }\nCommentary: LR could be too high, looking at the divergent loss behavior. Decision: dropping bs to 4 and lr to 1e-5, similar to previous transformers. Also training for 3 epochs. train: revived-disco-5 {bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-1-windowed.dat }\nCommentary: quintessential overfitting Decision: Made the corpus bigger cleaned the entire Pitt corpus (pitt-7-4 in the raw folder) to become training data. Similar to pitt-7-1, ran flo on all collected samples; arguments used are the same as that for batchalign, except we filter out the INV tier as we are detecting AD on patient and not investigator: so flo +d +ca +t* -tINV; the flo\u0026rsquo;d results are in transcripts_nodisfluency. the notable difference between the previous dataset 7-1 and the current one 7-4 is that the 7-4 are prepended numbered by the task (cookie/100-01.cha \u0026gt; =cookie-100-01.txt) New (full) Pitt data as prepared above is ran though the dataprep script as of b325514cfad79da82d7a519ed29ea19ed87b2be4 (difference is that empty/dummy files are ignored), and pickled at transcripts_nodisfluency/pitt-7-4.dat and transcripts_nodisfluency/pitt-7-4-windowed.dat respectively. For new data, window size is still 5, splitting 10 cases out for testing now instead of 5. train: vocal-oath-6 {bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed.dat}\nCommentary: high recall, low precision. Perhaps classes aren\u0026rsquo;t balanced? Spoiler alert: they are not. An inspection of data reveals that there is 3211 rows of dementia, 2397 rows of control Decision: Created pitt-7-4-bal and pitt-7-4-windowed-bal series of data based on dataprep.py on 703f79248a20fd7a13a5033ca2bf7f691f42c941. This version force-crops to make sure that the dementia and control indicies have the exact same length for each class. 
train: helpful-leaf-7 {bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\nBeautiful. Question now is whether or not there is data leakage/external heuristics. It is a good time to do some LOOCV. Getting this result without any disfluency calculations seems unlikely.\nBut anyways, going to discuss these results as they seem to meet results we see in Yuan 2021, even without top-N ensemble; though this is one trial, LOOCV may still show that we actually need it.\nJuly 5th Began the day with creating the script k-fold validation; I originally hoped to exactly replicate the procedure of Yuan 2021 for comparability, but, not sure how they got the actual result of a min/max range with LOOCV on binary; therefore, we will instead create a 95% confidence interval analysis via a single-variable t test on standard k-fold validation. K=50 During one-off testing, another set of hyperparameters seems to work too: {bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}. As we have not begun tuning for hyperparameters, we are just going to use this set, K=50, for the first k-fold trial. k-fold: F4ZVbGfdBAQvtvXemWZCZD code: 55f77ff1dea03c3ed66967864dc52fd2c0062f23\n{bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat} K = 50\nIt seems like the results we got is consistent and validates in a manner which we expect.\nJuly 7th Yesterday was a day filled with working on batchalign, but we are back now. Today, I aim to look into the heuristic that I identified yesterday by playing with the model, which is that it seems like the model prefers the use of long-focused sentences about cookies, so the heruistic its picking up is probably on-topicness.\nI am going to first leverage the lovely cdpierse/transformers-interpret tool to help build some explainability by adding it to validate.py. Upon some human validation with random sampling, the model seem to do less well than I\u0026rsquo;d hoped. 
Running a train cycle with the new results/params seen above to see if it does better.\ntrain: brisk-oath-10 {bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\nCommentary: It seems like the model is doing overall worse from validation data, but it does fairly well during test data. Decision: I can fairly confidently claim that the model is just fitting on topic. As in, if the topic is about cookies (theft/taking/cookie/mother/etc.), it will be classified as control. One thing that we can do is to claim this task as directly task-controlled: that is, include no data except cookie and control for that difference Then, the model would\u0026rsquo;t be able to predict the result b/c the variation in topic won\u0026rsquo;t have influence. This is going to be prepared in the cookiepitt-7-7-bal* based on dataprep.py in commit 518dec82bb961c0a8ad02e3080289b56102aa1a2 train: super-durian-11 {bs: 72, epochs: 3, lr: 1e-5, length: 60, cookiepitt-7-7-windowed-bal.dat}\nCommentary: the model is no where near convergence Decision: multiplying the LR by 10 train: floral-sunset-12 {bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-bal.dat}\nCommentary: There we go. This seem to be more in line with what we see in Yuan 2021 Decision: ok, let\u0026rsquo;s elongate the actual content. Perhaps we can try a 7-element search instead? This is written as cookiepitt-7-7-*-long. Code based on 9e31f4bc13c4bfe193dcc049059c3d9bda46c8d0 train: sweet-plasma-13 {bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\nCommentary: underfitting Dropping batch size down to 64 to add more steps train: smart-river-14 {bs: 64, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\nCommentary: this finally fits to the specifications which Yuan 2021 have revealed Decision: running k-fold on this architecture k-fold: XgsP4FVS6ScFxCZKFJoVQ5. 
Code: 3870651ba71da8ddb3f481a7c3e046397a09d8b2\nJuly 8th Began the day with aligning the entirety of cookie for both control and dementia, named the dataset alignedpitt-7-8 in the RAW folder\nPer what we discussed, will add [pause] as a token to the model. Then, transcript the text such that it would contain normalized values to the pauses for pauses \u0026gt; 0.250 seconds. Therefore, the data would look like\n\u0026ldquo;hello my name is [pause] 262 [pause] bob\u0026rdquo;\nJuly 9th Created transcript.py, which coverts the data in raw to transcripts_pauses, which contains pause values \u0026gt; 250 msc and prepends them with [pause] tokens The code from above is taken from check.py in batchalign, used transcript.py from 7e19a4912cf0ad5d269c139da5ce018615495ebb to clean out the dataset; placed it in similar txt format to alignedpitt-7-8 Ran dataprep with window size of 5, created alignedpitt-7-8.bat and alignedpitt-7-8-windowed.bat as the dataprep file starting a new training run, with [pause] added as a new token, code 06846c6c95e6b1ccf17f0660c5da76aa50231567 train: golden-tree-16 {bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}\nSo realistically, we have the same F1 between the two, but pause encoding increased the accuracy of prediction yet dropped recall dramatically.\nAs a random check, let\u0026rsquo;s find out if simple fine-tuning (only training on classifier) would work, so:\ntrain: jumping-blaze-17 {bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\nCommentary: we did not like. start coverging Bumping LR by a factor of 10 train: vital-water-18 {bs: 64, epochs: 3, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\nCommentary: barely started converging, seem to be a local Training for 2 more epochs train: fiery-smoke-19 {bs: 64, epochs: 5, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. 
This time with only training classifier.\nCommentary: classic overfitting At this point, unlocking the model would probably be a good bet\ntrain: leafy-deluge-20 {bs: 64, epochs: 5, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\nTraining once again with code without locking, and bump LR down\nCommentary: classic the recall is slowly creeping up Decision: let\u0026rsquo;s go for 8 epochs train: royal-pond-21 {bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\nCommentary: let\u0026rsquo;s run k-fold now, with these settings.\nk-fold: QskZWfEsML52ofcQgGujE2. {bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\nOk, the base hypothesis from Yuan 2021 is very much confirmed here. The same training, same content, but pause encoding is very beneficial to the quality of the results. The results that they reported contained an ensemble data, which is in the high 80s; we can now continue doing something new as Yuan 2021\u0026rsquo;s conclusion is fairly achieved.\nWe can probably call the replication stage done, with no dramatically better effect.\nJuly 10th FluCalc! Leonid\u0026rsquo;s lovely new program can be an uberuseful feature extraction tool Let\u0026rsquo;s try using to build a new dataset, and network. FluCalc + Pause Encoding + Textual Data late fusion This is becoming alignedpitt-7-8-flucalc. As the program is currently under heavy development to include results from batchalign, we will specify version V 09-Jul-2022 11:00 for now. Done, the new data has the same i/o shape, but then has a bunch of features filtered for nulls which contains outputs from flucalc. Again, alignedpitt-7-8-flucalc from 4346fc07c4707343c507e32786b6769b6bd6fb49 does not take into account results from the %wor tier! 
July 11th ab19abd6486884141c9ab4e4e185255a77ae833e is the final-ish version of the late fusion model We are going to use alignedpitt-7-8-flucalc to start training train: royal-pond-21 {bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-flucalc-windowed.dat}.\nCommentary: overfitting Decision, droping lr by a factor of 10, also increasing length to 70 train: fallen-dust-25 {bs: 64, epochs: 8, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\nCommentary: overfitting Decision, droping lr by a factor of 10, dropping batch size to 32, training more to 10 train: dainty-meadow-26 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\nah\nAt this point, I think it\u0026rsquo;d be good to do some feature selection Let\u0026rsquo;s do a chi^2 correlation, and select 3 best features import pandas as pd DATA = \u0026#34;/Users/houliu/Documents/Projects/DBC/data/transcripts_pauses/alignedpitt-7-8-flucalc-windowed.bat\u0026#34; # read pickle df = pd.read_pickle(DATA) # test test_data = df[df.split==\u0026#34;test\u0026#34;] # also, get only train data df = df[df.split==\u0026#34;train\u0026#34;] df target mor_Utts ... split utterance trial sample ... 120-2 1049 1 -0.179084 ... train well the boy is getting some cookies handing o... 336-1 2492 0 -0.481740 ... train +oh okay, the the little girl askin(g) for the... 076-4 786 1 -0.179084 ... train well the little boy was looking at that cookie... 279-0 2250 1 1.980274 ... train kid\u0026#39;s stool turnin(g) [pause]540[pause] over s... 014-2 151 1 0.746355 ... train he\u0026#39;s fallin(g) off the chair down here or try... ... ... ... ... ... ... 208-0 1655 0 -0.481740 ... train the boy [pause]920[pause] is going after [paus... 492-0 2696 1 -0.179084 ... train oh yes quite a_lot the kid\u0026#39;s tryin(g) to get t... 497-1 2727 1 0.129396 ... train what else ? \u0026amp;uh the see the [pause]2400[pause]... 175-2 1535 0 0.863668 ... 
train the window is open you can see out the curtain... 279-0 2261 1 1.980274 ... train the other kid with [pause]610[pause] the stool... [2848 rows x 44 columns] Let\u0026rsquo;s slice out the bits which is labels, etc.\nin_data = df.drop(columns=[\u0026#34;utterance\u0026#34;, \u0026#34;target\u0026#34;, \u0026#34;split\u0026#34;]) in_data.columns Index([\u0026#39;mor_Utts\u0026#39;, \u0026#39;mor_Words\u0026#39;, \u0026#39;mor_syllables\u0026#39;, \u0026#39;#_Prolongation\u0026#39;, \u0026#39;%_Prolongation\u0026#39;, \u0026#39;#_Broken_word\u0026#39;, \u0026#39;%_Broken_word\u0026#39;, \u0026#39;#_Block\u0026#39;, \u0026#39;%_Block\u0026#39;, \u0026#39;#_PWR\u0026#39;, \u0026#39;%_PWR\u0026#39;, \u0026#39;#_PWR-RU\u0026#39;, \u0026#39;%_PWR-RU\u0026#39;, \u0026#39;#_WWR\u0026#39;, \u0026#39;%_WWR\u0026#39;, \u0026#39;#_mono-WWR\u0026#39;, \u0026#39;%_mono-WWR\u0026#39;, \u0026#39;#_WWR-RU\u0026#39;, \u0026#39;%_WWR-RU\u0026#39;, \u0026#39;#_mono-WWR-RU\u0026#39;, \u0026#39;%_mono-WWR-RU\u0026#39;, \u0026#39;Mean_RU\u0026#39;, \u0026#39;#_Phonological_fragment\u0026#39;, \u0026#39;%_Phonological_fragment\u0026#39;, \u0026#39;#_Phrase_repetitions\u0026#39;, \u0026#39;%_Phrase_repetitions\u0026#39;, \u0026#39;#_Word_revisions\u0026#39;, \u0026#39;%_Word_revisions\u0026#39;, \u0026#39;#_Phrase_revisions\u0026#39;, \u0026#39;%_Phrase_revisions\u0026#39;, \u0026#39;#_Pauses\u0026#39;, \u0026#39;%_Pauses\u0026#39;, \u0026#39;#_Filled_pauses\u0026#39;, \u0026#39;%_Filled_pauses\u0026#39;, \u0026#39;#_TD\u0026#39;, \u0026#39;%_TD\u0026#39;, \u0026#39;#_SLD\u0026#39;, \u0026#39;%_SLD\u0026#39;, \u0026#39;#_Total_(SLD+TD)\u0026#39;, \u0026#39;%_Total_(SLD+TD)\u0026#39;, \u0026#39;Weighted_SLD\u0026#39;], dtype=\u0026#39;object\u0026#39;) And the labels:\nout_data = df[\u0026#34;target\u0026#34;] out_data trial sample 120-2 1049 1 336-1 2492 0 076-4 786 1 279-0 2250 1 014-2 151 1 .. 
208-0 1655 0 492-0 2696 1 497-1 2727 1 175-2 1535 0 279-0 2261 1 Name: target, Length: 2848, dtype: int64 And now, let\u0026rsquo;s select 3 best features.\nfrom sklearn.feature_selection import SelectKBest, f_classif k_best_tool = SelectKBest(f_classif, k=3) k_best_tool.fit(in_data, out_data) best_features = k_best_tool.get_feature_names_out() best_features %_WWR %_mono-WWR %Total(SLD+TD) OD = other disfluencies; SLD = stuttering-like disfluencies; TD = total disfluencies; WWR = whole-word-repetition\nok, let\u0026rsquo;s select those features\ntrain: visionary-plasma-27 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}. Also with feature selection.\nhmmm.\nI am curious if we just ran something like a decision tree, what happens.\nin_features = df.drop(columns=[\u0026#34;utterance\u0026#34;, \u0026#34;target\u0026#34;, \u0026#34;split\u0026#34;]) test_features = test_data.drop(columns=[\u0026#34;utterance\u0026#34;, \u0026#34;target\u0026#34;, \u0026#34;split\u0026#34;]) in_targets = df[\u0026#34;target\u0026#34;] test_targets = test_data[\u0026#34;target\u0026#34;] seed the classifier, and fit.\nfrom sklearn.ensemble import RandomForestClassifier clsf = RandomForestClassifier() clsf.fit(in_features, in_targets) clsf.score(test_features, test_targets) 0.5932203389830508 OK nevermind. What about SVC?\nfrom sklearn.svm import SVC clsf = SVC() clsf.fit(in_features, in_targets) clsf.score(test_features, test_targets) 0.5932203389830508 Turns out, deep learning still does better. I\u0026rsquo;m thinking maybe the output is being faulty, say, for something like the loss function.\nDecision: switching activation to sigmoid.\ntrain: sunny-bush-31 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nOk let\u0026rsquo;s think about this. 
Decision: added batch normalization.\ntrain: autumn-jazz-32 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nThe model maybe overfitting on some simple heuristic; some basic statistics revealed that these variables are actually quite differently distributed.\nPerhaps we should increase the complexity of the model?\ntrain: fallen-microwave-33 {bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nJust to test, I am bumping the LR to 1e-5, just to see what happens. I am very confused.\ntrain: upbeat-flower-35 {bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\nThe more we work on this, the more overfit it gets. (I FORGOT A RELUCTIFIER)\na note {bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-11-flucalc-windowed.dat}, selected features\nPauses, no meta:\nPauses, meta:\nso effectively cointoss\nConcerns and Questions July 2nd pitt7-1/dementia/493-0 PAR tier \u0026ldquo;tell me everything you see going on in that picture\u0026rdquo; doesn\u0026rsquo;t seem to be labeled correctly; I am guessing that\u0026rsquo;s supposed to be INV? Has anyone tried to include investigator/participant cross-dialogue? July 4th Is the model overfitting on antiquated language? Is the model overfitting on cooke-theft on-topic-ness? July 11th LSTM only on pauses? ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdementiabank_acoustics_project/\"\u003eDementiaBank Acoustics Project\u003c/a\u003e is a working title for an acoustic-only challenge for AD detection. 
This document serves as the lab notebook for this project.\u003c/p\u003e\n\u003cp\u003eThis project will attempt to replicate some of the results of \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e and \u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e, but focusing on minimizing human involvement; we will first work on raw transcript classification with ERNIE (cutting all CHAT annotations), then introduce pause-encoding in a manner similar to \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e which is automated by MFA. Goal is to replicate the results of \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e/or even \u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e in a completely automated manner.\u003c/p\u003e\n\u003ch2 id=\"background-reading\"\u003eBackground Reading\u003c/h2\u003e\n\u003cp\u003eI first began by doing a literature survey on the \u003ca href=\"/posts/kbhadress_literature_survey/\"\u003eADReSS Challenge\u003c/a\u003e results published in the Frontiers AD special interest group issue.\u003c/p\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eAnd then, we wrote a proposal: \u003ca href=\"/posts/kbhdementiabank_acoustics_project_proposal/\"\u003eDementiaBank Acoustics Project Proposal\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"brainstoming\"\u003eBrainstoming\u003c/h2\u003e\n\u003cp\u003eMore notes from the meeting: \u003ca href=\"/posts/kbhdementiabank_acoustics_brainstoming/\"\u003eDementiaBank Acoustics Brainstoming\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"protocol-notes\"\u003eProtocol Notes\u003c/h2\u003e\n\u003ch3 id=\"july-1st\"\u003eJuly 1st\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBegan by moving a subsample of \u003ca href=\"https://dementia.talkbank.org/access/English/Pitt.html\"\u003ePitt\u003c/a\u003e\u0026rsquo;s \u003ca href=\"/posts/kbhctp/\"\u003eCookie Theft\u003c/a\u003e to 
\u003ccode\u003epitt-7-1\u003c/code\u003e in the \u003ccode\u003eraw\u003c/code\u003e data folder\u003c/li\u003e\n\u003cli\u003eRan \u003ccode\u003eflo\u003c/code\u003e on all collected samples. Arguments used are the same as that for \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, except \u003cem\u003ewe filter out the \u003ccode\u003eINV\u003c/code\u003e tier\u003c/em\u003e as we are detecting AD on patient and not investigator: so \u003ccode\u003eflo +d +ca +t* -tINV\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eMoved all collected samples (and changed extension to .txt) to the same sub-folder, but in \u003ccode\u003etranscripts_nodisfluency\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-2nd\"\u003eJuly 2nd\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCreated a dataprep script \u003ccode\u003edataprep.py\u003c/code\u003e which dumps a pickled copy of cleaned data to \u003ccode\u003etranscripts_nodisfluency/pitt-7-1.dat\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eCreated sliding windows of 5 pieces of dialogue concatenated, stored it in \u003ccode\u003etranscripts_nodisfluency/pitt-7-1-windowed.dat\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eUsed tencent/HuYong\u0026rsquo;s \u003ccode\u003enghuyong/ernie-2.0-en\u003c/code\u003e Ernie 2.0 model, the continuous language model from Baidu (Layer:12, Hidden:768, Heads:12)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-4th\"\u003eJuly 4th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFinalized training code. Selected base hyperparameters {bs: 8, epochs: 2, lr: 3e-3, length: 60}. 
Again, we are using Baidu\u0026rsquo;s \u003ccode\u003enghuyong/ernie-2.0-en\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eStarted training fastcalculator on \u003ccode\u003e24bc812\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-faithful-frog-3\"\u003etrain: faithful-frog-3\u003c/h4\u003e\n\u003cp\u003e{bs: 8, epochs: 2, lr: 3e-3, length: 60, pitt-7-1-windowed.dat }\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_19-20-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: LR could be too high, looking at the divergent loss behavior.\u003c/li\u003e\n\u003cli\u003eDecision: dropping bs to \u003ccode\u003e4\u003c/code\u003e and lr to \u003ccode\u003e1e-5\u003c/code\u003e, similar to previous transformers. Also training for 3 epochs.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-revived-disco-5\"\u003etrain: revived-disco-5\u003c/h4\u003e\n\u003cp\u003e{bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-1-windowed.dat }\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_19-28-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: quintessential overfitting\u003c/li\u003e\n\u003cli\u003eDecision:\n\u003cul\u003e\n\u003cli\u003eMade the corpus bigger\n\u003cul\u003e\n\u003cli\u003ecleaned the entire \u003ca href=\"https://dementia.talkbank.org/access/English/Pitt.html\"\u003ePitt\u003c/a\u003e corpus (\u003ccode\u003epitt-7-4\u003c/code\u003e in the \u003ccode\u003eraw\u003c/code\u003e folder) to become training data. 
Similar to \u003ccode\u003epitt-7-1\u003c/code\u003e, ran \u003ccode\u003eflo\u003c/code\u003e on all collected samples; arguments used are the same as that for \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, except \u003cem\u003ewe filter out the \u003ccode\u003eINV\u003c/code\u003e tier\u003c/em\u003e as we are detecting AD on patient and not investigator: so \u003ccode\u003eflo +d +ca +t* -tINV\u003c/code\u003e; the \u003ccode\u003eflo\u003c/code\u003e\u0026rsquo;d results are in \u003ccode\u003etranscripts_nodisfluency\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003ethe notable difference between the previous dataset \u003ccode\u003e7-1\u003c/code\u003e and the current one \u003ccode\u003e7-4\u003c/code\u003e is that the \u003ccode\u003e7-4\u003c/code\u003e are prepended numbered by the task (\u003ccode\u003ecookie/100-01.cha\u003c/code\u003e \u003ccode\u003e\u0026gt; =cookie-100-01.txt\u003c/code\u003e)\u003c/li\u003e\n\u003cli\u003eNew (full) Pitt data as prepared above is ran though the dataprep script as of \u003ccode\u003eb325514cfad79da82d7a519ed29ea19ed87b2be4\u003c/code\u003e (difference is that empty/dummy files are ignored), and pickled at \u003ccode\u003etranscripts_nodisfluency/pitt-7-4.dat\u003c/code\u003e and \u003ccode\u003etranscripts_nodisfluency/pitt-7-4-windowed.dat\u003c/code\u003e respectively.\u003c/li\u003e\n\u003cli\u003eFor new data, window size is still \u003ccode\u003e5\u003c/code\u003e, splitting \u003ccode\u003e10\u003c/code\u003e cases out for testing now instead of \u003ccode\u003e5\u003c/code\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-vocal-oath-6\"\u003etrain: vocal-oath-6\u003c/h4\u003e\n\u003cp\u003e{bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_20-20-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-07-04_20-35-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: high recall, low precision. Perhaps classes aren\u0026rsquo;t balanced?\n\u003cul\u003e\n\u003cli\u003eSpoiler alert: they are not.\u003c/li\u003e\n\u003cli\u003eAn inspection of data reveals that there is 3211 rows of dementia, 2397 rows of control\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDecision:\n\u003cul\u003e\n\u003cli\u003eCreated \u003ccode\u003epitt-7-4-bal\u003c/code\u003e and \u003ccode\u003epitt-7-4-windowed-bal\u003c/code\u003e series of data based on dataprep.py on \u003ccode\u003e703f79248a20fd7a13a5033ca2bf7f691f42c941\u003c/code\u003e. This version force-crops to make sure that the dementia and control indicies have the exact same length for each class.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-helpful-leaf-7\"\u003etrain: helpful-leaf-7\u003c/h4\u003e\n\u003cp\u003e{bs: 4, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_21-31-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-04_21-35-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eBeautiful. Question now is whether or not there is data leakage/external heuristics. It is a good time to do some \u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e. 
Getting this result without any disfluency calculations seems unlikely.\u003c/p\u003e\n\u003cp\u003eBut anyways, going to discuss these results as they seem to meet results we see in \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e, even without top-N ensemble; though this is one trial, \u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e may still show that we actually need it.\u003c/p\u003e\n\u003ch3 id=\"july-5th\"\u003eJuly 5th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBegan the day with creating the script k-fold validation; I originally hoped to exactly replicate the procedure of \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e for comparability, but, not sure how they got the actual result of a min/max range with \u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e on binary; therefore, we will instead create a 95% \u003ca href=\"/posts/kbhconfidence_interval/\"\u003econfidence interval\u003c/a\u003e analysis via a single-variable \u003ca href=\"/posts/kbht_statistics/\"\u003et test\u003c/a\u003e on standard k-fold validation. K=50\u003c/li\u003e\n\u003cli\u003eDuring one-off testing, another set of hyperparameters seems to work too: {bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}. 
As we have not begun tuning for hyperparameters, we are just going to use this set, K=50, for the first k-fold trial.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"k-fold-f4zvbgfdbaqvtvxemwzczd\"\u003ek-fold: F4ZVbGfdBAQvtvXemWZCZD\u003c/h4\u003e\n\u003cp\u003ecode: 55f77ff1dea03c3ed66967864dc52fd2c0062f23\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-05_13-22-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\nK = 50\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-05_14-25-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-05_14-26-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIt seems like the results we got is consistent and validates in a manner which we expect.\u003c/p\u003e\n\u003ch3 id=\"july-7th\"\u003eJuly 7th\u003c/h3\u003e\n\u003cp\u003eYesterday was a day filled with working on \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, but we are back now. Today, I aim to look into the heuristic that I identified yesterday by playing with the model, which is that it seems like the model prefers the use of long-focused sentences \u003cem\u003eabout\u003c/em\u003e cookies, so the heruistic its picking up is probably on-topicness.\u003c/p\u003e\n\u003cp\u003eI am going to first leverage the lovely \u003ccode\u003ecdpierse/transformers-interpret\u003c/code\u003e tool to help build some explainability by adding it to validate.py. Upon some human validation with random sampling, the model seem to do less well than I\u0026rsquo;d hoped. 
Running a train cycle with the new results/params seen above to see if it does better.\u003c/p\u003e\n\u003ch4 id=\"train-brisk-oath-10\"\u003etrain: brisk-oath-10\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-5, length: 60, pitt-7-4-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_11-39-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_11-48-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: It seems like the model is doing overall worse from validation data, but it does fairly well during test data.\u003c/li\u003e\n\u003cli\u003eDecision:\n\u003cul\u003e\n\u003cli\u003eI can fairly confidently claim that the model is just fitting on topic. As in, if the topic is about cookies (theft/taking/cookie/mother/etc.), it will be classified as control.\u003c/li\u003e\n\u003cli\u003eOne thing that we can do is to claim this task as directly task-controlled: that is, include \u003cstrong\u003eno\u003c/strong\u003e data except cookie and control for that difference\u003c/li\u003e\n\u003cli\u003eThen, the model would\u0026rsquo;t be able to predict the result b/c the variation in topic won\u0026rsquo;t have influence.\u003c/li\u003e\n\u003cli\u003eThis is going to be prepared in the \u003ccode\u003ecookiepitt-7-7-bal*\u003c/code\u003e based on \u003ccode\u003edataprep.py\u003c/code\u003e in commit \u003ccode\u003e518dec82bb961c0a8ad02e3080289b56102aa1a2\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-super-durian-11\"\u003etrain: super-durian-11\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-5, length: 60, cookiepitt-7-7-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_13-51-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: the model is \u003cem\u003eno where near 
convergence\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eDecision: multiplying the LR by 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-floral-sunset-12\"\u003etrain: floral-sunset-12\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_13-54-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_14-02-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: There we go. This seem to be more in line with what we see in \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eDecision: ok, let\u0026rsquo;s elongate the actual content. Perhaps we can try a 7-element search instead? This is written as \u003ccode\u003ecookiepitt-7-7-*-long\u003c/code\u003e. Code based on \u003ccode\u003e9e31f4bc13c4bfe193dcc049059c3d9bda46c8d0\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-sweet-plasma-13\"\u003etrain: sweet-plasma-13\u003c/h4\u003e\n\u003cp\u003e{bs: 72, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-05-28_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: underfitting\u003c/li\u003e\n\u003cli\u003eDropping batch size down to 64 to add more steps\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-smart-river-14\"\u003etrain: smart-river-14\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-4, length: 60, cookiepitt-7-7-windowed-long-bal.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-13-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-20-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: this finally fits to the specifications which 
\u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e have revealed\u003c/li\u003e\n\u003cli\u003eDecision: running k-fold on this architecture\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"k-fold-xgsp4fvs6scfxczkfjovq5-dot\"\u003ek-fold: XgsP4FVS6ScFxCZKFJoVQ5.\u003c/h4\u003e\n\u003cp\u003eCode: 3870651ba71da8ddb3f481a7c3e046397a09d8b2\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_15-30-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_16-18-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-07_16-20-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"july-8th\"\u003eJuly 8th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBegan the day with aligning the entirety of cookie for both control and dementia, named the dataset \u003ccode\u003ealignedpitt-7-8\u003c/code\u003e in the RAW folder\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePer what we discussed, will add [pause] as a token to the model. Then, transcript the text such that it would contain normalized values to the pauses for pauses \u0026gt; 0.250 seconds. 
Therefore, the data would look like\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;hello my name is [pause] 262 [pause] bob\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-9th\"\u003eJuly 9th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCreated transcript.py, which coverts the data in \u003ccode\u003eraw\u003c/code\u003e to \u003ccode\u003etranscripts_pauses\u003c/code\u003e, which contains pause values \u0026gt; 250 msc and prepends them with [pause] tokens\u003c/li\u003e\n\u003cli\u003eThe code from above is taken from \u003ccode\u003echeck.py\u003c/code\u003e in \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, used \u003ccode\u003etranscript.py\u003c/code\u003e from \u003ccode\u003e7e19a4912cf0ad5d269c139da5ce018615495ebb\u003c/code\u003e to clean out the dataset; placed it in similar txt format to \u003ccode\u003ealignedpitt-7-8\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eRan dataprep with window size of 5, created \u003ccode\u003ealignedpitt-7-8.bat\u003c/code\u003e and \u003ccode\u003ealignedpitt-7-8-windowed.bat\u003c/code\u003e as the dataprep file\u003c/li\u003e\n\u003cli\u003estarting a new training run, with \u003ccode\u003e[pause]\u003c/code\u003e added as a new token, code \u003ccode\u003e06846c6c95e6b1ccf17f0660c5da76aa50231567\u003c/code\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-golden-tree-16\"\u003etrain: golden-tree-16\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_11-48-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_11-51-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo realistically, we have the same F1 between the two, but pause encoding increased the accuracy of prediction yet dropped recall dramatically.\u003c/p\u003e\n\u003cp\u003eAs a random check, let\u0026rsquo;s find out if 
simple fine-tuning (only training on classifier) would work, so:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-07-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"train-jumping-blaze-17\"\u003etrain: jumping-blaze-17\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-09-22_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: we did not like. start coverging\u003c/li\u003e\n\u003cli\u003eBumping LR by a factor of 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-vital-water-18\"\u003etrain: vital-water-18\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 3, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. This time with only training classifier.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-11-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: barely started converging, seem to be a local\u003c/li\u003e\n\u003cli\u003eTraining for 2 more epochs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-fiery-smoke-19\"\u003etrain: fiery-smoke-19\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 5, lr: 1e-3, length: 60, alignedpitt-7-8-windowed.dat}. 
This time with only training classifier.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_12-14-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: classic overfitting\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAt this point, unlocking the model would probably be a good bet\u003c/p\u003e\n\u003ch4 id=\"train-leafy-deluge-20\"\u003etrain: leafy-deluge-20\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 5, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\u003c/p\u003e\n\u003cp\u003eTraining once again with code without locking, and bump LR down\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-14-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-17-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: classic the recall is slowly creeping up\u003c/li\u003e\n\u003cli\u003eDecision: let\u0026rsquo;s go for 8 epochs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-royal-pond-21\"\u003etrain: royal-pond-21\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-22-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_13-24-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCommentary: let\u0026rsquo;s run k-fold now, with these settings.\u003c/p\u003e\n\u003ch4 id=\"k-fold-qskzwfesml52ofcqgguje2-dot\"\u003ek-fold: QskZWfEsML52ofcQgGujE2.\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_14-06-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_16-08-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-07-09_16-08-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOk, the base hypothesis from \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e is very much confirmed here. The same training, same content, but pause encoding is very beneficial to the quality of the results. The results that they reported contained an ensemble data, which is in the high 80s; we can now continue doing something new as \u003ca href=\"/posts/kbhyuan_2021/\"\u003eYuan 2021\u003c/a\u003e\u0026rsquo;s conclusion is fairly achieved.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_16-15-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-09_18-26-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe can probably call the replication stage done, with no dramatically better effect.\u003c/p\u003e\n\u003ch3 id=\"july-10th\"\u003eJuly 10th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFluCalc! Leonid\u0026rsquo;s lovely new program can be an uberuseful feature extraction tool\u003c/li\u003e\n\u003cli\u003eLet\u0026rsquo;s try using to build a new dataset, and network. FluCalc + Pause Encoding + Textual Data \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eThis is becoming \u003ccode\u003ealignedpitt-7-8-flucalc\u003c/code\u003e. As the program is currently under heavy development to include results from \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e, we will specify version \u003ccode\u003eV 09-Jul-2022 11:00\u003c/code\u003e for now.\u003c/li\u003e\n\u003cli\u003eDone, the new data has the same i/o shape, but then has a bunch of features filtered for nulls which contains outputs from flucalc. 
Again, \u003ccode\u003ealignedpitt-7-8-flucalc\u003c/code\u003e from \u003ccode\u003e4346fc07c4707343c507e32786b6769b6bd6fb49\u003c/code\u003e does not take into account results from the \u003ccode\u003e%wor\u003c/code\u003e tier!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-11th\"\u003eJuly 11th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eab19abd6486884141c9ab4e4e185255a77ae833e\u003c/code\u003e is the final-ish version of the late fusion model\u003c/li\u003e\n\u003cli\u003eWe are going to use \u003ccode\u003ealignedpitt-7-8-flucalc\u003c/code\u003e to start training\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-royal-pond-21\"\u003etrain: royal-pond-21\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-4, length: 60, alignedpitt-7-8-flucalc-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-15-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: overfitting\u003c/li\u003e\n\u003cli\u003eDecision, droping lr by a factor of 10, also increasing length to 70\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-fallen-dust-25\"\u003etrain: fallen-dust-25\u003c/h4\u003e\n\u003cp\u003e{bs: 64, epochs: 8, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-37-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-38-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCommentary: overfitting\u003c/li\u003e\n\u003cli\u003eDecision, droping lr by a factor of 10, dropping batch size to 32, training more to 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"train-dainty-meadow-26\"\u003etrain: dainty-meadow-26\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-07-11_10-45-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_10-46-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eah\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAt this point, I think it\u0026rsquo;d be good to do some feature selection\u003c/li\u003e\n\u003cli\u003eLet\u0026rsquo;s do a chi^2 correlation, and select 3 best features\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eDATA\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;/Users/houliu/Documents/Projects/DBC/data/transcripts_pauses/alignedpitt-7-8-flucalc-windowed.bat\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# read pickle\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_pickle\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDATA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# test\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_data\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esplit\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;test\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# also, get only train data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esplit\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;train\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e target mor_Utts ... split utterance\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etrial sample ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e120-2 1049 1 -0.179084 ... train well the boy is getting some cookies handing o...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e336-1 2492 0 -0.481740 ... train +oh okay, the the little girl askin(g) for the...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e076-4 786 1 -0.179084 ... train well the little boy was looking at that cookie...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2250 1 1.980274 ... train kid\u0026#39;s stool turnin(g) [pause]540[pause] over s...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e014-2 151 1 0.746355 ... train he\u0026#39;s fallin(g) off the chair down here or try...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e208-0 1655 0 -0.481740 ... 
train the boy [pause]920[pause] is going after [paus...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e492-0 2696 1 -0.179084 ... train oh yes quite a_lot the kid\u0026#39;s tryin(g) to get t...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e497-1 2727 1 0.129396 ... train what else ? \u0026amp;uh the see the [pause]2400[pause]...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e175-2 1535 0 0.863668 ... train the window is open you can see out the curtain...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2261 1 1.980274 ... train the other kid with [pause]610[pause] the stool...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2848 rows x 44 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s slice out the bits which is labels, etc.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_data\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;utterance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;split\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_data\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eIndex([\u0026#39;mor_Utts\u0026#39;, \u0026#39;mor_Words\u0026#39;, \u0026#39;mor_syllables\u0026#39;, \u0026#39;#_Prolongation\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Prolongation\u0026#39;, \u0026#39;#_Broken_word\u0026#39;, \u0026#39;%_Broken_word\u0026#39;, \u0026#39;#_Block\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Block\u0026#39;, \u0026#39;#_PWR\u0026#39;, \u0026#39;%_PWR\u0026#39;, \u0026#39;#_PWR-RU\u0026#39;, \u0026#39;%_PWR-RU\u0026#39;, \u0026#39;#_WWR\u0026#39;, \u0026#39;%_WWR\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_mono-WWR\u0026#39;, \u0026#39;%_mono-WWR\u0026#39;, \u0026#39;#_WWR-RU\u0026#39;, \u0026#39;%_WWR-RU\u0026#39;, \u0026#39;#_mono-WWR-RU\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_mono-WWR-RU\u0026#39;, \u0026#39;Mean_RU\u0026#39;, \u0026#39;#_Phonological_fragment\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Phonological_fragment\u0026#39;, \u0026#39;#_Phrase_repetitions\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;%_Phrase_repetitions\u0026#39;, \u0026#39;#_Word_revisions\u0026#39;, \u0026#39;%_Word_revisions\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_Phrase_revisions\u0026#39;, \u0026#39;%_Phrase_revisions\u0026#39;, \u0026#39;#_Pauses\u0026#39;, \u0026#39;%_Pauses\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_Filled_pauses\u0026#39;, \u0026#39;%_Filled_pauses\u0026#39;, \u0026#39;#_TD\u0026#39;, \u0026#39;%_TD\u0026#39;, \u0026#39;#_SLD\u0026#39;, \u0026#39;%_SLD\u0026#39;,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u0026#39;#_Total_(SLD+TD)\u0026#39;, \u0026#39;%_Total_(SLD+TD)\u0026#39;, \u0026#39;Weighted_SLD\u0026#39;],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dtype=\u0026#39;object\u0026#39;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd the labels:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eout_data\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eout_data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etrial sample\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e120-2 1049 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e336-1 2492 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e076-4 786 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2250 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e014-2 151 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ..\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e208-0 1655 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e492-0 2696 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e497-1 2727 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e175-2 1535 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e279-0 2261 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eName: target, Length: 2848, dtype: 
int64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, let\u0026rsquo;s select 3 best features.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.feature_selection\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef_classif\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_best_tool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSelectKBest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef_classif\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_best_tool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ein_data\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eout_data\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebest_features\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_best_tool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eget_feature_names_out\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebest_features\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e%_WWR\u003c/td\u003e\n\u003ctd\u003e%_mono-WWR\u003c/td\u003e\n\u003ctd\u003e%\u003cem\u003eTotal\u003c/em\u003e(SLD+TD)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eOD = other disfluencies; SLD = stuttering-like disfluencies; TD = total disfluencies; WWR = whole-word-repetition\u003c/p\u003e\n\u003cp\u003eok, let\u0026rsquo;s select those features\u003c/p\u003e\n\u003ch4 id=\"train-visionary-plasma-27\"\u003etrain: visionary-plasma-27\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}. 
Also with feature selection.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_11-27-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_11-28-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ehmmm.\u003c/p\u003e\n\u003cp\u003eI am curious if we just ran something like a decision tree, what happens.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_features\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;utterance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;split\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_features\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_data\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;utterance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;split\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ein_targets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest_targets\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_data\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eseed the classifier, and fit.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.ensemble\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eRandomForestClassifier\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ein_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ein_targets\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etest_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_targets\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.5932203389830508\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOK nevermind. What about SVC?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esklearn.svm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSVC\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ein_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ein_targets\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclsf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escore\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etest_features\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest_targets\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.5932203389830508\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTurns out, deep learning still does better. I\u0026rsquo;m thinking maybe the output is being faulty, say, for something like the loss function.\u003c/p\u003e\n\u003cp\u003eDecision: switching activation to sigmoid.\u003c/p\u003e\n\u003ch4 id=\"train-sunny-bush-31\"\u003etrain: sunny-bush-31\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-35-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-37-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOk let\u0026rsquo;s think about this. 
Decision: added batch normalization.\u003c/p\u003e\n\u003ch4 id=\"train-autumn-jazz-32\"\u003etrain: autumn-jazz-32\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-50-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_12-50-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe model maybe overfitting on some simple heuristic; some basic statistics revealed that these variables are actually quite differently distributed.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-06-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePerhaps we should increase the complexity of the model?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-08-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"train-fallen-microwave-33\"\u003etrain: fallen-microwave-33\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-6, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cp\u003eJust to test, I am bumping the LR to 1e-5, just to see what happens. I am very confused.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-14-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"train-upbeat-flower-35\"\u003etrain: upbeat-flower-35\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-8-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-21-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_13-23-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe more we work on this, the more overfit it gets. 
(I FORGOT A RELUCTIFIER)\u003c/p\u003e\n\u003ch4 id=\"a-note\"\u003ea note\u003c/h4\u003e\n\u003cp\u003e{bs: 32, epochs: 10, lr: 1e-5, length: 70, alignedpitt-7-11-flucalc-windowed.dat}, selected features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_17-07-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePauses, no meta:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_17-09-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePauses, meta:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-11_17-08-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eso effectively cointoss\u003c/p\u003e\n\u003ch2 id=\"concerns-and-questions\"\u003eConcerns and Questions\u003c/h2\u003e\n\u003ch3 id=\"july-2nd\"\u003eJuly 2nd\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003epitt7-1/dementia/493-0\u003c/code\u003e PAR tier \u0026ldquo;tell me everything you see going on in that picture\u0026rdquo; doesn\u0026rsquo;t seem to be labeled correctly; I am guessing that\u0026rsquo;s supposed to be INV?\u003c/li\u003e\n\u003cli\u003eHas anyone tried to include investigator/participant cross-dialogue?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-4th\"\u003eJuly 4th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIs the model overfitting on antiquated language?\u003c/li\u003e\n\u003cli\u003eIs the model overfitting on cooke-theft on-topic-ness?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"july-11th\"\u003eJuly 11th\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eLSTM only on pauses?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdementiabank_acoustics_project/","tags":null,"title":"DementiaBank Acoustics Project"},{"categories":null,"contents":"Suppose you have two non mutually exclusive sets \\(E\\) or \\(F\\).\nDeMorgan\u0026rsquo;s Law:\n\\begin{equation} (E\\ and\\ F)^{C} = (E^{C}\\ or\\ F^{C}) \\end{equation}\n\\begin{equation} (E\\ or\\ F)^{C} = 
(E^{C}\\ and\\ F^{C}) \\end{equation}\n","html":"\u003cp\u003eSuppose you have two non \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e sets \\(E\\) or \\(F\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_16-36-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ca href=\"/posts/kbhdemorgan_s_law/\"\u003eDeMorgan\u0026rsquo;s Law\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(E\\ and\\ F)^{C} = (E^{C}\\ or\\ F^{C})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(E\\ or\\ F)^{C} = (E^{C}\\ and\\ F^{C})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdemorgan_s_law/","tags":null,"title":"DeMorgan's Law"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdepression/","tags":null,"title":"depression"},{"categories":null,"contents":"Derivat\n","html":"\u003cp\u003eDerivat\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderivational_words/","tags":null,"title":"derivational words"},{"categories":null,"contents":"a\n","html":"\u003cp\u003ea\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderivatives/","tags":null,"title":"derivative (finance)"},{"categories":null,"contents":"We will take \\(G(P(t),t)\\) to figure the price of an option, with \\(t\\) being time, strike price \\(X\\) (not introduced yet), and expiration date \\(T \u0026gt; t\\) on a stock with price \\(P(t)\\) at time \\(t\\).\nThis representation does something really important: it expresses \\(G\\) as a function of only the current stock price \\(P(t)\\).\n","html":"\u003cp\u003eWe will take \\(G(P(t),t)\\) to figure the price of an option, with \\(t\\) being time, strike price \\(X\\) (not introduced yet), and expiration date \\(T \u0026gt; t\\) on a stock with price \\(P(t)\\) at time \\(t\\).\u003c/p\u003e\n\u003cp\u003eThis representation does something really important: it expresses \\(G\\) as a 
function of only the \u003cem\u003ecurrent\u003c/em\u003e stock price \\(P(t)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderivative_pricing/","tags":null,"title":"Derivative Pricing"},{"categories":null,"contents":"A derived variable is a mapping between states to a set, usually the natural numbers. Remember, if we can, given a state and match it to a number and show a relation which would iterate the state and decrease the states\u0026rsquo; number. We can show that the algorithm terminates.\n","html":"\u003cp\u003eA derived variable is a mapping between states to a set, usually the natural numbers. Remember, if we can, given a state and match it to a number and show a relation which would iterate the state and decrease the states\u0026rsquo; number. We can show that the algorithm terminates.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhderived_variable/","tags":null,"title":"derived variable"},{"categories":null,"contents":"For a matrix, for instance, like:\n\\begin{equation} \\begin{bmatrix} a \u0026amp; b \\\\ c \u0026amp; d \\end{bmatrix} \\end{equation}\nWe wish to find the matrix\u0026rsquo;s determinant; we write it down as:\n\\begin{equation} \\begin{vmatrix} a \u0026amp; b \\\\ c \u0026amp; d \\end{vmatrix} \\end{equation}\ngeometric interpretation of determinants Geometrically, determinants are how matrices send a unit object after its mapping; i.e. 
how does it transform the area of a unit square.\ndeterminants can be computed along any axes You can pick any row or column as the \u0026ldquo;axes\u0026rdquo;, and expand the matrix along any direction\n","html":"\u003cp\u003eFor a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e, for instance, like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{bmatrix}\na \u0026amp; b \\\\\nc \u0026amp; d\n\\end{bmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe wish to find the matrix\u0026rsquo;s determinant; we write it down as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{vmatrix}\na \u0026amp; b \\\\\nc \u0026amp; d\n\\end{vmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"geometric-interpretation-of-determinants\"\u003egeometric interpretation of determinants\u003c/h2\u003e\n\u003cp\u003eGeometrically, determinants are how matrices send a unit object after its mapping; i.e. how does it transform the area of a unit square.\u003c/p\u003e\n\u003ch2 id=\"determinants-can-be-computed-along-any-axes\"\u003edeterminants can be computed along any axes\u003c/h2\u003e\n\u003cp\u003eYou can pick any row or column as the \u0026ldquo;axes\u0026rdquo;, and expand the matrix along any direction\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdeterminants/","tags":null,"title":"determinants"},{"categories":null,"contents":"key idea: let\u0026rsquo;s build a tree such that, after taking the action, the observation is deterministic. Therefore, you get a belief tree with no branching on observations.\nDESPOT trees We make an assumption, that the actual observation given are fixed given belief. That is:\n\\begin{equation} O(o|b,a) = 1 \\end{equation}\nfor some specific \\(o\\), everything else is \\(0\\) for every b,a.\nSample Scenarios To make such a tree, let\u0026rsquo;s sample of set of scenarios: sequences of actions and observations (because, given a belief and action, we assume observation is fixed. 
So, given an initial belief and an action, you will always go down a single \u0026ldquo;scenario\u0026rdquo;).\nBuild Tree Do a bunch of scenario sampling\nUse Tree Starting at where you are in terms of initial belief, greedily choose the \u0026ldquo;best action\u0026rdquo;.\nEvaluate Tree Average discounted future reward of the scenarios that relate to your starting states.\nDrawbacks Normal DESPOT is very very easy to overfit.\nRegularized DESPOT Build a DESPOT until depth \\(D\\), with \\(K\\) senarios, then, treat the resulting tree as a conditional plan, do bottom-up DP to optimize the plan.\nGiven a set of senarios \\(\\Phi\\), we write:\nAnytime DESPOT We build up the despot tree by maintaining upper and lower bounds on the value function, and try to expand on scenarios that would help us lower the gap between them.\nFirst, pick an upper and lower bound. The note on HSVI may help.\nBuild and Optimize Bounded DESPOT tree (see below) starting at \\(b_{0}\\) Compute the optimal policy using Regularized DESPOT expression above Execute best action Get observation \\(update(b,a,o)\\) Building Bounded DESPOT sample a set of \\(\\Phi\\) senarios at \\(b_0\\) insert \\(b_0\\) into the tree as the root node let \\(b \\leftarrow b_0\\), and, as time permits: tighten bounds on \\(b\\) back up the upper and lower bounds you found all the way up the tree as you would with HSVI Tightening Bounds if \\(b\\) is a leaf on the tree, then add new belief nodes for every action and every observation as children of \\(b\\).\nthen,\n\\begin{equation} b \\leftarrow update(b, a^{*}, o^{*}) \\end{equation}\nwhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\n\\(a^{*}\\): IE-MAX Heuristic, where the original upper-bound is \\(Q\\) \\(o^{*}\\): weighted excess uncertainty If the weighted excess uncertainty we got is non-zero, we repeat this tightening bounds process until it is zero.\nDESPOT Theoretic Guarantees It is near-optimal as a policy.\n","html":"\u003cp\u003ekey 
idea: let\u0026rsquo;s build a tree such that, after taking the action, the \u003cstrong\u003eobservation is deterministic\u003c/strong\u003e. Therefore, you get a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e tree with no branching on observations.\u003c/p\u003e\n\u003ch2 id=\"despot--kbhdespot-dot-md--trees\"\u003e\u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e trees\u003c/h2\u003e\n\u003cp\u003eWe make an assumption, that the actual observation given are fixed given belief. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nO(o|b,a) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some specific \\(o\\), everything else is \\(0\\) for every b,a.\u003c/p\u003e\n\u003ch3 id=\"sample-scenarios\"\u003eSample Scenarios\u003c/h3\u003e\n\u003cp\u003eTo make such a tree, let\u0026rsquo;s sample of set of \u003ca href=\"#despot--kbhdespot-dot-md--trees\"\u003escenarios\u003c/a\u003e: sequences of actions and observations (because, given a belief and action, we assume observation is fixed. 
So, given an initial belief and an action, you will always go down a single \u0026ldquo;scenario\u0026rdquo;).\u003c/p\u003e\n\u003ch3 id=\"build-tree\"\u003eBuild Tree\u003c/h3\u003e\n\u003cp\u003eDo a bunch of scenario sampling\u003c/p\u003e\n\u003ch3 id=\"use-tree\"\u003eUse Tree\u003c/h3\u003e\n\u003cp\u003eStarting at where you are in terms of initial belief, greedily choose the \u0026ldquo;best action\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"evaluate-tree\"\u003eEvaluate Tree\u003c/h3\u003e\n\u003cp\u003eAverage discounted future reward of the scenarios that relate to your starting states.\u003c/p\u003e\n\u003ch3 id=\"drawbacks\"\u003eDrawbacks\u003c/h3\u003e\n\u003cp\u003eNormal \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e is very very easy to overfit.\u003c/p\u003e\n\u003ch2 id=\"regularized-despot--kbhdespot-dot-md\"\u003eRegularized \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eBuild a \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e until depth \\(D\\), with \\(K\\) senarios, then, treat the resulting tree as a \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e, do bottom-up DP to optimize the plan.\u003c/p\u003e\n\u003cp\u003eGiven a set of senarios \\(\\Phi\\), we write:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_23-48-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"anytime-despot\"\u003eAnytime DESPOT\u003c/h2\u003e\n\u003cp\u003eWe build up the despot tree by maintaining upper and lower bounds on the value function, and try to expand on \u003ca href=\"#despot--kbhdespot-dot-md--trees\"\u003escenarios\u003c/a\u003e that would help us lower the gap between them.\u003c/p\u003e\n\u003cp\u003eFirst, pick an upper and lower bound. 
The note on \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e may help.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBuild and Optimize Bounded \u003ca href=\"#despot--kbhdespot-dot-md--trees\"\u003eDESPOT tree\u003c/a\u003e (see below) starting at \\(b_{0}\\)\u003c/li\u003e\n\u003cli\u003eCompute the optimal policy using \u003ca href=\"#regularized-despot--kbhdespot-dot-md\"\u003eRegularized DESPOT\u003c/a\u003e expression above\u003c/li\u003e\n\u003cli\u003eExecute best action\u003c/li\u003e\n\u003cli\u003eGet observation\u003c/li\u003e\n\u003cli\u003e\\(update(b,a,o)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"building-bounded-despot--kbhdespot-dot-md\"\u003eBuilding Bounded \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003esample a set of \\(\\Phi\\) senarios at \\(b_0\\)\u003c/li\u003e\n\u003cli\u003einsert \\(b_0\\) into the tree as the root node\u003c/li\u003e\n\u003cli\u003elet \\(b \\leftarrow b_0\\), and, as time permits:\n\u003col\u003e\n\u003cli\u003etighten bounds on \\(b\\)\u003c/li\u003e\n\u003cli\u003eback up the upper and lower bounds you found all the way up the tree as you would with \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"tightening-bounds\"\u003eTightening Bounds\u003c/h3\u003e\n\u003cp\u003eif \\(b\\) is a leaf on the tree, then add new belief nodes for every action and every observation as children of \\(b\\).\u003c/p\u003e\n\u003cp\u003ethen,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\leftarrow update(b, a^{*}, o^{*})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(a^{*}\\): \u003ca href=\"/posts/kbhhsvi/#ie-max-heuristic\"\u003eIE-MAX Heuristic\u003c/a\u003e, where the original upper-bound is \\(Q\\)\u003c/li\u003e\n\u003cli\u003e\\(o^{*}\\): \u003ca 
href=\"/posts/kbhhsvi/#weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eIf the \u003ca href=\"/posts/kbhhsvi/#weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/a\u003e we got is non-zero, we repeat this tightening bounds process until it is zero.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"despot-theoretic-guarantees\"\u003eDESPOT Theoretic Guarantees\u003c/h2\u003e\n\u003cp\u003eIt is near-optimal as a policy.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdespot/","tags":null,"title":"Determinized Sparse Partially Observable Tree"},{"categories":null,"contents":"A health concern relating to glucose and obesity.\n","html":"\u003cp\u003eA health concern relating to \u003ca href=\"\"\u003eglucose\u003c/a\u003e and \u003ca href=\"\"\u003eobesity.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiabetes/","tags":null,"title":"diabetes"},{"categories":null,"contents":"The diagonal of a square matrix consists of entries from the upper-left to the bottom-right\nFurthermore, because eigenvalues of a map are the entries of the diagonal of its upper-triangular matrix, and this is technically an upper-triangular matrix, the entries on the diagonal are exactly the eigenvalues of the Linear Map.\nproperties of diagonal matrices Suppose \\(V\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V)\\); and let \\(\\lambda_{1}, \u0026hellip; \\lambda_{m}\\) be distinct eigenvalues of \\(T\\). 
Then, the following are equivalent:\n\\(T\\) is diagonalizable \\(V\\) has a basis containing of eigenvectors of \\(T\\) there exists 1-dim subspaces \\(U_{1}, \u0026hellip;, U_{n}\\) of \\(V\\), each invariant under \\(T\\), such that \\(V = U_1 \\oplus \u0026hellip; \\oplus U_{n}\\) specifically, those \\(U\\) are eigenspaces; that is: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\) \\(\\dim V = \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T)\\) Proof:\n\\(1 \\implies 2\\), \\(2 \\implies 1\\)\nBy a moment\u0026rsquo;s fucking thought. hehe my notes my rule. jkjkjk\nBy calculation this is true; if you apply a standard basis to the matrix, it will simply be scaled; therefore, you can think of each slot as an eigenvector of \\(T\\).\n\\(2 \\implies 3\\)\nCreate \\(U_{j} = span(v_{j})\\) where \\(v_{j}\\) is the \\(j\\) eigenvalue of \\(T\\). Now, given \\(v_{j}\\) forms a basis, then, \\(v_1 \u0026hellip; v_{n}\\) not only is linearly independent but span. Therefore, each vector in \\(V\\) can be written uniquely by a linear combination of \\(v_{j}\\) (i.e. taking one \\(v_{j}\\) from each \\(U\\)). Hence, by definition, \\(U_{j}\\) form a direct sum to \\(V\\), hence showing \\(3\\).\n\\(3\\implies 2\\)\nNow, suppose you have a bunch of 1-d invariant subspaces \\(U_1 \u0026hellip; U_{n}\\) and they form a direct sum; because they are invariant subspaces, picking any \\(v_{j} \\in U_{j}\\) would be an eigenvector (because \\(T v_{j} = a_{j} v_{j}\\), as applying \\(T\\) is invariant so it\u0026rsquo;d return to the same space, just at a different place). Now, because they form a direct sum on \\(V\\), taking \\(v_{j}\\) from each \\(U_{j}\\) would result in a linearly independent list which\u0026mdash;because they sum up to $V$\u0026mdash;span all of \\(V\\) as each \\(U\\) is simply spanning by scaling \\(v_{j}\\). 
So, \\(v_{j}\\) together forms a basis.\n\\(2 \\implies 4\\)\nGiven \\(V\\) has a basis formed by eigenvectors of \\(T\\), the sum of all scales of eigenvectors in \\(T\\) can be written by the sum of all eigenspaces: that is \\(V = null(T-\\lambda_{1} I) + \u0026hellip; null(T- \\lambda_{m} I)\\) (recall that \\(E(\\lambda_{j}, T) = null(T- \\lambda_{j}I)\\)); as each eigenvalue for which the basis is formed can be found in each of these spaces, their sum would therefore equal to \\(V\\) as this sum represents an linear combination of eigenvectors in \\(T\\).\nNow, sum of eigenspaces form a direct sum so we have that the sum is direct sum. Hence: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\)\n\\(4 \\implies 5\\)\n\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) (see link for proof).\n\\(5 \\implies 2\\)\nWe are given that:\n\\begin{equation} \\dim V = \\dim E(\\lambda_{1}, T) + \\dots + \\dim E(\\lambda_{m}, T) \\end{equation}\nwhich means that taking a basis of each subspace provides a list of \\(\\dim n\\) long of eigenvectors. Now, each sub-list belonging to each space is linearly independent amongst themselves, and they will each be linearly independent against others as list of eigenvectors are linearly independent.\ni.e.: if \\(a_1v_1 + \u0026hellip; + a_{n} v_{n} = 0\\), we can treat each chunk from each eigenspace as \\(u\\), making \\(u_1 + \u0026hellip; u_{m} = 0\\); as they are eigenvectors from distinct eigenvalues, they are linearly independent so each will be \\(0\\). Now, collapsing it into the basis of each eigenspace, this makes \\(a_{j}\\) of the coefficients \\(0\\) as well.\nAnd all of this makes \\(v_1 \u0026hellip; v_{n}\\) a list of \\(\\dim n\\) long that is linearly independent; hence, it is a basis of \\(V\\), as desired. 
\\(\\blacksquare\\)\nenough eigenvalues implies diagonalizability If \\(T \\in \\mathcal{L}(V)\\) has \\(\\dim V\\) distinct eigenvalues, then \\(T\\) is diagonalizable.\nProof:\nlet \\(\\dim V = n\\). Pick eigenvectors \\(v_1 \u0026hellip; v_{n}\\) corresponding to distinct eigenvalues \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\). Now, eigenvectors coorsponding to distinct eigenvalues are linearly independent, and this is a list of \\(\\dim n\\) long that is linearly independent, so it is a basis of eigenvectors. Now, that means that the matrix coorsponding to \\(T\\) is diagonalizable.\nNOTE THAT THE CONVERSE IS NOT TRUE! as each eigenspace can have a dimension of more than 1 so 1 eigenvalue can generate two linearly independent eigenvectors belonging to it.\nFor instance:\n\\begin{equation} T (z_1, z_2, z_3) = (4z_1, 4z_2, 5z_3) \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e of a square matrix consists of entries from the upper-left to the bottom-right\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-20_20-16-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFurthermore, because \u003ca href=\"/posts/kbhupper_triangular_matrix/#eigenvalues-of-a-map-are-the-entries-of-the-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eeigenvalues of a map are the entries of the diagonal of its upper-triangular matrix\u003c/a\u003e, and this is \u003cem\u003etechnically\u003c/em\u003e an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e, the entries on the diagonal are exactly the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"properties-of-diagonal-matrices\"\u003eproperties of diagonal matrices\u003c/h2\u003e\n\u003cp\u003eSuppose 
\\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, and \\(T \\in \\mathcal{L}(V)\\); and let \\(\\lambda_{1}, \u0026hellip; \\lambda_{m}\\) be distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(T\\). Then, the following are equivalent:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(T\\) is diagonalizable\u003c/li\u003e\n\u003cli\u003e\\(V\\) has a basis containing of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(T\\)\u003c/li\u003e\n\u003cli\u003ethere exists 1-dim subspaces \\(U_{1}, \u0026hellip;, U_{n}\\) of \\(V\\), each invariant under \\(T\\), such that \\(V = U_1 \\oplus \u0026hellip; \\oplus U_{n}\\)\u003c/li\u003e\n\u003cli\u003especifically, those \\(U\\) are \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es; that is: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\dim V = \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\(1 \\implies 2\\), \\(2 \\implies 1\\)\u003c/p\u003e\n\u003cp\u003eBy a moment\u0026rsquo;s fucking thought. hehe my notes my rule. jkjkjk\u003c/p\u003e\n\u003cp\u003eBy calculation this is true; if you apply a standard basis to the matrix, it will simply be scaled; therefore, you can think of each slot as an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003e\\(2 \\implies 3\\)\u003c/p\u003e\n\u003cp\u003eCreate \\(U_{j} = span(v_{j})\\) where \\(v_{j}\\) is the \\(j\\) \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
Now, given \\(v_{j}\\) forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, then, \\(v_1 \u0026hellip; v_{n}\\) not only is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e but \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e. Therefore, each \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e in \\(V\\) can be written uniquely by a linear combination of \\(v_{j}\\) (i.e. taking one \\(v_{j}\\) from each \\(U\\)). Hence, by definition, \\(U_{j}\\) form a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e to \\(V\\), hence showing \\(3\\).\u003c/p\u003e\n\u003cp\u003e\\(3\\implies 2\\)\u003c/p\u003e\n\u003cp\u003eNow, suppose you have a bunch of 1-d \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es \\(U_1 \u0026hellip; U_{n}\\) and they form a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e; because they are \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es, picking any \\(v_{j} \\in U_{j}\\) would be an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e (because \\(T v_{j} = a_{j} v_{j}\\), as applying \\(T\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e so it\u0026rsquo;d return to the same space, just at a different place). Now, because they form a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e on \\(V\\), taking \\(v_{j}\\) from each \\(U_{j}\\) would result in a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list which\u0026mdash;because they sum up to $V$\u0026mdash;\u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e all of \\(V\\) as each \\(U\\) is simply spanning by scaling \\(v_{j}\\). 
So, \\(v_{j}\\) together forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\(2 \\implies 4\\)\u003c/p\u003e\n\u003cp\u003eGiven \\(V\\) has a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e formed by \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(T\\), the sum of all scales of eigenvectors in \\(T\\) can be written by the sum of all \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003es: that is \\(V = null(T-\\lambda_{1} I) + \u0026hellip; null(T- \\lambda_{m} I)\\) (recall that \\(E(\\lambda_{j}, T) = null(T- \\lambda_{j}I)\\)); as each \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e for which the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e is formed can be found in each of these spaces, their sum would therefore equal to \\(V\\) as this sum represents an \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es in \\(T\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003esum of eigenspaces form a direct sum\u003c/a\u003e so we have that the sum is \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. 
Hence: \\(V = E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T)\\)\u003c/p\u003e\n\u003cp\u003e\\(4 \\implies 5\\)\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/a\u003e (see link for proof).\u003c/p\u003e\n\u003cp\u003e\\(5 \\implies 2\\)\u003c/p\u003e\n\u003cp\u003eWe are given that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V = \\dim E(\\lambda_{1}, T) + \\dots + \\dim E(\\lambda_{m}, T)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means that taking a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e provides a list of \\(\\dim n\\) long of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es. 
Now, each sub-list belonging to each space is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e amongst themselves, and they will each be \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e against others as \u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003elist of eigenvectors are linearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ei.e.: if \\(a_1v_1 + \u0026hellip; + a_{n} v_{n} = 0\\), we can treat each chunk from each \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e as \\(u\\), making \\(u_1 + \u0026hellip; u_{m} = 0\\); as they are \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es from distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, they are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e so each will be \\(0\\). Now, collapsing it into the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e, this makes \\(a_{j}\\) of the coefficients \\(0\\) as well.\u003c/p\u003e\n\u003cp\u003eAnd all of this makes \\(v_1 \u0026hellip; v_{n}\\) a list of \\(\\dim n\\) long that is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e; hence, it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"enough-eigenvalues-implies-diagonalizability\"\u003eenough eigenvalues implies diagonalizability\u003c/h2\u003e\n\u003cp\u003eIf \\(T \\in \\mathcal{L}(V)\\) has \\(\\dim V\\) distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, then \\(T\\) is \u003ca href=\"#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003elet \\(\\dim V = n\\). Pick \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es \\(v_1 \u0026hellip; v_{n}\\) corresponding to distinct \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\). Now, \u003ca href=\"/posts/kbheigenvalue/#list-of-eigenvectors-are-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent\"\u003eeigenvectors coorsponding to distinct eigenvalues are linearly independent\u003c/a\u003e, and this is a list of \\(\\dim n\\) long that is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, so it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es. 
Now, that means that the matrix coorsponding to \\(T\\) is \u003ca href=\"#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eNOTE THAT THE CONVERSE IS NOT TRUE!\u003c/strong\u003e as each \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e can have a dimension of more than 1 so 1 \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e can generate two \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvectors\u003c/a\u003e belonging to it.\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT (z_1, z_2, z_3) = (4z_1, 4z_2, 5z_3)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiagonal_matrix/","tags":null,"title":"Diagonal Matrix"},{"categories":null,"contents":"A human Dialogue is a human to human interaction.\nturn each contributino to a conversation is called a \u0026ldquo;turn\u0026rdquo;, which contains a sentence, multiple sentences, or a single word\nturn-taking when to take the floor who takes the floor what happens during interruptions? barge-in barge-in is the property to allow the user to interrupt the system\nend-pointing deciding when a human has stopped talking, compute, etc.\nspeech-act each turn is actually an \u0026ldquo;action\u0026rdquo; performed by the user\nconstatives: committing the speaker to something being the case (answering, denying) directives: ask the addressee to do something (advising, ordering) com missives: commuting the speaker to future action (planning, voving) acknowledgement: reflecting the speaker\u0026rsquo;s attitude for something (apologizing, greeting, etc.) 
common ground grounding is the problem of acknowledging and reflecting the state of interaction; such as the elevator lighting up when pressed.\nacknowledgements and repeats is a way of grounding.\nwe need to make sure that the system acknowledges user interaction\nadjacency pairs question =\u0026gt; answer proposal =\u0026gt; acceptance/rejection complements =\u0026gt; downplay two-pair composition maybe interrupted or separated by a sub-dialogue\nconversational initiative Sometimes, such as during interviews, only one agent has initiative. This is not true most of the time during human-human interactions.\nmixed initiative is hard to achieve, usually we make dialogue systems as passive environments\u0026mdash;only user and system understanding.\n","html":"\u003cp\u003eA human \u003ca href=\"/posts/kbhdialogue/\"\u003eDialogue\u003c/a\u003e is a human to human interaction.\u003c/p\u003e\n\u003ch2 id=\"turn\"\u003eturn\u003c/h2\u003e\n\u003cp\u003eeach contributino to a conversation is called a \u0026ldquo;turn\u0026rdquo;, which contains a sentence, multiple sentences, or a single word\u003c/p\u003e\n\u003ch2 id=\"turn-taking\"\u003eturn-taking\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhen to take the floor\u003c/li\u003e\n\u003cli\u003ewho takes the floor\u003c/li\u003e\n\u003cli\u003ewhat happens during interruptions?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"barge-in\"\u003ebarge-in\u003c/h2\u003e\n\u003cp\u003ebarge-in is the property to allow the user to interrupt the system\u003c/p\u003e\n\u003ch2 id=\"end-pointing\"\u003eend-pointing\u003c/h2\u003e\n\u003cp\u003edeciding when a human has stopped talking, compute, etc.\u003c/p\u003e\n\u003ch2 id=\"speech-act\"\u003espeech-act\u003c/h2\u003e\n\u003cp\u003eeach \u003ca href=\"#turn\"\u003eturn\u003c/a\u003e is actually an \u0026ldquo;action\u0026rdquo; performed by the user\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003econstatives\u003c/strong\u003e: committing the speaker to 
something being the case (answering, denying)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edirectives\u003c/strong\u003e: ask the addressee to do something (advising, ordering)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ecom missives\u003c/strong\u003e: commuting the speaker to future action (planning, voving)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eacknowledgement\u003c/strong\u003e: reflecting the speaker\u0026rsquo;s attitude for something (apologizing, greeting, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"common-ground\"\u003ecommon ground\u003c/h2\u003e\n\u003cp\u003egrounding is the problem of acknowledging and reflecting the state of interaction; such as the elevator lighting up when pressed.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eacknowledgements\u003c/strong\u003e and repeats is a way of grounding.\u003c/p\u003e\n\u003cp\u003ewe need to make sure that the system acknowledges user interaction\u003c/p\u003e\n\u003ch2 id=\"adjacency-pairs\"\u003eadjacency pairs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003equestion =\u0026gt; answer\u003c/li\u003e\n\u003cli\u003eproposal =\u0026gt; acceptance/rejection\u003c/li\u003e\n\u003cli\u003ecomplements =\u0026gt; downplay\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003etwo-pair composition maybe interrupted or separated by a sub-dialogue\u003c/p\u003e\n\u003ch2 id=\"conversational-initiative\"\u003econversational initiative\u003c/h2\u003e\n\u003cp\u003eSometimes, such as during interviews, only one agent has initiative. 
This is not true most of the time during human-human interactions.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003emixed initiative\u003c/strong\u003e is hard to achieve, usually we make dialogue systems as passive environments\u0026mdash;only user and system understanding.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdialogue/","tags":null,"title":"Dialogue"},{"categories":null,"contents":"Dialogue State Architecture uses dialogue acts instead of simple frame filling to perform generation; used currently more in research.\nNLU: slot fillers to extract user\u0026rsquo;s utterance, using ML Dialogue State Tracker: maintains current state of dialogue Dialogue policy: decides what to do next (think GUS\u0026rsquo; policy: ask, fill, respond)\u0026mdash;but nowaday we have more complex dynamics NLG: respond dialogue acts dialogue acts combines speech-acts with underlying states\nslot filing we typically do this with BIO Tagging with a BERT just like NER Tagging, but we tag for frame slots.\nthe final \u0026lt;cls\u0026gt; token may also work to classify domain + intent.\ncorrections are hard folks sometimes uses hyperarticulation (\u0026ldquo;exaggerated prosody\u0026rdquo;) for correction, which trip up ASR\ncorrection acts may need to be detected explicitly as a speech act:\ndialogue policy we can choose over the last frame, agent and user utterances:\n\\begin{equation} A = \\arg\\max_{a} P(A|F_{i-1}, A_{i-1}, U_{i-1}) \\end{equation}\nwe can probably use a neural architecture to do this.\nwhether to confirm via ASR confirm:\n\\(\u0026lt;\\alpha\\): reject \\(\\geq \\alpha\\): confirm explicitly \\(\\geq \\beta\\): confirm implicitly \\(\\geq \\gamma\\): no need to confirm NLG once the speech act is determined, we need to actually go generate it: 1) choose some attributes 2) generate utterance\nWe typically want to delexicalize the keywords (Henry serves French food =\u0026gt; [restraunt] serves [cruisine] food), then run through NLG, then rehydrate 
with frame.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdialogue_state_architecture/\"\u003eDialogue State Architecture\u003c/a\u003e uses \u003ca href=\"#dialogue-acts\"\u003edialogue acts\u003c/a\u003e instead of simple \u003ca href=\"/posts/kbhgus/#frame\"\u003eframe\u003c/a\u003e filling to perform generation; used currently more in research.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-52-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eNLU\u003c/strong\u003e: slot fillers to extract user\u0026rsquo;s utterance, using ML\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDialogue State Tracker\u003c/strong\u003e: maintains current state of dialogue\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDialogue policy\u003c/strong\u003e: decides what to do next (think \u003ca href=\"/posts/kbhgus/\"\u003eGUS\u003c/a\u003e\u0026rsquo; policy: ask, fill, respond)\u0026mdash;but nowaday we have more complex dynamics\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eNLG\u003c/strong\u003e: respond\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dialogue-acts\"\u003edialogue acts\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#dialogue-acts\"\u003edialogue acts\u003c/a\u003e combines \u003ca href=\"/posts/kbhdialogue/#speech-act\"\u003espeech-act\u003c/a\u003es with underlying states\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-55-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-55-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"slot-filing\"\u003eslot filing\u003c/h2\u003e\n\u003cp\u003ewe typically do this with \u003ca href=\"/posts/kbhner_tagging/#bio-tagging\"\u003eBIO Tagging\u003c/a\u003e with a BERT just like \u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e, but we tag for frame slots.\u003c/p\u003e\n\u003cp\u003ethe final \u0026lt;cls\u0026gt; token may also work to 
classify domain + intent.\u003c/p\u003e\n\u003ch2 id=\"corrections-are-hard\"\u003ecorrections are hard\u003c/h2\u003e\n\u003cp\u003efolks sometimes uses \u003ca href=\"#corrections-are-hard\"\u003ehyperarticulation\u003c/a\u003e (\u0026ldquo;exaggerated prosody\u0026rdquo;) for correction, which trip up ASR\u003c/p\u003e\n\u003cp\u003ecorrection acts may need to be detected explicitly as a speech act:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_10-00-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"dialogue-policy\"\u003edialogue policy\u003c/h2\u003e\n\u003cp\u003ewe can choose over the last frame, agent and user utterances:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\arg\\max_{a} P(A|F_{i-1}, A_{i-1}, U_{i-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can probably use a neural architecture to do this.\u003c/p\u003e\n\u003cp\u003ewhether to confirm via ASR confirm:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\u0026lt;\\alpha\\): reject\u003c/li\u003e\n\u003cli\u003e\\(\\geq \\alpha\\): confirm explicitly\u003c/li\u003e\n\u003cli\u003e\\(\\geq \\beta\\): confirm implicitly\u003c/li\u003e\n\u003cli\u003e\\(\\geq \\gamma\\): no need to confirm\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"nlg\"\u003eNLG\u003c/h2\u003e\n\u003cp\u003eonce the speech act is determined, we need to actually go generate it: 1) choose some attributes 2) generate utterance\u003c/p\u003e\n\u003cp\u003eWe typically want to \u003cstrong\u003edelexicalize\u003c/strong\u003e the keywords (Henry serves French food =\u0026gt; [restraunt] serves [cruisine] food), then run through NLG, then rehydrate with frame.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdialogue_state_architecture/","tags":null,"title":"Dialogue State 
Architecture"},{"categories":null,"contents":"An\n","html":"\u003cp\u003eAn\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiffeomorphism/","tags":null,"title":"Diffeomorphism"},{"categories":null,"contents":"We have a function:\n\\begin{equation} |x|+|y|\\frac{dy}{dx} = \\sin \\left(\\frac{x}{n}\\right) \\end{equation}\nWe are to attempt to express the solution analytically and also approximate them.\nTo develop a basic approximate solution, we will leverage a recursive simulation approach.\nWe first set a constant \\(N\\) which in the \\(N\\) value which we will eventually vary.\nN = 0.5 We can get some values by stepping through \\(x\\) and \\(y\\) through which we can then figure \\(\\frac{dy}{dx}\\), namely, how the function evolves.\n# cache res = [] # number of steps steps = 1000 # seed values x = -5 y = 5 # step size step = 1/100 # for number of setps for _ in range(steps): # get the current equation and slope solution dydx = (sin(x/N)-abs(x))/abs(y) # append result res.append((x,y,dydx)) # apply the slope solution to iterate next y # step size is defined by `step` x += step y += dydx*step We have now a set of analytic solutions \\((x,y,\\frac{dy}{dx})\\). Let\u0026rsquo;s plot them!\nscatter_plot([i[0:2] for i in res]) Great, now we have a fairly non-specific but \u0026ldquo;correct\u0026rdquo; solution. We are now going to try to derive an analytic solution.\nWait\u0026hellip; That\u0026rsquo;s not the solution we got! But\u0026hellip; its close: the blue line simply need to be reflected across the \\(x\\) axis.\nIts actually fairly apparent why we will need this negative. 
We just declared that \\(y\\) was negative for that portion of the solution; the output of a square root could never be negative, so of course to achieve \\(y\\) being negative we have to take into account that square roots have a possible negative output as well.\nNice; now our analytical results agree with out numerical results.\n\\begin{equation} \\begin{cases} y\u0026gt;0 \u0026amp; y=\\sqrt{-2n\\cos\\left(\\frac{x}{n}\\right)-x\\vert x\\vert} +C \\\\ y\u0026lt;0 \u0026amp; y=-\\sqrt{2n\\cos\\left(\\frac{x}{n}\\right)+x\\vert x\\vert}+C \\end{cases} \\end{equation}\nMoving on to the result of the questions.\nSolution behavior The solution are unbounded and mostly decreasing. As \\(n\\in [-1,1]\\), the solution becomes unstable; a solution does not exist at \\(n=0\\).\nAt \\(n=0.5\\), a solution passes through \\((0,-1)\\).\n","html":"\u003cp\u003eWe have a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x|+|y|\\frac{dy}{dx} = \\sin \\left(\\frac{x}{n}\\right)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are to attempt to express the solution analytically and also approximate them.\u003c/p\u003e\n\u003cp\u003eTo develop a basic approximate solution, we will leverage a recursive simulation approach.\u003c/p\u003e\n\u003cp\u003eWe first set a constant \\(N\\) which in the \\(N\\) value which we will eventually vary.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can get some values by stepping through \\(x\\) and \\(y\\) through which we can then 
figure \\(\\frac{dy}{dx}\\), namely, how the function evolves.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# number of steps\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esteps\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# seed values\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e 
\u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# step size\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for number of setps\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esteps\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get the current equation and slope solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edydx\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# append result\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edydx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# apply the slope solution to 
iterate next y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# step size is defined by `step`\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edydx\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe have now a set of analytic solutions \\((x,y,\\frac{dy}{dx})\\). 
Let\u0026rsquo;s plot them!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003escatter_plot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-08-30_23-22-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eGreat, now we have a fairly non-specific but \u0026ldquo;correct\u0026rdquo; solution. We are now going to try to derive an analytic solution.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-01_22-01-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWait\u0026hellip; That\u0026rsquo;s not the solution we got! But\u0026hellip; its \u003cem\u003eclose\u003c/em\u003e: the blue line simply need to be reflected across the \\(x\\) axis.\u003c/p\u003e\n\u003cp\u003eIts actually fairly apparent why we will need this negative. 
We just declared that \\(y\\) was negative for that portion of the solution; the output of a square root could never be negative, so of course to achieve \\(y\\) being negative we have to take into account that square roots have a possible negative output as well.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-01_22-21-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNice; now our analytical results agree with out numerical results.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026gt;0 \u0026amp; y=\\sqrt{-2n\\cos\\left(\\frac{x}{n}\\right)-x\\vert x\\vert} +C \\\\\ny\u0026lt;0 \u0026amp; y=-\\sqrt{2n\\cos\\left(\\frac{x}{n}\\right)+x\\vert x\\vert}+C\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMoving on to the result of the questions.\u003c/p\u003e\n\u003ch2 id=\"solution-behavior\"\u003eSolution behavior\u003c/h2\u003e\n\u003cp\u003eThe solution are unbounded and mostly decreasing. As \\(n\\in [-1,1]\\), the solution becomes unstable; a solution does not exist at \\(n=0\\).\u003c/p\u003e\n\u003cp\u003eAt \\(n=0.5\\), a solution passes through \\((0,-1)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhchallenge_1/","tags":null,"title":"DiffEq: Challenge #1"},{"categories":null,"contents":"Generative Classifier A Generative Classifier builds a good model of a class, and use that to assign how \u0026ldquo;class-y\u0026rdquo; is that image.\nFor instance, to categorize cats vs. dogs, we build a cat model and dog model. 
To classify, then, we see if a particular image is more \u0026ldquo;cat-y\u0026rdquo; or \u0026ldquo;dog-y\u0026rdquo;.\nDiscriminative Classifier A Discriminative Classifier observes the differences between two classes, instead of trying to model each one.\n","html":"\u003ch2 id=\"generative-classifier\"\u003eGenerative Classifier\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#generative-classifier\"\u003eGenerative Classifier\u003c/a\u003e builds a good model of a class, and use that to assign how \u0026ldquo;class-y\u0026rdquo; is that image.\u003c/p\u003e\n\u003cp\u003eFor instance, to categorize cats vs. dogs, we build a cat model and dog model. To classify, then, we see if a particular image is more \u0026ldquo;cat-y\u0026rdquo; or \u0026ldquo;dog-y\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"discriminative-classifier\"\u003eDiscriminative Classifier\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#discriminative-classifier\"\u003eDiscriminative Classifier\u003c/a\u003e observes the differences between two classes, instead of trying to model each one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenerative_vs_discriminitive_classifier/","tags":null,"title":"Difference Between Logistic Regression and Naive Bayes"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdifference_equation/","tags":null,"title":"difference equation"},{"categories":null,"contents":"A Differential Equation is a function-valued algebreic equation whose unknown is an entire function \\(y(x)\\), where the equation involves a combination of derivatives $y(x), y\u0026rsquo;(x), \u0026hellip;$.\nSee Differential Equations Index\nand Uniqueness and Existance\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e is a function-valued \u003ca href=\"/posts/kbhalgebreic_equation/\"\u003ealgebreic equation\u003c/a\u003e whose unknown is an entire \u003ca 
href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(y(x)\\), where the equation involves a combination of derivatives $y(x), y\u0026rsquo;(x), \u0026hellip;$.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhodes_index/\"\u003eDifferential Equations Index\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eand \u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiffeq_intro/","tags":["Index"],"title":"Differential Equations"},{"categories":null,"contents":"Differential Equations. math53.stanford.edu.\nLogistics Prof. Rafe Mazzeo\nTAs Rodrigo Angelo Zhenyuan Zhang Assignments Pre-lecture reading + questionnaire PSets: Wed 9A 2 Midterms + 1 Final: wk 4 + wk 7, Thurs Evening; Tuesday 12:15 Review it suffices to study First Order ODEs because we can convert all higher order functions into a First Order ODEs homogeneous linear systems \\(y\u0026rsquo;=Ay\\) can be solved using eigenvalue, matrix exponentiation, etc. (recall that special cases exists where repeated eigenvalues, etc.) inhomogeneous systems \\(y\u0026rsquo; = Ay +f(t)\\) can be solved using intergrating factor or variation of parameters method general analysis of non-linear \\(y\u0026rsquo;=f(y)\\): we can talk about stationary solutions (1. linearize each \\(y_0\\) stationary solutions to figure local behavior 2. 
away from stationary solutions, use Lyapunov Functions to discuss), or liapenov functions for variable-coefficient ODEs, we decry sadness and Solving ODEs via power series Content Ordinary Differential Equations Partial Differential Equations What we want to understand:\nqualitative behaviors and values writing it as an elementary function is lame Linear ODEs SU-MATH53 JAN082024 SU-MATH53 JAN102024 SU-MATH53 JAN122024 SU-MATH53 JAN172024 SU-MATH53 JAN192024 SU-MATH53 JAN222024 Linear Second Order ODEs (and how to first-order them) SU-MATH53 JAN262024 SU-MATH53 JAN292024 SU-MATH53 FEB022024 SU-MATH53 FEB052024 Non-linear ODEs SU-MATH53 FEB072024 SU-MATH53 FEB092024 Linear Non-Constant Coefficient ODEs SU-MATH53 FEB122024 SU-MATH53 FEB142024 SU-MATH53 FEB162024 Fourier Series SU-MATH53 FEB212024 SU-MATH53 FEB232024 SU-MATH53 FEB252024 SU-MATH53 FEB282024 SU-MATH53 MAR042024 Fourier Transform SU-MATH53 MAR062024 SU-MATH53 MAR082024 SU-MATH53 MAR112024 Midterm Sheet SU-MATH53 Midterm Sheet Other Stuff Bessel\u0026rsquo;s Equation ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e. math53.stanford.edu.\u003c/p\u003e\n\u003ch2 id=\"logistics\"\u003eLogistics\u003c/h2\u003e\n\u003cp\u003eProf. 
Rafe Mazzeo\u003c/p\u003e\n\u003ch3 id=\"tas\"\u003eTAs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRodrigo Angelo\u003c/li\u003e\n\u003cli\u003eZhenyuan Zhang\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"assignments\"\u003eAssignments\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePre-lecture reading + questionnaire\u003c/li\u003e\n\u003cli\u003ePSets: Wed 9A\u003c/li\u003e\n\u003cli\u003e2 Midterms + 1 Final: wk 4 + wk 7, Thurs Evening; Tuesday 12:15\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"review\"\u003eReview\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eit suffices to study \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e because we can convert all \u003ca href=\"/posts/kbhgeneric/#higher-order-functions\"\u003ehigher order functions\u003c/a\u003e into a \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ehomogeneous linear systems \\(y\u0026rsquo;=Ay\\) can be solved using \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e, \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e, etc. (recall that \u003cstrong\u003especial cases exists\u003c/strong\u003e where repeated eigenvalues, etc.)\u003c/li\u003e\n\u003cli\u003einhomogeneous systems \\(y\u0026rsquo; = Ay +f(t)\\) can be solved using \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eintergrating factor\u003c/a\u003e or \u003ca href=\"/posts/kbhnon_homogeneous_linear_differential_equation/#variation-of-parameters-method\"\u003evariation of parameters method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003egeneral analysis of non-linear \\(y\u0026rsquo;=f(y)\\): we can talk about stationary solutions (1. linearize each \\(y_0\\) stationary solutions to figure local behavior 2. 
away from stationary solutions, use \u003ca href=\"/posts/kbhnon_linear_ode/#monotone-function\"\u003eLyapunov Function\u003c/a\u003es to discuss), or liapenov functions\u003c/li\u003e\n\u003cli\u003efor variable-coefficient ODEs, we decry sadness and \u003ca href=\"/posts/kbhsu_math53_feb122024/#solving-odes-via\"\u003eSolving ODEs via power series\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"content\"\u003eContent\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eOrdinary Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePartial Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat we want to understand:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003equalitative behaviors and values\u003c/li\u003e\n\u003cli\u003ewriting it as an elementary function is lame\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"linear-odes\"\u003eLinear ODEs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan082023/\"\u003eSU-MATH53 JAN082024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan102023/\"\u003eSU-MATH53 JAN102024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan122023/\"\u003eSU-MATH53 JAN122024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan172024/\"\u003eSU-MATH53 JAN172024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan192023/\"\u003eSU-MATH53 JAN192024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan202024/\"\u003eSU-MATH53 JAN222024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"linear-second-order-odes--and-how-to-first-order-them\"\u003eLinear Second Order ODEs (and how to first-order them)\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsu_math53_jan262023/\"\u003eSU-MATH53 JAN262024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_jan292024/\"\u003eSU-MATH53 JAN292024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb022024/\"\u003eSU-MATH53 FEB022024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb052024/\"\u003eSU-MATH53 FEB052024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"non-linear-odes\"\u003eNon-linear ODEs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb072024/\"\u003eSU-MATH53 FEB072024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb092024/\"\u003eSU-MATH53 FEB092024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"linear-non-constant-coefficient-odes\"\u003eLinear Non-Constant Coefficient ODEs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb122024/\"\u003eSU-MATH53 FEB122024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb142024/\"\u003eSU-MATH53 FEB142024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb162024/\"\u003eSU-MATH53 FEB162024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"fourier-series\"\u003eFourier Series\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb212024/\"\u003eSU-MATH53 FEB212024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb232024/\"\u003eSU-MATH53 FEB232024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb252024/\"\u003eSU-MATH53 FEB252024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb282024/\"\u003eSU-MATH53 FEB282024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar042024/\"\u003eSU-MATH53 MAR042024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"fourier-transform\"\u003eFourier Transform\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar062024/\"\u003eSU-MATH53 MAR062024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar082024/\"\u003eSU-MATH53 MAR082024\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_mar112024/\"\u003eSU-MATH53 MAR112024\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"midterm-sheet\"\u003eMidterm Sheet\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_midterm_sheet/\"\u003eSU-MATH53 Midterm Sheet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-stuff\"\u003eOther Stuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbessel_s_equation/\"\u003eBessel\u0026rsquo;s Equation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhodes_index/","tags":null,"title":"Differential Equations Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdifferential_privacy/","tags":null,"title":"Differential Privacy"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdiffusion_map/","tags":null,"title":"diffusion map"},{"categories":null,"contents":"What if we can use diffusion models to generate Laproscopic surgeries to train surgeons?\nProblem Asking dalle to just \u0026ldquo;generate a Laproscopic surgery\u0026rdquo; is not going to work. It will give you cartoons.\nApproach text problem formulation: \u0026ldquo;grasper grasp gallbladder\u0026rdquo; encode text into latents do diffusion with late fusion of latents Data: Cholec T-45\nWeighting Scoring: Perception Prioritized Weighting + Prioritization for Signal-to-Noise\n(Ho et al, 2020)\nText \u0026ldquo;[subject] [verb] [object] [surgical phase]\u0026rdquo;\n\u0026ldquo;grasper grasp gallbladder in preparation\u0026rdquo;\nModel Elucidated Imagen. 
Dall-E is very bad; Imagen-class models works better because (why?).\nAdded Value to Physicians using Generated Images Train a Classifier Rendevouz Network: train a discriminator for procedure based on data augmented with generated images; 5% improvement.\nMedical Expert Survey \u0026ldquo;yo mr doctor man can you spot which one of these are generated?\u0026rdquo;\n45% success rate.\n","html":"\u003cp\u003eWhat if we can use diffusion models to generate Laproscopic surgeries to train surgeons?\u003c/p\u003e\n\u003ch2 id=\"problem\"\u003eProblem\u003c/h2\u003e\n\u003cp\u003eAsking dalle to just \u0026ldquo;generate a Laproscopic surgery\u0026rdquo; is not going to work. It will give you cartoons.\u003c/p\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003etext problem formulation\u003c/strong\u003e: \u0026ldquo;grasper grasp gallbladder\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eencode text into latents\u003c/li\u003e\n\u003cli\u003edo diffusion with late fusion of latents\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eData: Cholec T-45\u003c/p\u003e\n\u003ch3 id=\"weighting\"\u003eWeighting\u003c/h3\u003e\n\u003cp\u003eScoring: Perception Prioritized Weighting + Prioritization for Signal-to-Noise\u003c/p\u003e\n\u003cp\u003e(Ho et al, 2020)\u003c/p\u003e\n\u003ch3 id=\"text\"\u003eText\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;[subject] [verb] [object] [surgical phase]\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;grasper grasp gallbladder in preparation\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"model\"\u003eModel\u003c/h3\u003e\n\u003cp\u003eElucidated Imagen. 
Dall-E is very bad; Imagen-class models works better because (why?).\u003c/p\u003e\n\u003ch2 id=\"added-value-to-physicians-using-generated-images\"\u003eAdded Value to Physicians using Generated Images\u003c/h2\u003e\n\u003ch3 id=\"train-a-classifier\"\u003eTrain a Classifier\u003c/h3\u003e\n\u003cp\u003eRendevouz Network: train a discriminator for procedure based on data augmented with generated images; 5% improvement.\u003c/p\u003e\n\u003ch3 id=\"medical-expert-survey\"\u003eMedical Expert Survey\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;yo mr doctor man can you spot which one of these are generated?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e45% success rate.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiffusion_models_for_laproscopic_surgeries/","tags":null,"title":"Diffusion Models for Laproscopic Surgeries"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdigital_origin_for_life/","tags":null,"title":"Digital Origin for Life"},{"categories":null,"contents":"The dimension of a vector space is the length of any basis in the vector space. It is denoted as \\(\\dim V\\).\nadditional information See also finite-dimensional vector space and infinite-demensional vector space\ndimension of subspace is smaller or equal to that of its parent If we have a finite-dimensional \\(V\\) and a subspace thereof \\(U\\), then \\(\\dim U \\leq \\dim V\\).\nFirstly, the every subspace of a finite-dimensional vector space is a finite-dimensional vector space is itself a finite-dimensional vector space. Therefore, it has a finite dimension.\nThen, we will simply think of the basis of \\(U\\) as an linearly independent list in \\(V\\); and of course, the basis of \\(V\\) spans \\(V\\). As length of linearly-independent list \\(\\leq\\) length of spanning list, we have that length of basis of \\(U \\leq\\) length of basis of \\(V\\).\nThis makes \\(\\dim U \\leq \\dim V\\), as desired. 
\\(\\blacksquare\\)\nlists of right length are a basis These are two results that tell us if you are given a list of list of right length, one condition (spanning or linear independence) can tell you that they are a basis. It\u0026rsquo;s also known (as a John McHugh special:tm:) as the Half Is Good Enough theorems.\nlinearly independent list of length dim V are a basis of V Begin with an linearly independent list in \\(V\\) of length \\(\\dim V\\). We aim to extend this list into a basis of \\(V\\).\nAs we know all basis in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no extension is needed to form a basis.\nAs every linearly independent list expends to a basis, we conclude that the list is already a basis of \\(V\\), as desired \\(\\blacksquare\\).\nspanning list of length of dim V are a basis of V Begin with a spanning list in \\(V\\) of length \\(\\dim V\\). We aim to reduce this list into a basis of \\(V\\).\nAs we know all basis in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no reduction is needed to form a basis.\nAs all spanning lists contains a basis of which you are spanning, we conclude that the list is a basis of \\(V\\), as desired \\(\\blacksquare\\).\ndimension of sums See dimension of sums\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e is the length of any \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in the \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. 
It is denoted as \\(\\dim V\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e and \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"dimension-of-subspace-is-smaller-or-equal-to-that-of-its-parent\"\u003edimension of subspace is smaller or equal to that of its parent\u003c/h3\u003e\n\u003cp\u003eIf we have a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e \\(V\\) and a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e thereof \\(U\\), then \\(\\dim U \\leq \\dim V\\).\u003c/p\u003e\n\u003cp\u003eFirstly, the \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003eevery subspace of a finite-dimensional vector space is a finite-dimensional vector space\u003c/a\u003e is itself a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e. Therefore, it has a finite dimension.\u003c/p\u003e\n\u003cp\u003eThen, we will simply think of the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U\\) as an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\); and of course, the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). 
As \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e, we have that length of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U \\leq\\) length of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eThis makes \\(\\dim U \\leq \\dim V\\), as desired. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"lists-of-right-length-are-a-basis--kbhbasis-dot-md\"\u003elists of right length are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThese are two results that tell us if you are given a list of \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of right length, one condition (\u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e or \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e) can tell you that they are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e. It\u0026rsquo;s also known (as a John McHugh special:tm:) as the Half Is Good Enough theorems.\u003c/p\u003e\n\u003ch4 id=\"linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/h4\u003e\n\u003cp\u003eBegin with an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\) of length \\(\\dim V\\). 
We aim to extend this list into a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs we know all \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no extension is needed to form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery linearly independent list expends to a basis\u003c/a\u003e, we conclude that the list is already a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), as desired \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch4 id=\"spanning-list-of-length-of-dim-v-are-a-basis-of-v\"\u003espanning list of length of dim V are a basis of V\u003c/h4\u003e\n\u003cp\u003eBegin with a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list in \\(V\\) of length \\(\\dim V\\). 
We aim to reduce this list into a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eAs we know all \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\) must have length \\(\\dim V\\), and the list is already length \\(\\dim V\\), no reduction is needed to form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e, we conclude that the list is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), as desired \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch3 id=\"dimension--kbhdimension-dot-md--of-sums\"\u003e\u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of sums\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhsum_of_subsets/#id-07b04334-5ae7-457c-bc3e-92feed8fc2cc-dimension-of-sums\"\u003edimension of sums\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdimension/","tags":null,"title":"dimension"},{"categories":null,"contents":"Direct Sampling is the act in probability to sample what you want from the distribution. This is often used when actual inference impossible. It involves. well. 
sampling from the distribution to compute a conditional probability that you want.\nIt basically involves invoking the Frequentist Definition of Probability without letting \\(n \\to \\infty\\), instead just sampling some \\(n \u0026lt; \\infty\\) and dividing the event space by your sample space.\nSo, for instance, to compute inference on \\(b^{1}\\) given observations \\(d^{1}c^{1}\\), we can write:\n\\begin{equation} P(b^{1} | d^{1}, c^{1}) = \\frac{P(b^{1}, d^{1}, c^{1})}{P(d^{1})P(c^{1})} \\approx \\frac{\\sum_{i}^{} b^{i} = 1 \\land d^{i} = i \\land c^{i} = 1}{\\sum_{i}^{} d^{i} =1 \\land c^{i} = 1} \\end{equation}\nwhere \\(a^{i}\\) is the \\(i\\) th sample.\nDirect Sampling a Baysian Network We first obtain a topological sort of the system. For a graph with \\(n\\) nodes, we then obtain a list \\(X_{1:n}\\).\nWe can then obtain a Direct Sampling via simply sampling from this list. Whenever we need to sample some kind of conditional probability, we know that for every \\(k_{i}\\) we need to sample from, its parent conditions would have already been sampled because we are sampling in order of a topological sort so we can just sample the values from a subset of the conditioned set.\nLikelihood Weighted Sampling Likelihood Weighted Sampling is a change to the Direct Sampling approach which deals with the fact that Direct Sampling may oversample conditional probabilities as it is sampling sub-nodes an equal amount.\nIt is particularly useful when our priors are unlikely.\nTo do this, we first perform Direct Sampling as how you would normally. Now, say we get \\(D=1\\), \\(C=1\\), \\(E=1\\) for the Baysian Network presented below, the actual value we return would be whatever \\(P(D|E) P(C|E)\\).\nSee an example here.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e is the act in \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e to sample what you want from the distribution. 
This is often used when actual \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e impossible. It involves. well. sampling from the distribution to compute a \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e that you want.\u003c/p\u003e\n\u003cp\u003eIt basically involves invoking the \u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/a\u003e without letting \\(n \\to \\infty\\), instead just sampling some \\(n \u0026lt; \\infty\\) and dividing the event space by your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo, for instance, to compute \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e on \\(b^{1}\\) given observations \\(d^{1}c^{1}\\), we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(b^{1} | d^{1}, c^{1}) = \\frac{P(b^{1}, d^{1}, c^{1})}{P(d^{1})P(c^{1})} \\approx \\frac{\\sum_{i}^{} b^{i} = 1 \\land d^{i} = i \\land c^{i} = 1}{\\sum_{i}^{} d^{i} =1 \\land c^{i} = 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a^{i}\\) is the \\(i\\) th sample.\u003c/p\u003e\n\u003ch2 id=\"direct-sampling-a-baysian-network--kbhbaysian-network-dot-md\"\u003eDirect Sampling a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWe first obtain a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e of the system. For a graph with \\(n\\) nodes, we then obtain a list \\(X_{1:n}\\).\u003c/p\u003e\n\u003cp\u003eWe can then obtain a \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e via simply sampling from this list. 
Whenever we need to sample some kind of \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e, we know that for every \\(k_{i}\\) we need to sample from, its parent conditions would have already been sampled because we are sampling in order of a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e so we can just sample the values from a subset of the conditioned set.\u003c/p\u003e\n\u003ch2 id=\"likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e is a change to the \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e approach which deals with the fact that \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e may oversample \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probabilities\u003c/a\u003e as it is sampling sub-nodes an equal amount.\u003c/p\u003e\n\u003cp\u003eIt is particularly useful when our priors are unlikely.\u003c/p\u003e\n\u003cp\u003eTo do this, we first perform \u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e as how you would normally. Now, say we get \\(D=1\\), \\(C=1\\), \\(E=1\\) for the \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e presented below, the actual value we return would be whatever \\(P(D|E) P(C|E)\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-28_10-20-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSee \u003ca href=\"/posts/kbhapproximate_inference/#example\"\u003ean example here\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirect_sampling/","tags":null,"title":"Direct Sampling"},{"categories":null,"contents":"A direct sum is a sum of subspaces (not just subsets!!) 
where there\u0026rsquo;s only one way to represent each element.\nconstituents subspaces of \\(V\\) named \\(U_1, \\dots, U_{m}\\)\nrequirements The sum of subsets of \\(U_1+\\dots+U_{m}\\) is called a direct sum IFF:\neach element in \\(U_1+\\dots +U_{m}\\) can only be written in one way as a sum \\(u_1 +\\dots +u_{m}\\) (as in, they are linearly independent?)\nWe use \\(\\oplus\\) to represent direct sum.\nadditional information why is it called a direct sum? Something is not a direct sum if any of its components can be described using the others. Its kind of line linear independence but! on entire spaces.\na sum of subsets is a direct sum IFF there is only one way to write \\(0\\) Given \\(U_1, \\dots, U_{m}\\) are subspaces of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is a direct sum IFF the only way to write \\(0\\) as a sum \\(u_1 +\\dots +u_{m}\\) is by taking each element to \\(0\\).\nProof:\nif\u0026mdash; If some \\(U_1 + \\dots +U_{m}\\) is a direct sum, definitionally there is only one way to write \\(0\\). And you can always write \\(0\\) by taking all the constituents to \\(0\\) as they are subspaces, so the additive identity exists.\nonly if\u0026mdash; We are given that there is only one way to write \\(0\\), that:\n\\begin{equation} 0 = u_1+ u_2+ \\dots+ u_{m}: u_j \\in U_{j} \\end{equation}\nas \\(U_{j}\\) are all subspaces, and the additive identity exists, we can say that \\(u_1=u_2=\\dots =0\\).\nAssume for the sake of contradiction that \\(U_1 + \\dots +U_{m}\\) is not a direct sum. 
Therefore:\n\\begin{equation} \\exists\\ v_1 = u_1+u_2+\\dots + u_{m}: u_{j} \\in U_{j} \\end{equation}\nand\n\\begin{equation} \\exists\\ v_1 = w_1+w_2+\\dots + w_{m}: w_{j} \\in U_{j} \\end{equation}\n\u0026ldquo;there are two unique representations of a vector given the sum of subsets\u0026rdquo;\nSubtracting these representations, then:\n\\begin{equation} (v_1-v_1) = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j} \\end{equation}\nFinally, then:\n\\begin{equation} 0 = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j} \\end{equation}\nWe have established that each slot that makes up this particular sum \\(=0\\). Therefore, \\(u_{i}-w_{i} = 0\\). This means $ui=wi$\u0026mdash;there are no two unique representations of \\(v_{1}\\). Reaching contradiction. \\(\\blacksquare\\)\na sum of subsets is only a direct sum IFF their intersection is set containing \\(0\\) Take \\(U\\) and \\(W\\), two subspaces of \\(V\\). \\(U+V\\) is a direct sum IFF \\(U \\cap W = \\{0\\}\\).\nProof:\nif\u0026mdash; Suppose \\(U+V\\) is a direct sum. \\(\\forall v \\in U \\cap V\\), as \\(v\\) is equal to itself, we have that:\n\\begin{equation} 0 = v+(-v) \\end{equation}\nwhere, \\(v\\) is in \\(U\\) and \\(-v\\) is in \\(V\\) (as both \\(U\\) and \\(V\\) are vector spaces, both would contain \\(-1v=-v\\) as we are given \\(v \\in U \\cap V\\) and scalar multiplication is closed on both.)\nBy the unique representation in the definition of direct sums, you have only one way to construct this expression: namely, that \\(v=0\\) as both are vector spaces so the additive identity exists on both.\nHence:\n\\begin{equation} \\{0\\} = U \\cap V \\end{equation}\nonly if\u0026mdash; Suppose \\(U \\cap W = \\{0\\}\\). 
Take also \\(u \\in U\\) and \\(w \\in W\\); we can construct an expression:\n\\begin{equation} u + w = 0 \\end{equation}\nIf we can show that there is only one unique combination of \\(u\\) and \\(w\\) to write \\(0\\), we satisfy the previous proof and therefore \\(U+W\\) is a direct sum.\nThe expression above implies that \\(w\\) is the additive inverse of \\(u\\); therefore; \\(u = -w\\). As both \\(U\\) and \\(W\\) are vector spaces, their elements all have inverses. As \\(u\\) is the inverse of \\(w\\), and given the definition of sum of subsets that \\(u \\in U\\) and \\(w \\in W\\), \\(u\\) and \\(w\\) are both in both \\(U\\) and \\(W\\).\nAs the intersection of \\(U\\) and \\(V\\) is \\(0\\), \\(u=w=0\\). Therefore, there is only one unique representation of \\(0\\), namely with \\(u=0,w=0\\), making \\(U+W\\) a direct sum. \\(\\blacksquare\\)\ndirect sum proofs are not pairwise! Those two proofs above only deal with pairs of sum of subsets. If you have multiple subsets, they don\u0026rsquo;t apply!\nevery subspace of \\(V\\) is a part of a direct sum equaling to \\(V\\) For every subspace \\(U\\) of a finite-dimensional \\(V\\), there is a subspace \\(W\\) of \\(V\\) for which \\(V = U \\oplus W\\).\nBecause \\(V\\) is defined to be finite-dimensional, and the fact that a finite-dimensional subspace is finite-dimensional, \\(U\\) is finite-dimensional.\nTherefore, because every finite-dimensional vector space has a basis, \\(U\\) has a basis \\(u_1, \\dots u_{m}\\).\nBecause bases are linearly independent, and \\(U \\subset V\\), \\(u_1, \\dots u_{m}\\) is a linearly independent list in \\(V\\).\nBecause a linearly independent list expends to a basis, we can construct \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) as the basis of \\(V\\). 
We will construct a \\(W = span(w_1, \\dots w_{n})\\) \u0026mdash; the space formed as the span of the \u0026ldquo;extension\u0026rdquo; vectors to make the basis in \\(V\\).\nBecause the list \\(u_{j}\\dots w_{k}\\) we made is a basis in \\(V\\), \\(U+W=V\\).\nYou can see this because every element \\(v \\in V\\) can be constructed with a linear combination \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) (again, because this list shown to be a basis of \\(V\\) therefore it spans \\(V\\).) Then, to show that \\(U+W=V\\), we can collapse \\(a_{1}u_1\\dots + a_{m}u_{m}=u \\in U\\), and \\(c_{1}w_1 \\dots +c_{m}w_{m} = w \\in W\\). Hence, every element \\(v \\in V\\) can be constructed by some \\(u \\in U + w \\in W\\), making \\(U+W=V\\).\nNow, we have to show that the combination is a direct sum. There is a few ways of going about this, the one presented by Axler is leveraging the fact that a sum of subsets is only a direct sum IFF their intersection is set containing \\(0\\)\u0026mdash;that \\(U \\cap W = \\{0\\}\\).\nGiven some element \\(v\\) that lives in the intersection between \\(U\\) and \\(W\\), it must be formed as a linear combination of two linearly independent lists (as \\(u_j, \\dots w_{j}\\) is a basis, they are linearly independent.)\nIntuition: if an non-zero element lives in the intersection between two linearly independent lists which together is still linearly independent, it must be able to be written by a linear combination of other elements of that linearly independent list to live in the intersection of the two lists\u0026mdash;which is absurd (violates the definition of linearly dependent). The only element for which this is an exception is \\(0\\).\nActual proof:\nsuppose \\(v \\in U \\cap W\\), so \\(v = a_1u_1\\dots +a_{m}v_{m}\\) as well as \\(v=b_1w_{1} + \\dots b_{n}w_{n}\\). 
Subtracting the two lists results in:\n\\begin{equation} 0 = a_1u_1+ \\dots a_{m} u_{m} - b_1w_1+ \\dots +b_{n}w_{n} \\end{equation}\nhaving already declared this list linearly independent, we see that each scalar \\(a_1, \\dots -b_{n}\\) must equal to \\(0\\) for this expression. Therefore, the intersection \\(v\\) must be \\(\\{0\\}\\) as \\(0u_1 + \\dots +0u_{m}=0\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e is a sum of \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es\u003c/strong\u003e\u003c/strong\u003e (not just subsets!!) where there\u0026rsquo;s only one way to represent each element.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\) named \\(U_1, \\dots, U_{m}\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e of \\(U_1+\\dots+U_{m}\\) is called a \u003cem\u003edirect sum\u003c/em\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eeach element in \\(U_1+\\dots +U_{m}\\) can only be written in one way as a sum \\(u_1 +\\dots +u_{m}\\) (as in, they are linearly independent?)\u003c/p\u003e\n\u003cp\u003eWe use \\(\\oplus\\) to represent \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"why-is-it-called-a-direct-sum--kbhdirect-sum-dot-md\"\u003ewhy is it \u003cem\u003ecalled\u003c/em\u003e a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum?\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSomething is \u003cem\u003enot\u003c/em\u003e a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e if any of its components can be 
described using the others. Its kind of line linear independence but! on entire spaces.\u003c/p\u003e\n\u003ch3 id=\"a-sum-of-subsets--kbhsum-of-subsets-dot-md--is-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--there-is-only-one-way-to-write-0\"\u003ea \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e there is only one way to write \\(0\\)\u003c/h3\u003e\n\u003cp\u003eGiven \\(U_1, \\dots, U_{m}\\) are subspaces of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is a direct sum IFF the only way to write \\(0\\) as a sum \\(u_1 +\\dots +u_{m}\\) is by taking each element to \\(0\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eif\u0026mdash;\nIf some \\(U_1 + \\dots +U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e, definitionally there is only one way to write \\(0\\). And you can always write \\(0\\) by taking all the constituents to \\(0\\) as they are \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es, so the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e exists.\u003c/p\u003e\n\u003cp\u003eonly if\u0026mdash;\nWe are given that there is only one way to write \\(0\\), that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = u_1+ u_2+ \\dots+ u_{m}: u_j \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas \\(U_{j}\\) are all subspaces, and the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e exists, we can say that \\(u_1=u_2=\\dots =0\\).\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction that \\(U_1 + \\dots +U_{m}\\) is not a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. 
Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\exists\\ v_1 = u_1+u_2+\\dots + u_{m}: u_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\exists\\ v_1 = w_1+w_2+\\dots + w_{m}: w_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;there are two unique representations of a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e given the \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSubtracting these representations, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(v_1-v_1) = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (u_1-w_1) + \\dots +(u_{m}-w_{m}): u_{j}, w_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have established that each slot that makes up this particular sum \\(=0\\). Therefore, \\(u_{i}-w_{i} = 0\\). This means $u\u003csub\u003ei\u003c/sub\u003e=w\u003csub\u003ei\u003c/sub\u003e$\u0026mdash;there are no two unique representations of \\(v_{1}\\). Reaching contradiction. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"a-sum-of-subsets--kbhsum-of-subsets-dot-md--is-only-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--their-intersection-is-set-containing-0\"\u003ea \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e is only a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e their intersection is set containing \\(0\\)\u003c/h3\u003e\n\u003cp\u003eTake \\(U\\) and \\(W\\), two \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\). 
\\(U+V\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(U \\cap W = \\{0\\}\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eif\u0026mdash;\nSuppose \\(U+V\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. \\(\\forall v \\in U \\cap V\\), as \\(v\\) is equal to itself, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = v+(-v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v\\) is in \\(U\\) and \\(-v\\) is in \\(V\\) (as both \\(U\\) and \\(V\\) are vector spaces, both would contain \\(-1v=-v\\) as we are given \\(v \\in U \\cap V\\) and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is closed on both.)\u003c/p\u003e\n\u003cp\u003eBy the unique representation in the definition of \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003es, you have only one way to construct this expression: namely, that \\(v=0\\) as both are \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es so the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e exists on both.\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\{0\\} = U \\cap V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eonly if\u0026mdash;\nSuppose \\(U \\cap W = \\{0\\}\\). Take also \\(u \\in U\\) and \\(w \\in W\\); we can construct an expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu + w = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we can show that there is only one unique combination of \\(u\\) and \\(w\\) to write \\(0\\), we satisfy the previous proof and therefore \\(U+W\\) is a direct sum.\u003c/p\u003e\n\u003cp\u003eThe expression above implies that \\(w\\) is the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e of \\(u\\); therefore; \\(u = -w\\). 
As both \\(U\\) and \\(W\\) are \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es, their elements all have \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003es. As \\(u\\) is the inverse of \\(w\\), and given the definition of \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e that \\(u \\in U\\) and \\(w \\in W\\), \\(u\\) and \\(w\\) are both in both \\(U\\) and \\(W\\).\u003c/p\u003e\n\u003cp\u003eAs the intersection of \\(U\\) and \\(V\\) is \\(0\\), \\(u=w=0\\). Therefore, there is only one unique representation of \\(0\\), namely with \\(u=0,w=0\\), making \\(U+W\\) a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"direct-sum--kbhdirect-sum-dot-md--proofs-are-not-pairwise\"\u003e\u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e proofs are not pairwise!\u003c/h3\u003e\n\u003cp\u003eThose two proofs above only deal with pairs of \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e. 
If you have multiple subsets, they don\u0026rsquo;t apply!\u003c/p\u003e\n\u003ch3 id=\"every-subspace--kbhsubspace-dot-md--of-v-is-a-part-of-a-direct-sum--kbhdirect-sum-dot-md--equaling-to-v\"\u003eevery \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\) is a part of a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e equaling to \\(V\\)\u003c/h3\u003e\n\u003cp\u003eFor every \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U\\) of a \u003cstrong\u003e\u003cstrong\u003efinite-dimensional\u003c/strong\u003e\u003c/strong\u003e \\(V\\), there is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(W\\) of \\(V\\) for which \\(V = U \\oplus W\\).\u003c/p\u003e\n\u003cp\u003eBecause \\(V\\) is defined to be \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, and the fact that a \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspace\u003c/a\u003e is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, \\(U\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore, because \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/#every-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-vector-space-has-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eevery finite-dimensional vector space has a basis\u003c/a\u003e, \\(U\\) has a basis \\(u_1, \\dots u_{m}\\).\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhbasis/\"\u003ebases\u003c/a\u003e are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, and \\(U \\subset V\\), \\(u_1, \\dots u_{m}\\) is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\).\u003c/p\u003e\n\u003cp\u003eBecause \u003ca 
href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea linearly independent list expends to a basis\u003c/a\u003e, we can construct \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) as the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). We will construct a \\(W = span(w_1, \\dots w_{n})\\) \u0026mdash; the space formed as the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of the \u0026ldquo;extension\u0026rdquo; vectors to make the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\).\u003c/p\u003e\n\u003cp\u003eBecause the list \\(u_{j}\\dots w_{k}\\) we made is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\), \\(U+W=V\\).\u003c/p\u003e\n\u003cp\u003eYou can see this because every element \\(v \\in V\\) can be constructed with a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e \\(u_1, \\dots u_{m}, w_{1}, \\dots w_{n}\\) (again, because this list shown to be a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) therefore it \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003es \\(V\\).) Then, to show that \\(U+W=V\\), we can collapse \\(a_{1}u_1\\dots + a_{m}u_{m}=u \\in U\\), and \\(c_{1}w_1 \\dots +c_{m}w_{m} = w \\in W\\). Hence, every element \\(v \\in V\\) can be constructed by some \\(u \\in U + w \\in W\\), making \\(U+W=V\\).\u003c/p\u003e\n\u003cp\u003eNow, we have to show that the combination is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e. 
There is a few ways of going about this, the one presented by \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eAxler\u003c/a\u003e is leveraging the fact that \u003ca href=\"#a-sum-of-subsets--kbhsum-of-subsets-dot-md--is-only-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--their-intersection-is-set-containing-0\"\u003ea sum of subsets is only a direct sum IFF their intersection is set containing \\(0\\)\u003c/a\u003e\u0026mdash;that \\(U \\cap W = \\{0\\}\\).\u003c/p\u003e\n\u003cp\u003eGiven some element \\(v\\) that lives in the intersection between \\(U\\) and \\(W\\), it must be formed as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of two \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e lists (as \\(u_j, \\dots w_{j}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, they are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.)\u003c/p\u003e\n\u003cp\u003eIntuition: if an non-zero element lives in the intersection between two \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e lists which together is still \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, it must be able to be written by a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of other elements of that \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list to live in the intersection of the two lists\u0026mdash;which is absurd (violates the definition of \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e). 
The only element for which this is an exception is \\(0\\).\u003c/p\u003e\n\u003cp\u003eActual proof:\u003c/p\u003e\n\u003cp\u003esuppose \\(v \\in U \\cap W\\), so \\(v = a_1u_1\\dots +a_{m}v_{m}\\) as well as \\(v=b_1w_{1} + \\dots b_{n}w_{n}\\). Subtracting the two lists results in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_1u_1+ \\dots a_{m} u_{m} - b_1w_1+ \\dots +b_{n}w_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehaving already declared this list \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e, we see that each scalar \\(a_1, \\dots -b_{n}\\) must equal to \\(0\\) for this expression. Therefore, the intersection \\(v\\) must be \\(\\{0\\}\\) as \\(0u_1 + \\dots +0u_{m}=0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirect_sum/","tags":null,"title":"direct sum"},{"categories":null,"contents":"Directed Evolution is a process of recreating Darwinian processes in a lab setting\nmutation: make select mutation selection: selection specific changes replication: make more of it Examples: PACE\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdirected_evolution/\"\u003eDirected Evolution\u003c/a\u003e is a process of recreating Darwinian processes in a lab setting\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003emutation: make select mutation\u003c/li\u003e\n\u003cli\u003eselection: selection specific changes\u003c/li\u003e\n\u003cli\u003ereplication: make more of it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eExamples: \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirected_evolution/","tags":null,"title":"Directed Evolution"},{"categories":null,"contents":"Softmax Method Pull arm \\(a\\) with probability \\(\\propto \\exp (\\lambda \\rho_{a})\\), where \\(\\lambda \\geq 0\\) is the \u0026ldquo;precision parameter\u0026rdquo;.\nWhen \\(\\lambda \\to 0\\), this system uses the same rate for each of the actions, so you are 
essentially randomly sampling; when \\(\\lambda \\to \\infty\\), the system will use only the greedy action because only the element with the biggest \\(\\rho_{a}\\) gets selected.\nFor a multi-state case:\n\\begin{equation} \\propto \\exp (\\lambda Q(s,a)) \\end{equation}\nQuantile Exploration Choose the arm with the largest \\(\\theta\\) at the highest \\(\\alpha\\) percentile of its beta distribution, pull that arm, update priors\n\u0026ldquo;choose the arm with the highest \\(\\theta\\) for the \\(90\\%\\) percentile, then update the distribution\u0026rdquo;\nUCB 1 Inspired by monte-carlo exploration\ntake action \\(a\\) such that\n\\begin{equation} \\max_{a} \\rho_{a} + c \\sqrt{ \\frac{\\log N}{N(a)}} \\end{equation}\nwhere, \\(c\\) is the exploration factor, \\(N\\) is the total number of trials, \\(N(a)\\) is the number of trials for \\(a\\) we have done.\nThis value is considered the \u0026ldquo;upper confidence bound\u0026rdquo;; hence \u0026ldquo;UCB\u0026rdquo;\nPosterior Sampling Same one point from each Beta Distribution for each of your slot machines; then you pick the result that is the highest.\nDoes not require any parameter.\nThis is proven to do some over-exploration. 
But that\u0026rsquo;s (mostly) just fine.\nR-Max See R-Max\n","html":"\u003ch2 id=\"softmax-method\"\u003eSoftmax Method\u003c/h2\u003e\n\u003cp\u003ePull arm \\(a\\) with probability \\(\\propto \\exp (\\lambda \\rho_{a})\\), where \\(\\lambda \\geq 0\\) is the \u0026ldquo;precision parameter\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eWhen \\(\\lambda \\to 0\\), this system uses the same rate for each of the actions, so you are essentially randomly sampling; when \\(\\lambda \\to \\infty\\), the system will use only the \u003ca href=\"/posts/kbhexploration_and_exploitation/#bayesian-model-estimation\"\u003egreedy action\u003c/a\u003e because only the element with the biggest \\(\\rho_{a}\\) gets selected.\u003c/p\u003e\n\u003cp\u003eFor a multi-state case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\propto \\exp (\\lambda Q(s,a))\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"quantile-exploration\"\u003eQuantile Exploration\u003c/h2\u003e\n\u003cp\u003eChoose the arm with the largest \\(\\theta\\) at the highest \\(\\alpha\\) percentile of its beta distribution, pull that arm, update priors\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;choose the arm with the highest \\(\\theta\\) for the \\(90\\%\\) percentile, then update the distribution\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"ucb-1\"\u003eUCB 1\u003c/h2\u003e\n\u003cp\u003eInspired by \u003ca href=\"/posts/kbhmonte_carlo_tree_search/#monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003etake action \\(a\\) such that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{a} \\rho_{a} + c \\sqrt{ \\frac{\\log N}{N(a)}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(c\\) is the exploration factor, \\(N\\) is the total number of trials, \\(N(a)\\) is the number of trials for \\(a\\) we have done.\u003c/p\u003e\n\u003cp\u003eThis value is considered the \u0026ldquo;upper confidence bound\u0026rdquo;; hence \u0026ldquo;UCB\u0026rdquo;\u003c/p\u003e\n\u003ch2 
id=\"posterior-sampling\"\u003ePosterior Sampling\u003c/h2\u003e\n\u003cp\u003eSame one point from each \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e for each of your slot machines; then you pick the result that is the highest.\u003c/p\u003e\n\u003cp\u003eDoes not require any parameter.\u003c/p\u003e\n\u003cp\u003eThis is proven to do some over-exploration. But that\u0026rsquo;s (mostly) just fine.\u003c/p\u003e\n\u003ch2 id=\"r-max\"\u003eR-Max\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/#r-max\"\u003eR-Max\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdirected_exploration/","tags":null,"title":"Directed Exploration"},{"categories":null,"contents":"discourse features are marks of fluency/etc. which mark one\u0026rsquo;s speech.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdiscourse_features/\"\u003ediscourse features\u003c/a\u003e are marks of fluency/etc. which mark one\u0026rsquo;s speech.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiscourse_features/","tags":null,"title":"discourse features"},{"categories":null,"contents":"A Discourse-Completion Task is a tool used to elicit speech acts, such as showing an image, etc. For instance,\ntypes of Discourse-Completion Tasks oral lexical retrival Cookie Theft ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e is a tool used to elicit speech acts, such as showing an image, etc. 
For instance,\u003c/p\u003e\n\u003ch2 id=\"types-of-discourse-completion-task--kbhdiscourse-completion-task-dot-md--s\"\u003etypes of \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003es\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrival\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhctp/\"\u003eCookie Theft\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiscourse_completion_task/","tags":null,"title":"Discourse-Completion Task"},{"categories":null,"contents":"A discrete set of chances: die, coin flip, etc.\nWe use probability mass function to model such a distribution:\n\\begin{equation} \\sum_{i=1}^{n}P(X=i) = 1 \\end{equation}\nTo each member of the distribution, we assign a factor. The parameters of this distribution are the probability values you assign to each group.\n","html":"\u003cp\u003eA discrete set of chances: die, coin flip, etc.\u003c/p\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhprobability_distributions/#probability-mass-function\"\u003eprobability mass function\u003c/a\u003e to model such a distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=1}^{n}P(X=i) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo each member of the distribution, we assign a \u003ca href=\"/posts/kbhfactor/\"\u003efactor.\u003c/a\u003e The \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es of this distribution are the probability values you assign to each group.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdiscrete_distribution/","tags":null,"title":"discrete distribution"},{"categories":null,"contents":"\u0026ldquo;how does an operating system track threads and processes\u0026rdquo;\nsee process control block\ntraps and interrups Bad problem: the operating system can\u0026rsquo;t be running when a user thread is running. 
We can\u0026rsquo;t do thread bookeeping if a user thread is running.\ntrap a trap is a scheme to request OS attention explicitly from the user thread, swapping the user process off the CPU.\nsystem calls errors page fault (memory errors) interrupt a interrupt takes place outside the current thread, it forces the OS\u0026rsquo; attention even if the user thread isn\u0026rsquo;t asking for it\ncharacter typed at keyboard completion of a disk operations a hardware timer that fires an interrupt interrupts enable preemption to happen, so see also preemption for interrupt handling pattern.\nwhat if a timer goes off during an interrupt interrupts are disabled during interrupt handling, otherwise, this causes an infinite loop.\nsolution: interrupts are disabled during timer handling.\nthis causes a problem: if you preempt into a brand new thread\nmain idea there are race condition we cannot solve with mutexes because we are the OS so, we implement mutexes by enabling/disabling interrupts dispatcher a dispatcher performs a context switch, which\ncontext switch (in asm) push all registers except %rsp into the bottom of the old thread\u0026rsquo;s stack store the stack pointer %rsp into the process control block for that process corresponding to thread read the new thread\u0026rsquo;s stack pointer from the process control block, and load that into %rsp (in asm) pop all registers stored on the bottom of our new stack back onto the registers remember to push and pop the registers in the same order\u0026hellip;. 
otherwise the registers won\u0026rsquo;t be in the right order.\nthis makes a context switch a function that calls on one thread and returns on another thread\u0026mdash;\u0026ldquo;we start executing from one stack, and end executing from another\u0026rdquo;.\nExample:\ncontext switch\nNotice that we only store callee saved registers because its the responsibility of whomever called context switch to save the register of the caller saved registers.\npushq %rbp pushq %rbx pushq %r14 pushq %r15 ;; pushq all of em callee saved ... movq %rsp, [somewhere in PCB, thread 1] ; the process control block movq [somewhere else in PCB, thread 2], %rsp ; the stack is now somewhere else ;; now we pop backwards up from the stack ;; popq all of em calee saved ... popq %r15 popq %r14 popq %rbx popq %rbp ;; this will RETURN to the last call *or* top of context_switch() of the ;; **THREAD 2**, because we moved the stack pointer by movq into ;; %rsp, we will return to the NEW thread\u0026#39;s last executed position ret what if the thread is new? We can\u0026rsquo;t ret to a function that never called context_switch, which is the case for new threads.\nTo do this, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls context_switch normally.\nyield yield is a user function that one could implement, which acts like a blocking action, but instead of doing that we just add ourselves directly to the end of the ready queue again. (i.e. 
give up CPU voluntarily, but don\u0026rsquo;t block0.\n","html":"\u003cp\u003e\u0026ldquo;how does an operating system track threads and processes\u0026rdquo;\u003c/p\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"traps-and-interrups\"\u003etraps and interrups\u003c/h2\u003e\n\u003cp\u003eBad problem: \u003cstrong\u003ethe operating system can\u0026rsquo;t be running when a user thread is running\u003c/strong\u003e. We can\u0026rsquo;t do thread bookeeping if a user thread is running.\u003c/p\u003e\n\u003ch3 id=\"trap\"\u003etrap\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"#trap\"\u003etrap\u003c/a\u003e is a scheme to request OS attention explicitly from the user thread, swapping the user process off the CPU.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esystem calls\u003c/li\u003e\n\u003cli\u003eerrors\u003c/li\u003e\n\u003cli\u003epage fault (memory errors)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"interrupt\"\u003einterrupt\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"#interrupt\"\u003einterrupt\u003c/a\u003e takes place outside the current thread, it forces the OS\u0026rsquo; attention even if the user thread isn\u0026rsquo;t asking for it\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echaracter typed at keyboard\u003c/li\u003e\n\u003cli\u003ecompletion of a disk operations\u003c/li\u003e\n\u003cli\u003ea hardware timer that fires an interrupt\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003ca href=\"#interrupt\"\u003einterrupt\u003c/a\u003es enable \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e to happen, so see also \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e for interrupt handling pattern.\u003c/p\u003e\n\u003ch4 id=\"what-if-a-timer-goes-off-during-an-interrupt--orge7a39af\"\u003ewhat if a timer goes off during an \u003ca 
href=\"#interrupt\"\u003einterrupt\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003einterrupts are disabled during interrupt handling\u003c/strong\u003e, otherwise, this causes an infinite loop.\u003c/p\u003e\n\u003cp\u003esolution: \u003cem\u003einterrupts are disabled during timer handling\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003ethis causes a problem: if you \u003ca href=\"/posts/kbhpreemption/#preempting-into-a-brand-new-thread\"\u003epreempt into a brand new thread\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"main-idea\"\u003emain idea\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ethere are \u003ca href=\"/posts/kbhmultithreading/#race-condition\"\u003erace condition\u003c/a\u003e we cannot solve with \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003ees because we are the OS\u003c/li\u003e\n\u003cli\u003eso, \u003cstrong\u003ewe\u003c/strong\u003e implement mutexes by enabling/disabling \u003ca href=\"#interrupt\"\u003einterrupt\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dispatcher\"\u003edispatcher\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#dispatcher\"\u003edispatcher\u003c/a\u003e performs a \u003ca href=\"#context-switch\"\u003econtext switch\u003c/a\u003e, which\u003c/p\u003e\n\u003ch3 id=\"context-switch\"\u003econtext switch\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e(in asm) push \u003cstrong\u003eall \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es\u003c/strong\u003e except \u003ccode\u003e%rsp\u003c/code\u003e into the bottom of the old thread\u0026rsquo;s \u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estore the \u003ca href=\"/posts/kbhassembly/#stack-pointer\"\u003estack pointer\u003c/a\u003e \u003ccode\u003e%rsp\u003c/code\u003e into the \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e for that process corresponding to thread\u003c/li\u003e\n\u003cli\u003eread the new 
thread\u0026rsquo;s stack pointer from the \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e, and load that into \u003ccode\u003e%rsp\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e(in asm) pop \u003cstrong\u003eall \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es\u003c/strong\u003e stored on the bottom of our new stack back onto the registers\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eremember to push and pop the \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es in the same order\u0026hellip;. otherwise the registers won\u0026rsquo;t be in the right order.\u003c/p\u003e\n\u003cp\u003ethis makes a \u003ca href=\"#context-switch\"\u003econtext switch\u003c/a\u003e a function that \u003cstrong\u003ecalls on one thread\u003c/strong\u003e and \u003cstrong\u003ereturns on another thread\u003c/strong\u003e\u0026mdash;\u0026ldquo;we start executing from one stack, and end executing from another\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eExample:\u003c/p\u003e\n\u003cp\u003econtext switch\u003c/p\u003e\n\u003cp\u003eNotice that we only store \u003cstrong\u003ecallee saved registers\u003c/strong\u003e because its the responsibility of whomever called context switch to save the register of the \u003cstrong\u003ecaller saved registers\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-asm\" data-lang=\"asm\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rbp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e%rbx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r14\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epushq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r15\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e;; pushq all of em callee saved ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emovq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rsp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esomewhere\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ePCB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e; the process control block\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emovq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esomewhere\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ePCB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rsp\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e; the stack is now somewhere else\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; now we pop backwards up from the stack\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; popq all of em calee saved ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r15\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%r14\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rbx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epopq\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e%rbp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75715e\"\u003e;; this will RETURN to the last call *or* top of context_switch() of the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; **THREAD 2**, because we moved the stack pointer by movq into\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e;; %rsp, we will return to the NEW thread\u0026#39;s last executed position\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eret\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"what-if-the-thread-is-new\"\u003ewhat if the thread is new?\u003c/h4\u003e\n\u003cp\u003eWe can\u0026rsquo;t \u003ccode\u003eret\u003c/code\u003e to a function that never called \u003ccode\u003econtext_switch\u003c/code\u003e, which is the case for \u003cstrong\u003enew threads\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTo do this, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls \u003ccode\u003econtext_switch\u003c/code\u003e normally.\u003c/p\u003e\n\u003ch3 id=\"yield\"\u003eyield\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#yield\"\u003eyield\u003c/a\u003e is a user function that one could implement, which acts like a blocking action, but instead of doing that we just add ourselves directly to the end of the ready queue again. (i.e. 
give up CPU voluntarily, but don\u0026rsquo;t block0.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdispatching/","tags":null,"title":"dispatching"},{"categories":null,"contents":"Big Idea Motivation: Touring Test Point of the Turing test: we can use language to get the underlying thought and the underlying cognition. However, language IS NOT thought.\nLanguage is not thought Good at language != good at thought =\u0026gt; public speaking settings\nBad at language != bad at thought =\u0026gt; language models?\nLLM eval should separate language and thought formal vs functional linguistic confidence (good at speaking and speaking is useful) generalized world knowledge Detour and motivation: cognitive science language centre in brain is specific to a location, and changes in language doesn\u0026rsquo;t change what region gets activated language shows little/no response when we are thinking of cognitively challenging tasks lik emaffs Key examples: aphasics can still think. So each skill is in a separate brain-place.\nFormal and Functional Competence Mahowold, Ivanlova, et al.\nCan we find out what parts of the network separately process core language skills (syntax and formal grammar) vs. \u0026ldquo;functional\u0026rdquo; skills (semantics and mathematical reasoning), and how does LLMs perform in each area?\nFormal Competence Unsurprisingly for us but surprisingly for linguists, GPT can grammar real good. No surprises there.\nFunctional Competence It can memorize the output, and it doesn\u0026rsquo;t perform well for out of sample math/reasoning cases\nGeneralized World Knowledge Two types of knowledge\nFactual: Paris is the capital of France; Birds lay eggs Distributional: the sky is {blue, black, pink}. the first two are largely more likely The factual side LLMs are very good at and that\u0026rsquo;s again unsurpsininlgy. But for #2\u0026hellip;\nLLM embeddings have similar colours close together, and similar animals close together. 
LLMs are subject to reporter bias: we talk less about obvious things, yet LLMs are only trained on what we talk about.\nQuestion 1: does the generalized world model require languages?\n\u0026ldquo;The fox is chasing a planet.\u0026rdquo; \u0026mdash; there is a logical failure here.\nHowever, when shown semantically incompatible events, the language centre activates, but not very much. Doing this to aphasics still showed that having no difference.\nSO: Language network is recruited but not required for event semantics\nEvent knowledge evaluations:\nThis is actually formal and so LLMs do very well: \u0026ldquo;The laptop ate the teacher\u0026rdquo; (inanimate objects cannot eat, formal issue) This is perceptual and LLMs do poorly: \u0026ldquo;The fox chased the rabbit\u0026rdquo; (foxes can\u0026rsquo;t be slower than a fox) Question: what happens if you in-context complete the recruitment of counterfactuals\n","html":"\u003ch2 id=\"big-idea\"\u003eBig Idea\u003c/h2\u003e\n\u003ch3 id=\"motivation-touring-test\"\u003eMotivation: Touring Test\u003c/h3\u003e\n\u003cp\u003ePoint of the Turing test: we can use language to get the underlying thought and the underlying cognition. 
However, language IS NOT thought.\u003c/p\u003e\n\u003ch3 id=\"language-is-not-thought\"\u003eLanguage is not thought\u003c/h3\u003e\n\u003cp\u003eGood at language != good at thought =\u0026gt; public speaking settings\u003c/p\u003e\n\u003cp\u003eBad at language != bad at thought =\u0026gt; language models?\u003c/p\u003e\n\u003ch3 id=\"llm-eval-should-separate-language-and-thought\"\u003eLLM eval should separate language and thought\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eformal vs functional linguistic confidence (good at speaking and speaking is useful)\u003c/li\u003e\n\u003cli\u003egeneralized world knowledge\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"detour-and-motivation-cognitive-science\"\u003eDetour and motivation: cognitive science\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elanguage centre in brain is specific to a location, and changes in language doesn\u0026rsquo;t change what region gets activated\u003c/li\u003e\n\u003cli\u003elanguage shows little/no response when we are thinking of cognitively challenging tasks lik emaffs\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eKey examples: aphasics can still think. So each skill is in a separate brain-place.\u003c/p\u003e\n\u003ch2 id=\"formal-and-functional-competence\"\u003eFormal and Functional Competence\u003c/h2\u003e\n\u003cp\u003eMahowold, Ivanlova, et al.\u003c/p\u003e\n\u003cp\u003eCan we find out what parts of the network separately process core language skills (syntax and formal grammar) vs. \u0026ldquo;functional\u0026rdquo; skills (semantics and mathematical reasoning), and how does LLMs perform in each area?\u003c/p\u003e\n\u003ch3 id=\"formal-competence\"\u003eFormal Competence\u003c/h3\u003e\n\u003cp\u003eUnsurprisingly for us but surprisingly for linguists, GPT can grammar real good. 
No surprises there.\u003c/p\u003e\n\u003ch3 id=\"functional-competence\"\u003eFunctional Competence\u003c/h3\u003e\n\u003cp\u003eIt can memorize the output, and it doesn\u0026rsquo;t perform well for out of sample math/reasoning cases\u003c/p\u003e\n\u003ch2 id=\"generalized-world-knowledge\"\u003eGeneralized World Knowledge\u003c/h2\u003e\n\u003cp\u003eTwo types of knowledge\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eFactual\u003c/strong\u003e\u003c/strong\u003e: Paris is the capital of France; Birds lay eggs\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDistributional\u003c/strong\u003e: the sky is {blue, black, pink}. the first two are largely more likely\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u003cstrong\u003efactual\u003c/strong\u003e side LLMs are very good at and that\u0026rsquo;s again unsurpsininlgy. But for #2\u0026hellip;\u003c/p\u003e\n\u003cp\u003eLLM embeddings have similar colours close together, and similar animals close together. LLMs are subject to reporter bias: we talk less about obvious things, yet LLMs are only trained on what we talk about.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eQuestion 1: does the generalized world model require languages?\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;The fox is chasing a planet.\u0026rdquo; \u0026mdash; there is a logical failure here.\u003c/p\u003e\n\u003cp\u003eHowever, when shown semantically incompatible \u003cstrong\u003eevents\u003c/strong\u003e, the language centre activates, but not very much. 
Doing this to aphasics still showed that having no difference.\u003c/p\u003e\n\u003cp\u003eSO: \u003cstrong\u003e\u003cstrong\u003eLanguage network is recruited but not required for event semantics\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eEvent knowledge evaluations:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThis is actually formal and so LLMs do very well: \u0026ldquo;The laptop ate the teacher\u0026rdquo; (inanimate objects cannot eat, formal issue)\u003c/li\u003e\n\u003cli\u003eThis is perceptual and LLMs do poorly: \u0026ldquo;The fox chased the rabbit\u0026rdquo; (foxes can\u0026rsquo;t be slower than a fox)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eQuestion: what happens if you in-context complete the recruitment of \u003ca href=\"/posts/kbhcounterfactual/\"\u003ecounterfactual\u003c/a\u003es\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdissociating_language_and_thought/","tags":null,"title":"Dissociating Language and Thought"},{"categories":null,"contents":"distributed algorithm is a type of algorithm that can be distributed across many modules.\nThere are a few core areas of research:\nfailure-proofing nodes is a distributed algorithm What if one processor fails? communication in a distributed algorithm What if communication between processors fails? What if timing fails? atomicity atomicity is a property of distributed algorithm where, for a set of steps, a processor can only do one or all of the steps. 
i.e.: if you are asking a node to do something, it can either do all of the thing or be able to roll back as if the entire thing didn\u0026rsquo;t happen.\nleader election (algorithms) leader election is the process by which a distributed algorithm elects the driving node among similar nodes.\nconsensus (algorithms) consensus is a mechanism in a distributed algorithm where the solution requires multiple processes to do the same calculation to confirm.\nalgorithms designed to be distributed MapReduce ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e is a type of algorithm that can be distributed across many modules.\u003c/p\u003e\n\u003cp\u003eThere are a few core areas of research:\u003c/p\u003e\n\u003ch2 id=\"failure-proofing-nodes-is-a-distributed-algorithm--kbhdistributed-algorithum-dot-md\"\u003efailure-proofing nodes is a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhat if one processor fails?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"communication-in-a-distributed-algorithm--kbhdistributed-algorithum-dot-md\"\u003ecommunication in a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhat if communication between processors fails?\u003c/li\u003e\n\u003cli\u003eWhat if timing fails?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"atomicity\"\u003eatomicity\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#atomicity\"\u003eatomicity\u003c/a\u003e is a property of \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e where, for a set of steps, a processor can only do \u003cem\u003eone\u003c/em\u003e or \u003cem\u003eall\u003c/em\u003e of the steps. 
i.e.: if you are asking a node to do something, it can either do all of the thing or be able to roll back as if the entire thing didn\u0026rsquo;t happen.\u003c/p\u003e\n\u003ch2 id=\"leader-election--algorithms\"\u003eleader election (algorithms)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#leader-election--algorithms\"\u003eleader election\u003c/a\u003e is the process by which a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e elects the driving node among similar nodes.\u003c/p\u003e\n\u003ch2 id=\"consensus--algorithms\"\u003econsensus (algorithms)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#consensus--algorithms\"\u003econsensus\u003c/a\u003e is a mechanism in a \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e where the solution requires multiple processes to do the same calculation to confirm.\u003c/p\u003e\n\u003ch2 id=\"algorithms-designed-to-be-distributed\"\u003ealgorithms designed to be distributed\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdistributed_algorithum/","tags":null,"title":"distributed algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdistributed_morphology/","tags":null,"title":"distributed morphology"},{"categories":null,"contents":"distributive harm is a harm where a system extends, withhold opportunities given a specific group of people\nSt. George\u0026rsquo;s hospital: overweighting\nobserving the weights observing the input, consider whether or not input has sensitive features ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdistributive_harm/\"\u003edistributive harm\u003c/a\u003e is a harm where a system extends, withhold opportunities given a specific group of people\u003c/p\u003e\n\u003cp\u003eSt. 
George\u0026rsquo;s hospital: overweighting\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eobserving the weights\u003c/li\u003e\n\u003cli\u003eobserving the input, consider whether or not input has sensitive features\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdistributive_harm/","tags":null,"title":"distributive harm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdistributivity/","tags":null,"title":"distributivity"},{"categories":null,"contents":"Let integer \\(a,b \\in \\mathbb{Z}\\), where \\(b \\neq 0\\). We say \\(b\\) divides \\(a\\) (i.e. \\(b|a\\)) if there\u0026rsquo;s some \\(m \\in \\mathbb{Z}\\) such that \\(a = bm\\).\nadditional information division algorithm Let \\(a,b \\in \\mathbb{Z}\\), \\(b \u0026gt; 0\\). Then, there exists, uniquely, some \\(q,r \\in \\mathbb{Z}\\) such that \\(a = bq + r\\) with \\(0 \\leq r \u0026lt;b\\).\n\u0026ldquo;division with remainder\u0026rdquo;\nYou will note that, if \\(a \u0026lt; b\\), we can just say \\(q = 0\\).\nProof:\nExistence Let us define some:\n\\begin{equation} S = \\{a - bk: k \\in \\mathbb{Z}, a-bk \\geq 0\\} \\end{equation}\nWe will note that this set is definitely non-empty:\nIf \\(a \\geq 0\\), then \\(a = a-b\\cdot 0 \\in S\\) If \\(a \u0026lt; 0\\) , then \\(a-b(2a) = a(1-2b)\\), we note that \\(a \u0026lt; 0\\) and since \\(b \u0026gt;0\\), \\((1-2b) \u0026lt;0\\), so \\(a(1-2b) \u0026gt; 0\\) and so \\(a(1-2b) \\in S\\) So by the WOP, \\(S\\) has a smallest element. Let us, by WOP, define \\(r\\) to be the smallest element in \\(S\\).\nTherefore, we can make some \\(r = a-bq \\in S\\). We also know that \\(r\\) is non-negative, as that is the constraint of \\(S\\). Finally, we have to ensure that \\(r\\) is the actual remainder, we have to ensure that \\(r \u0026lt; b\\).\nAssume for contradiction \\(r \\geq b\\). Then, \\(r-b = (a-qb)-b = a-(q+1)b \\geq 0\\). Therefore, \\(r-b \\in S\\). 
Yet, we said that \\(r\\) was the smallest element, reaching contradiction. Therefore, \\(r\u0026lt;b\\) . \\(\\blacksquare\\)\nUniqueness Let us have:\n\\begin{equation} a = bq+r = bq\u0026rsquo; + r' \\end{equation}\nRecall that, \\(0 \\leq r \u0026lt; b\\). We desire that \\(q = q\u0026rsquo;\\), \\(r = r\u0026rsquo;\\).\nWLOG let \\(r \\leq r\u0026rsquo;\\). So, \\(0 \\leq r\u0026rsquo; - r\\). Both \\(r\u0026rsquo;\\) and \\(r\\) are remainders after dividing by \\(b\\), so \\(r\u0026rsquo; \u0026lt; b\\) and \\(r \u0026lt; b\\). Therefore, we have:\n\\begin{equation} 0 \\leq r\u0026rsquo; - r \u0026lt; b \\end{equation}\nNow, recall that:\n\\begin{align} \u0026amp;bq+r = bq\u0026rsquo; + r\u0026rsquo;\\\\ \\Rightarrow\\ \u0026amp;b(q-q\u0026rsquo;) = r\u0026rsquo; - r \\end{align}\nNow, we have that \\(b|(r\u0026rsquo; - r)\\). Hence, we have some positive \\(r\u0026rsquo; - r\\), which is smaller than b, but which is divisible by \\(b\\). This forces us to conclude that \\(r\u0026rsquo; - r = 0\\).\nGiven \\(r\u0026rsquo; = r\\), now, we can see that \\(q = q\u0026rsquo;\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eLet integer \\(a,b \\in \\mathbb{Z}\\), where \\(b \\neq 0\\). We say \\(b\\) \u003ca href=\"/posts/kbhdivide/\"\u003edivide\u003c/a\u003es \\(a\\) (i.e. \\(b|a\\)) if there\u0026rsquo;s some \\(m \\in \\mathbb{Z}\\) such that \\(a = bm\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"division-algorithm\"\u003edivision algorithm\u003c/h3\u003e\n\u003cp\u003eLet \\(a,b \\in \\mathbb{Z}\\), \\(b \u0026gt; 0\\). 
Then, there exists, uniquely, some \\(q,r \\in \\mathbb{Z}\\) such that \\(a = bq + r\\) with \\(0 \\leq r \u0026lt;b\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;division with remainder\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eYou will note that, if \\(a \u0026lt; b\\), we can just say \\(q = 0\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003ch4 id=\"existence\"\u003eExistence\u003c/h4\u003e\n\u003cp\u003eLet us define some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS = \\{a - bk: k \\in \\mathbb{Z}, a-bk \\geq 0\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will note that this set is definitely non-empty:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIf \\(a \\geq 0\\), then \\(a = a-b\\cdot 0 \\in S\\)\u003c/li\u003e\n\u003cli\u003eIf \\(a \u0026lt; 0\\) , then \\(a-b(2a) = a(1-2b)\\), we note that \\(a \u0026lt; 0\\) and since \\(b \u0026gt;0\\), \\((1-2b) \u0026lt;0\\), so \\(a(1-2b) \u0026gt; 0\\) and so \\(a(1-2b) \\in S\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo by the \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e, \\(S\\) has a smallest element. Let us, by \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e, define \\(r\\) to be the smallest element in \\(S\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we can make some \\(r = a-bq \\in S\\). We also know that \\(r\\) is non-negative, as that is the constraint of \\(S\\). Finally, we have to ensure that \\(r\\) is the actual remainder, we have to ensure that \\(r \u0026lt; b\\).\u003c/p\u003e\n\u003cp\u003eAssume for contradiction \\(r \\geq b\\). Then, \\(r-b = (a-qb)-b = a-(q+1)b \\geq 0\\). Therefore, \\(r-b \\in S\\). Yet, we said that \\(r\\) was the smallest element, reaching contradiction. Therefore, \\(r\u0026lt;b\\) . 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch4 id=\"uniqueness\"\u003eUniqueness\u003c/h4\u003e\n\u003cp\u003eLet us have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = bq+r = bq\u0026rsquo; + r'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that, \\(0 \\leq r \u0026lt; b\\). We desire that \\(q = q\u0026rsquo;\\), \\(r = r\u0026rsquo;\\).\u003c/p\u003e\n\u003cp\u003eWLOG let \\(r \\leq r\u0026rsquo;\\). So, \\(0 \\leq r\u0026rsquo; - r\\). Both \\(r\u0026rsquo;\\) and \\(r\\) are remainders after dividing by \\(b\\), so \\(r\u0026rsquo; \u0026lt; b\\) and \\(r \u0026lt; b\\). Therefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq r\u0026rsquo; - r \u0026lt; b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;bq+r = bq\u0026rsquo; + r\u0026rsquo;\\\\\n\\Rightarrow\\ \u0026amp;b(q-q\u0026rsquo;) = r\u0026rsquo; - r\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we have that \\(b|(r\u0026rsquo; - r)\\). Hence, we have some positive \\(r\u0026rsquo; - r\\), which is smaller than b, but which is divisible by \\(b\\). This forces us to conclude that \\(r\u0026rsquo; - r = 0\\).\u003c/p\u003e\n\u003cp\u003eGiven \\(r\u0026rsquo; = r\\), now, we can see that \\(q = q\u0026rsquo;\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdivide/","tags":null,"title":"divide"},{"categories":null,"contents":"Divide by \\(2\\pi\\), or, how I learned to start worrying and hate Fourier Transforms.\nHello all. Good news first: our frequency theory is now correctly validated by data.\nIf you want a band-aid for the answer, here it is: divide everything we get out of the cantilever equations by \\(2\\pi\\); then, use the correct linear mass density: our Google sheets was off by a factor of almost \\(4\\) because of later-corrected apparent measurement error.\nThe bad news? 
You get pages of algebra to justify how, while getting a whole run-down of our entire theory so far for kicks.\nOur story begins at what popped out of the other end of the Euler-Lagrange equations (if you want the start of the Lagrangian analysis, read this from Mark, and plug the resulting Lagrangian into the Euler-Lagrange equation of the right shape.) But, either way, out will pop this fourth-order partial differential equation:\n\\begin{equation} EI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t} \\end{equation}\nwhere, \\(w(x,t)\\) is a function of displacement by location by time, \\(E\\) the elastic modulus, and \\(I\\) the second moment of bending area.\nNow, fourth-order diffequs are already pain. PARTIAL forth order diffequs sounds darn near impossible. Wikipedia, to their credit, helpfully suggests the following to help tackle this problem:\nYou see, as we are trying to isolate possible individual frequencies, it make sense to essentially run a Fourier Transform on our algebra, to get the possible amplitude at each frequency \\(\\hat{w}(x)\\), given some frequency \\(\\omega\\) (no idea why they use \\(\\omega\\), I will use \\(f\\) for the rest of this article.)\nTo perform this analysis, Wikipedia suggests that we substitute our \\(w(x,t)\\) with its Fourier Definition, which is written as a function of the Fourier-decomposed version of the function \\(\\hat{w}(x)\\) (real component only, as imaginary pairs serve only as the conjugate), and then re-isolate those decomposed \\(\\hat{w}(x)\\). In this way, we get rid of the time dimension as sine waves oscillate ad infinium. Makes total sense.\nEXCEPT WHAT WIKIPEDIA GAVE ABOVE TO SUBSTITUTE IN ISN\u0026rsquo;T THE CORRECT FOURIER DECOMPOSITION\nHere\u0026rsquo;s the actual Fourier Transform intergral:\nwhere, \\(\\zeta=\\omega=f\\) , \\(f(x) = \\hat{w}(x)\\).\nWHAT DO YOU NOTICE? AN EXTRA \\(2\\pi\\).\nTHIS ALSO MEANS THAT THE FREQUENCY ANALYSTS IN THE REST OF THAT WIKI ARTICLE IS WRONG\nOk. 
I collect myself.\nSo, we now have that:\n\\begin{equation} w(x,t) = Re\\qty[\\hat{w}(x)e^{-i 2\\pi ft}] \\end{equation}\nRecall that we are trying to substitute this into\n\\begin{equation} EI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t} \\end{equation}\nTaking two derivatives of the above Fourier decomposition equation by time (which is the dimension we are trying to get rid of to make the diffequ not partial), we have:\n\\begin{align} \\pdv[2]{w(x,t)}{t} \u0026amp;= \\pdv[2] t Re\\qty[\\hat{w}(x)e^{-i2\\pi ft}] \\\\ \u0026amp;= Re\\qty[\\hat{w}(x)\\pdv[2] t e^{-i2\\pi ft}] \\\\ \u0026amp;= Re\\qty[\\hat{w}(x)(2\\pi f)^{2} \\cdot e^{-i2\\pi ft}\\dots] \\end{align}\nNow, given we are only dealing with the real components of these things, everything on the \\(e^{-i\\dots }\\) part of the function wooshes away, cleanly leaving us with:\n\\begin{equation} \\pdv[2]{w(x,t)}{t} \u0026amp;= \\hat{w}(x)(2\\pi f)^{2} \\end{equation}\nYay! No longer differential. Substituting that into our original expression, and making the partials not partial anymore:\n\\begin{equation} EI\\pdv[4]{w}{x} + \\mu \\hat{w}(x)(2\\pi f)^{2}= 0 \\end{equation}\nExcellent. Now, onto solving this. The basic way to solve this is essentially to split the fourth-order differential into a 4x4 matrix, each one taking another derivative of the past. Then, to get a characteristic solution, you take its eigenvalues.\nBut instead of going about doing that, I\u0026rsquo;m going to give up and ask a computer. 
In the code, I am going to substitute \\(p\\) for \\(2\\pi\\) temporarily because FriCAS gets a little to eager to convert things into their sinusoidal forms if we leave it as \\(2\\pi\\).\nE,I,u,f = var(\u0026#34;E I u f\u0026#34;) x, L = var(\u0026#34;x L\u0026#34;) w = function(\u0026#39;w\u0026#39;)(x) p = var(\u0026#34;p\u0026#34;) _c0, _c1, _c2, _c3 = var(\u0026#34;_C0 _C1 _C2 _C3\u0026#34;) fourier_cantileaver = (E*I*diff(w, x, 4) - u*(p*f)^2*w == 0) fourier_cantileaver -f^2*p^2*u*w(x) + E*I*diff(w(x), x, x, x, x) == 0 solution = desolve(fourier_cantileaver, w, ivar=x, algorithm=\u0026#34;fricas\u0026#34;).expand() latex(solution) \\begin{equation} \\hat{w}(x) = _{C_{1}} e^{\\left(\\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} \\sqrt{2 \\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} \\end{equation}\nOk, so we have that each component solution is a combination of a bunch of stuff, times \\(\\pm i\\) or \\(\\pm 1\\). We are going to declare everything that\u0026rsquo;s invariant in the exponent to be named \\(\\beta\\):\n\\begin{equation} \\beta := \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} \\end{equation}\nAnd given this, we can then write the general solution for displacement by location (\\(w\\)) determined above more cleanly as:\n\\begin{equation} \\hat{w}(x) = _{C_{1}} e^{\\beta x} + _{C_{0}} e^{ \\beta ix} + _{C_{2}} e^{-\\beta ix} + _{C_{3}} e^{-\\beta x} \\end{equation}\nWe will make one more substitution\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. 
We know this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\nRecall that:\n\\begin{equation} \\begin{cases} \\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\ \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nWith a new set of scaling constants \\(d_0\\dots d_3\\) (because these substitutions essentially ignore any factors its being multiplied, but we don\u0026rsquo;t actually care about modeling amplitude with these expressions anyways, so we can just change the arbitrary initial-conditions scalars on the fly), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(\\hat{w}(x)\\) at a specific frequency \\(f\\) can be written as:\n\\begin{equation} d_0\\cosh \\beta x +d_1\\sinh \\beta x +d_2\\cos \\beta x +d_3\\sin \\beta x = \\hat{w}(x) \\end{equation}\nfor some arbitrary initial conditions \\(d_0\\dots d_3\\). Significantly cleaner.\nSo, what frequencies will our fork oscillate at? Well, a mode for our fork is any set of \\(d_0 \\dots d_3\\) for which a solution for \\(\\hat{w}(x)\\) exists given our constants.\nAs it stands right now, it seems like we have four unknowns (\\(d_0 \\dots d_3\\)) but only one equation to solve with. 
That\u0026rsquo;s no bueno.\nEnter our initial conditions:\nThe top line states that: at \\(x=0\\), the bottom of the fork, our beam does not travel away from its natural axis (yes, because its a solid hunk of metal connected to the base), and it does not deflect (slope).\nThe bottom line stats that: at \\(x=L\\), the top of the fork is straight (which is true, the tip-top of the fork does indeed not bend, only the middleish parts bend.)\nSo, to get at the hidden system of four elements, we will take some derivatives of our original \\(\\hat{w}(x)\\) equation by \\(x\\), as prescribed by our initial conditions.\nwp = diff(w,x,1) wpp = diff(w,x,2) wppp = diff(w,x,3) (wp, wpp, wppp) (b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x), -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x), -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x)) And then, we have a system:\ncond_1 = w.subs(x=0) == 0 cond_2 = wp.subs(x=0) == 0 cond_3 = wpp.subs(x=L) == 0 cond_4 = wppp.subs(x=L) == 0 conds = (cond_1, cond_2, cond_3, cond_4) conds (d0 + d2 == 0, b*d1 + b*d3 == 0, -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0, -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0) solve(conds, d0, d1, d2, d3).full_simplify() Ok so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\n\\begin{equation} \\begin{cases} d_0 + d_2 = 0 \\\\ d_1 + d_3 = 0 \\\\ -d_2 \\cos L\\beta + d_0 \\cosh L\\beta - d_3 \\sin L\\beta + d_1 \\sinh L\\beta = 0 \\\\ -d_3 \\cos L\\beta + d_1 \\cosh L\\beta + d_2 \\sin L\\beta + d_0 \\sinh L\\beta = 0 \\\\ \\end{cases} \\end{equation}\nGreat. Four unknowns, four equations. 
We can now figure out when a solution for \\(d_0, \\dots d_3\\) exists (or go about solving it, but turns out that\u0026rsquo;s significantly harder and wildly useless.)\nI will spare you the pages of route algebra needed to figure out when a solution exists. Suffice to say its lots of trig identities.\nBut, the satisfying conclusion is that, given the equations above, a solution exists for \\(d_0 \\dots d_3\\) (read: a mode for the beam exists), when:\n\\begin{equation} \\cos L\\beta \\cdot \\cosh L\\beta +1 = 0 \\end{equation}\nSo, any valid solutions for the expression \\(\\cos x \\cdot \\cosh x + 1 = 0\\) will be a valid product between \\(L\\beta\\). We can use this information to figure out the right frequencies by then solving for \\(f\\) embedded in \\(\\beta\\).\nSo, onto solving for \\(\\cos L\\beta \\cdot \\cosh L\\beta +1 = 0\\).\nWe again give up and ask a computer to do it.We will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\nintervals = [jj*pi for jj in range(0, 5)] intervals [0, pi, 2*pi, 3*pi, 4*pi] We will now declare \\(x=L\\beta\\), and create a nonlinear expression in it:\nx = var(\u0026#34;x\u0026#34;) characteristic_eqn = 1 + cos(x)*cosh(x) == 0 characteristic_eqn cos(x)*cosh(x) + 1 == 0 Root finding time!\ncharacteristic_solutions = [characteristic_eqn.find_root(i,j) for (i,j) in zip(intervals,intervals[1:])] characteristic_solutions [1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457] These are possible candidate values for \\(L\\beta\\). 
We will declare these values \\(s\\).\nSo, we now have that:\n\\begin{equation} L \\beta = s \\end{equation}\nSubstituting back our original definition for \\(\\beta\\), we have that:\n\\begin{equation} L \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s \\end{equation}\nNow, we will try to get \\(f\\) by itself:\n\\begin{align} \u0026amp;L \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s \\\\ \\Rightarrow\\ \u0026amp; \\sqrt{2\\pi f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}} \\\\ \\Rightarrow\\ \u0026amp; 2\\pi f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\end{align}\nFinally, we have that \\(I = \\frac{1}{12} bh^{3}\\) for a rectangular prism; and that linear density is cross-sectional area times volumetric density \\(\\mu = \\rho \\cdot bh\\). Making these substitutions:\n\\begin{align} f \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\ \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Ebh^{3}}{12 \\rho bh})^{\\frac{1}{2}} \\\\ \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Eh^{2}}{12 \\rho})^{\\frac{1}{2}} \\\\ \\end{align}\nWithout even getting to the frequency-based payoff, we immediately notice two takeaways.\nThe frequency of our fork is inversely proportional to length (i.e. \\(f = \\frac{1}{L^{2}}\\dots\\)) The first overtone of the tuning fork is \\(s^{2} = (\\frac{4.694}{1.875})^{2} \\approx 6.27\\) times higher than the fundamental\u0026mdash;meaning its significantly higher energy so it dissipates significantly faster; it is also not an integer multiple, which means its much less likely to be confused to be a harmonic; making a tuning fork essentially a pure-frequency oscillator Given equal conditions, only the thickness in one dimension (the one perpendicular to the bending axis) matters But, enough idling, onto our main event. 
Using standard reference values for aluminum, as well as our measured length and thickness of a \\(C\u0026rsquo;\\ 512hz\\) tuning fork, we have that\n# measured values---- # thickness h = 0.0065 # meters # length L0 = 0.09373 # meters L1 = 0.08496 # meters # theoretical values--- # elastic modulus E = 46203293995 # pascals = kg/m^2 # density p = 2597 # kg/m^3 # our solved characteristic value (s) # mode to index nth_mode = 0 s = characteristic_solutions[nth_mode] zero = (((s^2)/(2*pi*L0^2))*((E*h^2)/(12*p))^(1/2)).n() one = (((s^2)/(2*pi*L1^2))*((E*h^2)/(12*p))^(1/2)).n() zero, one mean([zero,one]) (504.123425101814, 613.571395642254) 558.847410372034 Close enough for a night. Thank you sorry about everything.\ntemperature # mode to index nth_mode = 0 # s = characteristic_solutions[nth_mode] s = var(\u0026#34;s\u0026#34;) # change to L and h (distances measures) by increases in degrees C d(t,x) = (2.4e-5)*x rho(t,x) = ((2.4e-5))*x a,b = var(\u0026#34;a b\u0026#34;) # a = -3.9 # b = 0.0033 Ed(t) = ((a*b)*e^(b*t))*1e9 f(E, L, h, p) = (((s^2)/(2*pi*L^2))*((E*h^2)/(12*p))^(1/2)) E,L,h,p = var(\u0026#34;E L h p\u0026#34;) # (a * b * /g)c t = var(\u0026#34;t\u0026#34;) # diff(t, dt, E,L,h,p) = sqrt((f.diff(E)*Ed(t)*dt)^2 + (f.diff(E)*Ed(t)*dt)^2 + (f.diff(L)*d(t)*dt)^2 + (f.diff(h)*d(t)*dt)^2) # diff(10, 1, 42661456706, 0.09833, 0.00643, 2545.454545).n() subdict = { a: -3.9, b:0.0033, L:0.09833, h:0.00643, p:2545.454545, E:42661456706, s:characteristic_solutions[nth_mode], t:50 } (f.diff(E)*Ed(t)).subs(subdict).full_simplify().n() (f.diff(L)*d(t,L)).subs(subdict).full_simplify().n() (f.diff(h)*d(t,h)).subs(subdict).full_simplify().n() (f.diff(p)*rho(t,p)).subs(subdict).full_simplify().n() -0.0782394489394635 -0.0211103561467420 0.0105551780733710 -0.00527758903668550 expansion = var(\u0026#34;x\u0026#34;) l,w,h,m = var(\u0026#34;l w h m\u0026#34;) density(l,w,h) = (l*w*h)/m density.diff(l)*expansion + density.diff(w)*expansion + density.diff(h)*expansion (l, w, h) 
|--\u0026gt; h*l*x/m + h*w*x/m + l*w*x/m ","html":"\u003cp\u003e\u003cstrong\u003eDivide by \\(2\\pi\\)\u003c/strong\u003e, or, how I learned to start worrying and hate Fourier Transforms.\u003c/p\u003e\n\u003cp\u003eHello all. Good news first: our frequency theory is now correctly validated by data.\u003c/p\u003e\n\u003cp\u003eIf you want a band-aid for the answer, here it is: \u003cstrong\u003edivide everything we get out of the cantilever equations by \\(2\\pi\\)\u003c/strong\u003e; then, use the \u003cstrong\u003ecorrect\u003c/strong\u003e linear mass density: our Google sheets was off by a factor of almost \\(4\\) because of later-corrected apparent measurement error.\u003c/p\u003e\n\u003cp\u003eThe bad news? You get pages of algebra to justify how, while getting a whole run-down of our entire theory so far for kicks.\u003c/p\u003e\n\u003cp\u003eOur story begins at what popped out of the other end of the Euler-Lagrange equations (if you want the start of the Lagrangian analysis, read \u003ca href=\"https://drive.google.com/file/d/182FLTSs2DziJcY4rnZkcTmBUwB3ek_Nv/view?usp=sharing\"\u003ethis from Mark\u003c/a\u003e, and plug the resulting Lagrangian into the Euler-Lagrange equation of the right shape.) But, either way, out will pop this fourth-order partial differential equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(w(x,t)\\) is a function of displacement by location by time, \\(E\\) the elastic modulus, and \\(I\\) the second moment of bending area.\u003c/p\u003e\n\u003cp\u003eNow, fourth-order diffequs are already pain. PARTIAL forth order diffequs sounds darn near impossible. 
Wikipedia, to their credit, helpfully suggests the following to help tackle this problem:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-18_00-15-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eYou see, as we are trying to isolate possible individual frequencies, it make sense to essentially run a Fourier Transform on our algebra, to get the possible amplitude at each frequency \\(\\hat{w}(x)\\), given some frequency \\(\\omega\\) (no idea why they use \\(\\omega\\), I will use \\(f\\) for the rest of this article.)\u003c/p\u003e\n\u003cp\u003eTo perform this analysis, Wikipedia suggests that we substitute our \\(w(x,t)\\) with its Fourier Definition, which is written as a function of the Fourier-decomposed version of the function \\(\\hat{w}(x)\\) (real component only, as imaginary pairs serve only as the conjugate), and then re-isolate those decomposed \\(\\hat{w}(x)\\). In this way, we get rid of the time dimension as sine waves oscillate \u003cem\u003ead infinium\u003c/em\u003e. Makes total sense.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eEXCEPT WHAT WIKIPEDIA GAVE ABOVE TO SUBSTITUTE IN ISN\u0026rsquo;T THE CORRECT FOURIER DECOMPOSITION\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s the actual Fourier Transform intergral:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-18_00-21-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere, \\(\\zeta=\\omega=f\\) , \\(f(x) = \\hat{w}(x)\\).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eWHAT DO YOU NOTICE? AN EXTRA \\(2\\pi\\).\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eTHIS ALSO MEANS THAT THE FREQUENCY ANALYSTS IN THE REST OF THAT WIKI ARTICLE IS WRONG\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eOk. 
I collect myself.\u003c/p\u003e\n\u003cp\u003eSo, we \u003cem\u003enow\u003c/em\u003e have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(x,t) = Re\\qty[\\hat{w}(x)e^{-i 2\\pi ft}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that we are trying to substitute this into\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI\\pdv[4]{w}{x} = -\\mu \\pdv[2]{w}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking two derivatives of the above Fourier decomposition equation by \u003cem\u003etime\u003c/em\u003e (which is the dimension we are trying to get rid of to make the diffequ not partial), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\pdv[2]{w(x,t)}{t} \u0026amp;= \\pdv[2] t Re\\qty[\\hat{w}(x)e^{-i2\\pi ft}] \\\\\n\u0026amp;= Re\\qty[\\hat{w}(x)\\pdv[2] t e^{-i2\\pi ft}] \\\\\n\u0026amp;= Re\\qty[\\hat{w}(x)(2\\pi f)^{2} \\cdot e^{-i2\\pi ft}\\dots]\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, given we are only dealing with the real components of these things, everything on the \\(e^{-i\\dots }\\) part of the function wooshes away, cleanly leaving us with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{w(x,t)}{t} \u0026amp;= \\hat{w}(x)(2\\pi f)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYay! No longer differential. Substituting that into our original expression, and making the partials not partial anymore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI\\pdv[4]{w}{x} + \\mu \\hat{w}(x)(2\\pi f)^{2}= 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExcellent. Now, onto solving this. The basic way to solve this is essentially to split the fourth-order differential into a 4x4 matrix, each one taking another derivative of the past. Then, to get a characteristic solution, you take its eigenvalues.\u003c/p\u003e\n\u003cp\u003eBut instead of going about doing that, I\u0026rsquo;m going to give up and ask a computer. 
In the code, I am going to substitute \\(p\\) for \\(2\\pi\\) temporarily because FriCAS gets a little to eager to convert things into their sinusoidal forms if we leave it as \\(2\\pi\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E I u f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x L\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;w\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;p\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_C0 _C1 _C2 _C3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-f^2*p^2*u*w(x) + E*I*diff(w(x), x, x, x, x) == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elatex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n\\hat{w}(x) = _{C_{1}} e^{\\left(\\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} \\sqrt{2\\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} \\sqrt{2 \\pi} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, so we have that each component solution is a combination of a bunch of stuff, times \\(\\pm i\\) or \\(\\pm 1\\). We are going to declare everything that\u0026rsquo;s invariant in the exponent to be named \\(\\beta\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta := \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd given this, we can then write the general solution for displacement by location (\\(w\\)) determined above more cleanly as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{w}(x) = _{C_{1}} e^{\\beta x} + _{C_{0}} e^{ \\beta ix} + _{C_{2}} e^{-\\beta ix} + _{C_{3}} e^{-\\beta x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will make one more substitution\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. 
We \u003cem\u003eknow\u003c/em\u003e this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith a new set of scaling constants \\(d_0\\dots d_3\\) (because these substitutions essentially ignore any factors its being multiplied, but we don\u0026rsquo;t actually care about modeling amplitude with these expressions anyways, so we can just change the arbitrary initial-conditions scalars on the fly), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(\\hat{w}(x)\\) at a specific frequency \\(f\\) can be written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_0\\cosh \\beta x +d_1\\sinh \\beta x +d_2\\cos \\beta x +d_3\\sin \\beta x = \\hat{w}(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some arbitrary initial conditions \\(d_0\\dots d_3\\). \u003cem\u003eSignificantly\u003c/em\u003e cleaner.\u003c/p\u003e\n\u003cp\u003eSo, what frequencies will our fork oscillate at? Well, a \u003cstrong\u003emode\u003c/strong\u003e for our fork is any set of \\(d_0 \\dots d_3\\) for which a solution for \\(\\hat{w}(x)\\) exists given our constants.\u003c/p\u003e\n\u003cp\u003eAs it stands right now, it seems like we have four unknowns (\\(d_0 \\dots d_3\\)) but only one equation to solve with. 
That\u0026rsquo;s no bueno.\u003c/p\u003e\n\u003cp\u003eEnter our initial conditions:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-10_13-38-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe top line states that: at \\(x=0\\), the bottom of the fork, our beam does not travel away from its natural axis (yes, because its a solid hunk of metal connected to the base), and it does not deflect (slope).\u003c/p\u003e\n\u003cp\u003eThe bottom line stats that: at \\(x=L\\), the top of the fork is straight (which is true, the tip-top of the fork does indeed not bend, only the middleish parts bend.)\u003c/p\u003e\n\u003cp\u003eSo, to get at the hidden system of four elements, we will take some derivatives of our original \\(\\hat{w}(x)\\) equation by \\(x\\), as prescribed by our initial conditions.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, we have a system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003econds\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(d0 + d2 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e b*d1 + b*d3 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOk so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 + d_2 = 0 \\\\\nd_1 + d_3 = 0 \\\\\n-d_2 \\cos L\\beta + d_0 \\cosh L\\beta - d_3 \\sin L\\beta + d_1 \\sinh L\\beta = 0 \\\\\n-d_3 \\cos L\\beta + d_1 \\cosh L\\beta + d_2 \\sin L\\beta + d_0 \\sinh L\\beta = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. Four unknowns, four equations. We can now figure out when a solution for \\(d_0, \\dots d_3\\) exists (or go about solving it, but turns out that\u0026rsquo;s significantly harder and wildly useless.)\u003c/p\u003e\n\u003cp\u003eI will spare you the pages of route algebra needed to figure out when a solution exists. 
Suffice to say its lots of trig identities.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-18_00-47-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eBut, the satisfying conclusion is that, given the equations above, a solution \u003cem\u003eexists\u003c/em\u003e for \\(d_0 \\dots d_3\\) (read: a \u003cstrong\u003emode\u003c/strong\u003e for the beam exists), when:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos L\\beta \\cdot \\cosh L\\beta +1 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, any valid solutions for the expression \\(\\cos x \\cdot \\cosh x + 1 = 0\\) will be a valid product between \\(L\\beta\\). We can use this information to figure out the right frequencies by then solving for \\(f\\) embedded in \\(\\beta\\).\u003c/p\u003e\n\u003cp\u003eSo, onto solving for \\(\\cos L\\beta \\cdot \\cosh L\\beta +1 = 0\\).\u003c/p\u003e\n\u003cp\u003eWe again give up and ask a computer to do it.We will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ejj\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0, pi, 2*pi, 3*pi, 4*pi]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now declare \\(x=L\\beta\\), and create a nonlinear expression in it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ecos(x)*cosh(x) + 1 == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRoot finding time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efind_root\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1.8751040687120917, 4.6940911329739246, 7.854757438237603, 
10.995540734875457]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThese are possible candidate values for \\(L\\beta\\). We will declare these values \\(s\\).\u003c/p\u003e\n\u003cp\u003eSo, we now have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL \\beta = s\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting back our original definition for \\(\\beta\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we will try to get \\(f\\) by itself:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;L \\sqrt{2\\pi f} \\qty(\\frac{u}{EI})^{\\frac{1}{4}} = s \\\\\n\\Rightarrow\\ \u0026amp; \\sqrt{2\\pi f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}} \\\\\n\\Rightarrow\\ \u0026amp; 2\\pi f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinally, we have that \\(I = \\frac{1}{12} bh^{3}\\) for a rectangular prism; and that linear density is cross-sectional area times volumetric density \\(\\mu = \\rho \\cdot bh\\). Making these substitutions:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nf \u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{EI}{u})^{\\frac{1}{2}} \\\\\n\u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Ebh^{3}}{12 \\rho bh})^{\\frac{1}{2}} \\\\\n\u0026amp;= \\frac{s^{2}}{2\\pi L^{2}} \\qty(\\frac{Eh^{2}}{12 \\rho})^{\\frac{1}{2}} \\\\\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWithout even getting to the frequency-based payoff, we immediately notice two takeaways.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThe frequency of our fork is inversely proportional to length (i.e. 
\\(f = \\frac{1}{L^{2}}\\dots\\))\u003c/li\u003e\n\u003cli\u003eThe first overtone of the tuning fork is \\(s^{2} = (\\frac{4.694}{1.875})^{2} \\approx 6.27\\) times higher than the fundamental\u0026mdash;meaning its significantly higher energy so it dissipates significantly faster; it is also not an integer multiple, which means its much less likely to be confused to be a harmonic; making a tuning fork essentially a pure-frequency oscillator\u003c/li\u003e\n\u003cli\u003eGiven equal conditions, only the thickness in one dimension (the one perpendicular to the bending axis) matters\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBut, enough idling, onto our main event. Using standard reference values for aluminum, as well as our measured length and thickness of a \\(C\u0026rsquo;\\ 512hz\\) tuning fork, we have that\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# measured values----\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# thickness\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0065\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# meters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# length\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eL0\u003c/span\u003e 
\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.09373\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# meters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eL1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.08496\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# meters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# theoretical values---\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# elastic modulus\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e46203293995\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals = kg/m^2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# density\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2597\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e# our solved characteristic value (s)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL0\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eone\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eone\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eone\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(504.123425101814, 613.571395642254)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e558.847410372034\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eClose enough for a night. 
Thank you \u003cdel\u003esorry\u003c/del\u003e about everything.\u003c/p\u003e\n\u003ch2 id=\"temperature\"\u003etemperature\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# s = characteristic_solutions[nth_mode]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;s\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# change to L and h (distances measures) by increases in degrees C\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.4e-5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.4e-5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;a b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# a = 
-3.9\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# b = 0.0033\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e9\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E L h p\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# (a * b * /g)c\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;t\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# diff(t, dt, E,L,h,p) = sqrt((f.diff(E)*Ed(t)*dt)^2 + (f.diff(E)*Ed(t)*dt)^2 + (f.diff(L)*d(t)*dt)^2 + (f.diff(h)*d(t)*dt)^2)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# diff(10, 1, 42661456706, 0.09833, 0.00643, 2545.454545).n()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.0033\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.09833\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.00643\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2545.454545\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e42661456706\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e50\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubdict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-0.0782394489394635\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-0.0211103561467420\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0105551780733710\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e-0.00527758903668550\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;l w h m\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edensity\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpansion\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(l, w, h) |--\u0026gt; h*l*x/m + h*w*x/m + l*w*x/m\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhdivide_by_2pi/","tags":null,"title":"Divide by 2pi"},{"categories":null,"contents":"Dopamine optical sensor. When dopamine is bound, it floreses and can detect micromolar changes and dopamine concentration.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdopamine/\"\u003eDopamine\u003c/a\u003e optical sensor. When dopamine is bound, it floreses and can detect micromolar changes and dopamine concentration.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdlight_1/","tags":null,"title":"dLight 1"},{"categories":null,"contents":"See also Software Development Methodologies\ndocumentation Comments Readme Wiki specification UX UI High-Level Architecture (libraries, external APIs) Low-Level Architecture (modules, functions, internal APIs) commenting Almost anything hardcoded (constants, strings, etc.) Anything confusing, tricky, nonstandard Historical notes: if something is added/removed, write it down TODO for bugs or hacks README Files Best used as a quick-start guide What are key pieces of info they will pieces of info they will need? What is your code supposed to do? How does someone run your code? 
How does a new engineer get set up? General overview of how things are laid out, with links to wiki pages with details Wiki In-depth explanation of subsystems and modules Separate pages for each subsystem Include decisions of their design decisions Discussions of why systems are not designed differently UI/UX Spec How do we know what the software is supposed to do? Varying levels of resolution User stories All the way up to granular details of UI elements Don\u0026rsquo;t forgot to document defaults!\n","html":"\u003cp\u003eSee also \u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eSoftware Development Methodologies\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"documentation\"\u003edocumentation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eComments\u003c/li\u003e\n\u003cli\u003eReadme\u003c/li\u003e\n\u003cli\u003eWiki\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"specification\"\u003especification\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUX\u003c/li\u003e\n\u003cli\u003eUI\u003c/li\u003e\n\u003cli\u003eHigh-Level Architecture (libraries, external APIs)\u003c/li\u003e\n\u003cli\u003eLow-Level Architecture (modules, functions, internal APIs)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"commenting\"\u003ecommenting\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAlmost anything hardcoded (constants, strings, etc.)\u003c/li\u003e\n\u003cli\u003eAnything confusing, tricky, nonstandard\u003c/li\u003e\n\u003cli\u003eHistorical notes: if something is added/removed, write it down\u003c/li\u003e\n\u003cli\u003eTODO for bugs or hacks\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"readme-files\"\u003eREADME Files\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eBest used as a quick-start guide\u003c/li\u003e\n\u003cli\u003eWhat are key pieces of info they will pieces of info they will need?\n\u003cul\u003e\n\u003cli\u003eWhat is your code supposed to do?\u003c/li\u003e\n\u003cli\u003eHow does someone run your code?\u003c/li\u003e\n\u003cli\u003eHow does a 
new engineer get set up?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eGeneral overview of how things are laid out, with links to wiki pages with details\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"wiki\"\u003eWiki\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIn-depth explanation of subsystems and modules\u003c/li\u003e\n\u003cli\u003eSeparate pages for each subsystem\u003c/li\u003e\n\u003cli\u003eInclude decisions of their design decisions\u003c/li\u003e\n\u003cli\u003eDiscussions of why systems are not designed differently\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ui-ux-spec\"\u003eUI/UX Spec\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHow do we know what the software is \u003cem\u003esupposed\u003c/em\u003e to do?\u003c/li\u003e\n\u003cli\u003eVarying levels of resolution\n\u003cul\u003e\n\u003cli\u003eUser stories\u003c/li\u003e\n\u003cli\u003eAll the way up to granular details of UI elements\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDon\u0026rsquo;t forgot to document defaults!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdocumentation_and_specification/","tags":null,"title":"Documentation and Specification"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdopamine/","tags":null,"title":"dopamine"},{"categories":null,"contents":"The dopamine circuitry in NF1.\nGenetically encoded \u0026ldquo;sensors\u0026rdquo; to measure circuits.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdopamine/\"\u003edopamine\u003c/a\u003e circuitry in \u003ca href=\"\"\u003eNF1.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eGenetically encoded \u0026ldquo;sensors\u0026rdquo; to measure circuits.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdopamine_circuitry_in_nf1/","tags":null,"title":"dopamine circuitry in NF1"},{"categories":null,"contents":"There is extreme noise in the patient annotations: for instance, in their health rhythm labels 
there\u0026rsquo;s around 20% contradictions in the dataset.\naccurate diagnosis limiting \u0026ldquo;domain rule violations\u0026rdquo; approach take your dataset, and validate rules IF validation is successful, train model normally with that sample IF the validation is unsuccessful, then use the output samples as negative examples ","html":"\u003cp\u003eThere is \u003cstrong\u003eextreme noise\u003c/strong\u003e in the patient annotations: for instance, in their health rhythm labels there\u0026rsquo;s around 20% contradictions in the dataset.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eaccurate diagnosis\u003c/li\u003e\n\u003cli\u003elimiting \u0026ldquo;domain rule violations\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"approach\"\u003eapproach\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etake your dataset, and validate rules\u003c/li\u003e\n\u003cli\u003eIF validation is successful, train model normally with that sample\u003c/li\u003e\n\u003cli\u003eIF the validation is unsuccessful, then use the output samples as negative examples\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdost/","tags":null,"title":"DOST"},{"categories":null,"contents":"The dot product is a property of real vector spaces which is a simplified version of an inner product; specifically, it obviates the need to complex-conjugate anything because, well, \\(\\bar{n} = n, n \\in \\mathbb{R}\\). The dot-product also yield a real number.\nconstituents \\(x, y \\in \\mathbb{R}^{n}\\) (NOTE the realness) where, \\(x = (x_1, \\dots, x_{n})\\) and \\(y = (y_1, \u0026hellip;, y_{n})\\) requirements As we are familiar with, element-wise product and sum\n\\begin{equation} x\\cdot y = x_1y_1 + \\dots + x_{n}y_{n} \\end{equation}\nadditional information properties of the dot product For fixed \\(y \\in \\mathbb{R}^{n}\\), the dot product map that sends \\(x\\) to \\(x \\cdot y\\) is linear (inheriting add. and homo. 
from algebra) \\(x \\cdot x = 0\\) IFF \\(x =0\\) (no negs allowed (above), so every slot has to have a zero to multiply to 0) \\(x \\cdot x \u0026gt; 0\\) for all \\(x \\in \\mathbb{R}^{n}\\) (neg times neg is pos) \\(x \\cdot y = y \\cdot x\\) for reals; by inheriting from each element\u0026rsquo;s field orthogonality test The dot product is an orthogonality test. If the dot product between the two vectors is \\(0\\), they are definitely orthogonal.\ngeometric interpretation of the dot product Well, we have some shape between two vectors; then, we can first write out the law of cosines. Then, we can see that, for two vectors from the same origin, we can say that the projection of vector \\(\\vec{A}\\) onto \\(\\vec{B}\\) is written as:\n\\begin{equation} |\\vec{A}||\\vec{B}|\\cos \\theta \\end{equation}\nwhere, \\(\\theta\\) is the angle between the two vectors.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e is a property of \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ereal vector space\u003c/a\u003es which is a simplified version of an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e; specifically, it obviates the need to complex-conjugate anything because, well, \\(\\bar{n} = n, n \\in \\mathbb{R}\\). 
The dot-product also yield a real number.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x, y \\in \\mathbb{R}^{n}\\) (NOTE the realness)\n\u003cul\u003e\n\u003cli\u003ewhere, \\(x = (x_1, \\dots, x_{n})\\) and \\(y = (y_1, \u0026hellip;, y_{n})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eAs we are familiar with, element-wise product and sum\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\\cdot y = x_1y_1 + \\dots + x_{n}y_{n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-the-dot-product\"\u003eproperties of the dot product\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eFor fixed \\(y \\in \\mathbb{R}^{n}\\), the dot product map that sends \\(x\\) to \\(x \\cdot y\\) is linear (inheriting add. and homo. from algebra)\u003c/li\u003e\n\u003cli\u003e\\(x \\cdot x = 0\\) IFF \\(x =0\\) (no negs allowed (above), so every slot has to have a zero to multiply to 0)\u003c/li\u003e\n\u003cli\u003e\\(x \\cdot x \u0026gt; 0\\) for all \\(x \\in \\mathbb{R}^{n}\\) (neg times neg is pos)\u003c/li\u003e\n\u003cli\u003e\\(x \\cdot y = y \\cdot x\\) for reals; by inheriting from each element\u0026rsquo;s \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"orthogonality-test\"\u003eorthogonality test\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e is an \u003ca href=\"#orthogonality-test\"\u003eorthogonality test\u003c/a\u003e. 
If the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e between the two vectors is \\(0\\), they are definitely orthogonal.\u003c/p\u003e\n\u003ch3 id=\"geometric-interpretation-of-the-dot-product--kbhdot-product-dot-md\"\u003egeometric interpretation of the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eWell, we have some shape between two vectors; then, we can first write out the \u003ca href=\"/posts/kbhlaw_of_cosines/\"\u003elaw of cosines\u003c/a\u003e. Then, we can see that, for two vectors from the same origin, we can say that the projection of vector \\(\\vec{A}\\) onto \\(\\vec{B}\\) is written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|\\vec{A}||\\vec{B}|\\cos \\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\theta\\) is the angle between the two vectors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdot_product/","tags":null,"title":"dot product"},{"categories":null,"contents":"One envelope has 10 times the money in the other money.\nWLOG let \\(x\\) be the envelope in Cary\u0026rsquo;s hand. The money in \\(y\\), then, \\(y = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x) = 0.05x+5x = 5.05x\\). Wat.\nBasically; regardless if Cary took the envelope \\(x\\) or \\(y\\), the other envelope is expected to have \\(5\\times\\) more money. What.\nWhy? There\u0026rsquo;s a bug in this:\n\\begin{equation} y = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x) \\end{equation}\nis not true! There is a human PRIOR BELIEF!! Its very unlikely that mykel/chris put 10000 dollars into an envelope; so each individual amount in an envelope has an exogenous probability of it happening!\n","html":"\u003cp\u003eOne envelope has 10 times the money in the other money.\u003c/p\u003e\n\u003cp\u003eWLOG let \\(x\\) be the envelope in Cary\u0026rsquo;s hand. 
The money in \\(y\\), then, \\(y = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x) = 0.05x+5x = 5.05x\\). Wat.\u003c/p\u003e\n\u003cp\u003eBasically; regardless if Cary took the envelope \\(x\\) or \\(y\\), the \u003cem\u003eother\u003c/em\u003e envelope is expected to have \\(5\\times\\) more money. What.\u003c/p\u003e\n\u003ch2 id=\"why\"\u003eWhy?\u003c/h2\u003e\n\u003cp\u003eThere\u0026rsquo;s a bug in this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{1}{2}\\qty(\\frac{1}{10}x)+\\frac{1}{2}\\qty (10x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis not true! There is a human \u003cstrong\u003e\u003cstrong\u003ePRIOR BELIEF\u003c/strong\u003e\u003c/strong\u003e!! Its very unlikely that mykel/chris put 10000 dollars into an envelope; so each individual amount in an envelope has an exogenous probability of it happening!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdouble_envelope_problem/","tags":null,"title":"Double Envelope Problem"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdouble_progressive_widening/","tags":null,"title":"Double Progressive Widening"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdouble_slit_experiment/","tags":null,"title":"double slit experiment"},{"categories":null,"contents":"A human gene similar to the gene PreTA found in E. Coli, a bacterial found in microbiome. See effects of PreTA on Fluoropyrimidine, and by proxy Capecitabmine for implications on cancer treatment.\n","html":"\u003cp\u003eA human gene similar to the gene \u003ca href=\"\"\u003ePreTA\u003c/a\u003e found in \u003ca href=\"/posts/kbhe_coli/\"\u003eE. Coli\u003c/a\u003e, a bacterial found in \u003ca href=\"\"\u003emicrobiome\u003c/a\u003e. 
See effects of \u003ca href=\"\"\u003ePreTA\u003c/a\u003e on \u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e, and by proxy \u003ca href=\"/posts/kbhcapecitabmine/\"\u003eCapecitabmine\u003c/a\u003e for implications on cancer treatment.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdpyd/","tags":null,"title":"DPYD"},{"categories":null,"contents":"Gah I have to do this. Not for public consumption. California laws 2022 DL600 R7 2022.\nConsequences Not licensed If unlicensed person is drivnig your car, it maybe impounded for 30 days Hired to drive interstate commercially need to be older than 21, also need to be older than 21 to transport hazardous materials Class C License Driving #knw Two axle vehicle with a GVWL of 26,000 lbs or less Three axle vehicle weighing 6,000 lbs or less House car \u0026lt; 40 feet or less Three wheel motocycles Vanpool vehicle designed to carry between 10 and no more than 15 people Towing #knw Single vehicle of 10,000 or less Vehicle weighing 4000 lbs or more unladen Trailer coach under 10,000 lbs Fifth wheel trailer exceeding 10,000 lbs but under 15,000 lbs, with endorsement Mor ethings Class C drivers can\u0026rsquo;t tow more than one Motor vehile weigning under 4000 lbs cannot tow more than 6000 lbs Getting in trouble Get a traffic ticket and fail to show up to court: suspend driving One at fault collision or one at fault traffic violation: may take action? Two of either at fault collision or violation conviction: no driving for 30 days unless accompanied by 25 year old adult Three of \u0026ldquo;\u0026rdquo;: no driving for 6 months, on probation for a year. 
Drugs or alcohol between 13-21: suspension for a year Minor driving Not sure if this applies\nPractice for 50 hours, 10 hours at night #knw\nPass knowledge test\nPass driving test\nCannot drive between 11P and 5A during the first year #knw\nCannot drive with under 20 Y/O unless 25 Y/O licensed accompanied #knw\nUnless\u0026mdash;\nMedical need with doctor\u0026rsquo;s note and end date School and dean\u0026rsquo;s note Work and employer\u0026rsquo;s note and employment status Family need and parent\u0026rsquo;s note Minors can\u0026rsquo;t use a phone while driving.\nSafe car #knw Working driver\u0026rsquo;s window, brake lights, horn, parking brake, turn signals Safe tire (1/32 inch tread) Full windshield Two rear view mirrors, incl. one on left side Working seatbelts Check: clean windows and mirrors, adjust seat and mirrors, check tires.\nSafe personage Vision Hearing Not tired Not medicated Health: no Lapses of conciseness AD \u0026ldquo;related disorders\u0026rdquo; \u0026mdash; anything the doctor reports to DMV Steering Hand to Hand hands 9/3 or 8/4 oclock Push and pull, hands stay where they are Hand over hand Start 9/3 or 8/4 Turn, but leave wheel sliding under Sliding under hand reach over, pull the wheel up One-hand Turning or backing up to turn back Hand at 12 oclock Limeted use Signaling Arm signals when lights are hard to see because of bright sun\nMotorcyclists use these signals, and bikers point their hand straight up to turning direction\nWhen to signal #knw Signal when: turn, change lanes, slow down, stop.\n100 feet before turning Before every lane change: look over and check blind spot 5 seconds before lane change on highway Pulling next to or away curb Signal even if no cars around you Horning \u0026ldquo;It is safer to slow down or stop instead of honking your horn.\u0026rdquo;\nWhen to horn #knw Avoid collisions Alert hazard Alert oncoming traffic on narrow mountain roads when you cannot see at least 200 feet in front of vehicle 
Don\u0026rsquo;t use horn to move people along, or \u0026ldquo;express anger.\u0026rdquo; The more ya know.\nHeadlights They are bright.\nWhen to headlight #knw When its too dark to see: if you can\u0026rsquo;t see a person 1000 feet away Beginning 30 minutes after sunset until 30 minutes before sunrise Adverse weather: windshield wipers on = low-beam headlights on Clouds dust smoke or fog prevent seeing other cars On sunny days on country or mountain roads When a white regulatory sign says so To help others see your car, when sun is low on horizon When not to high-beam headlight Dim when 500 feet of car coming towards you or 300 feet of a car you are following Emergency flashers If you can see a collision ahead, do:\nTurn on flashers #knw Lightly tap brake pedal three/four times Use hand signals How to stop in a middle of the road during an emergency #knw Start breaking early.\nGive drivers warning\nTurn on emergency flashers if you earn\u0026rsquo;t moving, or use turn signals\nPull off the road\nStop not on the road or, if isn\u0026rsquo;t possible, stop where people can see\nDon\u0026rsquo;t stop just over a hill\nLift the hook to signal an emergency\nPlace emergency triangles 200-300 feet behind vehicle; use flares if needed but be careful b/c they may cause fire\nCall for roadside assistance\n63, 92\nLanes! 
Reading \u0026rsquo;em Yellow: different directions Single yellow is the center of the road; cannot cross into oncoming traffic Double solid yellow line: not to be crossed \u0026hellip;except hov entrace lane which has a left entrance Instructed to cross because the road is blocked Entering or exiting a driveway, private road, or making a u-turn 2 double yellow line groups spaced 2 feet or more apart are considered a barrier; under no circumstance is to cross Broken yellow line: you may pass if the broken line is next to you White: same directions Single solid white line: traffic lanes in the same direction Double solid white lines: not to be crossed, regular use vs. preferential use lanes (carpool, etc.) Broken white lines: separate roads with two or more lines in the same direction White triangles: yield lines A line where you should yield. Triangles point to the direction of oncoming traffic (\u0026ldquo;towards you.\u0026rdquo;).\nChoosing \u0026rsquo;em Leftmost lane is lane 1, rightmost is lane n\nUse the left lane to pass or turn left Use the right lane to enter or exit traffic Change lanes when Moving from one lane to another Entering freeway Exiting freeway Entering the road from curb or shoulder Protocol for lane change #knw Signal Look in all mirrors Check traffic beside and behind you Look over solder in direction of desired lane change Check blind spots for other vehicles, motorcyclists, and bicycilsts Ensure room Tips stay in one lane don\u0026rsquo;t weave if you start a change, finish it Types of them Lane closest to the center divider is the \u0026ldquo;passing lane\u0026rdquo; HOV lanes is for high occupancy Center left turn lanes The center of some two-way streets has a left turn lane; marked on both sides by two painted lines. 
Inner line is broken and outer line is solid.\nYou may only drive 200 feet in the center left turn lane #knw\nProtocol for using this lane\nLook for other vehicles coming towards you in the center left turn lane Signal Look over shoulder Merge completely into the center left turn lane Turn when its safe. Turnouts Areas or lanes for turning that are marked? Use when:\nDriving slowly on a two-lane road where passing is unsafe, AND There are 5 or more vehicles following #knw Bike lanes Bike lanes\nBuffered bike lanes: uses chevrons or diagonals to buffer the bikes\nBike route: shared road markings to designate a preferred route\nBike boulevard: bike travel on streets with cars\nSeperated bikeways: completely different\nBikes share the road \u0026ldquo;sharrows!\u0026rdquo;\nCannot drive in bike lane unless\u0026hellip;.\nParking Entering or leaving road Turning (within 200 feet of intersection) Turning Right Drive close to the edge Drive in a bike lane, wait until about 200 feet to make turn #knw Watch for everybody Signal about 100 feet before #knw Look over sholder Stop behind limit line (/before entering crosswalk or intersection) Look both ways and turn when its safe; don\u0026rsquo;t turn into another lane Complete turn Details:\nCan\u0026rsquo;t turn when red arrow, but you can turn against red light You could cross a bus lane to make a right turn, but you can\u0026rsquo;t drive in it There could be designated right turn lanes which let you make a \u0026ldquo;free right turn\u0026rdquo; Left Drive close to the center divider or left turn lane Signal about 100 feed Look over sholder Stop behind limit line (/before entering crosswalk or intersection) Look left, right, then left Turn Details\nonly can turn against light when single-lane-to-single-lane U Conditions Across double-yellow line In a residential district No cars for 200 feet Whenever a sign or light protects against approachng cars At an intersection On a divided driveway, if opening provided 
Anticonditions WHen \u0026ldquo;no u-turn\u0026rdquo; is posted\nAt a railroad crossing\nOn a divided highway if needed to cross things\nCannot see 200 feet in each direction\nWhen other cars may hit you\nOn a one-way street\nIn front of a fire station\nooo. scary\nIn business districts, including churches apartments and buildings (except for schools); turn only at an intersection or opening if allowed. Merging Highways Enter at or near traffic speed Merge onto highway when safe to do so, don\u0026rsquo;t stop unless needed Merge into a space large enough for your car to join the lane Use mirrors and turn signals Watch for cars Leave three seconds of space (\u0026ldquo;three second rule\u0026rdquo;) between you and the car in front of you Exiting Know the exist Signal, look over sholder, etc. Change lanes Signal intention for 5 seconds Leave Space for entering You will need about a half a block on city streets Or, a full block on the highway Passing If anybody wants to pass, let them pass\nSpace for passing Don\u0026rsquo;t pass if\u0026hellip;\nYou are approaching a hill and cannot see oncoming traffic Within 100 feet of an intersection #knw At crossroads or driveways Condition of Passing You pass on the left, unless\u0026hellip;\nOpen highway with two or more lanes going in your direction Driver ahead of you is turning left, and you don\u0026rsquo;t have to drive off the road to pass You are on a one-way street Never drive off the road to pass.\nProtocol for passing Signal Shoulder Turn Speed up and pass Retturn Parking Find a space #knw three feet longer that your vehicle Turn on turn signal Pull up alongside the vehicle in front; leave about two feet between you and the car to your right. Stop when you rear bumper is aligned with the front of the space Check rearview mirror, look over sholder, keep foot on break and reverse Back up, 45% When rear view is within 18 inches from the curb, straighten out Set parking break. Leave when safe. 
Parking on a hill \u0026ldquo;Your car may roll when you breaks fail.\u0026rdquo;\nDownhill: wheels towards the curb Uphill: wheels away from curb No curb: turn towards the sholder of the road \u0026ldquo;towards the sholder, except when uphill with curb\u0026rdquo;\nColors White curb: stop for picking up or dropping off passengers or mails Green curb: park for limited time Yellow: load and unload, staying in the vehicle Red: no stopping Blue: disabled\u0026mdash;fine of $1,000, 6 months in county jail #knw Can\u0026rsquo;t park when No marking Unmarked or marked crosswalk Sidewalk, partially blocking sidewalk, or in front of driveway Within 3 feet of disabled sidewalk ramp #knw On diagnal lines next to disabled space Within 15 feet of a fire hydrant #knw Double parking On the wrong side of the street or freeway, except: 1) emergency 2) law enforcement officer 3) specificaly permitted stop.\nTo stop and park then, park off the pavement, stay with the car and lock the doors until help arrives; visibility is 200 feet in each direction required. 
#knw\nLights Flashing red: stop sign\u0026ndash;stop and go when its safe Flashing yellow: yield sign\u0026mdash;proceed with caution Flashing yellow arrow: unprotected turn Broken traffic lights become a four way stop sign.\nSigns Stop sign is stop; there should be a limit line; if no limit line, stop before intesection Yield sign is to yield; slow down Right of Way Without stop/yield signs Whomever gets to the intersection first has right of way T intersection without stop/yield signs The through road have right of way Stop signs Stop first, then follow right of way rules as if no intersection Turning left Right of way to anyone approaching that\u0026rsquo;s \u0026ldquo;close enough to be dangerous\u0026rdquo; Turning right Check for pedestrians crossing the street, and bikes and motors next to you Green light Pedestrians Divided highways Vehicles coming in the lane you are about to enter Entering traffic The traffic you are entering Roundabouts The logistics of using a roundabout\nSlow down Yield to traffic Watch for signs Travel in counter-clockwise direction, don\u0026rsquo;t stop or pass Signal when you change lanes or exit If you miss your exit, try again Choosing lane Rightmost for turning right Either lane (\u0026ldquo;middle\u0026rdquo;, if exists) for straight Innermost for left turn or u turn Pedestrians Pedestrians have right-of-way Pedestrian crossing need to cross first, you yield or slow to them Which means\u0026hellip;\nDo not pass a stopped vehicle Don\u0026rsquo;t drive on a sidewalk except to cross it or enter/exit it Don\u0026rsquo;t stop in a crosswalk If people make eyecontact, they are crossing the street Obey pedestrian\u0026rsquo;s signs Watch for seniors, people with disabilities, young children.\nCrosswalks Crosswalks are marked (but not all) School crossings have yellow lines Pedestrians have right of wall in all crosswalks Flashing light crosswalks exists to, just be prepared to stop regardless Blind White canes and guide dogs have 
absolute right of way Stop at all stop walks Don\u0026rsquo;t stop in the middle of stop walk Don\u0026rsquo;t give verbal directions to blind pedestrian Don\u0026rsquo;t turn right w/o looking for pedestrians Don\u0026rsquo;t honk at a blind person Don\u0026rsquo;t block sidewalk Pulling in cane + stepping away: you may go Mountain roads Uphill car has right of way Downhill car has more control backing up the hill Roadsharing Large cars Average passenter car at 55mph has 400 feet before stopping Large car takes 800 feet Don\u0026rsquo;t move in front of a large car and suddenly stop.\nLook at turn signals: large vehicles may swing their back, say, left in order to turn right.\nDon\u0026rsquo;t\nChange lanes in front of them to reach an exit or turn (tight spaces around large vehicles is dangerous) Drive next to them (unless passing); after you pass, move ahead of it Follow too closely: that\u0026rsquo;s tailgating. Give more space Underestimate the size and speed of the vehicle \u0026ldquo;If you can\u0026rsquo;t see a truck\u0026rsquo;s side mirrors, it can\u0026rsquo;t see you.\u0026rdquo;\nalways pass it on the left side\nBuses and rails when loading is happening without a safety zone, stop behind the nearest door Stopped busses can only be passed at 10mph Don\u0026rsquo;t pass on the left side, unless\u0026hellip; you are on a one-way street tracks are so close to the right you can\u0026rsquo;t pass on the right traffic officer directs you to Never turn in front of a light rail vehicle Check for traffic lights (light rails can interrupt them) Motocycles 4 second following distance Given a motocycle a full lane; its legal to share but its unsafe Don\u0026rsquo;t try to pass a motorcycle in the same lane When possible, move to one side of your lane Check for motocyclists Emergency vehicles Give them right of way: drive to the edge until they\u0026rsquo;ve passed \u0026hellip;except in intersections: never stop in an intersection (continue through and stop) Obey 
loudspeaker orders Illegal to follow 300 feet of any emergency vehicles with flashing siren Slow cars Slow down for them NEV LSV Like gold carts\nThey have max speed 25mph They can\u0026rsquo;t drive in roads with speed limit larger than 35 mph Bikes Front lamp with white light visible for 300 feet Rear red reflector (visible from 500 feet) White or yellow reflector on each pedal (visible for 200 feet) Travel lanes Must ride to the curb if slow, unless\nPassing in the same direction Preparing to turn left Avoiding a hazard/road condition Approaching right turn On a one way road with two or more lanes (if so, bikers may right next to left curb) Passing bikers 3 feet clearance\nSchool buses Yellow lights flashing is to slow Red lights flashing is to stop If you fail to stop, you can be fined up to $1,000 and driving maybe suspended for a year Workzone fines Traffic violations have fines of $1,000 or more Assulting a worker has a fine of $2,000 plus imprisonment for up to on year Some regions are double-fine zones Speed Limit \u0026ldquo;Basic speed law\u0026rdquo;: you may never drive faster than its safe.\n10mph to pas a roadcar\n15 mph in blind intersections (cannot see 100 in both directions when within 100 feet)\nif your view is blocked in a blind intersection, inch forward until you can see 15 mph also in some school, alleys (roads no wider 25 feet), 100 feet of railroad tracks if visiblity less then 400 feet\n25 mph when you are 500-1000 feet of a school, when crossing the street, residential\n55 mph on two lane undivided highway\nYou cannot block traffic flow\nDrive far-right lane of you are towing\nRailroad Look in both directions Except train anytime Don\u0026rsquo;t stop in traintracks Watch for other cars Stop between 15-50 feet from the neearest tracks Fines and Stuff Smoking with a minor: $100\nDumping animals: $1,000, six months in jail\nEvading law enforcement:\nstate prison up to 7 years, county jail for 1 year Fine between $2,000 and $10,000 Or both 
Evading law enforcement and commiting manslauter\nImprisonment for 4-10 years Speed content and reckless driving: fine and imprsionment\nTexting\nWear earplugs in bot hyears\nCarry anything that extends beyond the fenders on the left side, or more then 6 inches on the right side\nCargo more the 4 feet must display a 1 feet red or flourencesnt flag\nTransport animals unless secured\nAllow a person to be in a back of a pickup truck unless secured\nDrive a car with a video monitor except when it doesn\u0026rsquo;t face driver\nThrow a cig from the car\nCut signs that block the windshiled\nDon\u0026rsquo;t hang objects on the mirror\nDon\u0026rsquo;t sticker, unless\n7 inch square on lower corner of passengers or rear window 5 inch square on the lower corner of the driver window Side windows behind driver 5 inch located in the center uppermost portion Funeral pocessions have right of way\nPoints 36 month record Suspension when: 4 points in 12 months, 6 in 24, or 8 in 36 Once 18 months to earn back points via traffic school Best Practices Scan road 10-15 seconds ahead of you Don\u0026rsquo;t stare Don\u0026rsquo;t tailgate: 3 seconds between you and the car ahead passes Allow extra space when\u0026hellip; If you have a tailgator, (and move! 
if you can) The driver behind you wants to pass Slippery Following on icy or wet Towing a trailer Followiing a car that blocks you ahead Merging onto freeway Following Don\u0026rsquo;t stay in the blind spot Don\u0026rsquo;t driving alongside cars Make space when possible Keep space between you and parked cars Be careful when nearing motorcyclists and bicyclists At intersections Look both ways Look left first (vehicles coming from the left are closer) Look right Take one more look to the left 5-10mph on wet road, reduce speed by half on snow, tiny very slow on ice Don\u0026rsquo;t use breaks if starting to hydroplone If you can\u0026rsquo;t see farther than 100 feet, its unsafe to drive faster than 30mph Seat belts Click it or ticket Under 16 years old, you may also get ticket Child safety Under 2 years old: secure in a real facing child restraight system (unless child weighs more than 40 pounds or is more that 3 ft 4 inches taller)\nChilden under 8 years old, less than 4 feet 9 inches tall: secure in a front-facing restraight system\nCould use front seat if there\u0026rsquo;s no rear seat or if they are side facing jump seat\n8 years old or older, or 4 feet 9 inches tall: use seat belts\n6 y/o or younger unattended illegal to leave in car; supervision could be 12 year old.\nHot vehicle can kill\nEmergencies Skids Slippery surface Slowly remove foot from gas pedal Don\u0026rsquo;t use breaks Turn the steering wheel in the direction of the skid If your breaches get wet, dry them by pressing gas and brake at the same time.\nLock wheel Breaking too hard when going to fast: skid no matter steering wheel\nRemove foot from break Straighten front wheel If ABS not working, step on brake gradually until safe speed. 
If the brake petal sinks to the floor, bump the brakes.\nDriving off pavement Grip wheel slowly Remove your foot from gas Brake gently Check for traffic Steer back Accelerator mallfunction Shift to neutral Apply breakes Look for traffic Honk horn and emergency flashers Drive car off the road Turn of ignition Collision If collision causes more than $1000 in property damage, you msut report to DMV Driving is suspended for 4 years of no insurance Disabled Vehicle Safely pull over Exit on the right side Find assistance Return no vehicle Stay inside with your seat belt Uuse flashers Railroad If a train is coming, get out and run in a 45 degree away from the train and tracks. Dial 911 If train not coming, exit vehicle, dial emergency number on the railroad crossing box, and then call 911 DUI Don\u0026rsquo;t drink and drive Don\u0026rsquo;t take drugs Use any combination of drugs Illegal to drink alcohol or smoke or eat cannabis products while in a car, whether self or passenger. If you are carrying it, it must be full and unopened. If its open, keep it in the trunk.\nLimits 0.08% over 21 0.01% under 21 0.01% under DUI probation 0.04% if commercial 0.04% if driving for hire DUI Arrests Hold license for 30 days Hearing from 10 days DUI Convictions Completion of DUI program Install Ignition Interlock Device 6 months in jail $390-$1000 May inpound vehicle Carrying under 21 May not carry unless someone older Fine up to $1000 and impound for 30 days, suspencion for 1 year 0.01% or higher you have to complete program, 0.05% suspension ","html":"\u003cp\u003eGah I have to do this. Not for public consumption. 
California laws 2022 DL600 R7 2022.\u003c/p\u003e\n\u003ch2 id=\"consequences\"\u003eConsequences\u003c/h2\u003e\n\u003ch3 id=\"not-licensed\"\u003eNot licensed\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIf unlicensed person is drivnig your car, it maybe impounded for 30 days\u003c/li\u003e\n\u003cli\u003eHired to drive interstate commercially need to be older than 21, also need to be older than 21 to transport hazardous materials\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"class-c-license\"\u003eClass C License\u003c/h3\u003e\n\u003ch4 id=\"driving-knw\"\u003eDriving #knw\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eTwo axle vehicle with a GVWL of 26,000 lbs or less\u003c/li\u003e\n\u003cli\u003eThree axle vehicle weighing 6,000 lbs or less\u003c/li\u003e\n\u003cli\u003eHouse car \u0026lt; 40 feet or less\u003c/li\u003e\n\u003cli\u003eThree wheel motocycles\u003c/li\u003e\n\u003cli\u003eVanpool vehicle designed to carry between 10 and no more than 15 people\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"towing-knw\"\u003eTowing #knw\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSingle vehicle of 10,000 or less\u003c/li\u003e\n\u003cli\u003eVehicle weighing 4000 lbs or more unladen\n\u003cul\u003e\n\u003cli\u003eTrailer coach under 10,000 lbs\u003c/li\u003e\n\u003cli\u003eFifth wheel trailer exceeding 10,000 lbs but under 15,000 lbs, with endorsement\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"mor-ethings\"\u003eMor ethings\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eClass C drivers can\u0026rsquo;t tow more than one\u003c/li\u003e\n\u003cli\u003eMotor vehile weigning under 4000 lbs cannot tow more than 6000 lbs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"getting-in-trouble\"\u003eGetting in trouble\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGet a traffic ticket and fail to show up to court: suspend driving\u003c/li\u003e\n\u003cli\u003eOne at fault collision or one at fault traffic violation: may take 
action?\u003c/li\u003e\n\u003cli\u003eTwo of either at fault collision or violation conviction: no driving for 30 days unless accompanied by 25 year old adult\u003c/li\u003e\n\u003cli\u003eThree of \u0026ldquo;\u0026rdquo;: no driving for 6 months, on probation for a year.\u003c/li\u003e\n\u003cli\u003eDrugs or alcohol between 13-21: suspension for a year\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"minor-driving\"\u003eMinor driving\u003c/h2\u003e\n\u003cp\u003eNot sure if this applies\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ePractice for 50 hours, 10 hours at night #knw\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePass knowledge test\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePass driving test\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCannot drive between 11P and 5A during the first year #knw\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCannot drive with under 20 Y/O unless 25 Y/O licensed accompanied #knw\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eUnless\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMedical need with doctor\u0026rsquo;s note and end date\u003c/li\u003e\n\u003cli\u003eSchool and dean\u0026rsquo;s note\u003c/li\u003e\n\u003cli\u003eWork and employer\u0026rsquo;s note and employment status\u003c/li\u003e\n\u003cli\u003eFamily need and parent\u0026rsquo;s note\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMinors can\u0026rsquo;t use a phone while driving.\u003c/p\u003e\n\u003ch2 id=\"safe-car-knw\"\u003eSafe car #knw\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWorking driver\u0026rsquo;s window, brake lights, horn, parking brake, turn signals\u003c/li\u003e\n\u003cli\u003eSafe tire (1/32 inch tread)\u003c/li\u003e\n\u003cli\u003eFull windshield\u003c/li\u003e\n\u003cli\u003eTwo rear view mirrors, incl. 
one on left side\u003c/li\u003e\n\u003cli\u003eWorking seatbelts\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eCheck:\u003c/strong\u003e\u003c/strong\u003e clean windows and mirrors, adjust seat and mirrors, check tires.\u003c/p\u003e\n\u003ch2 id=\"safe-personage\"\u003eSafe personage\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eVision\u003c/li\u003e\n\u003cli\u003eHearing\u003c/li\u003e\n\u003cli\u003eNot tired\u003c/li\u003e\n\u003cli\u003eNot medicated\u003c/li\u003e\n\u003cli\u003eHealth: no\n\u003cul\u003e\n\u003cli\u003eLapses of conciseness\u003c/li\u003e\n\u003cli\u003eAD\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;related disorders\u0026rdquo; \u0026mdash; anything the doctor reports to DMV\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"steering\"\u003eSteering\u003c/h2\u003e\n\u003ch3 id=\"hand-to-hand\"\u003eHand to Hand\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ehands 9/3 or 8/4 oclock\u003c/li\u003e\n\u003cli\u003ePush and pull, hands stay where they are\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"hand-over-hand\"\u003eHand over hand\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStart 9/3 or 8/4\u003c/li\u003e\n\u003cli\u003eTurn, but leave wheel sliding under\u003c/li\u003e\n\u003cli\u003eSliding under hand reach over, pull the wheel up\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"one-hand\"\u003eOne-hand\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTurning or backing up to turn back\u003c/li\u003e\n\u003cli\u003eHand at 12 oclock\u003c/li\u003e\n\u003cli\u003eLimeted use\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"signaling\"\u003eSignaling\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eArm signals when lights are hard to see because of bright sun\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-19_15-43-18_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003cp\u003eMotorcyclists use these signals, and bikers point their hand straight up 
to turning direction\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"when-to-signal-knw\"\u003eWhen to signal #knw\u003c/h3\u003e\n\u003cp\u003eSignal when: turn, change lanes, slow down, stop.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e100 feet before turning\u003c/li\u003e\n\u003cli\u003eBefore every lane change: look over and check blind spot\u003c/li\u003e\n\u003cli\u003e5 seconds before lane change on highway\u003c/li\u003e\n\u003cli\u003ePulling next to or away curb\u003c/li\u003e\n\u003cli\u003eSignal even if no cars around you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"horning\"\u003eHorning\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;It is safer to slow down or stop instead of honking your horn.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"when-to-horn-knw\"\u003eWhen to horn #knw\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eAvoid collisions\u003c/li\u003e\n\u003cli\u003eAlert hazard\u003c/li\u003e\n\u003cli\u003eAlert oncoming traffic on narrow mountain roads when you cannot see at least 200 feet in front of vehicle\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDon\u0026rsquo;t use horn to move people along, or \u0026ldquo;express anger.\u0026rdquo; The more ya know.\u003c/p\u003e\n\u003ch2 id=\"headlights\"\u003eHeadlights\u003c/h2\u003e\n\u003cp\u003eThey are bright.\u003c/p\u003e\n\u003ch3 id=\"when-to-headlight-knw\"\u003eWhen to headlight #knw\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhen its \u003cstrong\u003e\u003cstrong\u003etoo dark to see\u003c/strong\u003e\u003c/strong\u003e: if you can\u0026rsquo;t see a person 1000 feet away\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eBeginning 30 minutes after sunset until 30 minutes before sunrise\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eAdverse weather\u003c/strong\u003e\u003c/strong\u003e: windshield wipers on = low-beam headlights 
on\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eClouds dust smoke\u003c/strong\u003e\u003c/strong\u003e or fog prevent seeing other cars\u003c/li\u003e\n\u003cli\u003eOn sunny days \u003cstrong\u003e\u003cstrong\u003eon country or mountain roads\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eWhen a \u003cstrong\u003e\u003cstrong\u003ewhite regulatory sign says so\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eTo help others see your car, when \u003cstrong\u003e\u003cstrong\u003esun is low on horizon\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"when-not-to-high-beam-headlight\"\u003eWhen not to high-beam headlight\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDim when 500 feet of car coming towards you or 300 feet of a car you are following\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"emergency-flashers\"\u003eEmergency flashers\u003c/h2\u003e\n\u003cp\u003eIf you can see a collision ahead, do:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTurn on flashers #knw\u003c/li\u003e\n\u003cli\u003eLightly tap brake pedal three/four times\u003c/li\u003e\n\u003cli\u003eUse hand signals\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-stop-in-a-middle-of-the-road-during-an-emergency-knw\"\u003eHow to stop in a middle of the road during an emergency #knw\u003c/h2\u003e\n\u003cp\u003eStart breaking early.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eGive drivers warning\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTurn on emergency flashers if you earn\u0026rsquo;t moving, or use turn signals\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePull off the road\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eStop not on the road or, if isn\u0026rsquo;t possible, stop where people can see\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDon\u0026rsquo;t stop just over a 
hill\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLift the hook to signal an emergency\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePlace emergency triangles 200-300 feet behind vehicle; use flares if needed but be careful b/c they may cause fire\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCall for roadside assistance\u003c/p\u003e\n\u003cp\u003e63, 92\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"lanes\"\u003eLanes!\u003c/h2\u003e\n\u003ch3 id=\"reading-em\"\u003eReading \u0026rsquo;em\u003c/h3\u003e\n\u003ch4 id=\"yellow-different-directions\"\u003eYellow: different directions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSingle yellow is the center of the road; cannot cross into oncoming traffic\u003c/li\u003e\n\u003cli\u003eDouble solid yellow line: not to be crossed\n\u003cul\u003e\n\u003cli\u003e\u0026hellip;except\n\u003cul\u003e\n\u003cli\u003ehov entrace lane which has a left entrance\u003c/li\u003e\n\u003cli\u003eInstructed to cross because the road is blocked\u003c/li\u003e\n\u003cli\u003eEntering or exiting a driveway, private road, or making a u-turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e2 double yellow line groups spaced 2 feet or more apart are considered a barrier; under no circumstance is to cross\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBroken yellow line: you may pass if the broken line is next to you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"white-same-directions\"\u003eWhite: same directions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSingle solid white line: traffic lanes in the same direction\u003c/li\u003e\n\u003cli\u003eDouble solid white lines: not to be crossed, regular use vs. 
preferential use lanes (carpool, etc.)\u003c/li\u003e\n\u003cli\u003eBroken white lines: separate roads with two or more lines in the same direction\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"white-triangles-yield-lines\"\u003eWhite triangles: yield lines\u003c/h4\u003e\n\u003cp\u003eA line where you should yield. Triangles point to the direction of oncoming traffic (\u0026ldquo;towards you.\u0026rdquo;).\u003c/p\u003e\n\u003ch3 id=\"choosing-em\"\u003eChoosing \u0026rsquo;em\u003c/h3\u003e\n\u003cp\u003eLeftmost lane is lane 1, rightmost is lane n\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eUse the left lane to pass or turn left\u003c/li\u003e\n\u003cli\u003eUse the right lane to enter or exit traffic\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"change-lanes-when\"\u003eChange lanes when\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eMoving from one lane to another\u003c/li\u003e\n\u003cli\u003eEntering freeway\u003c/li\u003e\n\u003cli\u003eExiting freeway\u003c/li\u003e\n\u003cli\u003eEntering the road from curb or shoulder\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"protocol-for-lane-change-knw\"\u003eProtocol for lane change #knw\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSignal\u003c/li\u003e\n\u003cli\u003eLook in all mirrors\u003c/li\u003e\n\u003cli\u003eCheck traffic beside and behind you\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eLook over solder in direction of desired lane change\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eCheck blind spots for other vehicles, motorcyclists, and bicycilsts\u003c/li\u003e\n\u003cli\u003eEnsure room\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"tips\"\u003eTips\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003estay in one lane\u003c/li\u003e\n\u003cli\u003edon\u0026rsquo;t weave\u003c/li\u003e\n\u003cli\u003eif you start a change, finish it\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"types-of-them\"\u003eTypes of 
them\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eLane closest to the center divider is the \u0026ldquo;passing lane\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eHOV lanes is for high occupancy\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"center-left-turn-lanes\"\u003eCenter left turn lanes\u003c/h4\u003e\n\u003cp\u003eThe center of some two-way streets has a left turn lane; marked on both sides by two painted lines. Inner line is broken and outer line is solid.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-21_23-52-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eYou may only drive 200 feet in the center left turn lane #knw\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eProtocol for using this lane\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLook for other vehicles coming towards you in the center left turn lane\u003c/li\u003e\n\u003cli\u003eSignal\u003c/li\u003e\n\u003cli\u003eLook over shoulder\u003c/li\u003e\n\u003cli\u003eMerge completely into the center left turn lane\u003c/li\u003e\n\u003cli\u003eTurn when its safe.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"turnouts\"\u003eTurnouts\u003c/h4\u003e\n\u003cp\u003eAreas or lanes for turning that are marked? 
Use when:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDriving slowly on a two-lane road where passing is unsafe, AND\u003c/li\u003e\n\u003cli\u003eThere are 5 or more vehicles following #knw\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"bike-lanes\"\u003eBike lanes\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBike lanes\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eBuffered bike lanes: uses chevrons or diagonals to buffer the bikes\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-21_23-55-41_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBike route: shared road markings to designate a preferred route\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eBike boulevard: bike travel on streets with cars\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSeperated bikeways: completely different\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eBikes share the road \u0026ldquo;sharrows!\u0026rdquo;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-21_23-57-32_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCannot drive in bike lane unless\u0026hellip;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eParking\u003c/li\u003e\n\u003cli\u003eEntering or leaving road\u003c/li\u003e\n\u003cli\u003eTurning (within 200 feet of intersection)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"turning\"\u003eTurning\u003c/h2\u003e\n\u003ch3 id=\"right\"\u003eRight\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDrive close to the edge\u003c/li\u003e\n\u003cli\u003eDrive in a bike lane, wait until about 200 feet to make turn #knw\u003c/li\u003e\n\u003cli\u003eWatch for everybody\u003c/li\u003e\n\u003cli\u003eSignal about 100 feet before #knw\u003c/li\u003e\n\u003cli\u003eLook over sholder\u003c/li\u003e\n\u003cli\u003eStop behind limit line (/before entering crosswalk or 
intersection)\u003c/li\u003e\n\u003cli\u003eLook both ways and turn when its safe; don\u0026rsquo;t turn into another lane\u003c/li\u003e\n\u003cli\u003eComplete turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDetails:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCan\u0026rsquo;t turn when red arrow, but you can turn against red light\u003c/li\u003e\n\u003cli\u003eYou could cross a bus lane to make a right turn, but you can\u0026rsquo;t drive in it\u003c/li\u003e\n\u003cli\u003eThere could be designated right turn lanes which let you make a \u0026ldquo;free right turn\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"left\"\u003eLeft\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDrive close to the center divider or left turn lane\u003c/li\u003e\n\u003cli\u003eSignal about 100 feed\u003c/li\u003e\n\u003cli\u003eLook over sholder\u003c/li\u003e\n\u003cli\u003eStop behind limit line (/before entering crosswalk or intersection)\u003c/li\u003e\n\u003cli\u003eLook left, right, then left\u003c/li\u003e\n\u003cli\u003eTurn\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDetails\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eonly can turn against light when single-lane-to-single-lane\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"u\"\u003eU\u003c/h3\u003e\n\u003ch4 id=\"conditions\"\u003eConditions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eAcross double-yellow line\u003c/li\u003e\n\u003cli\u003eIn a residential district\n\u003cul\u003e\n\u003cli\u003eNo cars for 200 feet\u003c/li\u003e\n\u003cli\u003eWhenever a sign or light protects against approachng cars\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eAt an intersection\u003c/li\u003e\n\u003cli\u003eOn a divided driveway, if opening provided\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"anticonditions\"\u003eAnticonditions\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eWHen \u0026ldquo;no u-turn\u0026rdquo; is 
posted\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAt a railroad crossing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOn a divided highway if needed to cross things\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCannot see 200 feet in each direction\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eWhen other cars may hit you\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOn a one-way street\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIn front of a fire station\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-10-35_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003cp\u003eooo. scary\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIn business districts, including churches apartments and buildings (except for schools); turn only at an intersection or opening if allowed.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"merging\"\u003eMerging\u003c/h2\u003e\n\u003ch3 id=\"highways\"\u003eHighways\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eEnter at or near traffic speed\u003c/li\u003e\n\u003cli\u003eMerge onto highway when safe to do so, don\u0026rsquo;t stop unless needed\u003c/li\u003e\n\u003cli\u003eMerge into a space large enough for your car to join the lane\u003c/li\u003e\n\u003cli\u003eUse mirrors and turn signals\u003c/li\u003e\n\u003cli\u003eWatch for cars\u003c/li\u003e\n\u003cli\u003eLeave \u003cstrong\u003e\u003cstrong\u003ethree seconds of space\u003c/strong\u003e\u003c/strong\u003e (\u0026ldquo;three second rule\u0026rdquo;) between you and the car in front of you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exiting\"\u003eExiting\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eKnow the exist\u003c/li\u003e\n\u003cli\u003eSignal, look over sholder, etc.\u003c/li\u003e\n\u003cli\u003eChange lanes\u003c/li\u003e\n\u003cli\u003eSignal 
intention for 5 seconds\u003c/li\u003e\n\u003cli\u003eLeave\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"space-for-entering\"\u003eSpace for entering\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eYou will need about a half a block on city streets\u003c/li\u003e\n\u003cli\u003eOr, a full block on the highway\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"passing\"\u003ePassing\u003c/h2\u003e\n\u003cp\u003eIf anybody wants to pass, let them pass\u003c/p\u003e\n\u003ch3 id=\"space-for-passing\"\u003eSpace for passing\u003c/h3\u003e\n\u003cp\u003eDon\u0026rsquo;t pass if\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eYou are approaching a hill and cannot see oncoming traffic\u003c/li\u003e\n\u003cli\u003eWithin 100 feet of an intersection #knw\u003c/li\u003e\n\u003cli\u003eAt crossroads or driveways\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"condition-of-passing\"\u003eCondition of Passing\u003c/h3\u003e\n\u003cp\u003eYou pass on the left, unless\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eOpen highway with two or more lanes going in your direction\u003c/li\u003e\n\u003cli\u003eDriver ahead of you is turning left, and you don\u0026rsquo;t have to drive off the road to pass\u003c/li\u003e\n\u003cli\u003eYou are on a one-way street\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eNever drive off the road to pass.\u003c/p\u003e\n\u003ch3 id=\"protocol-for-passing\"\u003eProtocol for passing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSignal\u003c/li\u003e\n\u003cli\u003eShoulder\u003c/li\u003e\n\u003cli\u003eTurn\u003c/li\u003e\n\u003cli\u003eSpeed up and pass\u003c/li\u003e\n\u003cli\u003eRetturn\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"parking\"\u003eParking\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFind a space #knw three feet longer that your vehicle\u003c/li\u003e\n\u003cli\u003eTurn on turn signal\u003c/li\u003e\n\u003cli\u003ePull up alongside the vehicle in front; leave about two feet between you and the car to your right. 
Stop when you rear bumper is aligned with the front of the space\u003c/li\u003e\n\u003cli\u003eCheck rearview mirror, look over sholder, keep foot on break and reverse\u003c/li\u003e\n\u003cli\u003eBack up, 45%\u003c/li\u003e\n\u003cli\u003eWhen rear view is within 18 inches from the curb, straighten out\u003c/li\u003e\n\u003cli\u003eSet parking break. Leave when safe.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"parking-on-a-hill\"\u003eParking on a hill\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Your car may roll when you breaks fail.\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eDownhill\u003c/strong\u003e\u003c/strong\u003e: wheels towards the curb\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eUphill\u003c/strong\u003e\u003c/strong\u003e: wheels away from curb\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eNo curb\u003c/strong\u003e\u003c/strong\u003e: turn towards the sholder of the road\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;towards the sholder, except when uphill with curb\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"colors\"\u003eColors\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhite curb: stop for picking up or dropping off passengers or mails\u003c/li\u003e\n\u003cli\u003eGreen curb: park for limited time\u003c/li\u003e\n\u003cli\u003eYellow: load and unload, staying in the vehicle\u003c/li\u003e\n\u003cli\u003eRed: no stopping\u003c/li\u003e\n\u003cli\u003eBlue: disabled\u0026mdash;fine of $1,000, 6 months in county jail #knw\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"can-t-park-when\"\u003eCan\u0026rsquo;t park when\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eNo marking\u003c/li\u003e\n\u003cli\u003eUnmarked or marked crosswalk\u003c/li\u003e\n\u003cli\u003eSidewalk, partially blocking sidewalk, or in front of driveway\u003c/li\u003e\n\u003cli\u003eWithin 3 feet of disabled sidewalk ramp #knw\u003c/li\u003e\n\u003cli\u003eOn diagnal lines next 
to disabled space\u003c/li\u003e\n\u003cli\u003eWithin 15 feet of a fire hydrant #knw\u003c/li\u003e\n\u003cli\u003eDouble parking\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOn the wrong side of the street or freeway, except: 1) emergency 2) law enforcement officer 3) specificaly permitted stop.\u003c/p\u003e\n\u003cp\u003eTo stop and park then, park off the pavement, stay with the car and lock the doors until help arrives; visibility is 200 feet in each direction required. #knw\u003c/p\u003e\n\u003ch2 id=\"lights\"\u003eLights\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eFlashing red: stop sign\u0026ndash;stop and go when its safe\u003c/li\u003e\n\u003cli\u003eFlashing yellow: yield sign\u0026mdash;proceed with caution\u003c/li\u003e\n\u003cli\u003eFlashing yellow arrow: unprotected turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBroken traffic lights become a four way stop sign.\u003c/p\u003e\n\u003ch2 id=\"signs\"\u003eSigns\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eStop sign is stop; there should be a limit line; if no limit line, stop before intesection\u003c/li\u003e\n\u003cli\u003eYield sign is to yield; slow down\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-41-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-42-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-42-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-22_00-43-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"right-of-way\"\u003eRight of Way\u003c/h2\u003e\n\u003ch3 id=\"without-stop-yield-signs\"\u003eWithout stop/yield signs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhomever gets to the intersection first has right of way\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"t-intersection-without-stop-yield-signs\"\u003eT intersection without stop/yield 
signs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe through road have right of way\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"stop-signs\"\u003eStop signs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStop first, then follow right of way rules as if no intersection\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"turning-left\"\u003eTurning left\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRight of way to anyone approaching that\u0026rsquo;s \u0026ldquo;close enough to be dangerous\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"turning-right\"\u003eTurning right\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCheck for pedestrians crossing the street, and bikes and motors next to you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"green-light\"\u003eGreen light\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePedestrians\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"divided-highways\"\u003eDivided highways\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eVehicles coming in the lane you are about to enter\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"entering-traffic\"\u003eEntering traffic\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe traffic you are entering\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"roundabouts\"\u003eRoundabouts\u003c/h2\u003e\n\u003cp\u003eThe logistics of using a roundabout\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSlow down\u003c/li\u003e\n\u003cli\u003eYield to traffic\u003c/li\u003e\n\u003cli\u003eWatch for signs\u003c/li\u003e\n\u003cli\u003eTravel in counter-clockwise direction, don\u0026rsquo;t stop or pass\u003c/li\u003e\n\u003cli\u003eSignal when you change lanes or exit\u003c/li\u003e\n\u003cli\u003eIf you miss your exit, try again\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"choosing-lane\"\u003eChoosing lane\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRightmost for turning right\u003c/li\u003e\n\u003cli\u003eEither lane (\u0026ldquo;middle\u0026rdquo;, if exists) for 
straight\u003c/li\u003e\n\u003cli\u003eInnermost for left turn or u turn\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pedestrians\"\u003ePedestrians\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePedestrians have right-of-way\u003c/li\u003e\n\u003cli\u003ePedestrian crossing need to cross first, you yield or slow to them\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhich means\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDo not pass a stopped vehicle\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t drive on a sidewalk except to cross it or enter/exit it\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stop in a crosswalk\u003c/li\u003e\n\u003cli\u003eIf people make eyecontact, they are crossing the street\u003c/li\u003e\n\u003cli\u003eObey pedestrian\u0026rsquo;s signs\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWatch for seniors, people with disabilities, young children.\u003c/p\u003e\n\u003ch3 id=\"crosswalks\"\u003eCrosswalks\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCrosswalks are marked (but not all)\u003c/li\u003e\n\u003cli\u003eSchool crossings have yellow lines\u003c/li\u003e\n\u003cli\u003ePedestrians have right of wall in all crosswalks\u003c/li\u003e\n\u003cli\u003eFlashing light crosswalks exists to, just be prepared to stop regardless\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"blind\"\u003eBlind\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhite canes and guide dogs have absolute right of way\n\u003cul\u003e\n\u003cli\u003eStop at all stop walks\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stop in the middle of stop walk\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t give verbal directions to blind pedestrian\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t turn right w/o looking for pedestrians\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t honk at a blind person\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t block sidewalk\u003c/li\u003e\n\u003cli\u003ePulling in cane + stepping away: you may 
go\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mountain-roads\"\u003eMountain roads\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUphill car has right of way\u003c/li\u003e\n\u003cli\u003eDownhill car has more control backing up the hill\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"roadsharing\"\u003eRoadsharing\u003c/h2\u003e\n\u003ch3 id=\"large-cars\"\u003eLarge cars\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eAverage passenter car at 55mph has 400 feet before stopping\u003c/li\u003e\n\u003cli\u003eLarge car takes 800 feet\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDon\u0026rsquo;t move in front of a large car and suddenly stop.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-23_15-53-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLook at turn signals: large vehicles may swing their back, say, left in order to turn right.\u003c/p\u003e\n\u003cp\u003eDon\u0026rsquo;t\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eChange lanes in front of them to reach an exit or turn (tight spaces around large vehicles is dangerous)\u003c/li\u003e\n\u003cli\u003eDrive next to them (unless passing); after you pass, move ahead of it\u003c/li\u003e\n\u003cli\u003eFollow too closely: that\u0026rsquo;s tailgating. 
Give more space\u003c/li\u003e\n\u003cli\u003eUnderestimate the size and speed of the vehicle\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;If you can\u0026rsquo;t see a truck\u0026rsquo;s side mirrors, it can\u0026rsquo;t see you.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ealways pass it on the left side\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"buses-and-rails\"\u003eBuses and rails\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewhen loading is happening without a safety zone, stop behind the nearest door\u003c/li\u003e\n\u003cli\u003eStopped busses can only be passed at 10mph\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t pass on the left side, unless\u0026hellip;\n\u003cul\u003e\n\u003cli\u003eyou are on a one-way street\u003c/li\u003e\n\u003cli\u003etracks are so close to the right you can\u0026rsquo;t pass on the right\u003c/li\u003e\n\u003cli\u003etraffic officer directs you to\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever turn in front of a light rail vehicle\u003c/li\u003e\n\u003cli\u003eCheck for traffic lights (light rails can interrupt them)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"motocycles\"\u003eMotocycles\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e4 second following distance\u003c/li\u003e\n\u003cli\u003eGiven a motocycle a full lane; its legal to share but its unsafe\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t try to pass a motorcycle in the same lane\u003c/li\u003e\n\u003cli\u003eWhen possible, move to one side of your lane\u003c/li\u003e\n\u003cli\u003eCheck for motocyclists\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"emergency-vehicles\"\u003eEmergency vehicles\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGive them right of way: drive to the edge until they\u0026rsquo;ve passed\u003c/li\u003e\n\u003cli\u003e\u0026hellip;except in intersections: never stop in an intersection (continue through and stop)\u003c/li\u003e\n\u003cli\u003eObey 
loudspeaker orders\u003c/li\u003e\n\u003cli\u003eIllegal to follow 300 feet of any emergency vehicles with flashing siren\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"slow-cars\"\u003eSlow cars\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSlow down for them\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"nev-lsv\"\u003eNEV LSV\u003c/h3\u003e\n\u003cp\u003eLike gold carts\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThey have max speed 25mph\u003c/li\u003e\n\u003cli\u003eThey can\u0026rsquo;t drive in roads with speed limit larger than 35 mph\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bikes\"\u003eBikes\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFront lamp with white light visible for 300 feet\u003c/li\u003e\n\u003cli\u003eRear red reflector (visible from 500 feet)\u003c/li\u003e\n\u003cli\u003eWhite or yellow reflector on each pedal (visible for 200 feet)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"travel-lanes\"\u003eTravel lanes\u003c/h4\u003e\n\u003cp\u003eMust ride to the curb if slow, unless\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ePassing in the same direction\u003c/li\u003e\n\u003cli\u003ePreparing to turn left\u003c/li\u003e\n\u003cli\u003eAvoiding a hazard/road condition\u003c/li\u003e\n\u003cli\u003eApproaching right turn\u003c/li\u003e\n\u003cli\u003eOn a one way road with two or more lanes (if so, bikers may right next to left curb)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"passing-bikers\"\u003ePassing bikers\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-23_16-21-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e3 feet clearance\u003c/p\u003e\n\u003ch4 id=\"school-buses\"\u003eSchool buses\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eYellow lights flashing is to slow\u003c/li\u003e\n\u003cli\u003eRed lights flashing is to stop\u003c/li\u003e\n\u003cli\u003eIf you fail to stop, you can be fined up to $1,000 and driving maybe suspended for a year\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"workzone-fines\"\u003eWorkzone fines\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTraffic violations have fines of $1,000 or more\u003c/li\u003e\n\u003cli\u003eAssulting a worker has a fine of $2,000 plus imprisonment for up to on year\u003c/li\u003e\n\u003cli\u003eSome regions are double-fine zones\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"speed-limit\"\u003eSpeed Limit\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Basic speed law\u0026rdquo;: you may never drive faster than its safe.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e10mph to pas a roadcar\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e15 mph in blind intersections (cannot see 100 in both directions when within 100 feet)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif your view is blocked in a blind intersection, inch forward until you can see\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e15 mph also in some school, alleys (roads no wider 25 feet), 100 feet of railroad tracks if visiblity less then 400 feet\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e25 mph when you are 500-1000 feet of a school, when crossing the street, residential\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e55 mph on two lane undivided highway\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eYou cannot block traffic flow\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDrive far-right lane of you are towing\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"railroad\"\u003eRailroad\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLook in both directions\u003c/li\u003e\n\u003cli\u003eExcept train anytime\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stop in traintracks\u003c/li\u003e\n\u003cli\u003eWatch for other cars\u003c/li\u003e\n\u003cli\u003eStop between 15-50 feet from the neearest tracks\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fines-and-stuff\"\u003eFines and 
Stuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSmoking with a minor: $100\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDumping animals: $1,000, six months in jail\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEvading law enforcement:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estate prison up to 7 years, county jail for 1 year\u003c/li\u003e\n\u003cli\u003eFine between $2,000 and $10,000\u003c/li\u003e\n\u003cli\u003eOr both\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEvading law enforcement and commiting manslauter\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eImprisonment for 4-10 years\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpeed content and reckless driving: fine and imprsionment\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTexting\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWear earplugs in bot hyears\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCarry anything that extends beyond the fenders on the left side, or more then 6 inches on the right side\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCargo more the 4 feet must display a 1 feet red or flourencesnt flag\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTransport animals unless secured\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAllow a person to be in a back of a pickup truck unless secured\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDrive a car with a video monitor except when it doesn\u0026rsquo;t face driver\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eThrow a cig from the car\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCut signs that block the windshiled\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDon\u0026rsquo;t hang objects on the mirror\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDon\u0026rsquo;t 
sticker, unless\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e7 inch square on lower corner of passengers or rear window\u003c/li\u003e\n\u003cli\u003e5 inch square on the lower corner of the driver window\u003c/li\u003e\n\u003cli\u003eSide windows behind driver\u003c/li\u003e\n\u003cli\u003e5 inch located in the center uppermost portion\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eFuneral pocessions have right of way\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"points\"\u003ePoints\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e36 month record\u003c/li\u003e\n\u003cli\u003eSuspension when: 4 points in 12 months, 6 in 24, or 8 in 36\u003c/li\u003e\n\u003cli\u003eOnce 18 months to earn back points via traffic school\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"best-practices\"\u003eBest Practices\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eScan road 10-15 seconds ahead of you\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stare\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t tailgate: 3 seconds between you and the car ahead passes\n\u003cul\u003e\n\u003cli\u003eAllow extra space when\u0026hellip;\n\u003cul\u003e\n\u003cli\u003eIf you have a tailgator, (and move! 
if you can)\u003c/li\u003e\n\u003cli\u003eThe driver behind you wants to pass\u003c/li\u003e\n\u003cli\u003eSlippery\u003c/li\u003e\n\u003cli\u003eFollowing on icy or wet\u003c/li\u003e\n\u003cli\u003eTowing a trailer\u003c/li\u003e\n\u003cli\u003eFollowiing a car that blocks you ahead\u003c/li\u003e\n\u003cli\u003eMerging onto freeway\u003c/li\u003e\n\u003cli\u003eFollowing\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t stay in the blind spot\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t driving alongside cars\u003c/li\u003e\n\u003cli\u003eMake space when possible\u003c/li\u003e\n\u003cli\u003eKeep space between you and parked cars\u003c/li\u003e\n\u003cli\u003eBe careful when nearing motorcyclists and bicyclists\u003c/li\u003e\n\u003cli\u003eAt intersections\n\u003cul\u003e\n\u003cli\u003eLook both ways\u003c/li\u003e\n\u003cli\u003eLook left first (vehicles coming from the left are closer)\u003c/li\u003e\n\u003cli\u003eLook right\u003c/li\u003e\n\u003cli\u003eTake one more look to the left\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e5-10mph on wet road, reduce speed by half on snow, tiny very slow on ice\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t use breaks if starting to hydroplone\u003c/li\u003e\n\u003cli\u003eIf you can\u0026rsquo;t see farther than 100 feet, its unsafe to drive faster than 30mph\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"seat-belts\"\u003eSeat belts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eClick it or ticket\u003c/li\u003e\n\u003cli\u003eUnder 16 years old, you may also get ticket\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"child-safety\"\u003eChild safety\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eUnder 2 years old: secure in a real facing child restraight system (unless child weighs more than 40 pounds or is more that 3 ft 4 inches taller)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eChilden under 8 
years old, less than 4 feet 9 inches tall: secure in a front-facing restraight system\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCould use front seat if there\u0026rsquo;s no rear seat or if they are side facing jump seat\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e8 years old or older, or 4 feet 9 inches tall: use seat belts\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e6 y/o or younger unattended illegal to leave in car; supervision could be 12 year old.\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHot vehicle can kill\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"emergencies\"\u003eEmergencies\u003c/h2\u003e\n\u003ch3 id=\"skids\"\u003eSkids\u003c/h3\u003e\n\u003ch4 id=\"slippery-surface\"\u003eSlippery surface\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eSlowly remove foot from gas pedal\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t use breaks\u003c/li\u003e\n\u003cli\u003eTurn the steering wheel in the direction of the skid\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf your breaches get wet, dry them by pressing gas and brake at the same time.\u003c/p\u003e\n\u003ch4 id=\"lock-wheel\"\u003eLock wheel\u003c/h4\u003e\n\u003cp\u003eBreaking too hard when going to fast: skid no matter steering wheel\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRemove foot from break\u003c/li\u003e\n\u003cli\u003eStraighten front wheel\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf ABS not working, step on brake gradually until safe speed. 
If the brake petal sinks to the floor, bump the brakes.\u003c/p\u003e\n\u003ch3 id=\"driving-off-pavement\"\u003eDriving off pavement\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGrip wheel slowly\u003c/li\u003e\n\u003cli\u003eRemove your foot from gas\u003c/li\u003e\n\u003cli\u003eBrake gently\u003c/li\u003e\n\u003cli\u003eCheck for traffic\u003c/li\u003e\n\u003cli\u003eSteer back\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"accelerator-mallfunction\"\u003eAccelerator mallfunction\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eShift to neutral\u003c/li\u003e\n\u003cli\u003eApply breakes\u003c/li\u003e\n\u003cli\u003eLook for traffic\u003c/li\u003e\n\u003cli\u003eHonk horn and emergency flashers\u003c/li\u003e\n\u003cli\u003eDrive car off the road\u003c/li\u003e\n\u003cli\u003eTurn of ignition\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"collision\"\u003eCollision\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIf collision causes more than $1000 in property damage, you msut report to DMV\u003c/li\u003e\n\u003cli\u003eDriving is suspended for 4 years of no insurance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"disabled-vehicle\"\u003eDisabled Vehicle\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSafely pull over\u003c/li\u003e\n\u003cli\u003eExit on the right side\u003c/li\u003e\n\u003cli\u003eFind assistance\u003c/li\u003e\n\u003cli\u003eReturn no vehicle\u003c/li\u003e\n\u003cli\u003eStay inside with your seat belt\u003c/li\u003e\n\u003cli\u003eUuse flashers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"railroad\"\u003eRailroad\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIf a train is coming, get out and run in a 45 degree away from the train and tracks. 
Dial 911\u003c/li\u003e\n\u003cli\u003eIf train not coming, exit vehicle, dial emergency number on the railroad crossing box, and then call 911\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dui\"\u003eDUI\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDon\u0026rsquo;t drink and drive\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t take drugs\u003c/li\u003e\n\u003cli\u003eUse any combination of drugs\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIllegal to drink alcohol or smoke or eat cannabis products while in a car, whether self or passenger. If you are carrying it, it must be full and unopened. If its open, keep it in the trunk.\u003c/p\u003e\n\u003ch3 id=\"limits\"\u003eLimits\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e0.08% over 21\u003c/li\u003e\n\u003cli\u003e0.01% under 21\u003c/li\u003e\n\u003cli\u003e0.01% under DUI probation\u003c/li\u003e\n\u003cli\u003e0.04% if commercial\u003c/li\u003e\n\u003cli\u003e0.04% if driving for hire\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dui-arrests\"\u003eDUI Arrests\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eHold license for 30 days\u003c/li\u003e\n\u003cli\u003eHearing from 10 days\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dui-convictions\"\u003eDUI Convictions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCompletion of DUI program\u003c/li\u003e\n\u003cli\u003eInstall Ignition Interlock Device\u003c/li\u003e\n\u003cli\u003e6 months in jail\u003c/li\u003e\n\u003cli\u003e$390-$1000\u003c/li\u003e\n\u003cli\u003eMay inpound vehicle\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"carrying-under-21\"\u003eCarrying under 21\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eMay not carry unless someone older\u003c/li\u003e\n\u003cli\u003eFine up to $1000 and impound for 30 days, suspencion for 1 year\u003c/li\u003e\n\u003cli\u003e0.01% or higher you have to complete program, 0.05% 
suspension\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdriving/","tags":null,"title":"Driving"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhdriving_practice/","tags":null,"title":"Driving Practice"},{"categories":null,"contents":"Drug Resistance is the process of developing resistance to drugs after some time of use\noccurrence of Drug Resistance Drug Resistance occurs when there\u0026rsquo;s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e is the process of developing resistance to drugs after some time of use\u003c/p\u003e\n\u003ch2 id=\"occurrence-of-drug-resistance--kbhdrug-resistance-dot-md\"\u003eoccurrence of \u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e occurs when there\u0026rsquo;s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdrug_resistance/","tags":null,"title":"Drug Resistance"},{"categories":null,"contents":"The dual space of \\(V\\), named \\(V\u0026rsquo;\\), is the vector space formed by linear functionals on \\(V\\) (because recall set of linear maps between two vector spaces form a vector space).\nconstituents A vector space \\(V\\)\nrequirements \\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\) , and its a vector space.\nadditional information dimension of dual space is equivalent to the original space \\begin{equation} \\dim V\u0026rsquo; = \\dim V \\end{equation}\nProof:\nBecause \\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\), and \\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\). 
Now, \\(\\dim V\u0026rsquo; = \\dim \\mathcal{L}(V,\\mathbb{F}) = (\\dim V)(\\dim \\mathbb{F}) = \\dim V \\cdot 1 = \\dim V\\).\ndual basis Let \\(v_1, \u0026hellip;, v_{n}\\) be a basis of \\(V\\), then, we can construct a basis of \\(V\u0026rsquo;\\) with linear functionals \\(\\varphi_{1}, \u0026hellip;, \\varphi_{n}\\):\n\\begin{equation} \\varphi_{j}(v_{k}) = \\begin{cases} 1, if\\ k=j \\\\ 0, if\\ k \\neq j \\end{cases} \\end{equation}\nNow, we can show that \\(\\varphi_{j}\\) are indeed linear functionals by basis of domain: we defined its behavior of each \\(\\varphi_{j}\\) based on where it sends each \\(v_{j}\\) (i.e. the basis of \\(V\\), the domain of elements in \\(V\u0026rsquo;\\)) into values in \\(\\mathbb{F}\\) (i.e. \\(1\\) or \\(0\\)).\nWe can now show that these \\(\\varphi_{j}\\) is indeed a basis of \\(V\u0026rsquo;\\) by only showing that it is linearly independent because we have already a list of \\(n\\) \\(\\varphi_{j}\\) elements (i.e. \\(\\dim V\u0026rsquo;=\\dim V = n\\) number of \\(\\varphi_{j}\\)), and linearly independent list of length dim V are a basis of V.\n","html":"\u003cp\u003eThe dual space of \\(V\\), named \\(V\u0026rsquo;\\), is the vector space formed by \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003es on \\(V\\) (because recall \u003ca href=\"/posts/kbhlinear_map/#addition-and-scalar-multiplication-on-mathcal-l--v-w\"\u003eset of linear maps between two vector spaces form a vector space\u003c/a\u003e).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\) , and its a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional 
information\u003c/h2\u003e\n\u003ch3 id=\"dimension-of-dual-space-is-equivalent-to-the-original-space\"\u003edimension of dual space is equivalent to the original space\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V\u0026rsquo; = \\dim V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhisomorphism/#dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/a\u003e, and \\(V\u0026rsquo; = \\mathcal{L}(V, \\mathbb{F})\\). Now, \\(\\dim V\u0026rsquo; = \\dim \\mathcal{L}(V,\\mathbb{F}) = (\\dim V)(\\dim \\mathbb{F}) = \\dim V \\cdot 1 = \\dim V\\).\u003c/p\u003e\n\u003ch3 id=\"dual-basis\"\u003edual basis\u003c/h3\u003e\n\u003cp\u003eLet \\(v_1, \u0026hellip;, v_{n}\\) be a basis of \\(V\\), then, we can construct a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\u0026rsquo;\\) with \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003es \\(\\varphi_{1}, \u0026hellip;, \\varphi_{n}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi_{j}(v_{k}) =\n\\begin{cases}\n1, if\\ k=j \\\\\n0, if\\ k \\neq j\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we can show that \\(\\varphi_{j}\\) are indeed \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003es by \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e: we defined its behavior of each \\(\\varphi_{j}\\) based on where it sends each \\(v_{j}\\) (i.e. the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), the domain of elements in \\(V\u0026rsquo;\\)) into \u003cem\u003evalues\u003c/em\u003e in \\(\\mathbb{F}\\) (i.e. 
\\(1\\) or \\(0\\)).\u003c/p\u003e\n\u003cp\u003eWe can now show that these \\(\\varphi_{j}\\) is indeed a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\u0026rsquo;\\) by only showing that it is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e because we have already a list of \\(n\\) \\(\\varphi_{j}\\) elements (i.e. \\(\\dim V\u0026rsquo;=\\dim V = n\\) number of \\(\\varphi_{j}\\)), and \u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdual_space/","tags":null,"title":"dual space"},{"categories":null,"contents":"Dup15q Syndrome is an autistic syndrome associated with a gain of variant function in UBE3A. It is the opposite of Angelman Syndrome, which is a loss of function result on UBE3A.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdup15q/\"\u003eDup15q Syndrome\u003c/a\u003e is an autistic syndrome associated with a gain of variant function in \u003ca href=\"\"\u003eUBE3A.\u003c/a\u003e It is the opposite of \u003ca href=\"/posts/kbhangelman_syndrome/\"\u003eAngelman Syndrome\u003c/a\u003e, which is a loss of function result on UBE3A.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdup15q/","tags":null,"title":"Dup15q Syndrome"},{"categories":null,"contents":"dynamic programming is a three-step algorithm to tackle large, multi-step problems; high level idea: guessing + caching + recursion.\ndynamic programming can sometimes not be good enough, and it doesn\u0026rsquo;t really give us fast enough to get what we need to use. 
That\u0026rsquo;s when we need to deal with relaxation, or possibly greedy programming.\nmain steps of dynamic programming Break a hard problem into sub-problems Guess what sub-problem to solve Solve the sub-problem and store the solution Repeat #2 and #3 Combine sub-problem solutions to solve the hard problem analyzing runtime of dynamic programming To analyze runtime of dynamic programming problems, you ask:\nHow many sub-problems are there? How long does it take to solve each sub-problem? How long does it take to combine sub-problems? fibonacchi numbers: dynamic programming here\u0026rsquo;s an example top-down dynamic programming problem:\nThere are \\(n\\) sub-problems: \\(F_1, F_2, \\ldots, F_{n-1}\\). Solve a sub-problem, then store the solution \\(F_{n-1} = F_{n-2}+F_{n-3}\\) Continue until \\(F_1 =1\\). Now, we can recurs back up (popping the call stack) and cache all calculated results So then we can just look up any \\(F_k\\). shortest path: dynamic programming here\u0026rsquo;s a graph! how do we get to node \\(6\\)?\nGuess that the shortest path goes through 10 Go recursively until you get to root, cache the solution Do it again until you got to all subproblems Look up cached result ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e is a three-step algorithm to tackle large, multi-step problems; high level idea: guessing + caching + recursion.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e can sometimes not be good enough, and it doesn\u0026rsquo;t really give us fast enough to get what we need to use. 
That\u0026rsquo;s when we need to deal with \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e, or possibly \u003ca href=\"/posts/kbhgreedy_programming/\"\u003egreedy programming\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"main-steps-of-dynamic-programming\"\u003emain steps of dynamic programming\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eBreak a hard problem into sub-problems\u003c/li\u003e\n\u003cli\u003eGuess what sub-problem to solve\u003c/li\u003e\n\u003cli\u003eSolve the sub-problem and store the solution\u003c/li\u003e\n\u003cli\u003eRepeat #2 and #3\u003c/li\u003e\n\u003cli\u003eCombine sub-problem solutions to solve the hard problem\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"analyzing-runtime-of-dynamic-programming\"\u003eanalyzing runtime of dynamic programming\u003c/h2\u003e\n\u003cp\u003eTo analyze runtime of \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e problems, you ask:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eHow many sub-problems are there?\u003c/li\u003e\n\u003cli\u003eHow long does it take to solve each sub-problem?\u003c/li\u003e\n\u003cli\u003eHow long does it take to combine sub-problems?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fibonacchi-numbers-dynamic-programming\"\u003efibonacchi numbers: dynamic programming\u003c/h2\u003e\n\u003cp\u003ehere\u0026rsquo;s an example top-down \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e problem:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThere are \\(n\\) sub-problems: \\(F_1, F_2, \\ldots, F_{n-1}\\).\u003c/li\u003e\n\u003cli\u003eSolve a sub-problem, then store the solution\n\u003col\u003e\n\u003cli\u003e\\(F_{n-1} = F_{n-2}+F_{n-3}\\)\u003c/li\u003e\n\u003cli\u003eContinue until \\(F_1 =1\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eNow, we can recurs back up (popping the call stack) and cache all calculated results\u003c/li\u003e\n\u003cli\u003eSo 
then we can just look up any \\(F_k\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"shortest-path-dynamic-programming\"\u003eshortest path: dynamic programming\u003c/h2\u003e\n\u003cp\u003ehere\u0026rsquo;s a graph! how do we get to node \\(6\\)?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-05-02_10-28-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eGuess that the shortest path goes through 10\u003c/li\u003e\n\u003cli\u003eGo recursively until you get to root, cache the solution\u003c/li\u003e\n\u003cli\u003eDo it again until you got to all subproblems\u003c/li\u003e\n\u003cli\u003eLook up cached result\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdynamic_programming/","tags":null,"title":"dynamic programming"},{"categories":null,"contents":"Capacitor charging:\n\\begin{equation} Q = Q_0 (1-e^{-\\frac{t}{RC}}) \\end{equation}\nwhere \\(Q\\) is capacitor change at time \\(t\\), and \\(Q_0\\) initial change, and \\(RC\\) the resistance and capacitance.\nWhere, \\(RC\\) is called the \u0026ldquo;time constant\u0026rdquo;.\n\\begin{equation} I = \\frac{V}{R} (e^{-\\frac{t}{RC}}) \\end{equation}\nNote! these are inverse relationships: as a capacitor CHARGE, the current DROPS.\n","html":"\u003cp\u003eCapacitor charging:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ = Q_0 (1-e^{-\\frac{t}{RC}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(Q\\) is capacitor change at time \\(t\\), and \\(Q_0\\) initial change, and \\(RC\\) the \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e and \u003ca href=\"/posts/kbhcapacitance/\"\u003ecapacitance\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhere, \\(RC\\) is called the \u0026ldquo;time constant\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = \\frac{V}{R} (e^{-\\frac{t}{RC}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote! 
these are \u003cem\u003einverse\u003c/em\u003e relationships: as a capacitor CHARGE, the current DROPS.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdynamic_rc_circuts/","tags":null,"title":"Dynamic RC Circuits"},{"categories":null,"contents":"Dyson\u0026rsquo;s Model of Life is a theory of origin of life:\ncells form as machines that perform tasks genes show up later as parasites, eventually forming symbiosis with cells Read: and so, we can essentially ditch trying to find things characteristics of \u0026ldquo;cells\u0026rdquo; per-se like RNA, instead we can go about finding generic boxes of containers called \u0026ldquo;cells\u0026rdquo; and see how they evolve.\nSee also: high chemical activity, metabolism, Stepwise Evolution, and Two Main Functions of Life\nconstituents \\(x\\): percentage of active binding sites \\(w\\): percentage of inactive binding sites \\(z\\): percentage of \u0026ldquo;empty binding sites\u0026rdquo; \\(p(k)\\): probability distribution for a site to be in any given state at time \\(k\\) \\(\\Psi(x)\\): rate of activation \u0026ldquo;efficiency in active monomers in acceleration monomer absorption\u0026rdquo; requirements evidently, because percentages: \\(x+w+z = 1\\) Active monomer absorption: \\(\\Psi(x) \\cdot p\\) Inactive monomer absorption: \\(p\\) additional information general intuition some kind of isolated droplet contains a population of molecules chemical reactions occur to the whole droplet such that its state changes Recall the Two Main Functions of Life: metabolism and replication. So, if we want our Dyson\u0026rsquo;s Model to capture life, we should try to encode them into our model. Turns out, we can use the language of Stepwise Evolution to describe our model.\nTherefore, let\u0026rsquo;s declare that there is only two states to our system, in which our particle is quasi-stationary (it wiggles but doesn\u0026rsquo;t go anywhere):\n\u0026ldquo;high chemical activity\u0026rdquo;, a.k.a. 
\u0026ldquo;metabolism\u0026rdquo; \u0026mdash; \u0026ldquo;ordered\u0026rdquo; state \u0026ldquo;low chemical activity\u0026rdquo; \u0026mdash; disordered state Our transition \\(M\\), then, only has to encode transitions between these two states. Dyson claims that, in his model, this transition happens spontaneously when the circumstances is correct.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model of Life\u003c/a\u003e is a \u003ca href=\"/posts/kbhliving/#theories-of-origin-of-life\"\u003etheory of origin of life\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es form as machines that perform tasks\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003egene\u003c/a\u003es show up later as parasites, eventually forming symbiosis with \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eRead: and so, we can essentially ditch trying to find things characteristics of \u0026ldquo;\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es\u0026rdquo; per-se like RNA, instead we can go about finding generic boxes of containers called \u0026ldquo;\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003es\u0026rdquo; and see how they evolve.\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003ehigh chemical activity\u003c/a\u003e, \u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e, \u003ca href=\"/posts/kbhstepwise_evolution/\"\u003eStepwise Evolution\u003c/a\u003e, and \u003ca href=\"/posts/kbhliving/#two-main-functions-of-life\"\u003eTwo Main Functions of Life\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x\\): percentage of \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactive\u003c/a\u003e binding sites\u003c/li\u003e\n\u003cli\u003e\\(w\\): percentage of 
inactive binding sites\u003c/li\u003e\n\u003cli\u003e\\(z\\): percentage of \u0026ldquo;\u003ca href=\"/posts/kbhempty_binding_site/\"\u003eempty binding site\u003c/a\u003es\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(p(k)\\): probability distribution for a site to be in any given state at time \\(k\\)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(x)\\): rate of \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactivation\u003c/a\u003e \u0026ldquo;efficiency in active monomers in acceleration monomer absorption\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eevidently, because percentages: \\(x+w+z = 1\\)\u003c/li\u003e\n\u003cli\u003eActive monomer absorption: \\(\\Psi(x) \\cdot p\\)\u003c/li\u003e\n\u003cli\u003eInactive monomer absorption: \\(p\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"general-intuition\"\u003egeneral intuition\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003esome kind of isolated droplet contains a population of molecules\u003c/li\u003e\n\u003cli\u003echemical reactions occur to the whole droplet such that its state changes\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-11_19-37-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall the \u003ca href=\"/posts/kbhliving/#two-main-functions-of-life\"\u003eTwo Main Functions of Life\u003c/a\u003e: \u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e and \u003ca href=\"/posts/kbhreplication/\"\u003ereplication\u003c/a\u003e. So, if we want our \u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model\u003c/a\u003e to capture life, we should try to encode them into our model. 
Turns out, we can use the language of \u003ca href=\"/posts/kbhstepwise_evolution/\"\u003eStepwise Evolution\u003c/a\u003e to describe our model.\u003c/p\u003e\n\u003cp\u003eTherefore, let\u0026rsquo;s declare that there is only two states to our system, in which our particle is quasi-stationary (it wiggles but doesn\u0026rsquo;t go anywhere):\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003ehigh chemical activity\u003c/a\u003e\u0026rdquo;, a.k.a. \u0026ldquo;\u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e\u0026rdquo; \u0026mdash; \u0026ldquo;ordered\u0026rdquo; state\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;low chemical activity\u0026rdquo; \u0026mdash; disordered state\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eOur transition \\(M\\), then, only has to encode transitions between these two states. Dyson claims that, in his model, this transition happens spontaneously when the circumstances is correct.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdyson_s_model_of_life/","tags":null,"title":"Dyson's Model of Life"},{"categories":null,"contents":"It a constant.\n\\begin{equation} \\lim_{n \\to \\infty} \\qty(1- \\frac{\\lambda}{n})^{n} = e^{-\\lambda} \\end{equation}\n","html":"\u003cp\u003eIt a constant.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\qty(1- \\frac{\\lambda}{n})^{n} = e^{-\\lambda}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhe/","tags":null,"title":"e"},{"categories":null,"contents":"We want to solve huge POMDP in the real world, but the belief states are huge. Notably, reachable beliefs are very small given an initial belief.\nWhy is vanilla PCA bad PCA as a denoising procedure: the underlying data is some data which is normally noised. 
This is not strictly true, the points don\u0026rsquo;t have normal noise.\nBetter PCA: E-PCA Instead of Euclidean distance, we use\n\\begin{equation} L(U,V) = \\mid X-UV\\mid^{2} \\end{equation}\nas a metric, where:\n\\(U\\) the feature\nspecifically:\n\\begin{equation} F(z) - yz + F^{*}(y) \\end{equation}\nwhere \\(F\\) is any convex objective that is problem specific that you choose,\nBregman Divergence forces the underlying matricies\u0026rsquo; bases to be non-negative\nOverall Methods collect sample beliefs apply the belifs into E-PCA Discretize the E-PCA\u0026rsquo;d belifs into a new state space \\(S\\) Recalculate R (\\(R(b) = b \\cdot R(s)\\)) and T (we simply sample \\(b,o\\) and calculate \\(update(b,a,o)\\)) for that state space S; congratulations, you are now solving an MDP value iteration ","html":"\u003cp\u003eWe want to solve huge \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e in the real world, but the belief states are huge. Notably, reachable beliefs are very small given an initial belief.\u003c/p\u003e\n\u003ch2 id=\"why-is-vanilla-pca-bad\"\u003eWhy is vanilla PCA bad\u003c/h2\u003e\n\u003cp\u003ePCA as a denoising procedure: the underlying data is some data which is normally noised. 
This is not strictly true, the points don\u0026rsquo;t have normal noise.\u003c/p\u003e\n\u003ch2 id=\"better-pca-e-pca\"\u003eBetter PCA: E-PCA\u003c/h2\u003e\n\u003cp\u003eInstead of Euclidean distance, we use\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL(U,V) = \\mid X-UV\\mid^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas a metric, where:\u003c/p\u003e\n\u003cp\u003e\\(U\\) the feature\u003c/p\u003e\n\u003cp\u003especifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(z) - yz + F^{*}(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(F\\) is any convex objective that is problem specific that you choose,\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eBregman Divergence\u003c/strong\u003e forces the underlying matricies\u0026rsquo; bases to be non-negative\u003c/p\u003e\n\u003ch2 id=\"overall-methods\"\u003eOverall Methods\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecollect sample beliefs\u003c/li\u003e\n\u003cli\u003eapply the belifs into E-PCA\u003c/li\u003e\n\u003cli\u003eDiscretize the E-PCA\u0026rsquo;d belifs into a new state space \\(S\\)\u003c/li\u003e\n\u003cli\u003eRecalculate R (\\(R(b) = b \\cdot R(s)\\)) and T (we simply sample \\(b,o\\) and calculate \\(update(b,a,o)\\)) for that state space S; congratulations, you are now solving an MDP\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhe_pca/","tags":null,"title":"E-PCA"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhe_coli/","tags":null,"title":"E. Coli"},{"categories":null,"contents":"Presented Project80 Talks Person Society Keywords Email Chhavi Chauhuan, PhD, ELS ASIP AI Ethics, Pathology cchauhan@asip.org J. 
Elliott Robinson, PhD, MD ASBMB NF1, Dopamine, ADHD elliott.robinson@cchmc.org Jason Yi, PhD ASBMB UBE3A, Recklinghaus, Dup15q domain Erica Korb, PhD ASBMB Autism, Chromatin ekorb@pennmedicine.upenn.edu Catherine Wang AAA student approach to learning ??? Megan Fagalde, PhD Candidate AAA anatomy learning mfagalde@iu.edu Michelle A. Sveistrup AAA haptic abilities, HAT msveistr@uwo.ca AAA anatomy learning Alam Boyd AAA partner vs. individual work Magnus ??? AAA Orna Issler ASBMB IncRNA, LINC00473, FEDORA orna.issler@mssm.edu Kaushik Ragunathan ASBMB whimsical adaptations ragunath@med.umich.edu Tracy l. Bale ASBMB i think like P80 scary tbale@som.umaryland.edu Gregory Morton APS thermoregulation, glucose gjmorton@uw.edu Peter Turnbaugh ASBMB Fluoropyrimidine, PreTA, DPYD peter.turnbaugh@ucsf.edu Ralph DeBernandis ASBMB metabolic alterations, LIPT1 People Meeters Person Place Email Job Followup Jay Pieczynski Rollins jpieczynski@rollings.edu Assist. Prof. P80, College Apps Sebastian Hernandez Rollins shernandez1@rollings.edu Undergrad \u0026quot;\u0026quot; Bryson Arnett U of Kentucky Undegrad Jennifer Pousont Pingry Eric P. Chang Pace U echang@pace.edu Assist. 
Prof P80 ","html":"\u003ch2 id=\"presented\"\u003ePresented\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"talks\"\u003eTalks\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePerson\u003c/th\u003e\n\u003cth\u003eSociety\u003c/th\u003e\n\u003cth\u003eKeywords\u003c/th\u003e\n\u003cth\u003eEmail\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eChhavi Chauhuan, PhD, ELS\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasip/\"\u003eASIP\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_ethics/\"\u003eAI Ethics\u003c/a\u003e, \u003ca href=\"\"\u003ePathology\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:cchauhan@asip.org\"\u003ecchauhan@asip.org\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJ. Elliott Robinson, PhD, MD\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eNF1\u003c/a\u003e, \u003ca href=\"/posts/kbhdopamine/\"\u003eDopamine\u003c/a\u003e, \u003ca href=\"/posts/kbhadhd/\"\u003eADHD\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:elliott.robinson@cchmc.org\"\u003eelliott.robinson@cchmc.org\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJason Yi, PhD\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eUBE3A\u003c/a\u003e, \u003ca href=\"\"\u003eRecklinghaus\u003c/a\u003e, \u003ca href=\"/posts/kbhdup15q/\"\u003eDup15q\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"https://www.jasonyilab.org/\"\u003edomain\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eErica Korb, PhD\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhautism/\"\u003eAutism\u003c/a\u003e, \u003ca href=\"/posts/kbhchromatin/\"\u003eChromatin\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:ekorb@pennmedicine.upenn.edu\"\u003eekorb@pennmedicine.upenn.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eCatherine Wang\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003estudent approach to learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e???\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMegan Fagalde, PhD Candidate\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhanatomy_learning/\"\u003eanatomy learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:mfagalde@iu.edu\"\u003emfagalde@iu.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMichelle A. Sveistrup\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003ehaptic abilities\u003c/a\u003e, \u003ca href=\"\"\u003eHAT\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:msveistr@uwo.ca\"\u003emsveistr@uwo.ca\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhanatomy_learning/\"\u003eanatomy learning\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eAlam Boyd\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003epartner vs. 
individual work\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMagnus ???\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaaa/\"\u003eAAA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOrna Issler\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eIncRNA\u003c/a\u003e, \u003ca href=\"\"\u003eLINC00473\u003c/a\u003e, \u003ca href=\"\"\u003eFEDORA\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:orna.issler@mssm.edu\"\u003eorna.issler@mssm.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eKaushik Ragunathan\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhepigenetics/#whimsical-adaptations\"\u003ewhimsical adaptations\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:ragunath@med.umich.edu\"\u003eragunath@med.umich.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eTracy l. 
Bale\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003ei think like P80 scary\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:tbale@som.umaryland.edu\"\u003etbale@som.umaryland.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eGregory Morton\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaps/\"\u003eAPS\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhthermoregulation/\"\u003ethermoregulation\u003c/a\u003e, \u003ca href=\"\"\u003eglucose\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:gjmorton@uw.edu\"\u003egjmorton@uw.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ePeter Turnbaugh\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e, \u003ca href=\"\"\u003ePreTA\u003c/a\u003e, \u003ca href=\"/posts/kbhdpyd/\"\u003eDPYD\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:peter.turnbaugh@ucsf.edu\"\u003epeter.turnbaugh@ucsf.edu\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eRalph DeBernandis\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhasbmb/\"\u003eASBMB\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003emetabolic alterations\u003c/a\u003e, \u003ca href=\"\"\u003eLIPT1\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"people-meeters\"\u003ePeople Meeters\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePerson\u003c/th\u003e\n\u003cth\u003ePlace\u003c/th\u003e\n\u003cth\u003eEmail\u003c/th\u003e\n\u003cth\u003eJob\u003c/th\u003e\n\u003cth\u003eFollowup\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eJay 
Pieczynski\u003c/td\u003e\n\u003ctd\u003eRollins\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:jpieczynski@rollings.edu\"\u003ejpieczynski@rollings.edu\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eAssist. Prof.\u003c/td\u003e\n\u003ctd\u003eP80, College Apps\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSebastian Hernandez\u003c/td\u003e\n\u003ctd\u003eRollins\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:shernandez1@rollings.edu\"\u003eshernandez1@rollings.edu\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eUndergrad\u003c/td\u003e\n\u003ctd\u003e\u0026quot;\u0026quot;\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eBryson Arnett\u003c/td\u003e\n\u003ctd\u003eU of Kentucky\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003eUndegrad\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJennifer Pousont\u003c/td\u003e\n\u003ctd\u003ePingry\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eEric P. Chang\u003c/td\u003e\n\u003ctd\u003ePace U\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"mailto:echang@pace.edu\"\u003eechang@pace.edu\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003eAssist. Prof\u003c/td\u003e\n\u003ctd\u003eP80\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbheb_emails/","tags":["index"],"title":"EB2022 Index"},{"categories":null,"contents":"Slightly nontraditional Ted class, which is that it is in complete modular architecture: no large group lectures, work is done in 2-3 week sprints.\nFirst two days, we will be doing intro together. There are 12 modules, and you do 6. There will be core modules and branches.\nThere are 3 symposiums which the groups share out. This class is very hard; we are using a graduate school textbook. 
We will be sidestepping some depth: main idea is to show the big area.\nTracks 1 =\u0026gt; {2,4,5} 4 =\u0026gt; 5 7 =\u0026gt; 8 8 =\u0026gt; {11,12} 3 =\u0026gt; 6 6 =\u0026gt; 9 9 =\u0026gt; 10 Good to learn MatLab.\nLogistics Create a portfolio journal; supply one entry a week.\nIntroductory Reading How Did Economists Get It So Wrong?\n","html":"\u003cp\u003eSlightly nontraditional Ted class, which is that it is in complete modular architecture: no large group lectures, work is done in 2-3 week sprints.\u003c/p\u003e\n\u003cp\u003eFirst two days, we will be doing intro together. There are 12 modules, and you do 6. There will be core modules and branches.\u003c/p\u003e\n\u003cp\u003eThere are 3 symposiums which the groups share out. This class is very hard; we are using a graduate school textbook. We will be sidestepping some depth: main idea is to show the big area.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"tracks\"\u003eTracks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e1 =\u0026gt; {2,4,5}\u003c/li\u003e\n\u003cli\u003e4 =\u0026gt; 5\u003c/li\u003e\n\u003cli\u003e7 =\u0026gt; 8\u003c/li\u003e\n\u003cli\u003e8 =\u0026gt; {11,12}\u003c/li\u003e\n\u003cli\u003e3 =\u0026gt; 6\u003c/li\u003e\n\u003cli\u003e6 =\u0026gt; 9\u003c/li\u003e\n\u003cli\u003e9 =\u0026gt; 10\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eGood to learn MatLab.\u003c/p\u003e\n\u003ch2 id=\"logistics\"\u003eLogistics\u003c/h2\u003e\n\u003cp\u003eCreate a portfolio journal; supply one entry a week.\u003c/p\u003e\n\u003ch2 id=\"introductory-reading\"\u003eIntroductory Reading\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhow_did_economists_get_it_so_wrong/\"\u003eHow Did Economists Get It So Wrong?\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhecon320_architecture/","tags":null,"title":"ECON320 Architecture"},{"categories":null,"contents":"The economy of credit is an effect where credit is being traded liberally, and people are buying stocks on large 
margins and unable to pay back.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbheconomy_of_credit/\"\u003eeconomy of credit\u003c/a\u003e is an effect where \u003ca href=\"/posts/kbhcredit/\"\u003ecredit\u003c/a\u003e is being traded liberally, and people are buying stocks on large margins and unable to pay back.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheconomy_of_credit/","tags":null,"title":"economy of credit"},{"categories":null,"contents":"Goal: search for a path (sequence of edits) from start to final string, whereby:\ninitial state is the word we are transforming operators: insert, delete, substitute goal state: the word we end up at path cost: cost of the path we are trying to minimize Sequence of all edits is huge! so DP.\nFor two strings, let\u0026rsquo;s define:\n\\(X\\) of length \\(n\\) \\(Y\\) of length \\(m\\) we define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\nLet:\n\\(D(i,0) = i, \\forall i\\) \\(D(0,j) = j, \\forall j\\) for i in range(1,M): for j in range(1,N): # deletion: ignoring one char of previous string d1 = D(i-1,j) + 1 # (cost) # insertion: insertion into string before using rest of j d2 = D(i,j-1) + 1 # (cost) # keep same if char is same or substitute current d3 = D(i-1,j-1) + (0 if X[i] == Y[j] else 2) # cache D(i,j) = min(d1, d2, d3) ","html":"\u003cp\u003eGoal: search for a path (sequence of edits) from start to final string, whereby:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einitial state\u003c/strong\u003e is the word we are transforming\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoperators\u003c/strong\u003e: insert, delete, substitute\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003egoal state\u003c/strong\u003e: the word we end up at\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003epath cost\u003c/strong\u003e: cost of the path we are trying to minimize\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSequence of all edits is huge! 
so DP.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eFor two strings, let\u0026rsquo;s define:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) of length \\(n\\)\u003c/li\u003e\n\u003cli\u003e\\(Y\\) of length \\(m\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(D(i,0) = i, \\forall i\\)\u003c/li\u003e\n\u003cli\u003e\\(D(0,j) = j, \\forall j\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# deletion: ignoring one char of previous string\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# insertion: insertion into string before using rest of j\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# keep same if char is same or substitute current\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbhedit_distance_with_dp/","tags":null,"title":"edit distance with DP"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbheffability/","tags":null,"title":"effability"},{"categories":null,"contents":" Many Mexican-Americans worked as migratory laborers + outside programs Indian Reorganization Act of 1934 Woman were paied less Environmental cost of damns and public projects commentary on the effects of the New Deal Incorporating aspects of Arthur M. Schlesinger\u0026rsquo;s Appraisal of the New Deal, William E. 
Leuchtenburg\u0026rsquo;s Appraisal of the New Deal, Anthony Badger\u0026rsquo;s Appraisal of the New Deal.\nThrough the analysis of the New Deal programs, what was particularly salient was Anthony Badger\u0026rsquo;s framing of the event as not one that is ultimately \u0026ldquo;successful\u0026rdquo; or \u0026ldquo;failed\u0026rdquo; but instead one which focuses on its long-term effects in context with the future policies. The equivocal labeling allows nuance that places the Deal properly in its historical content. According to Badger, helping the poor, a significant policy goal of the deals, were left as \u0026ldquo;unfinished business\u0026rdquo; when going to war. This idea contrasts with William E. Leuchtenburg\u0026rsquo;s framing of the same event\u0026mdash;that it was never the true intention of the deal to assist in subsidies on a humane level, but that which supported the economy and incidentally those that reaped benefits on it.\nThis new frame is much more useful when analyzing the deal. In fact, Leuchtenburg took this a step further and claimed that the New Deal didn\u0026rsquo;t work largely because it was impossible for it to have repaired the damage by the Hoover administration. Furthermore, according to Schlesinger, programs like the NRA were created with already the clear assumption that there were not enough policy tools in place to actually achieve it to the fullest extent. Under this mind frame, then, it is not difficult to see the New Deal as one that intentionally brought a failing US economy\u0026mdash;and those participating in it\u0026mdash;to full swing whilst ignoring those that didn\u0026rsquo;t have an economic influence. 
It was, therefore, never about helping \u0026ldquo;people\u0026rdquo;: it is a policy and economic tool like any other.\nThrough this somewhat revisionist view, it is much easier to place into perspective New Deal\u0026rsquo;s zealot focus on young men, strange deficiency in some areas, and central focus on infrastructure. In that regard, the New Deal worked very well to bring a failing economy back to a semblance of normalcy for the privileged few.\n","html":"\u003cul\u003e\n\u003cli\u003eMany Mexican-Americans worked as migratory laborers + outside programs\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eIndian Reorganization Act of 1934\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWoman were paied less\u003c/li\u003e\n\u003cli\u003eEnvironmental cost of damns and public projects\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"commentary-on-the-effects-of-the-new-deal\"\u003ecommentary on the effects of the New Deal\u003c/h2\u003e\n\u003cp\u003eIncorporating aspects of \u003ca href=\"\"\u003eArthur M. Schlesinger\u0026rsquo;s Appraisal of the New Deal\u003c/a\u003e, \u003ca href=\"\"\u003eWilliam E. Leuchtenburg\u0026rsquo;s Appraisal of the New Deal\u003c/a\u003e, \u003ca href=\"\"\u003eAnthony Badger\u0026rsquo;s Appraisal of the New Deal.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eThrough the analysis of the New Deal programs, what was particularly salient was Anthony Badger\u0026rsquo;s framing of the event as not one that is ultimately \u0026ldquo;successful\u0026rdquo; or \u0026ldquo;failed\u0026rdquo; but instead one which focuses on its long-term effects \u003cem\u003ein context\u003c/em\u003e with the future policies. The equivocal labeling allows nuance that places the Deal properly in its historical content. According to Badger, helping the poor, a significant policy goal of the deals, were left as \u0026ldquo;unfinished business\u0026rdquo; when going to war. This idea contrasts with William E. 
Leuchtenburg\u0026rsquo;s framing of the same event\u0026mdash;that it was never the true intention of the deal to assist in subsidies on a humane level, but that which supported the economy and incidentally those that reaped benefits on it.\u003c/p\u003e\n\u003cp\u003eThis new frame is much more useful when analyzing the deal. In fact, Leuchtenburg took this a step further and claimed that the New Deal didn\u0026rsquo;t work largely because it was impossible for it to have repaired the damage by the Hoover administration. Furthermore, according to Schlesinger, programs like the NRA were created with already the clear assumption that there were not enough policy tools in place to actually achieve it to the fullest extent. Under this mind frame, then, it is not difficult to see the New Deal as one that intentionally brought a failing US economy\u0026mdash;and those participating in it\u0026mdash;to full swing whilst ignoring those that didn\u0026rsquo;t have an economic influence. It was, therefore, never about helping \u0026ldquo;people\u0026rdquo;: it is a policy and economic tool like any other.\u003c/p\u003e\n\u003cp\u003eThrough this somewhat revisionist view, it is much easier to place into perspective New Deal\u0026rsquo;s zealot focus on young men, strange deficiency in some areas, and central focus on infrastructure. 
In that regard, the New Deal worked very well to bring a failing economy back to a semblance of normalcy for the privileged few.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheffects_of_the_new_deal/","tags":null,"title":"effects of the New Deal"},{"categories":null,"contents":"For an ODE, eigensolutions of some expression \\(x\u0026rsquo;=Ax\\) consists of the class of solutions which remains in a line through the origin, which consists of the family which:\n\\begin{equation} x(t) = ke^{\\lambda t} v \\end{equation}\nwhere \\(\\lambda\\) is an eigenvalue of \\(A\\), and \\(v\\) its corresponding eigenvector.\nmotivation \\begin{equation} y\u0026rsquo; = F(y) \\end{equation}\nan autonomous ODE, suppose we have some solution \\(y=a\\) for which \\(y\u0026rsquo; = 0\\), that is, \\(F(a) = 0\\), we know that the system will be trapped there.\nNear such a stationary solution \\(a\\), we can use a Taylor expansion to linearize:\n\\begin{equation} F(a+b) = F(a) + Jac(a)x + \\dots \\end{equation}\nThe first term, we are given, is \\(0\\). The second term indicates that our derivative near the stationary point seems to be some matrix \\(A\\) of \\(a\\).\nsketching solutions along eigenlines For eigenlines, we can observe the sign of the eigenline to see how it behaves, and\u0026mdash;in conjuction\u0026mdash;how other solutions behave. 
In particular, in the x1, x2 plane for two orders, the solutions are tangent to the eigensolutions.\nWith an negative eigenvalue, the eigensolution arrows will point towards the origin, whereas with positive eigenvalues the eigensolutions will point away.\nsaddle case: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have opposite signs, then the paths look like half-parabolas matching the eigensolutions; it will approach the larger eigenvalue more rapidly node/source/sink case: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have the same sign, then the solutions look like half-parabolas tangent only to the eigenline which has a smaller \\(\\lambda\\) \u0026mdash; in this case, if the eigenvalues happens to be both negative you can work things out for \\(-A\\) and then flip the paths on all the lines\u0026mdash;at smaller values of \\(t\\) (specifically \\(t\u0026lt;1\\)), the curve tends closer to the \\(\\lambda\\) with the smaller eigenvalue (because raising the power to a larger number actually makes the power smaller); at \\(t\u0026gt;1\\), the curve moves towards that of the bigger eigenvalue complex/spiral case: in this case, we can write some answer with Euler\u0026rsquo;s Equation to get two real solutions in trig \\(P(t) + iQ(t)\\), where each \\(P,Q\\) is some function is cos plus sin times \\(ae^{t}\\). Therefore, it will be a spiral outwards Flipping You will note that, in all of these cases \\(x=0\\) is a stationary solution, as \\(A0 = 0\\). As \\(t \\to -\\infty\\), we end up kissing the side with the smaller eigenvalue, and as \\(t \\to +\\infty\\), we end up going towards the side with the bigger eigenvalue.\nNonlinear Non-linear systems can be figured by the motivation above, linearizing into groups, and figuring out each local area.\nFlipping This is because:\n\\begin{equation} (-A)v = -(Av) = (-\\lambda) v \\end{equation}\nmeaning the directions of eigenvectors don\u0026rsquo;t change while their corresponding eigenvalues change. 
If we define some \\(y(t) = x(-t)\\), where \\(Ax = x\u0026rsquo;\\), we can work out that \\(y\u0026rsquo;(t) = -Ay(t)\\), meaning that \\(y\u0026rsquo;\\)\u0026rsquo;s graphs are just flipped versions of \\(x\\)\u0026rsquo;s graphs.\nHence we can just flip everything.\nDrawing By tracing those patterns, you can draw other solutions over time:\n","html":"\u003cp\u003eFor an ODE, \u003ca href=\"/posts/kbheigensolutions/\"\u003eeigensolutions\u003c/a\u003e of some expression \\(x\u0026rsquo;=Ax\\) consists of the class of solutions which remains in a line through the origin, which consists of the family which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = ke^{\\lambda t} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda\\) is an eigenvalue of \\(A\\), and \\(v\\) its corresponding \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003emotivation\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = F(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ean \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODE\u003c/a\u003e, suppose we have some solution \\(y=a\\) for which \\(y\u0026rsquo; = 0\\), that is, \\(F(a) = 0\\), we know that the system will be trapped there.\u003c/p\u003e\n\u003cp\u003eNear such a stationary solution \\(a\\), we can use a Taylor expansion to linearize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(a+b) = F(a) + Jac(a)x + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe first term, we are given, is \\(0\\). The second term indicates that our derivative near the stationary point seems to be some matrix \\(A\\) of \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"sketching-solutions-along-eigenlines\"\u003esketching solutions along eigenlines\u003c/h2\u003e\n\u003cp\u003eFor eigenlines, we can observe the sign of the eigenline to see how it behaves, and\u0026mdash;in conjuction\u0026mdash;how other solutions behave. 
In particular, in the x1, x2 plane for two orders, the solutions are tangent to the eigensolutions.\u003c/p\u003e\n\u003cp\u003eWith an negative eigenvalue, the eigensolution arrows will point towards the origin, whereas with positive eigenvalues the eigensolutions will point away.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esaddle case\u003c/strong\u003e: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have opposite signs, then the paths look like half-parabolas matching the eigensolutions; it will approach the larger eigenvalue more rapidly\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enode/source/sink case\u003c/strong\u003e: if \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) have the same sign, then the solutions look like half-parabolas tangent only to the eigenline which has a smaller \\(\\lambda\\) \u0026mdash; in this case, if the eigenvalues happens to be both negative you can work things out for \\(-A\\) and then flip the paths on all the lines\u0026mdash;at smaller values of \\(t\\) (specifically \\(t\u0026lt;1\\)), the curve tends closer to the \\(\\lambda\\) with the smaller eigenvalue (because raising the power to a larger number actually makes the power smaller); at \\(t\u0026gt;1\\), the curve moves towards that of the bigger eigenvalue\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ecomplex/spiral case\u003c/strong\u003e: in this case, we can write some answer with \u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e to get two real solutions in trig \\(P(t) + iQ(t)\\), where each \\(P,Q\\) is some function is cos plus sin times \\(ae^{t}\\). Therefore, it will be a spiral outwards\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"flipping\"\u003eFlipping\u003c/h3\u003e\n\u003cp\u003eYou will note that, in all of these cases \\(x=0\\) is a stationary solution, as \\(A0 = 0\\). 
As \\(t \\to -\\infty\\), we end up kissing the side with the smaller eigenvalue, and as \\(t \\to +\\infty\\), we end up going towards the side with the bigger eigenvalue.\u003c/p\u003e\n\u003ch3 id=\"nonlinear\"\u003eNonlinear\u003c/h3\u003e\n\u003cp\u003eNon-linear systems can be figured by the motivation above, linearizing into groups, and figuring out each local area.\u003c/p\u003e\n\u003ch2 id=\"flipping\"\u003eFlipping\u003c/h2\u003e\n\u003cp\u003eThis is because:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-A)v = -(Av) = (-\\lambda) v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning the directions of eigenvectors don\u0026rsquo;t change while their corresponding eigenvalues change. If we define some \\(y(t) = x(-t)\\), where \\(Ax = x\u0026rsquo;\\), we can work out that \\(y\u0026rsquo;(t) = -Ay(t)\\), meaning that \\(y\u0026rsquo;\\)\u0026rsquo;s graphs are just flipped versions of \\(x\\)\u0026rsquo;s graphs.\u003c/p\u003e\n\u003cp\u003eHence we can just flip everything.\u003c/p\u003e\n\u003ch2 id=\"drawing\"\u003eDrawing\u003c/h2\u003e\n\u003cp\u003eBy tracing those patterns, you can draw other solutions over time:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_15-13-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbheigensolutions/","tags":null,"title":"eigensolutions"},{"categories":null,"contents":"The eigenspace of \\(T, \\lambda\\) is the set of all eigenvectors of \\(T\\) corresponding to \\(\\lambda\\), plus the \\(0\\) vector.\nconstituents \\(T \\in \\mathcal{L}(V)\\) \\(\\lambda \\in \\mathbb{F}\\), an eigenvalue of \\(T\\) requirements \\begin{equation} E(\\lambda, T) = \\text{null}\\ (T - \\lambda I) \\end{equation}\ni.e. 
all vectors such that \\((T- \\lambda I) v = 0\\).\nwhere, \\(E\\) is an eigenspace of \\(T\\).\nadditional information sum of eigenspaces is a direct sum \\(E(\\lambda_{1}, T) + \u0026hellip; + E(\\lambda_{m}, T)\\) is a direct sum.\nSee eigenspaces are disjoint.\ndimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space A correlate of the above is that:\n\\begin{equation} \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V \\end{equation}\nProof:\nRecall that:\n\\begin{equation} \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) = \\dim (E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) ) \\end{equation}\nbecause \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\).\nNow, the sum of subspaces is the smallest subspace, so \\(\\dim (E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) ) \\leq \\dim V\\).\nAnd hence:\n\\begin{equation} \\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V \\end{equation}\nas desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e of \\(T, \\lambda\\) is the set of all \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(T\\) corresponding to \\(\\lambda\\), plus the \\(0\\) vector.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\lambda \\in \\mathbb{F}\\), an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nE(\\lambda, T) = \\text{null}\\ (T - \\lambda I)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. 
all vectors such that \\((T- \\lambda I) v = 0\\).\u003c/p\u003e\n\u003cp\u003ewhere, \\(E\\) is an \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"sum-of-eigenspaces-is-a-direct-sum\"\u003esum of eigenspaces is a direct sum\u003c/h3\u003e\n\u003cp\u003e\\(E(\\lambda_{1}, T) + \u0026hellip; + E(\\lambda_{m}, T)\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbheigenvalue/#eigenspaces-are-disjoint\"\u003eeigenspaces are disjoint\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"dimension-of-sum-of-eigenspaces-is-smaller-than-or-equal-to-the-dimension-of-the-whole-space\"\u003edimension of sum of eigenspaces is smaller than or equal to the dimension of the whole space\u003c/h3\u003e\n\u003cp\u003eA correlate of the above is that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) = \\dim (E(\\lambda_{1}, T) \\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause \u003ca href=\"/posts/kbhproduct_summation_map/#u-1-plus-dots-plus-u-m-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNow, the \u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esum of subspaces is the smallest subspace\u003c/a\u003e, so \\(\\dim (E(\\lambda_{1}, T) 
\\oplus \u0026hellip; \\oplus E(\\lambda_{m}, T) ) \\leq \\dim V\\).\u003c/p\u003e\n\u003cp\u003eAnd hence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{1}, T) + \u0026hellip; + \\dim E(\\lambda_{m}, T) \\leq \\dim V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheigenspace/","tags":null,"title":"eigenspace"},{"categories":null,"contents":"see Extended Kalman Filter\n","html":"\u003cp\u003esee \u003ca href=\"/posts/kbhfilters/#extended\"\u003eExtended Kalman Filter\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhekf/","tags":null,"title":"EKF"},{"categories":null,"contents":"The Elastic Modulus is a measurement of how much deformation takes place given some force on the system. Formally, it is the slope of the stress-strain curve, defined by:\n\\begin{equation} E = \\frac{stress}{strain} \\end{equation}\nThe units in pascals as it is: force per area (pascals) divided by deformation (dimensionless, as it is a fraction of old shape over new shape \\(\\frac{V}{V}=1\\)).\nDepending on how its measured, it is called different things:\nYoung\u0026rsquo;s Modulus: tensile elasticity\u0026mdash;tendency for object to deform along an axis with force applied (usually that is just called the Elastic Modulus) Shear\u0026rsquo;s Modulus: shear elasticity\u0026mdash;tendency of an object to shear (deform in shape with the constant volume) with force applied Bulk Modulus: volumetric elasticity\u0026mdash;tendency for an object to deform in all directions when uniformly loaded ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e is a measurement of how much deformation takes place given some force on the system. 
Formally, it is the slope of the \u003ca href=\"/posts/kbhstress/\"\u003estress\u003c/a\u003e-\u003ca href=\"/posts/kbhstrain/\"\u003estrain\u003c/a\u003e curve, defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{stress}{strain}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe units in pascals as it is: force per area (pascals) divided by deformation (dimensionless, as it is a fraction of old shape over new shape \\(\\frac{V}{V}=1\\)).\u003c/p\u003e\n\u003cp\u003eDepending on how its measured, it is called different things:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e: tensile elasticity\u0026mdash;tendency for object to deform along an axis with force applied (usually that \u003cem\u003eis\u003c/em\u003e just called the \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eShear\u0026rsquo;s Modulus: shear elasticity\u0026mdash;tendency of an object to shear (deform in shape with the constant volume) with force applied\u003c/li\u003e\n\u003cli\u003eBulk Modulus: volumetric elasticity\u0026mdash;tendency for an object to deform in all directions when uniformly loaded\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelastic_modulus/","tags":null,"title":"Elastic Modulus"},{"categories":null,"contents":"Eleanor Roosevelt is the first lady of the US.\nCreated minimum wage Wrote a weekly column named My Day, in 135 newspapers 2x a week broadcast ","html":"\u003cp\u003e\u003ca href=\"/posts/kbheleanor_roosevelt/\"\u003eEleanor Roosevelt\u003c/a\u003e is the first lady of the US.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCreated \u003ca href=\"/posts/kbhminimum_wage/\"\u003eminimum wage\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWrote a weekly column named \u003ca href=\"/posts/kbhmy_day/\"\u003eMy Day\u003c/a\u003e, in 135 newspapers\u003c/li\u003e\n\u003cli\u003e2x a week 
broadcast\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbheleanor_roosevelt/","tags":null,"title":"Eleanor Roosevelt"},{"categories":null,"contents":"Though Coulomb\u0026rsquo;s Law allow us to calculate the force between any two individual charges, one can note that most of it is independent of the second test charge. In fact, each charge emits a field around itself of the shape:\n\\begin{equation} \\vec{E( r)} = k \\frac{q}{r^{2}} = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q}{r^{2}} \\end{equation}\nOr, you can think of it as moving a test charge \\(q\\) around the charge of interest, then calculating:\n\\begin{equation} \\vec{E} = \\frac{\\vec{F_{e}}}{q} \\end{equation}\nAs you can see, if the source charge were to be positive, you have a positive \\(E\\) which will point away from the charge, and negative charge \\(E\\) will point towards the charge.\nOne institution here that the above statement provides is that electric fields are drawn positive to negative; so, if you placed a positive test charge in the field, it will experience a force tangent to and in the same direction that traced out by the field lines. If you have a negative test change, then it will experience a force in the opposite direction.\nadditional information tracing electric field lines By tracing out electric fields and observing the lines\u0026rsquo; density, we can make a guess about how long the fields are.\ncomposing electric fields Unsurprisingly, superposition matters here as well:\nIf your system has multiple charges, then \\(E_{total} = E_{q_1} + E_{q_2}\\). No surprise here.\n","html":"\u003cp\u003eThough \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eCoulomb\u0026rsquo;s Law\u003c/a\u003e allow us to calculate the force between any two individual charges, one can note that most of it is independent of the second test charge. 
In fact, each charge emits a field around itself of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{E( r)} = k \\frac{q}{r^{2}} = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q}{r^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOr, you can think of it as moving a test charge \\(q\\) around the charge of interest, then calculating:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{E} = \\frac{\\vec{F_{e}}}{q}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs you can see, if the source charge were to be positive, you have a \u003cstrong\u003epositive\u003c/strong\u003e \\(E\\) which will point \u003cem\u003eaway\u003c/em\u003e from the charge, and \u003cstrong\u003enegative\u003c/strong\u003e charge \\(E\\) will point \u003cem\u003etowards\u003c/em\u003e the charge.\u003c/p\u003e\n\u003cp\u003eOne institution here that the above statement provides is that \u003cstrong\u003eelectric fields are drawn positive to negative\u003c/strong\u003e; so, if you placed a positive test charge in the field, it will experience a force tangent to and in the same direction that traced out by the field lines. 
If you have a negative test change, then it will experience a force in the opposite direction.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"tracing-electric-field-lines\"\u003etracing electric field lines\u003c/h3\u003e\n\u003cp\u003eBy tracing out electric fields and observing the lines\u0026rsquo; density, we can make a guess about how long the fields are.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-12_15-10-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"composing-electric-fields\"\u003ecomposing electric fields\u003c/h3\u003e\n\u003cp\u003eUnsurprisingly, \u003ca href=\"/posts/kbhcoulomb_s_law/#superposition\"\u003esuperposition\u003c/a\u003e matters here as well:\u003c/p\u003e\n\u003cp\u003eIf your system has multiple charges, then \\(E_{total} = E_{q_1} + E_{q_2}\\). No surprise here.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelectric_field/","tags":null,"title":"electric field"},{"categories":null,"contents":"electric potential is analogous to gravitational potential energy, but with electrostatics!\n\\begin{equation} P_{E} = qV \\end{equation}\nwhere \\(q\\) is the change on the particle in question, and \\(V\\) is the voltage, the difference in electric potential between two places.\nYes, voltage is defined vis a vi electric potential: that is, it represents a differential of electric potential.\nadditional information electric potential is analogous to gravitational potential Let \\(A, B, C\\) be positrons, and the lines are the electric field. Which one has the highest electric potential? 
\\(A\\), because it has the most distance to travel to until it can get all the way to the right.\nconnecting electric potential and electric field parallel plates \\begin{equation} E = \\frac{V}{d} \\end{equation}\nwhere \\(d\\) is the distance between the plates, \\(E\\) the uniform electric field between the plates.\nThe amount of charge on each plate is described by:\n\\begin{equation} Q = CV \\end{equation}\nwhere, \\(C\\) is the capacitance of each plate, and \\(V\\) the voltage across the plates.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e is analogous to \u003ca href=\"/posts/kbhgravitational_potential_energy/\"\u003egravitational potential energy\u003c/a\u003e, but with electrostatics!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP_{E} = qV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(q\\) is the change on the particle in question, and \\(V\\) is the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e, the difference in \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e between two places.\u003c/p\u003e\n\u003cp\u003eYes, \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e is defined vis a vi \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e: that is, it represents a \u003cem\u003edifferential\u003c/em\u003e of \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"electric-potential-is-analogous-to-gravitational-potential\"\u003eelectric potential is analogous to gravitational potential\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-05_20-19-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLet \\(A, B, C\\) be positrons, and the lines are the \u003ca 
href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e. Which one has the highest electric potential? \\(A\\), because it has the most distance to travel to until it can get all the way to the right.\u003c/p\u003e\n\u003ch3 id=\"connecting-electric-potential--kbhelectric-potential-energy-dot-md--and-electric-field--kbhelectric-field-dot-md\"\u003econnecting \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential\u003c/a\u003e and \u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e\u003c/h3\u003e\n\u003ch4 id=\"parallel-plates\"\u003eparallel plates\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-18_13-40-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\nE = \\frac{V}{d}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is the distance between the plates, \\(E\\) the uniform \u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e between the plates.\u003c/p\u003e\n\u003cp\u003eThe amount of \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e on each plate is described by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ = CV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(C\\) is the \u003ca href=\"/posts/kbhcapacitance/\"\u003ecapacitance\u003c/a\u003e of each plate, and \\(V\\) the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e across the plates.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelectric_potential_energy/","tags":null,"title":"electric potential energy"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhelectron/","tags":null,"title":"electron"},{"categories":null,"contents":"Elie Adam is a research scientist in brain dynamics and neuro-science at MIT.\nMathematical Systems Systemic influences with various pieces Hopfield Networks Mouse Video Games Derivatives are essentially a high pass filter\nMethods of neuro imaging 
calcium channel florescence Electrode measurements Optogenetics primary methods analyzing monke with neuro-imaging methods above creating in silico models based on those responses, in a large systems of differential equations play with those equations to figure possible novel responses try them on monke ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhelie_adam/\"\u003eElie Adam\u003c/a\u003e is a research scientist in brain dynamics and neuro-science at MIT.\u003c/p\u003e\n\u003ch2 id=\"mathematical-systems\"\u003eMathematical Systems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSystemic influences with various pieces\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhopfield_networks/\"\u003eHopfield Networks\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mouse-video-games\"\u003eMouse Video Games\u003c/h2\u003e\n\u003cp\u003eDerivatives are essentially a high pass filter\u003c/p\u003e\n\u003ch2 id=\"methods-of-neuro-imaging\"\u003eMethods of neuro imaging\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecalcium channel florescence\u003c/li\u003e\n\u003cli\u003eElectrode measurements\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptogenetics/\"\u003eOptogenetics\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"primary-methods\"\u003eprimary methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eanalyzing monke with neuro-imaging methods above\u003c/li\u003e\n\u003cli\u003ecreating \u003cem\u003ein silico\u003c/em\u003e models based on those responses, in a large systems of differential equations\u003c/li\u003e\n\u003cli\u003eplay with those equations to figure possible novel responses\u003c/li\u003e\n\u003cli\u003etry them on monke\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhelie_adam/","tags":null,"title":"Elie Adam"},{"categories":null,"contents":"Wizenbaum (1966)\nworks pattern-action rules by rephrasing user\u0026rsquo;s questions\n\u0026ldquo;You hate me\u0026rdquo; =\u0026gt; 
\u0026ldquo;what makes you think I hate you\u0026rdquo;\nRogerian psycotherapy: assume no real-world knowledge; simply draws out patient\u0026rsquo;s statements\nI need X =\u0026gt; what would it mean to you if you got X.\nuses regex\ncapture specific adjectives, \u0026ldquo;all\u0026rdquo;, \u0026ldquo;always\u0026rdquo;, etc. and responds accordingly\nEliza Rules patterns are organized by keywords: a keyword has a pattern and a list of transforms:\ne.g.:\nkeyword: you\n(0 you 0 me) (what makes you think I 3 you) (why do you think I 3 you) Keywords are ranked from specific to general: each keyword has a rank, where most specific keywords words are most highly ranked, and then expansions are picked with the one with the highest keyword rank.\n\u0026ldquo;my transform\u0026rdquo; whenever the keyword \u0026ldquo;my\u0026rdquo; is used, we will pop a transformerd utterance onto the memory list (\u0026ldquo;earlier you said your\u0026rdquo;\u0026hellip;) in a FIFO queue.\nlater if we don\u0026rsquo;t know what to say we just pop something off\nethical implications people maybe mislead by computer understanding face to face interaction is vital people develop specific relationships with artifacts: such as a diary value sensitive design: consider benifits, harms, etc. 
","html":"\u003cp\u003eWizenbaum (1966)\u003c/p\u003e\n\u003cp\u003eworks pattern-action rules by rephrasing user\u0026rsquo;s questions\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;You hate me\u0026rdquo; =\u0026gt; \u0026ldquo;what makes you think I hate you\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eRogerian psycotherapy: \u003cstrong\u003eassume no real-world knowledge\u003c/strong\u003e; simply draws out patient\u0026rsquo;s statements\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eI need X =\u0026gt; what would it mean to you if you got X.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-12_09-37-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003euses \u003ca href=\"/posts/kbhregex/\"\u003eregex\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003ecapture specific adjectives, \u0026ldquo;all\u0026rdquo;, \u0026ldquo;always\u0026rdquo;, etc. and responds accordingly\u003c/p\u003e\n\u003ch2 id=\"eliza-rules\"\u003eEliza Rules\u003c/h2\u003e\n\u003cp\u003epatterns are organized by \u003cstrong\u003ekeywords\u003c/strong\u003e: a keyword has a pattern and a list of transforms:\u003c/p\u003e\n\u003cp\u003ee.g.:\u003c/p\u003e\n\u003cp\u003ekeyword: \u003cstrong\u003eyou\u003c/strong\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-lisp\" data-lang=\"lisp\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eme\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewhat\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emakes\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethink\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewhy\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edo\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethink\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eyou\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eKeywords are ranked from specific to general: each keyword has a rank, where most specific keywords words are most highly ranked, and then expansions are picked with the one with the highest keyword rank.\u003c/p\u003e\n\u003ch2 id=\"my-transform\"\u003e\u0026ldquo;my transform\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003ewhenever the keyword \u0026ldquo;my\u0026rdquo; is used, we will pop a transformerd utterance onto the memory list (\u0026ldquo;earlier you said your\u0026rdquo;\u0026hellip;) in a FIFO queue.\u003c/p\u003e\n\u003cp\u003elater if we don\u0026rsquo;t know what to say we just pop something off\u003c/p\u003e\n\u003ch2 id=\"ethical-implications\"\u003eethical implications\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003epeople maybe mislead by computer 
understanding\u003c/li\u003e\n\u003cli\u003eface to face interaction is vital\u003c/li\u003e\n\u003cli\u003epeople develop specific relationships with artifacts: such as a diary\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evalue sensitive design\u003c/strong\u003e: consider benifits, harms, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbheliza/","tags":null,"title":"ELIZA"},{"categories":null,"contents":"A civil rights movement organizer that founded SNICK.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e organizer that founded \u003ca href=\"/posts/kbhcivil_rights/#snick\"\u003eSNICK\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhella_baker/","tags":null,"title":"Ella Baker"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhelo_ratings/","tags":null,"title":"Elo Ratings"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhempty_binding_site/","tags":null,"title":"empty binding site"},{"categories":null,"contents":"Your brain maintaing a stable level of energy. Closely related to glucose homeostatis.\nmethods to achive energy homeostasis by the CNS regulation of the brain AgRP signaling is activated to stimulate food intake when hypoglycemic. ","html":"\u003cp\u003eYour brain maintaing a stable level of energy. 
Closely related to \u003ca href=\"\"\u003eglucose homeostatis\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"methods-to-achive-energy-homeostasis--kbhenergy-homeostasis-dot-md--by-the-cns-regulation--kbhcns-regulation-dot-md--of-the-brain\"\u003emethods to achive \u003ca href=\"/posts/kbhenergy_homeostasis/\"\u003eenergy homeostasis\u003c/a\u003e by the \u003ca href=\"/posts/kbhcns_regulation/\"\u003eCNS regulation\u003c/a\u003e of the brain\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e signaling is activated to stimulate food intake when hypoglycemic.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhenergy_homeostasis/","tags":null,"title":"energy homeostasis"},{"categories":null,"contents":"english is a great language.\nFor Orthello:\nPronouns: 23 \u0026ldquo;Moor:\u0026rdquo;: 18 Orthello: 7\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhenglish/\"\u003eenglish\u003c/a\u003e is a great language.\u003c/p\u003e\n\u003cp\u003eFor Orthello:\u003c/p\u003e\n\u003cp\u003ePronouns: 23\n\u0026ldquo;Moor:\u0026rdquo;: 18\nOrthello: 7\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhenglish/","tags":null,"title":"english"},{"categories":null,"contents":"motivating entanglement file:///Users/houliu/Documents/School Work/The Bible/Quantum/Leonard Susskind, Art Friedman - Quantum Mechanics_ The Theoretical Minimum-Basic Books (2014).pdf\nTake two actors, Alice \\(A\\) and Bob \\(B\\). They each have a space \\(S_A\\) and \\(S_B\\). What if, for instance, we want to create a composite system out of Alice and Bob?\nWe will define elements in the Alice space as being defined by bases \\(H\\) and \\(T\\), where each element \\(a \\in S_a\\) is defined as:\n\\begin{equation} \\alpha_H | H \\big\\} + \\alpha_T | T \\big\\} \\end{equation}\nWhy the weird kets? 
We will use different kets to be aware of where bases came from; as in, elements in Alicespace is not elements in Bobspace.\nLet\u0026rsquo;s take Bobspace to be a higher dimension, as in, using normal ket vectors:\n\\begin{align} |1\\big\u0026gt; \\\\ |2\\big\u0026gt; \\\\ |3\\big\u0026gt; \\\\ \\cdots \\\\ |6\\big\u0026gt; \\end{align}\n","html":"\u003ch2 id=\"motivating-entanglement--kbhentangled-dot-md\"\u003emotivating \u003ca href=\"/posts/kbhentangled/\"\u003eentanglement\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003efile:///Users/houliu/Documents/School Work/The Bible/Quantum/Leonard Susskind, Art Friedman - Quantum Mechanics_ The Theoretical Minimum-Basic Books (2014).pdf\u003c/p\u003e\n\u003cp\u003eTake two actors, Alice \\(A\\) and Bob \\(B\\). They each have a space \\(S_A\\) and \\(S_B\\). What if, for instance, we want to create a \u003ca href=\"/posts/kbhcomposite_system/\"\u003ecomposite system\u003c/a\u003e out of Alice and Bob?\u003c/p\u003e\n\u003cp\u003eWe will define elements in the Alice space as being defined by bases \\(H\\) and \\(T\\), where each element \\(a \\in S_a\\) is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_H | H \\big\\} + \\alpha_T | T \\big\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhy the weird kets? We will use different kets to be aware of where bases came from; as in, elements in Alicespace is not elements in Bobspace.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take Bobspace to be a higher dimension, as in, using normal ket vectors:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n|1\\big\u0026gt; \\\\\n|2\\big\u0026gt; \\\\\n|3\\big\u0026gt; \\\\\n\\cdots \\\\\n|6\\big\u0026gt;\n\\end{align}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhentangled/","tags":null,"title":"entanglement"},{"categories":null,"contents":"epigenetics is the ability to make identical cells present distinct phenotipic states.\nWhy? 
DNA is packaged by charged histone proteins, and they wrap around the nucleosome. Upon acute changes in the environment, cells can change their epigenic states.\nwhimsical adaptations Epigenetic adaptive states in organisms with no clear path adaptation. For instance, a certain lung cancer cell has this ability. So, how do cells decide what genes they would activate?\nAnother example: treating fisheries in caffine\nGrowing in caffine will trigger caffine resistance Remove the caffine would cause some of them to default back, some of them to stay the same way ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhepigenetics/\"\u003eepigenetics\u003c/a\u003e is the ability to make identical cells present distinct phenotipic states.\u003c/p\u003e\n\u003ch2 id=\"why\"\u003eWhy?\u003c/h2\u003e\n\u003cp\u003eDNA is packaged by charged \u003ca href=\"\"\u003ehistone\u003c/a\u003e proteins, and they wrap around the nucleosome. Upon acute changes in the environment, cells can change their epigenic states.\u003c/p\u003e\n\u003ch2 id=\"whimsical-adaptations\"\u003ewhimsical adaptations\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhepigenetics/\"\u003eEpigenetic\u003c/a\u003e adaptive states in organisms with no clear path adaptation. For instance, a certain lung cancer cell has this ability. 
So, how do cells decide what genes they would activate?\u003c/p\u003e\n\u003cp\u003eAnother example: treating fisheries in caffine\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGrowing in caffine will trigger caffine resistance\u003c/li\u003e\n\u003cli\u003eRemove the caffine would cause some of them to default back, some of them to stay the same way\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhepigenetics/","tags":null,"title":"epigenetics"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhepitophs/","tags":null,"title":"epitopes"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhequal_rights_act/","tags":null,"title":"Equal Rights Act"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhetf/","tags":null,"title":"ETF"},{"categories":null,"contents":"A corollary of greatest common divisor and division.\nSay you have some \\(b|a\\) such that:\n\\begin{equation} a = bq + r \\end{equation}\nNow, \\(d|a,b \\Leftrightarrow d|b,r\\) (because \\(d|b,r\\) implies there\u0026rsquo;s some \\(x, x\u0026rsquo;\\) such that \\(a = (dx)q+dx\u0026rsquo;\\), and so \\(a = d(xq + x\u0026rsquo;)\\) and so \\(d|a\\); the logic goes the other way too).\nThis finally implies that \\(\\gcd (a,b)= \\gcd (b,r)\\) because any divisor that works for one works for both.\n","html":"\u003cp\u003eA corollary of \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e and \u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSay you have some \\(b|a\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = bq + r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, \\(d|a,b \\Leftrightarrow d|b,r\\) (because \\(d|b,r\\) implies there\u0026rsquo;s some \\(x, x\u0026rsquo;\\) such that \\(a = (dx)q+dx\u0026rsquo;\\), and so \\(a = d(xq + x\u0026rsquo;)\\) and so 
\\(d|a\\); the logic goes the other way too).\u003c/p\u003e\n\u003cp\u003eThis finally implies that \\(\\gcd (a,b)= \\gcd (b,r)\\) because any divisor that works for one works for both.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheuclidean_algorithm/","tags":null,"title":"Euclidean Algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbheugene_wigner/","tags":null,"title":"Eugene Wigner"},{"categories":null,"contents":"A type of cell.\nSample eukareotyic cell gene:\nTATA box promoter 5\u0026rsquo; non-coding sequence Non-coding introns interlaced between exons, unique to eukareotyic cells. Bacteria (prokateotic cells don\u0026rsquo;t contain introns or have small them) 3\u0026rsquo; non-coding sequence ","html":"\u003cp\u003eA type of \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSample \u003ca href=\"/posts/kbheukareotyic_cell/\"\u003eeukareotyic cell\u003c/a\u003e gene:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eTATA box\u003c/a\u003e \u003ca href=\"\"\u003epromoter\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e5\u0026rsquo; non-coding sequence\u003c/li\u003e\n\u003cli\u003eNon-coding \u003ca href=\"\"\u003eintrons\u003c/a\u003e interlaced between \u003ca href=\"\"\u003eexons\u003c/a\u003e, unique to \u003ca href=\"/posts/kbheukareotyic_cell/\"\u003eeukareotyic cell\u003c/a\u003es. 
Bacteria (\u003ca href=\"/posts/kbhprokateotic_cell/\"\u003eprokateotic cell\u003c/a\u003es don\u0026rsquo;t contain \u003ca href=\"\"\u003eintron\u003c/a\u003es or have small them)\u003c/li\u003e\n\u003cli\u003e3\u0026rsquo; non-coding sequence\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbheukareotyic_cell/","tags":null,"title":"eukareotyic cell"},{"categories":null,"contents":"The Euler-Bernoulli Theory is a theory in dynamics which describes how much a beam deflect given an applied load.\nAssumptions For Euler-Bernoulli Theory to apply in its basic form, we make assumptions.\nThe \u0026ldquo;beam\u0026rdquo; you are bending is modeled as a 1d object; it is only long and is not wide For this page, \\(+x\\) is \u0026ldquo;right\u0026rdquo;, \\(+y\\) is \u0026ldquo;in\u0026rdquo;, and \\(+z\\) is \u0026ldquo;up\u0026rdquo; Probably more, but we only have this so far. the general form of the Euler-Bernoulli Theory assumes a freestanding beam Basic Statement The most basic for the Euler-Bernoulli Equation looks like this:\n\\begin{equation} \\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x) \\end{equation}\nwhere, \\(w(x)\\) is the deflection of the beam at some direction \\(z\\) at position \\(x\\). \\(q\\) is the load distribution (force per unit length, similar to pressure which is force per unit area, at each point \\(x\\)). \\(E\\) is the Elastic Modulus of the beam, and \\(I\\) the second moment of area of the beam\u0026rsquo;s cross section.\nNote that \\(I\\) must be calculated with respect to the axis perpendicular to the load. 
So, for a beam placed longside by the \\(x\\) axis, and pressed down on the \\(z\\) axis, \\(I\\) should be calculated as: \\(\\iint z^{2}\\dd{y}\\dd{z}\\).\nPretty much all the time, the Elastic Modulus \\(E\\) (how rigid your thing is) and second moment of area \\(I\\) (how distributed are the cross-section\u0026rsquo;s mass) are constant; therefore, we factor them out, making:\n\\begin{align} \u0026amp;\\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x) \\\\ \\Rightarrow\\ \u0026amp; EI \\qty(\\dv[2]x \\dv[2]{w}{x} )=q(x) \\\\ \\Rightarrow\\ \u0026amp; EI \\dv[4]{w}{x} = q(x) \\end{align}\nThis is also apparently used everywhere in engineering to figure out how bendy something will be given some \\(q\\) put along the beam.\nOk, let\u0026rsquo;s take the original form of this equation and take some integrals to see the edges of this thing:\n\\begin{equation} \\dv[2]{x} \\qty(EI \\dv[2]{w}{x}) = q(x) \\end{equation}\nFirst things first, let\u0026rsquo;s take a single integral:\n\\begin{equation} \\dv{x} \\qty(EI \\dv[2]{w}{x}) = -Q \\end{equation}\nThis is the total shear force on the material (the sum of all forces applied to all points \\(\\int q(x)\\).) We have a sign difference\nold notes\nLet\u0026rsquo;s take some transverse load \\(q(x,t)\\), applied at time \\(t\\) at location \\(x\\). To model the load/bending/vibration of the rod, we first have to know a few more things.\nFirst, figure the Young\u0026rsquo;s Modulus \\(E\\) of the thing that you are bending.\nOf course, we also want to know what shape our thing is; more specifically, we want to know how the point masses in our thing is distributed. 
So we will also need the second moment of area \\(I\\).\nFinally, we should have \\(m\\) mass per unit length of the rod we are bending.\nThe Euler-Bernoulli Theory tells us that the deflection (distance from the neutral-axis) each point \\(x\\) in the material should get is:\n\\begin{equation} EI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = q(x,t) \\end{equation}\nSolving this lovely Differential Equation would tell you how far away each point diverges from the neutral point.\nTracing this out over \\((x,t)\\), we can get some trace of how the thing vibrates by measuring the behavior of \\(\\omega\\).\nfree vibrations in Euler-Bernoulli Theory If no time-varying \\(q\\) exists, we then have:\n\\begin{equation} EI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = 0 \\end{equation}\nAnd then some magical Differential Equations happen. I hope to learn them soon.\nThe result here is significant: if we can figure the actual rate of vibrations which we expect.\nHowever, this doesn\u0026rsquo;t really decay\u0026mdash;but funing torks do. How?\nApparently because air resistance\u0026mdash;Zachary Sayyah. 
So Sasha time.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e is a theory in dynamics which describes how much a beam deflect given an applied load.\u003c/p\u003e\n\u003ch2 id=\"assumptions\"\u003eAssumptions\u003c/h2\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e to apply in its basic form, we make assumptions.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThe \u0026ldquo;beam\u0026rdquo; you are bending is modeled as a 1d object; it is only long and is not wide\u003c/li\u003e\n\u003cli\u003eFor this page, \\(+x\\) is \u0026ldquo;right\u0026rdquo;, \\(+y\\) is \u0026ldquo;in\u0026rdquo;, and \\(+z\\) is \u0026ldquo;up\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eProbably more, but we only have this so far.\u003c/li\u003e\n\u003cli\u003ethe general form of the \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e assumes a freestanding beam\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basic-statement\"\u003eBasic Statement\u003c/h2\u003e\n\u003cp\u003eThe most basic for the \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Equation\u003c/a\u003e looks like this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(w(x)\\) is the deflection of the beam at some direction \\(z\\) at position \\(x\\). \\(q\\) is the load distribution (force per unit length, similar to pressure which is force per unit area, at each point \\(x\\)). 
\\(E\\) is the \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e of the beam, and \\(I\\) the \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e of the beam\u0026rsquo;s cross section.\u003c/p\u003e\n\u003cp\u003eNote that \\(I\\) must be calculated with respect to the axis perpendicular to the load. So, for a beam placed longside by the \\(x\\) axis, and pressed down on the \\(z\\) axis, \\(I\\) should be calculated as: \\(\\iint z^{2}\\dd{y}\\dd{z}\\).\u003c/p\u003e\n\u003cp\u003ePretty much all the time, the \u003ca href=\"/posts/kbhelastic_modulus/\"\u003eElastic Modulus\u003c/a\u003e \\(E\\) (how rigid your thing is) and \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e \\(I\\) (how distributed are the cross-section\u0026rsquo;s mass) are constant; therefore, we factor them out, making:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dv[2]x \\qty(EI\\dv[2]{w}{x}) =q(x) \\\\\n\\Rightarrow\\ \u0026amp; EI \\qty(\\dv[2]x \\dv[2]{w}{x} )=q(x) \\\\\n\\Rightarrow\\ \u0026amp; EI \\dv[4]{w}{x} = q(x)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis is also apparently used everywhere in engineering to figure out how bendy something will be given some \\(q\\) put along the beam.\u003c/p\u003e\n\u003cp\u003eOk, let\u0026rsquo;s take the original form of this equation and take some integrals to see the edges of this thing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{x} \\qty(EI \\dv[2]{w}{x}) = q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFirst things first, let\u0026rsquo;s take a single integral:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x} \\qty(EI \\dv[2]{w}{x}) = -Q\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is the total shear force on the material (the sum of all forces applied to all points \\(\\int q(x)\\).) 
We have a sign difference\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eold notes\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take some \u003ca href=\"/posts/kbhtransverse_loaod/\"\u003etransverse load\u003c/a\u003e \\(q(x,t)\\), applied at time \\(t\\) at location \\(x\\). To model the load/bending/vibration of the rod, we first have to know a few more things.\u003c/p\u003e\n\u003cp\u003eFirst, figure the \u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e \\(E\\) of the thing that you are bending.\u003c/p\u003e\n\u003cp\u003eOf course, we also want to know what shape our thing is; more specifically, we want to know how the point masses in our thing is distributed. So we will also need the \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e \\(I\\).\u003c/p\u003e\n\u003cp\u003eFinally, we should have \\(m\\) mass per unit length of the rod we are bending.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e tells us that the deflection (distance from the neutral-axis) each point \\(x\\) in the material should get is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = q(x,t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving this lovely \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e would tell you how far away each point diverges from the neutral point.\u003c/p\u003e\n\u003cp\u003eTracing this out over \\((x,t)\\), we can get some trace of how the thing vibrates by measuring the behavior of \\(\\omega\\).\u003c/p\u003e\n\u003ch2 id=\"free-vibrations-in-euler-bernoulli-theory--kbheuler-bernoulli-theory-dot-md\"\u003efree vibrations in \u003ca href=\"/posts/kbheuler_bernoulli_theory/\"\u003eEuler-Bernoulli Theory\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eIf no time-varying \\(q\\) exists, we then 
have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\pdv[4]{w}{x} + m \\pdv[2]{w}{t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then some magical \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e happen. I hope to learn them soon.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_23-15-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_23-16-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe result here is significant: if we can figure the actual rate of vibrations which we expect.\u003c/p\u003e\n\u003cp\u003eHowever, this doesn\u0026rsquo;t really decay\u0026mdash;but \u003ca href=\"/posts/kbhtuning_forks/\"\u003efuning tork\u003c/a\u003es do. How?\u003c/p\u003e\n\u003cp\u003eApparently because air resistance\u0026mdash;Zachary Sayyah. So Sasha time.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheuler_bernoulli_theory/","tags":null,"title":"Euler-Bernoulli Theory"},{"categories":null,"contents":"\\begin{equation} f(x) = e^{ix} = \\cos (x) + i\\sin (x) \\end{equation}\nthis brings a circle of radius one, because in every point, velocity is orthogonal to where you are (because \\(f\u0026rsquo;(x) = if(x)\\), and multiplying by \\(i\\) accounts for a rotation of 90 degrees.\nAnd so,\n\\begin{equation} z = re^{i\\theta} \\end{equation}\ngives any point in the imaginary polar plane.\n","html":"\u003cp\u003e\\begin{equation}\nf(x) = e^{ix} = \\cos (x) + i\\sin (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis brings a circle of radius one, because in every point, velocity is orthogonal to where you are (because \\(f\u0026rsquo;(x) = if(x)\\), and multiplying by \\(i\\) accounts for a rotation of 90 degrees.\u003c/p\u003e\n\u003cp\u003eAnd so,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = re^{i\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egives any point in the imaginary polar 
plane.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbheuler_s_equation/","tags":null,"title":"Euler's Equation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbheurope/","tags":null,"title":"Europe"},{"categories":null,"contents":"An event is a subset of the sample space \\(E \\subseteq S\\). These are some subset to which you ascribe some meaning.\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhevent/\"\u003eevent\u003c/a\u003e is a subset of the \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e \\(E \\subseteq S\\). These are some subset to which you ascribe some meaning.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhevent/","tags":null,"title":"event"},{"categories":null,"contents":"Preamble As notation differs between Alg4DM (which the presentation and notes use) and the paper, we provide a note here to standardize the notation of the PGA formulation to avoid confusion.\nRecall that the non-linear program formulation of the naive PGA implementation gives:\n\\begin{align} \\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\ \\text{such that}\\ \u0026amp;J\\theta = \\bold{1} \\\\ \u0026amp; \\theta \\geq \\bold{0} \\\\ \u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{align}\nfor:\n\\begin{equation} f(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta} \\end{equation}\nand\n\\begin{equation} h_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i} \\end{equation}\nwhere \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\), and \\(\\bold{T}_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;))\\) is a transition matrix between state and controller latent pairs.\nQuestion 1 In the Non Linear Program (NLP) formulation above, the constraint \\(J\\theta = \\bold{1}\\) is a block-diagonal matrix filled with only ones and zeros that serve a particular purpose.\nThough we didn\u0026rsquo;t describe \\(J\\) in detail in the talk apart from its function, recall that its 
job is to add up certain elements in the \\(\\theta\\) vector to ensure they satisfy certain constraints. What breaks down about PGA if that constraint is removed (i.e. what does it do)? Given your answer in 1, what should be the input and output dimensions of the \\(J\\) map? You may use in your answer, as needed, any expression that involves \\(|X|\\) the number of nodes in the controller, \\(|A|\\) the size of the action space, \\(|O|\\) the size of the observation space and \\(|S|\\) the size of the state space. Answer Recall that:\n\\begin{equation} \\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f)) \\end{equation}\nWe want to ensure that each output \\(\\sum_{a}\\Psi(a | x_{j}) = 1\\), and \\(\\sum_{x} \\eta(x|x_{i},a_{j},o_{k}) = 1\\) (that both the action distributions at a node and the node transitions are indeed categorical probability distributions).\nAs we flattened every combination of possible outputs of \\(\\Psi\\) and \\(\\eta\\) of output onto \\(\\theta\\), the constraint \\(J\\theta = \\bold{1}\\) ensures that each \\(\\Psi\\) and \\(\\eta\\) conditioned on a pair of current condition remains a probability distribution. 
Otherwise, the model will likely collapse to taking advantage of impossible probabilities (\u0026ldquo;we have \\(300\\%\\) chance of a highly valuable state!\u0026rdquo;) to maximize the utility.\nIts signature is:\n\\begin{equation} J: \\mathbb{R}^{|A \\times X \\times X \\times X \\times A \\times O|} \\to \\mathbb{R}^{|X \\times X \\times A \\times O|} \\end{equation}\nEach input dimension corresponds to a slot on \\(\\theta\\), and each output dimension corresponds to a thing we want to add up to \\(1\\), which means each pair of conditions to the distributions of \\(\\Psi\\) and \\(\\eta\\).\nAs there are \\(A \\cdot X\\) possible combinations of \\(a,x_{j}\\) for \\(\\Psi(a|x_{j})\\) and \\(X \\cdot X\\cdot A\\cdot O\\) possible combinations of \\(x, x_{i}, a_{j},o_{k}\\) for \\(\\eta(x|x_i, a_{j}, o_{k})\\) to be tabulated for probability in \\(\\theta\\), this matrix has \\(A^{2}X^{3}O\\) columns as input.\nAs there are \\(X\\) possible prior conditions to \\(\\Psi\\) and \\(X \\cdot A \\cdot O\\) possible prior conditions to \\(\\eta\\), this means the matrix should have \\(X^{2}AO\\) rows of output.\nQuestion 2 The constraints to the optimization objective for controller C-POMDPs is extremely similar to non-constrained controller POMDPs. 
In effect, they only differ by the third constraint:\n\\begin{equation} h_{i}(\\theta) \\leq \\varepsilon_{i} \\end{equation}\nIn fact, both systems have the same, exact, unchanged optimization objective that doesn\u0026rsquo;t change regardless of constraint: \\(\\max_{\\theta}\\ f(\\theta)\\).\nWhy is solving C-POMDPs using controllers gradient methods much harder than solving POMDPs using a similar objective, and how exactly does the PGA authors\u0026rsquo; contributions address this issue to make its computation feasible?\nAnswer In order to ensure that the output distribution by gradient descent actually fits the constraints provided (and other constraints regarding them being probability distributions), the constraints to the optimization problem\u0026mdash;\\(h_{i}(\\theta)\\) included\u0026mdash;needs to be computed per step to project the raw output parameters down into valid parameters.\n\\(h_{i}\\), importantly, is non-linear and non-convex. Removing this constraint makes the optimization bounds linear which drastically speeds up computation. 
This is why POMDPs leveraging similar approaches don\u0026rsquo;t have as much computational intractability.\nTo solve this, the PGA authors linearise the \\(h_{i}\\) function using a first-order Taylor expansion in order to make this last constraint linear as well, which makes the entire projection problem have linear constraints: vastly improving computational efficiency.\n","html":"\u003ch2 id=\"preamble\"\u003ePreamble\u003c/h2\u003e\n\u003cp\u003eAs notation differs between Alg4DM (which the presentation and notes use) and the paper, we provide a note here to standardize the notation of the PGA formulation to avoid confusion.\u003c/p\u003e\n\u003cp\u003eRecall that the non-linear program formulation of the naive PGA implementation gives:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\\n\\text{such that}\\ \u0026amp;J\\theta = \\bold{1} \\\\\n\u0026amp; \\theta \\geq \\bold{0} \\\\\n\u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{align}\u003c/p\u003e\n\u003cp\u003efor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\), and \\(\\bold{T}_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;))\\) is a transition matrix between state and controller latent pairs.\u003c/p\u003e\n\u003ch2 id=\"question-1\"\u003eQuestion 1\u003c/h2\u003e\n\u003cp\u003eIn the Non Linear Program (NLP) formulation above, the constraint \\(J\\theta = \\bold{1}\\) is a block-diagonal matrix filled with only ones and zeros that serve a particular purpose.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThough we didn\u0026rsquo;t describe \\(J\\) in detail in the talk apart from its function, recall that its job is to add up 
certain elements in the \\(\\theta\\) vector to ensure they satisfy certain constraints. What breaks down about PGA if that constraint is removed (i.e. what does it do)?\u003c/li\u003e\n\u003cli\u003eGiven your answer in 1, what should be the input and output dimensions of the \\(J\\) map? You may use in your answer, as needed, any expression that involves \\(|X|\\) the number of nodes in the controller, \\(|A|\\) the size of the action space, \\(|O|\\) the size of the observation space and \\(|S|\\) the size of the state space.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"answer\"\u003eAnswer\u003c/h3\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to ensure that each output \\(\\sum_{a}\\Psi(a | x_{j}) = 1\\), and \\(\\sum_{x} \\eta(x|x_{i},a_{j},o_{k}) = 1\\) (that both the action distributions at a node and the node transitions are indeed categorical probability distributions).\u003c/p\u003e\n\u003cp\u003eAs we flattened every combination of possible outputs of \\(\\Psi\\) and \\(\\eta\\) of output onto \\(\\theta\\), the constraint \\(J\\theta = \\bold{1}\\) ensures that each \\(\\Psi\\) and \\(\\eta\\) conditioned on a pair of current condition remains a probability distribution. 
Otherwise, the model will likely collapse to taking advantage of impossible probabilities (\u0026ldquo;we have \\(300\\%\\) chance of a highly valuable state!\u0026rdquo;) to maximize the utility.\u003c/p\u003e\n\u003cp\u003eIts signature is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ: \\mathbb{R}^{|A \\times X \\times X \\times X \\times A \\times O|} \\to \\mathbb{R}^{|X \\times X \\times A \\times O|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach input dimension corresponds to a slot on \\(\\theta\\), and each output dimension corresponds to a thing we want to add up to \\(1\\), which means each pair of conditions to the distributions of \\(\\Psi\\) and \\(\\eta\\).\u003c/p\u003e\n\u003cp\u003eAs there are \\(A \\cdot X\\) possible combinations of \\(a,x_{j}\\) for \\(\\Psi(a|x_{j})\\) and \\(X \\cdot X\\cdot A\\cdot O\\) possible combinations of \\(x, x_{i}, a_{j},o_{k}\\) for \\(\\eta(x|x_i, a_{j}, o_{k})\\) to be tabulated for probability in \\(\\theta\\), this matrix has \\(A^{2}X^{3}O\\) columns as input.\u003c/p\u003e\n\u003cp\u003eAs there are \\(X\\) possible prior conditions to \\(\\Psi\\) and \\(X \\cdot A \\cdot O\\) possible prior conditions to \\(\\eta\\), this means the matrix should have \\(X^{2}AO\\) rows of output.\u003c/p\u003e\n\u003ch2 id=\"question-2\"\u003eQuestion 2\u003c/h2\u003e\n\u003cp\u003eThe constraints to the optimization objective for controller C-POMDPs is extremely similar to non-constrained controller POMDPs. 
In effect, they only differ by the third constraint:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta) \\leq \\varepsilon_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn fact, both systems have the same, exact, unchanged optimization objective that doesn\u0026rsquo;t change regardless of constraint: \\(\\max_{\\theta}\\ f(\\theta)\\).\u003c/p\u003e\n\u003cp\u003eWhy is solving C-POMDPs using controllers gradient methods much harder than solving POMDPs using a similar objective, and how exactly does the PGA authors\u0026rsquo; contributions address this issue to make its computation feasible?\u003c/p\u003e\n\u003ch3 id=\"answer\"\u003eAnswer\u003c/h3\u003e\n\u003cp\u003eIn order to ensure that the output distribution by gradient descent actually fits the constraints provided (and other constraints regarding them being probability distributions), the constraints to the optimization problem\u0026mdash;\\(h_{i}(\\theta)\\) included\u0026mdash;needs to be computed per step to project the raw output parameters down into valid parameters.\u003c/p\u003e\n\u003cp\u003e\\(h_{i}\\), importantly, is \u003cstrong\u003enon-linear\u003c/strong\u003e and \u003cstrong\u003enon-convex\u003c/strong\u003e. Removing this constraint makes the optimization bounds linear which drastically speeds up computation. 
This is why POMDPs leveraging similar approaches don\u0026rsquo;t have as much computational intractability.\u003c/p\u003e\n\u003cp\u003eTo solve this, the PGA authors linearise the \\(h_{i}\\) function using a first-order Taylor expansion in order to make this last constraint linear as well, which makes the entire projection problem have linear constraints: vastly improving computational efficiency.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpgapset/","tags":null,"title":"Exercises in PGA"},{"categories":null,"contents":"A result so important it gets a page.\nEvery operator on a finite-dimensional, non-zero, complex vector space has an eigenvalue.\nProof:\nSuppose \\(V\\) is a complex vector space with dimension \\(n \u0026gt; 0\\), and \\(T \\in \\mathcal{L}(V)\\). Choose \\(v \\in V, v\\neq 0\\) (possible as \\(V\\) is non-zero):\nConstruct a list of \\(n+1\\) vectors:\n\\begin{equation} v, Tv, \\dots T^{n} v \\end{equation}\nbecause we managed to cram \\(n+1\\) vectors into a list for a vector space with dimension \\(n\\), that list is linearly dependent.\nAnd thus, by definition of linearly dependence, exists a set of \\(a_0, \u0026hellip; a_{n} \\in \\mathbb{C}\\), which not all are \\(0\\), such that:\n\\begin{equation} 0 = a_0 v + a_1 T v + \\dots + a_{n} T^{n} v \\end{equation}\nNote that, because \\(v \\neq 0\\), \\(a_{1} \u0026hellip; a_{n}\\) can\u0026rsquo;t all be \\(0\\) either because otherwise \\(a_0 = 0\\) making all \\(a_{j}=0\\).\nNow, this polynomial can be completely factored because of the fundamental theorem of algebra into linear factors, \\(a_{0} + a_{1}z + \u0026hellip; a_{n}z^{n} = c(z-\\lambda_{1}) \\dots (z- \\lambda_{m})\\). 
We have to invoke the fundamental theorem of algebra with complex factors \\(z\\) because we haven\u0026rsquo;t shown it holds for polynomial operators yet.\nHowever, the existence of such a complete factoring over the complex numbers means that, with possibly complex number \\(\\lambda_{j}\\) values:\n\\begin{align} 0 \u0026amp;= a_{0} v + a_{1} Tv + \\dots a_{n} T^{n} v \\\\ \u0026amp;= (a_{0} I + a_{1} T + \\dots a_{n} T^{n}) v \\\\ \u0026amp;= c(T - \\lambda_{1} I) \\dots (T- \\lambda_{m} I)v \\end{align}\nnote that \\(m\\) is not necessarily \\(n\\) because different multiplicities.\nNow, \\(c\\) cannot be \\(0\\) because \\(a_0 \\neq 0\\), and multiplying everything out out\u0026hellip; makes the ending not zero?\nGiven \\(c \\neq 0\\), \\(v \\neq 0\\), and yet the map maps \\(v\\) to \\(0\\), at least one of the maps has to be non-injective. And because the properties of eigenvalues, some \\((T- \\lambda_{j} I)\\) being non-injective for a finite-dimensional vector space means that \\(\\lambda_{j}\\) is an eigenvalue of \\(T\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eA result so important it gets a page.\u003c/p\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, non-zero, \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e has an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eSuppose \\(V\\) is a \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e with \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e \\(n \u0026gt; 0\\), and \\(T \\in \\mathcal{L}(V)\\). 
Choose \\(v \\in V, v\\neq 0\\) (possible as \\(V\\) is non-zero):\u003c/p\u003e\n\u003cp\u003eConstruct a list of \\(n+1\\) \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv, Tv, \\dots T^{n} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause we managed to cram \\(n+1\\) \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es into a \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e for a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e with \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e \\(n\\), that list is \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAnd thus, by definition of \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependence\u003c/a\u003e, exists a set of \\(a_0, \u0026hellip; a_{n} \\in \\mathbb{C}\\), which not all are \\(0\\), such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = a_0 v + a_1 T v + \\dots + a_{n} T^{n} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote that, because \\(v \\neq 0\\), \\(a_{1} \u0026hellip; a_{n}\\) can\u0026rsquo;t all be \\(0\\) either because otherwise \\(a_0 = 0\\) making all \\(a_{j}=0\\).\u003c/p\u003e\n\u003cp\u003eNow, this \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e can be completely factored because of the \u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e into linear factors, \\(a_{0} + a_{1}z + \u0026hellip; a_{n}z^{n} = c(z-\\lambda_{1}) \\dots (z- \\lambda_{m})\\). 
We have to invoke the \u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e with complex factors \\(z\\) because we haven\u0026rsquo;t shown it holds for \u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial operator\u003c/a\u003es yet.\u003c/p\u003e\n\u003cp\u003eHowever, the existence of such a complete \u003ca href=\"/posts/kbhthoughts_on_axler_4/#factoring\"\u003efactoring\u003c/a\u003e over the \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es means that, with possibly \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \\(\\lambda_{j}\\) values:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n0 \u0026amp;= a_{0} v + a_{1} Tv + \\dots a_{n} T^{n} v \\\\\n\u0026amp;= (a_{0} I + a_{1} T + \\dots a_{n} T^{n}) v \\\\\n\u0026amp;= c(T - \\lambda_{1} I) \\dots (T- \\lambda_{m} I)v\n\\end{align}\u003c/p\u003e\n\u003cp\u003enote that \\(m\\) is not necessarily \\(n\\) because different multiplicities.\u003c/p\u003e\n\u003cp\u003eNow, \\(c\\) cannot be \\(0\\) because \\(a_0 \\neq 0\\), and multiplying everything out out\u0026hellip; makes the ending not zero?\u003c/p\u003e\n\u003cp\u003eGiven \\(c \\neq 0\\), \\(v \\neq 0\\), and yet the map maps \\(v\\) to \\(0\\), at least one of the maps has to be non-injective. And because the \u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e, some \\((T- \\lambda_{j} I)\\) being non-injective for a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e means that \\(\\lambda_{j}\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/","tags":null,"title":"existence of eigenvalue of operators"},{"categories":null,"contents":"expectation is the calculation of the \u0026ldquo;intended\u0026rdquo; or \u0026ldquo;target\u0026rdquo; value given a random variable:\n\\begin{equation} \\mathbb{E}[M] = \\sum_{x} x\\ p(X=x) \\end{equation}\nStandardize variables to \\(z\\) by dividing The correlation is simply their \u0026ldquo;product\u0026rdquo;: means of positive and negative groups The expectation is the average of the counts of the data you have.\nproperties of expectation these holds REGARDLESS of whether or not the variables you are doing is independent, IID, etc.\nLinearity in the first slot expectation has additivity and homogeneity.\n\\begin{equation} \\mathbb{E}[aX+b] = a\\mathbb{E}[X] + b \\end{equation}\nClosure under expectation \\begin{equation} E[X+Y] = E[X]+E[Y] \\end{equation}\nUnconscious statistician \\begin{equation} \\mathbb{E}[g(x)] = \\sum_{x \\in X}^{} g(x) P(X=x) \\end{equation}\nwhereby, if \\(g\\) is a normal function, you can just add up all the possible output. This property can be used to show the firts results.\nconditional expectation We can perform expectation via conditional probability.\n\\begin{equation} E[X|Y=y] = \\sum_{x}^{} x \\cdot p(X=x|Y=y) \\end{equation}\nlaw of total expectation \\begin{equation} \\mathbb{E}[X] = \\sum_{y}^{}\\mathbb{E}[X|Y=y] P(Y=y) \\end{equation}\nwhat is the \u0026ldquo;background variable\u0026rdquo;? 
the \\(y\\) value above.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e is the calculation of the \u0026ldquo;intended\u0026rdquo; or \u0026ldquo;target\u0026rdquo; value given a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[M] = \\sum_{x} x\\ p(X=x)\n\\end{equation}\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eStandardize variables to \\(z\\) by dividing\u003c/li\u003e\n\u003cli\u003eThe correlation is simply their \u0026ldquo;product\u0026rdquo;: means of positive and negative groups\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e is the average of the counts of the data you have.\u003c/p\u003e\n\u003ch2 id=\"properties-of-expectation--kbhexpectation-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ethese holds \u003cstrong\u003eREGARDLESS\u003c/strong\u003e of whether or not the variables you are doing is \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e, \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, etc.\u003c/p\u003e\n\u003ch3 id=\"linearity-in-the-first-slot\"\u003eLinearity in the first slot\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e has additivity and \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[aX+b] = a\\mathbb{E}[X] + b\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"closure-under-expectation\"\u003eClosure under expectation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nE[X+Y] = E[X]+E[Y]\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"unconscious-statistician\"\u003eUnconscious 
statistician\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[g(x)] = \\sum_{x \\in X}^{} g(x) P(X=x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, if \\(g\\) is a normal function, you can just add up all the possible output. This property can be used to show the firts results.\u003c/p\u003e\n\u003ch2 id=\"conditional-expectation\"\u003econditional expectation\u003c/h2\u003e\n\u003cp\u003eWe can perform \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e via \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[X|Y=y] = \\sum_{x}^{} x \\cdot p(X=x|Y=y)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"law-of-total-expectation\"\u003elaw of total expectation\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[X] = \\sum_{y}^{}\\mathbb{E}[X|Y=y] P(Y=y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhat is the \u0026ldquo;background variable\u0026rdquo;? 
the \\(y\\) value above.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexpectation/","tags":null,"title":"expectation"},{"categories":null,"contents":" anticipate all states that the agent may find itself in hard-code responses to each one This is bad because you have to have big brain to think about and anticipate all the possible states (to provide a \u0026ldquo;complete strategy\u0026rdquo;), which is often impractical if not impossible.\nDisadvantages You have to know the finite possible state space, and solve them \u0026ldquo;correctly\u0026rdquo;\n","html":"\u003col\u003e\n\u003cli\u003eanticipate all states that the \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e may find itself in\u003c/li\u003e\n\u003cli\u003ehard-code responses to each one\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is bad because you have to have big brain to think about and anticipate all the possible states (to provide a \u0026ldquo;complete strategy\u0026rdquo;), which is often impractical if not impossible.\u003c/p\u003e\n\u003ch2 id=\"disadvantages\"\u003eDisadvantages\u003c/h2\u003e\n\u003cp\u003eYou have to know the finite possible state space, and solve them \u0026ldquo;correctly\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexplicit_programming/","tags":null,"title":"explicit programming"},{"categories":null,"contents":"You are the president, and you are trying to choose the secretary of state. You can only interview people in sequence, and you have to hire on the spot. There are a known number of candidates. We want to maximize the probability of selecting the best candidate. You are given no priors.\nHow do we know which candidates we explore, and which candidates we exploit?\nSometimes, you don\u0026rsquo;t have a way of getting data.\nBinary Bandit We are playing with \\(n\\) binary slot machines.\narm \\(j\\) pays off \\(1\\) with probability \\(\\theta_{j}\\), and pays of \\(0\\) otherwise. 
we do not know $θj$s exogenously and have to learn it we only have \\(h\\) pulls in total across all \\(n\\) slot machines As we perform \\(k\\) pulls, we can keep track of a separate Beta Distribution representing the probability of success for each of the slot machines.\nEssentially, we have a problem whereby we are at a stationary Markov Decision Process whereby the only difference between actions is how much reward we get.\nBayesian Model Estimation We don\u0026rsquo;t actually know the probability of winning (called \u0026ldquo;\\(\\theta\\)\u0026rdquo; in the figure above), and therefore have to \u0026ldquo;explore\u0026rdquo; the system to actually know about it.\nWe want to compute \\(\\rho_{a}\\):\n\\begin{equation} \\rho_{a} = P(win_{a} | w_{a}, l_{a}) = \\int_{0}^{1} \\theta \\times Beta(\\theta | w_{a}+1, l_{a}+1) \\dd{\\theta} \\end{equation}\nwhere, \\(w_{a}\\) is the number of successes for arm \\(a\\), and \\(l_{a}\\) is the number of failures observed.\nThis is exactly the \\(\\mathbb{E}[Beta(w_{a}+1, l_{a}+1)] = \\frac{w_{a}+1}{(w_{a}+1)+(l_{a}+1)}\\)\nA \u0026ldquo;greedy action\u0026rdquo; is an action which simply chooses the \\(a\\) out of all \\(\\rho_{a}\\) which maximizes this probability. We often don\u0026rsquo;t want that because we want to explore the space.\nApproximate Exploration Strategies Undirected Exploration Directed Exploration Optimal Exploration Optimal Exploration is not always possible because its computationally to complex. But its in theory possible. See Optimal Exploration.\n","html":"\u003cp\u003eYou are the president, and you are trying to choose the secretary of state. You can only interview people in sequence, and you have to hire on the spot. There are a known number of candidates. We want to maximize the probability of selecting the best candidate. 
You are given no priors.\u003c/p\u003e\n\u003cp\u003eHow do we know which candidates we explore, and which candidates we exploit?\u003c/p\u003e\n\u003cp\u003eSometimes, you don\u0026rsquo;t have a way of getting data.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"binary-bandit\"\u003eBinary Bandit\u003c/h2\u003e\n\u003cp\u003eWe are playing with \\(n\\) binary slot machines.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003earm \\(j\\) pays off \\(1\\) with probability \\(\\theta_{j}\\), and pays of \\(0\\) otherwise. we do not know $θ\u003csub\u003ej\u003c/sub\u003e$s exogenously and have to learn it\u003c/li\u003e\n\u003cli\u003ewe only have \\(h\\) pulls in total across all \\(n\\) slot machines\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAs we perform \\(k\\) pulls, we can keep track of a separate \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e representing the probability of success for each of the slot machines.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-31_13-01-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eEssentially, we have a problem whereby we are at a \u003ca href=\"/posts/kbhmarkov_decision_process/#stationary-id-5bb5350e-04e4-46dc-9ea8-cb7bb09edd42-markov-decision-process\"\u003estationary Markov Decision Process\u003c/a\u003e whereby the only difference between actions is how much reward we get.\u003c/p\u003e\n\u003ch3 id=\"bayesian-model-estimation\"\u003eBayesian Model Estimation\u003c/h3\u003e\n\u003cp\u003eWe don\u0026rsquo;t actually know the probability of winning (called \u0026ldquo;\\(\\theta\\)\u0026rdquo; in the figure above), and therefore have to \u0026ldquo;explore\u0026rdquo; the system to actually know about it.\u003c/p\u003e\n\u003cp\u003eWe want to compute \\(\\rho_{a}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho_{a} = P(win_{a} | w_{a}, l_{a}) = \\int_{0}^{1} \\theta \\times Beta(\\theta | w_{a}+1, l_{a}+1) 
\\dd{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(w_{a}\\) is the number of successes for arm \\(a\\), and \\(l_{a}\\) is the number of failures observed.\u003c/p\u003e\n\u003cp\u003eThis is exactly the \\(\\mathbb{E}[Beta(w_{a}+1, l_{a}+1)] = \\frac{w_{a}+1}{(w_{a}+1)+(l_{a}+1)}\\)\u003c/p\u003e\n\u003cp\u003eA \u0026ldquo;\u003ca href=\"#bayesian-model-estimation\"\u003egreedy action\u003c/a\u003e\u0026rdquo; is an action which simply chooses the \\(a\\) out of all \\(\\rho_{a}\\) which maximizes this probability. We often don\u0026rsquo;t want that because we want to explore the space.\u003c/p\u003e\n\u003ch3 id=\"approximate-exploration-strategies\"\u003eApproximate Exploration Strategies\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/\"\u003eUndirected Exploration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/\"\u003eDirected Exploration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"optimal-exploration--kbhoptimal-exploration-dot-md\"\u003e\u003ca href=\"/posts/kbhoptimal_exploration/\"\u003eOptimal Exploration\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhoptimal_exploration/\"\u003eOptimal Exploration\u003c/a\u003e is not always possible because its computationally to complex. But its in theory possible. See \u003ca href=\"/posts/kbhoptimal_exploration/\"\u003eOptimal Exploration\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexploration_and_exploitation/","tags":null,"title":"Exploration and Exploitation"},{"categories":null,"contents":"Analogous to poisson distribution, but for continuous random variable. 
Consider a distribution which lasts a duration of time until success; what\u0026rsquo;s the probability that success is found in some range of times:\n\u0026ldquo;What\u0026rsquo;s the probability that there are an earthquake in \\(k\\) years if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\nconstituents $λ$\u0026mdash;\u0026ldquo;rate\u0026rdquo;: event rate (mean occurrence per time) requirements \\begin{equation} f(x) = \\begin{cases} \\lambda e^{-\\lambda x}, x\\geq 0\\\\ 0, x\u0026lt; 0 \\end{cases} \\end{equation}\nadditional information expectation: \\(\\frac{1}{\\lambda}\\) variance: \\(\\frac{1}{\\lambda^{2}}\\) exponential distribution is memoryless An exponential distribution doesn\u0026rsquo;t care about what happened before.\n\u0026ldquo;On average, we have a request every 5 minutes. There have been 2 minutes with no requests. What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\nis the same statement as\n\u0026ldquo;On average, we have a request every 5 minutes. There have been 2 minutes with no requests. What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\nThat is:\n\\begin{equation} P(s+t|s) = P(t) \\end{equation}\n","html":"\u003cp\u003eAnalogous to \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e, but for \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e. 
Consider a distribution which lasts a duration of time until success; what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e that success is found in some range of times:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;What\u0026rsquo;s the probability that there are an earthquake in \\(k\\) years if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e$λ$\u0026mdash;\u0026ldquo;rate\u0026rdquo;: event rate (mean occurrence per time)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\begin{cases}\n\\lambda e^{-\\lambda x}, x\\geq 0\\\\\n0, x\u0026lt; 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eexpectation\u003c/strong\u003e: \\(\\frac{1}{\\lambda}\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evariance\u003c/strong\u003e: \\(\\frac{1}{\\lambda^{2}}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exponential-distribution--kbhexponential-distribution-dot-md--is-memoryless\"\u003e\u003ca href=\"/posts/kbhexponential_distribution/\"\u003eexponential distribution\u003c/a\u003e is memoryless\u003c/h3\u003e\n\u003cp\u003eAn \u003ca href=\"/posts/kbhexponential_distribution/\"\u003eexponential distribution\u003c/a\u003e doesn\u0026rsquo;t care about what happened before.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;On average, we have a request every 5 minutes. There have been 2 minutes with no requests. What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eis the same statement as\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;On average, we have a request every 5 minutes. 
\u003cdel\u003eThere have been 2 minutes with no requests.\u003c/del\u003e What\u0026rsquo;s the probability that the next request is in 10 minutes?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(s+t|s) = P(t)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexponential_distribution/","tags":null,"title":"exponential distribution"},{"categories":null,"contents":"\\(\\mathbb{F}^n\\) is the set of all lists of length \\(n\\) with elements of \\(\\mathbb{F}\\). These are a special case of matricies.\nFormally\u0026mdash;\n\\begin{equation} \\mathbb{F}^n = \\{(x1,\\ldots,x_n):x_j\\in\\mathbb{F}, \\forall j =1,\\ldots,n\\} \\end{equation}\nFor some \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\) and \\(j \\in \\{1,\\ldots,n\\}\\), we say \\(x_j\\) is the \\(j^{th}\\) coordinate in \\((x_1,\\ldots,x_n)\\).\nadditional information addition in \\(\\mathbb{F}^n\\) Addition is defined by adding corresponding coordinates:\n\\begin{equation} (x1,\\ldots,x_n) + (y_1,\\ldots,y_n) = (x_1+y_1, \\ldots,x_n+y_n) \\end{equation}\naddition in \\(\\mathbb{F}^n\\) is commutative If we have \\(x,y\\in \\mathbb{F}^n\\), then \\(x+y = y+x\\).\nThe proof of this holds because of how addition works and the fact that you can pairwise commute addition in \\(\\mathbb{F}\\).\n\\begin{align} x+y \u0026amp;= (x_1,\\ldots,x_n) + (y_1,\\ldots,y_n)\\\\ \u0026amp;= (x_1+y_1,\\ldots,x_n+y_n)\\\\ \u0026amp;= (y_1+x_1,\\ldots,y_n+x_n)\\\\ \u0026amp;= (y_1,\\ldots,y_n) + (x_1,\\ldots,x_n)\\\\ \u0026amp;= y+x \\end{align}\nThis is a lesson is why avoiding explicit coordinates is good.\nadditive inverse of \\(\\mathbb{F}^n\\) For \\(x \\in \\mathbb{F}^n\\), the additive inverse of \\(x\\), written as \\(-x\\) is the vector \\(-x\\in \\mathbb{F}^n\\) such that:\n\\begin{equation} x+(-x) = 0 \\end{equation}\nWhich really means that its the additive inverse of each of the coordinates.\nscalar multiplication in \\(\\mathbb{F}^n\\) At 
present, we are only going to concern ourselves with the product of a number \\(\\lambda\\) and a vector \\(\\mathbb{F}^n\\). This is done by multiplying each coordinate of the vector by \\(\\lambda\\).\n\\begin{equation} \\lambda (x_1,\\ldots,x_n) = (\\lambda x_1, \\lambda, \\lambda x_n) \\end{equation}\nwhere, \\(\\lambda \\in \\mathbb{F}\\), and \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\).\nThe geometric interpretation of this is a scaling operation of vectors.\n","html":"\u003cp\u003e\\(\\mathbb{F}^n\\) is the set of all lists of length \\(n\\) with elements of \\(\\mathbb{F}\\). These are a special case of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFormally\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{F}^n = \\{(x1,\\ldots,x_n):x_j\\in\\mathbb{F}, \\forall j =1,\\ldots,n\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\) and \\(j \\in \\{1,\\ldots,n\\}\\), we say \\(x_j\\) is the \\(j^{th}\\) \u003cstrong\u003ecoordinate\u003c/strong\u003e in \\((x_1,\\ldots,x_n)\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"addition-in-mathbb-f-n\"\u003eaddition in \\(\\mathbb{F}^n\\)\u003c/h3\u003e\n\u003cp\u003e\u003cem\u003eAddition\u003c/em\u003e is defined by adding corresponding coordinates:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x1,\\ldots,x_n) + (y_1,\\ldots,y_n) = (x_1+y_1, \\ldots,x_n+y_n)\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"addition-in-mathbb-f-n-is-commutative\"\u003eaddition in \\(\\mathbb{F}^n\\) is commutative\u003c/h4\u003e\n\u003cp\u003eIf we have \\(x,y\\in \\mathbb{F}^n\\), then \\(x+y = y+x\\).\u003c/p\u003e\n\u003cp\u003eThe proof of this holds because of how addition works and the fact that you can pairwise commute addition in \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nx+y \u0026amp;= (x_1,\\ldots,x_n) + 
(y_1,\\ldots,y_n)\\\\\n\u0026amp;= (x_1+y_1,\\ldots,x_n+y_n)\\\\\n\u0026amp;= (y_1+x_1,\\ldots,y_n+x_n)\\\\\n\u0026amp;= (y_1,\\ldots,y_n) + (x_1,\\ldots,x_n)\\\\\n\u0026amp;= y+x\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis is a lesson is why avoiding explicit coordinates is good.\u003c/p\u003e\n\u003ch3 id=\"additive-inverse-of-mathbb-f-n\"\u003eadditive inverse of \\(\\mathbb{F}^n\\)\u003c/h3\u003e\n\u003cp\u003eFor \\(x \\in \\mathbb{F}^n\\), the \u003cstrong\u003eadditive \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e\u003c/strong\u003e of \\(x\\), written as \\(-x\\) is the vector \\(-x\\in \\mathbb{F}^n\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx+(-x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich really means that its the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e of each of the \u003ca href=\"/posts/kbhlists_over_fields/\"\u003ecoordinate\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/h3\u003e\n\u003cp\u003eAt present, we are only going to concern ourselves with the product of a number \\(\\lambda\\) and a vector \\(\\mathbb{F}^n\\). 
This is done by multiplying each coordinate of the vector by \\(\\lambda\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda (x_1,\\ldots,x_n) = (\\lambda x_1, \\lambda, \\lambda x_n)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\lambda \\in \\mathbb{F}\\), and \\((x_1,\\ldots,x_n) \\in \\mathbb{F}^n\\).\u003c/p\u003e\n\u003cp\u003eThe geometric interpretation of this is a scaling operation of vectors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlists_over_fields/","tags":null,"title":"F^n"},{"categories":null,"contents":"We define a set \\(\\mathbb{F}^{s}\\), which is the set of unit functions that maps from any set \\(S\\) to \\(\\mathbb{F}\\).\ncloseness of addition \\begin{equation} (f+g)(x) = f(x)+g(x), \\forall f,g \\in \\mathbb{F}^{S}, x \\in S \\end{equation}\ncloseness of scalar multiplication \\begin{equation} (\\lambda f)(x)=\\lambda f(x), \\forall \\lambda \\in \\mathbb{F}, f \\in \\mathbb{F}^{S}, x \\in S \\end{equation}\ncommutativity inherits \\(\\mathbb{F}\\) (for the codomain of functions \\(f\\) and \\(g\\))\nassociativity inherits \\(\\mathbb{F}\\) for codomain or is just \\(\\mathbb{F}\\) for scalar\ndistribution inherits distribution in \\(\\mathbb{F}\\) on the codomain again\nadditive identity \\begin{equation} 0(x) = 0 \\end{equation}\nadditive inverse \\begin{equation} (-f)(x) = -f(x) \\end{equation}\nmultiplicative identity \\(1\\) hee hee\n","html":"\u003cp\u003eWe define a set \\(\\mathbb{F}^{s}\\), which is the set of unit functions that maps from any set \\(S\\) to \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003ch2 id=\"closeness-of-addition\"\u003ecloseness of addition\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(f+g)(x) = f(x)+g(x), \\forall f,g \\in \\mathbb{F}^{S}, x \\in S\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"closeness-of-scalar-multiplication\"\u003ecloseness of scalar multiplication\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda f)(x)=\\lambda f(x), \\forall \\lambda \\in 
\\mathbb{F}, f \\in \\mathbb{F}^{S}, x \\in S\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"commutativity\"\u003ecommutativity\u003c/h2\u003e\n\u003cp\u003einherits \\(\\mathbb{F}\\) (for the codomain of functions \\(f\\) and \\(g\\))\u003c/p\u003e\n\u003ch2 id=\"associativity\"\u003eassociativity\u003c/h2\u003e\n\u003cp\u003einherits \\(\\mathbb{F}\\) for codomain or is just \\(\\mathbb{F}\\) for scalar\u003c/p\u003e\n\u003ch2 id=\"distribution\"\u003edistribution\u003c/h2\u003e\n\u003cp\u003einherits distribution in \\(\\mathbb{F}\\) on the codomain again\u003c/p\u003e\n\u003ch2 id=\"additive-identity\"\u003eadditive identity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n0(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additive-inverse\"\u003eadditive inverse\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(-f)(x) = -f(x)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"multiplicative-identity\"\u003emultiplicative identity\u003c/h2\u003e\n\u003cp\u003e\\(1\\) hee hee\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfs_is_a_vector_space/","tags":null,"title":"F^s is a Vector Space Over F"},{"categories":null,"contents":"in probability, a factor \\(\\phi\\) is a value you can assign to each distinct value in a discrete distribution which acts as the probability of that value occurring. They are considered parameters of the discrete distribution.\nIf you don\u0026rsquo;t have discrete variables, factors allow you to state \\(p(x|y)\\) in terms of a function \\(\\phi(x,y)\\).\nSee also Rejection Sampling\nfactor operations factor product \\begin{equation} \\phi_{3} (x,y,z) = \\phi_{1} (x,y) \\cdot \\phi_{2}(y,z) \\end{equation}\nfactor marginalization \\begin{equation} \\phi(x) = \\sum_{y=Y} \\phi(x,y) \\end{equation}\nfactor conditioning Removing any rows not consistent with evidence. 
Say you know \\(Y=1\\), remove all rows that say \\(Y=0\\).\n","html":"\u003cp\u003ein \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e, a \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003e \\(\\phi\\) is a value you can assign to each distinct value in a \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e which acts as the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of that value occurring. They are considered \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es of the \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you don\u0026rsquo;t have discrete variables, \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es allow you to state \\(p(x|y)\\) in terms of a function \\(\\phi(x,y)\\).\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhrejection_sampling/\"\u003eRejection Sampling\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"factor-operations\"\u003efactor operations\u003c/h2\u003e\n\u003ch3 id=\"factor-product\"\u003efactor product\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{3} (x,y,z) = \\phi_{1} (x,y) \\cdot \\phi_{2}(y,z)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"factor-marginalization\"\u003efactor marginalization\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\phi(x) = \\sum_{y=Y} \\phi(x,y)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"factor-conditioning\"\u003efactor conditioning\u003c/h3\u003e\n\u003cp\u003eRemoving any rows not consistent with evidence. 
Say you know \\(Y=1\\), remove all rows that say \\(Y=0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfactor/","tags":null,"title":"factor"},{"categories":null,"contents":"Motivation Multiple agents need to collaborate to achieve common goal.\nJoint Utility Maximization: maximize the joint utility between various agents.\nPossible Approaches Using a traditional MDP: an MDP considers \u0026ldquo;action\u0026rdquo; as a joint action between all agents (exponential blow up because the agent actions multiply) Local Optimization: share rewards/values among agents Local Optimization: search and maximize joint utility explicitly (no need to model the entire action space) Problems with single Reward Sharing:\nCredit Assignment Problem In collective reward situations, determining which action out of the cohort actually contributed to the award is hard.\nFree Ride Problem Agents can benefit from reward without actually doing anything by being carried.\nFactored MDPs Representation Using factored linear value function to approximate the joint value function Using linear programming to avoid exponential blow up Background Coordination Graphs modeling each agent as a node each edge is a dependency factored Markov Decision Process MDPs are not good at large problems factor the state and action spaces as a random variable factors, etc. 
action selection each agent maintains a local \\(Q\\) function indicating its population the \\(Q\\) function of each agent maybe influenced by other agents: the coordination graph of the agent is used to calculate contribution We optimize by using one agent at a time: we optimize one agent, then\n","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eMultiple agents need to collaborate to achieve common goal.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eJoint Utility Maximization\u003c/strong\u003e: maximize the joint utility between various agents.\u003c/p\u003e\n\u003ch2 id=\"possible-approaches\"\u003ePossible Approaches\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eUsing a traditional MDP\u003c/strong\u003e: an MDP considers \u0026ldquo;action\u0026rdquo; as a joint action between all agents (exponential blow up because the agent actions multiply)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLocal Optimization\u003c/strong\u003e: share rewards/values among agents\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLocal Optimization\u003c/strong\u003e: search and maximize joint utility explicitly (no need to model the entire action space)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eProblems with single Reward Sharing:\u003c/p\u003e\n\u003ch3 id=\"credit-assignment-problem\"\u003eCredit Assignment Problem\u003c/h3\u003e\n\u003cp\u003eIn collective reward situations, determining which action out of the cohort actually contributed to the award is hard.\u003c/p\u003e\n\u003ch3 id=\"free-ride-problem\"\u003eFree Ride Problem\u003c/h3\u003e\n\u003cp\u003eAgents can benefit from reward without actually doing anything by being carried.\u003c/p\u003e\n\u003ch2 id=\"factored-mdps--kbhfactored-mdps-dot-md--representation\"\u003e\u003ca href=\"/posts/kbhfactored_mdps/\"\u003eFactored MDPs\u003c/a\u003e Representation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsing factored linear value function to approximate the joint 
value function\u003c/li\u003e\n\u003cli\u003eUsing linear programming to avoid exponential blow up\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"background\"\u003eBackground\u003c/h3\u003e\n\u003ch4 id=\"coordination-graphs\"\u003eCoordination Graphs\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003emodeling each agent as a node\u003c/li\u003e\n\u003cli\u003eeach edge is a dependency\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"factored-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003efactored \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eMDPs are not good at large problems\u003c/li\u003e\n\u003cli\u003efactor the state and action spaces as a random variable factors, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"action-selection\"\u003eaction selection\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eeach agent maintains a local \\(Q\\) function indicating its population\u003c/li\u003e\n\u003cli\u003ethe \\(Q\\) function of each agent maybe influenced by other agents:\n\u003cul\u003e\n\u003cli\u003ethe coordination graph of the agent is used to calculate contribution\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe optimize by using \u003cstrong\u003eone agent at a time\u003c/strong\u003e: we optimize one agent, then\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfactored_mdps/","tags":null,"title":"Factored MDPs"},{"categories":null,"contents":"fairness through unawareness procedural fairness, or fairness through unawareness is a fairness system\nIf you have no idea about the demographics of protected groups, you will make better decisions.\nexclude sensitive features from datasets exclude proxies of protected groups Problem: deeply correlated information (such as stuff that people like) is hard to get rid of\u0026mdash;individual features does nothing with respect to predicting gender, but taken in 
groups it can recover protected group information.\nfairness through awareness we only care about the outcome\nfairness through parity that the prediction for different groups\n\\begin{equation} P(G=1|D=0) = P(G=1|D=1) \\end{equation}\nfairness through calibration We want the CORRECTNESS of the algorithm to be similar between protected groups.\ndisparate impact \\begin{equation} \\frac{P(G=G^{*}|D=0)}{P(G=G^{*}|D=1)} \\leq \\epsilon \\end{equation}\nwhere, by US law, disparate impact states \\(\\epsilon\\) must be 0.2 or smaller for protected groups \\(D\\).\nwhere \\(G^{*}\\) is the correct prediction.\n","html":"\u003ch2 id=\"fairness-through-unawareness\"\u003efairness through unawareness\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#fairness-through-unawareness\"\u003eprocedural fairness\u003c/a\u003e, or \u003ca href=\"#fairness-through-unawareness\"\u003efairness through unawareness\u003c/a\u003e is a \u003ca href=\"/posts/kbhprocedural_vs_distributive_fairness/\"\u003efairness\u003c/a\u003e system\u003c/p\u003e\n\u003cp\u003eIf you have no idea about the demographics of \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es, you will make better decisions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eexclude sensitive features from datasets\u003c/li\u003e\n\u003cli\u003eexclude proxies of \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProblem: deeply correlated information (such as stuff that people like) is hard to get rid of\u0026mdash;individual features does nothing with respect to predicting gender, but taken in groups it can recover \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003e information.\u003c/p\u003e\n\u003ch2 id=\"fairness-through-awareness\"\u003efairness through awareness\u003c/h2\u003e\n\u003cp\u003ewe only care about the outcome\u003c/p\u003e\n\u003ch3 id=\"fairness-through-parity\"\u003efairness through 
parity\u003c/h3\u003e\n\u003cp\u003ethat the prediction for different groups\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(G=1|D=0) = P(G=1|D=1)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"fairness-through-calibration\"\u003efairness through calibration\u003c/h3\u003e\n\u003cp\u003eWe want the CORRECTNESS of the algorithm to be similar between \u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es.\u003c/p\u003e\n\u003ch4 id=\"disparate-impact\"\u003edisparate impact\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{P(G=G^{*}|D=0)}{P(G=G^{*}|D=1)} \\leq \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, by US law, \u003ca href=\"#disparate-impact\"\u003edisparate impact\u003c/a\u003e states \\(\\epsilon\\) must be 0.2 or smaller for protected groups \\(D\\).\u003c/p\u003e\n\u003cp\u003ewhere \\(G^{*}\\) is the correct prediction.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprocedural_vs_distributive_fairness/","tags":null,"title":"fairness"},{"categories":null,"contents":"We have that:\nThe change in volts in a relationship to the magnetic flux.\n\\begin{equation} \\epsilon = \\oint \\vec{E} \\cdot \\dd{\\vec{l}} = - \\dv{\\Phi_{b}}{t} \\end{equation}\nwhere, \\(\\Phi_{b}\\) is the magnetic flux, namely how much magnetic field is through a surface:\n\\begin{equation} \\Phi_{b} = \\int \\vec{B} \\cdot \\dd{\\vec{A}} \\end{equation}\nusually, this is just \\(BA\\).\nNote! 
This tells us that the EMF (electric field per length) is just negative the change of magnetic flux.\n","html":"\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003eThe change in volts in a relationship to the magnetic flux.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon = \\oint \\vec{E} \\cdot \\dd{\\vec{l}} = - \\dv{\\Phi_{b}}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\Phi_{b}\\) is the magnetic flux, namely how much magnetic field is through a surface:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Phi_{b} = \\int \\vec{B} \\cdot \\dd{\\vec{A}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eusually, this is just \\(BA\\).\u003c/p\u003e\n\u003cp\u003eNote! This tells us that the EMF (electric field per length) is just negative the change of magnetic flux.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfaraday_s_law/","tags":null,"title":"faraday's law"},{"categories":null,"contents":"One alpha vector per action:\n\\begin{equation} \\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{o}^{} \\max_{a\u0026rsquo;} \\sum_{s\u0026rsquo;}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a) \\alpha_{a\u0026rsquo;}^{k}(s\u0026rsquo;) \\end{equation}\ntime complexity: \\(O(|S|^{2}|A|^{2}|O|)\\)\n","html":"\u003cp\u003eOne \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e per action:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{o}^{} \\max_{a\u0026rsquo;} \\sum_{s\u0026rsquo;}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a) \\alpha_{a\u0026rsquo;}^{k}(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etime complexity: \\(O(|S|^{2}|A|^{2}|O|)\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfast_informed_bound/","tags":null,"title":"Fast Informed Bound"},{"categories":null,"contents":"A New Deal program to help long-term families to have home. Tho program lowered down-payment for homes down from \\(50\\%\\) down to only \\(\u0026lt;10\\%\\). 
This is part of Roosevelt\u0026rsquo;s New Deal to lower interest rates and increased national home ownership rates. This could have been attributed to programs to stabilize home prices. This specifically helped white families: favoured single-family homes.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e program to help long-term families to have home. Tho program lowered down-payment for homes down from \\(50\\%\\) down to only \\(\u0026lt;10\\%\\). This is part of Roosevelt\u0026rsquo;s New Deal to lower interest rates and increased national home ownership rates. This could have been attributed to programs to stabilize home prices. This specifically helped white families: favoured single-family homes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfederal_housing_administration/","tags":null,"title":"Federal Housing Administration"},{"categories":null,"contents":"The Federal Project Number One is a branch of projects under the WPA which created opportunities for writers, musicians, artists, writers, etc.\n","html":"\u003cp\u003eThe Federal Project Number One is a branch of projects under the \u003ca href=\"/posts/kbhwpa/\"\u003eWPA\u003c/a\u003e which created opportunities for writers, musicians, artists, writers, etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfederal_project_number_one/","tags":null,"title":"Federal Project Number One"},{"categories":null,"contents":"A field is a special set.\nconstituents distinct elements of at least \\(0\\) and \\(1\\) operations of addition and multiplication requirements closed commutativity associativity identities (both additive and multiplicative) inverses (both additive and multiplicative) distribution Therefore, \\(\\mathbb{R}\\) is a field, and so is \\(\\mathbb{C}\\) (which we proved in properties of complex arithmetic).\nadditional information Main difference between group: there is one operation is group, a field has two 
operations.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e is a special set.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edistinct elements of at least \\(0\\) and \\(1\\)\u003c/li\u003e\n\u003cli\u003eoperations of addition and multiplication\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e (both additive and multiplicative)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003es (both additive and multiplicative)\u003c/li\u003e\n\u003cli\u003edistribution\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore, \\(\\mathbb{R}\\) is a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e, and so is \\(\\mathbb{C}\\) (which we proved in \u003ca href=\"/posts/kbhcomplex_number/#requirements\"\u003eproperties of complex arithmetic\u003c/a\u003e).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eMain difference between \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e: there is \u003cem\u003eone\u003c/em\u003e operation is \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e, a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e has two operations.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfield/","tags":null,"title":"field"},{"categories":null,"contents":"Store files strided across the disk, and store the blocks which the file uses as a list of block ids. 
We can then jump on that block IDs and then jump to there\n","html":"\u003cp\u003eStore files strided across the disk, and store the blocks which the file uses as a list of block ids. We can then jump on that block IDs and then jump to there\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfile_payload_data/","tags":null,"title":"File Payload Data"},{"categories":null,"contents":"The filesystem is the only thing that can store anything across power offs.\ndisk Unlike memory, its \u0026ldquo;sector-addressable\u0026rdquo;: you cannot read or write individual bytes. The disk is divided into sectors, which you have to wholesale read and write.\nseeking Because disks are mostly moving, reading and writing requires seeking: to wait until the platter go under the arm and read.\nfilesystems are designed to minimize the seek time.\nfunctionality creating looknig up reading: sequential + random file access; access either all of a file or a part of the file editing creating folders main challenges disk space management: minimize seeks, sharing space, efficient use of disk naming: how do users name files reliability: surviving OS crashes and hardware failures protection: isolation between users, controlled sharing block a block is a group of one or more sectors, which issued to abstract away chunks of sectors.\nfragmentation internal fragmentation A file can be no less than a single block of text.\nexternal fragmentation \u0026ldquo;no space is available even if the space in aggregate is available\u0026rdquo;\nmodels of storage We typically put two things into the block:\nfile payload data file meta-data there is a few ways to do this:\ncontiguous allocation linked files Windows FAT File Payload Data Unix V6 Filesystem: inode, block, Block Cache ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e is the only thing that can store anything across power offs.\u003c/p\u003e\n\u003ch2 
id=\"disk\"\u003edisk\u003c/h2\u003e\n\u003cp\u003eUnlike memory, its \u0026ldquo;sector-addressable\u0026rdquo;: you cannot read or write individual bytes. The \u003ca href=\"#disk\"\u003edisk\u003c/a\u003e is divided into \u003ca href=\"#disk\"\u003esector\u003c/a\u003es, which you have to wholesale read and write.\u003c/p\u003e\n\u003ch3 id=\"seeking\"\u003eseeking\u003c/h3\u003e\n\u003cp\u003eBecause disks are mostly moving, reading and writing requires \u003ca href=\"#seeking\"\u003eseeking\u003c/a\u003e: to wait until the platter go under the arm and read.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003es are designed to minimize the seek time.\u003c/p\u003e\n\u003ch2 id=\"functionality\"\u003efunctionality\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecreating\u003c/li\u003e\n\u003cli\u003elooknig up\u003c/li\u003e\n\u003cli\u003ereading: sequential + random file access; access either all of a file or a part of the file\u003c/li\u003e\n\u003cli\u003eediting\u003c/li\u003e\n\u003cli\u003ecreating folders\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edisk space management\u003c/strong\u003e: minimize seeks, sharing space, efficient use of disk\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enaming\u003c/strong\u003e: how do users name files\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ereliability\u003c/strong\u003e: surviving OS crashes and hardware failures\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eprotection\u003c/strong\u003e: isolation between users, controlled sharing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"block\"\u003eblock\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#block\"\u003eblock\u003c/a\u003e is a group of one or more \u003ca href=\"#disk\"\u003esector\u003c/a\u003es, which issued to abstract away chunks of sectors.\u003c/p\u003e\n\u003ch2 
id=\"fragmentation\"\u003efragmentation\u003c/h2\u003e\n\u003ch3 id=\"internal-fragmentation\"\u003einternal fragmentation\u003c/h3\u003e\n\u003cp\u003eA file can be no less than a single \u003ca href=\"#block\"\u003eblock\u003c/a\u003e of text.\u003c/p\u003e\n\u003ch3 id=\"external-fragmentation\"\u003eexternal fragmentation\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;no space is available even if the space in aggregate is available\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"models-of-storage\"\u003emodels of storage\u003c/h2\u003e\n\u003cp\u003eWe typically put two things into the block:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efile payload data\u003c/li\u003e\n\u003cli\u003efile meta-data\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ethere is a few ways to do this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontiguous_allocation/\"\u003econtiguous allocation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinked_files/\"\u003elinked files\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhwindows_fat/\"\u003eWindows FAT\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfile_payload_data/\"\u003eFile Payload Data\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003eUnix V6 Filesystem\u003c/a\u003e: \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e, \u003ca href=\"#block\"\u003eblock\u003c/a\u003e, \u003ca href=\"/posts/kbhunix_v6_filesystem/#block-cache\"\u003eBlock Cache\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfilesystem/","tags":null,"title":"filesystem"},{"categories":null,"contents":"filters are how beliefs are updated from observation\ndiscrete state filter \\begin{equation} b\u0026rsquo;(s\u0026rsquo;) = P(s\u0026rsquo;|b,a,o) \\end{equation}\n\\(b\u0026rsquo;\\) is what state we think we are in next, and its a probability distribution over all states, 
calculated given from \\(b,a,o\\) our current belief about our state, our action, and our observation.\nWe can perform this belief update by performing Bayes Theorem over \\(o\\):\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;= P(s\u0026rsquo;|b,a,o) \\\\ \u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\end{align}\nNow, consider\n\\(b\\) is a representation of \\(s\\) (\u0026ldquo;belief is a representation of what previous state you are in.\u0026rdquo;) However, you will note that \\(s\\) is conditionally independent to \\(o\\) through d-seperation as there is a chain \\(s \\to s\u0026rsquo; \\to o\\). So:\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\ \u0026amp;= P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\end{align}\nThis first term is by definition the observation model, so we have:\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\ \u0026amp;= O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a) \\end{align}\nWe now invoke the law of total probability over the second term, over all states:\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a) \\\\ \u0026amp;= O(o|a,s\u0026rsquo;) \\sum_{s}^{} P(s\u0026rsquo;|b,a,s)P(s|b,a) \\end{align}\nIf we know \\(s\\) and \\(a\\) in the \\(P(s\u0026rsquo;|b,a,s)\\) terms, we can drop \\(b\\) because if we already know \\(a,s\\) knowing what probability we are in \\(s\\) (i.e. \\(b(s)\\)) is lame. 
Furthermore, \\(P(s|b,a)=b(s)\\) because the action we take is irrelavent to what CURRENT state we are in, if we already are given a distribution about what state we are in through \\(b\\).\n\\begin{align} b\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;) \\sum_{s}^{} T(s\u0026rsquo;|s,a)b(s) \\end{align}\nKalman Filter A Kalman Filter is a continous state-filter where by each of our \\(T, O, b\\) is represented via a Gaussian distribution. Kalman Filter is discrete state filter but continuous. Consider the final, belief-updating result of the discrete state filter above, and port it to be continous:\n\\begin{equation} b\u0026rsquo;(s\u0026rsquo;) \\propto O(o|a,s\u0026rsquo;) \\int_{s} T(s\u0026rsquo;|s,a) b(s) ds \\end{equation}\nif we modeled our transition probabilties, observations, and initial belief with a gaussian whereby each parameter is a gaussian model parameterized upon a few matricies.\n\\begin{equation} T(s\u0026rsquo;|s,a) = \\mathcal{N}(s\u0026rsquo;|T_{s} s + T_{a} a, \\Sigma_{s}) \\end{equation}\n\\begin{equation} O(o|s\u0026rsquo;) = \\mathcal{N}(o|O_{s}s\u0026rsquo;, \\Sigma_{o}) \\end{equation}\n\\begin{equation} b(s) = \\mathcal{N}(s | \\mu_{b}, \\Sigma_{b}) \\end{equation}\nwhere, \\(T, O\\) are matricies that maps vectors states to vectors. \\(\\Sigma\\) are covariances matricies. Finally, \\(\\mu\\) is a mean belief vector.\nTwo main steps:\npredict \\begin{equation} \\mu_{p} \\leftarrow T_{s} \\mu_{b} + T_{a}a \\end{equation}\n\\begin{equation} \\Sigma_{p} \\leftarrow T_{s} \\Sigma_{b} T_{s}^{T} + \\Sigma_{s} \\end{equation}\ngiven our current belief \\(b\\) and its parameters, and our current situation \\(s,a\\), we want to make a prediction about where we should be next. 
We should be somewhere on: \\(b\u0026rsquo;_{p} = \\mathcal{N}(\\mu_{p}, \\Sigma_{p})\\).\nupdate \\begin{equation} \\mu_{b} \\leftarrow \\mu_{p}+K(o-O_{s}\\mu_{p}) \\end{equation}\n\\begin{equation} \\Sigma_{b} \\leftarrow (I-KO_{s})\\Sigma_{p} \\end{equation}\nwhere \\(K\\) is Kalman gain\nWe are now going to take an observation \\(o\\), and update our belief about where we should be now given our new observation.\nKalman gain \\begin{equation} K \\leftarrow \\Sigma_{p} O_{s}^{T} (O_{s}\\Sigma_{p}O_{s}^{T}+\\Sigma_{O})^{-1} \\end{equation}\nAdditional Information Extended Kalman Filter Kalman Filter, but no linearity required by forcing linearity by a point-Jacobian estimate about the mean of the belief state.\nUnscented Kalman Filter its Extended Kalman Filter but derivative free, which means its clean and hence its unscented.\nIts achieved through using \u0026ldquo;sigma point samples\u0026rdquo;: just taking some representative points (mean + 2 points in each direction), and draw a line.\nParticle Filter Its a filter with Likelihood Weighted Sampling.\nSay we are flying a plane; we want to use our height measures to infer our horizontal location. Let us take an observation model: \\(O(o|s,a) = \\mathcal{N}(o|h(s), \\sigma)\\) (\u0026ldquo;the probability of getting an observation given we are in the state\u0026rdquo;)\nstart off with a prior distribution over the states you have: a distribution over the possible states make \\(N\\) Monte Carlo samples from our prior. These are our particles. use the transition model to propagate the \\(N\\) samples forward to get \\(N\\) new next-state samples take action \\(a\\); calculate \\(O(o|s,a)\\) for each of your propagated samples \\(s\\) (\u0026ldquo;how likely is our observed altitude given each of our sampled states?\u0026rdquo;) normalise the resulting probabilities into a single distribution re-sample \\(N\\) samples from the resulting distribution. these are our updated belief.
repeat from step 3 main pitfalls: if we don\u0026rsquo;t have enough sampled particles, you may get condensations that doesn\u0026rsquo;t make sense\nparticle filter with rejection This is used almost never but if you really want to you can. You\u0026rsquo;d get a bunch of particles and take an action. You propagate the particles forward.\nFor each propergated state \\(s\\), if what you observed \\(o\\) is equal to (or close to, for continuous cases) \\(sample(o|s,a)\\), you keep it around. Otherwise, you discard it.\nYou keep doing this until you have kept enough states to do this again, and repeat.\nInjection particle filter Add a random new particle every so often to prevent particle deprivation.\nAdaptive injection particle filter We perform injection based on the ratio of two moving averages of particle weights\u0026mdash;if all weights are too low, we chuck in some to perturb it\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e are how \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es are updated from observation\u003c/p\u003e\n\u003ch2 id=\"discrete-state-filter\"\u003ediscrete state filter\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nb\u0026rsquo;(s\u0026rsquo;) = P(s\u0026rsquo;|b,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(b\u0026rsquo;\\) is what state we think we are in next, and its a probability distribution over all states, calculated given from \\(b,a,o\\) our current belief about our state, our action, and our observation.\u003c/p\u003e\n\u003cp\u003eWe can perform this belief update by performing \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e over \\(o\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;= P(s\u0026rsquo;|b,a,o) \\\\\n\u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, consider\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-11-09_09-52-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\(b\\) is a representation of \\(s\\) (\u0026ldquo;belief is a representation of what previous state you are in.\u0026rdquo;) However, you will note that \\(s\\) is \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditionally independent\u003c/a\u003e to \\(o\\) through \u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperation\u003c/a\u003e as there is a chain \\(s \\to s\u0026rsquo; \\to o\\). So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|b,a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\\n\u0026amp;= P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis first term is by definition the \u003ca href=\"/posts/kbhbelief/#observation-model\"\u003eobservation model\u003c/a\u003e, so we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto P(o|a,s\u0026rsquo;) P(s\u0026rsquo; | b,a) \\\\\n\u0026amp;= O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe now invoke the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e over the second term, over all states:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;)P(s\u0026rsquo; | b,a) \\\\\n\u0026amp;= O(o|a,s\u0026rsquo;) \\sum_{s}^{} P(s\u0026rsquo;|b,a,s)P(s|b,a)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eIf we know \\(s\\) and \\(a\\) in the \\(P(s\u0026rsquo;|b,a,s)\\) terms, we can drop \\(b\\) because if we already know \\(a,s\\) knowing what probability we are in \\(s\\) (i.e. \\(b(s)\\)) is lame. 
Furthermore, \\(P(s|b,a)=b(s)\\) because the action we take is irrelavent to what CURRENT state we are in, if we already are given a distribution about what state we are in through \\(b\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb\u0026rsquo;(s\u0026rsquo;) \u0026amp;\\propto O(o|a,s\u0026rsquo;) \\sum_{s}^{} T(s\u0026rsquo;|s,a)b(s)\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"kalman-filter\"\u003eKalman Filter\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e is a continous state-filter where by each of our \\(T, O, b\\) is represented via a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003e. \u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e is \u003ca href=\"#discrete-state-filter\"\u003ediscrete state filter\u003c/a\u003e but continuous. Consider the final, belief-updating result of the \u003ca href=\"#discrete-state-filter\"\u003ediscrete state filter\u003c/a\u003e above, and port it to be continous:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb\u0026rsquo;(s\u0026rsquo;) \\propto O(o|a,s\u0026rsquo;) \\int_{s} T(s\u0026rsquo;|s,a) b(s) ds\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we modeled our transition probabilties, observations, and initial belief with a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e whereby each parameter is a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian model\u003c/a\u003e parameterized upon a few matricies.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo;|s,a) = \\mathcal{N}(s\u0026rsquo;|T_{s} s + T_{a} a, \\Sigma_{s})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nO(o|s\u0026rsquo;) = \\mathcal{N}(o|O_{s}s\u0026rsquo;, \\Sigma_{o})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb(s) = \\mathcal{N}(s | \\mu_{b}, \\Sigma_{b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T, O\\) are matricies that maps vectors states to 
vectors. \\(\\Sigma\\) are covariance matrices. Finally, \\(\\mu\\) is a mean belief vector.\u003c/p\u003e\n\u003cp\u003eTwo main steps:\u003c/p\u003e\n\u003ch3 id=\"predict\"\u003epredict\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mu_{p} \\leftarrow T_{s} \\mu_{b} + T_{a}a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Sigma_{p} \\leftarrow T_{s} \\Sigma_{b} T_{s}^{T} + \\Sigma_{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egiven our current belief \\(b\\) and its parameters, and our current situation \\(s,a\\), we want to make a prediction about where we \u003cstrong\u003eshould\u003c/strong\u003e be next. We should be somewhere on: \\(b\u0026rsquo;_{p} = \\mathcal{N}(\\mu_{p}, \\Sigma_{p})\\).\u003c/p\u003e\n\u003ch3 id=\"update\"\u003eupdate\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mu_{b} \\leftarrow \\mu_{p}+K(o-O_{s}\\mu_{p})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Sigma_{b} \\leftarrow (I-KO_{s})\\Sigma_{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(K\\) is \u003ca href=\"#kalmain-gain\"\u003eKalman gain\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eWe are now going to take an observation \\(o\\), and update our belief about where we should be now given our new observation.\u003c/p\u003e\n\u003ch4 id=\"kalmain-gain\"\u003eKalman gain\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nK \\leftarrow \\Sigma_{p} O_{s}^{T} (O_{s}\\Sigma_{p}O_{s}^{T}+\\Sigma_{O})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"additional-information\"\u003eAdditional Information\u003c/h3\u003e\n\u003ch4 id=\"extended-kalman-filter--org1c38d38\"\u003eExtended \u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e, but no linearity required by forcing linearity by a point-Jacobian estimate about the mean of the belief state.\u003c/p\u003e\n\u003ch4 id=\"unscented-kalman-filter--org1c38d38\"\u003eUnscented
\u003ca href=\"#kalman-filter\"\u003eKalman Filter\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eits \u003ca href=\"#extended-kalman-filter--org1c38d38\"\u003eExtended Kalman Filter\u003c/a\u003e but derivative free, which means its clean and hence its unscented.\u003c/p\u003e\n\u003cp\u003eIts achieved through using \u0026ldquo;sigma point samples\u0026rdquo;: just taking some representative points (mean + 2 points in each direction), and draw a line.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-16_18-27-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"particle-filter\"\u003eParticle Filter\u003c/h2\u003e\n\u003cp\u003eIts a filter with \u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSay we are flying a plane; we want to use our height measures to infer our horizontal location. Let us take an observation model: \\(O(o|s,a) = \\mathcal{N}(o|h(s), \\sigma)\\) (\u0026ldquo;the probability of getting an observation given we are in the state\u0026rdquo;)\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart off with a prior distribution over the states you have: a distribution over the possible states\u003c/li\u003e\n\u003cli\u003emake \\(N\\) monte-calro samples from our prior. These are our particles.\u003c/li\u003e\n\u003cli\u003euse the transition model to propagate the \\(N\\) samples forward to get \\(N\\) new next-state samples\u003c/li\u003e\n\u003cli\u003etake action \\(a\\); calculate \\(O(o|s,a)\\) for each of your proper gated samples \\(s\\) (\u0026ldquo;how likely is our observed altitude given each of our sampled states?\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003enormalise the resulting probabilities into a single distribution\u003c/li\u003e\n\u003cli\u003ere-sample \\(N\\) samples that from the resulting distribution. 
these are our updated belief.\u003c/li\u003e\n\u003cli\u003erepeat from step 3\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003emain pitfalls\u003c/strong\u003e\u003c/strong\u003e: if we don\u0026rsquo;t have enough sampled particles, you may get condensations that doesn\u0026rsquo;t make sense\u003c/p\u003e\n\u003ch3 id=\"particle-filter-with-rejection\"\u003eparticle filter with rejection\u003c/h3\u003e\n\u003cp\u003eThis is used almost never but if you really want to you can. You\u0026rsquo;d get a bunch of particles and take an action. You propagate the particles forward.\u003c/p\u003e\n\u003cp\u003eFor each propergated state \\(s\\), if what you observed \\(o\\) is equal to (or close to, for continuous cases) \\(sample(o|s,a)\\), you keep it around. Otherwise, you discard it.\u003c/p\u003e\n\u003cp\u003eYou keep doing this until you have kept enough states to do this again, and repeat.\u003c/p\u003e\n\u003ch3 id=\"injection-particle-filter\"\u003eInjection particle filter\u003c/h3\u003e\n\u003cp\u003eAdd a random new particle every so often to prevent particle deprivation.\u003c/p\u003e\n\u003ch3 id=\"adaptive-injection-particle-filter\"\u003eAdaptive injection particle filter\u003c/h3\u003e\n\u003cp\u003eWe perform injection based on the ratio of two moving averages of particle weights\u0026mdash;if all weights are too low, we chuck in some to perturb it\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfilters/","tags":null,"title":"filter"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfilter_bank/","tags":null,"title":"Filter Bank"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfilterb/","tags":null,"title":"filterb"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfilterba/","tags":null,"title":"filterba"},{"categories":null,"contents":"We have a system of differential 
equations:\n\\begin{equation} \\begin{cases} \\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\ \\dv{U}{t} = 0.4I-0.012 \\\\ \\dv{G}{t} = \\dv{M}{t} - I(t) \\end{cases} \\end{equation}\nwhere, \\(M\\) is a sinusoidal function which we can control.\nWe hope for this system to be as stable as possible.\nFirst, let\u0026rsquo;s try to get a general solution of the system. The linearized(ish) solution takes the shape of:\n\\begin{equation} \\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0) \\end{equation}\nWith:\n\\begin{equation} \\begin{cases} x_1 = 0.73 \\\\ x_2 = 0.0438 \\\\ x_3 = 0.4 \\\\ x_4 = 0.4 \\\\ x_5 = 0.012 \\end{cases} \\end{equation}\nas input parameters. We will follow the method of underdetermined coefficients: taking the homogeneous solution first and then using it to get the general solution.\nHomogeneous System To get the characteristic equation of the homogeneous system, we take the eigenvalue of the system:\nx1,x2,x3,x4,x5 = var(\u0026#34;x1 x2 x3 x4 x5\u0026#34;) matrix = matrix([[0, -x1, 0], [x4, 0,0], [-1, 0,0]]) matrix.eigenvalues() [-sqrt(-x1*x4), sqrt(-x1*x4), 0] Awesome. 
So we can see that our characteristic equation will be:\n\\begin{align} \\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\ \u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t} \\end{align}\nNow, the two \\(e^{ix}\\) functions, one positive and one negative, inspires us to the following results:\n\\begin{equation} \\begin{cases} \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nTreating \\(\\frac{1}{2}\\) and \\(\\frac{1}{2i}\\) (which we can do, because the constants can be defined on any space desired), we have:\n\\begin{align} \\cos x + \\sin x \u0026amp;= \\frac{e^{ix}+e^{-ix}}{2} + \\frac{e^{ix}-e^{-ix}}{2i} \\\\ \u0026amp;= A_1e^{ix}+A_2e^{-ix} \\end{align}\nfor some constant scalars \\(A_1\\) and \\(A_2\\)\n\u0026ldquo;Wait, doesn\u0026rsquo;t the \\(e^{-ix}\\) and \\(-e^{-ix}\\) subtract each other out on the numerator? 
No, notice the denominator is different, so we will have \\((A-B)e^{-ix}\\) after we add the two expressions for some constants \\(A\\) and \\(B\\), it doesn\u0026rsquo;t cancel out.\u0026rdquo;\nPerforming this substitution allows us to reveal the sinusoidal nature of our characteristic equation, and get rid of those pesky \\(i\\).\n\\begin{align} \\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\ \u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t} \\\\ \u0026amp;= \\vec{c_0} + \\vec{c_1\u0026rsquo;} \\cos (\\sqrt{x_1x_4} t)+ \\vec{c_2\u0026rsquo;} \\sin (\\sqrt{x_1x_4} t) \\end{align}\nThe primes here indicate that \\(\\vec{c_1} \\neq \\vec{c_1\u0026rsquo;}\\) because the initial conditions shift when we move to sinusoidal functions.\nWriting this out completely, ditching the vector expressions, we have\n\\begin{equation} \\begin{cases} I_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\ U_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\ G_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) \\end{cases} \\end{equation}\nas the homogenous solutions for the equation.\nUnderdetermined Coefficients Recall the expression we are trying to solve is:\n\\begin{equation} \\begin{cases} \\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\ \\dv{U}{t} = 0.4I-0.012 \\\\ \\dv{G}{t} = \\dv{M}{t} - I(t) \\end{cases} \\end{equation}\nWe dealt with the homongenous part\u0026hellip; but not the next two parts! Let\u0026rsquo;s do that.\nIn order to do that, we will use the method of underdetermined coefficients. 
Recall that:\n\\begin{equation} \\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0) \\end{equation}\nFor now, we will add the extra \\(\\dv{M}{t}\\) term explicitly later. Let us solve for the undetermined coefficients based on the assumption that each function (except for the attenuation by \\(M\\)) is linear:\n\\begin{equation} y(t) = at \\end{equation}\n(\u0026ldquo;it linearly changes over time\u0026rdquo;)\nIts derivative by time is:\n\\begin{equation} y\u0026rsquo;(t) = a \\end{equation}\nPlugging that into our expressions above:\n\\begin{equation} \\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t \\\\ a_{U}t \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0) \\end{equation}\nAnd now, arranging the right expressions such that we can clearly see each coefficient line up, relegating \\(M\\) to the side, and actually multiplying:\n\\begin{equation} \\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t + x_2 \\\\ a_{U}t + x_5 \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) \\end{equation}\n\\begin{equation} \\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(-x_1 (a_{U}t + x_5) \\\\ x_4 (a_{I}t + x_2 )\\\\ 1 a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) \\end{equation}\nAwesome, so now, matching coefficients, we have:\n\\begin{equation} \\begin{cases} a_{I} = -x_1x_5 \\\\ a_{U} = x_4x_2 \\\\ a_{G} = 0 \\end{cases} \\end{equation}\nWhich I honestly could have told you by\u0026hellip;. Just staring at the equations. 
furthermore, we will add the requisite shift of \\(\\dv{M}{t}\\) to the right equations when appropriate.\nSo, adding the \\(M(t)\\) in place, our particular solutions are:\n\\begin{equation} \\begin{cases} I_{p}(t) = -x_1x_5 t + x_3 M(t) \\\\ U_{p}(t) = x_4x_2t \\\\ G_{p}(t) = M(t) \\end{cases} \\end{equation}\nas the particular solutions for the equation.\nGeneral Solution Let us now put the general and particular solutions together:\nRecall that:\n\\begin{equation} \\begin{cases} I_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\ U_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\ G_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) \\end{cases} \\end{equation}\n\\begin{equation} \\begin{cases} I_{p}(t) = -x_1x_5 t + 0.4M(t) \\\\ U_{p}(t) = x_4x_2t \\\\ G_{p}(t) = M(t) \\end{cases} \\end{equation}\nSo, by linear additivity, we have:\n\\begin{equation} \\begin{cases} I(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\ U(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\ G(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t) \\end{cases} \\end{equation}\nSimplification Recall that our function \\(M(t)\\) is a sinusoidal function. And it is being added to some linear combination of sinusoidal functions in each term of our general solution above. Meaning, each of our equations is of the shape:\n[some vertical shift] + [cosine something] + [sine something] + [optional linear drift] + [M(t)]\nFor us to use \\(M(t)\\) to attenuate/stabilize the system, the best we can do is to dampen the sinusoidal part (because \\(M\\) itself is sinusoidal). 
We can\u0026rsquo;t do much of anything else.\nTo do this, we want ideally \\(M(t)\\) be \\(\\pi\\) ahead of the \\(\\cos + \\sin\\) waves in each of the functions; that is, we want \\(M\\) to be out of phase exactly.\n\\(\\cos +\\sin\\) is harder to be out of phase than just \\(\\sin\\); if the latter, we can just figure out its frequency and shift, and be \\(\\pi\\) ahead of it.\nFortunately, our \\(\\cos\\) and \\(\\sin\\) terms have exactly the same contents; therefore, their sum forms just another shifted sine wave (don\u0026rsquo;t believe me, plot it!). Therefore, we will now endeavor to combine them.\nAside: \\(A\\cos (x)+B\\sin (x)\\) Here\u0026rsquo;s how you go about the combination. We desire that \\(A\\cos (x) + B \\sin (x)\\) be a single shifted sine function; we know this is true (by plottingish or using imaginary numbers), so we will set the sum to some arbitrary sine function and solve for its correct coefficients to mimic the sum; that is:\n\\begin{equation} r \\sin (x + \\alpha) := A\\cos (x) + B \\sin (x) \\end{equation}\nwe now desire the coefficients \\(r, \\alpha\\) that would make this true.\nRecall \\(\\sin(a+b) = \\cos a\\sin b + \\sin a\\cos b\\); so:\n\\begin{align} r \\sin (x+\\alpha) \u0026amp; = r(\\cos x \\sin \\alpha + \\sin x \\cos \\alpha ) \\\\ \u0026amp;= r \\sin x \\cos \\alpha + r \\cos x \\sin \\alpha \\\\ \u0026amp;= (r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x \\end{align}\nNow, we have:\n\\begin{equation} (r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x := A\\cos (x) + B \\sin (x) \\end{equation}\nTherefore:\n\\begin{equation} \\begin{cases} r\\sin \\alpha = A \\\\ r \\cos \\alpha = B \\end{cases} \\end{equation}\nAnd we desire correct coefficients \\(r, \\alpha\\) in terms of \\(A, B\\).\nDividing the two expressions:\n\\begin{equation} \\frac{\\sin \\alpha }{\\cos \\alpha } = \\frac{A}{B} \\end{equation}\nTherefore, \\(\\alpha = \\tan^{-1}\\qty(\\frac{A}{B})\\).\nFinally, recall that \\(\\sin^{2} x 
+\\cos^{2} x =1\\) for any \\(x\\). We will use this fact to get \\(r\\).\n\\begin{align} \u0026amp;\\sin^{2} \\alpha + \\cos^{2} \\alpha = 1 \\\\ \\Rightarrow\\ \u0026amp; \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} =1 \\end{align}\nBy rearranging our pair of expressions above to get \\(\\sin \\alpha\\) and \\(\\cos \\alpha\\) by itself.\nFinally, we have:\n\\begin{align} 1 \u0026amp;= \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} \\\\ \u0026amp;= \\frac{A^{2} + B^{2}}{r^{2}} \\end{align}\nSo:\n\\begin{equation} r = \\sqrt{A^{2}+B^{2}} \\end{equation}\nFinally, we have that:\n\\begin{equation} A\\cos (x)+B\\sin (x) = \\sqrt{A^{2}+B^{2}} \\sin \\qty(x + \\tan^{-1}\\qty(\\frac{A}{B})) \\end{equation}\nUsing the above result Recall we are working with:\n\\begin{equation} \\begin{cases} {I}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\ {U}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\ {G}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t) \\end{cases} \\end{equation}\nAnd we desire to use the above to simplify it. Plugging this expression directly in, for instance, to the first expression, we have:\n\\begin{equation} I(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4M(t) \\end{equation}\nNotice! 
Even if the shift changes based on each function, the frequency of the oscillation of each function is the same\u0026mdash;\nas each \\(\\cos x + \\sin x\\) sinusoidal, after applying the identity derived above, takes the form of:\n\\begin{equation} A\\sin (\\sqrt{x_1x_4}t + \\tan^{-1}(B)) \\end{equation}\nwe can see that they all oscillate with frequency of\n\\begin{equation} \\frac{\\sqrt{x_1x_4}}{2\\pi} \\end{equation}\n\u0026ldquo;how many \\(2\\pi\\) can our function go in \\(1\\) second?\u0026rdquo;\nTherefore, the control mechanism must work in frequencies of \\(\\frac{\\sqrt{x_1x_4}}{2\\pi}\\) (and best be exactly or as best as possible out of phase by being phase shifted by \\(\\tan^{-1}(B) + \\pi\\)) to be able to attenuate the sinusoidal the best.\nWe can allow \\(M(t)\\) to go to any sinusoidal function, and compose them together:\n\\begin{equation} I(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4 (c \\sin(ax+b)) \\end{equation}\nOk, let us now spend another aside to figure out the frequency and amplitude of this new curve, which will be our target upon which we are optimizing:\nAttenuating the Sums of Sinusoidals We now have:\n\\begin{equation} a_1\\sin (b_1t + c_1) + a_2 \\sin (b_2t+c_2) \\end{equation}\nThe question is how we can make the first wave destructively interfere with the second one.\n","html":"\u003cp\u003eWe have a system of differential equations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\\n\\dv{U}{t} = 0.4I-0.012 \\\\\n\\dv{G}{t} = \\dv{M}{t} - I(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(M\\) is a sinusoidal function which we can control.\u003c/p\u003e\n\u003cp\u003eWe hope for this system to be as stable as possible.\u003c/p\u003e\n\u003cp\u003eFirst, let\u0026rsquo;s try to get a general solution of the system. 
The linearized(ish) solution takes the shape of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1 = 0.73 \\\\\nx_2 = 0.0438 \\\\\nx_3 = 0.4 \\\\\nx_4 = 0.4 \\\\\nx_5 = 0.012\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas input parameters. We will follow the method of underdetermined coefficients: taking the homogeneous solution first and then using it to get the general solution.\u003c/p\u003e\n\u003ch2 id=\"homogeneous-system\"\u003eHomogeneous System\u003c/h2\u003e\n\u003cp\u003eTo get the characteristic equation of the \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous\u003c/a\u003e system, we take the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of the system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex5\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x1 x2 x3 x4 x5\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eeigenvalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[-sqrt(-x1*x4), sqrt(-x1*x4), 0]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome. So we can see that our characteristic equation will be:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\\n\u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, the two \\(e^{ix}\\) functions, one positive and one negative, inspires us to the following results:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTreating \\(\\frac{1}{2}\\) and \\(\\frac{1}{2i}\\) (which we can do, because the constants can be defined on any space desired), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\cos x + \\sin x \u0026amp;= \\frac{e^{ix}+e^{-ix}}{2} + \\frac{e^{ix}-e^{-ix}}{2i} \\\\\n\u0026amp;= A_1e^{ix}+A_2e^{-ix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003efor some constant scalars \\(A_1\\) and \\(A_2\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Wait, doesn\u0026rsquo;t the \\(e^{-ix}\\) and \\(-e^{-ix}\\) subtract each other out on the numerator? 
No, notice the denominator is different, so we will have \\((A-B)e^{-ix}\\) after we add the two expressions for some constants \\(A\\) and \\(B\\), it doesn\u0026rsquo;t cancel out.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ePerforming this substitution allows us to reveal the sinusoidal nature of our characteristic equation, and get rid of those pesky \\(i\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\mqty(I \\\\ U \\\\ G)_{h} \u0026amp;= \\vec{c_0} e^{0t} + \\vec{c_1} e^{\\sqrt{-x_1x_4}t} + \\vec{c_2} e^{-\\sqrt{-x_1x_4}t} \\\\\n\u0026amp;= \\vec{c_0} + \\vec{c_1}e^{i \\sqrt{x_1x_4}t} + \\vec{c_2} e^{-i \\sqrt{x_1x_4}t} \\\\\n\u0026amp;= \\vec{c_0} + \\vec{c_1\u0026rsquo;} \\cos (\\sqrt{x_1x_4} t)+ \\vec{c_2\u0026rsquo;} \\sin (\\sqrt{x_1x_4} t)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThe primes here indicate that \\(\\vec{c_1} \\neq \\vec{c_1\u0026rsquo;}\\) because the initial conditions shift when we move to sinusoidal functions.\u003c/p\u003e\n\u003cp\u003eWriting this out completely, ditching the vector expressions, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\\nU_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\\nG_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas the homogenous solutions for the equation.\u003c/p\u003e\n\u003ch2 id=\"underdetermined-coefficients\"\u003eUnderdetermined Coefficients\u003c/h2\u003e\n\u003cp\u003eRecall the expression we are trying to solve is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{I}{t} = -0.73 U(t) + 0.0438 + 0.4 \\dv{M}{t} \\\\\n\\dv{U}{t} = 0.4I-0.012 \\\\\n\\dv{G}{t} = \\dv{M}{t} - I(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe dealt with the homongenous part\u0026hellip; but \u003cem\u003enot\u003c/em\u003e the next two parts! 
Let\u0026rsquo;s do that.\u003c/p\u003e\n\u003cp\u003eIn order to do that, we will use the method of underdetermined coefficients. Recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(I \\\\ U \\\\ G) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(I \\\\ U \\\\ G)+ \\dv{M}{t}\\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor now, we will add the extra \\(\\dv{M}{t}\\) term explicitly later. Let us solve for the undetermined coefficients based on the assumption that each function (except for the attenuation by \\(M\\)) is linear:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = at\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;it linearly changes over time\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eIts derivative by time is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(t) = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePlugging that into our expressions above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t \\\\ a_{U}t \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1) + \\mqty(x_2 \\\\ x_5 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, arranging the right expressions such that we can clearly see each coefficient line up, relegating \\(M\\) to the side, and actually multiplying:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(0 \u0026amp; -x_1 \u0026amp; 0 \\\\ x_4 \u0026amp; 0 \u0026amp; 0 \\\\ -1 \u0026amp; 0 \u0026amp; 0 ) \\mqty(a_{I}t + x_2 \\\\ a_{U}t + x_5 \\\\ a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_{I} \\\\ a_{U} \\\\ a_{G} ) = \\mqty(-x_1 (a_{U}t + x_5) \\\\ x_4 (a_{I}t + x_2 )\\\\ 1 
a_{G} t) + \\dv{M}{t} \\mqty(x_3 \\\\ 0 \\\\ 1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAwesome, so now, matching coefficients, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na_{I} = -x_1x_5 \\\\\na_{U} = x_4x_2 \\\\\na_{G} = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich I honestly could have told you by\u0026hellip; just staring at the equations. Furthermore, we will add the requisite shift of \\(\\dv{M}{t}\\) to the right equations when appropriate.\u003c/p\u003e\n\u003cp\u003eSo, adding the \\(M(t)\\) in place, our particular solutions are:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{p}(t) = -x_1x_5 t + x_3 M(t) \\\\\nU_{p}(t) = x_4x_2t \\\\\nG_{p}(t) = M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas the particular solutions for the equation.\u003c/p\u003e\n\u003ch2 id=\"general-solution\"\u003eGeneral Solution\u003c/h2\u003e\n\u003cp\u003eLet us now put the general and particular solutions together:\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{h}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) \\\\\nU_{h}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) \\\\\nG_{h}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nI_{p}(t) = -x_1x_5 t + 0.4M(t) \\\\\nU_{p}(t) = x_4x_2t \\\\\nG_{p}(t) = M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, by linear additivity, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n{I}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\\n{U}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\\n{G}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 
id=\"simplification\"\u003eSimplification\u003c/h2\u003e\n\u003cp\u003eRecall that our function \\(M(t)\\) is a sinusoidal function. And it is being added to some linear combination of sinusoidal functions in each term of our general solution above. Meaning, each of our equations are of the shape:\u003c/p\u003e\n\u003cp\u003e[some vertical shift] + [cosine something] + [sine something] + [optional linear drift] + [M(t)]\u003c/p\u003e\n\u003cp\u003eFor us to use \\(M(t)\\) to attenuate/stabilize the system, the best we can do is to dampen the sinusoidal part (because \\(M\\) itself is sinusoidal). We can\u0026rsquo;t do much of anything else.\u003c/p\u003e\n\u003cp\u003eTo do this, we want ideally \\(M(t)\\) be \\(\\pi\\) ahead of the \\(\\cos + \\sin\\) waves in each of the functions; that is, we want \\(M\\) to be out of phase exactly.\u003c/p\u003e\n\u003cp\u003e\\(\\cos +\\sin\\) is harder to be out of phase than just \\(\\sin\\); if the latter, we can just figure out its frequency and shift, and be \\(\\pi\\) ahead of it.\u003c/p\u003e\n\u003cp\u003eFortunately, our \\(\\cos\\) and \\(\\sin\\) terms have exactly the same contents; therefore, their sum form just another shifted sine wave (don\u0026rsquo;t believe me, plot it!). Therefore, we will now endeavor to combine them.\u003c/p\u003e\n\u003ch3 id=\"aside-a-cos--x--plus-b-sin--x\"\u003eAside: \\(A\\cos (x)+B\\sin (x)\\)\u003c/h3\u003e\n\u003cp\u003eHere\u0026rsquo;s how you go about the combination. 
We desire that \\(A\\cos (x) + B \\sin (x)\\) be a single shifted sine function; we know this is true (by plottingish or using imaginary numbers), so we will set the sum to some arbitrary sine function and solve for its correct coefficients to mimic the sum; that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr \\sin (x + \\alpha) := A\\cos (x) + B \\sin (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now desire the coefficients \\(r, \\alpha\\) that would make this true.\u003c/p\u003e\n\u003cp\u003eRecall \\(\\sin(a+b) = \\cos a\\sin b + \\sin a\\cos b\\); so:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nr \\sin (x+\\alpha) \u0026amp; = r(\\cos x \\sin \\alpha + \\sin x \\cos \\alpha ) \\\\\n\u0026amp;= r \\sin x \\cos \\alpha + r \\cos x \\sin \\alpha \\\\\n\u0026amp;= (r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(r \\sin \\alpha) \\cos x + (r \\cos \\alpha) \\sin x := A\\cos (x) + B \\sin (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nr\\sin \\alpha = A \\\\\nr \\cos \\alpha = B\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd we desire correct coefficients \\(r, \\alpha\\) in terms of \\(A, B\\).\u003c/p\u003e\n\u003cp\u003eDividing the two expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\sin \\alpha }{\\cos \\alpha } = \\frac{A}{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, \\(\\alpha = \\tan^{-1}\\qty(\\frac{A}{B})\\).\u003c/p\u003e\n\u003cp\u003eFinally, recall that \\(\\sin^{2} x +\\cos^{2} x =1\\) for any \\(x\\). 
We will use this fact to get \\(r\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\sin^{2} \\alpha + \\cos^{2} \\alpha = 1 \\\\\n\\Rightarrow\\ \u0026amp; \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} =1\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBy rearranging our pair of expressions above to get \\(\\sin \\alpha\\) and \\(\\cos \\alpha\\) by themselves.\u003c/p\u003e\n\u003cp\u003eFinally, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n1 \u0026amp;= \\qty(\\frac{A}{r})^{2} + \\qty(\\frac{B}{r})^{2} \\\\\n\u0026amp;= \\frac{A^{2} + B^{2}}{r^{2}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = \\sqrt{A^{2}+B^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\\cos (x)+B\\sin (x) = \\sqrt{A^{2}+B^{2}} \\sin \\qty(x + \\tan^{-1}\\qty(\\frac{A}{B}))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"using-the-above-result\"\u003eUsing the above result\u003c/h3\u003e\n\u003cp\u003eRecall we are working with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n{I}(t) = I_0 + I_1\\cos(\\sqrt{x_1x_4}t) + I_2\\sin (\\sqrt{x_1x_4}t) -x_1x_5 t + 0.4M(t) \\\\\n{U}(t) = U_0 + U_1\\cos(\\sqrt{x_1x_4}t) + U_2\\sin (\\sqrt{x_1x_4}t) + x_4x_2t\\\\\n{G}(t) = G_0 + G_1\\cos(\\sqrt{x_1x_4}t) + G_2\\sin (\\sqrt{x_1x_4}t) + M(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd we desire to use the above to simplify it. Plugging this expression directly in, for instance, to the first expression, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4M(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotice! 
Even if the shift changes based on each function, the \u003cem\u003efrequency\u003c/em\u003e of the oscillation of each function is the same\u0026mdash;\u003c/p\u003e\n\u003cp\u003eas each \\(\\cos x + \\sin x\\) sinusoidal, after applying the identity derived above, takes the form of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\\sin (\\sqrt{x_1x_4}t + \\tan^{-1}(B))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can see that they all oscillate with frequency of\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\sqrt{x_1x_4}}{2\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;how many \\(2\\pi\\) can our function go in \\(1\\) second?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eTherefore, the control mechanism must work in frequencies of \\(\\frac{\\sqrt{x_1x_4}}{2\\pi}\\) (and best be exactly or as best as possible out of phase by being phase shifted by \\(\\tan^{-1}(B) + \\pi\\)) to be able to attenuate the sinusoidal the best.\u003c/p\u003e\n\u003cp\u003eWe can allow \\(M(t)\\) to go to any sinusoidal function, and compose them together:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(t) = I_0 + \\sqrt{ {I_{1}}^{2} + {I_{2}}^{2} } \\sin \\qty(\\sqrt{x_1x_4}t + \\tan^{-1} \\qty(\\frac{I_1}{I_2})) -x_1x_5 t + 0.4 (c \\sin(ax+b))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, let us now spend another aside to figure out the frequency and amplitude of this new curve, which will be our target upon which we are optimizing:\u003c/p\u003e\n\u003ch4 id=\"attenuating-the-sums-of-sinusoidals\"\u003eAttenuating the Sums of Sinusoidals\u003c/h4\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1\\sin (b_1t + c_1) + a_2 \\sin (b_2t+c_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe question is how we can make the first wave destructively interfere with the second one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_finance_eigen/","tags":null,"title":"Finance 
(Eigen)"},{"categories":null,"contents":"Why do we have a market? Basically: it allows society to make decisions about the value of things\u0026mdash;with the wisdom of the crowd. The stock market is how we (as people) decide what to make and how to make it.\nMisc. Questions About the Market Misc. Financial Market Questions\nKnowledge price Random Walk Hypothesis Brownian Motion Arbitrage Pricing Derivative Pricing options CAPM Stochastic Discount Factor GARCH ETF accounting price stock indicies VWAP short selling darkpool fundamental investing Second-Level Thinking stock market survey OTC markets NBBO LiquidNet ","html":"\u003ch2 id=\"why-do-we-have-a-market\"\u003eWhy do we have a market?\u003c/h2\u003e\n\u003cp\u003eBasically: it allows society to make decisions about the value of things\u0026mdash;with the wisdom of the crowd. The stock market is how we (as people) decide what to make and how to make it.\u003c/p\u003e\n\u003ch2 id=\"misc-dot-questions-about-the-market\"\u003eMisc. Questions About the Market\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmisc_financial_market_questions/\"\u003eMisc. 
Financial Market Questions\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"knowledge\"\u003eKnowledge\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharbitrage_pricing/\"\u003eArbitrage Pricing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhderivative_pricing/\"\u003eDerivative Pricing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptions/\"\u003eoptions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstochastic_discount_factor/\"\u003eStochastic Discount Factor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgarch/\"\u003eGARCH\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhetf/\"\u003eETF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaccounting_price/\"\u003eaccounting price\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstock_indicies/\"\u003estock indicies\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvwap/\"\u003eVWAP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhshort_selling/\"\u003eshort selling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundimental_investing/#second-level-thinking\"\u003eSecond-Level Thinking\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhstock_market_survey/\"\u003estock market survey\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhotc_markets/\"\u003eOTC markets\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnbbo/\"\u003eNBBO\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhliquidnet/\"\u003eLiquidNet\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinancial_markets_intro/","tags":["index"],"title":"Financial Market"},{"categories":null,"contents":"We define:\n\\begin{equation} \\mathbb{F}^{\\infty} = \\{(x_1, x_2, \\dots): x_{j} \\in \\mathbb{F}, \\forall j=1,2,\\dots\\} \\end{equation}\nclosure of addition We define addition:\n\\begin{equation} (x_1,x_2,\\dots)+(y_1,y_2, \\dots) = (x_1+y_1,x_2+y_2, \\dots ) \\end{equation}\nEvidently, the output is also of infinite length, and as addition in \\(\\mathbb{F}\\) is closed, then also closed.\nclosure of scalar multiplication We define scalar multiplication:\n\\begin{equation} \\lambda (x_1,x_2, \\dots) = (\\lambda x_1, \\lambda x_2, \\dots ) \\end{equation}\nditto. as above\ncommutativity extensible from commutativity of \\(\\mathbb{F}\\)\nassociativity extensible from associativity of \\(\\mathbb{F}\\), for both operations\ndistribution \\begin{align} \\lambda ((x_1,x_2,\\dots)+(y_1,y_2, \\dots)) \u0026amp;= \\lambda (x_1+y_1,x_2+y_2, \\dots ) \\\\ \u0026amp;= (\\lambda (x_1+y_1),\\lambda (x_2+y_2), \\dots ) \\\\ \u0026amp;= (\\lambda x_1+\\lambda y_1,\\lambda x_2+\\lambda y_2, \\dots) \\\\ \u0026amp;= (\\lambda x_1, \\lambda x_2, \\dots) + (\\lambda y_1, \\lambda y_2, \\dots) \\\\ \u0026amp;= \\lambda (x_1, x_2, \\dots) + \\lambda (y_1, y_2, \\dots) \\end{align}\nditto. 
for the other direction.\nadditive ID \\begin{equation} (0,0, \\dots ) \\end{equation}\nadditive inverse extensive from \\(\\mathbb{F}\\)\n\\begin{equation} (-a, -b, \\dots ) + (a,b, \\dots ) = 0 \\end{equation}\nscalar multiplicative ID \\(1\\)\n","html":"\u003cp\u003eWe define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{F}^{\\infty} = \\{(x_1, x_2, \\dots): x_{j} \\in \\mathbb{F}, \\forall j=1,2,\\dots\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"closure-of-addition\"\u003eclosure of addition\u003c/h2\u003e\n\u003cp\u003eWe define addition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x_1,x_2,\\dots)+(y_1,y_2, \\dots) = (x_1+y_1,x_2+y_2, \\dots )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEvidently, the output is also of infinite length, and as addition in \\(\\mathbb{F}\\) is closed, then also closed.\u003c/p\u003e\n\u003ch2 id=\"closure-of-scalar-multiplication\"\u003eclosure of scalar multiplication\u003c/h2\u003e\n\u003cp\u003eWe define scalar multiplication:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda (x_1,x_2, \\dots) = (\\lambda x_1, \\lambda x_2, \\dots )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cem\u003editto.\u003c/em\u003e as above\u003c/p\u003e\n\u003ch2 id=\"commutativity--kbhcommutivity-dot-md\"\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eextensible from \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e of \\(\\mathbb{F}\\)\u003c/p\u003e\n\u003ch2 id=\"associativity--kbhassociative-dot-md\"\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eextensible from \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e of \\(\\mathbb{F}\\), for both operations\u003c/p\u003e\n\u003ch2 id=\"distribution\"\u003edistribution\u003c/h2\u003e\n\u003cp\u003e\\begin{align}\n\\lambda ((x_1,x_2,\\dots)+(y_1,y_2, \\dots)) \u0026amp;= \\lambda (x_1+y_1,x_2+y_2, \\dots ) 
\\\\\n\u0026amp;= (\\lambda (x_1+y_1),\\lambda (x_2+y_2), \\dots ) \\\\\n\u0026amp;= (\\lambda x_1+\\lambda y_1,\\lambda x_2+\\lambda y_2, \\dots) \\\\\n\u0026amp;= (\\lambda x_1, \\lambda x_2, \\dots) + (\\lambda y_1, \\lambda y_2, \\dots) \\\\\n\u0026amp;= \\lambda (x_1, x_2, \\dots) + \\lambda (y_1, y_2, \\dots)\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u003cem\u003editto.\u003c/em\u003e for the other direction.\u003c/p\u003e\n\u003ch2 id=\"additive-id\"\u003eadditive ID\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(0,0, \\dots )\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additive-inverse\"\u003eadditive inverse\u003c/h2\u003e\n\u003cp\u003eextensive from \\(\\mathbb{F}\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-a, -b, \\dots ) + (a,b, \\dots ) = 0\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"scalar-multiplicative-id\"\u003escalar multiplicative ID\u003c/h2\u003e\n\u003cp\u003e\\(1\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinfty_is_a_vector_space_over_f/","tags":null,"title":"Finfinity is a Vector Space over F"},{"categories":null,"contents":"The Finite Difference Method is a method of solving partial Differential Equations. It follows two steps:\nDevelop discrete difference equations for the desired expression Algebraically solve these equations to yield stepped solutions https://www.youtube.com/watch?v=ZSNl5crAvsw\nFollow Along We will try to solve:\n\\begin{equation} \\pdv{p(t,x)}{t} = \\frac{1}{2}\\pdv[2]{p(t,x)}{x} \\end{equation}\nTo aid in notation, let us:\n\\begin{equation} p(t_{i}, x_{j}) := p_{i,j} \\end{equation}\nto represent one distinct value of our function \\(p\\).\nLet\u0026rsquo;s begin by writing our expression above via our new notation:\n\\begin{equation} \\pdv{p_{i,j}}{t}= \\frac{1}{2} \\pdv[2]{p_{i,j}}{x} \\end{equation}\nGreat. 
Now, let\u0026rsquo;s think about the left side and try to turn it into a difference eqn:\nWhat exactly is\u0026mdash;\n\\begin{equation} \\pdv{p_{i,j}}{t} \\end{equation}\nas a finite difference? Well, it is just:\n\\begin{equation} \\frac{p_{i+1,j}-p_{i,{j}}}{\\Delta t} \\end{equation}\nWhat about second partials?\nWell, what is\u0026mdash;\n\\begin{equation} \\pdv[2]{p_{i,j}}{x} \\end{equation}\nIt is:\n\\begin{equation} \\frac{\\pdv{p_{i,j+1}}{x}- \\pdv{p_{i,j}}{x}}{\\Delta x} \\end{equation}\nExpanding the top expressions even more difference expressions:\n\\begin{equation} \\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}}}{\\Delta x}- \\frac{p_{i,{j+1}}-p_{i,{j}}}{\\Delta x}}{\\Delta x} \\end{equation}\nThis equals to:\n\\begin{equation} \\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}} - p_{i,{j+1}}+p_{i,{j}}}{(\\Delta x)^{2}} \\end{equation}\nFinally, substitute this into our expression, then solve for some \\(p_{{i+1}, j}\\) in terms of \\(p_{i, ?}\\). We will treat the entire \u0026ldquo;row\u0026rdquo; of \\(p_{i,?}\\) as our initial condition, then solve for the rest + propagate forward.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhfinite_difference_method/\"\u003eFinite Difference Method\u003c/a\u003e is a method of solving partial \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e. 
It follows two steps:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eDevelop discrete \u003ca href=\"/posts/kbhdifference_equation/\"\u003edifference equation\u003c/a\u003es for the desired expression\u003c/li\u003e\n\u003cli\u003eAlgebraically solve these equations to yield stepped solutions\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003ca href=\"https://www.youtube.com/watch?v=ZSNl5crAvsw\"\u003ehttps://www.youtube.com/watch?v=ZSNl5crAvsw\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"follow-along\"\u003eFollow Along\u003c/h2\u003e\n\u003cp\u003eWe will try to solve:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{p(t,x)}{t} = \\frac{1}{2}\\pdv[2]{p(t,x)}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo aid in notation, let us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(t_{i}, x_{j}) := p_{i,j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto represent one distinct value of our function \\(p\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by writing our expression above via our new notation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{p_{i,j}}{t}= \\frac{1}{2} \\pdv[2]{p_{i,j}}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. Now, let\u0026rsquo;s think about the left side and try to turn it into a difference eqn:\u003c/p\u003e\n\u003cp\u003eWhat exactly is\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{p_{i,j}}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas a finite difference? 
Well, it is just:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{p_{i+1,j}-p_{i,{j}}}{\\Delta t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhat about second partials?\u003c/p\u003e\n\u003cp\u003eWell, what is\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{p_{i,j}}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIt is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\pdv{p_{i,j+1}}{x}- \\pdv{p_{i,j}}{x}}{\\Delta x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding the top expressions even more difference expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}}}{\\Delta x}- \\frac{p_{i,{j+1}}-p_{i,{j}}}{\\Delta x}}{\\Delta x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis equals to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\frac{p_{i,{j+2}}-p_{i,{j+1}} - p_{i,{j+1}}+p_{i,{j}}}{(\\Delta x)^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, substitute this into our expression, then solve for some \\(p_{{i+1}, j}\\) in terms of \\(p_{i, ?}\\). We will treat the entire \u0026ldquo;row\u0026rdquo; of \\(p_{i,?}\\) as our initial condition, then solve for the rest + propagate forward.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinite_difference_method/","tags":null,"title":"Finite Difference Method"},{"categories":null,"contents":"A graph of states which is closed and connected.\nAlso relating to this is a derived variable. One way to prove reaching any state is via Floyd\u0026rsquo;s Invariant Method.\n","html":"\u003cp\u003eA graph of states which is closed and connected.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-04-07_10-50-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAlso relating to this is a \u003ca href=\"/posts/kbhderived_variable/\"\u003ederived variable\u003c/a\u003e. 
One way to prove reaching any state is via \u003ca href=\"/posts/kbhfloyd_s_invariant_method/\"\u003eFloyd\u0026rsquo;s Invariant Method\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinite_state_machine/","tags":null,"title":"Finite State Machine"},{"categories":null,"contents":"A finite-dimensional vector space is a vector space where some actual list (which remember, has finite length) of vectors spans the space.\nAn infinite-demensional vector space is a vector space that\u0026rsquo;s not a finite-dimensional vector space.\nadditional information every finite-dimensional vector space has a basis Begin with a spanning list in the finite-dimensional vector space you are working with. Apply the fact that all spanning lists contains a basis of which you are spanning. Therefore, some elements of that list form a basis of the finite-dimensional vector space you are working with. \\(\\blacksquare\\)\nfinite-dimensional subspaces finite-dimensional subspaces\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e where some actual list (which remember, has finite length) of vectors \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e the space.\u003c/p\u003e\n\u003cp\u003eAn \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e that\u0026rsquo;s not a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"every-finite-dimensional-vector-space--kbhfinite-dimensional-vector-space-dot-md--has-a-basis--kbhbasis-dot-md\"\u003eevery \u003ca 
href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e has a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eBegin with a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list in the \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e you are working with. Apply the fact that \u003ca href=\"/posts/kbhbasis/#all-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning-lists-contains-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis-of-which-you-are-id-e8109222-5548-4d08-b6df-8f933f2dbb36-spanning\"\u003eall spanning lists contains a basis of which you are spanning\u003c/a\u003e. Therefore, some elements of that list form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e you are working with. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"finite-dimensional-subspaces--kbhsubspace-dot-md\"\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfinite_dimensional_vector_space/","tags":null,"title":"finite-dimensional vector space"},{"categories":null,"contents":"Fireside Chats are a group of broadcasts by Franklin D. Roosevelt (FDR) which allowed him to speak directly to the people.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfireside_chats/\"\u003eFireside Chats\u003c/a\u003e are a group of broadcasts by \u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. 
Roosevelt (FDR)\u003c/a\u003e which allowed him to speak directly to the people.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfireside_chats/","tags":null,"title":"Fireside Chats"},{"categories":null,"contents":"Below you will find a list of the Fireside articles.\nArticle Date Welcome to the Fireside \u0026lt;2023-10-16 Mon\u0026gt; Make Models Go Brrr \u0026lt;2023-10-23 Mon\u0026gt; Todo Lists \u0026lt;2023-10-30 Mon\u0026gt; ","html":"\u003cp\u003eBelow you will find a list of the Fireside articles.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eArticle\u003c/th\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfireside_article/\"\u003eWelcome to the Fireside\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-16 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmake_models_go_brrr/\"\u003eMake Models Go Brrr\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-23 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtodo_lists/\"\u003eTodo Lists\u003c/a\u003e\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-30 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfireside/","tags":["index"],"title":"Fireside Index"},{"categories":null,"contents":"First Order ODEs are Differential Equations that only takes one derivative.\nTypically, by the nature of how they are modeled, we usually 
state it in a equation between three things:\n\\begin{equation} t, y(t), y\u0026rsquo;(t) \\end{equation}\nas in\u0026mdash;we only take one derivative.\nSometimes the solution may not be analytic, but is well-defined:\n\\begin{equation} y\u0026rsquo; = e^{-x^{2}} \\end{equation}\nwe know that, by the fundamental theorem of calculus, gives us:\n\\begin{equation} y(x) = \\int_{0}^{x} e^{-s{2}} \\dd{s} \\end{equation}\nIndeed this function doesn\u0026rsquo;t have an elementary integral, but still has a well defined result. Almost always differential equations doesn\u0026rsquo;t have elementary solutions.\nSeparated Equations There is a very nice class of this type of first-order equations:\n\\begin{equation} y\u0026rsquo; = f(t,y) \\end{equation}\nA general function here are not these cases.\nMentally, we think of this structure on a \\(t,y\\) plane, where at each point \\((t,y)\\) the slope of the graph matches the slope given by \\(f(t,y)\\). To solve for the rest of the evolution, we consider an initial state of this system, say \\(y(1) = 3\\).\nsymbolic methods: generally, you are the happiest when you find specific formulas for \\(y(t)\\). 
qualitative methods: for instance, slope fields autonomous ODEs This is a special case of these types of equations, called autonomous ODEs\n\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nIn most cases, this resolves into some \\(y(t) = T_0+Ce^{-ht}\\).\nseperable \\begin{equation} y\u0026rsquo; = f(y)g(t) \\end{equation}\ngenerally, this can be solved with division algorithm.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e are \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e that only takes one derivative.\u003c/p\u003e\n\u003cp\u003eTypically, by the nature of how they are modeled, we usually state it in a equation between three things:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt, y(t), y\u0026rsquo;(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas in\u0026mdash;we only take one derivative.\u003c/p\u003e\n\u003cp\u003eSometimes the solution may not be analytic, but is well-defined:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = e^{-x^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe know that, by the fundamental theorem of calculus, gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x) = \\int_{0}^{x} e^{-s{2}} \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIndeed this function doesn\u0026rsquo;t have an elementary integral, but still has a well defined result. 
Almost always differential equations doesn\u0026rsquo;t have elementary solutions.\u003c/p\u003e\n\u003ch2 id=\"separated-equations\"\u003eSeparated Equations\u003c/h2\u003e\n\u003cp\u003eThere is a very nice class of this type of first-order equations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(t,y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eA general function here are not these cases.\u003c/p\u003e\n\u003cp\u003eMentally, we think of this structure on a \\(t,y\\) plane, where at each point \\((t,y)\\) the slope of the graph matches the slope given by \\(f(t,y)\\). To solve for the rest of the evolution, we consider an initial state of this system, say \\(y(1) = 3\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esymbolic methods\u003c/strong\u003e: generally, you are the happiest when you find specific formulas for \\(y(t)\\).\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003equalitative methods\u003c/strong\u003e: for instance, slope fields\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"autonomous-odes--kbhautonomous-odes-dot-md\"\u003e\u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis is a special case of these types of equations, called \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn most cases, this resolves into some \\(y(t) = T_0+Ce^{-ht}\\).\u003c/p\u003e\n\u003ch3 id=\"seperable--kbhseperable-diffequ-dot-md\"\u003e\u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)g(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egenerally, this can be solved with \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision 
algorithm\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfirst_order_odes/","tags":null,"title":"First Order ODEs"},{"categories":null,"contents":"Consider the case where there are two functions interacting with each other:\n\\begin{equation} y_1(t) \\dots y_{2}(t) \\end{equation}\nSo we have more than one dependent function, with functions \\(y_1, y_1\u0026rsquo;, y_2, y_2\u0026rsquo;\\) and so forth. To deal with this, we simply make it into a matrix system:\n\\begin{equation} y(t) = \\mqty(y_1(t) \\\\ \\dots \\\\ y_{n}(t)) \\end{equation}\nFor instance, should we have:\n\\begin{equation} \\begin{cases} y_1\u0026rsquo; = 3y_1 - 2y_2 \\\\ y_2\u0026rsquo; = -y_1 + 5y_2 \\end{cases} \\end{equation}\nWe can write this system in a matrix like such:\n\\begin{equation} y\u0026rsquo;(t) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) y(t) \\end{equation}\nMeaning:\n\\begin{equation} y\u0026rsquo; = Ay \\end{equation}\nwhich is a single linear equation.\nRecall that we had:\n\\begin{equation} y\u0026rsquo; = Ay \\end{equation}\nLet \\(v\\) be an eigenvector of \\(A\\) with \\(\\lambda\\) be an eigenvalue. Let us guess that \\(y = e^{\\lambda t} v\\) is a solution.\nPlugging this in, we have:\n\\begin{equation} y\u0026rsquo; = Ay = A(e^{\\lambda t} v) = e^{\\lambda t} Av = \\lambda e^{\\lambda t} v \\end{equation}\nOf course, \\(y\u0026rsquo; = \\lambda e^{\\lambda t} v\\).\nMeaning this is a solution of our system. Recall finding eigenvalues with actual numbers, so we want some \\(\\lambda\\) for which \\(det(A-\\lambda I)=0\\).\nPlugging the eigenvalues back, and recalling the superposition principle, we are left with some:\n\\begin{equation} y(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n} \\end{equation}\nThis is true if we have enough eigenvalues which forms a basis. 
Now, at \\(y(0)\\), we have some \\(y_0 = c_1v_1 + \u0026hellip; + c_{n}v_{n}\\).\nThis yields a system \\(y_{0} = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] \\mqty[c_1 \\\\ \\dots \\\\ c_{n}]\\).\nWe call this matrix written in terms of eigenvectors \\(E\\), that is:\n\\begin{equation} E = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] \\end{equation}\nFinally, we have:\n\\begin{equation} \\mqty[c_1 \\\\ \\dots \\\\ c_{n}] = E^{-1} y_0 \\end{equation}\nThis method works for cases where we have enough independent eigenvectors to admit enough initial conditions. Otherwise, matrix exponentiation.\nSpecial Cases 2x2 with \\(\\lambda_{2} = \\bar{\\lambda_{1}}\\) For any two by two system, where there the eigenvalues are conjugates of each other, we can formulate a solution in the form:\n\\begin{equation} y(t) = c_1 Re(e^{\\lambda t} v) + c_2 Im(e^{\\lambda t}v) \\end{equation}\nif the matrix representing the system admits two eigenvalues, \\(\\lambda\\) and \\(\\bar{\\lambda}\\). We can obtain this by rephrasing one solution as \\(e^{\\lambda t} = e^{a + ib} e^{t} = e^{a+t}(\\cos b + i\\sin b)\\).\nTips and Tricks Changing higher order system into lower orders We can actually write higher order linear system this way too:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nwe can actually construct:\n\\begin{align} \u0026amp; y_1(t) = y(t) \\\\ \u0026amp; y_2(t) = y\u0026rsquo;(t) \\end{align}\nAnd therefore, we can construct:\n\\begin{equation} \\mqty(y_1 \\\\ y_2)\u0026rsquo; = \\mqty(y_2 \\\\ -by1 - ay2) = \\mqty(0 \u0026amp; 1 \\\\ -b \u0026amp;-a) \\mqty(y_1 \\\\ y_2) \\end{equation}\n","html":"\u003cp\u003eConsider the case where there are two functions interacting with each other:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_1(t) \\dots y_{2}(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we have more than one dependent function, with functions \\(y_1, y_1\u0026rsquo;, y_2, y_2\u0026rsquo;\\) and so forth. 
To deal with this, we simply make it into a matrix system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = \\mqty(y_1(t) \\\\ \\dots \\\\ y_{n}(t))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor instance, should we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny_1\u0026rsquo; = 3y_1 - 2y_2 \\\\\ny_2\u0026rsquo; = -y_1 + 5y_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can write this system in a matrix like such:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(t) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) y(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = Ay\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a single linear equation.\u003c/p\u003e\n\u003cp\u003eRecall that we had:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = Ay\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet \\(v\\) be an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e of \\(A\\) with \\(\\lambda\\) be an eigenvalue. Let us guess that \\(y = e^{\\lambda t} v\\) is a solution.\u003c/p\u003e\n\u003cp\u003ePlugging this in, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = Ay = A(e^{\\lambda t} v) = e^{\\lambda t} Av = \\lambda e^{\\lambda t} v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, \\(y\u0026rsquo; = \\lambda e^{\\lambda t} v\\).\u003c/p\u003e\n\u003cp\u003eMeaning this is a solution of our system. 
Recall \u003ca href=\"/posts/kbheigenvalue/#finding-eigenvalues-with-actual-numbers\"\u003efinding eigenvalues with actual numbers\u003c/a\u003e, so we want some \\(\\lambda\\) for which \\(det(A-\\lambda I)=0\\).\u003c/p\u003e\n\u003cp\u003ePlugging the eigenvalues back, and recalling the \u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e, we are left with some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is true if we have enough \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es which forms a basis. Now, at \\(y(0)\\), we have some \\(y_0 = c_1v_1 + \u0026hellip; + c_{n}v_{n}\\).\u003c/p\u003e\n\u003cp\u003eThis yields a system \\(y_{0} = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] \\mqty[c_1 \\\\ \\dots \\\\ c_{n}]\\).\u003c/p\u003e\n\u003cp\u003eWe call this matrix written in terms of eigenvectors \\(E\\), that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[c_1 \\\\ \\dots \\\\ c_{n}] = E^{-1} y_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis method works for cases where we have enough \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es to admit enough initial conditions. 
Otherwise, \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation.\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"special-cases\"\u003eSpecial Cases\u003c/h2\u003e\n\u003ch3 id=\"2x2-with-lambda-2-bar-lambda-1\"\u003e2x2 with \\(\\lambda_{2} = \\bar{\\lambda_{1}}\\)\u003c/h3\u003e\n\u003cp\u003eFor any two by two system, where there the eigenvalues are conjugates of each other, we can formulate a solution in the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = c_1 Re(e^{\\lambda t} v) + c_2 Im(e^{\\lambda t}v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif the matrix representing the system admits two eigenvalues, \\(\\lambda\\) and \\(\\bar{\\lambda}\\). We can obtain this by rephrasing one solution as \\(e^{\\lambda t} = e^{a + ib} e^{t} = e^{a+t}(\\cos b + i\\sin b)\\).\u003c/p\u003e\n\u003ch2 id=\"tips-and-tricks\"\u003eTips and Tricks\u003c/h2\u003e\n\u003ch3 id=\"changing-higher-order-system-into-lower-orders\"\u003eChanging higher order system into lower orders\u003c/h3\u003e\n\u003cp\u003eWe can actually write higher order linear system this way too:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can actually construct:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; y_1(t) = y(t) \\\\\n\u0026amp; y_2(t) = y\u0026rsquo;(t)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd therefore, we can construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(y_1 \\\\ y_2)\u0026rsquo; = \\mqty(y_2 \\\\ -by1 - ay2) = \\mqty(0 \u0026amp; 1 \\\\ -b \u0026amp;-a) \\mqty(y_1 \\\\ y_2)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsystems_of_odes/","tags":null,"title":"First-Order Linear Systems of ODEs"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhflexua/","tags":null,"title":"flexua"},{"categories":null,"contents":"To prove properties on Finite 
State Machines, we can construct a proof:\nstating an invariant proving that the invarient is true for all states for all transitions: assume invarient is true before transition and prove that its true after So, essentially induction.\n","html":"\u003cp\u003eTo prove properties on \u003ca href=\"/posts/kbhfinite_state_machine/\"\u003eFinite State Machine\u003c/a\u003es, we can construct a proof:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estating an invariant\u003c/li\u003e\n\u003cli\u003eproving that the invarient is true for all states\u003c/li\u003e\n\u003cli\u003efor all transitions: assume invarient is true before transition and prove that its true after\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo, essentially induction.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfloyd_s_invariant_method/","tags":null,"title":"Floyd's Invariant Method"},{"categories":null,"contents":"flux is the volume of flow per unit time: multiplying the speed of flow \\(\\frac{m}{s}\\) against the area \\(m^{2}\\) gives you the volume flowed per second \\(\\frac{m^{3}}{s}\\).\ntilted flux Flow, however, is not necessarily perpendicular to the plain. Therefore, we only analyze the perpendicular component of the flow: that is \u0026mdash; \\(\\Phi = Av \\cos \\theta\\). Why? 
If we tipped the plane (of certain area) up, the flow that used to cross the bottom of the plane now will not go through the plane, so we want to account for that.\nelectric flux The electric flux through an are, hopefully not unexpectedly, is:\n\\begin{equation} \\Phi_{E} = \\int E \\cdot dA \\end{equation}\nwhere, \\(E\\) is the electric field strength though that differential area, and \\(dA\\) is the actual differential area.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhflux/\"\u003eflux\u003c/a\u003e is the volume of flow per unit time: multiplying the speed of flow \\(\\frac{m}{s}\\) against the area \\(m^{2}\\) gives you the volume flowed per second \\(\\frac{m^{3}}{s}\\).\u003c/p\u003e\n\u003ch2 id=\"tilted-flux\"\u003etilted flux\u003c/h2\u003e\n\u003cp\u003eFlow, however, is not necessarily \u003cstrong\u003eperpendicular\u003c/strong\u003e to the plain. Therefore, we only analyze the perpendicular component of the flow: that is \u0026mdash; \\(\\Phi = Av \\cos \\theta\\). Why? If we tipped the plane (of certain area) up, the flow that used to cross the bottom of the plane now will not go through the plane, so we want to account for that.\u003c/p\u003e\n\u003ch2 id=\"electric-flux\"\u003eelectric flux\u003c/h2\u003e\n\u003cp\u003eThe electric flux through an are, hopefully not unexpectedly, is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Phi_{E} = \\int E \\cdot dA\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(E\\) is the \u003ca href=\"/posts/kbhelectric_field/\"\u003eelectric field\u003c/a\u003e strength though that differential area, and \\(dA\\) is the actual differential area.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhflux/","tags":null,"title":"flux"},{"categories":null,"contents":"Abstract Alzheimer\u0026rsquo;s Disease (AD) is a demonstrativeness disease marked by declines in cognitive function. 
Despite early diagnoses being critical for AD prognosis and treatment, currently accepted diagnoses mechanisms for AD requires clinical outpatient testing with a medical professional, which reduces its accessibility. In this work, we propose a possible feature extraction mechanism leveraging the previously demonstrated errors of Hidden Markov-based forced alignment (FA) tools upon cognitively impaired patients as an automated means to quantify linguistic disfluency.\nBackground Annotated linguistic disfluency features, used in combination with semantic features, have been shown ((Antonsson et al. 2021)) to improve the accuracy of AD classification systems. However, manual annotation of disfluency hinders the throughput of AD detection systems. Furthermore, there is a dearth ((Guo et al. 2021)) of data provided with preexisting annotated results.\nExisting acoustic-only approaches ((Lindsay, Tröger, and König 2021; Shah et al. 2021)) frequently places focus on the actual speech features such as silence, energy, rate, or loudness. While this approach has returned promising results ((Wang et al. 2019)), it renders the acoustic data features extracted independent of actual linguistic disfluency. Of course, some approaches (including that in (Wang et al. 2019)) perform separate, manual annotation on both aspects and treat them jointly with late fusion. However, no existing approaches have an effective feature representation that bridges the acoustic-linguistic gap.\nAn incidental effect of Hidden Markov Model (HMM) based Viterbi forced alignment (FA) tools (such as P2FA) is that its quality is shown ((Saz et al. 2009)) to be lowered in cognitively impaired speakers, resulting from a roughly \\(50\\%\\) decrease in power of discrimination between stressed and unstressed vowels. 
Other ASR and FA approaches ((Tao, Xueqing, and Bian 2010)) has since been designed discriminate against such changes more effectively.\nProposal By encoding FA results of HMM based approaches in embedding space, we introduce a novel feature representation of acoustic information. As FA requires an existing transcript, this method is considered semi-automated because the test must be either administered via a common-transcript, transcribed manually later, or transcribed using ASR techniques. After encoding, the proposed feature can be used in a few ways.\nEuclidean distance The Euclidean Distance approach compares the embedding of the HMM FA vector with a \u0026ldquo;reference\u0026rdquo; benchmark via pythagoras in high dimension.\nThere are two possible modalities by which the \u0026ldquo;reference\u0026rdquo; can be acquired; if the data was sourced via the patient sample reading a standardized transcript, a reference FA sample could be provided via the audio of another individual reading the same transcript screened traditionally screened without AD. Therefore, the \u0026ldquo;deviation from reference\u0026rdquo; would be used as an input feature group to any proposed model architectures.\nAlternatively, as stated before, other FA approaches are less susceptible to lexical hindrances with decreased discriminatory power. Therefore, we could equally take the Euclidean distance between embedded results of two different FA mechanisms\u0026mdash;one shown to be more sustainable to cognitively impaired speakers and one not\u0026mdash;as input features to training architectures.\nCross-Attention One key issue with the Euclidean Distance approach is that the difference between \u0026ldquo;normal\u0026rdquo; pauses, changes in speaker pace, etc. which would be variable between different speakers even controlling for AD prognoses.\nIn computer vision, few-shot classification cross-attention ((Hou et al. 
2019)) has shown promising results in discrimination; furthermore, trainable cross-attention ensures more flexible control to non-prognostic verbal disturbances such as a normal change in pace which would otherwise cause a large difference in the Euclidean Distance approach.\nIn practice, a model similar to that proposed by ((Hou et al. 2019)) would be used as the basis to encode (or even discriminate) between pairwise samples of different FA approaches or against a non-AD control, as per highlighted in the section above.\nAs input features Of course, the raw FA embedding can be used as an input feature. There are less prior work on this front as this project would be, as far as we know, proposing the use of forced aligner outputs as a feature input heuristic.\nReferences Antonsson, Malin, Kristina Lundholm Fors, Marie Eckerström, and Dimitrios Kokkinakis. 2021. “Using a Discourse Task to Explore Semantic Ability in Persons with Cognitive Impairment.” Frontiers in Aging Neuroscience 12 (January): 607449. doi:10.3389/fnagi.2020.607449. Guo, Yue, Changye Li, Carol Roan, Serguei Pakhomov, and Trevor Cohen. 2021. “Crossing the ‘Cookie Theft’ Corpus Chasm: Applying What BERT Learns from Outside Data to the ADReSS Challenge Dementia Detection Task.” Frontiers in Computer Science 3 (April): 642517. doi:10.3389/fcomp.2021.642517. Hou, Ruibing, Hong Chang, Bingpeng Ma, Shiguang Shan, and Xilin Chen. 2019. “Cross Attention Network for Few-Shot Classification.” Advances in Neural Information Processing Systems 32. Lindsay, Hali, Johannes Tröger, and Alexandra König. 2021. “Language Impairment in Alzheimer’s Disease—Robust and Explainable Evidence for AD-Related Deterioration of Spontaneous Speech through Multilingual Machine Learning.” Frontiers in Aging Neuroscience 13 (May): 642033. doi:10.3389/fnagi.2021.642033. Saz, Oscar, Javier Simón, W Ricardo Rodr\\’ıguez, Eduardo Lleida, and Carlos Vaquero. 2009. 
“Analysis of Acoustic Features in Speakers with Cognitive Disorders and Speech Impairments.” Eurasip Journal on Advances in Signal Processing 2009. Springer: 1–11. Shah, Zehra, Jeffrey Sawalha, Mashrura Tasnim, Shi-ang Qi, Eleni Stroulia, and Russell Greiner. 2021. “Learning Language and Acoustic Models for Identifying Alzheimer’s Dementia from Speech.” Frontiers in Computer Science 3 (February): 624659. doi:10.3389/fcomp.2021.624659. Tao, Ye, Li Xueqing, and Wu Bian. 2010. “A Dynamic Alignment Algorithm for Imperfect Speech and Transcript.” Computer Science and Information Systems 7 (1): 75–84. doi:10.2298/CSIS1001075T. Wang, Tianqi, Chongyuan Lian, Jingshen Pan, Quanlei Yan, Feiqi Zhu, Manwa L. Ng, Lan Wang, and Nan Yan. 2019. “Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese.” In Interspeech 2019, 3880–84. ISCA. doi:10.21437/Interspeech.2019-2414. ","html":"\u003ch2 id=\"abstract\"\u003eAbstract\u003c/h2\u003e\n\u003cp\u003eAlzheimer\u0026rsquo;s Disease (AD) is a demonstrativeness disease marked by declines in cognitive function. Despite early diagnoses being critical for AD prognosis and treatment, currently accepted diagnoses mechanisms for AD requires clinical outpatient testing with a medical professional, which reduces its accessibility. In this work, we propose a possible feature extraction mechanism leveraging the previously demonstrated errors of Hidden Markov-based forced alignment (FA) tools upon cognitively impaired patients as an automated means to quantify linguistic disfluency.\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eAnnotated linguistic disfluency features, used in combination with semantic features, have been shown ((\u003ca href=\"#citeproc_bib_item_1\"\u003eAntonsson et al. 2021\u003c/a\u003e)) to improve the accuracy of AD classification systems. 
However, manual annotation of disfluency hinders the throughput of AD detection systems. Furthermore, there is a dearth ((\u003ca href=\"#citeproc_bib_item_2\"\u003eGuo et al. 2021\u003c/a\u003e)) of data provided with preexisting annotated results.\u003c/p\u003e\n\u003cp\u003eExisting acoustic-only approaches ((\u003ca href=\"#citeproc_bib_item_4\"\u003eLindsay, Tröger, and König 2021\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_6\"\u003eShah et al. 2021\u003c/a\u003e)) frequently places focus on the actual speech features such as silence, energy, rate, or loudness. While this approach has returned promising results ((\u003ca href=\"#citeproc_bib_item_8\"\u003eWang et al. 2019\u003c/a\u003e)), it renders the acoustic data features extracted independent of actual linguistic disfluency. Of course, some approaches (including that in (\u003ca href=\"#citeproc_bib_item_8\"\u003eWang et al. 2019\u003c/a\u003e)) perform separate, manual annotation on both aspects and treat them jointly with late fusion. However, no existing approaches have an effective feature representation that bridges the acoustic-linguistic gap.\u003c/p\u003e\n\u003cp\u003eAn incidental effect of Hidden Markov Model (HMM) based Viterbi forced alignment (FA) tools (such as P2FA) is that its quality is shown ((\u003ca href=\"#citeproc_bib_item_5\"\u003eSaz et al. 2009\u003c/a\u003e)) to be lowered in cognitively impaired speakers, resulting from a roughly \\(50\\%\\) decrease in power of discrimination between stressed and unstressed vowels. Other ASR and FA approaches ((\u003ca href=\"#citeproc_bib_item_7\"\u003eTao, Xueqing, and Bian 2010\u003c/a\u003e)) has since been designed discriminate against such changes more effectively.\u003c/p\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eBy encoding FA results of HMM based approaches in embedding space, we introduce a novel feature representation of acoustic information. 
As FA requires an existing transcript, this method is considered semi-automated because the test must be either administered via a common-transcript, transcribed manually later, or transcribed using ASR techniques. After encoding, the proposed feature can be used in a few ways.\u003c/p\u003e\n\u003ch3 id=\"euclidean-distance\"\u003eEuclidean distance\u003c/h3\u003e\n\u003cp\u003eThe Euclidean Distance approach compares the embedding of the HMM FA vector with a \u0026ldquo;reference\u0026rdquo; benchmark via pythagoras in high dimension.\u003c/p\u003e\n\u003cp\u003eThere are two possible modalities by which the \u0026ldquo;reference\u0026rdquo; can be acquired; if the data was sourced via the patient sample reading a standardized transcript, a reference FA sample could be provided via the audio of another individual reading the same transcript screened traditionally screened without AD. Therefore, the \u0026ldquo;deviation from reference\u0026rdquo; would be used as an input feature group to any proposed model architectures.\u003c/p\u003e\n\u003cp\u003eAlternatively, as stated before, other FA approaches are less susceptible to lexical hindrances with decreased discriminatory power. Therefore, we could equally take the Euclidean distance between embedded results of two different FA mechanisms\u0026mdash;one shown to be more sustainable to cognitively impaired speakers and one not\u0026mdash;as input features to training architectures.\u003c/p\u003e\n\u003ch3 id=\"cross-attention\"\u003eCross-Attention\u003c/h3\u003e\n\u003cp\u003eOne key issue with the Euclidean Distance approach is that the difference between \u0026ldquo;normal\u0026rdquo; pauses, changes in speaker pace, etc. which would be variable between different speakers even controlling for AD prognoses.\u003c/p\u003e\n\u003cp\u003eIn computer vision, few-shot classification cross-attention ((\u003ca href=\"#citeproc_bib_item_3\"\u003eHou et al. 
2019\u003c/a\u003e)) has shown promising results in discrimination; furthermore, trainable cross-attention ensures more flexible control to non-prognostic verbal disturbances such as a normal change in pace which would otherwise cause a large difference in the Euclidean Distance approach.\u003c/p\u003e\n\u003cp\u003eIn practice, a model similar to that proposed by ((\u003ca href=\"#citeproc_bib_item_3\"\u003eHou et al. 2019\u003c/a\u003e)) would be used as the basis to encode (or even discriminate) between pairwise samples of different FA approaches or against a non-AD control, as per highlighted in the section above.\u003c/p\u003e\n\u003ch3 id=\"as-input-features\"\u003eAs input features\u003c/h3\u003e\n\u003cp\u003eOf course, the raw FA embedding can be used as an input feature. There are less prior work on this front as this project would be, as far as we know, proposing the use of forced aligner outputs as a feature input heuristic.\u003c/p\u003e\n\u003ch2 id=\"references\"\u003eReferences\u003c/h2\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eAntonsson, Malin, Kristina Lundholm Fors, Marie Eckerström, and Dimitrios Kokkinakis. 2021. “Using a Discourse Task to Explore Semantic Ability in Persons with Cognitive Impairment.” \u003ci\u003eFrontiers in Aging Neuroscience\u003c/i\u003e 12 (January): 607449. doi:\u003ca href=\"https://doi.org/10.3389/fnagi.2020.607449\"\u003e10.3389/fnagi.2020.607449\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eGuo, Yue, Changye Li, Carol Roan, Serguei Pakhomov, and Trevor Cohen. 2021. 
“Crossing the ‘Cookie Theft’ Corpus Chasm: Applying What BERT Learns from Outside Data to the ADReSS Challenge Dementia Detection Task.” \u003ci\u003eFrontiers in Computer Science\u003c/i\u003e 3 (April): 642517. doi:\u003ca href=\"https://doi.org/10.3389/fcomp.2021.642517\"\u003e10.3389/fcomp.2021.642517\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eHou, Ruibing, Hong Chang, Bingpeng Ma, Shiguang Shan, and Xilin Chen. 2019. “Cross Attention Network for Few-Shot Classification.” \u003ci\u003eAdvances in Neural Information Processing Systems\u003c/i\u003e 32.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eLindsay, Hali, Johannes Tröger, and Alexandra König. 2021. “Language Impairment in Alzheimer’s Disease—Robust and Explainable Evidence for AD-Related Deterioration of Spontaneous Speech through Multilingual Machine Learning.” \u003ci\u003eFrontiers in Aging Neuroscience\u003c/i\u003e 13 (May): 642033. doi:\u003ca href=\"https://doi.org/10.3389/fnagi.2021.642033\"\u003e10.3389/fnagi.2021.642033\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eSaz, Oscar, Javier Simón, W Ricardo Rodr\\’ıguez, Eduardo Lleida, and Carlos Vaquero. 2009. “Analysis of Acoustic Features in Speakers with Cognitive Disorders and Speech Impairments.” \u003ci\u003eEurasip Journal on Advances in Signal Processing\u003c/i\u003e 2009. Springer: 1–11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eShah, Zehra, Jeffrey Sawalha, Mashrura Tasnim, Shi-ang Qi, Eleni Stroulia, and Russell Greiner. 2021. “Learning Language and Acoustic Models for Identifying Alzheimer’s Dementia from Speech.” \u003ci\u003eFrontiers in Computer Science\u003c/i\u003e 3 (February): 624659. 
doi:\u003ca href=\"https://doi.org/10.3389/fcomp.2021.624659\"\u003e10.3389/fcomp.2021.624659\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eTao, Ye, Li Xueqing, and Wu Bian. 2010. “A Dynamic Alignment Algorithm for Imperfect Speech and Transcript.” \u003ci\u003eComputer Science and Information Systems\u003c/i\u003e 7 (1): 75–84. doi:\u003ca href=\"https://doi.org/10.2298/CSIS1001075T\"\u003e10.2298/CSIS1001075T\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eWang, Tianqi, Chongyuan Lian, Jingshen Pan, Quanlei Yan, Feiqi Zhu, Manwa L. Ng, Lan Wang, and Nan Yan. 2019. “Towards the Speech Features of Mild Cognitive Impairment: Universal Evidence from Structured and Unstructured Connected Speech of Chinese.” In \u003ci\u003eInterspeech 2019\u003c/i\u003e, 3880–84. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2019-2414\"\u003e10.21437/Interspeech.2019-2414\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhdementiabank_acoustics_project_proposal/","tags":null,"title":"Forced-Alignment Error for Feature Extraction for Acoustic AD Detection"},{"categories":null,"contents":"fork creates a second process that is an exact clone from the first.\nThe original process is called the parent, the child process is called the child. The child comes in at the next instruction after fork. This means that fork calls once, returns twice. After fork, the execution order between both processes is completely up to the OS. After fork, we cannot assume execution order.\nFork\u0026rsquo;s return value is different between parent and child:\nin parent, fork will return the PID of the child process in the child, fork will return \\(0\\), you can get PID by calling getpid, and get parent ID through getppid. 
if its \\(-1\\), something failed things that are duplicated fire descriptor table mapped memory regions (both stack and heap) shell a shell forks off a child to run the command.\nwhile (true) { char *command = { \u0026#34;ls\u0026#34;, \u0026#34;things\u0026#34; }; pid_t child_pid = fork(); if (!child_pid) { // this is the child; execvp will check PATH for you execvp(command.argv[0], command.argv); // if we got here, the PID didn\u0026#39;t do well throw STSHException(string(command.argv[0])+\u0026#34;: not found or didn\u0026#39;t succeed to fork.\u0026#34;); } waitpid(child_pid); // do cleanup } This is because the act of running a subprogram from a program requires taking over the current PID with a different program. If we don\u0026rsquo;t fork, once the takeover happens, we don\u0026rsquo;t have a shell anymore.\nexecvp execvp takes over the current PID with another executable.\nint execvp(const char *path, char *argv[]); if execvp works, obviously it never returns. If it is unsuccessful, it returns -1.\nThe arguments list have to BEGIN WITH EXECUTABLE NAME and END WITH NULL.\nchar *args[] = { \u0026#34;/bin/ls\u0026#34;, \u0026#34;-l\u0026#34;, \u0026#34;~/hewo\u0026#34;, NULL }; execvp(args[0], args); This is how we run other programs. After this happens, recall that the process is the SAME PROCESS, so we can still wait on this process.\nexecvp LEAVES THE FILE DESCRIPTOR TABLE.\nwaitpid waitpid waits for a subprocess and frees information from the OS to store the information about the child process\u0026rsquo; exit code. waitpid can ONLY ALLOW YOU TO WAIT ON DIRECT CHILDREN*.\npid_t waitpid(pid_t pid, int *status, int options); pid status: pointer to store return about the child options (0 for now) if the PID has died, this returns immediately. 
Otherwise, this blocks.\nthe status int is a bitmap with a bunch of stuff, which we can check with a series of macros\nif (WIFEXISTED(status)) { // child normal exit int statuscode = WEXITSTATUS(status); } else { // abnormal exist } wait on any children If want to deal with the children as they exit, whichever one finishes first, you can write:\nint pid = waitpid(-1, ...); which will wait on any of the process\u0026rsquo; direct children, returning whichever one finishes first and returning its PID. If pid-1= and errcode is ECHILD, this means that there\u0026rsquo;s no more children to be waited on.\nwords zombie a child process which exists which hasn\u0026rsquo;t been blocked by a parent process using waitpid, where its exit code is stored and taking up resources forever.\norphan a child process whose parent exited; which the bootloader takes care of.\nfork mechanics The act of copying stack and heap sounds really really expensive. So\u0026hellip;. Whapppens?\nEach program thinks its is given all memory addresses to use; the OS maps the \u0026ldquo;virtual addresses\u0026rdquo; to the main address. So, when the fork happens, the virtual address space stays the same. The child will map the parent\u0026rsquo;s memory addresses to different physical addresses than for the parent.\nThe copies are LAZY\u0026mdash;if the child writes to an area in memory, its virtual address are mapped to different addresses. 
If no writes by the child happen, the virtual address are mapped to the same address.\nduring file reading, the file descriptors gets cloned, the underlying open file table doesn\u0026rsquo;t close.\ntypical mp pattern int main() { // fork off first child pid_t f1 = fork(); if (f1 == 0) { dispatch_1(); return 0; } // fork off the process, if // we are still in main (meaning we are not a child) pid_t f2 = fork(); if (f2 == 0) { dispatch_2(); return 0; } // this is equivalent to .join() // recall that even if f1 returns later // its ok, becasue we can let f2 be zombie for a bit waitpid(f1, NULL, 0); waitpid(f2, NULL, 0); return 0; } ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfork/\"\u003efork\u003c/a\u003e creates a second process that is an exact \u003cstrong\u003eclone\u003c/strong\u003e from the first.\u003c/p\u003e\n\u003cp\u003eThe original process is called the \u003cstrong\u003eparent\u003c/strong\u003e, the child process is called the \u003cstrong\u003echild\u003c/strong\u003e. The \u003cstrong\u003echild\u003c/strong\u003e comes in at the next instruction after fork. This means that fork \u003cstrong\u003ecalls once, returns twice\u003c/strong\u003e. 
\u003cstrong\u003eAfter \u003ccode\u003efork\u003c/code\u003e, the execution order between both processes is completely up to the OS.\u003c/strong\u003e After fork, we cannot assume execution order.\u003c/p\u003e\n\u003cp\u003eFork\u0026rsquo;s \u003cstrong\u003ereturn value\u003c/strong\u003e is different between parent and child:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ein parent, fork will return the PID of the child process\u003c/li\u003e\n\u003cli\u003ein the child, fork will return \\(0\\), you can get PID by calling \u003ccode\u003egetpid\u003c/code\u003e, and get parent ID through \u003ccode\u003egetppid\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eif its \\(-1\\), something failed\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things-that-are-duplicated\"\u003ethings that are duplicated\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efire descriptor table\u003c/li\u003e\n\u003cli\u003emapped memory regions (both stack and heap)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"shell\"\u003eshell\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#shell\"\u003eshell\u003c/a\u003e forks off a child to run the command.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;ls\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;things\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// this is the child; execvp will check PATH for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if we got here, the PID didn\u0026#39;t do well\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethrow\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eSTSHException\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;: not found or didn\u0026#39;t succeed to fork.\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do cleanup\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis is because the act of running a subprogram from a program requires \u003cstrong\u003etaking over the current PID with a different program\u003c/strong\u003e. If we don\u0026rsquo;t fork, once the takeover happens, we don\u0026rsquo;t have a shell anymore.\u003c/p\u003e\n\u003ch2 id=\"execvp\"\u003eexecvp\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#execvp\"\u003eexecvp\u003c/a\u003e takes over the current PID with another executable.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epath\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eif \u003ca href=\"#execvp\"\u003eexecvp\u003c/a\u003e works, obviously it never returns. If it is unsuccessful, it returns \u003ccode\u003e-1\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eThe arguments list have to \u003cstrong\u003eBEGIN WITH EXECUTABLE NAME\u003c/strong\u003e and \u003cstrong\u003eEND WITH NULL\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;/bin/ls\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;-l\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;~/hewo\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis is how we run other programs. After this happens, recall that the process is the \u003cstrong\u003eSAME PROCESS\u003c/strong\u003e, so we can still wait on this process.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eexecvp LEAVES THE FILE DESCRIPTOR TABLE\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"waitpid\"\u003ewaitpid\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#waitpid\"\u003ewaitpid\u003c/a\u003e waits for a subprocess and frees information from the OS to store the information about the child process\u0026rsquo; exit code. 
\u003ca href=\"#waitpid\"\u003ewaitpid\u003c/a\u003e can \u003cstrong\u003eONLY ALLOW YOU TO WAIT ON DIRECT CHILDREN*\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003epid\u003c/li\u003e\n\u003cli\u003estatus: pointer to store return about the child\u003c/li\u003e\n\u003cli\u003eoptions (0 for now)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eif the PID has died, this returns immediately. 
Otherwise, this blocks.\u003c/p\u003e\n\u003ch3 id=\"the-status-int\"\u003ethe \u003ccode\u003estatus\u003c/code\u003e int\u003c/h3\u003e\n\u003cp\u003eis a bitmap with a bunch of stuff, which we can check with a series of macros\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eWIFEXISTED\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// child normal exit\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatuscode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eWEXITSTATUS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// abnormal exist\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"wait-on-any-children\"\u003ewait on any children\u003c/h3\u003e\n\u003cp\u003eIf want to deal with the children as they exit, whichever one finishes first, you can write:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich will wait on any of the process\u0026rsquo; direct children, returning whichever one finishes first and returning its PID. 
If \u003ccode\u003epid\u003c/code\u003e-1= and errcode is ECHILD, this means that there\u0026rsquo;s no more children to be waited on.\u003c/p\u003e\n\u003ch2 id=\"words\"\u003ewords\u003c/h2\u003e\n\u003ch3 id=\"zombie\"\u003ezombie\u003c/h3\u003e\n\u003cp\u003ea child process which exists which hasn\u0026rsquo;t been blocked by a parent process using \u003ccode\u003ewaitpid\u003c/code\u003e, where its exit code is stored and taking up resources forever.\u003c/p\u003e\n\u003ch3 id=\"orphan\"\u003eorphan\u003c/h3\u003e\n\u003cp\u003ea child process whose parent exited; which the bootloader takes care of.\u003c/p\u003e\n\u003ch2 id=\"fork-mechanics\"\u003efork mechanics\u003c/h2\u003e\n\u003cp\u003eThe act of copying stack and heap sounds really really expensive. So\u0026hellip;. Whapppens?\u003c/p\u003e\n\u003cp\u003eEach program thinks its is given all memory addresses to use; the OS maps the \u0026ldquo;virtual addresses\u0026rdquo; to the main address. So, when the fork happens, the virtual address space stays the same. The child will map the parent\u0026rsquo;s memory addresses to \u003cstrong\u003edifferent\u003c/strong\u003e physical addresses than for the parent.\u003c/p\u003e\n\u003cp\u003eThe copies are \u003cstrong\u003eLAZY\u003c/strong\u003e\u0026mdash;if the child writes to an area in memory, its virtual address are mapped to different addresses. 
If no writes by the child happen, the virtual address are mapped to the same address.\u003c/p\u003e\n\u003cp\u003eduring file reading, the file descriptors gets cloned, the underlying \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e doesn\u0026rsquo;t close.\u003c/p\u003e\n\u003ch2 id=\"typical-mp-pattern\"\u003etypical mp pattern\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// fork off first child\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003edispatch_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// fork off the process, if\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// we are still in main (meaning we are not a child)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003edispatch_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// this is equivalent to .join()\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// recall that even if f1 returns later\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// its ok, becasue we can let f2 be zombie for a bit\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhfork/","tags":null,"title":"fork"},{"categories":null,"contents":"Ingredients:\n\\(\\mathcal{P}\\) problem (states, transitions, etc.) 
\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower \\(U\\) value function estimate at depth \\(d\\) We essentially roll forward into all possible next states up to depth \\(d\\), and tabulate our value function.\nDefine subroutine forward_search(depth_remaining, value_function_estimate_at_d, state).\nif depth_remaining=0; return (action=None, utility=value_function_estimate_at_d(state)) otherwise, let best = (action = None, utility = -infinity) for each possible action at our state get an action-value for our current state where the utility of each next state is the utility given by forward_search(depth_remaining-1, value_function_estimate_at_d, next_state) if the action-value is higher than what we have, then we set best=(a, action-value) return best What this essentially does is to Dijkstra an optimal path towards the highest final utility \\(U(s)\\) om your current state, by trying all states.\n","html":"\u003cp\u003eIngredients:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003cli\u003e\\(U\\) \u003ca href=\"/posts/kbhaction_value_function/#value-function--kbhaction-value-function-dot-md\"\u003evalue function\u003c/a\u003e estimate at depth \\(d\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe essentially roll forward into all possible next states up to depth \\(d\\), and tabulate our \u003ca href=\"/posts/kbhaction_value_function/#value-function--kbhaction-value-function-dot-md\"\u003evalue function\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eDefine subroutine \u003ccode\u003eforward_search(depth_remaining, value_function_estimate_at_d, state)\u003c/code\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif \u003ccode\u003edepth_remaining=0\u003c/code\u003e; return \u003ccode\u003e(action=None, 
utility=value_function_estimate_at_d(state))\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eotherwise,\n\u003col\u003e\n\u003cli\u003elet \u003ccode\u003ebest = (action = None, utility = -infinity)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003efor each possible action at our state\n\u003col\u003e\n\u003cli\u003eget an \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e for our current state where the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of each next state is the utility given by \u003ccode\u003eforward_search(depth_remaining-1, value_function_estimate_at_d, next_state)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e is higher than what we have, then we set \u003ccode\u003ebest=(a, action-value)\u003c/code\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ereturn \u003ccode\u003ebest\u003c/code\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhat this essentially does is to Dijkstra an optimal path towards the highest final utility \\(U(s)\\) om your current state, by trying all states.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhforward_search/","tags":null,"title":"Forward Search"},{"categories":null,"contents":"The Forw\n","html":"\u003cp\u003eThe Forw\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhforward_forward_algorithm/","tags":null,"title":"Forward-Forward Algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfoundational_model/","tags":null,"title":"foundational model"},{"categories":null,"contents":"Problem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. 
Also, some of them have time, some of them are frozen, etc.\nSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\nproteins can be encoded as hierarchies protein functional behavior secondary structure/primary structure amino acids sequences! Slicing through the embedding space of GenSLMs can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\n","html":"\u003cp\u003eProblem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. Also, some of them have \u003cem\u003etime\u003c/em\u003e, some of them are frozen, etc.\u003c/p\u003e\n\u003cp\u003eSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\u003c/p\u003e\n\u003ch2 id=\"proteins-can-be-encoded-as-hierarchies\"\u003eproteins can be encoded as hierarchies\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eprotein functional behavior\u003c/li\u003e\n\u003cli\u003esecondary structure/primary structure\u003c/li\u003e\n\u003cli\u003eamino acids\u003c/li\u003e\n\u003cli\u003esequences!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSlicing through the embedding space of \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis-1/","tags":null,"title":"Foundational Models of Interaction Analysis"},{"categories":null,"contents":"Problem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. 
Also, some of them have time, some of them are frozen, etc.\nSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\nstory 1: proteins can be encoded as hierarchies protein functional behavior secondary structure/primary structure amino acids sequences! Slicing through the embedding space of GenSLMs can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\nstory 2: tetrahedron tessellations and finite-element methods to analyze dynamics behavior Resolving cyro-EM dynamics to be able to capture binding behavior\nANCA-AE\napplications training GenSLMs can help identify covid variantts ","html":"\u003cp\u003eProblem: end-to-end analysis of biological interactions at all timescales is hard; womp womp. No relationship explicitly between sequence, crystallography, md, etc. Also, some of them have \u003cem\u003etime\u003c/em\u003e, some of them are frozen, etc.\u003c/p\u003e\n\u003cp\u003eSolution: use ML to glue multiple scales\u0026rsquo; analysis together, using ML to\u003c/p\u003e\n\u003ch2 id=\"story-1-proteins-can-be-encoded-as-hierarchies\"\u003estory 1: proteins can be encoded as hierarchies\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eprotein functional behavior\u003c/li\u003e\n\u003cli\u003esecondary structure/primary structure\u003c/li\u003e\n\u003cli\u003eamino acids\u003c/li\u003e\n\u003cli\u003esequences!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSlicing through the embedding space of \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e can be used to identify these larger scale things from just the sequence by looking at the \u0026ldquo;general area\u0026rdquo; it exists in the latest space.\u003c/p\u003e\n\u003ch2 id=\"story-2-tetrahedron-tessellations-and-finite-element-methods-to-analyze-dynamics-behavior\"\u003estory 2: tetrahedron tessellations and finite-element methods to analyze dynamics 
behavior\u003c/h2\u003e\n\u003cp\u003eResolving \u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e dynamics to be able to capture binding behavior\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhanca_ae/\"\u003eANCA-AE\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"applications\"\u003eapplications\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etraining \u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e can help identify covid variantts\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis/","tags":null,"title":"Foundational Models of Interaction Analysis"},{"categories":null,"contents":"For vector \\(v\\) in the span of orthogonal basis \\(v_1, ..v_{n}\\):\n\\begin{equation} v = c_1 v_1 + \\dots + c_{n} v_{n} \\end{equation}\nwe can write:\n\\begin{equation} c_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot v_{j}} \\end{equation}\nProof:\n\\begin{equation} \\langle v, v_{j} \\rangle = c_{n} \\langle v_{1}, v_{j} \\rangle \\dots \\end{equation}\nwhich is \\(0\\) for all cases that\u0026rsquo;s not \\(\\langle v_{j}, v_{j} \\rangle\\) as the \\(v\\) are orthogonal, and \\(\\mid v_{j} \\mid^{2}\\) for the case where it is.\nHence, we see that:\n\\begin{equation} \\langle v, v_{j} \\rangle = c_{j} \\mid v_{j}\\mid^{2} \\end{equation}\nWhich gives:\n\\begin{equation} c_{j} = \\frac{\\langle v,v_{j} \\rangle}{\\mid v_{j}\\mid^{2}} \\end{equation}\nas desired.\n","html":"\u003cp\u003eFor vector \\(v\\) in the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e basis \\(v_1, ..v_{n}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = c_1 v_1 + \\dots + c_{n} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot 
v_{j}}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, v_{j} \\rangle = c_{n} \\langle v_{1}, v_{j} \\rangle \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is \\(0\\) for all cases that\u0026rsquo;s not \\(\\langle v_{j}, v_{j} \\rangle\\) as the \\(v\\) are orthogonal, and \\(\\mid v_{j} \\mid^{2}\\) for the case where it is.\u003c/p\u003e\n\u003cp\u003eHence, we see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, v_{j} \\rangle = c_{j} \\mid v_{j}\\mid^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{j} = \\frac{\\langle v,v_{j} \\rangle}{\\mid v_{j}\\mid^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlearn_more/","tags":null,"title":"Fourier formula"},{"categories":null,"contents":"Fourier Series and how to find them.\nFor a function given at some interval of length \\(l\\), then the function can be written at:\n\\begin{equation} f(x) = \\sum_{k=1}^{\\infty} a_{k} \\sin \\qty( \\frac{k\\pi x}{l}) \\end{equation}\nor\n\\begin{equation} f(x) = \\sum_{k=1}^{\\infty} b_{k} \\cos \\qty( \\frac{k\\pi x}{l}) \\end{equation}\nRecall that because sin and cos are even and odd parts, the functions above force an even and oddness to your expansions. 
They will be particularly helpful for Dirichlet Conditions and Neumann Conditions.\nBut, in general, you can use a linear combination of the two by doubling the frequency over your interval \\(l\\):\n\\begin{equation} f(x) = a_0 + \\sum_{k=1}^{\\infty} \\qty( a_{k} \\cos(k \\omega x) + b_{k} \\sin(k \\omega x)) \\end{equation}\nwhere \\(\\omega = \\frac{2\\pi}{L}\\).\nStatement Suppose we have a function that satisfies:\nRecall that each \\(\\cos(\\dots)\\) and \\(\\sin (\u0026hellip;)\\) are orthogonal, we can then use the Fourier formula to figure the coefficients \\(a_{k}\\), \\(b_{k}\\).\nAside: why is \\(a_0\\) also orthogonal?\n\\begin{equation} a_0 = a_0 \\cos (0 \\omega x) = a_0 \\cdot 1 = a_0 \\end{equation}\nGeneral Fourier Decomposition Therefore, by the Fourier formula, we expect that:\n\\begin{equation} a_0 = \\frac{\\langle f, 1 \\rangle}{ \\langle 1,1 \\rangle} = \\frac{1}{L} \\int_{0}^{L} f(x) \\dd{x} \\end{equation}\n\\begin{equation} a_{k} = \\frac{\\langle f, \\cos (k\\omega x) \\rangle}{\\langle \\cos (k\\omega x), \\cos (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\cos (k\\omega x) \\dd{x} \\end{equation}\n\\begin{equation} b_{k} = \\frac{\\langle f, \\sin (k\\omega x) \\rangle}{\\langle \\sin (k\\omega x), \\sin (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\sin (k\\omega x) \\dd{x} \\end{equation}\nWhen computing this, recall that:\n\\begin{equation} \\omega = \\frac{2\\pi}{L} \\end{equation}\nwhere \\(L\\) the period of your \\(L\\) periodic function.\nOdd and Even Break If you have an even or odd \\(f(x)\\), we can refine the series even more into simply a sine or cosine only series.\nFor even \\(f(x)\\), we can write:\n\\begin{equation} a_{0} + \\sum_{k=1}^{\\infty} a_{k} \\cos (k\\omega x) \\end{equation}\nwhere:\n\\begin{equation} a_0 = \\frac{1}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\dd{x} \\end{equation}\n\\begin{equation} a_{k} = \\frac{2}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\cos (k \\omega x) \\dd{x} 
\\end{equation}\nWhereas for odd \\(f(x)\\), we write:\n\\begin{equation} \\sum_{k=1}^{\\infty} b_{k} \\sin (k\\omega x) \\end{equation}\n\\begin{equation} b_{k} = \\frac{2}{L / 2} \\int_{0}^{L / 2} f(x) \\sin (k\\omega x) \\dd{x} \\end{equation}\nover any function Suppose we have a function with two roots:\n\\begin{equation} f(0) = 0 = f(l) \\end{equation}\nthen, we can write it in terms of a Fourier Series by odd-extending the function to the negative direction (see \u0026ldquo;odd extensions below\u0026rdquo;).\nThis makes us be able to write \\(f\\) over \\([0,l]\\) as:\n\\begin{equation} f(x) = \\sum_{n=1}^{\\infty} b_{n} \\sin \\qty( \\frac{n\\pi}{l} x) \\end{equation}\nwhere:\n\\begin{equation} b_{n} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{n \\pi}{l} x) \\dd{x} \\end{equation}\nthis is just the \\(l\\) extension function above, but with small \\(l\\) as the function is odd to one side.\nHere\u0026rsquo;s the motivation:\nsin and cos are even and odd parts odd extensions\nImportant note: this function seems to vanish at endpoints \\(0\\) and \\(l\\), and not all functions do that.\nSo, instead, let\u0026rsquo;s consider the odd extension of \\(f\\):\n\\begin{equation} \\hat{f}(x) = f(x), x \\geq 0 \\end{equation}\nand\n\\begin{equation} \\hat{f}(x) = -f(-x), x \u0026lt; 0 \\end{equation}\nThere will therefore be a discontinuous jump at \\(0\\).\nUsing the \\(\\sin\\) function, which are odd, recall that Fourier Series Converges with \\(L\\) Periodic Function, so at \\(0\\) given Gibbs Phenomena, the jump will average the discontinouity down to \\(0\\) (given our extensions are odd).\neven extensions\ninstead, if you want to use \\(\\cos\\), you can make an even extension:\n\\begin{equation} \\hat{f}(x) = f(x), x \\geq 0 \\end{equation}\nand\n\\begin{equation} \\hat{f}(x) = f(-x), x \u0026lt; 0 \\end{equation}\nwhich shouldn\u0026rsquo;t be discontinuous at \\(0\\) at all.\nAdditional Informatino Fourier Series Converges with \\(L\\) Periodic 
Function Suppose \\(f(x)\\) is an \\(L\\) periodic function with at most jump discountinuty, and \\(f\u0026rsquo;\\) continuous everywhere. Then, the associated Fourier Series converges everywhere and coincides with \\(f\\) except for jump discontinuances, where the values are the average of limits from either side. This is called the Gibbs Phenomena\nBackground Fourier formula Consider some orthogonal basis \\(v_1, \u0026hellip; v_{n}\\), recall that if we have:\n\\begin{equation} v = c_1 v_1 + \\dots + c_{n} v_{n} \\end{equation}\nwe can write:\n\\begin{equation} c_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot v_{j}} \\end{equation}\n(this is similar to Writing a vector as a linear combination of orthonormal basis, but recall the \\(v_{j}\\) are orthogonal and not orthonormal, so we have to divide by the square of the norm of \\(v\\). learn more)\ninner product of \\(L\\) periodic functions For \\(f,g : [0,L] \\to \\mathbb{R}\\), which are L-periodic, we write:\n\\begin{equation} \\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x} \\end{equation}\nproperties worth noting for continuous functions \\([0,L]\\) \\(g_1, g_2, h_1, h_2, g, h\\), the inner product rules hold, which gives\n\\(\\langle c_1 g_1 + c_2 g_2, h \\rangle = c_1 \\langle g_1, h \\rangle + c_2 \\langle g_2, h \\rangle\\) \\(\\langle g, c_1h_1 + c_2h_2 \\rangle = c_1 \\langle g, h_1 \\rangle + c_2 \\langle g, h_2 \\rangle\\) \\(\\langle h,g \\rangle = \\langle g,h \\rangle\\), as the functions are over the reals \\(\\langle g,g \\rangle\\) is zero only when \\(g\\) is zero \\(L\\) periodic sinusoids are orthogonal Recall that we have two basic L-periodic sinusoids:\n\\(\\sin \\qty(\\frac{2\\pi k }{L}x)\\) \\(\\cos \\qty(\\frac{2\\pi k }{L}x)\\) Let\u0026rsquo;s write:\n\\begin{equation} \\omega = \\frac{2\\pi}{L} \\end{equation}\nthen, for any distinct integer \\(k_1 \\neq k_2, k_1, k_2 \u0026gt; 0\\), we see that:\n\\begin{equation} \\int_{0}^{L} \\cos (k_1 \\omega x) \\cos (k_2 
\\omega x) = \\int_{0}^{L} \\sin (k_1 \\omega x) \\sin (k_2 \\omega x) = 0 \\end{equation}\nand further for any \\(k\\):\n\\begin{equation} \\int_{0}^{L} \\cos (k_1 \\omega x) \\sin (k_2 \\omega x) = 0 \\end{equation}\nMeaning, every pair of \\(\\langle \\{\\sin, \\cos\\} (k_1 \\omega x), \\{\\sin, \\cos\\} (k_1 \\omega x) \\rangle\\) are orthogonal.\nFurther, for the same \\(k\\),\n\\begin{equation} \\langle \\cos (k\\omega x), \\cos (k \\omega x) \\rangle = \\langle \\sin (k\\omega x), \\sin (k \\omega x) \\rangle = \\frac{L}{2} \\end{equation}\nin partiular:\n\\begin{equation} \\int_{0}^{\\frac{L}{2}} \\sin (k \\omega x) \\sin (k \\omega x) = \\frac{L}{4} \\end{equation}\nif \\(k\\) are equal.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e and how to find them.\u003c/p\u003e\n\u003cp\u003eFor a function given at some interval of length \\(l\\), then the function can be written at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{k=1}^{\\infty} a_{k} \\sin \\qty( \\frac{k\\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{k=1}^{\\infty} b_{k} \\cos \\qty( \\frac{k\\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that because \u003ca href=\"#sin-and-cos-are-even-and-odd-parts\"\u003esin and cos are even and odd parts\u003c/a\u003e, the functions above force an even and oddness to your expansions. 
They will be particularly helpful for \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e and \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBut, in general, you can use a linear combination of the two by doubling the frequency over your interval \\(l\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = a_0 + \\sum_{k=1}^{\\infty} \\qty( a_{k} \\cos(k \\omega x) + b_{k} \\sin(k \\omega x))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\omega = \\frac{2\\pi}{L}\\).\u003c/p\u003e\n\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSuppose we have a function that satisfies:\u003c/p\u003e\n\u003cp\u003eRecall that each \\(\\cos(\\dots)\\) and \\(\\sin (\u0026hellip;)\\) are \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e, we can then use the \u003ca href=\"/posts/kbhlearn_more/\"\u003eFourier formula\u003c/a\u003e to figure the coefficients \\(a_{k}\\), \\(b_{k}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: why is \\(a_0\\) also orthogonal?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_0 = a_0 \\cos (0 \\omega x) = a_0 \\cdot 1 = a_0\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"general-fourier-decomposition\"\u003eGeneral Fourier Decomposition\u003c/h3\u003e\n\u003cp\u003eTherefore, by the \u003ca href=\"/posts/kbhlearn_more/\"\u003eFourier formula\u003c/a\u003e, we expect that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_0 = \\frac{\\langle f, 1 \\rangle}{ \\langle 1,1 \\rangle} = \\frac{1}{L} \\int_{0}^{L} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\frac{\\langle f, \\cos (k\\omega x) \\rangle}{\\langle \\cos (k\\omega x), \\cos (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\cos (k\\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{k} = \\frac{\\langle f, \\sin 
(k\\omega x) \\rangle}{\\langle \\sin (k\\omega x), \\sin (k\\omega x) \\rangle} = \\frac{2}{L} \\int_{0}^{L} f(x) \\sin (k\\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhen computing this, recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\omega = \\frac{2\\pi}{L}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(L\\) the period of your \\(L\\) periodic function.\u003c/p\u003e\n\u003ch3 id=\"odd-and-even-break\"\u003eOdd and Even Break\u003c/h3\u003e\n\u003cp\u003eIf you have an even or odd \\(f(x)\\), we can refine the series even more into simply a sine or cosine only series.\u003c/p\u003e\n\u003cp\u003eFor even \\(f(x)\\), we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{0} + \\sum_{k=1}^{\\infty} a_{k} \\cos (k\\omega x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_0 = \\frac{1}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\frac{2}{L / 2} \\int_{0}^{\\frac{L}{2}} f(x) \\cos (k \\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhereas for odd \\(f(x)\\), we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{k=1}^{\\infty} b_{k} \\sin (k\\omega x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{k} = \\frac{2}{L / 2} \\int_{0}^{L / 2} f(x) \\sin (k\\omega x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"over-any-function\"\u003eover any function\u003c/h3\u003e\n\u003cp\u003eSuppose we have a function with two roots:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(0) = 0 = f(l)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethen, we can write it in terms of a \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e by odd-extending the function to the negative direction (see \u0026ldquo;odd extensions below\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eThis makes us be able to write \\(f\\) over \\([0,l]\\) 
as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{n=1}^{\\infty} b_{n} \\sin \\qty( \\frac{n\\pi}{l} x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{n} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{n \\pi}{l} x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is just the \\(l\\) extension function above, but with small \\(l\\) as the function is odd to one side.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s the motivation:\u003c/p\u003e\n\u003ch4 id=\"sin-and-cos-are-even-and-odd-parts\"\u003esin and cos are even and odd parts\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eodd extensions\u003c/p\u003e\n\u003cp\u003eImportant note: this function seems to vanish at endpoints \\(0\\) and \\(l\\), and not all functions do that.\u003c/p\u003e\n\u003cp\u003eSo, instead, let\u0026rsquo;s consider the odd extension of \\(f\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = f(x), x \\geq 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = -f(-x), x \u0026lt; 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThere will therefore be a discontinuous jump at \\(0\\).\u003c/p\u003e\n\u003cp\u003eUsing the \\(\\sin\\) function, which are odd, recall that \u003ca href=\"#fourier-series-converges-with-l-periodic-function\"\u003eFourier Series Converges with \\(L\\) Periodic Function\u003c/a\u003e, so at \\(0\\) given \u003ca href=\"#fourier-series-converges-with-l-periodic-function\"\u003eGibbs Phenomena\u003c/a\u003e, the jump will average the discontinouity down to \\(0\\) (given our extensions are odd).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eeven extensions\u003c/p\u003e\n\u003cp\u003einstead, if you want to use \\(\\cos\\), you can make an even 
extension:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = f(x), x \\geq 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(x) = f(-x), x \u0026lt; 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich shouldn\u0026rsquo;t be discontinuous at \\(0\\) at all.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-informatino\"\u003eAdditional Informatino\u003c/h2\u003e\n\u003ch3 id=\"fourier-series-converges-with-l-periodic-function\"\u003eFourier Series Converges with \\(L\\) Periodic Function\u003c/h3\u003e\n\u003cp\u003eSuppose \\(f(x)\\) is an \\(L\\) periodic function with at most jump discountinuty, and \\(f\u0026rsquo;\\) continuous everywhere. Then, the associated \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e converges everywhere and coincides with \\(f\\) except for jump discontinuances, where the values are the average of limits from either side. This is called the \u003ca href=\"#fourier-series-converges-with-l-periodic-function\"\u003eGibbs Phenomena\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003ch3 id=\"fourier-formula--kbhlearn-more-dot-md\"\u003e\u003ca href=\"/posts/kbhlearn_more/\"\u003eFourier formula\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eConsider some \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e basis \\(v_1, \u0026hellip; v_{n}\\), recall that if we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = c_1 v_1 + \\dots + c_{n} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{j} = \\frac{v \\cdot v_{j}}{ v_{j} \\cdot v_{j}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(this is similar to \u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003eWriting a vector as a linear combination of orthonormal basis\u003c/a\u003e, but recall the 
\\(v_{j}\\) are \u003cstrong\u003eorthogonal\u003c/strong\u003e and not \u003cstrong\u003eorthonormal\u003c/strong\u003e, so we have to divide by the square of the norm of \\(v\\). \u003ca href=\"/posts/kbhlearn_more/\"\u003elearn more\u003c/a\u003e)\u003c/p\u003e\n\u003ch3 id=\"inner-product-of-l-periodic-functions--kbhinner-product-dot-md\"\u003e\u003ca href=\"/posts/kbhinner_product/#inner-product-of-l-periodic-functions\"\u003einner product of \\(L\\) periodic functions\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \\(f,g : [0,L] \\to \\mathbb{R}\\), which are \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e, we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"properties-worth-noting\"\u003eproperties worth noting\u003c/h4\u003e\n\u003cp\u003efor continuous functions \\([0,L]\\) \\(g_1, g_2, h_1, h_2, g, h\\), the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e rules hold, which gives\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\langle c_1 g_1 + c_2 g_2, h \\rangle = c_1 \\langle g_1, h \\rangle + c_2 \\langle g_2, h \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle g, c_1h_1 + c_2h_2 \\rangle = c_1 \\langle g, h_1 \\rangle + c_2 \\langle g, h_2 \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle h,g \\rangle = \\langle g,h \\rangle\\), as the functions are over the reals\u003c/li\u003e\n\u003cli\u003e\\(\\langle g,g \\rangle\\) is zero only when \\(g\\) is zero\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"l-periodic-sinusoids-are-orthogonal\"\u003e\\(L\\) periodic sinusoids are orthogonal\u003c/h4\u003e\n\u003cp\u003eRecall that we have two basic \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e sinusoids:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\sin \\qty(\\frac{2\\pi k 
}{L}x)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\cos \\qty(\\frac{2\\pi k }{L}x)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet\u0026rsquo;s write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\omega = \\frac{2\\pi}{L}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethen, for any distinct integer \\(k_1 \\neq k_2, k_1, k_2 \u0026gt; 0\\), we see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{L} \\cos (k_1 \\omega x) \\cos (k_2 \\omega x) = \\int_{0}^{L} \\sin (k_1 \\omega x) \\sin (k_2 \\omega x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand further for any \\(k\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{L} \\cos (k_1 \\omega x) \\sin (k_2 \\omega x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, every pair of \\(\\langle \\{\\sin, \\cos\\} (k_1 \\omega x), \\{\\sin, \\cos\\} (k_1 \\omega x) \\rangle\\) are orthogonal.\u003c/p\u003e\n\u003cp\u003eFurther, for the same \\(k\\),\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle \\cos (k\\omega x), \\cos (k \\omega x) \\rangle = \\langle \\sin (k\\omega x), \\sin (k \\omega x) \\rangle = \\frac{L}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein partiular:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{0}^{\\frac{L}{2}} \\sin (k \\omega x) \\sin (k \\omega x) = \\frac{L}{4}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(k\\) are equal.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfourier_series/","tags":null,"title":"Fourier Series"},{"categories":null,"contents":"requirements Consider a function that has no periodicity, but that:\n\\begin{equation} f(x), -\\infty \u0026lt; x \u0026lt; \\infty \\end{equation}\nAnd assume that:\n\\begin{equation} \\int_{\\infty}^{\\infty} |f(x)| \\dd{x}, \u0026lt; \\infty \\end{equation}\nimportant: look up! 
the integral of \\(f(x)\\) has to converge AND this means that the \\(f(x)\\) goes to \\(0\\) actually at boundaries.\n(meaning the function decays as you go towards the end)\ndefinition a Fourier transform is an invertible transformation:\n\\begin{equation} f(x) \\to \\hat{f}(\\lambda) \\end{equation}\nand\n\\begin{equation} \\hat{f}(\\lambda) \\to f(x) \\end{equation}\nWhere,\n\\begin{equation} \\hat{f}(\\lambda) = \\int_{-\\infty}^{\\infty } e^{-i\\lambda x} f(x) \\dd{x} \\end{equation}\n\\begin{equation} f(x) = \\frac{1}{2\\pi} \\int_{\\infty}^{-\\infty} e^{ix\\lambda} \\hat{f}(\\lambda) \\dd{\\lambda} \\end{equation}\nWe sometimes write:\n\\begin{equation} \\hat{f}(\\lambda) = \\mathcal{F}(f)(\\lambda) \\end{equation}\nwhere \\(\\mathcal{F}\\) is an invertible map that gives you the Fourier Series.\nadditional information Properties of \\(\\mathcal{F}\\) it\u0026rsquo;s a Linear Map: \\(\\mathcal{F}(c_1 f_1 + c_2 f_2) = c_1\\mathcal{F}(f_1) + c_2 \\mathcal{F}(f_2)\\) it\u0026rsquo;s recenter able: \\(\\mathcal{F}(f(x+c)) = e^{i c \\lambda}\\mathcal{F}(f)(\\lambda)\\) it\u0026rsquo;s reverse-shift-able: \\(\\mathcal{F}\\qty(e^{i \\lambda_{0} x} f(x)) = \\mathcal{F}(f) (\\lambda -\\lambda_{0})\\) Proof:\nbecause integrals are linear \\(\\int_{-\\infty}^{\\lambda} e^{-i(t-c)\\lambda}f(t) \\dd{t} = e^{ic\\lambda} \\mathcal{F}(f)(\\lambda)\\), where we define \\(t = x+c\\) try it Derivative of Fourier Transform Suppose we want:\n\\begin{equation} \\mathcal{F}(f\u0026rsquo;(x)) = \\int_{\\infty}^{\\infty} e^{-ix\\lambda} f\u0026rsquo;(x) \\dd{x} = \\left e^{-ix\\lambda} f(x)\\right|_{-\\infty}^{\\infty} + i \\lambda \\int_{-\\infty}^{\\infty} e^{-ix\\lambda} f(x) \\dd{x} = i \\lambda \\mathcal{F}(f) (\\lambda) \\end{equation}\nBecause we are guaranteed \\(f(x)\\) evaluated at infinity is \\(0\\), the first term drops out. 
The important conclusion here: *Fourier transforms change a derivative into ALGEBRA of multiplying by \\(i\\lambda\\).\nConsider also:\n\\begin{equation} \\mathcal{F}(x f(x)) = i \\dv \\lambda \\mathcal{F}(f)(\\lambda) \\end{equation}\nyou can show this in a similar way, by attempting to distribute a \\(\\dv \\lambda\\) into the Fourier transform and showing that they are equal.\nFourier Transform of a Gaussian \\begin{equation} \\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = \\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}} \\end{equation}\nand:\n\\begin{equation} \\mathcal{F}^{-1}\\qty(e^{-a\\frac{\\lambda^{2}}{2}}) = \\frac{e^{-\\frac{x^{2}}{2a}}}{\\sqrt{2\\pi a}} \\end{equation}\nwe obtain this:\n\\begin{equation} u = e^{-\\frac{x^{2}}{2}} \\end{equation}\nand:\n\\begin{equation} \\dv{u}{x} = -xe^{-\\frac{x^{2}}{2}} = -xu \\end{equation}\nand if we took a Fourier transform on both sides, we obtain:\n\\begin{equation} \\mathcal{F}\\qty(\\dv{u}{x} + xu) = 0 = i \\lambda \\hat{u} + i \\pdv{\\hat{u}} = 0 \\end{equation}\nand note that this is the same equation. Meaning:\n\\begin{equation} \\mathcal{F}\\qty(\\dv{u}{x} + xu) = \\dv{\\lambda}{x} + \\lambda u \\end{equation}\nthis gives:\n\\begin{equation} \\mathcal{F}(u) = Cu \\end{equation}\nwhich is what we see.\nLook! 
A table where:\n\\begin{equation} \\Lambda_{a} \\end{equation}\nis the triangle between \\([-a, a]\\), that goes up to \\(1\\).\ninterpreting \\(\\lambda\\) if \\(f(x)\\) is a function in time \\(\\lambda\\) could be thought of analogous to frequency\nif \\(f(x)\\) is a function of space \\(\\lambda\\) could be thought of analogous to momentum\nFourier transform of step function if you have a function:\n\\begin{equation} f(x) = \\begin{cases} 1, |x| \u0026lt; a \\\\ 0 \\end{cases} \\end{equation}\nits Fourier transform is sinc function:\n\\begin{equation} \\hat{f}(\\lambda) = \\frac{i \\sin (a \\lambda)}{\\lambda} \\end{equation}\nintuitive understandings the formula sines stuck between Consider what:\n\\begin{equation} f(x) \\cos (x) \\end{equation}\nlooks like.\nEffectively, you are stenching the \\(\\cos(x)\\) between \\(f(x)\\) and its reflection across \\(x\\). As you integrate, the majority of the up and downs cancel out, and the only thing you are left is the bits where \\(f(x)\\) peak up!\nas you increase \\(k\\):\n\\begin{equation} f(x) \\cos (kx) \\end{equation}\nyou obtain more cancellations and it will eventually integrate to \\(0\\).\nFourier transform properties As a function gets smoother, its Fourier transform is more concentrated at one point (closer to a single frequency).\nConversely, as a function gets more jagged, its Fourier transform is smoother (closer to a composition of sinusoids).\nFourier Transform as Quantization Consider:\nthe big fun idea\u0026mdash;-we can transform:\n\\(L\\) periodic function on \\(f(x)\\) \\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} \\right\\}\\) This series exists for all function, converges exceedingly quickly, and has great properties. 
It should look like the form:\n\\begin{equation} f(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x i} \\end{equation}\nFourier norm of a function After you do this, and obtain\n\\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} \\right\\}\\)\nwe call the \u0026ldquo;size\u0026rdquo; of this function:\n\\begin{equation} \\sum_{-\\infty}^{\\infty} | c_{n}|^{2} \\end{equation}\nPlanchrel\u0026rsquo;s Formula For a usual \\(L\\) periodic function, size agrees:\n\\begin{equation} \\langle f,f \\rangle= \\int_{0}^{L} |f(x)|^{2} \\dd{x} = L\\sum_{-\\infty}^{\\infty} | c_{n}|^{2} \\end{equation}\nYou can show this by plugging in the Complex Fourier Series in \\(f\\).\nmotivation Consider a function period of period \\(L\\)\n\\begin{equation} a_{k} = \\int_{0}^{L} F(x) e^{-i\\omega kx} \\dd{x} \\end{equation}\nwhere:\n\\begin{equation} f(x) = ?\\sum_{k} a_{n} e^{i \\omega kx} \\end{equation}\nAnd the BIG PICTURE: if we took the period \\(L \\to \\infty\\), we end up with the Fourier Transform.\n","html":"\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eConsider a function that has no periodicity, but that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x), -\\infty \u0026lt; x \u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd assume that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\infty}^{\\infty} |f(x)| \\dd{x}, \u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eimportant: \u003cstrong\u003e\u003cstrong\u003elook up! 
the integral of \\(f(x)\\) has to converge AND this means that the \\(f(x)\\) goes to \\(0\\) actually at boundaries\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e(meaning the function decays as you go towards the end)\u003c/p\u003e\n\u003ch2 id=\"definition\"\u003edefinition\u003c/h2\u003e\n\u003cp\u003ea Fourier transform is an invertible transformation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) \\to \\hat{f}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) \\to f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) = \\int_{-\\infty}^{\\infty } e^{-i\\lambda x} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\frac{1}{2\\pi} \\int_{-\\infty}^{\\infty} e^{ix\\lambda} \\hat{f}(\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe sometimes write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) = \\mathcal{F}(f)(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\mathcal{F}\\) is an invertible map that gives you the \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-mathcal-f\"\u003eProperties of \\(\\mathcal{F}\\)\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eit\u0026rsquo;s a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e: \\(\\mathcal{F}(c_1 f_1 + c_2 f_2) = c_1\\mathcal{F}(f_1) + c_2 \\mathcal{F}(f_2)\\)\u003c/li\u003e\n\u003cli\u003eit\u0026rsquo;s recenter able: \\(\\mathcal{F}(f(x+c)) = e^{i c \\lambda}\\mathcal{F}(f)(\\lambda)\\)\u003c/li\u003e\n\u003cli\u003eit\u0026rsquo;s reverse-shift-able: \\(\\mathcal{F}\\qty(e^{i \\lambda_{0} x} f(x)) = \\mathcal{F}(f) (\\lambda 
-\\lambda_{0})\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ebecause integrals are linear\u003c/li\u003e\n\u003cli\u003e\\(\\int_{-\\infty}^{\\infty} e^{-i(t-c)\\lambda}f(t) \\dd{t} = e^{ic\\lambda} \\mathcal{F}(f)(\\lambda)\\), where we define \\(t = x+c\\)\u003c/li\u003e\n\u003cli\u003etry it\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"derivative-of-fourier-transform\"\u003eDerivative of Fourier Transform\u003c/h3\u003e\n\u003cp\u003eSuppose we want:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}(f\u0026rsquo;(x)) = \\int_{-\\infty}^{\\infty} e^{-ix\\lambda} f\u0026rsquo;(x) \\dd{x} = \\left. e^{-ix\\lambda} f(x)\\right|_{-\\infty}^{\\infty} + i \\lambda \\int_{-\\infty}^{\\infty} e^{-ix\\lambda} f(x) \\dd{x} = i \\lambda \\mathcal{F}(f) (\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause we are guaranteed \\(f(x)\\) evaluated at infinity is \\(0\\), the first term drops out. The important conclusion here: \u003cstrong\u003eFourier transforms change a derivative into ALGEBRA of multiplying by \\(i\\lambda\\)\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eConsider also:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}(x f(x)) = i \\dv \\lambda \\mathcal{F}(f)(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou can show this in a similar way, by attempting to distribute a \\(\\dv \\lambda\\) into the Fourier transform and showing that they are equal.\u003c/p\u003e\n\u003ch3 id=\"fourier-transform-of-a-gaussian\"\u003eFourier Transform of a Gaussian\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = \\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}^{-1}\\qty(e^{-a\\frac{\\lambda^{2}}{2}}) = \\frac{e^{-\\frac{x^{2}}{2a}}}{\\sqrt{2\\pi a}}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ewe obtain 
this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = e^{-\\frac{x^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{u}{x} = -xe^{-\\frac{x^{2}}{2}} = -xu\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand if we took a Fourier transform on both sides, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(\\dv{u}{x} + xu) = 0 = i \\lambda \\hat{u} + i \\pdv{\\hat{u}} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand note that this is the same equation. Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(\\dv{u}{x} + xu) = \\dv{\\lambda}{x} + \\lambda u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}(u) = Cu\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is what we see.\u003c/p\u003e\n\u003ch3 id=\"look-a-table\"\u003eLook! A table\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-06_21-36-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Lambda_{a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the triangle between \\([-a, a]\\), that goes up to \\(1\\).\u003c/p\u003e\n\u003ch3 id=\"interpreting-lambda\"\u003einterpreting \\(\\lambda\\)\u003c/h3\u003e\n\u003ch4 id=\"if-f--x--is-a-function-in-time\"\u003eif \\(f(x)\\) is a function in time\u003c/h4\u003e\n\u003cp\u003e\\(\\lambda\\) could be thought of analogous to frequency\u003c/p\u003e\n\u003ch4 id=\"if-f--x--is-a-function-of-space\"\u003eif \\(f(x)\\) is a function of space\u003c/h4\u003e\n\u003cp\u003e\\(\\lambda\\) could be thought of analogous to momentum\u003c/p\u003e\n\u003ch3 id=\"fourier-transform-of-step-function\"\u003eFourier transform of step function\u003c/h3\u003e\n\u003cp\u003eif you have a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) =\n\\begin{cases}\n1, |x| \u0026lt; a 
\\\\\n0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eits Fourier transform is \u003ca href=\"\"\u003esinc function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{f}(\\lambda) = \\frac{i \\sin (a \\lambda)}{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"intuitive-understandings-the-formula\"\u003eintuitive understandings the formula\u003c/h3\u003e\n\u003ch4 id=\"sines-stuck-between\"\u003esines stuck between\u003c/h4\u003e\n\u003cp\u003eConsider what:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) \\cos (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elooks like.\u003c/p\u003e\n\u003cp\u003eEffectively, you are stenching the \\(\\cos(x)\\) between \\(f(x)\\) and its reflection across \\(x\\). As you integrate, the majority of the up and downs cancel out, and the only thing you are left is the bits where \\(f(x)\\) peak up!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-06_10-48-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eas you increase \\(k\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) \\cos (kx)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou obtain more cancellations and it will eventually integrate to \\(0\\).\u003c/p\u003e\n\u003ch4 id=\"fourier-transform-properties\"\u003eFourier transform properties\u003c/h4\u003e\n\u003cp\u003eAs a function gets smoother, its Fourier transform is more concentrated at one point (closer to a single frequency).\u003c/p\u003e\n\u003cp\u003eConversely, as a function gets more jagged, its Fourier transform is smoother (closer to a composition of sinusoids).\u003c/p\u003e\n\u003ch3 id=\"fourier-transform-as-quantization\"\u003eFourier Transform as Quantization\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003ethe big fun idea\u0026mdash;-we can transform:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(L\\) periodic function on \\(f(x)\\)\u003c/li\u003e\n\u003cli\u003e\\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} 
\\right\\}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis series exists for all function, converges exceedingly quickly, and has great properties. It should look like the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x i}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"fourier-norm-of-a-function\"\u003eFourier norm of a function\u003c/h4\u003e\n\u003cp\u003eAfter you do this, and obtain\u003c/p\u003e\n\u003cp\u003e\\(\\left\\{ (c_{n})^{\\infty}_{-\\infty} \\right\\}\\)\u003c/p\u003e\n\u003cp\u003ewe call the \u0026ldquo;size\u0026rdquo; of this function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{-\\infty}^{\\infty} | c_{n}|^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"planchrel-s-formula\"\u003ePlanchrel\u0026rsquo;s Formula\u003c/h4\u003e\n\u003cp\u003eFor a usual \\(L\\) periodic function, size agrees:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,f \\rangle= \\int_{0}^{L} |f(x)|^{2} \\dd{x} = L\\sum_{-\\infty}^{\\infty} | c_{n}|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou can show this by plugging in the \u003ca href=\"/posts/kbhsu_math53_mar042024/#complex-fourier-series\"\u003eComplex Fourier Series\u003c/a\u003e in \\(f\\).\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003eConsider a function period of period \\(L\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\int_{0}^{L} F(x) e^{-i\\omega kx} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = ?\\sum_{k} a_{n} e^{i \\omega kx}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd the BIG PICTURE: if we took the period \\(L \\to \\infty\\), we end up with the \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfourier_transform/","tags":null,"title":"Fourier Transform"},{"categories":null,"contents":"FDR is an 
American president.\nFDR and Teddy Roosevelt is Got Polio, which played in his favor =\u0026gt; press agree to not photograph him when he was in a wheelchair Created the New Deal Models himself after his cousin Teddy Roosevelt, and believed that charisma and moral leadership work. \u0026ldquo;Above all, try something\u0026hellip; let the court shoot it if need to.\u0026rdquo;\nHe was able to gain single party control, wh.\nCreated Fireside Chats.\nHis wife, Eleanor Roosevelt, was very controversial.\nlegacy of FDR Never spent enough to end the depression Expanded government regulation, government size, and social welfare Modernization of presidency: sets agenda, initiates legislation Realigned the democratic party (created the progressive democrats) Maintained democracy \u0026lt;=== compared to Authoritarianism ","html":"\u003cp\u003eFDR is an American president.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFDR and \u003ca href=\"/posts/kbhteddy_roosevelt/\"\u003eTeddy Roosevelt\u003c/a\u003e is\u003c/li\u003e\n\u003cli\u003eGot \u003ca href=\"/posts/kbhpolio/\"\u003ePolio\u003c/a\u003e, which played in his favor =\u0026gt; press agree to not photograph him when he was in a wheelchair\u003c/li\u003e\n\u003cli\u003eCreated the \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eModels himself after his cousin \u003ca href=\"/posts/kbhteddy_roosevelt/\"\u003eTeddy Roosevelt\u003c/a\u003e, and believed that charisma and moral leadership work. 
\u0026ldquo;Above all, try something\u0026hellip; let the court shoot it if need to.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eHe was able to gain \u003ca href=\"/posts/kbhsingle_party_control/\"\u003esingle party control\u003c/a\u003e, wh.\u003c/p\u003e\n\u003cp\u003eCreated \u003ca href=\"/posts/kbhfireside_chats/\"\u003eFireside Chats.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eHis wife, \u003ca href=\"/posts/kbheleanor_roosevelt/\"\u003eEleanor Roosevelt\u003c/a\u003e, was very controversial.\u003c/p\u003e\n\u003ch2 id=\"legacy-of-fdr\"\u003elegacy of FDR\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNever spent enough to end the depression\u003c/li\u003e\n\u003cli\u003eExpanded government regulation, government size, and social welfare\u003c/li\u003e\n\u003cli\u003eModernization of presidency: sets agenda, initiates legislation\u003c/li\u003e\n\u003cli\u003eRealigned the democratic party (created the progressive democrats)\u003c/li\u003e\n\u003cli\u003eMaintained democracy \u0026lt;=== compared to \u003ca href=\"/posts/kbhauthoritarianism/\"\u003eAuthoritarianism\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfdr/","tags":null,"title":"Franklin D. 
Roosevelt (FDR)"},{"categories":null,"contents":"Saltwater economists are economists from coastal schools that are mostly classical Keynsians\nFreshwater economists are economists who are mostly Neoclassical Economists\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfreshwater_economists/\"\u003eSaltwater economists\u003c/a\u003e are economists from coastal schools that are mostly classical \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsians\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfreshwater_economists/\"\u003eFreshwater economists\u003c/a\u003e are economists who are mostly \u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economists\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfreshwater_economists/","tags":null,"title":"Freshwater economists"},{"categories":null,"contents":"function pointers typedef bool (*should_swap) (int, int); all function\u0026rsquo;s names are pointers to first address of the function\u0026rsquo;s machine code in memory.\nWhen writing a generic function, if we don\u0026rsquo;t know about the behavior of something (such as comparison), etc., we have to rely on the client to specify the information in terms of a function.\nfunction writer: writes algorithmic function, relies on caller data function caller: knows data, and doesn\u0026rsquo;t know how algorithm knows ","html":"\u003ch2 id=\"function-pointers\"\u003efunction pointers\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003etypedef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eshould_swap\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eall \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e\u0026rsquo;s names are \u003cstrong\u003epointers\u003c/strong\u003e to first address of the function\u0026rsquo;s machine code in memory.\u003c/p\u003e\n\u003cp\u003eWhen writing a \u003ca href=\"/posts/kbhgeneric/\"\u003egeneric\u003c/a\u003e function, if we don\u0026rsquo;t know about the behavior of something (such as comparison), etc., we have to rely on the client to specify the information in terms of a function.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efunction writer\u003c/strong\u003e: writes algorithmic function, relies on caller data\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efunction caller\u003c/strong\u003e: knows data, and doesn\u0026rsquo;t know how algorithm knows\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfunction/","tags":null,"title":"function"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhfunctor/","tags":null,"title":"functor"},{"categories":null,"contents":"If your style is not math driven, fundamental investing are the strategies you can use.\nBTW: the game of investing in the stock market has gotten harder because we have too much data with the historical data. 
(Efficient) markets tend to eliminate opportunities to make a profit.\nLooking at 300 banks would be a good idea.\nValue Investing Value Investing is a fundamental investing strategy to value each company by going through the accounting/books of the company and buying companies that are theoretically undervalued.\nDistressed Investing Distressed Investing is the extreme version of Value Investing. Buy something that has a bad asset, a bad structure, and a bad holders: buy things when \u0026ldquo;what do they care what price they sell at\u0026rdquo;, which means you may be able to buy it at less than its worth.\nGrowth Investing Growth Investing is a fundamental investing strategy to bet in the future growth of a company given its performance and technology: Tesla, Teledoc, etc.\nQuality Investing Quality Investing is a fundamental investing strategy to buy stocks even despite high prices that has the most dependable market share: Coke, P\u0026amp;G, etc.\nSecond-Level Thinking A meta-level way of looking at decisions: \u0026ldquo;how many people know what I know.\u0026rdquo; Your strategy has to be both 1) DIFFERENT and 2) BETTER than what other people are doing.\n\u0026ldquo;The correctness of a decision cannot be judged by the outcome.\u0026rdquo;\n","html":"\u003cp\u003eIf your style is not math driven, \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e are the strategies you can use.\u003c/p\u003e\n\u003cp\u003eBTW: the game of investing in the \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003estock market\u003c/a\u003e has gotten harder because we have too much data with the historical data. 
(Efficient) markets tend to eliminate opportunities to make a profit.\u003c/p\u003e\n\u003cp\u003eLooking at 300 banks would be a good idea.\u003c/p\u003e\n\u003ch2 id=\"value-investing\"\u003eValue Investing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#value-investing\"\u003eValue Investing\u003c/a\u003e is a \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e strategy to value each company by going through the accounting/books of the company and buying companies that are theoretically undervalued.\u003c/p\u003e\n\u003ch3 id=\"distressed-investing\"\u003eDistressed Investing\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#distressed-investing\"\u003eDistressed Investing\u003c/a\u003e is the extreme version of \u003ca href=\"#value-investing\"\u003eValue Investing\u003c/a\u003e. Buy something that has a bad asset, a bad structure, and a bad holders: buy things when \u0026ldquo;what do they care what price they sell at\u0026rdquo;, which means you may be able to buy it at less than its worth.\u003c/p\u003e\n\u003ch2 id=\"growth-investing\"\u003eGrowth Investing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#growth-investing\"\u003eGrowth Investing\u003c/a\u003e is a \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e strategy to bet in the future growth of a company given its performance and technology: Tesla, Teledoc, etc.\u003c/p\u003e\n\u003ch2 id=\"quality-investing\"\u003eQuality Investing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#quality-investing\"\u003eQuality Investing\u003c/a\u003e is a \u003ca href=\"/posts/kbhfundimental_investing/\"\u003efundamental investing\u003c/a\u003e strategy to buy stocks even despite high prices that has the most dependable market share: Coke, P\u0026amp;G, etc.\u003c/p\u003e\n\u003ch2 id=\"second-level-thinking\"\u003eSecond-Level Thinking\u003c/h2\u003e\n\u003cp\u003eA meta-level way of looking at decisions: \u0026ldquo;how many people know what I 
know.\u0026rdquo; Your strategy has to be both 1) DIFFERENT and 2) BETTER than what other people are doing.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;The correctness of a decision cannot be judged by the outcome.\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundimental_investing/","tags":null,"title":"fundamental investing"},{"categories":null,"contents":"factorization motivator If \\(p\\) is prime and \\(p | ab\\), then \\(p|a\\) or \\(p|b\\).\nIf \\(p|a\\), we are done.\nConsider the case where \\(p|ab\\) yet \\(a\\) is not divisible by \\(p\\). Then, \\(a\\) and \\(p\\) are coprime. This means that, we have:\n\\begin{equation} \\gcd (a,p) = 1 = s a + tp \\end{equation}\nWe note that:\n\\begin{align} b \u0026amp;= 1 \\cdot b \\\\ \u0026amp;= (sa+tp) b \\\\ \u0026amp;= sab + tpb \\\\ \u0026amp;= s(ab) + tb(p) \\end{align}\nNotice that both of these elements are divisible by \\(p\\) (\\(p|ab\\) and of course \\(p|p\\)). Therefore, \\(p|b\\) as desired.\nstatement of the theorem Every integer greater than \\(1\\) is a prime or a product of primes. This factorization is unique.\nProof Existence Let \\(S\\) be the list of integers bigger than \\(1\\) which are prime or are products of primes. Consider the set \\(T\\) which is all integers bigger than \\(1\\) which isn\u0026rsquo;t prime or are products of primes:\n\\begin{equation} T = \\{2, 3, \\dots, \\} \\setminus S \\end{equation}\nWe desire \\(T\\) to be empty.\nAssume for the sake of contradiction that \\(T\\) isn\u0026rsquo;t empty. By WOP, take some smallest element of \\(t \\in T\\).\n\\(t\\) is not in \\(S\\), so it mustn\u0026rsquo;t be prime. This means:\n\\begin{equation} t = ab \\end{equation}\nThough\u0026hellip;. \\(a\\) and \\(b\\) must be smaller than \\(t\\) (otherwise their product wouldn\u0026rsquo;t make \\(t\\), as we are working with only positive numbers (integers greater than \\(1\\)) here). 
So \\(a\\) and \\(b\\) must be in $S$\u0026mdash;meaning they are primes or product of primes. This makes \\(t\\) a prime or product of primes, reaching contradiction.\nUniqueness We show this by induction. We see that: \\(2 = 2\\). Now, suppose a unique prime factorization holds for all integers smaller than \\(n\\). Let:\n\\begin{equation} n = p_1 \\dots p_{r} = q_1 \\dots q_{s} \\end{equation}\nLet us order it such that \\(p_1 \\leq \u0026hellip; \\leq p_{r}\\), \\(q_1 \\leq \u0026hellip; \\leq q_{s}\\).\nBy the factorization motivator, each \\(p_{j}|n\\) implies that \\(p_{j}|q_{i}\\) (you can see this by treating \\(n = q_1 \u0026hellip; q_{s}\\), so \\(p_{j}|n \\implies p_{j}|(q_1 \\cdot \\dots \\cdot q_{s})\\) so \\(p_{j}\\) should be divisible by some \\(q_{j}\\).)\nNow, this condition implies \\(p_{j} = q_{i}\\), because primes are not divisible by anything except themselves and \\(1\\) (and \\(1\\) is not considered prime).\nConsider, then, two such equivalences:\n\\begin{equation} p_{1} = q_{j} \\end{equation}\n\\begin{equation} q_{1} = p_{k} \\end{equation}\nNow, this means that:\n\\begin{equation} p_{1} \\leq p_{k} = q_{1} \\leq q_{j} = p_{1} \\end{equation}\nTherefore, the only way this can work (the fact that \\(q_1\\) is sandwiched on both ends \u0026mdash; by \\(p_1\\leq q_1 \\leq p_1\\)) is that \\(p_1 = q_1\\).\nTherefore, we now have:\n\\begin{equation} \\frac{n}{p_1} = p_{2} \\cdot \\dots \\cdot p_{n} = q_{2} \\cdot \\dots \\cdot q_{n} \\end{equation}\nYou will note \\(\\frac{n}{p_1} \u0026lt; n\\). Now, we can invoke induction. \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"factorization-motivator\"\u003efactorization motivator\u003c/h2\u003e\n\u003cp\u003eIf \\(p\\) is \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e and \\(p | ab\\), then \\(p|a\\) or \\(p|b\\).\u003c/p\u003e\n\u003cp\u003eIf \\(p|a\\), we are done.\u003c/p\u003e\n\u003cp\u003eConsider the case where \\(p|ab\\) yet \\(a\\) is not divisible by \\(p\\). 
Then, \\(a\\) and \\(p\\) are \u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003e. This means that, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\gcd (a,p) = 1 = s a + tp\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe note that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb \u0026amp;= 1 \\cdot b \\\\\n\u0026amp;= (sa+tp) b \\\\\n\u0026amp;= sab + tpb \\\\\n\u0026amp;= s(ab) + tb(p)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNotice that both of these elements are \u003ca href=\"/posts/kbhdivide/\"\u003edivisible\u003c/a\u003e by \\(p\\) (\\(p|ab\\) and of course \\(p|p\\)). Therefore, \\(p|b\\) as desired.\u003c/p\u003e\n\u003ch2 id=\"statement-of-the-theorem\"\u003estatement of the theorem\u003c/h2\u003e\n\u003cp\u003eEvery integer greater than \\(1\\) is a \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e or a product of primes. This factorization is unique.\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003ch3 id=\"existence\"\u003eExistence\u003c/h3\u003e\n\u003cp\u003eLet \\(S\\) be the list of \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003es bigger than \\(1\\) which are \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e or are products of primes. Consider the set \\(T\\) which is all \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003es bigger than \\(1\\) which isn\u0026rsquo;t \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e or are products of primes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT = \\{2, 3, \\dots, \\} \\setminus S\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire \\(T\\) to be empty.\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction that \\(T\\) isn\u0026rsquo;t empty. 
By \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e, take some smallest element of \\(t \\in T\\).\u003c/p\u003e\n\u003cp\u003e\\(t\\) is not in \\(S\\), so it mustn\u0026rsquo;t be \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e. This means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = ab\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThough\u0026hellip;. \\(a\\) and \\(b\\) must be smaller than \\(t\\) (otherwise their product wouldn\u0026rsquo;t make \\(t\\), as we are working with only positive numbers (\u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003es greater than \\(1\\)) here). So \\(a\\) and \\(b\\) must be in $S$\u0026mdash;meaning they are \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003es or product of primes. This makes \\(t\\) a prime or product of primes, reaching contradiction.\u003c/p\u003e\n\u003ch3 id=\"uniqueness\"\u003eUniqueness\u003c/h3\u003e\n\u003cp\u003eWe show this by induction. We see that: \\(2 = 2\\). Now, suppose a unique prime factorization holds for all integers smaller than \\(n\\). 
Let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nn = p_1 \\dots p_{r} = q_1 \\dots q_{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us order it such that \\(p_1 \\leq \u0026hellip; \\leq p_{r}\\), \\(q_1 \\leq \u0026hellip; \\leq q_{s}\\).\u003c/p\u003e\n\u003cp\u003eBy the \u003ca href=\"#factorization-motivator\"\u003efactorization motivator\u003c/a\u003e, each \\(p_{j}|n\\) implies that \\(p_{j}|q_{i}\\) (you can see this by treating \\(n = q_1 \u0026hellip; q_{s}\\), so \\(p_{j}|n \\implies p_{j}|(q_1 \\cdot \\dots \\cdot q_{s})\\) so \\(p_{j}\\) should be divisible by some \\(q_{j}\\).)\u003c/p\u003e\n\u003cp\u003eNow, this condition implies \\(p_{j} = q_{i}\\), because primes are not divisible by anything except themselves and \\(1\\) (and \\(1\\) is not considered \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eConsider, then, two such equivalences:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{1} = q_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq_{1} = p_{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, this means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{1} \\leq p_{k} = q_{1} \\leq q_{j} = p_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, the only way this can work (the fact that \\(q_1\\) is sandwiched on both ends \u0026mdash; by \\(p_1\\leq q_1 \\leq p_1\\)) is that \\(p_1 = q_1\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{n}{p_1} = p_{2} \\cdot \\dots \\cdot p_{n} = q_{2} \\cdot \\dots \\cdot q_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note \\(\\frac{n}{p_1} \u0026lt; n\\). Now, we can invoke induction. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundamental_theorem_of_arithmetic/","tags":null,"title":"fundamental theorem of arithmetic"},{"categories":null,"contents":"Lovely, well known result:\n\\begin{equation} \\dv x \\int_{a}^{x} f(t)\\dd{t} = f(x) \\end{equation}\nfor any fixed \\(a\\). This is because that\u0026rsquo;s functionally using \\(a\\) as a \\(+C\\) term.\n","html":"\u003cp\u003eLovely, well known result:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv x \\int_{a}^{x} f(t)\\dd{t} = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor any fixed \\(a\\). This is because that\u0026rsquo;s functionally using \\(a\\) as a \\(+C\\) term.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundamental_theorem_of_calculus/","tags":null,"title":"Fundamental Theorem of Calculus"},{"categories":null,"contents":"The dimension of the null space plus the dimension of the range of a Linear Map equals the dimension of its domain.\nThis also implies that both the null space (but this one\u0026rsquo;s trivial b/c the null space is a subspace of the already finite-dimensional domain) and the range as well is finite-dimensional.\nconstituents \\(T \\in \\mathcal{L}( V,W )\\) finite-dimensional \\(V\\) (otherwise commenting on computing its dimension doesn\u0026rsquo;t make sense) requirements \\begin{equation} \\dim V = \\dim null\\ T + \\dim range\\ T \\end{equation}\nfor \\(T \\in \\mathcal{L}(V,W)\\)\nproof We desire that \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\) for \\(T \\in \\mathcal{L}(V,W)\\).\nLet us construct a basis of the null space of \\(T\\), \\(u_1, \\dots u_{m}\\). This makes \\(\\dim null\\ T = m\\).\nWe can extend this list to a basis of \\(V\\), the domain, with some vectors \\(v_1, \\dots v_{n}\\). This makes the \\(\\dim V = m+n\\).\nWe now desire that \\(\\dim range\\ T = n\\). 
We show this by showing \\(Tv_{1}, \\dots Tv_{n}\\) is a basis of \\(range\\ T\\).\nRecall that \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is a basis of \\(V\\) the domain of \\(T\\). This means that any element that can go into \\(T\\) takes the shape of:\n\\begin{equation} v = a_1u_1+ \\dots +a_{m}u_{m} + b_{1}v_1 + \\dots + b_{n}v_{n} \\end{equation}\nRecall also that the definition of the range of \\(T\\) is that:\n\\begin{equation} range\\ T = \\{Tv: v \\in V\\} \\end{equation}\nTherefore, every element of the range of \\(T\\) takes the shape of \\(Tv\\): meaning:\n\\begin{equation} Tv = a_1Tu_1+ \\dots +a_{m}Tu_{m} + b_{1}Tv_1 + \\dots + b_{n}Tv_{n} \\end{equation}\nby additivity and homogeneity of Linear Maps.\nNow, \\(Tu_{j}=0\\), because each \\(u_{j}\\) is a basis (and so definitely at least an element of) the null space of \\(T\\). This makes the above expression:\n\\begin{equation} Tv = 0 + b_{1}Tv_1 + \\dots + b_{n}Tv_{n} = b_{1}Tv_1 + \\dots + b_{n}Tv_{n} \\end{equation}\nOk. Given that all elements of the range can be constructed by a linear combination of \\(Tv_{1} \\dots Tv_{n}\\), we declare that the list spans the range of \\(T\\). Notably, as \\(V\\) is finite-dimensional and \\(v_1, \\dots v_{n}\\) is a sublist of its basis, \\(n \u0026lt; \\infty\\) and so the range of \\(T\\) is also finite-dimensional.\nTo finish showing \\(Tv_{1}, \\dots, Tv_{n}\\) to be a basis of \\(range\\ T\\), we have to show that its linearly independent.\nSuppose:\n\\begin{equation} c_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0 \\end{equation}\nBy homogeneity and additivity, we have that:\n\\begin{equation} T(c_1v_{1} + \\dots + c_{n}v_{n}) = 0 \\end{equation}\nthis makes \\(c_1v_1 + \\dots\\) a member of the null space of \\(T\\). 
Recall that \\(u_1, \\dots u_{m}\\) were a basis thereof, this means that the linear combination of \\(v_{j}\\) can be written as a linear combination of \\(u_{j}\\):\n\\begin{equation} c_1 v_1 + \\dots + c_{n}v_{n} = d_1 u_{1} + \\dots + d_{m} u_{m} \\end{equation}\nOf course, the list \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is linearly independent as it is a basis of \\(V\\). This makes \\(c_{j}=d_{j}=0\\) (to see this, move all the \\(d_{j}u_{j}\\) to the left and apply definition of linear independence).\nWe have therefore shown that, given\n\\begin{equation} c_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0 \\end{equation}\n\\(c_1 = \\dots = c_{n} =0\\), satisfying the definition of linear independence of the list of \\(Tv_{j}\\).\nHaving shown that \\(Tv_{j}\\) to be a linearly independent spanning list of \\(range\\ T\\), we can conclude that it is indeed a basis of \\(range\\ T\\).\nThis makes the \\(\\dim range\\ T = n\\), as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e plus the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e equals the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of its domain.\u003c/p\u003e\n\u003cp\u003eThis also implies that both the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e (but this one\u0026rsquo;s trivial b/c the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the already \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e domain) and the \u003cstrong\u003e\u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e\u003c/strong\u003e as well is 
\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T \\in \\mathcal{L}( V,W )\\)\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e \\(V\\) (otherwise \u003ca href=\"/posts/kbhdocumentation_and_specification/#commenting\"\u003ecommenting\u003c/a\u003e on computing its \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e doesn\u0026rsquo;t make sense)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V = \\dim null\\ T + \\dim range\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(T \\in \\mathcal{L}(V,W)\\)\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eproof\u003c/h2\u003e\n\u003cp\u003eWe desire that \\(\\dim V = \\dim null\\ T + \\dim range\\ T\\) for \\(T \\in \\mathcal{L}(V,W)\\).\u003c/p\u003e\n\u003cp\u003eLet us construct a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\), \\(u_1, \\dots u_{m}\\). This makes \\(\\dim null\\ T = m\\).\u003c/p\u003e\n\u003cp\u003eWe can extend this list to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), the domain, with some vectors \\(v_1, \\dots v_{n}\\). This makes the \\(\\dim V = m+n\\).\u003c/p\u003e\n\u003cp\u003eWe now desire that \\(\\dim range\\ T = n\\). We show this by showing \\(Tv_{1}, \\dots Tv_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\).\u003c/p\u003e\n\u003cp\u003eRecall that \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) the domain of \\(T\\). 
This means that any element that \u003cem\u003ecan\u003c/em\u003e go into \\(T\\) takes the shape of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1u_1+ \\dots +a_{m}u_{m} + b_{1}v_1 + \\dots + b_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall also that the definition of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) is that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nrange\\ T = \\{Tv: v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, every element of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) takes the shape of \\(Tv\\): meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = a_1Tu_1+ \\dots +a_{m}Tu_{m} + b_{1}Tv_1 + \\dots + b_{n}Tv_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby additivity and homogeneity of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eNow, \\(Tu_{j}=0\\), because each \\(u_{j}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e (and so definitely at least an element of) the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\). This makes the above expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv = 0 + b_{1}Tv_1 + \\dots + b_{n}Tv_{n} = b_{1}Tv_1 + \\dots + b_{n}Tv_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk. Given that all elements of the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e can be constructed by a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(Tv_{1} \\dots Tv_{n}\\), we declare that the list \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003es the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\). 
Notably, as \\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e and \\(v_1, \\dots v_{n}\\) is a sublist of its \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, \\(n \u0026lt; \\infty\\) and so the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) is also \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo finish showing \\(Tv_{1}, \\dots, Tv_{n}\\) to be a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\), we have to show that its \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuppose:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e and additivity, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(c_1v_{1} + \\dots + c_{n}v_{n}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis makes \\(c_1v_1 + \\dots\\) a member of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\). Recall that \\(u_1, \\dots u_{m}\\) were a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e thereof, this means that the \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v_{j}\\) can be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(u_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 v_1 + \\dots + c_{n}v_{n} = d_1 u_{1} + \\dots + d_{m} u_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, the list \\(u_1, \\dots u_{m}, v_1, \\dots v_{n}\\) is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e as it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
This makes \\(c_{j}=d_{j}=0\\) (to see this, move all the \\(d_{j}u_{j}\\) to the left and apply definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eWe have therefore shown that, given\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1Tv_{1} + \\dots + c_{n}Tv_{n} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(c_1 = \\dots = c_{n} =0\\), satisfying the definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e of the list of \\(Tv_{j}\\).\u003c/p\u003e\n\u003cp\u003eHaving shown that \\(Tv_{j}\\) to be a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(range\\ T\\), we can conclude that it is indeed a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\).\u003c/p\u003e\n\u003cp\u003eThis makes the \\(\\dim range\\ T = n\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfundamental_theorem_of_linear_maps/","tags":null,"title":"fundamental theorem of linear maps"},{"categories":null,"contents":"fusion in machine learning is the process of adding features or encoding.\nlate fusion late fusion adds features together to a model in a multi-modal approach by first embedding the features separately\nearly fusion early fusion adds features together to a model in a multi-modal approach by concatenating the features first then embedding\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfusion/\"\u003efusion\u003c/a\u003e in machine learning is the process of adding features or encoding.\u003c/p\u003e\n\u003ch2 id=\"late-fusion\"\u003elate fusion\u003c/h2\u003e\n\u003cp\u003elate fusion adds features together to a model in a multi-modal approach by first embedding the features separately\u003c/p\u003e\n\u003ch2 id=\"early-fusion\"\u003eearly fusion\u003c/h2\u003e\n\u003cp\u003eearly fusion adds features together to a model in a multi-modal approach by concatenating the features first then embedding\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfusion/","tags":null,"title":"fusion (machine learning)"},{"categories":null,"contents":"Main problem: joint actions and observations are exponential by the number of agents.\nSolution: Smaple-based online planning for multiagent systems. 
We do this with the factored-value POMCP.\nfactored statistics: reduces the number of joint actions (through action selection statistics) factored trees: reduces the number of histories Multiagent Definition \\(I\\) set of agents \\(S\\) set of states \\(A_{i}\\) set of states for each agent \\(i\\) \\(T\\) state transitions \\(R\\) reward function \\(Z_{i}\\) joint observations for each agents \\(O\\) set of observations Coordination Graphs you can use sum-product elimination to shorten the Baysian Network of the agent Coordination Graphs (which is how agents influnece each other).\nMixture of Experts Directly search for the best joint actions; computed by MLE of the total value.\n","html":"\u003cp\u003eMain problem: joint actions and observations are exponential by the number of agents.\u003c/p\u003e\n\u003cp\u003eSolution: \u003cstrong\u003eSmaple-based online planning\u003c/strong\u003e for multiagent systems. We do this with the factored-value \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efactored statistics\u003c/strong\u003e: reduces the number of joint actions (through action selection statistics)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efactored trees\u003c/strong\u003e: reduces the number of histories\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"multiagent-definition\"\u003eMultiagent Definition\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(I\\) set of agents\u003c/li\u003e\n\u003cli\u003e\\(S\\) set of states\u003c/li\u003e\n\u003cli\u003e\\(A_{i}\\) set of states for each agent \\(i\\)\u003c/li\u003e\n\u003cli\u003e\\(T\\) state transitions\u003c/li\u003e\n\u003cli\u003e\\(R\\) reward function\u003c/li\u003e\n\u003cli\u003e\\(Z_{i}\\) joint observations for each agents\u003c/li\u003e\n\u003cli\u003e\\(O\\) set of observations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"coordination-graphs\"\u003eCoordination Graphs\u003c/h2\u003e\n\u003cp\u003eyou can use \u003ca 
href=\"/posts/kbhinference/#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e to shorten the \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e of the agent \u003ca href=\"#coordination-graphs\"\u003eCoordination Graphs\u003c/a\u003e (which is how agents influnece each other).\u003c/p\u003e\n\u003ch2 id=\"mixture-of-experts\"\u003eMixture of Experts\u003c/h2\u003e\n\u003cp\u003eDirectly search for the best joint actions; computed by \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e of the total value.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfv_pomcps/","tags":null,"title":"FV-POMCPs"},{"categories":null,"contents":"Motivation Its the same. It hasn\u0026rsquo;t changed: curses of dimensionality and history.\nGoal: to solve decentralized multi-agent MDPs.\nKey Insights macro-actions (MAs) to reduce computational complexity (like hierarchical planning) uses cross entropy to make infinite horizon problem tractable Prior Approaches masked Monte Carlo search: heuristic based, no optimality garantees MCTS: poor performance Direct Cross Entropy see also Cross Entropy Method\nsample a value function \\(k\\) takes \\(n\\) highest sampled values update parameter \\(\\theta\\) resample until distribution convergence take the best sample \\(x\\) G-DICE create a graph with exogenous \\(N\\) nodes, and \\(O\\) outgoing edges (designed before) use Direct Cross Entropy to solve for the best policy Results demonstrates improved performance over MMCS and MCTS does not need robot communication garantees convergence for both finite and infiinte horizon can choose exogenous number of nodes in order to gain computational savings ","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eIts the same. 
It hasn\u0026rsquo;t changed: curses of dimensionality and history.\u003c/p\u003e\n\u003cp\u003eGoal: to solve decentralized multi-agent MDPs.\u003c/p\u003e\n\u003ch2 id=\"key-insights\"\u003eKey Insights\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003emacro-actions (MAs) to reduce computational complexity (like hierarchical planning)\u003c/li\u003e\n\u003cli\u003euses cross entropy to make infinite horizon problem tractable\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"prior-approaches\"\u003ePrior Approaches\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emasked Monte Carlo search\u003c/strong\u003e: heuristic based, no optimality garantees\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e: poor performance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"direct-cross-entropy\"\u003eDirect Cross Entropy\u003c/h2\u003e\n\u003cp\u003esee also \u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esample a value function \\(k\\)\u003c/li\u003e\n\u003cli\u003etakes \\(n\\) highest sampled values\u003c/li\u003e\n\u003cli\u003eupdate parameter \\(\\theta\\)\u003c/li\u003e\n\u003cli\u003eresample until distribution convergence\u003c/li\u003e\n\u003cli\u003etake the best sample \\(x\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"g-dice--kbhg-dice-dot-md\"\u003e\u003ca href=\"/posts/kbhg_dice/\"\u003eG-DICE\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecreate a graph with exogenous \\(N\\) nodes, and \\(O\\) outgoing edges (designed before)\u003c/li\u003e\n\u003cli\u003euse \u003ca href=\"#direct-cross-entropy\"\u003eDirect Cross Entropy\u003c/a\u003e to solve for the best policy\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-22_10-08-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 
id=\"results\"\u003eResults\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003edemonstrates improved performance over MMCS and \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edoes not need robot communication\u003c/li\u003e\n\u003cli\u003egarantees convergence for both finite and infiinte horizon\u003c/li\u003e\n\u003cli\u003ecan choose exogenous number of nodes in order to gain computational savings\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhg_dice/","tags":null,"title":"G-DICE"},{"categories":null,"contents":"Galactica is a large-languange model for generating research papers, made by meta research\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgalactica/\"\u003eGalactica\u003c/a\u003e is a large-languange model for generating research papers, made by meta research\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgalactica/","tags":null,"title":"Galactica"},{"categories":null,"contents":" One of these things. It is actually a binomial distribution.\nYou can phrase the probability at\n","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-11_16-14-52_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOne of these things. 
It is actually a \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eYou can phrase the probability at\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgalton_board/","tags":null,"title":"Galton Board"},{"categories":null,"contents":"The GARCH model is a model for the heteroskedastic variations where the changes in variance is assumed to be auto correlated: that, though the variance changes, it changes in a predictable manner.\nIt is especially useful to\nGARCH 1,1 Conditional mean:\n\\begin{equation} y_{t} = x\u0026rsquo;_{t} \\theta + \\epsilon_{t} \\end{equation}\nThen, the epsilon parameter:\n\\begin{equation} \\epsilon_{t} = \\sigma_{t}z_{t} \\end{equation}\nwhere:\n\\begin{equation} z_{t} \\sim \\mathcal{N}(0,1) \\end{equation}\nand:\nconditional variance\n\\begin{equation} {\\sigma_{t}}^{2} = \\omega + \\lambda {\\sigma_{t-1}}^{2} + \\beta {\\sigma_{t-1}}^{2} \\end{equation}\nFinally, with initial conditions:\n\\begin{equation} w\u0026gt;0; \\alpha \u0026gt;0; \\beta \u0026gt;0 \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgarch/\"\u003eGARCH\u003c/a\u003e model is a model for the \u003ca href=\"/posts/kbhheteroskedastic/\"\u003eheteroskedastic\u003c/a\u003e variations where the changes in variance is assumed to be auto correlated: that, though the variance changes, it changes in a predictable manner.\u003c/p\u003e\n\u003cp\u003eIt is especially useful to\u003c/p\u003e\n\u003ch2 id=\"garch--kbhgarch-dot-md--1-1\"\u003e\u003ca href=\"/posts/kbhgarch/\"\u003eGARCH\u003c/a\u003e 1,1\u003c/h2\u003e\n\u003cp\u003eConditional mean:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{t} = x\u0026rsquo;_{t} \\theta + \\epsilon_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, the epsilon parameter:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon_{t} = 
\\sigma_{t}z_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz_{t} \\sim \\mathcal{N}(0,1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003econditional variance\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n{\\sigma_{t}}^{2} = \\omega + \\lambda {\\sigma_{t-1}}^{2} + \\beta {\\sigma_{t-1}}^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, with initial conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw\u0026gt;0; \\alpha \u0026gt;0; \\beta \u0026gt;0\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgarch/","tags":null,"title":"GARCH"},{"categories":null,"contents":"The Gauss\u0026rsquo; Law is a principle of electric flux of uniformly distributed electric field along a surface: that, the electric flux through a closed surface is the sum of the electric charge enclosed divided by the permittivity of free space.\nThat is:\n\\begin{equation} \\oint E \\cdot dA = \\frac{\\sum Q}{\\epsilon_{0}} \\end{equation}\nsomewhat motivating Gauss\u0026rsquo; Law Consider a sphere with uniformly distributed charge on its surface. It has surface area \\(4 \\pi r^{2}\\). Given the expression of electric flux and the fact that the origin change is in the center, and the test change is evenly distributed (i.e. \\(E\\) is held constant):\n\\begin{align} \\Phi_{E} \u0026amp;= \\int E \\cdot dA \\\\ \u0026amp;= E\\int dA \\end{align}\nNow, we are integrating across the entire surface of the sphere, so it is a closed integral. So:\n\\begin{align} \\Phi_{E} \u0026amp;= \\oint E dA \\end{align}\nWe have the entire sum of the surfaces to be the surface area; so \\(\\oint dA = 4\\pi r^{2}\\). 
Furthermore, recall that if the field is uniform, \\(E\\) is constantly at \\(\\frac{1}{4 \\pi \\epsilon_{0}} \\frac{Q}{r^{2}}\\).\nSo, substituting the two in:\n\\begin{align} \\Phi_{E} \u0026amp;= \\frac{1}{4\\pi \\epsilon_{0}} \\frac{Q}{r^{2}} 4\\pi r^{2} \\\\ \u0026amp;= \\frac{Q}{\\epsilon_{0}} \\end{align}\nwhere, \\(\\epsilon_{0}\\) is the permittivity of free space.\nCongrats, we have Gauss\u0026rsquo; Law: \u0026ldquo;the electric flux through the surface of an object is the sum of the charges enclosed divided by the permittivity of free space.\u0026rdquo;\nspheres electric field inside a closed conductor is zero\nThis is a direct result of gauss\u0026rsquo; law\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e is a principle of \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e of uniformly distributed electric field along a surface: that, the \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e through a \u003cstrong\u003eclosed surface\u003c/strong\u003e is the sum of the electric \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e enclosed divided by the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\oint E \\cdot dA = \\frac{\\sum Q}{\\epsilon_{0}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"somewhat-motivating-gauss-law--kbhgauss-law-dot-md\"\u003esomewhat motivating \u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-19_11-00-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eConsider a sphere with uniformly distributed \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003e on its surface. It has surface area \\(4 \\pi r^{2}\\). 
Given the expression of \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e and the fact that the origin change is in the center, and the test change is evenly distributed (i.e. \\(E\\) is held constant):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\Phi_{E} \u0026amp;= \\int E \\cdot dA \\\\\n\u0026amp;= E\\int dA\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we are integrating across the entire surface of the sphere, so it is a closed integral. So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\Phi_{E} \u0026amp;= \\oint E dA\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe have the entire sum of the surfaces to be the surface area; so \\(\\oint dA = 4\\pi r^{2}\\). Furthermore, recall that if the field is uniform, \\(E\\) is constantly at \\(\\frac{1}{4 \\pi \\epsilon_{0}} \\frac{Q}{r^{2}}\\).\u003c/p\u003e\n\u003cp\u003eSo, substituting the two in:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\Phi_{E} \u0026amp;= \\frac{1}{4\\pi \\epsilon_{0}} \\frac{Q}{r^{2}} 4\\pi r^{2} \\\\\n\u0026amp;= \\frac{Q}{\\epsilon_{0}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\epsilon_{0}\\) is the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eCongrats, we have \u003ca href=\"/posts/kbhgauss_law/\"\u003eGauss\u0026rsquo; Law\u003c/a\u003e: \u0026ldquo;the \u003ca href=\"/posts/kbhflux/#electric-flux\"\u003eelectric flux\u003c/a\u003e through the surface of an object is the sum of the \u003ca href=\"/posts/kbhcharged/\"\u003echarge\u003c/a\u003es enclosed divided by the \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"spheres\"\u003espheres\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eelectric field inside a closed conductor is zero\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThis is a direct result of gauss\u0026rsquo; 
law\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgauss_law/","tags":null,"title":"Gauss' Law"},{"categories":null,"contents":"The Gaussian, in general, gives:\n\\begin{equation} e^{-\\frac{ax^{2}}{2}} \\end{equation}\nwhich is a Bell-Shaped curve. It\u0026rsquo;s pretty darn important\nsolving heat equation without boundary for general expression:\n\\begin{equation} \\pdv{U}{t} = \\alpha \\pdv[2]{U}{x} \\end{equation}\n\\begin{equation} U(t,x) = \\frac{1}{\\sqrt{4\\pi \\alpha t}}\\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y} \\end{equation}\nwhere,\n\\begin{equation} \\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\alpha t \\lambda^{2}} \\end{equation}\n\\begin{equation} \\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)} \\end{equation}\nHeat Equation and Gaussian \\begin{equation} H(t,x) = \\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{x^{2}}{2t}} \\end{equation}\nYou will note that \\(H\\) does satisfy the heat equation:\n\\begin{equation} \\pdv{U}{t} = \\pdv[2]{U}{x} \\end{equation}\nclosed form solution \\begin{equation} U(t,x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nthis is exactly:\n\\begin{equation} \\int_{\\mathbb{R}}f(y) H(t,(x-y)) \\dd{y} = \\int_{\\mathbb{R}}\\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{(x-y)^{2}}{2t}} f(y) \\dd{y} \\end{equation}\nWe can understand this when \\(t \\to 0\\), where there is a single, narrow, area \\(1\\) band which we sweep across all of \\(y\\). 
Because its thin and \\(1\\), its basically \\(f(x)\\) at each \\(y\\).\nsolving Heat Equation without boundary Consider the partial Fourier Transform on the \\(x\\) variable of the heat equation.\n\\begin{equation} U(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda} \\hat{U} \\qty(t,\\lambda) \\dd{\\lambda} \\end{equation}\nTaking derivatives of this:\n\\begin{equation} \\pdv{U}{t} (t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{i\\lambda x} \\pdv{\\hat{U}}{t} (t,\\lambda) \\dd{\\lambda} \\end{equation}\nand:\n\\begin{equation} \\pdv[2]{U}{x} = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} \\qty(-\\lambda^{2}) e^{ix \\lambda } \\hat{U}(t,\\lambda) \\dd{\\lambda} \\end{equation}\nBecause these two are equal, it gives us that:\n\\begin{equation} \\hat{U}(t,\\lambda) = -\\lambda^{2} \\hat{U}(t,\\lambda) \\end{equation}\nmeaning:\n\\begin{equation} \\hat{U}(t,\\lambda) = a(\\lambda)e^{-\\lambda^{2}t} \\end{equation}\nFinally, at:\n\\begin{equation} \\hat{U}(0,\\lambda) = a(\\lambda) = \\hat{f}(\\lambda) \\end{equation}\nWe see that:\n\\begin{equation} \\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)} \\end{equation}\nTo get our original function back, we need to inverse Fourier transform it:\n\\begin{equation} U(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda - \\lambda^{2}t} \\hat{f}(\\lambda) \\dd{\\lambda} \\end{equation}\nIntegrating Gaussian, more Generally Let\u0026rsquo;s integrate:\n\\begin{equation} \\int_{-\\infty}^{\\infty} e^{-\\frac{{ax}^{2}}{2}} \\dd{x} \\end{equation}\nLet\u0026rsquo;s replace: \\(s = \\sqrt{a} x\\)\nThis gives us that (based on Integrating Gaussian):\n\\begin{equation} x = \\sqrt{\\frac{2\\pi}{a}} \\end{equation}\nIf we replace \\(a\\) by \\(\\frac{1}{t}\\), we obtain:\n\\begin{equation} \\frac{1}{\\sqrt{2\\pi}t} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2t}} \\dd{x} = 1 \\end{equation}\nby rescaling \\(x(a)\\) function above.\nIf \\(t\\) increases, you will see that this function diffuses from a single point 
at \\(0\\) and spreading out. Notice, that over the whole real line, no matter what the \\(t\\) is, you always end up with integral \\(1\\).\nIntegrating Gaussian Let\u0026rsquo;s integrate:\n\\begin{equation} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x} \\end{equation}\ncomputing this is funny:\n\\begin{equation} A \\cdot A = \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x} \\int_{-\\infty}^{\\infty} e^{-\\frac{y^{2}}{2}} \\dd{y} \\end{equation}\nWe can think of this as a double integral:\n\\begin{equation} A \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} e^{-\\frac{y^{2}}{2}} \\dd{x} \\dd{y} \\end{equation}\nmeaning we get:\n\\begin{equation} A \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}+y^{2}}{2}} \\dd{x} \\dd{y} \\end{equation}\nIts polar time; recall:\n\\begin{equation} x^{2} + y^{2} = r^{2} \\end{equation}\nwe can now go over this whole thing by converting into polar (notice the extra factor \\(r\\)):\n\\begin{equation} A \\cdot A = \\int_{0}^{2\\pi} \\int_{0}^{\\infty} e^{-\\frac{r^{2}}{2}} r \\dd{r} \\dd{\\theta} \\end{equation}\nvery suddenly we can use u sub on \\(r\\) to obtain:\n\\begin{equation} 2\\pi \\int_{0}^{\\infty} e^{-u} \\dd{u} = 2\\pi \\cdot 1 = 2\\pi \\end{equation}\nMeaning:\n\\begin{equation} A = \\sqrt{2\\pi} \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgaussian/\"\u003eGaussian\u003c/a\u003e, in general, gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{-\\frac{ax^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is a Bell-Shaped curve. 
It\u0026rsquo;s pretty darn important\u003c/p\u003e\n\u003ch2 id=\"solving-heat-equation-without-boundary\"\u003esolving heat equation without boundary\u003c/h2\u003e\n\u003cp\u003efor general expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} = \\alpha \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{\\sqrt{4\\pi \\alpha t}}\\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\alpha t \\lambda^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"heat-equation--kbhheat-equation-dot-md--and-gaussian--kbhgaussian-dot-md\"\u003e\u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e and \u003ca href=\"/posts/kbhgaussian/\"\u003eGaussian\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nH(t,x) = \\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{x^{2}}{2t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that \\(H\\) \u003cstrong\u003edoes\u003c/strong\u003e satisfy the heat equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} = \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"closed-form-solution\"\u003eclosed form solution\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is exactly:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\mathbb{R}}f(y) H(t,(x-y)) \\dd{y} = \\int_{\\mathbb{R}}\\frac{1}{\\sqrt{2\\pi} t}e^{-\\frac{(x-y)^{2}}{2t}} f(y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can understand this when \\(t \\to 0\\), where there is a single, narrow, area \\(1\\) band which we sweep across all of 
\\(y\\). Because its thin and \\(1\\), its basically \\(f(x)\\) at each \\(y\\).\u003c/p\u003e\n\u003ch3 id=\"solving-heat-equation--kbhheat-equation-dot-md--without-boundary\"\u003esolving \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e without boundary\u003c/h3\u003e\n\u003cp\u003eConsider the partial \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e on the \\(x\\) variable of the heat equation.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda} \\hat{U} \\qty(t,\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking derivatives of this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} (t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{i\\lambda x} \\pdv{\\hat{U}}{t} (t,\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{U}{x} = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} \\qty(-\\lambda^{2}) e^{ix \\lambda } \\hat{U}(t,\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause these two are equal, it gives us that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = -\\lambda^{2} \\hat{U}(t,\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = a(\\lambda)e^{-\\lambda^{2}t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(0,\\lambda) = a(\\lambda) = \\hat{f}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\hat{f}(\\lambda)e^{-\\lambda^{2}(t)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo get our original function back, we need to inverse Fourier transform it:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{2\\pi} \\int_{\\mathbb{R}} e^{ix\\lambda - \\lambda^{2}t} 
\\hat{f}(\\lambda) \\dd{\\lambda}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"integrating-gaussian-more-generally\"\u003eIntegrating Gaussian, more Generally\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{-\\infty}^{\\infty} e^{-\\frac{{ax}^{2}}{2}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s replace: \\(s = \\sqrt{a} x\\)\u003c/p\u003e\n\u003cp\u003eThis gives us that (based on \u003ca href=\"#integrating-gaussian\"\u003eIntegrating Gaussian\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx = \\sqrt{\\frac{2\\pi}{a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we replace \\(a\\) by \\(\\frac{1}{t}\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{\\sqrt{2\\pi}t} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2t}} \\dd{x} = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby rescaling \\(x(a)\\) function above.\u003c/p\u003e\n\u003cp\u003eIf \\(t\\) increases, you will see that this function diffuses from a single point at \\(0\\) and spreading out. 
Notice, that over the whole real line, no matter what the \\(t\\) is, you always end up with integral \\(1\\).\u003c/p\u003e\n\u003ch2 id=\"integrating-gaussian\"\u003eIntegrating Gaussian\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ecomputing this is funny:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} \\dd{x} \\int_{-\\infty}^{\\infty} e^{-\\frac{y^{2}}{2}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can think of this as a double integral:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}}{2}} e^{-\\frac{y^{2}}{2}} \\dd{x} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{-\\infty}^{\\infty} \\int_{-\\infty}^{\\infty} e^{-\\frac{x^{2}+y^{2}}{2}} \\dd{x} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts polar time; recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{2} + y^{2} = r^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can now go over this whole thing by converting into polar (notice the extra factor \\(r\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\cdot A = \\int_{0}^{2\\pi} \\int_{0}^{\\infty} e^{-\\frac{r^{2}}{2}} r \\dd{r} \\dd{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003every suddenly we can use u sub on \\(r\\) to obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2\\pi \\int_{0}^{\\infty} e^{-u} \\dd{u} = 2\\pi \\cdot 1 = 2\\pi\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = 
\\sqrt{2\\pi}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgaussian/","tags":null,"title":"Gaussian"},{"categories":null,"contents":"standard normal density function This is a function used to model many Gaussian distributions.\n\\begin{equation} \\phi(x) = \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{x^{2}}{2}} \\end{equation}\nThis function is the CDF of the standard normal.\nstandard normal density function is also symmetric:\n\\begin{equation} \\phi(a) = 1- \\phi(a) \\end{equation}\nGaussian distribution constituents \\(\\mu\\) the mean \\(\\sigma\\) the variance requirements \\begin{equation} X \\sim N(\\mu, \\sigma^{2}) \\end{equation}\nIts PDF is:\n\\begin{equation} \\mathcal{N}(x \\mid \\mu, \\sigma^{2}) = \\frac{1}{\\sigma\\sqrt{2\\pi}} e^{ \\frac{-(x-u)^{2}}{2 \\sigma^{2}}} \\end{equation}\nwhere, \\(\\phi\\) is the standard normal density function\nIts CDF:\n\\begin{equation} F(x) = \\Phi \\qty( \\frac{x-\\mu}{\\sigma}) \\end{equation}\nWe can\u0026rsquo;t integrate \\(\\Phi\\) further. So we leave it as a special function.\nAnd its expectations:\n\\(E(X) = \\mu\\)\n\\(Var(X) = \\sigma^{2}\\)\nadditional information linear transformations on Gaussian For some:\n\\begin{equation} Y = aX + b \\end{equation}\nwhere \\(X \\sim \\mathcal{N}\\)\nWe will end up with another normal \\(Y \\sim \\mathcal{N}\\) such that:\nmean: \\(au + b\\) variance: \\(a^{2}\\sigma^{2}\\) standard normal The standard normal is:\n\\begin{equation} Z=\\mathcal{N}(0,1) \\end{equation}\nmean 0, variance 1. 
You can transform anything into a standard normal via the following linear transform:\ntransformation into standard normal \\begin{equation} X \\sim \\mathcal{N}(\\mu, \\sigma^{2}) \\end{equation}\nand, we can shift it into a standard normal with:\n\\begin{equation} Z = \\frac{X-\\mu}{\\sigma} \\end{equation}\ntherefore, we can derive what the CDF of the normal distribution by shifting it back into the center:\n\\begin{equation} P(X\u0026lt;x) \\implies P\\qty(\\frac{X-\\mu}{\\theta} \u0026lt; \\frac{x-\\mu}{\\theta}) \\implies P\\qty(Z\u0026lt; \\frac{x-\\mu}{\\theta}) = \\Phi\\qty(\\frac{x-\\mu}{\\theta}) \\end{equation}\nnormal maximizes entropy no other random variable uses as little parameters to convey as much information\napproximation of binomial distribution with normal distribution You can use a normal distribution to approximate binomial approximation. However, be aware of a continuity correction\nadding Gaussian distributions for independent:\n\\begin{equation} X+Y \\sim \\mathcal{N}(\\mu_{1}+\\mu_{2}, \\sigma_{1}^{2}+\\sigma_{2}^{2}) \\end{equation}\n","html":"\u003ch2 id=\"standard-normal-density-function\"\u003estandard normal density function\u003c/h2\u003e\n\u003cp\u003eThis is a function used to model many Gaussian distributions.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi(x) = \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{x^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis function is the \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003eCDF\u003c/a\u003e of the \u003ca href=\"#standard-normal\"\u003estandard normal.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e is also symmetric:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi(a) = 1- \\phi(a)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"gaussian-distribution\"\u003eGaussian distribution\u003c/h2\u003e\n\u003ch3 
id=\"constituents\"\u003econstituents\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mu\\) the mean\u003c/li\u003e\n\u003cli\u003e\\(\\sigma\\) the variance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"requirements\"\u003erequirements\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim N(\\mu, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{N}(x \\mid \\mu, \\sigma^{2}) = \\frac{1}{\\sigma\\sqrt{2\\pi}} e^{ \\frac{-(x-u)^{2}}{2 \\sigma^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\phi\\) is the \u003ca href=\"#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eIts \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003eCDF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(x) = \\Phi \\qty( \\frac{x-\\mu}{\\sigma})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can\u0026rsquo;t integrate \\(\\Phi\\) further. 
So we leave it as a special function.\u003c/p\u003e\n\u003cp\u003eAnd its expectations:\u003c/p\u003e\n\u003cp\u003e\\(E(X) = \\mu\\)\u003c/p\u003e\n\u003cp\u003e\\(Var(X) = \\sigma^{2}\\)\u003c/p\u003e\n\u003ch3 id=\"additional-information\"\u003eadditional information\u003c/h3\u003e\n\u003ch4 id=\"linear-transformations-on-gaussian\"\u003elinear transformations on Gaussian\u003c/h4\u003e\n\u003cp\u003eFor some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY = aX + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(X \\sim \\mathcal{N}\\)\u003c/p\u003e\n\u003cp\u003eWe will end up with another normal \\(Y \\sim \\mathcal{N}\\) such that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emean: \\(au + b\\)\u003c/li\u003e\n\u003cli\u003evariance: \\(a^{2}\\sigma^{2}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"standard-normal\"\u003estandard normal\u003c/h4\u003e\n\u003cp\u003eThe standard normal is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nZ=\\mathcal{N}(0,1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emean 0, variance 1. 
You can transform anything into a standard normal via the following linear transform:\u003c/p\u003e\n\u003ch4 id=\"transformation-into-standard-normal--org430b977\"\u003etransformation into \u003ca href=\"#standard-normal\"\u003estandard normal\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim \\mathcal{N}(\\mu, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, we can shift it into a standard normal with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nZ = \\frac{X-\\mu}{\\sigma}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore, we can derive what the \u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003eCDF\u003c/a\u003e of the \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e by shifting it back into the center:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X\u0026lt;x) \\implies P\\qty(\\frac{X-\\mu}{\\theta} \u0026lt; \\frac{x-\\mu}{\\theta}) \\implies P\\qty(Z\u0026lt; \\frac{x-\\mu}{\\theta}) = \\Phi\\qty(\\frac{x-\\mu}{\\theta})\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"normal-maximizes-entropy\"\u003enormal maximizes entropy\u003c/h4\u003e\n\u003cp\u003eno other \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e uses as little \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es to convey as much information\u003c/p\u003e\n\u003ch4 id=\"approximation-of-binomial-distribution--kbhbinomial-distribution-dot-md--with-normal-distribution--kbhnormal-distribution-dot-md\"\u003eapproximation of \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e with \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eYou can use a \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e to approximate \u003ca 
href=\"#approximation-of-binomial-distribution--kbhbinomial-distribution-dot-md--with-normal-distribution--kbhnormal-distribution-dot-md\"\u003ebinomial approximation\u003c/a\u003e. However, be aware of a \u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"adding-gaussian-distribution--kbhgaussian-distribution-dot-md--s\"\u003eadding \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003efor \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX+Y \\sim \\mathcal{N}(\\mu_{1}+\\mu_{2}, \\sigma_{1}^{2}+\\sigma_{2}^{2})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgaussian_distribution/","tags":null,"title":"Gaussian distribution"},{"categories":null,"contents":"The point of Gaussian elimination is to solve/identiy-ify a linear equation. Take, if you have a matrix expression:\n\\begin{equation} Ax = b \\end{equation}\nWe can apply \\(A^{-1}\\) to both side, we then have:\n\\begin{equation} A^{-1}Ax = A^{-1} b \\end{equation}\nApplying the definition of the identity:\n\\begin{equation} Ix = A^{-1}b \\end{equation}\nTherefore, to solve for some \\(A^{-1}\\), which would yield \\(x\\).\n","html":"\u003cp\u003eThe point of \u003ca href=\"/posts/kbhgaussian_elimination/\"\u003eGaussian elimination\u003c/a\u003e is to solve/identiy-ify a linear equation. 
Take, if you have a matrix expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAx = b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can apply \\(A^{-1}\\) to both side, we then have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{-1}Ax = A^{-1} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying the definition of the identity:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nIx = A^{-1}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, to solve for some \\(A^{-1}\\), which would yield \\(x\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgaussian_elimination/","tags":null,"title":"Gaussian elimination"},{"categories":null,"contents":"GDB is gnu\u0026rsquo;s very own debugger\nb main or b 72 (set breakpoint on main function or line 72) r args (run with args) p thingname or p 3+5 (print a variable or return value) p/t print as binary p/x print as hex info (get args, locals) n s continue next, step, continue int test; short lsb = 0xff; test |= lsb printf(\u0026#34;%d\\n\u0026#34;,lsb); int test; ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgdb/\"\u003eGDB\u003c/a\u003e is gnu\u0026rsquo;s very own debugger\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eb main\u003c/code\u003e or \u003ccode\u003eb 72\u003c/code\u003e (set breakpoint on \u003ccode\u003emain\u003c/code\u003e function or line \u003ccode\u003e72\u003c/code\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003er args\u003c/code\u003e (run with args)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ep thingname\u003c/code\u003e or \u003ccode\u003ep 3+5\u003c/code\u003e (print a variable or return value)\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ep/t\u003c/code\u003e print as binary\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ep/x\u003c/code\u003e print as hex\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003einfo\u003c/code\u003e (get args, locals)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003en\u003c/code\u003e 
\u003ccode\u003es\u003c/code\u003e \u003ccode\u003econtinue\u003c/code\u003e next, step, continue\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eshort\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elsb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0xff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e|=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elsb\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;%d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elsb\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhgdb/","tags":null,"title":"GDB"},{"categories":null,"contents":"See inference.\nIn general, the joint probability distribution tables are very hard to solve because it requires\u0026mdash;for instance for binary variables\u0026mdash;requries \\(2^{n}\\) entires, which is a lot.\nhow do you define very large models? how do you perform inference with very large models what about the data can we use to inform the design process \u0026ldquo;If you can tell me a generative story, we can compress our joint probability distribution\u0026rdquo;. Get ready for\u0026hellip;\u0026hellip; inference with causality with Baysian Network.\nIf you can write a program to sample from the joint probability distribution, you have just described the joint.\n\u0026ldquo;Random variables are independent of causal non-descendents given their causal parents\u0026rdquo;. 
d-seperation\n","html":"\u003cp\u003eSee \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIn general, the \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e tables are very hard to solve because it requires\u0026mdash;for instance for binary variables\u0026mdash;requries \\(2^{n}\\) entires, which is a lot.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehow do you define very large models?\u003c/li\u003e\n\u003cli\u003ehow do you perform \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e with very large models\u003c/li\u003e\n\u003cli\u003ewhat about the data can we use to inform the design process\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u0026ldquo;If you can tell me a generative story, we can compress our \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u0026rdquo;. Get ready for\u0026hellip;\u0026hellip; \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e with causality with \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you can write a program to sample from the \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e, you have just described the joint.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Random variables are independent of causal non-descendents given their causal parents\u0026rdquo;. 
\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeneral_inference/","tags":null,"title":"General Inference"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgeneral_relativity/","tags":null,"title":"general relativity"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgenerative_adversarial_network/","tags":null,"title":"Generative Adversarial Network"},{"categories":null,"contents":"a hissyfight with the transformational generative syntax.\ngenerative semantics states that structure is in support of meaning, rather than the other way around that transformational generative syntax suggests.\nThis means that you need to first come up with a meaning then imbew the best structure to support the expression of that meaning.\nThis (along with distributed morphology) is the main opposition of the Lexicalist Hypothesis, and because proof for the existence of semantic primes, also the main opposition of the existence of semantic primes.\n","html":"\u003cp\u003ea hissyfight with the \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgenerative_semantics/\"\u003egenerative semantics\u003c/a\u003e states that \u003cstrong\u003e\u003cstrong\u003estructure\u003c/strong\u003e\u003c/strong\u003e is in support of \u003cstrong\u003e\u003cstrong\u003emeaning\u003c/strong\u003e\u003c/strong\u003e, rather than the other way around that \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e suggests.\u003c/p\u003e\n\u003cp\u003eThis means that you need to first come up with a meaning then imbew the best structure to support the expression of that meaning.\u003c/p\u003e\n\u003cp\u003eThis (along with 
\u003ca href=\"/posts/kbhdistributed_morphology/\"\u003edistributed morphology\u003c/a\u003e) is the main opposition of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalist Hypothesis\u003c/a\u003e, and because \u003ca href=\"/posts/kbhsemantic_primes/#proof-for-the-existence-of-id-4e587814-bfd1-458e-ba5f-67882832107b-semantic-prime-s\"\u003eproof for the existence of semantic primes\u003c/a\u003e, also the main opposition of the existence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenerative_semantics/","tags":null,"title":"generative semantics"},{"categories":null,"contents":"big red Chomskian structuralist warning\nThe principle of generativity states that the goal of a grammar should be to enumerate the span of the structural descriptions of the expressions of a language.\n","html":"\u003cp\u003e\u003cstrong\u003ebig red Chomskian structuralist warning\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThe principle of \u003ca href=\"/posts/kbhgenerativity/\"\u003egenerativity\u003c/a\u003e states that the goal of a \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e should be to enumerate the span of the \u003cem\u003estructural\u003c/em\u003e descriptions of the expressions of a language.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenerativity/","tags":null,"title":"generativity"},{"categories":null,"contents":"We don\u0026rsquo;t want to write the same thing many times; generics minimizes code duplication. Therefore, generics!\nLet\u0026rsquo;s implement a simple swap function:\nvoid swap_ptr_values(void *data1ptr, void *data2ptr, size_t datasize) { } helper functions memcpy Copy datasize bytes worth of memory in the second argument into the first argument. 
The two arguments CANNOT OVERLAP otherwise, you risk UB.\nvoid *memcpy(void *dest, void *src, size_t nbytes) memmove Its memcpy, but it works with overlapping data, and is slower.\nvoid *memove(void *dest, void *src, size_t nbytes) pointer arithmetic with generics Unfortunately, given that we don\u0026rsquo;t know how big a void * pointer is, we can\u0026rsquo;t do pointer arithmetic against it because it still doesn\u0026rsquo;t know how big the pointer is. You can\u0026rsquo;t just add/subtract numbers to char *.\nSo, we actually have to do pointer arithmetic by casting the pointer to a char* which will make pointer arithmetic work at the one-byte level.\nvoid *return_sixth_elem(void *arr) { return (char *)arr + 5; } higher order functions We can pass a function as a parameter.\nbool (*function_name)(int, int) ","html":"\u003cp\u003eWe don\u0026rsquo;t want to write the same thing many times; generics minimizes code duplication. Therefore, generics!\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s implement a simple swap function:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eswap_ptr_values\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata1ptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata2ptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatasize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"helper-functions\"\u003ehelper functions\u003c/h2\u003e\n\u003ch3 id=\"memcpy\"\u003ememcpy\u003c/h3\u003e\n\u003cp\u003eCopy \u003ccode\u003edatasize\u003c/code\u003e bytes worth of memory in the second argument into the first argument. The two arguments \u003cstrong\u003eCANNOT OVERLAP\u003c/strong\u003e otherwise, you risk UB.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ememcpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esrc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enbytes\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"memmove\"\u003ememmove\u003c/h3\u003e\n\u003cp\u003eIts \u003ca href=\"#memcpy\"\u003ememcpy\u003c/a\u003e, but it works with overlapping data, and is slower.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ememove\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edest\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esrc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enbytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"pointer-arithmetic--kbharray-dot-md--with-generics--kbhgeneric-dot-md\"\u003e\u003ca href=\"/posts/kbharray/#pointer-arithmetic\"\u003epointer arithmetic\u003c/a\u003e with \u003ca href=\"/posts/kbhgeneric/\"\u003egenerics\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eUnfortunately, given that we don\u0026rsquo;t know how big a \u003ccode\u003evoid *\u003c/code\u003e pointer is, we can\u0026rsquo;t do \u003ca 
href=\"/posts/kbharray/#pointer-arithmetic\"\u003epointer arithmetic\u003c/a\u003e against it because it still doesn\u0026rsquo;t know how big the pointer is. You can\u0026rsquo;t just add/subtract numbers to \u003ccode\u003echar *\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eSo, we actually have to do \u003ca href=\"/posts/kbharray/#pointer-arithmetic\"\u003epointer arithmetic\u003c/a\u003e by \u003ca href=\"/posts/kbhcasting/\"\u003ecasting\u003c/a\u003e the pointer to a \u003ccode\u003echar*\u003c/code\u003e which will make pointer arithmetic work at the one-byte level.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ereturn_sixth_elem\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"higher-order-functions\"\u003ehigher order functions\u003c/h2\u003e\n\u003cp\u003eWe can pass a function as a parameter.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunction_name\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhgeneric/","tags":null,"title":"generics"},{"categories":null,"contents":" A genetic algorithm is a search heuristic that is inspired by Charles Darwin\u0026rsquo;s theory of natural evolution.\nIts what Grey\u0026rsquo;s video says. The picking and chucking iterative thing.\n","html":"\u003cblockquote\u003e\n\u003cp\u003eA genetic algorithm is a search heuristic that is inspired by Charles Darwin\u0026rsquo;s theory of natural evolution.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003eIts what Grey\u0026rsquo;s video says. 
The picking and chucking iterative thing.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenetic_algorithum/","tags":null,"title":"genetic algorithm"},{"categories":null,"contents":"Genetic Policy Search involves performing Local Policy Search, but starting from a plurality of initial policies and perturbing the top-k most successful ones (called the \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\)) to generate the next set of starting points.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e involves performing \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e, but starting from a plurality of initial policies and perturbing the top-k most successful ones (called the \u0026ldquo;elite samples\u0026rdquo; \\(m_{elite}\\)) to generate the next set of starting points.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenetic_policy_search/","tags":null,"title":"Genetic Policy Search"},{"categories":null,"contents":"GenSLMs are a LLM, but genome sequence\nTake genome sequence Throw transformers at it create \u0026ldquo;semantic embedding\u0026rdquo; autoregression happens This is trained as a foundational model to organize the genomic sequence\nTurns out, the embedding space above can be used to discover relations. 
See proteins can be encoded as hierarchies\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e are a LLM, but genome sequence\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTake genome sequence\u003c/li\u003e\n\u003cli\u003eThrow transformers at it\u003c/li\u003e\n\u003cli\u003ecreate \u0026ldquo;semantic embedding\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eautoregression happens\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is trained as a \u003ca href=\"/posts/kbhfoundational_model/\"\u003efoundational model\u003c/a\u003e to organize the genomic sequence\u003c/p\u003e\n\u003cp\u003eTurns out, the embedding space above can be used to discover relations. See \u003ca href=\"/posts/kbhfundational_models_of_interaction_analysis/#proteins-can-be-encoded-as-hierarchies\"\u003eproteins can be encoded as hierarchies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenslms-1/","tags":null,"title":"GenSLMs"},{"categories":null,"contents":"GenSLMs are a LLM, but genome sequence\nTake genome sequence Throw transformers at it create \u0026ldquo;semantic embedding\u0026rdquo; autoregression happens This is trained as a foundational model to organize the genomic sequence\nTurns out, the embedding space above can be used to discover relations. 
See proteins can be encoded as hierarchies\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgenslms/\"\u003eGenSLMs\u003c/a\u003e are a LLM, but genome sequence\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTake genome sequence\u003c/li\u003e\n\u003cli\u003eThrow transformers at it\u003c/li\u003e\n\u003cli\u003ecreate \u0026ldquo;semantic embedding\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eautoregression happens\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is trained as a \u003ca href=\"/posts/kbhfoundational_model/\"\u003efoundational model\u003c/a\u003e to organize the genomic sequence\u003c/p\u003e\n\u003cp\u003eTurns out, the embedding space above can be used to discover relations. See \u003ca href=\"/posts/kbhfundational_models_of_interaction_analysis/#story-1-proteins-can-be-encoded-as-hierarchies\"\u003eproteins can be encoded as hierarchies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgenslms/","tags":null,"title":"GenSLMs"},{"categories":null,"contents":"A Geometric Brownian Motion is a Brownian Motion with a drift.\nIt is determined by:\n\\begin{equation} \\dd{S_{t}} = \\mu S_{t} \\dd{t} + \\sigma \\dd{S_{t}} \\dd{W_{t}} \\end{equation}\nwhere, \\(S_{t}\\) is a Geometric Brownian Motion, \\(\\mu\\) is its drift, \\(\\sigma\\) the volatility, and \\(W_{t}\\) a centered Brownian Motion.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhgeometric_brownian_motion/\"\u003eGeometric Brownian Motion\u003c/a\u003e is a \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e with a drift.\u003c/p\u003e\n\u003cp\u003eIt is determined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{S_{t}} = \\mu S_{t} \\dd{t} + \\sigma \\dd{S_{t}} \\dd{W_{t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(S_{t}\\) is a \u003ca href=\"/posts/kbhgeometric_brownian_motion/\"\u003eGeometric Brownian Motion\u003c/a\u003e, \\(\\mu\\) is its drift, \\(\\sigma\\) the volatility, and \\(W_{t}\\) a centered \u003ca 
href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeometric_brownian_motion/","tags":null,"title":"Geometric Brownian Motion"},{"categories":null,"contents":"how many times do you have to do the trial to get at least one success\nconstituents \\(p\\) is the probability of one individual success \\(x\\) is the number of trials requirements \\begin{equation} P(X = x) = \\qty(1-p)^{x-1} p \\end{equation}\nwhich represents the probability of getting a success on the \\(x\\) trial\nadditional information ","html":"\u003cp\u003ehow many times do you have to do the trial to get at least one success\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(p\\) is the probability of one individual success\u003c/li\u003e\n\u003cli\u003e\\(x\\) is the number of trials\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nP(X = x) = \\qty(1-p)^{x-1} p\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich represents the probability of getting a success on the \\(x\\) trial\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeometric_random_variable/","tags":null,"title":"geometric distribution"},{"categories":null,"contents":"The geometric multplicity for a given eigenvalue is the dimension of its generated eigenspace.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgeometric_multplicity/\"\u003egeometric multplicity\u003c/a\u003e for a given \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e is the dimension of its generated \u003ca href=\"/posts/kbheigenspace/\"\u003eeigenspace\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgeometric_multplicity/","tags":null,"title":"geometric 
multplicity"},{"categories":null,"contents":"(Py)Torch is a great C++/Python library to construct and train complex neural networks. It has taken over academia over the last few years and is slowly taking over industry. Let\u0026rsquo;s learn about how it works!\nThis document is meant to be read cover-to-cover. It makes NO SENSE unless read like that. I focus on building intuition about why PyTorch works, so we will be writing unorthodox code until the very end where we put all ideas together.\nThe chapters below take you through large chapters in a machine-learning journey. But, to do anything, we need to import some stuff which we will need:\nimport numpy as np import torch Autograd source\nI believe that anybody learning a new ML framework should learn how its differentiation tools work. Yes, this means that we should first understand how it works with not a giant matrix, but with just two simple variables.\nAt the heart of PyTorch is the built-in gradient backpropagation facilities. To demonstrate this, let us create two such variables.\nvar_1 = torch.tensor(3.0, requires_grad=True) var_2 = torch.tensor(4.0, requires_grad=True) (var_1, var_2) (tensor(3., requires_grad=True), tensor(4., requires_grad=True)) There is secretly a lot going on here, so let\u0026rsquo;s dive in. First, just to get the stickler out of the way, torch.tensor (used here) is the generic variable creator, torch.Tensor (capital!) initializes a proper tensor\u0026mdash;which you will never need.\nWhat is a tensor? A tensor is simply a very efficient matrix that can updates its own values dynamically but keep the same variable name. The above commands creates two such tensor, both being 1x1 matrices.\nNote that, for the initial values, I used floats! instead of ints. The above code will crash if you use ints: this is because we want the surface on which the matrix changes value to be smooth to make things like gradient descent to work.\nLastly, we have an argument requires_grad=True. 
This argument tells PyTorch to keep track of the gradient of the tensor. For now, understand this as \u0026ldquo;permit PyTorch to change this variable if needed.\u0026rdquo; More on that in a sec.\nNaturally, if we have two tensors, we would love to multiply them!\nvar_mult = var_1*var_2 var_mult tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) Wouldyalookatthat! Another tensor, with the value \\(12\\).\nNow. Onto the main event. Back-Propagation! The core idea of a neural network is actually quite simple: figure out how much each input parameter (for us var_1, var_2) influence the output, then adjust the inputs accordingly to get the output to be \\(0\\).\nTo see what I mean, recall our output tensor named:\nvar_mult tensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;) How much does changing var_1 and var_2, its inputs, influence this output tensor? This is not immediately obvious, so let\u0026rsquo;s write what we are doing out:\n\\begin{equation} v_1 \\cdot v_2 = v_{m} \\implies 3 \\cdot 4 = 12 \\end{equation}\nwith \\(v_1\\) being var_1, \\(v_2\\) being var_2, and \\(v_{m}\\) being var_mult.\nAs you vary var_1, by what factor does the output change? For instance, if var_1 (the \\(3\\)) suddenly became a \\(2\\), how much less will var_mult be? Well, \\(2\\cdot 4=8\\), the output is exactly \\(4\\) less than before less than before. Hence, var_1 influences the value of var_mult by a factor of \\(4\\); meaning every time you add/subtract \\(1\\) to the value of var_1, var_mult gets added/subtracted by a value of \\(4\\).\nSimilarly, as you vary var_2, by what factor does the output change? For instance, if var_2 (the \\(4\\)) suddenly became a \\(5\\), how much less will var_mult be? Well, \\(3\\cdot 3=5\\), the output is exactly \\(3\\) more than before less than before. 
Hence, var_2 influences the value of var_mult by a factor of \\(3\\); meaning every time you add/subtract \\(1\\) to the value of var_3, var_mult gets added/subtracted by a value of \\(3\\).\nThose of you who have exposure to Multi-Variable Calculus\u0026mdash;this is indeed the same concept as a partial derivative of var_mult w.r.t. var_1 and var_2 for the previous two paragraphs respectively.\nThese relative-change-units (\\(4\\) and \\(3\\)) are called gradients: the factor by which changing any given variable change the output.\nNow, gradient calculation is awfully manual! Surely we don\u0026rsquo;t want to keep track of these tiny rates-of-change ourselves! This is where PyTorch autograd comes in. Autograd is the automated tool that helps you figure out these relative changes! It is built in to all PyTorch tensors.\nIn the previous paragraphs, we figured out the relative influences var_1 and var_2 on var_multi. Now let\u0026rsquo;s ask a computer to give us the same result, in much less time.\nFirst, we will ask PyTorch to calculate gradients for all variables that contributed to var_mult.\nvar_mult.backward() The backward function is a magical function that finds and calculates these relative-change-values of var_multi with respect to every variable that contributed to its values. To view the actual relative values, we will use .grad now on the actual variables:\nvar_1.grad tensor(4.) Recall! We used our big brains to deduce above that changing var_1 by \\(1\\) unit will change var_mult by \\(4\\) units. So this works!\nThe other variables works as expected:\nvar_2.grad tensor(3.) Yayyy! Still what we expected.\nGradient Descent Relative changes are cool, but it isn\u0026rsquo;t all that useful unless we are actually doing some changing. 
We want to use our epic knowledge about the relative influences of var_1 and var_2, to manipulate those variables such that var_mult is the value we want.\nTHE REST OF THIS DOCUMENT IS IN CONSTRUCTION\nimport torch.optim as optim To start an optimizer, you give it all the variables for which it should keep track of updating.\noptim = torch.optim.SGD([var_1, var_2], lr=1e-2, momentum=0.9) And then, to update gradients, you just have to:\noptim.step() # IMPORTANT optim.zero_grad() What\u0026rsquo;s that zero_grad? That clears the gradients from the variables (after applying them with .step()) so that the next update doesn\u0026rsquo;t influence the current one.\nYour First Neural Network import torch.nn as nn Layers m = nn.Linear(20, 30) input = torch.randn(128, 20) output = m(input) output, output.size() Explain what the \\(20, 30\\) means.\nOk one layer is just lame. What if you want a bunch of layers?\nm1 = nn.Linear(20, 30) m2 = nn.Linear(30, 30) m3 = nn.Linear(30, 40) input = torch.randn(128, 20) # function call syntax! Functions call from rigth to left! output = m3(m2(m1(input))) output, output.size() And guess what? If you want to adjust the values here, you would just do:\nm1 = nn.Linear(20, 30) m2 = nn.Linear(30, 30) m3 = nn.Linear(30, 40) input = torch.randn(128, 20) # function call syntax! Functions call from rigth to left! output = m3(m2(m1(input))) (output.sum() - 12).backward() None But wait! What are the options you give to your optimizer?\noptim = torch.optim.SGD([m1.weight, m1.bias ... ... ], lr=1e-2, momentum=0.9) That\u0026rsquo;s a lot of variables!! Each linear layer has a \\(m\\) and a \\(b\\) (from \\(y=mx+b\\) fame), and you will end up with a bajillon one of those! Also, that function call syntax, chaining one layer after another, is so knarly! Can we do better? Yes.\nAn Honest-to-Goodness Neural Network PyTorch makes the module framework to make model creator\u0026rsquo;s lives easier. 
This is the best practice for creating a neural network.\nLet\u0026rsquo;s replicate the example above with the new module framework:\nclass MyNetwork(nn.Module): def __init__(self): # important: runs early calls to make sure that # the module is correct super().__init__() # we declare our layers. We don\u0026#39;t use them yet. self.m1 = nn.Linear(20,30) self.m2 = nn.Linear(30,30) self.m3 = nn.Linear(30,40) # this is a special function that is called when # the module is called def forward(self, x): # we want to pass our input through to every layer # like we did before, but now more declaritively x = self.m1(x) x = self.m2(x) x = self.m3(x) return x Explain all of this.\nBut now, we essentially built our entire network in own \u0026ldquo;layer\u0026rdquo; (actually we literally did, all =Layer=s are just =torch.Module=s) that does the job of all other layers acting together. To use it, we just:\nmy_network = MyNetwork() input = torch.randn(128, 20) # function call syntax! Functions call from rigth to left! output = my_network(input) output tensor([[-0.1694, 0.0095, 0.4306, ..., 0.1580, 0.2644, 0.1509], [-0.2346, -0.0269, -0.1191, ..., 0.0229, -0.0819, -0.1452], [-0.4871, -0.2868, -0.2488, ..., 0.0637, 0.1832, 0.0619], ..., [-0.1323, 0.2531, -0.1086, ..., 0.0975, 0.0426, -0.2092], [-0.4765, 0.1441, -0.0520, ..., 0.2364, 0.0253, -0.1914], [-0.5044, -0.3263, 0.3102, ..., 0.1938, 0.1427, -0.0587]], grad_fn=\u0026lt;AddmmBackward0\u0026gt;) But wait! What are the options you give to your optimizer? Surely you don\u0026rsquo;t have to pass my_network.m1.weight, my_network.m1.bias, etc. etc. to the optimizer, right?\nYou don\u0026rsquo;t. One of the things that the super().__init__() did was to register a special function to your network class that keeps track of everything to optimize for. 
So now, to ask the optimizer to update the entire network, you just have to write:\noptim = torch.optim.SGD(my_network.parameters(), lr=1e-2, momentum=0.9) optim SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 0.01 maximize: False momentum: 0.9 nesterov: False weight_decay: 0 ) TODO make students recall original backprop example, backprope and step and zero_grad with this new optim.\nLook! Optimizing an entire network works in the exact same way as optimizing two lone variables.\nPutting it together TODO\ntraining loop (zero first, call model, get diff/loss, .backward(), .step()) best practices saving and restoring models GPU ","html":"\u003cp\u003e(Py)Torch is a great C++/Python library to construct and train complex neural networks. It has \u003ca href=\"https://paperswithcode.com/trends\"\u003etaken over academia\u003c/a\u003e over the last few years and is slowly taking over industry. Let\u0026rsquo;s learn about how it works!\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThis document is meant to be read cover-to-cover. It makes NO SENSE unless read like that. I focus on building intuition about why PyTorch works, so we will be writing unorthodox code until the very end where we put all ideas together.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThe chapters below take you through large chapters in a machine-learning journey. 
But, to do anything, we need to import some stuff which we will need:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"autograd\"\u003eAutograd\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://pytorch.org/tutorials/beginner/basics/autogradqs_tutorial.html\"\u003esource\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eI believe that anybody learning a new ML framework should learn how its differentiation tools work. Yes, this means that we should first understand how it works with not a giant matrix, but with just two simple variables.\u003c/p\u003e\n\u003cp\u003eAt the heart of PyTorch is the built-in gradient backpropagation facilities. 
To demonstrate this, let us create two such variables.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(3., requires_grad=True), tensor(4., requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThere is secretly a lot going on here, so let\u0026rsquo;s dive in. First, just to get the stickler out of the way, \u003ccode\u003etorch.tensor\u003c/code\u003e (used here) is the generic variable creator, \u003ccode\u003etorch.Tensor\u003c/code\u003e (capital!) initializes a proper tensor\u0026mdash;which you will \u003cstrong\u003enever\u003c/strong\u003e need.\u003c/p\u003e\n\u003cp\u003eWhat is a \u003ccode\u003etensor\u003c/code\u003e? A \u003ccode\u003etensor\u003c/code\u003e is simply a very efficient matrix that can updates its own values dynamically but keep the same variable name. The above commands creates two such \u003ccode\u003etensor\u003c/code\u003e, both being \u003ccode\u003e1x1\u003c/code\u003e matrices.\u003c/p\u003e\n\u003cp\u003eNote that, for the initial values, I used \u003cem\u003efloats!\u003c/em\u003e instead of \u003cem\u003eints\u003c/em\u003e. 
The above code will crash if you use ints: this is because we want the surface on which the matrix changes value to be smooth to make things like gradient descent to work.\u003c/p\u003e\n\u003cp\u003eLastly, we have an argument \u003ccode\u003erequires_grad=True\u003c/code\u003e. This argument tells PyTorch to keep track of the gradient of the \u003ccode\u003etensor\u003c/code\u003e. For now, understand this as \u0026ldquo;permit PyTorch to change this variable if needed.\u0026rdquo; More on that in a sec.\u003c/p\u003e\n\u003cp\u003eNaturally, if we have two tensors, we would love to multiply them!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWouldyalookatthat! Another tensor, with the value \\(12\\).\u003c/p\u003e\n\u003cp\u003eNow. Onto the main event. Back-Propagation! 
The core idea of a neural network is actually quite simple: figure out how much each input parameter (for us \u003ccode\u003evar_1\u003c/code\u003e, \u003ccode\u003evar_2\u003c/code\u003e) influence the output, then adjust the inputs accordingly to get the output to be \\(0\\).\u003c/p\u003e\n\u003cp\u003eTo see what I mean, recall our output \u003ccode\u003etensor\u003c/code\u003e named:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(12., grad_fn=\u0026lt;MulBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eHow much does changing \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e, its inputs, influence this output \u003ccode\u003etensor\u003c/code\u003e? This is not immediately obvious, so let\u0026rsquo;s write what we are doing out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_1 \\cdot v_2 = v_{m} \\implies 3 \\cdot 4 = 12\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(v_1\\) being \u003ccode\u003evar_1\u003c/code\u003e, \\(v_2\\) being \u003ccode\u003evar_2\u003c/code\u003e, and \\(v_{m}\\) being \u003ccode\u003evar_mult\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eAs you vary \u003ccode\u003evar_1\u003c/code\u003e, by \u003cstrong\u003ewhat factor\u003c/strong\u003e does the output change? 
For instance, if \u003ccode\u003evar_1\u003c/code\u003e (the \\(3\\)) suddenly became a \\(2\\), how much \u003cem\u003eless\u003c/em\u003e will \u003ccode\u003evar_mult\u003c/code\u003e be? Well, \\(2\\cdot 4=8\\), the output is exactly \\(4\\) less than before less than before. Hence, \u003ccode\u003evar_1\u003c/code\u003e influences the value of \u003ccode\u003evar_mult\u003c/code\u003e by a factor of \\(4\\); meaning every time you add/subtract \\(1\\) to the value of \u003ccode\u003evar_1\u003c/code\u003e, \u003ccode\u003evar_mult\u003c/code\u003e gets added/subtracted by a value of \\(4\\).\u003c/p\u003e\n\u003cp\u003eSimilarly, as you vary \u003ccode\u003evar_2\u003c/code\u003e, by what factor does the output change? For instance, if \u003ccode\u003evar_2\u003c/code\u003e (the \\(4\\)) suddenly became a \\(5\\), how much \u003cem\u003eless\u003c/em\u003e will \u003ccode\u003evar_mult\u003c/code\u003e be? Well, \\(3\\cdot 3=5\\), the output is exactly \\(3\\) more than before less than before. Hence, \u003ccode\u003evar_2\u003c/code\u003e influences the value of \u003ccode\u003evar_mult\u003c/code\u003e by a factor of \\(3\\); meaning every time you add/subtract \\(1\\) to the value of \u003ccode\u003evar_3\u003c/code\u003e, \u003ccode\u003evar_mult\u003c/code\u003e gets added/subtracted by a value of \\(3\\).\u003c/p\u003e\n\u003cp\u003eThose of you who have exposure to Multi-Variable Calculus\u0026mdash;this is indeed the same concept as a partial derivative of \u003ccode\u003evar_mult\u003c/code\u003e w.r.t. \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e for the previous two paragraphs respectively.\u003c/p\u003e\n\u003cp\u003eThese relative-change-units (\\(4\\) and \\(3\\)) are called \u003cstrong\u003egradients\u003c/strong\u003e: the factor by which changing any given variable change the output.\u003c/p\u003e\n\u003cp\u003eNow, gradient calculation is awfully manual! 
Surely we don\u0026rsquo;t want to keep track of these tiny rates-of-change ourselves! This is where PyTorch autograd comes in. Autograd is the automated tool that helps you figure out these relative changes! It is built in to all PyTorch tensors.\u003c/p\u003e\n\u003cp\u003eIn the previous paragraphs, we figured out the relative influences \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e on \u003ccode\u003evar_multi\u003c/code\u003e. Now let\u0026rsquo;s ask a computer to give us the same result, in much less time.\u003c/p\u003e\n\u003cp\u003eFirst, we will ask PyTorch to calculate gradients for all variables that contributed to \u003ccode\u003evar_mult\u003c/code\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_mult\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe \u003ccode\u003ebackward\u003c/code\u003e function is a magical function that finds and calculates these relative-change-values of \u003ccode\u003evar_multi\u003c/code\u003e with respect to every variable that contributed to its values. 
To view the actual relative values, we will use \u003ccode\u003e.grad\u003c/code\u003e now on the actual variables:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egrad\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(4.)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall! We used our big brains to deduce above that changing \u003ccode\u003evar_1\u003c/code\u003e by \\(1\\) unit will change \u003ccode\u003evar_mult\u003c/code\u003e by \\(4\\) units. 
So this works!\u003c/p\u003e\n\u003cp\u003eThe other variables works as expected:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egrad\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor(3.)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYayyy! Still what we expected.\u003c/p\u003e\n\u003ch2 id=\"gradient-descent\"\u003eGradient Descent\u003c/h2\u003e\n\u003cp\u003eRelative changes are cool, but it isn\u0026rsquo;t all that useful unless we are actually doing some changing. 
We want to use our epic knowledge about the relative influences of \u003ccode\u003evar_1\u003c/code\u003e and \u003ccode\u003evar_2\u003c/code\u003e, to manipulate those variables such that \u003ccode\u003evar_mult\u003c/code\u003e is the value we want.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003e\u003cstrong\u003eTHE REST OF THIS DOCUMENT IS IN CONSTRUCTION\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.optim\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTo start an optimizer, you give it all the variables for which it should keep track of updating.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003evar_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emomentum\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, to update gradients, you just have to:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# IMPORTANT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhat\u0026rsquo;s that 
\u003ccode\u003ezero_grad\u003c/code\u003e? That clears the gradients from the variables (after applying them with \u003ccode\u003e.step()\u003c/code\u003e) so that the next update doesn\u0026rsquo;t influence the current one.\u003c/p\u003e\n\u003ch2 id=\"your-first-neural-network\"\u003eYour First Neural Network\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch.nn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"layers\"\u003eLayers\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExplain what the \\(20, 30\\) means.\u003c/p\u003e\n\u003cp\u003eOk one layer is just lame. 
What if you want a bunch of layers?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e40\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# function call syntax! 
Functions call from rigth to left!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd guess what? 
If you want to adjust the values here, you would just do:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e40\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# function call syntax! 
Functions call from rigth to left!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBut wait! 
What are the options you give to your optimizer?\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eweight\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e...\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e...\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emomentum\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThat\u0026rsquo;s a \u003cem\u003elot of 
variables!!\u003c/em\u003e Each linear layer has a \\(m\\) and a \\(b\\) (from \\(y=mx+b\\) fame), and you will end up with a bajillon one of those! Also, that function call syntax, chaining one layer after another, is so knarly! Can we do better? Yes.\u003c/p\u003e\n\u003ch3 id=\"an-honest-to-goodness-neural-network\"\u003eAn Honest-to-Goodness Neural Network\u003c/h3\u003e\n\u003cp\u003ePyTorch makes the \u003ccode\u003emodule\u003c/code\u003e framework to make model creator\u0026rsquo;s lives easier. This is the best practice for creating a neural network.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s replicate the example above with the new \u003ccode\u003emodule\u003c/code\u003e framework:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eModule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# important: runs early calls to make sure 
that\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is correct\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esuper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e__init__\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we declare our layers. We don\u0026#39;t use them yet.\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLinear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e30\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e40\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a special function that is called when\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the module is called\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eforward\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to pass our input through to every layer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# like we did before, but now more declaritively\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eself\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExplain all of this.\u003c/p\u003e\n\u003cp\u003eBut now, we essentially built our entire network in own \u0026ldquo;layer\u0026rdquo; (actually we literally did, all =Layer=s are just =torch.Module=s) that does the job of all other layers acting together. To use it, we just:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eMyNetwork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e128\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e20\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# function call syntax! Functions call from rigth to left!\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoutput\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etensor([[-0.1694, 0.0095, 0.4306, ..., 0.1580, 0.2644, 0.1509],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.2346, -0.0269, -0.1191, ..., 0.0229, -0.0819, -0.1452],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.4871, -0.2868, -0.2488, ..., 0.0637, 0.1832, 0.0619],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.1323, 0.2531, -0.1086, ..., 
0.0975, 0.0426, -0.2092],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.4765, 0.1441, -0.0520, ..., 0.2364, 0.0253, -0.1914],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [-0.5044, -0.3263, 0.3102, ..., 0.1938, 0.1427, -0.0587]],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e grad_fn=\u0026lt;AddmmBackward0\u0026gt;)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBut wait! What are the options you give to your optimizer? Surely you don\u0026rsquo;t have to pass \u003ccode\u003emy_network.m1.weight\u003c/code\u003e, \u003ccode\u003emy_network.m1.bias\u003c/code\u003e, etc. etc. to the optimizer, right?\u003c/p\u003e\n\u003cp\u003eYou don\u0026rsquo;t. One of the things that the \u003ccode\u003esuper().__init__()\u003c/code\u003e did was to register a special function to your network class that keeps track of everything to optimize for. 
So now, to ask the optimizer to update the entire network, you just have to write:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emy_network\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparameters\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emomentum\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" 
data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 0.01\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTODO make students recall original backprop example, backprope and step and zero_grad with this new optim.\u003c/p\u003e\n\u003cp\u003eLook! 
Optimizing an entire network works in the \u003cem\u003eexact same way\u003c/em\u003e as optimizing two lone variables.\u003c/p\u003e\n\u003ch2 id=\"putting-it-together\"\u003ePutting it together\u003c/h2\u003e\n\u003cp\u003eTODO\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etraining loop (zero first, call model, get diff/loss, .backward(), .step())\u003c/li\u003e\n\u003cli\u003ebest practices\u003c/li\u003e\n\u003cli\u003esaving and restoring models\u003c/li\u003e\n\u003cli\u003eGPU\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgetting_started_with_pytorch/","tags":["guide"],"title":"Getting Started with PyTorch"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgolden_gate_bridge/","tags":null,"title":"Golden Gate Bridge"},{"categories":null,"contents":" The Mandarin (Chinese fusion): 1029 El Camino Real, Menlo Park, CA 94025 The Kitchen (very classical cantonese): 279 El Camino Real, Millbrae, CA 94030 Left Bank (sit down french): 635 Santa Cruz Ave, Menlo Park, CA 94025 Jeffrey\u0026rsquo;s Hamburgers (chill, high quality American diner): 888 El Camino Real, Menlo Park, CA 94025 Tai Pan (formal Chinese): 560 Waverley St, Palo Alto, CA 94301 ","html":"\u003cul\u003e\n\u003cli\u003eThe Mandarin (Chinese fusion): 1029 El Camino Real, Menlo Park, CA 94025\u003c/li\u003e\n\u003cli\u003eThe Kitchen (very classical cantonese): 279 El Camino Real, Millbrae, CA 94030\u003c/li\u003e\n\u003cli\u003eLeft Bank (sit down french): 635 Santa Cruz Ave, Menlo Park, CA 94025\u003c/li\u003e\n\u003cli\u003eJeffrey\u0026rsquo;s Hamburgers (chill, high quality American diner): 888 El Camino Real, Menlo Park, CA 94025\u003c/li\u003e\n\u003cli\u003eTai Pan (formal Chinese): 560 Waverley St, Palo Alto, CA 94301\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgood_restaurants_in_the_bay_area/","tags":null,"title":"good restaurants in the Bay 
Area"},{"categories":null,"contents":"a:2:{i:0;s:2:\u0026ldquo;f2\u0026rdquo;;i:1;s:2:\u0026ldquo;f3\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;e2\u0026rdquo;;i:1;s:2:\u0026ldquo;e3\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;e1\u0026rdquo;;i:1;s:2:\u0026ldquo;e2\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;b2\u0026rdquo;;i:1;s:2:\u0026ldquo;b3\u0026rdquo;;}\na:2:{i:0;s:2:\u0026ldquo;c2\u0026rdquo;;i:1;s:2:\u0026ldquo;d8\u0026rdquo;;}\n","html":"\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;f2\u0026rdquo;;i:1;s:2:\u0026ldquo;f3\u0026rdquo;;}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-30_21-09-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;e2\u0026rdquo;;i:1;s:2:\u0026ldquo;e3\u0026rdquo;;}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-30_21-09-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;e1\u0026rdquo;;i:1;s:2:\u0026ldquo;e2\u0026rdquo;;}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-30_21-10-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;b2\u0026rdquo;;i:1;s:2:\u0026ldquo;b3\u0026rdquo;;}\u003c/p\u003e\n\u003cp\u003ea:2:{i:0;s:2:\u0026ldquo;c2\u0026rdquo;;i:1;s:2:\u0026ldquo;d8\u0026rdquo;;}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgoogle_nerd_snipe/","tags":null,"title":"Google Nerd Snipe"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgorup/","tags":null,"title":"gorup"},{"categories":null,"contents":"OMG its Gram-Schmidtting!!! Ok so like orthonormal basis are so nice, don\u0026rsquo;t you want to make them out of boring-ass normal basis? Of course you do.\nSuppose \\(v_1, \u0026hellip; v_{m}\\) is a linearly independent list in \\(V\\). 
Now let us define some \\(e_{1} \u0026hellip; e_{m}\\) using the procedure below such that \\(e_{j}\\) are orthonormal and, importantly:\n\\begin{equation} span(v_1, \\dots, v_{m}) = span(e_{1}, \\dots, e_{m}) \\end{equation}\nThe Procedure We do this process inductively. Let:\n\\begin{equation} e_1 = \\frac{v_1}{\\|v_1\\|} \\end{equation}\nAnd then, let:\n\\begin{equation} e_{j} = \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|} \\end{equation}\nThat is, for each vector \\(v_{j}\\), we subtract out the component which it is already parallel (i.e. not orthogonal, i.e. already accounted by) each other already orthonormal basis. Then we norm the whole thing as lengths don\u0026rsquo;t matter and we desire norm-1.\nThe Proof We Prove this by induction.\nBase case: \\(j=1\\)\n\\(span (v_1) = span (e_{1})\\) because, by definition above, \\(e_1 = \\frac{v_1}{\\|v_1\\|}\\). And hence, they are multiples of each other and hence has the same span.\nInduction: at \\(1\u0026lt;j \u0026lt;m\\)\nSo, we have that:\n\\begin{equation} span (v_1, \\dots, v_{j-1}) = span(e_1, \\dots, e_{j-1}) \\end{equation}\nLet now \\(v_{j} \\not \\in span(v_1, \u0026hellip;, v_{j-1})\\) (because \\(v_{j}\\) are linearly independent). We have then \\(v_{j} \\not \\in span(e_1, \u0026hellip;, e_{j-1})\\), given the two spans are equal.\nHence, \\(v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \u0026hellip; - \\langle v_{j}, e_{j-1} \\rangle e_{j-1} \\neq 0\\) because otherwise \\(v_{j}\\) would be writable as a linearly combinations of \\(e_{1}, \u0026hellip;, e_{j-1}\\) and would then be in the span thereof, which we know isn\u0026rsquo;t true.\nDividing a vector by its norm produces a norm-1 vector; so we have now that \\(e_{j}\\) would be a norm-1 vector.\nNow, let \\(k \u0026lt; j\\). 
We desire that \\(\\langle e_{j}, e_{k} \\rangle = 0\\) because we want our new \\(e_{j}\\) to be orthogonal to every other existing vector.\nWe have:\n\\begin{equation} \\langle e_{j}, e_{k} \\rangle = \\langle \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|}, e_{k} \\rangle \\end{equation}\nNow, if we parcel out the large fraction the bottom, and apply additivity in the first slot, we will note that all of the \\(\\langle e_{i \\neq k}, e_{k} \\rangle=0\\) as everything already on this list is orthonormal. Finally, then we have only:\n\\begin{equation} \\langle v_{j}, e_{k} \\rangle - \\langle v_{k}, e_{k} \\rangle \\langle e_{k}, e_{k} \\rangle \\end{equation}\non top, which conveniently equals \\(0\\). Meaning \\(\\langle e_{j}, e_{k} \\rangle= 0\\), so \\(e_{k}\\) is indeed orthogonal to the rest of the list.\nBy definition of \\(e_{j}\\) above, \\(v_{j}\\) can be written as a linear combination of \\(e_{1}, \u0026hellip; e_{j-1}\\) as well as a bare \\(e_{j}\\). Therefore:\n\\begin{equation} span(v_1, \\dots, v_{j}) \\subset span (e_1, \\dots e_{j}) \\end{equation}\nOf course, both subspaces are the same dimension and so extending the basis to \\(v_{1} \u0026hellip; v_{j}\\) to \\(e_{1}, \u0026hellip; e_{j}\\) would be trivial. So they are equal. Phew. \\(\\blacksquare\\)\nCorollary Results Every Inner Product Space has an orthonormal basis Take any basis, Gram-Schmidt it, orthonormal list of the right length is a basis. 
\\(\\blacksquare\\)\nOrthonormal list extended to orthonormal basis Based on the procedure above, Gram-Schmidt does nothing to already orthonormal vectors: the inner products between any yet-to-be-reghramschmidt\u0026rsquo;d already orthonormal vector will be \\(0\\), so nothing will be subtracted.\nSo, suppose you have an orthonormal list \\(e_1, \u0026hellip;, e_{m}\\) in \\(V\\), which because orthonormal list is linearly independent, can be Gram-Schmidt\u0026rsquo;d to the same thing.\nAs a linearly independent list expends to a basis, go do that. Now Gram-Schmidtting this new thing won\u0026rsquo;t change \\(e_1, \u0026hellip; e_{m}\\) at all, but will give you extra orthonormal vectors to them which all form the basis as its the right length.\nOrthonormal upper-triangular matrix basis exists if normal upper-triangular exists Note that Gram-Schmidtting doesn\u0026rsquo;t actually change the span; meaning, if you have an upper-triangular matrix, you must have each \\(span(v_1, \u0026hellip;v_{j})\\) be invariant under \\(T\\).\nNow, recall that Gram-Schmidtting doesn\u0026rsquo;t actually change span; therefore, if each \\(span (v_1, \u0026hellip; v_{j})\\) is invariant under \\(T\\), then each \\(span(e_1, \u0026hellip; e_{j}) = span(v_1, \u0026hellip; v_{j})\\) after Gram-Schmidtting is still invariant under \\(T\\). So we can actually build an upper-triangular matrix out of the orthonormalized matrix as well.\nSchur\u0026rsquo;s Theorem Support \\(V\\) is a finite-dimensional complex vector space, then \\(T\\) has an upper-triangular matrix w.r.t. an orthonormal basis of \\(V\\).\nevery complex operator has an upper-triangular matrix; and orthonormal upper-triangular matrix basis exists if normal upper-triangular exists.\n","html":"\u003cp\u003eOMG its \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidtting\u003c/a\u003e!!! 
Ok so like \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e are so nice, don\u0026rsquo;t you want to make them out of boring-ass normal \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e? Of course you do.\u003c/p\u003e\n\u003cp\u003eSuppose \\(v_1, \u0026hellip; v_{m}\\) is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\). Now let us define some \\(e_{1} \u0026hellip; e_{m}\\) using the procedure below such that \\(e_{j}\\) are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e and, importantly:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nspan(v_1, \\dots, v_{m}) = span(e_{1}, \\dots, e_{m})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"the-procedure\"\u003eThe Procedure\u003c/h2\u003e\n\u003cp\u003eWe do this process inductively. Let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne_1 = \\frac{v_1}{\\|v_1\\|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then, let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne_{j} = \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots -\\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is, for each vector \\(v_{j}\\), we subtract out the component which it is already parallel (i.e. not \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e, i.e. already accounted by) each other already \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e basis. 
Then we norm the whole thing as lengths don\u0026rsquo;t matter and we desire \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e-1.\u003c/p\u003e\n\u003ch2 id=\"the-proof\"\u003eThe Proof\u003c/h2\u003e\n\u003cp\u003eWe Prove this by induction.\u003c/p\u003e\n\u003cp\u003eBase case: \\(j=1\\)\u003c/p\u003e\n\u003cp\u003e\\(span (v_1) = span (e_{1})\\) because, by definition above, \\(e_1 = \\frac{v_1}{\\|v_1\\|}\\). And hence, they are multiples of each other and hence has the same span.\u003c/p\u003e\n\u003cp\u003eInduction: at \\(1\u0026lt;j \u0026lt;m\\)\u003c/p\u003e\n\u003cp\u003eSo, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nspan (v_1, \\dots, v_{j-1}) = span(e_1, \\dots, e_{j-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet now \\(v_{j} \\not \\in span(v_1, \u0026hellip;, v_{j-1})\\) (because \\(v_{j}\\) are linearly independent). We have then \\(v_{j} \\not \\in span(e_1, \u0026hellip;, e_{j-1})\\), given the two spans are equal.\u003c/p\u003e\n\u003cp\u003eHence, \\(v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \u0026hellip; - \\langle v_{j}, e_{j-1} \\rangle e_{j-1} \\neq 0\\) because otherwise \\(v_{j}\\) would be writable as a linearly combinations of \\(e_{1}, \u0026hellip;, e_{j-1}\\) and would then be in the span thereof, which we know isn\u0026rsquo;t true.\u003c/p\u003e\n\u003cp\u003eDividing a vector by its \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e produces a \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e-1 vector; so we have now that \\(e_{j}\\) would be a \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e-1 vector.\u003c/p\u003e\n\u003cp\u003eNow, let \\(k \u0026lt; j\\). 
We desire that \\(\\langle e_{j}, e_{k} \\rangle = 0\\) because we want our new \\(e_{j}\\) to be \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to every other existing vector.\u003c/p\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle e_{j}, e_{k} \\rangle = \\langle \\frac{v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1} - \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}}{\\|v_{j} - \\langle v_{j}, e_{1} \\rangle e_{1}- \\dots - \\langle v_{j}, e_{k} \\rangle e_{k} - \\dots \\langle v_{j}, e_{j-1} \\rangle e_{j-1}\\|}, e_{k} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, if we parcel out the large fraction the bottom, and apply additivity in the first slot, we will note that all of the \\(\\langle e_{i \\neq k}, e_{k} \\rangle=0\\) as everything already on this list is orthonormal. Finally, then we have only:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v_{j}, e_{k} \\rangle - \\langle v_{k}, e_{k} \\rangle \\langle e_{k}, e_{k} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eon top, which conveniently equals \\(0\\). Meaning \\(\\langle e_{j}, e_{k} \\rangle= 0\\), so \\(e_{k}\\) is indeed \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to the rest of the list.\u003c/p\u003e\n\u003cp\u003eBy definition of \\(e_{j}\\) above, \\(v_{j}\\) can be written as a linear combination of \\(e_{1}, \u0026hellip; e_{j-1}\\) as well as a bare \\(e_{j}\\). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nspan(v_1, \\dots, v_{j}) \\subset span (e_1, \\dots e_{j})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, both subspaces are the same dimension and so extending the basis to \\(v_{1} \u0026hellip; v_{j}\\) to \\(e_{1}, \u0026hellip; e_{j}\\) would be trivial. So they are equal. Phew. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"corollary-results\"\u003eCorollary Results\u003c/h2\u003e\n\u003ch3 id=\"every-inner-product-space--kbhinner-product-dot-md--has-an-orthonormal-basis--kbhorthonormal-basis-dot-md\"\u003eEvery \u003ca href=\"/posts/kbhinner_product/#inner-product-space\"\u003eInner Product Space\u003c/a\u003e has an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eTake any \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt it\u003c/a\u003e, \u003ca href=\"/posts/kbhorthonormal_basis/#orthonormal-list-of-the-right-length-is-a-basis\"\u003eorthonormal list of the right length is a basis\u003c/a\u003e. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"orthonormal-list-extended-to-orthonormal-basis--kbhorthonormal-basis-dot-md\"\u003eOrthonormal list extended to \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eBased on the procedure above, \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e does nothing to already orthonormal vectors: the inner products between any yet-to-be-reghramschmidt\u0026rsquo;d already orthonormal vector will be \\(0\\), so nothing will be subtracted.\u003c/p\u003e\n\u003cp\u003eSo, suppose you have an \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e list \\(e_1, \u0026hellip;, e_{m}\\) in \\(V\\), which because \u003ca href=\"/posts/kbhorthonormal/#orthonormal-list-is-linearly-independent\"\u003eorthonormal list is linearly independent\u003c/a\u003e, can be \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003e\u0026rsquo;d to the same thing.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003ea linearly independent list 
expends to a basis\u003c/a\u003e, go do that. Now \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting this new thing won\u0026rsquo;t change \\(e_1, \u0026hellip; e_{m}\\) at all, but will give you extra \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e vectors to them which all form the basis as its the right length.\u003c/p\u003e\n\u003ch3 id=\"orthonormal-upper-triangular-matrix-basis-exists-if-normal-upper-triangular-exists\"\u003eOrthonormal upper-triangular matrix basis exists if normal upper-triangular exists\u003c/h3\u003e\n\u003cp\u003eNote that \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting doesn\u0026rsquo;t actually change the span; meaning, if you have an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e, you must have each \\(span(v_1, \u0026hellip;v_{j})\\) be invariant under \\(T\\).\u003c/p\u003e\n\u003cp\u003eNow, recall that \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting doesn\u0026rsquo;t actually change span; therefore, if each \\(span (v_1, \u0026hellip; v_{j})\\) is invariant under \\(T\\), then each \\(span(e_1, \u0026hellip; e_{j}) = span(v_1, \u0026hellip; v_{j})\\) after \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting is \u003cem\u003estill\u003c/em\u003e \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\). So we can actually build an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e out of the \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003eized matrix as well.\u003c/p\u003e\n\u003ch3 id=\"schur-s-theorem\"\u003eSchur\u0026rsquo;s Theorem\u003c/h3\u003e\n\u003cp\u003eSupport \\(V\\) is a finite-dimensional complex vector space, then \\(T\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e w.r.t. 
an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/#every-complex-operator-has-an-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix\"\u003eevery complex operator has an upper-triangular matrix\u003c/a\u003e; and \u003ca href=\"#orthonormal-upper-triangular-matrix-basis-exists-if-normal-upper-triangular-exists\"\u003eorthonormal upper-triangular matrix basis exists if normal upper-triangular exists\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgram_schmidt/","tags":null,"title":"Gram-Schmidt"},{"categories":null,"contents":"A grammar is a set of logical rules that form a language. (more precisely defined in goals of a grammar)\ngoals of a grammar explain natural languages in syntax + semantics have described algebras which can be used to evolve the syntax \u0026hellip;that describe the grammatical operations The formalism here is that a rigorous grammar should have:\nsemantic accountability generativity ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e is a set of logical rules that form a \u003ca href=\"/posts/kbhlanguage/\"\u003elanguage\u003c/a\u003e. 
(more precisely defined in \u003ca href=\"#goals-of-a-grammar--kbhgrammar-dot-md\"\u003egoals of a grammar\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"goals-of-a-grammar--kbhgrammar-dot-md\"\u003egoals of a \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eexplain natural languages in syntax + semantics\u003c/li\u003e\n\u003cli\u003ehave described algebras which can be used to evolve the syntax\u003c/li\u003e\n\u003cli\u003e\u0026hellip;that describe the grammatical operations\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe formalism here is that a rigorous \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e should have:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsemantic_accountability/\"\u003esemantic accountability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenerativity/\"\u003egenerativity\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgrammar/","tags":null,"title":"grammar"},{"categories":null,"contents":"Using constructor theory to test whether or not gravity in quantum theory is just entanglement.\nThis solves problem with gravity.\n","html":"\u003cp\u003eUsing \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor theory\u003c/a\u003e to test whether or not gravity in \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e is just \u003ca href=\"/posts/kbhentangled/\"\u003eentanglement\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis solves \u003ca href=\"/posts/kbhproblem_with_gravity/\"\u003eproblem with gravity\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgravitational_entanglement/","tags":null,"title":"gravitational entanglement"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgravitational_potential_energy/","tags":null,"title":"gravitational potential 
energy"},{"categories":null,"contents":"The Great Depression is a period of time of American depression.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e is a period of time of American depression.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgreat_depression/","tags":null,"title":"Great Depression"},{"categories":null,"contents":"Let \\(a,b \\in \\mathbb{Z}\\), not both zero. \\(\\gcd (a,b)\\) is the greatest value \\(d\\) such that \\(d|a\\), \\(d|b\\).\ngreatest common divisor is a linear combination We can write \\(\\gcd (a,b) = as+bt\\) for some \\(s,t \\in \\mathbb{Z}\\).\nLet us define:\n\\begin{equation} S = \\{am + bn: m,n \\in \\mathbb{Z}, am+bn \u0026gt; 0\\} \\end{equation}\nWe will first check that \\(S\\) is non-empty. To do so, let \\(a\\) be negative and \\(b\\) be positive. Then, set \\(m = -1\\), \\(n = 1\\). We can see that \\(am + bn \u0026gt; 0\\), satisfying the conditions of the set. In a similar manner, we can demonstrate that regardless of the choice of \\(a, b\\), \\(S\\) is non-empty.\nFurthermore, integral linear combinations are integers, so \\(S\\) is a non-empty subset of \\(\\mathbb{Z}\\).\nWe can now invoke WOP. There is some smallest \\(d \\in S\\). Let\u0026rsquo;s call \\(d = as +dt\\). We desire that \\(d\\) is actually \\(\\gcd (a,b)\\).\n\\(d\\) is a common divisor of \\(a,b\\) WLOG write some:\n\\begin{equation} a = dq + r \\end{equation}\nusing division algorithm. Because \\(d \\in S\\), we can write now:\n\\begin{equation} a = (as+bt) q + r \\end{equation}\nWe desire that now \\(r = 0\\) so that we can write \\(d|a\\). We can write:\n\\begin{equation} r = a-dq \\end{equation}\n(notice! \\(a\\) is a linear combination of \\(a,b\\), and \\(d\\) is given to be such)\n\\begin{equation} r = a-dq = (1a + 0b) - (as+bt)q = a(1-qs) + b(-tq) \\end{equation}\nRecall that \\(r \u0026lt; d\\) because \\(r\\) is a remainder. 
And of course \\(r\\) is defined to be positive or \\(0\\) by the division algorithm.\nSo:\n\\begin{equation} 0 \\leq a(1-qs) + b(-tq) \u0026lt;d \\end{equation}\nNow, you will note this middle thing, which is equal to \\(r\\), is itself a positive linear combination of \\(a,b\\). Furthermore, it is smaller than \\(d\\). We already have that \\(d\\) is the smallest element of \\(S\\), which means the only other value \\(r\\) can take on is \\(0\\).\nThis leads to conclude:\n\\begin{equation} a = dq + 0 \\end{equation}\nso \\(d|a\\), WLOG \\(d|b\\).\n\\(d\\) is the greatest common divisor Proof:\nLet \\(d\u0026rsquo;\\) be a common divisor of \\(a,b\\). This means there are some \\(m\u0026rsquo;, n\u0026rsquo;\\) such that:\n\\begin{align} a \u0026amp;= d\u0026rsquo; m\u0026rsquo; \\\\ b \u0026amp;= d\u0026rsquo; n' \\end{align}\nRecall that \\(d = as + bt\\). This means:\n\\begin{equation} d = as + bt = (d\u0026rsquo; m\u0026rsquo;)s + (d\u0026rsquo; n\u0026rsquo;)t = d\u0026rsquo; (m\u0026rsquo; s + n\u0026rsquo; t) \\end{equation}\nThis means that \\(d\u0026rsquo; | d\\). Now, \\(d \\in S\\), and everything in \\(S\\) is positive. Therefore, \\(d\\) must be the greatest common divisor because it is divisible (and therefore bigger in magnitude than) any \\(d\u0026rsquo;\\).\nWhich means that \\(d\\) must be the greatest common divisor\n","html":"\u003cp\u003eLet \\(a,b \\in \\mathbb{Z}\\), not both zero. \\(\\gcd (a,b)\\) is the greatest value \\(d\\) such that \\(d|a\\), \\(d|b\\).\u003c/p\u003e\n\u003ch2 id=\"greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/h2\u003e\n\u003cp\u003eWe can write \\(\\gcd (a,b) = as+bt\\) for some \\(s,t \\in \\mathbb{Z}\\).\u003c/p\u003e\n\u003cp\u003eLet us define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS = \\{am + bn: m,n \\in \\mathbb{Z}, am+bn \u0026gt; 0\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will first check that \\(S\\) is non-empty. 
To do so, let \\(a\\) be negative and \\(b\\) be positive. Then, set \\(m = -1\\), \\(n = 1\\). We can see that \\(am + bn \u0026gt; 0\\), satisfying the conditions of the set. In a similar manner, we can demonstrate that regardless of the choice of \\(a, b\\), \\(S\\) is non-empty.\u003c/p\u003e\n\u003cp\u003eFurthermore, integral linear combinations are integers, so \\(S\\) is a non-empty subset of \\(\\mathbb{Z}\\).\u003c/p\u003e\n\u003cp\u003eWe can now invoke \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e. There is some smallest \\(d \\in S\\). Let\u0026rsquo;s call \\(d = as +dt\\). We desire that \\(d\\) is actually \\(\\gcd (a,b)\\).\u003c/p\u003e\n\u003ch3 id=\"d-is-a-common-divisor-of-a-b\"\u003e\\(d\\) is a common divisor of \\(a,b\\)\u003c/h3\u003e\n\u003cp\u003eWLOG write some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = dq + r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eusing \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e. Because \\(d \\in S\\), we can write now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = (as+bt) q + r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that now \\(r = 0\\) so that we can write \\(d|a\\). We can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = a-dq\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(notice! \\(a\\) is a linear combination of \\(a,b\\), and \\(d\\) is given to be such)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = a-dq = (1a + 0b) - (as+bt)q = a(1-qs) + b(-tq)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(r \u0026lt; d\\) because \\(r\\) is a remainder. 
And of course \\(r\\) is defined to be positive or \\(0\\) by the \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq a(1-qs) + b(-tq) \u0026lt;d\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, you will note this middle thing, which is equal to \\(r\\), is itself a positive \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(a,b\\). Furthermore, it is smaller than \\(d\\). We already have that \\(d\\) is the smallest element of \\(S\\), which means the only other value \\(r\\) can take on is \\(0\\).\u003c/p\u003e\n\u003cp\u003eThis leads to conclude:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = dq + 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso \\(d|a\\), WLOG \\(d|b\\).\u003c/p\u003e\n\u003ch3 id=\"d-is-the-greatest-common-divisor\"\u003e\\(d\\) is the greatest common divisor\u003c/h3\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet \\(d\u0026rsquo;\\) be a common divisor of \\(a,b\\). This means there are some \\(m\u0026rsquo;, n\u0026rsquo;\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\na \u0026amp;= d\u0026rsquo; m\u0026rsquo; \\\\\nb \u0026amp;= d\u0026rsquo; n'\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall that \\(d = as + bt\\). This means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd = as + bt = (d\u0026rsquo; m\u0026rsquo;)s + (d\u0026rsquo; n\u0026rsquo;)t = d\u0026rsquo; (m\u0026rsquo; s + n\u0026rsquo; t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means that \\(d\u0026rsquo; | d\\). Now, \\(d \\in S\\), and everything in \\(S\\) is positive. 
Therefore, \\(d\\) must be the \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e because it is divisible (and therefore bigger in magnitude than) any \\(d\u0026rsquo;\\).\u003c/p\u003e\n\u003cp\u003eWhich means that \\(d\\) must be the \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgreatest_common_divisor/","tags":null,"title":"greatest common divisor"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgreedy_programming/","tags":null,"title":"greedy programming"},{"categories":null,"contents":"Participate in Demo Day.\nGetting something:\nOpportunity to get partnering Networking opportunities, having access to contract manufacturing =\u0026gt; Conrad Challenge, $600 each, $1200\nUser conversation\nSpoke again with CompassionKind: wanting to get 20 units shipped out Spoke with SustainableEnergy for All: started by one of the UN reps. of an African country; wanted to have us featured on Social Media Wanted to connect Start diving into user connections Hiring requests\nFulfilling orders MechE ","html":"\u003cp\u003eParticipate in Demo Day.\u003c/p\u003e\n\u003cp\u003eGetting something:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eOpportunity to get partnering\u003c/li\u003e\n\u003cli\u003eNetworking opportunities, having access to contract manufacturing\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e=\u0026gt; Conrad Challenge, $600 each, $1200\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eUser conversation\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSpoke again with CompassionKind: wanting to get 20 units shipped out\u003c/li\u003e\n\u003cli\u003eSpoke with SustainableEnergy for All: started by one of the UN reps. 
of an African country; wanted to have us featured on Social Media\u003c/li\u003e\n\u003cli\u003eWanted to connect\u003c/li\u003e\n\u003cli\u003eStart diving into user connections\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eHiring requests\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFulfilling orders\u003c/li\u003e\n\u003cli\u003eMechE\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgreenswing_april_checkin/","tags":null,"title":"GreenSwing April Checkin"},{"categories":null,"contents":"In this experiment, an efficient and accurate network of detecting automatically disseminated (bot) content on social platforms is devised. Through the utilisation of parallel convolutional neural network (CNN) which processes variable n-grams of text 15, 20, and 25 tokens in length encoded by Byte Pair Encoding (BPE), the complexities of linguistic content on social platforms are effectively captured and analysed. With validation on two sets of previously unexposed data, the model was able to achieve an accuracy of around 96.6% and 97.4% respectively — meeting or exceeding the performance of other comparable supervised ML solutions to this problem. Through testing, it is concluded that this method of text processing and analysis proves to be an effective way of classifying potentially artificially synthesized user data — aiding the security and integrity of social platforms.\n","html":"\u003cp\u003eIn this experiment, an efficient and accurate network of detecting automatically disseminated (bot) content on social platforms is devised. Through the utilisation of parallel convolutional neural network (CNN) which processes variable n-grams of text 15, 20, and 25 tokens in length encoded by Byte Pair Encoding (BPE), the complexities of linguistic content on social platforms are effectively captured and analysed. 
With validation on two sets of previously unexposed data, the model was able to achieve an accuracy of around 96.6% and 97.4% respectively — meeting or exceeding the performance of other comparable supervised ML solutions to this problem. Through testing, it is concluded that this method of text processing and analysis proves to be an effective way of classifying potentially artificially synthesized user data — aiding the security and integrity of social platforms.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgregarious_abstract/","tags":null,"title":"Gregarious Abstract"},{"categories":null,"contents":"grid search is a hyperparameter tuning technique by trying pairs of all hyperparemeters sequentially\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgrid_search/\"\u003egrid search\u003c/a\u003e is a hyperparameter tuning technique by trying pairs of all hyperparemeters sequentially\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgrid_search/","tags":null,"title":"grid search"},{"categories":null,"contents":"components a set of constituent objects an operation requirements for group closed: if \\(a,b \\in G\\), then \\(a \\cdot b \\in G\\) existence of identity: there is \\(e \\in G\\) such that \\(e\\cdot a= a\\cdot e = a\\), for all \\(a \\in G\\) existence of inverses: there is \\(b \\in G\\) for all \\(a \\in G\\) such that \\(a\\cdot b = b\\cdot a = e\\) associative: \\((a\\cdot b)\\cdot c = a\\cdot (b\\cdot c)\\) for all \\(a,b,c \\in G\\) additional information identity in group commutates with everything (which is the only commutattion in groups\nUnique identities and inverses the identity is unique in a group (similar idea as additive identity is unique in a vector space) for each \\(a \\in G\\), its inverse in unique (similar ideas as additive inverse is unique in a vector space) cancellation policies if \\(a,b,c \\in G\\), \\(ab = ac \\implies b = c\\) (left cancellation)\n\\(ba = ca \\implies b = c\\) (right 
cancellation)\nsock-shoes property if \\(a,b \\in G\\), then \\((ab)^{-1} = b^{-1}a^{-1}\\)\n","html":"\u003ch2 id=\"components\"\u003ecomponents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea set of constituent \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003ean \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements-for-group\"\u003erequirements for group\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e: if \\(a,b \\in G\\), then \\(a \\cdot b \\in G\\)\u003c/li\u003e\n\u003cli\u003eexistence of \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e: there is \\(e \\in G\\) such that \\(e\\cdot a= a\\cdot e = a\\), for all \\(a \\in G\\)\u003c/li\u003e\n\u003cli\u003eexistence of \u003ca href=\"/posts/kbhinverses/\"\u003einverses\u003c/a\u003e: there is \\(b \\in G\\) for all \\(a \\in G\\) such that \\(a\\cdot b = b\\cdot a = e\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e: \\((a\\cdot b)\\cdot c = a\\cdot (b\\cdot c)\\) for all \\(a,b,c \\in G\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e in \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutates\u003c/a\u003e with everything (which is the only \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutattion\u003c/a\u003e in groups\u003c/p\u003e\n\u003ch3 id=\"unique-identities-and-inverses\"\u003eUnique identities and inverses\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e is unique in a group (similar idea as \u003ca href=\"/posts/kbhadditive_identity_is_unique_in_a_vector_space/\"\u003eadditive identity 
is unique in a vector space\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003efor each \\(a \\in G\\), its inverse in unique (similar ideas as \u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003eadditive inverse is unique in a vector space\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cancellation-policies\"\u003ecancellation policies\u003c/h3\u003e\n\u003cp\u003eif \\(a,b,c \\in G\\), \\(ab = ac \\implies b = c\\) (\u003ca href=\"/posts/kbhgroup/\"\u003eleft cancellation\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003e\\(ba = ca \\implies b = c\\) (\u003ca href=\"/posts/kbhgroup/\"\u003eright cancellation\u003c/a\u003e)\u003c/p\u003e\n\u003ch3 id=\"sock-shoes-property\"\u003esock-shoes property\u003c/h3\u003e\n\u003cp\u003eif \\(a,b \\in G\\), then \\((ab)^{-1} = b^{-1}a^{-1}\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgroup/","tags":null,"title":"group"},{"categories":null,"contents":"Notes on MATH 109, group theory.\nLectures SU-MATH109 SEP272023 PSets These links are dead.\nSU-MATH109 Problem Set 1 Course logistics midterm: November 1st, final: December 14th, 8:30-11:30 WIM assignment: December 8th, start of class (no late submissions) PSets: 8 in total, posted on Wednesdays at 8A, due following Tuesday at 8A ","html":"\u003cp\u003eNotes on MATH 109, \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e theory.\u003c/p\u003e\n\u003ch2 id=\"lectures\"\u003eLectures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math109_sep272023_exp/\"\u003eSU-MATH109 SEP272023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"psets\"\u003ePSets\u003c/h2\u003e\n\u003cp\u003eThese links are dead.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math109_problem_set_1/\"\u003eSU-MATH109 Problem Set 1\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"course-logistics\"\u003eCourse logistics\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emidterm: November 
1st, final: December 14th, 8:30-11:30\u003c/li\u003e\n\u003cli\u003eWIM assignment: December 8th, start of class (no late submissions)\u003c/li\u003e\n\u003cli\u003ePSets: 8 in total, posted on Wednesdays at 8A, due following Tuesday at 8A\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgroup_theory_index/","tags":["index"],"title":"Group Theory Index"},{"categories":null,"contents":"\u0026ldquo;Stuffing some stuff into buckets\u0026rdquo;\nHow many ways are there to sort \\(n\\) distinct objects to \\(r\\) buckets?\n\\begin{equation} r^{n} \\end{equation}\ngrouping with entirely indistinct objects You can simply reframe the grouping problem as permutation of the objects with \\(r-1\\) dividers along with your old \\(n\\) objects.\ni.e.: sort this thing \u0026mdash;\nSo:\n\\begin{equation} \\frac{(n+r-1)!}{n! (r-1)!} \\end{equation}\n","html":"\u003cp\u003e\u0026ldquo;Stuffing some stuff into buckets\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eHow many ways are there to sort \\(n\\) distinct objects to \\(r\\) buckets?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr^{n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"grouping-with-entirely-indistinct-objects\"\u003egrouping with entirely indistinct objects\u003c/h2\u003e\n\u003cp\u003eYou can simply reframe the \u003ca href=\"/posts/kbhgrouping/\"\u003egrouping\u003c/a\u003e problem as \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e of the objects with \\(r-1\\) dividers along with your old \\(n\\) objects.\u003c/p\u003e\n\u003cp\u003ei.e.: sort this thing \u0026mdash;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-29_16-43-08_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{(n+r-1)!}{n! 
(r-1)!}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgrouping/","tags":null,"title":"grouping"},{"categories":null,"contents":"The Guilded Age is a period in history between 1877 and 1900. This period deepened divide in racism, deepened the split between poor and rich, and the fluidity of American social classes became more set in this time.\nLinks to Organize Imperialism New American South Why is the \u0026ldquo;Guilded Age\u0026rdquo; \u0026ldquo;Guilded\u0026rdquo;? Guilded: Outside Lined with Gold, Inside Contains Metal and is Less Valuable.\nThe Guilded Age consists of three different sections:\nBusiness (Top!) Labour Government Contributors to the Guilded Age There are three pieces\n\u0026ldquo;Homestead Act\u0026rdquo;: legal way to give people land in the west \u0026ldquo;National Banking Act\u0026rdquo;: unified a uniform economic system, and to connect markets \u0026ldquo;Pacific Railroad Act\u0026rdquo;: expansion of connection; also formed the first \u0026ldquo;Corporations\u0026rdquo; based on railroad organization structures. Issues of the Guilded Age Immigration \u0026ldquo;They are coming to take our jobs!\u0026rdquo; (Irish Edition.)\nUSCIS processed people in Ellis (Irish processing, took about 2 days to process) and Angel (Chinese processing, took about 6 months to process) islands: beginning having racial immigrant discrimination.\nUrbanization Populations in the United States tripled in about 50 years. Immigrants were stuffed into Tennaments. The Guilded age saw the beginning of skyscrapers.\nSocial Activism Because of the issues began during the Guilded Age, more people essentially stepped instead of the governement to play a role in supporting welfare.\nIndustrialization \u0026ldquo;Pulling yourself up by your bootstraps.\u0026rdquo; Steel is invented. All of the technology has been moved to provide maximum output; this comes, of course, at an environmental cost. Large unions are starting to take off. 
Railroad Strike: federal troops fired upon railroad workers; argued the case for union but also for corp. influence on politics.\nPolitics Democrats: racist, states rights, limited federal government. Republicans: supported businesses, immigrations. Yes, they are still flipped.\nBut either way, democracy was rampant: 80% turnout! This is, however, one of the most corrupt time in American politics. The party system: local political bosses would provide favours for a vote \u0026mdash; in an absence of welfare, in exchange for a vote, wolud provide protection and social welfare. At this time, there was mass lack of reliance.\nCulture Victorianism! Proper manners, conservatism, etc. Bikes and contraceptives! There is also a fear that \u0026ldquo;manliness was declining\u0026rdquo;: that no more farming means need for more sports, body building, etc. Also, \u0026ldquo;name brands\u0026rdquo;, \u0026ldquo;sears catalogue\u0026rdquo;, and consumerism is taking hold.\nCorportization Corporations, as an idea, took hold. That the owners of a group is seperated by management, it allows the expansion of the size of companies. Monopolies in industries run while: concentrated wealth in addition to corrupted politics.\nTaylorism: Taylor decided to make a shovel for each type of movement \u0026mdash; which makes people repeat the same task over again but increased efficiency. \u0026ldquo;Taylor-made\u0026rdquo; comes from this.\nOmaha Platform Expanding Credit Bracketed income tax Social reforms Lays the groundwork for the progressive moment. This was a socialist movement!\nThe West Transcontinental railroad: power over towns and concessions Rise of Cowboys and \u0026ldquo;cattle bonanza\u0026rdquo; Prairies settled with new farming equipment and new Russian wheat strands: \u0026ldquo;Americanlization\u0026rdquo; The \u0026ldquo;turner thesis\u0026rdquo;: American democracy is formed at the frontier. 
However, Western Expansion is actually much of a tragedy, and this is actually leads to Imperialism.\nIndian Removal Policy of Indian removal to force into treaties + reservation Sioux Wars (crazy horse, etc.): Native American resistance Native Americans of California extreme violence; as well as slave labour Dawes Act of 1887 and forced \u0026ldquo;assimilation\u0026rdquo;: forced the breakup of many reservations Guilded Age Commentary Historians Rebekah Edwards The late 19th century was not entirely laissez faire \u0026ldquo;Progressive Era\u0026rdquo;: not always progressive Issues that lead to the \u0026ldquo;Guilded age\u0026rdquo; name that was not specific to the Guilded age \u0026ldquo;Guilded age\u0026rdquo;: \u0026ldquo;eh, nothing else to deal with, so let\u0026rsquo;s deal with racism!\u0026rdquo;\nRichard John Guilded age was a period of rapid industrialization Very charactured, unequal + vulgar time The resulting changes are very concentrated; all of the changes that are 80 years apart This is super disconnected to social, political aspects of life. It doesn\u0026rsquo;t talk about how the economy effects the social standings and ladders that people lived in =\u0026gt; that movement comes from a lot of social change.\nMade a point about the positive/negatives effects of the guilded age: don\u0026rsquo;t focus the individuals but instead the structures.\nHe did not want the \u0026ldquo;progressive era\u0026rdquo; as a classification in line with the guilded age. \u0026ldquo;Guilded age\u0026rdquo; is the only pejorative term for an era: so one negative description does not do it justice.\nRichard Benzel Richard Benzel claims that the textbook industry primes people; that a title for an age shoehorns the age and changes the reflection of the reader.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e is a period in history between 1877 and 1900. 
This period deepened divide in racism, deepened the split between poor and rich, and the fluidity of American social classes became more set in this time.\u003c/p\u003e\n\u003ch2 id=\"links-to-organize\"\u003eLinks to Organize\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhimperialism/\"\u003eImperialism\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnew_american_south/\"\u003eNew American South\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"why-is-the-guilded-age--kbhguilded-age-dot-md--guilded\"\u003eWhy is the \u0026ldquo;\u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e\u0026rdquo; \u0026ldquo;Guilded\u0026rdquo;?\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#why-is-the-guilded-age--kbhguilded-age-dot-md--guilded\"\u003eGuilded\u003c/a\u003e: Outside Lined with Gold, Inside Contains Metal and is Less Valuable.\u003c/p\u003e\n\u003cp\u003eThe Guilded Age consists of three different sections:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBusiness (Top!)\u003c/li\u003e\n\u003cli\u003eLabour\u003c/li\u003e\n\u003cli\u003eGovernment\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"contributors-to-the-guilded-age--kbhguilded-age-dot-md\"\u003eContributors to the \u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThere are three pieces\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhhomestead_act/\"\u003eHomestead Act\u003c/a\u003e\u0026rdquo;: legal way to give people land in the west\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhnational_banking_act/\"\u003eNational Banking Act\u003c/a\u003e\u0026rdquo;: unified a uniform economic system, and to connect markets\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;\u003ca href=\"/posts/kbhpacific_railroad_act/\"\u003ePacific Railroad Act\u003c/a\u003e\u0026rdquo;: expansion of connection; also formed the first \u0026ldquo;Corporations\u0026rdquo; 
based on railroad organization structures.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"issues-of-the-guilded-age\"\u003eIssues of the Guilded Age\u003c/h2\u003e\n\u003ch3 id=\"immigration\"\u003eImmigration\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;They are coming to take our jobs!\u0026rdquo; (Irish Edition.)\u003c/p\u003e\n\u003cp\u003eUSCIS processed people in Ellis (Irish processing, took about 2 days to process) and Angel (Chinese processing, took about 6 \u003cem\u003emonths\u003c/em\u003e to process) islands: beginning having racial immigrant discrimination.\u003c/p\u003e\n\u003ch3 id=\"urbanization\"\u003eUrbanization\u003c/h3\u003e\n\u003cp\u003ePopulations in the United States tripled in about 50 years. Immigrants were stuffed into Tennaments. The Guilded age saw the beginning of skyscrapers.\u003c/p\u003e\n\u003ch3 id=\"social-activism\"\u003eSocial Activism\u003c/h3\u003e\n\u003cp\u003eBecause of the issues began during the Guilded Age, more people essentially stepped instead of the governement to play a role in supporting welfare.\u003c/p\u003e\n\u003ch3 id=\"industrialization\"\u003eIndustrialization\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Pulling yourself up by your bootstraps.\u0026rdquo; \u003cem\u003eSteel\u003c/em\u003e is invented. All of the technology has been moved to provide maximum output; this comes, of course, at an environmental cost. Large unions are starting to take off. Railroad Strike: federal troops fired upon railroad workers; argued the case for union but also for corp. influence on politics.\u003c/p\u003e\n\u003ch3 id=\"politics\"\u003ePolitics\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDemocrats: racist, states rights, limited federal government.\u003c/li\u003e\n\u003cli\u003eRepublicans: supported businesses, immigrations.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYes, they are still flipped.\u003c/p\u003e\n\u003cp\u003eBut either way, democracy was rampant: 80% turnout! 
This is, however, one of the most corrupt time in American politics. The party system: local political bosses would provide favours for a vote \u0026mdash; in an absence of welfare, in exchange for a vote, wolud provide protection and social welfare. At this time, there was mass lack of reliance.\u003c/p\u003e\n\u003ch3 id=\"culture\"\u003eCulture\u003c/h3\u003e\n\u003cp\u003eVictorianism! Proper manners, conservatism, etc. Bikes and contraceptives! There is also a fear that \u0026ldquo;manliness was declining\u0026rdquo;: that no more farming means need for more sports, body building, etc. Also, \u0026ldquo;name brands\u0026rdquo;, \u0026ldquo;sears catalogue\u0026rdquo;, and consumerism is taking hold.\u003c/p\u003e\n\u003ch3 id=\"corportization\"\u003eCorportization\u003c/h3\u003e\n\u003cp\u003eCorporations, as an idea, took hold. That the owners of a group is seperated by management, it allows the expansion of the size of companies. Monopolies in industries run while: concentrated wealth in addition to corrupted politics.\u003c/p\u003e\n\u003cp\u003eTaylorism: Taylor decided to make a shovel for each type of movement \u0026mdash; which makes people repeat the same task over again but increased efficiency. \u0026ldquo;Taylor-made\u0026rdquo; comes from this.\u003c/p\u003e\n\u003ch3 id=\"omaha-platform\"\u003eOmaha Platform\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExpanding Credit\u003c/li\u003e\n\u003cli\u003eBracketed income tax\u003c/li\u003e\n\u003cli\u003eSocial reforms\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLays the groundwork for the progressive moment. 
This was a socialist movement!\u003c/p\u003e\n\u003ch2 id=\"the-west\"\u003eThe West\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTranscontinental railroad: power over towns and concessions\u003c/li\u003e\n\u003cli\u003eRise of Cowboys and \u0026ldquo;cattle bonanza\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ePrairies settled with new farming equipment and new Russian wheat strands: \u0026ldquo;Americanlization\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u0026ldquo;turner thesis\u0026rdquo;: American democracy is formed at the frontier. However, Western Expansion is actually much of a tragedy, and this is actually leads to Imperialism.\u003c/p\u003e\n\u003ch3 id=\"indian-removal\"\u003eIndian Removal\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePolicy of Indian removal to force into treaties + reservation\u003c/li\u003e\n\u003cli\u003eSioux Wars (crazy horse, etc.): Native American resistance\u003c/li\u003e\n\u003cli\u003eNative Americans of California extreme violence; as well as slave labour\u003c/li\u003e\n\u003cli\u003eDawes Act of 1887 and forced \u0026ldquo;assimilation\u0026rdquo;: forced the breakup of many reservations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"guilded-age-commentary-historians\"\u003eGuilded Age Commentary Historians\u003c/h2\u003e\n\u003ch3 id=\"rebekah-edwards\"\u003eRebekah Edwards\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe late 19th century was not entirely \u003cem\u003elaissez faire\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Progressive Era\u0026rdquo;: not always progressive\u003c/li\u003e\n\u003cli\u003eIssues that lead to the \u0026ldquo;Guilded age\u0026rdquo; name that was not specific to the Guilded age\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;Guilded age\u0026rdquo;: \u0026ldquo;eh, nothing else to deal with, so let\u0026rsquo;s deal with racism!\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"richard-john\"\u003eRichard 
John\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGuilded age was a period of rapid industrialization\u003c/li\u003e\n\u003cli\u003eVery charactured, unequal + vulgar time\u003c/li\u003e\n\u003cli\u003eThe resulting changes are very concentrated; all of the changes that are 80 years apart\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is super disconnected to social, political aspects of life. It doesn\u0026rsquo;t talk about how the economy effects the social standings and ladders that people lived in =\u0026gt; that movement comes from a lot of social change.\u003c/p\u003e\n\u003cp\u003eMade a point about the positive/negatives effects of the guilded age: don\u0026rsquo;t focus the individuals but instead the structures.\u003c/p\u003e\n\u003cp\u003eHe did not want the \u0026ldquo;progressive era\u0026rdquo; as a classification in line with the guilded age. \u0026ldquo;Guilded age\u0026rdquo; is the only pejorative term for an era: so one negative description does not do it justice.\u003c/p\u003e\n\u003ch3 id=\"richard-benzel\"\u003eRichard Benzel\u003c/h3\u003e\n\u003cp\u003eRichard Benzel claims that the textbook industry primes people; that a title for an age shoehorns the age and changes the reflection of the reader.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhguilded_age/","tags":null,"title":"Guilded Age"},{"categories":null,"contents":"One-Liner UAV navigation through leveraging updrafts, handling their unpredictability with POMDPs and Receeding Horizon.\nNovelty Developed new method for low-cost POMDP online solving Cool bird. 
Notable Methods two main steps explore: determine thermal parameters exploit: plan a trajectory to exploit the thermal formulation \\(\\mathcal{S}\\): \\(s^{u} \\in \\mathbb{R}^{6}\\), the joint state of the UAV (2D location wrt fixed point + air speech + heading, bank, roll, altitude), and \\(s^{th} \\in \\mathbb{R}^{2}\\),the thermal status (thermal center x and y relative to UAV) \\(\\mathcal{A}\\): discretized arc trajectory segments by bank angles \\(\\phi_{1 \\dots n}\\), which executes for a fixed \\(T_{A}\\) seconds \\(\\mathcal{T}\\): Gaussian of \\(s^{u}\\) over the dynamics of the UAV, and over fixed noise covariance \\(Q\\) \\(\\mathcal{R}\\): \\(h_{s\u0026rsquo;}-h_{s}\\), the change in altitude\u0026hellip;. \\(\\mathcal{O}\\): senor readings \\(O(a, s\u0026rsquo;, o)\\): fixed noise covariance \\(R\\) \\(b_0\\): product of two Gaussian of the UAV\u0026rsquo;s position and the belief about the underlying thermals \\(update(b,a,o)\\): EKF modeling assumptions:\nthermal consistency: the world model change frequency less than control thermal stationarity: thermal doesn\u0026rsquo;t move against surrounding air no pitch angle control: reward hacking may happen no turbulence: thermal doesn\u0026rsquo;t sang horizontal displacements POMDSoar The exact solution to the POMDP as proposed makes aggressive decisions in order to simplify costs to run on a PixHawk.\nWe need to explicitly build in a exploration/exploitation tradeoff.\nKey Figs comparison against ardusoar: EKF + just circling\nArduPilot\u0026rsquo;s implementation is worse\nNew Concepts POMDSoar, the soring mechanism\nNotes ","html":"\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUAV navigation through leveraging updrafts, handling their unpredictability with \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es and \u003ca href=\"/posts/kbhreceeding_horizon/\"\u003eReceeding Horizon\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 
id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDeveloped new method for low-cost POMDP online solving\u003c/li\u003e\n\u003cli\u003eCool bird.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch3 id=\"two-main-steps\"\u003etwo main steps\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eexplore: determine thermal parameters\u003c/li\u003e\n\u003cli\u003eexploit: plan a trajectory to exploit the thermal\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"formulation\"\u003eformulation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{S}\\): \\(s^{u} \\in \\mathbb{R}^{6}\\), the joint state of the UAV (2D location wrt fixed point + air speech + heading, bank, roll, altitude), and \\(s^{th} \\in \\mathbb{R}^{2}\\),the thermal status (thermal center x and y relative to UAV)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{A}\\): discretized arc trajectory segments by bank angles \\(\\phi_{1 \\dots n}\\), which executes for a fixed \\(T_{A}\\) seconds\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{T}\\): Gaussian of \\(s^{u}\\) over the dynamics of the UAV, and over fixed noise covariance \\(Q\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{R}\\): \\(h_{s\u0026rsquo;}-h_{s}\\), the change in altitude\u0026hellip;.\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{O}\\): senor readings\u003c/li\u003e\n\u003cli\u003e\\(O(a, s\u0026rsquo;, o)\\): fixed noise covariance \\(R\\)\u003c/li\u003e\n\u003cli\u003e\\(b_0\\): product of two Gaussian of the UAV\u0026rsquo;s position and the belief about the underlying thermals\u003c/li\u003e\n\u003cli\u003e\\(update(b,a,o)\\): \u003ca href=\"/posts/kbhfilters/#extended\"\u003eEKF\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003emodeling assumptions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ethermal consistency\u003c/strong\u003e: the world model change frequency less than control\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ethermal 
stationarity\u003c/strong\u003e: thermal doesn\u0026rsquo;t move against surrounding air\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno pitch angle control\u003c/strong\u003e: reward hacking may happen\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno turbulence\u003c/strong\u003e: thermal doesn\u0026rsquo;t sang horizontal displacements\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pomdsoar\"\u003ePOMDSoar\u003c/h3\u003e\n\u003cp\u003eThe exact solution to the \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e as proposed makes aggressive decisions in order to simplify costs to run on a PixHawk.\u003c/p\u003e\n\u003cp\u003eWe need to explicitly build in a exploration/exploitation tradeoff.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_09-49-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ecomparison against ardusoar: \u003ca href=\"/posts/kbhfilters/#extended\"\u003eEKF\u003c/a\u003e + just circling\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-09_12-12-55_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eArduPilot\u0026rsquo;s implementation is worse\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#pomdsoar\"\u003ePOMDSoar\u003c/a\u003e, the soring mechanism\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkolobov_2018/","tags":null,"title":"Guilliard 2018"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.642517\nOne-Liner Used WLS data to augment CTP from ADReSS Challenge and trained it on a BERT with good results.\nNovelty Used WLS data with CTP task to augment ADReSS DementiaBank data Notable Methods WLS data is not labeled, so authors used Semantic Verbal Fluency tests that come with WLS to make a presumed conservative diagnoses. 
Therefore, control data is more interesting:\nKey Figs Table 2 Data-aug of ADReSS Challenge data with WSL controls (no presumed AD) trained with a BERT. As expected the conservative control data results in better ferf\nNew Concepts ADReSS Challenge is small so use WLS to augment it ","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.642517\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUsed WLS data to augment \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e from \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e and trained it on a BERT with good results.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsed WLS data with \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task to augment ADReSS \u003ca href=\"/posts/kbhdementiabank/\"\u003eDementiaBank\u003c/a\u003e data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eWLS data is not labeled, so authors used \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e tests that come with WLS to make a presumed conservative diagnoses. Therefore, control data is more interesting:\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"table-2\"\u003eTable 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-27-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eData-aug of \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e data with WSL controls (no presumed AD) trained with a BERT. 
As expected the conservative control data results in better ferf\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e is small so use WLS to augment it\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhguo_2021/","tags":["ntj"],"title":"Guo 2021"},{"categories":null,"contents":"GUS is the a architecture of frame based Dialogue Systems; this is sometimes called a domain ontology.\nGeneral principle: try to fill as many user slots in the frame as possible that the user specifies, if the frame is filled, do action and report result.\nYou maybe working in multi-frame systems, then in which case some slots in one frame may help inform or fill those in another frame.\nGUS uses regular expressions/grammar rules to perform all of its tasks. Generating responses are usually completely templated.\ntradeoffs high precision low recall maybe hard to write three tasks domain classification: which frames to activate? intent determination: which tasks to activate once frame is filled? 
slot filling: fill frame we can actually consider this as one giant frame:\nframe frame is a structure which is used to store information about an interaction.\nSlot Type Question origin city \u0026ldquo;\u0026hellip;.?\u0026rdquo; \u0026hellip; \u0026hellip; \u0026hellip; which, throughout the interaction, is filled out by asking the questions.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhgus/\"\u003eGUS\u003c/a\u003e is the a architecture of \u003ca href=\"#frame\"\u003eframe\u003c/a\u003e based \u003ca href=\"/posts/kbhchatbot/\"\u003eDialogue Systems\u003c/a\u003e; this is sometimes called a \u003cstrong\u003edomain ontology\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eGeneral principle: try to fill as many user slots in the frame as possible that the user specifies, if the frame is filled, do action and report result.\u003c/p\u003e\n\u003cp\u003eYou maybe working in multi-frame systems, then in which case some slots in one frame may help inform or fill those in another frame.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgus/\"\u003eGUS\u003c/a\u003e uses regular expressions/grammar rules to perform all of its tasks. 
Generating responses are usually completely templated.\u003c/p\u003e\n\u003ch2 id=\"tradeoffs\"\u003etradeoffs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ehigh precision\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elow recall\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003emaybe hard to write\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"three-tasks\"\u003ethree tasks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edomain classification\u003c/strong\u003e: which frames to activate?\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eintent determination\u003c/strong\u003e: which tasks to activate once frame is filled?\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eslot filling\u003c/strong\u003e: fill frame\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe can actually consider this as one giant frame:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-01_09-47-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"frame\"\u003eframe\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#frame\"\u003eframe\u003c/a\u003e is a structure which is used to store information about an interaction.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSlot\u003c/th\u003e\n\u003cth\u003eType\u003c/th\u003e\n\u003cth\u003eQuestion\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eorigin\u003c/td\u003e\n\u003ctd\u003ecity\u003c/td\u003e\n\u003ctd\u003e\u0026ldquo;\u0026hellip;.?\u0026rdquo;\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u0026hellip;\u003c/td\u003e\n\u003ctd\u003e\u0026hellip;\u003c/td\u003e\n\u003ctd\u003e\u0026hellip;\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ewhich, throughout the interaction, is filled out by asking the 
questions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhgus/","tags":null,"title":"GUS"},{"categories":null,"contents":"Gut bacteria are both adversly affected by 5-Fluoropyrimidine, and but they mtaybe able to inactivate synthesized Fluoropyrimidine.\nPreTA in E. Coli is an example of a bacterial that can do this. See implications of PreTA deactivating Fluoropyrimidine.\n","html":"\u003cp\u003eGut bacteria are both adversly affected by 5-\u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e, and but they mtaybe able to inactivate synthesized \u003ca href=\"\"\u003eFluoropyrimidine\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"\"\u003ePreTA\u003c/a\u003e in \u003ca href=\"/posts/kbhe_coli/\"\u003eE. Coli\u003c/a\u003e is an example of a bacterial that can do this. See \u003ca href=\"\"\u003eimplications of PreTA deactivating Fluoropyrimidine\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbh5_fluoropyrimidine_maybe_inactivated_by_gut_microbiome/","tags":null,"title":"gut microbiome deactivating Fluoropyrimidine"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhh4/","tags":null,"title":"H4"},{"categories":null,"contents":"controller POMDP policies with FST. Previous papers had exponential blowups.\nSuccessor function is deterministic.\npolicy iteration Use FST as policy representation:\ndeterministic controller POMDP evaluation for all \\((a,o,x)\\), add a now node x\u0026rsquo; and evaluate them to see if its needed then, we perform pruning everything that\u0026rsquo;s dominated (i.e. \\(U(x,s) \u0026lt; U(x\u0026rsquo;, s) \\forall s\\). i.e. we want to prune everything for which the expected utility of being in node \\(x\u0026rsquo;\\) dominates the expected utility of \\(x\\) for all \\(x\\). 
prune new nodes that are duplicates in terms of action and transitions When you are done, extract the policy: find the node that maximizes your\nheuristic search Optimize value function ran starting at the starting belief state, not for all states. Add nodes only when improvement is seen starting at the beginning.\ndeterministic controller POMDP evaluation Recall that controllers are defined over belief-states, and, unlike finite state controller evaluation, the transitions are not distributions; so, we have:\n\\begin{equation} U(x,s) = R(s,a(x)) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a(x)) \\sum_{o}^{} O(o|s\u0026rsquo;, a(x)) U(x\u0026rsquo;(x,a,o), s\u0026rsquo;) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e policies with FST. Previous papers had exponential blowups.\u003c/p\u003e\n\u003cp\u003eSuccessor function is \u003cstrong\u003edeterministic\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"policy-iteration--kbhpolicy-iteration-dot-md\"\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eUse FST as policy representation:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"#deterministic-controller-pomdp-evaluation\"\u003edeterministic controller POMDP evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efor all \\((a,o,x)\\), add a now node x\u0026rsquo; and evaluate them to see if its needed\u003c/li\u003e\n\u003cli\u003ethen, we perform pruning\n\u003cul\u003e\n\u003cli\u003eeverything that\u0026rsquo;s dominated (i.e. \\(U(x,s) \u0026lt; U(x\u0026rsquo;, s) \\forall s\\). i.e. 
we want to prune everything for which the expected utility of being in node \\(x\u0026rsquo;\\) dominates the expected utility of \\(x\\) for all \\(x\\).\u003c/li\u003e\n\u003cli\u003eprune new nodes that are duplicates in terms of action and transitions\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhen you are done, extract the policy: find the node that maximizes your\u003c/p\u003e\n\u003ch2 id=\"heuristic-search\"\u003eheuristic search\u003c/h2\u003e\n\u003cp\u003eOptimize value function ran starting at the starting belief state, not for all states. Add nodes only when improvement is seen starting at the beginning.\u003c/p\u003e\n\u003ch2 id=\"deterministic-controller-pomdp-evaluation\"\u003edeterministic controller POMDP evaluation\u003c/h2\u003e\n\u003cp\u003eRecall that controllers are defined over belief-states, and, unlike \u003ca href=\"/posts/kbhcontroller/#finite-state-controller-evaluation\"\u003efinite state controller evaluation\u003c/a\u003e, the transitions are not distributions; so, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(x,s) = R(s,a(x)) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a(x)) \\sum_{o}^{} O(o|s\u0026rsquo;, a(x)) U(x\u0026rsquo;(x,a,o), s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhansen/","tags":null,"title":"Hansen"},{"categories":null,"contents":"Haplmmune is a antibody platform technology developed by Akiko Koide (NYU) specific towards ?\n","html":"\u003cp\u003e\u003ca href=\"\"\u003eHaplmmune\u003c/a\u003e is a antibody platform technology developed by Akiko Koide (NYU) specific towards ?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhaplmmune/","tags":null,"title":"Haplmmune"},{"categories":null,"contents":"Harmonic Mean is the inverse of the inverse sum of arithmetic means, weighted.\nIt is near the lower of the two values, instead of the middle: meaning that in incentives both things being meaned to be 
higher. Hence why we use F measure for things.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhharmonic_mean/\"\u003eHarmonic Mean\u003c/a\u003e is the inverse of the inverse sum of arithmetic means, weighted.\u003c/p\u003e\n\u003cp\u003eIt is near the lower of the two values, instead of the middle: meaning that in incentives both things being meaned to be higher. Hence why we use F measure for things.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhharmonic_mean/","tags":null,"title":"harmonic mean"},{"categories":null,"contents":"Representational Harms System\u0026rsquo;s representation demeans a social group because it learns about built-in biaes of data\nHarms of Censorship Speech that mention minority group gets sensored because they mention minority groups.\nPerformance Disparities For instance, works worse on AAVE. Lack of data, labels, etc.\n","html":"\u003ch2 id=\"representational-harms\"\u003eRepresentational Harms\u003c/h2\u003e\n\u003cp\u003eSystem\u0026rsquo;s representation demeans a social group because it learns about built-in biaes of data\u003c/p\u003e\n\u003ch2 id=\"harms-of-censorship\"\u003eHarms of Censorship\u003c/h2\u003e\n\u003cp\u003eSpeech that mention minority group gets sensored because they mention minority groups.\u003c/p\u003e\n\u003ch2 id=\"performance-disparities\"\u003ePerformance Disparities\u003c/h2\u003e\n\u003cp\u003eFor instance, works worse on AAVE. Lack of data, labels, etc.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhharms_in_classification/","tags":null,"title":"Harms in Classification"},{"categories":null,"contents":"The heap is a self-managed area of the memory.\nmalloc void *malloc(size_t size); You should pass in the number of bytes; therefore, we need to pass in the number of bytes through something like malloc(sizeof(int)*len). The memory is not cleared out.\ncalloc void *calloc(size_t nmemb, size_t size); Put the number of elements into nmemb, and the size of them into size. 
Stamp zeros throughout.\nstrdup Deep copy a string. strlen, malloc, strcpy, retrun.\nfree void free(void *ptr); Frees whatever the pointer points to. The pointer itself (a stack variable), is not deleted and still points to the freed memory.\nrealloc void *realloc(void *ptr, size_t size); Changes the memory that ptr points to to size size. If there\u0026rsquo;s not enough space, realloc moves the memory content and frees the old one.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhheap/\"\u003eheap\u003c/a\u003e is a self-managed area of the \u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"malloc\"\u003emalloc\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003emalloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou should pass in the number of \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es; therefore, we need to pass in the number of bytes through something like \u003ccode\u003emalloc(sizeof(int)*len)\u003c/code\u003e. 
The memory is \u003cem\u003enot\u003c/em\u003e cleared out.\u003c/p\u003e\n\u003ch2 id=\"calloc\"\u003ecalloc\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ecalloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enmemb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ePut the \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e of elements into \u003ccode\u003enmemb\u003c/code\u003e, and the size of them into \u003ccode\u003esize\u003c/code\u003e. Stamp zeros throughout.\u003c/p\u003e\n\u003ch2 id=\"strdup\"\u003estrdup\u003c/h2\u003e\n\u003cp\u003eDeep copy a string. 
\u003ccode\u003estrlen\u003c/code\u003e, \u003ccode\u003emalloc\u003c/code\u003e, \u003ccode\u003estrcpy\u003c/code\u003e, retrun.\u003c/p\u003e\n\u003ch2 id=\"free\"\u003efree\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efree\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFrees whatever the pointer points to. 
The \u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e itself (a \u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e variable), is not deleted and still points to the freed memory.\u003c/p\u003e\n\u003ch2 id=\"realloc\"\u003erealloc\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003erealloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eChanges the \u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e that \u003ccode\u003eptr\u003c/code\u003e points to to size \u003ccode\u003esize\u003c/code\u003e. If there\u0026rsquo;s not enough space, \u003ccode\u003erealloc\u003c/code\u003e moves the memory content and frees the old one.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhheap/","tags":null,"title":"heap"},{"categories":null,"contents":"Upon initialization, a large contiguous block of memory is initialized as a whole and called the \u0026ldquo;heap\u0026rdquo;. 
If we run out of it, we double the amount of memory being allocated.\nhandling arbitrary requests of mallocs/realloc and frees keep track of what\u0026rsquo;s been allocated and what\u0026rsquo;s free decide which segment of memory to use when fulfilling an allocating request respond quickly Return addresses that are 8-byte aligned (native types must be stored at a memory location which is a multiple of its size; otherwise bus error) Two main goals:\nmaximize throughput: we want to make number of requests per unit of time large (\u0026ldquo;we want the largest address in use to be as low as possible\u0026rdquo;) maximize utilization: we want to use memory economically without fragmenting it These two goals seems to be conflicting: it may take longer to plan out heap memory use for each request if we want to have perfect.\nDesign Questions how do we keep track of blocks that are freed\nhow do we choose which free block to satisfy an allocation request\nafter we choose a free block, how do we deal with the excess\ncan we avoid searching all blocks for the free blocks to reuse?\ncan we merge adjacent free blocks to keep large space available?\nCan we avoid always coping/moving data?\nBump Allocator Silliest Heap allocator. You maintain a pointer that\u0026rsquo;s the root of the memory being used, and each time you get memory we bump that pointer forward. Free does nothing.\nMaximum throughput (you like, just allocate heap, and free is very easy), but bad utilization.\nImplicit Free List Allocator In this implementation, the block structure implies what has been freed. We used to store this into a global data structure, but that\u0026rsquo;s bad because there is too much memory overhead. Instead, we place a 8-byte \u0026ldquo;header\u0026rdquo; in front of each block of memory containing whether its free or in use + its payload size. 
Through reading all the headers, we essentially maintain an implicit list of free nodes.\nNow, the 8 byte system for memory + free status doesn\u0026rsquo;t sound right. Recall memory addresses themselves are 8-bytes; however, all of our memory is 8-byte aligned. So, the first three bits should be 0.\nTherefore, we pack free status in the firs tbit, ignore the next two, do store the memory in the rest\n\u0026ldquo;which one do you alloc\u0026rdquo; First fit: start from the beginning, and search for the first free block you come across to serve the request Next fit: continuing search starting at the end point of your last malloc until you get the first free block, when you hit the end, go back around Best fit: examine every free block and find the one with the smallest size that fits Best fit minimizes fragmentation; next fit optimizes speed\nedge case if you run out of space in the end, with an awkward 8 byte in the end, you can either make a 0-byte block or just give the last bit of memory to the previous one.\nExplicit Free List Allocator Can we design an allocator to jump between free blocks. Naively doing this is bad.\nInstead, we can constrain each block to be at least size 16. 
And then, we will put the pointers to the prev/next free nodes in the next two 8-byte payload.\nFinally, we will keep track of a head node as a global variable\nMemory Coalescing During frees, we should try to eat the adjacent right free memory to create one large free block in order to coalescing free blocks together\nDuring realloc, there are three conditions by which you can retrun the same address:\nsize is growing, there\u0026rsquo;s free space to the right size is growing, but we added padding so we can use that size is shrinking (we have to ensure that we have at least 16 bytes in the shrink space, which means we need to be shrink by at least 24 bytes to actually do any shrinking) Memory providing rules at least 16 bytes (only if Explicit Free List Allocator) has to be multiple of 8 Explicit allocator Requirements must have headers to track information in implicit must have an explicit free list managed as a doubly linked list using the first 16 bytes of the free block must have a malloc implementation that searches the free block must coallesce the immediate right free blocks must do in-place realloc when possible; even if its not possible, we should still absorb adjacent right blocks or no longer absorb and must realloc ","html":"\u003cp\u003eUpon initialization, a large contiguous block of memory is initialized as a whole and called the \u0026ldquo;heap\u0026rdquo;. 
If we run out of it, we double the amount of memory being allocated.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehandling arbitrary requests of mallocs/realloc and frees\u003c/li\u003e\n\u003cli\u003ekeep track of what\u0026rsquo;s been allocated and what\u0026rsquo;s free\u003c/li\u003e\n\u003cli\u003edecide which segment of memory to use when fulfilling an allocating request\u003c/li\u003e\n\u003cli\u003erespond quickly\u003c/li\u003e\n\u003cli\u003eReturn addresses that are 8-byte aligned (native types must be stored at a memory location which is a multiple of its size; otherwise bus error)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTwo main goals:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emaximize throughput: we want to make number of requests per unit of time large (\u0026ldquo;we want the largest address in use to be as low as possible\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003emaximize utilization: we want to use memory economically without fragmenting it\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThese two goals seems to be conflicting: it may take longer to plan out heap memory use for each request if we want to have perfect.\u003c/p\u003e\n\u003ch2 id=\"design-questions\"\u003eDesign Questions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003ehow do we keep track of blocks that are freed\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ehow do we choose which free block to satisfy an allocation request\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eafter we choose a free block, how do we deal with the excess\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecan we avoid searching all blocks for the free blocks to reuse?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecan we merge adjacent free blocks to keep large space available?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCan we avoid always coping/moving data?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 
id=\"bump-allocator\"\u003eBump Allocator\u003c/h2\u003e\n\u003cp\u003eSilliest \u003ca href=\"/posts/kbhheap_allocator/\"\u003eHeap allocator\u003c/a\u003e. You maintain a pointer that\u0026rsquo;s the root of the memory being used, and each time you get memory we bump that pointer forward. Free does nothing.\u003c/p\u003e\n\u003cp\u003eMaximum throughput (you like, just allocate heap, and free is very easy), but bad utilization.\u003c/p\u003e\n\u003ch2 id=\"implicit-free-list-allocator\"\u003eImplicit Free List Allocator\u003c/h2\u003e\n\u003cp\u003eIn this implementation, the block structure implies what has been freed. We used to store this into a global data structure, but that\u0026rsquo;s bad because there is too much memory overhead. Instead, we place a 8-byte \u0026ldquo;header\u0026rdquo; in front of each block of memory containing whether its free or in use + its payload size. Through reading all the headers, we essentially maintain an implicit list of free nodes.\u003c/p\u003e\n\u003cp\u003eNow, the 8 byte system for memory + free status doesn\u0026rsquo;t sound right. Recall memory addresses themselves are 8-bytes; however, all of our memory is 8-byte aligned. 
So, the first three bits should be 0.\u003c/p\u003e\n\u003cp\u003eTherefore, we pack free status in the firs tbit, ignore the next two, do store the memory in the rest\u003c/p\u003e\n\u003ch3 id=\"which-one-do-you-alloc\"\u003e\u0026ldquo;which one do you alloc\u0026rdquo;\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eFirst fit\u003c/strong\u003e: start from the beginning, and search for the first free block you come across to serve the request\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eNext fit\u003c/strong\u003e: continuing search starting at the end point of your last malloc until you get the first free block, when you hit the end, go back around\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eBest fit\u003c/strong\u003e: examine every free block and find the one with the smallest size that fits\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBest fit minimizes fragmentation; next fit optimizes speed\u003c/p\u003e\n\u003ch3 id=\"edge-case\"\u003eedge case\u003c/h3\u003e\n\u003cp\u003eif you run out of space in the end, with an awkward 8 byte in the end, you can either make a 0-byte block or just give the last bit of memory to the previous one.\u003c/p\u003e\n\u003ch3 id=\"explicit-free-list-allocator\"\u003eExplicit Free List Allocator\u003c/h3\u003e\n\u003cp\u003eCan we design an allocator to jump between free blocks. Naively doing this is bad.\u003c/p\u003e\n\u003cp\u003eInstead, we can constrain each block to be at least size 16. 
And then, we will put the pointers to the prev/next free nodes in the next two 8-byte payload.\u003c/p\u003e\n\u003cp\u003eFinally, we will keep track of a head node as a global variable\u003c/p\u003e\n\u003ch2 id=\"memory-coalescing\"\u003eMemory Coalescing\u003c/h2\u003e\n\u003cp\u003eDuring frees, we should try to eat the adjacent \u003cstrong\u003eright\u003c/strong\u003e free memory to create one large free block in order to coalescing free blocks together\u003c/p\u003e\n\u003cp\u003eDuring \u003ca href=\"/posts/kbhheap/#realloc\"\u003erealloc\u003c/a\u003e, there are three conditions by which you can retrun the same address:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esize is growing, there\u0026rsquo;s free space to the right\u003c/li\u003e\n\u003cli\u003esize is growing, but we added padding so we can use that\u003c/li\u003e\n\u003cli\u003esize is shrinking (we have to ensure that we have at least 16 bytes in the shrink space, which means we need to be shrink by at least 24 bytes to actually do any shrinking)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"memory-providing-rules\"\u003eMemory providing rules\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eat least 16 bytes (only if \u003ca href=\"#explicit-free-list-allocator\"\u003eExplicit Free List Allocator\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003ehas to be multiple of 8\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"explicit-allocator-requirements\"\u003eExplicit allocator Requirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emust have headers to track information in implicit\u003c/li\u003e\n\u003cli\u003emust have an explicit free list managed as a doubly linked list using the first 16 bytes of the free block\u003c/li\u003e\n\u003cli\u003emust have a malloc implementation that searches the free block\u003c/li\u003e\n\u003cli\u003emust coallesce the immediate right free blocks\u003c/li\u003e\n\u003cli\u003emust do in-place realloc when possible; even if its not possible, we should still absorb 
adjacent right blocks or no longer absorb and must realloc\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhheap_allocator/","tags":null,"title":"Heap allocator"},{"categories":null,"contents":"see also two-dimensional heat equation the following relates to 1d\nheat distributes by \u0026ldquo;diffusing\u0026rdquo;; this is heat \\(u\\) diffusing across a plate\n\\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} \\end{equation}\nwe have, with Dirichlet Conditions:\n\\begin{equation} u_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\sin \\qty( \\frac{k \\pi x}{l}) \\end{equation}\nand with Neumann Conditions:\n\\begin{equation} u_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l}) \\end{equation}\nwith infinite boundaries:\n\\begin{equation} U(t,x) =\\frac{1}{\\sqrt{4 \\pi \\alpha t}} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y} \\end{equation}\ngeneral system:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\lambda A(t) \\\\ B\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x) \\end{cases} \\end{equation}\nRemoving a constant Consider a function:\n\\begin{equation} t = c \\tau \\end{equation}\nyou can remove the constant by finanglisng because the constant drops out when scaled (i.e. 
you can just scale your results back TODO check this).\ndamping damped heat equation\nSolving Heat Equation Consider the one dimensional heat equation:\n\\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} \\end{equation}\n\u0026ldquo;well posed-ness\u0026rdquo; of to this problem requires two sets of initial conditions: one \u0026ldquo;boundary condition\u0026rdquo;\nInitial Condition Because the expression is linear by time, we need one initial condition; let\u0026rsquo;s say its some function in \\(x\\):\n\\begin{equation} f_{0}(x) \\end{equation}\nSolving Let\u0026rsquo;s make an educated guess:\n\\begin{equation} u(t,x) = A(t) B(x) \\end{equation}\nConsider:\n\\begin{equation} \\pdv{u}{t} = A\u0026rsquo;(t)B(x) \\end{equation}\n\\begin{equation} \\pdv[2]{u}{x} = A(t) B\u0026rsquo;\u0026rsquo;(x) \\end{equation}\nThis results in:\n\\begin{equation} A\u0026rsquo;(t) B(x) = A(t) B\u0026rsquo;\u0026rsquo;(X) \\end{equation}\nmeaning, we can rearrange and integrate:\n\\begin{equation} \\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} \\end{equation}\nYou will note that taking a derivative by \\(t\\) on one side tells us that the right side is \\(0\\), and taking derivative of \\(x\\) on the other results in left side is \\(0\\). This tell us that this function is constant in both \\(t\\) and \\(x\\). 
Meaning:\n\\begin{equation} \\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} = \\lambda \\end{equation}\nThis results in a renewed system:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\lambda A(t) \\\\ B\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x) \\end{cases} \\end{equation}\nSolving using Dirichlet Conditions Finding \\(\\lambda\\) using Boundary Conditions\nNow, recall from our system:\n\\begin{equation} B\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x) \\end{equation}\nIts solutions are\n\\begin{equation} B(x) = c_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x} \\end{equation}\nRecall Dirichlet Conditions:\n\\begin{equation} u(t,0) = u(t, l)= 0 \\end{equation}\nThis tells us that \\(B(0) = 0\\), \\(B(l) = 0\\).\nAt \\(B(0)\\), this gives us that \\(c_1 + c_2 = 0\\), meaning \\(c_2 = -c_1\\)\nAt \\(B(l) = 0 = c_1 \\qty( e^{\\sqrt{\\lambda}l} - e^{-\\sqrt{\\lambda}l})\\). Dividing \\(c_1\\) to both sides, we obtain \\(e^{2\\sqrt{\\lambda} l} = 1\\).\nWe finally can obtain \\(\\lambda\\). One obvious answer is \\(\\lambda = 0\\). But, there are other fun things we can do:\nAside:\nRecall, if we desire\n\\begin{equation} e^{i\\theta} = \\cos \\theta + i \\sin \\theta = 1 \\end{equation}\nThis gives:\n\\begin{equation} \\theta = 2\\pi k \\end{equation}\nTherefore, recall that we obtained \\(e^{2\\sqrt{\\lambda}l}\\), we obtained:\n\\begin{equation} 2\\sqrt{\\lambda}l = 2\\pi k i \\end{equation}\nSolving for \\(\\lambda\\), we finally get solutions:\n\\begin{equation} \\lambda_{k} = \\frac{-k^{2}\\pi^{2}}{l^{2}} \\end{equation}\nfor \\(k = 0, 1, 2, 3\\); this condition called \u0026ldquo;quantization\u0026rdquo;\nSolving Again\nNow that we know \\(\\lambda\\), we can say:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\frac{-k^{2}\\pi^{2}}{l^{2}} A(t) \\\\ B\u0026rsquo;\u0026rsquo;(x) = \\frac{-k^{2}\\pi^{2}}{l^{2}} B(x) \\end{cases} \\end{equation}\nAnd then we can proceed to solve everything again. 
[a little lost, but in theory \\(\\cos(x)\\) drops out after solving].\nThis Gives us eventually:\n\\begin{equation} A(t) = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t} \\end{equation}\nand\n\\begin{equation} B(x) = \\sin \\frac{k\\pi x}{l} \\end{equation}\nsin Recall that \\(U = AB\\), this means, with all generality:\n\\begin{equation} u_{k} = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t}\\sin \\frac{k\\pi x}{l} \\end{equation}\nInitial Conditions\nSuppose we have initial condition:\n\\begin{equation} f_{0}(x) = \\sum a_{n} \\sin \\qty( \\frac{k\\pi x}{l}) \\end{equation}\nbecause the PDE is linear, we obtain:\n\\begin{equation} u_{k}(t,x) = \\sum a_{k} e^{-\\frac{k^{2}\\pi^{2}}{l^{2}}t} \\sin \\qty( \\frac{k \\pi x}{l}) \\end{equation}\nagain quantized over \\(k\\).\nThis is because each individual terms corresponds to a solution \\(a_{n} \\sin \\qty(\\frac{k\\pi x}{l})\\), and at boundary condition \\(f_{0}(x)\\), the left term of the general solution drops out, to obtain:\n\\begin{equation} a_{n}\\frac{k \\pi x}{l} = f_{0}(x) = u(0, x) = e^{0} \\sin \\qty(\\frac{k \\pi x}{l}) = a_{k} \\sin \\qty(\\frac{k \\pi x}{l}) \\end{equation}\nso we can just match terms.\nThe good news is that, because exists, any initial condition that\u0026rsquo;s a well-formed function can be written a sum of sines. This also converges really quickly (because \\(e^{-k^{2}}\\)). 
Further, given some \\(f_{0}(x)\\), we obtain a specific \\(k\\) and will obtain a specific solution.\nSolving using Neumann Conditions Go through the derivation, this gives:\n\\begin{equation} u_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l}) \\end{equation}\n","html":"\u003cp\u003e\u003cstrong\u003esee also \u003ca href=\"/posts/kbhtwo_dimensional_heat_equation/\"\u003etwo-dimensional heat equation\u003c/a\u003e\u003c/strong\u003e the following relates to 1d\u003c/p\u003e\n\u003cp\u003eheat distributes by \u0026ldquo;diffusing\u0026rdquo;; this is heat \\(u\\) diffusing across a plate\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have, with \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\sin \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand with \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith infinite boundaries:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) =\\frac{1}{\\sqrt{4 \\pi \\alpha t}} \\int_{\\mathbb{R}} f(y) e^{-\\frac{(x-y)^{2}}{4\\alpha t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egeneral system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\lambda A(t) \\\\\nB\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"removing-a-constant\"\u003eRemoving a constant\u003c/h2\u003e\n\u003cp\u003eConsider a function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = c 
\\tau\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou can remove the constant by finanglisng because the constant drops out when scaled (i.e. you can just scale your results back TODO check this).\u003c/p\u003e\n\u003ch2 id=\"damping\"\u003edamping\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdamped_heat_equation/\"\u003edamped heat equation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"solving-heat-equation\"\u003eSolving Heat Equation\u003c/h2\u003e\n\u003cp\u003eConsider the one dimensional heat equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;well posed-ness\u0026rdquo; of to this problem requires two sets of initial conditions: one \u0026ldquo;boundary condition\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"initial-condition\"\u003eInitial Condition\u003c/h3\u003e\n\u003cp\u003eBecause the expression is linear by time, we need one initial condition; let\u0026rsquo;s say its some function in \\(x\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf_{0}(x)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"solving\"\u003eSolving\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s make an educated guess:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = A(t) B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = A\u0026rsquo;(t)B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{x} = A(t) B\u0026rsquo;\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis results in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;(t) B(x) = A(t) B\u0026rsquo;\u0026rsquo;(X)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, we can rearrange and integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that taking a derivative by \\(t\\) 
on one side tells us that the right side is \\(0\\), and taking derivative of \\(x\\) on the other results in left side is \\(0\\). This tell us that this function is constant in both \\(t\\) and \\(x\\). Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} = \\lambda\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis results in a renewed system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\lambda A(t) \\\\\nB\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"solving-using-dirichlet-conditions--kbhsu-math53-feb232024-dot-md\"\u003eSolving using \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eFinding \\(\\lambda\\) using Boundary Conditions\u003c/p\u003e\n\u003cp\u003eNow, recall from our system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB\u0026rsquo;\u0026rsquo;(x) = \\lambda B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts solutions are\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = c_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,0) = u(t, l)= 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that \\(B(0) = 0\\), \\(B(l) = 0\\).\u003c/p\u003e\n\u003cp\u003eAt \\(B(0)\\), this gives us that \\(c_1 + c_2 = 0\\), meaning \\(c_2 = -c_1\\)\u003c/p\u003e\n\u003cp\u003eAt \\(B(l) = 0 = c_1 \\qty( e^{\\sqrt{\\lambda}l} - e^{-\\sqrt{\\lambda}l})\\). Dividing \\(c_1\\) to both sides, we obtain \\(e^{2\\sqrt{\\lambda} l} = 1\\).\u003c/p\u003e\n\u003cp\u003eWe finally can obtain \\(\\lambda\\). 
One obvious answer is \\(\\lambda = 0\\). But, there are other fun things we can do:\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside:\u003c/p\u003e\n\u003cp\u003eRecall, if we desire\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{i\\theta} = \\cos \\theta + i \\sin \\theta = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta = 2\\pi k\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eTherefore, recall that we obtained \\(e^{2\\sqrt{\\lambda}l}\\), we obtained:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2\\sqrt{\\lambda}l = 2\\pi k i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving for \\(\\lambda\\), we finally get solutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{k} = \\frac{-k^{2}\\pi^{2}}{l^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(k = 0, 1, 2, 3\\); this condition called \u0026ldquo;quantization\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSolving Again\u003c/p\u003e\n\u003cp\u003eNow that we know \\(\\lambda\\), we can say:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\frac{-k^{2}\\pi^{2}}{l^{2}} A(t) \\\\\nB\u0026rsquo;\u0026rsquo;(x) = \\frac{-k^{2}\\pi^{2}}{l^{2}} B(x)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then we can proceed to solve everything again. 
[a little lost, but in theory \\(\\cos(x)\\) drops out after solving].\u003c/p\u003e\n\u003cp\u003eThis Gives us eventually:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA(t) = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = \\sin \\frac{k\\pi x}{l}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esin\nRecall that \\(U = AB\\), this means, with all generality:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k} = e^{-\\frac{k^{2}\\pi^{2}}{l^{2}} t}\\sin \\frac{k\\pi x}{l}\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInitial Conditions\u003c/p\u003e\n\u003cp\u003eSuppose we have initial condition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf_{0}(x) = \\sum a_{n} \\sin \\qty( \\frac{k\\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the PDE is linear, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum a_{k} e^{-\\frac{k^{2}\\pi^{2}}{l^{2}}t} \\sin \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eagain quantized over \\(k\\).\u003c/p\u003e\n\u003cp\u003eThis is because each individual terms corresponds to a solution \\(a_{n} \\sin \\qty(\\frac{k\\pi x}{l})\\), and at boundary condition \\(f_{0}(x)\\), the left term of the general solution drops out, to obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n}\\frac{k \\pi x}{l} = f_{0}(x) = u(0, x) = e^{0} \\sin \\qty(\\frac{k \\pi x}{l}) = a_{k} \\sin \\qty(\\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso we can just match terms.\u003c/p\u003e\n\u003cp\u003eThe good news is that, because exists, any initial condition that\u0026rsquo;s a well-formed function can be written a sum of sines. This also converges really quickly (because \\(e^{-k^{2}}\\)). 
Further, given some \\(f_{0}(x)\\), we obtain a specific \\(k\\) and will obtain a specific solution.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"solving-using-neumann-conditions--kbhsu-math53-feb232024-dot-md\"\u003eSolving using \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eGo through the derivation, this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_{k}(t,x) = \\sum b_{k} e^{ - \\frac{k^{2} \\pi^{2}}{l^{2}} t } \\cos \\qty( \\frac{k \\pi x}{l})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhheat_equation/","tags":null,"title":"Heat Equation"},{"categories":null,"contents":"Hello Internet is a podcast hosted by Brady Haran and CGP Grey.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhello_internet/\"\u003eHello Internet\u003c/a\u003e is a podcast hosted by Brady Haran and CGP Grey.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhello_internet/","tags":null,"title":"Hello Internet"},{"categories":null,"contents":"Herber Hoover is an American president.\nHerber Hoover\u0026rsquo;s response to the Great Depression Hoover\u0026rsquo;s Programs: too little, too late Makes business pledge to maintain wages, tax cuts, Smoot-halwey Tariff, bank financial support Builds Golden Gate Bridge and the Hoover Dam Rejects the idea of the direct federal relief, which is against FDR\u0026rsquo;s thoughts ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e is an American president.\u003c/p\u003e\n\u003ch2 id=\"herber-hoover--kbhherber-hoover-dot-md--s-response-to-the-great-depression--kbhgreat-depression-dot-md\"\u003e\u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e\u0026rsquo;s response to the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHoover\u0026rsquo;s Programs: 
too little, too late\u003c/li\u003e\n\u003cli\u003eMakes business pledge to maintain wages, tax cuts, Smoot-halwey Tariff, bank financial support\u003c/li\u003e\n\u003cli\u003eBuilds \u003ca href=\"/posts/kbhgolden_gate_bridge/\"\u003eGolden Gate Bridge\u003c/a\u003e and the \u003ca href=\"/posts/kbhhoover_dam/\"\u003eHoover Dam\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eRejects the idea of the direct federal relief, which is against \u003ca href=\"/posts/kbhfdr/\"\u003eFDR\u003c/a\u003e\u0026rsquo;s thoughts\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhherber_hoover/","tags":null,"title":"Herber Hoover"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhheteroskedastic/","tags":null,"title":"heteroskedasticity"},{"categories":null,"contents":" draw an initial state \\(q_1\\) from the initial state distribution \\(\\pi\\) For each state \\(q_{i}\\)\u0026hellip; Drew observe something \\(o_{t}\\) according to the action distribution of state \\(q_{i}\\) Use transition probability \\(a_{i,j}\\) to draw a next state \\(q_{j}\\) Isolated recognition: train a family of HMMs, one for each word or something. Then, given new data, perform scoring of the HMM onto the features.\ncomponents of HMMs scoring Given an observation \\(o_1, \u0026hellip;, o_{T}\\) and a model, we compute $P(O | λ)$\u0026mdash;the probability of a sequence given a model \\(\\lambda\\)\n\u0026ldquo;forward and backward algorithm\u0026rdquo;\ndecoding Given observations, find the state sequence \\(q1, \u0026hellip;, q_{T}\\) most likely to have generated\ntraining Given observations \\(O\\), find the model parameters \\(\\lambda\\) that maximize \\(P(O|\\lambda)\\), the Maximum Likelihood Parameter Learning.\ncontinuous-density HMM There are some HMMs that blend the discrete timestamps into Gaussian mixture models.\ncontinuous speech Scoring becomes hard because you have to go through and calculate every freaking word. 
THerefore:\n\\begin{equation} P(W|O) = \\frac{P(O|W) P(W)}{P(O)} \\end{equation}\nTherefore, we really desire:\n\\begin{equation} \\arg\\max_{w} P(O|W) P(W) \\end{equation}\n","html":"\u003col\u003e\n\u003cli\u003edraw an initial state \\(q_1\\) from the initial state distribution \\(\\pi\\)\u003c/li\u003e\n\u003cli\u003eFor each state \\(q_{i}\\)\u0026hellip;\n\u003col\u003e\n\u003cli\u003eDrew observe something \\(o_{t}\\) according to the action distribution of state \\(q_{i}\\)\u003c/li\u003e\n\u003cli\u003eUse transition probability \\(a_{i,j}\\) to draw a next state \\(q_{j}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIsolated recognition: train a family of \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003es, one for each word or something. Then, given new data, perform scoring of the \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003e onto the features.\u003c/p\u003e\n\u003ch2 id=\"components-of-hmm--kbhhidden-markov-model-dot-md--s\"\u003ecomponents of \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003es\u003c/h2\u003e\n\u003ch3 id=\"scoring\"\u003escoring\u003c/h3\u003e\n\u003cp\u003eGiven an observation \\(o_1, \u0026hellip;, o_{T}\\) and a model, we compute $P(O | λ)$\u0026mdash;the probability of a sequence given a model \\(\\lambda\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;forward and backward algorithm\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"decoding\"\u003edecoding\u003c/h3\u003e\n\u003cp\u003eGiven observations, find the state sequence \\(q1, \u0026hellip;, q_{T}\\) most likely to have generated\u003c/p\u003e\n\u003ch3 id=\"training\"\u003etraining\u003c/h3\u003e\n\u003cp\u003eGiven observations \\(O\\), find the model parameters \\(\\lambda\\) that maximize \\(P(O|\\lambda)\\), the \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 
id=\"continuous-density-hmm--kbhhidden-markov-model-dot-md\"\u003econtinuous-density \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThere are some \u003ca href=\"/posts/kbhhidden_markov_model/\"\u003eHMM\u003c/a\u003es that blend the discrete timestamps into \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-mixture-model\"\u003eGaussian mixture model\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"continuous-speech\"\u003econtinuous speech\u003c/h2\u003e\n\u003cp\u003eScoring becomes hard because you have to go through and calculate every freaking word. THerefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(W|O) = \\frac{P(O|W) P(W)}{P(O)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we really desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{w} P(O|W) P(W)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhidden_markov_model/","tags":null,"title":"Hidden Markov Model"},{"categories":null,"contents":"Misinformation can decline people\u0026rsquo;s intent to vaccinate.\nData VaxConcerns Taxonomy: sorting misinformation into a taxonomy which has multiple, hierarchical labels\nSo, you can classify a label in a bunch of places.\nApproaches Ask an LLM to do one of the following\u0026hellip;\nOne-Shot, Multi-Label Ignore hierchy, just go do multi label system\nmulti-pass, hierarchical label try to predict groups at a time, each pass has a label group as input and needs to produce whether the label is given + sublabels\none pass, hierarchical label Only predict labels on the lowest level, then extract higher level information\nbinary search predict highest level, then search down\n\u0026ldquo;Almost Few Shot\u0026rdquo; Trying to force a specific output format.\n","html":"\u003cp\u003eMisinformation can decline people\u0026rsquo;s intent to vaccinate.\u003c/p\u003e\n\u003ch2 id=\"data\"\u003eData\u003c/h2\u003e\n\u003cp\u003eVaxConcerns Taxonomy: 
sorting misinformation into a taxonomy which has multiple, hierarchical labels\u003c/p\u003e\n\u003cp\u003eSo, you can classify a label in a bunch of places.\u003c/p\u003e\n\u003ch2 id=\"approaches\"\u003eApproaches\u003c/h2\u003e\n\u003cp\u003eAsk an LLM to do one of the following\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"one-shot-multi-label\"\u003eOne-Shot, Multi-Label\u003c/h3\u003e\n\u003cp\u003eIgnore hierchy, just go do multi label system\u003c/p\u003e\n\u003ch3 id=\"multi-pass-hierarchical-label\"\u003emulti-pass, hierarchical label\u003c/h3\u003e\n\u003cp\u003etry to predict groups at a time, each pass has a label group as input and needs to produce whether the label is given + sublabels\u003c/p\u003e\n\u003ch3 id=\"one-pass-hierarchical-label\"\u003eone pass, hierarchical label\u003c/h3\u003e\n\u003cp\u003eOnly predict labels on the lowest level, then extract higher level information\u003c/p\u003e\n\u003ch3 id=\"binary-search\"\u003ebinary search\u003c/h3\u003e\n\u003cp\u003epredict highest level, then search down\u003c/p\u003e\n\u003ch2 id=\"almost-few-shot\"\u003e\u0026ldquo;Almost Few Shot\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eTrying to force a specific output format.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhierarchical_multi_label_clsf_for_vaccine/","tags":null,"title":"Hierarchical Multi-Label Clsf. 
for Vaccine"},{"categories":null,"contents":"a monomer is named active if it recruits its neighbors to make a polymer that can act as an enzyme to catalyze other monomers to become active as well\n","html":"\u003cp\u003ea monomer is named \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactive\u003c/a\u003e if it recruits its neighbors to make a polymer that can act as an enzyme to catalyze other monomers to become \u003ca href=\"/posts/kbhhigh_chemical_activity/\"\u003eactive\u003c/a\u003e as well\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhigh_chemical_activity/","tags":null,"title":"high chemical activity"},{"categories":null,"contents":"If we are tele-operating a robot, we ideally want to minimize cost. We want to estimate a user\u0026rsquo;s goal via user inputs. Predict the most likely goal + assist for it.\n\u0026ldquo;find a cost function for which user input \\(u\\) is optimal\u0026rdquo;.\nsystem does not know the goal the user may not change their goal on a whim Hindsight Optimization To solve this, we use QMDP: \u0026ldquo;select the most optimal actions to estimating cost-to-go assuming full observability\u0026rdquo;.\n\\begin{equation} Q(b,a,u) = \\sum_{g}^{} b(g) Q_{g}(x,a,u) \\end{equation}\nResult users felt less in control with Hindsight Optimization, despite reaching the goal faster with this policy.\nChallenging the results between \u0026ldquo;task completion\u0026rdquo; vs. \u0026ldquo;user satisfaction\u0026rdquo;.\n","html":"\u003cp\u003eIf we are tele-operating a robot, we ideally want to minimize \u003cstrong\u003ecost\u003c/strong\u003e. We want to estimate a user\u0026rsquo;s \u003cstrong\u003egoal\u003c/strong\u003e via user inputs. 
Predict the most likely goal + assist for it.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;find a cost function for which user input \\(u\\) is optimal\u0026rdquo;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esystem does not know the goal\u003c/li\u003e\n\u003cli\u003ethe user may not change their goal on a whim\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"hindsight-optimization--kbhhindsight-optimization-dot-md\"\u003e\u003ca href=\"/posts/kbhhindsight_optimization/\"\u003eHindsight Optimization\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eTo solve this, we use \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e: \u0026ldquo;select the most optimal actions to estimating cost-to-go assuming full observability\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(b,a,u) = \\sum_{g}^{} b(g) Q_{g}(x,a,u)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"result\"\u003eResult\u003c/h2\u003e\n\u003cp\u003eusers felt less in control with \u003ca href=\"/posts/kbhhindsight_optimization/\"\u003eHindsight Optimization\u003c/a\u003e, despite reaching the goal faster with this policy.\u003c/p\u003e\n\u003cp\u003eChallenging the results between \u0026ldquo;task completion\u0026rdquo; vs. \u0026ldquo;user satisfaction\u0026rdquo;.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhindsight_optimization/","tags":null,"title":"Hindsight Optimization"},{"categories":null,"contents":"history economics utility theory \u0026mdash; psychology Pavlov\u0026rsquo;s salivating togs: biological reinforcement learning Alan Turing thought about this regarding neuroscience bio-simulated things computer science how \u0026ldquo;systems composed of matter can have properties of the mind engineering control theory maffs statistics operations research WWII logistics optimization allocating decisions, etc. 
simplex algorithm societal impact classes can help amplify intention (whether good or evil) data-driven methods ","html":"\u003ch2 id=\"history\"\u003ehistory\u003c/h2\u003e\n\u003ch3 id=\"economics\"\u003eeconomics\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eutility theory\u003c/strong\u003e \u0026mdash;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"psychology\"\u003epsychology\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePavlov\u0026rsquo;s salivating togs: biological \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAlan Turing thought about this regarding\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"neuroscience\"\u003eneuroscience\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ebio-simulated things\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"computer-science\"\u003ecomputer science\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ehow \u0026ldquo;systems composed of matter can have properties of the mind\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"engineering\"\u003eengineering\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003econtrol theory\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"maffs\"\u003emaffs\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"operations-research\"\u003eoperations research\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eWWII\u003c/a\u003e logistics \u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eallocating decisions, etc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003esimplex algorithm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"societal-impact\"\u003esocietal impact\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eclasses can help amplify intention (whether good or evil)\u003c/li\u003e\n\u003cli\u003edata-driven 
methods\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdecision_making_history/","tags":null,"title":"history and impact of decision making"},{"categories":null,"contents":" Reading Date Notes New Deal Flip-book \u0026lt;2022-03-24 Thu\u0026gt; New Deal Historian Flipbook Legacy of McCarthyism \u0026lt;2022-04-25 Mon\u0026gt; Legacy of McCarthyism Soviet Perspective on the Cold War \u0026lt;2022-04-29 Fri\u0026gt; Soviet Perspective on Cold War MLK and Malcom X \u0026lt;2022-05-10 Tue\u0026gt; MLK and Malcom X Reading Origins of American Conservatism \u0026lt;2022-05-27 Fri\u0026gt; Origins of American Conservatism ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eReading\u003c/th\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eNotes\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eNew Deal Flip-book\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-03-24 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eNew Deal Historian Flipbook\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLegacy of McCarthyism\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-25 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlegacy_of_mccarthyism/\"\u003eLegacy of McCarthyism\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSoviet Perspective on the Cold War\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-29 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsoviet_perspective_on_cold_war/\"\u003eSoviet Perspective on Cold 
War\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMLK and Malcom X\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-10 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmlk_and_malcom_x_reading/\"\u003eMLK and Malcom X Reading\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOrigins of American Conservatism\u003c/td\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-27 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrise_of_american_conservatism/\"\u003eOrigins of American Conservatism\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhistory_readings_index/","tags":["index"],"title":"History Readings Index"},{"categories":null,"contents":"Homestead Act is the legal colonization of the west that was a Contributor to the Guilded Age\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhomestead_act/\"\u003eHomestead Act\u003c/a\u003e is the legal colonization of the west that was a \u003ca href=\"/posts/kbhguilded_age/#contributors-to-the-id-a21cee5b-f0e2-4647-8e7d-e17d9c55ea42-guilded-age\"\u003eContributor to the Guilded Age\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhomestead_act/","tags":null,"title":"Homestead Act"},{"categories":null,"contents":"statistical context Homogeneity is a measure of how similar many things are.\nLinear Algebra context \u0026hellip;of linear maps homogeneity is a property of Linear Maps to describe the ability to \u0026ldquo;factor out\u0026rdquo; scalars\n\u0026hellip;of linear equations A homogenous linear equation is one which the constant term on the right of the equations are \\(0\\).\nhomogenous system with more 
variables than equations has nonzero solutions Proof: You can imagine the system as a matrix equation:\n\\begin{equation} Av = 0 \\end{equation}\nwhere, \\(v\\) is a list of input variables, and \\(A\\) is a coefficient matrix. Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\nNow, the input variables \\(v\\) of the above expression is in the null space of \\(A\\). The question of \u0026ldquo;whether is there non-zero solutions\u0026rdquo; can be rephrased as given \\(Av=0\\), does \\(v=0\\)?\u0026quot; Otherwise known as \u0026ldquo;is \\(null\\ A=\\{0\\}\\)?\u0026rdquo;: that is, \u0026ldquo;is \\(A\\) injective?\u0026rdquo;\nGiven the fact that map to smaller space is not injective, if \\(m \u0026lt;n\\), the map is not going to be injective. Therefore, we want \\(m\u0026lt;n\\), meaning we want more variables (\\(n\\)) than equations (\\(m\\)) to have non-zero solutions.\ninhomogenous system with more equations than variables has no solutions for an arbitrary set of constants Proof: You can imagine the system as a matrix equation:\n\\begin{equation} Av = C \\end{equation}\nwhere, \\(v\\) is a list of input variables, and \\(A\\) is a coefficient matrix. Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\nNow, a valid solution of the above expression means that \\(Av=C\\) for all \\(v\\) (as they are, of course, the variables.) If we want the expression to have a solution for all choices of \\(C\\), we desire that the range of \\(A\\) to equal to its codomain\u0026mdash;that we desire it to be surjective.\nGiven the fact that map to bigger space is not surjective, if \\(m \u0026gt; n\\), the map is not going to be surjective. 
Therefore, we want \\(m\u0026gt;n\\), meaning we want more equations (\\(m\\)) than variables (\\(n\\)) to have no solutions for arbitrary \\(C\\).\n","html":"\u003ch2 id=\"statistic--kbhstastistic-dot-md--al-context\"\u003e\u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003eal context\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003eHomogeneity\u003c/a\u003e is a measure of how similar many things are.\u003c/p\u003e\n\u003ch2 id=\"linear-algebra--kbhlinear-algebra-index-dot-md--context\"\u003e\u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eLinear Algebra\u003c/a\u003e context\u003c/h2\u003e\n\u003ch3 id=\"dot-dot-dot-of-linear-maps\"\u003e\u0026hellip;of linear maps\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e is a property of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es to describe the ability to \u0026ldquo;factor out\u0026rdquo; scalars\u003c/p\u003e\n\u003ch3 id=\"dot-dot-dot-of-linear-equations\"\u003e\u0026hellip;of linear equations\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogenous\u003c/a\u003e linear equation is one which the constant term on the right of the equations are \\(0\\).\u003c/p\u003e\n\u003ch4 id=\"homogenous--kbhhomogeneity-dot-md--system-with-more-variables-than-equations-has-nonzero-solutions\"\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogenous\u003c/a\u003e system with more variables than equations has nonzero solutions\u003c/h4\u003e\n\u003cp\u003eProof:\nYou can imagine the system as a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAv = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v\\) is a list of input variables, and \\(A\\) is a \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e. 
Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\u003c/p\u003e\n\u003cp\u003eNow, the input variables \\(v\\) of the above expression is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(A\\). The question of \u0026ldquo;whether is there non-zero solutions\u0026rdquo; can be rephrased as given \\(Av=0\\), does \\(v=0\\)?\u0026quot; Otherwise known as \u0026ldquo;is \\(null\\ A=\\{0\\}\\)?\u0026rdquo;: that is, \u0026ldquo;is \\(A\\) \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eGiven the fact that \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e, if \\(m \u0026lt;n\\), the map is not going to be \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e. Therefore, we want \\(m\u0026lt;n\\), meaning we want more variables (\\(n\\)) than equations (\\(m\\)) to have non-zero solutions.\u003c/p\u003e\n\u003ch4 id=\"in-homogenous--kbhhomogeneity-dot-md--system-with-more-equations-than-variables-has-no-solutions-for-an-arbitrary-set-of-constants\"\u003ein\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogenous\u003c/a\u003e system with more equations than variables has no solutions for an arbitrary set of constants\u003c/h4\u003e\n\u003cp\u003eProof:\nYou can imagine the system as a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAv = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v\\) is a list of input variables, and \\(A\\) is a \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e. 
Note that \\(A = \\mathbb{F}^{n} \\to \\mathbb{F}^{m}\\), where \\(n\\) is the number of variables, and \\(m\\) the number of equations.\u003c/p\u003e\n\u003cp\u003eNow, a valid solution of the above expression means that \\(Av=C\\) for all \\(v\\) (as they are, of course, the variables.) If we want the expression to have a solution for all choices of \\(C\\), we desire that the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(A\\) to equal to its codomain\u0026mdash;that we desire it to be \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGiven the fact that \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e, if \\(m \u0026gt; n\\), the map is not going to be \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e. Therefore, we want \\(m\u0026gt;n\\), meaning we want more equations (\\(m\\)) than variables (\\(n\\)) to have no solutions for arbitrary \\(C\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhomogeneity/","tags":null,"title":"homogeneity"},{"categories":null,"contents":"the\n","html":"\u003cp\u003ethe\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhomset/","tags":null,"title":"homset"},{"categories":null,"contents":"Honoré\u0026rsquo;s Statistic is a statistical measure of vocabulary complexity, it is a test of Semantic Verbal Fluency and is commonly used for cognitive impairment detection.\nThe statistic is defined as:\n\\begin{equation} HS = 100 \\log \\frac{N}{1-\\frac{N_{uni}}{U}} \\end{equation}\nwhere, \\(N\\) is the total number of words, \\(U\\) the total number of distinct words, \\(N_{uni}\\) the number of total distinct words used only once.\nThe idea here is that a higher diversity of vocabulary shows higher Semantic Verbal Fluency.\n","html":"\u003cp\u003e\u003ca 
href=\"/posts/kbhhonore_s_statistic/\"\u003eHonoré\u0026rsquo;s Statistic\u003c/a\u003e is a statistical measure of vocabulary complexity, it is a test of \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e and is commonly used for cognitive impairment detection.\u003c/p\u003e\n\u003cp\u003eThe statistic is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nHS = 100 \\log \\frac{N}{1-\\frac{N_{uni}}{U}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(N\\) is the total number of words, \\(U\\) the total number of distinct words, \\(N_{uni}\\) the number of total distinct words used only once.\u003c/p\u003e\n\u003cp\u003eThe idea here is that a higher diversity of vocabulary shows higher \u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSemantic Verbal Fluency\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhonore_s_statistic/","tags":null,"title":"Honoré's Statistic"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhhoover_dam/","tags":null,"title":"Hoover Dam"},{"categories":null,"contents":"Hoovervile are homeless encampments named after Herber Hoover, where homeless people band together after loosing jobs in the Great Depression.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhooverviles/\"\u003eHoovervile\u003c/a\u003e are homeless encampments named after \u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e, where homeless people band together after loosing jobs in the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhooverviles/","tags":null,"title":"Hoovervile"},{"categories":null,"contents":"\u0026lt;\u0026gt; Hux\n","html":"\u003cp\u003e\u0026lt;\u0026gt; Hux\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhopfield_networks/","tags":null,"title":"Hopfield 
Networks"},{"categories":null,"contents":"me\n","html":"\u003cp\u003eme\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhoujun_liu/","tags":null,"title":"Houjun Liu"},{"categories":null,"contents":" 👋 Howdy, I'm Houjun Liu! I\u0026rsquo;m a first-year undergraduate student in the Computer Science Department at Stanford University, advised by Prof. Mykel Kochenderfer. I\u0026rsquo;m interested in Natural Language Processing and Speech Language Sample Analysis, specifically, in making large models solve important problems through 1) building better tools for language and speech processing to democratize state-of-the-art research 2) designing improved algorithmic approaches to language model training + decoding to improve performance and 3) exploring their applications.\nWelcome to my academic homepage! This is my little homestead on the internet about my academic interests. Check out my projects below. If you want to know more about the rest of my life, feel free to visit my website!\nRecent goings on Feb. 26-27, 24' AAAI 2024! See y'all in Vancouver! Dec. 15, 23' Paper (NACC) Accepted by W3PHAI-24 Dec. 3, 23' Released TalkBank Utterance Model Jun. 22, 23' Paper (Batchalign) Published by JSLHR Shoot me an email at [firstname] at stanford dot edu, or, if you are around Stanford, grab dinner with me :)\nAbout I am a research engineer at the TalkBank Project at CMU under the supervision of Prof. Brian MacWhinney, where I develop better models and tools for clinical language sample analysis. I also work with the Stanford NLP Group, under direction of Prof. Chris Manning, on using neural models to solve semantic and syntax tagging tasks efficiently with Stanza. Finally, I am a research assistant with Prof. Xin Liu at UC Davis and at UC Davis Health, where I use transformer models to push our understanding of dementia.\nIn industry, I lead the development of Condution, simon, and am a managing partner at #!/Shabang. 
Previously, I worked as a consulting ML engineer at Dragonfruit AI under the AI Operations team.\nProjects UC Davis Health (2023) A Transformer Approach to Congnitive Impairment Classification and Prediction Liu, H., Weakley, A.M., Zhang, J., Liu, X. Talk@NACCIn Press@W3PHAI-24 at AAAI TalkBank (2023) Automation of Language Sample Analysis Liu, H., MacWhinney, B., Fromm, D., Lanzi, A. Journal Article@JSLHR Code@GitHub TalkBank (2023) DementiaBank: Theoretical Rationale, Protocol, and Illustrative Analyses Lanzi, A., Saylor, A.K., Fromm, D., Liu, H., MacWhinney, B., Cohen, M.L. Journal Article@AJSLP Nueva (2022) ConDef: Automated Context-Aware Lexicography Using Large Online Encyclopedias Liu, H., Sayyah, Z. Book Chapter@LNNSCode@GitHubTalk@SAI Preprint (2021) Towards Automated Psychotherapy via Language Modeling Liu, H. arXiv Teaching 2023- Teaching Assistant, Stanford Association for Computing Machinery (ACM) Chapter 2022-2023 Head TA (co-instructor and summer lecturer) at AIFS AIBridge, a program funded by UC Davis Food Science 2021-2023 Co-Developer, Research@Nueva, a high-school science education program Course Notes Some folks have mentioned that my course notes through classes at Stanford and before have been helpful. Feel free to browse my Stanford UG Courses Index if you want to check it out!\n© 2019-2024 Houjun Liu. Licensed CC BY-NC-SA 4.0. This website layout is inspired by the lovely homepage of Karel D\u0026rsquo;Oosterlick.\n","html":"\u003ch1 style=\"display:inline-block\"\u003e 👋 Howdy, I'm Houjun Liu! \u003c/h1\u003e\n\u003cp\u003eI\u0026rsquo;m a first-year undergraduate student in the Computer Science Department at \u003ca href=\"https://www.stanford.edu/\"\u003eStanford University\u003c/a\u003e, advised by Prof. \u003ca href=\"https://mykel.kochenderfer.com/\"\u003eMykel Kochenderfer\u003c/a\u003e. 
I\u0026rsquo;m interested in \u003cstrong\u003e\u003cstrong\u003eNatural Language Processing\u003c/strong\u003e\u003c/strong\u003e and \u003cstrong\u003e\u003cstrong\u003eSpeech Language Sample Analysis\u003c/strong\u003e\u003c/strong\u003e, specifically, in making large models solve important problems through 1) building better tools for language and speech processing to democratize state-of-the-art research 2) designing improved algorithmic approaches to language model training + decoding to improve performance and 3) exploring their applications.\u003c/p\u003e\n\u003cp\u003eWelcome to my academic homepage! This is my little homestead on the internet about my academic interests. Check out \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"#projects\"\u003emy projects\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e below. If you want to know more about the rest of my life, feel free to \u003ca href=\"https://www.jemoka.com/\"\u003evisit my website\u003c/a\u003e!\u003c/p\u003e\n\u003cdiv style=\"background-color: #f0f0f0; padding: 1px 10px; border-radius: 5px; margin-top: 20px\"\u003e\n\u003cdiv style=\"margin: 10px 0\"\u003e\n\u003cspan style=\"color: #262626; font-weight:500; color: #292929; opacity:0.6; font-size: 14px\"\u003eRecent goings on\u003c/span\u003e\n\u003cdiv style=\"margin-top: 10px; display: grid; column-gap: 20px; row-gap: 5px; grid-template-columns: 120px auto\"\u003e\n\u003cspan style=\"font-weight: 500\"\u003eFeb. 26-27, 24'\u003c/span\u003e \u003cspan\u003eAAAI 2024! See y'all in Vancouver!\u003c/span\u003e\n\u003cspan style=\"font-weight: 500\"\u003eDec. 15, 23'\u003c/span\u003e \u003cspan\u003ePaper (NACC) Accepted by W3PHAI-24\u003c/span\u003e\n\u003cspan style=\"font-weight: 500\"\u003eDec. 
3, 23'\u003c/span\u003e \u003cspan\u003eReleased \u003ca target=\"_top\" target=\"_top\" href=\"https://huggingface.co/talkbank/CHATUtterance-en\"\u003eTalkBank Utterance Model\u003c/a\u003e\u003c/span\u003e\n\u003cspan style=\"font-weight: 500\"\u003eJun. 22, 23'\u003c/span\u003e \u003cspan\u003ePaper (Batchalign) \u003ca target=\"_top\" href=\"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10555460/\" target=\"_top\"\u003ePublished\u003c/a\u003e by JSLHR\u003c/span\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n\u003cp\u003eShoot me an email at \u003ccode\u003e[firstname] at stanford dot edu\u003c/code\u003e, or, if you are around Stanford, \u003ca href=\"https://cal.com/houjun/dinner\"\u003egrab dinner with me\u003c/a\u003e :)\u003c/p\u003e\n\u003ch2 id=\"about\"\u003eAbout\u003c/h2\u003e\n\u003cp\u003eI am a research engineer at the \u003ca href=\"https://talkbank.org/\"\u003eTalkBank Project\u003c/a\u003e at CMU under the supervision of Prof. Brian MacWhinney, where I develop better \u003ca href=\"https://huggingface.co/talkbank/\"\u003emodels\u003c/a\u003e and \u003ca href=\"https://github.com/talkbank/batchalign2\"\u003etools\u003c/a\u003e for clinical language sample analysis. I also work with the Stanford NLP Group, under direction of Prof. Chris Manning, on using neural models to solve semantic and syntax tagging tasks efficiently with \u003ca href=\"https://github.com/stanfordnlp/stanza\"\u003eStanza\u003c/a\u003e. Finally, I am a research assistant with Prof. 
\u003ca href=\"https://xinliu.engineering.ucdavis.edu/\"\u003eXin Liu\u003c/a\u003e at UC Davis and at \u003ca href=\"https://health.ucdavis.edu/alzheimers/\"\u003eUC Davis Health\u003c/a\u003e, where I use transformer models to push our understanding of dementia.\u003c/p\u003e\n\u003cp\u003eIn industry, I lead the development of \u003ca href=\"https://www.condution.com/\"\u003eCondution\u003c/a\u003e, \u003ca href=\"https://simon.shabang.io/\"\u003esimon\u003c/a\u003e, and am a managing partner at \u003ca href=\"https://www.shabang.io/\"\u003e#!/Shabang\u003c/a\u003e. Previously, I worked as a consulting ML engineer at \u003ca href=\"https://www.dragonfruit.ai/\"\u003eDragonfruit AI\u003c/a\u003e under the AI Operations team.\u003c/p\u003e\n\u003ch2 id=\"projects\"\u003eProjects\u003c/h2\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eUC Davis Health (2023)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eA Transformer Approach to Congnitive Impairment Classification and Prediction\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e, Weakley, A.M., Zhang, J., Liu, X.\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://docs.google.com/presentation/d/1J5WUGUXbVlG5Fl4cQdu6FuNVFTPB2NTW/edit?usp=sharing\u0026ouid=112528726606349722398\u0026rtpof=true\u0026sd=true\"\u003eTalk@NACC\u003c/a\u003e\u003c/span\u003e\u003cspan class=\"tag\"\u003eIn Press@W3PHAI-24 at AAAI\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eTalkBank (2023)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eAutomation of Language Sample Analysis\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e, MacWhinney, B., Fromm, D., Lanzi, 
A.\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\n\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://pubs.asha.org/doi/10.1044/2023_JSLHR-22-00642\"\u003eJournal Article@JSLHR\u003c/a\u003e\u003c/span\u003e\n\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://github.com/talkbank/batchalign2\"\u003eCode@GitHub\u003c/a\u003e\u003c/span\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eTalkBank (2023)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eDementiaBank: Theoretical Rationale, Protocol, and Illustrative Analyses\u003c/div\u003e\n\u003cdiv\u003eLanzi, A., Saylor, A.K., Fromm, D., \u003cu\u003eLiu, H.\u003c/u\u003e, MacWhinney, B., Cohen, M.L. \u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://doi.org/10.1044/2022_AJSLP-22-00281\"\u003eJournal Article@AJSLP\u003c/a\u003e\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003eNueva (2022)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eConDef: Automated Context-Aware Lexicography Using Large Online Encyclopedias\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e, Sayyah, Z.\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://doi.org/10.1007/978-3-031-10464-0_41\"\u003eBook Chapter@LNNS\u003c/a\u003e\u003c/span\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://github.com/jklsnt/dictembed\"\u003eCode@GitHub\u003c/a\u003e\u003c/span\u003e\u003cspan class=\"tag\"\u003eTalk@SAI\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003cdiv style=\"padding: 
15px 0\"\u003e\n\u003cdiv style=\"font-weight: 500; font-size: 14px; opacity: 0.5\"\u003ePreprint (2021)\u003c/div\u003e\n\u003cdiv style=\"font-weight: 500\"\u003eTowards Automated Psychotherapy via Language Modeling\u003c/div\u003e\n\u003cdiv\u003e\u003cu\u003eLiu, H.\u003c/u\u003e\u003c/div\u003e\n\u003cdiv style=\"padding-top: 5px; transform: translateX(-2px)\"\u003e\u003cspan class=\"tag\"\u003e\u003ca target=\"_top\" href=\"https://arxiv.org/abs/2104.10661\"\u003earXiv\u003c/a\u003e\u003c/span\u003e\u003c/div\u003e\n\u003c/div\u003e\n\u003ch2 id=\"teaching\"\u003eTeaching\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e2023- Teaching Assistant, Stanford Association for Computing Machinery (ACM) Chapter\u003c/li\u003e\n\u003cli\u003e2022-2023 Head TA (co-instructor and summer lecturer) at \u003ca href=\"https://www.jemoka.com/posts/kbhaibridge_course_website/\"\u003eAIFS AIBridge\u003c/a\u003e, a program funded by UC Davis Food Science\u003c/li\u003e\n\u003cli\u003e2021-2023 Co-Developer, Research@Nueva, a high-school science education program\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"course-notes\"\u003eCourse Notes\u003c/h2\u003e\n\u003cp\u003eSome folks have mentioned that my course notes through classes at Stanford and before have been helpful. Feel free to browse my \u003ca href=\"https://www.jemoka.com/posts/kbhstanford_courses_index/\"\u003eStanford UG Courses Index\u003c/a\u003e if you want to check it out!\u003c/p\u003e\n\u003cstyle\u003e\n.tag {\nfont-size: 13px;\nmargin: 0 10px;\nmargin-left: 0;\ncursor: default;\n}\n.tag \u003e a {\nborder: 0 !important;\n}\n.tag \u003e a:hover {\nborder-bottom: 0 !important;\n}\n\u003c/style\u003e\n\u003cp\u003e\u003cspan style=\"font-size: 10px\"\u003e© 2019-2024 Houjun Liu. Licensed CC BY-NC-SA 4.0. 
This website layout is inspired by the lovely homepage of \u003ca target=\"_top\" href=\"https://kareldo.github.io/research\"\u003eKarel D\u0026rsquo;Oosterlick\u003c/a\u003e.\u003c/span\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch_index/","tags":["index"],"title":"Houjun's Academic Home Page"},{"categories":null,"contents":"A reading: (Krugman 2009)\nReflection The discussion here of the conflict between \u0026ldquo;saltwater\u0026rdquo; and \u0026ldquo;freshwater\u0026rdquo; (Keynesian and Neoclassical) economists is very interesting when evaluated from the perspective of our recent impending recession.\nOne particular statement that resonated with me in the essay was the fact that a crisis simply \u0026ldquo;pushed the freshwater economists into further absurdity.\u0026rdquo; It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.\nAs the same time, the forcibly-correcting \u0026ldquo;fudge\u0026rdquo; inconsistencies of the Keynesian model is also a strong weakness which perhaps further exacerbated the freshwater economists\u0026rsquo; dissent into their models. 
Modeling human behavior has been consistently quite messy, so it is unsurprising that both neoclassical and Keynesian economists strayed away from those models.\nCircling back to the COVID-trigger economic downturn: we definitely see a push towards increased \u0026ldquo;absurdity\u0026rdquo; in terms of increased polarization in the US; but not only that, the deeply rooted idea of \u0026ldquo;pandemics don\u0026rsquo;t affect the States\u0026rdquo; or at least \u0026ldquo;the Feds/our supply chain have preparation for absurd events\u0026rdquo; is again shown to be false\u0026mdash;despite the Obaman re-discovery of Keynesian management earlier.\nThis all raises a question: under what circumstances is a tangibly \u0026ldquo;better\u0026rdquo; result going to surface and be accepted when one model is tangibly perfect yet wrong, the other requiring flawed corrections or unrigorous analysis. Must we reject one model completely before the other one can be used?\nI don\u0026rsquo;t believe behavioral economics, though providing a partial solution as Krugman outlines, is the be-and-end-all of macroeconomic models during a depression. All of the models which were theorized (bar pure neoclassicalist \u0026ldquo;perfect agents\u0026rdquo;) ostensibly do one thing: trying to \u0026ldquo;rationally\u0026rdquo; model the \u0026ldquo;irrational\u0026rdquo; behavior of market participants. I don\u0026rsquo;t believe that this is ultimately going to be feasible on a macroeconomic scale to create models that will last (sans repeated, empirical testing\u0026mdash;but there are not enough depressions to go around.) 
Perhaps, then, the basic Keynesian idea of simply creating fiscal corrections may very well be the best second thing.\nReading notes the main problem was the fact that nobody saw a catastrophie coming More important was the profession’s blindness to the very possibility of catastrophic failures in a market economy.\npeople either believed that the market would never go wrong or the Fed fixes everything free-market economies never go astray and those who believed that economies may stray now and then but that any major deviations from the path of prosperity could and would be corrected by the all-powerful Fed.\nThe economists thought the humans are perfectly rational, and the fact that they are not is what leads to failures Unfortunately, this romanticized and sanitized vision of the economy led most economists to ignore all the things that can go wrong. They turned a blind eye to the limitations of human rationality that often lead to bubbles and busts\nKeynsian Economics was not trying to entirely replace markets Keynes did not, despite what you may have heard, want the government to run the economy. 
\u0026hellip; He wanted to fix capitalism, not replace it.\nMilton Friedman lead the return to Neoclassical Economics The neoclassical revival was initially led by Milton Friedman of the University of Chicago, who asserted as early as 1953 that neoclassical economics works well enough as a description of the way the economy actually functions\nNeoclassical Economics with the monetarist theory under Milton asserted that keeping the money supply growing is all that needed Monetarists asserted, however, that a very limited, circumscribed form of government intervention — namely, instructing central banks to keep the nation’s money supply, the sum of cash in circulation and bank deposits, growing on a steady path — is all that’s required to prevent depressions.\nMilton Freedman believes that large-scale expansion would lead to inflation and high unimployment excessively expansionary policies, he predicted, would lead to a combination of inflation and high unemployment\nAnti-Keynesian seniments overtook Freedman\u0026rsquo;s original proposition Eventually, however, the anti-Keynesian counterrevolution went far beyond Friedman’s position, which came to seem relatively moderate compared with what his successors were saying.\n#question why is this obvious? 
for obvious reasons\nBecause the new economists beliefed that the market is right, the advise was for business to max stock price finance economists believed that we should put the capital development of the nation in the hands of what Keynes had called a “casino.”\nMajor stock events didn\u0026rsquo;t blunt the disregard to Keynesian policy These events, however, which Keynes would have considered evidence of the unreliability of markets, did little to blunt the force of a beautiful idea.\nNew \u0026ldquo;perfect\u0026rdquo; economic models earned large respect in industry mild-mannered business-school professors could and did become Wall Street rocket scientists, earning Wall Street paychecks.\nNew models often analyzed financial systems independently of their real-world worth Finance economists rarely asked the seemingly obvious (though not easily answered) question of whether asset prices made sense given real-world fundamentals like earnings. Instead, they asked only whether asset prices made sense given other asset prices\nMacro split into two factions: the Keynes recessionists or the anti-Keynesians macroeconomics has divided into two great factions: “saltwater” economists (mainly in coastal U.S. universities), who have a more or less Keynesian vision of what recessions are all about; and “freshwater” economists (mainly at inland schools), who consider that vision nonsense.\nFreshwater economists\u0026rsquo; theory: recessions were just people confused? Nobel laureate Robert Lucas, argued that recessions were caused by temporary confusion: workers and companies had trouble distinguishing overall changes in the level of prices\nUnder freshwater theories, unemployment is just people electing not to work due to unfavorable environment amplified by the rational response of workers, who voluntarily work more when the environment is favorable and less when it’s unfavorable. 
Unemployment is a deliberate decision by workers to take time off.\n\u0026hellip;\nPut baldly like that, this theory sounds foolish — was the Great Depression really the Great Vacation?\nThe new Keysians still kept more or less to non-dramatic thinking They tried to keep their deviations from neoclassical orthodoxy as limited as possible. This meant that there was no room in the prevailing models for such things as bubbles and banking-system collapse.\nNew Keysians believed entirely in the Fed, without need for large fiscal policy They believed that monetary policy, administered by the technocrats at the Fed, could provide whatever remedies the economy needed.\nPeople just thought that there can\u0026rsquo;t be a bubble in housing What’s striking, when you reread Greenspan’s assurances, is that they weren’t based on evidence — they were based on the a priori assertion that there simply can’t be a bubble in housing.\nObama\u0026rsquo;s economic policies are much more on the Keynes side Such Keynesian thinking underlies the Obama administration’s economic policies — and the freshwater economists are furious.\nFailure of neoclassicalist theory is that breaking Keynsian economical behavior requires perfect rationality, which is absurd if you start from the assumption that people are perfectly rational and markets are perfectly efficient, you have to conclude that unemployment is voluntary and recessions are desirable.\nEconomists thought that economics would have been perfect Economics, as a field, got in trouble because economists were seduced by the vision of a perfect, frictionless market system.\nBehavioral Economics Behavioral Economics is a study of economics which hinges on the irrationality of human behavior. 
Its an answer to both the Neoclassical Economics\u0026rsquo; poor assumption that humans and markets are perfect, but also Keynsian Economics\u0026rsquo;s increasingly large need for a random \u0026ldquo;fudge\u0026rdquo; to get their models working right.\npillars of Behavioral Economics \u0026ldquo;Many real-world investors bear little resemblance to the cool calculators of efficient-market theory: they’re all too subject to herd behavior, to bouts of irrational exuberance and unwarranted panic.\u0026rdquo; \u0026ldquo;even those who try to base their decisions on cool calculation often find that they can’t, that problems of trust, credibility and limited collateral force them to run with the herd.\u0026rdquo; Good arbitrageurs are just forced out of the economy in large downward spirals As a result, the smart money is forced out of the market, and prices may go into a downward spiral.\n","html":"\u003cp\u003eA reading: (\u003ca href=\"#citeproc_bib_item_1\"\u003eKrugman 2009\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"reflection\"\u003eReflection\u003c/h2\u003e\n\u003cp\u003eThe discussion here of the conflict between \u0026ldquo;saltwater\u0026rdquo; and \u0026ldquo;freshwater\u0026rdquo; (Keynesian and Neoclassical) economists is very interesting when evaluated from the perspective of our recent impending recession.\u003c/p\u003e\n\u003cp\u003eOne particular statement that resonated with me in the essay was the fact that a crisis simply \u0026ldquo;pushed the freshwater economists into further absurdity.\u0026rdquo; It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.\u003c/p\u003e\n\u003cp\u003eAs the same time, the forcibly-correcting \u0026ldquo;fudge\u0026rdquo; inconsistencies of the Keynesian model is also a strong weakness which perhaps further exacerbated the freshwater economists\u0026rsquo; dissent into their models. 
Modeling human behavior has been consistently quite messy, so it is unsurprising that both neoclassical and Keynesian economists strayed away from those models.\u003c/p\u003e\n\u003cp\u003eCircling back to the COVID-trigger economic downturn: we definitely see a push towards increased \u0026ldquo;absurdity\u0026rdquo; in terms of increased polarization in the US; but not only that, the deeply rooted idea of \u0026ldquo;pandemics don\u0026rsquo;t affect the States\u0026rdquo; or at least \u0026ldquo;the Feds/our supply chain have preparation for absurd events\u0026rdquo; is again shown to be false\u0026mdash;despite the Obaman re-discovery of Keynesian management earlier.\u003c/p\u003e\n\u003cp\u003eThis all raises a question: under what circumstances is a tangibly \u0026ldquo;better\u0026rdquo; result going to surface and be accepted when one model is tangibly perfect yet wrong, the other requiring flawed corrections or unrigorous analysis. Must we reject one model completely before the other one can be used?\u003c/p\u003e\n\u003cp\u003eI don\u0026rsquo;t believe behavioral economics, though providing a partial solution as Krugman outlines, is the be-and-end-all of macroeconomic models during a depression. All of the models which were theorized (bar pure neoclassicalist \u0026ldquo;perfect agents\u0026rdquo;) ostensibly do one thing: trying to \u0026ldquo;rationally\u0026rdquo; model the \u0026ldquo;irrational\u0026rdquo; behavior of market participants. I don\u0026rsquo;t believe that this is ultimately going to be feasible on a macroeconomic scale to create models that will last (sans repeated, empirical testing\u0026mdash;but there are not enough depressions to go around.) 
Perhaps, then, the basic Keynesian idea of simply creating fiscal corrections may very well be the best second thing.\u003c/p\u003e\n\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"the-main-problem-was-the-fact-that-nobody-saw-a-catastrophie-coming\"\u003ethe main problem was the fact that nobody saw a catastrophie coming\u003c/h3\u003e\n\u003cp\u003eMore important was the profession’s blindness to the very possibility of catastrophic failures in a market economy.\u003c/p\u003e\n\u003ch3 id=\"people-either-believed-that-the-market-would-never-go-wrong-or-the-fed-fixes-everything\"\u003epeople either believed that the market would never go wrong or the Fed fixes everything\u003c/h3\u003e\n\u003cp\u003efree-market economies never go astray and those who believed that economies may stray now and then but that any major deviations from the path of prosperity could and would be corrected by the all-powerful Fed.\u003c/p\u003e\n\u003ch3 id=\"the-economists-thought-the-humans-are-perfectly-rational-and-the-fact-that-they-are-not-is-what-leads-to-failures\"\u003eThe economists thought the humans are perfectly rational, and the fact that they are not is what leads to failures\u003c/h3\u003e\n\u003cp\u003eUnfortunately, this romanticized and sanitized vision of the economy led most economists to ignore all the things that can go wrong. They turned a blind eye to the limitations of human rationality that often lead to bubbles and busts\u003c/p\u003e\n\u003ch3 id=\"keynsian-economics--kbhkeynsian-politics-dot-md--was-not-trying-to-entirely-replace-markets\"\u003e\u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Economics\u003c/a\u003e was not trying to entirely replace markets\u003c/h3\u003e\n\u003cp\u003eKeynes did not, despite what you may have heard, want the government to run the economy. 
\u0026hellip; He wanted to fix capitalism, not replace it.\u003c/p\u003e\n\u003ch3 id=\"milton-friedman-lead-the-return-to-neoclassical-economics--kbhneoclassical-economics-dot-md\"\u003eMilton Friedman lead the return to \u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economics\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe neoclassical revival was initially led by Milton Friedman of the University of Chicago, who asserted as early as 1953 that neoclassical economics works well enough as a description of the way the economy actually functions\u003c/p\u003e\n\u003ch3 id=\"neoclassical-economics-with-the-monetarist-theory--kbhmonetarist-theory-dot-md--under-milton-asserted-that-keeping-the-money-supply-growing-is-all-that-needed\"\u003eNeoclassical Economics with the \u003ca href=\"/posts/kbhmonetarist_theory/\"\u003emonetarist theory\u003c/a\u003e under Milton asserted that keeping the money supply growing is all that needed\u003c/h3\u003e\n\u003cp\u003eMonetarists asserted, however, that a very limited, circumscribed form of government intervention — namely, instructing central banks to keep the nation’s money supply, the sum of cash in circulation and bank deposits, growing on a steady path — is all that’s required to prevent depressions.\u003c/p\u003e\n\u003ch3 id=\"milton-freedman--kbhmilton-freedman-dot-md--believes-that-large-scale-expansion-would-lead-to-inflation-and-high-unimployment\"\u003e\u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e believes that large-scale expansion would lead to inflation and high unimployment\u003c/h3\u003e\n\u003cp\u003eexcessively expansionary policies, he predicted, would lead to a combination of inflation and high unemployment\u003c/p\u003e\n\u003ch3 id=\"anti-keynesian-seniments-overtook-freedman--kbhmilton-freedman-dot-md--s-original-proposition\"\u003eAnti-Keynesian seniments overtook \u003ca href=\"/posts/kbhmilton_freedman/\"\u003eFreedman\u003c/a\u003e\u0026rsquo;s 
original proposition\u003c/h3\u003e\n\u003cp\u003eEventually, however, the anti-Keynesian counterrevolution went far beyond Friedman’s position, which came to seem relatively moderate compared with what his successors were saying.\u003c/p\u003e\n\u003ch3 id=\"question-why-is-this-obvious\"\u003e#question why is this obvious?\u003c/h3\u003e\n\u003cp\u003efor obvious reasons\u003c/p\u003e\n\u003ch3 id=\"because-the-new-economists-beliefed-that-the-market-is-right-the-advise-was-for-business-to-max-stock-price\"\u003eBecause the new economists beliefed that the market is right, the advise was for business to max stock price\u003c/h3\u003e\n\u003cp\u003efinance economists believed that we should put the capital development of the nation in the hands of what Keynes had called a “casino.”\u003c/p\u003e\n\u003ch3 id=\"major-stock-events-didn-t-blunt-the-disregard-to-keynesian-policy\"\u003eMajor stock events didn\u0026rsquo;t blunt the disregard to Keynesian policy\u003c/h3\u003e\n\u003cp\u003eThese events, however, which Keynes would have considered evidence of the unreliability of markets, did little to blunt the force of a beautiful idea.\u003c/p\u003e\n\u003ch3 id=\"new-perfect-economic-models-earned-large-respect-in-industry\"\u003eNew \u0026ldquo;perfect\u0026rdquo; economic models earned large respect in industry\u003c/h3\u003e\n\u003cp\u003emild-mannered business-school professors could and did become Wall Street rocket scientists, earning Wall Street paychecks.\u003c/p\u003e\n\u003ch3 id=\"new-models-often-analyzed-financial-systems-independently-of-their-real-world-worth\"\u003eNew models often analyzed financial systems independently of their real-world worth\u003c/h3\u003e\n\u003cp\u003eFinance economists rarely asked the seemingly obvious (though not easily answered) question of whether asset prices made sense given real-world fundamentals like earnings. 
Instead, they asked only whether asset prices made sense given other asset prices\u003c/p\u003e\n\u003ch3 id=\"macro-split-into-two-factions-the-keynes--kbhkeynsian-politics-dot-md--recessionists-or-the-anti-keynesians--kbhkeynsian-politics-dot-md\"\u003eMacro split into two factions: the \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynes\u003c/a\u003e recessionists or the anti-\u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynesians\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003emacroeconomics has divided into two great factions: “saltwater” economists (mainly in coastal U.S. universities), who have a more or less Keynesian vision of what recessions are all about; and “freshwater” economists (mainly at inland schools), who consider that vision nonsense.\u003c/p\u003e\n\u003ch3 id=\"freshwater-economists-theory-recessions-were-just-people-confused\"\u003eFreshwater economists\u0026rsquo; theory: recessions were just people confused?\u003c/h3\u003e\n\u003cp\u003eNobel laureate Robert Lucas, argued that recessions were caused by temporary confusion: workers and companies had trouble distinguishing overall changes in the level of prices\u003c/p\u003e\n\u003ch3 id=\"under-freshwater-theories-unemployment-is-just-people-electing-not-to-work-due-to-unfavorable-environment\"\u003eUnder freshwater theories, unemployment is just people electing not to work due to unfavorable environment\u003c/h3\u003e\n\u003cp\u003eamplified by the rational response of workers, who voluntarily work more when the environment is favorable and less when it’s unfavorable. 
Unemployment is a deliberate decision by workers to take time off.\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\u003c/p\u003e\n\u003cp\u003ePut baldly like that, this theory sounds foolish — was the Great Depression really the Great\nVacation?\u003c/p\u003e\n\u003ch3 id=\"the-new-keysians-still-kept-more-or-less-to-non-dramatic-thinking\"\u003eThe new Keysians still kept more or less to non-dramatic thinking\u003c/h3\u003e\n\u003cp\u003eThey tried to keep their deviations from neoclassical orthodoxy as limited as possible. This meant that there was no room in the prevailing models for such things as bubbles and banking-system collapse.\u003c/p\u003e\n\u003ch3 id=\"new-keysians-believed-entirely-in-the-fed-without-need-for-large-fiscal-policy\"\u003eNew Keysians believed entirely in the Fed, without need for large fiscal policy\u003c/h3\u003e\n\u003cp\u003eThey believed that monetary policy, administered by the technocrats at the Fed, could provide whatever remedies the economy needed.\u003c/p\u003e\n\u003ch3 id=\"people-just-thought-that-there-can-t-be-a-bubble-in-housing\"\u003ePeople just thought that there can\u0026rsquo;t be a bubble in housing\u003c/h3\u003e\n\u003cp\u003eWhat’s striking, when you reread Greenspan’s assurances, is that they weren’t based on evidence — they were based on the a priori assertion that there simply can’t be a bubble in housing.\u003c/p\u003e\n\u003ch3 id=\"obama-s-economic-policies-are-much-more-on-the-keynes--kbhkeynsian-politics-dot-md--side\"\u003eObama\u0026rsquo;s economic policies are much more on the \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynes\u003c/a\u003e side\u003c/h3\u003e\n\u003cp\u003eSuch Keynesian thinking underlies the Obama administration’s economic policies — and the freshwater economists are furious.\u003c/p\u003e\n\u003ch3 id=\"failure-of-neoclassicalist-theory-is-that-breaking-keynsian-economical-behavior-requires-perfect-rationality-which-is-absurd\"\u003eFailure of neoclassicalist theory is that 
breaking Keynsian economical behavior requires perfect rationality, which is absurd\u003c/h3\u003e\n\u003cp\u003eif you start from the assumption that people are perfectly rational and markets are perfectly efficient, you have to conclude that unemployment is voluntary and recessions are desirable.\u003c/p\u003e\n\u003ch3 id=\"economists-thought-that-economics-would-have-been-perfect\"\u003eEconomists thought that economics would have been perfect\u003c/h3\u003e\n\u003cp\u003eEconomics, as a field, got in trouble because economists were seduced by the vision of a perfect, frictionless market system.\u003c/p\u003e\n\u003ch3 id=\"behavioral-economics\"\u003eBehavioral Economics\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#behavioral-economics\"\u003eBehavioral Economics\u003c/a\u003e is a study of economics which hinges on the irrationality of human behavior. Its an answer to both the \u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economics\u003c/a\u003e\u0026rsquo; poor assumption that humans and markets are perfect, but also \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Economics\u003c/a\u003e\u0026rsquo;s increasingly large need for a random \u0026ldquo;fudge\u0026rdquo; to get their models working right.\u003c/p\u003e\n\u003ch4 id=\"pillars-of-behavioral-economics\"\u003epillars of Behavioral Economics\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;Many real-world investors bear little resemblance to the cool calculators of efficient-market theory: they’re all too subject to herd behavior, to bouts of irrational exuberance and unwarranted panic.\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;even those who try to base their decisions on cool calculation often find that they can’t, that problems of trust, credibility and limited collateral force them to run with the herd.\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 
id=\"good-arbitrageurs-are-just-forced-out-of-the-economy-in-large-downward-spirals\"\u003eGood arbitrageurs are just forced out of the economy in large downward spirals\u003c/h3\u003e\n\u003cp\u003eAs a result, the smart money is forced out of the market, and prices may go into a downward spiral.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhow_did_economists_get_it_so_wrong/","tags":null,"title":"How Did Economists Get It So Wrong?"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhhsbi/","tags":null,"title":"hsbi"},{"categories":null,"contents":"Improving PBVI without sacrificing quality.\nInitialization We first initialize HSVI with a set of alpha vectors \\(\\Gamma\\), representing the lower-bound, and a list of tuples of \\((b, U(b))\\) named \\(\\Upsilon\\), representing the upper-bound. We call the value functions they generate as \\(\\bar{V}\\) and \\(\\underline V\\).\nLower Bound Set of alpha vectors: best-action worst-state (HSVI1), blind lower bound (HSVI2)\nCalculating \\(\\underline{V}(b)\\) \\begin{equation} \\underline{V}_{\\Gamma} = \\max_{\\alpha} \\alpha^{\\top}b \\end{equation}\nUpper Bound Fast Informed Bound\nsolving fully-observable MDP Project \\(b\\) into the point-set Projected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection) Calculating \\(\\bar{V}(b)\\) Recall that though the lower-bound is given by alpha vectors, the upper bound is given in terms of a series of tuples \\((b, U(b)) \\in \\Upsilon\\).\nHSVI1: we figure the upper bound for any given \\(b\\) by projecting onto the convex hull formed by points on \\(\\Upsilon\\) HSVI2: approximate linear projection Update Begin with state \\(b = b_0\\).\nRepeat:\nat every step, we perform a local update for upper and lower bound using the current \\(b\\)\nthe lower bound is updated using PBVI Backup on \\(b, \\Gamma\\) the upper bound is updated using POMDP Bellman Update on \\(b, \\Upsilon\\), putting 
the new \\((b, u(b))\\) in the set \\(\\Upsilon\\). Then, we update our belief via the usual:\n\\begin{equation} b \\leftarrow update(b, a^{*}, o^{*}) \\end{equation}\nwhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\nIE-MAX Heuristic IE-MAX Heuristic is used to determine \\(a^{*}\\), whereby we choose the action such that:\n\\begin{equation} a^{*} = \\arg\\max_{a}Q^{(\\bar{V})}(b) \\end{equation}\nyes, we choose the next action which maximizes the upper bound of the utility we can get.\nweighted excess uncertainty weighted excess uncertainty is used to determine \\(o^{*}\\). Suppose we are depth \\(d\\) loops in the search tree (i.e. this is our $d$th chain), we define:\n\\begin{equation} \\text{excess}(b,t) = (\\bar{V}(b)-\\underline{V}(b)) - \\epsilon \\gamma^{-t} \\end{equation}\n\u0026ldquo;how far away are we from converging to a value uncertainty of no more than \\(\\epsilon\\), given we are depth \\(t\\) in?\nand, we choose the observation \\(o^{*}\\) such that:\n\\begin{equation} o^{*} = \\arg\\max_{o} \\qty[p(o|b,a^{*}) \\text{excess}(update(b,a,o), t+1)] \\end{equation}\nwhere,\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\n","html":"\u003cp\u003eImproving \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e without sacrificing quality.\u003c/p\u003e\n\u003ch2 id=\"initialization\"\u003eInitialization\u003c/h2\u003e\n\u003cp\u003eWe first initialize \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e with a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es \\(\\Gamma\\), representing the lower-bound, and a list of tuples of \\((b, U(b))\\) named \\(\\Upsilon\\), representing the upper-bound. 
We call the value functions they generate as \\(\\bar{V}\\) and \\(\\underline V\\).\u003c/p\u003e\n\u003ch3 id=\"lower-bound\"\u003eLower Bound\u003c/h3\u003e\n\u003cp\u003eSet of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es: \u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e (\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e1), \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e (\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e2)\u003c/p\u003e\n\u003ch4 id=\"calculating-underline-v--b\"\u003eCalculating \\(\\underline{V}(b)\\)\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\n\\underline{V}_{\\Gamma} = \\max_{\\alpha} \\alpha^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"upper-bound\"\u003eUpper Bound\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFast Informed Bound\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esolving fully-observable MDP\u003c/li\u003e\n\u003cli\u003eProject \\(b\\) into the point-set\u003c/li\u003e\n\u003cli\u003eProjected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"calculating-bar-v--b\"\u003eCalculating \\(\\bar{V}(b)\\)\u003c/h4\u003e\n\u003cp\u003eRecall that though the lower-bound is given by \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, the upper bound is given in terms of a series of tuples \\((b, U(b)) \\in \\Upsilon\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e1: we figure the upper bound for any given \\(b\\) by projecting onto the convex hull formed by points on \\(\\Upsilon\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e2: approximate linear projection\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"update\"\u003eUpdate\u003c/h2\u003e\n\u003cp\u003eBegin with state \\(b = b_0\\).\u003c/p\u003e\n\u003cp\u003eRepeat:\u003c/p\u003e\n\u003cp\u003eat every step, we perform a local update for upper and lower bound using the current \\(b\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe lower bound is \u003cstrong\u003eupdated\u003c/strong\u003e using \u003ca href=\"/posts/kbhpoint_based_value_iteration/#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e on \\(b, \\Gamma\\)\u003c/li\u003e\n\u003cli\u003ethe upper bound is \u003cstrong\u003eupdated\u003c/strong\u003e using \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Update\u003c/a\u003e on \\(b, \\Upsilon\\), putting the new \\((b, u(b))\\) in the set \\(\\Upsilon\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThen, we update our belief via the usual:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\leftarrow update(b, a^{*}, o^{*})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(a^{*}\\) and \\(o^{*}\\) are determined by\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"ie-max-heuristic\"\u003eIE-MAX Heuristic\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#ie-max-heuristic\"\u003eIE-MAX Heuristic\u003c/a\u003e is used to determine \\(a^{*}\\), whereby we choose the action such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na^{*} = \\arg\\max_{a}Q^{(\\bar{V})}(b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyes, we choose the next action which maximizes the \u003cstrong\u003eupper bound\u003c/strong\u003e of the utility we can get.\u003c/p\u003e\n\u003ch3 id=\"weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#weighted-excess-uncertainty\"\u003eweighted excess uncertainty\u003c/a\u003e is used to determine \\(o^{*}\\). Suppose we are depth \\(d\\) loops in the search tree (i.e. 
this is our $d$th chain), we define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\text{excess}(b,t) = (\\bar{V}(b)-\\underline{V}(b)) - \\epsilon \\gamma^{-t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;how far away are we from converging to a value uncertainty of no more than \\(\\epsilon\\), given we are depth \\(t\\) in?\u003c/p\u003e\n\u003cp\u003eand, we choose the observation \\(o^{*}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\no^{*} = \\arg\\max_{o} \\qty[p(o|b,a^{*}) \\text{excess}(update(b,a,o), t+1)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s)\n\\end{align}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhsvi/","tags":null,"title":"HSVI"},{"categories":null,"contents":"\u0026ldquo;Can we come up a policy that, if not fast, at least reach the goal!\u0026rdquo;\nBackground Stochastic Shortest-Path we are at an initial state, and we have a series of goal states, and we want to reach to the goal states.\nWe can solve this just by:\nvalue iteration simulate a trajectory and only updating reachable state: RTDP, LRTDP MBP Problem MDP + Goal States\n\\(S\\): set of states \\(A\\): actions \\(P(s\u0026rsquo;|s,a)\\): transition \\(C\\): reward \\(G\\): absorbing goal states Approach Combining LRTDP with anytime dynamics\nrun GPT (not the transformer, \u0026ldquo;General Planning Tool\u0026rdquo;, think LRTDP) exact solver use GPT policy for solved states or visited more than a certain threshold uses MBP policy for other states policy evaluation for convergence \u0026ldquo;use GPT solution as much as possible, and when we haven\u0026rsquo;t ever visited a place due to the search trajectories, we can use MBP to supplement the solution\u0026rdquo;\n","html":"\u003cp\u003e\u0026ldquo;Can we come up a policy that, if not 
fast, \u003cstrong\u003eat least reach the goal!\u003c/strong\u003e\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003ch3 id=\"stochastic-shortest-path\"\u003eStochastic Shortest-Path\u003c/h3\u003e\n\u003cp\u003ewe are at an initial state, and we have a series of goal states, and we want to reach to the goal states.\u003c/p\u003e\n\u003cp\u003eWe can solve this just by:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003esimulate a trajectory and only updating reachable state: \u003ca href=\"/posts/kbhltrdp/#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e, \u003ca href=\"/posts/kbhltrdp/\"\u003eLRTDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmbp/\"\u003eMBP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"problem\"\u003eProblem\u003c/h2\u003e\n\u003cp\u003eMDP + Goal States\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S\\): set of states\u003c/li\u003e\n\u003cli\u003e\\(A\\): actions\u003c/li\u003e\n\u003cli\u003e\\(P(s\u0026rsquo;|s,a)\\): transition\u003c/li\u003e\n\u003cli\u003e\\(C\\): reward\u003c/li\u003e\n\u003cli\u003e\\(G\\): absorbing \u003cstrong\u003egoal states\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003cp\u003eCombining \u003ca href=\"/posts/kbhltrdp/\"\u003eLRTDP\u003c/a\u003e with anytime dynamics\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003erun GPT (not the transformer, \u0026ldquo;General Planning Tool\u0026rdquo;, think \u003ca href=\"/posts/kbhltrdp/\"\u003eLRTDP\u003c/a\u003e) exact solver\u003c/li\u003e\n\u003cli\u003euse GPT policy for solved states or visited more than a certain threshold\u003c/li\u003e\n\u003cli\u003euses \u003ca href=\"/posts/kbhmbp/\"\u003eMBP\u003c/a\u003e policy for other states\u003c/li\u003e\n\u003cli\u003epolicy evaluation for 
convergence\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_09-27-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u0026ldquo;use GPT solution as much as possible, and when we haven\u0026rsquo;t ever visited a place due to the search trajectories, we can use \u003ca href=\"/posts/kbhmbp/\"\u003eMBP\u003c/a\u003e to supplement the solution\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhybplan/","tags":null,"title":"HybPlan"},{"categories":null,"contents":"hypothesis testing is the mechanism by which a hypothesis is tested statistically.\nThe core logic of hypothesis testing: have a metric, do tests, calculate probability that the outcome could have happened given the metric is true.\nExamples include\nt-test (for sample means) z-test (for sample proportions) chi-square test (for sample categories) Common to all hypothesis tests are the following terms.\nnull hypothesis A null hypothesis is a \u0026ldquo;no difference\u0026rdquo; hypothesis created as a part of hypothesis testing. It is usually stated as an equality.\nalternative hypothesis The alternative hypothesis is the \u0026ldquo;new news\u0026rdquo; hypothesis created as a part of hypothesis testing, whereby the confirmation would introduce new information.\np-value the p-value of a hypothesis test is the probability of the results acquired taking place given if the null hypothesis. That is:\n\\begin{equation} p(\\hat{p} | H_0\\ true) \\end{equation}\nTo figure out the above probability, you could either simulate the occurrence and look at a histogram (more common for AP Statistics anyways) or measure a few other statistics. 
We will talk about them later.\nTo use p-value as a hypothesis test, the sample has to meet the conditions for inference.\nSee also p-value from bootstrap\nType I Error A Type I Error takes place when you reject the null hypothesis during hypothesis testing even while its true: i.e., a false positive.\nThe probability of having a Type I Error is the significance level of the test.\nType II Error A Type II Error takes place when you accept the null hypothesis during hypothesis testing even while its false.\nThe probability of having a Type II Error is the conjugate of the power of a test.\nsignificance level significance level is the level by which one would accept a p-value is being indicative of the success of a test. We usually use the letter \\(\\alpha\\) to denote this.\npower (statistics) power is a statistic calculable during hypothesis testing. Its the probability of rejecting the null hypothesis given the null hypothesis is false. Also known as the conjugate of the Type II Error.\npower increases as significance level increases, but then the probability of a Type I Error increases as well.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e is the mechanism by which a hypothesis is tested statistically.\u003c/p\u003e\n\u003cp\u003eThe core logic of \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e: have a metric, do tests, calculate probability that the outcome could have happened given the metric is true.\u003c/p\u003e\n\u003cp\u003eExamples include\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e (for sample means)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhz_test/\"\u003ez-test\u003c/a\u003e (for sample proportions)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchi_square/#chi-square-test\"\u003echi-square test\u003c/a\u003e (for sample 
categories)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCommon to all \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis tests\u003c/a\u003e are the following terms.\u003c/p\u003e\n\u003ch2 id=\"null-hypothesis\"\u003enull hypothesis\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e is a \u0026ldquo;no difference\u0026rdquo; hypothesis created as a part of \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e. It is usually stated as an equality.\u003c/p\u003e\n\u003ch2 id=\"alternative-hypothesis\"\u003ealternative hypothesis\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#alternative-hypothesis\"\u003ealternative hypothesis\u003c/a\u003e is the \u0026ldquo;new news\u0026rdquo; hypothesis created as a part of \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e, whereby the confirmation would introduce new information.\u003c/p\u003e\n\u003ch2 id=\"p-value\"\u003ep-value\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"#p-value\"\u003ep-value\u003c/a\u003e of a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e is the probability of the results acquired taking place given if the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(\\hat{p} | H_0\\ true)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo figure out the above probability, you could either simulate the occurrence and look at a histogram (more common for \u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e anyways) or measure a few other statistics. 
We will talk about them later.\u003c/p\u003e\n\u003cp\u003eTo use \u003ca href=\"#p-value\"\u003ep-value\u003c/a\u003e as a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e, the sample has to meet the \u003ca href=\"/posts/kbhz_test/#conditions-for-inference--z-test\"\u003econditions for inference\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhboostrap/#p-value-from-bootstrap\"\u003ep-value from bootstrap\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"type-i-error\"\u003eType I Error\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#type-i-error\"\u003eType I Error\u003c/a\u003e takes place when you reject the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e during \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e even while its true: i.e., a false positive.\u003c/p\u003e\n\u003cp\u003eThe probability of having a \u003ca href=\"#type-i-error\"\u003eType I Error\u003c/a\u003e is the \u003ca href=\"#significance-level\"\u003esignificance level\u003c/a\u003e of the test.\u003c/p\u003e\n\u003ch2 id=\"type-ii-error\"\u003eType II Error\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#type-ii-error\"\u003eType II Error\u003c/a\u003e takes place when you accept the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e during \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e even while its false.\u003c/p\u003e\n\u003cp\u003eThe probability of having a \u003ca href=\"#type-ii-error\"\u003eType II Error\u003c/a\u003e is the conjugate of the \u003ca href=\"#power--statistics\"\u003epower\u003c/a\u003e of a test.\u003c/p\u003e\n\u003ch2 id=\"significance-level\"\u003esignificance level\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#significance-level\"\u003esignificance level\u003c/a\u003e is the level by which one would accept a \u003ca href=\"#p-value\"\u003ep-value\u003c/a\u003e is being indicative of the success of a 
test. We usually use the letter \\(\\alpha\\) to denote this.\u003c/p\u003e\n\u003ch2 id=\"power--statistics\"\u003epower (statistics)\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#power--statistics\"\u003epower\u003c/a\u003e is a \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e calculable during \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis testing\u003c/a\u003e. Its the probability of rejecting the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e given the \u003ca href=\"#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e is false. Also known as the conjugate of the \u003ca href=\"#type-ii-error\"\u003eType II Error\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#power--statistics\"\u003epower\u003c/a\u003e increases as \u003ca href=\"#significance-level\"\u003esignificance level\u003c/a\u003e increases, but then the probability of a \u003ca href=\"#type-i-error\"\u003eType I Error\u003c/a\u003e increases as well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhhypothesis_testing/","tags":null,"title":"hypothesis testing"},{"categories":null,"contents":"identities allows another number to retain its identity after an operation.\nWhat identities are applicable is group dependent. Identities are almost always object dependent.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e allows another \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e to retain its identity after an \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhat \u003ca href=\"/posts/kbhidentity/\"\u003eidentities\u003c/a\u003e are applicable is \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e dependent. 
\u003ca href=\"/posts/kbhidentity/\"\u003eIdentities\u003c/a\u003e are almost always \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e dependent.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhidentity/","tags":null,"title":"identity"},{"categories":null,"contents":"\u0026lt;\u0026gt; NUS-HIST301 American History\nThe idea of identity politics is proposed, that politics became associated with sub-population of identities:\nBlack Pride Movement Chicano Activism The American Indian movement Termination of reservation system Pan-Indian Rights Alcatraz and Wounded Knee Occupations LGBT movement Stonewall GLF starts marching Asian American Yellow Peril Model minority movement NOW Femanism Acts The Equal Rights Act almost possible, and then Phyllis Schlafly happened Environmental Movement Silent Spring Cuyahoga River on fire Richard Nixon creates the EPA Earth Day ","html":"\u003cp\u003e\u0026lt;\u0026gt; \u003ca href=\"/posts/kbhnueva_courses_index/#nus-hist301-american-history\"\u003eNUS-HIST301 American History\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eThe idea of \u003ca href=\"/posts/kbhactivism_during_the_1970s/\"\u003eidentity politics\u003c/a\u003e is proposed, that politics became associated with sub-population of identities:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBlack Pride Movement\u003c/li\u003e\n\u003cli\u003eChicano Activism\u003c/li\u003e\n\u003cli\u003eThe American Indian movement\n\u003cul\u003e\n\u003cli\u003eTermination of reservation system\u003c/li\u003e\n\u003cli\u003ePan-Indian Rights\u003c/li\u003e\n\u003cli\u003eAlcatraz and Wounded Knee Occupations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eLGBT movement\n\u003cul\u003e\n\u003cli\u003eStonewall\u003c/li\u003e\n\u003cli\u003eGLF starts marching\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eAsian American\n\u003cul\u003e\n\u003cli\u003eYellow Peril\u003c/li\u003e\n\u003cli\u003eModel minority 
movement\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNOW Femanism Acts\n\u003cul\u003e\n\u003cli\u003eThe \u003ca href=\"/posts/kbhequal_rights_act/\"\u003eEqual Rights Act\u003c/a\u003e almost possible, and then Phyllis Schlafly happened\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eEnvironmental Movement\n\u003cul\u003e\n\u003cli\u003e\u003cem\u003eSilent Spring\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eCuyahoga River \u003cem\u003eon fire\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e creates the EPA\u003c/li\u003e\n\u003cli\u003eEarth Day\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhactivism_during_the_1970s/","tags":null,"title":"identity politics"},{"categories":null,"contents":"to prove that something goes both ways: given \\(A\\Rightarrow B\\), and \\(A \\Leftarrow B\\), \\(A \\Leftrightarrow B\\).\n","html":"\u003cp\u003eto prove that something goes both ways: given \\(A\\Rightarrow B\\), and \\(A \\Leftarrow B\\), \\(A \\Leftrightarrow B\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhequivalence/","tags":null,"title":"if and only if"},{"categories":null,"contents":"v-structure whose parents are unconnected are immoral\nThis is immoral:\nThis is moral\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ev-structure\u003c/a\u003e whose parents are unconnected are immoral\u003c/p\u003e\n\u003cp\u003eThis is immoral:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-12_10-53-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis is moral\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-12_10-53-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhimmoral_v_structure/","tags":null,"title":"immoral 
v-structure"},{"categories":null,"contents":"Using a bunch of signals to create a 3d representation of the system\nfeatures cyro-EM x-ray tomo glynomics/libidomics genetics studying large-scale viron behavior Misc. Discoveries bydoing a bunch of MD\nproteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling when its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable *we don\u0026rsquo;t * \u0026ldquo;we are so biased by what we can see experimentally\u0026rdquo; ","html":"\u003cp\u003eUsing a bunch of signals to create a 3d representation of the system\u003c/p\u003e\n\u003ch2 id=\"features\"\u003efeatures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ex-ray\u003c/li\u003e\n\u003cli\u003etomo\u003c/li\u003e\n\u003cli\u003eglynomics/libidomics\u003c/li\u003e\n\u003cli\u003egenetics\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"studying-large-scale-viron-behavior\"\u003estudying large-scale viron behavior\u003c/h2\u003e\n\u003cp\u003eMisc. 
Discoveries bydoing a bunch of MD\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eproteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling\u003c/li\u003e\n\u003cli\u003ewhen its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable\u003c/li\u003e\n\u003cli\u003e*we don\u0026rsquo;t *\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;we are so biased by what we can see experimentally\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhimmunogen_design-1/","tags":null,"title":"Immunogen Design"},{"categories":null,"contents":"Using a bunch of signals to create a 3d representation of the system\nfeatures cyro-EM x-ray tomo glynomics/libidomics genetics studying large-scale viron surface protein behavior using MD \u0026ldquo;we are so biased by what we can see experimentally, so do MD\u0026rdquo;\n\u0026ldquo;breathing\u0026rdquo; motion proteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling when its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable so some antibodies jam into the open position \u0026ldquo;head tilting\u0026rdquo; motion when its tilted, there is an epitope that becomes exposed this is a site that\u0026rsquo;s possible for introduction of what\u0026rsquo;s needed overall architecture use conventional MD discover regions of instability (see above for examples) for possible binding surfaces bam inhibition! 
maybe at some point run a cyro-EM to experimentally run some\n","html":"\u003cp\u003eUsing a bunch of signals to create a 3d representation of the system\u003c/p\u003e\n\u003ch2 id=\"features\"\u003efeatures\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ex-ray\u003c/li\u003e\n\u003cli\u003etomo\u003c/li\u003e\n\u003cli\u003eglynomics/libidomics\u003c/li\u003e\n\u003cli\u003egenetics\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"studying-large-scale-viron-surface-protein-behavior-using-md\"\u003estudying large-scale viron surface protein behavior using MD\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;we are so biased by what we can see experimentally, so do MD\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"breathing-motion\"\u003e\u0026ldquo;breathing\u0026rdquo; motion\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eproteins of specific virons \u0026ldquo;breath\u0026rdquo;: oning and closing an entire backbone group; this is not yet quantified in physical modeling\u003c/li\u003e\n\u003cli\u003ewhen its open, the \u0026ldquo;breathing open\u0026rdquo; motion causes the virus to be vunerable\u003c/li\u003e\n\u003cli\u003eso some antibodies jam into the open position\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"head-tilting-motion\"\u003e\u0026ldquo;head tilting\u0026rdquo; motion\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewhen its tilted, there is an \u003ca href=\"/posts/kbhepitophs/\"\u003eepitope\u003c/a\u003e that becomes exposed\u003c/li\u003e\n\u003cli\u003ethis is a site that\u0026rsquo;s possible for introduction of what\u0026rsquo;s needed\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"overall-architecture\"\u003eoverall architecture\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003euse conventional MD\u003c/li\u003e\n\u003cli\u003ediscover regions of instability (see above for examples) for possible binding surfaces\u003c/li\u003e\n\u003cli\u003ebam 
inhibition!\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003emaybe at some point run a \u003ca href=\"/posts/kbhcyro_em/#manifold-embedding\"\u003ecyro-EM\u003c/a\u003e to experimentally run some\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhimmunogen_design/","tags":null,"title":"Immunogen Design"},{"categories":null,"contents":"Imperialism: a policy of extending a country\u0026rsquo;s power and influence though diplomacy or military force.\nColonies Protectorate \u0026mdash; nations has own government legally controlled by outside power Sphere of influence U.S. Imperialism, why?\n\u0026ldquo;Desire for Military strength\u0026rdquo;: for a nation to be an international player, you have to have a strong navy \u0026ldquo;Thirst for new markets\u0026rdquo;: if we continue to expand, we will have more economic power \u0026ldquo;Belief in supernatural superiority\u0026rdquo;: trust that own culture is better Alaska \u0026mdash; \u0026ldquo;Seward\u0026rsquo;s Ice Box\u0026rdquo;, purchased from czarist Russia.\nHawaii \u0026mdash; Annexed 1898, a sugar company, to get around import taxes, asked the US to annex Hawaii.\nSpanish-American War \u0026mdash;- newspaper receive letter sent by Spanish minister to not protect Cuba. The US then proceeded to fight for the territories.\nFilipino rejected treaty of Paris, America fights. America burned food and crops to starve rebels, and built infrastructure earning elite support due to infrastructure.\n","html":"\u003cp\u003eImperialism: a policy of extending a country\u0026rsquo;s power and influence though diplomacy or military force.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eColonies\u003c/li\u003e\n\u003cli\u003eProtectorate \u0026mdash; nations has own government legally controlled by outside power\u003c/li\u003e\n\u003cli\u003eSphere of influence\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eU.S. 
Imperialism, why?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;Desire for Military strength\u0026rdquo;: for a nation to be an international player, you have to have a strong navy\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Thirst for new markets\u0026rdquo;: if we continue to expand, we will have more economic power\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Belief in supernatural superiority\u0026rdquo;: trust that own culture is better\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAlaska \u0026mdash; \u0026ldquo;Seward\u0026rsquo;s Ice Box\u0026rdquo;, purchased from czarist Russia.\u003c/p\u003e\n\u003cp\u003eHawaii \u0026mdash; Annexed 1898, a sugar company, to get around import taxes, asked the US to annex Hawaii.\u003c/p\u003e\n\u003cp\u003eSpanish-American War \u0026mdash;- newspaper receive letter sent by Spanish minister to not protect Cuba. The US then proceeded to fight for the territories.\u003c/p\u003e\n\u003cp\u003eFilipino rejected treaty of Paris, America fights. 
America burned food and crops to starve rebels, and built infrastructure earning elite support due to infrastructure.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhimperialism/","tags":null,"title":"Imperialism"},{"categories":null,"contents":"The Inbox is an Inbox for quick captures.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhinbox/\"\u003eInbox\u003c/a\u003e is an \u003ca href=\"/posts/kbhinbox/\"\u003eInbox\u003c/a\u003e for quick captures.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinbox/","tags":null,"title":"Inbox"},{"categories":null,"contents":"If an outcome can be from sets \\(A=m\\) or \\(B=n\\) with no overlaps, where \\(A \\cap B = \\emptyset\\), then, the total number of outcomes are \\(|A| + |B| = m+n\\)\nIf there are overlap:\n\\begin{equation} N = |A|+|B| - |A \\cap B| \\end{equation}\n","html":"\u003cp\u003eIf an outcome can be from sets \\(A=m\\) \u003cstrong\u003eor\u003c/strong\u003e \\(B=n\\) with no overlaps, where \\(A \\cap B = \\emptyset\\), then, the total number of outcomes are \\(|A| + |B| = m+n\\)\u003c/p\u003e\n\u003cp\u003eIf there are overlap:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN = |A|+|B| - |A \\cap B|\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsum_rule_of_counting/","tags":null,"title":"inclusion exclusion counting"},{"categories":null,"contents":"\\(n\\) random random variables are IID if they are\nindependent identically distributed (see below) \u0026ldquo;identically distributed\u0026rdquo; Consider \\(n\\) random variables:\n\\(X_i\\) all have the same PMF / PDF and therefore, all have the same expectation and variance central limit theorem when things are IID, you can use central limit theorem.\n","html":"\u003cp\u003e\\(n\\) random \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e if they 
are\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eidentically distributed (see below)\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003ch2 id=\"identically-distributed\"\u003e\u0026ldquo;identically distributed\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eConsider \\(n\\) \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X_i\\) all have the same \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e / \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand therefore, all have the same \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e and \u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"central-limit-theorem--kbhcentral-limit-theorem-dot-md\"\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ewhen things are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, you can use \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhindependently_and_identically_distributed/","tags":null,"title":"independently and identically distributed"},{"categories":null,"contents":"Here\u0026rsquo;s a list of all indexes:\nProjects Index Research Index Production Index About This should be reflected on a fancier way on my home page.\n","html":"\u003cp\u003eHere\u0026rsquo;s a list of all indexes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprojects/\"\u003eProjects Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhresearch_index/\"\u003eResearch 
Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhproduction_index/\"\u003eProduction Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhindex/\"\u003eAbout\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis should be reflected on a fancier way on \u003ca href=\"https://www.jemoka.com/\"\u003emy home page.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhindex_index/","tags":["index"],"title":"Index Index"},{"categories":null,"contents":"voltage across a inductor \\begin{equation} V = \\epsilon = -L \\dv{I}{t} \\end{equation}\nthis is kind of a formulation of faraday\u0026rsquo;s law.\n\\begin{equation} I(t) = \\frac{V_0}{R_1} (1-e^{\\frac{-t}{\\frac{L}{R}}}) \\end{equation}\nenergy stored in an inductor \\begin{equation} E = \\frac{1}{2} LI^{2} \\end{equation}\n","html":"\u003ch2 id=\"voltage-across-a-inductor--kbhinductors-in-circuits-dot-md\"\u003evoltage across a \u003ca href=\"/posts/kbhinductors_in_circuits/\"\u003einductor\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nV = \\epsilon = -L \\dv{I}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is kind of a formulation of \u003ca href=\"/posts/kbhfaraday_s_law/\"\u003efaraday\u0026rsquo;s law\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(t) = \\frac{V_0}{R_1} (1-e^{\\frac{-t}{\\frac{L}{R}}})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"energy-stored-in-an-inductor--kbhinductors-in-circuits-dot-md\"\u003eenergy stored in an \u003ca href=\"/posts/kbhinductors_in_circuits/\"\u003einductor\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{1}{2} LI^{2}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinductors_in_circuits/","tags":null,"title":"inductor"},{"categories":null,"contents":"inference is the act of updating the distribution of a random variable based on distribution of actually observed variables:\n\\begin{equation} P(X|Y) 
\\end{equation}\nwhere \\(Y\\) is observed, and we want to know how likely \\(X\\) would therefore be.\nWe call the set \\(X\\) the \u0026ldquo;query variables\u0026rdquo;, \\(Y\\) as \u0026ldquo;evidence varibales\u0026rdquo;, and anything that we didn\u0026rsquo;t use which connects the two variables as \u0026ldquo;hidden variables\u0026rdquo;.\nIf things are not in the right order of \\(X\\) and \\(Y\\), consider the Bayes rule.\nInference is Hard mix of continuous and discrete distribution results could be either a PMF or a PDF Example Suppose we\u0026rsquo;d like to know \\(P(b^{1} | d^{1}, c^{1})\\), where \\(b^{1}\\) is considered a query variable, and \\(c^{1}\\) is considered evidence varibales. The definition of the conditional probability gives us:\n\\begin{equation} p(b^{1} | d^{1}, c^{1}) = \\frac{p(b^{1}, d^{1}, c^{1})}{p(d^{1}, c^{1})} \\end{equation}\nTo compute \\(p(b^{1}d^{1}c^{1})\\), we first compute:\n\\begin{equation} p(b^{1}, d^{1}, c^{1}, E, S) \\end{equation}\nand then, use the law of total probability to get:\n\\begin{equation} p(b^{1}, d^{1}, c^{1}) = \\sum_{e=E} \\sum_{s=S} p(b^{1}, d^{1}, c^{1}, E, S) \\end{equation}\nyou will note this is very expensive computationally O(es) \u0026mdash; and if you have like a 1000 hidden variables you will die.\nWe therefore introduce sum-product elimination.\nsum-product elimination You will note the summation in the example above has a lot of interlocking for loops. 
You can \u0026ldquo;factor them out\u0026rdquo; via the sum-product elimination algorithm.\nSuppose you are interested in:\n\\begin{equation} P(b | d\u0026rsquo;, c\u0026rsquo;) \\end{equation}\nStep 1: write down factors Write down all factors associated with this computation:\n\\begin{equation} \\phi_{1}(B), \\phi_{2}(S), \\phi_{3}(E,B,S), \\phi_{4}(D,E), \\phi_{5}(C,E) \\end{equation}\nwe have evidence at two variables: \\(D, C\\).\nStep 2: performing factor conditioning for all evidence variables Therefore, \\(\\phi_{4}\\) and \\(\\phi_{5}\\) can be replaced by the factor conditioning as we observed \\(d, c\\), so we no longer need \\(d, c\\) as input because we know them:\nnow we have, to replace \\(\\phi_{4}, \\phi_{5}\\):\n\\begin{equation} \\phi_{6}(E), \\phi_{7}(E) \\end{equation}\nStep 3: using the law of total probability and factor product, get rid of hidden variables We then choose an ordering of the hidden variables and apply a factor product using the law of total probability to get rid of them:\nFirst get rid of any hidden variables Then use factor product to combine results \\begin{equation} \\phi_{8}(B,S) = \\sum_{E=e} \\phi_{3}(E,B,S) \\phi_{6}(e) \\phi_{7}(e) \\end{equation}\n\\begin{equation} \\phi_{9}(B) = \\sum_{S=s} \\phi_{2}(s) \\cdot \\phi_{8}(B,S) \\end{equation}\nWe now only have two factors left: \\(\\phi_{1}(B)\\phi_{9}(B)\\). 
We finally apply factor product again:\n\\begin{equation} \\phi_{10} (B) = \\phi_{9}(B) \\cdot \\phi_{1}(B) \\end{equation}\nApproximate Inference See Approximate Inference\nGaussian Inference See Inference for Gaussian Models\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e is the act of updating the distribution of a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e based on distribution of actually observed variables:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X|Y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(Y\\) is observed, and we want to know how likely \\(X\\) would therefore be.\u003c/p\u003e\n\u003cp\u003eWe call the set \\(X\\) the \u0026ldquo;query variables\u0026rdquo;, \\(Y\\) as \u0026ldquo;evidence varibales\u0026rdquo;, and anything that we didn\u0026rsquo;t use which connects the two variables as \u0026ldquo;hidden variables\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eIf things are not in the right order of \\(X\\) and \\(Y\\), consider the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"inference-is-hard\"\u003eInference is Hard\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emix of \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e and \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eresults could be either a \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e or a \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_09-52-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSuppose we\u0026rsquo;d like to know \\(P(b^{1} | d^{1}, c^{1})\\), where \\(b^{1}\\) is considered a 
query variable, and \\(c^{1}\\) is considered evidence varibales. The definition of the \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(b^{1} | d^{1}, c^{1}) = \\frac{p(b^{1}, d^{1}, c^{1})}{p(d^{1}, c^{1})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo compute \\(p(b^{1}d^{1}c^{1})\\), we first compute:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(b^{1}, d^{1}, c^{1}, E, S)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, use the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(b^{1}, d^{1}, c^{1}) = \\sum_{e=E} \\sum_{s=S} p(b^{1}, d^{1}, c^{1}, E, S)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note this is very expensive computationally O(es) \u0026mdash; and if you have like a 1000 hidden variables you will die.\u003c/p\u003e\n\u003cp\u003eWe therefore introduce \u003ca href=\"#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"sum-product-elimination\"\u003esum-product elimination\u003c/h2\u003e\n\u003cp\u003eYou will note the summation in the example above has a lot of interlocking for loops. 
You can \u0026ldquo;factor them out\u0026rdquo; via the \u003ca href=\"#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e algorithm.\u003c/p\u003e\n\u003cp\u003eSuppose you are interested in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(b | d\u0026rsquo;, c\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"step-1-write-down-factors\"\u003eStep 1: write down factors\u003c/h3\u003e\n\u003cp\u003eWrite down all \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es associated with this computation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{1}(B), \\phi_{2}(S), \\phi_{3}(E,B,S), \\phi_{4}(D,E), \\phi_{5}(C,E)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have evidence at two variables: \\(D, C\\).\u003c/p\u003e\n\u003ch3 id=\"step-2-performing-factor-conditioning--kbhfactor-dot-md--for-all-evidence-variables\"\u003eStep 2: performing \u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e for all evidence variables\u003c/h3\u003e\n\u003cp\u003eTherefore, \\(\\phi_{4}\\) and \\(\\phi_{5}\\) can be replaced by the \u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e as we observed \\(d, c\\), so we no longer need \\(d, c\\) as input because we know them:\u003c/p\u003e\n\u003cp\u003enow we have, to replace \\(\\phi_{4}, \\phi_{5}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{6}(E), \\phi_{7}(E)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"step-3-using-the-law-of-total-probability--kbhprobability-dot-md--and-factor-product--kbhfactor-dot-md--get-rid-of-hidden-variables\"\u003eStep 3: using the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e and \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e, get rid of hidden variables\u003c/h3\u003e\n\u003cp\u003eWe then choose an ordering of the \u003ca href=\"/posts/kbhinference/\"\u003ehidden 
variables\u003c/a\u003e and apply a \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e using the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to get rid of them:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFirst get rid of any hidden variables\u003c/li\u003e\n\u003cli\u003eThen use \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e to combine results\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{8}(B,S) = \\sum_{E=e} \\phi_{3}(E,B,S) \\phi_{6}(e) \\phi_{7}(e)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{9}(B) = \\sum_{S=s} \\phi_{2}(s) \\cdot \\phi_{8}(B,S)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now only have two \u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003es left: \\(\\phi_{1}(B)\\phi_{9}(B)\\). We finally apply \u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\phi_{10} (B) = \\phi_{9}(B) \\cdot \\phi_{1}(B)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"approximate-inference\"\u003eApproximate Inference\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhapproximate_inference/\"\u003eApproximate Inference\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"gaussian-inference\"\u003eGaussian Inference\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhinference_for_gaussian_models/\"\u003eInference for Gaussian Models\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinference/","tags":null,"title":"inference"},{"categories":null,"contents":"If we know that \\(a,b\\) are both Gaussian distributions, then we have that:\n\\begin{equation} \\mqty[a \\\\ b] \\sim \\mathcal{N} \\qty(\\mqty[\\mu_{a} \\\\mu_{b}], \\mqty[A \u0026amp; C \\\\ C^{T} \u0026amp; B]) \\end{equation}\nwhereby:\n\\(A\\) is the covariance of each element of \\(A\\) \\(B\\) is 
the covariance of each element of \\(B\\) \\(C\\) is the covariance of \\(A\\) against \\(B\\) To perform inference:\n\\begin{equation} p(a|b) = \\mathcal{N}(a | \\mu_{a|B}, \\Sigma_{a|b}) \\end{equation}\nwherby:\n\\begin{equation} \\mu_{a|b} = \\mu_{a} + CB^{-1}(b-\\mu_{b}) \\end{equation}\n\\begin{equation} \\Sigma_{a|b} = A - CB^{-1}C^{T} \\end{equation}\nIts a closed form solution. Tada.\nWe know that \\(B\\) is positive semidefinite, and that its invertible, from the fact that its a covariance.\n","html":"\u003cp\u003eIf we know that \\(a,b\\) are both \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003es, then we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[a \\\\ b] \\sim \\mathcal{N} \\qty(\\mqty[\\mu_{a} \\\\mu_{b}], \\mqty[A \u0026amp; C \\\\ C^{T} \u0026amp; B])\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(A\\) is the \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of each element of \\(A\\)\u003c/li\u003e\n\u003cli\u003e\\(B\\) is the \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of each element of \\(B\\)\u003c/li\u003e\n\u003cli\u003e\\(C\\) is the \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e of \\(A\\) against \\(B\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTo perform inference:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(a|b) = \\mathcal{N}(a | \\mu_{a|B}, \\Sigma_{a|b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewherby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mu_{a|b} = \\mu_{a} + CB^{-1}(b-\\mu_{b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Sigma_{a|b} = A - CB^{-1}C^{T}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIts a closed form solution. 
Tada.\u003c/p\u003e\n\u003cp\u003eWe know that \\(B\\) is positive semidefinite, and that its invertible, from the fact that its a \u003ca href=\"/posts/kbhcovariance/\"\u003ecovariance\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinference_for_gaussian_models/","tags":null,"title":"Inference for Gaussian Models"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhinflectional_words/","tags":null,"title":"inflectional words"},{"categories":null,"contents":"Information Retrival is trying to find material within large collections which is unstructured which satisfies an information need (of structured info).\nUnstructured information has had a massive outburst after the millennium.\nIMPORTANTLY: evaluating Information Retrival is based on Precision/Recall/F on information need and not the query.\nFor ranked system, we can come up with a curve of precision-recall curve by selecting increasing \\(k\\), or mean average precision.\nBasic Terminology collection a set of documents\u0026mdash;could by static, or dynamically added\ngoal retrieve documents with information relevant to the user\u0026rsquo;s information need + to complete a task\ninformation need information need is the actual information that is needed by a search; this is usually translated into a search query, which is actually used to search.\nquery query is a computer accessible form of text which searches to answer an information need.\ninformation need: \u0026ldquo;info about removing mice without killing them\u0026rdquo; query: \u0026ldquo;trapping mouse alive\u0026rdquo; Stages of Interpolation user task =\u0026gt; info need: we may not be looking for the right info info need =\u0026gt; query: we may not be using the best methods to get the info we are looking for Motivation \u0026ldquo;what\u0026rsquo;s wrong with grepping?\u0026rdquo;\nwe cannot afford to do a linear search over web-scale data a \u0026ldquo;NOT\u0026rdquo; query is 
non-trivial no semantics we have no ranking, so we don\u0026rsquo;t know what\u0026rsquo;s the \u0026ldquo;best\u0026rdquo; document Ranked Approaches Ranked Information Retrieval\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e is trying to \u003cstrong\u003efind material\u003c/strong\u003e within \u003cstrong\u003elarge collections\u003c/strong\u003e which is \u003cstrong\u003eunstructured\u003c/strong\u003e which satisfies an \u003cstrong\u003einformation need\u003c/strong\u003e (of structured info).\u003c/p\u003e\n\u003cp\u003eUnstructured information has had a massive outburst after the millennium.\u003c/p\u003e\n\u003cp\u003eIMPORTANTLY: evaluating \u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e is based on Precision/Recall/F on \u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e and not the \u003ca href=\"#query\"\u003equery\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFor ranked system, we can come up with a curve of precision-recall curve by selecting increasing \\(k\\), or \u003ca href=\"/posts/kbhmean_average_precision/\"\u003emean average precision\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"basic-terminology\"\u003eBasic Terminology\u003c/h2\u003e\n\u003ch3 id=\"collection\"\u003ecollection\u003c/h3\u003e\n\u003cp\u003ea set of documents\u0026mdash;could by static, or dynamically added\u003c/p\u003e\n\u003ch3 id=\"goal\"\u003egoal\u003c/h3\u003e\n\u003cp\u003eretrieve documents with information \u003cstrong\u003erelevant to the user\u0026rsquo;s information need\u003c/strong\u003e + to complete a \u003cstrong\u003etask\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"information-need\"\u003einformation need\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e is the actual information that is needed by a search; this is usually translated into a search \u003ca 
href=\"#query\"\u003equery\u003c/a\u003e, which is actually used to search.\u003c/p\u003e\n\u003ch3 id=\"query\"\u003equery\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#query\"\u003equery\u003c/a\u003e is a computer accessible form of text which searches to answer an \u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#information-need\"\u003einformation need\u003c/a\u003e: \u0026ldquo;info about removing mice without killing them\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#query\"\u003equery\u003c/a\u003e: \u0026ldquo;trapping mouse alive\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stages-of-interpolation\"\u003eStages of Interpolation\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003euser task =\u0026gt; info need: we may not be looking for the right info\u003c/li\u003e\n\u003cli\u003einfo need =\u0026gt; query: we may not be using the best methods to get the info we are looking for\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s wrong with grepping?\u0026rdquo;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe cannot afford to do a linear search over web-scale data\u003c/li\u003e\n\u003cli\u003ea \u0026ldquo;NOT\u0026rdquo; query is non-trivial\u003c/li\u003e\n\u003cli\u003eno semantics\u003c/li\u003e\n\u003cli\u003ewe have no ranking, so we don\u0026rsquo;t know what\u0026rsquo;s the \u0026ldquo;best\u0026rdquo; document\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"ranked-approaches\"\u003eRanked Approaches\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinformation_retrival/","tags":null,"title":"Information 
Retrival"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhinformation_theory/","tags":null,"title":"information theory"},{"categories":null,"contents":"Information Units are unique entities mentioned during an utterance; for a sentence like \u0026ldquo;There is a boy. The boy is a brother. He is stealing a cookie. The sister is watching.\u0026rdquo;, \u0026ldquo;boy, cookie, sister\u0026rdquo; are possible IUs.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhiu/\"\u003eInformation Unit\u003c/a\u003es are unique entities mentioned during an utterance; for a sentence like \u0026ldquo;There is a boy. The boy is a brother. He is stealing a cookie. The sister is watching.\u0026rdquo;, \u0026ldquo;boy, cookie, sister\u0026rdquo; are possible \u003ca href=\"/posts/kbhiu/\"\u003eIU\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhiu/","tags":null,"title":"Information Units (Linguistics)"},{"categories":null,"contents":"First order IVP The class of problems described as:\n\\begin{equation} \\dv{y}{t} = f(t, y) \\end{equation}\nand:\n\\begin{equation} y(t_0) = y_0 \\end{equation}\nwe need to figure \u0026ldquo;which of the general solutions of the DiffEqu satisfy the general value.\nTo do this, we simply have to plug in the initial value and solve for our constant \\(K\\).\nSecond order IVP \\begin{equation} \\dv[2]{d}{t} = f(t,y,y\u0026rsquo;) \\end{equation}\nthis requires two initial conditions to fully specify (because two variables becomes constant and goes away).\none and exactly one solution exist for every initial condition of an IVP The ODE \\(y\u0026rsquo; = f(t,y)\\) with initial condition \\(y(t_0) = y_0\\), where, \\(f\\) has to be continuous in some maximal interval \\(t,y\\), and differentiable in \\(y\\), has a unique solution on some maximal interval of \\(t\\).\nThat is: for every single point on a solution space of an IVP, each point is covered one solution and only one solution. 
Its possible for that function to diverge beyond that point.\nThis is also true for second order differential equations (its written as linear homogenous constant coeffiicient; but its true generally for 2nd order IVPs):\n\\begin{equation} \\begin{cases} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; +by = 0 \\\\ y(t_0) = y_0 \\\\ y\u0026rsquo;(t_0) = y\u0026rsquo;_{0} \\end{cases} \\end{equation}\nwill have one and only one solution per \\(y_0\\), \\(y_0\u0026rsquo;\\).\nauxiliary constants :PROPERTIES: :ID: 20C96C21-7C77-4F84-BDDD-B0F96E509200\n\\(y_0\\), or some \\(C\\) that arise out of constant of integration. Essentially, the values which fin down a specific function from a function family\n","html":"\u003ch2 id=\"first-order-ivp\"\u003eFirst order IVP\u003c/h2\u003e\n\u003cp\u003eThe class of problems described as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{t} = f(t, y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t_0) = y_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe need to figure \u0026ldquo;which of the general solutions of the DiffEqu satisfy the general value.\u003c/p\u003e\n\u003cp\u003eTo do this, we simply have to plug in the initial value and solve for our constant \\(K\\).\u003c/p\u003e\n\u003ch2 id=\"second-order-ivp\"\u003eSecond order IVP\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{d}{t} = f(t,y,y\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis requires two initial conditions to fully specify (because two variables becomes constant and goes away).\u003c/p\u003e\n\u003ch2 id=\"one-and-exactly-one-solution-exist-for-every-initial-condition-of-an-ivp\"\u003eone and exactly one solution exist for every initial condition of an IVP\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e \\(y\u0026rsquo; = f(t,y)\\) with initial condition \\(y(t_0) = y_0\\), where, \\(f\\) has to be continuous in some 
maximal interval \\(t,y\\), and differentiable in \\(y\\), has a unique solution on some \u003ca href=\"/posts/kbhmaximal_interval/\"\u003emaximal interval\u003c/a\u003e of \\(t\\).\u003c/p\u003e\n\u003cp\u003eThat is: for every single point on a solution space of an \u003ca href=\"/posts/kbhinitial_value_problems/\"\u003eIVP\u003c/a\u003e, each point is covered one solution and only one solution. Its possible for that function to diverge beyond that point.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThis is also true for second order differential equations (its written as linear homogenous constant coeffiicient; but its true generally for 2nd order IVPs):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; +by = 0 \\\\\ny(t_0) = y_0 \\\\\ny\u0026rsquo;(t_0) = y\u0026rsquo;_{0}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewill have one and only one solution per \\(y_0\\), \\(y_0\u0026rsquo;\\).\u003c/p\u003e\n\u003ch2 id=\"auxiliary-constants\"\u003eauxiliary constants\u003c/h2\u003e\n\u003cp\u003e:PROPERTIES:\n:ID: 20C96C21-7C77-4F84-BDDD-B0F96E509200\u003c/p\u003e\n\u003cp\u003e\\(y_0\\), or some \\(C\\) that arise out of constant of integration. 
Essentially, the values which fin down a specific function from a function family\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinitial_value_problems/","tags":null,"title":"initial value problems"},{"categories":null,"contents":"An injective function is one which is one-to-one: that it maps distinct inputs to distinct outputs.\nconstituents A function \\(T: V \\to W\\) requirements \\(T\\) is injective if \\(Tu = Tv\\) implies \\(u=v\\).\nadditional information injectivity implies that null space is \\(\\{0\\}\\) Proof: let \\(T \\in \\mathcal{L}(V,W)\\); \\(T\\) is injective IFF \\(null\\ T = \\{0\\}\\).\ngiven injectivity Suppose \\(T\\) is injective.\nNow, we know that \\(0\\), because it indeed gets mapped by \\(T\\) to \\(0\\), is in the null space of \\(T\\).\nBecause linear maps take \\(0\\) to \\(0\\), \\(T0=0\\). Now, because \\(T\\) is injective, for any \\(v\\) that \\(Tv = 0 = T 0\\) implies \\(v=0\\).\nSo \\(0\\) is the only thing that an injective \\(T\\) can map to \\(0\\), and it is indeed in the null space, so the null space is just \\(\\{0\\}\\).\ngiven \\(null\\ T=\\{0\\}\\) Suppose we have some \\(Tu = Tv\\), we desire to proof that \\(u=v\\) to show that \\(T\\) is injective.\nGiven \\(Tu=Tv\\), we have that \\(Tu-Tv\\). Given additivity, \\(T(u-v) = 0\\). This makes \\((u-v) \\in\\ null\\ T\\).\nGiven only \\(0\\) is in the null space of \\(T\\), \\(u-v = 0\\), so \\(u=v\\), as desired. 
\\(\\blacksquare\\).\nmap to smaller space is not injective See map to smaller space is not injective\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e is one which is one-to-one: that it maps distinct inputs to distinct outputs.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(T: V \\to W\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e if \\(Tu = Tv\\) implies \\(u=v\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"injectivity-implies-that-null-space--kbhnull-space-dot-md--is-0\"\u003einjectivity implies that \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is \\(\\{0\\}\\)\u003c/h3\u003e\n\u003cp\u003eProof: let \\(T \\in \\mathcal{L}(V,W)\\); \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e IFF \\(null\\ T = \\{0\\}\\).\u003c/p\u003e\n\u003ch4 id=\"given-injectivity--kbhinjectivity-dot-md\"\u003egiven \u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eSuppose \\(T\\) is injective.\u003c/p\u003e\n\u003cp\u003eNow, we know that \\(0\\), because it indeed gets mapped by \\(T\\) to \\(0\\), is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e, \\(T0=0\\). 
Now, because \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, for any \\(v\\) that \\(Tv = 0 = T 0\\) implies \\(v=0\\).\u003c/p\u003e\n\u003cp\u003eSo \\(0\\) is the only thing that an injective \\(T\\) can map to \\(0\\), and it is indeed in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e, so the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is just \\(\\{0\\}\\).\u003c/p\u003e\n\u003ch4 id=\"given-null-t-0\"\u003egiven \\(null\\ T=\\{0\\}\\)\u003c/h4\u003e\n\u003cp\u003eSuppose we have some \\(Tu = Tv\\), we desire to proof that \\(u=v\\) to show that \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGiven \\(Tu=Tv\\), we have that \\(Tu-Tv\\). Given additivity, \\(T(u-v) = 0\\). This makes \\((u-v) \\in\\ null\\ T\\).\u003c/p\u003e\n\u003cp\u003eGiven only \\(0\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\), \\(u-v = 0\\), so \\(u=v\\), as desired. \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch3 id=\"map-to-smaller-space-is-not-injective--kbhlinear-map-dot-md\"\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinjectivity/","tags":null,"title":"injectivity"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhinjectivity_implies_that_null_space_is_0/","tags":null,"title":"injectivity implies that null space is {0}"},{"categories":null,"contents":"constituents \\(V\\) a vector space \\((u,v)\\), an ordered pair of vectors in \\(V\\) (its not commutative!) 
requirements We define \\(\\langle u, v \\rangle \\in \\mathbb{F}\\) as the inner product of \\((u,v)\\) in that order!. It carries the following properties:\npositivity: \\(\\langle v, v\\rangle \\geq 0, \\forall v \\in V\\) definiteness: \\(\\langle v, v\\rangle = 0\\) IFF \\(v = 0\\) additivity in the first slot: \\(\\langle u+v, w\\rangle = \\langle u, w \\rangle + \\langle v, w \\rangle\\) homogeneity in the first slot: \\(\\langle \\lambda u, v \\rangle = \\lambda \\langle u, v \\rangle\\) conjugate symmetry: \\(\\langle u,v \\rangle = \\overline{\\langle v,u \\rangle}\\) additional information Inner Product Space An Inner Product Space is a vector space with a well-defined inner product. For instance, \\(\\mathbb{F}^{n}\\) has the canonical inner product named Euclidean Inner Product (see below, a.k.a. dot product for reals). The existence of such a well-defined inner product makes \\(\\mathbb{F}^{n}\\) an Inner Product Space.\nRare Axler moment, instead of \u0026ldquo;well-defined\u0026rdquo;, he says we want a vector space with an inner product \u0026ldquo;lurking nearby\u0026rdquo;; james bond style.\nproperties of inner product For a fixed \\(u \\in V\\), the function takes \\(v\\) to \\(\\langle v,u \\rangle\\) is a Linear Map \\(V \\to \\mathbb{F}\\) \\(\\langle 0,u \\rangle = 0\\) \\(\\langle u,0 \\rangle = 0\\) \\(\\langle u,v+w \\rangle = \\langle u,v \\rangle + \\langle u,w \\rangle\\) \\(\\langle u,\\lambda v \\rangle = \\bar{\\lambda}\\langle u,v \\rangle\\) Proof:\nInheriting the additivity and homogeneity of the definition of inner products Set \\(u\\) to be the fixed element for 1), set \\(0\\) to be the input, linear maps take \\(0\\) to \\(0\\) Apply conjugate symmetry to 2) Apply conjugate symmetry, inner product additivty, then conjugate back Apply conjugate symmetry, inner product homogeneity in the first slot, then conjugate back (of course leaving \\(\\lambda\\) out conjugated) Euclidean Inner Product For \\(x,y \\in \\mathbb{F}^{n}\\), 
one can define a pretty well-defined inner product by\n\\begin{equation} x \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}} \\end{equation}\nsimilar to dot product for the reals. This is called the Euclidean Inner Product and has the nice parallelity properties we saw.\ncomplex number shenanigans that motivate the inner product \u0026hellip;as both relevant and more general than the dot product, but also different in key areas.\nFirst, review complex numbers from our discussion in chapter 4. The main problem here is this:\nfor \\(z = (z_1, \\dots, z_{n}) \\in \\mathbb{C}^{n}\\), simply squaring each slot to take the norm may cause us to take a square root of a negative number (as each slot would then be \\(a^{2}-b^{2}\\) for a complex number). That\u0026rsquo;s no bueno because we want \\(\\|z\\|\\) to be real and non-negative.\nThis, therefore, suggests something similar for our inner product definition; to make sure that each slot end up being a real and non-negative number, we simply conjugate the second value:\n\\begin{equation} x \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}} \\end{equation}\nAlso, note that this definition give us an important result: if we reverse \\(x\\) and \\(y\\), we would be conjugating the other element! And so, we have that:\n\\begin{equation} x \\cdot y = \\bar{{y \\cdot x}} \\end{equation}\nderived by following the usual rules of complex conjugation. Note that none of these elementwisethings (the \\(x_{n}y_{n}\\) business) are actually in the definition of the inner product, as it is the rules of an Euclidean Inner Product.\ninner product of \\(L\\) periodic functions For \\(f,g : [0,L] \\to \\mathbb{R}\\), which are L-periodic, we define:\n\\begin{equation} \\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x} \\end{equation}\nRecall that L-periodic functions can be shifted without changing periodicity. 
But if for some reason you want to base it off of any two numbers with distance \\(L\\) in between:\n\\begin{equation} \\langle f,g \\rangle_{[a,b]} := \\frac{1}{b-a} \\int^{b}_{a} f(x) g(x) \\dd{x} \\end{equation}\nThe work of checking this is a well-formed inner product is left to absolutely nobody.\ninner product over complex-valued functions see inner product over complex-valued functions\n","html":"\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(V\\) a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\((u,v)\\), an \u003cem\u003eordered\u003c/em\u003e pair of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(V\\) (its not commutative!)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eWe define \\(\\langle u, v \\rangle \\in \\mathbb{F}\\) as the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e of \\((u,v)\\) \u003cstrong\u003ein that order!\u003c/strong\u003e. 
It carries the following properties:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003epositivity\u003c/strong\u003e: \\(\\langle v, v\\rangle \\geq 0, \\forall v \\in V\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edefiniteness\u003c/strong\u003e: \\(\\langle v, v\\rangle = 0\\) IFF \\(v = 0\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eadditivity in the first slot\u003c/strong\u003e: \\(\\langle u+v, w\\rangle = \\langle u, w \\rangle + \\langle v, w \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehomogeneity in the first slot\u003c/strong\u003e: \\(\\langle \\lambda u, v \\rangle = \\lambda \\langle u, v \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econjugate symmetry\u003c/strong\u003e: \\(\\langle u,v \\rangle = \\overline{\\langle v,u \\rangle}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"inner-product-space\"\u003eInner Product Space\u003c/h3\u003e\n\u003cp\u003eAn \u003ca href=\"#inner-product-space\"\u003eInner Product Space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e with a well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e. For instance, \\(\\mathbb{F}^{n}\\) has the canonical \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e named \u003ca href=\"#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e (see below, a.k.a. \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e for reals). 
The existence of such a well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e makes \\(\\mathbb{F}^{n}\\) an \u003ca href=\"#inner-product-space\"\u003eInner Product Space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eRare Axler moment, instead of \u0026ldquo;well-defined\u0026rdquo;, he says we want a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e with an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e \u0026ldquo;lurking nearby\u0026rdquo;; james bond style.\u003c/p\u003e\n\u003ch3 id=\"properties-of-inner-product\"\u003eproperties of inner product\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eFor a fixed \\(u \\in V\\), the function takes \\(v\\) to \\(\\langle v,u \\rangle\\) is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(V \\to \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle 0,u \\rangle = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle u,0 \\rangle = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle u,v+w \\rangle = \\langle u,v \\rangle + \\langle u,w \\rangle\\)\u003c/li\u003e\n\u003cli\u003e\\(\\langle u,\\lambda v \\rangle = \\bar{\\lambda}\\langle u,v \\rangle\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eInheriting the additivity and homogeneity of the definition of \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003eSet \\(u\\) to be the fixed element for 1), set \\(0\\) to be the input, linear maps take \\(0\\) to \\(0\\)\u003c/li\u003e\n\u003cli\u003eApply conjugate symmetry to 2)\u003c/li\u003e\n\u003cli\u003eApply conjugate symmetry, \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e additivty, then conjugate back\u003c/li\u003e\n\u003cli\u003eApply conjugate symmetry, \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e homogeneity in the first slot, then conjugate back (of 
course leaving \\(\\lambda\\) out conjugated)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/h3\u003e\n\u003cp\u003eFor \\(x,y \\in \\mathbb{F}^{n}\\), one can define a pretty well-defined \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e by\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esimilar to \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e for the reals. This is called the \u003ca href=\"#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e and has the nice parallelity properties we saw.\u003c/p\u003e\n\u003ch3 id=\"complex-number--kbhcomplex-number-dot-md--shenanigans-that-motivate-the-inner-product--kbhinner-product-dot-md\"\u003e\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e shenanigans that motivate the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u0026hellip;as both relevant and more general than the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e, but also different in key areas.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhthoughts_on_axler_4/#first-review-id-7c982e7e-b8be-4053-a71e-fc0dba7a5da5-complex-number-s\"\u003eFirst, review complex numbers\u003c/a\u003e from our discussion in chapter 4. The main problem here is this:\u003c/p\u003e\n\u003cp\u003efor \\(z = (z_1, \\dots, z_{n}) \\in \\mathbb{C}^{n}\\), simply squaring each slot to take the \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e may cause us to take a square root of a negative number (as each slot would then be \\(a^{2}-b^{2}\\) for a complex number). 
That\u0026rsquo;s no bueno because we want \\(\\|z\\|\\) to be real and non-negative.\u003c/p\u003e\n\u003cp\u003eThis, therefore, suggests something similar for our \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e definition; to make sure that each slot end up being a real and non-negative number, we simply \u003ca href=\"/posts/kbhcomplex_number/#complex-conjugate\"\u003econjugate\u003c/a\u003e the second value:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\cdot y = x_1 \\bar{y_{1}} + \u0026hellip; + x_{n} \\bar{y_{n}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAlso, note that this definition give us an important result: if we reverse \\(x\\) and \\(y\\), we would be conjugating the other element! And so, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\cdot y = \\bar{{y \\cdot x}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ederived by following the usual rules of \u003ca href=\"/posts/kbhcomplex_number/#complex-conjugate\"\u003ecomplex conjugation\u003c/a\u003e. Note that none of these elementwisethings (the \\(x_{n}y_{n}\\) business) are actually in the definition of the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e, as it is the rules of an \u003ca href=\"#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"inner-product-of-l-periodic-functions\"\u003einner product of \\(L\\) periodic functions\u003c/h3\u003e\n\u003cp\u003eFor \\(f,g : [0,L] \\to \\mathbb{R}\\), which are \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e, we define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,g \\rangle := \\frac{1}{L} \\int_{0}^{L} f(x) g(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhsu_math53_feb252024/#l-periodicity\"\u003eL-periodic\u003c/a\u003e functions can be shifted without changing periodicity. 
But if for some reason you want to base it off of any two numbers with distance \\(L\\) in between:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f,g \\rangle_{[a,b]} := \\frac{1}{b-a} \\int^{b}_{a} f(x) g(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe work of checking this is a well-formed \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is left to absolutely nobody.\u003c/p\u003e\n\u003ch3 id=\"inner-product-over-complex-valued-functions--kbhcomplex-exponential-dot-md\"\u003e\u003ca href=\"/posts/kbhcomplex_exponential/#inner-product--kbhinner-product-dot-md--over-complex-valued-functions\"\u003einner product over complex-valued functions\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhcomplex_exponential/#inner-product--kbhinner-product-dot-md--over-complex-valued-functions\"\u003einner product over complex-valued functions\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinner_product/","tags":null,"title":"inner product"},{"categories":null,"contents":"insertion sort is an algorithm that solves the sorting problem.\nconstituents a sequence of \\(n\\) numbers \\(\\{a_1, \\dots a_{n}\\}\\), called keys\nrequirements Insertion sort provides an ordered sequence \\(\\{a_1\u0026rsquo;, \\dots a_{n}\u0026rsquo;\\}\\) s.t. \\(a_1\u0026rsquo; \\leq \\dots \\leq a_{n}\u0026rsquo;\\)\nimplementation I don\u0026rsquo;t know why, but it seems like CLRS\u0026rsquo; implementation is back-to font. 
But perhaps I\u0026rsquo;m just mistaken.\nvoid insertion_sort(int length, int *A) { for (int j=1; j\u0026lt;length; j++) { int key = A[j]; // insert the key correctly into the // sorted sequence, when appropriate int i = j-1; while (i \u0026gt; 0 \u0026amp;\u0026amp; A[i] \u0026gt; key) { // if things before had // larger key // move them A[i+1] = A[i]; // move it down // move our current value down i -= 1; } // put our new element into the correct palace A[i+1] = key; } } additional information proof We use loop invariant method to show that our algorithm is correct. Our invariant is that the array \\(A[0, \\dots, j-1]\\) is sorted \\(\\forall j 0 \\dots L+1\\).\nInitialization: at the first step, \\(j=1\\) (second element), the subarray of \\(A[0, \\dots j-1]\\) (namely, only the first element), is sorted trivially Maintenance: during each loop, we move \\(j\\) to the right, only being done when the subarray to the left is correctly sorted because of \\(j\\) is moving forward until length, it will terminate As \\(j\\), by the end, covers the entire loop, our loop terminates at \\(L+1\\) and invariant (sortedness) is maintained between \\(A[0, \\dots j]\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinsertion_sort/\"\u003einsertion sort\u003c/a\u003e is an algorithm that solves the sorting problem.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003ea sequence of \\(n\\) numbers \\(\\{a_1, \\dots a_{n}\\}\\), called \u003ca href=\"/posts/kbhkeys/\"\u003ekeys\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eInsertion sort provides an ordered sequence \\(\\{a_1\u0026rsquo;, \\dots a_{n}\u0026rsquo;\\}\\) s.t. \\(a_1\u0026rsquo; \\leq \\dots \\leq a_{n}\u0026rsquo;\\)\u003c/p\u003e\n\u003ch2 id=\"implementation\"\u003eimplementation\u003c/h2\u003e\n\u003cp\u003eI don\u0026rsquo;t know why, but it seems like CLRS\u0026rsquo; implementation is back-to font. 
But perhaps I\u0026rsquo;m just mistaken.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-cpp\" data-lang=\"cpp\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003einsertion_sort\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elength\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elength\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e++\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// insert the key correctly into the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sorted sequence, when appropriate\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// if things before had\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// larger key\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// move them\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// move it down\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// move our current value down\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// put our new element into the correct palace\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eA\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"proof\"\u003eproof\u003c/h3\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhloop_invariant/\"\u003eloop invariant\u003c/a\u003e method to show that our algorithm is correct. Our invariant is that the array \\(A[0, \\dots, j-1]\\) is sorted \\(\\forall j 0 \\dots L+1\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInitialization: at the first step, \\(j=1\\) (second element), the subarray of \\(A[0, \\dots j-1]\\) (namely, only the first element), is sorted trivially\u003c/li\u003e\n\u003cli\u003eMaintenance: during each loop, we move \\(j\\) to the right, only being done when the subarray to the left is correctly sorted\u003c/li\u003e\n\u003cli\u003ebecause of \\(j\\) is moving forward until length, it will terminate\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs \\(j\\), by the end, covers the entire loop, our loop terminates at \\(L+1\\) and invariant (sortedness) is maintained between \\(A[0, \\dots j]\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinsertion_sort/","tags":null,"title":"insertion sort"},{"categories":null,"contents":"an integer (\\(\\mathbb{Z}\\)) is the natural numbers, zero, and negative numbers: \u0026hellip;,-4,-3,-2,-1,0,1,2,2,3\nrepresenting integers what are the limitations of computational arithmetic how to perform efficient arithmetic how to encode data more compactly and efficiently See also computer number system\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e (\\(\\mathbb{Z}\\)) is the \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural numbers\u003c/a\u003e, zero, and negative numbers: \u0026hellip;,-4,-3,-2,-1,0,1,2,2,3\u003c/p\u003e\n\u003ch2 id=\"representing-integers\"\u003erepresenting 
integers\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhat are the limitations of computational arithmetic\u003c/li\u003e\n\u003cli\u003ehow to perform efficient arithmetic\u003c/li\u003e\n\u003cli\u003ehow to encode data more compactly and efficiently\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhbinary_number_system/\"\u003ecomputer number system\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinteger/","tags":null,"title":"integer"},{"categories":null,"contents":"The integrating factor \\(\\rho(x)\\) is a value that helps undo the product rule. For which:\n\\begin{equation} log(\\rho(x)) = \\int P(x)dx \\end{equation}\nfor some function \\(P(x)\\).\nSeparating the \\(\\rho(x)\\) out, we have therefore:\n\\begin{equation} e^{\\int P dx} = \\rho(x) \\end{equation}\nWhy is this helpful and undoes the product rule? This is because of a very interesting property of how \\(\\rho(x)\\) behaves.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhintegrating_factor/\"\u003eintegrating factor\u003c/a\u003e \\(\\rho(x)\\) is a value that helps undo the product rule. For which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nlog(\\rho(x)) = \\int P(x)dx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some function \\(P(x)\\).\u003c/p\u003e\n\u003cp\u003eSeparating the \\(\\rho(x)\\) out, we have therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{\\int P dx} = \\rho(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhy is this helpful and undoes the product rule? This is because of a very interesting property of how \\(\\rho(x)\\) behaves.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhintegrating_factor/","tags":null,"title":"integrating factor"},{"categories":null,"contents":"Goal We are going to solve the inter-temporal choice problem, for ten time stamps, and perform some numerical optimization of the results\nMain Methods We do this by solving backwards. 
We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\nLet us first declare the function for power utility. \\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\nThe power utility function is defined by:\n\\begin{equation} U( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma} \\end{equation}\nImplementing Power Utility # risk aversion y = var(\u0026#34;y\u0026#34;, latex_name=\u0026#34;\\gamma\u0026#34;, domain=\u0026#39;real\u0026#39;) # discount factor d = var(\u0026#34;d\u0026#34;, latex_name=\u0026#34;\\delta\u0026#34;, domain=\u0026#39;real\u0026#39;) # final value at time t=f k_f = var(\u0026#34;k_f\u0026#34;, latex_name=\u0026#34;k_f\u0026#34;, domain=\u0026#39;real\u0026#39;) # the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1} m = var(\u0026#34;m\u0026#34;, latex_name=\u0026#34;\\mu\u0026#34;, domain=\u0026#39;real\u0026#39;) # boundary conditions assume(y\u0026gt;0) assume(y\u0026lt;1) assume(d\u0026gt;0) assume(d\u0026lt;1) # power utility u(c) = ((c^(1-y)-1)/(1-y)) u c |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1) End Boundary Conditions At the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\n# at the final time, leave nothing for investment I=0; u_total = 0 Bottom-Up Dynamic Programming From every step from here, we will discount this utility by \\(d\\), then solve for the previous step\u0026rsquo;s target consumption that would maximize utility. 
That is, at every step, we desire:\n\\begin{equation} k_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t} \\end{equation}\n\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\nand\n\\(\\max u(c_{t})\\)\nRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\n\\begin{equation} I_{t-1} = \\frac{k_t}{(1+m)} \\end{equation}\nActual implementation # create an dictionary to keep track of all the capital variables k = {} # we will iterate time stamps 1-10 T = 10 # a variable for captial at that time for i in range(T): k_t = var(f\u0026#34;k_{T-i}\u0026#34;, latex_name=f\u0026#34;k_{T-i}\u0026#34;) # t-i becasue we are solving backwards; i0 = T10 # what can be consumed at every time stamp # is the k of the previous timestamp, minus # what needs to be left over # we multiply here by d because we want to # discount future utility u_total = d*u_total + u(k_t-I) # add the current variable to dictionary k[T-i] = k_t # recall again i0=T10 because backwards # solve for the next investment amount I = k_t/(1+m) u_total -(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1) Optimization with some constants We can now use the scipy numerical optimizer to minimize this target. 
Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\nWe will set some initial conditions:\n_m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less Optimization Target Function Recall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution.\nThe code is actually just doing some function substitution, so its not very exciting.\n# we reverse the k_* variables because it is stored in the dictionary # in reverse, because we knew the reverse condition first optim_variables = list(k.values()) optim_variables.reverse() # this function is also the callback, so it returning # True terminates execution def u_total_loss(x): # the optimizer\u0026#39;s current step # we want to take [1:], because we need to keep k1 the same at _k the # initial value substitution_dict = {key: val for key, val in zip(optim_variables[1:], x)} # initial conditions substitution_dict[m] = _m substitution_dict[y] = _y substitution_dict[d] = _d substitution_dict[d] = _d # we want to keep the initial value k1 the same substitution_dict[k[1]] = _k try: # get value content = (-1*u_total).subs(substitution_dict) # recall we multiply by -1 because we are MINIMIZING, so the loss is # the inverse of the maximization utility target return float(content.n()), False except: return 0, True Optimize! Finally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. 
do nothing) and have the optimizer work it out from there:\nfrom scipy.optimize import minimize target = minimize(lambda x:u_total_loss(x)[0], [_k for _ in range(T-1)], callback=lambda x:u_total_loss(x)[1]) target fun: -50.71592850322347 hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 4433.87331935, 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017, 1126.51562458], ... [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648, 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041, 2911.30717272]]) jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00, 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06, -5.24520874e-06]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 1360 nit: 130 njev: 136 status: 0 success: True x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591, 361.51493714, 272.10309839, 192.29084196, 120.94057011, 57.12129925]) Recovering Actual Dollar Consumption Amount Awesome! 
We now can recover \\(c\\) at each point by a nice helpful function:\nc(k0, k1) = k0 - k1/(_m+1) \u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\nSo, let us translate our list to the actual values consumed:\ncapital_over_time = [_k]+target.x.tolist() # we need to add the initial condition _k back to the # inventory list consumption_over_time = [c(i,j) for i,j in zip(capital_over_time, capital_over_time[1:])] consumption_over_time [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] Examples of Output The next set of slides show examples of possible optimization outputs\u0026mdash;how decisions by the inter-temporal choice problem changes based on the inputs.\nRisk Averse _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] More Return Winning Game _m = 0.1 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [154.860597149863, 152.989432556196, 151.010433069881, 149.201249715528, 147.329750167852, 145.539019666462, 143.739371599600, 141.984228587213, 140.243839963791] More Risk _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.2 # generally risky _d = 0.9 # the future matters slightly less [388.525041338376, 241.124420093987, 149.632568775223, 92.8644259086613, 57.6330459746870, 35.7667230511026, 22.1970017374152, 13.7754327365677, 8.54930907023498] Loosing Game _m = -0.01 # this is a loosing stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters fun: 0 hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 
0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]) jac: array([0., 0., 0., 0., 0., 0., 0., 0., 0.]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 10 nit: 0 njev: 1 status: 0 success: True x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.]) Evidently: do nothing if we have a loosing cause.\nWinning Game _m = 1.00 # this is SUPER winning stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters [125.667556437602, 241.474827418105, 460.068836905327, 868.972817783791, 4540.45893314523, 4219.93058738029, 3988.05775624984, 3996.89431939885, 3615.74982832315] We made so much money that we are spending a lot of it and still spending it.\n","html":"\u003ch2 id=\"goal\"\u003eGoal\u003c/h2\u003e\n\u003cp\u003eWe are going to solve the inter-temporal choice problem, for ten time stamps, and perform some numerical optimization of the results\u003c/p\u003e\n\u003ch2 id=\"main-methods\"\u003eMain Methods\u003c/h2\u003e\n\u003cp\u003eWe do this by solving backwards. We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\u003c/p\u003e\n\u003cp\u003eLet us first declare the function for \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e. 
\\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\u003c/p\u003e\n\u003cp\u003eThe power utility function is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"implementing-power-utility\"\u003eImplementing Power Utility\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# risk aversion\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;y\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\gamma\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# discount factor\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ed\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;d\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\delta\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# final value at time t=f\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_f\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;m\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\mu\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# boundary conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# power utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ec |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"end-boundary-conditions\"\u003eEnd Boundary Conditions\u003c/h2\u003e\n\u003cp\u003eAt the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). 
We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# at the final time, leave nothing for investment\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"bottom-up-dynamic-programming\"\u003eBottom-Up Dynamic Programming\u003c/h2\u003e\n\u003cp\u003eFrom every step from here, we will discount this utility by \\(d\\), then solve for the \u003cem\u003eprevious\u003c/em\u003e step\u0026rsquo;s target consumption that would maximize utility. That is, at every step, we desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\(\\max u(c_{t})\\)\u003c/p\u003e\n\u003cp\u003eRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) 
Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI_{t-1} = \\frac{k_t}{(1+m)}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"actual-implementation\"\u003eActual implementation\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# create an dictionary to keep track of all the capital variables\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we will iterate time stamps 1-10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# a variable for captial at that time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# t-i becasue we are solving backwards; i0 = T10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what can be consumed at every time 
stamp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# is the k of the previous timestamp, minus\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what needs to be left over\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we multiply here by d because we want to\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# discount future utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# add the current variable to dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# recall again i0=T10 because backwards\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# solve for the next investment amount\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y 
- 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimization-with-some-constants\"\u003eOptimization with some constants\u003c/h2\u003e\n\u003cp\u003eWe can now use the scipy numerical optimizer to minimize this target. Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\u003c/p\u003e\n\u003cp\u003eWe will set some initial conditions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimization-target-function\"\u003eOptimization Target Function\u003c/h2\u003e\n\u003cp\u003eRecall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution.\u003c/p\u003e\n\u003cp\u003eThe code is actually just doing some function substitution, so its not very exciting.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we reverse the k_* variables because it is stored in the dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# in reverse, because we knew the reverse condition first\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# this function is also the callback, so it returning\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# True terminates execution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the optimizer\u0026#39;s current step\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to take [1:], because we need to keep k1 the same at _k the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to keep the initial value k1 the same\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003etry\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# get value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# recall we multiply by -1 because we are MINIMIZING, so the loss is\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the inverse of the maximization utility target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()),\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eexcept\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimize\"\u003eOptimize!\u003c/h2\u003e\n\u003cp\u003eFinally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. do nothing) and have the optimizer work it out from there:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.optimize\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecallback\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: -50.71592850322347\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 4433.87331935,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1126.51562458],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2911.30717272]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -5.24520874e-06])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 1360\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 
130\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 136\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 361.51493714, 272.10309839, 192.29084196, 120.94057011,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.12129925])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"recovering-actual-dollar-consumption-amount\"\u003eRecovering Actual Dollar Consumption Amount\u003c/h2\u003e\n\u003cp\u003e\u003cem\u003eAwesome!\u003c/em\u003e We now can recover \\(c\\) at each point by a nice helpful function:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSo, let us translate our list to the actual values consumed:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etolist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# we need to add the initial condition _k back to the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# inventory list\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"examples-of-output\"\u003eExamples of Output\u003c/h2\u003e\n\u003cp\u003eThe next set of slides show examples of possible optimization outputs\u0026mdash;how decisions by the inter-temporal choice problem changes based on the inputs.\u003c/p\u003e\n\u003ch2 id=\"risk-averse\"\u003eRisk Averse\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 
capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"more-return\"\u003eMore Return\u003c/h2\u003e\n\u003ch2 id=\"winning-game\"\u003eWinning Game\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[154.860597149863,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 152.989432556196,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 151.010433069881,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.201249715528,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 147.329750167852,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 145.539019666462,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 143.739371599600,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 141.984228587213,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 140.243839963791]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"more-risk\"\u003eMore Risk\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e 
\u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.2\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risky\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[388.525041338376,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.124420093987,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.632568775223,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.8644259086613,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.6330459746870,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 35.7667230511026,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 22.1970017374152,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
13.7754327365677,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8.54930907023498]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"loosing-game\"\u003eLoosing Game\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a loosing stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 1, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 1, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 1, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 1, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 1, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 1, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 1, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([0., 0., 0., 0., 0., 0., 0., 0., 0.])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 10\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 
1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently: do nothing if we have a loosing cause.\u003c/p\u003e\n\u003ch2 id=\"winning-game\"\u003eWinning Game\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1.00\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is SUPER winning stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[125.667556437602,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.474827418105,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 460.068836905327,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 868.972817783791,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4540.45893314523,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4219.93058738029,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3988.05775624984,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3996.89431939885,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3615.74982832315]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe made so much money that we are spending a lot of it and still spending it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/risk_apetite_preso/","tags":null,"title":"Inter-Temporal Choice"},{"categories":null,"contents":"The interaction of multiple agents/decision makers causes additional uncertainty\n","html":"\u003cp\u003eThe interaction of multiple agents/decision makers causes additional 
uncertainty\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinteraction_uncertainty/","tags":null,"title":"Interaction Uncertainty"},{"categories":null,"contents":"Big question: how to we align agents in an interactive, dynamic way (i.e. without instruction fine tuning which is hard).\nSequentiality is hard:\nwhat is the context/motivation? how to you transfer across contexts? how do you plan? Key Idea Language is information that helps agents predict the future; instructions is world modeling\ninstead of instructions =\u0026gt; actions (executor) instructions =\u0026gt; updated belief (world model) User intent =\u0026gt; action shouldn\u0026rsquo;t have LLM language representation in the middle as a bottleneck.\nThere is an underlying representation of the user\u0026rsquo;s preferences, you have to use language to coax it out of them.\nDynalang build model that takes vision + language as a joint input pass it through an auto-encoding representation have the world model predict the next-encoding representation Main Idea: modeling language/tokens/images as a joint latent representation over time.\nTraining objective:\nreconstruction loss against the future presentation: using \\(R_{i}\\) to predict \\(R_{i+1}\\) predict the reward over time regularize? Workflow take reward/preferences/behavior data structure learning to create the relationships between elements in the data structure ","html":"\u003cp\u003eBig question: how to we align agents in an interactive, dynamic way (i.e. 
without instruction fine tuning which is hard).\u003c/p\u003e\n\u003cp\u003eSequentiality is hard:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewhat is the context/motivation?\u003c/li\u003e\n\u003cli\u003ehow to you transfer across contexts?\u003c/li\u003e\n\u003cli\u003ehow do you plan?\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"key-idea\"\u003eKey Idea\u003c/h2\u003e\n\u003cp\u003eLanguage is information that helps agents \u003cstrong\u003epredict the future\u003c/strong\u003e; instructions is \u003cstrong\u003eworld modeling\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003einstead of instructions =\u0026gt; actions (executor)\u003c/li\u003e\n\u003cli\u003einstructions =\u0026gt; updated belief (world model)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eUser intent =\u0026gt; action shouldn\u0026rsquo;t have LLM language representation in the middle as a bottleneck.\u003c/p\u003e\n\u003cp\u003eThere is an underlying representation of the user\u0026rsquo;s preferences, you have to use language to coax it out of them.\u003c/p\u003e\n\u003ch2 id=\"dynalang\"\u003eDynalang\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ebuild model that takes vision + language as a joint input\u003c/li\u003e\n\u003cli\u003epass it through an auto-encoding representation\u003c/li\u003e\n\u003cli\u003ehave the world model predict the next-encoding representation\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eMain Idea: modeling language/tokens/images as a joint latent representation over time.\u003c/p\u003e\n\u003cp\u003eTraining objective:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ereconstruction loss against the future presentation: using \\(R_{i}\\) to predict \\(R_{i+1}\\)\u003c/li\u003e\n\u003cli\u003epredict the reward over time\u003c/li\u003e\n\u003cli\u003eregularize?\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"workflow\"\u003eWorkflow\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etake reward/preferences/behavior data\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhstructure_learning/\"\u003estructure learning\u003c/a\u003e to create the relationships between elements in the data structure\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinteractive_agent/","tags":null,"title":"Interactive Agents"},{"categories":null,"contents":" psycoacoustics ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpsycoacoustics/\"\u003epsycoacoustics\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhintersession_2023/","tags":null,"title":"Intersession 2023"},{"categories":null,"contents":"invariant subspaces are a property of operators; it is a subspace for which the operator in question on the overall space is also an operator of the subspace.\nconstituents an operator \\(T \\in \\mathcal{L}(V)\\) a subspace \\(U \\subset V\\) requirements \\(U\\) is considered invariant on \\(T\\) if \\(u \\in U \\implies Tu \\in U\\)\n(i.e. \\(U\\) is invariant under \\(T\\) if \\(T |_{U}\\) is an operator on \\(U\\))\nadditional information nontrivial invariant subspace (i.e. 
eigenstuff)\nA proof is not given yet, but \\(T \\in \\mathcal{L}(V)\\) has an invariant subspace that\u0026rsquo;s not \\(V\\) nor \\(\\{0\\}\\) if \\(\\dim V \u0026gt; 1\\) for complex number vector spaces and \\(\\dim V \u0026gt; 2\\) for real number vector spaces.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003es are a property of \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es; it is a subspace for which the \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e in question on the overall space is also an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e of the subspace.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ean \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U \\subset V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(U\\) is considered \u003cstrong\u003einvariant\u003c/strong\u003e on \\(T\\) if \\(u \\in U \\implies Tu \\in U\\)\u003c/p\u003e\n\u003cp\u003e(i.e. \\(U\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\) if \\(T |_{U}\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(U\\))\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"nontrivial-invariant-subspace--kbhinvariant-subspace-dot-md\"\u003enontrivial \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e(i.e. 
eigenstuff)\u003c/p\u003e\n\u003cp\u003eA proof is not given yet, but \\(T \\in \\mathcal{L}(V)\\) has an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e that\u0026rsquo;s not \\(V\\) nor \\(\\{0\\}\\) if \\(\\dim V \u0026gt; 1\\) for \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es and \\(\\dim V \u0026gt; 2\\) for \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003e \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinvariant_subspace/","tags":null,"title":"invariant subspace"},{"categories":null,"contents":"the inverse is the the opposite of an operation. As in, if you apply the inverse of an operation to the result of applying the original with the same operation it will cancel it.\nThat is,\n\\begin{equation} A * B * B^{-1} = A \\end{equation}\n\\(B^{-1}\\) is then the inverse of \\(B\\) for the \\(*\\) operation. This is operation dependent.\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e is the the opposite of an \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e. As in, if you apply the \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e of an \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e to the result of applying the original with the same \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e it will cancel it.\u003c/p\u003e\n\u003cp\u003eThat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA * B * B^{-1} = A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(B^{-1}\\) is then the \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e of \\(B\\) for the \\(*\\) operation. 
This is \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003e dependent.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinverses/","tags":null,"title":"inverse"},{"categories":null,"contents":" Generate a uniform number between 0 to 1. Get the inverse of the standard normal density function at that value (let number be \\(y\\), find the \\(x\\) such that \\(\\phi(y) = x\\)) return \\(x\\) ","html":"\u003col\u003e\n\u003cli\u003eGenerate a uniform number between 0 to 1.\u003c/li\u003e\n\u003cli\u003eGet the inverse of the \u003ca href=\"/posts/kbhgaussian_distribution/#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e at that value (let number be \\(y\\), find the \\(x\\) such that \\(\\phi(y) = x\\))\u003c/li\u003e\n\u003cli\u003ereturn \\(x\\)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinverse_transform_sampling/","tags":null,"title":"inverse transform sampling"},{"categories":null,"contents":"A Linear Map is invertable if it can be undone. It is called a nonsingular matrix\nconstituents A linear map \\(T \\in \\mathcal{L}(V,W)\\)\nrequirements A Linear Map \\(T \\in \\mathcal{L}(V,W)\\) is called invertable if \\(\\exists T^{-1} \\in \\mathcal{L}(W,V): T^{-1}T=I \\in \\mathcal{L}(V), TT^{-1} = I \\in \\mathcal{L}(W)\\).\n\u0026ldquo;a map is invertable if there is an inverse\u0026rdquo;: that combining the commutable inverse and itself will result in the identity map.\nadditional information matrix invertability Matrices whose determinants are not \\(0\\) (i.e. it is invertable) is called \u0026ldquo;nonsingular matrix\u0026rdquo;. If it doesn\u0026rsquo;t have an inverse, it is called a singular matrix.\nlinear map inverse is unique An invertable Linear Map has an unique inverse:\nProof:\nSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(\\exists S_1, S_2\\) which are both inverses of \\(T\\). 
We desire \\(S_1=S_2\\).\nSo:\n\\begin{equation} S_1 = S_1(TS_2) = (S_1T)S_2 = IS_{2} = S_2 \\end{equation}\ngiven Product of Linear Maps is associative.\n\\(S_1=S_2\\), as desired. \\(\\blacksquare\\)\ninjectivity and surjectivity implies invertability Suppose \\(T \\in \\mathcal{L}(V,W)\\); we desire that \\(T\\) is invertable IFF it is both injective and surjective.\nFirst, suppose \\(T\\) is invertible; that is, \\(\\exists T^{-1}: T^{-1}T=I, TT^{-1}=I\\) We desire that \\(T\\) is both injective and surjective.\nInjectivity: Suppose \\(Tv=Tu\\); we desire \\(u=v\\). \\(u = T^{-1}(Tu) = T^{-1}(Tv) = v\\) . We essentially to use the fact that \\(T^{-1}\\) is a function to \u0026ldquo;revert\u0026rdquo; the map of \\(T\\); as \\(T^{-1}\\) is a map, we know it has to revert to the same result.\nSurjectivity: Recall \\(T: V\\to W\\). WLOG let \\(w \\in W\\), \\(w=T(T^{-1}w)\\). Therefore, all \\(w\\) is in range of \\(T\\).\nSecond, suppose \\(T\\) is both injective and surjective. Define a transition \\(S\\) such that \\(T(Sw) = w\\) for all \\(w \\in W\\) (i.e. it hits just the right element to hit \\(w\\) as an input of \\(T\\).) This is made possible because \\(T\\) is surjective (because you can hit all \\(W\\)) and injective (which makes \\(S\\) not need to hit two different things or have two non-equal things accidentally map to the same thing.)\nEvidently, \\(T(Sw)=w \\forall w \\in W \\implies (TS) = I\\) by definition.\nWe now desire \\(ST = I\\). We have \\((TSTv) = (TS)(Tv) = ITv = Tv\\) by associativity of map multiplication. Now, \\((TSTv) = Tv \\implies T(ST)v = Tv\\) by associativity again. 
This implies that \\((ST)v=v\\) again because \\(T\\) is injective: so the same input will not produce two unique outputs.\nWe then can show \\(S\\) is a linear map in the usual way.\nHaving constructed the desired result, \\(\\blacksquare\\)\nAlternate Proof for Finite Dimensional \\(T\\) So given map to bigger space is not surjective and map to smaller space is not injective, we have that the dimension of \\(W = V\\), we leverage the basis of each and build the using the basis of domain.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e if it can be undone. It is called a \u003ca href=\"/posts/kbhinvertability/\"\u003enonsingular matrix\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eA linear map \\(T \\in \\mathcal{L}(V,W)\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T \\in \\mathcal{L}(V,W)\\) is called \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e if \\(\\exists T^{-1} \\in \\mathcal{L}(W,V): T^{-1}T=I \\in \\mathcal{L}(V), TT^{-1} = I \\in \\mathcal{L}(W)\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a map is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e if there is an inverse\u0026rdquo;: that combining the \u003cstrong\u003ecommutable\u003c/strong\u003e inverse and itself will result in the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"matrix--kbhmatricies-dot-md--invertability--kbhinvertability-dot-md\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e \u003ca 
href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eMatrices whose determinants are not \\(0\\) (i.e. it is invertable) is called \u0026ldquo;\u003ca href=\"/posts/kbhinvertability/\"\u003enonsingular matrix\u003c/a\u003e\u0026rdquo;. If it doesn\u0026rsquo;t have an \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e, it is called a \u003ca href=\"/posts/kbhinvertability/\"\u003esingular matrix\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"linear-map-inverse-is-unique\"\u003elinear map inverse is unique\u003c/h3\u003e\n\u003cp\u003eAn \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e has an unique inverse:\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(\\exists S_1, S_2\\) which are both inverses of \\(T\\). We desire \\(S_1=S_2\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS_1 = S_1(TS_2) = (S_1T)S_2 = IS_{2} = S_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egiven \u003ca href=\"/posts/kbhproduct_of_linear_maps/\"\u003eProduct of Linear Maps\u003c/a\u003e is \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\(S_1=S_2\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"injectivity--kbhinjectivity-dot-md--and-surjectivity--kbhsurjectivity-dot-md--implies-invertability--kbhinvertability-dot-md\"\u003e\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e implies \u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\); we desire that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e IFF it is both injective and surjective.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eFirst, suppose \\(T\\) is invertible\u003c/strong\u003e\u003c/strong\u003e; that is, \\(\\exists T^{-1}: T^{-1}T=I, TT^{-1}=I\\) We desire that \\(T\\) is both \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eInjectivity: Suppose \\(Tv=Tu\\); we desire \\(u=v\\). \\(u = T^{-1}(Tu) = T^{-1}(Tv) = v\\) . We essentially to use the fact that \\(T^{-1}\\) is a function to \u0026ldquo;revert\u0026rdquo; the map of \\(T\\); as \\(T^{-1}\\) is a map, we know it has to revert to the same result.\u003c/p\u003e\n\u003cp\u003eSurjectivity: Recall \\(T: V\\to W\\). WLOG let \\(w \\in W\\), \\(w=T(T^{-1}w)\\). Therefore, all \\(w\\) is in range of \\(T\\).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eSecond, suppose \\(T\\) is both injective and surjective\u003c/strong\u003e\u003c/strong\u003e. Define a transition \\(S\\) such that \\(T(Sw) = w\\) for all \\(w \\in W\\) (i.e. it hits just the right element to hit \\(w\\) as an input of \\(T\\).) 
This is made possible because \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e (because you can hit all \\(W\\)) and \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e (which makes \\(S\\) not need to hit two different things or have two non-equal things accidentally map to the same thing.)\u003c/p\u003e\n\u003cp\u003eEvidently, \\(T(Sw)=w \\forall w \\in W \\implies (TS) = I\\) by definition.\u003c/p\u003e\n\u003cp\u003eWe now desire \\(ST = I\\). We have \\((TSTv) = (TS)(Tv) = ITv = Tv\\) by associativity of map multiplication. Now, \\((TSTv) = Tv \\implies T(ST)v = Tv\\) by associativity again. This implies that \\((ST)v=v\\) again because \\(T\\) is injective: so the same input will not produce two unique outputs.\u003c/p\u003e\n\u003cp\u003eWe then can show \\(S\\) is a linear map in the usual way.\u003c/p\u003e\n\u003cp\u003eHaving constructed the desired result, \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch4 id=\"alternate-proof-for-finite-dimensional-t\"\u003eAlternate Proof for Finite Dimensional \\(T\\)\u003c/h4\u003e\n\u003cp\u003eSo given \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e and \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003emap to smaller space is not injective\u003c/a\u003e, we have that the dimension of \\(W = V\\), we leverage the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each and build the \u003cinverse\u003e using the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhinvertability/","tags":null,"title":"invertability"},{"categories":null,"contents":"For each term \\(t\\), let\u0026rsquo;s store all the documents containing \\(t\\). 
We identify each doc by DocID.\npostings list a \u0026ldquo;postings list\u0026rdquo; datastructure is a variable-length list which is appended to with \u0026ldquo;postings\u0026rdquo;. In this way, we can store a \u0026ldquo;posting\u0026rdquo; for every DocID with the index we encounter.\nFor instance, this could be a linked list.\nAlthough: we generally want to sort our postings list by documentID for ease of indexing.\nindexing process sort term vs. docID tuples by term alphabetically\nsort docIDs within each tuple by integer\nmerge multiple entries in the single document, keeping track of total frequency\nconsolidate information into postings list (term + document frequency (how many documents does the term show up): [postings, here, ...])\nfor instance\nuseful pre-processing cut character sequences into word tokens map text and query into the same form stemming or lemmatization removing stopwords (the, a, to, etc.?) \u0026mdash; but this may not be a good idea (song \u0026ldquo;to be or not to be\u0026rdquo;) handling phrases biword index Sometimes, single word don\u0026rsquo;t work well as tokens. sometimes, we use bi-grams instead to be indexed. Then, we can break any query down into series of bigrams (\u0026ldquo;stanford university palo alto\u0026rdquo; =\u0026gt; (stanford university) (university palo) (palo alto))\nWe already have \\(V^{2}\\) blow up here. So this is actually NOT the standard solution.\npositional index in each posting of a postings list, store the docID and a sublist of positions of the term within that document.\ntypically, for English/germanic/romatic languages, a positional index is 2-4 times as lange as a non-positional index. 
in particular, the size would be 35%-50% of the original text.\nBoolean Retrieval AND query \u0026ldquo;merge\u0026rdquo; two postings: identify intersections by two pointer at the head of both lists, check if the two pointers are pointing at the same docID.\nif the answer is \u0026ldquo;no\u0026rdquo;, advance the pointer pointed to the smaller docid if the answer is \u0026ldquo;yes\u0026rdquo;, advance both pointers once any list is exhausted, stop.\nAnd this is why we need the postings sorted.\nTypically, when you start, you\u0026rsquo;d like to start your searches on your smallest postings list.\nphrase-query retrieval phrase-query retrieval is the prcoess to process documents where an exact phrase appears. First index for the postings list of the entire phrase:\nthen do the Boolean Retrieval iteratively\u0026mdash;merge the phrases using AND queries first, then zoom into each document to merge their word positions, offset by one.\n","html":"\u003cp\u003eFor each term \\(t\\), let\u0026rsquo;s store all the documents containing \\(t\\). We identify each doc by DocID.\u003c/p\u003e\n\u003ch2 id=\"postings-list\"\u003epostings list\u003c/h2\u003e\n\u003cp\u003ea \u0026ldquo;\u003ca href=\"#postings-list\"\u003epostings list\u003c/a\u003e\u0026rdquo; datastructure is a variable-length list which is appended to with \u0026ldquo;postings\u0026rdquo;. In this way, we can store a \u0026ldquo;posting\u0026rdquo; for every DocID with the index we encounter.\u003c/p\u003e\n\u003cp\u003eFor instance, this could be a linked list.\u003c/p\u003e\n\u003cp\u003eAlthough: we generally want to sort our \u003ca href=\"#postings-list\"\u003epostings list\u003c/a\u003e by documentID for ease of indexing.\u003c/p\u003e\n\u003ch2 id=\"indexing-process\"\u003eindexing process\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003esort term vs. 
docID tuples by term alphabetically\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esort docIDs within each tuple by integer\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emerge multiple entries in the single document, keeping track of total frequency\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003econsolidate information into postings list (\u003ccode\u003eterm + document frequency (how many documents does the term show up): [postings, here, ...]\u003c/code\u003e)\u003c/p\u003e\n\u003cp\u003efor instance\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-28_10-13-30_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"useful-pre-processing\"\u003euseful pre-processing\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecut character sequences into word tokens\u003c/li\u003e\n\u003cli\u003emap text and query into the same form\u003c/li\u003e\n\u003cli\u003estemming or lemmatization\u003c/li\u003e\n\u003cli\u003eremoving stopwords (the, a, to, etc.?) \u0026mdash; but this may not be a good idea (song \u0026ldquo;to be or not to be\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"handling-phrases\"\u003ehandling phrases\u003c/h2\u003e\n\u003ch3 id=\"biword-index\"\u003ebiword index\u003c/h3\u003e\n\u003cp\u003eSometimes, single word don\u0026rsquo;t work well as tokens. sometimes, we use bi-grams instead to be indexed. Then, we can break any query down into series of bigrams (\u0026ldquo;stanford university palo alto\u0026rdquo; =\u0026gt; (stanford university) (university palo) (palo alto))\u003c/p\u003e\n\u003cp\u003eWe already have \\(V^{2}\\) blow up here. 
So this is actually \u003cstrong\u003eNOT\u003c/strong\u003e the standard solution.\u003c/p\u003e\n\u003ch3 id=\"positional-index\"\u003epositional index\u003c/h3\u003e\n\u003cp\u003ein each posting of a \u003ca href=\"#postings-list\"\u003epostings list\u003c/a\u003e, store the docID and a sublist of positions of the term within that document.\u003c/p\u003e\n\u003cp\u003etypically, for English/germanic/romatic languages, a positional index is 2-4 times as lange as a non-positional index. in particular, the size would be 35%-50% of the original text.\u003c/p\u003e\n\u003ch2 id=\"boolean-retrieval\"\u003eBoolean Retrieval\u003c/h2\u003e\n\u003ch3 id=\"and-query\"\u003eAND query\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;merge\u0026rdquo; two postings: identify intersections by two pointer at the head of both lists, check if the two pointers are pointing at the same docID.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif the answer is \u0026ldquo;no\u0026rdquo;, advance the pointer pointed to the smaller docid\u003c/li\u003e\n\u003cli\u003eif the answer is \u0026ldquo;yes\u0026rdquo;, advance both pointers\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eonce any list is exhausted, stop.\u003c/p\u003e\n\u003cp\u003eAnd this is why we need the postings sorted.\u003c/p\u003e\n\u003cp\u003eTypically, when you start, you\u0026rsquo;d like to start your searches on your smallest postings list.\u003c/p\u003e\n\u003ch2 id=\"phrase-query-retrieval\"\u003ephrase-query retrieval\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#phrase-query-retrieval\"\u003ephrase-query retrieval\u003c/a\u003e is the prcoess to process documents where an exact phrase appears. 
First index for the postings list of the entire phrase:\u003c/p\u003e\n\u003cp\u003ethen do the \u003ca href=\"#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e iteratively\u0026mdash;merge the phrases using AND queries first, then zoom into each document to merge their word positions, offset by one.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-28_10-19-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhinverted_index/","tags":null,"title":"Inverted Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhiob/","tags":null,"title":"iob"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhiptv/","tags":null,"title":"IPTV"},{"categories":null,"contents":"irrational numbers are real numbers that are not rational numbers.\nFormally:\n\\begin{equation} \\mathbb{C} = \\mathbb{R} \\backslash \\mathbb{Q} \\end{equation}\nwhere, \\(\\backslash\\) is subtracting two sets.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhirrational_number/\"\u003eirrational number\u003c/a\u003es are \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es that are not \u003ca href=\"/posts/kbhrational_number/\"\u003erational number\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{C} = \\mathbb{R} \\backslash \\mathbb{Q}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\backslash\\) is subtracting two \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhirrational_number/","tags":null,"title":"irrational number"},{"categories":null,"contents":"Motivation Large crowd navigation with sudden changes: unlikely events are out of likely sample. 
So, we want to bring in another distribution based on importance and not likelyness.\nGoals retains DESPOT garantees outperforms DESPOT and POMCP DESPOT with Importance Sampling take our initial belief sample trajectories according to Importance Sampling distribution calculate values of those states obtain value estimate based on weighted average of the values Importance Sampling of trajectories We define an importance distribution of some trajectory \\(\\xi\\):\n\\begin{equation} q(\\xi | b,\\pi) = q(s_0) \\prod_{t=0}^{D} q(s_{t+1}, o_{t+1} | s_{t}, a_{t+1}) \\end{equation}\nBackground Importance Sampling Suppose you have a function \\(f(s)\\) which isn\u0026rsquo;t super well integrate-able, yet you want:\n\\begin{equation} \\mu = \\mathbb{E}(f(s)) = \\int_{0}^{1} f(s)p(s) \\dd{s} \\end{equation}\nhow would you sample various \\(f(s)\\) effectively such that you end up with \\(\\hat{\\mu}\\) that\u0026rsquo;s close enough?\nWell, what if you have an importance distribution \\(q(s): S \\to \\mathbb{R}^{[0,1]}\\), which tells you how \u0026ldquo;important\u0026rdquo; to the expected value of the distribution a particular state is? 
Then, we can formulate a new, better normalization function called the \u0026ldquo;importance weight\u0026rdquo;:\n\\begin{equation} w(s) = \\frac{p(s)}{q(s)} \\end{equation}\nTherefore, this would make our estimator:\n\\begin{equation} \\hat{\\mu} = \\frac{\\sum_{n} f(s_{n}) w(s_{n})}{\\sum_{n} w(s_{n})} \\end{equation}\nTheoretic grantees So, there\u0026rsquo;s a distribution over \\(f\\):\n\\begin{equation} q(s) = \\frac{b(s)}{w_{\\pi}(s)} \\end{equation}\nwhere\n\\begin{equation} w(s) = \\frac{\\mathbb{E}_{b} \\qty( \\sqrt{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]})}{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]} \\end{equation}\nwhich measures how important a state is, where \\(\\pi\\) is the total discounted reward.\n","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eLarge crowd navigation with sudden changes: unlikely events are out of likely sample. So, we want to bring in another distribution based on \u003cstrong\u003eimportance\u003c/strong\u003e and not \u003cstrong\u003elikelyness\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"goals\"\u003eGoals\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eretains DESPOT garantees\u003c/li\u003e\n\u003cli\u003eoutperforms \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e and \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"despot--kbhdespot-dot-md--with-importance-sampling--org7062454\"\u003e\u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e with \u003ca href=\"#importance-sampling\"\u003eImportance Sampling\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etake our initial belief\u003c/li\u003e\n\u003cli\u003esample trajectories according to \u003ca href=\"#importance-sampling\"\u003eImportance Sampling\u003c/a\u003e distribution\u003c/li\u003e\n\u003cli\u003ecalculate values of those states\u003c/li\u003e\n\u003cli\u003eobtain value estimate based on weighted average of the 
values\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"importance-sampling--org7062454--of-trajectories\"\u003e\u003ca href=\"#importance-sampling\"\u003eImportance Sampling\u003c/a\u003e of trajectories\u003c/h3\u003e\n\u003cp\u003eWe define an \u003ca href=\"#importance-sampling\"\u003eimportance distribution\u003c/a\u003e of some trajectory \\(\\xi\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq(\\xi | b,\\pi) = q(s_0) \\prod_{t=0}^{D} q(s_{t+1}, o_{t+1} | s_{t}, a_{t+1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003ch3 id=\"importance-sampling\"\u003eImportance Sampling\u003c/h3\u003e\n\u003cp\u003eSuppose you have a function \\(f(s)\\) which isn\u0026rsquo;t super well integrate-able, yet you want:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mu = \\mathbb{E}(f(s)) = \\int_{0}^{1} f(s)p(s) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehow would you sample various \\(f(s)\\) effectively such that you end up with \\(\\hat{\\mu}\\) that\u0026rsquo;s close enough?\u003c/p\u003e\n\u003cp\u003eWell, what if you have an \u003ca href=\"#importance-sampling\"\u003eimportance distribution\u003c/a\u003e \\(q(s): S \\to \\mathbb{R}^{[0,1]}\\), which tells you how \u0026ldquo;important\u0026rdquo; to the expected value of the distribution a particular state is? 
Then, we can formulate a new, better normalization function called the \u0026ldquo;\u003ca href=\"#importance-sampling\"\u003eimportance weight\u003c/a\u003e\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(s) = \\frac{p(s)}{q(s)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, this would make our estimator:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\mu} = \\frac{\\sum_{n} f(s_{n}) w(s_{n})}{\\sum_{n} w(s_{n})}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"theoretic-grantees\"\u003eTheoretic grantees\u003c/h4\u003e\n\u003cp\u003eSo, there\u0026rsquo;s a distribution over \\(f\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq(s) = \\frac{b(s)}{w_{\\pi}(s)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw(s) = \\frac{\\mathbb{E}_{b} \\qty( \\sqrt{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]})}{[\\mathbb{E}(v|s, \\pi )]^{2} + [Var(v|s, \\pi )]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich measures how important a state is, where \\(\\pi\\) is the total discounted reward.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhis_despot/","tags":null,"title":"IS-DESPOT"},{"categories":null,"contents":"An isomorphism is an invertable Linear Map. Two vector spaces are called isomorphic if there is an isomorphism from one to another.\n\u0026ldquo;A linear map that maintain the correct structure of the structure.\u0026rdquo;\nThis makes the vector spaces that are isomorphic \u0026ldquo;equivalent\u0026rdquo;, because the isomorphism is the equivalence relationship. 
Of course, they are still not equal.\nGenerally, isomorphisms can only be built between vector spaces over the same field.\nadditional information matrices We know we can represent Linear Maps as matricies.\nSo, given some \\(A\\), we have an inverse \\(A^{-1}\\).\nSo:\n\\begin{equation} A A^{-1} = I = A^{-1} A \\end{equation}\nIn this case, the \\(I\\) is the identity map: \\(Iv = v\\).\ntwo vector spaces are isomorphic IFF they have the same dimension note: this relationship works over the SAME field \\(\\mathbb{F}\\), otherwise lin comb can\u0026rsquo;t work\nGiven vector spaces \\(I,W\\) isomorphic, we desire \\(dim V = dim W\\) Suppose \\(V\\) and \\(W\\) are finite-dimensional vector spaces that are isomorphic. There means that there is an isomorphism, an invertable Linear Map between them which we will name \\(T \\in \\mathcal{L}(V,W)\\).\nBecause \\(T\\) is invertable, and injectivity and surjectivity implies invertability, so \\(null\\ T = \\{0\\}\\) and \\(range\\ T = W\\).\nLastly, we have that:\n\\begin{align} \\dim V \u0026amp;= \\dim null\\ T + \\dim range\\ T \\\\ \u0026amp;= 0 + dim\\ W \\\\ \u0026amp;= dim\\ W \\end{align}\nas desired.\nGiven \\(dim V = dim W\\), show the vector spaces are isomorphic Take \\(v_1, \\dots v_{n}\\) a basis of \\(V\\), and \\(w_1 \\dots w_{n}\\) a basis of \\(W\\).\nDefine a map by basis of domain mapping \\(Tv_{j} = w_{j}\\), that is, \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n}\\).\nBecause \\(w_1 \\dots w_{n}\\) spans \\(W\\) (it is a basis after all), \\(T\\) is surjective.\nAn input with some set of \\(c_{j}\\) is in the null space of \\(T\\) if \\(c_1 w_1 + \\dots + c_{n}w_{n}\\) adds up to \\(0\\) (by definition, as that\u0026rsquo;s the output of \\(T\\)).\nBecause \\(w_1 \\dots w_{n}\\) is a basis, the only linear combination thereof which makes \\(0\\) is by taking all \\(c_1 = \\dots c_{n} = 0\\). 
This make it so that the only valid input to \\(T\\) that will map to \\(0\\) requires \\(c_1=\\dots c_{n} = 0\\), making \\(null\\ T = \\{0\\}\\), showing that \\(T\\) is injective.\nHaving shown \\(T\\) is injective and surjective, it is an isomorphism, as desired. \\(\\blacksquare\\)\nmatricies and Linear Maps from the right dimensions are isomorphic Formally: suppose \\(v_1 \\dots v_{n}\\) is a basis of \\(V\\), and \\(w_1 \\dots w_{m}\\) is a basis of \\(W\\), then, \\(\\mathcal{M}\\) the matrixify operation that takes Linear Maps and turn them into matricies is an isomorphism between \\(\\mathcal{L}(V,W)\\) and \\(\\mathbb{F}^{m,n}\\).\nThe matrixify operation \\(\\mathcal{M}\\) is linear, because matricies are linear. The only thing that \\(\\mathcal{M}\\) will turn into the zero matrix is the zero Linear Map (i.e. \\(\\mathcal{M}(t)=0 \\implies T v_{k} = 0\\ \\forall k 1 \\dots n\\) by construction of matricies, and because the \\(v_{k}\\) are a basis, \\(T v_{k} =0 \\implies T=0\\)), so the null space of \\(\\mathcal{M}\\) is \\(\\{0\\}\\), making \\(\\mathcal{M}\\) injective.\nNow, because of the fact one can construct a matrix based on the scalars applied to map the input basis to the output basis; i.e. that, for any map \\(T \\in \\mathcal{L}(V,W)\\):\n\\begin{equation} Tv_{k} = \\sum_{j=i}^{m}A_{j,k} w_{j} \\end{equation}\nfor some matrix \\(\\mathcal{M}(T) = A \\in \\mathbb{F}^{m,n}\\), we have that \\(\\mathcal{M}\\) can be used to produce any map between \\(V\\) and \\(W\\). This makes \\(\\mathcal{M}\\) surjective.\n\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\) \\(\\mathcal{L}(V,W)\\) is isomorphic to the set of matricies \\(\\mathbb{F}^{m,n}\\) where \\(w_1 \\dots w_{m}\\) is a basis for \\(W\\) and \\(v_1 \\dots v_{n}\\) is a basis for \\(V\\). 
two vector spaces are isomorphic IFF they have the same dimension, so \\(\\dim \\mathcal{L}(V,W) = \\dim \\mathbb{F}^{m,n} = m\\cdot n\\) (see \\(\\mathbb{F}^{m,n}\\)).\nHaving claimed that \\(w_1 \\dots w_{m}\\) is a basis of \\(W\\) and \\(v_1 \\dots v_{n}\\) is a basis of \\(V\\), \\(W\\) and \\(V\\) have dimensions \\(m\\) and \\(n\\) respectively. So \\((\\dim V)(\\dim W) = n \\cdot m = m\\cdot n = \\dim \\mathbb{F}^{m,n} = \\dim \\mathcal{L}(V,W)\\), as desired.\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e is an \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e. Two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es are called \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e if there is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e from one to another.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;A linear map that maintain the correct structure of the structure.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThis makes the \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es that are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e \u0026ldquo;equivalent\u0026rdquo;, because the \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e is the \u003ca href=\"/posts/kbhequivalence/\"\u003eequivalence\u003c/a\u003e relationship. 
Of course, they are still not equal.\u003c/p\u003e\n\u003cp\u003eGenerally, \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003es can only be built between \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es over the same \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"matrices\"\u003ematrices\u003c/h3\u003e\n\u003cp\u003eWe know we can represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es as \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo, given some \\(A\\), we have an inverse \\(A^{-1}\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA A^{-1} = I = A^{-1} A\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, the \\(I\\) is the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map: \\(Iv = v\\).\u003c/p\u003e\n\u003ch3 id=\"two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003enote: this relationship works over the SAME field \\(\\mathbb{F}\\), otherwise lin comb can\u0026rsquo;t work\u003c/strong\u003e\u003c/p\u003e\n\u003ch4 id=\"given-vector-spaces-i-w-isomorphic--kbhisomorphism-dot-md--we-desire-dim-v-dim-w\"\u003eGiven vector spaces \\(I,W\\) \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e, we desire \\(dim V = dim W\\)\u003c/h4\u003e\n\u003cp\u003eSuppose \\(V\\) and \\(W\\) are finite-dimensional \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es that are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e. 
There means that there is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e, an \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e between them which we will name \\(T \\in \\mathcal{L}(V,W)\\).\u003c/p\u003e\n\u003cp\u003eBecause \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, and \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e, so \\(null\\ T = \\{0\\}\\) and \\(range\\ T = W\\).\u003c/p\u003e\n\u003cp\u003eLastly, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim V \u0026amp;= \\dim null\\ T + \\dim range\\ T \\\\\n\u0026amp;= 0 + dim\\ W \\\\\n\u0026amp;= dim\\ W\n\\end{align}\u003c/p\u003e\n\u003cp\u003eas desired.\u003c/p\u003e\n\u003ch4 id=\"given-dim-v-dim-w-show-the-vector-spaces-are-isomorphic--kbhisomorphism-dot-md\"\u003eGiven \\(dim V = dim W\\), show the vector spaces are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eTake \\(v_1, \\dots v_{n}\\) a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), and \\(w_1 \\dots w_{n}\\) a basis of \\(W\\).\u003c/p\u003e\n\u003cp\u003eDefine a map by \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e mapping \\(Tv_{j} = w_{j}\\), that is, \\(T(c_1v_1 + \\dots + c_{n}v_{n}) = c_1 w_1 + \\dots + c_{n} w_{n}\\).\u003c/p\u003e\n\u003cp\u003eBecause \\(w_1 \\dots w_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(W\\) (it is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e after all), \\(T\\) is \u003ca 
href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAn input with some set of \\(c_{j}\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) if \\(c_1 w_1 + \\dots + c_{n}w_{n}\\) adds up to \\(0\\) (by definition, as that\u0026rsquo;s the output of \\(T\\)).\u003c/p\u003e\n\u003cp\u003eBecause \\(w_1 \\dots w_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, the only \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e thereof which makes \\(0\\) is by taking all \\(c_1 = \\dots c_{n} = 0\\). This make it so that the only valid \u003cem\u003einput\u003c/em\u003e to \\(T\\) that will map to \\(0\\) requires \\(c_1=\\dots c_{n} = 0\\), making \\(null\\ T = \\{0\\}\\), showing that \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHaving shown \\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e and \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, it is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e, as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"matricies--kbhmatricies-dot-md--and-linear-map--kbhlinear-map-dot-md--s-from-the-right-dimension--kbhdimension-dot-md--s-are-isomorphic--kbhisomorphism-dot-md\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e and \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es from the right \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003es are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFormally: suppose \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), and \\(w_1 \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(W\\), then, \\(\\mathcal{M}\\) the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003eify operation that takes \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es and turn them into \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e is an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e between \\(\\mathcal{L}(V,W)\\) and \\(\\mathbb{F}^{m,n}\\).\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003eify operation \\(\\mathcal{M}\\) is linear, because \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are linear. The only thing that \\(\\mathcal{M}\\) will turn into the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e (i.e. 
\\(\\mathcal{M}(t)=0 \\implies T v_{k} = 0\\ \\forall k 1 \\dots n\\) by construction of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e, and because the \\(v_{k}\\) are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, \\(T v_{k} =0 \\implies T=0\\)), so the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(\\mathcal{M}\\) is \\(\\{0\\}\\), making \\(\\mathcal{M}\\) \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNow, because of the fact one can construct a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e based on the scalars applied to map the input \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e to the output \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e; i.e. that, for any map \\(T \\in \\mathcal{L}(V,W)\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv_{k} = \\sum_{j=i}^{m}A_{j,k} w_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some matrix \\(\\mathcal{M}(T) = A \\in \\mathbb{F}^{m,n}\\), we have that \\(\\mathcal{M}\\) can be used to produce any map between \\(V\\) and \\(W\\). This makes \\(\\mathcal{M}\\) \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"dim-mathcal-l--v-w----dim-v----dim-w\"\u003e\\(\\dim \\mathcal{L}(V,W) = (\\dim V)(\\dim W)\\)\u003c/h3\u003e\n\u003cp\u003e\\(\\mathcal{L}(V,W)\\) is \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e to the set of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e \\(\\mathbb{F}^{m,n}\\) where \\(w_1 \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e for \\(W\\) and \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e for \\(V\\). 
\u003ca href=\"#two-vector-spaces-are-isomorphic-iff-they-have-the-same-dimension\"\u003etwo vector spaces are isomorphic IFF they have the same dimension\u003c/a\u003e, so \\(\\dim \\mathcal{L}(V,W) = \\dim \\mathbb{F}^{m,n} = m\\cdot n\\) (see \u003ca href=\"/posts/kbhmatricies/#mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\)\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eHaving claimed that \\(w_1 \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(W\\) and \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\), \\(W\\) and \\(V\\) have \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003es \\(m\\) and \\(n\\) respectively. So \\((\\dim V)(\\dim W) = n \\cdot m = m\\cdot n = \\dim \\mathbb{F}^{m,n} = \\dim \\mathcal{L}(V,W)\\), as desired.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhisomorphism/","tags":null,"title":"isomorphism"},{"categories":null,"contents":" Date Notes \u0026lt;2022-04-13 Wed\u0026gt; PCP April Checkin \u0026lt;2022-04-13 Wed\u0026gt; Alivio April Checkin \u0026lt;2022-04-16 Sat\u0026gt; GreenSwing April Checkin \u0026lt;2022-04-25 Mon\u0026gt; Pollen April Checkin \u0026lt;2022-04-30 Sat\u0026gt; Logan\u0026rsquo;s Team Checkin \u0026lt;2022-05-02 Mon\u0026gt; Anna\u0026rsquo;s Team Checkin TODO Stack Get asthma kids leads for Alivio GreenSwing Hiring: Fufilling Orders, MechE Conrad money? 
Get Mentors for Pollen =\u0026gt; Figma lady ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eNotes\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-13 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpcp_april_checkin/\"\u003ePCP April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-13 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhalivio_april_checkin/\"\u003eAlivio April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-16 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhgreenswing_april_checkin/\"\u003eGreenSwing April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-25 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003ePollen April Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-30 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlogan_s_team_check_in/\"\u003eLogan\u0026rsquo;s Team Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-02 
Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhanna_s_team_checkin/\"\u003eAnna\u0026rsquo;s Team Checkin\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"stack\"\u003e\u003cspan class=\"org-todo todo TODO\"\u003eTODO\u003c/span\u003e Stack\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eGet asthma kids leads for Alivio\u003c/li\u003e\n\u003cli\u003eGreenSwing Hiring: Fufilling Orders, MechE\u003c/li\u003e\n\u003cli\u003eConrad money?\u003c/li\u003e\n\u003cli\u003eGet Mentors for Pollen =\u0026gt; Figma lady\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhistudio_meeting_notes/","tags":null,"title":"iStudio Meeting Notes"},{"categories":null,"contents":"If a student has ability \\(a\\), and a probably is \\(d\\) difficulty, the probability of a student getting something right:\n\\begin{equation} \\sigma (a-d) \\end{equation}\nthis doesn\u0026rsquo;t consider SLIPPING at all.\n","html":"\u003cp\u003eIf a student has ability \\(a\\), and a probably is \\(d\\) difficulty, the probability of a student getting something right:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma (a-d)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis doesn\u0026rsquo;t consider \u003cstrong\u003eSLIPPING\u003c/strong\u003e at all.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhitem_response_theory/","tags":null,"title":"Item Response Theory"},{"categories":null,"contents":" https://en.wikipedia.org/wiki/It%C3%B4%27s_lemma\nfor integrating Differential Equations with Brownian Motion.\n","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-14_14-08-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/It%C3%B4%27s_lemma\"\u003ehttps://en.wikipedia.org/wiki/It%C3%B4%27s_lemma\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003efor integrating \u003ca 
href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e with \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhito_intergral/","tags":null,"title":"Itô Intergral"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhjohn_corso/","tags":null,"title":"John Corso"},{"categories":null,"contents":"for random variables \\(X, Y\\), the joint probability distribution is the probability of both of them happening at once.\n\\begin{equation} p(x,y) \\end{equation}\nThe most fundamental solution can be derived with a table where all complete probabilities are listed. They are going to be too large to practically store.\nprobability of the joint of a Bayes Net \\begin{equation} p(joint) = \\prod_{i \\in BN}^{} p(x_{i} | parents(x_{i})) \\end{equation}\n","html":"\u003cp\u003efor \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es \\(X, Y\\), the \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e is the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of both of them happening at once.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x,y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe most fundamental solution can be derived with a table where all complete \u003ca href=\"/posts/kbhprobability/\"\u003eprobabilities\u003c/a\u003e are listed. 
They are going to be too large to practically store.\u003c/p\u003e\n\u003ch2 id=\"probability-of-the-joint-of-a-bayes-net--kbhbaysian-network-dot-md\"\u003eprobability of the joint of a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\np(joint) = \\prod_{i \\in BN}^{} p(x_{i} | parents(x_{i}))\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhjoint_probability_distribution/","tags":null,"title":"joint probability distribution"},{"categories":null,"contents":"Punchlines Screw you, I\u0026rsquo;m not Stupid\u0026hellip;. I\u0026rsquo;m just Chinese. Joke\u0026rsquo;s on you, I\u0026rsquo;m both Chinese AND stupid. Setups Why is it that toilets have a refractory period? The satanic church fights back against the Texas abortion ban. Completed jokes Where did Texas gun control funding go? its illegal to own more than 6 d*l**s there Not only did you have to pass normal tests, you had to pass like a thousand COVID tests ","html":"\u003ch2 id=\"punchlines\"\u003ePunchlines\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eScrew you, I\u0026rsquo;m not Stupid\u0026hellip;. I\u0026rsquo;m just Chinese.\u003c/li\u003e\n\u003cli\u003eJoke\u0026rsquo;s on you, I\u0026rsquo;m both Chinese \u003cstrong\u003eAND\u003c/strong\u003e stupid.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"setups\"\u003eSetups\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhy is it that toilets have a refractory period?\u003c/li\u003e\n\u003cli\u003eThe satanic church fights back against the Texas abortion ban.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"completed-jokes\"\u003eCompleted jokes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhere did Texas gun control funding go? 
its illegal to own more than 6 d*l**s there\u003c/li\u003e\n\u003cli\u003eNot only did you have to pass normal tests, you had to pass like a thousand COVID tests\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhjokes/","tags":null,"title":"jokes"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.642633\nOne-Liner Developed a kitchen sink of diagnoses tools and correlated it with biomarkers.\nNovelty The kitchen sink of data collection (phones, tablet, eye tracker, microphone, wristband) and the kitchen sink of noninvasive data imaging, psych, speech assesment, clinical metadata.\nNotable Methods Here\u0026rsquo;s their kitchen sink\nI have no idea why a thermal camera is needed\nKey Figs Here are the features they extracted\nDeveloped the features collected via a method similar to action research, did two passes and refined/added information after preliminary analysis. Figure above also include info about whether or not the measurement was task specific.\nand there are the biomarkers and medical data they collected\nAnd then they correlated their kitchen sink with biomarker from the tap\nNew Concepts spinal tap Notes ","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.642633\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eDeveloped a kitchen sink of diagnoses tools and correlated it with biomarkers.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eThe kitchen sink of data collection (phones, tablet, eye tracker, microphone, wristband) and the kitchen sink of noninvasive data imaging, psych, speech assesment, clinical metadata.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eHere\u0026rsquo;s their kitchen sink\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-06-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eI have \u003cstrong\u003e\u003cstrong\u003eno 
idea\u003c/strong\u003e\u003c/strong\u003e why a thermal camera is needed\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cp\u003eHere are the features they extracted\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-07-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eDeveloped the features collected via a method similar to action research, did two passes and refined/added information after preliminary analysis. Figure above also include info about whether or not the measurement was task specific.\u003c/p\u003e\n\u003cp\u003eand there are the biomarkers and medical data they collected\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-09-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAnd then they correlated their kitchen sink with biomarker from the tap\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-14-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspinal_tap/\"\u003espinal tap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhjonell_2021/","tags":["ntj"],"title":"Jonell 2021"},{"categories":null,"contents":"POMDPs can become computationally quite intractable. Alternative: a stochastic, memoryless policy. 
A policy should be stochastic in order to satisfy certain conditions during adversarial games (think bluffing).\nJSJ is basically Q-Learning adapted for POMDPs:\n\\begin{equation} \\begin{cases} Q^{\\pi}(s,a) = \\sum_{t=1}^{\\infty}\\mathbb{E}_{\\pi} [r_{t}- R^{\\pi}|s, a] \\\\ Q^{\\pi}(o,a) = \\mathbb{E}_{s}[Q^{\\pi}(s,a) | M(s) = o] \\end{cases} \\end{equation}\nwhere \\(M\\) is a mapping between states and possible observations.\nPolicy Improvement Now, we want to maximise:\n\\begin{equation} \\Delta_{o}(a) = Q(o,a) - V(o) \\end{equation}\n\u0026ldquo;if an action \\(a\\) results in a better value than the expected value, we want to upweight that action.\u0026rdquo;\nWe further normalise this:\n\\begin{equation} \\delta_{o}(a) = \\Delta_{o}(a) - \\frac{1}{|A|} \\sum_{a\u0026rsquo; \\in A} \\Delta_{o}(a\u0026rsquo;) \\end{equation}\n\u0026ldquo;how does the diff of my action is considering improve over all other actions (i.e. \u0026ldquo;maybe all actions have similar diffs\u0026rdquo;).\nNow, substitution time:\n\\begin{equation} \\delta_{o}(a) = \\qty(Q(o,a) - V(o)) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)} Q(o,a\u0026rsquo;) - V(o) \\end{equation}\nWhich, after simplification (the two \\(V\\) cancels out), we actually get:\n\\begin{equation} \\delta_{o}(a) = Q(o,a) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)}^{} Q(o,a\u0026rsquo;) \\end{equation}\nwhich makes sense; \u0026ldquo;how does our current action does better than all others\u0026rdquo;. To obtain \\(Q(o,a)\\), see the big function above.\nFinally, having defined our update step, we can now let the good times roll\u0026mdash;-gradient ascent! 
For some action \\(a\\) at observation \\(o\\) and learning rate we update our policy:\n\\begin{equation} Q^{\\pi}(a|o) = Q^{\\pi}(a|o) + \\varepsilon \\delta_{o}(a) \\end{equation}\nWe can then use it to take some more actions, compute more deltas, repeat.\nPolicy Evaluation ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es can become computationally quite intractable. Alternative: a stochastic, memoryless policy. A policy should be stochastic in order to satisfy certain conditions during adversarial games (think bluffing).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhjsj/\"\u003eJSJ\u003c/a\u003e is basically \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#q-learning\"\u003eQ-Learning\u003c/a\u003e adapted for \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nQ^{\\pi}(s,a) = \\sum_{t=1}^{\\infty}\\mathbb{E}_{\\pi} [r_{t}- R^{\\pi}|s, a] \\\\\nQ^{\\pi}(o,a) = \\mathbb{E}_{s}[Q^{\\pi}(s,a) | M(s) = o]\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(M\\) is a mapping between states and possible observations.\u003c/p\u003e\n\u003ch2 id=\"policy-improvement\"\u003ePolicy Improvement\u003c/h2\u003e\n\u003cp\u003eNow, we want to maximise:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta_{o}(a) = Q(o,a) - V(o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;if an action \\(a\\) results in a better value than the expected value, we want to upweight that action.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe further normalise this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{o}(a) = \\Delta_{o}(a) - \\frac{1}{|A|} \\sum_{a\u0026rsquo; \\in A} \\Delta_{o}(a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;how does the diff of my action is considering improve over all other actions (i.e. 
\u0026ldquo;maybe all actions have similar diffs\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eNow, substitution time:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{o}(a) = \\qty(Q(o,a) - V(o)) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)} Q(o,a\u0026rsquo;) - V(o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich, after simplification (the two \\(V\\) cancels out), we actually get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{o}(a) = Q(o,a) - \\frac{1}{|A|} \\sum_{(a\u0026rsquo; \\in A)}^{} Q(o,a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich makes sense; \u0026ldquo;how does our current action does better than all others\u0026rdquo;. To obtain \\(Q(o,a)\\), see the big function above.\u003c/p\u003e\n\u003cp\u003eFinally, having defined our update step, we can now let the good times roll\u0026mdash;-gradient ascent! For some action \\(a\\) at observation \\(o\\) and learning rate we update our policy:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ^{\\pi}(a|o) = Q^{\\pi}(a|o) + \\varepsilon \\delta_{o}(a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can then use it to take some more actions, compute more deltas, repeat.\u003c/p\u003e\n\u003ch2 id=\"policy-evaluation\"\u003ePolicy Evaluation\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-08_09-54-59_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhjsj/","tags":null,"title":"JSJ"},{"categories":null,"contents":"Ka\u0026rsquo;chava is described as a \u0026ldquo;superfood\u0026rdquo; which is used as a meal replacement to manage hunger.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhka_chava/\"\u003eKa\u0026rsquo;chava\u003c/a\u003e is described as a \u0026ldquo;superfood\u0026rdquo; which is used as a \u003ca href=\"/posts/kbhmeal_replacement/\"\u003emeal replacement\u003c/a\u003e to manage 
hunger.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhka_chava/","tags":null,"title":"Ka'Chava"},{"categories":null,"contents":" Orbits of planetary bodies are ellipses with the sun at one of the two foci Drawing a line from the sun to the orbiting body, they would sweep out equal areas Planets that are closer to the sun have much shorter periods than that Squares of the periods of the planets is equal to the cubes of the distance from the planet to the sun ","html":"\u003col\u003e\n\u003cli\u003eOrbits of planetary bodies are ellipses with the sun at one of the two foci\u003c/li\u003e\n\u003cli\u003eDrawing a line from the sun to the orbiting body, they would sweep out equal areas\u003c/li\u003e\n\u003cli\u003ePlanets that are closer to the sun have much shorter periods than that\n\u003cul\u003e\n\u003cli\u003eSquares of the periods of the planets is equal to the cubes of the distance from the planet to the sun\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkepler_s_laws_of_planetary_motion/","tags":null,"title":"Kepler's Laws of Planetary Motion"},{"categories":null,"contents":"kernel smoothing is a way of smoothing a utility function over continuous state space despite only sampling a discrete set of the states.\n\\begin{equation} U_{\\theta}(s) = \\theta^{T} \\beta(s) \\end{equation}\nWe multiply a vector \\(\\theta_{j}\\), the utility of being in each state \\(s_{j}\\) a basis function, which smears, generated for each \\(i\\) of known discrete state we have:\n\\begin{equation} \\beta_{i}(s) = \\frac{k(s, s_{i})}{\\sum_{j}^{} k(s, s_{j})} \\end{equation}\nwhere, \\(k\\) is the kernel function, a function inversely proportional to how close the two states are:\nk(s,sj) is a normalization factor and doesn\u0026rsquo;t need to be computed at every call.\n\\begin{equation} k(s, s\u0026rsquo;) = \\max \\qty(d(s,s\u0026rsquo;), \\epsilon)^{-1} \\end{equation}\nwhere \\(d\\) is a 
measure of distance. We clip this function at \\(\\epsilon\\) to prevent inverting \\(0\\).\ngaussian kernel There is an alternate state smoothing function which is called gaussian kernel, which allows you to control the degree of smoothing between two states through a parameter \\(\\sigma\\):\n\\begin{equation} k(s,s\u0026rsquo;) = \\exp \\qty( - \\frac{d(s,s\u0026rsquo;)^{2}}{2 \\sigma^{2}}) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel smoothing\u003c/a\u003e is a way of smoothing a \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function over \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state space despite only sampling a discrete set of the states.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{\\theta}(s) = \\theta^{T} \\beta(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe multiply a vector \\(\\theta_{j}\\), the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of being in each state \\(s_{j}\\) a basis function, which smears, generated for each \\(i\\) of known discrete state we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{i}(s) = \\frac{k(s, s_{i})}{\\sum_{j}^{} k(s, s_{j})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(k\\) is the \u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel function\u003c/a\u003e, a function inversely proportional to how close the two states are:\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ek(s,sj)\u003c/strong\u003e\u003c/strong\u003e is a normalization factor and doesn\u0026rsquo;t need to be computed at every call.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk(s, s\u0026rsquo;) = \\max \\qty(d(s,s\u0026rsquo;), \\epsilon)^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is a measure of distance. 
We clip this function at \\(\\epsilon\\) to prevent inverting \\(0\\).\u003c/p\u003e\n\u003ch2 id=\"gaussian--kbhgaussian-distribution-dot-md--kernel\"\u003e\u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e kernel\u003c/h2\u003e\n\u003cp\u003eThere is an alternate state smoothing function which is called \u003ca href=\"#gaussian--kbhgaussian-distribution-dot-md--kernel\"\u003egaussian kernel\u003c/a\u003e, which allows you to control the degree of smoothing between two states through a parameter \\(\\sigma\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk(s,s\u0026rsquo;) = \\exp \\qty( - \\frac{d(s,s\u0026rsquo;)^{2}}{2 \\sigma^{2}})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkernel_smoothing/","tags":null,"title":"kernel smoothing"},{"categories":null,"contents":"Keynsian Politics is a economy strategy to support large projects via the government to boost economic output (i.e. that the economy needs a minder, but is generally free-sustaining.)\nSee also: Keynsian Economics was not trying to entirely replace markets\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Politics\u003c/a\u003e is a economy strategy to support large projects via the government to boost economic output (i.e. 
that the economy needs a minder, but is generally free-sustaining.)\u003c/p\u003e\n\u003cp\u003eSee also: \u003ca href=\"/posts/kbhhow_did_economists_get_it_so_wrong/#id-70e8aae4-7410-46d7-93ab-504ef8effc79-keynsian-economics-was-not-trying-to-entirely-replace-markets\"\u003eKeynsian Economics was not trying to entirely replace markets\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkeynsian_politics/","tags":null,"title":"Keynsian Politics"},{"categories":null,"contents":"in algorithms, key\n","html":"\u003cp\u003ein algorithms, key\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkeys/","tags":null,"title":"keys (algorithms)"},{"categories":null,"contents":"kirchoff\u0026rsquo;s laws is a set of laws to deal with complicated circuits, and it says\u0026hellip;\nany junction, the current entering equals the current leaving the sum of voltage across a closed loop is \\(0\\) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkirchoff_s_laws/\"\u003ekirchoff\u0026rsquo;s laws\u003c/a\u003e is a set of laws to deal with complicated circuits, and it says\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eany junction, the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e entering equals the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e leaving\u003c/li\u003e\n\u003cli\u003ethe sum of \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e across a closed loop is \\(0\\)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkirchoff_s_laws/","tags":null,"title":"kirchoff's laws"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhkl_divergence/","tags":null,"title":"KL Divergence"},{"categories":null,"contents":"KLA is a semiconductor process control company. 
https://www.kla.com/ Rick Wallace is the CEO.\n135000 employees 8.2B of revenue 72-300 tools 15% of revenue in R\u0026amp;D Their main business is in automatically inspecting chips and wafers in time.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e is a \u003ca href=\"/posts/kbhsemiconductor/\"\u003esemiconductor\u003c/a\u003e process control company. \u003ca href=\"https://www.kla.com/\"\u003ehttps://www.kla.com/\u003c/a\u003e \u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e is the CEO.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e135000 employees\u003c/li\u003e\n\u003cli\u003e8.2B of revenue\u003c/li\u003e\n\u003cli\u003e72-300 tools\u003c/li\u003e\n\u003cli\u003e15% of revenue in R\u0026amp;D\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTheir main business is in automatically inspecting chips and wafers in time.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkla/","tags":null,"title":"KLA"},{"categories":null,"contents":"controllable We want \\(P(Y|X) = p\\), for a specific \\(p\\) that we specify.\nfine-grained control ideally, instead of optimizing over entire expected values, we want to tune specific utputs\nSuccess in Editing Say we edited some \\(M\\), specifying a viper is a vertebrate.\nIdeally, this should also edit the other related information:\n\\(P\\) (paraphrases)j: viper and vertebrates \\(E\\) (logical entailments): a viper has a brain And we shouldn\u0026rsquo;t touch:\n\\(R\\) (other stuff): Chile is a country \\(LN\\) (local neural data): a viper is venomous Hypernetwork Weight Editing\u0026rsquo;s Drawbacks harder to fix errors than creating them harder to retain preformance on local data than random data hander to generalize to entailed data than paraphrases Updates improves consistency Information Deletion \u0026ldquo;deleting information\u0026rdquo; from LLMs is undefined RLHF, SFT, etc. 
HIDES rather than ddeleting this can be framed as model editing High Level Approach notice threat information attempt to \u0026ldquo;delete it\u0026rdquo; evaluate the deletion try to extract the threat information again loop We formalize this by saying, for some adversarial \\(A\\) to question \\(Q\\), we hope that the candidate output set \\(C\\) of size \\(B\\) all don\u0026rsquo;t contain \\(A\\).\nFormal guarantees don\u0026rsquo;t work very well in LLMWorld.\nIdeally, we balance attack success and the damage to other aspects from the model.\nSupervision Gap Recovered Measuring the ratio between the rate of success of \u0026ldquo;easy\u0026rdquo; supervision data over \u0026ldquo;hard\u0026rdquo; supervisiation data.\n","html":"\u003ch2 id=\"controllable\"\u003econtrollable\u003c/h2\u003e\n\u003cp\u003eWe want \\(P(Y|X) = p\\), for a specific \\(p\\) that we specify.\u003c/p\u003e\n\u003ch3 id=\"fine-grained-control\"\u003efine-grained control\u003c/h3\u003e\n\u003cp\u003eideally, instead of optimizing over entire expected values, we want to tune specific utputs\u003c/p\u003e\n\u003ch2 id=\"success-in-editing\"\u003eSuccess in Editing\u003c/h2\u003e\n\u003cp\u003eSay we edited some \\(M\\), specifying a viper is a vertebrate.\u003c/p\u003e\n\u003cp\u003eIdeally, this should also edit the other related information:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P\\) (paraphrases)j: viper and vertebrates\u003c/li\u003e\n\u003cli\u003e\\(E\\) (logical entailments): a viper has a brain\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAnd we shouldn\u0026rsquo;t touch:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(R\\) (other stuff): Chile is a country\u003c/li\u003e\n\u003cli\u003e\\(LN\\) (local neural data): a viper is venomous\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"hypernetwork-weight-editing-s-drawbacks\"\u003eHypernetwork Weight Editing\u0026rsquo;s Drawbacks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eharder to \u003cstrong\u003efix 
errors\u003c/strong\u003e than \u003cstrong\u003ecreating them\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eharder to retain preformance on \u003cstrong\u003elocal data\u003c/strong\u003e than \u003cstrong\u003erandom data\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ehander to generalize to \u003cstrong\u003eentailed data\u003c/strong\u003e than \u003cstrong\u003eparaphrases\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eUpdates \u003cstrong\u003eimproves consistency\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"information-deletion\"\u003eInformation Deletion\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;deleting information\u0026rdquo; from LLMs is undefined\u003c/li\u003e\n\u003cli\u003eRLHF, SFT, etc. \u003cstrong\u003eHIDES\u003c/strong\u003e rather than ddeleting\u003c/li\u003e\n\u003cli\u003ethis can be framed as model editing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"high-level-approach\"\u003eHigh Level Approach\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enotice threat information\u003c/li\u003e\n\u003cli\u003eattempt to \u0026ldquo;delete it\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eevaluate the deletion\u003c/li\u003e\n\u003cli\u003etry to extract the threat information again\u003c/li\u003e\n\u003cli\u003eloop\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe formalize this by saying, for some adversarial \\(A\\) to question \\(Q\\), we hope that the candidate output set \\(C\\) of size \\(B\\) all don\u0026rsquo;t contain \\(A\\).\u003c/p\u003e\n\u003cp\u003eFormal guarantees don\u0026rsquo;t work very well in LLMWorld.\u003c/p\u003e\n\u003cp\u003eIdeally, we balance attack success and the damage to other aspects from the model.\u003c/p\u003e\n\u003ch3 id=\"supervision-gap-recovered\"\u003eSupervision Gap Recovered\u003c/h3\u003e\n\u003cp\u003eMeasuring the ratio between the rate of success of \u0026ldquo;easy\u0026rdquo; supervision data over \u0026ldquo;hard\u0026rdquo; supervisiation 
data.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhknowledge_editing/","tags":null,"title":"Knowledge Editing"},{"categories":null,"contents":"this thing needs an eigenvalue.\n","html":"\u003cp\u003ethis thing needs an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhknowledgebase_testing/","tags":null,"title":"knowledgebase testing page"},{"categories":null,"contents":"A KS test is a hypothesis test that measures if two groups of samples are drawn from the same distribution.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKS test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e that measures if two groups of samples are drawn from the same distribution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhkolmogorov_smirnov_test/","tags":null,"title":"Kolmogorov-Smirnov test"},{"categories":null,"contents":"Where:\n\\begin{equation} ||X-Y||_{\\infty} = \\max \\{| x_{i} - y_{i} |, x \\in X, y \\in Y} \\} \\end{equation}\n","html":"\u003cp\u003eWhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n||X-Y||_{\\infty} = \\max \\{| x_{i} - y_{i} |, x \\in X, y \\in Y} \\}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhl_infty/","tags":null,"title":"L-infinity norm"},{"categories":null,"contents":"Want mechanics? No. You get energy.\nFirst, recall the stationary-action principle. To define a system in Lagrangian Mechanics, we define a smooth function \\(L\\), called the \u0026ldquo;Lagrangian\u0026rdquo;, and some configuration space (axis) \\(M\\).\nBy convention, \\(L=T-V\\). \\(T\\) is the kinetic energy in the system, and \\(V\\) is the potential energy in the system.\nBy the stationary-action principle, then, we require \\(L\\) to remain at a critical point (max, min, saddle.) 
This fact allows us to calculate the equations of motion by hold \\(L\\) at such a point, and evolving the \\((T,V)\\) pair to remain at that point.\nThe notion of solving for optimal \\((T,V)\\), which will give us the equations of motion, is why Lagrangian multipliers were invented.\nNow, here\u0026rsquo;s a few results which help you deal with the Lagrangian.\nConservation of Momentum Note that momentum is always conserved.\nRecall that:\n\\begin{equation} F = m a = m \\dv{v}{t} = \\dv{mv}{t} \\end{equation}\nwhen \\(m\\) is constant, which it almost certainly is.\nRecall the definition of momentum:\n\\begin{equation} p := mv \\end{equation}\nTherefore, we have that:\n\\begin{equation} F = \\dv{p}{t} \\end{equation}\nGreat, now let\u0026rsquo;s recall what energy is:\n\\begin{equation} W = \\int F\\dd{x} \\end{equation}\nSubstituting our definitions of force:\n\\begin{equation} W = \\int \\dv{p}{t}\\dd{x} = \\int \\dd{p}\\dv{x}{t} = \\int v \\dd{p} \\end{equation}\n[something something something ask leonard]\nWe end up with:\n\\begin{equation} \\pdv{W}{v} = p \\end{equation}\nhow? IDK. But you then usually would use this by taking the derivative of the Lagrangian by velocity, then figuring it lingual to \\(0\\).\nBeam Theory We begin with this Euler-Lagrange expression:\nthese are a series of expressions derived to semiautomatically solve Largrangian expressions of expressions derived to semiautomatically solve Largrangian expressions: they are the pre-figured-out stationary-action principle \u0026ldquo;stationary points\u0026rdquo; with the least energy.\nWe want to create a Lagrangian of our system, and plug it in there.\nWe define the Lagrangian for this system to be\nRecall that the Lagrangian is defined by all kinetic energy sum minus all potential energy sum. 
Will investigate deeper later, but the first term is obviously the kinetic energy (1/2 mass-density velocity squared), then the subtracted potential energy term is the spring potential of the system (1/2 kx^2).\nThen there\u0026rsquo;s this third term. No idea.\nWe then try to plug stuff into that Euler-Lagrange expression. We can calculate for ourselves that:\nFinally, then:\nliterally\u0026hellip; the end. We just move stuff around and that\u0026rsquo;s literally it.\n","html":"\u003cp\u003eWant mechanics? No. You get energy.\u003c/p\u003e\n\u003cp\u003eFirst, recall the \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e. To define a system in \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian Mechanics\u003c/a\u003e, we define a \u003ca href=\"/posts/kbhsmooth_function/\"\u003esmooth function\u003c/a\u003e \\(L\\), called the \u0026ldquo;Lagrangian\u0026rdquo;, and some configuration space (axis) \\(M\\).\u003c/p\u003e\n\u003cp\u003eBy convention, \\(L=T-V\\). \\(T\\) is the kinetic energy in the system, and \\(V\\) is the potential energy in the system.\u003c/p\u003e\n\u003cp\u003eBy the \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e, then, we require \\(L\\) to remain at a critical point (max, min, saddle.) 
This fact allows us to calculate the equations of motion by hold \\(L\\) at such a point, and evolving the \\((T,V)\\) pair to remain at that point.\u003c/p\u003e\n\u003cp\u003eThe notion of solving for optimal \\((T,V)\\), which will give us the equations of motion, is why Lagrangian multipliers were invented.\u003c/p\u003e\n\u003cp\u003eNow, here\u0026rsquo;s a few results which help you deal with the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"conservation-of-momentum\"\u003eConservation of Momentum\u003c/h2\u003e\n\u003cp\u003eNote that momentum is always conserved.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF = m a = m \\dv{v}{t} = \\dv{mv}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhen \\(m\\) is constant, which it almost certainly is.\u003c/p\u003e\n\u003cp\u003eRecall the definition of momentum:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np := mv\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF = \\dv{p}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat, now let\u0026rsquo;s recall what energy is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nW = \\int F\\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting our definitions of force:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nW = \\int \\dv{p}{t}\\dd{x} = \\int \\dd{p}\\dv{x}{t} = \\int v \\dd{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e[something something something ask leonard]\u003c/p\u003e\n\u003cp\u003eWe end up with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{W}{v} = p\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehow? IDK. 
But you then usually would use this by taking the derivative of the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e by velocity, then figuring it lingual to \\(0\\).\u003c/p\u003e\n\u003ch2 id=\"beam-theory\"\u003eBeam Theory\u003c/h2\u003e\n\u003cp\u003eWe begin with this Euler-Lagrange expression:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-28-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ethese are a series of expressions derived to semiautomatically solve Largrangian expressions of expressions derived to semiautomatically solve Largrangian expressions: they are the pre-figured-out \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e \u0026ldquo;stationary points\u0026rdquo; with the least energy.\u003c/p\u003e\n\u003cp\u003eWe want to create a \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e of our system, and plug it in there.\u003c/p\u003e\n\u003cp\u003eWe define the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e for this system to be\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-31-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that the \u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian\u003c/a\u003e is defined by all kinetic energy sum minus all potential energy sum. Will investigate deeper later, but the first term is obviously the kinetic energy (1/2 mass-density velocity squared), then the subtracted potential energy term is the spring potential of the system (1/2 kx^2).\u003c/p\u003e\n\u003cp\u003eThen there\u0026rsquo;s this third term. No idea.\u003c/p\u003e\n\u003cp\u003eWe then try to plug stuff into that Euler-Lagrange expression. 
We can calculate for ourselves that:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-33-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFinally, then:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-33-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eliterally\u0026hellip; the end. We just move stuff around and that\u0026rsquo;s literally it.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-25_00-33-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhlagrangian_mechanics/","tags":null,"title":"Lagrangian Mechanics"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624694\nOne-Liner Proposed a large multimodal approach to embed auditory info + biomarkers for baseline classification.\nNovelty Developed a massively multimodal audio-to-embedding correlation system that maps audio to biomarker information collected (mood, memory, respiratory) and demonstrated its ability to discriminate cough results for COVID. (they were looking for AD; whoopsies)\nNotable Methods Developed a feature extraction model for AD detection named Open Voice Brain Model Collected a dataset on people coughing and correlated it with biomarkers Key Figs Figure 2 This is MULTI-MODAL as heck\nThis figure tells us the large network the came up with.\nTable 2 and 3 The descriminator tacked on the end of the network is transfer-trained to different tasks. 
It shows promising results for cough-to-COVID classification\nNew Concepts OVBM Lyu 2018 Notes Biomarker correlation Is biomarker data something that is commonly used as a feature extraction/benchmark tool?\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624694\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eProposed a large multimodal approach to embed auditory info + biomarkers for baseline classification.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eDeveloped a massively multimodal audio-to-embedding correlation system that maps audio to biomarker information collected (mood, memory, respiratory) and demonstrated its ability to discriminate cough results for COVID. (they were looking for AD; whoopsies)\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDeveloped a feature extraction model for AD detection named \u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOpen Voice Brain Model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eCollected a dataset on people coughing and correlated it with biomarkers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-2\"\u003eFigure 2\u003c/h3\u003e\n\u003cp\u003eThis is \u003cstrong\u003e\u003cstrong\u003eMULTI-MODAL\u003c/strong\u003e\u003c/strong\u003e as heck\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-32-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the large network the came up with.\u003c/p\u003e\n\u003ch3 id=\"table-2-and-3\"\u003eTable 2 and 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-37-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe descriminator tacked on the end of the network is transfer-trained to different tasks. 
It shows promising results for cough-to-COVID classification\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOVBM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlyu_2018/\"\u003eLyu 2018\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003ch3 id=\"biomarker-correlation\"\u003eBiomarker correlation\u003c/h3\u003e\n\u003cp\u003eIs biomarker data something that is commonly used as a feature extraction/benchmark tool?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-24-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhlaguarta_2021/","tags":["ntj"],"title":"Laguarta 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlambek_calculus/","tags":null,"title":"Lambek Calculus"},{"categories":null,"contents":"effability\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbheffability/\"\u003eeffability\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlanguage/","tags":null,"title":"language"},{"categories":null,"contents":"Definitions: Language Agents Agents that uses the language to act on behave of another person or group.\nTransitions Transition first from rule based learning to statistical learning Rise of semantic parsing: statistical models of parsing Then, moving from semantic parsing to large models\u0026mdash;putting decision making and language modeling into the same bubble Importance of LLMs They are simply better at understanding language inputs They can generate structured information (i.e. not just human language, JSONs, etc.) 
They can perform natural language \u0026ldquo;reasoning\u0026rdquo;\u0026mdash;not just generate (and natural language generation, abv)\n1+3 gives you chain of thought reasoning 1+2 gives CALM, SayCan, and other types of RL text parsing in order to do stuff with robotics all three gives ReAct ReAct Recover from incorrect thought and incorrect tools Allows human-in-the-loop alignment Major Flaw left-to-right one-pass decoding doesn\u0026rsquo;t allow alternate solutions bad properties regarding propagating hallucination search and planning had been explored a lot Tree of Thoughts Partial solution: Tree of Thoughts\nevaluate sub-paths to determine most optimal paths: think A* but with more proper heuristic bounding.\nBig idea: merge classic algorithmic ideas with decision making against LLMs\nProblem: agents are not robust at all https://github.com/ryoungj/ToolEmu\nKey New Challenges for Agents Evaluation Different from how previous NLP benchmarks: we are not worried about language modeling No longer boundaries between various fields Common goals:\nrealistic agents\u0026mdash;stop playing Atari games. 
reproducible systems measurability goals scalable models which are easy to use Web as an Interactive Environment agents on the web is both practical and scalable https://webshop-pnlp.github.io/ WebShop can actually transfer with no work to training on Amazon Mind2Web InterCode Formulation of agent decisions as POMDP in order to fully benchmark Markovian decisions:\nhttps://arxiv.org/abs/2306.14898\nAgent Development Agents development has no core framework\nproduction systems set of rules specificying a precondition + action when preconditinons are met, perform an action Big kitchen sink proposal: https://arxiv.org/abs/2309.02427\nTrust and safety Agents are much more powerful and dynamic\n","html":"\u003ch2 id=\"definitions-language-agents\"\u003eDefinitions: Language Agents\u003c/h2\u003e\n\u003cp\u003eAgents that uses the language to act on behave of another person or group.\u003c/p\u003e\n\u003ch2 id=\"transitions\"\u003eTransitions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eTransition first from rule based learning to statistical learning\u003c/li\u003e\n\u003cli\u003eRise of semantic parsing: statistical models of parsing\u003c/li\u003e\n\u003cli\u003eThen, moving from semantic parsing to large models\u0026mdash;putting decision making and language modeling into the same bubble\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"importance-of-llms\"\u003eImportance of LLMs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThey are simply better at understanding language inputs\u003c/li\u003e\n\u003cli\u003eThey can generate structured information (i.e. 
not just human language, JSONs, etc.)\u003c/li\u003e\n\u003cli\u003eThey can perform natural language \u0026ldquo;reasoning\u0026rdquo;\u0026mdash;not just generate\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e(and natural language generation, abv)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e1+3 gives you chain of thought reasoning\u003c/li\u003e\n\u003cli\u003e1+2 gives CALM, SayCan, and other types of RL text parsing in order to do stuff with robotics\u003c/li\u003e\n\u003cli\u003eall three gives ReAct\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"react\"\u003eReAct\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eRecover from incorrect thought and incorrect tools\u003c/li\u003e\n\u003cli\u003eAllows human-in-the-loop alignment\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"major-flaw\"\u003eMajor Flaw\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eleft-to-right one-pass decoding doesn\u0026rsquo;t allow alternate solutions\u003c/li\u003e\n\u003cli\u003ebad properties regarding propagating hallucination\u003c/li\u003e\n\u003cli\u003esearch and planning had been explored a lot\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"tree-of-thoughts\"\u003eTree of Thoughts\u003c/h3\u003e\n\u003cp\u003ePartial solution: \u003ca href=\"#tree-of-thoughts\"\u003eTree of Thoughts\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-11_11-22-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eevaluate sub-paths to determine most optimal paths: think A* but with more proper heuristic bounding.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eBig idea: merge classic algorithmic ideas with decision making against LLMs\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"problem-agents-are-not-robust-at-all\"\u003eProblem: agents are not robust at all\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://github.com/ryoungj/ToolEmu\"\u003ehttps://github.com/ryoungj/ToolEmu\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-new-challenges-for-agents\"\u003eKey 
New Challenges for Agents\u003c/h2\u003e\n\u003ch3 id=\"evaluation\"\u003eEvaluation\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eDifferent from how previous NLP benchmarks: we are \u003cstrong\u003enot\u003c/strong\u003e worried about language modeling\u003c/li\u003e\n\u003cli\u003eNo longer boundaries between various fields\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eCommon goals:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003erealistic agents\u0026mdash;stop playing Atari games.\u003c/li\u003e\n\u003cli\u003ereproducible systems\u003c/li\u003e\n\u003cli\u003emeasurability goals\u003c/li\u003e\n\u003cli\u003escalable models\u003c/li\u003e\n\u003cli\u003ewhich are easy to use\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"web-as-an-interactive-environment\"\u003eWeb as an Interactive Environment\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eagents on the web is both practical and scalable\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://webshop-pnlp.github.io/\"\u003ehttps://webshop-pnlp.github.io/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eWebShop can actually transfer with no work to training on Amazon\u003c/li\u003e\n\u003cli\u003eMind2Web\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"intercode\"\u003eInterCode\u003c/h4\u003e\n\u003cp\u003eFormulation of agent decisions as POMDP in order to fully benchmark Markovian decisions:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://arxiv.org/abs/2306.14898\"\u003ehttps://arxiv.org/abs/2306.14898\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"agent-development\"\u003eAgent Development\u003c/h3\u003e\n\u003cp\u003eAgents development has no core framework\u003c/p\u003e\n\u003ch4 id=\"production-systems\"\u003eproduction systems\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eset of rules specificying a precondition + action\u003c/li\u003e\n\u003cli\u003ewhen preconditinons are met, perform an action\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBig kitchen sink proposal: \u003ca 
href=\"https://arxiv.org/abs/2309.02427\"\u003ehttps://arxiv.org/abs/2309.02427\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"trust-and-safety\"\u003eTrust and safety\u003c/h3\u003e\n\u003cp\u003eAgents are much more powerful and dynamic\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlanguage_agents/","tags":null,"title":"Language Agents with Karthik"},{"categories":null,"contents":"What makes language modeling hard: resolving ambiguity is hard.\n\u0026ldquo;the chef made her duck\u0026rdquo;\nContents Basic Text Processing regex ELIZA tokenization and corpus Herdan\u0026rsquo;s Law text normalization tokenization + Subword Tokenization BPE Word Normalization lemmatization through morphological parsing only take stems from morphemes: porter stemmer sentence segmentation N-Grams Edit Distance DP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\nminimum edit distance weighted edit distance backtracing Ngrams N-Grams Markov Assumption Unigrams Backoff and Stupid Backoff Interpolation OOV Words Model Evaluation perplexity open vocabulary Text Classification Text Classification Bag of Words Naive Bayes Naive Bayes for Text Classification Binary Naive Bayes Lexicon Naive Bays Language Modeling Harmonic Mean Macroaverage and Microaverage Logistic Regression Generative Classifier vs Discriminate Classifier Logistic Regression Text Classification decision boundary cross entropy loss stochastic gradient descent Information Retrial Information Retrival Term-Document Matrix Inverted Index + postings list Boolean Retrieval positional index Ranked Information Retrial Ranked Information Retrieval feast or famine problem free text query score Jaccard Coefficient log-frequency weighting document frequency (\u0026quot;idf weight\u0026quot;) TF-IDF SMART notation vector-space model Vector Semantics sense principle of contrast word relatedness semantic field synonymy and antonyms affective meaning vector semantics transposing a Term-Document Matrix term-term matrix 
word2vec skip-gram with negative sampling POS and NER POS Tagging NER Tagging Dialogue Systems Dialogue Chatbot PARRY Recommender Systems Recommender System Dora Dora Neural Nets Neural Networks The Web Web Graph Social Network ","html":"\u003cp\u003eWhat makes language modeling hard: \u003cstrong\u003eresolving ambiguity is hard\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the chef made her duck\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"contents\"\u003eContents\u003c/h2\u003e\n\u003ch3 id=\"basic-text-processing\"\u003eBasic Text Processing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhregex/\"\u003eregex\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e and \u003ca href=\"/posts/kbhcorpus/\"\u003ecorpus\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcorpus/#herdan-s-law\"\u003eHerdan\u0026rsquo;s Law\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtext_normalization/\"\u003etext normalization\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e + \u003ca href=\"/posts/kbhtokenization/#subword-tokenization\"\u003eSubword Tokenization\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhword_normalization/\"\u003eWord Normalization\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlemmatization/\"\u003elemmatization\u003c/a\u003e through \u003ca href=\"/posts/kbhmorphological_parsing/\"\u003emorphological parsing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eonly take stems from morphemes: \u003ca href=\"/posts/kbhmorphological_parsing/#porter-stemmer\"\u003eporter 
stemmer\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsentence_segmentation/\"\u003esentence segmentation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"edit-distance\"\u003eEdit Distance\u003c/h3\u003e\n\u003cp\u003eDP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhweighted_edit_distance/\"\u003eweighted edit distance\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbacktracing/\"\u003ebacktracing\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ngrams\"\u003eNgrams\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#markov-assumption\"\u003eMarkov Assumption\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#unigrams\"\u003eUnigrams\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#backoff\"\u003eBackoff\u003c/a\u003e and \u003ca href=\"/posts/kbhn_grams/#stupid-backoff\"\u003eStupid Backoff\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#interpolation\"\u003eInterpolation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#oov-words\"\u003eOOV Words\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_evaluation/\"\u003eModel Evaluation\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhn_grams/#oov-words\"\u003eopen vocabulary\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"text-classification\"\u003eText Classification\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtext_classification/\"\u003eText Classification\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/\"\u003eBag of Words\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/#naive-bayes-for-text-classification\"\u003eNaive Bayes for Text Classification\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbag_of_words/#binary-naive-bayes\"\u003eBinary Naive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlexicon/\"\u003eLexicon\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhn_grams/#naive-bays-language-modeling\"\u003eNaive Bays Language Modeling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhharmonic_mean/\"\u003eHarmonic Mean\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmacroaverage/\"\u003eMacroaverage\u003c/a\u003e and \u003ca href=\"/posts/kbhmacroaverage/\"\u003eMicroaverage\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"logistic-regression\"\u003eLogistic Regression\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenerative_vs_discriminitive_classifier/#generative-classifier\"\u003eGenerative Classifier\u003c/a\u003e vs \u003ca href=\"/posts/kbhgenerative_vs_discriminitive_classifier/#discriminative-classifier\"\u003eDiscriminate Classifier\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/#logistic-regression-text-classification\"\u003eLogistic 
Regression Text Classification\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/#logistic-regression-text-classification\"\u003edecision boundary\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_loss/\"\u003ecross entropy loss\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstochastic_gradient_descent/\"\u003estochastic gradient descent\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"information-retrial\"\u003eInformation Retrial\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverted_index/\"\u003eInverted Index\u003c/a\u003e + \u003ca href=\"/posts/kbhinverted_index/#postings-list\"\u003epostings list\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverted_index/#positional-index\"\u003epositional index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ranked-information-retrial\"\u003eRanked Information Retrial\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#feast-or-famine-problem\"\u003efeast or famine problem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#free-text-query\"\u003efree text query\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhranked_information_retrieval/#score\"\u003escore\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#jaccard-coefficient\"\u003eJaccard Coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#log-frequency-weighting\"\u003elog-frequency weighting\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#document-frequency\"\u003edocument frequency\u003c/a\u003e (\u0026quot;\u003ca href=\"/posts/kbhranked_information_retrieval/#document-frequency\"\u003eidf weight\u003c/a\u003e\u0026quot;)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#tf-idf\"\u003eTF-IDF\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#smart-notation\"\u003eSMART notation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhranked_information_retrieval/#vector-space-model\"\u003evector-space model\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"vector-semantics\"\u003eVector Semantics\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#principle-of-contrast\"\u003eprinciple of contrast\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#word-relatedness\"\u003eword relatedness\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#semantic-field\"\u003esemantic field\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#synonymy\"\u003esynonymy\u003c/a\u003e and \u003ca href=\"/posts/kbhsense/#antonyms\"\u003eantonyms\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsense/#affective-meaning\"\u003eaffective 
meaning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_semantics/\"\u003evector semantics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_semantics/#transposing-a-id-b5d7f908-0351-436d-9784-180ab5aa0562-term-document-matrix\"\u003etransposing a Term-Document Matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_semantics/#term-term-matrix\"\u003eterm-term matrix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhword2vec/\"\u003eword2vec\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhword2vec/#skip-gram-with-negative-sampling\"\u003eskip-gram with negative sampling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pos-and-ner\"\u003ePOS and NER\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpos_tagging/\"\u003ePOS Tagging\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dialogue-systems\"\u003eDialogue Systems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdialogue/\"\u003eDialogue\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchatbot/\"\u003eChatbot\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparry/\"\u003ePARRY\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"recommender-systems\"\u003eRecommender Systems\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrecommender_system/\"\u003eRecommender System\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dora\"\u003eDora\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdora/\"\u003eDora\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"neural-nets\"\u003eNeural 
Nets\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhneural_networks/\"\u003eNeural Networks\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"the-web\"\u003eThe Web\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhweb_graph/\"\u003eWeb Graph\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsocial_network/\"\u003eSocial Network\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlanguage_information_index/","tags":null,"title":"Language Information Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlaplae/","tags":null,"title":"laplae"},{"categories":null,"contents":"the\n","html":"\u003cp\u003ethe\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlaw_of_cosines/","tags":null,"title":"law of cosines"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlaw_of_large_numbers/","tags":null,"title":"law of large numbers"},{"categories":null,"contents":"LOOCV is a cross validation method whereby the entire dataset bar one sample is used for training; then, validation is ran on one sample. This is repeated \\(N\\) times (with a fresh model and a fresh item left out) to get a distribution of one-shot validation results that is an approximately-normal curve centered around the mean validation result from many one-shot samples.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e is a cross validation method whereby the entire dataset bar one sample is used for training; then, validation is ran on one sample. 
This is repeated \\(N\\) times (with a fresh model and a fresh item left out) to get a distribution of one-shot validation results that is an approximately-normal curve centered around the mean validation result from many one-shot samples.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhloo/","tags":null,"title":"Leave-One-Out Cross Validation"},{"categories":null,"contents":"Reading notes :claim: Mccarthyism was a process that de-politicized America Since political activities could get you in trouble, prudent folk avoided them\nSocial conformaty became standard middle-class Americans became social conformists\nCommunism serves as a form of balance checking, which Mccathyism lost With their demise, the nation lost the institutional network that had created a public space where serious alternatives to the status quo could be presented.\nModerate-left was also diminished Moreover, with the disappearance of a vigorous movement on their left, moderate reform groups were more exposed to right-wing attacks and thus rendered less effective.\nMccarthyism also diminshed America\u0026rsquo;s liberal modernization Measures like national health insurance, a social reform embraced by the rest of the industrialized world, simply fell by the wayside.\nCold-war opposition became quelled by mccarthism Opposition to the cold war had been so thoroughly identified with communism that it was no longer possible to challenge the basic assumptions of American foreign policy without incurring suspicions of disloyalty\nThat there may have been more international collaboration if mccarthism was not done early on American policymakers feared to acknowledge the official existence of the People\u0026rsquo;s Republic of China until Richard Nixon, who was uniquely impervious to charges of being soft on communism, did so as president in 1971\nControvercial issues were avoided intellecturally and artistically Similarly, the blacklist contributed to the reluctance of the film industry 
to grapple with controversial social or political issues. In the intellectual world, cold war liberals also avoided controversy.\nThat \u0026ldquo;ideology\u0026rdquo; became irrelavent, pure pragmatism took hold They celebrated the \u0026ldquo;end of ideology,\u0026rdquo; claiming that the United States\u0026rsquo; uniquely pragmatic approach to politics made the problems that had once concerned left- wing ideologists irrelevant.\nState power became expanded federal agents attacked individual rights and extended state power into movie studios, universities, labor unions, and many other ostensibly independent institutions.\nThat Mccarthism produced a threat to demcrocy in itself McCarthyism alone did not cause these outrages; but the assault on democracy that began during the 1940s and 1950s with the collaboration of private institutions and public agencies in suppressing the alleged threat of domestic communism was an important early contribution.\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"claim-mccarthyism-was-a-process-that-de-politicized-america\"\u003e:claim: Mccarthyism was a process that de-politicized America\u003c/h3\u003e\n\u003cp\u003eSince political activities could get you in trouble, prudent folk avoided them\u003c/p\u003e\n\u003ch3 id=\"social-conformaty-became-standard\"\u003eSocial conformaty became standard\u003c/h3\u003e\n\u003cp\u003emiddle-class Americans became social conformists\u003c/p\u003e\n\u003ch3 id=\"communism-serves-as-a-form-of-balance-checking-which-mccathyism-lost\"\u003eCommunism serves as a form of balance checking, which Mccathyism lost\u003c/h3\u003e\n\u003cp\u003eWith their demise, the nation lost the institutional network that had created a public space where serious alternatives to the status quo could be presented.\u003c/p\u003e\n\u003ch3 id=\"moderate-left-was-also-diminished\"\u003eModerate-left was also diminished\u003c/h3\u003e\n\u003cp\u003eMoreover, with the disappearance 
of a vigorous movement on their left, moderate reform groups were more exposed to right-wing attacks and thus rendered less effective.\u003c/p\u003e\n\u003ch3 id=\"mccarthyism-also-diminshed-america-s-liberal-modernization\"\u003eMccarthyism also diminshed America\u0026rsquo;s liberal modernization\u003c/h3\u003e\n\u003cp\u003eMeasures like national health insurance, a social reform embraced by the rest of the industrialized world, simply fell by the wayside.\u003c/p\u003e\n\u003ch3 id=\"cold-war-opposition-became-quelled-by-mccarthism\"\u003eCold-war opposition became quelled by mccarthism\u003c/h3\u003e\n\u003cp\u003eOpposition to the cold war had been so thoroughly identified with communism that it was no longer possible to challenge the basic assumptions of American foreign policy without incurring suspicions of disloyalty\u003c/p\u003e\n\u003ch3 id=\"that-there-may-have-been-more-international-collaboration-if-mccarthism-was-not-done-early-on\"\u003eThat there may have been more international collaboration if mccarthism was not done early on\u003c/h3\u003e\n\u003cp\u003eAmerican policymakers feared to acknowledge the official existence of the People\u0026rsquo;s Republic of China until Richard Nixon, who was uniquely impervious to charges of being soft on communism, did so as president in 1971\u003c/p\u003e\n\u003ch3 id=\"controvercial-issues-were-avoided-intellecturally-and-artistically\"\u003eControvercial issues were avoided intellecturally and artistically\u003c/h3\u003e\n\u003cp\u003eSimilarly, the blacklist contributed to the reluctance of the film industry to grapple with controversial social or political issues. 
In the intellectual world, cold war liberals also avoided controversy.\u003c/p\u003e\n\u003ch3 id=\"that-ideology-became-irrelavent-pure-pragmatism-took-hold\"\u003eThat \u0026ldquo;ideology\u0026rdquo; became irrelavent, pure pragmatism took hold\u003c/h3\u003e\n\u003cp\u003eThey celebrated the \u0026ldquo;end of ideology,\u0026rdquo; claiming that the United States\u0026rsquo; uniquely pragmatic approach to politics made the problems that had once concerned left- wing ideologists irrelevant.\u003c/p\u003e\n\u003ch3 id=\"state-power-became-expanded\"\u003eState power became expanded\u003c/h3\u003e\n\u003cp\u003efederal agents attacked individual rights and extended state power into movie studios, universities, labor unions, and many other ostensibly independent institutions.\u003c/p\u003e\n\u003ch3 id=\"that-mccarthism-produced-a-threat-to-demcrocy-in-itself\"\u003eThat Mccarthism produced a threat to demcrocy in itself\u003c/h3\u003e\n\u003cp\u003eMcCarthyism alone did not cause these outrages; but the assault on democracy that began during the 1940s and 1950s with the collaboration of private institutions and public agencies in suppressing the alleged threat of domestic communism was an important early contribution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlegacy_of_mccarthyism/","tags":null,"title":"Legacy of McCarthyism"},{"categories":null,"contents":"lemmatization is the act of normalizing words into standard meaning irrespective of word variations in order to do a broader analysis\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlemmatization/\"\u003elemmatization\u003c/a\u003e is the act of normalizing words into standard meaning irrespective of word variations in order to do a broader analysis\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlemmatization/","tags":null,"title":"lemmatization"},{"categories":null,"contents":"Any two basis of finite-dimensional vector space have the same length.\nconstituents A 
finite-dimensional vector space \\(V\\) Basis \\(B_1\\), \\(B_2\\) be bases in \\(V\\) requirements Given \\(B_1\\), \\(B_2\\) are basis in \\(V\\), we know that they are both linearly independent and spans \\(V\\). We have that the length of linearly-independent list \\(\\leq\\) length of spanning list.\nLet\u0026rsquo;s take first \\(B_1\\) as linearly independent and \\(B_2\\) as spanning:\nWe have then \\(len(B_1) \\leq len(B_2)\\)\nSwapping roles:\nWe have then \\(len(B_2) \\leq len(B_1)\\)\nAs both of this conditions are true, we have that \\(len(B_1)=len(B_{2})\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eAny two \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e have the same length.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003eBasis \\(B_1\\), \\(B_2\\) be bases in \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eGiven \\(B_1\\), \\(B_2\\) are \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e in \\(V\\), we know that they are both \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e and \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). 
We have that the \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take first \\(B_1\\) as \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e and \\(B_2\\) as \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eWe have then \\(len(B_1) \\leq len(B_2)\\)\u003c/p\u003e\n\u003cp\u003eSwapping roles:\u003c/p\u003e\n\u003cp\u003eWe have then \\(len(B_2) \\leq len(B_1)\\)\u003c/p\u003e\n\u003cp\u003eAs both of this conditions are true, we have that \\(len(B_1)=len(B_{2})\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlength_of_basis_doesn_t_depend_on_basis/","tags":null,"title":"Length of Basis Doesn't Depend on Basis"},{"categories":null,"contents":"Autonomously driving is really hard. How do we integrate planning + learning in a close-loop style. We\u0026rsquo;ll start from the current belief, and construct a tree of all reachable belief state.\nRecall DESPOT.\nApproach learning (b): a neural network which maps the driving history into a policy and value planning (a): we will use the neural network\u0026rsquo;s derived policy and value to run MCTS execution (e): execute the actions in a simulator The data which is obtained using the simulator is used to train the neural network.\nlearning The learning component is a supervised policy where a CNN takes a situation and map\nplanning its a AR-DESPOT. 
We select actions by:\n\\begin{equation} a^{*} = \\arg\\max_{a \\in A} \\left\\{u(b,a) + c \\pi_{\\theta}(a|x_{b}) \\sqrt{ \\frac{N(b)}{N(b,a)+1}}\\right\\} \\end{equation}\nwhere \\(\\pi_{\\theta}\\) is our policy network.\nEvery time we encounter a new node, use the learned value function as a lower bound.\nNeeded less depth in the DESPOT than using it naively.\n","html":"\u003cp\u003eAutonomously driving is really hard. How do we integrate planning + learning in a close-loop style. We\u0026rsquo;ll start from the current belief, and construct a tree of all reachable belief state.\u003c/p\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"approach\"\u003eApproach\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_09-48-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003elearning\u003c/strong\u003e (b): a neural network which maps the driving history into a policy and value\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eplanning\u003c/strong\u003e (a): we will use the neural network\u0026rsquo;s derived policy and value to run \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eexecution\u003c/strong\u003e (e): execute the actions in a simulator\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe data which is obtained using the simulator is used to train the neural network.\u003c/p\u003e\n\u003ch3 id=\"learning\"\u003elearning\u003c/h3\u003e\n\u003cp\u003eThe learning component is a supervised policy where a CNN takes a situation and map\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-15_09-50-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"planning\"\u003eplanning\u003c/h3\u003e\n\u003cp\u003eits a \u003ca href=\"/posts/kbhdespot/#anytime-despot\"\u003eAR-DESPOT\u003c/a\u003e. 
We select actions by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na^{*} = \\arg\\max_{a \\in A} \\left\\{u(b,a) + c \\pi_{\\theta}(a|x_{b}) \\sqrt{ \\frac{N(b)}{N(b,a)+1}}\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\pi_{\\theta}\\) is our policy network.\u003c/p\u003e\n\u003cp\u003eEvery time we encounter a new node, use the learned value function as a lower bound.\u003c/p\u003e\n\u003cp\u003eNeeded less depth in the \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e than using it naively.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhletsdrive/","tags":null,"title":"LetsDrive"},{"categories":null,"contents":"occasionally, you can\u0026rsquo;t really get a specific solution.\n\\begin{equation} \\dv{y}{t} = e^{t}\\cos y \\end{equation}\nafter doing the , you get:\n\\begin{equation} \\ln (\\sec y + \\tan y) - e^{t} = C \\end{equation}\nyou get sets of this function \\(F(t,y)\\) which shifts it up and down, by any constant C.\nBut at any given \\((t,y)\\), you get a slope \\(e^{t}\\cos y\\).\n","html":"\u003cp\u003eoccasionally, you can\u0026rsquo;t really get a specific solution.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{t} = e^{t}\\cos y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eafter doing the , you get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln (\\sec y + \\tan y) - e^{t} = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou get sets of this function \\(F(t,y)\\) which shifts it up and down, by any constant C.\u003c/p\u003e\n\u003cp\u003eBut at any given \\((t,y)\\), you get a slope \\(e^{t}\\cos y\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlevel_set/","tags":null,"title":"level set"},{"categories":null,"contents":"The Lexicalization Hypothesis is a hypothesis proposed by Chomsky that states that syntactic transformations can only apply on syntatic constituents; therefore, the rules of putting words together is different from the rules that puts phrases together. 
This theory stands in opposition to generative semantics.\nThere are two versions of the Lexicalization Hypothesis:\nStrong Lexicalization Hypothesis The Strong Lexicalization Hypothesis states that both derivational words (changes meaning, bench=\u0026gt;benching) or inflectional words (changes grammar, eat=\u0026gt;eating) cannot be put together via syntatical rules. (Geeraerts 2009)\nWeak Lexicalization Hypothesis Weak Lexicalization Hypothesis states that semantic rules cannot work in the formation of derivational words only.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e is a hypothesis proposed by \u003ca href=\"/posts/kbhchomsky/\"\u003eChomsky\u003c/a\u003e that states that syntactic transformations can only apply on \u003ca href=\"\"\u003esyntatic constituents\u003c/a\u003e; therefore, the rules of putting words together is \u003cem\u003edifferent\u003c/em\u003e from the rules that puts phrases together. 
This theory stands in opposition to \u003ca href=\"/posts/kbhgenerative_semantics/\"\u003egenerative semantics\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThere are two versions of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e:\u003c/p\u003e\n\u003ch2 id=\"strong-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eStrong \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#strong-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e states that \u003cem\u003eboth\u003c/em\u003e \u003ca href=\"/posts/kbhderivational_words/\"\u003ederivational words\u003c/a\u003e (changes meaning, bench=\u0026gt;benching) or \u003ca href=\"/posts/kbhinflectional_words/\"\u003einflectional words\u003c/a\u003e (changes grammar, eat=\u0026gt;eating) cannot be put together via syntatical rules. (\u003ca href=\"#citeproc_bib_item_1\"\u003eGeeraerts 2009\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"weak-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eWeak \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#weak-lexicalization-hypothesis--kbhlexicalization-hypothesis-dot-md\"\u003eWeak Lexicalization Hypothesis\u003c/a\u003e states that semantic rules cannot work in the formation of \u003ca href=\"/posts/kbhderivational_words/\"\u003ederivational words\u003c/a\u003e only.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlexicalization_hypothesis/","tags":null,"title":"Lexicalization Hypothesis"},{"categories":null,"contents":"Lexicon are pre-labeled datasets which pre-organize words into features. 
They are useful when training data is sparse.\nInstead of doing word counts, we compute each feature based on teh the token\u0026rsquo;s assigned label in the lexicon.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlexicon/\"\u003eLexicon\u003c/a\u003e are pre-labeled datasets which pre-organize words into features. They are useful when training data is sparse.\u003c/p\u003e\n\u003cp\u003eInstead of doing word counts, we compute each feature based on teh the token\u0026rsquo;s assigned label in the lexicon.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlexicon/","tags":null,"title":"Lexicon"},{"categories":null,"contents":" Poster-modern search for individualism ","html":"\u003cul\u003e\n\u003cli\u003ePoster-modern search for individualism\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhliberal_center/","tags":null,"title":"Liberal Center"},{"categories":null,"contents":"likelyhood is the PDF, PMF, or joint probability distribution\u0026mdash;which ever distribution\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e is the \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e, \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e, or \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u0026mdash;which ever distribution\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlikelyhood/","tags":null,"title":"likelyhood"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlina/","tags":null,"title":"lina"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.642033\nOne-Liner Proposed cross-linguistic markers shared for AD patients between English and French; evaluated features found with standard ML.\nNovelty Multi-lingual, cross-linguistic analysis.\nNotable Methods Looked at common patters between 
the two languages Linguistic results scored by IUs on CTP task Key Figs Figure 1 This figure tells us the various approaches measured.\nTable 2 Here\u0026rsquo;s a list of semantic features extracted\nTable 3 Here\u0026rsquo;s a list of NLP features extracted. Bolded items represent P \u0026lt;0.001 correlation for AD/NonAD difference between English and French.\nSame thing but semantic features\nsame thing but acoustic features. As we can see, acoustic features didn\u0026rsquo;t do much.\nNew Concepts CTP IU Notes ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.642033\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eProposed cross-linguistic markers shared for AD patients between English and French; evaluated features found with standard ML.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eMulti-lingual, cross-linguistic analysis.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLooked at common patters between the two languages\u003c/li\u003e\n\u003cli\u003eLinguistic results scored by \u003ca href=\"/posts/kbhiu/\"\u003eIU\u003c/a\u003es on \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-1\"\u003eFigure 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-26-39_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the various approaches measured.\u003c/p\u003e\n\u003ch3 id=\"table-2\"\u003eTable 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-31-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eHere\u0026rsquo;s a list of semantic features extracted\u003c/p\u003e\n\u003ch3 id=\"table-3\"\u003eTable 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-06-24_23-32-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eHere\u0026rsquo;s a list of NLP features extracted. Bolded items represent P \u0026lt;0.001 correlation for AD/NonAD difference between English and French.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-33-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSame thing but semantic features\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_23-33-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003esame thing but acoustic features. As we can see, acoustic features didn\u0026rsquo;t do much.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhiu/\"\u003eIU\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlindsay_2021/","tags":["ntj"],"title":"Lindsay 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhlinear_algea/","tags":null,"title":"linear algea"},{"categories":null,"contents":"Gaussian Elimination Quiz Demonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\)) Which \\(2\\times 2\\) matrices under multiplication form a group? (error: closure need to proved on invertable matrices under multiplication, not just \\(2\\times 2\\)) Deriving Rotation matrices (error: clockwise vs counter-clockwise) Linear Independence Quiz Connection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent Basis and Dimension Quiz put 0 into a basis AAAA not lin. 
indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars missing some inequality about basis? \u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis Final, part 1 definition of vector space: scalar multiplication is not an operation straight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\) plane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake proof: det A det B = det AB Final, part 2 Counterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\) Counterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\) Counterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\) Product Spaces Quiz Prove that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are isomorphic\nerror: didn\u0026rsquo;t do it\nQuotient Spaces Quiz Couldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too. The spanning proof: \\(v + U =\\) , rewrite as basis, etc. ","html":"\u003ch2 id=\"gaussian-elimination-quiz\"\u003eGaussian Elimination Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDemonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\))\u003c/li\u003e\n\u003cli\u003eWhich \\(2\\times 2\\) matrices under multiplication form a group? 
(error: closure need to proved on \u003cstrong\u003einvertable\u003c/strong\u003e matrices under multiplication, not just \\(2\\times 2\\))\u003c/li\u003e\n\u003cli\u003eDeriving Rotation matrices (error: clockwise vs counter-clockwise)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"linear-independence-quiz\"\u003eLinear Independence Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eConnection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basis-and-dimension-quiz\"\u003eBasis and Dimension Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eput 0 into a basis AAAA not lin. indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars\u003c/li\u003e\n\u003cli\u003emissing some inequality about basis? 
\u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-1\"\u003eFinal, part 1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edefinition of vector space: scalar multiplication is not an operation\u003c/li\u003e\n\u003cli\u003estraight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\)\u003c/li\u003e\n\u003cli\u003eplane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake\u003c/li\u003e\n\u003cli\u003eproof: det A det B = det AB\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-2\"\u003eFinal, part 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCounterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"product-spaces-quiz\"\u003eProduct Spaces Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eProve that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eerror: didn\u0026rsquo;t do it\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"quotient-spaces-quiz\"\u003eQuotient Spaces Quiz\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-09_10-24-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCouldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + 
U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too.\u003c/li\u003e\n\u003cli\u003eThe spanning proof: \\(v + U =\\) , rewrite as basis, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_algebra_errors-1/","tags":null,"title":"Linear Algebra Errors"},{"categories":null,"contents":"Gaussian Elimination Quiz Demonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\)) Which \\(2\\times 2\\) matrices under multiplication form a group? (error: closure need to proved on invertable matrices under multiplication, not just \\(2\\times 2\\)) Deriving Rotation matrices (error: clockwise vs counter-clockwise) Linear Independence Quiz Connection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent Basis and Dimension Quiz put 0 into a basis AAAA not lin. indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars missing some inequality about basis? 
\u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis Final, part 1 definition of vector space: scalar multiplication is not an operation straight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\) plane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake proof: det A det B = det AB Final, part 2 Counterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\) Counterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\) Counterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\) Product Spaces Quiz Need more specific description: explain why we use product and quotient to describe product and quotient spaces? Prove that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are isomorphic. Error: didn\u0026rsquo;t do it correctly for infinite dimensional Quotient Spaces Quiz Couldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too. The spanning proof: \\(v + U =\\) , rewrite as basis, etc. she graded wrong: what\u0026rsquo;s the importance of \\(\\widetilde{T}\\)? Give two statements equivalent to \\(v+U = w+U\\), prove equivalence betewen this statement and the others didn\u0026rsquo;t prove both directions! 
Polynomials Quiz state the fundamental theorem of algebra; error: \\(\\mathcal{P}_{m}(\\mathbb{F})\\) is a vector space of polynomials with degree at most \\(m\\), and yet the FtOA requires exactly \\(m\\) Upper Triangular Quiz upper-triangular representation is findable when the space is 1) under complexes and 2) for finite-dimensional vector spaces; need BOTH conditions Upper Triangular Quiz UNCLEAR: Geometric Multipliicty is bounded by Algebric Multiplicity; Algebraic multiplicity (\u0026ldquo;real estate\u0026rdquo; taken on the upper-triangular diagonal) v. geometric multiplicity (amount of linearly independent eigenvectors included with that eigenvalue); so if geometric multiplicity \u0026lt; algebraic multiplicity, the map is not diagonalizable because its not bringing enough linearly independent eigenvectors Diagonalization Quiz enough eigenvalues go in only one direction: it existing means its diagonalizable, but the opposite isn\u0026rsquo;t true the proof for \\(T\\) is diagonalizable IFF the matrix \\(T\\) is similar to a diagonal matrix: NUS-MATH530 Similar to Diagonal Final, part 1 State the complex spectral theorem (error: the condition of normality is a PARALLEL result) Final, Part 2 Said this was true, but its not; \\(null\\ T \\bigoplus range\\ T = V\\), \\(T\\) is diagonalizable; Said this was true, but its false \\(T^{2}= 0\\) IFF \\(null\\ T = range\\ T\\) suppose \\(T=0\\), \\(T^{2} = 0\\). \\(null\\ T = V\\), \\(range\\ T = 0\\). Spectral theorem doesn\u0026rsquo;t define diagonalizability, it defines diagonalibility for ORTHONORMAL missing derivation of the pseudoinverse ","html":"\u003ch2 id=\"gaussian-elimination-quiz\"\u003eGaussian Elimination Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDemonstrate that matrices\u0026rsquo; multiplication are not commutative (error: didn\u0026rsquo;t consider \\(m\\times m\\))\u003c/li\u003e\n\u003cli\u003eWhich \\(2\\times 2\\) matrices under multiplication form a group? 
(error: closure need to proved on \u003cstrong\u003einvertable\u003c/strong\u003e matrices under multiplication, not just \\(2\\times 2\\))\u003c/li\u003e\n\u003cli\u003eDeriving Rotation matrices (error: clockwise vs counter-clockwise)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"linear-independence-quiz\"\u003eLinear Independence Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eConnection between linear independence and systems equations (error: beated around the bush) \u0026mdash; the matrix of an nxn system of equations has a solution if the matrix\u0026rsquo;s column vectors is linearly independent\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basis-and-dimension-quiz\"\u003eBasis and Dimension Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eput 0 into a basis AAAA not lin. indep; figure out what the basis for a polynomial with a certain root is: it is probably of dimension m (instead of m+1), because scalars doesn\u0026rsquo;t work in the case of p(3)=0; so basis is just the scalars\u003c/li\u003e\n\u003cli\u003emissing some inequality about basis? 
\u0026mdash; its just that lin.idp sets is shorter or equal to basis and spanning sets is longer or equal to basis\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-1\"\u003eFinal, part 1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edefinition of vector space: scalar multiplication is not an operation\u003c/li\u003e\n\u003cli\u003estraight forgot \\(dim(U+V) = dim U + dim V - dim (U\\cap V)\\)\u003c/li\u003e\n\u003cli\u003eplane containing \\((1,0,2)\\) and \\((3,-1,1)\\): math mistake\u003c/li\u003e\n\u003cli\u003eproof: det A det B = det AB\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-2\"\u003eFinal, part 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCounterproof: If \\(v_1 \\dots v_4\\) is a basis of \\(V\\), and \\(U\\) is a subspace of \\(V\\) with \\(v_1, v_2 \\in U\\) and \\(v_3, v_4\\) not in \\(U\\), \\(v_1, v_2\\) is a basis of \\(U\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(T \\in \\mathcal{L}(V,V)\\) and \\(T^{2}=0\\), then \\(T=0\\)\u003c/li\u003e\n\u003cli\u003eCounterproof: if \\(s,t \\in \\mathcal{L}(V,V)\\), and \\(ST=0\\), then \\(null\\ s\\) is contained in \\(range\\ T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003ch2 id=\"product-spaces-quiz\"\u003eProduct Spaces Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNeed more specific description: explain why we use product and quotient to describe product and quotient spaces?\u003c/li\u003e\n\u003cli\u003eProve that \\(\\mathcal{L}(V_1 \\times V_2 \\times \\dots \\times V_{m}, W)\\) and \\(\\mathcal{L}(V_1, W) \\times \\dots \\times \\mathcal{L}(V_{m}, W)\\) are \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e. 
Error: didn\u0026rsquo;t do it correctly for infinite dimensional\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"quotient-spaces-quiz\"\u003eQuotient Spaces Quiz\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-02-09_10-24-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003eCouldn\u0026rsquo;t prove that the list in linearly independent: the linear combinations is some \\(c_1v_1 + \\dots c_{m}v_{m} + U\\); as \\(v_1 \\dots v_{m}\\) is a basis of \\(V / U\\), \\(c_1 \\dots c_{m} = 0\\), now the second part is also a basis so they are \\(0\\) too.\n\u003cul\u003e\n\u003cli\u003eThe spanning proof: \\(v + U =\\) , rewrite as basis, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eshe graded wrong: what\u0026rsquo;s the importance of \\(\\widetilde{T}\\)?\u003c/li\u003e\n\u003cli\u003eGive two statements equivalent to \\(v+U = w+U\\), prove equivalence betewen this statement and the others\n\u003cul\u003e\n\u003cli\u003edidn\u0026rsquo;t prove both directions!\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"polynomials-quiz\"\u003ePolynomials Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003estate the fundamental theorem of algebra; error: \\(\\mathcal{P}_{m}(\\mathbb{F})\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e of \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es with degree \u003cem\u003eat most \\(m\\)\u003c/em\u003e, and yet the FtOA requires \u003cem\u003eexactly \\(m\\)\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"upper-triangular-quiz\"\u003eUpper Triangular Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eupper-triangular representation is findable when the space is 1) under complexes and 2) for finite-dimensional vector spaces; need BOTH conditions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"upper-triangular-quiz\"\u003eUpper Triangular 
Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUNCLEAR: Geometric Multipliicty is bounded by Algebric Multiplicity; Algebraic multiplicity (\u0026ldquo;real estate\u0026rdquo; taken on the upper-triangular diagonal) v. geometric multiplicity (amount of linearly independent eigenvectors included with that eigenvalue); so if geometric multiplicity \u0026lt; algebraic multiplicity, the map is not diagonalizable because its not bringing enough linearly independent eigenvectors\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"diagonalization-quiz\"\u003eDiagonalization Quiz\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eenough eigenvalues go in only one direction: it existing means its diagonalizable, but the opposite isn\u0026rsquo;t true\u003c/li\u003e\n\u003cli\u003ethe proof for \\(T\\) is diagonalizable IFF the matrix \\(T\\) is similar to a diagonal matrix: \u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eNUS-MATH530 Similar to Diagonal\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-1\"\u003eFinal, part 1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eState the complex spectral theorem (error: the condition of normality is a PARALLEL result)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"final-part-2\"\u003eFinal, Part 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSaid this was true, but its not; \\(null\\ T \\bigoplus range\\ T = V\\), \\(T\\) is diagonalizable;\u003c/li\u003e\n\u003cli\u003eSaid this was true, but its false \\(T^{2}= 0\\) IFF \\(null\\ T = range\\ T\\)\nsuppose \\(T=0\\), \\(T^{2} = 0\\). 
\\(null\\ T = V\\), \\(range\\ T = 0\\).\u003c/li\u003e\n\u003cli\u003eSpectral theorem doesn\u0026rsquo;t define diagonalizability, it defines diagonalibility for ORTHONORMAL\u003c/li\u003e\n\u003cli\u003emissing derivation of the pseudoinverse\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_algebra_errors/","tags":null,"title":"Linear Algebra Errors"},{"categories":null,"contents":"The bible stays the same: (Axler 1997)\nWe will be less exploratory, Axler will pretty much tell us. However, we should try to say stuff in the class every single class period.\nThere is a ban on numbers over 4 on this class.\nBest Practices Ask questions Talk to each other Make mistakes From Riley: know the Proof Design Patterns Non-Axler but Important Things we explicitly are told to know, but is not immediately in Axler. You bet you determinants are going to be here.\ngroup matricies dot product and cross product solving systems binary operation modular arithmetic quotient group strong induction algebreic multiplicity geometric multplicity matrix adjectives singular value decomposition 1 Axler 1.A Axler 1.B Axler 1.C 2 Axler 2.A Axler 2.B Axler 2.C 3 Axler 3.A Axler 3.B Axler 3.C Axler 3.D Axler 3.E Axler 3.F 4 Thoughts on Axler 4\n5 Axler 5.A Axler 5.B Axler 5.C 6 Axler 6.A Axler 6.B 7 Axler 7.A Misc Knowledge algebra vector integer additive identity taxicab norm Axler, Sheldon. 1997. Linear Algebra Done Right. Undergraduate Texts in Mathematics. Springer New York. doi:10.1007/b97662. ","html":"\u003cp\u003eThe bible stays the same: (\u003ca href=\"#citeproc_bib_item_1\"\u003eAxler 1997\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eWe will be less exploratory, Axler will pretty much tell us. 
However, we should try to say stuff in the class every single class period.\u003c/p\u003e\n\u003cp\u003eThere is a ban on \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003es over 4 on this class.\u003c/p\u003e\n\u003ch2 id=\"best-practices\"\u003eBest Practices\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAsk questions\u003c/li\u003e\n\u003cli\u003eTalk to each other\u003c/li\u003e\n\u003cli\u003eMake mistakes\u003c/li\u003e\n\u003cli\u003eFrom Riley: know the \u003ca href=\"/posts/kbhproof_design_patterns/\"\u003eProof Design Patterns\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"non-axler-but-important\"\u003eNon-Axler but Important\u003c/h2\u003e\n\u003cp\u003eThings we explicitly are told to know, but is not immediately in Axler. You bet you determinants are going to be here.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e and \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsolving_systems/\"\u003esolving systems\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_operation/\"\u003ebinary operation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodular_arithmetic/\"\u003emodular arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalgebreic_multiplicity/\"\u003ealgebreic multiplicity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeometric_multplicity/\"\u003egeometric 
multplicity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_adjectives/\"\u003ematrix adjectives\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value decomposition\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"1\"\u003e1\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_a/\"\u003eAxler 1.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_1_b/\"\u003eAxler 1.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_1_c/\"\u003eAxler 1.C\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"2\"\u003e2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_2_a/\"\u003eAxler 2.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_2_b/\"\u003eAxler 2.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_2_c/\"\u003eAxler 2.C\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"3\"\u003e3\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_a/\"\u003eAxler 3.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_b/\"\u003eAxler 3.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_c/\"\u003eAxler 3.C\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_d/\"\u003eAxler 3.D\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_e/\"\u003eAxler 3.E\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_3_f/\"\u003eAxler 3.F\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"4\"\u003e4\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhthoughts_on_axler_4/\"\u003eThoughts on Axler 4\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"5\"\u003e5\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_5_a/\"\u003eAxler 
5.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_5_b/\"\u003eAxler 5.B\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_5_c/\"\u003eAxler 5.C\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"6\"\u003e6\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_6_a/\"\u003eAxler 6.A\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_6_b/\"\u003eAxler 6.B\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"7\"\u003e7\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhaxler_7_a/\"\u003eAxler 7.A\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"misc-knowledge\"\u003eMisc Knowledge\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtaxicab_norm/\"\u003etaxicab norm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eAxler, Sheldon. 1997. \u003ci\u003eLinear Algebra Done Right\u003c/i\u003e. Undergraduate Texts in Mathematics. Springer New York. doi:\u003ca href=\"https://doi.org/10.1007/b97662\"\u003e10.1007/b97662\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_algebra_index/","tags":["index"],"title":"Linear Algebra Index"},{"categories":null,"contents":"A Linear Combination of vectors is a\u0026hellip; guess what? 
Any vector formed by a combination of vectors at arbitrary scales.\nconstituents A list of vectors \\(v_1, \\dots,v_{m}\\) Scalars \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) requirements A Linear Combination is defined formally by:\n\\begin{equation} v = a_1v_1+\\dots+a_{m}v_{m} \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_combination/\"\u003eLinear Combination\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es is a\u0026hellip; guess what? Any vector formed by a combination of vectors at arbitrary scales.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA list of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es \\(v_1, \\dots,v_{m}\\)\u003c/li\u003e\n\u003cli\u003eScalars \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_combination/\"\u003eLinear Combination\u003c/a\u003e is defined formally by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1v_1+\\dots+a_{m}v_{m}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_combination/","tags":null,"title":"linear combination"},{"categories":null,"contents":"Here it is:\n\\begin{equation} a\\frac{dy}{dx} + by = c \\end{equation}\nFor some constants \\(a,b,c\\). The name is pretty obvious, because we have constants and the highest power on everything is \\(1\\). Its first-order because the derivative is only the first-order derivative.\nlinear (diffeq) We technically call it \u0026ldquo;linear\u0026rdquo; because: if there are two possible solutions \\(y_1(x)\\) \\(y_2(x)\\), a linear combination \\(Ay_1(x)+By_2(x)\\) should also be a solution.
Its \u0026ldquo;linear\u0026rdquo; because linear combinations work.\nsolving separable differential equations A separable differential equation means that we can separate the derivative by itself and separate its two components. For the example above, we have that:\n\\begin{equation} \\frac{dy}{dx} = \\frac{c-by}{a} \\end{equation}\nWe can naturally separate this:\n\\begin{equation} \\frac{a}{c-by}dy = dx \\end{equation}\nAnd then we can finally take the integral on both sides:\n\\begin{equation} \\int \\frac{a}{c-by}dy = \\int dx \\end{equation}\nWait wait wait but why is this possible? Why is it that we can separate a \\(\\frac{dy}{dx}\\) such that \\(dy\\) and \\(dx\\) are isolatable? Remember:\n\\begin{equation} \\frac{dy}{dx} = \\lim_{h\\to 0} \\frac{y(x+h)-y(x)}{h} \\end{equation}\nnowhere are the differentials separable! Apparently Ted\u0026rsquo;s undergrads didn\u0026rsquo;t know this either. So here\u0026rsquo;s a reading on it.\nWhat if its non-seperable? See Linear Non-Seperable Equation\n","html":"\u003cp\u003eHere it is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\frac{dy}{dx} + by = c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some constants \\(a,b,c\\). The name is pretty obvious, because we have constants and the highest power on everything is \\(1\\). Its first-order because the derivative is only the first-order derivative.\u003c/p\u003e\n\u003ch2 id=\"linear--diffeq\"\u003elinear (diffeq)\u003c/h2\u003e\n\u003cp\u003eWe technically call it \u0026ldquo;linear\u0026rdquo; because: if there are two possible solutions \\(y_1(x)\\) \\(y_2(x)\\), a linear combination \\(Ay_1(x)+By_2(x)\\) should also be a solution.
Its \u0026ldquo;linear\u0026rdquo; because linear combinations work.\u003c/p\u003e\n\u003ch2 id=\"solving-separable-differential-equations\"\u003esolving separable differential equations\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#solving-separable-differential-equations\"\u003eseparable\u003c/a\u003e differential equation means that we can separate the derivative by itself and separate its two components. For the example above, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{dy}{dx} = \\frac{c-by}{a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can naturally separate this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{a}{c-by}dy = dx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then we can finally take the integral on both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{a}{c-by}dy = \\int dx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWait wait wait but why is this possible? Why is it that we can separate a \\(\\frac{dy}{dx}\\) such that \\(dy\\) and \\(dx\\) are isolatable? Remember:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{dy}{dx} = \\lim_{h\\to 0} \\frac{y(x+h)-y(x)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enowhere are the differentials separable! Apparently Ted\u0026rsquo;s undergrads didn\u0026rsquo;t know this either. \u003ca href=\"https://drive.google.com/file/d/1GWSagIMjXI0Awwy6wlQqsBm6c8tnCqcg/view?pli=1\"\u003eSo here\u0026rsquo;s a reading on it\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhat if its non-seperable? 
See \u003ca href=\"/posts/kbhlinear_non_seperable_equation/\"\u003eLinear Non-Seperable Equation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_constant_coefficient_equation/","tags":null,"title":"Linear Constant-Coefficient Equation"},{"categories":null,"contents":"Linear Dependence Lemma is AFAIK one of the more important results of elementary linear algebra.\nstatement Suppose \\(v_1, \\dots v_{m}\\) is an linearly dependent list in \\(V\\); then \\(\\exists j \\in \\{1, 2, \\dots m\\}\\) such that\u0026hellip;\n\\(v_{j} \\in span(v_1, \\dots, v_{j-1})\\) the span of the list constructed by removing \\(v_{j}\\) from \\(v_1, \\dots v_{m}\\) equals the span of \\(v_1, \\dots v_{m}\\) itself intuition: \u0026ldquo;in a linearly dependent list of vectors, one of the vectors is in the span of the previous ones, and we can throw it out without changing the span.\u0026rdquo;\nproof By definition of linear dependence, given the list \\((v_1, \\dots v_{m}\\)) is linearly dependent, there exists some not-all-zero \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) such that:\n\\begin{equation} a_1v_1+\\dots +a_{m}v_{m} = 0 \\end{equation}\nLet \\(a_{j}\\) be the last non-zero scalar in the expression (making the term actually exist). 
You can, in this circumstance, chuck everything to the right and divide by \\(a_{j}\\) to recover \\(v_{j}\\):\n\\begin{equation} v_{j}= -\\frac{a_1}{a_{j}} v_1 - \\dots -\\frac{a_{j-1}}{a_{j}}v_{j-1} \\end{equation}\nWe were able to construct \\(v_{j}\\) as a linear combination of \\(v_{1}, \\dots v_{j-1}\\), therefore:\n\\begin{equation} v_{j} \\in span(v_1, \\dots, v_{j-1}) \\end{equation}\nshowing \\((1)\\).\nFor \\(2\\), the intuition behind the proof is just that you can take that expression for \\(v_{j}\\) above to replace \\(v_{j}\\), therefore getting rid of one vector but still keeping the same span.\nFormally, \\(\\forall u \\in span(v_1, \\dots v_{m})\\), we can write it as some:\n\\begin{equation} u = c_1v_1 + \\dots c_{j}v_{j} + \\dots + c_{m}v_{m} \\end{equation}\nnow we replace \\(v_{j}\\) with the isolated expression for \\(v_{j}\\) above.\nException: if \\(j=1\\) and \\(v_1=0\\), note that you can just replace \\(v_1\\) with \\(0\\) without doing any special substitution.\nHaving written all arbitrary \\(u \\in span(v_1, \\dots v_{m})\\) as a linear combination of \\(v_1\\dots v_{m}\\) without \u0026hellip; \\(v_{j}\\), we see that the remaining vectors span the same space. \\(\\blacksquare\\)\nissue note that if we chose \\(j=1\\) in the above result, \\(v_1=0\\). Contrapositively, if \\(v_1 \\neq 0\\), \\(j\\neq 1\\). This is because of the fact that:\nif \\(j=1\\), the lemma tells us that \\(v_{1} \\in span(v_{1-1}) \\implies v_1 \\in span()\\). As per definition, the span of the empty set is \\(\\{0\\}\\). 
Therefore, \\(v_1 \\in \\{0\\} \\implies v_1=0\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e is AFAIK one of the more important results of elementary linear algebra.\u003c/p\u003e\n\u003ch2 id=\"statement\"\u003estatement\u003c/h2\u003e\n\u003cp\u003eSuppose \\(v_1, \\dots v_{m}\\) is an \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list in \\(V\\); then \\(\\exists j \\in \\{1, 2, \\dots m\\}\\) such that\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(v_{j} \\in span(v_1, \\dots, v_{j-1})\\)\u003c/li\u003e\n\u003cli\u003ethe \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of the list constructed by removing \\(v_{j}\\) from \\(v_1, \\dots v_{m}\\) equals the span of \\(v_1, \\dots v_{m}\\) itself\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eintuition: \u0026ldquo;in a \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list of vectors, one of the vectors is in the span of the previous ones, and we can throw it out without changing the span.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eproof\u003c/h2\u003e\n\u003cp\u003eBy definition of \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinear dependence\u003c/a\u003e, given the list \\((v_1, \\dots v_{m}\\)) is \u003ca href=\"/posts/kbhlinear_independence/#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e, there exists some not-all-zero \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1v_1+\\dots +a_{m}v_{m} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet \\(a_{j}\\) be the last non-zero scalar in the expression (making the term actually exist). 
You can, in this circumstance, chuck everything to the right and divide by \\(a_{j}\\) to recover \\(v_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j}= -\\frac{a_1}{a_{j}} v_1 - \\dots -\\frac{a_{j-1}}{a_{j}}v_{j-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe were able to construct \\(v_{j}\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v_{1}, \\dots v_{j-1}\\), therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv_{j} \\in span(v_1, \\dots, v_{j-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eshowing \\((1)\\).\u003c/p\u003e\n\u003cp\u003eFor \\(2\\), the intuition behind the proof is just that you can take that expression for \\(v_{j}\\) above to replace \\(v_{j}\\), therefore getting rid of one vector but still keeping the same \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFormally, \\(\\forall u \\in span(v_1, \\dots v_{m})\\), we can write it as some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = c_1v_1 + \\dots c_{j}v_{j} + \\dots + c_{m}v_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow we replace \\(v_{j}\\) with the isolated expression for \\(v_{j}\\) above.\u003c/p\u003e\n\u003cp\u003eException: if \\(j=1\\) and \\(v_1=0\\), note that you can just replace \\(v_1\\) with \\(0\\) without doing any special substitution.\u003c/p\u003e\n\u003cp\u003eHaving written all arbitrary \\(u \\in span(v_1, \\dots v_{m})\\) as a linear combination of \\(v_1\\dots v_{m}\\) \u003cem\u003ewithout\u003c/em\u003e \u0026hellip; \\(v_{j}\\), we see that the remaining vectors span the same space. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"issue\"\u003eissue\u003c/h2\u003e\n\u003cp\u003enote that if we chose \\(j=1\\) in the above result, \\(v_1=0\\). Contrapositively, if \\(v_1 \\neq 0\\), \\(j\\neq 1\\). 
This is because of the fact that:\u003c/p\u003e\n\u003cp\u003eif \\(j=1\\), the lemma tells us that \\(v_{1} \\in span(v_{1-1}) \\implies v_1 \\in span()\\). As per definition, the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of the empty set is \\(\\{0\\}\\). Therefore, \\(v_1 \\in \\{0\\} \\implies v_1=0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_dependence_lemma/","tags":null,"title":"Linear Dependence Lemma"},{"categories":null,"contents":"A linear map to numbers. Its very powerful because any linear functional can be represented as an inner product using Riesz Representation Theorem\nconstituents vector space \\(V\\) a linear map \\(\\varphi \\in \\mathcal{L}(V, \\mathbb{F})\\) requirements \\(\\varphi\\) is called a linear functional on \\(V\\) if \\(\\varphi: V \\to \\mathbb{F}\\). That is, it maps elements of \\(V\\) to scalars. For instance, every inner product is a Linear Map to scalars and hence a linear functional.\nadditional information Riesz Representation Theorem Suppose \\(V\\) is finite-dimensional, and \\(\\varphi\\) is a linear functional on \\(V\\); then, there exists an unique \\(u \\in V\\) such that:\n\\begin{equation} \\varphi(v) = \\langle v,u \\rangle \\end{equation}\n\\(\\forall v \\in V\\). Kinda a mindblowing fact.\nProof:\nEvery Inner Product Space has an orthonormal basis; let \\(e_1, \u0026hellip;e_{n}\\) be an orthonormal basis of \\(V\\). 
Recall there\u0026rsquo;s a specific way of writing a vector as a linear combination of orthonormal basis, that WLOG \\(v \\in V\\):\n\\begin{equation} v = \\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n} \\end{equation}\nNow:\n\\begin{equation} \\varphi(v) = \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\end{equation}\nGiven homogenity and addtivity, we then have:\n\\begin{align} \\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\ \u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n) \\end{align}\nNow, shoving \\(\\varphi\\) into the second slot (remember we have conjugate homogenity on the secon slot), and adding it all together (as inner products are additive in both slots):\n\\begin{align} \\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\ \u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n) \\\\ \u0026amp;= \\langle v, \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n \\rangle \\end{align}\nYou will note now that the second slot to this inner product is v-independent! So as long as we know the orthonormal basis we can encode \\(\\varphi\\) with:\n\\begin{equation} u = \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n \\end{equation}\nand:\n\\begin{equation} \\varphi(v) = \\langle v, u \\rangle \\end{equation}\nNow, to show uniqueness, we probably do the same damned thing we have a million times:\nSuppose:\n\\begin{equation} \\varphi(v) = \\langle v,u_1 \\rangle = \\langle v,u_{2} \\rangle \\end{equation}\nholds for all \\(v \\in V\\), as required by the theorem.\nThis means that:\n\\begin{equation} \\langle v, u_1-u_2 \\rangle = 0 \\end{equation}\nFor every \\(v \\in V\\). Let \\(v = u_1-u_2\\). 
Now by definiteness we have \\(u_1-u_2=0\\) meaning \\(u_1=u_2\\) as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eA linear map to numbers. Its very powerful because any \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e can be represented as an \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e using \u003ca href=\"#riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003evector space \\(V\\)\u003c/li\u003e\n\u003cli\u003ea linear map \\(\\varphi \\in \\mathcal{L}(V, \\mathbb{F})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(\\varphi\\) is called a \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e on \\(V\\) if \\(\\varphi: V \\to \\mathbb{F}\\). That is, it maps elements of \\(V\\) to scalars. For instance, every \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e to scalars and hence a \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"riesz-representation-theorem\"\u003eRiesz Representation Theorem\u003c/h3\u003e\n\u003cp\u003eSuppose \\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e, and \\(\\varphi\\) is a \u003ca href=\"/posts/kbhlinear_functional/\"\u003elinear functional\u003c/a\u003e on \\(V\\); then, there exists an \u003cstrong\u003eunique\u003c/strong\u003e \\(u \\in V\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\langle v,u \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\forall v \\in V\\). 
Kinda a mindblowing fact.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgram_schmidt/#every-id-4a788e29-a3e9-4c13-8c97-08746878966e-inner-product-space-has-an-id-2a1eecb2-f23a-469f-a860-b561a9197906-orthonormal-basis\"\u003eEvery Inner Product Space has an orthonormal basis\u003c/a\u003e; let \\(e_1, \u0026hellip;e_{n}\\) be an \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e of \\(V\\). Recall there\u0026rsquo;s a specific way of \u003ca href=\"/posts/kbhorthonormal_basis/#writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003ewriting a vector as a linear combination of orthonormal basis\u003c/a\u003e, that WLOG \\(v \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = \\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven homogenity and addtivity, we then have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\\n\u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, shoving \\(\\varphi\\) into the second slot (remember we have conjugate homogenity on the secon slot), and adding it all together (as inner products are additive in both slots):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\varphi(v) \u0026amp;= \\varphi(\\langle v, e_1 \\rangle e_1 + \\dots \\langle v, e_{n} \\rangle e_{n}) \\\\\n\u0026amp;= \\langle v, e_1 \\rangle \\varphi(e_1) + \\dots + \\langle v, e_n \\rangle \\varphi(e_n) \\\\\n\u0026amp;= \\langle v, \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n 
\\rangle\n\\end{align}\u003c/p\u003e\n\u003cp\u003eYou will note now that the second slot to this \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is \u003cstrong\u003ev-independent!\u003c/strong\u003e So as long as we know the \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e we can encode \\(\\varphi\\) with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = \\overline{\\varphi(e_1)} e_1 + \\dots + \\overline{\\varphi(e_n)}e_n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\langle v, u \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, to show uniqueness, we probably do the same damned thing we have a million times:\u003c/p\u003e\n\u003cp\u003eSuppose:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\varphi(v) = \\langle v,u_1 \\rangle = \\langle v,u_{2} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eholds for all \\(v \\in V\\), as required by the theorem.\u003c/p\u003e\n\u003cp\u003eThis means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, u_1-u_2 \\rangle = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor every \\(v \\in V\\). Let \\(v = u_1-u_2\\). Now by definiteness we have \\(u_1-u_2=0\\) meaning \\(u_1=u_2\\) as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_functional/","tags":null,"title":"linear functional"},{"categories":null,"contents":"Suppose you have continuous random variables \\(X,Y\\), you can use one to seed the value and the other to change the Gaussian distribution:\n\\begin{equation} p(x\\mid y) = \\mathcal{N}(x \\mid my + b, \\sigma^{2}) \\end{equation}\n","html":"\u003cp\u003eSuppose you have continuous \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es \\(X,Y\\), you can use one to seed the value and the other to change the \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x\\mid y) = \\mathcal{N}(x \\mid my + b, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_gaussian_model/","tags":null,"title":"linear gaussian model"},{"categories":null,"contents":"A linearly independent list is a list of vectors such that there is one unique choice of scalars to be able to construct each member of their span.\nBased on the same technique as in the proof that a sum of subsets is a direct sum IFF there is only one way to write \\(0\\), we can show that in a linearly independent list, there is (IFF) only one way to write the zero vector as a linear combination of that list of vectors \u0026mdash;namely, the trivial representation of taking each vector to \\(0\\). 
In fact, we will actually use that as the formal definition of linear independence.\nThis definition of linear independence is the result of the definition for direct sum.\nSee also Linear Dependence Lemma.\nconstituents A list of vectors \\(v_1, \\dots, v_{m}\\) in \\(V\\) requirements Formally, a linearly independent list is defined by there being only one choice of scalars \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) to write \\(0\\) as a linear combination of \\(v_{1},\\dots, v_{m}\\): namely, by taking each \\(a_1, \\dots a_{m}\\) to \\(0\\).\nWe also declare \\(()\\) to be linearly independent.\nadditional information linearly dependent a list is linearly dependent if\u0026hellip;. its not linearly independent.\noh. my. god.\nBased on the same formal definition, this means that a linearly dependent list is defined by the fact that there can be more than one way of writing \\(0\\) as a linear combination of that list of vectors, where one of the ways makes it so that writing \\(0\\) does not require all zero scalars.\nlength of linearly-independent list \\(\\leq\\) length of spanning list A linearly independent list should be smaller or equal in length to a spanning list.\nThe canonical proof is one by induction.\nSuppose \\(u_1, \\dots u_{m}\\) is an linearly independent list in \\(V\\). Take also a list \\(w_1, \\dots w_{n}\\) spans \\(V\\). We desire that \\(m\\leq n\\). We create a list of length \\(n\\) containing all of the \\(w\\) thus far. Our invariant is that \\(len(B) = n\\). This proof essentially uses Floyd\u0026rsquo;s Invariant Method (compsci topic for Jack\u0026rsquo;s understanding only.)\nbase case take the spanning list of \\(V\\) we declared named \\(w_1, \\dots w_{n}\\). Given it spans, adding any other vector in \\(V\\), if \\(w_1, \\dots w_{n}\\) isn\u0026rsquo;t already linearly dependent, will make it linearly dependent. 
This is because you can write the new vector \\(v \\in V\\) which you add as a linear combination of the previous vectors already as they already span \\(V\\).\nBy the Linear Dependence Lemma, you can remove one of the vectors in the new linearly dependent list while keeping the list still spanning \\(V\\).\nNow, construct the list:\n\\begin{equation} u_1, w_1, \\dots w_{n} \\end{equation}\nwhere, \\(u_{1} \\in V\\) is taken from that linearly independent list in \\(V\\). By the statement above, via applying the Linear Dependence Lemma, we can create a list that spans the same space by taking away one of the \\(w_{j}\\) (we can\u0026rsquo;t take \\(u_1\\) because it is at the first position, and we can\u0026rsquo;t grantee its $0$\u0026mdash;see the issue with the Linear Dependence Lemma). We now have a list \\(B\\) with length \\(n\\) with \\(u_1\\) and the rest of the \\(w\\) not taken away which span \\(V\\)\ncase number \\(j\\) Given a spanning list \\(B\\) of \\(V\\) with length \\(n\\), with some parts \\(u_1, \\dots, u_{j-1}, w_{j}, \\dots w_{n}\\). We now include \\(u_{j}\\) in the list, placing it after \\(u_{j-1}\\). As the list pre-inclusion is already a spanning list of \\(V\\), any new vectors from \\(V\\) added will necessarily be able to be written as a linear combination of the other vectors already in the list. Therefore, we know that\u0026mdash;if not already pre-inclusion\u0026mdash;the list is linearly dependent.\nBecause the first half (\\(u_1,\\dots u_{j}\\)) of this new list is linearly independent (given), the bit that \u0026ldquo;causes\u0026rdquo; the linear dependence is in the \\(w\\) (i.e. each \\(u\\) cannot be written by other \\(u\\).) 
Therefore, we can say that the first condition of Linear Dependence Lemma allows us to remove one of the \\(w\\) while spanning the same space, creating again a spanning list of length \\(n\\).\ninduction repeat the procedure \\(m\\) times, resulting in all the \\(u_{j}\\) being included in our new list \\(B\\) of length still \\(n\\). Given we contained a list of length \\(m\\) in a list of length \\(n\\), \\(m \\leq n\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list is a list of vectors such that there is one unique choice of scalars to be able to construct each member of their \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBased on the same technique as in the proof that \u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-there-is-only-one-way-to-write-0\"\u003ea sum of subsets is a direct sum IFF there is only one way to write \\(0\\)\u003c/a\u003e, we can show that in a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e, there is (IFF) only one way to write the zero vector as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of that \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of vectors \u0026mdash;namely, the trivial representation of taking each vector to \\(0\\). 
In fact, we will actually use that as the formal definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis definition of \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinear independence\u003c/a\u003e is the \u003cem\u003eresult\u003c/em\u003e of the definition for \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA list of vectors \\(v_1, \\dots, v_{m}\\) in \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eFormally, a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list is defined by there being only one choice of scalars \\(a_1, \\dots, a_{m} \\in \\mathbb{F}\\) to write \\(0\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of \\(v_{1},\\dots, v_{m}\\): namely, by taking each \\(a_1, \\dots a_{m}\\) to \\(0\\).\u003c/p\u003e\n\u003cp\u003eWe also declare \\(()\\) to be \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"linearly-dependent\"\u003elinearly dependent\u003c/h3\u003e\n\u003cp\u003ea list is \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e if\u0026hellip;. its not \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eoh. my. 
god.\u003c/p\u003e\n\u003cp\u003eBased on the same formal definition, this means that a \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list is defined by the fact that there can be more than one way of writing \\(0\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of that \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es, where one of the ways makes it so that writing \\(0\\) does not require all zero scalars.\u003c/p\u003e\n\u003ch3 id=\"length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list should be smaller or equal in length to a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list.\u003c/p\u003e\n\u003cp\u003eThe canonical proof is one by induction.\u003c/p\u003e\n\u003cp\u003eSuppose \\(u_1, \\dots u_{m}\\) is an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\). Take also a list \\(w_1, \\dots w_{n}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(V\\). We desire that \\(m\\leq n\\). We create a list of length \\(n\\) containing all of the \\(w\\) thus far. Our invariant is that \\(len(B) = n\\). This proof essentially uses \u003ca href=\"/posts/kbhfloyd_s_invariant_method/\"\u003eFloyd\u0026rsquo;s Invariant Method\u003c/a\u003e (compsci topic for Jack\u0026rsquo;s understanding only.)\u003c/p\u003e\n\u003ch4 id=\"base-case\"\u003ebase case\u003c/h4\u003e\n\u003cp\u003etake the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\) we declared named \\(w_1, \\dots w_{n}\\). 
Given it spans, adding any other vector in \\(V\\), if \\(w_1, \\dots w_{n}\\) isn\u0026rsquo;t already \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e, will make it \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e. This is because you can write the new vector \\(v \\in V\\) which you add as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the previous \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es already as they already span \\(V\\).\u003c/p\u003e\n\u003cp\u003eBy the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, you can remove one of the \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in the new \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e list while keeping the list still \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e \\(V\\).\u003c/p\u003e\n\u003cp\u003eNow, construct the list:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_1, w_1, \\dots w_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(u_{1} \\in V\\) is taken from that \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\). By the statement above, via applying the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e, we can create a list that \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003es the same space by taking away one of the \\(w_{j}\\) (we can\u0026rsquo;t take \\(u_1\\) because it is at the first position, and we can\u0026rsquo;t guarantee it\u0026rsquo;s \\(0\\)\u0026mdash;see the \u003ca href=\"/posts/kbhlinear_dependence_lemma/#issue\"\u003eissue\u003c/a\u003e with the \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e). 
We now have a list \\(B\\) with length \\(n\\) with \\(u_1\\) and the rest of the \\(w\\) not taken away which span \\(V\\)\u003c/p\u003e\n\u003ch4 id=\"case-number-j\"\u003ecase number \\(j\\)\u003c/h4\u003e\n\u003cp\u003eGiven a spanning list \\(B\\) of \\(V\\) with length \\(n\\), with some parts \\(u_1, \\dots, u_{j-1}, w_{j}, \\dots w_{n}\\). We now include \\(u_{j}\\) in the list, placing it after \\(u_{j-1}\\). As the list pre-inclusion is already a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\), any new \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es from \\(V\\) added will necessarily be able to be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the other vectors already in the list. Therefore, we know that\u0026mdash;if not already pre-inclusion\u0026mdash;the list is \u003ca href=\"#linearly-dependent\"\u003elinearly dependent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBecause the first half (\\(u_1,\\dots u_{j}\\)) of this new list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e (given), the bit that \u0026ldquo;causes\u0026rdquo; the linear dependence is in the \\(w\\) (i.e. each \\(u\\) cannot be written by other \\(u\\).) Therefore, we can say that the first condition of \u003ca href=\"/posts/kbhlinear_dependence_lemma/\"\u003eLinear Dependence Lemma\u003c/a\u003e allows us to remove one of the \\(w\\) while \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e the same space, creating again a \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of length \\(n\\).\u003c/p\u003e\n\u003ch4 id=\"induction\"\u003einduction\u003c/h4\u003e\n\u003cp\u003erepeat the procedure \\(m\\) times, resulting in all the \\(u_{j}\\) being included in our new list \\(B\\) of length still \\(n\\). 
Given we contained a list of length \\(m\\) in a list of length \\(n\\), \\(m \\leq n\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_independence/","tags":null,"title":"linear independence"},{"categories":null,"contents":"A Linear Map (a.k.a. Linear Transformation) is a function which maps elements between two vector space that follows linear properties.\nconstituents vector spaces \\(V\\) and \\(W\\) (they don\u0026rsquo;t have to be subspaces) A function \\(T: V \\to W\\) (when we put something in, it only goes to one place) requirements \\(T\\) is considered a Linear Map if it follows\u0026hellip; (properties of \u0026ldquo;linearity\u0026rdquo;)\nadditivity \\begin{equation} T(u+v) = Tu+Tv,\\ \\forall u,v \\in V \\end{equation}\nhomogeneity \\begin{equation} T(\\lambda v) = \\lambda (Tv),\\ \\forall \\lambda \\in \\mathbb{F}, v \\in V \\end{equation}\nadditional information note on notation The \u0026ldquo;application\u0026rdquo; of a Linear Map \\(T\\) on a vector \\(V\\) is written as:\n\\begin{equation} \\begin{cases} Tv \\\\ T(v) \\end{cases} \\end{equation}\nboth are acceptable.\n\\(\\mathcal{L}(V,W)\\) The set of all Linear Maps from \\(V\\) to \\(W\\) is denoted as \\(\\mathcal{L}(V,W)\\).\nsome fun linear maps zero There is, of course, the Linear Map that maps everything to the \\(0\\) \u0026mdash; as the zero exists in all vector spaces.\nThat is:\n\\begin{equation} 0 \\in \\mathcal{L}(V,W) \\implies 0v = 0, v\\in V \\end{equation}\nadditivity\nLet \\(v_1+v_2= v \\in V\\).\n\\begin{equation} 0(v_1+v_2) = 0(v) = 0 = 0+0 = 0v_1+0v_2 \\end{equation}\nhomogeneity\nLet \\(\\lambda v = u \\in V\\).\n\\begin{equation} 0(\\lambda v) = 0(u) = 0 = \\lambda 0 = \\lambda 0v \\end{equation}\nidentity Another classic. \\(I\\), the identity map, is denoted as (for some \\(v \\in V\\) and \\(I \\in \\mathcal{L}(V,V)\\)):\n\\begin{equation} Iv = v \\end{equation}\ni.e. 
it does nothing\nadditivity\nLet \\(v_1,v_2 \\in V\\):\n\\begin{equation} I(v_1+v_2) = v_1+v_2 = Iv_1+Iv_2 \\end{equation}\nhomogeneity\n\\begin{equation} I(\\lambda v) = \\lambda v = \\lambda Iv \\end{equation}\nany map from \\(\\mathbb{F}^{n}\\) to \\(\\mathbb{F}^{m}\\) turns out any map that follows a specific pattern of polynomials between two vector spaces are Linear Maps.\nDefine some two vector spaces \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{m}\\), some set of scalars \\(a_{jk} \\in \\mathbb{F}: j=1, \\dots m; k=1, \\dots n\\).\nWe construct \\(T \\in \\mathcal{L}(\\mathbb{F}^{n}, \\mathbb{F}^{m})\\) by: \\(T(x_1, \\dots x_{n}) = a_{11} x_1+ \\cdots + a_{1n} x_{n}, \\dots, a_{m1} x_1 + \\cdots + a_{mn} x_{n}\\) (i.e. a combination of linear combinations).\nadditivity\nLet \\(x,y \\in \\mathbb{F}^{n}\\), with \\(x_{j}\\) being each coordinate of \\(x\\) and the same goes for \\(y\\).\n\\begin{align} T((x_1, \\dots x_{n}) + (y_1, \\dots y_{n})) \u0026amp;= T(x_1+y_1 \\dots x_{n}+y_{n}) \\\\ \u0026amp;= a_{11}(x_1+y_1) + \\cdots, \\dots, \\cdots + a_{mn} (x_{n} + y_{n}) \\\\ \u0026amp;= (a_{11}x_1 + a_{11}y_{1}) + \\cdots, \\dots, \\cdots + (a_{mn} x_{n} + a_{mn} y_{n}) \\\\ \u0026amp;= (a_{11}x_1 + \\cdots) + (a_{11}y_{1}+ \\cdots ), \\dots, (\\cdots + a_{mn}x_n) + (\\cdots + a_{mn}y_{n}) \\\\ \u0026amp;= ((a_{11}x_1 + \\cdots), \\dots, (\\cdots + a_{mn}x_n)) + ((a_{11}y_{1}+ \\cdots ), \\dots,(\\cdots + a_{mn}y_{n})) \\\\ \u0026amp;= T (x_1, \\dots, x_{n}) + T (y_1, \\dots, y_{n}) \\end{align}\nhomogeneity\nProof left to the reader. Pretty much just expand and more algebra.\nmatricies to encode Linear Map we can use matricies to represent Linear Maps. 
See matrix of Linear Map\n\u0026ldquo;basis of domain\u0026rdquo; This result tells us that we can find a Linear Map for wherever we want to take the basis of a vector space, and that a Linear Map\u0026rsquo;s behavior on basis uniquely determines that Linear Map.\nSee basis of domain.\naddition and scalar multiplication on \\(\\mathcal{L}(V,W)\\) Suppose \\(S,T \\in \\mathcal{L}(V,W); \\lambda \\in \\mathbb{F}\\).\n\u0026ldquo;Sum\u0026rdquo; and \u0026ldquo;Product\u0026rdquo; are defined in the way that one would expect:\n\\begin{equation} (S+T)(v) = Sv+Tv \\end{equation}\nand\n\\begin{equation} (\\lambda T)(v) = \\lambda (Tv) \\end{equation}\nfor all \\(v \\in V\\).\nThese two operations make \\(\\mathcal{L}(V,W)\\) a vector space (\\(1Tv = Tv\\), \\(0+Tv=Tv\\), \\(Tv + (-1)Tv = 0\\), associativity, commutativity, distributive inherits from \\(V\\).)\nlinear maps take \\(0\\) to \\(0\\) We desire that \\(T(0) = 0\\) for any linear map \\(T\\)\nProof:\n\\begin{equation} T(0) = T(0+0) \\end{equation}\nThen, by additivity:\n\\begin{equation} T(0) = T (0 + 0) = T (0) + T (0) \\end{equation}\nGiven \\(\\mathcal{L}(V,W)\\) is a vector space for any \\(V,W\\), \\(\\exists -T(0)\\) such that \\(T(0)+(-T(0)) = 0\\). Applying that here:\n\\begin{equation} T(0) = T(0)+T(0) \\implies T(0) -T(0) = T(0)+T(0)-T(0) \\implies 0 = T(0) \\end{equation}\nProduct of Linear Maps See Product of Linear Maps\n\u0026ldquo;sizes\u0026rdquo; of maps map to smaller space is not injective Suppose \\(V,W\\) are finite-dimensional vector spaces, and \\(\\dim V \u0026gt; \\dim W\\). Then, all \\(T \\in \\mathcal{L}(V,W)\\) are not injective.\nWe first have that:\n\\begin{align} \u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T \\end{align}\nrecall at this point that \\(\\dim range\\ T \\leq \\dim W\\) (the range is a subspace of the codomain.) Therefore, subtracting a bigger value means that the value will be smaller. 
So we have that:\n\\begin{align} \u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim null\\ T \\geq \\dim V - \\dim W \\end{align}\nNow, recall that \\(\\dim V \u0026gt; \\dim W\\). Therefore, \\(\\dim V - \\dim W\\) is strictly bigger than \\(0\\). So:\n\\begin{align} \\dim null\\ T \u0026amp;\\geq \\dim V - \\dim W \\\\ \u0026amp;\u0026gt; 0 \\end{align}\nAnd so, the dimension of the null space of \\(T\\) is not \\(0\\). Therefore, the null space of \\(T\\) can\u0026rsquo;t have been \\(\\{0\\}\\) because that does have dimension \\(0\\). This makes the map not injective because injectivity implies that null space is \\(\\{0\\}\\)\nmap to bigger space is not surjective It\u0026rsquo;s basically the same thing as the one above. Suppose \\(V,W\\) are finite-dimensional vector spaces, and \\(\\dim V \u0026lt; \\dim W\\). Then, all \\(T \\in \\mathcal{L}(V,W)\\) are not surjective.\nWe first have that:\n\\begin{align} \u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\end{align}\nBecause the dimension of \\(null\\ T\\) is at least \\(0\\) (or, for that matter, the dimension of anything), \\(\\dim V - \\dim\\ null\\ T \\leq \\dim\\ V\\). Hence:\n\\begin{align} \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V \\end{align}\nNow, recall that \\(\\dim V \u0026lt; \\dim W\\).\n\\begin{align} \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\ \\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V \u0026lt; \\dim W \\end{align}\nGiven the range of \\(T\\) is smaller than the codomain of \\(T\\), they cannot be equal spaces. So, \\(T\\) is not surjective.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e (a.k.a. 
\u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Transformation\u003c/a\u003e) is a \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e which maps elements between two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e that follows linear properties.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(V\\) and \\(W\\) (they \u003cspan class=\"underline\"\u003edon\u0026rsquo;t\u003c/span\u003e have to be \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es)\u003c/li\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(T: V \\to W\\) (when we put something in, it only goes to one place)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(T\\) is considered a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e if it follows\u0026hellip; (properties of \u0026ldquo;linearity\u0026rdquo;)\u003c/p\u003e\n\u003ch3 id=\"additivity\"\u003eadditivity\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT(u+v) = Tu+Tv,\\ \\forall u,v \\in V\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"homogeneity--kbhhomogeneity-dot-md\"\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT(\\lambda v) = \\lambda (Tv),\\ \\forall \\lambda \\in \\mathbb{F}, v \\in V\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"note-on-notation\"\u003enote on notation\u003c/h3\u003e\n\u003cp\u003eThe \u0026ldquo;application\u0026rdquo; of a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T\\) on a vector \\(V\\) is written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nTv 
\\\\\nT(v)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eboth are acceptable.\u003c/p\u003e\n\u003ch3 id=\"mathcal-l--v-w\"\u003e\\(\\mathcal{L}(V,W)\\)\u003c/h3\u003e\n\u003cp\u003eThe set of all \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es from \\(V\\) to \\(W\\) is denoted as \\(\\mathcal{L}(V,W)\\).\u003c/p\u003e\n\u003ch3 id=\"some-fun-linear-maps\"\u003esome fun linear maps\u003c/h3\u003e\n\u003ch4 id=\"zero--kbhzero-dot-md\"\u003e\u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eThere is, of course, the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e that maps everything to the \\(0\\) \u0026mdash; as the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e exists in all \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\in \\mathcal{L}(V,W) \\implies 0v = 0, v\\in V\n\\end{equation}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eadditivity\u003c/p\u003e\n\u003cp\u003eLet \\(v_1+v_2= v \\in V\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0(v_1+v_2) = 0(v) = 0 = 0+0 = 0v_1+0v_2\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eLet \\(\\lambda v = u \\in V\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0(\\lambda v) = 0(u) = 0 = \\lambda 0 = \\lambda 0v\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"identity--kbhidentity-dot-md\"\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eAnother classic. 
\\(I\\), the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map, is denoted as (for some \\(v \\in V\\) and \\(I \\in \\mathcal{L}(V,V)\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nIv = v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. it does nothing\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eadditivity\u003c/p\u003e\n\u003cp\u003eLet \\(v_1,v_2 \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(v_1+v_2) = v_1+v_2 = Iv1+Iv2\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(\\lambda v) = \\lambda v = \\lambda Iv\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"any-map-from-mathbb-f-n-to-mathbb-f-m\"\u003eany map from \\(\\mathbb{F}^{n}\\) to \\(\\mathbb{F}^{m}\\)\u003c/h4\u003e\n\u003cp\u003eturns out any map that follows a specific pattern of \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es between two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es are \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eDefine some two \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es \\(\\mathbb{F}^{n}\\) and \\(\\mathbb{F}^{m}\\), some set of scalars \\(a_{jk} \\in \\mathbb{F}: j=1, \\dots m; k=1, \\dots n\\).\u003c/p\u003e\n\u003cp\u003eWe construct \\(T \\in \\mathcal{L}(\\mathbb{F}^{n}, \\mathbb{F}^{m})\\) by: \\(T(x_1, \\dots x_{n}) = a_{11} x_1+ \\cdots + a_{1n} x_{n}, \\dots, a_{m1} x_1 + \\cdots + a_{mn} x_{n}\\) (i.e. 
a combination of \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es).\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eadditivity\u003c/p\u003e\n\u003cp\u003eLet \\(x,y \\in \\mathbb{F}^{n}\\), with \\(x_{j}\\) being each \u003ca href=\"/posts/kbhlists_over_fields/\"\u003ecoordinate\u003c/a\u003e of \\(x\\) and the same goes for \\(y\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nT((x_1, \\dots x_{n}) + (y_1, \\dots y_{n})) \u0026amp;= T(x_1+y_1 \\dots x_{n}+y_{n}) \\\\\n\u0026amp;= a_{11}(x_1+y_1) + \\cdots, \\dots, \\cdots + a_{mn} (x_{n} + y_{n}) \\\\\n\u0026amp;= (a_{11}x_1 + a_{11}y_{1}) + \\cdots, \\dots, \\cdots + (a_{mn} x_{n} + a_{mn} y_{n}) \\\\\n\u0026amp;= (a_{11}x_1 + \\cdots) + (a_{11}y_{1}+ \\cdots ), \\dots, (\\cdots + a_{mn}x_n) + (\\cdots + a_{mn}y_{n}) \\\\\n\u0026amp;= ((a_{11}x_1 + \\cdots), \\dots, (\\cdots + a_{mn}x_n)) + ((a_{11}y_{1}+ \\cdots ), \\dots,(\\cdots + a_{mn}y_{n})) \\\\\n\u0026amp;= T (x_1, \\dots, x_{n}) + T (y_1, \\dots, y_{n})\n\\end{align}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eProof left to the reader. Pretty much just expand and more algebra.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"matricies--kbhmatricies-dot-md--to-encode-linear-map--kbhlinear-map-dot-md\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e to encode \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003ewe can use \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e to represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es. 
See \u003ca href=\"/posts/kbhmatricies/#matrix-of-id-17f3b01c-4945-4268-8da4-9887d960596b-linear-map\"\u003ematrix of Linear Map\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"basis-of-domain--kbhbasis-of-domain-dot-md\"\u003e\u0026ldquo;\u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eThis result tells us that we can find a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e for wherever we want to take the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e, and that a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e\u0026rsquo;s behavior on \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e uniquely determines that \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"addition-and-scalar-multiplication-on-mathcal-l--v-w\"\u003eaddition and scalar multiplication on \\(\\mathcal{L}(V,W)\\)\u003c/h3\u003e\n\u003cp\u003eSuppose \\(S,T \\in \\mathcal{L}(V,W); \\lambda \\in \\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Sum\u0026rdquo; and \u0026ldquo;Product\u0026rdquo; are defined in the way that one would expect:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(S+T)(v) = Sv+Tv\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda T)(v) = \\lambda (Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eThese two operations make \\(\\mathcal{L}(V,W)\\) a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e (\\(1Tv = Tv\\), \\(0+Tv=Tv\\), \\(Tv + (-1)Tv = 0\\), associativity, commutativity, distributive inherits from \\(V\\).)\u003c/p\u003e\n\u003ch3 id=\"linear-maps-take-0-to-0\"\u003elinear maps 
take \\(0\\) to \\(0\\)\u003c/h3\u003e\n\u003cp\u003eWe desire that \\(T(0) = 0\\) for any linear map \\(T\\)\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(0) = T(0+0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, by additivity:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(0) = T (0 + 0) = T (0) + T (0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(\\mathcal{L}(V,W)\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e for any \\(V,W\\), \\(\\exists -T(0)\\) such that \\(T(0)+(-T(0)) = 0\\). Applying that here:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(0) = T(0)+T(0) \\implies T(0) -T(0) = T(0)+T(0)-T(0) \\implies 0 = T(0)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"product-of-linear-maps\"\u003eProduct of Linear Maps\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhproduct_of_linear_maps/\"\u003eProduct of Linear Maps\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"sizes-of-maps\"\u003e\u0026ldquo;sizes\u0026rdquo; of maps\u003c/h3\u003e\n\u003ch4 id=\"map-to-smaller-space-is-not-injective--kbhinjectivity-dot-md\"\u003emap to smaller space is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eSuppose \\(V,W\\) are \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003es, and \\(\\dim V \u0026gt; \\dim W\\). 
Then, all \\(T \\in \\mathcal{L}(V,W)\\) are \u003cstrong\u003enot\u003c/strong\u003e \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe first have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T\n\\end{align}\u003c/p\u003e\n\u003cp\u003erecall at this point that \\(\\dim range\\ T \\leq \\dim W\\) (the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the codomain.) Therefore, subtracting a bigger value means that the value will be smaller. So we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dim null\\ T = \\dim V - \\dim range\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim null\\ T \\geq \\dim V - \\dim W\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, recall that \\(\\dim V \u0026gt; \\dim W\\). Therefore, \\(\\dim V - \\dim W\\) is strictly bigger than \\(0\\). So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim null\\ T \u0026amp;\\geq \\dim V - \\dim W \\\\\n\u0026amp;\u0026gt; 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd so, the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) is not \\(0\\). Therefore, the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) can\u0026rsquo;t have been \\(\\{0\\}\\) because that does have \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e \\(0\\). 
This makes the map not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e because \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity implies that null space is \\(\\{0\\}\\)\u003c/a\u003e\u003c/p\u003e\n\u003ch4 id=\"map-to-bigger-space-is-not-surjective--kbhsurjectivity-dot-md\"\u003emap to bigger space is not \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eIt\u0026rsquo;s basically the same thing as the one above. Suppose \\(V,W\\) are \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003es, and \\(\\dim V \u0026lt; \\dim W\\). Then, all \\(T \\in \\mathcal{L}(V,W)\\) are \u003cstrong\u003enot\u003c/strong\u003e \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe first have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dim V = \\dim null\\ T + \\dim range\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBecause the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of \\(null\\ T\\) is at least \\(0\\) (or, for that matter, the \u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of anything), \\(\\dim V - \\dim\\ null\\ T \\leq \\dim\\ V\\). 
Hence:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, recall that \\(\\dim V \u0026lt; \\dim W\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dim range\\ T = \\dim V - \\dim null\\ T \\\\\n\\Rightarrow\\ \u0026amp; \\dim range\\ T \\leq \\dim V \u0026lt; \\dim W\n\\end{align}\u003c/p\u003e\n\u003cp\u003eGiven the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e of \\(T\\) is smaller than the codomain of \\(T\\), they cannot be equal spaces. So, \\(T\\) is not \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_map/","tags":null,"title":"Linear Map"},{"categories":null,"contents":"general form of First-Order Differential Equations This will depend on both unknown function \\(x\\), and the independent variable \\(t\\). These could and could not be separable.\n\\begin{equation} \\dv{x}{t} = F(t,x),\\ x(t_{0}) = x_{0} \\end{equation}\nLet\u0026rsquo;s imagine \\(F\\) is \u0026ldquo;bounded\u0026rdquo; and \u0026ldquo;continuous\u0026rdquo; on \\(I \\times \\omega\\), where \\(I\\) is an open interval about \\(t_{0}\\) and \\(\\omega\\) is an open subset of \\(\\mathbb{R}^{n}\\), containing \\(x_{0}\\). 
\\(F\\) is bounded; the results are bounded??\nfunctions embedded in vector spaces We understand that such First-Order Differential Equations will describe a subset of an infinite dimensional vector space.\nGiven we are dealing with First-Order Differential Equations, each function is a basis (if linear, otherwise, not quite the basis) of the subspace of the larger vector space; \\(+C\\) is how you create parameterized variations However, our function is not linear, not all functions would suffice here: non-linear equations are difficult to deal with because the arc length follows a certain pattern General form of a first order linear differential equation A general linear, first-order, first-degree differential equation of the form:\n\\begin{equation} \\dv{y}{x} + P(x)y = Q(x) \\end{equation}\nhas a solution:\n\\begin{equation} y(x) = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x} \\end{equation}\nthe more general solution (for definite integrals):\n\\begin{equation} x(t) = e^{-A(t)}x_{0} + e^{-A(t)}\\int_{t_{0}}^{t}e^{A(s)}b(s)\\dd{s} \\end{equation}\ngiven the initial condition that \\(x(0) = 0\\). This is from the textbook.\nBefore you go ham and start solving, though, make sure that pesky \\(y\\) term is actually there. If it\u0026rsquo;s not, you may be better served using the separable methods to solve these things.\nthis is bad This is difficult to deal with! What?? How?? Why does this work?? 
See below.\nsolving differential equations The following technique works for ALL first-order linear differential equations:\nTo solve, first put your equation into the standard form:\n\\begin{equation} \\frac{dy}{dx} + P(x)y = Q(x) \\end{equation}\nIf you have an equation like:\n\\begin{equation} a(x) \\dv{y}{x} + b(x)y = c(x) \\end{equation}\na good way to do this is to apply \\(\\frac{1}{a(x)}\\) to both sides, resulting in:\n\\begin{equation} \\dv{y}{x} + \\frac{b(x)}{a(x)} y = \\frac{c(x)}{a(x)} \\end{equation}\nAnd then you can carry on solving like its an equation in standard form.\nTo solve such a generic equation, we here are trying to UNDO the product rule.\nWe first multiply the entire expression by something called the intergrating factor \\(\\rho(x)\\).\n\\begin{equation} \\rho(x) \\left(\\frac{dy}{dx} + P(x)y\\right) = \\rho(x)Q(x) \\end{equation}\nA note on how this \\(\\rho(x)\\) works. This intergrating factor is actually defined with the following rule:\n\\begin{equation} \\log (\\rho (x)) = \\int P(x) \\dd{x} \\end{equation}\n(notably, \\(\\log\\) is actually \\(\\ln\\) in this case.)\nWhy so weird of an expression? This all springs from the fact that \\(\\dv x e^{x} = e^{x}\\). See below on how this fact is stretched (to great lengths) to solve diffeqs.\nFrom the above expression containing \\(\\rho (x)\\), we naturally have that (based on the definition of the natural log, just expanding it out):\n\\begin{equation} e^{\\int P(x)\\dd{x}} = \\rho (x) \\end{equation}\nWhy is this useful? Remember, we are trying to undo the product rule. Let\u0026rsquo;s replace our new definition for \\(\\rho (x)\\) into the above expression we are trying to solve and see what happens!\n\\begin{align} \u0026amp;\\rho (x)\\qty (\\dv{y}{x} + P(x)y) = \\rho (x)Q(x) \\\\ \\Rightarrow\\ \u0026amp; e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x) \\end{align}\nFor a second now, let\u0026rsquo;s just take an aside and deal with the left side. 
We are starting to almost clearly see the product rule at play here. Let\u0026rsquo;s finish the job by finishing up the rest of the product rule. Remember, we want to go opposite the product rule at the next steps.\n\\begin{align} e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) \u0026amp;= \\dv{y}{x}e^{\\int p\\dd{x}} + yPe^{\\int P\\dd{x}} \\\\ \u0026amp;= \\dv x \\qty (ye^{\\int P\\dd{x}}) \\end{align}\nWoah! Now we have something clearly in the favor of \\(y\\) separated out. Let\u0026rsquo;s put this back to our original expression.\n\\begin{align} \u0026amp;e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x) \\\\ \\Rightarrow\\ \u0026amp; \\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x) \\end{align}\nNice. Now, do you see the clear step to isolate \\(y\\) by itself? I do.\n\\begin{align} \u0026amp;\\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x) \\\\ \\Rightarrow\\ \u0026amp; \\int \\dv x \\qty (ye^{\\int P\\dd{x}}) \\dd{x}= \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\ \\Rightarrow\\ \u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x} \\end{align}\nAnd finally, naturally and lastly, we divide the \\(e^{\\int P\\dd{x}}\\) to both sides.\n\\begin{align} \u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\ \\Rightarrow\\ \u0026amp; y = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\ \\blacksquare \\end{align}\nAnd there you have it. That\u0026rsquo;s the general solution to our diffeq.\n","html":"\u003ch2 id=\"general-form-of-first-order-differential-equations--org9e796b5\"\u003egeneral form of \u003ca href=\"#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThis will depend on both unknown function \\(x\\), and the independent variable \\(t\\). 
These could and could not be separable.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = F(t,x),\\ x(t_{0}) = x_{0}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s imagine \\(F\\) is \u0026ldquo;bounded\u0026rdquo; and \u0026ldquo;continuous\u0026rdquo; on \\(I \\times \\omega\\), where \\(I\\) is an open interval about \\(t_{0}\\) and \\(\\omega\\) is an open subset of \\(\\mathbb{R}^{n}\\), containing \\(x_{0}\\). \\(F\\) is bounded; the results are bounded??\u003c/p\u003e\n\u003ch3 id=\"functions-embedded-in-vector-space--kbhvector-space-dot-md--s\"\u003efunctions embedded in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eWe understand that such \u003ca href=\"#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e will describe a subset of an infinite dimensional \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eGiven we are dealing with \u003ca href=\"#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e, each function is a basis (if linear, otherwise, not quite the basis) of the subspace of the larger vector space; \\(+C\\) is how you create parametried variations\u003c/li\u003e\n\u003cli\u003eHowever, our function is not linear, not all functions would suffice here: non-linear equations are difficult to deal with beacuse the arc length follows a certain pattern\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"general-form-of-a-first-order-linear-differential-equation\"\u003eGeneral form of a first order \u003cstrong\u003e\u003cstrong\u003elinear\u003c/strong\u003e\u003c/strong\u003e differential equation\u003c/h2\u003e\n\u003cp\u003eA general linear, first-order, first-degree differential equation of the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P(x)y = Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehas a 
solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x) = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe more general solution (for definite integrals):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{-A(t)}x_{0} + e^{-A(t)}\\int_{t_{0}}^{t}e^{A(s)}b(s)\\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egiven the initial condition that \\(x(0) = 0\\). This is from the textbook.\u003c/p\u003e\n\u003cp\u003eBefore you go ham and start solving, though, make sure that pesky \\(y\\) term is actually there. If its not, you maybe better served using the \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/#solving-separable-differential-equations\"\u003eseperable\u003c/a\u003e methods to solve these things.\u003c/p\u003e\n\u003ch2 id=\"this-is-bad\"\u003ethis is bad\u003c/h2\u003e\n\u003cp\u003eThis is difficult to deal with this! What?? How?? Why does this work?? See below.\u003c/p\u003e\n\u003ch2 id=\"solving-differential-equations\"\u003esolving differential equations\u003c/h2\u003e\n\u003cp\u003eThe following technique works for ALL first-order linear differential equations:\u003c/p\u003e\n\u003cp\u003eTo solve, first put your equation into the standard form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{dy}{dx} + P(x)y = Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf you have an equation like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na(x) \\dv{y}{x} + b(x)y = c(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ea good way to do this is to apply \\(\\frac{1}{a(x)}\\) to both sides, resulting in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + \\frac{b(x)}{a(x)} y = \\frac{c(x)}{a(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then you can carry on solving like its an equation in standard form.\u003c/p\u003e\n\u003cp\u003eTo solve such a generic equation, we here are trying to UNDO the product rule.\u003c/p\u003e\n\u003cp\u003eWe first multiply the 
entire expression by something called the \u003ca href=\"#solving-differential-equations\"\u003eintergrating factor\u003c/a\u003e \\(\\rho(x)\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho(x) \\left(\\frac{dy}{dx} + P(x)y\\right) = \\rho(x)Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eA note on how this \\(\\rho(x)\\) works. This \u003ca href=\"#solving-differential-equations\"\u003eintergrating factor\u003c/a\u003e is actually defined with the following rule:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log (\\rho (x)) = \\int P(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(notably, \\(\\log\\) is actually \\(\\ln\\) in this case.)\u003c/p\u003e\n\u003cp\u003eWhy so weird of an expression? This all springs from the fact that \\(\\dv x e^{x} = e^{x}\\). See below on how this fact is stretched (to great lengths) to solve diffeqs.\u003c/p\u003e\n\u003cp\u003eFrom the above expression containing \\(\\rho (x)\\), we naturally have that (based on the definition of the natural log, just expanding it out):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{\\int P(x)\\dd{x}} = \\rho (x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhy is this useful? Remember, we are trying to \u003cem\u003eundo\u003c/em\u003e the product rule. Let\u0026rsquo;s replace our new definition for \\(\\rho (x)\\) into the above expression we are trying to solve and see what happens!\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\rho (x)\\qty (\\dv{y}{x} + P(x)y) = \\rho (x)Q(x) \\\\\n\\Rightarrow\\ \u0026amp; e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFor a second now, let\u0026rsquo;s just take an aside and deal with the left side. We are starting to \u003cem\u003ealmost\u003c/em\u003e clearly see the product rule at play here. Let\u0026rsquo;s finish the job by finishing up the rest of the product rule. 
Remember, we want to \u003cem\u003ego opposite\u003c/em\u003e the product rule at the next steps.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ne^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) \u0026amp;= \\dv{y}{x}e^{\\int p\\dd{x}} + yPe^{\\int P\\dd{x}} \\\\\n\u0026amp;= \\dv x \\qty (ye^{\\int P\\dd{x}})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWoah! Now we have something clearly in the favor of \\(y\\) separated out. Let\u0026rsquo;s put this back to our original expression.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;e^{\\int P\\dd{x}} \\qty (\\dv{y}{x} + P(x)y) = e^{\\int P\\dd{x}} Q(x) \\\\\n\\Rightarrow\\ \u0026amp; \\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNice. Now, do you see the clear step to isolate \\(y\\) by itself? I do.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dv x \\qty (ye^{\\int P\\dd{x}}) = e^{\\int P\\dd{x}} Q(x) \\\\\n\\Rightarrow\\ \u0026amp; \\int \\dv x \\qty (ye^{\\int P\\dd{x}}) \\dd{x}= \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\\n\\Rightarrow\\ \u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd finally, naturally and lastly, we divide the \\(e^{\\int P\\dd{x}}\\) to both sides.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; ye^{\\int P\\dd{x}} = \\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\\\\n\\Rightarrow\\ \u0026amp; y = e^{-\\int P\\dd{x}}\\int e^{\\int P\\dd{x}} Q(x) \\dd{x}\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd there you have it. 
That\u0026rsquo;s the general solution to our diffeq.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_non_seperable_equation/","tags":null,"title":"Linear Non-Seperable Equation"},{"categories":null,"contents":"An exact solution for a dynamic system with quadratic costs and linear differential equation describing the dynamics.\n","html":"\u003cp\u003eAn exact solution for a dynamic system with quadratic costs and \u003ca href=\"/posts/kbhordinary_differential_equations/#linear-vs-dot-non-linear-differential-equations\"\u003elinear differential equation\u003c/a\u003e describing the dynamics.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinear_quadratic_regulator/","tags":null,"title":"Linear-Quadratic Regulator"},{"categories":null,"contents":"For some non-linear function, we can use its first Jacobian to create a linear system. Then, we can use that system to write the first order Taylor:\n\\begin{equation} y\u0026rsquo; = \\nabla F(crit)y \\end{equation}\nwhere \\(crit\\) are critical points.\nPhase Portrait stability if all \\(Re[\\lambda] \u0026lt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered stable\u0026mdash;that is, points initially near \\(p\\) will exponentially approach \\(p\\)\nif at least one \\(Re[\\lambda] \u0026gt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered unstable\u0026mdash;that is, points initially near \\(p\\) will go somewhere else\nif all \\(Re[\\lambda] \\leq 0\\) and at least one \\(\\lambda\\) is pure imaginary of \\(\\qty(\\nabla F)(p)\\), then there are no conclusions and \\(p\\) is considered marginal\nIf there are no purely imaginary values, then the solution paths of the ODE look like that of \\(y\u0026rsquo; = (\\nabla F)(p) y\\).\nWorked Example Let\u0026rsquo;s use the Lotha-Volterra Prey-Predictor Equation again as an example\n\\begin{equation} \\begin{cases} x_1\u0026rsquo; = 2x_1-x_1x_2 \\\\ x_2\u0026rsquo; = x_1x_2 - 3x_2 \\end{cases} \\end{equation}\nwe can stare 
at this (and factor \\(x\\) out) to understand that there are only two stationary points:\n\\begin{equation} (x_1,x_2) = (0,0), (3,2) \\end{equation}\nLet\u0026rsquo;s analyze this function for linearilzation.\nLet\u0026rsquo;s write this expression in terms of the linear and non linear parts\n\\begin{equation} \\begin{cases} x\u0026rsquo; = \\mqty(2 \u0026amp; 0 \\\\ 0 \u0026amp; -3) \\mqty(x_1 \\\\ x_2) + \\mqty(-x_1x_2 \\\\ x_1 x_2) \\end{cases} \\end{equation}\nNear \\((0,0)\\) You will note that the right non-linear parts becomes very small near \\((0,0)\\), meaning we can analyze this in terms of a normal phase portrait.\nNear \\((3,2)\\) We can translate this down:\nLet:\n\\begin{equation} y = x - \\mqty(3 \\\\2) \\end{equation}\nmeaning:\n\\begin{equation} y\u0026rsquo; = x\u0026rsquo; = F\\qty(y+\\mqty(3 \\\\ 2)) \\end{equation}\nwe can use a Taylor expansion to get:\n\\begin{equation} y\u0026rsquo; = x\u0026rsquo; = F\\qty(y + \\mqty(3\\\\2)) + \\qty(\\nabla F)y + \\dots \\end{equation}\nRecall that \\(F\\) is given as:\n\\begin{equation} \\mqty(2x_1 - x_1x_2 \\\\ x_1x_2-3x_2) \\end{equation}\nmeaning:\n\\begin{equation} \\nabla \\mqty(2x_1 - x_1x_2 \\\\ x_1x_2-3x_2) = \\mqty(2-x_2 \u0026amp; -x_1 \\\\ x_2 \u0026amp; x_1-3) \\end{equation}\nplugging in \\((3, 2)\\) obtains:\n\\begin{equation} y\u0026rsquo; = \\mqty(0 \u0026amp; -3 \\\\ 2 \u0026amp; 0) y \\end{equation}\nwhich we can analyze in the usual manners.\n","html":"\u003cp\u003eFor some non-linear function, we can use its first Jacobian to create a linear system. 
Then, we can use that system to write the first order Taylor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\nabla F(crit)y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(crit\\) are critical points.\u003c/p\u003e\n\u003ch2 id=\"phase-portrait--kbhsu-math53-feb072024-dot-md--stability\"\u003e\u003ca href=\"/posts/kbhsu_math53_feb072024/#phase-portrait\"\u003ePhase Portrait\u003c/a\u003e stability\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eif \u003cstrong\u003eall\u003c/strong\u003e \\(Re[\\lambda] \u0026lt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered \u003cstrong\u003estable\u003c/strong\u003e\u0026mdash;that is, points initially near \\(p\\) will exponentially approach \\(p\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eif \u003cstrong\u003eat least one\u003c/strong\u003e \\(Re[\\lambda] \u0026gt; 0\\) of \\(\\qty(\\nabla F)(p)\\) then \\(p\\) is considered \u003cstrong\u003eunstable\u003c/strong\u003e\u0026mdash;that is, points initially near \\(p\\) will go somewhere else\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eif \u003cstrong\u003eall\u003c/strong\u003e \\(Re[\\lambda] \\leq 0\\) and \u003cstrong\u003eat least one\u003c/strong\u003e \\(\\lambda\\) is pure imaginary of \\(\\qty(\\nabla F)(p)\\), then there are no conclusions and \\(p\\) is considered \u003cstrong\u003emarginal\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eIf there are \u003cstrong\u003eno\u003c/strong\u003e purely imaginary values, then the solution paths of the ODE look like that of \\(y\u0026rsquo; = (\\nabla F)(p) y\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"worked-example\"\u003eWorked Example\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s use the \u003ca href=\"/posts/kbhsu_math53_feb072024/#lotha-volterra-prey-predictor-equation\"\u003eLotha-Volterra Prey-Predictor Equation\u003c/a\u003e again as an 
example\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = 2x_1-x_1x_2 \\\\\nx_2\u0026rsquo; = x_1x_2 - 3x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can stare at this (and factor \\(x\\) out) to understand that there are only two stationary points:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(x_1,x_2) = (0,0), (3,2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s analyze this function for \u003ca href=\"/posts/kbhode_linearilzation/\"\u003elinearilzation\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s write this expression in terms of the linear and non linear parts\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx\u0026rsquo; = \\mqty(2 \u0026amp; 0 \\\\ 0 \u0026amp; -3) \\mqty(x_1 \\\\ x_2) + \\mqty(-x_1x_2 \\\\ x_1 x_2)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"near--0-0\"\u003eNear \\((0,0)\\)\u003c/h3\u003e\n\u003cp\u003eYou will note that the right non-linear parts becomes very small near \\((0,0)\\), meaning we can analyze this in terms of a normal phase portrait.\u003c/p\u003e\n\u003ch3 id=\"near--3-2\"\u003eNear \\((3,2)\\)\u003c/h3\u003e\n\u003cp\u003eWe can translate this down:\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = x - \\mqty(3 \\\\2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = x\u0026rsquo; = F\\qty(y+\\mqty(3 \\\\ 2))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can use a Taylor expansion to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = x\u0026rsquo; = F\\qty(y + \\mqty(3\\\\2)) + \\qty(\\nabla F)y + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(F\\) is given as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(2x_1 - x_1x_2 \\\\ x_1x_2-3x_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla \\mqty(2x_1 - x_1x_2 
\\\\ x_1x_2-3x_2) = \\mqty(2-x_2 \u0026amp; -x_1 \\\\ x_2 \u0026amp; x_1-3)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eplugging in \\((3, 2)\\) obtains:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\mqty(0 \u0026amp; -3 \\\\ 2 \u0026amp; 0) y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich we can analyze in the usual manners.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhode_linearilzation/","tags":null,"title":"linearilzation"},{"categories":null,"contents":"CAPM, a Review Note that we will be using the Sharpe-Linter version of CAPM:\n\\begin{equation} E[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})] \\end{equation}\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\nLet us begin. We will create a generic function to analyze some given stock.\nData Import We will first import our utilities\nimport pandas as pd import numpy as np Let\u0026rsquo;s load the data from our market (NYSE) as well as our 10 year t-bill data.\nt_bill = pd.read_csv(\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;) nyse = pd.read_csv(\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;) nyse.head() Date Close 0 11/7/2013 16:00:00 9924.37 1 11/8/2013 16:00:00 10032.14 2 11/11/2013 16:00:00 10042.95 3 11/12/2013 16:00:00 10009.84 4 11/13/2013 16:00:00 10079.89 Excellent. 
Let\u0026rsquo;s load in the data for that stock.\ndef load_stock(stock): return pd.read_csv(f\u0026#34;./linearity_test_data/{stock}.csv\u0026#34;) load_stock(\u0026#34;LMT\u0026#34;).head() Date Close 0 11/7/2013 16:00:00 136.20 1 11/8/2013 16:00:00 138.11 2 11/11/2013 16:00:00 137.15 3 11/12/2013 16:00:00 137.23 4 11/13/2013 16:00:00 137.26 Raw Data And now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\n# load data df = { \u0026#34;Date\u0026#34;: nyse.Date, \u0026#34;NYSE\u0026#34;: nyse.Close, \u0026#34;TBill\u0026#34;: t_bill.Close, \u0026#34;LMT\u0026#34;: load_stock(\u0026#34;LMT\u0026#34;).Close, \u0026#34;TWTR\u0026#34;: load_stock(\u0026#34;TWTR\u0026#34;).Close, \u0026#34;MCD\u0026#34;: load_stock(\u0026#34;MCD\u0026#34;).Close } # convert to dataframe df = pd.DataFrame(df) # drop empty df.dropna(inplace=True) df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20 1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01 2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09 3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66 4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21 2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28 2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38 2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36 2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07 [2159 rows x 6 columns] Log Returns Excellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\nlog_returns = df[[\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]].apply(np.log, inplace=True) df.loc[:, [\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]] = log_returns df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771 1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814 2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638 3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492 4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262 2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577 2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980 2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879 2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711 [2159 rows x 6 columns] And now, the log returns! We will shift this data by one column and subtract.\nreturns = df.drop(columns=[\u0026#34;Date\u0026#34;]) - df.drop(columns=[\u0026#34;Date\u0026#34;]).shift(1) returns.dropna(inplace=True) returns NYSE TBill LMT TWTR MCD 1 0.010801 0.049646 0.013926 -0.075136 -0.001957 2 0.001077 0.001819 -0.006975 0.029570 0.000824 3 -0.003302 0.006161 0.000583 -0.023586 0.005854 4 0.006974 -0.015657 0.000219 0.016568 0.004597 5 0.005010 -0.008476 0.007476 0.047896 -0.005622 ... ... ... ... ... ... 
2154 0.005785 0.004940 -0.023467 -0.014291 0.001349 2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685 2156 0.006282 0.064420 0.004112 0.015402 0.004403 2157 0.002626 0.034169 0.003575 0.006245 -0.008100 2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168 [2158 rows x 5 columns] Risk-Free Excess Recall that we want to be working with the excess-to-risk-free rates \\(R_{T}-R_{f}\\), where \\(R_{T}\\) is some security. So, we will go through and subtract everything by the risk-free rate (and drop the RFR itself):\nrisk_free_excess = returns.drop(columns=\u0026#34;TBill\u0026#34;).apply(lambda x: x-returns.TBill) risk_free_excess NYSE LMT TWTR MCD 1 -0.038846 -0.035720 -0.124783 -0.051603 2 -0.000742 -0.008794 0.027751 -0.000995 3 -0.009463 -0.005577 -0.029747 -0.000307 4 0.022630 0.015875 0.032225 0.020254 5 0.013486 0.015952 0.056372 0.002854 ... ... ... ... ... 2154 0.000845 -0.028406 -0.019231 -0.003591 2155 -0.021162 -0.037975 -0.043738 -0.047818 2156 -0.058138 -0.060308 -0.049017 -0.060017 2157 -0.031543 -0.030593 -0.027924 -0.042269 2158 0.040964 0.027215 0.010537 0.024422 [2158 rows x 4 columns] Actual Regression It is now time to perform the actual linear regression! We will use statsmodels\u0026rsquo; Ordinary Least Squares API to make our work easier, but we will go through a full regression in the end.\nimport statsmodels.api as sm CAPM Regression: Lockheed Martin Let\u0026rsquo;s work with Lockheed Martin first for regression, fitting an ordinary least squares. Remember that the OLS functions reads the endogenous variable first (for us, the return of the asset.)\n# add a column of ones to our input market excess returns nyse_with_bias = sm.add_constant(risk_free_excess.NYSE) # perform linreg lmt_model = sm.OLS(risk_free_excess.LMT, nyse_with_bias).fit() lmt_model.summary() OLS Regression Results ============================================================================== Dep. Variable: LMT R-squared: 0.859 Model: OLS Adj. 
R-squared: 0.859 Method: Least Squares F-statistic: 1.312e+04 No. Observations: 2158 AIC: -1.263e+04 Df Residuals: 2156 BIC: -1.262e+04 Df Model: 1 Prob (F-statistic): 0.00 Covariance Type: nonrobust Log-Likelihood: 6318.9 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0004 0.000 1.311 0.190 -0.000 0.001 NYSE 0.9449 0.008 114.552 0.000 0.929 0.961 ============================================================================== Based on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being slightly undervarying that the market.\nCAPM Regression: MacDonald\u0026rsquo;s # perform linreg mcd_model = sm.OLS(risk_free_excess.MCD, nyse_with_bias).fit() mcd_model.summary() OLS Regression Results ============================================================================== Dep. Variable: MCD R-squared: 0.887 Model: OLS Adj. R-squared: 0.887 Method: Least Squares F-statistic: 1.697e+04 No. 
Observations: 2158 AIC: -1.310e+04 Df Residuals: 2156 BIC: -1.309e+04 Df Model: 1 Prob (F-statistic): 0.00 Covariance Type: nonrobust Log-Likelihood: 6551.1 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0003 0.000 1.004 0.315 -0.000 0.001 NYSE 0.9651 0.007 130.287 0.000 0.951 0.980 ============================================================================== Same thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. The food industry is probably a tougher business than that in defense.\nCAPM Regression: Twitter Lastly, to analyze the recently delisted Twitter!\n# perform linreg twtr_model = sm.OLS(risk_free_excess.TWTR, nyse_with_bias).fit() twtr_model.summary() OLS Regression Results ============================================================================== Dep. Variable: TWTR R-squared: 0.522 Model: OLS Adj. R-squared: 0.522 Method: Least Squares F-statistic: 2357. No. Observations: 2158 AIC: -8610. Df Residuals: 2156 BIC: -8599. Df Model: 1 Prob (F-statistic): 0.00 Covariance Type: nonrobust Log-Likelihood: 4307.1 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const -0.0002 0.001 -0.346 0.730 -0.002 0.001 NYSE 1.0173 0.021 48.549 0.000 0.976 1.058 ============================================================================== Evidently, Twitter is much more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) 
Furthermore, we have a positive beta value: that the asset is more variable than the market.\nManual Checking We can also use the betas formula to manually calculate what we expect for the beta values (i.e. as if they were one IID random variable.)\nrisk_free_cov = risk_free_excess.cov() risk_free_cov NYSE LMT TWTR MCD NYSE 0.001143 0.001080 0.001163 0.001103 LMT 0.001080 0.001188 0.001116 0.001083 TWTR 0.001163 0.001116 0.002264 0.001155 MCD 0.001103 0.001083 0.001155 0.001200 Finally, to construct the beta values. Recall that:\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nand that:\n\\begin{equation} Var[X] = Cov[X,X], \\forall X \\end{equation}\n# get the market variance (covariance with itself) market_variation = risk_free_cov.NYSE.NYSE # calculate betas betas = {\u0026#34;LMT\u0026#34;: (risk_free_cov.LMT.NYSE/market_variation), \u0026#34;TWTR\u0026#34;: (risk_free_cov.TWTR.NYSE/market_variation), \u0026#34;MCD\u0026#34;: (risk_free_cov.MCD.NYSE/market_variation)} # and make dataframe betas = pd.Series(betas) betas LMT 0.944899 TWTR 1.017294 MCD 0.965081 dtype: float64 Apparently, all of our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\nEqual-Part Fund We will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\nFirst, let\u0026rsquo;s create a baseline fund in equal parts. Here it is:\nfund_1_returns = returns.LMT + returns.TWTR + returns.MCD fund_1_returns 1 -0.063167 2 0.023420 3 -0.017149 4 0.021384 5 0.049750 ... 
2154 -0.036409 2155 -0.021132 2156 0.023917 2157 0.001720 2158 -0.014596 Length: 2158, dtype: float64 We will calculate the excess returns of this fund:\nfund_1_excess = fund_1_returns-returns.TBill fund_1_excess 1 -0.112813 2 0.021600 3 -0.023310 4 0.037041 5 0.058226 ... 2154 -0.041349 2155 -0.057265 2156 -0.040503 2157 -0.032449 2158 0.010994 Length: 2158, dtype: float64 Performance of the Equal-Part Fund # perform linreg fund_1_model = sm.OLS(fund_1_excess, nyse_with_bias).fit() fund_1_model.summary() OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.473 Model: OLS Adj. R-squared: 0.473 Method: Least Squares F-statistic: 1935. No. Observations: 2158 AIC: -7735. Df Residuals: 2156 BIC: -7724. Df Model: 1 Prob (F-statistic): 3.01e-302 Covariance Type: nonrobust Log-Likelihood: 3869.5 ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0007 0.001 0.841 0.401 -0.001 0.002 NYSE 1.1290 0.026 43.993 0.000 1.079 1.179 ============================================================================== Surprisingly, we have now created a significantly riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\nA More Optimized Fund To me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. To do this, let\u0026rsquo;s create a generic linear combination of the assets.\nimport sympy as sym x = sym.Symbol(\u0026#39;x\u0026#39;) y = sym.Symbol(\u0026#39;y\u0026#39;) z = sym.Symbol(\u0026#39;z\u0026#39;) fund_2_returns = x*returns.LMT + y*returns.TWTR + z*returns.MCD fund_2_returns 1 0.0139260753744255*x - 0.0751364261353569*y - ... 
2 -0.00697525170622448*x + 0.0295704573211193*y ... 3 0.000583132897928884*x - 0.0235859990058791*y ... 4 0.000218587198947517*x + 0.016568426347233*y +... 5 0.00747599199607762*x + 0.0478955096700351*y -... ... 2154 -0.0234665578621085*x - 0.0142913301107561*y +... 2155 -0.00184214468578059*x - 0.0076045993852194*y ... 2156 0.00411172646842317*x + 0.0154024001854269*y +... 2157 0.00357547337231878*x + 0.0062445563228315*y -... 2158 0.00162509910496933*x - 0.0150529686289622*y -... Length: 2158, dtype: object Excellent. We will also calculate the excess returns of this fund:\nfund_2_excess = fund_2_returns-returns.TBill Y = fund_2_excess.to_numpy() Y [0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039 -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 0.00181917459665826 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536 ... 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043] We cast this type to a numpy array because we are about to perform some matrix operations upon it.\nOptimizing the Optimized Fund: Linreg Now, let us perform the actual linear regression ourselves. Recall that the pseudoinverse linear regression estimator is:\n\\begin{equation} \\beta = (X^{T}X)^{-1}X^{T}Y \\end{equation}\nWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\nX = nyse_with_bias.to_numpy() X [[ 1.00000000e+00 -3.88457302e-02] [ 1.00000000e+00 -7.42217926e-04] [ 1.00000000e+00 -9.46284244e-03] ... 
[ 1.00000000e+00 -5.81378271e-02] [ 1.00000000e+00 -3.15429207e-02] [ 1.00000000e+00 4.09643405e-02]] We now have our matrices, let\u0026rsquo;s perform the linear regression!\nlinear_model = np.linalg.inv((X.transpose()@X))@X.transpose()@Y linear_model [0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281] Excellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\nOptimizing the Optimized Fund: Picking Optimizing Parameters We can will solve for a combination of solutions to give us specific values of returns vs risk. For instance, we can fix the variance to 1 (i.e. we can vary as much as the market.) We subtract one here for the solver, which expects the expressions equaling to \\(0\\).\nrisk_expr = linear_model[1] - 1 risk_expr 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719 Now, we will set a certain earning value, and solve for possible solutions. We will try to get the largest possible bias without needing to short something (i.e. cause a negative solution). By hand-fisting a value, it seems 0.001 is a good bet.\ndeviance_expr = linear_model[0] - 0.001 deviance_expr 0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.00117862072546534 Optimizing the Optimized Fund: Optimize! solution = sym.solvers.solve([deviance_expr, risk_expr], x,y,z) solution {x: 2.16803104555387 - 0.819584899551304*z, y: 0.0137520589394366 - 0.24067066980814*z} We have one degree of freedom here: how much MacDonald\u0026rsquo;s we want! 
Let\u0026rsquo;s say we want none (which would, according to this, be an equally efficient solution.)\nHow Does Our Fund Do? This would create the following plan:\n# for our case z_val = 0 # numerical solutions s_x = solution[x].subs(z,z_val) s_y = solution[y].subs(z,z_val) # solution fund_2_nobias_nomac = s_x*returns.LMT + s_y*returns.TWTR + z_val*returns.MCD fund_2_nobias_nomac.mean() 0.001185050286566688 Recall that this is the performance of the balanced portfolio:\nfund_1_returns.mean() 0.0009224705380695683 So, for market-level risk (\\(\\beta =1\\), instead of the balanced portfolio\u0026rsquo;s \\(\\beta =1.1290\\)), this is a pretty good deal!\nSome Plots Finally, let\u0026rsquo;s plot the prices of our various funds:\nimport matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns from datetime import datetime sns.set() fund_2_price = s_x*df.LMT + s_y*df.TWTR + z_val*df.MCD fund_1_price = df.LMT + df.TWTR fund_l_price = df.LMT fund_t_price = df.TWTR dates = df.Date.apply(lambda x:datetime.strptime(x, \u0026#34;%m/%d/%Y %H:%M:%S\u0026#34;)) sns.lineplot(x=dates, y=fund_2_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_1_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_l_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_t_price.apply(sym.Float).astype(float)) plt.gca().xaxis.set_major_locator(mdates.YearLocator()) plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\u0026#39;%Y\u0026#39;)) plt.gca().set_ylabel(\u0026#34;Price\u0026#34;) plt.show() Recall that we didn\u0026rsquo;t actually buy any MacDonald\u0026rsquo;s. 
So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\nOur portfolio works surprisingly well!\n","html":"\u003ch2 id=\"capm-a-review\"\u003eCAPM, a Review\u003c/h2\u003e\n\u003cp\u003eNote that we will be using the Sharpe-Linter version of CAPM:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\u003c/p\u003e\n\u003cp\u003eLet us begin. We will create a generic function to analyze some given stock.\u003c/p\u003e\n\u003ch2 id=\"data-import\"\u003eData Import\u003c/h2\u003e\n\u003cp\u003eWe will first import our utilities\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s load the data from our market (NYSE) as well as our 10 year t-bill 
data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et_bill\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let\u0026rsquo;s load in the data for that stock.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 136.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 138.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 137.15\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 137.23\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 
16:00:00 137.26\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"raw-data\"\u003eRaw Data\u003c/h2\u003e\n\u003cp\u003eAnd now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# load data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003et_bill\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# convert to dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# drop empty\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"log-returns\"\u003eLog Returns\u003c/h2\u003e\n\u003cp\u003eExcellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, the log returns! 
We will shift this data by one column and subtract.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.010801 0.049646 0.013926 -0.075136 -0.001957\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.001077 0.001819 -0.006975 0.029570 0.000824\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.003302 0.006161 0.000583 -0.023586 0.005854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.006974 -0.015657 0.000219 0.016568 0.004597\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.005010 -0.008476 0.007476 0.047896 -0.005622\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.005785 0.004940 -0.023467 -0.014291 0.001349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.006282 0.064420 0.004112 0.015402 0.004403\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.002626 0.034169 0.003575 0.006245 -0.008100\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"risk-free-excess\"\u003eRisk-Free Excess\u003c/h2\u003e\n\u003cp\u003eRecall that we want to be working with the excess-to-risk-free rates \\(R_{T}-R_{f}\\), where \\(R_{T}\\) is some security. 
So, we will go through and subtract everything by the risk-free rate (and drop the RFR itself):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.038846 -0.035720 -0.124783 -0.051603\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.000742 -0.008794 0.027751 -0.000995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.009463 -0.005577 -0.029747 -0.000307\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.022630 0.015875 0.032225 0.020254\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.013486 0.015952 0.056372 0.002854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.000845 -0.028406 -0.019231 -0.003591\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021162 -0.037975 -0.043738 -0.047818\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.058138 -0.060308 -0.049017 -0.060017\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.031543 -0.030593 -0.027924 -0.042269\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.040964 0.027215 0.010537 0.024422\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"actual-regression\"\u003eActual Regression\u003c/h2\u003e\n\u003cp\u003eIt is now time to perform the actual linear regression! 
We will use statsmodels\u0026rsquo; Ordinary Least Squares API to make our work easier, but we will go through a full regression in the end.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatsmodels.api\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"capm-regression-lockheed-martin\"\u003eCAPM Regression: Lockheed Martin\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s work with Lockheed Martin first for regression, fitting an ordinary least squares. Remember that the OLS functions reads the \u003cem\u003eendogenous\u003c/em\u003e variable first (for us, the return of the asset.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# add a column of ones to our input market excess returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eadd_constant\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: LMT R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.312e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -1.263e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.262e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 6318.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0004 0.000 1.311 0.190 -0.000 
0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9449 0.008 114.552 0.000 0.929 0.961\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBased on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being \u003cem\u003eslightly\u003c/em\u003e undervarying that the market.\u003c/p\u003e\n\u003ch2 id=\"capm-regression-macdonald-s\"\u003eCAPM Regression: MacDonald\u0026rsquo;s\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: MCD R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.697e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -1.310e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.309e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 6551.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0003 0.000 1.004 0.315 -0.000 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9651 0.007 130.287 0.000 0.951 0.980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSame thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. 
The food industry is probably a tougher business than that in defense.\u003c/p\u003e\n\u003ch2 id=\"capm-regression-twitter\"\u003eCAPM Regression: Twitter\u003c/h2\u003e\n\u003cp\u003eLastly, to analyze the recently delisted Twitter!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: TWTR R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 2357.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -8610.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -8599.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 4307.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst -0.0002 0.001 -0.346 0.730 -0.002 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.0173 0.021 48.549 0.000 0.976 1.058\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, Twitter is \u003cem\u003emuch\u003c/em\u003e more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) 
Furthermore, we have a positive beta value: that the asset is more variable than the market.\u003c/p\u003e\n\u003ch2 id=\"manual-checking\"\u003eManual Checking\u003c/h2\u003e\n\u003cp\u003eWe can also use the betas formula to manually calculate what we \u003cem\u003eexpect\u003c/em\u003e for the beta values (i.e. as if they were one IID random variable.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecov\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.001143 0.001080 0.001163 0.001103\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.001080 0.001188 0.001116 0.001083\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 0.001163 0.001116 0.002264 0.001155\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.001103 0.001083 0.001155 0.001200\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, to construct the beta values. Recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar[X] = Cov[X,X], \\forall X\n\\end{equation}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# get the market variance (covariance with itself)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# calculate betas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# and make dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.944899\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 1.017294\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.965081\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eApparently, all of 
our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\u003c/p\u003e\n\u003ch2 id=\"equal-part-fund\"\u003eEqual-Part Fund\u003c/h2\u003e\n\u003cp\u003eWe will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\u003c/p\u003e\n\u003cp\u003eFirst, let\u0026rsquo;s create a baseline fund in equal parts. Here it is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.063167\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.023420\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.017149\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.021384\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.049750\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.036409\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021132\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.023917\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.001720\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 -0.014596\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.112813\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.021600\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.023310\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.037041\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.058226\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.041349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.057265\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.040503\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.032449\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.010994\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: 
float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"performance-of-the-equal-part-fund\"\u003ePerformance of the Equal-Part Fund\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: y R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1935.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -7735.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -7724.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1 Prob (F-statistic): 3.01e-302\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust Log-Likelihood: 3869.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0007 0.001 0.841 0.401 -0.001 0.002\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.1290 0.026 43.993 0.000 1.079 1.179\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSurprisingly, we have now created a \u003cstrong\u003esignificantly\u003c/strong\u003e riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\u003c/p\u003e\n\u003ch2 id=\"a-more-optimized-fund\"\u003eA More Optimized Fund\u003c/h2\u003e\n\u003cp\u003eTo me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. To do this, let\u0026rsquo;s create a generic linear combination of the assets.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esympy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#39;x\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;z\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.0139260753744255*x - 0.0751364261353569*y - ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.00697525170622448*x + 0.0295704573211193*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.000583132897928884*x - 0.0235859990058791*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.000218587198947517*x + 0.016568426347233*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.00747599199607762*x + 0.0478955096700351*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.0234665578621085*x - 0.0142913301107561*y 
+...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.00184214468578059*x - 0.0076045993852194*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.00411172646842317*x + 0.0154024001854269*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.00357547337231878*x + 0.0062445563228315*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.00162509910496933*x - 0.0150529686289622*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. We will also calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 0.00181917459665826\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe cast this type to a numpy array because we are about to perform some matrix operations upon it.\u003c/p\u003e\n\u003ch2 id=\"optimizing-the-optimized-fund-linreg\"\u003eOptimizing the Optimized Fund: Linreg\u003c/h2\u003e\n\u003cp\u003eNow, let us 
perform the actual linear regression ourselves. Recall that the pseudoinverse linear regression estimator is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta = (X^{T}X)^{-1}X^{T}Y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[ 1.00000000e+00 -3.88457302e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -7.42217926e-04]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -9.46284244e-03]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 
-5.81378271e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -3.15429207e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 4.09643405e-02]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe now have our matrices, let\u0026rsquo;s perform the linear regression!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elinalg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etranspose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X.transpose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@Y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\u003c/p\u003e\n\u003ch2 id=\"optimizing-the-optimized-fund-picking-optimizing-parameters\"\u003eOptimizing the Optimized Fund: Picking Optimizing Parameters\u003c/h2\u003e\n\u003cp\u003eWe can will solve for a combination of solutions to give us specific values of returns vs risk. For instance, we can fix the variance to 1 (i.e. we can vary as much as the market.) 
We subtract one here for the solver, which expects the expressions equaling to \\(0\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we will set a certain earning value, and solve for possible solutions. We will try to get the largest possible bias without needing to short something (i.e. cause a negative solution). 
By hand-fisting a value, it seems 0.001 is a good bet.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.001\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.00117862072546534\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"optimizing-the-optimized-fund-optimize\"\u003eOptimizing the Optimized Fund: Optimize!\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolvers\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{x: 2.16803104555387 - 0.819584899551304*z, y: 0.0137520589394366 - 0.24067066980814*z}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe have one degree of freedom here: how much MacDonald\u0026rsquo;s we want! 
Let\u0026rsquo;s say we want none (which would, according to this, be an equally efficient solution.)\u003c/p\u003e\n\u003ch2 id=\"how-does-our-fund-do\"\u003eHow Does Our Fund Do?\u003c/h2\u003e\n\u003cp\u003eThis would create the following plan:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for our case\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# numerical solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es_x\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003es_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_nobias_nomac\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_x\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_nobias_nomac\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.001185050286566688\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that this is the performance of the balanced portfolio:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e0.0009224705380695683\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo, for market-level risk (\\(\\beta =1\\), instead of the balanced portfolio\u0026rsquo;s \\(\\beta =1.1290\\)), this is a pretty good deal!\u003c/p\u003e\n\u003ch2 id=\"some-plots\"\u003eSome Plots\u003c/h2\u003e\n\u003cp\u003eFinally, let\u0026rsquo;s plot the \u003cem\u003eprices\u003c/em\u003e of our various funds:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.dates\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eseaborn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_x\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es_y\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estrptime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;%m/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e%d\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e/%Y %H:%M:%S\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_major_locator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eYearLocator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eset_major_formatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDateFormatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;%Y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_ylabel\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Price\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_23-33-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that we didn\u0026rsquo;t actually buy any MacDonald\u0026rsquo;s. 
So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\u003c/p\u003e\n\u003cp\u003eOur portfolio works surprisingly well!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/linearity_tests_preso/","tags":null,"title":"Linearity Tests"},{"categories":null,"contents":"linked files is a linked list: in every block, it stores the location of the next block; we don\u0026rsquo;t store files contiguously. We simply store a part of the file in a block, and a pointer to wherever the next block where the file is located is.\nthis solves the contiguous allocation\u0026rsquo;s fragmentation problem.\nproblems massive seek time to get all the blocks for a given file: data scattered random access of files (\u0026ldquo;find the middle\u0026rdquo;) is hard: can\u0026rsquo;t easily jump to an arbitrary location; we had to read the file from the start ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlinked_files/\"\u003elinked files\u003c/a\u003e is a linked list: in every block, it stores the location of the next block; we \u003cstrong\u003edon\u0026rsquo;t store files contiguously\u003c/strong\u003e. 
We simply store a part of the file in a block, and a pointer to wherever the next block where the file is located is.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-10_14-07-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ethis solves the \u003ca href=\"/posts/kbhcontiguous_allocation/\"\u003econtiguous allocation\u003c/a\u003e\u0026rsquo;s fragmentation problem.\u003c/p\u003e\n\u003ch2 id=\"problems\"\u003eproblems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emassive seek time to get all the blocks for a given file: data scattered\u003c/li\u003e\n\u003cli\u003erandom access of files (\u0026ldquo;find the middle\u0026rdquo;) is hard: can\u0026rsquo;t easily jump to an arbitrary location; we had to read the file from the start\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlinked_files/","tags":null,"title":"linked files"},{"categories":null,"contents":" Using P2P to trade stocks in a darkpool Sweep across LiquidNet and send normall if not needed ","html":"\u003cul\u003e\n\u003cli\u003eUsing P2P to trade stocks in a \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eSweep across \u003ca href=\"/posts/kbhliquidnet/\"\u003eLiquidNet\u003c/a\u003e and send normall if not needed\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhliquidnet/","tags":null,"title":"LiquidNet"},{"categories":null,"contents":"A list is an ordered collection of \\(n\\) elements.\nrequirements as list length cannot be negative list length cannot be \\(\\infty\\) repetition matters order matters additional info two lists are equal IFF they have same \\(n\\) same elements same order they are different from sets because order matters (therefore, because in/out is no longer a binary) number of entries of the same object matters length is finite ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e is an 
\u003cstrong\u003e\u003cstrong\u003eordered collection\u003c/strong\u003e\u003c/strong\u003e of \\(n\\) elements.\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eas list length cannot be negative\u003c/li\u003e\n\u003cli\u003elist length cannot be \\(\\infty\\)\u003c/li\u003e\n\u003cli\u003erepetition matters\u003c/li\u003e\n\u003cli\u003eorder matters\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-info\"\u003eadditional info\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etwo lists are equal IFF they have\n\u003cul\u003e\n\u003cli\u003esame \\(n\\)\u003c/li\u003e\n\u003cli\u003esame elements\u003c/li\u003e\n\u003cli\u003esame order\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ethey are different from \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003es because\n\u003cul\u003e\n\u003cli\u003eorder matters\u003c/li\u003e\n\u003cli\u003e(therefore, because in/out is no longer a binary) number of entries of the same object matters\u003c/li\u003e\n\u003cli\u003elength is finite\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlist/","tags":null,"title":"list"},{"categories":null,"contents":" Number Name 31 Herber Hoover 32 Franklin D. Roosevelt (FDR) ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eNumber\u003c/th\u003e\n\u003cth\u003eName\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e31\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e32\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. 
Roosevelt (FDR)\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlist_of_american_presidents/","tags":null,"title":"list of American presidents"},{"categories":null,"contents":"The Little Endian architecture is one which the numbers are laid out such that the smallest bytes are placed earlier into memory. In a sense, all the numbers are stored in reverse if readnig from \u0026ldquo;left to \u0026ldquo;right\u0026rdquo;\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhlittle_endian/\"\u003eLittle Endian\u003c/a\u003e architecture is one which the numbers are laid out such that the smallest \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es are placed earlier into memory. In a sense, all the numbers are stored in reverse if readnig from \u0026ldquo;left to \u0026ldquo;right\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlittle_endian/","tags":null,"title":"Little Endian"},{"categories":null,"contents":"Something is considered \u0026ldquo;Living\u0026rdquo; when they exhibit two main biological functions\nTwo Main Functions of Life metabolism: \u0026ldquo;do chemistry to change internal consistence\u0026rdquo; replication: \u0026ldquo;copying cell information\u0026rdquo; theories of origin of life Manfred Eigen (\u0026quot;RNA-World\u0026quot; theory): genes form, constructing enzymes, forming cells Alexander Oparin: cells form, creating enzymes as needed, forming the genes to encode them Freeman Dyson: Dyson\u0026rsquo;s Model of Life\u0026mdash;basically the Oparin model, but with more specifics about how genes evolve ","html":"\u003cp\u003eSomething is considered \u0026ldquo;\u003ca href=\"/posts/kbhliving/\"\u003eLiving\u003c/a\u003e\u0026rdquo; when they exhibit two main biological functions\u003c/p\u003e\n\u003ch2 id=\"two-main-functions-of-life\"\u003eTwo Main Functions of 
Life\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmetabolism/\"\u003emetabolism\u003c/a\u003e: \u0026ldquo;do chemistry to change internal consistence\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreplication/\"\u003ereplication\u003c/a\u003e: \u0026ldquo;copying cell information\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"theories-of-origin-of-life\"\u003etheories of origin of life\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eManfred Eigen (\u0026quot;\u003ca href=\"#theories-of-origin-of-life\"\u003eRNA-World\u003c/a\u003e\u0026quot; theory): genes form, constructing enzymes, forming cells\u003c/li\u003e\n\u003cli\u003eAlexander Oparin: cells form, creating enzymes as needed, forming the genes to encode them\u003c/li\u003e\n\u003cli\u003eFreeman Dyson: \u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model of Life\u003c/a\u003e\u0026mdash;basically the Oparin model, but with more specifics about how genes evolve\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhliving/","tags":null,"title":"Living"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhllama/","tags":null,"title":"LLaMA"},{"categories":null,"contents":"Qualitative Changes in Teaching via LLMs no clear sign that there are qualitative changes via GPT no clear catering to students important questions how to treanslate training into practice? how to cater to student needs? what to do with flawed assessments? Teacher Training Conventional Teacher Coaching not scalable, requires observation, and will give feedback not data driven, not adaptive\u0026mdash;expertise is hard AI powered coaching provide data-driven reflection opportunities can be personalized but not personalized with a human connection Automated NLP Feedback talk time measurements reflection opportunities, NLP measurements, etc. 
GPT wasn\u0026rsquo;t good at evaluating teachers GPT is gaslighting the teachers\u0026mdash;rewording the existing work, so no novelty GPT is fairly faithful, the information is relevant, and Punitive vs. Restorative Classroom Management Classroom Management reducing use of exclusionary discipline improve classroom management to prevent escalation teachers feel stressed + under-prepared Examples \u0026ldquo;sit down now\u0026rdquo; vs. \u0026ldquo;do you need a break\u0026rdquo;\nClassroom Management with a Roberta working well to predict whether or not an action is punitive correlations are strong against teacher\u0026rsquo;s and students\u0026rsquo; perceptions of the class Male teachers practice less punitive behavior management than female teachers.\nGenerating Student Feedback How do we support \u0026ldquo;growth mindset\u0026rdquo;. For instance, \u0026ldquo;just try harder!!!!\u0026rdquo; is not growth mindset.\nGMSL Framework emphatic validation reapproaisal of affect seeking to understand position as collaboration provide hope for change use autonomy supportive language ","html":"\u003ch2 id=\"qualitative-changes-in-teaching-via-llms\"\u003eQualitative Changes in Teaching via LLMs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eno clear sign that there are qualitative changes via GPT\u003c/li\u003e\n\u003cli\u003eno clear catering to students\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"important-questions\"\u003eimportant questions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ehow to treanslate training into practice?\u003c/li\u003e\n\u003cli\u003ehow to cater to student needs?\u003c/li\u003e\n\u003cli\u003ewhat to do with flawed assessments?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"teacher-training\"\u003eTeacher Training\u003c/h2\u003e\n\u003ch3 id=\"conventional-teacher-coaching\"\u003eConventional Teacher Coaching\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enot scalable, requires observation, and will give 
feedback\u003c/li\u003e\n\u003cli\u003enot data driven, not adaptive\u0026mdash;expertise is hard\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ai-powered-coaching\"\u003eAI powered coaching\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eprovide data-driven reflection opportunities\u003c/li\u003e\n\u003cli\u003ecan be personalized\u003c/li\u003e\n\u003cli\u003ebut not personalized with a \u003cstrong\u003ehuman connection\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"automated-nlp-feedback\"\u003eAutomated NLP Feedback\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etalk time measurements\u003c/li\u003e\n\u003cli\u003ereflection opportunities, NLP measurements, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"gpt-wasn-t-good-at-evaluating-teachers\"\u003eGPT wasn\u0026rsquo;t good at evaluating teachers\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGPT is gaslighting the teachers\u0026mdash;rewording the existing work, so \u003cstrong\u003eno novelty\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eGPT is fairly faithful, the information is relevant, and\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"punitive-vs-dot-restorative-classroom-management\"\u003ePunitive vs. Restorative Classroom Management\u003c/h2\u003e\n\u003ch3 id=\"classroom-management\"\u003eClassroom Management\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ereducing use of exclusionary discipline\u003c/li\u003e\n\u003cli\u003eimprove classroom management to prevent escalation\u003c/li\u003e\n\u003cli\u003eteachers feel stressed + under-prepared\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"examples\"\u003eExamples\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;sit down now\u0026rdquo; vs. 
\u0026ldquo;do you need a break\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"classroom-management-with-a-roberta\"\u003eClassroom Management with a Roberta\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eworking well to predict whether or not an action is punitive\u003c/li\u003e\n\u003cli\u003ecorrelations are strong against teacher\u0026rsquo;s and students\u0026rsquo; perceptions of the class\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMale teachers practice \u003cstrong\u003eless\u003c/strong\u003e punitive behavior management than female teachers.\u003c/p\u003e\n\u003ch2 id=\"generating-student-feedback\"\u003eGenerating Student Feedback\u003c/h2\u003e\n\u003cp\u003eHow do we support \u0026ldquo;growth mindset\u0026rdquo;. For instance, \u0026ldquo;just try harder!!!!\u0026rdquo; is not growth mindset.\u003c/p\u003e\n\u003ch3 id=\"gmsl-framework\"\u003eGMSL Framework\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eemphatic validation\u003c/li\u003e\n\u003cli\u003ereapproaisal of affect\u003c/li\u003e\n\u003cli\u003eseeking to understand\u003c/li\u003e\n\u003cli\u003eposition as collaboration\u003c/li\u003e\n\u003cli\u003eprovide hope for change\u003c/li\u003e\n\u003cli\u003euse autonomy supportive language\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhdora/","tags":null,"title":"LLM for Teacher Feedback"},{"categories":null,"contents":"For the past 20 years, semantic indexing sucked.\nFor the most part, the core offerings of search products in the last while is divided into two categories:\nFull-text search things (i.e. 
every app in the face of the planet that stores text), which for the most part use something n-grammy like Okapi BM25 to do nice fuzzy string matching Ranking/Recommendation things, who isn\u0026rsquo;t so much trying to search a database as they are trying to guess the user\u0026rsquo;s intent and recommend them things from it And we lived in a pretty happy world in which, depending on the application, developers chose one or the other to build.\nThere\u0026rsquo;s something really funny to do with this idea of \u0026ldquo;search\u0026rdquo;. Take, for instance, Google. Its a \u0026ldquo;search\u0026rdquo; engine\u0026mdash;but really it recommends people information that is probably relevant; PageRank, the company\u0026rsquo;s claim to fame, isn\u0026rsquo;t even textual analysis of any type at all: it is a measure of relevance, based on centrality arguments about where the average web surfer may end up.\nBy framing systems like Google as an act of recommendation, we can see why it is so widely adopted: it, really, brings the best of the Internet to the user\u0026mdash;a catalogue of sorts\u0026mdash;based on text which the user provides as input data regarding their interest. It is, importantly, not a capital-s Search engine.\nAnd perhaps this explains why this doesn\u0026rsquo;t work:\nFigure 1: Oh my lord scary books.\nWouldn\u0026rsquo;t it be nice for a query like this to return us actual, non-scary books?\nIf you think about it, back in the olden days (i.e. 2019), there really isn\u0026rsquo;t a way to reconcile this difference between search and recommendation engines. 
Textual-based search systems were fantastically fast and gave you the exact things you needed\u0026mdash;great for filing \u0026ldquo;that file named this\u0026rdquo;\u0026mdash;but categorically useless when it comes to parsing large amounts of data that the user doesn\u0026rsquo;t know the exact terminology for.\nRecommendation engines, on the other hand, often required special indexing or behavioral modeling for what \u0026ldquo;an average user of this type\u0026rdquo; would like, which is great for leading the user to discover certain things they wouldn\u0026rsquo;t otherwise find, but makes hilarious mistakes like the above because, well, they aren\u0026rsquo;t doing much linguistic analysis.\nSo, what if we can simultaneously do the guessing game for user behavior\u0026mdash;a la \u0026ldquo;recommendation engines\u0026rdquo;\u0026mdash;but still use a fundamentally text-based approach to perform searching\u0026mdash;a la a \u0026ldquo;search service\u0026rdquo;?\nLLMs are text matchers Transformers are Text Matchers Fundamentally, the Transformer (the basis of all of those lovely large-language models (LLMs)) is a \u0026ldquo;sequence transduction model\u0026rdquo;\u0026mdash;they are (and were originally invented as) a translation model of sorts. 
And I find it easy and productive to think of LLMs in that mindframe: although their output may look like human-like \u0026ldquo;reasoning\u0026rdquo;, LLMs\u0026rsquo; fundamental job is to match one bit of text (context) against another (output).\nLLMs are Internet Text Matchers The actual thing that is making the whole of the world go crazy right now, though, is the fact that LLMs, transformers trained on the internet, seem to be able to handle text-to-text \u0026ldquo;translation\u0026rdquo; tasks of a much more general nature:\n\u0026ldquo;given a food, translate it into its recipe\u0026rdquo; \u0026ldquo;given this text, translate it into the same written with a pirate voice\u0026rdquo; \u0026ldquo;given the comment, translate it to the code that follows\u0026rdquo; You see, instead of carefully supervised translations a la the original Transformer, GPT+friends is simply chopping off the entirety of the input encoding process and letting the decoder ramble on its own. Hence, its outputs are functionally text matches of the context against the training data: that is, these LLM models effectively are text-matchers between the whole of the internet and your input.\nAnd hence, the promise Let\u0026rsquo;s recap real quick.\nTransformers are text matchers LLMs, which are transformers, are text matchers against the text on the internet And here\u0026rsquo;s the corollary to both of these statements:\nTransformers match text LLMs, which are transformers, is trained (and biased) towards the things people would typically be looking for on the internet That is, though LLMs fundamentally match text, they know what\u0026rsquo;s on the internet and how people talk about (and try to look for) them.\nThe point above is, to me, a very profound idea. 
LLMs essentially achieve what we were hoping to edge towards in the last 20 years\u0026mdash;closing the gap between recommendation (what people want) and search (text matching) systems into one uniform interface.\nAnd, praise be, LLMs seems to be highly directable at this task as well: they excel at few-shot and zero-shot training tasks; meaning, if you just give the Transformer a few examples of how to \u0026ldquo;translate\u0026rdquo; a piece of its input, it will happily do it with some accuracy following your example.\nAside: Stop Building Gosh Darn Chatbots Fortunately, people not named yours truly has also noticed these exciting capabilities of LLMs.\nWhat confuses me, though, is the fact that everybody and their pet duck is building a chat bot or \u0026ldquo;answering\u0026rdquo; service of some description: capitalizing on the fact that LLMs have trained knowledge of text on the internet, but completely disregarding the fact that LLMs fundamentally are best at \u0026ldquo;matching\u0026rdquo; existing text in its context and not hallucinating new text\u0026mdash;as these \u0026ldquo;answer services\u0026rdquo; want to do.\nWhat gives? Wattenburger has this fantastic take on why chat bots are not the best interface for LLMs. To me, the most salient observation\u0026mdash;one which stems from her wonderful arguments about chat bot\u0026rsquo;s poor observability and iteration cycle\u0026mdash;is that the generated text from these current LLM \u0026ldquo;search\u0026rdquo; services (called \u0026ldquo;retrial augmented generation\u0026rdquo;) is just so darn long.\nWhen we look information on a site like Google, I believe our goal is generally to shove the desired information in our head as quickly as possible and know where we can go to learn more; if we wanted to read a 300 word summary about it (as Perplexity AI, Mendable, Phind etc. 
is want to give us) we can just go look up the source ourselves.\nTo me, the duty of a search platform, LLM or not, is to get the user on their merry way as quickly as possible\u0026mdash;information in head or link in hand\u0026mdash;not for the user to navigate a possibly-hallucinated rant about the topic they are looking for, followed by 3 source citations.\nMaking a LLM Search Engine And so we face a rather daunting task. To make a better search service with LLMs, we have to:\nLeverage LLM\u0026rsquo;s fantastic text matching capabilities Allow LLMs to inject their trained biases into what\u0026rsquo;s relevant to the user in order to act as a good recommendation engine Do so in as little words as possible written by the LLM These three bullet points has consumed much of my life for the past 6 months, culminating in a reference implementation of such a \u0026ldquo;LLM search engine\u0026rdquo; called Simon. Let me now tell you its story.\nFulfilling a search query, Part 1/3 Our first goal is to figure out\nSide quest: Actual Text-to-text Recommendation Now You Try ","html":"\u003cp\u003eFor the past 20 years, semantic indexing sucked.\u003c/p\u003e\n\u003cp\u003eFor the most part, the core offerings of search products in the last while is divided into two categories:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eFull-text search things (i.e. 
every app in the face of the planet that stores text), which for the most part use something n-grammy like \u003ca href=\"https://en.wikipedia.org/wiki/Okapi_BM25\"\u003eOkapi BM25\u003c/a\u003e to do nice fuzzy string matching\u003c/li\u003e\n\u003cli\u003eRanking/Recommendation things, who isn\u0026rsquo;t so much trying to \u003cem\u003esearch\u003c/em\u003e a database as they are trying to guess the user\u0026rsquo;s intent and \u003cem\u003erecommend\u003c/em\u003e them things from it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd we lived in a pretty happy world in which, depending on the application, developers chose one or the other to build.\u003c/p\u003e\n\u003cp\u003eThere\u0026rsquo;s something really funny to do with this idea of \u0026ldquo;search\u0026rdquo;. Take, for instance, Google. Its a \u0026ldquo;search\u0026rdquo; engine\u0026mdash;but really it \u003cem\u003erecommends\u003c/em\u003e people information that is probably relevant; PageRank, the company\u0026rsquo;s claim to fame, isn\u0026rsquo;t even textual analysis of any type at all: it is a measure of \u003cem\u003erelevance\u003c/em\u003e, based on centrality arguments about where the average web surfer may end up.\u003c/p\u003e\n\u003cp\u003eBy framing systems like Google as an act of recommendation, we can see why it is so widely adopted: it, really, brings the best of the Internet to the user\u0026mdash;a catalogue of sorts\u0026mdash;based on text which the user provides as input data regarding their interest. 
It is, importantly, \u003cem\u003enot a capital-s Search engine\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eAnd perhaps this explains why this doesn\u0026rsquo;t work:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-02_15-01-29_screenshot.png\"\n alt=\"Figure 1: Oh my lord scary books.\" width=\"60%\" height=\"60%\"\u003e\u003cfigcaption\u003e\n \u003cp\u003e\u003cspan class=\"figure-number\"\u003eFigure 1: \u003c/span\u003eOh my lord scary books.\u003c/p\u003e\n \u003c/figcaption\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWouldn\u0026rsquo;t it be nice for a query like this to return us actual, non-scary books?\u003c/p\u003e\n\u003cp\u003eIf you think about it, back in the olden days (i.e. 2019), there really isn\u0026rsquo;t a way to reconcile this difference between search and recommendation engines. Textual-based search systems were fantastically fast and gave you the exact things you needed\u0026mdash;great for filing \u0026ldquo;that file named this\u0026rdquo;\u0026mdash;but categorically useless when it comes to parsing large amounts of data that the user doesn\u0026rsquo;t know the exact terminology for.\u003c/p\u003e\n\u003cp\u003eRecommendation engines, on the other hand, often required special indexing or behavioral modeling for what \u0026ldquo;an average user of this type\u0026rdquo; would like, which is great for leading the user to discover certain things they wouldn\u0026rsquo;t otherwise find, but makes hilarious mistakes like the above because, well, they aren\u0026rsquo;t doing much linguistic analysis.\u003c/p\u003e\n\u003cp\u003eSo, what if we can simultaneously do the guessing game for user behavior\u0026mdash;a la \u0026ldquo;recommendation engines\u0026rdquo;\u0026mdash;but still use a fundamentally text-based approach to perform searching\u0026mdash;a la a \u0026ldquo;search service\u0026rdquo;?\u003c/p\u003e\n\u003ch2 id=\"llms-are-text-matchers\"\u003eLLMs are text matchers\u003c/h2\u003e\n\u003ch3 
id=\"transformers-are-text-matchers\"\u003eTransformers are Text Matchers\u003c/h3\u003e\n\u003cp\u003eFundamentally, the \u003ca href=\"https://arxiv.org/abs/1706.03762\"\u003eTransformer\u003c/a\u003e (the basis of all of those lovely large-language models (LLMs)) is a \u0026ldquo;sequence transduction model\u0026rdquo;\u0026mdash;they are (and were originally invented as) a \u003cem\u003etranslation\u003c/em\u003e model of sorts. And I find it easy and productive to think of LLMs in that mindframe: although their output may look like human-like \u0026ldquo;reasoning\u0026rdquo;, LLMs\u0026rsquo; fundamental job is to \u003cem\u003ematch\u003c/em\u003e one bit of text (context) against another (output).\u003c/p\u003e\n\u003ch3 id=\"llms-are-internet-text-matchers\"\u003eLLMs are Internet Text Matchers\u003c/h3\u003e\n\u003cp\u003eThe actual thing that is making the whole of the world go crazy right now, though, is the fact that LLMs, transformers trained on the internet, seem to be able to handle text-to-text \u0026ldquo;translation\u0026rdquo; tasks of a \u003cem\u003emuch\u003c/em\u003e more general nature:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;given a food, \u003cstrong\u003etranslate\u003c/strong\u003e it into its recipe\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;given this text, \u003cstrong\u003etranslate\u003c/strong\u003e it into the same written with a pirate voice\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;given the comment, \u003cstrong\u003etranslate\u003c/strong\u003e it to the code that follows\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou see, instead of carefully supervised translations a la the original Transformer, GPT+friends is simply chopping off the entirety of the input encoding process and letting the decoder ramble on its own. 
Hence, its outputs are functionally text matches of the context against the \u003cstrong\u003etraining data\u003c/strong\u003e: that is, these LLM models effectively are text-matchers between the whole of the \u003cem\u003einternet\u003c/em\u003e and your input.\u003c/p\u003e\n\u003ch3 id=\"and-hence-the-promise\"\u003eAnd hence, the promise\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s recap real quick.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTransformers are text matchers\u003c/li\u003e\n\u003cli\u003eLLMs, which are transformers, are text matchers against the text on the internet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd here\u0026rsquo;s the corollary to both of these statements:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eTransformers match text\u003c/li\u003e\n\u003cli\u003eLLMs, which are transformers, is trained (and biased) towards the things people would typically be looking for on the internet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThat is, though LLMs fundamentally match text, they know what\u0026rsquo;s on the internet and how people talk about (and try to look for) them.\u003c/p\u003e\n\u003cp\u003eThe point above is, to me, a very profound idea. 
LLMs essentially achieve what we were hoping to edge towards in the last 20 years\u0026mdash;closing the gap between \u003cem\u003erecommendation\u003c/em\u003e (what people want) and \u003cem\u003esearch\u003c/em\u003e (text matching) systems into one uniform interface.\u003c/p\u003e\n\u003cp\u003eAnd, praise be, LLMs seems to be highly directable at this task as well: they excel at \u003ca href=\"https://arxiv.org/abs/2005.14165\"\u003efew-shot and zero-shot training tasks\u003c/a\u003e; meaning, if you just give the Transformer a few examples of how to \u0026ldquo;translate\u0026rdquo; a piece of its input, it will happily do it with some accuracy following your example.\u003c/p\u003e\n\u003ch2 id=\"aside-stop-building-gosh-darn-chatbots\"\u003eAside: Stop Building Gosh Darn Chatbots\u003c/h2\u003e\n\u003cp\u003eFortunately, people not named yours truly has also noticed these exciting capabilities of LLMs.\u003c/p\u003e\n\u003cp\u003eWhat confuses me, though, is the fact that everybody and their pet duck is building a chat bot or \u0026ldquo;answering\u0026rdquo; service of some description: capitalizing on the fact that LLMs have trained knowledge of text on the internet, but completely disregarding the fact that LLMs fundamentally are best at \u0026ldquo;matching\u0026rdquo; existing text in its context and \u003cem\u003enot\u003c/em\u003e hallucinating new text\u0026mdash;as these \u0026ldquo;answer services\u0026rdquo; want to do.\u003c/p\u003e\n\u003cp\u003eWhat gives? \u003ca href=\"https://wattenberger.com/thoughts/boo-chatbots\"\u003eWattenburger has this fantastic take\u003c/a\u003e on why chat bots are not the best interface for LLMs. 
To me, the most salient observation\u0026mdash;one which stems from her wonderful arguments about chat bot\u0026rsquo;s poor observability and iteration cycle\u0026mdash;is that the generated text from these current LLM \u0026ldquo;search\u0026rdquo; services (called \u0026ldquo;retrial augmented generation\u0026rdquo;) is just \u003cem\u003eso darn long\u003c/em\u003e.\u003c/p\u003e\n\u003cp\u003eWhen we look information on a site like Google, I believe our goal is generally to shove the desired information in our head as quickly as possible and know where we can go to learn more; if we wanted to read a 300 word summary about it (as Perplexity AI, Mendable, Phind etc. is want to give us) we can just go look up the source ourselves.\u003c/p\u003e\n\u003cp\u003eTo me, the duty of a search platform, LLM or not, is to get the user on their merry way as quickly as possible\u0026mdash;information in head or link in hand\u0026mdash;not for the user to navigate a possibly-hallucinated rant about the topic they are looking for, followed by 3 source citations.\u003c/p\u003e\n\u003ch2 id=\"making-a-llm-search-engine\"\u003eMaking a LLM Search Engine\u003c/h2\u003e\n\u003cp\u003eAnd so we face a rather daunting task. To make a better search service with LLMs, we have to:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eLeverage LLM\u0026rsquo;s fantastic text matching capabilities\u003c/li\u003e\n\u003cli\u003eAllow LLMs to inject their trained biases into what\u0026rsquo;s relevant to the user in order to act as a good recommendation engine\u003c/li\u003e\n\u003cli\u003eDo so in as little words as possible written by the LLM\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThese three bullet points has consumed much of my life for the past 6 months, culminating in a reference implementation of such a \u0026ldquo;LLM search engine\u0026rdquo; called \u003ca href=\"https://github.com/Shabang-Systems/simon\"\u003eSimon\u003c/a\u003e. 
Let me now tell you its story.\u003c/p\u003e\n\u003ch3 id=\"fulfilling-a-search-query-part-1-3\"\u003eFulfilling a search query, Part 1/3\u003c/h3\u003e\n\u003cp\u003eOur first goal is to figure out\u003c/p\u003e\n\u003ch2 id=\"side-quest-actual-text-to-text-recommendation\"\u003eSide quest: Actual Text-to-text Recommendation\u003c/h2\u003e\n\u003ch2 id=\"now-you-try\"\u003eNow You Try\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhllms_are_text_matchers/","tags":["writing"],"title":"LLMs are fantastic search engines, so I built one"},{"categories":null,"contents":"Been Kim\nalignment problem involves \u0026ldquo;aligning\u0026rdquo; the representation spaces between machines of the world and that of the human. alternative perspective: teach humans new concepts to understand/communicate better\nfeature attribution doesn\u0026rsquo;t work We take that perspective because many of the intersectional intepretability doesn\u0026rsquo;t work well (feature permutation, etc.)\u0026mdash;feature attribution type analyses (\u0026ldquo;Impossibility Theorems Been Kim\u0026rdquo;) actually has no correlation with predictive results.\nfeature information store in models is unrelated to model edit success i.e.: knowledge storing location located using ROME technique, though it gives you a sense of the location to store information, doens\u0026rsquo;t correlate to success of model editing.\ncan we use ML to teach people? for instance, we can teach grandmasters to play chess using AlphaGo, and see if we can make a quantitative impact.\nconcept A concept is a unit of knowledge that\u0026rsquo;s useful for a task. Two properties:\nminimality: irrelavent information has been removed transferable: it can be taught atomically filtering for good concepts Representing a concept as a sparse vector as the latent space. 
We can check if a concept is transferable by teaching a student agent by doing KL divergence.\ndemonstration learning instead of doing demonstration learning on machines, do it on HUMANS. Filter for the concepts that are well operationalized.\nAlphaZero recap: using a dense network to embed the network, and then MCTS.\n","html":"\u003cp\u003eBeen Kim\u003c/p\u003e\n\u003cp\u003ealignment problem involves \u0026ldquo;aligning\u0026rdquo; the representation spaces between machines of the world and that of the human. alternative perspective: \u003cstrong\u003eteach \u003cspan class=\"underline\"\u003e\u003cspan class=\"underline\"\u003ehumans\u003c/span\u003e\u003c/span\u003e new concepts to understand/communicate better\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"feature-attribution-doesn-t-work\"\u003efeature attribution doesn\u0026rsquo;t work\u003c/h2\u003e\n\u003cp\u003eWe take that perspective because many of the intersectional intepretability doesn\u0026rsquo;t work well (feature permutation, etc.)\u0026mdash;feature attribution type analyses (\u0026ldquo;Impossibility Theorems Been Kim\u0026rdquo;) actually has no correlation with predictive results.\u003c/p\u003e\n\u003ch2 id=\"feature-information-store-in-models-is-unrelated-to-model-edit-success\"\u003efeature information store in models is unrelated to model edit success\u003c/h2\u003e\n\u003cp\u003ei.e.: knowledge storing location located using ROME technique, though it gives you a sense of the location to store information, doens\u0026rsquo;t correlate to success of model editing.\u003c/p\u003e\n\u003ch2 id=\"can-we-use-ml-to-teach-people\"\u003ecan we use ML to teach people?\u003c/h2\u003e\n\u003cp\u003efor instance, we can teach grandmasters to play chess using AlphaGo, and see if we can make a quantitative impact.\u003c/p\u003e\n\u003ch3 id=\"concept\"\u003econcept\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#concept\"\u003econcept\u003c/a\u003e is a unit of knowledge that\u0026rsquo;s 
\u003cstrong\u003euseful for a task\u003c/strong\u003e. Two properties:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003eminimality\u003c/strong\u003e: irrelavent information has been removed\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etransferable\u003c/strong\u003e: it can be taught atomically\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"filtering-for-good-concept--org4a10397--s\"\u003efiltering for good \u003ca href=\"#concept\"\u003econcept\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003eRepresenting a concept as a sparse vector as the latent space. We can check if a concept is transferable by teaching a student agent by doing KL divergence.\u003c/p\u003e\n\u003ch4 id=\"demonstration-learning\"\u003edemonstration learning\u003c/h4\u003e\n\u003cp\u003einstead of doing demonstration learning on machines, do it on \u003cstrong\u003e\u003cstrong\u003eHUMANS\u003c/strong\u003e\u003c/strong\u003e. Filter for the \u003ca href=\"#concept\"\u003econcept\u003c/a\u003es that are well operationalized.\u003c/p\u003e\n\u003ch2 id=\"alphazero\"\u003eAlphaZero\u003c/h2\u003e\n\u003cp\u003erecap: using a dense network to embed the network, and then \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlm_alignment/","tags":null,"title":"LM Alignment"},{"categories":null,"contents":"We begin with a policy parameterized on anything you\u0026rsquo;d like with random seed weights. Then,\nWe sample a local set of parameters, one pertubation \\(\\pm \\alpha\\) per direction in the parameter vector (for instance, for a parameter in 4-space, up, down, left, right in latent space), and use those new parameters to seed a policy. 
Check each policy for its utility via monte-carlo policy evaluation If any of the adjacent points are better, we move there If none of the adjacent points are better, we set \\(\\alpha = 0.5 \\alpha\\) (of the up/down/left/right) and try again We continue until \\(\\alpha\\) drops below some \\(\\epsilon\\).\nNote: if we have billions of parameters, this method will be not that feasible because we have to calculate the Roll-out utility so many many many times.\n","html":"\u003cp\u003eWe begin with a policy parameterized on anything you\u0026rsquo;d like with random seed weights. Then,\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eWe sample a local set of \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es, one pertubation \\(\\pm \\alpha\\) per direction in the parameter vector (for instance, for a parameter in 4-space, up, down, left, right in latent space), and use those new parameters to seed a policy.\u003c/li\u003e\n\u003cli\u003eCheck each policy for its \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e via \u003ca href=\"/posts/kbhpolicy_evaluation/#monte-carlo-policy-evaluation\"\u003emonte-carlo policy evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIf any of the adjacent points are better, we move there\u003c/li\u003e\n\u003cli\u003eIf none of the adjacent points are better, we set \\(\\alpha = 0.5 \\alpha\\) (of the up/down/left/right) and try again\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe continue until \\(\\alpha\\) drops below some \\(\\epsilon\\).\u003c/p\u003e\n\u003cp\u003eNote: if we have billions of parameters, this method will be not that feasible because we have to calculate the \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e so many many many times.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlocal_policy_search/","tags":null,"title":"Local Policy Search"},{"categories":null,"contents":"\\begin{equation} \\log a^{b} = b\\log a 
\\end{equation}\n\\begin{equation} \\log (ab) = \\log a + \\log b \\end{equation}\n\\begin{equation} \\log (\\frac{a}{b}) = \\log a - \\log b \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\n\\log a^{b} = b\\log a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log (ab) = \\log a + \\log b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log (\\frac{a}{b}) = \\log a - \\log b\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlog_laws/","tags":null,"title":"log laws"},{"categories":null,"contents":"TODO: connect Logan with a few fire departments\n","html":"\u003cp\u003eTODO: connect Logan with a few fire departments\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlogan_s_team_check_in/","tags":null,"title":"Logan's Team Checkin"},{"categories":null,"contents":"Consider:\n\\begin{equation} P\u0026rsquo; = 2P(100-P) \\end{equation}\nfor a motivation, see petri dish.\nSolution Assuming \\(P\\) never reaches 100\n\\begin{equation} \\int \\frac{\\dd{P}}{P(100-P)} \\dd{P}= \\int 2 \\dd{t} \\end{equation}\nPartial fractions time:\n\\begin{equation} \\frac{1}{100} \\int \\qty(\\frac{1}{p} + \\frac{1}{100-p})\\dd{P} = \\frac{1}{100} \\ln |p| - \\ln |100-p| = 2t+C \\end{equation}\nRemember now log laws:\n\\begin{equation} \\frac{1}{100} \\ln \\left| \\frac{p}{100-p} \\right| = 2t+C \\end{equation}\nAnd finally, we obtain:\n\\begin{equation} \\qty | \\frac{p}{100-p} | = e^{200t + C} \\end{equation}\nWe can get rid of the absolute value by reshaping the fraction:\n\\begin{equation} \\frac{p}{100-p} = ke^{200t} \\end{equation}\nFinally, we solve for \\(p\\):\n\\begin{equation} p(t) = \\frac{100k e^{200t}}{1+ke^{200t}} = \\frac{100k}{e^{-200t}+k} \\end{equation}\nNote!\nas \\(t \\to -\\infty\\), we have \\(p \\to 0\\) as \\(t \\to +\\infty\\), we have \\(p \\to 100\\) ","html":"\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP\u0026rsquo; = 
2P(100-P)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor a motivation, see \u003ca href=\"/posts/kbhpetri_dish/\"\u003epetri dish\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"solution\"\u003eSolution\u003c/h2\u003e\n\u003cp\u003eAssuming \\(P\\) never reaches 100\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{\\dd{P}}{P(100-P)} \\dd{P}= \\int 2 \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePartial fractions time:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{100} \\int \\qty(\\frac{1}{p} + \\frac{1}{100-p})\\dd{P} = \\frac{1}{100} \\ln |p| - \\ln |100-p| = 2t+C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRemember now log laws:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{100} \\ln \\left| \\frac{p}{100-p} \\right| = 2t+C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd finally, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty | \\frac{p}{100-p} | = e^{200t + C}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can get rid of the absolute value by reshaping the fraction:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{p}{100-p} = ke^{200t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we solve for \\(p\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(t) = \\frac{100k e^{200t}}{1+ke^{200t}} = \\frac{100k}{e^{-200t}+k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eas \\(t \\to -\\infty\\), we have \\(p \\to 0\\)\u003c/li\u003e\n\u003cli\u003eas \\(t \\to +\\infty\\), we have \\(p \\to 100\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlogistic_equations/","tags":null,"title":"logistic equation"},{"categories":null,"contents":"Naive Bayes acts to compute \\(P(Y|X)\\) via the Bayes rule and using the Naive Bayes assumption. 
What if we can model the value of \\(P(Y|X)\\) directly?\nWith \\(\\sigma\\) as the sigmoid function:\n\\begin{equation} P(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\end{equation}\nand we tune the parameters of \\(\\theta\\) until this looks correct.\nWe always want to introduce a BIAS parameter, which acts as an offset; meaning the first \\(x\\) should always be \\(1\\), which makes the first value in \\(\\theta\\) as a \u0026ldquo;bias\u0026rdquo;.\nFor optimizing this function, we have:\n\\begin{equation} LL(\\theta) = y \\log \\sigma(\\theta^{\\top} x) + (1-y) \\log (1- \\theta^{\\top} x) \\end{equation}\nand if we took the derivative w.r.t. a particular parameter slot \\(\\theta_{j}\\):\n\\begin{equation} \\pdv{LL(\\theta)}{\\theta_{j}} = \\sum_{i=1}^{n} \\qty[y^{(i)} - \\sigma(\\theta^{\\top}x^{(i)})] x_{j}^{(i)} \\end{equation}\nlogistic regression assumption We assume that there exists that there are some \\(\\theta\\) which, when multiplied to the input and squashed by th sigmoid function, can model our underlying probability distribution:\n\\begin{equation} \\begin{cases} P(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\\\ P(Y=0|X=x) = 1- \\sigma (\\theta^{\\top}x) \\\\ \\end{cases} \\end{equation}\nWe then attempt to compute a set of \\(\\theta\\) which:\n\\begin{equation} \\theta_{MLE} = \\arg\\max_{\\theta} P(y^{(1)}, \\dots, y^{(n)} | \\theta, x_1 \\dots x_{n}) \\end{equation}\nLog Likelihood of Logistic Regression To actually perform MLE for the $θ$s, we need to do parameter learning. 
Now, recall that we defined, though the logistic regression assumption:\n\\begin{equation} P(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\end{equation}\nessentially, this is a Bernouli:\n\\begin{equation} (Y|X=x) \\sim Bern(p=\\sigma(\\theta^{\\top}x)) \\end{equation}\nWe desire to maximize:\n\\begin{equation} P(Y=y | \\theta, X=x) \\end{equation}\nNow, recall the continous PDF of Bernouli:\n\\begin{equation} P(Y=y) = p^{y} (1-p)^{1-y} \\end{equation}\nwe now plug in our expression for \\(p\\):\n\\begin{equation} P(Y=y|X=x) = \\sigma(\\theta^{\\top}x)^{y} (1-\\sigma(\\theta^{\\top}x))^{1-y} \\end{equation}\nfor all \\(x,y\\).\nLogistic Regression, in general For some input, output pair, \\((x,y)\\), we map each input \\(x^{(i)}\\) into a vector of length \\(n\\) where \\(x^{(i)}_{1} \u0026hellip; x^{(i)}_{n}\\).\nTraining We are going to learn weights \\(w\\) and \\(b\\) using stochastic gradient descent; and measure our performance using cross-entropy loss\nTest Given a test example \\(x\\), we compute \\(p(y|x)\\) for each \\(y\\), returning the label with the highest probability.\nLogistic Regression Text Classification Given a series of input/output pairs of text to labels, we want to assign a predicted class to a new input fair.\nWe represent each text in terms of features. Each feature \\(x_{i}\\) comes with some weight \\(w_{i}\\), informing us of the importance of feature \\(x_{i}\\).\nSo: input is a vector \\(x_1 \u0026hellip; x_{n}\\), and some weights \\(w_1 \u0026hellip; w_{n}\\), which will eventually gives us an output.\nThere is usually also a bias term \\(b\\). Eventually, classification gives:\n\\begin{equation} z = w \\cdot x + b \\end{equation}\nHowever, this does not give a probability, which by default this does not. 
To fix this, we apply a squishing function \\(\\sigma\\), which gives\n\\begin{equation} \\sigma(z) = \\frac{1}{1+\\exp(-z)} \\end{equation}\nwhich ultimately yields:\n\\begin{equation} z = \\sigma(w \\cdot x+ b) \\end{equation}\nwith the sigmoid function.\nTo make this sum to \\(1\\), we write:\n\\begin{equation} p(y=1|x) = \\sigma(w \\cdot x + b) \\end{equation}\nand\n\\begin{equation} p(y=0|x) = 1- p(y=1|x) \\end{equation}\nAlso, recall that \\(\\sigma(-x) = 1- \\sigma(x)\\), this gives:\n\\begin{equation} p(y=0|x) = \\sigma(-w\\cdot x-b) \\end{equation}\nthe probability at which point we make a decision is called a decision boundary. Typically this is 0.5.\nWe can featurize by counts from a lexicon, by word counts, etc.\nFor instance:\nlogistic regression terms feature representation: each input \\(x\\) is represented by a vectorized lit of feature classification function: \\(p(y|x)\\), computing \\(y\\) using the estimated class objective function: the loss to minimize (i.e. cross entropy) optimizer: SGD, etc. decision boundary: the threshold at which classification decisions are made, with \\(P(y=1|x) \u0026gt; N\\). 
binary cross entropy \\begin{equation} \\mathcal{L} = - \\qty[y \\log \\sigmoid(w \\cdot x + b) + (1-y) \\log (1- \\sigmoid(w \\cdot x + b))] \\end{equation}\nor, for neural networks in general:\n\\begin{equation} \\mathcal{L} = - \\qty[y \\log \\hat{y} + (1-y) \\log (1- \\hat{y})] \\end{equation}\ngradient descent \\begin{equation} \\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} \\mathcal{L} \\end{equation}\n\u0026ldquo;update the weight by taking a step in the opposite direction of the gradient by weight\u0026rdquo;.\nWeight gradient for logistic regresison \\begin{equation} \\pdv{L_{CE}(\\hat(y), y)}{w_{j}} = \\qty[\\sigma\\qty(w \\cdot x + b) -y] x_{j} \\end{equation}\nwhere \\(x_{j}\\) is feature \\(j\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e acts to compute \\(P(Y|X)\\) via the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e and using the \u003ca href=\"/posts/kbhnaive_bayes/#id-6cdf74a2-2451-47d1-8a62-62aa6dff62c6-naive-bayes-assumption\"\u003eNaive Bayes assumption\u003c/a\u003e. What if we can model the value of \\(P(Y|X)\\) directly?\u003c/p\u003e\n\u003cp\u003eWith \\(\\sigma\\) as the \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=1|X=x) = \\sigma (\\theta^{\\top}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand we tune the parameters of \\(\\theta\\) until this looks correct.\u003c/p\u003e\n\u003cp\u003eWe always want to introduce a BIAS parameter, which acts as an offset; meaning the first \\(x\\) should always be \\(1\\), which makes the first value in \\(\\theta\\) as a \u0026ldquo;bias\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor optimizing this function, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nLL(\\theta) = y \\log \\sigma(\\theta^{\\top} x) + (1-y) \\log (1- \\theta^{\\top} x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand if we took the derivative w.r.t. 
a particular parameter slot \\(\\theta_{j}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{LL(\\theta)}{\\theta_{j}} = \\sum_{i=1}^{n} \\qty[y^{(i)} - \\sigma(\\theta^{\\top}x^{(i)})] x_{j}^{(i)}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"logistic-regression-assumption\"\u003elogistic regression assumption\u003c/h2\u003e\n\u003cp\u003eWe assume that there exists that there are some \\(\\theta\\) which, when multiplied to the input and squashed by th \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function, can model our underlying \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nP(Y=1|X=x) = \\sigma (\\theta^{\\top}x) \\\\\nP(Y=0|X=x) = 1- \\sigma (\\theta^{\\top}x) \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe then attempt to compute a set of \\(\\theta\\) which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MLE} = \\arg\\max_{\\theta} P(y^{(1)}, \\dots, y^{(n)} | \\theta, x_1 \\dots x_{n})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"log-likelihood-of-logistic-regression\"\u003eLog Likelihood of Logistic Regression\u003c/h2\u003e\n\u003cp\u003eTo actually perform \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e for the $θ$s, we need to do \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e. 
Now, recall that we defined, though the \u003ca href=\"#logistic-regression-assumption\"\u003elogistic regression assumption\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=1|X=x) = \\sigma (\\theta^{\\top}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eessentially, this is a Bernouli:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(Y|X=x) \\sim Bern(p=\\sigma(\\theta^{\\top}x))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire to maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y | \\theta, X=x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall the continous \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e of Bernouli:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y) = p^{y} (1-p)^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now plug in our expression for \\(p\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=y|X=x) = \\sigma(\\theta^{\\top}x)^{y} (1-\\sigma(\\theta^{\\top}x))^{1-y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(x,y\\).\u003c/p\u003e\n\u003ch2 id=\"logistic-regression-in-general\"\u003eLogistic Regression, in general\u003c/h2\u003e\n\u003cp\u003eFor some input, output pair, \\((x,y)\\), we map each input \\(x^{(i)}\\) into a vector of length \\(n\\) where \\(x^{(i)}_{1} \u0026hellip; x^{(i)}_{n}\\).\u003c/p\u003e\n\u003ch3 id=\"training\"\u003eTraining\u003c/h3\u003e\n\u003cp\u003eWe are going to learn weights \\(w\\) and \\(b\\) using stochastic gradient descent; and measure our performance using cross-entropy loss\u003c/p\u003e\n\u003ch3 id=\"test\"\u003eTest\u003c/h3\u003e\n\u003cp\u003eGiven a test example \\(x\\), we compute \\(p(y|x)\\) for each \\(y\\), returning the label with the highest probability.\u003c/p\u003e\n\u003ch2 id=\"logistic-regression-text-classification\"\u003eLogistic Regression Text Classification\u003c/h2\u003e\n\u003cp\u003eGiven a series of input/output pairs of text to labels, we want to 
assign a predicted class to a new input fair.\u003c/p\u003e\n\u003cp\u003eWe represent each text in terms of features. Each feature \\(x_{i}\\) comes with some weight \\(w_{i}\\), informing us of the importance of feature \\(x_{i}\\).\u003c/p\u003e\n\u003cp\u003eSo: input is a vector \\(x_1 \u0026hellip; x_{n}\\), and some weights \\(w_1 \u0026hellip; w_{n}\\), which will eventually gives us an output.\u003c/p\u003e\n\u003cp\u003eThere is usually also a bias term \\(b\\). Eventually, classification gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = w \\cdot x + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHowever, this does \u003cstrong\u003enot\u003c/strong\u003e give a probability, which by default this does not. To fix this, we apply a squishing function \\(\\sigma\\), which gives\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma(z) = \\frac{1}{1+\\exp(-z)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich ultimately yields:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = \\sigma(w \\cdot x+ b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith the \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function.\u003c/p\u003e\n\u003cp\u003eTo make this sum to \\(1\\), we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(y=1|x) = \\sigma(w \\cdot x + b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(y=0|x) = 1- p(y=1|x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAlso, recall that \\(\\sigma(-x) = 1- \\sigma(x)\\), this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(y=0|x) = \\sigma(-w\\cdot x-b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe probability at which point we make a decision is called a \u003ca href=\"#logistic-regression-text-classification\"\u003edecision boundary\u003c/a\u003e. 
Typically this is 0.5.\u003c/p\u003e\n\u003cp\u003eWe can featurize by counts from a lexicon, by word counts, etc.\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-26_19-26-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"logistic-regression-terms\"\u003elogistic regression terms\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efeature representation\u003c/strong\u003e: each input \\(x\\) is represented by a vectorized lit of feature\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eclassification function\u003c/strong\u003e: \\(p(y|x)\\), computing \\(y\\) using the estimated class\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eobjective function\u003c/strong\u003e: the loss to minimize (i.e. cross entropy)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eoptimizer\u003c/strong\u003e: SGD, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edecision boundary\u003c/strong\u003e: the threshold at which classification decisions are made, with \\(P(y=1|x) \u0026gt; N\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"binary-cross-entropy\"\u003ebinary cross entropy\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{L} = - \\qty[y \\log \\sigmoid(w \\cdot x + b) + (1-y) \\log (1- \\sigmoid(w \\cdot x + b))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor, for neural networks in general:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{L} = - \\qty[y \\log \\hat{y} + (1-y) \\log (1- \\hat{y})]\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"gradient-descent\"\u003egradient descent\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} \\mathcal{L}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;update the weight by taking a step in the opposite direction of the gradient by weight\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"weight-gradient-for-logistic-regresison\"\u003eWeight gradient for logistic 
regresison\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{L_{CE}(\\hat(y), y)}{w_{j}} = \\qty[\\sigma\\qty(w \\cdot x + b) -y] x_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(x_{j}\\) is feature \\(j\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlogistic_regression/","tags":null,"title":"logistic regression"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhloop_invariant/","tags":null,"title":"loop invariant"},{"categories":null,"contents":"A lottery is a choice problem, where each outcome has a certain probability:\n\\begin{equation} [S_1:p_1, \\dots, S_{n}:p_{n}] \\end{equation}\nwhere, \\(S_{j}\\) has \\(p_{j}\\) chance of occurring.\nutility of a lottery For a lottery, the utility thereof is the probability of a state happening times the utility of the state:\nthat is,\n\\begin{equation} U([S_1:p_1, \\dots, S_{n}:p_{n}]) = \\sum_{i=1}^{N} p_{i}U(S_{i})} \\end{equation}\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e is a choice problem, where each outcome has a certain \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n[S_1:p_1, \\dots, S_{n}:p_{n}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(S_{j}\\) has \\(p_{j}\\) chance of occurring.\u003c/p\u003e\n\u003ch2 id=\"utility-of-a-lottery\"\u003eutility of a lottery\u003c/h2\u003e\n\u003cp\u003eFor a lottery, the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e thereof is the probability of a state happening times the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of the state:\u003c/p\u003e\n\u003cp\u003ethat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU([S_1:p_1, \\dots, S_{n}:p_{n}]) = \\sum_{i=1}^{N} 
p_{i}U(S_{i})}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlottery/","tags":null,"title":"lottery"},{"categories":null,"contents":"Real-Time Dynamic Programming RTDP is a asynchronous value iteration scheme. Each RTDP trial is a result of:\n\\begin{equation} V(s) = \\min_{ia \\in A(s)} c(a,s) + \\sum_{s\u0026rsquo; \\in S}^{} P_{a}(s\u0026rsquo;|s)V(s) \\end{equation}\nthe algorithm halts when the residuals are sufficiently small.\nLabeled RTDP We want to label converged states so we don\u0026rsquo;t need to keep investigate it:\na state is solved if:\nstate has less then \\(\\epsilon\\) all reachable states given \\(s\u0026rsquo;\\) from this state has residual lower than \\(\\epsilon\\) Labelled RTDP We stochastically simulate one step forward, and until a state we haven\u0026rsquo;t marked as \u0026ldquo;solved\u0026rdquo; is met, then we simulate forward and value iterate\n","html":"\u003ch2 id=\"real-time-dynamic-programming\"\u003eReal-Time Dynamic Programming\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e is a asynchronous value iteration scheme. 
Each \u003ca href=\"#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e trial is a result of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV(s) = \\min_{ia \\in A(s)} c(a,s) + \\sum_{s\u0026rsquo; \\in S}^{} P_{a}(s\u0026rsquo;|s)V(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe algorithm halts when the residuals are sufficiently small.\u003c/p\u003e\n\u003ch2 id=\"labeled-rtdp--org9a279ff\"\u003eLabeled \u003ca href=\"#real-time-dynamic-programming\"\u003eRTDP\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWe want to label converged states so we don\u0026rsquo;t need to keep investigate it:\u003c/p\u003e\n\u003cp\u003ea state is \u003cstrong\u003esolved\u003c/strong\u003e if:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estate has less then \\(\\epsilon\\)\u003c/li\u003e\n\u003cli\u003eall reachable states given \\(s\u0026rsquo;\\) from this state has residual lower than \\(\\epsilon\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"labelled-rtdp\"\u003eLabelled RTDP\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_10-11-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe stochastically simulate one step forward, and until a state we haven\u0026rsquo;t marked as \u0026ldquo;solved\u0026rdquo; is met, then we simulate forward and value iterate\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhltrdp/","tags":null,"title":"LRTDP"},{"categories":null,"contents":"LucCage is a platform as a biosensor: a case whose binding domain could be changed to fit specific applications\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhluccage/\"\u003eLucCage\u003c/a\u003e is a platform as a biosensor: a case whose binding domain could be changed to fit specific applications\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhluccage/","tags":null,"title":"LucCage"},{"categories":null,"contents":"DOI: 10.1101/2021.03.24.21254263\nOne-Liner Review paper presenting the \\(ADReSS_o\\) challenge and current baselines for 
three tasks\nNotes Three tasks + state of the art:\nClassification of AD: accuracy \\(78.87\\%\\) Prediction of MMSE score: RMSE \\(5.28\\) Prediction of cognitive decline: accuracy \\(68.75\\%\\) Task 1 AD classification baseline established by decision tree with late fusion\n(LOOCV and test)\nTask 2 MMSE score prediction baseline established by grid search on parameters.\nSVR did best on both counts; results from either model are averaged for prediction.\nTask 3 Same thing here, DT does better but notably its F1 is smaller; data trained with final late fusion\n","html":"\u003cp\u003eDOI: 10.1101/2021.03.24.21254263\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eReview paper presenting the \\(ADReSS_o\\) challenge and current baselines for three tasks\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eThree tasks + state of the art:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eClassification of AD: accuracy \\(78.87\\%\\)\u003c/li\u003e\n\u003cli\u003ePrediction of \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e score: RMSE \\(5.28\\)\u003c/li\u003e\n\u003cli\u003ePrediction of cognitive decline: accuracy \\(68.75\\%\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"task-1\"\u003eTask 1\u003c/h3\u003e\n\u003cp\u003eAD classification baseline established by decision tree with \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_22-57-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(\u003ca href=\"/posts/kbhloo/\"\u003eLOOCV\u003c/a\u003e and test)\u003c/p\u003e\n\u003ch3 id=\"task-2\"\u003eTask 2\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e score prediction baseline established by \u003ca href=\"/posts/kbhgrid_search/\"\u003egrid search\u003c/a\u003e on parameters.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2022-06-25_22-58-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSVR did best on both counts; results from either model are averaged for prediction.\u003c/p\u003e\n\u003ch3 id=\"task-3\"\u003eTask 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_23-02-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSame thing here, DT does better but notably its F1 is smaller; data trained with final late fusion\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhluz_2021/","tags":["ntj"],"title":"Luz 2021"},{"categories":null,"contents":"Seed: walking, loving\nWalking\nSkipping\nShoes\nRoad\nRunning\nForward\nSpeed\nPlane\nTravel\nUnique\nCold\nHouse\nLoving\nCuddling\nKissing\nHolding\nTogether\nStaring\nLonging\nEstablish\nSpending time\nWaving\nWelling\nWalking together, staring forward longing you\nLoving together, skipping forward, a cold house\nCuddling down the avenue, spending time there, Waving by\nEstablish what it\u0026rsquo;s like,\n","html":"\u003cp\u003eSeed: walking, 
loving\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eWalking\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSkipping\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eShoes\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRoad\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRunning\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eForward\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpeed\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ePlane\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTravel\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eUnique\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCold\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHouse\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLoving\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCuddling\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eKissing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHolding\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eTogether\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eStaring\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLonging\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEstablish\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpending time\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWaving\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWelling\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eWalking together,\nstaring forward\nlonging you\u003c/p\u003e\n\u003cp\u003eLoving together,\nskipping forward,\na cold house\u003c/p\u003e\n\u003cp\u003eCuddling down the avenue,\nspending time there,\nWaving by\u003c/p\u003e\n\u003cp\u003eEstablish what it\u0026rsquo;s 
like,\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlyrics_ping/","tags":null,"title":"Lyrics: Ping"},{"categories":null,"contents":"Seed: explore, wild\nexplore\nlearn\nresources\nmineral\ndetail\nfeature\nfact\npolice\nduty\nparticulars\ndeposit\nassign\nundertake\nnatural\nenvironment\ncultivate\nregion\nharshly\nuntrusting\nnervous\nincreasing\nchanging\nperiod\nbecome greater\nWe go explore, changing times, parting ways.\nWanting no praise, become greater Than ever\nWe go explore, shining lights moving stars\nFinding no target, we cannot expect to see\nHow can we explore if we can\u0026rsquo;t even feed? Ourselves? Our families? Our digitaries?\nHow can we explore if we can\u0026rsquo;t even seek. Unatural exploration Touching the depths with our feet\nWe go explore, wondrous depths, random seas\nWanting someone, reminicing the never\nWe go explore, purple skies acid rain\nFinding the target, we didn\u0026rsquo;t know to see\nHow can we explore if we can\u0026rsquo;t even feed? Ourselves? Our families? Our digitaries?\nHow can we explore if we can\u0026rsquo;t even seek. 
Unatural exploration Probing the depths with our feet\n","html":"\u003cp\u003eSeed: explore, wild\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eexplore\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elearn\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eresources\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emineral\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edetail\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003efeature\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003efact\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003epolice\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eduty\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eparticulars\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edeposit\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eassign\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eundertake\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003enatural\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eenvironment\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecultivate\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eregion\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eharshly\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003euntrusting\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003enervous\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eincreasing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003echanging\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eperiod\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ebecome greater\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eWe go explore,\nchanging times,\nparting ways.\u003c/p\u003e\n\u003cp\u003eWanting no praise,\nbecome greater\nThan ever\u003c/p\u003e\n\u003cp\u003eWe 
go explore,\nshining lights\nmoving stars\u003c/p\u003e\n\u003cp\u003eFinding no target,\nwe cannot expect to see\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even feed?\nOurselves? Our families? Our digitaries?\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even seek.\nUnatural exploration\nTouching the depths with our feet\u003c/p\u003e\n\u003cp\u003eWe go explore,\nwondrous depths,\nrandom seas\u003c/p\u003e\n\u003cp\u003eWanting someone,\nreminicing\nthe never\u003c/p\u003e\n\u003cp\u003eWe go explore,\npurple skies\nacid rain\u003c/p\u003e\n\u003cp\u003eFinding the target,\nwe didn\u0026rsquo;t know to see\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even feed?\nOurselves? Our families? Our digitaries?\u003c/p\u003e\n\u003cp\u003eHow can we explore if we can\u0026rsquo;t even seek.\nUnatural exploration\nProbing the depths with our feet\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhlyrics_laws/","tags":null,"title":"Lyrics: Unnatural Exploration"},{"categories":null,"contents":"DOI: 10.1109/CISP-BMEI.2018.8633126\nA dataset paper with which auditory info about people talking is collected.\nHere are the state-of-the-art as of Laguarta 2021 on the dataset proposed.\n","html":"\u003cp\u003eDOI: 10.1109/CISP-BMEI.2018.8633126\u003c/p\u003e\n\u003cp\u003eA dataset paper with which auditory info about people talking is collected.\u003c/p\u003e\n\u003cp\u003eHere are the state-of-the-art as of \u003ca href=\"/posts/kbhlaguarta_2021/\"\u003eLaguarta 2021\u003c/a\u003e on the dataset proposed.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-31-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhlyu_2018/","tags":null,"title":"Lyu 2018"},{"categories":null,"contents":"machine learning is the act of using some input to come up with some prediction, where the model is parameterized via a bunch of parameters. 
Hence, parameter learning approaches is how machine learning works.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmachine_learning/\"\u003emachine learning\u003c/a\u003e is the act of using some input to come up with some prediction, where the model is parameterized via a bunch of \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es. Hence, \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e approaches is how machine learning works.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmachine_learning/","tags":null,"title":"machine learning"},{"categories":null,"contents":"For multi-class classification, the macroaverage is the average of statistical values (prec, recc, etc.) after they have been computed for each seperate class.\nThe microaverage is the combination of a confusion matrix BEFORE statistical values are computed.\n","html":"\u003cp\u003eFor multi-class classification, the \u003ca href=\"/posts/kbhmacroaverage/\"\u003emacroaverage\u003c/a\u003e is the average of statistical values (prec, recc, etc.) 
after they have been computed for each seperate class.\u003c/p\u003e\n\u003cp\u003eThe microaverage is the combination of a confusion matrix BEFORE statistical values are computed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmacroaverage/","tags":null,"title":"macroaverage"},{"categories":null,"contents":"For a charge to do something in a magnetic field, it has to have velocity; nothing happens without movement.\nSo:\n\\begin{equation} \\vec{F}_{M} = q \\vec{v} \\times \\vec{B} \\end{equation}\nTo calculate: magnitude: \\(qvB \\sin \\theta\\) + right hand rule.\nRadius You maybe asked to find the radius of the path the particle takes, so:\n\\begin{equation} \\frac{v^{2}}{r} = a \\end{equation}\nSo, the net force here is:\n\\begin{equation} qvB = Ma \\end{equation}\nSo plug in and solve\nCurrent along a wire \\begin{equation} \\vec{F} = \\int I \\dd{l} \\times \\vec{B} \\end{equation}\nThe sum of the current across the wire is the same as \\(q \\vec{v}\\).\nThis equals \\(IlB \\sin \\theta\\) in magnitude for constant current.\n\u0026ldquo;FILB - sintheta\u0026rdquo;\nInduced magnetic field For the induced magnetic field of a current, use the curvey (curl) right hand rule.\nThe actual magnitude induced by the wire is ampere\u0026rsquo;s law:\n\\begin{equation} \\oint \\vec{B} \\cdot \\dd{\\vec{l}} = \\mu I \\end{equation}\nwhere, \\(u_0\\) is vacuum permeability (\\(4 \\pi \\times 10^{-7} \\frac{T \\cdot m}{A}\\)).\nMagnetic Field of a Solenoid \\begin{equation} Bs = \\mu_{0} n I \\end{equation}\nwhere, \\(n = \\frac{N}{L}\\), the number of turns of the solenoid per length.\nMagnetic Field of a Loop \\begin{equation} B 2\\pi r = \\mu_{0} I \\end{equation}\nwhere, the surface integral of length of a loop is just the circumference\nWIRES ARE OPPOSITE Current\u0026rsquo;s induced magnetic fields in the same direction attracts, and in opposite directinos repel\nFull description of magnetic field non-bdl-b For instance, current in a loop and desire 
magnetic field in the center\n","html":"\u003cp\u003eFor a charge to do something in a magnetic field, it has to have velocity; nothing happens without \u003cstrong\u003emovement\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F}_{M} = q \\vec{v} \\times \\vec{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo calculate: magnitude: \\(qvB \\sin \\theta\\) + right hand rule.\u003c/p\u003e\n\u003ch2 id=\"radius\"\u003eRadius\u003c/h2\u003e\n\u003cp\u003eYou maybe asked to find the radius of the path the particle takes, so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{v^{2}}{r} = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, the net force here is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nqvB = Ma\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo plug in and solve\u003c/p\u003e\n\u003ch2 id=\"current-along-a-wire\"\u003eCurrent along a wire\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{F} = \\int I \\dd{l} \\times \\vec{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe sum of the current across the wire is the same as \\(q \\vec{v}\\).\u003c/p\u003e\n\u003cp\u003eThis equals \\(IlB \\sin \\theta\\) in magnitude for constant current.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;FILB - sintheta\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"induced-magnetic-field\"\u003eInduced magnetic field\u003c/h2\u003e\n\u003cp\u003eFor the induced magnetic field of a current, use the curvey (curl) right hand rule.\u003c/p\u003e\n\u003cp\u003eThe actual \u003cstrong\u003emagnitude\u003c/strong\u003e induced by the wire is ampere\u0026rsquo;s law:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\oint \\vec{B} \\cdot \\dd{\\vec{l}} = \\mu I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(u_0\\) is vacuum permeability (\\(4 \\pi \\times 10^{-7} \\frac{T \\cdot m}{A}\\)).\u003c/p\u003e\n\u003ch2 id=\"magnetic-field-of-a-solenoid\"\u003eMagnetic Field of a 
Solenoid\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nBs = \\mu_{0} n I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(n = \\frac{N}{L}\\), the number of turns of the solenoid per length.\u003c/p\u003e\n\u003ch2 id=\"magnetic-field-of-a-loop\"\u003eMagnetic Field of a Loop\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nB 2\\pi r = \\mu_{0} I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, the surface integral of length of a loop is just the circumference\u003c/p\u003e\n\u003ch2 id=\"wires-are-opposite\"\u003eWIRES ARE OPPOSITE\u003c/h2\u003e\n\u003cp\u003eCurrent\u0026rsquo;s induced magnetic fields in the \u003cstrong\u003esame direction attracts\u003c/strong\u003e, and in \u003cstrong\u003eopposite directinos repel\u003c/strong\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-19_10-52-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"full-description-of-magnetic-field-non-bdl-b\"\u003eFull description of magnetic field non-bdl-b\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-19_11-13-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFor instance, current in a loop and desire magnetic field in the center\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmagnetism/","tags":null,"title":"magnetism"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.623607\nOne-Liner Trained a bimodal model on speech/text with GRU on speech and CNN-LSTM on text.\nNovelty A post-2019 NLP paper that doesn\u0026rsquo;t use transformers! 
(so faster (they used CNN-LSTM) lighter easier) \u0026ldquo;Our work sheds light on why the accuracy of these models drops to 72.92% on the ADReSS dataset, whereas, they gave state of the art results on the DementiaBank dataset.\u0026rdquo; Notable Methods Bi-Modal audio and transcript processing vis a vi Shah 2021, but with a CNN-LSTM and GRU on the other side.\nKey Figs Figure 1: Proposed Architecture The figure highlights the authors\u0026rsquo; proposed architecture\nFigure 2: confusion matrix In addition to validating prior work by Karlekar 2018 and Di Palo 2019, proposed model C and got accuracy of \\(73.92\\%\\).\n","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.623607\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eTrained a bimodal model on speech/text with GRU on speech and CNN-LSTM on text.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA post-2019 NLP paper that doesn\u0026rsquo;t use transformers! 
(so \u003cdel\u003efaster\u003c/del\u003e (they used CNN-LSTM) lighter easier)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Our work sheds light on why the accuracy of these models drops to 72.92% on the ADReSS dataset, whereas, they gave state of the art results on the DementiaBank dataset.\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eBi-Modal audio and transcript processing vis a vi \u003ca href=\"/posts/kbhshah_2021/\"\u003eShah 2021\u003c/a\u003e, but with a CNN-LSTM and GRU on the other side.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-1-proposed-architecture\"\u003eFigure 1: Proposed Architecture\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_12-10-09_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe figure highlights the authors\u0026rsquo; proposed architecture\u003c/p\u003e\n\u003ch3 id=\"figure-2-confusion-matrix\"\u003eFigure 2: confusion matrix\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_12-17-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIn addition to validating prior work by Karlekar 2018 and Di Palo 2019, proposed model C and got accuracy of \\(73.92\\%\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmahajan_2021/","tags":["ntj"],"title":"Mahajan 2021"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmahatma_ghandi/","tags":null,"title":"Mahatma Ghandi"},{"categories":null,"contents":"Happy Monday friends.\nThe deliverable of the week was to make the a ASR model for Batchalign. Essentially, most copies of Whisper is pretty bad at Language Sample Analysis (LSA), because they mostly don\u0026rsquo;t work in terms trying to actually capture the things that people doing LSA want to capture (disfluencies, stuttering, etc.). 
OpenAI even acknowledged in the paper that they filtered out the disfluencies from their gold transcript to prevent Whisper from writing down too much of them.\nAnd so\u0026hellip; We roll up our sleeves and do it ourselves.\nA Large Language Model I didn\u0026rsquo;t want to perform Low-Rank Approximation (LoRA) to heavily when training this model. Folks fine tuning LLaMA will note that the preferred parameters were essentially asked the user to make the model matricies Rank 8, across the entire model.\nWhen trying this in earlier experiments, we failed dramatically as the LoRA\u0026rsquo;d model failed to converge when we hit any smaller rank below 10. However, if we tried to, say, do it above 10, I would OOM.\nI will note: its not like we don\u0026rsquo;t have compute. For this project, I fortunately am able to provision any number of V100 32GB as I see reasonable to train this model. Nevertheless, a lovey dovey parameter heavy 1.5 Billion parameter model is still a sight to behold (and cram into one such GPUs).\nHence, the most important impetus for making this work without aggressive LoRA and degraded performance is some kind of model parallel training scheme.\nOne Model, Multiple Cards Alr then.\nAfter investigation, DeepSpeed seemed pretty promising for a few reasons. The third iteration of its algorithm (Zero-3) has three different main offerings:\nModel parameter sharding (sharding the weights of the model across devices) Optimizer state sharding Model/Parameter state offload The last one caught my eye. Essentially, as long as your chip has the ability to perform a single forward pass, it can train a model under Zero-3. 
This is because the system is designed, on request, to offload the weights of your model into CPU or NVMe if you want\u0026mdash;and only pull it into the main device for the actual step of forward/backwards passes.\nThe thing about DeepSpeed is that its configured in a very hapazard way, and once you DeepSpeed onto your training script you can\u0026rsquo;t really go back: it expects model parallel training, in the way you configured it, always, based on the contents to the training script.\nHuggingface Accelerate to the rescue! The system is essentially a generic hypervisation framework. It is designed to accelerate model training using any framework you\u0026rsquo;d like: CPU data parallel, GPU data parallel, DeepSpeed model parallel, and so on\u0026mdash;with a single configuration file.\nWith minimal change to your training script, your actual acceleration scheme travels with a configuration file on device. Meaning, running the same script on different devices configured with Accelerate will use the best settings for that device; including the correct number of cards, accelerators, etc.\nPedal to the Metal As usual, despite how good all of this stuff sounds, getting it all to glue together was a hot mess.\nAccelerate Let\u0026rsquo;s start with Accelerate. 
The actual process of integrating Accelerate into your training script is pretty straightforward:\naccelerator = Accelerator() DEVICE = accelerator.device model, optim, dataloader, val_dataloader = accelerator.prepare(model, optim, dataloader, val_dataloader) and then, in your training loop, change\n- loss.backward() + accelerator.backward(loss) and finally, whenever you need to access a value in CPU, change\n- loss = torch.mean(loss.cpu()) + loss = torch.mean(accelerator.gather(loss)) That\u0026rsquo;s honestly about it in terms of making accelerate work.\nDeepSpeed Shenanigans DeepSpeed is a great tool to accelerate model training, but the damned thing is so janky to actually get started because of various device integration issues.\nThere\u0026rsquo;s this excellent thread on Reddit with people winging about the various things that DeepSpeed is broken about. To actually get it to actually work on my end\u0026hellip;\ndeep breath. pray to deity of your choice, etc. and Install Conda pip install deepspeed conda install openmpi pip install mpi4py (if this fails, env LD_LIBRARY_PATH=/your/conda/lib/path pip install --no-cache-dir mpi4py) If you now ran DeepSpeed on a model, it likely will crash on a local random assert statement. To fix this, get ready:\nfind runtime/zero/partitioned_param_coordinator.py wherever your DeepSpeed code is, and:\n- assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() + # assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() comment the damned assertion out. Yup.\nAccelerate Device Config And now, onto the device configuration. If you are most normal people, you can just run:\naccelerate config answer the questions, and be done for configuring that device. However, as I was training on a SLURM device, I had no access to a tty. 
Hence, I had to configure the Accelerate device configuration myself.\nTo glue Accelerate and Deepspeed together, here was the config.\ncompute_environment: LOCAL_MACHINE debug: false deepspeed_config: gradient_accumulation_steps: 1 offload_optimizer_device: none offload_param_device: cpu zero3_init_flag: true zero_stage: 3 distributed_type: DEEPSPEED fsdp_config: {} downcast_bf16: \u0026#39;no\u0026#39; machine_rank: 0 mixed_precision: \u0026#39;no\u0026#39; num_machines: 1 num_processes: 3 use_cpu: false Here are the highlights:\nmixed_precision: 'no': FP16 doesn\u0026rsquo;t work if you do your own tensor creation within the train loop as I did though the Whisper models. Your DataLoader passed to your accelerator at the beginning of the script must return the exact tensors you put into the model if you want FP16.\noffload_optimizer_device: none: offloading optimizer requires you to compile the PyTorch extension adam_cpu from DeepSpeed. I never got it to work on the training rig because it required CUDA headers (why? how? why is adam_cpu CUDA? no clue). Notably, optimizer SHARDING across GPUs still work, because that has nothing to do with offload.\nzero_stage: 3: stage 1 is state sharding, 2 is optimizer sharding, 3 is optimizer AND parameter sharding.\nnum_processes: 3: for GPUs, num_processes is the number of GPUs Accelerate/DeepSpeed should use.\nFriggin LoRA In the sprit of not wasting too many monies, I still conceded and used LoRA. This was a fairly straightforward setup through Huggingface PEFT.\nHere was my config:\npeft_config = LoraConfig(inference_mode=False, r=16, target_modules=[\u0026#34;q_proj\u0026#34;, \u0026#34;v_proj\u0026#34;, \u0026#34;out_proj\u0026#34;], lora_alpha=32, lora_dropout=0.1) and the integration:\nmodel = WhisperForConditionalGeneration.from_pretrained(f\u0026#34;{MODEL}\u0026#34;) + model = get_peft_model(model, peft_config) Simple as that. 
One protip: call model.train(); otherwise you will be hit with:\nFile \u0026#34;/jet/home/hliuk/.conda/envs/chat-whisper/lib/python3.10/site-packages/torch/nn/modules/conv.py\u0026#34;, line 309, in _conv_forward return F.conv1d(input, weight, bias, self.stride, RuntimeError: weight should have at least three dimensions presumably because of some conflict with inference_mode setting the wrong .forward() paths.\nOn the machine, merge_and_unload never worked. Instead, I downloaded the LoRA weights (instead of the merged full weights) and then called that on my local machine.\nTwo highlights from the LoRA config:\nr=16: we set the rank of the matrix into 16, because anything lower causes the model to stop converging. This still ended up needing 3 GPUs to actually cram fit.\nlora_alpha=32: I saw somewhere that the LoRA weight scaling factor, which is lora_alpha/r, should always be larger that \\(1\\). Your mileage may vary.\n[\u0026quot;q_proj\u0026quot;, \u0026quot;v_proj\u0026quot;, \u0026quot;out_proj\u0026quot;]: it seems like many people are not a fan of LoRAing the key matricies\u0026mdash;why? I don\u0026rsquo;t know. I\u0026rsquo;m following that convention here.\nAnd so\u0026hellip; Two days, and much wandb later, we\u0026rsquo;ve got a model!\nCheck it out!\nWe could\u0026rsquo;ve pushed the GPU up a little by setting LoRA rank higher, but I found that if the memory is sitting at anything above a \\(80\\%\\) ever, the system will eventually OOM.\n","html":"\u003cp\u003eHappy Monday friends.\u003c/p\u003e\n\u003cp\u003eThe deliverable of the week was to make the a ASR model for Batchalign. Essentially, most copies of Whisper is pretty bad at Language Sample Analysis (LSA), because they mostly don\u0026rsquo;t work in terms trying to actually capture the things that people doing LSA want to capture (disfluencies, stuttering, etc.). 
OpenAI even acknowledged in the paper that they filtered out the disfluencies from their gold transcript to prevent Whisper from writing down too much of them.\u003c/p\u003e\n\u003cp\u003eAnd so\u0026hellip; We roll up our sleeves and do it ourselves.\u003c/p\u003e\n\u003ch2 id=\"a-large-language-model\"\u003eA \u003cstrong\u003eLarge\u003c/strong\u003e Language Model\u003c/h2\u003e\n\u003cp\u003eI didn\u0026rsquo;t want to perform Low-Rank Approximation (LoRA) to heavily when training this model. Folks fine tuning \u003ca href=\"/posts/kbhllama/\"\u003eLLaMA\u003c/a\u003e will note that the preferred parameters were \u003ca href=\"https://deci.ai/blog/fine-tune-llama-2-with-lora-for-question-answering/\"\u003eessentially asked the user to make the model matricies Rank 8\u003c/a\u003e, across the entire model.\u003c/p\u003e\n\u003cp\u003eWhen trying this in earlier experiments, we failed dramatically as the LoRA\u0026rsquo;d model failed to converge when we hit any smaller rank below 10. However, if we tried to, say, do it above 10, I would OOM.\u003c/p\u003e\n\u003cp\u003eI will note: its not like we don\u0026rsquo;t have compute. For this project, I fortunately am able to provision any number of V100 32GB as I see reasonable to train this model. 
Nevertheless, a lovey dovey parameter heavy 1.5 Billion parameter model is still a sight to behold (and cram into one such GPUs).\u003c/p\u003e\n\u003cp\u003eHence, the most important impetus for making this work without aggressive LoRA and degraded performance is some kind of model parallel training scheme.\u003c/p\u003e\n\u003ch2 id=\"one-model-multiple-cards\"\u003eOne Model, Multiple Cards\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-23_10-21-34_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-23_10-21-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAlr then.\u003c/p\u003e\n\u003cp\u003eAfter investigation, \u003ca href=\"https://deepspeed.readthedocs.io/en/stable/zero3.html\"\u003eDeepSpeed\u003c/a\u003e seemed pretty promising for a few reasons. The third iteration of its algorithm (Zero-3) has three different main offerings:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eModel parameter sharding (sharding the weights of the model across devices)\u003c/li\u003e\n\u003cli\u003eOptimizer state sharding\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eModel/Parameter state offload\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe last one caught my eye. Essentially, as long as your chip has the ability to perform a single forward pass, it can train a model under Zero-3. 
This is because the system is designed, on request, to offload the weights of your model into CPU or NVMe if you want\u0026mdash;and only pull it into the main device for the actual step of forward/backwards passes.\u003c/p\u003e\n\u003cp\u003eThe thing about DeepSpeed is that its configured in a very hapazard way, and once you DeepSpeed onto your training script you can\u0026rsquo;t really go back: it expects model parallel training, in the way you configured it, always, based on the contents to the training script.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://github.com/huggingface/accelerate\"\u003eHuggingface Accelerate\u003c/a\u003e to the rescue! The system is essentially a generic hypervisation framework. It is designed to accelerate model training using any framework you\u0026rsquo;d like: CPU data parallel, GPU data parallel, DeepSpeed model parallel, and so on\u0026mdash;with a single configuration file.\u003c/p\u003e\n\u003cp\u003eWith minimal change to your \u003cem\u003etraining script\u003c/em\u003e, your actual acceleration scheme travels with a configuration file \u003cstrong\u003eon device\u003c/strong\u003e. Meaning, running the same script on different devices configured with Accelerate will use the best settings for that device; including the correct number of cards, accelerators, etc.\u003c/p\u003e\n\u003ch2 id=\"pedal-to-the-metal\"\u003ePedal to the Metal\u003c/h2\u003e\n\u003cp\u003eAs usual, despite how good all of this stuff sounds, getting it all to glue together was a hot mess.\u003c/p\u003e\n\u003ch3 id=\"accelerate\"\u003eAccelerate\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s start with Accelerate. 
The actual process of integrating Accelerate into your training script is pretty straightforward:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eaccelerator\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eAccelerator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eDEVICE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eaccelerator\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edevice\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emodel\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_dataloader\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eaccelerator\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprepare\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emodel\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_dataloader\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand then, in your training loop, change\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e- loss.backward()\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ accelerator.backward(loss)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand finally, whenever you need to access a value in CPU, change\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e- loss = torch.mean(loss.cpu())\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ loss = torch.mean(accelerator.gather(loss))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThat\u0026rsquo;s honestly about it in terms of making accelerate work.\u003c/p\u003e\n\u003ch3 id=\"deepspeed-shenanigans\"\u003eDeepSpeed Shenanigans\u003c/h3\u003e\n\u003cp\u003eDeepSpeed is a great tool to accelerate model training, but the damned thing is so janky to actually get started because of various device 
integration issues.\u003c/p\u003e\n\u003cp\u003eThere\u0026rsquo;s this \u003ca href=\"https://www.reddit.com/r/Oobabooga/comments/13etobg/using_deepspeed_requires_lots_of_manual_tweaking/\"\u003eexcellent thread\u003c/a\u003e on Reddit with people winging about the various things that DeepSpeed is broken about. To actually get it to actually work on my end\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cem\u003edeep breath. pray to deity of your choice, etc.\u003c/em\u003e and Install Conda\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003epip install deepspeed\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003econda install openmpi\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003epip install mpi4py\u003c/code\u003e (if this fails, \u003ccode\u003eenv LD_LIBRARY_PATH=/your/conda/lib/path pip install --no-cache-dir mpi4py\u003c/code\u003e)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf you now ran DeepSpeed on a model, it likely will crash on a local random assert statement. To fix this, get ready:\u003c/p\u003e\n\u003cp\u003efind \u003ccode\u003eruntime/zero/partitioned_param_coordinator.py\u003c/code\u003e wherever your DeepSpeed code is, and:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e- assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ # assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ecomment the damned assertion out. 
Yup.\u003c/p\u003e\n\u003ch3 id=\"accelerate-device-config\"\u003eAccelerate Device Config\u003c/h3\u003e\n\u003cp\u003eAnd now, onto the device configuration. If you are most normal people, you can just run:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eaccelerate config\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eanswer the questions, and be done for configuring that device. However, as I was training on a SLURM device, I had no access to a tty. Hence, I had to configure the Accelerate device configuration myself.\u003c/p\u003e\n\u003cp\u003eTo glue Accelerate and Deepspeed together, here was the config.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-yaml\" data-lang=\"yaml\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003ecompute_environment\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003eLOCAL_MACHINE\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edebug\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edeepspeed_config\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#f92672\"\u003egradient_accumulation_steps\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003eoffload_optimizer_device\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003enone\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003eoffload_param_device\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003ecpu\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003ezero3_init_flag\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003etrue\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003ezero_stage\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edistributed_type\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003eDEEPSPEED\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efsdp_config\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e {}\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003edowncast_bf16\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;no\u0026#39;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003emachine_rank\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003emixed_precision\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;no\u0026#39;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003enum_machines\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003enum_processes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003euse_cpu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eHere are the highlights:\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003emixed_precision: 'no'\u003c/code\u003e: FP16 doesn\u0026rsquo;t work if you do your own tensor creation within the train loop as I did though the Whisper models. 
Your DataLoader passed to your accelerator at the beginning of the script must return the \u003cstrong\u003eexact\u003c/strong\u003e tensors you put into the model if you want FP16.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003eoffload_optimizer_device: none\u003c/code\u003e: offloading optimizer requires you to compile the PyTorch extension \u003ccode\u003eadam_cpu\u003c/code\u003e from DeepSpeed. I never got it to work on the training rig because it required CUDA headers (why? how? why is \u003ccode\u003eadam_cpu\u003c/code\u003e CUDA? no clue). Notably, \u003cstrong\u003eoptimizer SHARDING\u003c/strong\u003e across GPUs still work, because that has nothing to do with offload.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003ezero_stage: 3\u003c/code\u003e: stage 1 is state sharding, 2 is optimizer sharding, 3 is optimizer AND parameter sharding.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003enum_processes: 3\u003c/code\u003e: for GPUs, \u003ccode\u003enum_processes\u003c/code\u003e is \u003cstrong\u003ethe number of GPUs\u003c/strong\u003e Accelerate/DeepSpeed should use.\u003c/p\u003e\n\u003ch3 id=\"friggin-lora\"\u003eFriggin LoRA\u003c/h3\u003e\n\u003cp\u003eIn the sprit of not wasting too many monies, I still conceded and used LoRA. 
This was a fairly straightforward setup through Huggingface PEFT.\u003c/p\u003e\n\u003cp\u003eHere was my config:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epeft_config\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLoraConfig\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einference_mode\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003er\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e16\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etarget_modules\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;q_proj\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;v_proj\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;out_proj\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003elora_alpha\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e32\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elora_dropout\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand the integration:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-diff\" data-lang=\"diff\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003emodel = WhisperForConditionalGeneration.from_pretrained(f\u0026#34;{MODEL}\u0026#34;)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e+ model = get_peft_model(model, peft_config)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSimple as that. 
One protip: call \u003ccode\u003emodel.train()\u003c/code\u003e; otherwise you will be hit with:\u003c/p\u003e\n\u003cpre tabindex=\"0\"\u003e\u003ccode class=\"language-nil\" data-lang=\"nil\"\u003e File \u0026#34;/jet/home/hliuk/.conda/envs/chat-whisper/lib/python3.10/site-packages/torch/nn/modules/conv.py\u0026#34;, line 309, in _conv_forward\n return F.conv1d(input, weight, bias, self.stride,\nRuntimeError: weight should have at least three dimensions\n\u003c/code\u003e\u003c/pre\u003e\u003cp\u003epresumably because of some conflict with \u003ccode\u003einference_mode\u003c/code\u003e setting the wrong \u003ccode\u003e.forward()\u003c/code\u003e paths.\u003c/p\u003e\n\u003cp\u003eOn the machine, \u003ccode\u003emerge_and_unload\u003c/code\u003e never worked. Instead, I downloaded the LoRA weights (instead of the merged full weights) and then called that on my local machine.\u003c/p\u003e\n\u003cp\u003eTwo highlights from the LoRA config:\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003er=16\u003c/code\u003e: we set the rank of the matrix into \u003ccode\u003e16\u003c/code\u003e, because anything lower causes the model to stop converging. This still ended up needing 3 GPUs to actually cram fit.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003elora_alpha=32\u003c/code\u003e: I saw somewhere that the LoRA weight scaling factor, which is \u003ccode\u003elora_alpha/r\u003c/code\u003e, should always be larger that \\(1\\). Your mileage may vary.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003e[\u0026quot;q_proj\u0026quot;, \u0026quot;v_proj\u0026quot;, \u0026quot;out_proj\u0026quot;]\u003c/code\u003e: it seems like many people are not a fan of LoRAing the key matricies\u0026mdash;why? I don\u0026rsquo;t know. 
I\u0026rsquo;m following that convention here.\u003c/p\u003e\n\u003ch2 id=\"and-so-dot-dot-dot\"\u003eAnd so\u0026hellip;\u003c/h2\u003e\n\u003cp\u003eTwo days, and much wandb later, we\u0026rsquo;ve got a model!\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://huggingface.co/talkbank/CHATWhisper-en-large-v1\"\u003eCheck it out!\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-23_13-16-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe could\u0026rsquo;ve pushed the GPU up a little by setting LoRA rank higher, but I found that if the memory is sitting at anything above a \\(80\\%\\) ever, the system will eventually OOM.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmake_models_go_brrr/","tags":["fireside"],"title":"Make Models Go Brrr: Model Parallel Whisper Training"},{"categories":null,"contents":"Suppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an invariant subspace under \\(T\\). Then:\n\\begin{equation} T|_{U}(u) = Tu,\\ \\forall u \\in U \\end{equation}\nwhere \\(T|_{U} \\in \\mathcal{L}(U)\\)\n","html":"\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e under \\(T\\). Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT|_{U}(u) = Tu,\\ \\forall u \\in U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(T|_{U} \\in \\mathcal{L}(U)\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmap_restriction_operator/","tags":null,"title":"map restriction operator"},{"categories":null,"contents":"MapReduce is an distributed algorithm.\nhttps://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf\nMap: \\((in\\_key, in\\_value) \\Rightarrow list(out\\_key, intermediate\\_value)\\). 
Reduce: Group map outputs by \\(out\\_key\\) \\((out\\_key, list(intermediate\\_value)) \\Rightarrow list(out\\_value)\\) example of MapReduce Say, if you want to count word frequencies in a set of documents.\nMap: \\((document\\_name, document\\_contents) \\Rightarrow list(word, #\\ occurrences)\\) You can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a single document. We have now broken the contents into divide and conquerable groups.\nReduce: \\((word, list\\ (occurrences\\_per\\_document)) \\Rightarrow (word,sum)\\) We just add up the occurrences that each of the nodes\u0026rsquo; output for word frequency.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e is an \u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-31_11-58-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003ca href=\"https://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf\"\u003ehttps://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMap: \\((in\\_key, in\\_value) \\Rightarrow list(out\\_key, intermediate\\_value)\\).\u003c/li\u003e\n\u003cli\u003eReduce:\n\u003cul\u003e\n\u003cli\u003eGroup map outputs by \\(out\\_key\\)\u003c/li\u003e\n\u003cli\u003e\\((out\\_key, list(intermediate\\_value)) \\Rightarrow list(out\\_value)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"example-of-mapreduce--kbhmapreduce-dot-md\"\u003eexample of \u003ca href=\"/posts/kbhmapreduce/\"\u003eMapReduce\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSay, if you want to count word frequencies in a set of documents.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMap: \\((document\\_name, document\\_contents) \\Rightarrow list(word, #\\ 
occurrences)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a \u003cem\u003esingle\u003c/em\u003e document. We have now broken the contents into divide and conquerable groups.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eReduce: \\((word, list\\ (occurrences\\_per\\_document)) \\Rightarrow (word,sum)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe just add up the occurrences that each of the nodes\u0026rsquo; output for word frequency.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmapreduce/","tags":null,"title":"MapReduce"},{"categories":null,"contents":"A Markov Chain is a chain of \\(N\\) states, with an \\(N \\times N\\) transition matrix.\nat each step, we are in exactly one of those states the matrix \\(P_{ij}\\) tells us \\(P(j|i)\\), the probability of going to state \\(j\\) given you are at state \\(i\\) And therefore:\n\\begin{equation} \\sum_{j=1}^{N} P_{ij} = 1 \\end{equation}\nErgotic Markov Chain a markov chain is Ergotic if\u0026hellip;\nyou have a path from any one state to any other for any start state, after some time \\(T_0\\), the probability of being in any state at any \\(T \u0026gt; T_0\\) is non-zero Every Ergotic Markov Chain has a long-term visit rate:\ni.e. a steady state visitation count exists. We usually call it:\n\\begin{equation} \\pi = \\qty(\\pi_{i}, \\dots, \\pi_{n}) \\end{equation}\nComputing steady state Fact:\nlet\u0026rsquo;s declare that \\(\\pi\\) is the steady state to a transition matrix \\(T\\); recall that the FROM states are the rows, which means that \\(\\pi\\) has to be a row vector; \\(\\pi\\) being a steady state makes:\n\\begin{equation} \\pi T = \\pi \\end{equation}\nThis is a left e.v. 
with eigenvalue \\(1\\), which is the principle eigenvector of \\(T\\) as transition matricies always have eigenvector eigenvalue to \\(1\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmarkov_chain/\"\u003eMarkov Chain\u003c/a\u003e is a chain of \\(N\\) states, with an \\(N \\times N\\) transition matrix.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eat each step, we are in exactly one of those states\u003c/li\u003e\n\u003cli\u003ethe matrix \\(P_{ij}\\) tells us \\(P(j|i)\\), the probability of going to state \\(j\\) given you are at state \\(i\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{j=1}^{N} P_{ij} = 1\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"ergotic-markov-chain\"\u003eErgotic Markov Chain\u003c/h2\u003e\n\u003cp\u003ea markov chain is \u003ca href=\"#ergotic-markov-chain\"\u003eErgotic\u003c/a\u003e if\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eyou have a path from any one state to any other\u003c/li\u003e\n\u003cli\u003efor any start state, after some time \\(T_0\\), the probability of being in any state at any \\(T \u0026gt; T_0\\) is non-zero\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eEvery \u003ca href=\"#ergotic-markov-chain\"\u003eErgotic Markov Chain\u003c/a\u003e has a long-term visit rate:\u003c/p\u003e\n\u003cp\u003ei.e. a steady state visitation count exists. 
We usually call it:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi = \\qty(\\pi_{i}, \\dots, \\pi_{n})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"computing-steady-state\"\u003eComputing steady state\u003c/h3\u003e\n\u003cp\u003eFact:\u003c/p\u003e\n\u003cp\u003elet\u0026rsquo;s declare that \\(\\pi\\) is the steady state to a transition matrix \\(T\\); recall that the FROM states are the rows, which means that \\(\\pi\\) has to be a row vector; \\(\\pi\\) being a steady state makes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi T = \\pi\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is a left e.v. with \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e \\(1\\), which is the principle \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e of \\(T\\) as transition matricies always have \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e eigenvalue to \\(1\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmarkov_chain/","tags":null,"title":"Markov Chain"},{"categories":null,"contents":"A MDP is a decision network whereby a sequences of actions causes a sequence of states. Each state is dependent on the action we take and the state we are in, and each utility is dependent on action taken and the state we are in.\nNote that, unlike a POMDP, we know what state we are in\u0026mdash;the observations from the states are just unclear.\nconstituents \\(S\\): state space (assuming discrete for now, there are \\(n\\) states) \u0026mdash; \u0026ldquo;minimum set of information that allows you to solve a problem\u0026rdquo; \\(A\\): action space \u0026mdash; set of things your agent can do \\(T(s\u0026rsquo; | s,a)\\): \u0026ldquo;dynamics\u0026rdquo;, state-transition model \u0026ldquo;probability that we end up in \\(s\u0026rsquo;\\) given \\(s\\) and action \\(a\\)\u0026rdquo;: good idea to make a table of probabilities of source vs. 
destination variables \\(R(s,a,s\u0026rsquo;)\\): expected reward given in an action and a state (real world reward maybe stochastic) \\(\\pi_{t}(s_{1:t}, a_{1:t-1})\\): the policy, returning an action, a system of assigning actions based on states however, our past states are d-seperated from our current action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\) additional information We assume policy to be exact right now.\nstationary Markov Decision Process This is a stationary Markov Decision Process because at each node \\(S_{n}\\), we have: \\(P(S_{n+1} | A_n, S_n)\\). Time is not a variable: as long as you know what state you are in, and what you did, you know the transition probability.\n(that is, the set of states is not dependent on time)\ncalculating utility with instantaneous rewards Because, typically, in decision networks you sum all the utilities together, you\u0026rsquo;d think that we should sum the utilities together.\nfinite-horizon models We want to maximize reward over time, over a finite horizon \\(n\\). Therefore, we try to maximize:\n\\begin{equation} \\sum_{t=1}^{n}r_{t} \\end{equation}\nthis function is typically called \u0026ldquo;return\u0026rdquo;.\ninfinite-horizon models If you lived forever, small positive \\(r_{t}\\) and large \\(r_{t}\\) makes no utility difference. 
We therefore add discounting:\n\\begin{equation} \\sum_{t=1}^{\\infty} \\gamma^{t-1} r_{t} \\end{equation}\nwhere, \\(\\gamma \\in (0,1)\\)\nwe discount the future by some amount\u0026mdash;an \u0026ldquo;interest rate\u0026rdquo;\u0026mdash;reward now is better than reward in the future.\n\\(\\gamma \\to 0\\): \u0026ldquo;myopic\u0026rdquo; strategies, near-sighted strategies \\(\\gamma \\to 1\\): \u0026ldquo;non-discounting\u0026rdquo; average return models We don\u0026rsquo;t care about this as much:\n\\begin{equation} \\lim_{n \\to \\infty} \\frac{1}{n} \\sum_{t=1}^{n}r_{t} \\end{equation}\nbut its close to infinite-horizon models with Gama close to \\(1\\)\nSolving an MDP You are handed or can predict \\(R(s,a)\\), and know all transitions Small, Discrete State Space\nGet an exact solution for \\(U^{*}(s)\\) (and hence \\(\\pi^{ *}(a, s)\\)) for the problem via\u0026hellip;\npolicy iteration value iteration Large, Continuous State Space\nParameterize Policy\nOptimize \\(\\pi_{\\theta}\\) to maximize \\(U(\\pi_{\\theta})\\) using Policy Optimization methods!\nGradient Free: lower dimension policy space\nLocal Policy Search (aka Hooke-Jeeves Policy Search) Genetic Policy Search Cross Entropy Method Gradient Based Method: higher dimension policy space\nPolicy Gradient\nParameterize Value Function\nOptimize \\(U_{\\theta}(S)\\) via global approximation or local approximation methods, then use a greedy policy on that nice and optimized value function.\nYou can only reason about your immediate surroundings/local reachable states online planning\nor\u0026hellip; \u0026ldquo;you don\u0026rsquo;t know the model whatsoever\u0026rdquo;\nreinforcement learning\nduring these cases, you never argmax over all actions; hence, its important to remember the methods to preserve Exploration and Exploitation.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e is a \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision 
network\u003c/a\u003e whereby a sequences of actions causes a sequence of states. Each state is dependent on the action we take and the state we are in, and each \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is dependent on action taken and the state we are in.\u003c/p\u003e\n\u003cp\u003eNote that, unlike a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e, we know what state we are in\u0026mdash;the observations from the states are just unclear.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-17_09-18-03_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(S\\): state space (assuming discrete for now, there are \\(n\\) states) \u0026mdash; \u0026ldquo;minimum set of information that allows you to solve a problem\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\\(A\\): action space \u0026mdash; set of things your agent can do\u003c/li\u003e\n\u003cli\u003e\\(T(s\u0026rsquo; | s,a)\\): \u0026ldquo;dynamics\u0026rdquo;, state-transition model \u0026ldquo;\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e that we end up in \\(s\u0026rsquo;\\) given \\(s\\) and action \\(a\\)\u0026rdquo;: good idea to make a table of probabilities of source vs. 
destination variables\u003c/li\u003e\n\u003cli\u003e\\(R(s,a,s\u0026rsquo;)\\): expected reward given in an action and a state (real world reward maybe stochastic)\u003c/li\u003e\n\u003cli\u003e\\(\\pi_{t}(s_{1:t}, a_{1:t-1})\\): the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e, returning an action, a system of assigning actions based on states\n\u003cul\u003e\n\u003cli\u003ehowever, our past states are \u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperated\u003c/a\u003e from our \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eWe assume \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e to be exact right now.\u003c/p\u003e\n\u003ch3 id=\"stationary-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003estationary \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis is a \u003ca href=\"#stationary-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003estationary Markov Decision Process\u003c/a\u003e because at each node \\(S_{n}\\), we have: \\(P(S_{n+1} | A_n, S_n)\\). 
Time is \u003cstrong\u003enot\u003c/strong\u003e a variable: as long as you know what state you are in, and what you did, you know the transition \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-17_13-07-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(that is, the set of states is not dependent on time)\u003c/p\u003e\n\u003ch3 id=\"calculating-utility--kbhutility-theory-dot-md--with-instantaneous-rewards\"\u003ecalculating \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e with instantaneous rewards\u003c/h3\u003e\n\u003cp\u003eBecause, typically, in \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003es you sum all the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutilities\u003c/a\u003e together, you\u0026rsquo;d think that we should sum the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutilities\u003c/a\u003e together.\u003c/p\u003e\n\u003ch4 id=\"finite-horizon-models\"\u003efinite-horizon models\u003c/h4\u003e\n\u003cp\u003eWe want to maximize reward over time, over a finite horizon \\(n\\). Therefore, we try to maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{t=1}^{n}r_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis function is typically called \u0026ldquo;\u003ca href=\"/posts/kbhrandom_walk/#return--finmetrics\"\u003ereturn\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch4 id=\"infinite-horizon-models\"\u003einfinite-horizon models\u003c/h4\u003e\n\u003cp\u003eIf you lived forever, small positive \\(r_{t}\\) and large \\(r_{t}\\) makes no utility difference. 
We therefore add discounting:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{t=1}^{\\infty} \\gamma^{t-1} r_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\gamma \\in (0,1)\\)\u003c/p\u003e\n\u003cp\u003ewe discount the future by some amount\u0026mdash;an \u0026ldquo;interest rate\u0026rdquo;\u0026mdash;reward now is better than reward in the future.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\gamma \\to 0\\): \u0026ldquo;myopic\u0026rdquo; strategies, near-sighted strategies\u003c/li\u003e\n\u003cli\u003e\\(\\gamma \\to 1\\): \u0026ldquo;non-discounting\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"average-return-models\"\u003eaverage return models\u003c/h4\u003e\n\u003cp\u003eWe don\u0026rsquo;t care about this as much:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty} \\frac{1}{n} \\sum_{t=1}^{n}r_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebut its close to \u003ca href=\"#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e with Gama close to \\(1\\)\u003c/p\u003e\n\u003ch3 id=\"solving-an-mdp--kbhmarkov-decision-process-dot-md\"\u003eSolving an \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e\u003c/h3\u003e\n\u003ch4 id=\"you-are-handed-or-can-predict-r--s-a--and-know-all-transitions\"\u003eYou are handed or can predict \\(R(s,a)\\), and know all transitions\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSmall, Discrete State Space\u003c/p\u003e\n\u003cp\u003eGet an exact solution for \\(U^{*}(s)\\) (and hence \\(\\pi^{ *}(a, s)\\)) for the problem via\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue 
iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eLarge, Continuous State Space\u003c/p\u003e\n \u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eParameterize Policy\u003c/p\u003e\n\u003cp\u003eOptimize \\(\\pi_{\\theta}\\) to maximize \\(U(\\pi_{\\theta})\\) using \u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e methods!\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eGradient Free\u003c/strong\u003e\u003c/strong\u003e: lower dimension \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e space\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e (aka \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eHooke-Jeeves Policy Search\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eGradient Based Method\u003c/strong\u003e\u003c/strong\u003e: higher dimension \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e space\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n \u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eParameterize Value Function\u003c/p\u003e\n\u003cp\u003eOptimize \\(U_{\\theta}(S)\\) via \u003ca href=\"/posts/kbhapproximate_value_function/#global-approximation\"\u003eglobal approximation\u003c/a\u003e or \u003ca href=\"/posts/kbhapproximate_value_function/#local-approximation\"\u003elocal approximation\u003c/a\u003e methods, then use a \u003ca 
href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e on that nice and optimized \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"you-can-only-reason-about-your-immediate-surroundings-local-reachable-states\"\u003eYou can only reason about your immediate surroundings/local reachable states\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eor\u0026hellip; \u0026ldquo;you don\u0026rsquo;t know the model whatsoever\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eduring these cases, you never argmax over all actions; hence, its important to remember the methods to preserve \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmarkov_decision_process/","tags":null,"title":"Markov Decision Process"},{"categories":null,"contents":"\\(A \\to B\\) has the same independence relationship as \\(B\\to A\\). How do we describe it?\nrequirements If two Baysian Networks encode the same conditional independence assumptions, they are Markov Equivalent.\nadditional information checking Markov Equivalence Two graphs are Markov Equivalent, IFF BOTH:\nsome edges without regard to direction (\u0026ldquo;same skeleton\u0026rdquo;) the same set of immoral v-structures ","html":"\u003cp\u003e\\(A \\to B\\) has the same independence relationship as \\(B\\to A\\). 
How do we describe it?\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eIf two \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003es encode the same \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e assumptions, they are \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalent\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"checking-markov-equivalence--kbhmarkov-equivalence-classes-dot-md\"\u003echecking \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalence\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eTwo graphs are \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalent\u003c/a\u003e, \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e BOTH:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esome edges without regard to direction (\u0026ldquo;same skeleton\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003ethe same set of \u003ca href=\"/posts/kbhimmoral_v_structure/\"\u003eimmoral v-structures\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmarkov_equivalence_classes/","tags":null,"title":"Markov Equivalence Classes"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmarkov_game/","tags":null,"title":"markov game"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmarkovian_process/","tags":null,"title":"markovian process"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmartin_luther_king/","tags":null,"title":"Martin Luther King"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2021.642647\nOne-Liner Combined bag-of-words on transcript + ADR on audio to various classifiers for AD; ablated BERT\u0026rsquo;s decesion space for 
attention to make more easy models in the future.\nNovelty Pre-processed each of the two modalities before fusing it (late fusion) Archieved \\(93.75\\%\\) accuracy on AD detection The data being forced-aligned and fed with late fusion allows one to see what sounds/words the BERT model was focusing on by just focusing on the attention on the words Notable Methods Used classic cookie theft data bag of words to do ADR but for words multimodality but late fusion with one (hot-swappable) classifier Key Figs How they did it This is how the combined the forced aligned (:tada:) audio and transcript together.\nBertbelation Ablated BERT results.\nThe model overall tends to focus on early parts of sentences. y is attention weight, x is position in sentence, blue is TD, red is AD.\nNew Concepts Active Data Representation ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2021.642647\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eCombined bag-of-words on transcript + \u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e on audio to various classifiers for AD; ablated BERT\u0026rsquo;s decesion space for attention to make more easy models in the future.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ePre-processed each of the two modalities before fusing it (\u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eArchieved \\(93.75\\%\\) accuracy on AD detection\u003c/li\u003e\n\u003cli\u003eThe data being forced-aligned and fed with \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e allows one to see what sounds/words the BERT model was focusing on by just focusing on the attention on the words\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsed classic cookie theft data\u003c/li\u003e\n\u003cli\u003ebag of words 
to do \u003ca href=\"/posts/kbhactive_data_representation/\"\u003eADR\u003c/a\u003e but for words\u003c/li\u003e\n\u003cli\u003emultimodality but \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e with one (hot-swappable) classifier\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"how-they-did-it\"\u003eHow they did it\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-20-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis is how the combined the forced aligned (:tada:) audio and transcript together.\u003c/p\u003e\n\u003ch3 id=\"bertbelation\"\u003eBertbelation\u003c/h3\u003e\n\u003cp\u003eAblated BERT results.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-23-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe model overall tends to focus on early parts of sentences. y is attention weight, x is position in sentence, blue is TD, red is AD.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhactive_data_representation/\"\u003eActive Data Representation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmartinc_2021/","tags":["ntj"],"title":"Martinc 2021"},{"categories":null,"contents":"The Martingale Model states: if we observed the closing price of the market yesterday, we expect that the market is going to open at the close price yesterday.\nFormally:\n\\begin{equation} E\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1} \\end{equation}\n\u0026ldquo;irrespective of what you know, no matter how long the history, the best expectation of today\u0026rsquo;s price is yesterday\u0026rsquo;s price.\u0026rdquo;\nThis is not a for sure! modeling statement: this is simply the expected value!! 
That means, after \\(\\infty\\) times of re-running the universe starting \u0026ldquo;yesterday\u0026rdquo;, the new opening price will converge to the last closing price.\nTwo important conclusions:\nIf we know the closing price yesterday (it is observed), the price today will be DETERMINED and not!!! a random variable If the closing price yesterday is a random variable, the price today will be IN-DETERMINED and also a random variable Therefore, the \u0026ldquo;randomness is fair\u0026rdquo;, and therefore the \u0026ldquo;market is not drifting in favor/against you.\u0026rdquo;\nThe Martingale Model comes from the idea that \u0026ldquo;true gambling is true equal conditions (money, opponents, bystanders, situations, die, and dice.)\u0026rdquo; Therefore, any amount of bias towards one direction/party is advantageous for that person.\nIn fact, it was theorized that an efficient market should follow exactly this behavior.\nchanges in history Of course, the difference between the expression:\n\\begin{equation} E\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1} \\end{equation}\nversus\n\\begin{equation} E\\qty [X_{k}|X_{k-1}] = X_{k-1} \\end{equation}\nis pretty big. The two will only be the same if the markets is assumed to be a markovian process.\nMartingale historical conditioning Ok, if we are told that the process is Martingale, but we only have two days ago, what do we have?\ni.e. what if we want to know:\n\\begin{equation} E\\qty [X_{k} | X_{k-2}] = ? \\end{equation}\nTurns out, there\u0026rsquo;s a small trick you can do. 
Without even Martingale, we can claim that:\n\\begin{equation} E\\qty [X_{k} | X_{k-2}] = \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\end{equation}\nThat, the price today is just the sum of all possible prices for day \\(k-1\\) we name small \\(x\\) times the probability \\(Pr\\) that it actually happens given the existing \\(k-2\\) observation.\nOf course, given the Martingale Model now, given some possible price in day \\(k-1\\) named \\(x\\), price in \\(k\\) is also \\(x\\). Therefore:\n\\begin{equation} E[X_{k}|X_{k-1},X_{k-1} = x] =x \\end{equation}\nApplying this, then, we have\n\\begin{equation} \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\end{equation}\nThe right sum, then, is just the expected value of \\(X_{k-1}\\) given \\(X_{k-2}\\)!! Meaning:\n\\begin{equation} \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}] \\end{equation}\nNow, we are in a Martingale Model. Therefore:\n\\begin{equation} \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}] = X_{k-2} \\end{equation}\nAnd so, putting it all together, we have:\n\\begin{align} E\\qty [X_{k} | X_{k-2}] \u0026amp;= \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\ \u0026amp;= \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\ \u0026amp;= E[X_{k-1} | X_{k-2}] \\\\ \u0026amp;= X_{k-2} \\end{align}\nAmazing. 
So Martingale holds over time\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e states: if we observed the closing price of the market yesterday, we expect that the market is going to open at the close price yesterday.\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;irrespective of what you know, no matter how long the history, the best expectation of today\u0026rsquo;s price is yesterday\u0026rsquo;s price.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThis is not a \u003cem\u003efor sure!\u003c/em\u003e modeling statement: this is simply the expected value!! That means, after \\(\\infty\\) times of re-running the universe starting \u0026ldquo;yesterday\u0026rdquo;, the new opening price will converge to the last closing price.\u003c/p\u003e\n\u003cp\u003eTwo important conclusions:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eIf we know the closing price yesterday (it is observed), the price today will be DETERMINED and not!!! 
a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIf the closing price yesterday is a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e, the price today will be IN-DETERMINED and also a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTherefore, the \u0026ldquo;randomness is fair\u0026rdquo;, and therefore the \u0026ldquo;market is not drifting in favor/against you.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e comes from the idea that \u0026ldquo;true gambling is true equal conditions (money, opponents, bystanders, situations, die, and dice.)\u0026rdquo; Therefore, any amount of bias towards one direction/party is advantageous for that person.\u003c/p\u003e\n\u003cp\u003eIn fact, it was theorized that an efficient market should follow exactly this behavior.\u003c/p\u003e\n\u003ch2 id=\"changes-in-history\"\u003echanges in history\u003c/h2\u003e\n\u003cp\u003eOf course, the difference between the expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k}|X_{k-1}, X_{k-2},\\ldots] = X_{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eversus\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k}|X_{k-1}] = X_{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis pretty big. 
The two will only be the same if the markets is assumed to be a \u003ca href=\"/posts/kbhmarkovian_process/\"\u003emarkovian process\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"martingale--kbhmartingale-model-dot-md--historical-conditioning\"\u003e\u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e historical conditioning\u003c/h2\u003e\n\u003cp\u003eOk, if we are told that the process is \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e, but we only have two days ago, what do we have?\u003c/p\u003e\n\u003cp\u003ei.e. what if we want to know:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k} | X_{k-2}] = ?\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTurns out, there\u0026rsquo;s a small trick you can do. Without even \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e, we can claim that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE\\qty [X_{k} | X_{k-2}] = \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat, the price today is just the sum of all possible prices for day \\(k-1\\) we name small \\(x\\) times the probability \\(Pr\\) that it actually happens given the existing \\(k-2\\) observation.\u003c/p\u003e\n\u003cp\u003eOf course, given the \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e now, given some possible price in day \\(k-1\\) named \\(x\\), price in \\(k\\) is also \\(x\\). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[X_{k}|X_{k-1},X_{k-1} = x] =x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying this, then, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe right sum, then, is just the expected value of \\(X_{k-1}\\) given \\(X_{k-2}\\)!! 
Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we are in a \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e. Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) = E[X_{k-1} | X_{k-2}] = X_{k-2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd so, putting it all together, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nE\\qty [X_{k} | X_{k-2}] \u0026amp;= \\sum_{x} E\\qty [X_{k} | X_{k-1}, X_{k-1} = x] \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\\n\u0026amp;= \\sum_{x} x \\cdot Pr \\qty(X_{k-1}=x|X_{k-2}) \\\\\n\u0026amp;= E[X_{k-1} | X_{k-2}] \\\\\n\u0026amp;= X_{k-2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAmazing. So \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003e holds over time\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmartingale_model/","tags":null,"title":"Martingale Model"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmath5_how/","tags":null,"title":"math5 how"},{"categories":null,"contents":"matricies are like buckets of numbers. ok, ok, seriously:\nmatricies are a way of encoding the basis of domain proof: that if Linear Maps are determined uniquely by where they map the basis anyways, why don\u0026rsquo;t we just make a mathematical object that represents that to encode the linear maps.\ndefinition Let \\(n\\), \\(m\\) be positive integer. An \\(m\\) by \\(n\\) matrix \\(A\\) is a rectangular array of elements of \\(\\mathbb{F}\\) with \\(m\\) rows and \\(n\\) columns:\n\\begin{equation} A = \\mqty(A_{1,1} \u0026amp; \\dots \u0026amp; A_{1,n} \\\\ \\vdots \u0026amp;\u0026amp; \\vdots \\\\ A_{m,1} \u0026amp; \\dots \u0026amp; A_{m,n}) \\end{equation}\nthe matrix representing a Linear Map \\(T\\) is noted as \\(\\mathcal{M}(T)\\). 
This maybe basis specific; see matrix of Linear Map for more.\nadditional information matrix of Linear Map This result codifies the claim that matricies represent Linear Maps by what they do to the basis of the space of concern.\nSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(v_1, \\dots, v_{n}\\) is a basis of \\(V\\); and \\(w_1, \\dots w_{m}\\) is a basis of \\(W\\). Then, the matrix of \\(T\\) with respective to these basis is the \\(m\\) by \\(n\\) (rows by columns!) where:\n\\begin{equation} Tv_{k} = A_{1,k}w_1 + \\dots + A_{m,k}w_{m} \\end{equation}\nQuick memory of this result: inputs across columns, outputs across rows; think about how matrix is multiplied: you smash the input vector horizontally, across the top columns and down. Therefore, a matrix is written as: each columns contains the instructions of where to send each input basis, written as a linear combination down each row of that column of the output basis\nIF the basis being used in the matrix is unclear (i.e. if we had a change of basis, so didn\u0026rsquo;t use the standard basis, etc.), then the matrix of a SPECIFIC set of basis is written as: \\(\\mathcal{M}(T, (v_1, \\dots, v_n), (w_1, \\dots, w_{m}))\\).\nmatrix of a vector The matrix of a vector is just an encoding of scalars which needed to scale the basis of the space to add up to that vector.\nMore formally\u0026mdash;\nSuppose \\(v \\in V\\), and \\(v_1 \\dots v_{n}\\) is a basis of \\(V\\). 
The matrix representing vector \\(v\\) is the n-by-1 matrix:\n\\begin{equation} \\mathcal{M}(v) = \\mqty(c_1 \\\\ \\dots \\\\ c_{n}) \\end{equation}\nwhere \\(c_1 \\dots c_{n}\\) are the scalars such that:\n\\begin{equation} v = c_1v_1 + \\dots +c_{n}v_{n} \\end{equation}\ncolumn notation One can use a dot to index matricies\u0026rsquo; columns and rows.\nSuppose \\(A\\) is an \\(m\\) by \\(n\\) matrix.\nAT \\(1 \\leq j \\leq m\\), \\(A_{j ,.}\\) denotes the \\(1\\) by \\(n\\) matrix consisting only row \\(j\\) of \\(A\\) AT \\(1 \\leq k \\leq n\\), \\(A_{. ,k}\\) denotes the \\(m\\) by \\(k\\) matrix consisting only column \\(k\\) of \\(A\\) sums and scalar multiplication of matricies According to Jana, a third grader can add and scalar multiply matricies. So I am not going to write them here.\nHowever, what\u0026rsquo;s interesting is the fact that they actually work:\nSuppose \\(S,T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(S+T) = \\mathcal{M}(S)+\\mathcal{M}(T)\\) Suppose \\(\\lambda \\in \\mathbb{F}, T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(\\lambdaT) = \\lambda \\mathcal{M}(T)\\) The verification of this result, briefly, is that:\nRecall that matricies encode where each input basis get sent, as a linear combination of the output basis, down each column; recall that \\((S+T)v = Sv+Tv\\); now, write the sum of the matrix without performing the sum; apply the basis to the matrix; distribute the basis choordinates across the sum, seperate into two matricies. Now we have the sum of the matrix is equal to \\(Sv + Tv\\); then invoke definition of sum of Linear Map.\nscalar multiplication works in the same darn way.\nmatrix multiplication See matrix multiplication\n\\(\\mathbb{F}^{m,n}\\) For \\(m\\) and \\(n\\) positive integers, the set of all \\(m,n\\) matricies with entries in \\(\\mathbb{F}\\) is called \\(\\mathbb{F}^{m,n}\\).\nThis is a vector space! 
\u0026ldquo;obviously\u0026rdquo; its basis is the set of all matrix with \\(1\\) in one slot and \\(0\\) in all others. There are \\(m\\cdot n\\) of those matricies so \\(\\dim \\mathbb{F}^{m,n}=m\\cdot n\\).\ninvertability See matrix invertability\nelementary matrix elementary matricies are slight variations from the identity matrix which performs the elementary row operations:\nswap rows add a row to another scale rows determinants See determinants\nGaussian elimination See Gaussian elimination\ndiagonal matrix see diagonal matrix\nupper-triangular matricies upper-triangular matricies\nchange-of-basis To change the basis of \\(A\\) to w.r.t. \\(B\\), create a similar matrix:\n\\begin{equation} B^{-1} A B = C \\end{equation}\n\\(C\\) is \\(A\\) in terms of \\(B\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are like buckets of numbers. ok, ok, seriously:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are a way of encoding the \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e proof: that if \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es are determined uniquely by where they map the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e anyways, why don\u0026rsquo;t we just make a mathematical object that represents that to encode the linear maps.\u003c/p\u003e\n\u003ch2 id=\"definition\"\u003edefinition\u003c/h2\u003e\n\u003cp\u003eLet \\(n\\), \\(m\\) be positive integer. 
An \\(m\\) by \\(n\\) matrix \\(A\\) is a rectangular array of elements of \\(\\mathbb{F}\\) with \\(m\\) rows and \\(n\\) columns:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(A_{1,1} \u0026amp; \\dots \u0026amp; A_{1,n} \\\\ \\vdots \u0026amp;\u0026amp; \\vdots \\\\ A_{m,1} \u0026amp; \\dots \u0026amp; A_{m,n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e representing a \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T\\) is noted as \\(\\mathcal{M}(T)\\). This maybe \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e specific; see \u003ca href=\"#matrix-of-linear-map--kbhlinear-map-dot-md\"\u003ematrix of Linear Map\u003c/a\u003e for more.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"matrix-of-linear-map--kbhlinear-map-dot-md\"\u003ematrix of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis result codifies the claim that \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e represent \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es by what they do to the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the space of concern.\u003c/p\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\), and \\(v_1, \\dots, v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\); and \\(w_1, \\dots w_{m}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(W\\). Then, the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T\\) with respective to these basis is the \\(m\\) by \\(n\\) (rows by columns!) 
where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv_{k} = A_{1,k}w_1 + \\dots + A_{m,k}w_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eQuick memory of this result: inputs across columns, outputs across rows; think about how matrix is multiplied: you smash the input vector horizontally, across the top columns and down. Therefore, a matrix is written as: each columns contains the instructions of where to send each input \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e down each row of that column of the output \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eIF the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e being used in the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is unclear (i.e. if we had a change of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, so didn\u0026rsquo;t use the standard basis, etc.), then the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of a \u003cem\u003eSPECIFIC\u003c/em\u003e set of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e is written as: \\(\\mathcal{M}(T, (v_1, \\dots, v_n), (w_1, \\dots, w_{m}))\\).\u003c/p\u003e\n\u003ch3 id=\"matrix--kbhmatricies-dot-md--of-a-vector\"\u003e\u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of a vector\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#matrix--kbhmatricies-dot-md--of-a-vector\"\u003ematrix of a vector\u003c/a\u003e is just an encoding of scalars which needed to scale the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the space to add up to that vector.\u003c/p\u003e\n\u003cp\u003eMore formally\u0026mdash;\u003c/p\u003e\n\u003cp\u003eSuppose \\(v \\in V\\), and \\(v_1 \\dots v_{n}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
The \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e representing vector \\(v\\) is the n-by-1 matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{M}(v) = \\mqty(c_1 \\\\ \\dots \\\\ c_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c_1 \\dots c_{n}\\) are the scalars such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = c_1v_1 + \\dots +c_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"column-notation\"\u003ecolumn notation\u003c/h3\u003e\n\u003cp\u003eOne can use a dot to index \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u0026rsquo; columns and rows.\u003c/p\u003e\n\u003cp\u003eSuppose \\(A\\) is an \\(m\\) by \\(n\\) matrix.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAT \\(1 \\leq j \\leq m\\), \\(A_{j ,.}\\) denotes the \\(1\\) by \\(n\\) matrix consisting only row \\(j\\) of \\(A\\)\u003c/li\u003e\n\u003cli\u003eAT \\(1 \\leq k \\leq n\\), \\(A_{. ,k}\\) denotes the \\(m\\) by \\(k\\) matrix consisting only column \\(k\\) of \\(A\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"sums-and-scalar-multiplication-of-matricies--kbhmatricies-dot-md\"\u003esums and scalar multiplication of \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eAccording to Jana, a third grader can add and scalar multiply \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e. 
So I am not going to write them here.\u003c/p\u003e\n\u003cp\u003eHowever, what\u0026rsquo;s interesting is the fact that they actually work:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSuppose \\(S,T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(S+T) = \\mathcal{M}(S)+\\mathcal{M}(T)\\)\u003c/li\u003e\n\u003cli\u003eSuppose \\(\\lambda \\in \\mathbb{F}, T \\in \\mathcal{L}(V,W)\\), then \\(\\mathcal{M}(\\lambdaT) = \\lambda \\mathcal{M}(T)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe verification of this result, briefly, is that:\u003c/p\u003e\n\u003cp\u003eRecall that matricies encode where each input \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e get sent, as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the output \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e, down each column; recall that \\((S+T)v = Sv+Tv\\); now, write the sum of the matrix without performing the sum; apply the basis to the matrix; distribute the basis choordinates across the sum, seperate into two matricies. 
Now we have the sum of the matrix is equal to \\(Sv + Tv\\); then invoke definition of sum of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e works in the same darn way.\u003c/p\u003e\n\u003ch3 id=\"matrix-multiplication--kbhmatrix-multiplication-dot-md\"\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"mathbb-f-m-n\"\u003e\\(\\mathbb{F}^{m,n}\\)\u003c/h3\u003e\n\u003cp\u003eFor \\(m\\) and \\(n\\) positive integers, the set of all \\(m,n\\) matricies with entries in \\(\\mathbb{F}\\) is called \\(\\mathbb{F}^{m,n}\\).\u003c/p\u003e\n\u003cp\u003eThis is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e! \u0026ldquo;obviously\u0026rdquo; its \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e is the set of all \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e with \\(1\\) in one slot and \\(0\\) in all others. 
There are \\(m\\cdot n\\) of those \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e so \\(\\dim \\mathbb{F}^{m,n}=m\\cdot n\\).\u003c/p\u003e\n\u003ch3 id=\"invertability--kbhinvertability-dot-md\"\u003e\u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhinvertability/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003ematrix invertability\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"elementary-matrix\"\u003eelementary matrix\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#elementary-matrix\"\u003eelementary matricies\u003c/a\u003e are slight variations from the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e matrix which performs the \u003ca href=\"#elementary-matrix\"\u003eelementary row operations\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eswap rows\u003c/li\u003e\n\u003cli\u003eadd a row to another\u003c/li\u003e\n\u003cli\u003escale rows\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"determinants--kbhdeterminants-dot-md\"\u003e\u003ca href=\"/posts/kbhdeterminants/\"\u003edeterminants\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhdeterminants/\"\u003edeterminants\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"gaussian-elimination--kbhgaussian-elimination-dot-md\"\u003e\u003ca href=\"/posts/kbhgaussian_elimination/\"\u003eGaussian elimination\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhgaussian_elimination/\"\u003eGaussian elimination\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"diagonal-matrix--kbhdiagonal-matrix-dot-md\"\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/#diagonal-matrix\"\u003ediagonal matrix\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhdiagonal_matrix/#diagonal-matrix\"\u003ediagonal matrix\u003c/a\u003e\u003c/p\u003e\n\u003ch3 
id=\"upper-triangular-matricies--kbhupper-triangular-matrix-dot-md\"\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matricies\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matricies\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"change-of-basis\"\u003echange-of-basis\u003c/h3\u003e\n\u003cp\u003eTo change the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(A\\) to w.r.t. \\(B\\), create a \u003ca href=\"/posts/kbheigenvalue/#similar-matrices\"\u003esimilar matrix\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB^{-1} A B = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(C\\) is \\(A\\) in terms of \\(B\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmatricies/","tags":null,"title":"matricies"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmatrix_adjectives/","tags":null,"title":"matrix adjectives"},{"categories":null,"contents":"If we have some system:\n\\begin{equation} x\u0026rsquo; = Ax \\end{equation}\nthe solution for this system should be \\(e^{At}\\). This gives rise to, given the power series:\n\\begin{equation} e^{At} = 1 + At + \\frac{1}{2} \\qty(At)^{2} + \\frac{1}{3!} (At)^{3}+ \\dots \\end{equation}\nthe derivative of which:\n\\begin{align} \\dv t e^{At} \u0026amp;= A + A^{2}t + \\frac{A^{3}t^{2}}{2} + \\dots \\\\ \u0026amp;= A\\qty(1 + At + \\frac{A^{2}t^{2}}{2}) \\end{align}\nThis intuition makes sense for all matrices \\(A\\). Meaning the general solution gives:\n\\begin{equation} x = e^{At} x_0 \\end{equation}\nSee also raising e to a matrix to see how to deal with diagonalizable matricies.\nBenefits this approach produces all solutions no matter the eigenvalues of \\(A\\). also tells you what to do if your characteristic polynomial has repeated eigenvalues this is computationally not too bad if you have.. 
diagonal \\(A\\) diagonalizable \\(A\\) Great Matrix Exponential Tragedy \\begin{equation} e^{A+B} \\neq e^{A} e^{B} \\end{equation}\nin general, because matricies don\u0026rsquo;t commute.\n","html":"\u003cp\u003eIf we have some system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = Ax\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe solution for this system should be \\(e^{At}\\). This gives rise to, given the power series:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{At} = 1 + At + \\frac{1}{2} \\qty(At)^{2} + \\frac{1}{3!} (At)^{3}+ \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe derivative of which:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv t e^{At} \u0026amp;= A + A^{2}t + \\frac{A^{3}t^{2}}{2} + \\dots \\\\\n\u0026amp;= A\\qty(1 + At + \\frac{A^{2}t^{2}}{2})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis intuition makes sense for all matrices \\(A\\). Meaning the general solution gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx = e^{At} x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhraising_e_to_a_matrix/\"\u003eraising e to a matrix\u003c/a\u003e to see how to deal with \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e matricies.\u003c/p\u003e\n\u003ch2 id=\"benefits\"\u003eBenefits\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ethis approach produces all solutions no matter the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(A\\).\u003c/li\u003e\n\u003cli\u003ealso tells you what to do if your \u003ca href=\"/posts/kbhcharacteristic_polynomial/\"\u003echaracteristic polynomial\u003c/a\u003e has repeated eigenvalues\u003c/li\u003e\n\u003cli\u003ethis is computationally not too bad if you have..\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e \\(A\\)\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e \\(A\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"great-matrix-exponential-tragedy\"\u003eGreat Matrix Exponential Tragedy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ne^{A+B} \\neq e^{A} e^{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein general, because \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e don\u0026rsquo;t commute.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmatrix_exponentiation/","tags":null,"title":"matrix exponentiation"},{"categories":null,"contents":"matrix multiplication is defined such that the expression \\(\\mathcal{M}(ST) = \\mathcal{M}(S)\\mathcal{M}(T)\\) holds:\n\\begin{equation} (AC)_{j,k} = \\sum_{r=1}^{n}A_{j,r}C_{r,k} \\end{equation}\nWhile matrix multiplication is distributive and associative, it is NOT!!!!!!!!!!! commutative. I hope you can see that \\(ST\\neq TS\\).\nmemorization its always row-by-column, move down rows first then columns multiply element-wise and add (row times column and add) other ways of thinking about matrix multiplication it is \u0026ldquo;row times column\u0026rdquo;: \\((AC)_{j,k} = A_{j, .} \\cdot C_{., k}\\) it is \u0026ldquo;matrix times columns\u0026rdquo;: \\((AC)_{. , k} = A C_{., k}\\) matrix as a linear combinator Suppose \\(A\\) is an \\(m\\) by \\(n\\) matrix; and \\(c = \\mqty(c_1\\\\ \\vdots\\\\ c_{0})\\) is an \\(n\\) by \\(1\\) matrix; then:\n\\begin{equation} Ac = c_1 A_{., 1} + \\dots + c_{n} A_{., n} \\end{equation}\n(i.e. 
you can use a vector to linearly combinate the column vectors.)\nlinear maps are like matrix multiplication \\begin{equation} \\mathcal{M}(Tv) = \\mathcal{M}(T)M(v) \\end{equation}\n\u0026ldquo;the matrix of a vector formed by applying some Linear Map \\(T\\) onto \\(v\\) is the same as the product of the matrix of \\(T\\) and the matrix of a vector of \\(v\\)\u0026rdquo;\nProof:\nLet \\(v_1 \\dots v_{n}\\) be a basis of \\(v\\).\nSo, we have that \\(Tv = c_1Tv_{1} + \\dots + c_{n}T v_{n}\\) by the additivity and homogeneity of \\(T\\).\nThen, converting it all to matricies:\n\\begin{align} \\mathcal{M}(Tv) \u0026amp;= c_1 \\mathcal{M}(Tv_{1}) + \\dots + c_{n} \\mathcal{M}(Tv_{n}) \\\\ \u0026amp;= c_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n} \\end{align}\nbecause the columns of a matrix represent where each basis vector gets taken in the new space.\nYou will notice now that \\(c_1 \\dots c_{n}\\) are the scalars needed to construct \\(v\\), and that \\(\\mathcal{M}(T)_{.,1} \\dots\\) are the vectors needed to construct \\(\\mathcal{M}(T)\\).\nSo:\n\\begin{equation} c_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n} = \\mathcal{M}(T) \\mathcal{M}(v) = \\mathcal{M}(Tv) \\end{equation}\nas desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e is defined such that the expression \\(\\mathcal{M}(ST) = \\mathcal{M}(S)\\mathcal{M}(T)\\) holds:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(AC)_{j,k} = \\sum_{r=1}^{n}A_{j,r}C_{r,k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhile matrix multiplication is \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributive\u003c/a\u003e and \u003ca href=\"/posts/kbhassociative/\"\u003eassociative\u003c/a\u003e, it is \u003cstrong\u003e\u003cstrong\u003eNOT\u003c/strong\u003e\u003c/strong\u003e!!!!!!!!!!! \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutative\u003c/a\u003e. 
I hope you can see that \\(ST\\neq TS\\).\u003c/p\u003e\n\u003ch2 id=\"memorization\"\u003ememorization\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eits always row-by-column, move down rows first then columns\u003c/li\u003e\n\u003cli\u003emultiply element-wise and add (row times column and add)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-ways-of-thinking-about-matrix-multiplication--kbhmatrix-multiplication-dot-md\"\u003eother ways of thinking about \u003ca href=\"/posts/kbhmatrix_multiplication/\"\u003ematrix multiplication\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eit is \u0026ldquo;row times column\u0026rdquo;: \\((AC)_{j,k} = A_{j, .} \\cdot C_{., k}\\)\u003c/li\u003e\n\u003cli\u003eit is \u0026ldquo;matrix times columns\u0026rdquo;: \\((AC)_{. , k} = A C_{., k}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"matrix-as-a-linear-combinator\"\u003ematrix as a linear combinator\u003c/h2\u003e\n\u003cp\u003eSuppose \\(A\\) is an \\(m\\) by \\(n\\) matrix; and \\(c = \\mqty(c_1\\\\ \\vdots\\\\ c_{0})\\) is an \\(n\\) by \\(1\\) matrix; then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nAc = c_1 A_{., 1} + \\dots + c_{n} A_{., n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(i.e. 
you can use a vector to linearly combinate the column vectors.)\u003c/p\u003e\n\u003ch2 id=\"linear-maps-are-like-matrix-multiplication\"\u003elinear maps are like matrix multiplication\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{M}(Tv) = \\mathcal{M}(T)M(v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e formed by applying some \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T\\) onto \\(v\\) is the same as the product of the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T\\) and the \u003ca href=\"/posts/kbhmatricies/#id-7a09bc5f-6de2-485f-8c29-b94999299cc6-matrix-of-a-vector\"\u003ematrix of a vector\u003c/a\u003e of \\(v\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet \\(v_1 \\dots v_{n}\\) be a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(v\\).\u003c/p\u003e\n\u003cp\u003eSo, we have that \\(Tv = c_1Tv_{1} + \\dots + c_{n}T v_{n}\\) by the additivity and \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003eThen, converting it all to \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\mathcal{M}(Tv) \u0026amp;= c_1 \\mathcal{M}(Tv_{1}) + \\dots + c_{n} \\mathcal{M}(Tv_{n}) \\\\\n\u0026amp;= c_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ebecause the columns of a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e represent where each \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e gets taken in the new space.\u003c/p\u003e\n\u003cp\u003eYou will notice now that \\(c_1 \\dots c_{n}\\) are the scalars needed to construct \\(v\\), and that 
\\(\\mathcal{M}(T)_{.,1} \\dots\\) are the \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es needed to construct \\(\\mathcal{M}(T)\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 \\mathcal{M}(T)_{.,1} + \\dots + c_{n}\\mathcal{M}(T)_{.,n} = \\mathcal{M}(T) \\mathcal{M}(v) = \\mathcal{M}(Tv)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmatrix_multiplication/","tags":null,"title":"matrix multiplication"},{"categories":null,"contents":"a maximal interval is the largest interval you can fit while the function is finite while the function is finite.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhmaximal_interval/\"\u003emaximal interval\u003c/a\u003e is the largest interval you can fit while the function is finite while the function is finite.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaximal_interval/","tags":null,"title":"maximal interval"},{"categories":null,"contents":"maximum a posteriori estimate is a parameter learning scheme that uses Beta Distribution and Baysian inference to get a distribution of the posterior of the parameter, and return the argmax (i.e. 
the mode) of the MAP.\nCalculating a MAP posterior, in general:\n\\begin{equation} \\theta_{MAP} = \\arg\\max_{\\theta} P(\\theta|x_1, \\dots, x_{n}) = \\arg\\max_{\\theta} \\frac{f(x_1, \\dots, x_{n} | \\theta) g(\\theta)}{h(x_1, \\dots, x_{n})} \\end{equation}\nWe assume that the data points are IID, and the fact that the bottom of this is constant, we have:\n\\begin{equation} \\theta_{MAP} = \\arg\\max_{\\theta} g(\\theta) \\prod_{i=1}^{n} f(x_{i}|\\theta) \\end{equation}\nUsually, we\u0026rsquo;d like to argmax the log:\n\\begin{equation} \\theta_{MAP} = \\arg\\max_{\\theta} \\qty(\\log (g(\\theta)) + \\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) ) \\end{equation}\nwhere, \\(g\\) is the probability density of \\(\\theta\\) happening given the prior belief, and \\(f\\) is the likelyhood of \\(x_{i}\\) given parameter \\(\\theta\\).\nYou will note this is just Maximum Likelihood Parameter Learning function, plus the log-probability of the parameter prior.\nMAP for Bernoulli and Binomial \\(p\\) To estimate \\(p\\), we use the Beta Distribution:\nThe MODE of the beta, which is the MAP of such a result:\n\\begin{equation} \\frac{\\alpha -1 }{\\alpha + \\beta -2} \\end{equation}\nnow, for a Laplace posterior \\(Beta(2,2)\\), we have:\n\\begin{equation} \\frac{n+1}{m+n+2} \\end{equation}\nMAP for Poisson and Exponential \\(\\lambda\\) We use the gamma distribution as our prior\n\\begin{equation} \\Lambda \\sim Gamma(\\alpha, \\beta) \\end{equation}\nwhere \\(\\alpha-1\\) is the prior event count, and \\(\\beta\\) is the prior time periods.\nLet\u0026rsquo;s say you have some data points \\(x_1, \u0026hellip;x_{k}\\), the posterior from from those resulting events:\n\\begin{equation} Gamma(\\alpha + n, \\beta+k) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003emaximum a posteriori estimate\u003c/a\u003e is a \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e scheme that uses \u003ca 
href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e and \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian inference\u003c/a\u003e to get a distribution of the posterior of the parameter, and return the \u003ca href=\"/posts/kbhargmax/\"\u003eargmax\u003c/a\u003e (i.e. the mode) of the \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eCalculating a \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e posterior, in general:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MAP} = \\arg\\max_{\\theta} P(\\theta|x_1, \\dots, x_{n}) = \\arg\\max_{\\theta} \\frac{f(x_1, \\dots, x_{n} | \\theta) g(\\theta)}{h(x_1, \\dots, x_{n})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe assume that the data points are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, and the fact that the bottom of this is constant, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MAP} = \\arg\\max_{\\theta} g(\\theta) \\prod_{i=1}^{n} f(x_{i}|\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eUsually, we\u0026rsquo;d like to argmax the log:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MAP} = \\arg\\max_{\\theta} \\qty(\\log (g(\\theta)) + \\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(g\\) is the probability density of \\(\\theta\\) happening given the \u003cstrong\u003e\u003cstrong\u003eprior\u003c/strong\u003e\u003c/strong\u003e belief, and \\(f\\) is the \u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e of \\(x_{i}\\) given parameter \\(\\theta\\).\u003c/p\u003e\n\u003cp\u003eYou will note this is just \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e function, plus the log-probability of the parameter prior.\u003c/p\u003e\n\u003ch2 
id=\"map-for-bernoulli-and-binomial-p\"\u003eMAP for Bernoulli and Binomial \\(p\\)\u003c/h2\u003e\n\u003cp\u003eTo estimate \\(p\\), we use the \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eThe MODE of the beta, which is the \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e of such a result:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\alpha -1 }{\\alpha + \\beta -2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, for a Laplace posterior \\(Beta(2,2)\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{n+1}{m+n+2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"map--kbhmaximum-a-posteriori-estimate-dot-md--for-poisson-and-exponential-lambda\"\u003e\u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003eMAP\u003c/a\u003e for Poisson and Exponential \\(\\lambda\\)\u003c/h2\u003e\n\u003cp\u003eWe use the \u003ca href=\"#map--kbhmaximum-a-posteriori-estimate-dot-md--for-poisson-and-exponential-lambda\"\u003egamma distribution\u003c/a\u003e as our prior\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Lambda \\sim Gamma(\\alpha, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\alpha-1\\) is the prior event count, and \\(\\beta\\) is the prior time periods.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-15_16-16-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003chr\u003e\n\u003cp\u003eLet\u0026rsquo;s say you have some data points \\(x_1, \u0026hellip;x_{k}\\), the posterior from from those resulting events:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nGamma(\\alpha + n, \\beta+k)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaximum_a_posteriori_estimate/","tags":null,"title":"maximum a posteriori estimate"},{"categories":null,"contents":"\u0026ldquo;We find the parameter that maximizes the likelihood.\u0026rdquo;\nfor each \\(X_{j}\\), sum 
what\u0026rsquo;s the log-likelihood of one \\(X_{i}\\) take derivative w.r.t. \\(\\theta\\) and set to \\(0\\) solve for \\(\\theta\\) (this maximizes the log-likelihood of the data!)\nthat is:\n\\begin{equation} \\theta_{MLE} = \\arg\\max_{\\theta} P(x_1, \\dots, x_{n}|\\theta) = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) ) \\end{equation}\nIf your \\(\\theta\\) is a vector of more than \\(1\\) thing, take the gradient (i.e. partial derivative against each of your variables) of the thing and solve the place where the gradient is identically \\(0\\) (each slot is \\(0\\)). That is, we want:\n\\begin{equation} \\mqty[\\pdv{LL(\\theta)}{\\theta_{1}} \\\\ \\pdv{LL(\\theta)}{\\theta_{2}} \\\\ \\pdv{LL(\\theta)}{\\theta_{3}} \\\\ \\dots] = \\mqty[0 \\\\ 0 \\\\0] \\end{equation}\nMLE for poisson distribution MLE for Bernouli MLE is REALLY bad at generalizing to unseen data. Hence why MLE is good for big data where your MLE slowly converge to best parameters for your actual dataset.\nWe desire \\(\\theta\\) parameter from some data \\(D\\). To do this, we simply optimize:\n\\begin{equation} \\hat{\\theta} = \\arg\\max_{\\theta}P(D|\\theta) \\end{equation}\n, where:\n\\begin{equation} P(D|\\theta) = \\prod_{i} P(o_{i}| \\theta) \\end{equation}\nfor each \\(o_{i} \\in D\\). and \\(P\\) is the likelyhood: PMF or PDF given what you are working with.\nThat is, we want the parameter \\(\\theta\\) which maximizes the likelyhood of the data. 
This only works, of course, if each \\(o_{i} \\in D\\) is independent from each other, which we can assume so by calling the samples from data IID (because they are independent draws from the underlying distribution.)\nlog-likelihood The summation above is a little unwieldy, so we take the logs and apply log laws to turn the multiplication into a summation:\n\\begin{equation} \\hat{\\theta} = \\arg\\max_{\\theta} \\sum_{i} \\log P(o_{i}|\\theta) \\end{equation}\n\u0026ldquo;add the log probabilities of each of the outcomes you observed happening according to your unoptimized theta, and maximize it\u0026rdquo;\nargmax of log This holds because log is monotonic (\u0026ldquo;any larger input to a log will lead to a larger value\u0026rdquo;):\n\\begin{equation} \\arg\\max_{x} f(x) = \\arg\\max_{x} \\log f(x) \\end{equation}\nMLE, in general \\begin{equation} \\theta_{MLE} = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) ) \\end{equation}\nExample Say we want to train a model to predict whether or not a plane will crash. Suppose our network is very simple:\n\\(\\theta\\) represents if there will be an midair collision. Therefore, we have two disconnected nodes:\n\\begin{equation} P(crash) = \\theta \\end{equation}\n\\begin{equation} P(safe) = 1-\\theta \\end{equation}\nNow, suppose we observed that there was \\(m\\) flights and \\(n\\) midair collisions between them. 
We can then write then:\n\\begin{equation} P(D|\\theta) = \\theta^{n}(1-\\theta)^{m-n} \\end{equation}\nbecause \\(\\theta^{n}(1-\\theta)^{m-n}\\) is the total probability of the data you are given occurring (\\(n\\) crashes, \\(m-n\\) non crashing flights).\nNow, we seek to maximise this value\u0026mdash;because the probability of \\(P(D)\\) occurring should be \\(1\\) because \\(D\\) actually occured.\nIts mostly algebra at this point:\nSteps:\nwe first compute the probability of each of the sample happening according to old \\(\\theta\\) to get \\(P(D|\\theta)\\) we then take the log of it to make it a summation we then try to maximize \\(\\theta\\) to What this tells us is\u0026hellip;\nGeneric Maximum Likelihood Estimate Overall, its kind of unsurprising from the Frequentist Definition of Probability, but:\n\\begin{equation} \\hat{\\theta}_{i} = \\frac{n_{i}}{\\sum_{j=1}^{k} n_{j}} \\end{equation}\nfor some observations \\(n_{1:k}\\).\nand:\n\\begin{equation} \\sigma^{2} = \\frac{\\sum_{}^{} (o_{i} - \\hat{u})^{2}}{m} \\end{equation}\nProblems with Maximum Likelihood Parameter Learning This requires a lot of data to make work: for instance\u0026mdash;if we don\u0026rsquo;t have any plane crashes observed in \\(n\\) files, this scheme would say there\u0026rsquo;s no chance of plane crashes. This is not explicitly true.\nTherefore, we use Baysian Parameter Learning.\n","html":"\u003cp\u003e\u0026ldquo;We find the \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e that maximizes the likelihood.\u0026rdquo;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efor each \\(X_{j}\\), sum\n\u003col\u003e\n\u003cli\u003ewhat\u0026rsquo;s the \u003ca href=\"#log-likelihood\"\u003elog-likelihood\u003c/a\u003e of one \\(X_{i}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003etake derivative w.r.t. 
\\(\\theta\\) and set to \\(0\\)\u003c/li\u003e\n\u003cli\u003esolve for \\(\\theta\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e(this maximizes the \u003ca href=\"#log-likelihood\"\u003elog-likelihood\u003c/a\u003e of the data!)\u003c/p\u003e\n\u003cp\u003ethat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MLE} = \\arg\\max_{\\theta} P(x_1, \\dots, x_{n}|\\theta) = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) )\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eIf your \\(\\theta\\) is a vector of more than \\(1\\) thing, take the gradient (i.e. partial derivative against each of your variables) of the thing and solve the place where the gradient is identically \\(0\\) (each slot is \\(0\\)). That is, we want:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[\\pdv{LL(\\theta)}{\\theta_{1}} \\\\ \\pdv{LL(\\theta)}{\\theta_{2}} \\\\ \\pdv{LL(\\theta)}{\\theta_{3}} \\\\ \\dots] = \\mqty[0 \\\\ 0 \\\\0]\n\\end{equation}\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/#mle-for\"\u003eMLE for poisson distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/#mle-for-bernouli\"\u003eMLE for Bernouli\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMLE is REALLY bad at generalizing to unseen data. Hence why MLE is good for big data where your MLE slowly converge to best parameters for your actual dataset.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe desire \\(\\theta\\) parameter from some data \\(D\\). To do this, we simply optimize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\arg\\max_{\\theta}P(D|\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e, where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(D|\\theta) = \\prod_{i} P(o_{i}| \\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor each \\(o_{i} \\in D\\). 
and \\(P\\) is the \u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e: \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e or \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e given what you are working with.\u003c/p\u003e\n\u003cp\u003eThat is, we want the parameter \\(\\theta\\) which maximizes the likelyhood of the data. This only works, of course, if each \\(o_{i} \\in D\\) is \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e from each other, which we can assume so by calling the samples from data \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e (because they are independent draws from the underlying distribution.)\u003c/p\u003e\n\u003ch2 id=\"log-likelihood\"\u003elog-likelihood\u003c/h2\u003e\n\u003cp\u003eThe summation above is a little unwieldy, so we take the logs and apply log laws to turn the multiplication into a summation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta} = \\arg\\max_{\\theta} \\sum_{i} \\log P(o_{i}|\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;add the log probabilities of each of the outcomes you observed happening according to your unoptimized theta, and maximize it\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"argmax-of-log\"\u003eargmax of log\u003c/h3\u003e\n\u003cp\u003eThis holds because \u003ca href=\"/posts/kbhlog_laws/\"\u003elog\u003c/a\u003e is monotonic (\u0026ldquo;any larger input to a log will lead to a larger value\u0026rdquo;):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{x} f(x) = \\arg\\max_{x} \\log f(x)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"mle-in-general\"\u003eMLE, in general\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\theta_{MLE} = \\arg\\max_{\\theta} \\qty(\\sum_{i=1}^{n} \\log(f(x_{i}|\\theta)) )\n\\end{equation}\u003c/p\u003e\n\u003ch2 
id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cp\u003eSay we want to train a model to predict whether or not a plane will crash. Suppose our network is very simple:\u003c/p\u003e\n\u003cp\u003e\\(\\theta\\) represents if there will be an midair collision. Therefore, we have two disconnected nodes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(crash) = \\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(safe) = 1-\\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, suppose we observed that there was \\(m\\) flights and \\(n\\) midair collisions between them. We can then write then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(D|\\theta) = \\theta^{n}(1-\\theta)^{m-n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause \\(\\theta^{n}(1-\\theta)^{m-n}\\) is the total probability of the data you are given occurring (\\(n\\) crashes, \\(m-n\\) non crashing flights).\u003c/p\u003e\n\u003cp\u003eNow, we seek to maximise this value\u0026mdash;because the probability of \\(P(D)\\) occurring should be \\(1\\) because \\(D\\) actually occured.\u003c/p\u003e\n\u003cp\u003eIts mostly algebra at this point:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_10-07-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSteps:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe first compute the probability of each of the sample happening according to old \\(\\theta\\) to get \\(P(D|\\theta)\\)\u003c/li\u003e\n\u003cli\u003ewe then take the log of it to make it a summation\u003c/li\u003e\n\u003cli\u003ewe then try to maximize \\(\\theta\\) to\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhat this tells us is\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"generic-maximum-likelihood-estimate\"\u003eGeneric Maximum Likelihood Estimate\u003c/h2\u003e\n\u003cp\u003eOverall, its kind of unsurprising from the \u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of 
Probability\u003c/a\u003e, but:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\theta}_{i} = \\frac{n_{i}}{\\sum_{j=1}^{k} n_{j}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some observations \\(n_{1:k}\\).\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma^{2} = \\frac{\\sum_{}^{} (o_{i} - \\hat{u})^{2}}{m}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"problems-with-maximum-likelihood-parameter-learning--kbhmaximum-likelihood-parameter-learning-dot-md\"\u003eProblems with \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThis requires a lot of data to make work: for instance\u0026mdash;if we don\u0026rsquo;t have any plane crashes observed in \\(n\\) files, this scheme would say there\u0026rsquo;s no chance of plane crashes. This is not explicitly true.\u003c/p\u003e\n\u003cp\u003eTherefore, we use \u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmaximum_likelihood_parameter_learning/","tags":null,"title":"Maximum Likelihood Parameter Learning"},{"categories":null,"contents":"Two Abstractions \u0026ldquo;temporal abstractions\u0026rdquo;: making decisions without consideration / abstracting away time (MDP) \u0026ldquo;state abstractions\u0026rdquo;: making decisions about groups of states at once Graph MaxQ formulates a policy as a graph, which formulates a set of \\(n\\) policies\nMax Node This is a \u0026ldquo;policy node\u0026rdquo;, connected to a series of \\(Q\\) nodes from which it takes the max and propegate down. 
If we are at a leaf max-node, the actual action is taken and control is passed back t to the top of the graph\nQ Node each node computes \\(Q(S,A)\\) for a value at that action\nHierachical Value Function \\begin{equation} Q(s,a) = V_{a}(s) + C_{i}(s,a) \\end{equation}\nthe value function of the root node is the value obtained over all nodes in the graph\nwhere:\n\\begin{equation} C_{i}(s,a) = \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|s,a) V(s\u0026rsquo;) \\end{equation}\nLearning MaxQ maintain two tables \\(C_{i}\\) and \\(\\tilde{C}_{i}(s,a)\\) (which is a special completion function which corresponds to a special reward \\(\\tilde{R}\\) which prevents the model from doing egregious ending actions) choose \\(a\\) according to exploration strategy execute \\(a\\), observe \\(s\u0026rsquo;\\), and compute \\(R(s\u0026rsquo;|s,a)\\) Then, update:\n","html":"\u003ch2 id=\"two-abstractions\"\u003eTwo Abstractions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;temporal abstractions\u0026rdquo;: making decisions without consideration / abstracting away time (\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;state abstractions\u0026rdquo;: making decisions about groups of states at once\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"graph\"\u003eGraph\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmaxq/\"\u003eMaxQ\u003c/a\u003e formulates a policy as a graph, which formulates a set of \\(n\\) policies\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_09-50-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"max-node\"\u003eMax Node\u003c/h3\u003e\n\u003cp\u003eThis is a \u0026ldquo;policy node\u0026rdquo;, connected to a series of \\(Q\\) nodes from which it takes the max and propegate down. 
If we are at a leaf max-node, the actual action is taken and control is passed back t to the top of the graph\u003c/p\u003e\n\u003ch3 id=\"q-node\"\u003eQ Node\u003c/h3\u003e\n\u003cp\u003eeach node computes \\(Q(S,A)\\) for a value at that action\u003c/p\u003e\n\u003ch2 id=\"hierachical-value-function\"\u003eHierachical Value Function\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_09-51-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\nQ(s,a) = V_{a}(s) + C_{i}(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe value function of the root node is the value obtained over all nodes in the graph\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC_{i}(s,a) = \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|s,a) V(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"learning-maxq\"\u003eLearning MaxQ\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003emaintain two tables \\(C_{i}\\) and \\(\\tilde{C}_{i}(s,a)\\) (which is a special completion function which corresponds to a special reward \\(\\tilde{R}\\) which prevents the model from doing egregious ending actions)\u003c/li\u003e\n\u003cli\u003echoose \\(a\\) according to exploration strategy\u003c/li\u003e\n\u003cli\u003eexecute \\(a\\), observe \\(s\u0026rsquo;\\), and compute \\(R(s\u0026rsquo;|s,a)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThen, update:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-13_09-54-38_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhmaxq/","tags":null,"title":"MaxQ"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmbp/","tags":null,"title":"MBP"},{"categories":null,"contents":"MCVI solves POMDPs with continuous state space, but with discrete observation and action spaces. 
It does this by formulating a POMDP as a graph.\nFast algorithms require discretized state spaces, which makes the problem much more difficult to model. MCVI makes continuous representations possible for complex domains.\nMC Backup Normal POMDP Bellman Backup isn\u0026rsquo;t going to work well with continuous state spaces.\nTherefore, we reformulate our value backup as:\n\\begin{equation} V_{t+1}(b) = \\max_{a \\in A} \\qty(\\int_{s} R(s,a)b(s) \\dd{s}) + \\gamma \\sum_{o \\in O}^{} p(o|b,a) V_{t}(update(b,a,o)) \\end{equation}\nwhereby, a continuous belief update:\n\\begin{equation} update(b,a,o) = \\kappa O(o|s\u0026rsquo;,a) \\int_{s \\in S} T(s\u0026rsquo;|s,a) b(s) \\dd{s} \\end{equation}\nwhere \\(\\kappa\\) is a normalisation constant to keep the new belief a probability.\nBut! Instead of actually taking the integral, we simulate a series of trajectories and sum the toal reward\nMC-Backup at Graph We construct at graph by sticking each best action determined by rolling out \\(L\\) steps and computing the reward.\nCollecting the values given each observation, we create a new node for the best action; the best action per observation is connected as well.\nThis creates a new optimal policy graph from the rollouts.\nMCVI initial each reward at action to \\(0\\) for each observation, initialize each observation, node as \\(0\\) Take monte-carlo samples across the actions and states to take integrals to obtain: \\(HV_{g}(b) = \\max_{a \\in A} \\qty(\\int_{s \\in S} R(s,a)b(s) \\dd{s} + \\sum_{o}^{} ???)\\) each future observation is sampled using monte-carlo simulation each backup, you pick one new node to add.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmcvi/\"\u003eMCVI\u003c/a\u003e solves \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es with continuous state space, but with discrete observation and action spaces. 
It does this by formulating a POMDP as a graph.\u003c/p\u003e\n\u003cp\u003eFast algorithms require discretized state spaces, which makes the problem much more difficult to model. MCVI makes continuous representations possible for complex domains.\u003c/p\u003e\n\u003ch2 id=\"mc-backup\"\u003eMC Backup\u003c/h2\u003e\n\u003cp\u003eNormal \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Backup\u003c/a\u003e isn\u0026rsquo;t going to work well with continuous state spaces.\u003c/p\u003e\n\u003cp\u003eTherefore, we reformulate our value backup as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{t+1}(b) = \\max_{a \\in A} \\qty(\\int_{s} R(s,a)b(s) \\dd{s}) + \\gamma \\sum_{o \\in O}^{} p(o|b,a) V_{t}(update(b,a,o))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, a continuous belief update:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nupdate(b,a,o) = \\kappa O(o|s\u0026rsquo;,a) \\int_{s \\in S} T(s\u0026rsquo;|s,a) b(s) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\kappa\\) is a normalisation constant to keep the new belief a probability.\u003c/p\u003e\n\u003cp\u003eBut! 
Instead of actually taking the integral, we simulate a series of trajectories and sum the toal reward\u003c/p\u003e\n\u003ch2 id=\"mc-backup-at-graph\"\u003eMC-Backup at Graph\u003c/h2\u003e\n\u003cp\u003eWe construct at graph by sticking each best action determined by rolling out \\(L\\) steps and computing the reward.\u003c/p\u003e\n\u003cp\u003eCollecting the values given each observation, we create a new node for the best action; the best action per observation is connected as well.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-30_20-02-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis creates a new optimal policy graph from the rollouts.\u003c/p\u003e\n\u003ch2 id=\"mcvi\"\u003eMCVI\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003einitial each reward at action to \\(0\\)\u003c/li\u003e\n\u003cli\u003efor each observation, initialize each observation, node as \\(0\\)\u003c/li\u003e\n\u003cli\u003eTake monte-carlo samples across the actions and states to take integrals to obtain:\n\u003cul\u003e\n\u003cli\u003e\\(HV_{g}(b) = \\max_{a \\in A} \\qty(\\int_{s \\in S} R(s,a)b(s) \\dd{s} + \\sum_{o}^{} ???)\\)\u003c/li\u003e\n\u003cli\u003eeach future observation is sampled using monte-carlo simulation\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eeach backup, you pick one new node to add.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmcvi/","tags":null,"title":"MCVI"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmeal_replacement/","tags":null,"title":"meal replacement"},{"categories":null,"contents":" at each point a relevant result is returned, calculate precision and then average that and then average the precision over all queries precision \\begin{equation} \\frac{tp}{tp + fp} \\end{equation}\nrecall \\begin{equation} \\frac{tp}{tp+fn} \\end{equation}\naccuracy \\begin{equation} \\frac{tp + tn}{tp+tn+fp+fn} \\end{equation}\nf1 
\\begin{equation} F_1 = \\frac{2 (P\\cdot R)}{P+R} \\end{equation}\n","html":"\u003cul\u003e\n\u003cli\u003eat each point a relevant result is returned, calculate precision\u003c/li\u003e\n\u003cli\u003eand then average that\u003c/li\u003e\n\u003cli\u003eand then average the precision over all queries\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"precision\"\u003eprecision\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{tp}{tp + fp}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"recall\"\u003erecall\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{tp}{tp+fn}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"accuracy\"\u003eaccuracy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{tp + tn}{tp+tn+fp+fn}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"f1\"\u003ef1\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nF_1 = \\frac{2 (P\\cdot R)}{P+R}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmean_average_precision/","tags":null,"title":"mean average precision"},{"categories":null,"contents":"One possible approach for using homomorphic encryption, developed specifically for imaging data.\nextract relevant features locally resulting data is encrypted using FHE train model remotely using FHE encrypted data send model back, and data owners decrypt the inference results locally ","html":"\u003cp\u003eOne possible approach for using homomorphic encryption, developed specifically for imaging data.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eextract relevant features locally\u003c/li\u003e\n\u003cli\u003eresulting data is encrypted using FHE\u003c/li\u003e\n\u003cli\u003etrain model remotely using FHE encrypted data\u003c/li\u003e\n\u003cli\u003esend model back, and data owners decrypt the inference results locally\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmedblindtuner/","tags":null,"title":"MedBlindTuner"},{"categories":null,"contents":"RAG for generic risk conversations.\ntag transcripts with 
the relevant themes a la action research use llama2 to embed the given information then send the overall information to a larger language model People generally preferred grounded GPT responses over human responses. Sometimes, in 2 of the features, humans preferred the non grounded responses.\n","html":"\u003cp\u003e\u003cstrong\u003eRAG\u003c/strong\u003e for generic risk conversations.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etag transcripts with the relevant themes a la action research\u003c/li\u003e\n\u003cli\u003euse llama2 to embed the given information\u003c/li\u003e\n\u003cli\u003ethen send the overall information to a larger language model\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ePeople generally preferred grounded GPT responses over human responses. Sometimes, in 2 of the features, humans preferred the \u003cstrong\u003enon grounded\u003c/strong\u003e responses.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmedical_dialogue_generation/","tags":null,"title":"Medical Dialogue Generation"},{"categories":null,"contents":"SnowMed CT: large medical ontology\ncreated pairwise distance matrix on SnowMed CT created weighted graphs using the SnowMed information Node2Vec! Suddenly you have an embedding for each disease. Two Tasks:\nPatient Disease Embeddings Using the node2vec with snowmed\nSimilar Patient Retrial Reveal hidden co-morbidities via Jaccard Coefficient\n","html":"\u003cp\u003eSnowMed CT: large medical ontology\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecreated pairwise distance matrix on SnowMed CT\u003c/li\u003e\n\u003cli\u003ecreated weighted graphs using the SnowMed information\u003c/li\u003e\n\u003cli\u003eNode2Vec! 
Suddenly you have an embedding for each disease.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTwo Tasks:\u003c/p\u003e\n\u003ch2 id=\"patient-disease-embeddings\"\u003ePatient Disease Embeddings\u003c/h2\u003e\n\u003cp\u003eUsing the node2vec with snowmed\u003c/p\u003e\n\u003ch2 id=\"similar-patient-retrial\"\u003eSimilar Patient Retrial\u003c/h2\u003e\n\u003cp\u003eReveal hidden co-morbidities via \u003ca href=\"/posts/kbhranked_information_retrieval/#jaccard-coefficient\"\u003eJaccard Coefficient\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmedical_knowledge_extraction/","tags":null,"title":"Medical Knowledge Extraction"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624558\nOne-Liner analyzed spontaneous speech transcripts (only!) from TD and AD patients with fastText and CNN; best was \\(83.33\\%\\) acc.\nNovelty threw the NLP kitchen sink to transcripts fastText CNN (with vary n-gram kernel 2,3,4,5 sizes) Notable Methods embeddings seaded by GloVe fastText are much faster, but CNN won out Key Figs the qual results PAR (participant), INV (investigator)\nNotes Hey look a review of the field:\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624558\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eanalyzed spontaneous speech transcripts (only!) 
from TD and AD patients with fastText and CNN; best was \\(83.33\\%\\) acc.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethrew the NLP kitchen sink to transcripts\n\u003cul\u003e\n\u003cli\u003efastText\u003c/li\u003e\n\u003cli\u003eCNN (with vary n-gram kernel 2,3,4,5 sizes)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eembeddings seaded by GloVe\u003c/li\u003e\n\u003cli\u003efastText are much faster, but CNN won out\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"the-qual-results\"\u003ethe qual results\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-33-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ePAR\u003c/strong\u003e\u003c/strong\u003e (participant), \u003cstrong\u003e\u003cstrong\u003eINV\u003c/strong\u003e\u003c/strong\u003e (investigator)\u003c/p\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eHey look a review of the field:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-32-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhmeghanani_2021/","tags":["ntj"],"title":"Meghanani 2021"},{"categories":null,"contents":"Human do not perceive frequency very well. The Mel scale is a scale from Hertz to what\u0026rsquo;s better perceived.\n","html":"\u003cp\u003eHuman do not perceive frequency very well. 
The Mel scale is a scale from Hertz to what\u0026rsquo;s better perceived.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmel_scale/","tags":null,"title":"Mel Scale"},{"categories":null,"contents":"memory is an array of bytes\neach byte has a unique index which is written in hexadecimal a pointer stores addresses to memory ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e is an array of \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003es\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eeach \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003e has a unique index which is written in hexadecimal\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e stores addresses to \u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmemory/","tags":null,"title":"memory"},{"categories":null,"contents":"Utilization vs throughput is conflicting goals.\nBig Picture OS:\ncreates new process sets up address space/segments read the executable, load instructions, global data libraries gets loaded Complier:\nset up stack Heap Allocator: \u0026ldquo;Sandbox of bytes\u0026rdquo;\ninitialize the heap heap allocation: client void *malloc(size_t size); Returns a pointer to a block of heap memory of at least size bytes, or NULL if an error occurred.\nvoid free(void *ptr); Frees the heap-allocated block starting at the specific address.\nvoid *realloc(void *ptr, size_t size); Changing the size of a pointer and realloc if needed\nSee Heap allocator\n","html":"\u003cp\u003eUtilization vs throughput is conflicting goals.\u003c/p\u003e\n\u003ch2 id=\"big-picture\"\u003eBig Picture\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-13_10-56-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOS:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecreates new 
process\u003c/li\u003e\n\u003cli\u003esets up address space/segments\u003c/li\u003e\n\u003cli\u003eread the executable, load instructions, global data\u003c/li\u003e\n\u003cli\u003elibraries gets loaded\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eComplier:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eset up stack\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHeap Allocator: \u0026ldquo;Sandbox of bytes\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003einitialize the heap\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"heap-allocation-client\"\u003eheap allocation: client\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003emalloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eReturns a pointer to a block of heap memory of at least size \u003ccode\u003ebytes\u003c/code\u003e, or \u003ccode\u003eNULL\u003c/code\u003e if an error occurred.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efree\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFrees the heap-allocated block starting at the specific address.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003erealloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eChanging the size of a pointer and realloc if needed\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhheap_allocator/#heap-allocator\"\u003eHeap allocator\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmemory_allocation/","tags":null,"title":"memory allocation"},{"categories":null,"contents":"Mencius Philosophy: every person has the capability of goodness and harm, and whichever one you water is the one that grows.\n","html":"\u003cp\u003e\u003ca 
href=\"/posts/kbhmencius_philosophy/\"\u003eMencius Philosophy\u003c/a\u003e: every person has the capability of goodness and harm, and whichever one you water is the one that grows.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmencius_philosophy/","tags":null,"title":"Mencius Philosophy"},{"categories":null,"contents":"the mesoscopic region is the regions far away from equilibrium points\u0026mdash;which is really hard\nThis is also why Poincare invented topo.\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhmesoscopic_region/\"\u003emesoscopic region\u003c/a\u003e is the regions far away from equilibrium points\u0026mdash;which is really hard\u003c/p\u003e\n\u003cp\u003eThis is also why Poincare invented topo.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmesoscopic_region/","tags":null,"title":"mesoscopic region"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmetabolism/","tags":null,"title":"metabolism"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmethods/","tags":null,"title":"Methods"},{"categories":null,"contents":"Applying the MFA aligner upon the Pitt (cookie only) data and performing statistics upon the calculated disfluency information. The ultimate goal is to replicate Wang 2019.\nThe code is available here.\nThe (unvalidated, draft) results are reported below:\nMean value reported, standard deviation in parens. For our data, \\(N=422\\), cases balanced.\nVariable AD (Pitt, ours) MCI (Wang) Control (ours) Control (Wang) Silence Duration 28.10 (21.28) 13.55 (5.53) 18.06 (12.52) 7.71 (5.03) Speech Duration* 23.77 (14.11) 46.64 (5.79) 27.23 (15.3) 53.63 (7.82) Voice-Silence Ratio 1.79 (4.88) 4.43 (2.78) 5.78 (31.95) 10.11 (6.05) Verbal Rate 1.59 (0.61) 1.56 (0.40) 1.989 (0.51) 1.91 (0.43) *speech duration would obviously vary with file length\nFurther statistical quantification also tells us some more things. 
Although the data does not make a good classifier, I performed two tests: a Kolmogorov-Smirnov test for goodness of fit, and a good \u0026lsquo;ol Pearson\u0026rsquo;s correlation with AD/control target. p-values are reported below.\nKS test silence duration: \\(1.31 \\times 10^{-5}\\) speech duration: \\(2.98 \\times 10^{-3}\\) voice-silence ratio: \\(2.01 \\times 10^{-7}\\) verbal rate: \\(4.32 \\times 10^{-10}\\) Pearson\u0026rsquo;s silence duration: \\(4.15 \\times 10^{-8}\\) speech duration: \\(0.164\\) voice-silence ratio: \\(0.732\\) verbal rate: \\(1.22 \\times 10^{-12}\\) As per the values reported in Wang 2019, we can see that\u0026mdash;apart from audio metadata\u0026mdash;verbal rate is a strongly correlated indicator against MCI/AD. We can reasonably say that Wang 2019\u0026rsquo;s data collection can be automated with reasonable success using batchalign + MFA.\nBroken ML I applied an RBF Support-Vector machine to classify AD/control based only on the two most highly correlated variables: verbal rate and silence duration. The results were disappointing.\nOn test data, N=42, balanced labels:\nSVC: \\(61.9\\%\\) Random forest: also \\(61.9\\%\\) We have fairly disappointing results. 
Here\u0026rsquo;s my hypothesis of why:\nif you take a look at this figure, we can see two main distributions\nSo, if we, like Wang 2019, used statistics on independence (they used chi-square, I used KS test), we will come up that the distributions are different.\nHowever, if you take a look at a randomly sampled set of validation data (crosses on the figure), you can see that a lot of them lands in the \u0026ldquo;mostly control\u0026rdquo; area: making the classifier not super useful.\nWe can therefore catch a lot of the \u0026ldquo;slow talking, long pausing\u0026rdquo; patients, but most speaking fluently will possibly need semantic information for prediction.\nI have some preliminary results on Pitt+ERNIE (a kind of BERT) that indicate that a key semantic factor is \u0026ldquo;on-topicness.\u0026rdquo; However, Pitt does not contain a lot of off-topic control data (say, the fluency task, which it has for dementia) for me to validate those claims easily. I will continue work on that front.\n","html":"\u003cp\u003eApplying the MFA aligner upon the Pitt (cookie only) data and performing statistics upon the calculated disfluency information. The ultimate goal is to replicate \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe code \u003ca href=\"https://github.com/Jemoka/DBA/blob/f01862efe3fe7c196ff63252d73c86f1b64f03af/analyze.py#L154-L198\"\u003eis available here\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe (unvalidated, draft) results are reported below:\u003c/p\u003e\n\u003cp\u003eMean value reported, standard deviation in parens. 
For our data, \\(N=422\\), cases balanced.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eVariable\u003c/th\u003e\n\u003cth\u003eAD (Pitt, ours)\u003c/th\u003e\n\u003cth\u003eMCI (Wang)\u003c/th\u003e\n\u003cth\u003eControl (ours)\u003c/th\u003e\n\u003cth\u003eControl (Wang)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSilence Duration\u003c/td\u003e\n\u003ctd\u003e28.10 (21.28)\u003c/td\u003e\n\u003ctd\u003e13.55 (5.53)\u003c/td\u003e\n\u003ctd\u003e18.06 (12.52)\u003c/td\u003e\n\u003ctd\u003e7.71 (5.03)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eSpeech Duration*\u003c/td\u003e\n\u003ctd\u003e23.77 (14.11)\u003c/td\u003e\n\u003ctd\u003e46.64 (5.79)\u003c/td\u003e\n\u003ctd\u003e27.23 (15.3)\u003c/td\u003e\n\u003ctd\u003e53.63 (7.82)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eVoice-Silence Ratio\u003c/td\u003e\n\u003ctd\u003e1.79 (4.88)\u003c/td\u003e\n\u003ctd\u003e4.43 (2.78)\u003c/td\u003e\n\u003ctd\u003e5.78 (31.95)\u003c/td\u003e\n\u003ctd\u003e10.11 (6.05)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eVerbal Rate\u003c/td\u003e\n\u003ctd\u003e1.59 (0.61)\u003c/td\u003e\n\u003ctd\u003e1.56 (0.40)\u003c/td\u003e\n\u003ctd\u003e1.989 (0.51)\u003c/td\u003e\n\u003ctd\u003e1.91 (0.43)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e*speech duration would obviously vary with file length\u003c/p\u003e\n\u003cp\u003eFurther statistical quantification also tells us some more things. Although the data does not make a good classifier, I performed two tests: a \u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKolmogorov-Smirnov test\u003c/a\u003e for goodness of fit, and a good \u0026lsquo;ol Pearson\u0026rsquo;s correlation with AD/control target. 
p-values are reported below.\u003c/p\u003e\n\u003ch2 id=\"ks-test--kbhkolmogorov-smirnov-test-dot-md\"\u003e\u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKS test\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esilence duration: \\(1.31 \\times 10^{-5}\\)\u003c/li\u003e\n\u003cli\u003espeech duration: \\(2.98 \\times 10^{-3}\\)\u003c/li\u003e\n\u003cli\u003evoice-silence ratio: \\(2.01 \\times 10^{-7}\\)\u003c/li\u003e\n\u003cli\u003everbal rate: \\(4.32 \\times 10^{-10}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pearson-s\"\u003ePearson\u0026rsquo;s\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esilence duration: \\(4.15 \\times 10^{-8}\\)\u003c/li\u003e\n\u003cli\u003espeech duration: \\(0.164\\)\u003c/li\u003e\n\u003cli\u003evoice-silence ratio: \\(0.732\\)\u003c/li\u003e\n\u003cli\u003everbal rate: \\(1.22 \\times 10^{-12}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs per the values reported in \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e, we can see that\u0026mdash;apart from audio metadata\u0026mdash;verbal rate is a strongly correlated indicator against MCI/AD. We can reasonably say that \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u0026rsquo;s\u003c/a\u003e data collection can be automated with reasonable success using \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e + MFA.\u003c/p\u003e\n\u003ch2 id=\"broken-ml\"\u003eBroken ML\u003c/h2\u003e\n\u003cp\u003eI applied an RBF Support-Vector machine to classify AD/control based only on the two most highly correlated variables: verbal rate and silence duration. The results were disappointing.\u003c/p\u003e\n\u003cp\u003eOn test data, N=42, balanced labels:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSVC: \\(61.9\\%\\)\u003c/li\u003e\n\u003cli\u003eRandom forest: also \\(61.9\\%\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe have fairly disappointing results. 
Here\u0026rsquo;s my hypothesis of why:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-12_15-50-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eif you take a look at this figure, we can see two main distributions\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-07-12_15-52-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo, if we, like \u003ca href=\"/posts/kbhwang_2019/\"\u003eWang 2019\u003c/a\u003e, used statistics on independence (they used \u003ca href=\"/posts/kbhchi_square/\"\u003echi-square\u003c/a\u003e, I used \u003ca href=\"/posts/kbhkolmogorov_smirnov_test/\"\u003eKS test\u003c/a\u003e), we \u003cem\u003ewill\u003c/em\u003e come up that the distributions are different.\u003c/p\u003e\n\u003cp\u003eHowever, if you take a look at a randomly sampled set of validation data (crosses on the figure), you can see that a lot of them lands in the \u0026ldquo;mostly control\u0026rdquo; area: making the classifier not super useful.\u003c/p\u003e\n\u003cp\u003eWe can therefore catch a lot of the \u0026ldquo;slow talking, long pausing\u0026rdquo; patients, but most speaking fluently will possibly need semantic information for prediction.\u003c/p\u003e\n\u003cp\u003eI have some preliminary results on Pitt+ERNIE (a kind of BERT) that indicate that a key semantic factor is \u0026ldquo;on-topicness.\u0026rdquo; However, Pitt does not contain a lot of off-topic control data (say, the fluency task, which it has for dementia) for me to validate those claims easily. I will continue work on that front.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmfa_disfluency_measurement/","tags":null,"title":"MFA Disfluency Measurement"},{"categories":null,"contents":" Lanzi WNL (August 12) 1%. Selection Seed 7. Houjun. 82.64% ± 4.48% with a 95% confidence. Lanzi MCI (August 12) 1%. Selection Seed 7. Houjun. 78.70% ± 7.85% with a 95% confidence. Lanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced. 
Houjun. Within which, 90.97%±3.40% of multi-syllabic words were correctly identified 86.28%±4.08% of mono-syllabic words were correctly identified 88.63%±2.65% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test. Lanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced. Houjun. Within which, 76.85%±8.08% of multi-syllabic words were correctly identified 72.22%±8.58% of mono-syllabic words were correctly identified 74.54%±5.86% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test. Lanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. Houjun. Within which, 96.75%±2.10% of multi-syllabic words were correctly identified 90.61%±3.46% of mono-syllabic words were correctly identified 93.68%±2.03% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test.\nWithin sucesseses, 16.57% are partial.\nLanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. Houjun. Within which, 91.67%±5.30% of multi-syllabic words were correctly identified 78.70%±7.85% of mono-syllabic words were correctly identified 85.19%±4.78% of all words were correctly identified at a confidence interval of 95% based on a single-variable t test.\nWithin sucesseses, 18.48% are partial.\n","html":"\u003col\u003e\n\u003cli\u003eLanzi WNL (August 12) 1%. Selection Seed 7. Houjun.\n82.64% ± 4.48% with a 95% confidence.\u003c/li\u003e\n\u003cli\u003eLanzi MCI (August 12) 1%. Selection Seed 7. Houjun.\n78.70% ± 7.85% with a 95% confidence.\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003eLanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced. 
Houjun.\nWithin which, 90.97%±3.40% of multi-syllabic words were correctly identified\n86.28%±4.08% of mono-syllabic words were correctly identified\n88.63%±2.65% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/li\u003e\n\u003cli\u003eLanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced. Houjun.\nWithin which, 76.85%±8.08% of multi-syllabic words were correctly identified\n72.22%±8.58% of mono-syllabic words were correctly identified\n74.54%±5.86% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eLanzi WNL (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. Houjun.\nWithin which, 96.75%±2.10% of multi-syllabic words were correctly identified\n90.61%±3.46% of mono-syllabic words were correctly identified\n93.68%±2.03% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/p\u003e\n\u003cp\u003eWithin sucesseses, 16.57% are partial.\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLanzi MCI (August 13) 1%. Selection Seed 7; syllabic balanced; 3-tier labeling. 
Houjun.\nWithin which, 91.67%±5.30% of multi-syllabic words were correctly identified\n78.70%±7.85% of mono-syllabic words were correctly identified\n85.19%±4.78% of all words were correctly identified\nat a confidence interval of 95% based on a single-variable t test.\u003c/p\u003e\n\u003cp\u003eWithin sucesseses, 18.48% are partial.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmfa_performance_statistics/","tags":null,"title":"MFA Performance Statistics"},{"categories":null,"contents":"Mia is a student at the Nueva School\n","html":"\u003cp\u003eMia is a student at the Nueva School\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmia_tavares/","tags":null,"title":"Mia Tavares"},{"categories":null,"contents":"Micah Brown is a student at The Nueva School, also the host of Project80, among other things.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmicah_brown/\"\u003eMicah Brown\u003c/a\u003e is a student at The Nueva School, also the host of \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e, among other things.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmicah_brown/","tags":null,"title":"Micah Brown"},{"categories":null,"contents":"Milton Freedman is an economist.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e is an economist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmilton_freedman/","tags":null,"title":"Milton Freedman"},{"categories":null,"contents":"MMSE is not mean squared error! It is a short mental state test to measure one\u0026rsquo;s neuralpsycological capabilities; frequently used as a first line by a psycologist.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e is \u003cstrong\u003enot\u003c/strong\u003e mean squared error! 
It is a short mental state test to measure one\u0026rsquo;s neuralpsycological capabilities; frequently used as a first line by a psycologist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmmse/","tags":null,"title":"Mini-Mental State Examination"},{"categories":null,"contents":"\u0026ldquo;minimum edit distance\u0026rdquo; is one approach to solving the problem of \u0026ldquo;how similar are these two strings\u0026rdquo;? minimum edit distance is defined by the smallest number of editing operations (insertion, deletion, substitution) needed to transform one string into another.\nThere are two technical definitions. Both definitions are grounded upon \u0026ldquo;minimum number of operations it takes to transform a string into another, where\u0026rdquo;\nedit distance with DP\nDP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\nStandard Edit Distance insertion, deletion, and substitution cost 1\nLevenshtein Distance insertion and deletion cost 1; substitution cost 2\nExample For instance: \u0026ldquo;graffe\u0026rdquo;. Is\u0026hellip;\ngraf graft grail giraffe the closest?\nCommon Use machine translation + speech recognition uses edit distance to evaluate output quality coreference and NER uses edit distance as a baseline check ","html":"\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e\u0026rdquo; is one approach to solving the problem of \u0026ldquo;how similar are these two strings\u0026rdquo;? \u003ca href=\"/posts/kbhminimum_edit_distance/\"\u003eminimum edit distance\u003c/a\u003e is defined by the smallest number of editing operations (insertion, deletion, substitution) needed to transform one string into another.\u003c/p\u003e\n\u003cp\u003eThere are two technical definitions. 
Both definitions are grounded upon \u0026ldquo;minimum number of operations it takes to transform a string into another, where\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhedit_distance_with_dp/\"\u003eedit distance with DP\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eDP costs \\(O(nm)\\), backtrace costs \\(O(n+m)\\).\u003c/p\u003e\n\u003ch2 id=\"standard-edit-distance\"\u003eStandard Edit Distance\u003c/h2\u003e\n\u003cp\u003einsertion, deletion, and substitution cost 1\u003c/p\u003e\n\u003ch2 id=\"levenshtein-distance\"\u003eLevenshtein Distance\u003c/h2\u003e\n\u003cp\u003einsertion and deletion cost 1; substitution cost 2\u003c/p\u003e\n\u003ch2 id=\"example\"\u003eExample\u003c/h2\u003e\n\u003cp\u003eFor instance: \u0026ldquo;graffe\u0026rdquo;. Is\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003egraf\u003c/li\u003e\n\u003cli\u003egraft\u003c/li\u003e\n\u003cli\u003egrail\u003c/li\u003e\n\u003cli\u003egiraffe\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethe closest?\u003c/p\u003e\n\u003ch2 id=\"common-use\"\u003eCommon Use\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003emachine translation + speech recognition uses edit distance to evaluate output quality\u003c/li\u003e\n\u003cli\u003ecoreference and NER uses edit distance as a baseline check\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhminimum_edit_distance/","tags":null,"title":"minimum edit distance"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhminimum_spanning_tree/","tags":null,"title":"minimum spanning tree"},{"categories":null,"contents":"How many disturbance users can coveather take without crashing? 
Let\u0026rsquo;s find out.\nCode Util function to mapreduce a list:\ndef multiplyList(l) : # Multiply elements one by one result = 1 for x in l: result = result * x return result We first set a user count:\nN = var(\u0026#34;N\u0026#34;) # Pool size val_percent = var(\u0026#34;val_percent\u0026#34;) # Pools val_pool = N*val_percent user_pool = N*(1-val_percent) # Disturbance disturbance_percent = var(\u0026#34;disturbance_percent\u0026#34;) # Validation Pools + Disburbance val_disturbance_pool = disturbance_percent*val_pool val_normal_pool = (1-disturbance_percent)*val_pool # Chance of three or more disturbance attestors # which is equal to one minus chance of zero, one, or two disturbance attesors no_disturbance_attestor = (val_normal_pool/val_pool)*((val_normal_pool-1)/(val_pool-1))*((val_normal_pool-2)/(val_pool-2))*((val_normal_pool-3)/(val_pool-3)) one_disturbance = [] for disturbance_point in range(0,4): res = [] res.append((val_disturbance_pool)/(val_pool-disturbance_point)) for pre_disturbance in range(0,disturbance_point): res.append((val_normal_pool-pre_disturbance)/(val_pool-pre_disturbance)) for post_disturbance in range(disturbance_point+1,4): res.append((val_normal_pool-post_disturbance)/(val_pool-post_disturbance)) one_disturbance.append(multiplyList(res)) one_disturbance_attestor = sum(one_disturbance) two_disturbance = [] for disturbance_point_i in range(0,4): for disturbance_point_j in range(disturbance_point_i+1,4): res = [] res.append((val_disturbance_pool)/(val_pool-disturbance_point_i)) res.append((val_disturbance_pool-1)/(val_pool-disturbance_point_j)) for pre_i_disturbance in range(0,disturbance_point_i): res.append((val_normal_pool-pre_disturbance)/(val_pool-pre_disturbance)) for sandwich in range(disturbance_point_i+1,disturbance_point_j): res.append((val_normal_pool-post_disturbance)/(val_pool-sandwich)) for post_j_disturbance in range(disturbance_point_j+1,4): res.append((val_normal_pool-post_disturbance)/(val_pool-post_j_disturbance)) 
two_disturbance.append(multiplyList(res)) two_disturbance_attestor = sum(two_disturbance) distubrance_chance(N, val_percent, disturbance_percent) = expand(1-(no_disturbance_attestor+one_disturbance_attestor+two_disturbance_attestor)) # no_disturbance_attestor (N*(disturbance_percent - 1)*val_percent + 3)*(N*(disturbance_percent - 1)*val_percent + 2)*(N*(disturbance_percent - 1)*val_percent + 1)*(disturbance_percent - 1)/((N*val_percent - 1)*(N*val_percent - 2)*(N*val_percent - 3)) z = var(\u0026#34;z\u0026#34;) val_dist(val_percent, disturbance_percent) = distubrance_chance(100, val_percent, disturbance_percent) implicit_plot3d(val_dist-z, (val_percent,0.1,1), (disturbance_percent, 0,1), (z, 0,1) ,frame=True,axes_labels=[\u0026#39;Validation\u0026#39;,\u0026#39;Disturbance\u0026#39;, \u0026#39;Chance\u0026#39;],axes=False, color=(val_dist,colormaps.Blues)) Launched html viewer for Graphics3d Object z = var(\u0026#34;z\u0026#34;) n_dist(N, disturbance_percent) = distubrance_chance(N, 0.1, disturbance_percent) show(implicit_plot3d(n_dist-z, (N,100,100000), (disturbance_percent, 0,1), (z, 0,1) ,frame=True,axes_labels=[\u0026#39;N\u0026#39;,\u0026#39;Disturbance\u0026#39;, \u0026#39;Chance\u0026#39;],axes=False, color=(n_dist,colormaps.Blues)), aspect_ratio=[1,100000,100000], plot_points=100) Launched html viewer for Graphics3d Object n_dir(N) = distubrance_chance(N, 0.1, 0.1) # plot(n_dir, (N,100,100000),axes_labels=[\u0026#39;N\u0026#39;, \u0026#39;Disturbance\u0026#39;], thickness=1) # solve(distubrance_chance(100, N, 0.1)==0.01, N, to_poly_solve=True) # implicit_plot(distubrance_chance(100, N, 0.1)==0.01, (N, 0,1), (z, 0, # solve(distubrance_chance(N, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True) # implicit_plot(solve(distubrance_chance(N, val_perc, 0.1)==0.01, val_perc)[0]) # val_perc = var(\u0026#34;var_perc\u0026#34;) show(implicit_plot(distubrance_chance(N, val_perc, 0.1)==0.01, (N, 15, 1000), (val_perc, 0,1), 
plot_points=300,axes_labels=[\u0026#39;N\u0026#39;,\u0026#39;Val Ratio\u0026#39;],axes=False), aspect_ratio=800) # solve(distubrance_chance(800, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True) \u0026lt;/Users/houliu/.sage/temp/baboon.jemoka.com/64368/tmp_9bdcu2si.pn\u0026gt;\n","html":"\u003cp\u003eHow many disturbance users can \u003ca href=\"/posts/kbhcoveather/\"\u003ecoveather\u003c/a\u003e take without crashing? Let\u0026rsquo;s find out.\u003c/p\u003e\n\u003ch2 id=\"code\"\u003eCode\u003c/h2\u003e\n\u003cp\u003eUtil function to mapreduce a list:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emultiplyList\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# Multiply elements one by one\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003el\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eresult\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe first set a user count:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;N\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Pool size\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;val_percent\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Pools\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003euser_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Disturbance\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;disturbance_percent\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Validation Pools + Disburbance\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# Chance of three or more disturbance 
attestors\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# which is equal to one minus chance of zero, one, or two disturbance attesors\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eno_disturbance_attestor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eone_disturbance\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emultiplyList\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance_attestor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_disturbance_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epre_i_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epre_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esandwich\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_i\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esandwich\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epost_j_disturbance\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_point_j\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_normal_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_disturbance\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_pool\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epost_j_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etwo_disturbance\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emultiplyList\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eres\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance_attestor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eno_disturbance_attestor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eone_disturbance_attestor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwo_disturbance_attestor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# no_disturbance_attestor\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(N*(disturbance_percent - 1)*val_percent + 3)*(N*(disturbance_percent - 1)*val_percent + 2)*(N*(disturbance_percent - 1)*val_percent + 1)*(disturbance_percent - 1)/((N*val_percent - 1)*(N*val_percent - 2)*(N*val_percent - 3))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;z\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eval_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eimplicit_plot3d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_dist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eframe\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes_labels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Validation\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Disturbance\u0026#39;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;Chance\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolormaps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBlues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLaunched html viewer for Graphics3d Object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;z\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimplicit_plot3d\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_dist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e100000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edisturbance_percent\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eframe\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes_labels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;N\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Disturbance\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;Chance\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_dist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolormaps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eBlues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003easpect_ratio\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot_points\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e100\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLaunched html viewer for Graphics3d Object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003en_dir\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# plot(n_dir, (N,100,100000),axes_labels=[\u0026#39;N\u0026#39;, \u0026#39;Disturbance\u0026#39;], thickness=1)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solve(distubrance_chance(100, N, 0.1)==0.01, N, to_poly_solve=True)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# implicit_plot(distubrance_chance(100, N, 0.1)==0.01, (N, 0,1), (z, 0,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solve(distubrance_chance(N, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# implicit_plot(solve(distubrance_chance(N, val_perc, 0.1)==0.01, 
val_perc)[0])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# val_perc = var(\u0026#34;var_perc\u0026#34;)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eimplicit_plot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edistubrance_chance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval_perc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e15\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eval_perc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot_points\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e300\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes_labels\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;N\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;Val Ratio\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eaxes\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003easpect_ratio\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e800\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solve(distubrance_chance(800, val_perc, 0.1)==0.01, val_perc, to_poly_solve=True)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u0026lt;/Users/houliu/.sage/temp/baboon.jemoka.com/64368/tmp_9bdcu2si.pn\u0026gt;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhminimum_user_base_requirements_for_coveather/","tags":null,"title":"minimum user base requirements for 
coveather"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhminimum_wage/","tags":null,"title":"minimum wage"},{"categories":null,"contents":"Why so many stock exchanges? Because the FTC just allows you to make\u0026rsquo;em as desired.\nWhy doesn\u0026rsquo;t the market trade 24 hours a day? Because the institutional traders can only trade 2 hours a day: the beginning of the day, or the end of the day. Otherwise, there are not enough volume for the institutional traders to be able to trade at their size. See Volume Profile.\nWhat\u0026rsquo;s a good \u0026ldquo;full view\u0026rdquo; of the stock? The order book! You can actually see it by paying money to the exchange. You want to subscribe to every order for every exchange. How to large traders strategically break stocks? \u0026ldquo;How long should I take?\u0026rdquo;\nWhy are some Ethernet ports worth a lot more than others? Some amount of trading (10-20%) is done at light speed. Cable lengths of about a foot change the stock dramatically.\n","html":"\u003ch2 id=\"why-so-many-stock-exchanges\"\u003eWhy so many stock exchanges?\u003c/h2\u003e\n\u003cp\u003eBecause the FTC just allows you to make\u0026rsquo;em as desired.\u003c/p\u003e\n\u003ch2 id=\"why-doesn-t-the-market-trade-24-hours-a-day\"\u003eWhy doesn\u0026rsquo;t the market trade 24 hours a day?\u003c/h2\u003e\n\u003cp\u003eBecause the institutional traders can only trade 2 hours a day: the beginning of the day, or the end of the day. Otherwise, there are not enough volume for the institutional traders to be able to trade at their size. See \u003ca href=\"/posts/kbhvwap/#volume-profile\"\u003eVolume Profile\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"what-s-a-good-full-view-of-the-stock\"\u003eWhat\u0026rsquo;s a good \u0026ldquo;full view\u0026rdquo; of the stock?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe order book! 
You can actually see it by paying money to the exchange.\u003c/li\u003e\n\u003cli\u003eYou want to subscribe to every order for every exchange.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-large-traders-strategically-break-stocks\"\u003eHow to large traders strategically break stocks?\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;How long should I take?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"why-are-some-ethernet-ports-worth-a-lot-more-than-others\"\u003eWhy are some Ethernet ports worth a lot more than others?\u003c/h2\u003e\n\u003cp\u003eSome amount of trading (10-20%) is done at light speed. Cable lengths of about a foot change the stock dramatically.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmisc_financial_market_questions/","tags":null,"title":"Misc. Financial Market Questions"},{"categories":null,"contents":"Focus on protease: inhibition helps inhibit viral replication; and it is conserved across most coronaviruses; so good point to start working in drug development.\nTake smaller binding fragments covering the binding site, and combine them together Try to combine these fragments together into a molecule that fits well with the binding site protease inhibition is usually achieved with a covalent peptide bond, but this crowd-sourcing effort showed that\nmachine-learning rapid library synthesis begin with some guess for the model molecule then, use ML to perform modifications to the molecule really quickly by scanning though (\u0026ldquo;ML-prioritized rapid library synthesis\u0026rdquo;) a bunch of changes to the molecule pick and repeat Molecular Transformer THROW THE FUCKING REACTION INTO AN LLM, as WORDS\nI desire death\nSo; taking reactants + reagents as input; guess the product.\nas input; guess the product.\nSABER decompose molecule into building blocks make biostesters of the building blocks change crap limitations ML can\u0026rsquo;t extrapolate into unknown search space and it could come up with bullshit; so to fix:\nusing 
physics to create correct docking structures use ML to perform last mile optimization ","html":"\u003cp\u003eFocus on \u003ca href=\"/posts/kbhprotease/\"\u003eprotease\u003c/a\u003e: inhibition helps inhibit viral replication; and \u003cstrong\u003eit is conserved across most coronaviruses\u003c/strong\u003e; so good point to start working in drug development.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTake smaller binding fragments covering the binding site, and combine them together\u003c/li\u003e\n\u003cli\u003eTry to combine these fragments together into a molecule that fits well with the binding site\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhprotease/\"\u003eprotease\u003c/a\u003e inhibition is usually achieved with a covalent peptide bond, but this crowd-sourcing effort showed that\u003c/p\u003e\n\u003ch2 id=\"machine-learning-rapid-library-synthesis\"\u003emachine-learning rapid library synthesis\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ebegin with some guess for the model molecule\u003c/li\u003e\n\u003cli\u003ethen, use ML to perform modifications to the molecule really quickly by scanning though (\u0026ldquo;ML-prioritized rapid library synthesis\u0026rdquo;) a bunch of changes to the molecule\u003c/li\u003e\n\u003cli\u003epick and repeat\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"molecular-transformer\"\u003eMolecular Transformer\u003c/h2\u003e\n\u003cp\u003eTHROW THE FUCKING REACTION INTO AN LLM, as WORDS\u003c/p\u003e\n\u003cp\u003e\u003cem\u003eI desire death\u003c/em\u003e\u003c/p\u003e\n\u003cp\u003eSo; taking reactants + reagents as input; guess the product.\u003c/p\u003e\n\u003cp\u003eas input; guess the product.\u003c/p\u003e\n\u003ch2 id=\"saber\"\u003eSABER\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003edecompose molecule into building blocks\u003c/li\u003e\n\u003cli\u003emake biostesters of the building blocks\u003c/li\u003e\n\u003cli\u003echange crap\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 
id=\"limitations\"\u003elimitations\u003c/h2\u003e\n\u003cp\u003eML can\u0026rsquo;t extrapolate into unknown search space and it could come up with bullshit; so to fix:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eusing physics to create correct docking structures\u003c/li\u003e\n\u003cli\u003euse ML to perform last mile optimization\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhml_drug_discovery/","tags":null,"title":"ML COVID Drug Discovery"},{"categories":null,"contents":"MLib is a machine learning library built on top of Spark.\nfrom pyspalk.mllib.clustering import KMeans KMeans(rdd) where you pass the MLib a PySpark RDD\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmlib/\"\u003eMLib\u003c/a\u003e is a machine learning library built on top of \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epyspalk.mllib.clustering\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eKMeans\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erdd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhere you pass the \u003ccode\u003eMLib\u003c/code\u003e a PySpark \u003ca 
href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmlib/","tags":null,"title":"MLib"},{"categories":null,"contents":"Reading notes Malcom X\u0026rsquo;s father was an active prechear in the scene Malcom X and MLK are both made mostly charactures out of context Malcom X had a belligent upbringing with a belligent father, whereas MLK lived in relative comfort as a son of a successful minister Malcom was sent into white foster families as his mother became institutionalized Becasue of his experience in foster system, Malcom tried to pass/be white King\u0026rsquo;s nonviolent priciples not understood and became conflicted with ideas of local leaders Malcom found a father figure in the Nation of Islam, changing his name in prison MLK had more positive African American role models in life Malcom X disallusioned with the policy of nonengagement by the nation of islam Malcom X had support over racial seperatism Nation of Islam wanted to create a completely seperate Black state, promoting Black Nationalism secret Malcom X wanted break because of skeptism again Eli Mohammed Malcom charged MLK with infiltration Martin believes that the process of voilence is a form of naïve expression King believes that the \u0026ldquo;strong demagogic oratory\u0026rdquo; of Malcom was detrimental and extremist Martin believes that the personal nature of assults from Malcom maybe result in physical assult Malcom was suspended during 1963, and became independent\u0026mdash;wanted to combine religion and politics like King Malcom began forging ties with millitan Black movement Martin regretted that integration has not proceeded, but believed it would have been difficult anyways Rejected nonviolent and intergrational movement People saw King and X\u0026rsquo;s ideas inrecosiliable But, King and X themselves made a possible shared ending by the end Believes that suicides were cut short Racial pride was a centering point: while Malcom saw 
it as something to be harbored, Martin saw it as inate ","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"malcom-x-s-father-was-an-active-prechear-in-the-scene\"\u003eMalcom X\u0026rsquo;s father was an active prechear in the scene\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-and-mlk-are-both-made-mostly-charactures-out-of-context\"\u003eMalcom X and MLK are both made mostly charactures out of context\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-had-a-belligent-upbringing-with-a-belligent-father-whereas-mlk-lived-in-relative-comfort-as-a-son-of-a-successful-minister\"\u003eMalcom X had a belligent upbringing with a belligent father, whereas MLK lived in relative comfort as a son of a successful minister\u003c/h3\u003e\n\u003ch3 id=\"malcom-was-sent-into-white-foster-families-as-his-mother-became-institutionalized\"\u003eMalcom was sent into white foster families as his mother became institutionalized\u003c/h3\u003e\n\u003ch3 id=\"becasue-of-his-experience-in-foster-system-malcom-tried-to-pass-be-white\"\u003eBecasue of his experience in foster system, Malcom tried to pass/be white\u003c/h3\u003e\n\u003ch3 id=\"king-s-nonviolent-priciples-not-understood-and-became-conflicted-with-ideas-of-local-leaders\"\u003eKing\u0026rsquo;s nonviolent priciples not understood and became conflicted with ideas of local leaders\u003c/h3\u003e\n\u003ch3 id=\"malcom-found-a-father-figure-in-the-nation-of-islam-changing-his-name-in-prison\"\u003eMalcom found a father figure in the Nation of Islam, changing his name in prison\u003c/h3\u003e\n\u003ch3 id=\"mlk-had-more-positive-african-american-role-models-in-life\"\u003eMLK had more positive African American role models in life\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-disallusioned-with-the-policy-of-nonengagement-by-the-nation-of-islam\"\u003eMalcom X disallusioned with the policy of nonengagement by the nation of islam\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-had-support-over-racial-seperatism\"\u003eMalcom X had 
support over racial seperatism\u003c/h3\u003e\n\u003ch3 id=\"nation-of-islam-wanted-to-create-a-completely-seperate-black-state-promoting-black-nationalism-secret\"\u003eNation of Islam wanted to create a completely seperate Black state, promoting Black Nationalism secret\u003c/h3\u003e\n\u003ch3 id=\"malcom-x-wanted-break-because-of-skeptism-again-eli-mohammed\"\u003eMalcom X wanted break because of skeptism again Eli Mohammed\u003c/h3\u003e\n\u003ch3 id=\"malcom-charged-mlk-with-infiltration\"\u003eMalcom charged MLK with infiltration\u003c/h3\u003e\n\u003ch3 id=\"martin-believes-that-the-process-of-voilence-is-a-form-of-naïve-expression\"\u003eMartin believes that the process of voilence is a form of naïve expression\u003c/h3\u003e\n\u003ch3 id=\"king-believes-that-the-strong-demagogic-oratory-of-malcom-was-detrimental-and-extremist\"\u003eKing believes that the \u0026ldquo;strong demagogic oratory\u0026rdquo; of Malcom was detrimental and extremist\u003c/h3\u003e\n\u003ch3 id=\"martin-believes-that-the-personal-nature-of-assults-from-malcom-maybe-result-in-physical-assult\"\u003eMartin believes that the personal nature of assults from Malcom maybe result in physical assult\u003c/h3\u003e\n\u003ch3 id=\"malcom-was-suspended-during-1963-and-became-independent-wanted-to-combine-religion-and-politics-like-king\"\u003eMalcom was suspended during 1963, and became independent\u0026mdash;wanted to combine religion and politics like King\u003c/h3\u003e\n\u003ch3 id=\"malcom-began-forging-ties-with-millitan-black-movement\"\u003eMalcom began forging ties with millitan Black movement\u003c/h3\u003e\n\u003ch3 id=\"martin-regretted-that-integration-has-not-proceeded-but-believed-it-would-have-been-difficult-anyways\"\u003eMartin regretted that integration has not proceeded, but believed it would have been difficult anyways\u003c/h3\u003e\n\u003ch3 id=\"rejected-nonviolent-and-intergrational-movement\"\u003eRejected nonviolent and intergrational 
movement\u003c/h3\u003e\n\u003ch3 id=\"people-saw-king-and-x-s-ideas-inrecosiliable\"\u003ePeople saw King and X\u0026rsquo;s ideas inrecosiliable\u003c/h3\u003e\n\u003ch3 id=\"but-king-and-x-themselves-made-a-possible-shared-ending-by-the-end\"\u003eBut, King and X themselves made a possible shared ending by the end\u003c/h3\u003e\n\u003ch3 id=\"believes-that-suicides-were-cut-short\"\u003eBelieves that suicides were cut short\u003c/h3\u003e\n\u003ch3 id=\"racial-pride-was-a-centering-point-while-malcom-saw-it-as-something-to-be-harbored-martin-saw-it-as-inate\"\u003eRacial pride was a centering point: while Malcom saw it as something to be harbored, Martin saw it as inate\u003c/h3\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmlk_and_malcom_x_reading/","tags":null,"title":"MLK and Malcom X Reading"},{"categories":null,"contents":"Modal is a cloud deployment system that\u0026rsquo;s entirely programmatic. No yaml:\nimport modal stub = modal.stub(gpu=\u0026#34;a100\u0026#34;) @stub.function() def fit(x): import whatever whatever.thing() So think run.house, but they have the infra.\nfine-tuning with Modal https://github.com/modal-labs/llama-recipes\nYou can store the serverless functions, and Modal can serve stored serverless functions. Modal have web hooks as well to do inference at a front end.\nModal can serve most the management as well.\npricing 13B: 500 tokens/s on 40GB AA10 (3.73 / hour) 70B: 300 tok /s 80 GB( 2* 5.59/hour)\n","html":"\u003cp\u003eModal is a cloud deployment system that\u0026rsquo;s entirely programmatic. 
No yaml:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emodal\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003estub\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emodal\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estub\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egpu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;a100\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003e@stub.function\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ewhatever\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ewhatever\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ething\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo think run.house, but they have the infra.\u003c/p\u003e\n\u003ch2 id=\"fine-tuning-with-modal\"\u003efine-tuning with Modal\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://github.com/modal-labs/llama-recipes\"\u003ehttps://github.com/modal-labs/llama-recipes\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eYou can store the serverless functions, and \u003ca href=\"/posts/kbhmodal/\"\u003eModal\u003c/a\u003e can serve stored serverless functions. Modal have web hooks as well to do inference at a front end.\u003c/p\u003e\n\u003cp\u003eModal can serve most the management as well.\u003c/p\u003e\n\u003ch2 id=\"pricing\"\u003epricing\u003c/h2\u003e\n\u003cp\u003e13B: 500 tokens/s on 40GB AA10 (3.73 / hour)\n70B: 300 tok /s 80 GB( 2* 5.59/hour)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodal/","tags":null,"title":"Modal"},{"categories":null,"contents":"modalization is the\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmodalization/\"\u003emodalization\u003c/a\u003e is the\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodalization/","tags":null,"title":"modalization"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmodel_bae/","tags":null,"title":"model bae"},{"categories":null,"contents":"Extrinsic Evaluation Extrinsic Evaluation, also known as In-Vivo Evaluation, focuses on benchmarking two language models in terms of their differing performance on a test task.\nIntrinsic Evaluation In-Vitro Evaluation or Intrinsic Evaluation 
focuses on evaluating the language models\u0026rsquo; performance at, well, language modeling.\nTypically, we use perplexity.\ndirectly measure language model performance doesn\u0026rsquo;t necessarily correspond with real applications ","html":"\u003ch2 id=\"extrinsic-evaluation\"\u003eExtrinsic Evaluation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#extrinsic-evaluation\"\u003eExtrinsic Evaluation\u003c/a\u003e, also known as \u003ca href=\"#extrinsic-evaluation\"\u003eIn-Vivo Evaluation\u003c/a\u003e, focuses on benchmarking two language models in terms of their differing performance on a test task.\u003c/p\u003e\n\u003ch2 id=\"intrinsic-evaluation\"\u003eIntrinsic Evaluation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#intrinsic-evaluation\"\u003eIn-Vitro Evaluation\u003c/a\u003e or \u003ca href=\"#intrinsic-evaluation\"\u003eIntrinsic Evaluation\u003c/a\u003e focuses on evaluating the language models\u0026rsquo; performance at, well, language modeling.\u003c/p\u003e\n\u003cp\u003eTypically, we use \u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edirectly measure language model performance\u003c/li\u003e\n\u003cli\u003edoesn\u0026rsquo;t necessarily correspond with real applications\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodel_evaluation/","tags":null,"title":"Model Evaluation"},{"categories":null,"contents":"Step 1: Getting Model We want a model\n\\(T\\): transition probability \\(R\\): rewards Maximum Likelihood Parameter Learning Method \\begin{equation} N(s,a,s\u0026rsquo;) \\end{equation}\nwhich is the count of transitions from \\(s,a\\) to \\(s\u0026rsquo;\\) and increment it as \\(s, a, s\u0026rsquo;\\) gets observed. 
This makes, with Maximum Likelihood Parameter Learning:\n\\begin{equation} T(s\u0026rsquo; | s,a) = \\frac{N(s,a,s\u0026rsquo;)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{} N(s,a,s\u0026rsquo;\u0026rsquo;)} \\end{equation}\nWe also keep a table:\n\\begin{equation} p(s,a) \\end{equation}\nthe sum of rewards when taking \\(s,a\\). To calculate a reward, we take the average:\n\\begin{equation} R(s,a) \\approx \\frac{p(s,a)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{}N(s,a,s\u0026rsquo;\u0026rsquo;)} \\end{equation}\nBaysian Parameter Learning Method We build a Dirichlet Distribution; let:\n\\begin{equation} \\vec{\\theta}_{(s,a)} = \\mqty[T(s_1 | s,a) \\\\ \\dots\\\\ T(s_{n} | s,a)] \\end{equation}\nWe then calculate a distribution:\n\\begin{equation} Dir(\\vec{\\theta}_{s,a} | \\vec{N}_{s,a}) \\end{equation}\nwhich will give you a probability over a set of transitions.\nThen, when we need a transition \\(T\\), we perform Posterior Sampling on this Dirichlet Distribution at every episode (or so, otherwise the model shifts a lot) and then optimize on that.\nGetting rewards is an advanced topic. So let\u0026rsquo;s just use Maximum Likelihood Parameter Learning or assume its given\nStep 2: Getting Value Function. full update One direct strategy to work on this, then, is to use whatever transition and rewards you observed to perform value iteration or policy iteration. First go through one or a bunch of observations, then take a full value iteration or policy iteration sweep, and then go back and take more measurements.\nrandomized update We randomly update a single state:\n\\begin{equation} U(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)] \\end{equation}\nand take another observation, update our model estimate, and move on.\nprioritized updates Say we are current updating a state \\(s\\), and there are two previous states that could transition into \\(s\\). 
First we create an estimate like before:\n\\begin{equation} U(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)] \\end{equation}\nWe create a queue whose contents are ranked by:\n\\begin{equation} T(s|s^{-}, a^{-}) \\times |U(s)-u(s)| \\end{equation}\nwhere \\(u(s)\\) is \\(U(s)\\) prior to the update.\nWe move on to the next state to update by popping off the queue.\nStep 3: Explore a Little epsilon-greedy exploration with decay Softmax Method R-Max Most strategies above focuses on choosing a random action. This exploration focuses on adapting reward/transitions to explicitly explore new-state.\n\\begin{equation} R(s,a) = \\begin{cases} r_{\\max}, if N(s,a) \u0026lt; m,\\\\ \\rho\\frac{s,a}{N(s,a)}, otherwise \\end{cases} \\end{equation}\nyou get a large reward \\(r_{\\max }\\) if you haven\u0026rsquo;t been to \\((s,a)\\), otherwise the reward you get gets discounted by the number of times you visited.\n\\begin{equation} T(s\u0026rsquo;|s,a) = \\begin{cases} 1, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; = s \\\\ 0, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; \\neq s \\\\ \\frac{N(s,a,s\u0026rsquo;)}{N(s,a)}, otherwise \\end{cases} \\end{equation}\n","html":"\u003ch2 id=\"step-1-getting-model\"\u003eStep 1: Getting Model\u003c/h2\u003e\n\u003cp\u003eWe want a model\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T\\): transition probability\u003c/li\u003e\n\u003cli\u003e\\(R\\): rewards\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"maximum-likelihood-parameter-learning--kbhmaximum-likelihood-parameter-learning-dot-md--method\"\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e Method\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nN(s,a,s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is the count of transitions from \\(s,a\\) to \\(s\u0026rsquo;\\) and increment it as \\(s, a, s\u0026rsquo;\\) gets 
observed. This makes, with \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo; | s,a) = \\frac{N(s,a,s\u0026rsquo;)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{} N(s,a,s\u0026rsquo;\u0026rsquo;)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe also keep a table:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe sum of rewards when taking \\(s,a\\). To calculate a reward, we take the average:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(s,a) \\approx \\frac{p(s,a)}{\\sum_{s\u0026rsquo;\u0026rsquo;}^{}N(s,a,s\u0026rsquo;\u0026rsquo;)}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"baysian-parameter-learning--kbhbaysian-parameter-learning-dot-md--method\"\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e Method\u003c/h3\u003e\n\u003cp\u003eWe build a \u003ca href=\"/posts/kbhbaysian_parameter_learning/#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e; let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{\\theta}_{(s,a)} = \\mqty[T(s_1 | s,a) \\\\ \\dots\\\\ T(s_{n} | s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe then calculate a distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nDir(\\vec{\\theta}_{s,a} | \\vec{N}_{s,a})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich will give you a probability over a set of transitions.\u003c/p\u003e\n\u003cp\u003eThen, when we need a transition \\(T\\), we perform \u003ca href=\"/posts/kbhdirected_exploration/#posterior-sampling\"\u003ePosterior Sampling\u003c/a\u003e on this \u003ca href=\"/posts/kbhbaysian_parameter_learning/#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e at every episode (or so, otherwise the model shifts a lot) and then optimize on that.\u003c/p\u003e\n\u003cp\u003eGetting rewards is an advanced topic. 
So let\u0026rsquo;s just use \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e or assume its given\u003c/p\u003e\n\u003ch2 id=\"step-2-getting-value-function-dot\"\u003eStep 2: Getting Value Function.\u003c/h2\u003e\n\u003ch3 id=\"full-update\"\u003efull update\u003c/h3\u003e\n\u003cp\u003eOne direct strategy to work on this, then, is to use whatever transition and rewards you observed to perform \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e or \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e. First go through one or a bunch of observations, then take a full \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e or \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e sweep, and then go back and take more measurements.\u003c/p\u003e\n\u003ch3 id=\"randomized-update\"\u003erandomized update\u003c/h3\u003e\n\u003cp\u003eWe randomly update a single state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand take another observation, update our model estimate, and move on.\u003c/p\u003e\n\u003ch3 id=\"prioritized-updates\"\u003eprioritized updates\u003c/h3\u003e\n\u003cp\u003eSay we are current updating a state \\(s\\), and there are two previous states that could transition into \\(s\\). 
First we create an estimate like before:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) \\leftarrow \\max_{a} \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe create a queue whose contents are ranked by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s|s^{-}, a^{-}) \\times |U(s)-u(s)|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(u(s)\\) is \\(U(s)\\) prior to the update.\u003c/p\u003e\n\u003cp\u003eWe move on to the next state to update by popping off the queue.\u003c/p\u003e\n\u003ch2 id=\"step-3-explore-a-little\"\u003eStep 3: Explore a Little\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/#epsilon-greedy-exploration-with-decay\"\u003eepsilon-greedy exploration with decay\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/#softmax-method\"\u003eSoftmax Method\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"r-max\"\u003eR-Max\u003c/h3\u003e\n\u003cp\u003eMost strategies above focuses on choosing a random action. 
This exploration focuses on adapting reward/transitions to explicitly explore new-state.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(s,a) = \\begin{cases}\nr_{\\max}, if N(s,a) \u0026lt; m,\\\\\n\\rho\\frac{s,a}{N(s,a)}, otherwise\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou get a large reward \\(r_{\\max }\\) if you haven\u0026rsquo;t been to \\((s,a)\\), otherwise the reward you get gets discounted by the number of times you visited.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo;|s,a) = \\begin{cases}\n1, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; = s \\\\\n0, if N(s,a) \u0026lt; m\\ and\\ s\u0026rsquo; \\neq s \\\\\n\\frac{N(s,a,s\u0026rsquo;)}{N(s,a)}, otherwise\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodel_based_reinforcement_learning/","tags":null,"title":"model-based reinforcement learning"},{"categories":null,"contents":"In model-based reinforcement learning, we tried real hard to get \\(T\\) and \\(R\\). What if we just estimated \\(Q(s,a)\\) directly? model-free reinforcement learning tends to be quite slow, compared to model-based reinforcement learning methods.\nreview: estimating mean of a random variable we got \\(m\\) points \\(x^{(1 \\dots m)} \\in X\\) , what is the mean of \\(X\\)?\n\\begin{equation} \\hat{x_{m}} = \\frac{1}{m} \\sum_{i=1}^{m} x^{(i)} \\end{equation}\n\\begin{equation} \\hat{x}_{m} = \\hat{x}_{m-1} + \\frac{1}{m} (x^{(m)} - \\hat{x}_{m-1}) \\end{equation}\nevery time you get a new measurement \\(x^{(m)}\\). 
sometimes we don\u0026rsquo;t scale it by \\(\\frac{1}{m}\\), you can scale it with constant \\(\\alpha\\) which actually causes exponential decay of past samples (as it keeps getting scaled by \\(\\alpha\\)).\n\\begin{equation} \\hat{x} = \\hat{x} + \\alpha (x- \\hat{x}) \\end{equation}\nQ-Learning Let us review the action-value function:\n\\begin{equation} Q(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;) \\end{equation}\nthis is a model-free method, substituting in the definition of the value function:\n\\begin{equation} Q(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\max_{a} Q(s\u0026rsquo;, a\u0026rsquo;) \\end{equation}\nNote! The second half is know in the shape of an expectation (\u0026quot;probability times the value\u0026quot;). Recall also that \\(R(s,a)\\) is the expected reward \\(r\\) when taking \\(s,a\\).\nLet:\n\\begin{equation} r = \\mathbb{E}[R(s,a)] \\end{equation}\nSo we can say that:\n\\begin{equation} Q(s,a) = \\mathbb{E} \\qty[r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)] \\end{equation}\nFinally, then, we can perform random variable mean estimation scheme given above; recall:\n\\begin{equation} \\hat{x} = \\hat{x} + \\alpha (x- \\hat{x}) \\end{equation}\nhence, we update our new mean with:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)] \\end{equation}\nSARSA SARSA is Q-Learning where you hope the model converges. 
You HAVE to perform some Exploration and Exploitation to try out other actions, and then you just update your function accordingly:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)] \\end{equation}\nthis works in theory because over time, good Exploration and Exploitation assumes that:\n\\begin{equation} a\u0026rsquo; \\rightarrow \\arg\\max_{a\u0026rsquo;} Q(s\u0026rsquo;,a\u0026rsquo;) \\end{equation}\nEligibility Traces Eligibility Traces is a change to SARSA which uses the number of visits as an additional constraint that allows updates to propagate each reward backwards given the list of states which caused that reward to be distributed.\nMeaning, let \\(\\lambda\\) be some decay parameter, we have:\n\\begin{equation} \\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a) \\end{equation}\nand, we can write:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a) \\end{equation}\nwhere by the visit counts are discounted such that:\n\\begin{equation} N(s,a) \\leftarrow \\gamma \\lambda N(s,a) \\end{equation}\nSee also Sarsa (Lambda).\nGeneralized Q-Learning with Gradient action-value Consider Value Function Approximation. 
We were trying to fit a set of \\(\\theta\\) at that time to find \\(U_{\\theta}\\) that matches \\(U^{*}\\).\nWe now want to compute some \\(Q_{\\theta}\\) in the same flavour:\n\\begin{equation} Q_{\\theta}(s,a) \\sim Q^{*}(s,a) \\end{equation}\nWe can measure the difference between these two values like so:\n\\begin{equation} \\ell(\\theta) = \\frac{1}{2}\\mathbb{E}_{(s,a)\\sim \\pi^{*}}\\qty[(Q^{*}(s,a) - Q_{\\theta}(s,a))^{2}] \\end{equation}\nWe want to write this expected value distributed over \\(s,a\\) of the optimal policy because we want to calculate more samples over those states that the optimal policy ends up at most.\nTo optimize \\(Q_{\\theta}\\), then, you betcha know what\u0026rsquo;s happenin:\n\\begin{align} \\nabla \\ell \u0026amp;= \\nabla \\frac{1}{2} \\nabla \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a))^{2}] \\\\ \u0026amp;= \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) (-1)\\nabla Q_{\\theta}(s,a)] \\\\ \u0026amp;= -\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] \\end{align}\nby a healthy dose of the chain rule.\nNow, to minimize this loss, we go in the direction opposite the gradient. 
The negatives then cancel out to give us:\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\qty[\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] ] \\end{equation}\nwhere \\(\\alpha \\in (0,1)\\).\nSimilar to the SARSA assumption, good Exploration and Exploitation assumes that:\n\\begin{equation} Q \\to Q^{*} \\end{equation}\nso we can drop our expectation with:\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] \\end{equation}\nNow, we can make one more assumption, the assumption from Q-Learning:\n\\begin{equation} Q^{*}(s,a) \\approx r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;) \\end{equation}\nthat taking the best actions with the \\(Q\\) you have will slowly approximate optimal \\(Q\\).\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\qty[\\qty((r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;))-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] \\end{equation}\nyou will note! this is actually just Q-Learning multiplying with a gradient.\nPolicy Gradient see also Policy Gradient\n","html":"\u003cp\u003eIn \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e, we tried real hard to get \\(T\\) and \\(R\\). What if we just estimated \\(Q(s,a)\\) directly? 
\u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/\"\u003emodel-free reinforcement learning\u003c/a\u003e tends to be quite slow, compared to \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e methods.\u003c/p\u003e\n\u003ch2 id=\"review-estimating-mean-of-a-random-variable--kbhrandom-variables-dot-md\"\u003ereview: estimating mean of a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003ewe got \\(m\\) points \\(x^{(1 \\dots m)} \\in X\\) , what is the mean of \\(X\\)?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x_{m}} = \\frac{1}{m} \\sum_{i=1}^{m} x^{(i)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x}_{m} = \\hat{x}_{m-1} + \\frac{1}{m} (x^{(m)} - \\hat{x}_{m-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eevery time you get a new measurement \\(x^{(m)}\\). sometimes we don\u0026rsquo;t scale it by \\(\\frac{1}{m}\\), you can scale it with constant \\(\\alpha\\) which actually causes exponential decay of past samples (as it keeps getting scaled by \\(\\alpha\\)).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x} = \\hat{x} + \\alpha (x- \\hat{x})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"q-learning\"\u003eQ-Learning\u003c/h2\u003e\n\u003cp\u003eLet us review the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is a model-free method, substituting in the definition of the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) \\max_{a} Q(s\u0026rsquo;, 
a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote! The second half is know in the shape of an \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e (\u0026quot;\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e times the value\u0026quot;). Recall also that \\(R(s,a)\\) is the expected reward \\(r\\) when taking \\(s,a\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr = \\mathbb{E}[R(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we can say that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) = \\mathbb{E} \\qty[r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then, we can perform \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e mean estimation scheme given above; recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{x} = \\hat{x} + \\alpha (x- \\hat{x})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehence, we update our new mean with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma \\max_{a\u0026rsquo;} Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"sarsa\"\u003eSARSA\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#sarsa\"\u003eSARSA\u003c/a\u003e is \u003ca href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e where you hope the model converges. 
You HAVE to perform some \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e to try out other actions, and then you just update your function accordingly:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis works in theory because over time, good \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e assumes that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\u0026rsquo; \\rightarrow \\arg\\max_{a\u0026rsquo;} Q(s\u0026rsquo;,a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"eligibility-traces\"\u003eEligibility Traces\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e is a change to \u003ca href=\"#sarsa\"\u003eSARSA\u003c/a\u003e which uses the number of visits as an additional constraint that allows updates to propagate each reward backwards given the list of states which caused that reward to be distributed.\u003c/p\u003e\n\u003cp\u003eMeaning, let \\(\\lambda\\) be some decay parameter, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere by the visit counts are discounted such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN(s,a) \\leftarrow \\gamma \\lambda N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhsarsa_lambda/\"\u003eSarsa (Lambda)\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"generalized-q-learning--orgfb02fd1--with-gradient-action-value--kbhaction-value-function-dot-md\"\u003eGeneralized \u003ca 
href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e with Gradient \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eConsider \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eValue Function Approximation\u003c/a\u003e. We were trying to fit a set of \\(\\theta\\) at that time to find \\(U_{\\theta}\\) that matches \\(U^{*}\\).\u003c/p\u003e\n\u003cp\u003eWe now want to compute some \\(Q_{\\theta}\\) in the same flavour:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ_{\\theta}(s,a) \\sim Q^{*}(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can measure the difference between these two values like so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ell(\\theta) = \\frac{1}{2}\\mathbb{E}_{(s,a)\\sim \\pi^{*}}\\qty[(Q^{*}(s,a) - Q_{\\theta}(s,a))^{2}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to write this expected value distributed over \\(s,a\\) of the \u003cstrong\u003eoptimal\u003c/strong\u003e policy because we want to calculate more samples over those states that the optimal policy ends up at most.\u003c/p\u003e\n\u003cp\u003eTo optimize \\(Q_{\\theta}\\), then, you betcha know what\u0026rsquo;s happenin:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\nabla \\ell \u0026amp;= \\nabla \\frac{1}{2} \\nabla \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a))^{2}] \\\\\n\u0026amp;= \\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) (-1)\\nabla Q_{\\theta}(s,a)] \\\\\n\u0026amp;= -\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)]\n\\end{align}\u003c/p\u003e\n\u003cp\u003eby a healthy dose of the chain rule.\u003c/p\u003e\n\u003cp\u003eNow, to minimize this loss, we go in the direction opposite the gradient. 
The negatives then cancel out to give us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\qty[\\mathbb{E}_{(s,a) \\sim \\pi^{*}} \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)] ]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\alpha \\in (0,1)\\).\u003c/p\u003e\n\u003cp\u003eSimilar to the \u003ca href=\"#sarsa\"\u003eSARSA\u003c/a\u003e assumption, good \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e assumes that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ \\to Q^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso we can drop our expectation with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\qty[(Q^{*}(s,a)-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we can make one more assumption, the assumption from \u003ca href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ^{*}(s,a) \\approx r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat taking the best actions with the \\(Q\\) you have will slowly approximate optimal \\(Q\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\qty[\\qty((r_{s} + \\gamma \\max_{a\u0026rsquo;} Q_{\\theta}(s\u0026rsquo;,a\u0026rsquo;))-Q_{\\theta}(s,a)) \\nabla Q_{\\theta}(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note! 
this is actually just \u003ca href=\"#q-learning\"\u003eQ-Learning\u003c/a\u003e multiplying with a gradient.\u003c/p\u003e\n\u003ch2 id=\"policy-gradient--kbhpolicy-gradient-dot-md\"\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee also \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodel_free_reinforcement_learning/","tags":null,"title":"model-free reinforcement learning"},{"categories":null,"contents":"Here are the main steps of generic modeling.\n","html":"\u003cp\u003eHere are the main steps of generic \u003ca href=\"/posts/kbhmodeling/\"\u003emodeling\u003c/a\u003e.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-13_15-44-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhmodeling/","tags":null,"title":"modeling"},{"categories":null,"contents":"multi-core CPUs Finally, actually multitasking: starting in mid 2000s, multiple cores are finally more common. 
management between cores is crucial\nMoors Law Break Down we have reached much of the limits of the speed of a single core instead, we have to have more cores\u0026mdash;which requires more management to take advantage of More kinds of Cores \u0026ldquo;performance\u0026rdquo; vs \u0026ldquo;efficiency\u0026rdquo; cores needs to schedule for different tasks: not just who on what core, but who on what TYPE of core Other Hardware Specialized hardware in these chips, which is required for scheduling.\nGPU In change of graphics and some ML applications\nNPU/TPU Machine learning specialization.\nScheduling Multi-Core CPUs Most Basic Idea share ready queue shared across cores lock to sync access to the ready queue one dispatcher separate interrupts for each core Run \\(k\\) highest priority threads on the \\(k\\) cores.\nIssue need to figure out what is the priority of each core run if we want preemption, so its an \\(O(n)\\) check for free + priority the shared ready queue needs to be locked, so as core increases they need to be synchronized which causes slowdown One Ready Queue per Core Big problems:\nwhere do we put a given thread? 
moving core between threads is expensive Big tension: Work Stealing and Core Affinity\nWork Stealing If one core is free (even if there is things in the ready queue), check other cores\u0026rsquo; ready queues and try to do thread communism.\nCore Affinity Ideally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core.\nGang Scheduling When you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.\nLocking Multi-Core CPUs Main problem: disable interrupts doesn\u0026rsquo;t stop race conditions.\nSo we turn to busy waiting with a hardware atomic operation exchange, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:\nclass Lock { std::automic\u0026lt;int\u0026gt; sync(0); } void Lock::lock() { while (sync.exchange(1)) {} // we are now the only one using it // do work .... 
sync = 0; } The exchange function returns the old value.\nThe busy waiting here isn\u0026rsquo;t too bad, because you only need to busy wait for the lock itself to be locked, and then the lock will handle sync from there.\nFlash Storage They are faster:\nno moving parts (no spinny) smaller, faster, lots of data mobile devices especially Typically, we fix these quirky issues with the Flash Translation Layer (FTL), which provides block, sector, and read/write interfaces like spinning harddrives without the OS noticing.\nMinimizing seeks isn\u0026rsquo;t too necessary now, but, writing SSD is very weird:\nwriting You have two operation.\nerase You can set ALL SEGMENT of an \u0026ldquo;erase unit\u0026rdquo; to \\(1\\)\n\u0026ldquo;erase unit\u0026rdquo; size is usually 256k\nwrite You can modify one \u0026ldquo;page\u0026rdquo; at a time (which is smaller than a erase unit)\u0026mdash;but you can ONLY set individual bits in the page into 0 (and not 1, which you have to do by erasing larger erasing chunks).\n\u0026ldquo;page\u0026rdquo; size is usually 512 bytes or 4k bytes\nwear-out wear leveling: make sure that the drive wears out at roughly the same rate as other parts of the drive. Moving commonly written data (\u0026ldquo;hot\u0026rdquo; data) around\nFTL limitations no hardware access (can\u0026rsquo;t optimize around flash storage) sacrifices performances for performance wasts capacity (to look like hard drive) many layers ","html":"\u003ch2 id=\"multi-core-cpus\"\u003emulti-core CPUs\u003c/h2\u003e\n\u003cp\u003eFinally, actually multitasking: starting in mid 2000s, multiple cores are finally more common. 
\u003cstrong\u003emanagement between cores is crucial\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"moors-law-break-down\"\u003eMoors Law Break Down\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe have reached much of the limits of the speed of a single core\u003c/li\u003e\n\u003cli\u003einstead, we have to have more cores\u0026mdash;which requires more management to take advantage of\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"more-kinds-of-cores\"\u003eMore kinds of Cores\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;performance\u0026rdquo; vs \u0026ldquo;efficiency\u0026rdquo; cores\u003c/li\u003e\n\u003cli\u003eneeds to schedule for different tasks: not just who on what core, but who on what TYPE of core\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-hardware\"\u003eOther Hardware\u003c/h2\u003e\n\u003cp\u003eSpecialized hardware in these chips, which is required for scheduling.\u003c/p\u003e\n\u003ch3 id=\"gpu\"\u003eGPU\u003c/h3\u003e\n\u003cp\u003eIn change of graphics and some ML applications\u003c/p\u003e\n\u003ch3 id=\"npu-tpu\"\u003eNPU/TPU\u003c/h3\u003e\n\u003cp\u003eMachine learning specialization.\u003c/p\u003e\n\u003ch2 id=\"scheduling-multi-core-cpus\"\u003eScheduling Multi-Core CPUs\u003c/h2\u003e\n\u003ch3 id=\"most-basic-idea\"\u003eMost Basic Idea\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eshare ready queue shared across cores\u003c/li\u003e\n\u003cli\u003elock to sync access to the ready queue\u003c/li\u003e\n\u003cli\u003eone dispatcher\u003c/li\u003e\n\u003cli\u003eseparate interrupts for each core\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eRun \\(k\\) highest priority threads on the \\(k\\) cores.\u003c/p\u003e\n\u003ch4 id=\"issue\"\u003eIssue\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eneed to figure out what is the priority of each core run if we want preemption, so its an \\(O(n)\\) check for free + priority\u003c/li\u003e\n\u003cli\u003ethe shared ready queue needs to be locked, so as core increases they 
need to be synchronized which causes slowdown\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"one-ready-queue-per-core\"\u003eOne Ready Queue per Core\u003c/h3\u003e\n\u003cp\u003eBig problems:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewhere do we put a given thread?\u003c/li\u003e\n\u003cli\u003emoving core between threads is expensive\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBig tension: \u003cstrong\u003e\u003ca href=\"#work-stealing\"\u003eWork Stealing\u003c/a\u003e\u003c/strong\u003e and \u003cstrong\u003e\u003ca href=\"#core-affinity\"\u003eCore Affinity\u003c/a\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch4 id=\"work-stealing\"\u003eWork Stealing\u003c/h4\u003e\n\u003cp\u003eIf one core is free (even if there is things in the ready queue), check other cores\u0026rsquo; ready queues and try to do thread communism.\u003c/p\u003e\n\u003ch4 id=\"core-affinity\"\u003eCore Affinity\u003c/h4\u003e\n\u003cp\u003eIdeally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core.\u003c/p\u003e\n\u003ch3 id=\"gang-scheduling\"\u003eGang Scheduling\u003c/h3\u003e\n\u003cp\u003eWhen you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.\u003c/p\u003e\n\u003ch2 id=\"locking-multi-core-cpus\"\u003eLocking Multi-Core CPUs\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eMain problem\u003c/strong\u003e: disable interrupts doesn\u0026rsquo;t stop race conditions.\u003c/p\u003e\n\u003cp\u003eSo we turn to \u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e with a hardware atomic operation \u003ccode\u003eexchange\u003c/code\u003e, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eLock\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eautomic\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esync\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esync\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexchange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we are now the only one using it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// do work ....\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esync\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe exchange function returns the old value.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e here isn\u0026rsquo;t too bad, because you only need to busy wait for the lock itself to be locked, and then the lock will handle sync from there.\u003c/p\u003e\n\u003ch2 id=\"flash-storage\"\u003eFlash 
Storage\u003c/h2\u003e\n\u003cp\u003eThey are faster:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eno moving parts (no spinny)\u003c/li\u003e\n\u003cli\u003esmaller, faster, lots of data\u003c/li\u003e\n\u003cli\u003emobile devices especially\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTypically, we fix these quirky issues with the \u003ca href=\"#flash-storage\"\u003eFlash Translation Layer\u003c/a\u003e (\u003ca href=\"#flash-storage\"\u003eFTL\u003c/a\u003e), which provides block, sector, and read/write interfaces like spinning harddrives without the OS noticing.\u003c/p\u003e\n\u003cp\u003eMinimizing seeks isn\u0026rsquo;t too necessary now, but, writing SSD is very weird:\u003c/p\u003e\n\u003ch3 id=\"writing\"\u003ewriting\u003c/h3\u003e\n\u003cp\u003eYou have two operation.\u003c/p\u003e\n\u003ch4 id=\"erase\"\u003eerase\u003c/h4\u003e\n\u003cp\u003eYou can set \u003cstrong\u003eALL SEGMENT\u003c/strong\u003e of an \u0026ldquo;erase unit\u0026rdquo; to \\(1\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;erase unit\u0026rdquo; size is usually 256k\u003c/p\u003e\n\u003ch4 id=\"write\"\u003ewrite\u003c/h4\u003e\n\u003cp\u003eYou can modify one \u0026ldquo;page\u0026rdquo; at a time (which is smaller than a erase unit)\u0026mdash;but you can ONLY set individual bits in the page into 0 (and not 1, which you have to do by erasing larger erasing chunks).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;page\u0026rdquo; size is usually 512 bytes or 4k bytes\u003c/p\u003e\n\u003ch3 id=\"wear-out\"\u003ewear-out\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003ewear leveling\u003c/strong\u003e: make sure that the drive wears out at roughly the same rate as other parts of the drive. 
Moving commonly written data (\u0026ldquo;hot\u0026rdquo; data) around\u003c/p\u003e\n\u003ch3 id=\"ftl--org19007d5--limitations\"\u003e\u003ca href=\"#flash-storage\"\u003eFTL\u003c/a\u003e limitations\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eno hardware access (can\u0026rsquo;t optimize around flash storage)\u003c/li\u003e\n\u003cli\u003esacrifices performances for performance\u003c/li\u003e\n\u003cli\u003ewasts capacity (to look like hard drive)\u003c/li\u003e\n\u003cli\u003emany layers\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodern_os/","tags":null,"title":"modern OS"},{"categories":null,"contents":"Clock math.\nWe say that \\(a\\ \\text{mod}\\ b = r\\) if \\(a=bq+r\\), such that \\(b\u0026gt;0\\) and \\(0 \\leq r \u0026lt;b\\). More specifically, we denote:\n\\begin{equation} a \\equiv a\u0026rsquo;\\ \\text{mod}\\ b \\end{equation}\nif \\(b|(a-a\u0026rsquo;)\\).\nadditional information basic modular arithmetic operations \\begin{align} (a+b)\\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) + (b\\ \\text{mod}\\ c))\\ \\text{mod}\\ c \\\\ (ab) \\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) (b \\ \\text{mod}\\ c)) \\ \\text{mod}\\ c \\end{align}\nexamples of modular arithmetic If \\(a\\ \\text{mod}\\ b = r\\), \\((-a)\\ \\text{mod}\\ b = -r = b-r\\)\n\\(2^{2}\\equiv 4 \\equiv -1 \\ \\text{mod}\\ 5\\), \\(2^{4}\\equiv 1\\ \\text{mod}\\ 5\\)\nUSPS\u0026rsquo;s check digit is \\(a\\ \\text{mod}\\ 9\\) because you can just add all the digits up Let \\(a \\in \\mathbb{Z}\\). Let \\(s\\) be the sum of all the digits in \\(a\\). \\(a \\ \\text{mod}\\ 9 = s \\ \\text{mod}\\ 9\\). Why? Not a very satisfying answer, but because \\(9\\) is \\(10-1\\), so for each \\(n \\times 10^{k}\\ \\text{mod}\\ 9\\) is always \\(-n\\) smaller. like how \\(10 = 9+1\\), \\(20 = 2 \\times 9+2\\), etc.\nsubgroups Recall the real numbers: \\(\\dots, -2, -1, 0, 1, 2, 3, \\dots\\)\nThat\u0026rsquo;s so many numbers! 
Instead, let\u0026rsquo;s create a circle of these values. For instance, what if you only want \\(5\\):\n\\begin{equation} \\mathbb{Z}_{5} = \\{0,1,2,3,4\\} \\end{equation}\nThis is a group under addition.\nhumph: similarity between this and affine subsets \\(u/U = v/U\\) if \\(u-v \\in U\\) \\(u \\equiv v\\ \\text{mod}\\ b\\) if \\(b|u-v\\) Chinese Remainder Theorem Suppose \\(a,b \\in \\mathbb{Z}\\), and \\(m,n \\in \\mathbb{N}\\), such that \\(\\gcd (m,n) = 1\\) (that is, suppose \\(m,n\\) is coprime). There is some \\(x \\in \\mathbb{Z}\\) such that:\n\\begin{equation} x \\equiv a \\ \\text{mod}\\ m, x \\equiv b\\ \\text{mod}\\ n \\end{equation}\nFurthermore, and importantly, \\(x\\ \\text{mod}\\ (mn)\\) is unique.\n","html":"\u003cp\u003eClock math.\u003c/p\u003e\n\u003cp\u003eWe say that \\(a\\ \\text{mod}\\ b = r\\) if \\(a=bq+r\\), such that \\(b\u0026gt;0\\) and \\(0 \\leq r \u0026lt;b\\). More specifically, we denote:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\equiv a\u0026rsquo;\\ \\text{mod}\\ b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(b|(a-a\u0026rsquo;)\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"basic-modular-arithmetic--kbhmodular-arithmetic-dot-md--operations\"\u003ebasic \u003ca href=\"/posts/kbhmodular_arithmetic/\"\u003emodular arithmetic\u003c/a\u003e operations\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n(a+b)\\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) + (b\\ \\text{mod}\\ c))\\ \\text{mod}\\ c \\\\\n(ab) \\ \\text{mod}\\ c \u0026amp;= ((a\\ \\text{mod}\\ c) (b \\ \\text{mod}\\ c)) \\ \\text{mod}\\ c\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"examples-of-modular-arithmetic\"\u003eexamples of modular arithmetic\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eIf \\(a\\ \\text{mod}\\ b = r\\), \\((-a)\\ \\text{mod}\\ b = -r = 
b-r\\)\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\\(2^{2}\\equiv 4 \\equiv -1 \\ \\text{mod}\\ 5\\), \\(2^{4}\\equiv 1\\ \\text{mod}\\ 5\\)\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eUSPS\u0026rsquo;s check digit is \\(a\\ \\text{mod}\\ 9\\) because you can just add all the digits up\u003c/strong\u003e\u003c/strong\u003e\nLet \\(a \\in \\mathbb{Z}\\). Let \\(s\\) be the sum of all the digits in \\(a\\). \\(a \\ \\text{mod}\\ 9 = s \\ \\text{mod}\\ 9\\). Why? Not a very satisfying answer, but because \\(9\\) is \\(10-1\\), so for each \\(n \\times 10^{k}\\ \\text{mod}\\ 9\\) is always \\(-n\\) smaller. like how \\(10 = 9+1\\), \\(20 = 2 \\times 9+2\\), etc.\u003c/p\u003e\n\u003ch3 id=\"subgroups--kbhsubgroup-dot-md\"\u003e\u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroups\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eRecall the real numbers: \\(\\dots, -2, -1, 0, 1, 2, 3, \\dots\\)\u003c/p\u003e\n\u003cp\u003eThat\u0026rsquo;s so many numbers! Instead, let\u0026rsquo;s create a \u003cem\u003ecircle\u003c/em\u003e of these values. 
For instance, what if you only want \\(5\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Z}_{5} = \\{0,1,2,3,4\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e under addition.\u003c/p\u003e\n\u003ch3 id=\"humph-similarity-between-this-and-affine-subsets--kbhparallel-linear-algebra-dot-md\"\u003ehumph: similarity between this and \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subsets\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\\(u/U = v/U\\) if \\(u-v \\in U\\)\u003c/li\u003e\n\u003cli\u003e\\(u \\equiv v\\ \\text{mod}\\ b\\) if \\(b|u-v\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"chinese-remainder-theorem\"\u003eChinese Remainder Theorem\u003c/h3\u003e\n\u003cp\u003eSuppose \\(a,b \\in \\mathbb{Z}\\), and \\(m,n \\in \\mathbb{N}\\), such that \\(\\gcd (m,n) = 1\\) (that is, suppose \\(m,n\\) is \u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003e). There is some \\(x \\in \\mathbb{Z}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx \\equiv a \\ \\text{mod}\\ m, x \\equiv b\\ \\text{mod}\\ n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFurthermore, and importantly, \\(x\\ \\text{mod}\\ (mn)\\) is unique.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmodular_arithmetic/","tags":null,"title":"modular arithmetic"},{"categories":null,"contents":"goal: Drug Resistance could be more hampered by developing drugs that actually fit in the sub-strait envelope (i.e. if a virus develops a change to the drugged area, it should also stop working)\ntakeaway: to design inhibitors, it sticking out (\u0026ldquo;protrusion\u0026rdquo;) of the substrate envelope causes easy areas of mutation that will confer Drug Resistance, therefore, design drugs that try to stay within substrate envelope to ensure a higher degree of imperviousness to mutation (i.e. 
if the envelope changes well the virus is going to not do its job either)\nMessing with HIV mutations outside the active site (in primary backbone structure) actually caused a large increase in resistance (4 points outside of backbone structure) use linreg and other ML methods to take the type of mutation change (hydrogen? binding? etc.) and to find features most important to confer resistance Messing with COVID MPro COVID has many mutations on the aligned sequences to figure conserved interactions paxlovid nemdesirvir binds strongly to E166, which\u0026mdash;though conserved\u0026mdash;still could be resistant to resistance ","html":"\u003cp\u003egoal: \u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e could be more hampered by developing drugs that actually fit in the sub-strait envelope (i.e. if a virus develops a change to the drugged area, it should also stop working)\u003c/p\u003e\n\u003cp\u003etakeaway: to design inhibitors, it sticking out (\u0026ldquo;protrusion\u0026rdquo;) of the \u003ca href=\"/posts/kbhsubtrait_envelope/\"\u003esubstrate envelope\u003c/a\u003e causes easy areas of mutation that will confer \u003ca href=\"/posts/kbhdrug_resistance/\"\u003eDrug Resistance\u003c/a\u003e, therefore, design drugs that try to stay within \u003ca href=\"/posts/kbhsubtrait_envelope/\"\u003esubstrate envelope\u003c/a\u003e to ensure a higher degree of imperviousness to mutation (i.e. if the envelope changes well the virus is going to not do its job either)\u003c/p\u003e\n\u003ch2 id=\"messing-with-hiv\"\u003eMessing with HIV\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emutations outside the active site (in primary backbone structure) actually caused a large increase in resistance (4 points outside of backbone structure)\u003c/li\u003e\n\u003cli\u003euse linreg and other ML methods to take the \u003cem\u003etype\u003c/em\u003e of mutation change (hydrogen? binding? etc.) 
and to find features most important to confer resistance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"messing-with-covid-mpro\"\u003eMessing with COVID MPro\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCOVID has many mutations on the\u003c/li\u003e\n\u003cli\u003ealigned sequences to figure conserved interactions\u003c/li\u003e\n\u003cli\u003epaxlovid nemdesirvir binds strongly to E166, which\u0026mdash;though conserved\u0026mdash;still could be resistant to resistance\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmolecular_drug_resistance/","tags":null,"title":"Molecular Analysis of Drug Resistance"},{"categories":null,"contents":"MOMDP are POMDPs where some parts of the state are fully observable.\nMotivation scaling up POMDPs is really hard: exponential curse of dimensionality. Even discretization will cause the number of beliefs to really blow up.\nSome of the state isn\u0026rsquo;t uncertain, some others are bounded uncertainty: this REDUCES scale a lot.\nSolving Solving the algorithm uses SARSOP, or any point-based system. Instead of sampling the full belief state, however, we sample from a tuple \\((x, b_{y})\\), whereby \\(x\\) is the observable part and \\(b_{y}\\) is the unobservable part.\nHow Exactly Tuple? True Mixed Observability Go about splitting about your space based on the true observability part. Say there are \\(10\\) states which are observable, you literally just initialize 10 sets of alpha vectors to create \\(V_{1} \u0026hellip; V_{10}\\) for your observable states, where each one you have:\n\\begin{equation} V_{x_{i}}(b_{j}) = \\dots \\end{equation}\nwhereby all of your objectives and backup, etc., takes \\(x\\) your observable state as input. Then, during inference/backup looking at where you are in the observable part and use the value function from that part.\nPseudo-Full Observability Train a fully observable model, and then use belief-weighted average during inference. 
This is where QMDP came from.\nBounded Uncertainty Most of the time neither of the top two cases apply cleanly. Instead, most frequently your uncertainty in your observation is bounded by a significant degree.\nCondition For instance, your GPS maybe uncertain, but if it says you are in Kansas you are not in Shanghai. Formally, for \\(h: O \\to S\\) (the hypothetical \u0026ldquo;preimage\u0026rdquo; of any observation), we expect that:\n\\begin{equation} \\frac{h(o)}{S} = c \\end{equation}\ngives \\(c \\ll 1\\).\nSolution If we know we have Bounded Uncertainty, we can reparameterize our POMDP to an MDP over observations (we call this \\(X\\)) plus a POMDP modeling uncertainty in offsets from those observations (we call this \\(Y\\)).\nWhereby:\n\\begin{equation} \\begin{cases} T_{x}(x\u0026rsquo;|x,y,a) = \\sum_{s\u0026rsquo; \\in S} T(s\u0026rsquo; | (x,y),a) O(x\u0026rsquo;|s\u0026rsquo;,a) \\\\ T_{y}(y\u0026rsquo;|x,x\u0026rsquo;,y,a) = \\frac{T((x\u0026rsquo;,y\u0026rsquo;) | (x,y),a) O((x\u0026rsquo;,y\u0026rsquo;)|s\u0026rsquo;,a)}{T_{x}(x\u0026rsquo;|x,y,a)} \\end{cases} \\end{equation}\nwhere our state space is now split into \\(s \\in S = X \\times Y\\) s.t. \\(s=(x,y)\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmomdp/\"\u003eMOMDP\u003c/a\u003e are \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es where some parts of the state are fully observable.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003escaling up \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es is \u003cstrong\u003e\u003cstrong\u003ereally hard\u003c/strong\u003e\u003c/strong\u003e: exponential \u003ca href=\"/posts/kbhcurse_of_dimensionality/\"\u003ecurse of dimensionality\u003c/a\u003e. 
Even discretization will cause the number of beliefs to really blow up.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eSome of the state isn\u0026rsquo;t uncertain, some others are bounded uncertainty: this REDUCES scale a lot.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003eSolving the algorithm uses \u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e, or any point-based system. Instead of sampling the full belief state, however, we sample from a tuple \\((x, b_{y})\\), whereby \\(x\\) is the observable part and \\(b_{y}\\) is the unobservable part.\u003c/p\u003e\n\u003ch2 id=\"how-exactly-tuple\"\u003eHow Exactly Tuple?\u003c/h2\u003e\n\u003ch3 id=\"true-mixed-observability\"\u003eTrue Mixed Observability\u003c/h3\u003e\n\u003cp\u003eGo about splitting about your space based on the true observability part. Say there are \\(10\\) states which are observable, you literally just initialize 10 sets of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es to create \\(V_{1} \u0026hellip; V_{10}\\) for your observable states, where each one you have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{x_{i}}(b_{j}) = \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby all of your objectives and backup, etc., takes \\(x\\) your observable state as input. Then, during inference/backup looking at where you are in the observable part and use the value function from that part.\u003c/p\u003e\n\u003ch3 id=\"pseudo-full-observability\"\u003ePseudo-Full Observability\u003c/h3\u003e\n\u003cp\u003eTrain a fully observable model, and then use \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e-weighted average during inference. This is where \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e came from.\u003c/p\u003e\n\u003ch3 id=\"bounded-uncertainty\"\u003eBounded Uncertainty\u003c/h3\u003e\n\u003cp\u003eMost of the time neither of the top two cases apply cleanly. 
Instead, most frequently your uncertainty in your observation is \u003cem\u003ebounded\u003c/em\u003e by a significant degree.\u003c/p\u003e\n\u003ch4 id=\"condition\"\u003eCondition\u003c/h4\u003e\n\u003cp\u003eFor instance, your GPS maybe uncertain, but if it says you are in Kansas you are not in Shanghai. Formally, for \\(h: O \\to S\\) (the hypothetical \u0026ldquo;preimage\u0026rdquo; of any observation), we expect that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{h(o)}{S} = c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egives \\(c \\ll 1\\).\u003c/p\u003e\n\u003ch4 id=\"solution\"\u003eSolution\u003c/h4\u003e\n\u003cp\u003eIf we know we have \u003ca href=\"#bounded-uncertainty\"\u003eBounded Uncertainty\u003c/a\u003e, we can reparameterize our \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e to an \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e over observations (we call this \\(X\\)) plus a \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e modeling \u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e in offsets from those observations (we call this \\(Y\\)).\u003c/p\u003e\n\u003cp\u003eWhereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nT_{x}(x\u0026rsquo;|x,y,a) = \\sum_{s\u0026rsquo; \\in S} T(s\u0026rsquo; | (x,y),a) O(x\u0026rsquo;|s\u0026rsquo;,a) \\\\\nT_{y}(y\u0026rsquo;|x,x\u0026rsquo;,y,a) = \\frac{T((x\u0026rsquo;,y\u0026rsquo;) | (x,y),a) O((x\u0026rsquo;,y\u0026rsquo;)|s\u0026rsquo;,a)}{T_{x}(x\u0026rsquo;|x,y,a)}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere our state space is now split into \\(s \\in S = X \\times Y\\) s.t. 
\\(s=(x,y)\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmomdp/","tags":null,"title":"MOMDP"},{"categories":null,"contents":"Monetarist theory is a theory of economics proposed by Milton Freedman which asserts that Keynsian economics only applies in the limited case that central bank need to keep the money supply growing; otherwise, the free market can handle itself.\nTherefore the Monetarist theorists propose that the stock market crash of 1929 was caused that the US monetary fund did a bad job of actually controlling the funds, and didn\u0026rsquo;t inject enough money into economy.\nSee also the opposite: demand-driven theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theory\u003c/a\u003e is a theory of economics proposed by \u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e which asserts that \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian economics\u003c/a\u003e only applies in the limited case that central bank need to keep the money supply growing; otherwise, the free market can handle itself.\u003c/p\u003e\n\u003cp\u003eTherefore the \u003ca href=\"/posts/kbhmonetarist_theory/\"\u003eMonetarist theorists\u003c/a\u003e propose that the \u003ca href=\"/posts/kbhcauses_of_the_great_depression/#stock-market-crash-of-1929\"\u003estock market crash of 1929\u003c/a\u003e was caused that the US monetary fund did a bad job of actually controlling the funds, and didn\u0026rsquo;t inject enough money into economy.\u003c/p\u003e\n\u003cp\u003eSee also the opposite: \u003ca href=\"/posts/kbhdemand_driven_theory/\"\u003edemand-driven theory.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmonetarist_theory/","tags":null,"title":"Monetarist theory"},{"categories":null,"contents":"monitor pattern is a multithreading pattern to help prevent race conditions and deadlocks.\nassociate a single lock with a collection of variables (a 
\u0026ldquo;class\u0026rdquo;), having one lock associated with the group.\nany time when you want to access anything in that group, you unlock the mutex associated with the group. meaning, there\u0026rsquo;s only one mutex which can be used to change shared state.\nBridge Crossing There is cars that are crossing a one lane bridge: each car in a thread, they have to coordinate when/where to cross the bridge.\nCar can be going east or the west. All cars must be traveling in the same direction. And a car can only go once the coast is clear.\nInterface static void cross_bridge_east(size_t carid) { approach_bridge(); // sleeping driveAcross(EAST); // sleeping } static void cross_bridge_west(size_t carid) { approach_bridge(); // sleeping driveAcross(WEST); // sleeping } we need to ensure that, we are sharing a one lane bridge, and they don\u0026rsquo;t collide.\nMonitor Pattern REDUCES NUMBER OF PARAMS!\nMethod for the mutex management system isolated into a single, thread-safe class. All the mutexes, etc., all of the mutex gunk gets isolated into a thread safe instance method of the bridge class.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmonitor_pattern/\"\u003emonitor pattern\u003c/a\u003e is a \u003ca href=\"/posts/kbhmultithreading/\"\u003emultithreading\u003c/a\u003e pattern to help prevent \u003ca href=\"/posts/kbhmultithreading/#race-condition\"\u003erace condition\u003c/a\u003es and \u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eassociate a single lock with a collection of variables\u003c/strong\u003e (a \u0026ldquo;class\u0026rdquo;), having one lock associated with the group.\u003c/p\u003e\n\u003cp\u003eany time when you want to access anything in that group, you unlock the \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003e associated with the group. 
meaning, there\u0026rsquo;s only one mutex which can be used to change shared state.\u003c/p\u003e\n\u003ch2 id=\"bridge-crossing\"\u003eBridge Crossing\u003c/h2\u003e\n\u003cp\u003eThere is cars that are crossing a \u003cstrong\u003eone lane bridge\u003c/strong\u003e: each car in a thread, they have to coordinate when/where to cross the bridge.\u003c/p\u003e\n\u003cp\u003eCar can be going east or the west. All cars must be traveling in the same direction. And a car can only go once the coast is clear.\u003c/p\u003e\n\u003ch3 id=\"interface\"\u003eInterface\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ecross_bridge_east\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecarid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eapproach_bridge\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edriveAcross\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEAST\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ecross_bridge_west\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecarid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eapproach_bridge\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edriveAcross\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eWEST\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// sleeping\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewe need to ensure that, we are sharing a one lane bridge, and they don\u0026rsquo;t collide.\u003c/p\u003e\n\u003ch3 id=\"monitor-pattern\"\u003eMonitor Pattern\u003c/h3\u003e\n\u003cp\u003eREDUCES NUMBER OF PARAMS!\u003c/p\u003e\n\u003cp\u003eMethod for the mutex management system isolated into a single, thread-safe class. All the mutexes, etc., all of the mutex gunk gets isolated into a thread safe instance method of the bridge class.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmonitor_pattern/","tags":null,"title":"monitor pattern"},{"categories":null,"contents":" \\(\\mathcal{P}\\) problem (states, transitions, etc.) \\(N\\) visit counts \\(Q\\) a q-table: action-value estimates \\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower \\(U\\) value function estimate; usually a Rollout Policy, estimate at some depth \\(d\\) \\(c\\) exploration constant After \\(n\\) simulation s from the starting state; we find the best action for our current state from our q-table.\nSubroutine: simulate(state, depth_remaining)\nIf depth_remaining=0, simply return the utility from the value function estimate For some s, Actions that we just got, if we haven\u0026rsquo;t seen it, we just return the value function estimate + initialize the N and Q tables select an action via the monte-carlo exploration formula sample a next state and current reward based on the action you gotten via a generative model value = reward + discount*simulate(next_state, depth_remaining-1) add to the N(state, action) count update the q table at (state, action): Q[s,a] + = (value-Q[s,a])/N[s,a] (\u0026ldquo;how much better is taking this action?\u0026rdquo; \u0026mdash; with later times taking this action more heavily discounted) monte-carlo exploration \\begin{equation} \\max_{a} Q(s,a) + c \\sqrt{ \\frac{\\log 
\\sum_{a}N(s,a)}{N(s,a)}} \\end{equation}\nwhere \\(c\\) is the exploration factor, and \\(N\\) is the next steps.\nWe want to encourage the exploration of things we haven\u0026rsquo;t tried as much. Note that as \\(N(s,a)\\) is small, the right term is larger. So, if its also not too bad in terms of \\(Q\\), we will choose it.\nIf \\(N(s,a)\\) is zero, you return the action. You always want to try something at least once.\n","html":"\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(N\\) visit counts\u003c/li\u003e\n\u003cli\u003e\\(Q\\) a q-table: \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e estimates\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003cli\u003e\\(U\\) \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function estimate\u003c/a\u003e; usually a \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout-policy\"\u003eRollout Policy\u003c/a\u003e, estimate at some depth \\(d\\)\u003c/li\u003e\n\u003cli\u003e\\(c\\) exploration constant\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAfter \\(n\\) simulation s from the starting state; we find the best action for our current state from our q-table.\u003c/p\u003e\n\u003cp\u003eSubroutine: \u003ccode\u003esimulate(state, depth_remaining)\u003c/code\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIf \u003ccode\u003edepth_remaining=0\u003c/code\u003e, simply return the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e from the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e estimate\u003c/li\u003e\n\u003cli\u003eFor some \u003ccode\u003es, Actions\u003c/code\u003e that we just got, if we haven\u0026rsquo;t seen it, we just return the 
\u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e estimate + initialize the N and Q tables\u003c/li\u003e\n\u003cli\u003eselect an action via the \u003ca href=\"#monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/a\u003e formula\u003c/li\u003e\n\u003cli\u003esample a next state and current reward based on the action you gotten via a \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003evalue = reward + discount*simulate(next_state, depth_remaining-1)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eadd to the \u003ccode\u003eN(state, action)\u003c/code\u003e count\u003c/li\u003e\n\u003cli\u003eupdate the q table at (state, action): \u003ccode\u003eQ[s,a] + = (value-Q[s,a])/N[s,a]\u003c/code\u003e (\u0026ldquo;how much better is taking this action?\u0026rdquo; \u0026mdash; with later times taking this action more heavily discounted)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{a} Q(s,a) + c \\sqrt{ \\frac{\\log \\sum_{a}N(s,a)}{N(s,a)}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c\\) is the exploration factor, and \\(N\\) is the next steps.\u003c/p\u003e\n\u003cp\u003eWe want to encourage the exploration of things we haven\u0026rsquo;t tried as much. Note that as \\(N(s,a)\\) is small, the right term is larger. So, if its also not too bad in terms of \\(Q\\), we will choose it.\u003c/p\u003e\n\u003cp\u003eIf \\(N(s,a)\\) is zero, you return the action. 
You always want to try something at least once.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmonte_carlo_tree_search/","tags":null,"title":"monte-carlo tree search"},{"categories":null,"contents":"The fallout of the Rosa Parks incident, which is when many of Montgomery residents.\nThe boycotts were developed by Martin Luther King.\n","html":"\u003cp\u003eThe fallout of the \u003ca href=\"/posts/kbhrosa_parks/\"\u003eRosa Parks\u003c/a\u003e incident, which is when many of Montgomery residents.\u003c/p\u003e\n\u003cp\u003eThe boycotts were developed by \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmontomery_bus_boycott/","tags":null,"title":"Montgomery Bus Boycott"},{"categories":null,"contents":"A morpheme is the smallest meaning-bearing unit of a language. \u0026ldquo;er\u0026rdquo;, or \u0026ldquo;ist\u0026rdquo;, etc. It contains:\nstems: core meaning-bearing units, and affexes: parts that adhere to stems For non space-delineated languages, tokenization happens with morpheme (\u0026ldquo;词\u0026rdquo;).\nConsider:\n姚明进入总决赛\nIs yao/ming first and last names seperated. Is zong combined with juesai? (i.e. ADJ vs. NOUN).\nCommonly, Chinese performs word level tokenization if you don\u0026rsquo;t want to deal with it. Typically, this usuals neural sequence models.\n","html":"\u003cp\u003eA morpheme is the smallest meaning-bearing unit of a language. \u0026ldquo;er\u0026rdquo;, or \u0026ldquo;ist\u0026rdquo;, etc. 
It contains:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003estems\u003c/strong\u003e: core meaning-bearing units, and\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eaffexes\u003c/strong\u003e: parts that adhere to stems\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eFor non space-delineated languages, \u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e happens with \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003e (\u0026ldquo;词\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e姚明进入总决赛\u003c/p\u003e\n\u003cp\u003eIs yao/ming first and last names seperated. Is zong combined with juesai? (i.e. ADJ vs. NOUN).\u003c/p\u003e\n\u003cp\u003eCommonly, Chinese performs word level tokenization if you don\u0026rsquo;t want to deal with it. Typically, this usuals neural sequence models.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmorpheme/","tags":null,"title":"morpheme"},{"categories":null,"contents":"A morphism is a not-necessarily-invertible map between two objects of a category. If the map is indeed invertable, then we call the map an isomorphism.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmorphism/\"\u003emorphism\u003c/a\u003e is a not-necessarily-invertible map between two \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es of a \u003ca href=\"/posts/kbhcategory/\"\u003ecategory\u003c/a\u003e. 
If the map is indeed \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, then we call the map an \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphism\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmorphism/","tags":null,"title":"morphism"},{"categories":null,"contents":"recall morphemes are the smallest meaningful units of a word.\nmorphological parsing is the act of getting morphemes: cats =\u0026gt; =cat s=o\nstem + affix stemming stemming just chops off the morpheme affixes; leaving the stems. \u0026ldquo;heights\u0026rdquo; =\u0026gt; \u0026ldquo;heigh\u0026rdquo;. without lemmatization.\nThis increases recall (more stuff is caught we want to catch) at he cost of precision (what we catch is probably lots of false positives).\nLanguages with complex cojugation or morphology, this can\u0026rsquo;t work because you can\u0026rsquo;t just chop.\nporter stemmer A series of rewrite rules which performs stemming.\n","html":"\u003cp\u003erecall \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003es are the smallest meaningful units of a word.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmorphological_parsing/\"\u003emorphological parsing\u003c/a\u003e is the act of getting morphemes: \u003ccode\u003ecats\u003c/code\u003e =\u0026gt; =cat s=o\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estem +\u003c/li\u003e\n\u003cli\u003eaffix\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"stemming\"\u003estemming\u003c/h2\u003e\n\u003cp\u003estemming just chops off the morpheme affixes; leaving the stems. \u0026ldquo;heights\u0026rdquo; =\u0026gt; \u0026ldquo;heigh\u0026rdquo;. 
without lemmatization.\u003c/p\u003e\n\u003cp\u003eThis increases recall (more stuff is caught we want to catch) at he cost of precision (what we catch is probably lots of false positives).\u003c/p\u003e\n\u003cp\u003eLanguages with complex cojugation or morphology, this can\u0026rsquo;t work because you can\u0026rsquo;t just chop.\u003c/p\u003e\n\u003ch3 id=\"porter-stemmer\"\u003eporter stemmer\u003c/h3\u003e\n\u003cp\u003eA series of rewrite rules which performs stemming.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmorphological_parsing/","tags":null,"title":"morphological parsing"},{"categories":null,"contents":"Take X-Rays and generate clinical reports.\nMethod encoder decoder architectures\nEncoder ConViT: convolutional vision transformer. Special thing: we swap out the attention\nDouble Weighted Multi-Head Attention We want to force the model to focus on one thing, so we modulate the model based on the weights of other: if one head is big, we make the other head small.\nwhere \\(w_{\\cos i} = \\frac{\\sum_{i}^{} \\cos \\qty (att_{a}, att_{base})}{N}\\)\n\\begin{equation} w = w_{a} \\cdot (1- w_{\\cos i}) \\end{equation}\nmeaning:\n\\begin{equation} att_{dwma} = w \\cdot att \\end{equation}\nDecoding Goood ol\u0026rsquo; Hierarchical-Decoder\n","html":"\u003cp\u003eTake X-Rays and generate clinical reports.\u003c/p\u003e\n\u003ch2 id=\"method\"\u003eMethod\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eencoder decoder architectures\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"encoder\"\u003eEncoder\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eConViT\u003c/strong\u003e: convolutional vision transformer. 
Special thing: we swap out the attention\u003c/p\u003e\n\u003ch4 id=\"double-weighted-multi-head-attention\"\u003eDouble Weighted Multi-Head Attention\u003c/h4\u003e\n\u003cp\u003eWe want to force the model to focus on one thing, so we modulate the model based on the weights of other: if one head is big, we make the other head small.\u003c/p\u003e\n\u003cp\u003ewhere \\(w_{\\cos i} = \\frac{\\sum_{i}^{} \\cos \\qty (att_{a}, att_{base})}{N}\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw = w_{a} \\cdot (1- w_{\\cos i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\natt_{dwma} = w \\cdot att\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"decoding\"\u003eDecoding\u003c/h3\u003e\n\u003cp\u003eGoood ol\u0026rsquo; \u003cstrong\u003eHierarchical-Decoder\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmulti_lstm_for_clinical_report_generation/","tags":null,"title":"Multi-LSTM for Clinical Report Generation"},{"categories":null,"contents":"simple games constituents agent \\(i \\in X\\) the set of agents. 
joint action space: \\(A = A\u0026rsquo; \\times A^{2} \\times \u0026hellip; \\times A^{k}\\) joint action would be one per agent \\(\\vec{a} = (a_{1}, \u0026hellip;, a_{k})\\) joint reward function \\(R(a) = R\u0026rsquo;(\\vec{a}), \u0026hellip;, R(\\vec{a})\\) additional information prisoner\u0026rsquo;s dilemma Cooperate Defect Cooperate -1, -1 -4, 0 Defect 0, -4 -3, -3 traveler\u0026rsquo;s dilemma two people write down the price of their luggage, between 2-100 the lower amount gets that value plus 2 the higher amount gets the lower amount minus 2 joint policy agent utility for agent number \\(i\\)\n\\begin{equation} U^{i} (\\vec{\\pi}) = \\sum_{a \\in A}^{} R^{(i)}(\\vec{a}) \\prod_{j}^{} \\pi^{(j)}(a^{(j)}) \\end{equation}\nthis is essentially the reward you get given you took\nresponse model how would other agents respond to our system?\n\\(a^{-i}\\): joint action except for agent \\(i\\) \\(\\vec{a} = (a^{i}, a^{-i})\\), \\(R(a^{i}, a^{-i}) = R(\\vec{a})\\) best-response deterministic best response model for agent \\(i\\):\n\\begin{equation} \\arg\\max_{a^{i} \\in A^{i}} U^{i}(a^{i}, \\pi^{-i}) \\end{equation}\nwhere the response to agent \\(a\\) is deterministically selected.\nFor prisoner\u0026rsquo;s dilemma, this results in both parties defecting because that would maximise the utility.\nsoftmax response its like Softmax Method:\n\\begin{equation} \\pi^{i}(a^{i}) \\propto \\exp\\qty(\\lambda U^{i}(a^{i}, \\pi^{-1})) \\end{equation}\nfictitious play play at some kind of game continuously\nDominant Strategy Equilibrium The dominant strategy is a policy that is the best response to all other possible agent policies. Not all games have a Dominant Strategy Equilibrium, because there are games for which the best response is never invariant to others\u0026rsquo; strategies (rock paper scissors).\nNash Equilibrium A Nash Equilibrium is a joint policy \\(\\pi\\) where everyone is following their best response: i.e. 
no one is incentive to unilaterally change from their policy. This exists for every game. In general, Nash Equilibrium is very hard to compute: it is p-pad (which is unclear relationally to np-complete).\n","html":"\u003ch2 id=\"simple-games\"\u003esimple games\u003c/h2\u003e\n\u003ch3 id=\"constituents\"\u003econstituents\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eagent \\(i \\in X\\) the set of agents.\u003c/li\u003e\n\u003cli\u003ejoint action space: \\(A = A\u0026rsquo; \\times A^{2} \\times \u0026hellip; \\times A^{k}\\)\u003c/li\u003e\n\u003cli\u003ejoint action would be one per agent \\(\\vec{a} = (a_{1}, \u0026hellip;, a_{k})\\)\u003c/li\u003e\n\u003cli\u003ejoint reward function \\(R(a) = R\u0026rsquo;(\\vec{a}), \u0026hellip;, R(\\vec{a})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"additional-information\"\u003eadditional information\u003c/h3\u003e\n\u003ch4 id=\"prisoner-s-dilemma\"\u003eprisoner\u0026rsquo;s dilemma\u003c/h4\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003eCooperate\u003c/th\u003e\n\u003cth\u003eDefect\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCooperate\u003c/td\u003e\n\u003ctd\u003e-1, -1\u003c/td\u003e\n\u003ctd\u003e-4, 0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eDefect\u003c/td\u003e\n\u003ctd\u003e0, -4\u003c/td\u003e\n\u003ctd\u003e-3, -3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"traveler-s-dilemma\"\u003etraveler\u0026rsquo;s dilemma\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003etwo people write down the price of their luggage, between 2-100\u003c/li\u003e\n\u003cli\u003ethe lower amount gets that value plus 2\u003c/li\u003e\n\u003cli\u003ethe higher amount gets the lower amount minus 2\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"joint-policy-agent-utility\"\u003ejoint policy agent utility\u003c/h2\u003e\n\u003cp\u003efor agent number 
\\(i\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{i} (\\vec{\\pi}) = \\sum_{a \\in A}^{} R^{(i)}(\\vec{a}) \\prod_{j}^{} \\pi^{(j)}(a^{(j)})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is essentially the reward you get given you took\u003c/p\u003e\n\u003ch2 id=\"response-model\"\u003eresponse model\u003c/h2\u003e\n\u003cp\u003ehow would other agents respond to our system?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(a^{-i}\\): joint action except for agent \\(i\\)\u003c/li\u003e\n\u003cli\u003e\\(\\vec{a} = (a^{i}, a^{-i})\\),\u003c/li\u003e\n\u003cli\u003e\\(R(a^{i}, a^{-i}) = R(\\vec{a})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"best-response\"\u003ebest-response\u003c/h3\u003e\n\u003cp\u003edeterministic best response model for agent \\(i\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{a^{i} \\in A^{i}} U^{i}(a^{i}, \\pi^{-i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the response to agent \\(a\\) is deterministically selected.\u003c/p\u003e\n\u003cp\u003eFor \u003ca href=\"#prisoner-s-dilemma\"\u003eprisoner\u0026rsquo;s dilemma\u003c/a\u003e, this results in both parties defecting because that would maximise the utility.\u003c/p\u003e\n\u003ch3 id=\"softmax-response\"\u003esoftmax response\u003c/h3\u003e\n\u003cp\u003eits like \u003ca href=\"/posts/kbhdirected_exploration/#softmax-method\"\u003eSoftmax Method\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi^{i}(a^{i}) \\propto \\exp\\qty(\\lambda U^{i}(a^{i}, \\pi^{-1}))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"fictitious-play\"\u003efictitious play\u003c/h3\u003e\n\u003cp\u003eplay at some kind of game continuously\u003c/p\u003e\n\u003ch2 id=\"dominant-strategy-equilibrium\"\u003eDominant Strategy Equilibrium\u003c/h2\u003e\n\u003cp\u003eThe dominant strategy is a policy that is the best response to all other possible agent policies. 
Not all games have a \u003ca href=\"#dominant-strategy-equilibrium\"\u003eDominant Strategy Equilibrium\u003c/a\u003e, because there are games for which the best response is never invariant to others\u0026rsquo; strategies (rock paper scissors).\u003c/p\u003e\n\u003ch2 id=\"nash-equilibrium\"\u003eNash Equilibrium\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#nash-equilibrium\"\u003eNash Equilibrium\u003c/a\u003e is a joint policy \\(\\pi\\) where everyone is following their best response: i.e. no one is incentive to unilaterally change from their policy. This exists for every game. In general, \u003ca href=\"#nash-equilibrium\"\u003eNash Equilibrium\u003c/a\u003e is very hard to compute: it is p-pad (which is unclear relationally to np-complete).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiagent_reasoning/","tags":null,"title":"multiagent reasoning"},{"categories":null,"contents":"Key idea: multi-modality, when leveraged well, leads to faster convergence.\nData Availability Health and health sensing requires labels, but health signals require specialist knowledge + broader context to label.\ntypical image labeling: 0.05/label medical imaging: 4.00/label Even if want to automate the study, we need to Kyntic style strap a thing to a person and have soft labels that we align with raw sensor data..\nInstead, Do Time-series Instead: run proxy self-supervised studies into the future\u0026mdash;pretraining on a shit tone of sensor data just as timeseries regressing into the future without any labels.\nThen, take the resulting latents and do FTing on specific tasks with your minimal labeled data.\n\u0026ldquo;arrow of time\u0026rdquo;?\nApproaches Best method: spacial masking + FT downstream; the system also generalizes well even with missing modalities.\nable to achieve good models via multimodal signals was able to handle missing data\u0026hellip; by skipping them is more data efficient by doing masked pret training requires no pre-processing 
Give up and use an LLM main problem: tokenized is very bad at splitting up numbers.\nYou can therefore come up with paired architectures with some modality encoder\u0026mdash;taking the data and encode it using a SoTA EKG encoder, for instance\u0026mdash;before passing the embeddings into the LLM.\nhttps://arxiv.org/abs/2309.16058\n","html":"\u003cp\u003eKey idea: multi-modality, when leveraged well, leads to faster convergence.\u003c/p\u003e\n\u003ch2 id=\"data-availability\"\u003eData Availability\u003c/h2\u003e\n\u003cp\u003eHealth and health sensing requires labels, but health signals require specialist knowledge + broader context to label.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etypical image labeling: 0.05/label\u003c/li\u003e\n\u003cli\u003emedical imaging: 4.00/label\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eEven if want to automate the study, we need to Kyntic style strap a thing to a person and have soft labels that we align with raw sensor data..\u003c/p\u003e\n\u003ch2 id=\"instead-do-time-series\"\u003eInstead, Do Time-series\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eInstead\u003c/strong\u003e: run proxy self-supervised studies into the future\u0026mdash;pretraining on a shit tone of sensor data just as timeseries regressing into the future without any labels.\u003c/p\u003e\n\u003cp\u003eThen, take the resulting latents and do FTing on specific tasks with your minimal labeled data.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;arrow of time\u0026rdquo;?\u003c/p\u003e\n\u003ch2 id=\"approaches\"\u003eApproaches\u003c/h2\u003e\n\u003cp\u003eBest method: \u003cstrong\u003espacial masking\u003c/strong\u003e + \u003cstrong\u003eFT downstream\u003c/strong\u003e; the system also generalizes well even with missing modalities.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eable to achieve good models via multimodal signals\u003c/li\u003e\n\u003cli\u003ewas able to handle missing data\u0026hellip; by skipping them\u003c/li\u003e\n\u003cli\u003eis more data 
efficient by doing masked pret training\u003c/li\u003e\n\u003cli\u003erequires no pre-processing\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"give-up-and-use-an-llm\"\u003eGive up and use an LLM\u003c/h2\u003e\n\u003cp\u003emain problem: \u003cstrong\u003etokenized is very bad at splitting up numbers\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eYou can therefore come up with paired architectures with some modality encoder\u0026mdash;taking the data and encode it using a SoTA EKG encoder, for instance\u0026mdash;before passing the embeddings into the LLM.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://arxiv.org/abs/2309.16058\"\u003ehttps://arxiv.org/abs/2309.16058\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultimodal_ai_for_real_world_signals/","tags":null,"title":"Multimodal AI for Real-World Signals"},{"categories":null,"contents":"Its a general form of the combinations formula:\n\\begin{equation} {n \\choose k_1, k_2, \\dots, k_{n}} = \\frac{n!}{k_{1}! k_2! \\dots k_{n}!} \\end{equation}\n","html":"\u003cp\u003eIts a general form of the \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es formula:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n{n \\choose k_1, k_2, \\dots, k_{n}} = \\frac{n!}{k_{1}! k_2! 
\\dots k_{n}!}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultinomial_coefficient/","tags":null,"title":"multinomial coefficient"},{"categories":null,"contents":"\\begin{equation} B = \\qty[(x_1, y_1), \\dots, (x_{n}, y_{n})] \\end{equation}\nwhere the labels would be:\n\\begin{equation} C(b) = \\begin{cases} 0, if \\sum_{i}^{}y_{i} = 0 \\\\ 1, \\text{otherwise} \\end{cases} \\end{equation}\nand then we maxpool\nMILFormer MILFormer is a multiple-instance learning scheme which makes predictions over input patches whose output predictions are weighted as multi-distirbution.\n","html":"\u003cp\u003e\\begin{equation}\nB = \\qty[(x_1, y_1), \\dots, (x_{n}, y_{n})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the labels would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC(b) = \\begin{cases}\n0, if \\sum_{i}^{}y_{i} = 0 \\\\\n1, \\text{otherwise}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then we maxpool\u003c/p\u003e\n\u003ch2 id=\"milformer\"\u003eMILFormer\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#milformer\"\u003eMILFormer\u003c/a\u003e is a multiple-instance learning scheme which makes predictions over input patches whose output predictions are weighted as multi-distirbution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiple_instance_learning/","tags":null,"title":"Multiple Instance Learning"},{"categories":null,"contents":"The multiplicative identity allows another number to retain its identity after multiplying. Its \\(1\\) [for fields?].\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e allows another \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e to retain its identity after \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplying\u003c/a\u003e. 
Its \\(1\\) [for fields?].\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiplicative_identity/","tags":null,"title":"multiplicative identity"},{"categories":null,"contents":"TBD\n","html":"\u003cp\u003eTBD\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiplying/","tags":null,"title":"multiplying"},{"categories":null,"contents":"multiprocessing is the act of switching between multiple processes so fast that it appears multiple processes are running concurrently.\nOS schedules tasks each program gets a little time, then has to wait in a turn to continue executing base level syscalls that requires waiting will be moved off before finishing, and in the meantime others can wait. like file read.\nprogram A program is a script to be ran.\nprocess a process is an instance of a program. Every process has a unique identifier, each process is uniquely identified by a PID.\nsyscall get_pid will give you back the PID.\nopen file table open file table is a system wide for each file opening session, mentioning what the mode and cursor of the file is open, and the number of file descriptor tables pointing to it with a refcount.\nWhen we call close, the refcount decrements. When refcount=0, the file is deleted. This means, if you share a pipe, both parent and child has to close the pipe.\nread blocks until at least 1 byte is available, or until all write ends are closed.\n","html":"\u003cp\u003emultiprocessing is the act of switching between multiple \u003ca href=\"#process\"\u003eprocess\u003c/a\u003ees so fast that it appears multiple processes are running concurrently.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eOS \u003cem\u003eschedules\u003c/em\u003e tasks\u003c/li\u003e\n\u003cli\u003eeach program gets a little time, then has to wait in a turn to continue executing\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ebase level syscalls that requires waiting will be moved off before finishing, and in the meantime others can wait. 
like file read.\u003c/p\u003e\n\u003ch2 id=\"program\"\u003eprogram\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#program\"\u003eprogram\u003c/a\u003e is a script to be ran.\u003c/p\u003e\n\u003ch2 id=\"process\"\u003eprocess\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#process\"\u003eprocess\u003c/a\u003e is an instance of a \u003ca href=\"#program\"\u003eprogram\u003c/a\u003e. Every process has a unique identifier, each process is uniquely identified by a \u003ca href=\"#process\"\u003ePID\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003esyscall \u003ccode\u003eget_pid\u003c/code\u003e will give you back the PID.\u003c/p\u003e\n\u003ch3 id=\"open-file-table\"\u003eopen file table\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#open-file-table\"\u003eopen file table\u003c/a\u003e is a system wide for each file opening session, mentioning what the mode and cursor of the file is open, and the number of \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e tables pointing to it with a \u003ccode\u003erefcount\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003eWhen we call close, the \u003ccode\u003erefcount\u003c/code\u003e decrements. When \u003ccode\u003erefcount=0\u003c/code\u003e, the file is deleted. 
This means, if you share a \u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e, both parent and child has to close the \u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eread blocks\u003c/strong\u003e until at least 1 byte is available, or until all write ends are closed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmultiprocessing/","tags":null,"title":"multiprocessing"},{"categories":null,"contents":" we can have concurrency within a single process\u0026mdash;each running a single function We will solve problems:\nnever race condition never deadlock thread you can spawn a thread using the thread() can even pass function parameters threads share all virtual address space: bugs can arise when multiple threads modify the same thing at the same time\u0026mdash;each thread has access to a small chunk of the stack threads are actually the unit of concurrency: the OS actually chooses threads to run // now the thread can execute at any time: once a thread is made, it will run in any order thread myThread(function_to_run, arg1, arg2, ...); // threads run AS SOON AS SPAWENED: so We can wait for a thread:\nmyThread.join() You can also start a bunch on a loop:\nthread threads[3]; for (thread\u0026amp; cf : threads) { cf = thread(func, ...); } Importantly, unlike waitpid, we can\u0026rsquo;t join an arbitrary thread. We basically have to wait for all your threads to finish.\nDEBUGGING TRICK: adding a sleep call everywhere shouldn\u0026rsquo;t cause any problems; if it does, there\u0026rsquo;s a race condition.\npassing by reference threading doesn\u0026rsquo;t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.\nstatic void mythingref(int \u0026amp;pbr); thread(myfunc, ref(myint)); Remember: ref will SHARE MEMORY, and you have no control over when the thread runs. 
So once a pointer is passed all bets are off in terms of what values things take on.\nprocesses vs threads Processes Threads isolate virtual address spaces shares virtual address space to share info can run external programs can\u0026rsquo;t run external programs (execvp wipes) harder to coordinate tasks within the same program easier to coordinate tasks within a program threads are really the main way to break down big tasks.\nrace condition undesirable behavior caused by arbitrary execution order. we typically solve them using mutexes.\nthread safe thread safe functions are ones whereby its designed to prevent against unexpected behavior during threading.\nwe want atomicity in the code: we want entire data viewing + modification operations to not be interrupted\u0026mdash;otherwise, you will generate race conditions.\nRecall: C++ statements themselves are not INHERENTLY autonomic.\nwe want to outline a \u0026ldquo;critical section\u0026rdquo; and ensure it doesn\u0026rsquo;t get ran more than once.\ncritical section A critical section is a region of code which should only be executed by one thread at a time. 
We want to keep this section as small as possible to preserve performance.\nwe want to organize it to be as small as we possibly can we want to move the critical section in terms of expressions; so if you have a loop you should put the loop in the outer area, and do the checking + break within if our critical sections are not small, we would have little benefits to multithreading\nmutex it would be nice if a critical section can only be executed once; a mutex can be shared across threads, but can only be \u0026ldquo;owned\u0026rdquo; by a single thread at once.\nmutex tmp; tmp.lock(); tmp.unlock(); importantly, if multiple threads are waiting on a mutex, the next thread that\u0026rsquo;s going to get the mutex\nwhen there are multiple threads writing to a value when there is a thread writing and one or more threads reading if you are no writes, you don\u0026rsquo;t need a mutex when dealing with mutex, beware of deadlock\nSleep call can happen by putting a sleep call in certain places.\nimplementation Things it needs to do:\ntrack whether or not the mutex is locked/unlocked track which thread is the owner of the lock threads that want to get this lock int locked = 0; Queue blocked_queue; void Lock::Lock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 IntrGuard grd; if (!locked) { // if our thread is not locked, just lock it locked = 1; } else { // if our thread is locked, we need to prevent our current // thread from going to the ready queue, and push it to the current thread blocked_queue.push(CURRENT_THREAD); // remember this isn\u0026#39;t an issue even if IntrGuard // didn\u0026#39;t yet go out of scope; because it will either // land on a context_switch which will enable interrupts for you // or land on the beginning of a threadfunc helper, which // is also going to enable interrupts for you // nicely, the interrupts are here are *off* as required because switching // to 
another thread always will result in reenabling (either by new thread, // by timer handler, or by IntrGuard) mark_block_and_call_schedule(CURRENT_THREAD); } } void Lock::Unlock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 IntrGuard grd; // if our thread is locked and nobody is waiting for it if (q.empty()) { locked = 0; } else { unblock_thread(q.pop()); // we do not switch to the unblocked thread, just add it to the // ready queue. we are entrusting the scheduler to start this thread // whenever we feel right } } IntrGuard IntrGuard will turn off interrupts for the duration of its scope; when it goes out of scope, it will restore the state of the interrupt before (whether on or off). So, implementing the mutex code above without InterGuard:\nint locked = 0; Queue blocked_queue; void Lock::Lock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 bool interrupsEnabled = intr_enabled(); // only disable interrupts if they are currently // on if (interrupsEnabled) { intr_enable(false); } if (!locked) { // if our thread is not locked, just lock it locked = 1; } else { // if our thread is locked, we need to prevent our current // thread from going to the ready queue, and push it to the current thread blocked_queue.push(CURRENT_THREAD); mark_block_and_call_schedule(CURRENT_THREAD); } // if interrupts was on, turn them on again. 
// otherwise, do nothing if (interrupsEnabled) { intr_enable(true); } } void Lock::Unlock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 bool interrupsEnabled = intr_enabled(); // only disable interrupts if they are currently // on if (interrupsEnabled) { intr_enable(false); } // if our thread is locked and nobody is waiting for it if (q.empty()) { locked = 0; } else { unblock_thread(q.pop()); // we do not switch to the unblocked thread, just add it to the // ready queue. we are entrusting the scheduler to start this thread // whenever we feel right } if (interrupsEnabled) { intr_enable(true); } } ","html":"\u003cul\u003e\n\u003cli\u003ewe can have concurrency \u003cstrong\u003ewithin a single process\u003c/strong\u003e\u0026mdash;each running a single function\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe will solve problems:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003enever \u003ca href=\"#race-condition\"\u003erace condition\u003c/a\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003enever \u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"thread\"\u003ethread\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eyou can spawn a thread using the \u003cstrong\u003ethread()\u003c/strong\u003e can even pass function parameters\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ethreads share all virtual address space\u003c/strong\u003e: bugs can arise when multiple threads modify the same thing at the same time\u0026mdash;each thread has access to a small chunk of the stack\u003c/li\u003e\n\u003cli\u003ethreads are actually the unit of concurrency: the OS actually chooses threads to run\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// now the thread can execute at any time: once a thread is made, it will run in any order\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emyThread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunction_to_run\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earg1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earg2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// threads run AS SOON AS SPAWENED: so\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can wait for a thread:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emyThread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejoin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can also start a bunch on a loop:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreads\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecf\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreads\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ecf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eImportantly, unlike \u003ca href=\"/posts/kbhfork/#waitpid\"\u003ewaitpid\u003c/a\u003e, we can\u0026rsquo;t join an arbitrary thread. We basically have to wait for all your threads to finish.\u003c/p\u003e\n\u003cp\u003eDEBUGGING TRICK: \u003cstrong\u003e\u003cstrong\u003eadding a sleep call everywhere shouldn\u0026rsquo;t cause any problems\u003c/strong\u003e\u003c/strong\u003e; if it does, there\u0026rsquo;s a race condition.\u003c/p\u003e\n\u003ch3 id=\"passing-by-reference\"\u003epassing by reference\u003c/h3\u003e\n\u003cp\u003ethreading doesn\u0026rsquo;t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emythingref\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epbr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emyfunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eref\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emyint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRemember: ref will \u003cstrong\u003e\u003cstrong\u003eSHARE MEMORY\u003c/strong\u003e\u003c/strong\u003e, and you have no control over when the thread runs. So once a pointer is passed all bets are off in terms of what values things take on.\u003c/p\u003e\n\u003ch2 id=\"process--kbhmultiprocessing-dot-md--es-vs-thread--orga50f688--s\"\u003e\u003ca href=\"/posts/kbhmultiprocessing/#process\"\u003eprocess\u003c/a\u003ees vs \u003ca href=\"#thread\"\u003ethread\u003c/a\u003es\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eProcesses\u003c/th\u003e\n\u003cth\u003eThreads\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eisolate virtual address spaces\u003c/td\u003e\n\u003ctd\u003eshares virtual address space to share info\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ecan run external programs\u003c/td\u003e\n\u003ctd\u003ecan\u0026rsquo;t run external programs (execvp wipes)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eharder to coordinate tasks within the same program\u003c/td\u003e\n\u003ctd\u003eeasier to coordinate tasks within a program\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003ca href=\"#thread\"\u003ethread\u003c/a\u003es are really the main way to break down big tasks.\u003c/p\u003e\n\u003ch2 id=\"race-condition\"\u003erace condition\u003c/h2\u003e\n\u003cp\u003eundesirable behavior caused by arbitrary execution order. 
we typically solve them using \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003ees.\u003c/p\u003e\n\u003ch3 id=\"thread-safe\"\u003ethread safe\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#thread-safe\"\u003ethread safe\u003c/a\u003e functions are ones whereby its designed to prevent against unexpected behavior during threading.\u003c/p\u003e\n\u003cp\u003ewe want \u003ca href=\"/posts/kbhdistributed_algorithum/#atomicity\"\u003eatomicity\u003c/a\u003e in the code: we want entire data viewing + modification operations to not be interrupted\u0026mdash;otherwise, you will generate race conditions.\u003c/p\u003e\n\u003cp\u003eRecall: \u003cstrong\u003e\u003cstrong\u003eC++ statements themselves are not INHERENTLY autonomic\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003ewe want to outline a \u0026ldquo;critical section\u0026rdquo; and ensure it doesn\u0026rsquo;t get ran more than once.\u003c/p\u003e\n\u003ch3 id=\"critical-section\"\u003ecritical section\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#critical-section\"\u003ecritical section\u003c/a\u003e is a region of code which should only be executed by one thread at a time. 
We want to keep this section as small as possible to preserve performance.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe want to organize it to be as small as we possibly can\u003c/li\u003e\n\u003cli\u003ewe want to move the critical section in terms of expressions; so if you have a loop you should put the loop in the outer area, and do the checking + break within\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eif our \u003ca href=\"#critical-section\"\u003ecritical section\u003c/a\u003es are not small, we would have little benefits to multithreading\u003c/p\u003e\n\u003ch2 id=\"mutex\"\u003emutex\u003c/h2\u003e\n\u003cp\u003eit would be nice if a \u003ca href=\"#critical-section\"\u003ecritical section\u003c/a\u003e can only be executed once; a \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003e can be shared across threads, but can only be \u0026ldquo;owned\u0026rdquo; by a single thread at once.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eimportantly, if multiple \u003ca href=\"#thread\"\u003ethread\u003c/a\u003es are waiting on a mutex, the next thread that\u0026rsquo;s going to get the mutex\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhen there are multiple threads \u003cstrong\u003ewriting\u003c/strong\u003e to a value\u003c/li\u003e\n\u003cli\u003ewhen there is a thread \u003cstrong\u003ewriting\u003c/strong\u003e and one or more threads \u003cstrong\u003ereading\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eif you are no writes, you don\u0026rsquo;t need a mutex\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewhen dealing with \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003e, beware of \u003ca href=\"/posts/kbhdeadlock/\"\u003edeadlock\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eSleep call can happen by putting a sleep call in certain places.\u003c/p\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003cp\u003eThings it needs to do:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etrack whether or not the mutex is locked/unlocked\u003c/li\u003e\n\u003cli\u003etrack which thread is the owner of the lock\u003c/li\u003e\n\u003cli\u003ethreads that want to get this lock\u003c/li\u003e\n\u003c/ol\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e 
\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eQueue\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eIntrGuard\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egrd\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is not locked, just lock it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked, we need to prevent our current\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// thread from going to the ready queue, and push it to the current thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// remember this isn\u0026#39;t an issue even if IntrGuard\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// didn\u0026#39;t yet go out of scope; because it will either\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// land on a context_switch which will enable interrupts for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// or land on the beginning of a threadfunc helper, which\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// is also going to enable interrupts for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// nicely, the interrupts are here are *off* as required because switching\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// to another thread always will result in reenabling (either by new thread,\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// by timer handler, or by IntrGuard)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emark_block_and_call_schedule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eUnlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eIntrGuard\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egrd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked and nobody is waiting for it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eempty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunblock_thread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e());\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we do not switch to the unblocked thread, just add it to the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// ready queue. 
we are entrusting the scheduler to start this thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// whenever we feel right\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"intrguard\"\u003eIntrGuard\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#intrguard\"\u003eIntrGuard\u003c/a\u003e will turn off interrupts for the duration of its scope; when it goes out of scope, it will \u003cstrong\u003erestore the state of the interrupt before\u003c/strong\u003e (whether on or off). 
So, implementing the \u003ca href=\"#mutex\"\u003emutex\u003c/a\u003e code above \u003cstrong\u003ewithout\u003c/strong\u003e InterGuard:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eQueue\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as 
between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// only disable interrupts if they are currently\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// on\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efalse\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is not locked, just lock it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked, we need to prevent our current\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// thread from going to the ready 
queue, and push it to the current thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003emark_block_and_call_schedule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if interrupts was on, turn them on again.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// otherwise, do nothing\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eUnlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ebool\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// only disable interrupts if they are currently\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// on\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efalse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75715e\"\u003e// if our thread is locked and nobody is waiting for it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eempty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunblock_thread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e());\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we do not switch to the unblocked thread, just add it to the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// ready queue. we are entrusting the scheduler to start this thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// whenever we feel right\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterrupsEnabled\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhmultithreading/","tags":null,"title":"multithreading"},{"categories":null,"contents":"mutual information a measure of the dependence of two random 
variables in information theory. Applications include collocation extraction, which would require finding how two words co-occur (which means one would contribute much less entropy than the other.)\nconstituents \\(X, Y\\) random variables \\(D_{KL}\\) KL Divergence function \\(P_{(X,Y)}\\) the joint distribution of \\(X,Y\\) \\(P_{X}, P_{Y}\\) the marginal distributions of \\(X,Y\\) requirements mutual information is defined as\n\\begin{equation} I(X ; Y) = D_{KL}(P_{ (X, Y) } | P_{X} \\otimes P_{Y}) \\end{equation}\n\u0026ldquo;mutual information between \\(X\\) and \\(Y\\) is the additional information contributed by the \u0026quot;\nadditional information ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhmutual_information/\"\u003emutual information\u003c/a\u003e a measure of the dependence of two \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es in \u003ca href=\"/posts/kbhinformation_theory/\"\u003einformation theory\u003c/a\u003e. Applications include \u003ca href=\"/posts/kbhcollocation_extractio/\"\u003ecollocation extraction\u003c/a\u003e, which would require finding how two words co-occur (which means one would contribute much less entropy than the other.)\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X, Y\\) \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\\(D_{KL}\\) \u003ca href=\"/posts/kbhkl_divergence/\"\u003eKL Divergence function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(P_{(X,Y)}\\) the joint distribution of \\(X,Y\\)\u003c/li\u003e\n\u003cli\u003e\\(P_{X}, P_{Y}\\) the marginal distributions of \\(X,Y\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhmutual_information/\"\u003emutual information\u003c/a\u003e is defined as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI(X ; Y) = D_{KL}(P_{ (X, Y) } 
| P_{X} \\otimes P_{Y})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhmutual_information/\"\u003emutual information\u003c/a\u003e between \\(X\\) and \\(Y\\) is the additional information contributed by the \u0026quot;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-05_23-07-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmutual_information/","tags":null,"title":"mutual information"},{"categories":null,"contents":"probability of \u0026ldquo;or\u0026rdquo;\nIf its not possible for two events to happen at the same time, they are called mutually exclusive:\n\\begin{equation} P(E\\ or\\ F) = P(E)+P(F) - P(E \\cap F) \\end{equation}\nThis is called the inclusion exclusion principle. This is what motivates inclusion exclusion counting.\nGeneral inclusion exclusion principle Its scary. Think about this:\nWe basically need to alternate between adding and subtracting. (i.e.: in our case here, we add all the odd-group pairs (for P(x) and P(xyz)), we subtract the even-number pairs (for p(xy))).\nAnd so:\n\\begin{equation} P(E_1\\ or\\ \\dots\\ or\\ E_{n}) = \\sum_{r=1}^{n} (-1)^{r+1} Y_{r} \\end{equation}\nwhereby, \\(Y_{j}\\) is the sum of \\(P(x_n, \u0026hellip; x_{j})\\) for combinations of \\(j\\) events.\nTry not to do this.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \u0026ldquo;or\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eIf its not possible for two events to happen at the same time, they are called \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E\\ or\\ F) = P(E)+P(F) - P(E \\cap F)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is called the \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003einclusion exclusion principle\u003c/a\u003e. 
This is what motivates \u003ca href=\"/posts/kbhsum_rule_of_counting/\"\u003einclusion exclusion counting\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"general-inclusion-exclusion-principle--kbhmutually-exclusive-dot-md\"\u003eGeneral \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003einclusion exclusion principle\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eIts scary. Think about this:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_15-56-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe basically need to alternate between adding and subtracting. (i.e.: in our case here, we add all the odd-group pairs (for P(x) and P(xyz)), we subtract the even-number pairs (for p(xy))).\u003c/p\u003e\n\u003cp\u003eAnd so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E_1\\ or\\ \\dots\\ or\\ E_{n}) = \\sum_{r=1}^{n} (-1)^{r+1} Y_{r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, \\(Y_{j}\\) is the sum of \\(P(x_n, \u0026hellip; x_{j})\\) for \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es of \\(j\\) events.\u003c/p\u003e\n\u003cp\u003eTry not to do this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhmutually_exclusive/","tags":null,"title":"mutually exclusive"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhmy_day/","tags":null,"title":"My Day"},{"categories":null,"contents":"Main goals: assign a probability of each sequence of words existing:\n\\begin{equation} P(W) = P(w_1, \\dots, w_{n}) \\end{equation}\nclosely related is the NLG formulation of predicting an upcoming word:\n\\begin{equation} P(w_5|w_1, \\dots, w_{n}) \\end{equation}\neither of these we call a \u0026ldquo;grammar\u0026rdquo;, or \u0026ldquo;Language Model\u0026rdquo;.\nChain Rule Language Modeling Recall probability chain rule. 
Now, the probability of a sequence like:\n\\begin{equation} P(its\\ water\\ is\\ so\\ transparent) \\end{equation}\ngives:\n\\begin{equation} P(its) \\times P(water|its) \\times P(is | its\\ water) \\dots \\end{equation}\nThat is:\n\\begin{equation} P(w_1 \\dots w_{n}) = \\prod_{i}P(w_{i} | w_1 \\dots w_{i-1}) \\end{equation}\nMarkov Assumption Because we can\u0026rsquo;t make conditional counts over all words all the time, we make an assumption: the probability of the current word is the probability of the current word conditioned on the probability of the last \\(k\\) words.\n\\begin{equation} P(w_1, \\dots, w_{n}) \\approx \\prod_{i}P(w_{i} | w_{i-k} \\dots w_{i-1}) \\end{equation}\nUnigrams The simplest Markov Assumption is unigrams, which will be word salad generation because it has no understanding of language structure.\nNaive Bays Language Modeling You can consider each class in Naive Bayes \\(P(word | c)\\) as a language model.\nSo:\n\\begin{equation} P(sentence|c) = \\prox_{i}P(word_{i}|c) \\end{equation}\nEach class is a separate class-conditioned language model. So, we just want to compute the probability of each sentence, and classify the sentence based on the higher probability result.\nLimitations In general, n gram models are limited because they don\u0026rsquo;t consider long distance dependencies which are present in English.\nEstimating N-Grams Many counts are results of\u0026hellip;\nworld (\u0026ldquo;people want chinese food more often, so want+Chinese appears more\u0026rdquo;) grammar (\u0026ldquo;want+want is less likely\u0026rdquo;) MLE \\begin{equation} P(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})}{C(w_{i-1})} \\end{equation}\nMAP, i.e. Laplace Smoothing \\begin{equation} P(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})+1}{C(w_{i-1})+V} \\end{equation}\nwe have to add \\(V\\) on the denominator because every word could possibly follow \\(w_{i-1}\\). 
Note that as \\(N\\) increases we actually still add \\(V\\) because we are predicting at each time a single word (just conditioned on more words), so if we are smoothing output we are only adding \\(V\\) extra counts.\nIMPORTANT NOTE THOUGH: this is typically not used for N-Grams (because there are simply so many OOS sequences). Instead, its more frequently used in other cases such as Naive Bayes for Text Classification.\nLog Probs In practice, we keep probability as log probabilities after we computed them.\nN-Gram Models Google n-gram models, SRILM\nBackoff Use trigrams if high probability evidence is found, otherwise bigrams or unigrams\nStupid Backoff give the MLE if the conditioning sequence has a non-zero count otherwise, start backing off, recursively calculating the probability of the current word given the last n-1-gram, multplied by a discount factor if we end up with a unigram, just give the unigram probability This DOES NOT PRODUCE A PROBABILITY as it is not normalized. Instead of being probabilites, we consider them \u0026ldquo;scores\u0026rdquo;.\nInterpolation In practice, Interpolation works better. Interpolation smoothes the probability between unigram, bigram, and trigrams.\nMostly simply, we mix them with some factors \\(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}\\), where \\(\\sum_{i} \\lambda_{i} = 1\\). 
This makes a weighted average over probabilities:\n\\begin{equation} P(comb) = \\lambda_{1} P(uni) + \\lambda_{2} P(bi)+ \\lambda_{3} P(tri) \\end{equation}\nlambdas could also be a function of the previous tokens.\nWe sometimes obtain this with a disjoint dataset from the original training set, whereby we train some ngrams from the original dataset, and then identify \\(\\lambda\\) which maximises the probabilities.\nOOV Words we sometimes replace the lowest likelyhood few words with \u0026lt;UNK\u0026gt;, and train models such that we can have an open vocabulary: whenever we encounter unknown words, we replace it with \u0026lt;UNK\u0026gt;\nScaling Up Strategies to make LMing with Ngrams more efficient\npruning: only store ngrams of top k use tries (suffix trees, etc.) approximations: bloom filter storing indicies ","html":"\u003cp\u003eMain goals: assign a probability of each sequence of words existing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(W) = P(w_1, \\dots, w_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eclosely related is the NLG formulation of predicting an upcoming word:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_5|w_1, \\dots, w_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeither of these we call a \u0026ldquo;grammar\u0026rdquo;, or \u0026ldquo;\u003ca href=\"/posts/kbhnlp/#language-model\"\u003eLanguage Model\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"chain-rule-language-modeling\"\u003eChain Rule Language Modeling\u003c/h2\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e. 
Now, the probability of a sequence like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(its\\ water\\ is\\ so\\ transparent)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(its) \\times P(water|its) \\times P(is | its\\ water) \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_1 \\dots w_{n}) = \\prod_{i}P(w_{i} | w_1 \\dots w_{i-1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"markov-assumption\"\u003eMarkov Assumption\u003c/h2\u003e\n\u003cp\u003eBecause we can\u0026rsquo;t make conditional counts over all words all the time, we make an assumption: the probability of the current word is the probability of the current word conditioned on the probability of the last \\(k\\) words.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_1, \\dots, w_{n}) \\approx \\prod_{i}P(w_{i} | w_{i-k} \\dots w_{i-1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"unigrams\"\u003eUnigrams\u003c/h2\u003e\n\u003cp\u003eThe simplest \u003ca href=\"#markov-assumption\"\u003eMarkov Assumption\u003c/a\u003e is unigrams, which will be word salad generation because it has no understanding of language structure.\u003c/p\u003e\n\u003ch3 id=\"naive-bays-language-modeling\"\u003eNaive Bays Language Modeling\u003c/h3\u003e\n\u003cp\u003eYou can consider each class in Naive Bayes \\(P(word | c)\\) as a language model.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(sentence|c) = \\prox_{i}P(word_{i}|c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach class is a separate class-conditioned language model. 
So, we just want to compute the probability of each sentence, and classify the sentence based on the higher probability result.\u003c/p\u003e\n\u003ch2 id=\"limitations\"\u003eLimitations\u003c/h2\u003e\n\u003cp\u003eIn general, n gram models are limited because they don\u0026rsquo;t consider long distance dependencies which are present in English.\u003c/p\u003e\n\u003ch2 id=\"estimating-n-grams--kbhn-grams-dot-md\"\u003eEstimating \u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eMany counts are results of\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eworld (\u0026ldquo;people want chinese food more often, so want+Chinese appears more\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003egrammar (\u0026ldquo;want+want is less likely\u0026rdquo;)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"mle--kbhmaximum-likelihood-parameter-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})}{C(w_{i-1})}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"map-i-dot-e-dot-laplace-smoothing--kbhbaysian-parameter-learning-dot-md\"\u003eMAP, i.e. \u003ca href=\"/posts/kbhbaysian_parameter_learning/#laplace-smoothing\"\u003eLaplace Smoothing\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{i} | w_{i-1}) = \\frac{C(w_{i-1}, w_{i})+1}{C(w_{i-1})+V}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have to add \\(V\\) on the denominator because every word could possibly follow \\(w_{i-1}\\). 
Note that as \\(N\\) increases we actually still add \\(V\\) because we are predicting at each time a \u003cstrong\u003esingle word\u003c/strong\u003e (just conditioned on more words), so if we are smoothing output we are only adding \\(V\\) extra counts.\u003c/p\u003e\n\u003cp\u003eIMPORTANT NOTE THOUGH: this is typically not used for \u003ca href=\"/posts/kbhn_grams/\"\u003eN-Grams\u003c/a\u003e (because there are simply so many OOS sequences). Instead, its more frequently used in other cases such as \u003ca href=\"/posts/kbhbag_of_words/#naive-bayes-for-text-classification\"\u003eNaive Bayes for Text Classification\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"log-probs\"\u003eLog Probs\u003c/h3\u003e\n\u003cp\u003eIn practice, we keep probability as log probabilities after we computed them.\u003c/p\u003e\n\u003ch3 id=\"n-gram-models\"\u003eN-Gram Models\u003c/h3\u003e\n\u003cp\u003eGoogle n-gram models, SRILM\u003c/p\u003e\n\u003ch3 id=\"backoff\"\u003eBackoff\u003c/h3\u003e\n\u003cp\u003eUse trigrams if high probability evidence is found, otherwise bigrams or unigrams\u003c/p\u003e\n\u003ch4 id=\"stupid-backoff\"\u003eStupid Backoff\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003egive the \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e if the conditioning sequence has a non-zero count\u003c/li\u003e\n\u003cli\u003eotherwise, start backing off, recursively calculating the probability of the current word given the last n-1-gram, multplied by a discount factor\u003c/li\u003e\n\u003cli\u003eif we end up with a unigram, just give the unigram probability\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis \u003cstrong\u003eDOES NOT PRODUCE A PROBABILITY\u003c/strong\u003e as it is not normalized. 
Instead of being probabilites, we consider them \u0026ldquo;scores\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"interpolation\"\u003eInterpolation\u003c/h3\u003e\n\u003cp\u003eIn practice, \u003ca href=\"#interpolation\"\u003eInterpolation\u003c/a\u003e works better. \u003ca href=\"#interpolation\"\u003eInterpolation\u003c/a\u003e smoothes the probability between unigram, bigram, and trigrams.\u003c/p\u003e\n\u003cp\u003eMostly simply, we mix them with some factors \\(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}\\), where \\(\\sum_{i} \\lambda_{i} = 1\\). This makes a weighted average over probabilities:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(comb) = \\lambda_{1} P(uni) + \\lambda_{2} P(bi)+ \\lambda_{3} P(tri)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elambdas could also be a function of the previous tokens.\u003c/p\u003e\n\u003cp\u003eWe sometimes obtain this with a disjoint dataset from the original training set, whereby we train some ngrams from the original dataset, and then identify \\(\\lambda\\) which maximises the probabilities.\u003c/p\u003e\n\u003ch3 id=\"oov-words\"\u003eOOV Words\u003c/h3\u003e\n\u003cp\u003ewe sometimes replace the lowest likelyhood few words with \u003ccode\u003e\u0026lt;UNK\u0026gt;\u003c/code\u003e, and train models such that we can have an \u003ca href=\"#oov-words\"\u003eopen vocabulary\u003c/a\u003e: whenever we encounter unknown words, we replace it with \u003ccode\u003e\u0026lt;UNK\u0026gt;\u003c/code\u003e\u003c/p\u003e\n\u003ch2 id=\"scaling-up\"\u003eScaling Up\u003c/h2\u003e\n\u003cp\u003eStrategies to make LMing with Ngrams more efficient\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003epruning: only store ngrams of top k\u003c/li\u003e\n\u003cli\u003euse tries (suffix trees, etc.)\u003c/li\u003e\n\u003cli\u003eapproximations: bloom filter\u003c/li\u003e\n\u003cli\u003estoring 
indicies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhn_grams/","tags":null,"title":"N-Grams"},{"categories":null,"contents":"NACC is a large, longitudinal dataset for neurodegentitive disease as a project in collaboration with Dr. Alyssa Weakley at UC Davis.\nDr. Alyssa Weakley is interested in\nEarly Cognitive Change Mild Cognitive Impairment (MCI) \u0026ldquo;How early can we detect, using NACC, change?\u0026rdquo;\ndataset construction Participants are given a battery of mental capacity tests, these values are tracked over time There are also family member questionnaire Neuroimaging and biomarker data Other things tracked in the data\u0026mdash;\nAmyloid levels of spinal fluid Detecting even earlier focus good to focus on specifically alzheimer\u0026rsquo;s type dementia (so, ignore things on lewy body disease) Using clinical diagnosis as the dependent variable, but good to see the autopsy results Items 3 and 7 are independent codes; if alzhimer\u0026rsquo;s is measured, MCI is not measured. visa versa.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnacc/\"\u003eNACC\u003c/a\u003e is a large, longitudinal dataset for neurodegentitive disease as a project in collaboration with \u003ca href=\"\"\u003eDr. Alyssa Weakley\u003c/a\u003e at \u003ca href=\"\"\u003eUC Davis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"\"\u003eDr. 
Alyssa Weakley\u003c/a\u003e is interested in\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eEarly Cognitive Change\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eMild Cognitive Impairment (MCI)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;How early can we detect, using \u003ca href=\"/posts/kbhnacc/\"\u003eNACC\u003c/a\u003e, change?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"dataset-construction\"\u003edataset construction\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eParticipants are given a battery of mental capacity tests, these values are tracked over time\u003c/li\u003e\n\u003cli\u003eThere are also family member questionnaire\u003c/li\u003e\n\u003cli\u003eNeuroimaging and biomarker data\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOther things tracked in the data\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAmyloid levels of spinal fluid\u003c/li\u003e\n\u003cli\u003eDetecting even earlier\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"focus\"\u003efocus\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003egood to focus on specifically \u003cem\u003ealzheimer\u0026rsquo;s type dementia\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e(so, ignore things on lewy body disease)\u003c/li\u003e\n\u003cli\u003eUsing clinical diagnosis as the dependent variable, but good to see the autopsy results\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eItems 3 and 7 are independent codes; if alzhimer\u0026rsquo;s is measured, MCI is not measured. visa versa.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnacc/","tags":null,"title":"NACC"},{"categories":null,"contents":"Naive Bayes is a special class of Baysian Network inference problem which follows a specific structure used to solve classification problems.\nThe Naive Bayes classifier is a Baysian Network of the shape:\n(Why is this backwards(ish)? 
Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. Therefore, the observations are a RESULT of the class happening.)\nWe consider, naively, \\(o_{1:n}\\) are all conditionally independent on \\(c\\). From this graph, we can therefore use the probability chain rule + conditional probability to write that:\n\\begin{equation} P(c, o_{1:n}) = P( c) \\prod_{i=1}^{n} P(o_{i} | c) \\end{equation}\nso, to actually compute this, we don\u0026rsquo;t want to bother going over all the multiplications because of underflow, we write:\n\\begin{equation} \\hat{y} = \\arg\\max_{y} \\log \\hat{P}(y) + \\sum_{i=1}^{m} \\log \\hat{P}(x|y) \\end{equation}\nbrute-force Bayes classifier \\begin{equation} \\arg\\max_{y} P(y|x) = \\arg\\max_{y} \\frac{P(x|y)P(y)}{P(x)} \\end{equation}\nbut because we are taking argmax, we can not normalize:\n\\begin{equation} \\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x|y)P(y) \\end{equation}\nthis only works if \\(x\\) is a single value (i.e. you have a one-feature classifier\nThis system has 6 parameters; they can be MLE for Bernouli from data, but you can also use Baysian Parameter Learning Method\ny = 0 y = 1 x1 = 0 theta0 theta2 x1 = 1 theta1 theta3 y = 0 y = 0 theta4 y = 1 theta5 (=1-theta4) to perform estiimation with MAP\n\\begin{equation} p(X=1| Y=0) = \\frac{\\text{examples where X=1, Y=0}}{\\text{examples where Y=0}} \\end{equation}\nwhith MLE with a Laplace prior:\n\\begin{equation} p(X=1| Y=0) = \\frac{\\text{(examples where X=1, Y=0)}+1}{\\text{(examples where Y=0)}+\\text{(nclass = 2)}} \\end{equation}\nWe can keep going; for instance, if you wave \\(x_1, x_2\\) two diffferent features:\n\\begin{equation} \\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x_1, x_2|y)P(y) \\end{equation}\nbut this requires us to have \\(2^{2}\\) and ultimately \\(2^{n}\\) parameters, which is exponential blowup. 
Hence, we need to treat the variables as\u0026mdash;naivly\u0026mdash;independent so we can multiply them. Hence:\nNaive Bayes assumption we assume independence between the input features. That is, we assume:\n\\begin{equation} P(x_1, \\dots, x_{n}|y) = \\prod_{i=1}^{n} P(X_{i}|y) \\end{equation}\ninference with Naive Bayes Recall the definition of inference, for our case here:\ngiven observations \\(o_{1:n}\\), we desire to know what\u0026rsquo;s the probability of \\(c\\) happening. That is, from conditional probability:\n\\begin{equation} P(c | o_{1:n}) = \\frac{P(c, o_{1:n})}{P(o_{1:n})} \\end{equation}\nNow, from above we have \\(P(c, o_{1:n})\\) already. To get the denominator, we invoke law of total probability to add up the probability of all observations occurring given all classes. That is:\n\\begin{equation} P(o_{1:n}) = \\sum_{c \\in C} P(c, o_{1:n}) \\end{equation}\nYou will note that this value \\(P(o_{1:n})\\) is actually constant as long as the network structure does not change. 
Therefore, we tend to write:\n\\begin{align} P(c | o_{1:n}) \u0026amp;= \\frac{P(c, o_{1:n})}{P(o_{1:n})} \\\\ \u0026amp;= \\kappa P(c, o_{1:n}) \\end{align}\nor, that:\n\\begin{equation} P(c|o_{1:n}) \\propto P(c, o_{1:n}) \\end{equation}\n\u0026ldquo;the probability of a class occurring given the inputs is proportional to the probability of that class occurring along with the inputs\u0026rdquo;\nMultiple believes \\begin{equation} P(A=a | R_1) \\propto P(R_1 | A=a) \\cdot P(A=a) \\end{equation}\nBut now\nMotivation: Bayes rule This will give us:\nHowever, what if we don\u0026rsquo;t want to use the law of total probability to add up \\(P(FB\u0026rsquo;)\\)?\nWe can actually write a relation that essentially reminds us that the fact that \\(P(FB\u0026rsquo;)\\) as not dependent on \\(TSF\\), so we can write:\n\\begin{equation} P(TSF^{1}|FB^{1}) \\porpto P(TSF^{1})P(FB^{1} | TSF^{1}) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e is a special class of \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e problem which follows a specific structure used to solve classification problems.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e classifier is a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e of the shape:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_13-15-54_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(Why is this backwards(ish)? Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. 
Therefore, the observations are a RESULT of the class happening.)\u003c/p\u003e\n\u003cp\u003eWe consider, \u003cstrong\u003enaively\u003c/strong\u003e, \\(o_{1:n}\\) are all \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditionally independent\u003c/a\u003e on \\(c\\). From this graph, we can therefore use the \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e + \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e to write that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(c, o_{1:n}) = P( c) \\prod_{i=1}^{n} P(o_{i} | c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso, to actually compute this, we don\u0026rsquo;t want to bother going over all the multiplications because of underflow, we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{y} = \\arg\\max_{y} \\log \\hat{P}(y) + \\sum_{i=1}^{m} \\log \\hat{P}(x|y)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"brute-force-bayes-classifier\"\u003ebrute-force Bayes classifier\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{y} P(y|x) = \\arg\\max_{y} \\frac{P(x|y)P(y)}{P(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebut because we are taking argmax, we can not normalize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x|y)P(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis only works if \\(x\\) is a single \u003cstrong\u003evalue\u003c/strong\u003e (i.e. 
you have a one-feature classifier\u003c/p\u003e\n\u003cp\u003eThis system has 6 parameters; they can be \u003ca href=\"/posts/kbhbernoulli_random_variable/#mle-for-bernouli\"\u003eMLE for Bernouli\u003c/a\u003e from data, but you can also use \u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/#method\"\u003eBaysian Parameter Learning Method\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003ey = 0\u003c/th\u003e\n\u003cth\u003ey = 1\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ex1 = 0\u003c/td\u003e\n\u003ctd\u003etheta0\u003c/td\u003e\n\u003ctd\u003etheta2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ex1 = 1\u003c/td\u003e\n\u003ctd\u003etheta1\u003c/td\u003e\n\u003ctd\u003etheta3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003ey = 0\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ey = 0\u003c/td\u003e\n\u003ctd\u003etheta4\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ey = 1\u003c/td\u003e\n\u003ctd\u003etheta5 (=1-theta4)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eto perform estiimation with MAP\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(X=1| Y=0) = \\frac{\\text{examples where X=1, Y=0}}{\\text{examples where Y=0}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhith MLE with a \u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/#map-for-bernoulli-and-binomial-p\"\u003eLaplace prior\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(X=1| Y=0) = \\frac{\\text{(examples where X=1, Y=0)}+1}{\\text{(examples where Y=0)}+\\text{(nclass = 2)}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can keep going; for instance, if you wave \\(x_1, x_2\\) two diffferent 
features:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{y} P(y|x) = \\arg\\max_{y} P(x_1, x_2|y)P(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebut this requires us to have \\(2^{2}\\) and ultimately \\(2^{n}\\) parameters, which is exponential blowup. Hence, we need to treat the variables as\u0026mdash;naivly\u0026mdash;independent so we can multiply them. Hence:\u003c/p\u003e\n\u003ch2 id=\"naive-bayes--kbhnaive-bayes-dot-md--assumption\"\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e assumption\u003c/h2\u003e\n\u003cp\u003ewe assume \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e between the input features. That is, we assume:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_1, \\dots, x_{n}|y) = \\prod_{i=1}^{n} P(X_{i}|y)\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-17_16-35-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"inference--kbhinference-dot-md--with-naive-bayes--kbhnaive-bayes-dot-md\"\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e with \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eRecall the definition of \u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e, for our case here:\u003c/p\u003e\n\u003cp\u003egiven observations \\(o_{1:n}\\), we desire to know what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of \\(c\\) happening. That is, from \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(c | o_{1:n}) = \\frac{P(c, o_{1:n})}{P(o_{1:n})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, from above we have \\(P(c, o_{1:n})\\) already. 
To get the denominator, we invoke \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to add up the probability of all observations occurring given all classes. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(o_{1:n}) = \\sum_{c \\in C} P(c, o_{1:n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that this value \\(P(o_{1:n})\\) is actually constant as long as the network structure does not change. Therefore, we tend to write:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(c | o_{1:n}) \u0026amp;= \\frac{P(c, o_{1:n})}{P(o_{1:n})} \\\\\n\u0026amp;= \\kappa P(c, o_{1:n})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eor, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(c|o_{1:n}) \\propto P(c, o_{1:n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the probability of a class occurring given the inputs is proportional to the probability of that class occurring along with the inputs\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"multiple-believes\"\u003eMultiple believes\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nP(A=a | R_1) \\propto P(R_1 | A=a) \\cdot P(A=a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBut now\u003c/p\u003e\n\u003ch2 id=\"motivation-bayes-rule--kbhbayes-theorem-dot-md\"\u003eMotivation: \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_09-14-17_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis will give us:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-05_09-14-32_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eHowever, what if we don\u0026rsquo;t want to use the \u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e to add up \\(P(FB\u0026rsquo;)\\)?\u003c/p\u003e\n\u003cp\u003eWe can actually write a relation that essentially reminds us that the fact that 
\\(P(FB\u0026rsquo;)\\) as not dependent on \\(TSF\\), so we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(TSF^{1}|FB^{1}) \\porpto P(TSF^{1})P(FB^{1} | TSF^{1})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnaive_bayes/","tags":null,"title":"Naive Bayes"},{"categories":null,"contents":"The National Banking Act unified Financial Markets.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnational_banking_act/\"\u003eNational Banking Act\u003c/a\u003e unified \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Markets\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnational_banking_act/","tags":null,"title":"National Banking Act"},{"categories":null,"contents":"natural numbers (\\(\\mathbb{N}\\)) are the counting numbers: 1,2,3,4\u0026hellip;.\nZero is not part of it; this produces interesting results like set of natural number under addition is not a group because there is no identity (tbh nor inverse (inverse of 1 is -1 which is not in the set.))\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003es (\\(\\mathbb{N}\\)) are the counting numbers: 1,2,3,4\u0026hellip;.\u003c/p\u003e\n\u003cp\u003eZero is not part of it; this produces interesting results like set of \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003e under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is not a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e because there is no \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e (tbh nor \u003ca href=\"/posts/kbhinverses/\"\u003einverse\u003c/a\u003e (inverse of 1 is -1 which is not in the set.))\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnatural_numbers/","tags":null,"title":"natural number"},{"categories":null,"contents":"The nsm theory is a theory that\u0026hellip;\nclaims that there exists a set of semantic primes and logic 
universal across languages which is indefinable by other words within the language which, as a corollary, resolves the epistemological problem that if all words are defined by other words in the language there will (why?) be no connection to the real world the theory of NSM rests on\u0026hellip;\ntwo pillars of NSM theory existence of semantic primes The existence of semantic primes is codified more formally as the strong version of the Lexicalization Hypothesis.\nIssues with it: problems with semantic primes\nthe ability to perform the act of reductive paraphrase Issues with that: problems with reductive paraphrasing\noh cool! (Bohnemeyer 2004)\nAlso the fact that NSM is first found in English means that there is a certain anglo-centrism that comes with the language.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003ensm\u003c/a\u003e theory is a theory that\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eclaims that there exists a set of semantic primes and logic universal across languages which is indefinable by other words within the language\u003c/li\u003e\n\u003cli\u003ewhich, as a corollary, resolves the epistemological problem that if all words are defined by other words in the language there will (why?) 
be no connection to the real world\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethe theory of \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e rests on\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"two-pillars-of-nsm--kbhnatural-semantic-metalanguage-dot-md--theory\"\u003etwo pillars of \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e theory\u003c/h2\u003e\n\u003ch3 id=\"existence-of-semantic-primes--kbhsemantic-primes-dot-md\"\u003eexistence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic primes\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe existence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es is codified more formally as the strong version of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalization Hypothesis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIssues with it: \u003ca href=\"/posts/kbhsemantic_primes/#problems-with-id-4e587814-bfd1-458e-ba5f-67882832107b-semantic-prime-s\"\u003eproblems with semantic primes\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"the-ability-to-perform-the-act-of-reductive-paraphrase--kbhreductive-paraphrase-dot-md\"\u003ethe ability to perform the act of \u003ca href=\"/posts/kbhreductive_paraphrase/\"\u003ereductive paraphrase\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eIssues with that: \u003ca href=\"/posts/kbhreductive_paraphrase/#problems-with-reductive-paraphrasing\"\u003eproblems with reductive paraphrasing\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"oh-cool\"\u003eoh cool!\u003c/h2\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eBohnemeyer 2004\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eAlso the fact that \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e is first found in English means that there is a certain anglo-centrism that comes with the 
language.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnatural_semantic_metalanguage/","tags":null,"title":"Natural Semantic Metalanguage"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnatural_transformations/","tags":null,"title":"natural transformation"},{"categories":null,"contents":"NBBO is the composite best bid/ask nationally, across all of the exchanges.\nIt allows the average person on the street to get a price for the asset. The government system is actually SLOWER from the fastest exchange: you can know, within microseconds, the difference.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnbbo/\"\u003eNBBO\u003c/a\u003e is the composite best bid/ask nationally, across all of the exchanges.\u003c/p\u003e\n\u003cp\u003eIt allows the average person on the street to get a price for the asset. The government system is actually SLOWER from the fastest exchange: you can know, within microseconds, the difference.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnbbo/","tags":null,"title":"NBBO"},{"categories":null,"contents":"Framework for subsurface exploration: a DARPA challenge that explores unknown underground environments.\nMain Problem: there is a high degree of uncertainty that comes from multiple different systems interacting:\nsensing environment command execution communication mission state health of systems and subsystems NeBula treats uncertainty between systems via a POMDP:\nconstruct a simulation of the tasks to coordinate robots solved using Double Progressive Widening AISR NeBula NeBula autonomy framework extrapolation on an active source seeking. 
For instance, combining with semantic understanding, we want to \u0026ldquo;find the red backpack\u0026rdquo;.\nmulti-model semantic understanding learning based mobility (vis a vi NeBula) semantic aware source seeking (\u0026ldquo;finding the thing there\u0026rdquo;) ","html":"\u003cp\u003eFramework for subsurface exploration: a DARPA challenge that explores unknown underground environments.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMain Problem\u003c/strong\u003e\u003c/strong\u003e: there is a high degree of uncertainty that comes from multiple different systems interacting:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esensing\u003c/li\u003e\n\u003cli\u003eenvironment\u003c/li\u003e\n\u003cli\u003ecommand execution\u003c/li\u003e\n\u003cli\u003ecommunication\u003c/li\u003e\n\u003cli\u003emission state\u003c/li\u003e\n\u003cli\u003ehealth of systems and subsystems\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e treats uncertainty between systems via a POMDP:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003econstruct a simulation of the tasks to coordinate robots\u003c/li\u003e\n\u003cli\u003esolved using \u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDouble Progressive Widening\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"aisr-nebula--kbhnebula-dot-md\"\u003eAISR \u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e autonomy framework extrapolation on an active source seeking. 
For instance, combining with semantic understanding, we want to \u0026ldquo;find the red backpack\u0026rdquo;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emulti-model semantic understanding\u003c/li\u003e\n\u003cli\u003elearning based mobility (vis a vi \u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003esemantic aware source seeking (\u0026ldquo;finding the thing there\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnebula/","tags":null,"title":"NeBula"},{"categories":null,"contents":"needfinding as a process of finding need.\nneedfinding with Rick Wallace needfinding with Rick Wallace. You don\u0026rsquo;t find out what they need, but you find what they need and how to fix it. (duh?)\nneedfinding with Cynthia Lee \u0026ldquo;Any time your curse, write down what just went wrong. How to fix it is the path to your next startup idea.\u0026rdquo;\nmonoculture workforce People who do the above will\u0026hellip; result in creating many products serving the segment of market matching software.\nHP Webcam story: the HP laptop face tracking software doesn\u0026rsquo;t follow Black faces as well as white ones.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e as a process of finding need.\u003c/p\u003e\n\u003ch2 id=\"needfinding--kbhneedfinding-dot-md--with-rick-wallace--kbhrick-wallace-dot-md\"\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e with \u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e with \u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e. You don\u0026rsquo;t find out what they need, but you find what they need and how to fix it. 
(duh?)\u003c/p\u003e\n\u003ch2 id=\"needfinding--kbhneedfinding-dot-md--with-cynthia-lee--kbhcynthia-lee-dot-md\"\u003e\u003ca href=\"/posts/kbhneedfinding/\"\u003eneedfinding\u003c/a\u003e with \u003ca href=\"/posts/kbhcynthia_lee/\"\u003eCynthia Lee\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Any time your curse, write down what just went wrong. How to fix it is the path to your next startup idea.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"monoculture-workforce\"\u003emonoculture workforce\u003c/h3\u003e\n\u003cp\u003ePeople who do the above will\u0026hellip; result in creating many products serving the segment of market matching software.\u003c/p\u003e\n\u003cp\u003eHP Webcam story: the HP laptop face tracking software doesn\u0026rsquo;t follow Black faces as well as white ones.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneedfinding/","tags":null,"title":"needfinding"},{"categories":null,"contents":"how many trials do you need to get r successes.\n\\begin{equation} P(X=n) = {{n-1} \\choose {r-1}} p^{r} (1-p)^{n-r} \\end{equation}\nif the chance of individual success is \\(p\\), what\u0026rsquo;s the probability that it takes \\(n\\) trials to get \\(r\\) successes.\n\\begin{equation} \\mathbb{E}[x] = \\frac{r}{p} \\end{equation}\n\\begin{equation} Var[x] = r \\frac{{1-p}}{r^{2}} \\end{equation}\n","html":"\u003cp\u003ehow many trials do you need to get r successes.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=n) = {{n-1} \\choose {r-1}} p^{r} (1-p)^{n-r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif the chance of individual success is \\(p\\), what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e that it takes \\(n\\) trials to get \\(r\\) successes.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[x] = \\frac{r}{p}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar[x] = r 
\\frac{{1-p}}{r^{2}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnegative_binomial_distribution/","tags":null,"title":"negative binomial distribution"},{"categories":null,"contents":"Neoclassical Economics is a view of economics that disregards the Keynsian Politics theory of the economy needs a minder started by Milton Freedman. It believes that free market economy will prevail.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhneoclassical_economics/\"\u003eNeoclassical Economics\u003c/a\u003e is a view of economics that disregards the \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Politics\u003c/a\u003e theory of the economy needs a minder started by \u003ca href=\"/posts/kbhmilton_freedman/\"\u003eMilton Freedman\u003c/a\u003e. It believes that free market economy will prevail.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneoclassical_economics/","tags":null,"title":"Neoclassical Economics"},{"categories":null,"contents":"while POS Tagging assigns tags to each word, NER Tagging tags the category of usage of multi-word spans.\nNER Tagging needs to label spans of text, which means that there is ambiguity in type.\nBIO Tagging BIO Tagging will tag each word: where \\(B\\) begins a span, \\(I\\), is inside a span, and \\(O\\) outside a span. 
So tags per word still apply, but we can extract span information as well.\n(job - gender + gender ) = job (captial - country + country) = captial\n","html":"\u003cp\u003ewhile \u003ca href=\"/posts/kbhpos_tagging/\"\u003ePOS Tagging\u003c/a\u003e assigns tags to each word, \u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e tags the category of usage of multi-word spans.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhner_tagging/\"\u003eNER Tagging\u003c/a\u003e needs to label \u003cstrong\u003espans\u003c/strong\u003e of text, which means that there is ambiguity in type.\u003c/p\u003e\n\u003ch2 id=\"bio-tagging\"\u003eBIO Tagging\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#bio-tagging\"\u003eBIO Tagging\u003c/a\u003e will tag each word: where \\(B\\) begins a span, \\(I\\), is inside a span, and \\(O\\) outside a span. So tags per word still apply, but we can extract span information as well.\u003c/p\u003e\n\u003cp\u003e(job - gender + gender ) = job\n(captial - country + country) = captial\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhner_tagging/","tags":null,"title":"NER Tagging"},{"categories":null,"contents":"Neural Network Unit A real-valued vector as input, each multiplied by some weights, summed, and squashed by some non-linear transform.\n\\begin{equation} z = w\\cdot x + b \\end{equation}\nand then, we will squash this using it as an \u0026ldquo;activation\u0026rdquo;\n\\begin{equation} y = \\sigmoid(z) \\end{equation}\nOne common activation is sigmoid. So, one common formulation would be:\n\\begin{equation} y = \\frac{1}{1+\\exp (- (w \\cdot x + b))} \\end{equation}\nTanh \\begin{equation} y(z) = \\frac{e^{z} - e^{-z}}{e^{z}+e^{-z}} \\end{equation}\nThis causes \u0026ldquo;saturation\u0026rdquo;\u0026mdash;meaning derivatives to be \\(0\\) at high values\nrelu \\begin{equation} y(z) = \\max(z,0) \\end{equation}\nmulti-layer networks Single computing units can\u0026rsquo;t compute XOR. 
Consider a perceptron:\n\\begin{equation} w_1x_1 + w_2x_2 + b = 0 \\end{equation}\nmeaning:\n\\begin{equation} x_2 = \\qty(\\frac{-w_1}{w_2})x_1 + \\qty(\\frac{-b}{w_2}) \\end{equation}\nmeaning, obtain a line that acts as a decision boundary\u0026mdash;we obtain 0 if the input is on one side of the line, and 1 if on the other. XOR, unfortunately, does not have a single linear boundary, its not linearly seperable.\nlogistic regression, for instance, can\u0026rsquo;t compute XOR because it is linear until squashing.\nfeed-forward network we can think about logistic regression as a one layer network, generalizing over sigmoid:\n\\begin{equation} \\text{softmax} = \\frac{\\exp(z_{i})}{\\sum_{j=1}^{k} \\exp(z_{j})} \\end{equation}\nand a multinomial logistic regression which uses the above. This is considered a \u0026ldquo;layer\u0026rdquo; in the feed-forward network.\nnotation:\n\\(W^{(j)}\\), weight matrix for layer \\(j\\) \\(b^{(j)}\\), the bias vector for layer \\(j\\) \\(g^{(j)}\\), the activation function at \\(j\\) and \\(z^{(i)}\\), the output at \\(i\\) (before activation function) \\(a^{(i)}\\), the activation at \\(i\\) instead of bias, we sometimes add a dummy node \\(a_{0}\\), we will force a value \\(1\\) at \\(a_{0}\\) and use its weights as bias.\nembeddings We use vector-space model to feed words into networks: converting each word first into embeddings, then feeding it into the network\nFix length problems:\nsentence embedding (mean of all the embeddings) element wise max of all the word embeddings to create sentence embedding use the max length + pad For Language Models, we can use a \u0026ldquo;sliding window\u0026rdquo;; that is:\n\\begin{equation} P(w_{t}|w_{1 \\dots t-1}) \\approx P(w_{t} | w_{t-N+1 \\dots t-1}) \\end{equation}\nTraining For every tuple \\((x,y)\\), we run a forward pass to obtain \\(\\hat{y}\\). 
Then, we run the network backwards to update the weights.\nA loss function calculates the negative of the probability of the correct labels.\nbackpropegation backprop\n","html":"\u003ch2 id=\"neural-network-unit\"\u003eNeural Network Unit\u003c/h2\u003e\n\u003cp\u003eA real-valued vector as input, each multiplied by some weights, summed, and squashed by some non-linear transform.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = w\\cdot x + b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, we will squash this using it as an \u0026ldquo;activation\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\sigmoid(z)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOne common activation is \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e. So, one common formulation would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{1}{1+\\exp (- (w \\cdot x + b))}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"tanh\"\u003eTanh\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ny(z) = \\frac{e^{z} - e^{-z}}{e^{z}+e^{-z}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis causes \u0026ldquo;saturation\u0026rdquo;\u0026mdash;meaning derivatives to be \\(0\\) at high values\u003c/p\u003e\n\u003ch2 id=\"relu\"\u003erelu\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\ny(z) = \\max(z,0)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"multi-layer-networks\"\u003emulti-layer networks\u003c/h2\u003e\n\u003cp\u003eSingle computing units can\u0026rsquo;t compute XOR. Consider a perceptron:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw_1x_1 + w_2x_2 + b = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_2 = \\qty(\\frac{-w_1}{w_2})x_1 + \\qty(\\frac{-b}{w_2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, obtain a line that acts as a \u003cstrong\u003edecision boundary\u003c/strong\u003e\u0026mdash;we obtain 0 if the input is on one side of the line, and 1 if on the other. 
XOR, unfortunately, does not have a single linear boundary, its not \u003cstrong\u003elinearly \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e, for instance, can\u0026rsquo;t compute XOR because it is linear until squashing.\u003c/p\u003e\n\u003ch2 id=\"feed-forward-network\"\u003efeed-forward network\u003c/h2\u003e\n\u003cp\u003ewe can think about \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e as a one layer network, generalizing over \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\text{softmax} = \\frac{\\exp(z_{i})}{\\sum_{j=1}^{k} \\exp(z_{j})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand a multinomial \u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e which uses the above. This is considered a \u0026ldquo;layer\u0026rdquo; in the \u003ca href=\"#feed-forward-network\"\u003efeed-forward network\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003enotation:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(W^{(j)}\\), weight matrix for layer \\(j\\)\u003c/li\u003e\n\u003cli\u003e\\(b^{(j)}\\), the bias vector for layer \\(j\\)\u003c/li\u003e\n\u003cli\u003e\\(g^{(j)}\\), the activation function at \\(j\\)\u003c/li\u003e\n\u003cli\u003eand \\(z^{(i)}\\), the output at \\(i\\) (before activation function)\u003c/li\u003e\n\u003cli\u003e\\(a^{(i)}\\), the activation at \\(i\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003einstead of bias, we sometimes add a dummy node \\(a_{0}\\), we will force a value \\(1\\) at \\(a_{0}\\) and use its weights as bias.\u003c/p\u003e\n\u003ch3 id=\"embeddings\"\u003eembeddings\u003c/h3\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhranked_information_retrieval/#vector-space-model\"\u003evector-space model\u003c/a\u003e to feed words into 
networks: converting each word first into embeddings, then feeding it into the network\u003c/p\u003e\n\u003cp\u003eFix length problems:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esentence embedding (mean of all the embeddings)\u003c/li\u003e\n\u003cli\u003eelement wise max of all the word embeddings to create sentence embedding\u003c/li\u003e\n\u003cli\u003euse the max length + pad\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhnlp/#language-model\"\u003eLanguage Model\u003c/a\u003es, we can use a \u0026ldquo;sliding window\u0026rdquo;; that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(w_{t}|w_{1 \\dots t-1}) \\approx P(w_{t} | w_{t-N+1 \\dots t-1})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"training\"\u003eTraining\u003c/h2\u003e\n\u003cp\u003eFor every tuple \\((x,y)\\), we run a forward pass to obtain \\(\\hat{y}\\). Then, we run the network backwards to update the weights.\u003c/p\u003e\n\u003cp\u003eA loss function calculates the negative of the probability of the correct labels.\u003c/p\u003e\n\u003ch3 id=\"backpropegation--kbhdeep-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhdeep_learning/#backpropegation\"\u003ebackpropegation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdeep_learning/#backpropegation\"\u003ebackprop\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneural_networks/","tags":null,"title":"Neural Networks"},{"categories":null,"contents":"scene representation\nartificial vs biological intelligence Humans are few-shot learners (\u0026ldquo;sample efficiency\u0026rdquo;)\nHumans can easily fine-tunable (\u0026ldquo;transfer flexibility\u0026rdquo;)\nHuman knowledge can transfer easily\nAI are many-shot learners (\u0026ldquo;sample inefficiency\u0026rdquo;)\nAI are specialized\nAI is more precise, and can hold a lot in cache\nbiological learning biological learning is mostly unsupervised, and yte can generalize\nvisual processing 
","html":"\u003cp\u003e\u003ca href=\"/posts/kbhscene_representation/\"\u003escene representation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"artificial-vs-biological-intelligence\"\u003eartificial vs biological intelligence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eHumans are few-shot learners (\u0026ldquo;sample efficiency\u0026rdquo;)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHumans can easily fine-tunable (\u0026ldquo;transfer flexibility\u0026rdquo;)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHuman knowledge can transfer easily\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAI are many-shot learners (\u0026ldquo;sample inefficiency\u0026rdquo;)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAI are specialized\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAI is more precise, and can hold a lot in cache\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"biological-learning\"\u003ebiological learning\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#biological-learning\"\u003ebiological learning\u003c/a\u003e is mostly unsupervised, and yte can generalize\u003c/p\u003e\n\u003ch2 id=\"visual-processing\"\u003evisual processing\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneuroscience_and_ai/","tags":null,"title":"Neuroscience and AI"},{"categories":null,"contents":"a neutral stability ( mar ) condition in Differential Equations means that a function is neither stable nor unstable: it does not\nSee: https://en.wikipedia.org/wiki/Marginal_stability\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhneutral_stability/\"\u003eneutral stability\u003c/a\u003e ( mar ) condition in \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e means that a function is neither \u003ca href=\"/posts/kbhnon_linear_systems/#stable\"\u003estable\u003c/a\u003e nor unstable: it does not\u003c/p\u003e\n\u003cp\u003eSee: \u003ca 
href=\"https://en.wikipedia.org/wiki/Marginal_stability\"\u003ehttps://en.wikipedia.org/wiki/Marginal_stability\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhneutral_stability/","tags":null,"title":"neutral stability"},{"categories":null,"contents":"Election between Hayes vs Tildon was very close. Democrats gave Republicans Hayes, but then asked the Republican millitary to leave the South and hence they have no way of enforcing the rights.\nRedeemer Governments Democrats put in systems to relegate African Americans to second-class citizenship into the south. Lynchings became the weapon of choice of enforcing Jim Crow.\nWithin 20 years, Jim Crow became implemented by every state 1896 Plessy vs Ferguson upholding the process of segregation Convict leasing: convicts\u0026rsquo; labour was leased to create infrastructure Economic transformation: put in sharecropping (crops in lieu or in addition to rent) and convict leasing. This is essentially modern slavery because debt is used as a process to enslave people as they will never actually be paid enough to pay back debt.\nPush for Civil Rights \u0026ldquo;Booker T. Washington\u0026rdquo;: help promote Southern society will gain equality. Founded the \u0026ldquo;Tuskegee Institute\u0026rdquo;.\n\u0026ldquo;W.E.B. Dubois\u0026rdquo;: make the most talented and artistic people push for civil rights. \u0026ldquo;Civil rights by copyright.\u0026rdquo;\n","html":"\u003cp\u003eElection between Hayes vs Tildon was very close. Democrats gave Republicans Hayes, but then asked the Republican millitary to leave the South and hence they have no way of enforcing the rights.\u003c/p\u003e\n\u003ch2 id=\"redeemer-governments\"\u003eRedeemer Governments\u003c/h2\u003e\n\u003cp\u003eDemocrats put in systems to relegate African Americans to second-class citizenship into the south. 
Lynchings became the weapon of choice of enforcing Jim Crow.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWithin 20 years, Jim Crow became implemented by every state\u003c/li\u003e\n\u003cli\u003e1896 Plessy vs Ferguson upholding the process of segregation\u003c/li\u003e\n\u003cli\u003eConvict leasing: convicts\u0026rsquo; labour was leased to create infrastructure\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eEconomic transformation: put in sharecropping (crops in lieu or in addition to rent) and convict leasing. This is essentially modern slavery because debt is used as a process to enslave people as they will never actually be paid enough to pay back debt.\u003c/p\u003e\n\u003ch2 id=\"push-for-civil-rights\"\u003ePush for Civil Rights\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Booker T. Washington\u0026rdquo;: help promote Southern society will gain equality. Founded the \u0026ldquo;Tuskegee Institute\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;W.E.B. Dubois\u0026rdquo;: make the most talented and artistic people push for civil rights. \u0026ldquo;Civil rights by copyright.\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnew_american_south/","tags":null,"title":"New American South"},{"categories":null,"contents":"A set of policy by Franklin D. Roosevelt (FDR) which helped saving the economy during the Great Depression.\nSaving the Banks Unemployment Relief Industrial Recovery Agriculture Creates the WPA. Also the Social Security Administration. Also created Rural Electrification Administration\nMany people were still left out.\n","html":"\u003cp\u003eA set of policy by \u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. 
Roosevelt (FDR)\u003c/a\u003e which helped saving the economy during the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSaving the Banks\u003c/li\u003e\n\u003cli\u003eUnemployment Relief\u003c/li\u003e\n\u003cli\u003eIndustrial Recovery\u003c/li\u003e\n\u003cli\u003eAgriculture\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCreates the \u003ca href=\"/posts/kbhwpa/\"\u003eWPA\u003c/a\u003e. Also the \u003ca href=\"/posts/kbhsocial_security_administration/\"\u003eSocial Security Administration\u003c/a\u003e. Also created \u003ca href=\"/posts/kbhrural_electrification_administration/\"\u003eRural Electrification Administration\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eMany people were still left out.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnew_deal/","tags":null,"title":"New Deal"},{"categories":null,"contents":"A reformist, counterculture movement during the \u0026rsquo;80s lead by Ronald Reagan. Its a new response to the neoliberalism which aligned the blocks of Evangelical Christians (25% of voters) and Business leaders (powerful leaders.)\nAmerican liberalism expands under the new right as well.\nPresident as a party leader: Reagan is often shown as shining beaken of the Republican Party Leadership\u0026mdash;won every single state except Georgia .\n","html":"\u003cp\u003eA reformist, counterculture movement during the \u0026rsquo;80s lead by \u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e. 
Its a new response to the neoliberalism which aligned the blocks of Evangelical Christians (25% of voters) and Business leaders (powerful leaders.)\u003c/p\u003e\n\u003cp\u003eAmerican liberalism expands under the new right as well.\u003c/p\u003e\n\u003cp\u003ePresident as a party leader: Reagan is often shown as shining beaken of the Republican Party Leadership\u0026mdash;won every single state except Georgia .\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnew_right/","tags":null,"title":"New Right"},{"categories":null,"contents":"\\begin{equation} y\u0026rsquo;\u0026rsquo;=0 \\end{equation}\nthat is, if \\(F=0\\), then the solution will travel along a straight line.\n","html":"\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo;=0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is, if \\(F=0\\), then the solution will travel along a straight line.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnewton_s_first_law_of_motion/","tags":null,"title":"Newton's First Law of Motion"},{"categories":null,"contents":"Putting something with a different temperature in a space with a constant temperature. The assumption underlying here is that the overall room temperature stays constant (i.e. the thing that\u0026rsquo;s cooling is so small that it doesn\u0026rsquo;t hurt room temperature).\n\\begin{equation} y\u0026rsquo;(t) = -k(y-T_0) \\end{equation}\nwhere, \\(T_0\\) is the initial temperature.\nThe intuition of this modeling is that there is some \\(T_0\\), which as the temperature \\(y\\) of your object gets closer to t. 
The result we obtain\nSolving \\begin{equation} \\int \\frac{\\dd{y}}{y-T_0} = \\int -k \\dd{t} \\end{equation}\nwe can solve this:\n\\begin{equation} \\ln |y-T_0| = -kt+C \\end{equation}\nwhich means we end up with:\n\\begin{equation} |y-T_0| = e^{-kt+C} = e^{C}e^{-kt} \\end{equation}\nSo therefore:\n\\begin{equation} y(t) = T_0 + C_1e^{-kt} \\end{equation}\nto include both \\(\\pm\\) cases.\nthis tells us that cooling and heating is exponential. We will fit our initial conditions rom data to obtain \\(C_1\\).\n","html":"\u003cp\u003ePutting something with a different temperature in a space with a constant temperature. The assumption underlying here is that the overall room temperature stays constant (i.e. the thing that\u0026rsquo;s cooling is so small that it doesn\u0026rsquo;t hurt room temperature).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(t) = -k(y-T_0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T_0\\) is the initial temperature.\u003c/p\u003e\n\u003cp\u003eThe intuition of this modeling is that there is some \\(T_0\\), which as the temperature \\(y\\) of your object gets closer to t. The result we obtain\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{\\dd{y}}{y-T_0} = \\int -k \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can solve this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln |y-T_0| = -kt+C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means we end up with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y-T_0| = e^{-kt+C} = e^{C}e^{-kt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = T_0 + C_1e^{-kt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto include both \\(\\pm\\) cases.\u003c/p\u003e\n\u003cp\u003ethis tells us that cooling and heating is exponential. 
We will fit our initial conditions rom data to obtain \\(C_1\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnewton_s_law_of_cooling/","tags":null,"title":"Newton's Law of Cooling"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnewton_s_method/","tags":null,"title":"Newton's Method"},{"categories":null,"contents":"Complex System\nLanguage Model A Language Model is a large neural network trained to predict the next token given some context.\n\u0026ldquo;Language models can discriminate behavior that they can\u0026rsquo;t reliably generate.\u0026rdquo;\nCoherence Generative REVOLUTION\nWhy probability maximization sucks Its expensive!\nBeam Search Take \\(k\\) candidates Expand \\(k\\) expansions for each of the \\(k\\) candidates Choose the highest probability \\(k\\) candidates \\(k\\) should be small: trying to maximizing\nBranch and Bound See Branch and Bound\nChallenges of Direct Sampling Direct Sampling sucks. Its sucks. It sucks. Just sampling from the distribution sucks. This has to do with the fact that assigning slightly lower scores \u0026ldquo;being less confident\u0026rdquo; is exponentially worse.\nThe model has to therefore be VERY conservative about giving low confidences; so, it is over confident about worst tokens.\nTop-K Top-k is too broad, and top\nNucleaus Sampling Find the smallest set of tokens that make up to \\(p\\) probability.\nCorrectness The highest probability answer isn\u0026rsquo;t always right Generative models consider every answer, so we want another model to compute the correct answer Surface Form Competition The Surface Form Competition problem results when top probabity token \u0026ldquo;steals\u0026rdquo; probability from the other tokens.\nThe predicted frequency of a possible string is a main comfounder. 
And so we can use models to decompose their own predictions:\nTurns out:\n\\(P(answer|question) \\approx P(answer\\ is\\ valid)P(answer|domain)\\)\nSo\u0026hellip;\n\\begin{equation} P(answer\\ is\\ valid) = \\frac{P(answer|question)}{P(answer|domain)} \\end{equation}\nThis is better :point_up:. Futher reading: (Holtzman et al. 2021)\nDomain Domain is the context in which that the text may occur.\nCoverage Why aren\u0026rsquo;t models controllable\nHallucination Language models predict what\u0026rsquo;s most likely We hope to control them with natural-language semantics In-Context Learning If we show the model some context which has example input output pairs, it can output. (Language Model model are few shot learners)\nCorrect Scoring We can reverse the output to predict the input to prevent model from loosing information, and use that to rerank the info. Of course, if the model can\u0026rsquo;t generate the desired input, the output is probably missing information.\nSmaller models can be made better because of info reranking.\nTh Degenerative Discriminative Gap.\nFuture Work The fact that the single comma shift the input. What we need is a language to control language behavior.\nThe Ability to Control a Model are the Goal of Understand the Model\nWe should only claim to understand a model when we can make a theory map about it: \u0026ldquo;when X is fed into the model, we get Y\u0026rdquo;\nSo: we should look at what the model is biased about (Surface Form Competition, for instance) we would be closer to prime behaviors such that they mimic the human behavior (in pieces, not just \u0026ldquo;complete these tokens\u0026rdquo;) in completion We see success as the actual evaluation metrics; we can use machines vs. 
other machines as the the results Questions ahai@uw.edu\nMarcel Just\nanthropic ai papers\npercy liang\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_system/\"\u003eComplex System\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"language-model\"\u003eLanguage Model\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#language-model\"\u003eLanguage Model\u003c/a\u003e is a large neural network trained to predict the \u003cstrong\u003enext token\u003c/strong\u003e given some context.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Language models can discriminate behavior that they can\u0026rsquo;t reliably generate.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"coherence\"\u003eCoherence\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eGenerative REVOLUTION\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"why-probability-maximization-sucks\"\u003eWhy probability maximization sucks\u003c/h3\u003e\n\u003cp\u003eIts expensive!\u003c/p\u003e\n\u003ch3 id=\"beam-search\"\u003eBeam Search\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eTake \\(k\\) candidates\u003c/li\u003e\n\u003cli\u003eExpand \\(k\\) expansions for each of the \\(k\\) candidates\u003c/li\u003e\n\u003cli\u003eChoose the highest probability \\(k\\) candidates\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\(k\\) should be small: trying to maximizing\u003c/p\u003e\n\u003ch3 id=\"branch-and-bound--kbhbranch-and-bound-dot-md\"\u003e\u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"challenges-of-direct-sampling\"\u003eChallenges of Direct Sampling\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e sucks. Its sucks. It sucks. Just sampling from the distribution sucks. 
This has to do with the fact that assigning slightly lower scores \u0026ldquo;being less confident\u0026rdquo; is exponentially worse.\u003c/p\u003e\n\u003cp\u003eThe model has to therefore be VERY conservative about giving low confidences; so, it is over confident about worst tokens.\u003c/p\u003e\n\u003ch3 id=\"top-k\"\u003eTop-K\u003c/h3\u003e\n\u003cp\u003eTop-k is too broad, and top\u003c/p\u003e\n\u003ch3 id=\"nucleaus-sampling\"\u003eNucleaus Sampling\u003c/h3\u003e\n\u003cp\u003eFind the smallest set of tokens that make up to \\(p\\) probability.\u003c/p\u003e\n\u003ch2 id=\"correctness\"\u003eCorrectness\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe highest probability answer isn\u0026rsquo;t always right\u003c/li\u003e\n\u003cli\u003eGenerative models consider every answer, so we want another model to compute the correct answer\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"surface-form-competition\"\u003eSurface Form Competition\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#surface-form-competition\"\u003eSurface Form Competition\u003c/a\u003e problem results when top probabity token \u0026ldquo;steals\u0026rdquo; probability from the other tokens.\u003c/p\u003e\n\u003cp\u003eThe predicted frequency of a possible string is a main comfounder. And so we can use models to decompose their own predictions:\u003c/p\u003e\n\u003cp\u003eTurns out:\u003c/p\u003e\n\u003cp\u003e\\(P(answer|question) \\approx P(answer\\ is\\ valid)P(answer|domain)\\)\u003c/p\u003e\n\u003cp\u003eSo\u0026hellip;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(answer\\ is\\ valid) = \\frac{P(answer|question)}{P(answer|domain)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is better :point_up:. Futher reading: (\u003ca href=\"#citeproc_bib_item_1\"\u003eHoltzman et al. 
2021\u003c/a\u003e)\u003c/p\u003e\n\u003ch4 id=\"domain\"\u003eDomain\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#domain\"\u003eDomain\u003c/a\u003e is the context in which that the text may occur.\u003c/p\u003e\n\u003ch2 id=\"coverage\"\u003eCoverage\u003c/h2\u003e\n\u003cp\u003eWhy aren\u0026rsquo;t models controllable\u003c/p\u003e\n\u003ch3 id=\"hallucination\"\u003eHallucination\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eLanguage models predict what\u0026rsquo;s most likely\u003c/li\u003e\n\u003cli\u003eWe hope to control them with natural-language semantics\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"in-context-learning\"\u003eIn-Context Learning\u003c/h3\u003e\n\u003cp\u003eIf we show the model some context which has example input output pairs, it can output. (\u003ca href=\"#language-model\"\u003eLanguage Model\u003c/a\u003e model are few shot learners)\u003c/p\u003e\n\u003ch4 id=\"correct-scoring\"\u003eCorrect Scoring\u003c/h4\u003e\n\u003cp\u003eWe can reverse the output to predict the input to prevent model from loosing information, and use that to rerank the info. Of course, if the model can\u0026rsquo;t generate the desired input, the output is probably missing information.\u003c/p\u003e\n\u003cp\u003eSmaller models can be made better because of info reranking.\u003c/p\u003e\n\u003cp\u003eTh Degenerative Discriminative Gap.\u003c/p\u003e\n\u003ch2 id=\"future-work\"\u003eFuture Work\u003c/h2\u003e\n\u003cp\u003eThe fact that the single comma shift the input. 
What we need is a language to control language behavior.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThe Ability to Control a Model are the Goal of Understand the Model\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWe should only claim to understand a model when we can make a theory map about it: \u0026ldquo;when X is fed into the model, we get Y\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"so\"\u003eSo:\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe should look at what the model is biased about (\u003ca href=\"#surface-form-competition\"\u003eSurface Form Competition\u003c/a\u003e, for instance)\u003c/li\u003e\n\u003cli\u003ewe would be closer to prime behaviors such that they mimic the human behavior (in pieces, not just \u0026ldquo;complete these tokens\u0026rdquo;) in completion\u003c/li\u003e\n\u003cli\u003eWe see success as the actual evaluation metrics; we can use machines vs. other machines as the the results\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"mailto:ahai@uw.edu\"\u003eahai@uw.edu\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eMarcel Just\u003c/p\u003e\n\u003cp\u003eanthropic ai papers\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003epercy liang\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnlp/","tags":null,"title":"NLP"},{"categories":null,"contents":" 1990 static word embeddings 2003 neural language models 2008 multi-task learning 2015 attention 2017 transformer 2018 trainable contextual word embeddings + large scale pretraining 2019 prompt engineering Motivating Attention Given a sequence of embeddings: \\(x_1, x_2, \u0026hellip;, x_{n}\\)\nFor each \\(x_{i}\\), the goal of attention is to produce a new embedding of each \\(x_{i}\\) named \\(a_{i}\\) based its dot product similarity with all other words that are before it.\nLet\u0026rsquo;s define:\n\\begin{equation} score(x_{i}, x_{j}) = x_{i} \\cdot x_{j} 
\\end{equation}\nWhich means that we can write:\n\\begin{equation} a_{i} = \\sum_{j \\leq i}^{} \\alpha_{i,j} x_{j} \\end{equation}\nwhere:\n\\begin{equation} \\alpha_{i,j} = softmax \\qty(score(x_{i}, x_{j}) ) \\end{equation}\nThe resulting \\(a_{i}\\) is the output of our attention.\nAttention From the above, we call the input embeddings \\(x_{j}\\) the values, and we will create a separate embeddings called key with which we will measure the similarity. We call the word we want the target new embeddings for the query (i.e. \\(x_{i}\\) from above).\n","html":"\u003cul\u003e\n\u003cli\u003e1990 static word embeddings\u003c/li\u003e\n\u003cli\u003e2003 neural language models\u003c/li\u003e\n\u003cli\u003e2008 multi-task learning\u003c/li\u003e\n\u003cli\u003e2015 attention\u003c/li\u003e\n\u003cli\u003e2017 transformer\u003c/li\u003e\n\u003cli\u003e2018 trainable contextual word embeddings + large scale pretraining\u003c/li\u003e\n\u003cli\u003e2019 prompt engineering\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"motivating-attention\"\u003eMotivating Attention\u003c/h2\u003e\n\u003cp\u003eGiven a sequence of embeddings: \\(x_1, x_2, \u0026hellip;, x_{n}\\)\u003c/p\u003e\n\u003cp\u003eFor each \\(x_{i}\\), the goal of attention is to \u003cstrong\u003eproduce a new embedding\u003c/strong\u003e of each \\(x_{i}\\) named \\(a_{i}\\) based its dot product similarity with all other words that are before it.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nscore(x_{i}, x_{j}) = x_{i} \\cdot x_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich means that we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{i} = \\sum_{j \\leq i}^{} \\alpha_{i,j} x_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{i,j} = softmax \\qty(score(x_{i}, x_{j}) )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe resulting \\(a_{i}\\) is the output of our 
attention.\u003c/p\u003e\n\u003ch2 id=\"attention\"\u003eAttention\u003c/h2\u003e\n\u003cp\u003eFrom the above, we call the input embeddings \\(x_{j}\\) the \u003cstrong\u003evalues\u003c/strong\u003e, and we will create a separate embeddings called \u003cstrong\u003ekey\u003c/strong\u003e with which we will measure the similarity. We call the word we want the target new embeddings for the \u003cstrong\u003equery\u003c/strong\u003e (i.e. \\(x_{i}\\) from above).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnlp_semantics_timeline/","tags":null,"title":"NLP Semantics Timeline"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhchomsky/","tags":null,"title":"Noam Chomsky"},{"categories":null,"contents":"\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nThe general solution for this would be\nany solution specifically which gives \\(f(t)\\), plus any homogeneous solutions specifically:\n\\begin{equation} y = y_{p}(t) + y_{n}(t) \\end{equation}\nwhere the left is a particular solution, and the right is any homogeneous solution. 
We can do this because, say if we derivate it; the left derivative (the particular solution) gives \\(f(t)\\), and the right, because its homogeneous, gives 0.\nBecause there can be at most one solution to every IVP, we know that all solutions to the equation must take on the form of \\(y_{p}(t) + c_1 y_{n_{1}}(t) + \u0026hellip; + c_{n} y_{n_{j}}(t) = y\\)\nThe general solution to this is:\n\\begin{equation} y(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at} \\end{equation}\nthis works equally well when \\(a\\) is not constant:\n\\begin{equation} y(t) = e^{-\\qty(\\int a(s) \\dd{s})t} \\int_{0}^{t}e^{\\qty(\\int a(s) \\dd{s})x} f(x) \\dd{x} + Ce^{-at} \\end{equation}\ninhomogeneous solutions cannot work with the time translation trick\nintegrating factor Consider the case where:\n\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nideally, we would love our whole left side to be one giant derivative which we can antiderive; let\u0026rsquo;s try multiply both sides with \\(e^{at}\\):\n\\begin{equation} (e^{at}y)\u0026rsquo; = e^{at}y\u0026rsquo; + ae^{at}y = e^{at}(y\u0026rsquo; + ay) = e^{at} f(t) \\end{equation}\nWe note that this gives:\n\\begin{equation} (e^{at}y)\u0026rsquo; = e^{at}f(t) \\end{equation}\nmeaning:\n\\begin{equation} e^{at}y(t) = \\int_{0}^{t} e^{ax} f(x) \\dd{x} \\end{equation}\nwhich gives:\n\\begin{equation} y(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} \\end{equation}\nTacking on the homogeneous solution :\n\\begin{equation} y(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at} \\end{equation}\nnote! the first term doesn\u0026rsquo;t have a scaler in front of it. Otherwise, the derivative will give you \\(nf(x)\\) instead of \\(f(x)\\).\nThis actually doesn\u0026rsquo;t matter what \\(a\\) is. 
In a sense, if we swap \\(a\\) for \\(a(t)\\), we simply have to write \\(a = \\int a(x) \\dd{x}\\).So, most generally:\n\\begin{equation} y\u0026rsquo; + a(t)y = f(t) \\end{equation}\nyields (CHECK THIS FACT\u0026lt; IT MAY BE WRONG)\n\\begin{equation} y(t) = e^{-\\qty(\\int a(t) \\dd{t})} \\int_{0}^{t}e^{\\qty(\\int a(x) \\dd{x})} f(x) \\dd{x} + Ce^{-\\qty(\\int a(t) \\dd{t})} \\end{equation}\nfor constant solutions, we get:\n\\begin{equation} y(t) = C e^{-a(t-t_0)} + e^{-at} \\int_{t_0}^{t} e^{as} f(s) \\dd{s} \\end{equation}\nfor\n\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nmethod of undetermined coefficients Good guesses for the structure of:\n\\begin{equation} y\u0026rsquo; + ay = f(t) \\end{equation}\nfor \\(f(t) = C\\) , guess \\(y = C\u0026rsquo;\\) for \\(f(t) = x^{n}\\), guess \\(y = x^{n}\\) (with all subsequent terms) for \\(f(t) = \\sin (t)\\) or \\(f(t)=\\cos (t)\\), guess \\(y=A \\sin (t) + B \\cos (t)\\) for \\(f(t) = e^{\\lambda t}\\), guess \\(y = Ce^{\\lambda t}\\) example say:\n\\begin{equation} y\u0026rsquo; + ky = 70k + 10k \\sin (t) \\end{equation}\nlet\u0026rsquo;s break it up into three pieces:\n\\begin{equation} \\begin{cases} y_1\u0026rsquo; + ky_{1} = 70 k\\\\ y_2\u0026rsquo; + k y_2 = 10k \\sin (t) \\\\ y_3\u0026rsquo; + k y_{3} = 0 \\end{cases} \\end{equation}\nyou will note that adding up all three of these yields a value for \\(y\\) that satisfy the overall expression.\nfirst one: we can just guess \\(y = 70\\), which evidently works second one: we want the sin and cos to cancel out, so we can guess \\(A \\sin t + B \\cos t\\), whose derivative is \\(-B \\sin t + A \\cos t\\), plugging that in, we get: \\((-B+kA) \\sin t + (A+kB) \\cos t\\), which we can use our coefficients to solve third one: that\u0026rsquo;s the homogeneous solution \\(Ce^{-kt}\\) and we can finally add it all up.\nconcern at some points, there is a case where\u0026mdash;at certain choices of constants, you may obtain a homogeneous solution when 
you are trying to get the particular solution. that\u0026rsquo;s bad. Consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; - y = e^{mt} \\end{equation}\nand the particular solution you\u0026rsquo;d get is something like:\n\\begin{equation} y_{p}(t) = \\frac{1}{m^{2}-1} e^{mt} \\end{equation}\nthis makes sense for all cases except where \\(m = \\pm 1\\), because that gives a homogeneous solution, and the bottom of that fraction will blow up. To fix this, we can engineer a specific solution for which the limit towards \\(1\\) exists. Recall that, in general, we have:\n\\begin{equation} y(t) = \\frac{1}{m^{2}-1} e^{mt} + c_1 e^{t} + c_2 e^{-t} \\end{equation}\nif we choose \\(c_2=0\\), \\(c_1= \\frac{1}{m^{2}-1}\\), we can see that the limit exists through l\u0026rsquo;hospitals:\n\\begin{equation} y_{p}(t) = \\frac{1}{m^{2}-1} \\qty( e^{mt}-e^{t}) \\end{equation}\nwe can evaluate this at \\(m=1\\) by using l\u0026rsquo;hospitals rule.\nAt this point, we essentially end up with two distinct solutions, given a choice of \\(m\\).\nvariation of parameters method Take an independent set of homogeneous solutions which tells you what the general solution is, then modifying to modify the \\(f\\).\nThe general solution is irrespective of \\(f\\).\nNow, consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; - y = f(t) \\end{equation}\nBecause the homogeneous solution \\(y_1\\) and \\(y_2\\) gives two independent solutions, we obtain:\n\\begin{equation} y_{p} = c_1(t) y_1(t) + c_2(t) y_2(t) \\end{equation}\nwe can take the derivative of this to obtain:\n\\begin{equation} y\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo; + c_2(t) y_2(t)\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t) + c_2(t)\u0026rsquo; y_2(t) \\end{equation}\nwe are going to guess the right two terms are zero (assume \\(c_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0\\)) and repeat this procedure:\n\\begin{equation} y\u0026rsquo;\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo;\u0026rsquo; + c_2(t) 
y_2(t)\u0026rsquo;\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t)\u0026rsquo; + c_2(t)\u0026rsquo; y_2(t)' \\end{equation}\nnow, plugging this into our original equation, we obtain:\n\\begin{align} y_{p}\u0026rsquo;\u0026rsquo; - y_{p} \u0026amp;= c_1(y_1 \u0026rsquo;\u0026rsquo; - y_{1}) + c_2 (y_{2}\u0026rsquo;\u0026rsquo; - y_2) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\ \u0026amp;= c_1(0) + c_2 (0) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\ \u0026amp;= c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\ \u0026amp;= f(t) \\end{align}\nSo, combining what we just got and our simplifying assumption, we obtain:\n\\begin{equation} \\begin{cases} c_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0 \\\\ c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; = f(t) \\end{cases} \\end{equation}\nThis is now a matrix expression:\n\\begin{equation} \\mqty(y_1 \u0026amp; y_2 \\\\ y_1 \u0026rsquo; \u0026amp; y_2 \u0026lsquo;) \\mqty(c_1 \u0026rsquo; \\\\ c_2 \u0026lsquo;) = \\mqty( 0 \\\\ f(t)) \\end{equation}\nthat matrix on the left side is called the Wronshian matrix, and if \\(y_1\\) and \\(y_2\\) the homogeneous solutions are independent, we know that this is going to be invertible. Then, we can just solve and integrate to obtain \\(c_1, c_2\\).\nwhy do we tack on the homogeneous solution again? What if we have a plane specified:\n\\begin{equation} a x_1 + b x_2 + c x_3 = K \\end{equation}\nwe want to solve \\(x_{j}\\) as a vector which lives on this plane.\nLet\u0026rsquo;s begin by shifting this plane down to the origin:\n\\begin{equation} a x_1 + b x_2 + c x_3 + 0 \\end{equation}\nWhich says the same thing as:\n\\begin{equation} \\mqty(a \u0026amp; b \u0026amp; c) \\mqty(x_1 \\\\ x_2 \\\\ x_3) = 0 \\end{equation}\nmeaning:\n\\begin{equation} A x = 0 \\end{equation}\nwhere \\(A \\in \\mathcal{L}(m,n)\\), where \\(m \u0026lt; n\\). 
To solve for \\(x\\), we desire \\(\\text{null}\\ A\\), and given we are a map into a bigger space, we should have non-trivial null space.\nfind a particular solution \\(x_{p}\\) to the non-shifted version the general solution should live in \\(x_{p} + \\text{null}\\ A\\), the affine subset meaning all solutions should live on \\(x = x_{p} + c_1 v_1 + c_2 v_2\\) ","html":"\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe general solution for this would be\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eany solution specifically which gives \\(f(t)\\), plus\u003c/li\u003e\n\u003cli\u003eany homogeneous solutions\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003especifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = y_{p}(t) + y_{n}(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the left is a particular solution, and the right is any homogeneous solution. We can do this because, say if we derivate it; the left derivative (the particular solution) gives \\(f(t)\\), and the right, because its homogeneous, gives 0.\u003c/p\u003e\n\u003cp\u003eBecause there can be at most one solution to every IVP, we know that all solutions to the equation must take on the form of \\(y_{p}(t) + c_1 y_{n_{1}}(t) + \u0026hellip; + c_{n} y_{n_{j}}(t) = y\\)\u003c/p\u003e\n\u003cp\u003eThe general solution to this is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis works equally well when \\(a\\) is not constant:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-\\qty(\\int a(s) \\dd{s})t} \\int_{0}^{t}e^{\\qty(\\int a(s) \\dd{s})x} f(x) \\dd{x} + Ce^{-at}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003einhomogeneous solutions cannot work with the time translation trick\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"integrating-factor\"\u003eintegrating 
factor\u003c/h2\u003e\n\u003cp\u003eConsider the case where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eideally, we would love our whole left side to be one giant derivative which we can antiderive; let\u0026rsquo;s try multiply both sides with \\(e^{at}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(e^{at}y)\u0026rsquo; = e^{at}y\u0026rsquo; + ae^{at}y = e^{at}(y\u0026rsquo; + ay) = e^{at} f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe note that this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(e^{at}y)\u0026rsquo; = e^{at}f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{at}y(t) = \\int_{0}^{t} e^{ax} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTacking on the homogeneous solution :\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-at} \\int_{0}^{t}e^{ax} f(x) \\dd{x} + Ce^{-at}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote! the first term doesn\u0026rsquo;t have a scaler in front of it. Otherwise, the derivative will give you \\(nf(x)\\) instead of \\(f(x)\\).\u003c/p\u003e\n\u003cp\u003eThis actually doesn\u0026rsquo;t matter what \\(a\\) is. 
In a sense, if we swap \\(a\\) for \\(a(t)\\), we simply have to write \\(a = \\int a(x) \\dd{x}\\).So, most generally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + a(t)y = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyields (CHECK THIS FACT\u0026lt; IT MAY BE WRONG)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = e^{-\\qty(\\int a(t) \\dd{t})} \\int_{0}^{t}e^{\\qty(\\int a(x) \\dd{x})} f(x) \\dd{x} + Ce^{-\\qty(\\int a(t) \\dd{t})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor constant solutions, we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = C e^{-a(t-t_0)} + e^{-at} \\int_{t_0}^{t} e^{as} f(s) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"method-of-undetermined-coefficients--kbhsecond-order-linear-differential-equation-dot-md\"\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eGood guesses for the structure of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ay = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efor \\(f(t) = C\\) , guess \\(y = C\u0026rsquo;\\)\u003c/li\u003e\n\u003cli\u003efor \\(f(t) = x^{n}\\), guess \\(y = x^{n}\\) (with all subsequent terms)\u003c/li\u003e\n\u003cli\u003efor \\(f(t) = \\sin (t)\\) or \\(f(t)=\\cos (t)\\), guess \\(y=A \\sin (t) + B \\cos (t)\\)\u003c/li\u003e\n\u003cli\u003efor \\(f(t) = e^{\\lambda t}\\), guess \\(y = Ce^{\\lambda t}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"example\"\u003eexample\u003c/h3\u003e\n\u003cp\u003esay:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; + ky = 70k + 10k \\sin (t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet\u0026rsquo;s break it up into three 
pieces:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny_1\u0026rsquo; + ky_{1} = 70 k\\\\\ny_2\u0026rsquo; + k y_2 = 10k \\sin (t) \\\\\ny_3\u0026rsquo; + k y_{3} = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that adding up all three of these yields a value for \\(y\\) that satisfy the overall expression.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efirst one: we can just guess \\(y = 70\\), which evidently works\u003c/li\u003e\n\u003cli\u003esecond one: we want the sin and cos to cancel out, so we can guess \\(A \\sin t + B \\cos t\\), whose derivative is \\(-B \\sin t + A \\cos t\\), plugging that in, we get: \\((-B+kA) \\sin t + (A+kB) \\cos t\\), which we can use our coefficients to solve\u003c/li\u003e\n\u003cli\u003ethird one: that\u0026rsquo;s the homogeneous solution \\(Ce^{-kt}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand we can finally add it all up.\u003c/p\u003e\n\u003ch3 id=\"concern\"\u003econcern\u003c/h3\u003e\n\u003cp\u003eat some points, there is a case where\u0026mdash;at certain choices of constants, you may obtain a homogeneous solution when you are trying to get the particular solution. that\u0026rsquo;s bad. Consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; - y = e^{mt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand the particular solution you\u0026rsquo;d get is something like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{p}(t) = \\frac{1}{m^{2}-1} e^{mt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis makes sense for all cases except where \\(m = \\pm 1\\), because that gives a homogeneous solution, and the bottom of that fraction will blow up. To fix this, we can engineer a specific solution for which the limit towards \\(1\\) exists. 
Recall that, in general, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = \\frac{1}{m^{2}-1} e^{mt} + c_1 e^{t} + c_2 e^{-t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we choose \\(c_2=0\\), \\(c_1= \\frac{1}{m^{2}-1}\\), we can see that the limit exists through l\u0026rsquo;hospitals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{p}(t) = \\frac{1}{m^{2}-1} \\qty( e^{mt}-e^{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can evaluate this at \\(m=1\\) by using l\u0026rsquo;hospitals rule.\u003c/p\u003e\n\u003cp\u003eAt this point, we essentially end up with two distinct solutions, given a choice of \\(m\\).\u003c/p\u003e\n\u003ch2 id=\"variation-of-parameters-method\"\u003evariation of parameters method\u003c/h2\u003e\n\u003cp\u003eTake an independent set of homogeneous solutions which tells you what the general solution is, then modifying to modify the \\(f\\).\u003c/p\u003e\n\u003cp\u003eThe general solution is irrespective of \\(f\\).\u003c/p\u003e\n\u003cp\u003eNow, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; - y = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause the homogeneous solution \\(y_1\\) and \\(y_2\\) gives two independent solutions, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_{p} = c_1(t) y_1(t) + c_2(t) y_2(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can take the derivative of this to obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo; + c_2(t) y_2(t)\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t) + c_2(t)\u0026rsquo; y_2(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe are going to guess the right two terms are zero (assume \\(c_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0\\)) and repeat this procedure:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo;_{p} = c_1(t) y_1(t)\u0026rsquo;\u0026rsquo; + c_2(t) y_2(t)\u0026rsquo;\u0026rsquo; + c_1(t)\u0026rsquo; y_1(t)\u0026rsquo; + 
c_2(t)\u0026rsquo; y_2(t)'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, plugging this into our original equation, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny_{p}\u0026rsquo;\u0026rsquo; - y_{p} \u0026amp;= c_1(y_1 \u0026rsquo;\u0026rsquo; - y_{1}) + c_2 (y_{2}\u0026rsquo;\u0026rsquo; - y_2) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\\n\u0026amp;= c_1(0) + c_2 (0) + c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\\n\u0026amp;= c_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; \\\\\n\u0026amp;= f(t)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eSo, combining what we just got and our simplifying assumption, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nc_1 \u0026rsquo; y_1 + c_2 \u0026rsquo; y_2 = 0 \\\\\nc_1 \u0026rsquo; y_1 \u0026rsquo; + c_2 \u0026rsquo; y_2 \u0026rsquo; = f(t)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is now a matrix expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(y_1 \u0026amp; y_2 \\\\ y_1 \u0026rsquo; \u0026amp; y_2 \u0026lsquo;) \\mqty(c_1 \u0026rsquo; \\\\ c_2 \u0026lsquo;) = \\mqty( 0 \\\\ f(t))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat matrix on the left side is called the \u003ca href=\"#variation-of-parameters-method\"\u003eWronshian\u003c/a\u003e matrix, and if \\(y_1\\) and \\(y_2\\) the homogeneous solutions are independent, we know that this is going to be invertible. 
Then, we can just solve and integrate to obtain \\(c_1, c_2\\).\u003c/p\u003e\n\u003ch2 id=\"why-do-we-tack-on-the-homogeneous-solution-again\"\u003ewhy do we tack on the homogeneous solution again?\u003c/h2\u003e\n\u003cp\u003eWhat if we have a plane specified:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na x_1 + b x_2 + c x_3 = K\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe want to solve \\(x_{j}\\) as a vector which lives on this plane.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by shifting this plane down to the origin:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na x_1 + b x_2 + c x_3 + 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich says the same thing as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a \u0026amp; b \u0026amp; c) \\mqty(x_1 \\\\ x_2 \\\\ x_3) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA x = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(A \\in \\mathcal{L}(m,n)\\), where \\(m \u0026lt; n\\). To solve for \\(x\\), we desire \\(\\text{null}\\ A\\), and given we are a map into a bigger space, we should have non-trivial null space.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efind a particular solution \\(x_{p}\\) to the non-shifted version\u003c/li\u003e\n\u003cli\u003ethe general solution should live in \\(x_{p} + \\text{null}\\ A\\), the \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003emeaning all solutions should live on \\(x = x_{p} + c_1 v_1 + c_2 v_2\\)\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_homogeneous_linear_differential_equation/","tags":null,"title":"non-homogeneous linear differential equation"},{"categories":null,"contents":"In this project, we aim to derive situations for the existence of a differential equation for when a family of functions do not intersect. 
We were able to derive a full solution for the result in linear equations, and we offer an exploration of a partial solution for non-linear cases.\nFunction Families Fundamentally, function families are functions parameterized by some \\(C\\), which has the shape:\n\\begin{equation} y(x, \\dots, c) = f(x, \\dots)+c \\end{equation}\nThrough this result, we can figure a statement for \u0026ldquo;intersection.\u0026rdquo; If two functions intersect, their difference will be \\(0\\); if there is a non-trivial solution (that \\(c_1\\neq c_2\\) \u0026mdash; that, they are not the same function\u0026mdash;still makes \\(y_{C_1} = y_{C_2}\\)), the function family interact.\nWe can test this by subtracting two arbitrary members from the desired family. If it results that \\(c_1-c_2=0 \\implies c_1=c_2\\), we can say that the family does not intersect: that there are no non-trivial solutions to the function having no difference.\nSingle-Order Linear Differential Equations Here, we prove the fact that single-order linear differential equations do not produce solutions that intersect. 
We have the following single-order linear differential equation:\n\\begin{equation} \\dv{y}{x} + P(x) = Q(x) \\end{equation}\nIf, as desired, our function has a analytical solution (without an integral), we will make both terms differentiable.\n\\begin{equation} \\dv{y}{x} + P\u0026rsquo;(x) = Q\u0026rsquo;(x) \\end{equation}\nRecall the general solution of this expression:\n\\begin{align} y \u0026amp;= e^{-\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\ \u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x} \\end{align}\nOf course, we can separate the constants \\(e^{C_1}\\) out.\n\\begin{align} y \u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x} \\\\ \u0026amp;= e^{-P(x)} \\int e^{P(x)} Q\u0026rsquo;(x)\\dd{x} \\end{align}\nNow, it is the case that, for the most part, \\(e^{P(x)}Q\u0026rsquo;(x)\\) may not be integral-differentiable. Applying the fundamental theorem, we still have that as the integral function, with some \u0026ldquo;differentiated\u0026rdquo; term which we will call \\(a(x)\\): below\n\\begin{align} y \u0026amp;= e^{-P(x)}(a(x) +C) \\\\ \u0026amp;= e^{-P(x)}a(x) +Ce^{-P(x)} \\end{align}\nExcellent. Now, let\u0026rsquo;s do the subtraction test devised above; if we have that \\(C_1-C_2=0\\) given \\(y_1-y_2=0\\), then we can ensure that the function family do not intersect.\n\\begin{align} y_1 - y_2 =0 \u0026amp;= (e^{-P(x)}a(x) +C_{1}e^{-P(x)})-(e^{-P(x)}a(x) +C_{2}e^{-P(x)}) \\\\ \u0026amp;= C_{1}e^{-P(x)}-C_{2}e^{-P(x)} \\\\ \u0026amp;= (C_{1}-C_{2})e^{-P(x)} \\end{align}\nWe now have that:\n\\begin{equation} 0 = (C_1+C_2)e^{-P(x)} \\end{equation}\nNotably, the codomain of \\(e^{x}\\) is \\((0, \\infty)\\). Having never reached \\(0\\), we have that \\(0=C_1-C_2\\), as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eIn this project, we aim to derive situations for the existence of a differential equation for when a family of functions do not intersect. 
We were able to derive a full solution for the result in linear equations, and we offer an exploration of a partial solution for non-linear cases.\u003c/p\u003e\n\u003ch2 id=\"function-families\"\u003eFunction Families\u003c/h2\u003e\n\u003cp\u003eFundamentally, function families are functions parameterized by some \\(C\\), which has the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x, \\dots, c) = f(x, \\dots)+c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThrough this result, we can figure a statement for \u0026ldquo;intersection.\u0026rdquo; If two functions intersect, their difference will be \\(0\\); if there is a non-trivial solution (that \\(c_1\\neq c_2\\) \u0026mdash; that, they are not the same function\u0026mdash;still makes \\(y_{C_1} = y_{C_2}\\)), the function family interact.\u003c/p\u003e\n\u003cp\u003eWe can test this by subtracting two arbitrary members from the desired family. If it results that \\(c_1-c_2=0 \\implies c_1=c_2\\), we can say that the family does \u003cem\u003enot\u003c/em\u003e intersect: that there are no non-trivial solutions to the function having no difference.\u003c/p\u003e\n\u003ch2 id=\"single-order-linear-differential-equations\"\u003eSingle-Order Linear Differential Equations\u003c/h2\u003e\n\u003cp\u003eHere, we prove the fact that single-order linear differential equations do not produce solutions that intersect. 
We have the following single-order linear differential equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P(x) = Q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf, as desired, our function has a analytical solution (without an integral), we will make both terms differentiable.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P\u0026rsquo;(x) = Q\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall the general solution of this expression:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= e^{-\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\\n\u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, we can separate the constants \\(e^{C_1}\\) out.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= e^{-(P(x)+C_1)} \\int e^{P(x)+C_1} Q\u0026rsquo;(x)\\dd{x} \\\\\n\u0026amp;= e^{-P(x)} \\int e^{P(x)} Q\u0026rsquo;(x)\\dd{x}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, it is the case that, for the most part, \\(e^{P(x)}Q\u0026rsquo;(x)\\) may not be integral-differentiable. Applying the fundamental theorem, we still have that as the integral function, with some \u0026ldquo;differentiated\u0026rdquo; term which we will call \\(a(x)\\): below\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= e^{-P(x)}(a(x) +C) \\\\\n\u0026amp;= e^{-P(x)}a(x) +Ce^{-P(x)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eExcellent. 
Now, let\u0026rsquo;s do the subtraction test devised above; if we have that \\(C_1-C_2=0\\) given \\(y_1-y_2=0\\), then we can ensure that the function family do not intersect.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny_1 - y_2 =0 \u0026amp;= (e^{-P(x)}a(x) +C_{1}e^{-P(x)})-(e^{-P(x)}a(x) +C_{2}e^{-P(x)}) \\\\\n\u0026amp;= C_{1}e^{-P(x)}-C_{2}e^{-P(x)} \\\\\n\u0026amp;= (C_{1}-C_{2})e^{-P(x)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe now have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (C_1+C_2)e^{-P(x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotably, the codomain of \\(e^{x}\\) is \\((0, \\infty)\\). Having never reached \\(0\\), we have that \\(0=C_1-C_2\\), as desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_intersecting_graphs/","tags":null,"title":"Non-Intersecting Graphs (Single Order)"},{"categories":null,"contents":"Suppose we analyze first order non-linear system:\n\\begin{equation} x\u0026rsquo; = F(t,x) \\end{equation}\nWe can actually turn this into an autonomous system:\n\\begin{equation} x_0 = t \\end{equation}\n\\begin{equation} x_0\u0026rsquo; = 1 \\end{equation}\nmeaning suddenly we have an autonomous system:\n\\begin{equation} \\begin{cases} x_0\u0026rsquo; = 1 \\\\ x_1\u0026rsquo; = F(x_0, x_1) \\end{cases} \\end{equation}\nGeneral strategy:\nFind zeros of the right side (which are the stationary solutions) Analyze near-stationary solutions through eigenvalues of the linearized Jacobian matrix: if both eigenvalues are zero Away from stationary solutions: basically guessing Three Examples that are Hopeless to Solve Lotha-Volterra Prey-Predictor Equation \\begin{equation} \\begin{cases} x_1\u0026rsquo; = 2x_1-x_1x_2 \\\\ x_2\u0026rsquo; = x_1x_2 - 3x_2 \\end{cases} \\end{equation}\nBy default, if either \\(x_1\\) or \\(x_2\\) goes down, the system dies quickly.\nExample \\begin{equation} \\begin{cases} x_1\u0026rsquo; = r_1x_1 \\qty(1- \\frac{x_1 + h_{12} x_2}{k_1})\\\\ 
x_2\u0026rsquo; = r_2x_2 \\qty(1- \\frac{x_2 + h_{21} x_1}{k_2}) \\end{cases} \\end{equation}\nExample \\begin{equation} \\begin{cases} x_1\u0026rsquo; = x_2 \\\\ x_2\u0026rsquo; = -\\sin x_1 - \\gamma x_2 \\end{cases} \\end{equation}\nStrategy to Analyze when its Hopeless find a stationary solutions: \\(x(t) = a\\): where \\(x\u0026rsquo; = F(a) = 0\\) and draw them as points on the \\(x_1\\) and \\(x_2\\) plane near each equilibrium point, approximate through Linearilzation study the mesoscopic region So, see ODE linearilzation.\nPhase Portrait Phase Portrait is a figure in the \\(x_1, x_2\\) plane where each solution exists as a curve on the figure.\nmonotone function for linearilzation systems that are marginal (zero, negative real parts, one or more fully imaginary), we can\u0026rsquo;t use linearilzation itself to analyze the system.\nTherefore, we have to use a function for which \\(\\dv t V(y(t)) \\geq 0\\) or \\(\\dv V(y(t)) \\leq 0\\) for all \\(t\\) called a monotone function, which could give us hints about the function\u0026rsquo;s behavior.\nMeaning:\n\\begin{align} \\dv t V(y(T) \u0026amp;= \\nabla V(y(t)) \\cdot y\u0026rsquo;(t) \\\\ \u0026amp;= \\nabla V(y(t)) \\cdot F(y(t)) \\end{align}\nThe gradient of \\(V\\) is always perpendicular to the level curve of \\(V\\), and\u0026mdash;when dotted with \\(F\\) the vector field of $y$\u0026mdash;we obtain a value that\u0026rsquo;s either positive or negative. When positive, the angle between the vector field \\(F\\) and \\(V\\) would be less than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;outwards\u0026rdquo; from the level sets. Otherwise, it would be more than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;inwards\u0026rdquo;.\nconserved function its like a monotone function, but \\(\\dv{V}{t} = 0\\). any solution curve would lie inside a level curve of \\(V\\) (parts of the level curve). 
Its basically the intuition of a monotone function, but the solution curves instead of pointing inwards and outwards, it just get stuck.\n","html":"\u003cp\u003eSuppose we analyze first order non-linear system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = F(t,x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can actually turn this into an autonomous system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_0 = t\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_0\u0026rsquo; = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning suddenly we have an autonomous system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_0\u0026rsquo; = 1 \\\\\nx_1\u0026rsquo; = F(x_0, x_1)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGeneral strategy:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eFind zeros of the right side (which are the stationary solutions)\u003c/li\u003e\n\u003cli\u003eAnalyze near-stationary solutions through eigenvalues of the linearized Jacobian matrix: if both eigenvalues are zero\u003c/li\u003e\n\u003cli\u003eAway from stationary solutions: basically guessing\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"three-examples-that-are-hopeless-to-solve\"\u003eThree Examples that are Hopeless to Solve\u003c/h2\u003e\n\u003ch3 id=\"lotha-volterra-prey-predictor-equation\"\u003eLotha-Volterra Prey-Predictor Equation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = 2x_1-x_1x_2 \\\\\nx_2\u0026rsquo; = x_1x_2 - 3x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy default, if either \\(x_1\\) or \\(x_2\\) goes down, the system dies quickly.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = r_1x_1 \\qty(1- \\frac{x_1 + h_{12} x_2}{k_1})\\\\\nx_2\u0026rsquo; = r_2x_2 \\qty(1- \\frac{x_2 + h_{21} x_1}{k_2})\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch3 
id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx_1\u0026rsquo; = x_2 \\\\\nx_2\u0026rsquo; = -\\sin x_1 - \\gamma x_2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"strategy-to-analyze-when-its-hopeless\"\u003eStrategy to Analyze when its Hopeless\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003efind a stationary solutions: \\(x(t) = a\\): where \\(x\u0026rsquo; = F(a) = 0\\) and draw them as points on the \\(x_1\\) and \\(x_2\\) plane\u003c/li\u003e\n\u003cli\u003enear each equilibrium point, approximate through \u003ca href=\"/posts/kbhode_linearilzation/\"\u003eLinearilzation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estudy the \u003ca href=\"/posts/kbhmesoscopic_region/\"\u003emesoscopic region\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo, see \u003ca href=\"/posts/kbhode_linearilzation/\"\u003eODE linearilzation\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"phase-portrait\"\u003ePhase Portrait\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#phase-portrait\"\u003ePhase Portrait\u003c/a\u003e is a figure in the \\(x_1, x_2\\) plane where each solution exists as a curve on the figure.\u003c/p\u003e\n\u003ch2 id=\"monotone-function\"\u003emonotone function\u003c/h2\u003e\n\u003cp\u003efor \u003ca href=\"/posts/kbhode_linearilzation/\"\u003elinearilzation\u003c/a\u003e systems that are marginal (zero, negative real parts, one or more fully imaginary), we can\u0026rsquo;t use \u003ca href=\"/posts/kbhode_linearilzation/\"\u003elinearilzation\u003c/a\u003e itself to analyze the system.\u003c/p\u003e\n\u003cp\u003eTherefore, we have to use a function for which \\(\\dv t V(y(t)) \\geq 0\\) or \\(\\dv V(y(t)) \\leq 0\\) for all \\(t\\) called a \u003ca href=\"#monotone-function\"\u003emonotone function\u003c/a\u003e, which could give us hints about the function\u0026rsquo;s behavior.\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv t V(y(T) \u0026amp;= \\nabla 
V(y(t)) \\cdot y\u0026rsquo;(t) \\\\\n\u0026amp;= \\nabla V(y(t)) \\cdot F(y(t))\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThe gradient of \\(V\\) is always perpendicular to the level curve of \\(V\\), and\u0026mdash;when dotted with \\(F\\) the vector field of $y$\u0026mdash;we obtain a value that\u0026rsquo;s either positive or negative. When positive, the angle between the vector field \\(F\\) and \\(V\\) would be less than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;outwards\u0026rdquo; from the level sets. Otherwise, it would be more than \\(\\frac{\\pi}{2}\\), meaning the vector field point \u0026ldquo;inwards\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"conserved-function\"\u003econserved function\u003c/h2\u003e\n\u003cp\u003eits like a \u003ca href=\"#monotone-function\"\u003emonotone function\u003c/a\u003e, but \\(\\dv{V}{t} = 0\\). any solution curve would \u003cstrong\u003elie inside\u003c/strong\u003e a level curve of \\(V\\) (parts of the level curve). Its basically the intuition of a \u003ca href=\"#monotone-function\"\u003emonotone function\u003c/a\u003e, but the solution curves instead of pointing inwards and outwards, it just get stuck.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_linear_ode/","tags":null,"title":"Non-Linear ODE"},{"categories":null,"contents":"\u0026ldquo;Chaotic Dynamics\u0026rdquo; Because the word is sadly nonlinear.\nmotivating non-linearity \\begin{equation} \\dv t \\mqty(x \\\\ y) = f\\qty(\\mqty(x\\\\y)) \\end{equation}\nThis function is a function from \\(f: \\mathbb{R}^{2}\\to \\mathbb{R}^{2}\\). All the work on Second-Order Linear Differential Equations, has told us that the above system can serve as a \u0026ldquo;linearization\u0026rdquo; of a second order differential equation that looks like the follows:\n\\begin{equation} \\dv t \\mqty(x \\\\y) = A \\mqty(x \\\\ y) +b \\end{equation}\nActually going about deriving a solution to this requires powers of \\(A\\) to commute. 
If \\(A\\) has a independent variable in it, or if its a time-varying function \\(A(t)\\), you can\u0026rsquo;t actually perform the linearization technique (raising diagonalized \\(A\\) to powers) highlighted here.\nSo we need something new.\nSudden Review of Vector Functions Let\u0026rsquo;s take some function:\n\\begin{equation} f: \\mathbb{R}^{2} \\to \\mathbb{R}^{2} \\end{equation}\nIt will output a vector:\n\\begin{equation} f(x,y) = \\mqty(f_1(x,y)\\\\ f_{2}(x,y)) \\end{equation}\nSolving Non-Linear Systems, actually Let\u0026rsquo;s take a non-linear system:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = F(x,y) \\\\ \\dv{y}{t} = G(x,y) \\end{cases} \\end{equation}\nOverarching Idea: To actually solve this, we go about taking a Taylor Series (i.e. linearize) the functions next to its critical points. Then, we use an epsilon-delta proof to show that the linearization next to those critical points are a good approximation.\nSo! Let us begin.\nLet \\((x*,y*)\\) be a critical point of \\(F\\). Naturally, \\(d 0=0\\), so it is also a critical point of \\(G\\).\nSo we have:\n\\begin{equation} F(x*,y*)=G(x*,y*) = 0 \\end{equation}\nNow, we will begin building the \u0026ldquo;slope\u0026rdquo; of this function to eliminate the independent variable wholesale\u0026mdash;by dividing:\n\\begin{equation} \\dv{y}{x} = \\dv{y}{t} / \\dv{x}{t} = \\frac{G(x,y)}{F(x,y)} \\end{equation}\na divergence into epsilon delta proof\nstable A critical point is considered \u0026ldquo;stable\u0026rdquo; because, for each \\(\\epsilon \u0026gt;0\\), \\(\\exists \\delta \u0026gt;0\\), such that:\n\\begin{equation} |x_0-x*| \u0026lt; \\delta \\implies |x(t)-x*| \u0026lt; \\epsilon \\end{equation}\nasymptotically stable For every trajectory that begins close to the critical point, it will end up at the critical point as time increases. 
That is, \\(\\exists \\delta \u0026gt;0\\) such that:\n\\begin{equation} |x-x*| \u0026lt; \\delta \\implies \\lim_{t \\to \\infty } x(t)=x* \\end{equation}\nThis is essentially epsilon delta, but the limit traces out the entire process descending so the critical point is stable through the whole descend.\n","html":"\u003cp\u003e\u0026ldquo;Chaotic Dynamics\u0026rdquo; Because the word is sadly nonlinear.\u003c/p\u003e\n\u003ch2 id=\"motivating-non-linearity\"\u003emotivating non-linearity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(x \\\\ y) = f\\qty(\\mqty(x\\\\y))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis function is a function from \\(f: \\mathbb{R}^{2}\\to \\mathbb{R}^{2}\\). All the work on \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e, has told us that the above system can serve as a \u0026ldquo;linearization\u0026rdquo; of a second order differential equation that looks like the follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\mqty(x \\\\y) = A \\mqty(x \\\\ y) +b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eActually going about deriving a solution to this requires powers of \\(A\\) to commute. 
If \\(A\\) has a independent variable in it, or if its a time-varying function \\(A(t)\\), you can\u0026rsquo;t actually perform the linearization technique (raising diagonalized \\(A\\) to powers) \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#solving-homogeneous-higher-order-differential-equations\"\u003ehighlighted here\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo we need something new.\u003c/p\u003e\n\u003ch2 id=\"sudden-review-of-vector-functions\"\u003eSudden Review of Vector Functions\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s take some function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf: \\mathbb{R}^{2} \\to \\mathbb{R}^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIt will output a vector:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x,y) = \\mqty(f_1(x,y)\\\\ f_{2}(x,y))\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"solving-non-linear-systems--kbhnon-linear-systems-dot-md--actually\"\u003eSolving \u003ca href=\"/posts/kbhnon_linear_systems/\"\u003eNon-Linear Systems\u003c/a\u003e, actually\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s take a non-linear system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = F(x,y) \\\\\n\\dv{y}{t} = G(x,y)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eOverarching Idea\u003c/strong\u003e\u003c/strong\u003e: To actually solve this, we go about taking a Taylor Series (i.e. linearize) the \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003es next to its critical points. Then, we use an epsilon-delta proof to show that the linearization next to those critical points are a good approximation.\u003c/p\u003e\n\u003cp\u003eSo! Let us begin.\u003c/p\u003e\n\u003cp\u003eLet \\((x*,y*)\\) be a critical point of \\(F\\). 
Naturally, \\(d 0=0\\), so it is also a critical point of \\(G\\).\u003c/p\u003e\n\u003cp\u003eSo we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(x*,y*)=G(x*,y*) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we will begin building the \u0026ldquo;slope\u0026rdquo; of this function to eliminate the independent variable wholesale\u0026mdash;by dividing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} = \\dv{y}{t} / \\dv{x}{t} = \\frac{G(x,y)}{F(x,y)}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ea divergence into epsilon delta proof\u003c/p\u003e\n\u003ch3 id=\"stable\"\u003estable\u003c/h3\u003e\n\u003cp\u003eA critical point is considered \u0026ldquo;stable\u0026rdquo; because, for each \\(\\epsilon \u0026gt;0\\), \\(\\exists \\delta \u0026gt;0\\), such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_0-x*| \u0026lt; \\delta \\implies |x(t)-x*| \u0026lt; \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"asymptotically-stable\"\u003easymptotically stable\u003c/h4\u003e\n\u003cp\u003eFor every trajectory that begins close to the critical point, it will end up at the critical point as time increases. 
That is, \\(\\exists \\delta \u0026gt;0\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x-x*| \u0026lt; \\delta \\implies \\lim_{t \\to \\infty } x(t)=x*\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is essentially epsilon delta, but the limit traces out the entire process descending so the critical point is \u003ca href=\"#stable\"\u003estable\u003c/a\u003e through the whole descend.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_linear_systems/","tags":null,"title":"Non-Linear System"},{"categories":null,"contents":"kernel density estimation If your data is continuous, you can integrate over the entire dataset and normalize it to be able\n","html":"\u003ch2 id=\"kernel-density-estimation\"\u003ekernel density estimation\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-10_09-53-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIf your data is \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e, you can integrate over the entire dataset and normalize it to be able\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnon_parametric_learning/","tags":null,"title":"non-parametric learning"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnon_pathological_matricies/","tags":null,"title":"non-pathological matricies"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnonsingular_matricies/","tags":null,"title":"nonsingular matricies"},{"categories":null,"contents":"The nonviolence movement a method of protest which is developed by Mahatma Ghandi and leveraged by Martin Luther King in the civil rights movement.\nThe idea is to achieve civil disobedience and allowing oneself to be punished so egregiously without inciting violence so at to elicit sympathy across the nation.\nThe civil rights movement leveraged training tactics and training to ensure its participants would be 
completely nonviolent and so elicit the correct response.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnonviolence_movement/\"\u003enonviolence movement\u003c/a\u003e a method of protest which is developed by \u003ca href=\"/posts/kbhmahatma_ghandi/\"\u003eMahatma Ghandi\u003c/a\u003e and leveraged by \u003ca href=\"/posts/kbhmartin_luther_king/\"\u003eMartin Luther King\u003c/a\u003e in the \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe idea is to achieve civil disobedience and allowing oneself to be punished so egregiously without inciting violence so at to elicit sympathy across the nation.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights movement\u003c/a\u003e leveraged training tactics and training to ensure its participants would be completely nonviolent and so elicit the correct response.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnonviolence_movement/","tags":null,"title":"nonviolence movement"},{"categories":null,"contents":"The norm is the \u0026ldquo;length\u0026rdquo; of a vector, defined generally using the inner product as:\n\\begin{equation} \\|v\\| = \\sqrt{\\langle v,v \\rangle} \\end{equation}\nadditional information properties of the norm \\(\\|v\\| = 0\\) IFF \\(v=0\\) \\(\\|\\lambda v\\| = |\\lambda|\\|v\\|\\) Proof:\nBy definition of an inner product, \\(\\langle v,v \\rangle = 0\\) only when \\(v=0\\) See algebra: \\begin{align} \\|\\lambda v\\|^{2} \u0026amp;= \\langle \\lambda v, \\lambda v \\rangle \\\\ \u0026amp;= \\lambda \\langle v, \\lambda v \\rangle \\\\ \u0026amp;= \\lambda \\bar{\\lambda} \\langle v,v \\rangle \\\\ \u0026amp;= |\\lambda |^{2} \\|v\\|^{2} \\end{align}\nmotivating the norm using actual numbers In linear algebra, the norm of a vector in a real vector space is defined as follows:\n\\begin{equation} \\| x\\| = \\sqrt{{{x_1}^{2} + \\dots + {x_n}^{2}}} \\end{equation}\nNote that, given 
the definition of dot product, \\(\\| x \\|^{2} = x \\cdot x\\).\nThe norm in complex vector space requires taking the absolute value (for \\(a+bi\\), \\(|a+bi| = \\sqrt{{a^{2}+b^{2}}}\\)) of each slot. That is, for Euclidean Inner Product spaces:\n\\begin{equation} \\|z\\| = \\sqrt{|z_1|^{2} + \\dots |z_{n}|^{2}} \\end{equation}\notherwise, simply squaring the complex number (giving us \\(a^{2}-b^{2}\\)) may very well yield negative numbers, which means we\u0026rsquo;d have an imaginary norm!\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e is the \u0026ldquo;length\u0026rdquo; of a vector, defined generally using the \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|v\\| = \\sqrt{\\langle v,v \\rangle}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-the-norm\"\u003eproperties of the norm\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\\(\\|v\\| = 0\\) IFF \\(v=0\\)\u003c/li\u003e\n\u003cli\u003e\\(\\|\\lambda v\\| = |\\lambda|\\|v\\|\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBy definition of an inner product, \\(\\langle v,v \\rangle = 0\\) only when \\(v=0\\)\u003c/li\u003e\n\u003cli\u003eSee algebra:\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\begin{align}\n\\|\\lambda v\\|^{2} \u0026amp;= \\langle \\lambda v, \\lambda v \\rangle \\\\\n\u0026amp;= \\lambda \\langle v, \\lambda v \\rangle \\\\\n\u0026amp;= \\lambda \\bar{\\lambda} \\langle v,v \\rangle \\\\\n\u0026amp;= |\\lambda |^{2} \\|v\\|^{2}\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"motivating-the-norm-using-actual-numbers\"\u003emotivating the norm using actual numbers\u003c/h3\u003e\n\u003cp\u003eIn linear algebra, the \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e of a \u003ca 
href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e in a \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ereal vector space\u003c/a\u003e is defined as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\| x\\| = \\sqrt{{{x_1}^{2} + \\dots + {x_n}^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote that, given the definition of \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e, \\(\\| x \\|^{2} = x \\cdot x\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e in \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e requires taking the absolute value (for \\(a+bi\\), \\(|a+bi| = \\sqrt{{a^{2}+b^{2}}}\\)) of each slot. That is, for \u003ca href=\"/posts/kbhinner_product/#euclidean-inner-product\"\u003eEuclidean Inner Product\u003c/a\u003e spaces:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|z\\| = \\sqrt{|z_1|^{2} + \\dots |z_{n}|^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eotherwise, simply squaring the \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e (giving us \\(a^{2}-b^{2}\\)) may very well yield negative numbers, which means we\u0026rsquo;d have an imaginary norm!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnorm/","tags":null,"title":"norm"},{"categories":null,"contents":"See Gaussian distribution\n","html":"\u003cp\u003eSee \u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnormal_distribution/","tags":null,"title":"normal distribution"},{"categories":null,"contents":"normal random variable is a continuous random variable that allows you to manually specify the expectation and variance\nconstituents \\(\\mu\\) the mean \\(\\sigma\\) the variance requirements \\begin{equation} X \\sim \\mathcal{N}(\\mu, \\sigma^{2}) 
\\end{equation}\nPDF:\n\\begin{equation} f(x) = \\frac{1}{\\sigma \\sqrt{2 \\pi}} e^{-\\frac{(x-\\mu)^{2}}{2 \\sigma^{2}}} \\end{equation}\nadditional information normal maximizes entropy no other random variable uses as little parameters to convey as much information\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnormal_random_variable/\"\u003enormal random variable\u003c/a\u003e is a \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e that allows you to manually specify the expectation and variance\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mu\\) the mean\u003c/li\u003e\n\u003cli\u003e\\(\\sigma\\) the variance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim \\mathcal{N}(\\mu, \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\frac{1}{\\sigma \\sqrt{2 \\pi}} e^{-\\frac{(x-\\mu)^{2}}{2 \\sigma^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"normal-maximizes-entropy\"\u003enormal maximizes entropy\u003c/h3\u003e\n\u003cp\u003eno other \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e uses as little \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es to convey as much information\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnormal_random_variable/","tags":null,"title":"normal random variable"},{"categories":null,"contents":"Foreword Hi there, internet traveler.\nThe time is 2015/2016, I was either in 5th or 6th grade. 
At that time, I was barely beginning to be actually comfortable using the language of English.\nOne of the ways I practiced English, which is also a habit I continue to do today, is to write. I write mostly expository prose now, but, back then, shining with childish naïvete, I decided to write a multi-part story as a means of practicing English.\nAt the time, I was fortunately supported by four very helpful adults\u0026mdash;ESL instructors, teachers from the local government\u0026rsquo;s ESL program, local students, family friends\u0026mdash;who have supported me and edited this silly story as a means of helping me better my command of English.\nIronically, this story is set in 2018, I think, two years after when I wrote it. Its now 2022, almost 7 years after. Make of that what you will.\nTherefore\u0026mdash;\nNorman, an epic tale told in N parts\nWritten by yours truly, Houjun Liu, circa 2016.\nEdited by: Lynne Zummo, Dorit Hahn, Susan Cole, and Jennifer Fan.\nTypesetted: May 10th, 2022. Menlo Park, California.\nPrologue: James Peter On a sunny day, in a small house at 1623 Wesson Ave, James lay on a dirty, tiny bed. Suddenly a dog was in James’ sight. James stood up, stared at the dog. It was a small, brown, white, fuzzy dog with a tiny stump. The dog walked around James’ bed, looking silly.\n“Let’s call you Norman! It is a good name for you!”\n“There is no dog allowed in my house, get’em out! RIGHT now, or I will get YOU out!” shouted Mr. Miller.\n“Dude,” a voice came from James’ mind. Mr. Miller, the owner of the Wacky Hair Salon, who is James’ uncle, barged into James’ room, continuously shouting.\n“Get’em out, RIGHT NOW! NOW! YOU HEAR ME?”\nJames, staring at Norman, just didn’t care.\nNorman seemed to not understand all this. He followed Mr. Miller to the window, and \u0026hellip; just as suddenly as he had come in, he was thrown out by Mr. 
Miller.\nWhile Norman was wandering around, James started crying.\nMonths passed…\nPart 1 On a cold winter afternoon, Mr. Miller is sending James to an orphanage as punishment for doing “bad” things. James just doesn’t understand this. He SIMPLY wants Norman to come back!\nWhen they arrive, James finally realizes why he didn’t have parents. The truth is dreadful: his dad went crazy from programming in binary code.\n“I will go crazy, too,” James thinks. “It is not an easy job, no sir.” His mom’s situation was even worse, for she was killed by the African disease Ebola.\nHe trudges into the front building with Dr. Brains, and sees children that had been starved, gone mad, and even had been wandering around hopelessly! Many questions flew into James’ mind: Will I go crazy, too? Will I be starving, too? Will I also be wandering around like a zombie?! Feeling scared, he starts to wander, feel hungry, and starve like the other kids ……\n“Wait ! NO, I can’t do that,” thought James.\nDr. Brains takes James to walk around the orphanage, he realizes it is actually a better place to be rather than 1623 Wesson Ave. He sees cats, he sees ducks, he sees horses, he sees a playground, and he sees…\nNORMAN!\nPart 2 Dr. Brains, who looks bewildered, is staring at him.\n“How can you know him? You just arrived here!”.\n“Long story…,” explains James. “I once met the dog, and he was thrown out by Mr. Miller from where I used to live.”\n“So, this is PART of our orphanage. As you see, it is big. We now should thank the donor, who passed away, Dr. James Rover Peter…” There is a little pause, then Dr. Brains continues.\n“Who is YOUR dad!”\nThey continue walking until they get to a building labeled ‘EDU_K-4’.\n“This is the K-4 grade educational multifunctional building,” explains Dr. Brains, “where you will be staying for half a year. Then you will move to this building for study.”\nDr. 
Brains is now pointing at a building labeled ‘EX-EDU_5-12’.\nThey continue to walk until they get to another building labeled ‘OPH_LV #20312’. It is a small, lovely building, much like an apartment. “This is where you will live, in room # 20312_004,” says Dr. Brains while he hands James a key. Then he gives him a packet, which reads: Vanilla orphanage grand rules and schedule.\n“This is all you need, good day! I will leave you here.”\nJames watches Dr. Brains until he is out of sight.\nHe walks straight into the room. It looks clean, neat, like a 3-star hotel. There is a twin size bed, a desk, and a restroom. He sits down and starts to read the packet:\nChapter 1: Grand rules Welcome to Vanilla Orphanage! This is a place where you can enjoy yourself, explore yourself, and get prepared for the world!\nBut, there MUST be some rules in our orphanage to keep you and your classmates safe.\nFirst of all, you MUST not run in the front building.\nSecond, no talking is allowed while a grand meeting is taking place (see chapter 2 for more info).\nThird, follow your schedule all the time.\nFourth, if you have an emergency, use the emergency call phone (You don’t need to dial it, it will automatically connect to Vanilla Orphanage Hospital). But if you can walk and speak normally, go to Vanilla Orphanage Hospital for more help.\nFifth, the use of a regular telephone is only allowed three times a day. If your teacher calls you, it won’t count. You can only use a regular telephone for calling inside-the-orphanage friends, no outside call is allowed. To see the interior telephone numbers, see chapter 3.\nChapter 2: Grand schedule + your personal schedule Grand schedule\nYour personal schedule\nMeet me every OTHER Sunday at 15:00 at grand office starting 1/2/2019.\nGrand meeting will take place every first day of the month at the big hall in the front building. 
Everyone will attend the grand meeting; it lasts the whole day.\nDinner, Lunch, and Breakfast will be served at Front Building.\nDr. Flynn (k-4 Sciences) 4242-5000-2525 Dr. Jones (5-12 Sciences) 2134-1000-1045 Dr. Foster (k-4 Math) 2456-6206-6200 Ms. Garcia (5-12 Math) 1341-4000-4012 Mrs. Newman (k-4 Talk-it-out assistant) 2563-6374-7407 Mrs. Willems (5-12 Talk-it-out assistant) 8908-6997-9000 Dr. Brains (Headmaster) 2563-0035-3526\nPart 3 A brief day, he does whatever he is told, follows the schedule, does the work. But, something that amazes James is that the food is actually YUMMY.\nHe does enjoy eating at vanilla orphanage. Normally, it is like a buffet, but a limited one. You only can have one serving of meat, 2 vegetables, a delicious main dish (e.g. cooked rice, cooked noodles …).\nBy the table, James sees students laugh at each other, talk with each other, and, from far away, he sees a little brown-white puppy is running to a girl with curly hair, and stops.\nNorman!!!!!!!!!!!!!\nIt is funny that the girl asks exactly the same thing as Dr. Brains asked:” How can you know him?” He explains the whole story why he knows Norman and asks his very own and very first question to the very first student he meets at the vanilla orphanage: “How did he get here?”.\n“Long story,” says the girl, “he first arrived here because of our save the dogs project, Calvin and I found him.”\n“And who are you? I’m James”.\n“Sorry, I forgot about that, my name is Amelia!”.\nA tall, black haired student comes and joins them. “Hi there, what’s up? I heard someone mention my name.”\n“Oh, we were just talking about the dog. Our new friend, James, gave him a name: Norman,” responds Amelia.\n“Guess what?” asked Calvin, “I taught him Chinese!”.\n“Oh interesting, show us!” says Amelia.\n“狗儿,请来一下; I told him to come.” Suddenly, Norman comes and starts running around Calvin. “你的名字叫做 Norman; I told him that his name is Norman,” says Calvin. 
The dog starts moving around in a funny way, which James feels weird about. “Oh, don’t worry about that, that’s the Funny-Brown-Hula-Stump-Wiggle-Wag-Dance that I taught him,” Says Amelia.\nPart 4 Dong, dong, dong, dong…… The school bell rings, everyone gets up to do everything they need. It’s Sunday. According to Dr. Brains, James needs to meet Dr. Brains at the grand office.\nWhen he arrives, Dr. Brains says nothing but a greeting, and he hands James a slip of paper that says:\nThe organization of Brainiacs: 52345 Brainful way, North town, CA 94780\n“What is the org…”. “Stop! I will explain everything right after!” explains Dr. Brains. “Just remember what this parchment says!”\nHe hands him a telephone, and says, “Dial 52325, when you hear a beep, dial 900. Answer every question it’s asking you.”\nHe does what he is told, then a girl’s voice says: “Welcome to the new member registration center of TOFB, or the Organization of Brainiacs. Please answer the question: What is your address? “James states the orphanage’s address. “What is your reason to join?” Dr. Brains says quietly,” Invited.”\nWho invited you?” James answers:”Dr. Brains.” “ Welcome, again, new member. Please take the blood needle that appears in front of you and use it to poke your left ring finger.” James does this, and the voice says, ”Thanks for joining! Please hang up the phone!”\n“Understand this?” Dr. Brains says, ”Let’s go!”. “But, go where?” asks James. ”T-O-F-B,” replies Dr. Brains. They walk straight into a box, where James spots a device. Dr. Brains pushes a button on the device, and suddenly, James feels dizzy. They are spinning. They spin faster and faster. Finally, he hears a pop, then, suddenly, he falls into another device which is like a poison chamber. He and Dr. Brains open the door, and he sees a small, transparent house that reads T-O-F-B.\nPart 5 They walk straight into the house, and see a small elevator that is made out of glass. 
While they walk into the elevator, James feels something is seriously wrong. First, this is a one-story building, and unlike the 5th avenue apple retail store, it has no underground floor. Second, the elevator has NO button, how can Dr. Brains go anywhere with this elevator?\nDr. Brains seems solemn, he carefully looks at the emergency speaker, then, suddenly, James hears a loud CRACK. Then the elevator starts getting darker and darker. After 5 seconds, it is not transparent anymore.\nThe elevator starts to go down deeper and deeper. Then a screen pops up.”Hello, WELCOME to the Organization of Brainiacs. Please scan your card…” says a voice. He doesn’t have a card!! He looks around to find Dr. Brains, but, he is gone!\n“Where else can he be?”James thinks,”there is no way out!”. Suddenly, smoke fills the elevator, James first doesn’t realize what it is, but suddenly, he knows it.”Oh oh!” thinks James, ”IT IS GAS!!!!!”\nChapter 2: T-O-F-B Way underground, Dr. Brains hesitates. “OOOOOOOOOOPS! I forgot James in the elevator..” , he thinks, ”and the killer gas X03-00 would be deadly.”\nHe rushes to the “hacker center”, and shouts, ”You guys! STOP the elevator! And STOP the gas! Open the doors! Clear out the gas! He is NOT a criminal!”\nEverybody freezes, and some whisper, ”Oops, x03-00 gas can knock a human out in 10 seconds”.\nPart 6 Back in the elevator, James barely has time to call the emergency. “Does Dr. Brains mean to kill me?” he thinks, ”or is this a test for me?” He has more things to worry about than that. However, the good news is that Dr.Brains and his team hurry to the elevator just in time, which is when he gets knocked off. They give him the medicine that will neutralize the effect of the gas, and then they hurry to prepare the WELCOME event of the new T-O-F-B members in this season.\nSoon after, James wakes up, safe and sound. Dr. Brains is right by him.\n“Sorry for the accident, but here, welcome to T-O-F-B”, Dr. 
Brains says with a little smile.\nThere is a little awkward moment when he and Dr. Brains both try to say something, but no sounds come out. It doesn’t last long, just a few seconds. Then Dr. Brains continues, ”The Organization of Brainiacs is a little like what you see in the movie M-I-B. We basically are the only legal group in human and alien law that can meet, communicate with, and study the aliens from outer universe. You know one of our aliens: Norman. He actually can speak Hidoeneese AND English.”\n“But what is Hidoeneese?” James asks.\n“Hidoeneese is the language of the Hidonipothan.” Dr.Brains says.\n“And what is Hidonipothan?” he asks, again.\n“Long story short, it’s kind of an alien tribe. Later at breakfast, Norman will explain. By the way, he likes his name Norman.” Dr. Brains responds.\n“What? Breakfast? It’s already morning?” James asks.\n“Well yes, you have been knocked out by gas for almost 12 hours, and now it is 6:00 in the morning,” Dr. Brains says, ”you still can get about 3 hours rest. Everyone in T-O-F-B sleeps late and wakes up late. And one last thing, I will give you the NEW MEMBER #04 packet so you can learn more about T-O-F-B.”\nHe hands him a packet, just like the packet in the orphanage. But it is hand written.\nWelcome, new member, we are proud that you are here. As the founder of T-O-F-B, I will introduce you to the few basics of daily life.\nFirst, you all have an outside “job”, which you will still perform. Since you are a child, AS I KNOW, we will just keep you up-to-date and call you via the headphones that we will give you. We won’t interrupt your class, unless it is an emergency, I promise. You will be meeting once a month so it won’t affect any of your grades.\nSecond, in T-O-F-B, we treat any child like an adult. It means a large work load, but you can also access any part of our centre freely with your BNPS. But in some areas, we want you to have adult supervision.\nYour supervisor is:\nGrave Hono ( Dr. 
Brains, as a substitute name in the human world)\nWe will give you a map and what you should do later.\nDr. Ranboro\n9/23/2018\nPart 7 He falls asleep……… He dreams about aliens attacking the centre, and only Dr. Brains, Dr. Ranboro, Norman, Amelia, Calvin and a guy who he didn’t know survived. He thinks it’s just a dream, but what he doesn’t know is, this day is coming closer and closer.\n“Wakey, Wakey!” Dr. Brains shouts, laughing” JJJJJJJJAAAAAAMMMMMMMEEEEEEEESSSSSSSSSSSS!!!!!!!!!!!!”. James finally wakes up, and mumbles, ”What the heck in the world was this?”\nDr. Brains seems to be confused. “You didn’t recognize my voice? Wake up, Buddy! Get dressed! The welcoming party is waiting!!”.\nHe gets dressed, hurries to follow Dr. Brains, and they go outside to a “secretive” room that is labeled “G—CHECK, BNPS ROOM”. They go in, and he sees a bunch of devices that are new to him. He sits down, just as Dr. Brains ordered, and Dr. Brains brings a needle to his face, straight into his eyes. “Watch out!” James shouts. He doesn’t even have time to think, as the needle goes in and out of his eyes. Dr. Brains says, “Good, we already got the DNA, scanned the iris, scanned the brain map. Ok, 2 last things, then we are good to go!” He does a bunch of scans on James’ finger, and he enters a password into a machine. “Ok, one last thing. Print your BNPS and tattoo it to your shoulder!” Dr. Brains says. The machine reads “bring human to the tattoo station …… step 3/5”. Dr. Brains orders him to put his shoulder into a cylinder. He feels a little pressure and his shoulder pops out of the machine. He sees a little piece of metal on his shoulder and it reads ”TOFB.1029358612.JP/////////” The machine also prints out a metal card. “Don’t lose it!” Dr. Brains says, “it is your ID here!”\nThey walk out of the room and into the elevator. It is an elevator like the one in the TOFB’s entrance. The one that changes color and transparency, only much more slowly. 
When it tells him to scan the card, he knows better than to not do so. The elevator seems smart, and it asks “Homo, and James! Morning! Which level area do you want to go to?”. Dr. Brains responds, “Dining room number three, formal, both of us”. The elevator responds with a “TOFB wishes you a pleasant day!” When the door opens again, they enter a large area, like the first level of a 5 star hotel. Everything is white: people’s clothes, the ground, the staircase, the light, etc,. He sees Dr.Brains’ clothes change to white! He says, “Dr.Brains! Your outfit changed color!” “Yours did, too!” Dr. Brains responds. James looks down at his clothes. His had actually, as Dr. Brains said, changed color and texture.\nThey eat their breakfast—salmon, soup and broccoli, and Dr. Brains announces to him, “OK, now let’s do some work stuff”. They head back to the living area, and they wash themselves. Then they head to the meeting area. Norman, Dr. Ranboro and the other guy James sees in his dream are waving to James-and-Dr.Brains-in-the-black-suit-and-a-tie.\nPart 8 “So”, Dr. Ranboro says, “Welcome! Thank you for joining the organiza………?!!!\u0026gt;?*\u0026amp;%*\u0026amp;^%\u0026amp;^%%∆˙ßå˚µß∂˙”. FFF! A small arrow flies though the walls and hits Dr. Ranboro, making his words into nonsense. “å∆∆߬—å˚å!!!!!!¡¡¡¡¡¡?¿!¡……Jams…….main sq……is com…..tel..hom……nnnoor…….¡¡¡!!!???¿¿¿å∂ß˚˚˚˚∆ƒå˙”, he says. James can barely understand, but he knows one thing, they will tell him about the main sq…whatsoever.\n“Let’s jump into the topic,” Norman says. “The main sq… is actually an attack called The Main Square Rattle, or what we call TMSR. It’s started because another kind of alien, The Froakan, wants to use humans as slaves, own the TOFB AND the Hidonipothan.The only way to stop that is to get the battle-rattler and rattle it. But if The F’s got the rattler and rattle it, well, we will all freeze and do what they want, like a bunch of zombies. The state of being a zombie is called ratling. 
Sadly, there isn’t a known cure yet for ratle. But Dr. Brains is working on it! Lastly, the battle-rattler is locked in the Ratle Mountains. And the only way to enter the Ratle Mountain is by using Dr. Ranboro’s key. Otherwise, you will have very little chance to get out alive! And that’s why they shot Dr. Ranboro. As a matter of fact, the arrow is poisonous. If we don’t send him to hospital now, he will become a baby in 72 hours.”\nTalking about Dr. Ranboro, James notices Dr. Ranboro’s hair getting darker and darker from the old-man-white. They send him to the hospital about 5 minutes later.\nChapter 3:That’s called war After another ride on the “TOTP—0111”, which is the “squeeze box” to get to the North Town, they are back at Dr. Brains’ office. But something weird has happened, only students in OPH_LV # 40000 - OPH_LV # 49999 are still in the orphanage. Dr. Brains tries to find out why, but he can’t. And that’s when all of the humans in the orphanage hear a gigantic laugh coming from nowhere.”HHHAAAHHHAAAAHAHHAAA! This is your day, Homo, your death ceremony!HHAAHAHA!”\nMonths passed again……..\nPart 9 The daily live is almost the same as before, just that a part of the students in the orphanage is missing. But live is still very simple. Tasty food, friendly teachers, and visits to TOFB every other week.\nOne day, James is in his math class.\n“So when 2 is raised to the……”\n“Beep! Beep!”\nHis secret headphones from TOFB send a message request to him.\n“Beep! Beep!”\n“Didn’t that Ranboro guy say they won’t interrupt our classes?” James thought.\n“And let’s do some prob….”\n“Beep! Beep!”\nJames requests a bathroom break and answers the headphones in the boys’ restroom.\n“It’s an emergency!!! The Froakans are getting closer to the rattler!!! Help!!!! James, take Homo and get here now!” Norman cries.\nAs fast as he can, he rushes to Dr. Brains’ office, grabs Dr. 
Brains and locks him and himself into the TOTB-0111.\nAnd as fast as lightning, they are here, in the North town.\nThey rush into the elevator, he swipes his and Dr. Brains’ card and rushes to Dr. Ranboro’s office.\n“Quick! They will rattle it in like…like 20 minutes and we will all ratle!!!”, Norman hollers.\nAnd again, as fast as lightning, they get war-dressed and get into the fastest transport system in TOFB.\nAs James looks down, he is wearing a strong iron chest plate that reads ’T-O-F-B///////The Smarter one’. And on his shoulder, there is a cord which extends from his Digital ID to the chest plate. There is a screen in his chest plate that is unbreakable. There is a soft protection layer, then there is a swimming layer, then the pressure layer, an iron pad, an air supply on his side if the enemy spreads poisonous gas, and an armor on the outside.\nAmazingly, these things only weigh 1 pound and fit perfectly.\nHe is war-trained, so he knows exactly what to do with this fancy outfit. The screen is the main control, the outfit will detect the environment and change to the perfect layer.\nUploaded ate 10/25/2015 [sic.]\nPart 10 The ride seems to be long, but it’s actually only 5 minutes. They will enter the Ratle Mountains from the North End, which is the second-safest route into the mountains without Dr. Ranboro’s key.\nAnd there they are, in the Ratle Mountains. They are led by Mr. Giose, who was the other guy in his dream when he came to the TOFB the first time. The other four warriors are Norman, Dr. Ranboro, Dr. Brains and James. The first 20 miles are short and boring. Nothing happens. But after the 29th mile mark, they enter a cave.\nThe cave is dark. There are only few lights flashing. They are not worried, until they hear a scream.\n“OOOOOOO! Eeeek!”\n“Ahhhhhhhhhhhh! ZZZ! ZZZ…ZZZ..ZZZ…ZZZ…ZZZ…zzz…” The voice is getting smaller and smaller.\n“It is the sleeping spider! It will knock out a human in NO time!” Mr. 
Giose shouts.\nJames and the whole crew know what to do. They press a few buttons on their screens, and their helmets of their armor dissolve into the air. What is left behind, is the air filtering system.\n“Three! Two! One! It’s gas,” Norman says, playfully.\nDr. Brains spreads out the SSG gas, which will, hopefully, knock out the sleeping spider.\nThat wastes a LOT of time. Before they know it, they all starts to ratle.\nIt is James who feels it first. He feels extremely and uncontrollably happy. He starts running around and talking to other people in a rude way. To himself, it feels like as if he is drifting into unconsciousness.\nThen the same happens to Dr. Brains, and then Norman, followed by Mr. Giose. Luckily, Dr. Ranboro called the TOFB’s team 2 to come for help before he changes, too.\nI never knew what happened after this incident until the year of 2021. Since James was ratling, he couldn’t remember the whole year of 2020. He recovered on the day of 10/26/2021. Dr. Foster, who works at the orphanage AND at TOFB found a cure using Chinese Herbal Tea.\nWell, let’s jump into the time machine. Backward to 2014!!\nChapter 4:Childhood We jump into the time machine, and swoosh. Here we are, in the year of 2014. We are standing in front of 1623 Wesson Ave. It is a sunny day. The Peters are getting ready for a trip to Africa. James greets his uncle, who will look after the house while they are gone. Mrs. Peter is packing hastily. And Mr. Peter is bringing his computer, because, weirdly, he is starting to like CODING in BINARY CODE. Nothing more to say, so here the story goes.\nPart 11 “Hurrrrrryyyy!” Mr. Peter shouts. “Or else we will be late for the plane!”\nThe Peters hurry to the bus stand, waiting for the airport express.\nAfter about an hour ride, they finally arrive at the San Francisco International Airport.\nThey check in. And they hurry to the security check. At the security check, Mrs. Peter thinks she forgot something. 
Yes, she forgot to bring ANY medication for the disease Ebola.\n","html":"\u003ch2 id=\"foreword\"\u003eForeword\u003c/h2\u003e\n\u003cp\u003eHi there, internet traveler.\u003c/p\u003e\n\u003cp\u003eThe time is 2015/2016, I was either in 5th or 6th grade. At that time, I was barely beginning to be actually comfortable using the language of English.\u003c/p\u003e\n\u003cp\u003eOne of the ways I practiced English, which is also a habit I continue to do today, is to write. I write mostly expository prose now, but, back then, shining with childish naïvete, I decided to write a multi-part story as a means of practicing English.\u003c/p\u003e\n\u003cp\u003eAt the time, I was fortunately supported by four very helpful adults\u0026mdash;ESL instructors, teachers from the local government\u0026rsquo;s ESL program, local students, family friends\u0026mdash;who have supported me and edited this silly story as a means of helping me better my command of English.\u003c/p\u003e\n\u003cp\u003eIronically, this story is set in 2018, I think, two years after when I wrote it. It\u0026rsquo;s now 2022, almost 7 years after. Make of that what you will.\u003c/p\u003e\n\u003cp\u003eTherefore\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eNorman, an epic tale told in N parts\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWritten by yours truly, Houjun Liu, circa 2016.\u003c/p\u003e\n\u003cp\u003eEdited by: Lynne Zummo, Dorit Hahn, Susan Cole, and Jennifer Fan.\u003c/p\u003e\n\u003cp\u003eTypeset: May 10th, 2022. Menlo Park, California.\u003c/p\u003e\n\u003ch2 id=\"prologue-james-peter\"\u003ePrologue: James Peter\u003c/h2\u003e\n\u003cp\u003eOn a sunny day, in a small house at 1623 Wesson Ave, James lay on a dirty, tiny bed. Suddenly a dog was in James’ sight. James stood up, stared at the dog. It was a small, brown, white, fuzzy dog with a tiny stump. The dog walked around James’ bed, looking silly.\u003c/p\u003e\n\u003cp\u003e“Let’s call you Norman! 
It is a good name for you!”\u003c/p\u003e\n\u003cp\u003e“There is no dog allowed in my house, get’em out! RIGHT now, or I will get YOU out!” shouted Mr. Miller.\u003c/p\u003e\n\u003cp\u003e“Dude,” a voice came from James’ mind. Mr. Miller, the owner of the Wacky Hair Salon, who is James’ uncle, barged into James’ room, continuously shouting.\u003c/p\u003e\n\u003cp\u003e“Get’em out, RIGHT NOW! NOW! YOU HEAR ME?”\u003c/p\u003e\n\u003cp\u003eJames, staring at Norman, just didn’t care.\u003c/p\u003e\n\u003cp\u003eNorman seemed to not understand all this. He followed Mr. Miller to the window, and \u0026hellip; just as suddenly as he had come in, he was thrown out by Mr. Miller.\u003c/p\u003e\n\u003cp\u003eWhile Norman was wandering around, James started crying.\u003c/p\u003e\n\u003cp\u003eMonths passed…\u003c/p\u003e\n\u003ch3 id=\"part-1\"\u003ePart 1\u003c/h3\u003e\n\u003cp\u003eOn a cold winter afternoon, Mr. Miller is sending James to an orphanage as punishment for doing “bad” things. James just doesn’t understand this. He SIMPLY wants Norman to come back!\u003c/p\u003e\n\u003cp\u003eWhen they arrive, James finally realizes why he didn’t have parents. The truth is dreadful: his dad went crazy from programming in binary code.\u003c/p\u003e\n\u003cp\u003e“I will go crazy, too,” James thinks. “It is not an easy job, no sir.” His mom’s situation was even worse, for she was killed by the African disease Ebola.\u003c/p\u003e\n\u003cp\u003eHe trudges into the front building with Dr. Brains, and sees children that had been starved, gone mad, and even had been wandering around hopelessly! Many questions flew into James’ mind: Will I go crazy, too? Will I be starving, too? Will I also be wandering around like a zombie?! Feeling scared, he starts to wander, feel hungry, and starve like the other kids ……\u003c/p\u003e\n\u003cp\u003e“Wait ! NO, I can’t do that,” thought James.\u003c/p\u003e\n\u003cp\u003eDr. 
Brains takes James to walk around the orphanage, he realizes it is actually a better place to be rather than 1623 Wesson Ave. He sees cats, he sees ducks, he sees horses, he sees a playground, and he sees…\u003c/p\u003e\n\u003cp\u003eNORMAN!\u003c/p\u003e\n\u003ch3 id=\"part-2\"\u003ePart 2\u003c/h3\u003e\n\u003cp\u003eDr. Brains, who looks bewildered, is staring at him.\u003c/p\u003e\n\u003cp\u003e“How can you know him? You just arrived here!”.\u003c/p\u003e\n\u003cp\u003e“Long story…,” explains James. “I once met the dog, and he was thrown out by Mr. Miller from where I used to live.”\u003c/p\u003e\n\u003cp\u003e“So, this is PART of our orphanage. As you see, it is big. We now should thank the donor, who passed away, Dr. James Rover Peter…” There is a little pause, then Dr. Brains continues.\u003c/p\u003e\n\u003cp\u003e“Who is YOUR dad!”\u003c/p\u003e\n\u003cp\u003eThey continue walking until they get to a building labeled ‘EDU_K-4’.\u003c/p\u003e\n\u003cp\u003e“This is the K-4 grade educational multifunctional building,” explains Dr. Brains, “where you will be staying for half a year. Then you will move to this building for study.”\u003c/p\u003e\n\u003cp\u003eDr. Brains is now pointing at a building labeled ‘EX-EDU_5-12’.\u003c/p\u003e\n\u003cp\u003eThey continue to walk until they get to another building labeled ‘OPH_LV #20312’. It is a small, lovely building, much like an apartment. “This is where you will live, in room # 20312_004,” says Dr. Brains while he hands James a key. Then he gives him a packet, which reads: Vanilla orphanage grand rules and schedule.\u003c/p\u003e\n\u003cp\u003e“This is all you need, good day! I will leave you here.”\u003c/p\u003e\n\u003cp\u003eJames watches Dr. Brains until he is out of sight.\u003c/p\u003e\n\u003cp\u003eHe walks straight into the room. It looks clean, neat, like a 3-star hotel. There is a twin size bed, a desk, and a restroom. 
He sits down and starts to read the packet:\u003c/p\u003e\n\u003ch2 id=\"chapter-1-grand-rules\"\u003eChapter 1: Grand rules\u003c/h2\u003e\n\u003cp\u003eWelcome to Vanilla Orphanage! This is a place where you can enjoy yourself, explore yourself, and get prepared for the world!\u003c/p\u003e\n\u003cp\u003eBut, there MUST be some rules in our orphanage to keep you and your classmates safe.\u003c/p\u003e\n\u003cp\u003eFirst of all, you MUST not run in the front building.\u003c/p\u003e\n\u003cp\u003eSecond, no talking is allowed while a grand meeting is taking place (see chapter 2 for more info).\u003c/p\u003e\n\u003cp\u003eThird, follow your schedule all the time.\u003c/p\u003e\n\u003cp\u003eFourth, if you have an emergency, use the emergency call phone (You don’t need to dial it, it will automatically connect to Vanilla Orphanage Hospital). But if you can walk and speak normally, go to Vanilla Orphanage Hospital for more help.\u003c/p\u003e\n\u003cp\u003eFifth, the use of a regular telephone is only allowed three times a day. If your teacher calls you, it won’t count. You can only use a regular telephone for calling inside-the-orphanage friends, no outside call is allowed. To see the interior telephone numbers, see chapter 3.\u003c/p\u003e\n\u003ch2 id=\"chapter-2-grand-schedule-plus-your-personal-schedule\"\u003eChapter 2: Grand schedule + your personal schedule\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eGrand schedule\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eYour personal schedule\u003c/p\u003e\n\u003cp\u003eMeet me every OTHER Sunday at 15:00 at grand office starting 1/2/2019.\u003c/p\u003e\n\u003cp\u003eGrand meeting will take place every first day of the month at the big hall in the front building. Everyone will attend the grand meeting; it lasts the whole day.\u003c/p\u003e\n\u003cp\u003eDinner, Lunch, and Breakfast will be served at Front Building.\u003c/p\u003e\n\u003cp\u003eDr. 
Flynn (k-4 Sciences)\n4242-5000-2525\nDr. Jones (5-12 Sciences)\n2134-1000-1045\nDr. Foster (k-4 Math)\n2456-6206-6200\nMs. Garcia (5-12 Math)\n1341-4000-4012\nMrs. Newman (k-4 Talk-it-out assistant)\n2563-6374-7407\nMrs. Willems (5-12 Talk-it-out assistant)\n8908-6997-9000\nDr. Brains (Headmaster)\n2563-0035-3526\u003c/p\u003e\n\u003ch3 id=\"part-3\"\u003ePart 3\u003c/h3\u003e\n\u003cp\u003eA brief day, he does whatever he is told, follows the schedule, does the work. But, something that amazes James is that the food is actually YUMMY.\u003c/p\u003e\n\u003cp\u003eHe does enjoy eating at vanilla orphanage. Normally, it is like a buffet, but a limited one. You only can have one serving of meat, 2 vegetables, a delicious main dish (e.g. cooked rice, cooked noodles …).\u003c/p\u003e\n\u003cp\u003eBy the table, James sees students laugh at each other, talk with each other, and, from far away, he sees a little brown-white puppy is running to a girl with curly hair, and stops.\u003c/p\u003e\n\u003cp\u003eNorman!!!!!!!!!!!!!\u003c/p\u003e\n\u003cp\u003eIt is funny that the girl asks exactly the same thing as Dr. Brains asked:” How can you know him?” He explains the whole story why he knows Norman and asks his very own and very first question to the very first student he meets at the vanilla orphanage: “How did he get here?”.\u003c/p\u003e\n\u003cp\u003e“Long story,” says the girl, “he first arrived here because of our save the dogs project, Calvin and I found him.”\u003c/p\u003e\n\u003cp\u003e“And who are you? I’m James”.\u003c/p\u003e\n\u003cp\u003e“Sorry, I forgot about that, my name is Amelia!”.\u003c/p\u003e\n\u003cp\u003eA tall, black haired student comes and joins them. “Hi there, what’s up? I heard someone mention my name.”\u003c/p\u003e\n\u003cp\u003e“Oh, we were just talking about the dog. 
Our new friend, James, gave him a name: Norman,” responds Amelia.\u003c/p\u003e\n\u003cp\u003e“Guess what?” asked Calvin, “I taught him Chinese!”.\u003c/p\u003e\n\u003cp\u003e“Oh interesting, show us!” says Amelia.\u003c/p\u003e\n\u003cp\u003e“狗儿,请来一下; I told him to come.” Suddenly, Norman comes and starts running around Calvin. “你的名字叫做 Norman; I told him that his name is Norman,” says Calvin. The dog starts moving around in a funny way, which James feels weird about. “Oh, don’t worry about that, that’s the Funny-Brown-Hula-Stump-Wiggle-Wag-Dance that I taught him,” Says Amelia.\u003c/p\u003e\n\u003ch3 id=\"part-4\"\u003ePart 4\u003c/h3\u003e\n\u003cp\u003eDong, dong, dong, dong…… The school bell rings, everyone gets up to do everything they need. It’s Sunday. According to Dr. Brains, James needs to meet Dr. Brains at the grand office.\u003c/p\u003e\n\u003cp\u003eWhen he arrives, Dr. Brains says nothing but a greeting, and he hands James a slip of paper that says:\u003c/p\u003e\n\u003cp\u003eThe organization of Brainiacs: 52345 Brainful way, North town, CA 94780\u003c/p\u003e\n\u003cp\u003e“What is the org…”. “Stop! I will explain everything right after!” explains Dr. Brains. “Just remember what this parchment says!”\u003c/p\u003e\n\u003cp\u003eHe hands him a telephone, and says, “Dial 52325, when you hear a beep, dial 900. Answer every question it’s asking you.”\u003c/p\u003e\n\u003cp\u003eHe does what he is told, then a girl’s voice says: “Welcome to the new member registration center of TOFB, or the Organization of Brainiacs. Please answer the question: What is your address? “James states the orphanage’s address. “What is your reason to join?” Dr. Brains says quietly,” Invited.”\u003c/p\u003e\n\u003cp\u003eWho invited you?” James answers:”Dr. Brains.” “ Welcome, again, new member. Please take the blood needle that appears in front of you and use it to poke your left ring finger.” James does this, and the voice says, ”Thanks for joining! 
Please hang up the phone!”\u003c/p\u003e\n\u003cp\u003e“Understand this?” Dr. Brains says, ”Let’s go!”. “But, go where?” asks James. ”T-O-F-B,” replies Dr. Brains. They walk straight into a box, where James spots a device. Dr. Brains pushes a button on the device, and suddenly, James feels dizzy. They are spinning. They spin faster and faster. Finally, he hears a pop, then, suddenly, he falls into another device which is like a poison chamber. He and Dr. Brains open the door, and he sees a small, transparent house that reads T-O-F-B.\u003c/p\u003e\n\u003ch3 id=\"part-5\"\u003ePart 5\u003c/h3\u003e\n\u003cp\u003eThey walk straight into the house, and see a small elevator that is made out of glass. While they walk into the elevator, James feels something is seriously wrong. First, this is a one-story building, and unlike the 5th avenue apple retail store, it has no underground floor. Second, the elevator has NO button, how can Dr. Brains go anywhere with this elevator?\u003c/p\u003e\n\u003cp\u003eDr. Brains seems solemn, he carefully looks at the emergency speaker, then, suddenly, James hears a loud CRACK. Then the elevator starts getting darker and darker. After 5 seconds, it is not transparent anymore.\u003c/p\u003e\n\u003cp\u003eThe elevator starts to go down deeper and deeper. Then a screen pops up.”Hello, WELCOME to the Organization of Brainiacs. Please scan your card…” says a voice. He doesn’t have a card!! He looks around to find Dr. Brains, but, he is gone!\u003c/p\u003e\n\u003cp\u003e“Where else can he be?”James thinks,”there is no way out!”. Suddenly, smoke fills the elevator, James first doesn’t realize what it is, but suddenly, he knows it.”Oh oh!” thinks James, ”IT IS GAS!!!!!”\u003c/p\u003e\n\u003ch2 id=\"chapter-2-t-o-f-b\"\u003eChapter 2: T-O-F-B\u003c/h2\u003e\n\u003cp\u003eWay underground, Dr. Brains hesitates. “OOOOOOOOOOPS! 
I forgot James in the elevator..” , he thinks, ”and the killer gas X03-00 would be deadly.”\u003c/p\u003e\n\u003cp\u003eHe rushes to the “hacker center”, and shouts, ”You guys! STOP the elevator! And STOP the gas! Open the doors! Clear out the gas! He is NOT a criminal!”\u003c/p\u003e\n\u003cp\u003eEverybody freezes, and some whisper, ”Oops, x03-00 gas can knock a human out in 10 seconds”.\u003c/p\u003e\n\u003ch3 id=\"part-6\"\u003ePart 6\u003c/h3\u003e\n\u003cp\u003eBack in the elevator, James barely has time to call the emergency. “Does Dr. Brains mean to kill me?” he thinks, ”or is this a test for me?” He has more things to worry about than that. However, the good news is that Dr.Brains and his team hurry to the elevator just in time, which is when he gets knocked off. They give him the medicine that will neutralize the effect of the gas, and then they hurry to prepare the WELCOME event of the new T-O-F-B members in this season.\u003c/p\u003e\n\u003cp\u003eSoon after, James wakes up, safe and sound. Dr. Brains is right by him.\u003c/p\u003e\n\u003cp\u003e“Sorry for the accident, but here, welcome to T-O-F-B”, Dr. Brains says with a little smile.\u003c/p\u003e\n\u003cp\u003eThere is a little awkward moment when he and Dr. Brains both try to say something, but no sounds come out. It doesn’t last long, just a few seconds. Then Dr. Brains continues, ”The Organization of Brainiacs is a little like what you see in the movie M-I-B. We basically are the only legal group in human and alien law that can meet, communicate with, and study the aliens from outer universe. You know one of our aliens: Norman. He actually can speak Hidoeneese AND English.”\u003c/p\u003e\n\u003cp\u003e“But what is Hidoeneese?” James asks.\u003c/p\u003e\n\u003cp\u003e“Hidoeneese is the language of the Hidonipothan.” Dr.Brains says.\u003c/p\u003e\n\u003cp\u003e“And what is Hidonipothan?” he asks, again.\u003c/p\u003e\n\u003cp\u003e“Long story short, it’s kind of an alien tribe. 
Later at breakfast, Norman will explain. By the way, he likes his name Norman.” Dr. Brains responds.\u003c/p\u003e\n\u003cp\u003e“What? Breakfast? It’s already morning?” James asks.\u003c/p\u003e\n\u003cp\u003e“Well yes, you have been knocked out by gas for almost 12 hours, and now it is 6:00 in the morning,” Dr. Brains says, ”you still can get about 3 hours rest. Everyone in T-O-F-B sleeps late and wakes up late. And one last thing, I will give you the NEW MEMBER #04 packet so you can learn more about T-O-F-B.”\u003c/p\u003e\n\u003cp\u003eHe hands him a packet, just like the packet in the orphanage. But it is hand written.\u003c/p\u003e\n\u003cp\u003eWelcome, new member, we are proud that you are here. As the founder of T-O-F-B, I will introduce you to the few basics of daily life.\u003c/p\u003e\n\u003cp\u003eFirst, you all have an outside “job”, which you will still perform. Since you are a child, AS I KNOW, we will just keep you up-to-date and call you via the headphones that we will give you. We won’t interrupt your class, unless it is an emergency, I promise. You will be meeting once a month so it won’t affect any of your grades.\u003c/p\u003e\n\u003cp\u003eSecond, in T-O-F-B, we treat any child like an adult. It means a large work load, but you can also access any part of our centre freely with your BNPS. But in some areas, we want you to have adult supervision.\u003c/p\u003e\n\u003cp\u003eYour supervisor is:\u003c/p\u003e\n\u003cp\u003eGrave Hono ( Dr. Brains, as a substitute name in the human world)\u003c/p\u003e\n\u003cp\u003eWe will give you a map and what you should do later.\u003c/p\u003e\n\u003cp\u003eDr. Ranboro\u003c/p\u003e\n\u003cp\u003e9/23/2018\u003c/p\u003e\n\u003ch3 id=\"part-7\"\u003ePart 7\u003c/h3\u003e\n\u003cp\u003eHe falls asleep……… He dreams about aliens attacking the centre, and only Dr. Brains, Dr. Ranboro, Norman, Amelia, Calvin and a guy who he didn’t know survived. 
He thinks it’s just a dream, but what he doesn’t know is, this day is coming closer and closer.\u003c/p\u003e\n\u003cp\u003e“Wakey, Wakey!” Dr. Brains shouts, laughing” JJJJJJJJAAAAAAMMMMMMMEEEEEEEESSSSSSSSSSSS!!!!!!!!!!!!”. James finally wakes up, and mumbles, ”What the heck in the world was this?”\u003c/p\u003e\n\u003cp\u003eDr. Brains seems to be confused. “You didn’t recognize my voice? Wake up, Buddy! Get dressed! The welcoming party is waiting!!”.\u003c/p\u003e\n\u003cp\u003eHe gets dressed, hurries to follow Dr. Brains, and they go outside to a “secretive” room that is labeled “G—CHECK, BNPS ROOM”. They go in, and he sees a bunch of devices that are new to him. He sits down, just as Dr. Brains ordered, and Dr. Brains brings a needle to his face, straight into his eyes. “Watch out!” James shouts. He doesn’t even have time to think, as the needle goes in and out of his eyes. Dr. Brains says, “Good, we already got the DNA, scanned the iris, scanned the brain map. Ok, 2 last things, then we are good to go!” He does a bunch of scans on James’ finger, and he enters a password into a machine. “Ok, one last thing. Print your BNPS and tattoo it to your shoulder!” Dr. Brains says. The machine reads “bring human to the tattoo station …… step 3/5”. Dr. Brains orders him to put his shoulder into a cylinder. He feels a little pressure and his shoulder pops out of the machine. He sees a little piece of metal on his shoulder and it reads ”TOFB.1029358612.JP/////////” The machine also prints out a metal card. “Don’t lose it!” Dr. Brains says, “it is your ID here!”\u003c/p\u003e\n\u003cp\u003eThey walk out of the room and into the elevator. It is an elevator like the one in the TOFB’s entrance. The one that changes color and transparency, only much more slowly. When it tells him to scan the card, he knows better than to not do so. The elevator seems smart, and it asks “Homo, and James! Morning! Which level area do you want to go to?”. Dr. 
Brains responds, “Dining room number three, formal, both of us”. The elevator responds with a “TOFB wishes you a pleasant day!” When the door opens again, they enter a large area, like the first level of a 5 star hotel. Everything is white: people’s clothes, the ground, the staircase, the light, etc,. He sees Dr.Brains’ clothes change to white! He says, “Dr.Brains! Your outfit changed color!” “Yours did, too!” Dr. Brains responds. James looks down at his clothes. His had actually, as Dr. Brains said, changed color and texture.\u003c/p\u003e\n\u003cp\u003eThey eat their breakfast—salmon, soup and broccoli, and Dr. Brains announces to him, “OK, now let’s do some work stuff”. They head back to the living area, and they wash themselves. Then they head to the meeting area. Norman, Dr. Ranboro and the other guy James sees in his dream are waving to James-and-Dr.Brains-in-the-black-suit-and-a-tie.\u003c/p\u003e\n\u003ch3 id=\"part-8\"\u003ePart 8\u003c/h3\u003e\n\u003cp\u003e“So”, Dr. Ranboro says, “Welcome! Thank you for joining the organiza………?!!!\u0026gt;?*\u0026amp;%*\u0026amp;^%\u0026amp;^%%∆˙ßå˚µß∂˙”. FFF! A small arrow flies though the walls and hits Dr. Ranboro, making his words into nonsense. “å∆∆߬—å˚å!!!!!!¡¡¡¡¡¡?¿!¡……Jams…….main sq……is com…..tel..hom……nnnoor…….¡¡¡!!!???¿¿¿å∂ß˚˚˚˚∆ƒå˙”, he says. James can barely understand, but he knows one thing, they will tell him about the main sq…whatsoever.\u003c/p\u003e\n\u003cp\u003e“Let’s jump into the topic,” Norman says. “The main sq… is actually an attack called The Main Square Rattle, or what we call TMSR. It’s started because another kind of alien, The Froakan, wants to use humans as slaves, own the TOFB AND the Hidonipothan.The only way to stop that is to get the battle-rattler and rattle it. But if The F’s got the rattler and rattle it, well, we will all freeze and do what they want, like a bunch of zombies. The state of being a zombie is called ratling. Sadly, there isn’t a known cure yet for ratle. But Dr. 
Brains is working on it! Lastly, the battle-rattler is locked in the Ratle Mountains. And the only way to enter the Ratle Mountain is by using Dr. Ranboro’s key. Otherwise, you will have very little chance to get out alive! And that’s why they shot Dr. Ranboro. As a matter of fact, the arrow is poisonous. If we don’t send him to hospital now, he will become a baby in 72 hours.”\u003c/p\u003e\n\u003cp\u003eTalking about Dr. Ranboro, James notices Dr. Ranboro’s hair getting darker and darker from the old-man-white. They send him to the hospital about 5 minutes later.\u003c/p\u003e\n\u003ch2 id=\"chapter-3-that-s-called-war\"\u003eChapter 3:That’s called war\u003c/h2\u003e\n\u003cp\u003eAfter another ride on the “TOTP—0111”, which is the “squeeze box” to get to the North Town, they are back at Dr. Brains’ office. But something weird has happened, only students in OPH_LV # 40000 - OPH_LV # 49999 are still in the orphanage. Dr. Brains tries to find out why, but he can’t. And that’s when all of the humans in the orphanage hear a gigantic laugh coming from nowhere.”HHHAAAHHHAAAAHAHHAAA! This is your day, Homo, your death ceremony!HHAAHAHA!”\u003c/p\u003e\n\u003cp\u003eMonths passed again……..\u003c/p\u003e\n\u003ch3 id=\"part-9\"\u003ePart 9\u003c/h3\u003e\n\u003cp\u003eThe daily live is almost the same as before, just that a part of the students in the orphanage is missing. But live is still very simple. Tasty food, friendly teachers, and visits to TOFB every other week.\u003c/p\u003e\n\u003cp\u003eOne day, James is in his math class.\u003c/p\u003e\n\u003cp\u003e“So when 2 is raised to the……”\u003c/p\u003e\n\u003cp\u003e“Beep! Beep!”\u003c/p\u003e\n\u003cp\u003eHis secret headphones from TOFB send a message request to him.\u003c/p\u003e\n\u003cp\u003e“Beep! Beep!”\u003c/p\u003e\n\u003cp\u003e“Didn’t that Ranboro guy say they won’t interrupt our classes?” James thought.\u003c/p\u003e\n\u003cp\u003e“And let’s do some prob….”\u003c/p\u003e\n\u003cp\u003e“Beep! 
Beep!”\u003c/p\u003e\n\u003cp\u003eJames requests a bathroom break and answers the headphones in the boys’ restroom.\u003c/p\u003e\n\u003cp\u003e“It’s an emergency!!! The Froakans are getting closer to the rattler!!! Help!!!! James, take Homo and get here now!” Norman cries.\u003c/p\u003e\n\u003cp\u003eAs fast as he can, he rushes to Dr. Brains’ office, grabs Dr. Brains and locks him and himself into the TOTB-0111.\u003c/p\u003e\n\u003cp\u003eAnd as fast as lightning, they are here, in the North town.\u003c/p\u003e\n\u003cp\u003eThey rush into the elevator, he swipes his and Dr. Brains’ card and rushes to Dr. Ranboro’s office.\u003c/p\u003e\n\u003cp\u003e“Quick! They will rattle it in like…like 20 minutes and we will all ratle!!!”, Norman hollers.\u003c/p\u003e\n\u003cp\u003eAnd again, as fast as lightning, they get war-dressed and get into the fastest transport system in TOFB.\u003c/p\u003e\n\u003cp\u003eAs James looks down, he is wearing a strong iron chest plate that reads ’T-O-F-B///////The Smarter one’. And on his shoulder, there is a cord which extends from his Digital ID to the chest plate. There is a screen in his chest plate that is unbreakable. There is a soft protection layer, then there is a swimming layer, then the pressure layer, an iron pad, an air supply on his side if the enemy spreads poisonous gas, and an armor on the outside.\u003c/p\u003e\n\u003cp\u003eAmazingly, these things only weigh 1 pound and fit perfectly.\u003c/p\u003e\n\u003cp\u003eHe is war-trained, so he knows exactly what to do with this fancy outfit. The screen is the main control, the outfit will detect the environment and change to the perfect layer.\u003c/p\u003e\n\u003cp\u003eUploaded ate 10/25/2015 \u003cem\u003e[sic.]\u003c/em\u003e\u003c/p\u003e\n\u003ch3 id=\"part-10\"\u003ePart 10\u003c/h3\u003e\n\u003cp\u003eThe ride seems to be long, but it’s actually only 5 minutes. 
They will enter the Ratle Mountains from the North End, which is the second-safest route into the mountains without Dr. Ranboro’s key.\u003c/p\u003e\n\u003cp\u003eAnd there they are, in the Ratle Mountains. They are led by Mr. Giose, who was the other guy in his dream when he came to the TOFB the first time. The other four warriors are Norman, Dr. Ranboro, Dr. Brains and James. The first 20 miles are short and boring. Nothing happens. But after the 29th mile mark, they enter a cave.\u003c/p\u003e\n\u003cp\u003eThe cave is dark. There are only few lights flashing. They are not worried, until they hear a scream.\u003c/p\u003e\n\u003cp\u003e“OOOOOOO! Eeeek!”\u003c/p\u003e\n\u003cp\u003e“Ahhhhhhhhhhhh! ZZZ! ZZZ…ZZZ..ZZZ…ZZZ…ZZZ…zzz…” The voice is getting smaller and smaller.\u003c/p\u003e\n\u003cp\u003e“It is the sleeping spider! It will knock out a human in NO time!” Mr. Giose shouts.\u003c/p\u003e\n\u003cp\u003eJames and the whole crew know what to do. They press a few buttons on their screens, and their helmets of their armor dissolve into the air. What is left behind, is the air filtering system.\u003c/p\u003e\n\u003cp\u003e“Three! Two! One! It’s gas,” Norman says, playfully.\u003c/p\u003e\n\u003cp\u003eDr. Brains spreads out the SSG gas, which will, hopefully, knock out the sleeping spider.\u003c/p\u003e\n\u003cp\u003eThat wastes a LOT of time. Before they know it, they all starts to ratle.\u003c/p\u003e\n\u003cp\u003eIt is James who feels it first. He feels extremely and uncontrollably happy. He starts running around and talking to other people in a rude way. To himself, it feels like as if he is drifting into unconsciousness.\u003c/p\u003e\n\u003cp\u003eThen the same happens to Dr. Brains, and then Norman, followed by Mr. Giose. Luckily, Dr. Ranboro called the TOFB’s team 2 to come for help before he changes, too.\u003c/p\u003e\n\u003cp\u003eI never knew what happened after this incident until the year of 2021. 
Since James was ratling, he couldn’t remember the whole year of 2020. He recovered on the day of 10/26/2021. Dr. Foster, who works at the orphanage AND at TOFB found a cure using Chinese Herbal Tea.\u003c/p\u003e\n\u003cp\u003eWell, let’s jump into the time machine. Backward to 2014!!\u003c/p\u003e\n\u003ch2 id=\"chapter-4-childhood\"\u003eChapter 4:Childhood\u003c/h2\u003e\n\u003cp\u003eWe jump into the time machine, and swoosh. Here we are, in the year of 2014. We are standing in front of 1623 Wesson Ave. It is a sunny day. The Peters are getting ready for a trip to Africa. James greets his uncle, who will look after the house while they are gone. Mrs. Peter is packing hastily. And Mr. Peter is bringing his computer, because, weirdly, he is starting to like CODING in BINARY CODE. Nothing more to say, so here the story goes.\u003c/p\u003e\n\u003ch3 id=\"part-11\"\u003ePart 11\u003c/h3\u003e\n\u003cp\u003e“Hurrrrrryyyy!” Mr. Peter shouts. “Or else we will be late for the plane!”\u003c/p\u003e\n\u003cp\u003eThe Peters hurry to the bus stand, waiting for the airport express.\u003c/p\u003e\n\u003cp\u003eAfter about an hour ride, they finally arrive at the San Francisco International Airport.\u003c/p\u003e\n\u003cp\u003eThey check in. And they hurry to the security check. At the security check, Mrs. Peter thinks she forgot something. Yes, she forgot to bring ANY medication for the disease Ebola.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnorman_an_epic_tale_in_n_parts/","tags":null,"title":"Norman: An Epic Tale in N Parts"},{"categories":null,"contents":"\u0026ldquo;Doing NSM analysis is a demanding process and there is no mechanical procedure for it. 
Published explications have often been through a dozen or more iterations over several months\u0026rdquo; \u0026mdash; (Heine, Narrog, and Goddard 2015)\nApproach and XD Introduction and Theory The Natural Semantic Metalanguage (NSM) approach (Wierzbicka 1974) is a long-standing hypothetical theory in structural semantics which claims that all human languages share a common set of primitive lexical units\u0026mdash;usually words, but, in some languages, short connected phrases\u0026mdash;through which all other words in each language can be defined.\nFor NSM to hold, two main results must be demonstrated. (Heine, Narrog, and Goddard 2015) The theory\u0026rsquo;s validity hinges, first, upon the existence of semantic primes\u0026mdash;a series of primitive lexical units that are both indefinable via other words in the same language and universally lexicalized across all languages. Second, the theory\u0026rsquo;s confirmation requires the ability to perform \u0026ldquo;reductive paraphrasing\u0026rdquo;, the process of defining all other words in a language with respect to the universal semantic primes\u0026rsquo; manifestations in that language.\nIf proven as fact, the NSM theory has far-reaching implications for the long-standing (footnote: not to mention often personally fierce) conflict between the newer theories of generative semantics\u0026mdash;where structure of language is created in support of meaning\u0026mdash;and Noam Chomsky\u0026rsquo;s transformational generative syntax\u0026mdash;where meaning is filled to precomputed structure, which NSM suggests (Harris 2021).\nThe difficulty of forming adequate investigations in the area of NSM is due to the theory itself being exceedingly hard to falsify\u0026mdash;the principal method through which NSM is demonstrated is via the manual (i.e. non-standardized) lexicalization of semantic primes and a partial demonstration of their relations (Geeraerts 2009) to other words in the language. 
Whenever one irregularity in the theory is identified (Bohnemeyer 1998), the proponents of the theory simply respond with another update to the (non standardized) set of reductive paraphrasing rules to account for the irregularity (NO_ITEM_DATA:goddard1998bad.)\nYet, there are repeated empirical (again, non-standardized) confirmations of the existence of the original set (Wierzbicka 1974) of semantic primes in other languages (Chappell 2002; Peeters 1994; Travis 2002); there are also numerous demonstrations of the proposed applications (Goddard 2012) of the theory in structural semantics. These facts has therefore maintained the relevance of NSM in current linguistic study but rendered the theory without a very clear path forward. Due to this reason, recent research has placed larger focus on functional (cognitive linguistical) theories (Divjak, Levshina, and Klavan 2016) and largely overlooked structuralist arguments like the NSM.\nBroad Goals and Approach To complement the very large body of work already in the identification of semantic primes for NSM in numerous languages, we aim in this project to investigate the process of reductive paraphrasing to provide a baseline evaluation of the feasibility of NSM as a theory. The approach proposed below is intended to very generally test the practicality of the act of reductive paraphrasing from the published set of primes: whether paraphrasing from those primes is even broadly possible across the entire lexicon of the few languages for which it is purported to be possible. This test remains needed because, quite counter-intuitively, metalanguage theorists have been constructing lexicalizations for non-prime words on an \u0026ldquo;as-needed\u0026rdquo; basis such as in (Wierzbicka 2007). No lexicon-wide demonstrations of lexicalizability has been performed (i.e. 
reductive paraphrasing all words down to the primes) as the current approach of manual definition of words from primes is significantly time-consuming and requires careful consideration of NSM\u0026rsquo;s semantic grammar between primes.\nWe aim to perform a lexicon-wide test of reductive paraphrasing computationally via much newer approaches in computational linguistics, specifically model-based Natural Language Processing (NLP).\nIn order to isolate the exact problem of reductive paraphrasing, we first will have to highlight a few key assumptions by the NSM theory and therefore this project.\nThe semantic metalanguage theory is itself built on the assumption that \u0026ldquo;each language is its own metalanguage\u0026rdquo; (Goddard 2002)\u0026mdash;that human languages are broadly lexicalizable by itself (i.e. one can write an English dictionary by only using English.) We believe that the examination of this assumption is not within scope of the study and\u0026mdash;given it is fairly universally true from a practical standpoint (i.e. English dictionaries exist)\u0026mdash;we will take it as fact. We will use this fact further as the control for the feasibility of the approach, as discussed in the section below.\nThe remaining assumptions of NSM to be tested here, then, are that 1) semantic primes exist and 2) the original set of NSM primes published (Wierzbicka 1974) (and in subsequent studies in various other languages highlighted before) are correct and, through reductive paraphrase, can lexicalize every word in the lexicon.\nAims and Experimental Design In this study, we aim to develop a computational protocol for lexicon-wide testing of the possibility of performing reductive paraphrasing for every word in the lexicon given a set of purported semantic primes. 
Practically, this means that we are trying to create a model to test whether all words in a language are lexicalizable when restricted to only using a chosen subset of primes in the same language.\nTo create a truly replicable test for lexicalizability under restriction, we turn to probabilistic NLP approaches. We propose the following metric for lexicalizability: a word is \u0026ldquo;lexicalizable\u0026rdquo; under some set of semantic primes if there is a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\nUnder this model, all words in the lexicon are lexicalizable by the set of primes being tested if there is a lossless projection of the bases of the lexical space to the primes\u0026rsquo; latent embedding space.\nThat is, given we have a latent embedding space of \\(n\\) semantic primes \\(P^n\\) and some lexicon \\(W\\) with \\(m\\) words, we aim to identify a linear mapping \\(M\\) such that:\n\\begin{equation} Mp = e_{W_j}\\ |\\ p \\in P^n, \\forall j=1\\ldots m \\end{equation}\nwhere, \\(e_{W_j}\\) is the \\(j\\) th standard basis of \\(W\\) (i.e. \\(j\\) th word in the lexicon.)\nThis projection is not, in principle, impossible. In the high-dimensional space of the entire lexicon, individual lexicalized words represent only the basis vectors of the space (and indeed in one-hot encodings for deep learning they are shown as the standard-basis of the lexicon-wide space.) 
Whereas in the lower-dimensional subspace of primes, a linear combination of primes can be used to represent each lexicalized word in the full lexicon.\nSuccess in identifying a feasible \\(M \\in \\mathcal{L}(P, W)\\) for a given \\(P\\) and \\(W\\) indicates the feasibility of finding a linear combination in \\(P\\) which maps to all \\(w \\in W\\), which means reductive paraphrase of \\(w\\) to a set of primes in \\(P\\) is possible as there is a direct \u0026ldquo;translation\u0026rdquo; (namely, \\(W\\)) from \\(P\\) to \\(W\\).\nTo actually compute \\(W\\) given \\(P\\) and \\(M\\), we leverage the well-established Transformer encoder-decoder architecture for language modeling (Vaswani et al. 2017). Furthermore, we frame the problem as one of unsupervised multi-lingual translation without alignments.\nThe basis of the model proposed to be used to obtain \\(W\\) is (Artetxe et al. 2018), a unsupervised multi-lingual translation model.\nFigure from (Artetxe et al. 2018).\nAs we are performing the task with word embeddings, not sentences like that of (Artetxe et al. 2018), the cross-attention lookup vector will serve no purpose (be \\(0\\)) (Niu, Zhong, and Yu 2021) and hence removed.\nFor the sake of standardization, we will call \\(P\\) the primary language/lexicon \\(L1\\), and \\(W\\) the second language/lexicon \\(L2\\). The basic hypothesis provided by (Artetxe et al. 2018) is that, through alternating samples of \\(L1\\) and \\(L2\\) through the model against their corresponding decoders using a shared encoder and separate decoders, the shared encoder is trained to perform the task of autoencoding for both lexicons at once. Therefore, at prediction time, to get the \u0026ldquo;translation\u0026rdquo; of an input, one simply applies the decoder of the desired lexicon to obtain a result.\nDuring training, the input to the shared encoder can either be a word from either \\(P\\) or $W$\u0026mdash;sampled with equal probability. 
If the input is from \\(P\\), we connect the output of the shared encoder with the \\(L1\\) decoder and train with the objective of recovering the input. Essentially, we are using the model as an alternate method of training a variational auto-encoder (Klys, Snell, and Zemel 2018) with alternating decoders given the lexicon being analyzed.\nThis task is trivial if the embedding space after the shared encoder is exactly as wide as both lexicons. However, we will restrict the output dimension of the shared encoder to \\(dim(P)\\) which after training we will call the latent embedding space of \\(L1\\); this name is verified and justified as a part of the feasibility check below.\nWe will also use the backtranslation mechanism proposed by (Artetxe et al. 2018) during training: whereby the autoencoded output from \\(L1\\) is used as target for the same input as \\(L2\\) (as well as the reverse), mimicking the process of translation.\nAfter training, the \\(L2\\) decoder would then be the candidate \\(W\\), mapping from the (proposed) latent embedding space of \\(P\\) to the lexicon \\(W\\).\nFollowing both (Artetxe et al. 2018; Conneau and Lample 2019) we will use cross-entropy as the objective function of training.\nFeasibility Checkpoint We first need to show that, as expected, the model architecture proposed above\u0026mdash;upon convergence\u0026mdash;will create a latent embedding for \\(L1\\) after encoding if the output size for encoding is \\(dim(L1)\\) (defined to be equal to \\(dim(P)\\)).\nA trivial test of whether the encoding output is desirably the embedding space of \\(L1\\) is that, through training with a toy mapping \\(P=W=L1=L2\\), we would expect both decoders to be a one-to-one mapping that simply copies the input. 
That is, after training with \\(P=W\\), we should see that activating one input in the shared post-encoding space should activate one or close to one feature only in both decoder\u0026rsquo;s output space.\nNumerically, this means that the result obtained from taking the mean entropy of both outputs given a singular input activation should be statistically insignificantly different from \\(0\\).\nThat is, we expect that given trained decoders \\(L_1\\) and \\(L_2\\), and standard bases of \\(W=P\\) named \\(e\\), we should see that:\n\\begin{equation} \\frac{\\log(L_1e_j) + \\log(L_2e_j)}{2} \\approx 0: \\forall j = 1\\ldots dim(W) \\end{equation}\nWe expect this result because, through gradient-descent, the quickest minima reachable to capture variation in the input perfectly is the copying task; therefore, we should expect here that if the post-encoding distribution is the same distribution as the input, the model\u0026rsquo;s decoders will fit to the copying task. If the post-encoding distribution is different from the input, the model\u0026rsquo;s decoders would then have to actually perform nontrivial mappings to achieve the desired autoencoding result.\nCheckpoint 2 + Hypothesis 1 The following is the first novel result that we can show with the new architecture. We first hypothesize that the model should converge when training to the target of the (already linguistically accepted, as aforementioned) result that English words are themselves a metalanguage.\nFor \\(dim(W)\\) iterations (similar to (Webb et al. 2011)), we will leave a word chosen at random out of the lexicon of \\(P\\). This operation results in \\(dim(P) = dim(W)-1\\). We will then train the model until a local minima is reached and measure convergence.\nTo test this hypothesis, we will measure the cross-entropy performance of \\(L2\\) decoder upon the word that is left out. 
The resulting loss should be statistically insignificantly different from \\(0\\) if the word is successfully lexicalized via the \\(dim(W)-1\\) other words not left out in \\(P\\) in the latent embedding space after encoding.\nIf the hypothesis is not successful, the model cannot converge even on a large subset of the entire lexicon, much less in the limited subset of the 60-word NSM-proposed metalanguage; it is therefore imperative not to continue the study unless convergence at this point can be shown. Importantly, however, failures in this step does not show any claims about reductive paraphrasing as we are simply benchmarking the model against a control linguistic assumption we discussed earlier.\nIn any case, it would be valuable at this point to again perform analyze for post-encoding output to observe any reductive paraphrasing behavior.\nHypothesis 2 At this point, we will set the lexicons to the sets we are actually testing. We will set \\(P\\) to be the list of semantic primes established by (Heine, Narrog, and Goddard 2015), and \\(W\\) to the English lexicon.\nShould lexicalization of all of the English lexicon via the semantic primes only be possible, this model should again converge after training with cross-entropy inappreciably different from \\(0\\). This result would indicate the existence of a \\(W\\) (i.e. \\(L2\\) decoder), indicating the possibility of lexicon-wide reductive paraphrasing.\nInstitution and Experience The actual protocol proposed as a part of this study (namely, creating, training, and calculating metrics from the autoencoder) is a technical concept taught as a part of the regular curriculum of Advanced Machine Learning at Nueva; however, expertise and mentorship may still be required when implementing a complex model topology and training mechanism like the one proposed. 
The open-ended project structure of the Advanced Machine Learning course supports and sometimes necessitates implementing a model like the one proposed with the help of the CS faculty. Therefore, if additional mentorship is indeed required, there exists support available within the institution.\nThe more difficult skill-set to capture is the knowledge regarding the theories of NSM and the field of structuralist linguistics in general. As of writing, we are not aware of any students who have an active research interest in traditional linguistics; however, this knowledge constitutes a far more insignificant portion of the actual mechanics of the project and is more importantly very easily taught. Mentorship is also available here from members of the Mathematics and CS faculty with prior research interest in computational linguistics.\nIn terms of equipment, the most important tool required in working with a large-scale neural network is a matrix-mathematics accelerator; this often takes the form of a consumer graphics card and typical desktop computing setup. For the Machine Learning course taught at Nueva, Google\u0026rsquo;s Colab (and their free graphics card addition) is frequently used to address this need and would at a minimum suffice here. Also, it is based on the personal experience of the author, though by no means definite, that a large selection of students at Nueva have comparable hardware for training available at home.\nProvided network access to the computing accelerator, this experiment can be done under any setting and definitely does not necessitate the use of the biology lab.\nImpact Academic Significance Within the short term, this experiment provides two major results. First, it establishes the use of a bifurcated unsupervised encoder-decoder translation model like that proposed by (Artetxe et al. 
2018) as a Conditional Variational Autoencoder (CVAE) (Klys, Snell, and Zemel 2018) with the ability to define and train the hidden latent representation after encoding. Although traditional CVAEs are frequently more suited for most output-aware generation tasks, this new scheme supports the direct influence of the latent representations of the encoder instead of using an additional input to both the encoder and decoder to influence such representations, like in traditional CVAEs. This difference is significant for cases where dimensional projection is needed but the content of the latent representation itself is also relevant to the study.\nOf course, the short-term result also includes the direct result of the second tested hypothesis: a systemic, lexicon-wide evaluation of the feasibility of reductive paraphrasing. The study is to develop a computational protocol for lexicon-wide reductive paraphrasing by creating a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\nIf both initial metrics succeed and the third, final reduction step with actual semantic primes fails, the result would indicate an inability to create such a lossless mapping, and therefore raise concerns about the lexicon-wide applicability of the reductive paraphrasing on the set of published semantic primes. That is, there is not even a locally convergent linear combination of primes that will generally describe all of the lexicon, despite the hypothesis by NSM theorists. This result will be highly impactful for NSM theory in general which necessitates the possibility of reductive paraphrase (Geeraerts 2009) (Vanhatalo, Tissari, and Idström, n.d.).\nOn the long term, demonstrations of reductive paraphrasing have wide-reaching implications into NSM theory in general (Heine, Narrog, and Goddard 2015; Geeraerts 2009), and the field of language learning. 
The paraphrasing capacity of the proposed embedding would hypothetically be able to create a semantic mapping between a set of words to one other word; in this way, it is not infeasible to create a language-learning tool with continually larger embedding size to slowly create a larger lexicon in the target user. Early results (Sharma and Goyal 2021) have shown a possible application of such an approach, using supervised machine translation techniques.\nLearning and Teaching One to two students, along with a facilitator, would be an ideal size for this experiment. Primarily, the three main roles will include model engineering, training and validation, and model ablation and testing. The last role requires the most amount of traditional linguistics knowledge as the student\u0026rsquo;s role would be to connect the weights in the model to the applicable theories being tested.\nThe study proposed is an extremely conventional empirical Machine Learning/NLP study. From a pedagogical standpoint for XRT, this study will be a diversion from the traditional wet-lab sciences or survey-based educational/social sciences commonly produced by the lab and lead a new avenue for the Lab\u0026rsquo;s expansion. Within Nueva, empirical research into machine learning is frequently done through independent study or the Intro/Advance machine learning courses\u0026mdash;which were recently expanded due to widening interest at the Upper School.\nParticipation in this project provides its constituent students an opportunity to practice publish-quality ML/NLP in a longer-term and multi-stage project previously not possible through semester-long courses. 
Students are trained to perform model construction, data selection and cleaning, collection of model validation metrics, as well as model ablation and interpretation: important concepts in ML operations taught but not formalized in the Machine Learning course as the course exercises, while open-ended, isolate only one skill and have expected outcomes.\nGiven the demand and rate of student progression between Intro/Advanced courses in ML each year, developing a suitable approach to propagate true machine-learning research will be relevant to upwards of 30 students each year.\nIncidentally, students also get an exposure to the practice of conventional linguistics and the new trend of applying empirical research NLP back against classic semantics; however, the demand for this exact skill is likely small at Nueva.\nThough the tool used and expanded upon by this experiment is applicable to the NLP research community, it is unfortunately difficult to predict its future applications to XRT or Nueva students without seeing more expansion into the area of ML and NLP by the XRT lab.\nSafety and Ethics The following are the responses to the safety and ethics checklist.\nThis project does not satisfy any triggers of the second-expert protocol. All data needed is from a dictionary (for the English lexicon, e.g. (Fellbaum 2010)) as well as the semantic primes listed in a figure on the article (Heine, Narrog, and Goddard 2015). The data is being generated during compute. The actual compute hardware will need to be stored in either in the cloud (not on-prem), physically in the iLab, or (for personal compute hardware), in students\u0026rsquo; homes. An internet connection and a model training acceleration scheme (such as the free Google Colab) would suffice. None foreseeable See below The experiment is done on the English lexicon. It is difficult to imagine a tangible harm from the experiment. 
This study provides students with an opportunity to conduct a full research study in ML; XRT has not had this from of projects before and approval would result in a new avenue of research being conducted with XRT. However, if the project is not approved, other ML projects may subsequently surface and students can leverage those opportunities to learn about the practice of empirical ML instead. As with most machine-learning projects, it is customary and appropriate to end with a statement on ML ethics and its implications. This study is a linguistics, lexicon-scale study, and the data sourced is available generally and not subject to copyright or any known data-protection laws. The inputs to the model are combinations of English words, and the model produces singular English words. The benefits of this model involves generating new knowledge about the English lexicon and semantic theories. The only known harm of the model involves the mis-intepretation of its results, creating overreaching generalizations to semantic primality analysis or NSM theories. The model and source code can be released to the general public without broad impact.\nAcknowledgments I would like to thank Brandon Cho at Princeton University and Ted Theodosopoulos at The Nueva School for the very interesting discussion/argument that resulted in this proposal almost a year ago. I would like to thank Klint Kanopka at Stanford University for his mentorship and discussion of the overall feasibility of the approach and pointing out the path that lead to the proposed model\u0026rsquo;s basis in machine translation. Finally, I would like to thank Prof. Brian MacWhinney at Carnegie Mellon University for pointing out discourse between structuralism/functionalism during our exchanges and for his mentorship in my exploration of computational linguistics.\nReferences Artetxe, Mikel, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. “Unsupervised Neural Machine Translation,” 12. Bohnemeyer, Jurgen. 1998. 
“Temporal Reference from a Radical Pragmatics Perspective: Why Yucatec Does Not Need to Express ’after’ and ’before’.” Walter de Gruyter, Berlin/New York Berlin, New York. Chappell, Hilary. 2002. “5. The Universal Syntax of Semantic Primes in Mandarin Chinese.” In Studies in Language Companion Series, 243–322. Studies in Language Companion Series. John Benjamins Publishing Company. doi:10.1075/slcs.60.12cha. Conneau, Alexis, and Guillaume Lample. 2019. “Cross-Lingual Language Model Pretraining,” 11. Divjak, Dagmar, Natalia Levshina, and Jane Klavan. 2016. Cognitive Linguistics 27 (4): 447–63. doi:doi:10.1515/cog-2016-0095. Fellbaum, Christiane. 2010. “Wordnet.” In Theory and Applications of Ontology: Computer Applications, 231–43. Springer. Geeraerts, Dirk. 2009. “Neostructuralist Semantics.” In Theories of Lexical Semantics, 124–78. Theories of Lexical Semantics. Oxford University Press. doi:10.1093/acprof:oso/9780198700302.003.0004. Goddard, Cliff. 2002. “The Search for the Shared Semantic Core of All Languages.” In Meaning and Universal Grammar: Theory and Empirical Findings. John Benjamins Publishing Company. ———. 2012. “Semantic Primes, Semantic Molecules, Semantic Templates: Key Concepts in the NSM Approach to Lexical Typology.” Linguistics 50 (3). doi:10.1515/ling-2012-0022. Harris, Randy Allen. 2021. The Linguistics Wars: Chomsky, Lakoff, and the Battle over Deep Structure. Oxford University Press. Heine, Bernd, Heiko Narrog, and Cliff Goddard. 2015. “The Natural Semantic Metalanguage Approach.” In The Oxford Handbook of Linguistic Analysis, edited by Bernd Heine and Heiko Narrog. Oxford University Press. doi:10.1093/oxfordhb/9780199677078.013.0018. Klys, Jack, Jake Snell, and Richard Zemel. 2018. “Learning Latent Subspaces in Variational Autoencoders,” 11. Niu, Zhaoyang, Guoqiang Zhong, and Hui Yu. 2021. “A Review on the Attention Mechanism of Deep Learning.” Neurocomputing 452 (September): 48–62. doi:10.1016/j.neucom.2021.03.091. Peeters, Bert. 1994. 
“16 Semantic and Lexical Universals in French.” In Studies in Language Companion Series, 423. Studies in Language Companion Series. John Benjamins Publishing Company. doi:10.1075/slcs.25.20pee. Sharma, Prawaal, and Navneet Goyal. 2021. “Zero-Shot Reductive Paraphrasing for Digitally Semi-Literate.” In Forum for Information Retrieval Evaluation, 91–98. Travis, Catherine E. 2002. “4. La Metalengua Semántica Natural.” In Studies in Language Companion Series, 173–242. Studies in Language Companion Series. John Benjamins Publishing Company. doi:10.1075/slcs.60.11tra. Vanhatalo, Ulla, Heli Tissari, and Anna Idström. n.d. “Revisiting the Universality of Natural Semantic Metalanguage: A View through Finnish,” 28. Vaswani, Ashish, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. “Attention Is All You Need,” 11. Webb, Geoffrey I., Claude Sammut, Claudia Perlich, Tamás Horváth, Stefan Wrobel, Kevin B. Korb, William Stafford Noble, et al. 2011. “Leave-One-Out Cross-Validation.” In Encyclopedia of Machine Learning, edited by Claude Sammut and Geoffrey I. Webb, 600–601. Boston, MA: Springer US. doi:10.1007/978-0-387-30164-8_469. Wierzbicka, Anna. 1974. “Semantic Primitives.” Lingua 34 (4): 365–69. doi:10.1016/0024-3841(74)90004-7. ———. 2007. “Bodies and Their Parts: An NSM Approach to Semantic Typology.” Language Sciences 29 (1): 14–65. doi:10.1016/j.langsci.2006.07.002. NO_ITEM_DATA:goddard1998bad. ","html":"\u003cp\u003e\u0026ldquo;Doing NSM analysis is a demanding process and there is no mechanical procedure for it. 
Published explications have often been through a dozen or more iterations over several months\u0026rdquo; \u0026mdash; (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"approach-and-xd\"\u003eApproach and XD\u003c/h2\u003e\n\u003ch3 id=\"introduction-and-theory\"\u003eIntroduction and Theory\u003c/h3\u003e\n\u003cp\u003eThe Natural Semantic Metalanguage (NSM) approach (\u003ca href=\"#citeproc_bib_item_20\"\u003eWierzbicka 1974\u003c/a\u003e) is a long-standing hypothetical theory in structural semantics which claims that all human languages share a common set of primitive lexical units\u0026mdash;usually words, but, in some languages, short connected phrases\u0026mdash;through which all other words in each language can be defined.\u003c/p\u003e\n\u003cp\u003eFor NSM to hold, two main results must be demonstrated. (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e) The theory\u0026rsquo;s validity hinges, first, upon the \u003cem\u003eexistence\u003c/em\u003e of semantic primes\u0026mdash;a series of primitive lexical units both indefinable via other words in the same language but also is universally lexicalized across all languages. 
Second, the theory\u0026rsquo;s confirmation requires the ability to perform \u0026ldquo;reductive paraphrasing\u0026rdquo;, the process of defining all other words in a language with respect to the universal semantic primes\u0026rsquo; manifest in that language.\u003c/p\u003e\n\u003cp\u003eIf proven as fact, the NSM theory and its implications has reaching implications into the long-standing (footnote: not to mention often personally fierce) conflict between the newer theories of generative semantics\u0026mdash;where structure of language is created in support of meaning\u0026mdash;and Noam Chomsky\u0026rsquo;s transformational generative syntax\u0026mdash;where meaning is filled to precomputed structure, which NSM suggests (\u003ca href=\"#citeproc_bib_item_10\"\u003eHarris 2021\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eThe difficulty of forming adequate investigations in the area of NSM is due the theory itself being exceedingly hard to falsify\u0026mdash;the principle method through which NSM is demonstrated is via the manual (i.e. non-standardized) lexicalization of semantic primes and a partial demonstration of their relations (\u003ca href=\"#citeproc_bib_item_7\"\u003eGeeraerts 2009\u003c/a\u003e) to other words in the language. 
Whenever one irregularity in the theory is identified (\u003ca href=\"#citeproc_bib_item_2\"\u003eBohnemeyer 1998\u003c/a\u003e), the proponents of the theory simply respond with another update to the (non standardized) set of reductive paraphrasing rules to account for the irregularity (NO_ITEM_DATA:goddard1998bad.)\u003c/p\u003e\n\u003cp\u003eYet, there are repeated empirical (again, non-standardized) confirmations of the existence of the original set (\u003ca href=\"#citeproc_bib_item_20\"\u003eWierzbicka 1974\u003c/a\u003e) of semantic primes in other languages (\u003ca href=\"#citeproc_bib_item_3\"\u003eChappell 2002\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_14\"\u003ePeeters 1994\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_16\"\u003eTravis 2002\u003c/a\u003e); there are also numerous demonstrations of the proposed applications (\u003ca href=\"#citeproc_bib_item_9\"\u003eGoddard 2012\u003c/a\u003e) of the theory in structural semantics. These facts has therefore maintained the relevance of NSM in current linguistic study but rendered the theory without a very clear path forward. Due to this reason, recent research has placed larger focus on functional (cognitive linguistical) theories (\u003ca href=\"#citeproc_bib_item_5\"\u003eDivjak, Levshina, and Klavan 2016\u003c/a\u003e) and largely overlooked structuralist arguments like the NSM.\u003c/p\u003e\n\u003ch3 id=\"broad-goals-and-approach\"\u003eBroad Goals and Approach\u003c/h3\u003e\n\u003cp\u003eTo complement the very large body of work already in the identification of semantic primes for NSM in numerous languages, we aim in this project to investigate the process of reductive paraphrasing to provide a baseline evaluation of the feasibility of NSM as a theory. 
The approach proposed below is intended to very generally test the practicality of the act of reductive paraphrasing from the published set of primes: whether paraphrasing from those primes is even broadly possible across the entire lexicon of the few languages for which it is purported to be possible. This test remains needed because, quite counter-intuitively, metalanguage theorists have been constructing lexicalizations for non-prime words on an \u0026ldquo;as-needed\u0026rdquo; basis such as in (\u003ca href=\"#citeproc_bib_item_21\"\u003eWierzbicka 2007\u003c/a\u003e). No lexicon-wide demonstrations of lexicalizability has been performed (i.e. reductive paraphrasing all words down to the primes) as the current approach of manual definition of words from primes is significantly time-consuming and requires careful consideration of NSM\u0026rsquo;s semantic grammar between primes.\u003c/p\u003e\n\u003cp\u003eWe aim perform a lexicon-wide test of reductive paraphrasing computationally via \u003cem\u003emuch\u003c/em\u003e newer approaches in computational linguistics, specifically model-based Natural Language Processing (NLP).\u003c/p\u003e\n\u003cp\u003eIn order to isolate the exact problem of reductive paraphrasing, we first will have to highlight a few key assumptions by the NSM theory and therefore this project.\u003c/p\u003e\n\u003cp\u003eThe semantic metalanguage theory is itself built on the assumption that \u0026ldquo;each language is its own metalanguage\u0026rdquo; (\u003ca href=\"#citeproc_bib_item_8\"\u003eGoddard 2002\u003c/a\u003e)\u0026mdash;that human languages are broadly lexicalizable by itself (i.e. one can write an English dictionary by only using English.) We believe that the examination of this assumption is not within scope of the study and\u0026mdash;given it is fairly universally true from a practical standpoint (i.e. English dictionaries exist)\u0026mdash;we will take it as fact. 
We will use this fact further as the control for the feasibility of the approach, as discussed in the section below.\u003c/p\u003e\n\u003cp\u003eThe remaining assumptions of NSM to be tested here, then, is that 1) semantic primes exist and 2) the original set of NSM primes published (\u003ca href=\"#citeproc_bib_item_20\"\u003eWierzbicka 1974\u003c/a\u003e) (and in subsequent studies in various other languages highlighted before) are correct and, through reductive paraphrase, can lexicalize every word in the lexicon.\u003c/p\u003e\n\u003ch3 id=\"aims-and-experimental-design\"\u003eAims and Experimental Design\u003c/h3\u003e\n\u003cp\u003eIn this study, we aim to develop a computational protocol for lexicon-wide testing of the possibility of performing reductive paraphrasing for every word in the lexicon given a set of purported semantic primes. Practically, this means that we are trying create a model to test whether all words in a language is lexicalizable when restricted to only using a chosen subset of primes in the same language.\u003c/p\u003e\n\u003cp\u003eTo create a truly replicable test for lexicalizability under restriction, we turn to probabilistic NLP approaches. 
We propose the following metric for lexicalizability: a word is \u0026ldquo;lexicalizable\u0026rdquo; under some set of semantic primes if there is a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\u003c/p\u003e\n\u003cp\u003eUnder this model, all words in the lexicon are lexicalizable by the set of primes being tested if there is a lossless projection of the bases of the lexical space to the primes\u0026rsquo; latent embedding space.\u003c/p\u003e\n\u003cp\u003eThat is, given we have a latent embedding space of \\(n\\) semantic primes \\(P^n\\) and some lexicon \\(W\\) with \\(m\\) words, we aim to identify a linear mapping \\(M\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nMp = e_{W_j}\\ |\\ p \\in P^n, \\forall j=1\\ldots m\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(e_{W_j}\\) is the \\(j\\) th standard basis of \\(W\\) (i.e. \\(j\\) th word in the lexicon.)\u003c/p\u003e\n\u003cp\u003eThis projection is not, in principle, impossible. In the high-dimensional space of the entire lexicon, individual lexicalized words represent only the basis vectors of the space (and indeed in one-hot encodings for deep learning they are shown as the standard-basis of the lexicon-wide space.) 
Whereas in the lower-dimensional subspace of primes, a linear combination of primes can be used to represent each lexicalized word in the full lexicon.\u003c/p\u003e\n\u003cp\u003eSuccess in identifying a feasible \\(M \\in \\mathcal{L}(P, W)\\) for a given \\(P\\) and \\(W\\) indicates the feasibility of finding a linear combination in \\(P\\) which maps to all \\(w \\in W\\), which means reductive paraphrase of \\(w\\) to a set of primes in \\(P\\) is possible as there is a direct \u0026ldquo;translation\u0026rdquo; (namely, \\(W\\)) from \\(P\\) to \\(W\\).\u003c/p\u003e\n\u003cp\u003eTo actually compute \\(W\\) given \\(P\\) and \\(M\\), we leverage the well-established Transformer encoder-decoder architecture for language modeling (\u003ca href=\"#citeproc_bib_item_18\"\u003eVaswani et al. 2017\u003c/a\u003e). Furthermore, we frame the problem as one of unsupervised multi-lingual translation without alignments.\u003c/p\u003e\n\u003cp\u003eThe basis of the model proposed to be used to obtain \\(W\\) is (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e), a unsupervised multi-lingual translation model.\u003c/p\u003e\n\u003cp\u003e\u003cimg src=\"/ox-hugo/2022-08-28_20-26-43_screenshot.png\" alt=\"\"\u003e\nFigure from (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eAs we are performing the task with word embeddings, not sentences like that of (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e), the cross-attention lookup vector will serve no purpose (be \\(0\\)) (\u003ca href=\"#citeproc_bib_item_13\"\u003eNiu, Zhong, and Yu 2021\u003c/a\u003e) and hence removed.\u003c/p\u003e\n\u003cp\u003eFor the sake of standardization, we will call \\(P\\) the primary language/lexicon \\(L1\\), and \\(W\\) the second language/lexicon \\(L2\\). The basic hypothesis provided by (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 
2018\u003c/a\u003e) is that, through alternating samples of \\(L1\\) and \\(L2\\) through the model against their corresponding decoders using a shared encoder and separate decoders, the shared encoder is trained to perform the task of autoencoding for both lexicons at once. Therefore, at prediction time, to get the \u0026ldquo;translation\u0026rdquo; of an input, one simply applies the decoder of the desired lexicon to obtain a result.\u003c/p\u003e\n\u003cp\u003eDuring training, the input to the shared encoder can be a word from either \\(P\\) or $W$\u0026mdash;sampled with equal probability. If the input is from \\(P\\), we connect the output of the shared encoder with the \\(L1\\) decoder and train with the objective of recovering the input. Essentially, we are using the model as an alternate method of training a variational auto-encoder (\u003ca href=\"#citeproc_bib_item_12\"\u003eKlys, Snell, and Zemel 2018\u003c/a\u003e) with alternating decoders given the lexicon being analyzed.\u003c/p\u003e\n\u003cp\u003eThis task is trivial if the embedding space after the shared encoder is exactly as wide as both lexicons. However, we will restrict the output dimension of the shared encoder to \\(dim(P)\\) which after training we will call the latent embedding space of \\(L1\\); this name is verified and justified as a part of the feasibility check below.\u003c/p\u003e\n\u003cp\u003eWe will also use the backtranslation mechanism proposed by (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 
2018\u003c/a\u003e) during training: whereby the autoencoded output from \\(L1\\) is used as target for the same input as \\(L2\\) (as well as the reverse), mimicking the process of translation.\u003c/p\u003e\n\u003cp\u003eAfter training, the \\(L2\\) decoder would then be the candidate \\(W\\), mapping from the (proposed) latent embedding space of \\(P\\) to the lexicon \\(W\\).\u003c/p\u003e\n\u003cp\u003eFollowing both (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_4\"\u003eConneau and Lample 2019\u003c/a\u003e) we will use cross-entropy as the objective function of training.\u003c/p\u003e\n\u003ch4 id=\"feasibility-checkpoint\"\u003eFeasibility Checkpoint\u003c/h4\u003e\n\u003cp\u003eWe first need to show that, as expected, the model architecture proposed above\u0026mdash;upon convergence\u0026mdash;will create a latent embedding for \\(L1\\) after encoding if the output size for encoding is \\(dim(L1)\\) (defined to be equal to \\(dim(P)\\)).\u003c/p\u003e\n\u003cp\u003eA trivial test of whether the encoding output is desirably the embedding space of \\(L1\\) is that, through training with a toy mapping \\(P=W=L1=L2\\), we would expect both decoders to be an one-to-one mapping that simply copies the input. 
That is, after training with \\(P=W\\), we should see that activating one input in the shared post-encoding space should activate one or close to one feature only in both decoder\u0026rsquo;s output space.\u003c/p\u003e\n\u003cp\u003eNumerically, this means that the result obtained from taking the mean entropy of both outputs given a singular input activation should be statistically insignificantly different from \\(0\\).\u003c/p\u003e\n\u003cp\u003eThat is, we expect that given trained decoders \\(L_1\\) and \\(L_2\\), and standard bases of \\(W=P\\) named \\(e\\), we should see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\log(L_1e_j) + \\log(L_2e_j)}{2} \\approx 0: \\forall j = 1\\ldots dim(W)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe expect this result because, through gradient-descent, the quickest minima reachable to capture variation in the input perfectly is the copying task; therefore, we should expect here that if the post-encoding distribution is the same distribution as the input, the model\u0026rsquo;s decoders will fit to the copying task. If the post-encoding distribution is different from the input, the model\u0026rsquo;s decoders would then have to actually perform nontrivial mappings to achieve the desired autoencoding result.\u003c/p\u003e\n\u003ch4 id=\"checkpoint-2-plus-hypothesis-1\"\u003eCheckpoint 2 + Hypothesis 1\u003c/h4\u003e\n\u003cp\u003eThe following is the first novel result that we can show with the new architecture. We first hypothesize that the model should converge when training to the target of the (already linguistically accepted, as aforementioned) result that English words are themselves a metalanguage.\u003c/p\u003e\n\u003cp\u003eFor \\(dim(W)\\) iterations (similar to (\u003ca href=\"#citeproc_bib_item_19\"\u003eWebb et al. 2011\u003c/a\u003e)), we will leave a word chosen at random out of the lexicon of \\(P\\). This operation results in \\(dim(P) = dim(W)-1\\). 
We will then train the model until a local minimum is reached and measure convergence.\u003c/p\u003e\n\u003cp\u003eTo test this hypothesis, we will measure the cross-entropy performance of \\(L2\\) decoder upon the word that is left out. The resulting loss should be statistically insignificantly different from \\(0\\) if the word is successfully lexicalized via the \\(dim(W)-1\\) other words not left out in \\(P\\) in the latent embedding space after encoding.\u003c/p\u003e\n\u003cp\u003eIf the hypothesis is not successful, the model cannot converge even on a large subset of the entire lexicon, much less in the limited subset of the 60-word NSM-proposed metalanguage; it is therefore imperative not to continue the study unless convergence at this point can be shown. Importantly, however, failures in this step do \u003cem\u003enot\u003c/em\u003e support any claims about reductive paraphrasing as we are simply benchmarking the model against a control linguistic assumption we discussed earlier.\u003c/p\u003e\n\u003cp\u003eIn any case, it would be valuable at this point to again perform an analysis of the post-encoding output to observe any reductive paraphrasing behavior.\u003c/p\u003e\n\u003ch4 id=\"hypothesis-2\"\u003eHypothesis 2\u003c/h4\u003e\n\u003cp\u003eAt this point, we will set the lexicons to the sets we are actually testing. We will set \\(P\\) to be the list of semantic primes established by (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e), and \\(W\\) to the English lexicon.\u003c/p\u003e\n\u003cp\u003eShould lexicalization of all of the English lexicon via the semantic primes only be possible, this model should again converge after training with cross-entropy inappreciably different from \\(0\\). This result would indicate the existence of a \\(W\\) (i.e. 
\\(L2\\) decoder), indicating the possibility of lexicon-wide reductive paraphrasing.\u003c/p\u003e\n\u003ch2 id=\"institution-and-experience\"\u003eInstitution and Experience\u003c/h2\u003e\n\u003cp\u003eThe actual protocol proposed as a part of this study (namely, creating, training, and calculating metrics from the autoencoder) is a technical concept taught as a part of the regular curriculum of Advanced Machine Learning at Nueva; however, expertise and mentorship may still be required when implementing a complex model topology and training mechanism like the one proposed. The open-ended project structure of the Advanced Machine Learning course supports and sometimes necessitates implementing a model like the one proposed with the help of the CS faculty. Therefore, if additional mentorship is indeed required, there exists support available within the institution.\u003c/p\u003e\n\u003cp\u003eThe more difficult skill-set to capture is the knowledge regarding the theories of NSM and the field of structuralist linguistics in general. As of writing, we are not aware of any students who have an active research interest in traditional linguistics; however, this knowledge constitutes a far more insignificant portion of the actual mechanics of the project and is more importantly very easily taught. Mentorship is also available here from members of the Mathematics and CS faculty with prior research interest in computational linguistics.\u003c/p\u003e\n\u003cp\u003eIn terms of equipment, the most important tool required in working with a large-scale neural network is a matrix-mathematics accelerator; this often takes the form of a consumer graphics card and typical desktop computing setup. For the Machine Learning course taught at Nueva, Google\u0026rsquo;s Colab (and their free graphics card addition) is frequently used to address this need and would at a minimum suffice here. 
Also, it is based on the personal experience of the author, though by no means definite, that a large selection of students at Nueva has comparable hardware for training available at home.\u003c/p\u003e\n\u003cp\u003eProvided network access to the computing accelerator, this experiment can be done under any setting and definitely does not necessitate the use of the biology lab.\u003c/p\u003e\n\u003ch2 id=\"impact\"\u003eImpact\u003c/h2\u003e\n\u003ch3 id=\"academic-significance\"\u003eAcademic Significance\u003c/h3\u003e\n\u003cp\u003eWithin the short term, this experiment provides two major results. First, it establishes the use of a bifurcated unsupervised encoder-decoder translation model like that proposed by (\u003ca href=\"#citeproc_bib_item_1\"\u003eArtetxe et al. 2018\u003c/a\u003e) as a Conditional Variational Autoencoder (CVAE) (\u003ca href=\"#citeproc_bib_item_12\"\u003eKlys, Snell, and Zemel 2018\u003c/a\u003e) with the ability to define and train the hidden latent representation after encoding. Although traditional CVAEs are frequently more suited for most output-aware generation tasks, this new scheme supports the direct influence of the latent representations of the encoder instead of using an additional input to both the encoder and decoder to influence such representations, like in traditional CVAEs. This difference is significant as it addresses cases where dimensional projection is needed but the content of the latent representation itself is also relevant to the study.\u003c/p\u003e\n\u003cp\u003eOf course, the short-term result also includes the direct result of the second tested hypothesis: a systemic, lexicon-wide evaluation of the feasibility of reductive paraphrasing. 
The study is to develop a computational protocol for lexicon-wide reductive paraphrasing by creating a lossless mapping between a linear combination of the primes\u0026rsquo; latent embeddings to the word in lexicon space.\u003c/p\u003e\n\u003cp\u003eIf both initial metrics succeed and the third, final reduction step with actual semantic primes fails, the result would indicate an inability to create such a lossless mapping, and therefore raise concerns about the lexicon-wide applicability of the reductive paraphrasing on the set of published semantic primes. That is, there is not even a locally convergent linear combination of primes that will generally describe all of the lexicon, despite the hypothesis by NSM theorists. This result will be highly impactful for NSM theory in general which necessitates the possibility of reductive paraphrase (\u003ca href=\"#citeproc_bib_item_7\"\u003eGeeraerts 2009\u003c/a\u003e) (\u003ca href=\"#citeproc_bib_item_17\"\u003eVanhatalo, Tissari, and Idström, n.d.\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eOn the long term, demonstrations of reductive paraphrasing have wide-reaching implications for NSM theory in general (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_7\"\u003eGeeraerts 2009\u003c/a\u003e), and the field of language learning. The paraphrasing capacity of the proposed embedding would hypothetically be able to create a semantic mapping between a set of words to one other word; in this way, it is not infeasible to create a language-learning tool with continually larger embedding size to slowly create a larger lexicon in the target user. 
Early results (\u003ca href=\"#citeproc_bib_item_15\"\u003eSharma and Goyal 2021\u003c/a\u003e) have shown a possible application of such an approach, using supervised machine translation techniques.\u003c/p\u003e\n\u003ch3 id=\"learning-and-teaching\"\u003eLearning and Teaching\u003c/h3\u003e\n\u003cp\u003eOne to two students, along with a facilitator, would be an ideal size for this experiment. Primarily, the three main roles will include model engineering, training and validation, and model ablation and testing. The last role requires the most amount of traditional linguistics knowledge as the student\u0026rsquo;s role would be to connect the weights in the model to the applicable theories being tested.\u003c/p\u003e\n\u003cp\u003eThe study proposed is an extremely conventional empirical Machine Learning/NLP study. From a pedagogical standpoint for XRT, this study will be a diversion from the traditional wet-lab sciences or survey-based educational/social sciences commonly produced by the lab and lead a new avenue for the Lab\u0026rsquo;s expansion. Within Nueva, empirical research into machine learning is frequently done through independent study or the Intro/Advance machine learning courses\u0026mdash;which were recently expanded due to widening interest at the Upper School.\u003c/p\u003e\n\u003cp\u003eParticipation in this project provides its constituent students an opportunity to practice publish-quality ML/NLP in a longer-term and multi-stage project previously not possible through semester-long courses. 
Students are trained to perform model construction, data selection and cleaning, collection of model validation metrics, as well as model ablation and interpretation: important concepts in ML operations taught but not formalized in the Machine Learning course as the course exercises, while open-ended, isolate only one skill and have expected outcomes.\u003c/p\u003e\n\u003cp\u003eGiven the demand and rate of student progression between Intro/Advanced courses in ML each year, developing a suitable approach to propagate true machine-learning research will be relevant to upwards of 30 students each year.\u003c/p\u003e\n\u003cp\u003eIncidentally, students also get an exposure to the practice of conventional linguistics and the new trend of applying empirical research NLP back against classic semantics; however, the demand for this exact skill is likely small at Nueva.\u003c/p\u003e\n\u003cp\u003eThough the tool used and expanded upon by this experiment is applicable to the NLP research community, it is unfortunately difficult to predict its future applications to XRT or Nueva students without seeing more expansion into the area of ML and NLP by the XRT lab.\u003c/p\u003e\n\u003ch2 id=\"safety-and-ethics\"\u003eSafety and Ethics\u003c/h2\u003e\n\u003cp\u003eThe following are the responses to the safety and ethics checklist.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThis project does not satisfy any triggers of the second-expert protocol. All data needed is from a dictionary (for the English lexicon, e.g. (\u003ca href=\"#citeproc_bib_item_6\"\u003eFellbaum 2010\u003c/a\u003e)) as well as the semantic primes listed in a figure on the article (\u003ca href=\"#citeproc_bib_item_11\"\u003eHeine, Narrog, and Goddard 2015\u003c/a\u003e). 
The data is being generated during compute.\u003c/li\u003e\n\u003cli\u003eThe actual compute hardware will need to be stored either in the cloud (not on-prem), physically in the iLab, or (for personal compute hardware), in students\u0026rsquo; homes. An internet connection and a model training acceleration scheme (such as the free Google Colab) would suffice.\u003c/li\u003e\n\u003cli\u003eNone foreseeable\u003c/li\u003e\n\u003cli\u003eSee below\u003c/li\u003e\n\u003cli\u003eThe experiment is done on the English lexicon. It is difficult to imagine a tangible harm from the experiment.\u003c/li\u003e\n\u003cli\u003eThis study provides students with an opportunity to conduct a full research study in ML; XRT has not had this form of project before and approval would result in a new avenue of research being conducted with XRT. However, if the project is not approved, other ML projects may subsequently surface and students can leverage those opportunities to learn about the practice of empirical ML instead.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAs with most machine-learning projects, it is customary and appropriate to end with a statement on ML ethics and its implications. This study is a linguistics, lexicon-scale study, and the data sourced is available generally and not subject to copyright or any known data-protection laws. The inputs to the model are combinations of English words, and the model produces singular English words. The benefits of this model involve generating new knowledge about the English lexicon and semantic theories. The only known harm of the model involves the mis-interpretation of its results, creating overreaching generalizations to semantic primality analysis or NSM theories. 
The model and source code can be released to the general public without broad impact.\u003c/p\u003e\n\u003ch2 id=\"acknowledgments\"\u003eAcknowledgments\u003c/h2\u003e\n\u003cp\u003eI would like to thank Brandon Cho at Princeton University and Ted Theodosopoulos at The Nueva School for the very interesting discussion/argument that resulted in this proposal almost a year ago. I would like to thank Klint Kanopka at Stanford University for his mentorship and discussion of the overall feasibility of the approach and pointing out the path that lead to the proposed model\u0026rsquo;s basis in machine translation. Finally, I would like to thank Prof. Brian MacWhinney at Carnegie Mellon University for pointing out discourse between structuralism/functionalism during our exchanges and for his mentorship in my exploration of computational linguistics.\u003c/p\u003e\n\u003ch2 id=\"references\"\u003eReferences\u003c/h2\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eArtetxe, Mikel, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018. “Unsupervised Neural Machine Translation,” 12.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eBohnemeyer, Jurgen. 1998. “Temporal Reference from a Radical Pragmatics Perspective: Why Yucatec Does Not Need to Express ’after’ and ’before’.” Walter de Gruyter, Berlin/New York Berlin, New York.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eChappell, Hilary. 2002. “5. The Universal Syntax of Semantic Primes in Mandarin Chinese.” In \u003ci\u003eStudies in Language Companion Series\u003c/i\u003e, 243–322. Studies in Language Companion Series. John Benjamins Publishing Company. 
doi:\u003ca href=\"https://doi.org/10.1075/slcs.60.12cha\"\u003e10.1075/slcs.60.12cha\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eConneau, Alexis, and Guillaume Lample. 2019. “Cross-Lingual Language Model Pretraining,” 11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eDivjak, Dagmar, Natalia Levshina, and Jane Klavan. 2016. \u003ci\u003eCognitive Linguistics\u003c/i\u003e 27 (4): 447–63. doi:\u003ca href=\"https://doi.org/doi:10.1515/cog-2016-0095\"\u003edoi:10.1515/cog-2016-0095\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eFellbaum, Christiane. 2010. “Wordnet.” In \u003ci\u003eTheory and Applications of Ontology: Computer Applications\u003c/i\u003e, 231–43. Springer.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eGeeraerts, Dirk. 2009. “Neostructuralist Semantics.” In \u003ci\u003eTheories of Lexical Semantics\u003c/i\u003e, 124–78. Theories of Lexical Semantics. Oxford University Press. doi:\u003ca href=\"https://doi.org/10.1093/acprof:oso/9780198700302.003.0004\"\u003e10.1093/acprof:oso/9780198700302.003.0004\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eGoddard, Cliff. 2002. “The Search for the Shared Semantic Core of All Languages.” In \u003ci\u003eMeaning and Universal Grammar: Theory and Empirical Findings\u003c/i\u003e. John Benjamins Publishing Company.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003e———. 2012. “Semantic Primes, Semantic Molecules, Semantic Templates: Key Concepts in the NSM Approach to Lexical Typology.” \u003ci\u003eLinguistics\u003c/i\u003e 50 (3). 
doi:\u003ca href=\"https://doi.org/10.1515/ling-2012-0022\"\u003e10.1515/ling-2012-0022\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_10\"\u003e\u003c/a\u003eHarris, Randy Allen. 2021. \u003ci\u003eThe Linguistics Wars: Chomsky, Lakoff, and the Battle over Deep Structure\u003c/i\u003e. Oxford University Press.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_11\"\u003e\u003c/a\u003eHeine, Bernd, Heiko Narrog, and Cliff Goddard. 2015. “The Natural Semantic Metalanguage Approach.” In \u003ci\u003eThe Oxford Handbook of Linguistic Analysis\u003c/i\u003e, edited by Bernd Heine and Heiko Narrog. Oxford University Press. doi:\u003ca href=\"https://doi.org/10.1093/oxfordhb/9780199677078.013.0018\"\u003e10.1093/oxfordhb/9780199677078.013.0018\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_12\"\u003e\u003c/a\u003eKlys, Jack, Jake Snell, and Richard Zemel. 2018. “Learning Latent Subspaces in Variational Autoencoders,” 11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_13\"\u003e\u003c/a\u003eNiu, Zhaoyang, Guoqiang Zhong, and Hui Yu. 2021. “A Review on the Attention Mechanism of Deep Learning.” \u003ci\u003eNeurocomputing\u003c/i\u003e 452 (September): 48–62. doi:\u003ca href=\"https://doi.org/10.1016/j.neucom.2021.03.091\"\u003e10.1016/j.neucom.2021.03.091\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_14\"\u003e\u003c/a\u003ePeeters, Bert. 1994. “16 Semantic and Lexical Universals in French.” In \u003ci\u003eStudies in Language Companion Series\u003c/i\u003e, 423. Studies in Language Companion Series. John Benjamins Publishing Company. 
doi:\u003ca href=\"https://doi.org/10.1075/slcs.25.20pee\"\u003e10.1075/slcs.25.20pee\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_15\"\u003e\u003c/a\u003eSharma, Prawaal, and Navneet Goyal. 2021. “Zero-Shot Reductive Paraphrasing for Digitally Semi-Literate.” In \u003ci\u003eForum for Information Retrieval Evaluation\u003c/i\u003e, 91–98.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_16\"\u003e\u003c/a\u003eTravis, Catherine E. 2002. “4. La Metalengua Semántica Natural.” In \u003ci\u003eStudies in Language Companion Series\u003c/i\u003e, 173–242. Studies in Language Companion Series. John Benjamins Publishing Company. doi:\u003ca href=\"https://doi.org/10.1075/slcs.60.11tra\"\u003e10.1075/slcs.60.11tra\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_17\"\u003e\u003c/a\u003eVanhatalo, Ulla, Heli Tissari, and Anna Idström. n.d. “Revisiting the Universality of Natural Semantic Metalanguage: A View through Finnish,” 28.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_18\"\u003e\u003c/a\u003eVaswani, Ashish, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. “Attention Is All You Need,” 11.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_19\"\u003e\u003c/a\u003eWebb, Geoffrey I., Claude Sammut, Claudia Perlich, Tamás Horváth, Stefan Wrobel, Kevin B. Korb, William Stafford Noble, et al. 2011. “Leave-One-Out Cross-Validation.” In \u003ci\u003eEncyclopedia of Machine Learning\u003c/i\u003e, edited by Claude Sammut and Geoffrey I. Webb, 600–601. Boston, MA: Springer US. 
doi:\u003ca href=\"https://doi.org/10.1007/978-0-387-30164-8_469\"\u003e10.1007/978-0-387-30164-8_469\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_20\"\u003e\u003c/a\u003eWierzbicka, Anna. 1974. “Semantic Primitives.” \u003ci\u003eLingua\u003c/i\u003e 34 (4): 365–69. doi:\u003ca href=\"https://doi.org/10.1016/0024-3841(74)90004-7\"\u003e10.1016/0024-3841(74)90004-7\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_21\"\u003e\u003c/a\u003e———. 2007. “Bodies and Their Parts: An NSM Approach to Semantic Typology.” \u003ci\u003eLanguage Sciences\u003c/i\u003e 29 (1): 14–65. doi:\u003ca href=\"https://doi.org/10.1016/j.langsci.2006.07.002\"\u003e10.1016/j.langsci.2006.07.002\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003eNO_ITEM_DATA:goddard1998bad.\u003c/div\u003e\n\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnsm_proposal/","tags":null,"title":"NSM Proposal"},{"categories":null,"contents":"NUS Secondary School Other Duties AP Statistics Index AP Phys C Mech Index AP Phys C EM Index Tuning Forks bioinformatics PKM Intersession 2023 NUS-MATH580 QIC Date Topic \u0026lt;2022-04-05 Tue\u0026gt; physical qubits, manipulating physical qubits \u0026lt;2022-04-08 Fri\u0026gt; making qubits interact \u0026lt;2022-05-10 Tue\u0026gt; Chiara Marletto \u0026lt;2022-05-24 Tue\u0026gt; Strong Free Will NUS-CS223 Algorithms Backlog: Finite State Machine\nDate Topic \u0026lt;2022-04-07 Thu\u0026gt; stable matching problem, stable matching algorithm \u0026lt;2022-05-02 Mon\u0026gt; dynamic programming, relaxation \u0026lt;2022-05-23 Mon\u0026gt; distributed algorithum, randomized algorithum, complexity theory NUS-HIST301 American History Backlog: New Deal, Franklin D. 
Roosevelt (FDR), Works Progress Administration, effects of the New Deal, Great Depression, Herber Hoover, disinformation, Guilded Age\nDate Topic \u0026lt;2022-04-07 Thu\u0026gt; WWII, propaganda \u0026lt;2022-05-02 Mon\u0026gt; cold war \u0026lt;2022-05-09 Mon\u0026gt; civil rights \u0026lt;2022-05-26 Thu\u0026gt; Richard Nixon \u0026lt;2022-06-01 Wed\u0026gt; Ronald Raegan NUS-PHYS301 Mech Date Topic \u0026lt;2022-04-12 Tue\u0026gt; String Yo-Yo Problem, rotational energy \u0026lt;2022-05-24 Tue\u0026gt; Gyroscopes NUS-ENG401 English Date Topic \u0026lt;2022-04-15 Fri\u0026gt; secondary source comparison activity Essays Bluest Eye Essay Planning I, Tituba Essay Planning NUS-MATH530 Please refer to Linear Algebra Index\nNUS-ECON320 Financial Econometrics Date Topic \u0026lt;2022-08-25 Thu\u0026gt; Financial Markets Intro, ECON320 Architecture NUS-CS350 Software Studio Date Topic \u0026lt;2022-08-25 Thu\u0026gt; User Interviews, User Story \u0026lt;2022-09-07 Wed\u0026gt; Software Engineering, Prototyping \u0026lt;2022-09-12 Mon\u0026gt; Task Estimation \u0026lt;2022-09-15 Thu\u0026gt; Documentation and Specification \u0026lt;2022-09-19 Mon\u0026gt; Testing \u0026lt;2022-10-06 Thu\u0026gt; Defensive Programming \u0026lt;2022-11-03 Thu\u0026gt; Code Review \u0026lt;2022-12-01 Thu\u0026gt; UX Design NUS-MATH570 DiffEq Date Topic \u0026lt;2022-08-26 Fri\u0026gt; DiffEq Intro NUS-LANG250 Translation Translation Studies Index\nNUS-MATH575 CompBio Computational Biology Index\n","html":"\u003ch2 id=\"nus-secondary-school-other-duties\"\u003eNUS Secondary School Other Duties\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhap_phys_c_mech_index/\"\u003eAP Phys C Mech Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhap_phys_c_em_index/\"\u003eAP Phys C EM Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhtuning_forks/\"\u003eTuning Forks\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbioinformatics/\"\u003ebioinformatics\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpkm/\"\u003ePKM\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhintersession_2023/\"\u003eIntersession 2023\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"nus-math580-qic\"\u003eNUS-MATH580 QIC\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-05 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhphysical_qubits/\"\u003ephysical qubits\u003c/a\u003e, \u003ca href=\"/posts/kbhatoms_as_qubits/#manipulating-physical-qubits\"\u003emanipulating physical qubits\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-08 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmaking_qubits_interact/\"\u003emaking qubits interact\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-10 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhchiara_marletto/\"\u003eChiara Marletto\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-24 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhstrong_free_will/\"\u003eStrong Free Will\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-cs223-algorithms\"\u003eNUS-CS223 Algorithms\u003c/h2\u003e\n\u003cp\u003eBacklog: \u003ca href=\"/posts/kbhfinite_state_machine/\"\u003eFinite State Machine\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-07 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhstable_matching_problem/\"\u003estable matching problem\u003c/a\u003e, \u003ca href=\"\"\u003estable matching algorithm\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-02 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e, \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-23 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdistributed_algorithum/\"\u003edistributed algorithum\u003c/a\u003e, \u003ca href=\"/posts/kbhrandomized_algorithum/\"\u003erandomized algorithum\u003c/a\u003e, \u003ca href=\"/posts/kbhcomplexity_theory/\"\u003ecomplexity theory\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-hist301-american-history\"\u003eNUS-HIST301 American 
History\u003c/h2\u003e\n\u003cp\u003eBacklog: \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e, \u003ca href=\"/posts/kbhfdr/\"\u003eFranklin D. Roosevelt (FDR)\u003c/a\u003e, \u003ca href=\"/posts/kbhwpa/\"\u003eWorks Progress Administration\u003c/a\u003e, \u003ca href=\"/posts/kbheffects_of_the_new_deal/\"\u003eeffects of the New Deal\u003c/a\u003e, \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e, \u003ca href=\"/posts/kbhherber_hoover/\"\u003eHerber Hoover\u003c/a\u003e, \u003ca href=\"\"\u003edisinformation\u003c/a\u003e, \u003ca href=\"/posts/kbhguilded_age/\"\u003eGuilded Age\u003c/a\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-07 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eWWII\u003c/a\u003e, \u003ca href=\"/posts/kbhpropaganda/\"\u003epropaganda\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-02 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcold_war/\"\u003ecold war\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-09 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcivil_rights/\"\u003ecivil rights\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-26 
Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-06-01 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Raegan\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-phys301-mech\"\u003eNUS-PHYS301 Mech\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-12 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eString Yo-Yo Problem\u003c/a\u003e, \u003ca href=\"/posts/kbhrotational_energy/\"\u003erotational energy\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-05-24 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"\"\u003eGyroscopes\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-eng401-english\"\u003eNUS-ENG401 English\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-04-15 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhsecondary_source_comparison_activity/\"\u003esecondary source comparison activity\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch3 id=\"essays\"\u003eEssays\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhenglish_bluest_eye/\"\u003eBluest Eye Essay Planning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhi_tituba_essay_planning/\"\u003eI, Tituba Essay Planning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"nus-math530\"\u003eNUS-MATH530\u003c/h2\u003e\n\u003cp\u003ePlease refer to \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eLinear Algebra Index\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"nus-econ320-financial-econometrics\"\u003eNUS-ECON320 Financial Econometrics\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-08-25 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Markets Intro\u003c/a\u003e, \u003ca href=\"/posts/kbhecon320_architecture/\"\u003eECON320 Architecture\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-cs350-software-studio\"\u003eNUS-CS350 Software Studio\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-08-25 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhuser_interviews/\"\u003eUser 
Interviews\u003c/a\u003e, \u003ca href=\"/posts/kbhuser_interviews/#user-story\"\u003eUser Story\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-07 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e, \u003ca href=\"/posts/kbhprototyping/\"\u003ePrototyping\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-12 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtask_estimation/\"\u003eTask Estimation\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-15 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdocumentation_and_specification/\"\u003eDocumentation and Specification\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-19 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtesting/\"\u003eTesting\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-10-06 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdefensive_programming/\"\u003eDefensive Programming\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-11-03 
Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhcode_review/\"\u003eCode Review\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-12-01 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhux_design/\"\u003eUX Design\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-math570-diffeq\"\u003eNUS-MATH570 DiffEq\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-08-26 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDiffEq Intro\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"nus-lang250-translation\"\u003eNUS-LANG250 Translation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhtranslation_studies_index/\"\u003eTranslation Studies Index\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"nus-math575-compbio\"\u003eNUS-MATH575 CompBio\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomputational_biology_index/\"\u003eComputational Biology Index\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnueva_courses_index/","tags":["index"],"title":"Nueva Courses Index"},{"categories":null,"contents":"The Null Space, also known as the kernel, is the subset of vectors which get mapped to \\(0\\) by some Linear Map.\nconstituents Some linear map \\(T \\in \\mathcal{L}(V,W)\\)\nrequirements The subset of \\(V\\) which \\(T\\) maps to \\(0\\) is called the \u0026ldquo;Null 
Space\u0026rdquo;:\n\\begin{equation} null\\ T = \\{v \\in V: Tv = 0\\} \\end{equation}\nadditional information the null space is a subspace of the domain It should probably not be a surprise, given a Null Space is called a Null Space, that the Null Space is a subspace of the domain.\nzero As linear maps take \\(0\\) to \\(0\\), \\(T 0=0\\) so \\(0\\) is in the Null Space of \\(T\\).\nclosure under addition We have that:\n\\begin{equation} 0+0 = 0 \\end{equation}\nso by additivity of the Linear Maps the map is closed under addition.\nclosure under scalar multiplication By homogeneity of linear maps, the same of the above holds.\nThis completes the subspace proof, making \\(null\\ T\\) a subspace of the domain of \\(T\\), \\(V\\). \\(\\blacksquare\\)\nthe null space of the zero map is just the domain I mean duh. The zero map maps literally everything to zero.\nInjectivity IFF implies that null space is \\(\\{0\\}\\) See injectivity IFF implies that null space is \\(\\{0\\}\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e, also known as the \u003ca href=\"/posts/kbhnull_space/\"\u003ekernel\u003c/a\u003e, is the subset of vectors which get mapped to \\(0\\) by some \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eSome linear map \\(T \\in \\mathcal{L}(V,W)\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe subset of \\(V\\) which \\(T\\) maps to \\(0\\) is called the \u0026ldquo;\u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nnull\\ T = \\{v \\in V: Tv = 0\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"the-null-space-is-a-subspace--kbhsubspace-dot-md--of-the-domain\"\u003ethe null space is a 
\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the domain\u003c/h3\u003e\n\u003cp\u003eIt should probably not be a surprise, given a \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e is called a \u003ca href=\"/posts/kbhnull_space/\"\u003eNull \u003cstrong\u003e\u003cstrong\u003eSpace\u003c/strong\u003e\u003c/strong\u003e\u003c/a\u003e, that the \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the domain.\u003c/p\u003e\n\u003ch4 id=\"zero\"\u003ezero\u003c/h4\u003e\n\u003cp\u003eAs \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) to \\(0\\)\u003c/a\u003e, \\(T 0=0\\) so \\(0\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003eNull Space\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003ch4 id=\"closure-under-addition\"\u003eclosure under addition\u003c/h4\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0+0 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso by additivity of the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es the map is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under addition.\u003c/p\u003e\n\u003ch4 id=\"closure-under-scalar-multiplication\"\u003eclosure under scalar multiplication\u003c/h4\u003e\n\u003cp\u003eBy homogeneity of linear maps, the same of the above holds.\u003c/p\u003e\n\u003cp\u003eThis completes the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e proof, making \\(null\\ T\\) a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the domain of \\(T\\), \\(V\\). \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"the-null-space-of-the-zero-map-is-just-the-domain\"\u003ethe null space of the zero map is just the domain\u003c/h3\u003e\n\u003cp\u003eI mean duh. 
The \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e map maps literally everything to zero.\u003c/p\u003e\n\u003ch3 id=\"injectivity-iff-implies-that-null-space--kbhnull-space-dot-md--is-0\"\u003eInjectivity IFF implies that \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e is \\(\\{0\\}\\)\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity IFF implies that null space is \\(\\{0\\}\\)\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnull_space/","tags":null,"title":"null space"},{"categories":null,"contents":"A number can be any of\u0026hellip;\n\\(\\mathbb{N}\\): natural number \\(\\mathbb{Z}\\): integer \\(\\mathbb{Q}\\): rational number \\(\\mathbb{R}\\): real number \\(\\mathbb{P}\\): irrational number \\(\\mathbb{C}\\): complex number ","html":"\u003cp\u003eA number can be any of\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathbb{N}\\): \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{Z}\\): \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{Q}\\): \u003ca href=\"/posts/kbhrational_number/\"\u003erational number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{R}\\): \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{P}\\): \u003ca href=\"/posts/kbhirrational_number/\"\u003eirrational number\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(\\mathbb{C}\\): \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnumber/","tags":null,"title":"number"},{"categories":null,"contents":"Consider a general non-linear First Order ODEs:\n\\begin{equation} x\u0026rsquo; = F(x) 
\\end{equation}\nSuppose we have some time interval, we have some solutions to the expression given. Is it possible for us to, given \\(x(t_0) = x_0\\), what \\(x(t_0+T)\\) would be? Can we approximate for explicit numbers?\nThe solutions have to exist for all time: blow-up cannot be present during numerical estimations.\nExplicit Euler Method \\begin{equation} x(t+h) \\approx x_{t+1} = x_{t} + h f(x_t) \\end{equation}\nmotivation recall that given \\(x(t_0) = x_0\\), we desire \\(x(t_0+T)\\).\ndivide your solution interval into \\(N\\) small intervals; each interval would have length \\(h= \\frac{T}{N}\\) let \\(t_{i} = t_0 + i \\frac{T}{N}\\), where \\(t_{N} = t_{0}+T\\) for each segment \\(t_{i}\\), we attempt to compute a \\(x_{i}\\), and we\u0026rsquo;d like to approximate the error between \\(x_{i}\\) and \\(x(t_{i})\\). In the explicit Euler method, we make piecewise linear approximations. At each \\(x_0\\), we follow the slope estimated via the ODE at that point. Specifically:\n\\begin{equation} x\u0026rsquo;(t) = \\lim_{k \\to 0} \\frac{x(t+k)-x(t)}{k} \\approx \\frac{x(t+h)-x(t)}{h} \\end{equation}\nfor some small \\(h\\). Meaning, specifically, \\(x(t+h) \\approx x(t) + h x\u0026rsquo;(t)\\), where \\(h\\) is the step size we computed before.\nConsider that we had an ODE that is \\(x\u0026rsquo; = F(x)\\), whech gives us:\n\\begin{equation} x_1 = x_{0}+ h f(x_0) \\approx x(t_0 + h) \\end{equation}\nFollowing this scheme, we can calculate from \\(x_0\\) all the way stepwise to \\(x_{N}\\).\nevaluation Situation: we have \\(X_{N}\\), we have \\(x(t_{N})\\), how close are they? 
In fact:\n\\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch \\end{equation}\nWe have some constant \\(C(x_0, t_0, T, f)\\), which we can use to estimate \\(C\\) the bounds specific to the problem you are solving.\nstiffness Certain parts of a solution maybe decaying/oscillating very different from another part of the solution\u0026mdash;\nexample Consider a system:\n\\begin{equation} y\u0026rsquo; = \\mqty(-1 \u0026amp; 0 \\\\ 0 \u0026amp; -10)y \\end{equation}\nour solutions look like:\n\\begin{equation} y(t) = \\mqty(c_1 e^{-t} \\\\ c_2 e^{-10t}) \\end{equation}\nso the top expression gives \\(x_i = (1-h)^{i} x_0\\) and bottom \\(x_{i} = (1-10h)^{i}x_0\\), which means they will have different requirements for \\(h\\) to be able to converge\nexample 2 \\begin{equation} y\u0026rsquo; = -5 (y-\\cos x) \\end{equation}\nwith method of undetermined coefficients, we obtain:\n\\begin{equation} y = \\frac{25}{26} \\cos t + \\frac{5}{26} \\sin t + Ce^{-5t} \\end{equation}\nthe first parts are fine and not stiff at all, the third part, we realize that we need \\((1-5h)^{i}x_0\\), meaning we need \\(h \u0026lt; \\frac{1}{5}\\).\nmotivation Let\u0026rsquo;s consider:\n\\begin{equation} x\u0026rsquo; = -\\lambda x \\end{equation}\nThe explicit Euler gives out:\n\\begin{equation} x_{t+1} = (1-\\lambda h)x_{i} \\end{equation}\nmeaning, in general:\n\\begin{equation} x_{i} = (1-\\lambda h)^{i} x_0 \\end{equation}\nWe know the function is bound to decay, yet the Explicit Euler will give us that this decays only when:\n\\begin{equation} -1 \u0026lt; 1-\\lambda h \u0026lt; 1 \\end{equation}\nImplicit Euler Method doesn\u0026rsquo;t have this problem\u0026mdash;\nconsider:\n\\begin{equation} x_{i+1} = x_{i} - \\lambda h x_{i+1} \\end{equation}\nmeaning:\n\\begin{equation} x_{i} = \\frac{1}{(1+\\lambda h)^{i}}x_0 \\end{equation}\nImplicit Euler Method A small twist on the Explicit Euler Method. 
To be able to use this method, we can formulate this as:\n\\begin{equation} x_{i+1} - h f(x_{i+1}) = x_i \\end{equation}\nwhere we use Newton\u0026rsquo;s Method to estimate some input \\(i+1\\) for which the above statement gets to \\(x_{i}\\).\nevaluation We actually didn\u0026rsquo;t do that much error; its is still bounded by:\n\\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch \\end{equation}\nDerivation \\begin{equation} \\frac{x((t+h)-h) - x(t+h)}{-h} \\approx x\u0026rsquo;(t+h) \\end{equation}\nthis is first-order Taylor Approximation written backwards\nThis also yields:\n\\begin{equation} \\frac{x((t+h)-h) - x(t+h)}{-h} = \\frac{x(t+h)-x((t+h)-h)}{h} \\end{equation}\nNow, let \\(t = t_0\\), and therefore we have \\(t_1 = t +h\\), this gives us that:\nNow, recall that, because \\(f\\) is the ODE:\n\\begin{equation} x\u0026rsquo;(t_1) = f(x(t_1)) = x\u0026rsquo;(t+h) \\approx \\frac{x(t_1) - x(t_0)}{h} \\end{equation}\nMultiplying \\(h\\) to both sides gives:\n\\begin{equation} hf(x(t_1)) = x(t_1) - x(t_0) \\end{equation}\nwhich gives:\n\\begin{equation} x(t_0) = x(t_1) - h f(x(t_1)) \\end{equation}\nwe will now attempt to estimate \\(x_1\\) by declaring \\(x_1 := x(t_{1})\\), which will give us:\n\\begin{equation} x_1 - h f(x_1) = x_0 \\end{equation}\nLet us call \\(G(x_{1}) = x_1 - h f(x_1) = x_0\\).\nFinally, we run Newton\u0026rsquo;s Method to solve the \\(x_1\\) such that we can obtain \\(x_0\\) by trying to find the zeros of \\(G(x_1) - x_0\\). 
Because \\(h\\) is small, a good initial guess is actually \\(G(x_0)\\), and then we can optimize.\nTrapezoidal Method \\begin{equation} x_{t+1} = x_t + h \\frac{f(x_{t+1})+f(x_t)}{2} \\end{equation}\nmotivation \u0026ldquo;averaging smoothed things out\u0026rdquo;:\n\\begin{equation} \\frac{x(t+h) - x(t)}{h} \\approx \\frac{f(x(t+h)) + f(x(t))}{2} \\end{equation}\nmeaning we have:\n\\begin{equation} \\frac{x_1-x_0}{h} = \\frac{f(x_1) + f(x_0)}{2} \\end{equation}\nwhich averages our derivatives out.\nCross-multiplying, this gives:\n\\begin{equation} x_1 - \\frac{1}{2}h f(x_1) = x_0 + \\frac{1}{2} h f(x_0) \\end{equation}\nwhich can also be written as, multiplying by some \\(h\\):\n\\begin{equation} x_1 = x_0 + h \\frac{f(x_1)+f(x_0)}{2} \\end{equation}\nexplicitly \\begin{equation} x_{i} = \\qty( \\frac{(1- \\frac{1}{2}\\lambda h)}{(1+ \\frac{1}{2}\\lambda h)})^{i} x_0 \\end{equation}\nevaluation Importantly, this gives bounds\n\\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{2} \\end{equation}\nModified Euler Method This is also called \u0026ldquo;Midpoint Method\u0026rdquo;.\nThis is one of thee methods which doesn\u0026rsquo;t break during \u0026ldquo;stiff\u0026rdquo; ODEs, and converges \\(h^{N}\\) times quickly.\nFor some:\n\\begin{equation} \\dv{x}{t} = f(t,x) \\end{equation}\n\\begin{equation} x_{i+1} = x_{i} + h f\\qty(t_{i} + \\frac{1}{2}h, x_{i} + \\frac{1}{2}h f(t_{i}, x_{i})) \\end{equation}\nthis is motivated by the Trapezoidal Method, but\n\u0026ldquo;A thorough introduction to these methods requires additional background in approximation theory and numerical analysis\u0026rdquo;\nThe Book error \\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{2} \\end{equation}\nmotivation we take a half step in front of our original point using its slope, and compute the slope there.\nImproved Euler Method This is also called \u0026ldquo;Heun\u0026rsquo;s Method\u0026rdquo;\n\\begin{equation} x_{i+1} = x_{i} + \\frac{1}{2} h(f(t_{i}, x_{i}) + f(t_{i}+h, 
x_{i}+hf(t_{i}, x_{i}))) \\end{equation}\nerror \\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{2} \\end{equation}\nmotivation we average the slopes of the current location and a full step in front, calculating their slopes, and average them\nRunge-Kutta Method a.k.a. instead of contending with the forward, backward, middle slope, or native slope from \\(f\\), we just ball and average all of them:\n\\begin{equation} \\begin{cases} m_1 = f(t_{i}, x_{i}) \\\\ m_2 = f\\qty(t_{i} + \\frac{h}{2}, x_{i}+\\frac{h}{2}m_{1}) \\\\ m_3 = f\\qty(t_{i}+\\frac{h}{2}, x_{i}+\\frac{h}{2}m_{2}) \\\\ m_4 = f\\qty(t_{i} + h, x_{i}+hm_{3}) \\end{cases} \\end{equation}\nand then:\n\\begin{equation} x_{i+1} = x_{i} + \\frac{1}{6}h m_{1} + \\frac{1}{3} h m_{2} + \\frac{1}{3} h m_{3} + \\frac{1}{6} h m_{4} \\end{equation}\nthe coefficients are that from pascal\u0026rsquo;s triangle.\nerror \\begin{equation} |x_{N} - x(t_{n}) | \\leq Ch^{4} \\end{equation}\nmotivation this is essentially like \u0026ldquo;fitting a parabola\u0026rdquo; against our curve\n","html":"\u003cp\u003eConsider a general non-linear \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = F(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSuppose we have some time interval, we have some solutions to the expression given. Is it possible for us to, given \\(x(t_0) = x_0\\), what \\(x(t_0+T)\\) would be? 
Can we approximate for explicit numbers?\u003c/p\u003e\n\u003cp\u003eThe solutions have to exist for all time: blow-up \u003cstrong\u003ecannot\u003c/strong\u003e be present during numerical estimations.\u003c/p\u003e\n\u003ch2 id=\"explicit-euler-method\"\u003eExplicit Euler Method\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nx(t+h) \\approx x_{t+1} = x_{t} + h f(x_t)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003erecall that given \\(x(t_0) = x_0\\), we desire \\(x(t_0+T)\\).\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003edivide your solution interval into \\(N\\) small intervals; each interval would have length \\(h= \\frac{T}{N}\\)\u003c/li\u003e\n\u003cli\u003elet \\(t_{i} = t_0 + i \\frac{T}{N}\\), where \\(t_{N} = t_{0}+T\\)\u003c/li\u003e\n\u003cli\u003efor each segment \\(t_{i}\\), we attempt to compute a \\(x_{i}\\), and we\u0026rsquo;d like to approximate the error between \\(x_{i}\\) and \\(x(t_{i})\\).\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIn the explicit Euler method, we make piecewise linear approximations. At each \\(x_0\\), we follow the slope estimated via the \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e at that point. Specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t) = \\lim_{k \\to 0} \\frac{x(t+k)-x(t)}{k} \\approx \\frac{x(t+h)-x(t)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some small \\(h\\). 
Meaning, specifically, \\(x(t+h) \\approx x(t) + h x\u0026rsquo;(t)\\), where \\(h\\) is the step size we computed before.\u003c/p\u003e\n\u003cp\u003eConsider that we had an \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e that is \\(x\u0026rsquo; = F(x)\\), whech gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 = x_{0}+ h f(x_0) \\approx x(t_0 + h)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFollowing this scheme, we can calculate from \\(x_0\\) all the way stepwise to \\(x_{N}\\).\u003c/p\u003e\n\u003ch3 id=\"evaluation\"\u003eevaluation\u003c/h3\u003e\n\u003cp\u003eSituation: we have \\(X_{N}\\), we have \\(x(t_{N})\\), how close are they? In fact:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have some constant \\(C(x_0, t_0, T, f)\\), which we can use to estimate \\(C\\) the bounds specific to the problem you are solving.\u003c/p\u003e\n\u003ch3 id=\"stiffness\"\u003estiffness\u003c/h3\u003e\n\u003cp\u003eCertain parts of a solution maybe decaying/oscillating very different from another part of the solution\u0026mdash;\u003c/p\u003e\n\u003ch4 id=\"example\"\u003eexample\u003c/h4\u003e\n\u003cp\u003eConsider a system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\mqty(-1 \u0026amp; 0 \\\\ 0 \u0026amp; -10)y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eour solutions look like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = \\mqty(c_1 e^{-t} \\\\ c_2 e^{-10t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso the top expression gives \\(x_i = (1-h)^{i} x_0\\) and bottom \\(x_{i} = (1-10h)^{i}x_0\\), which means they will have different requirements for \\(h\\) to be able to converge\u003c/p\u003e\n\u003ch4 id=\"example-2\"\u003eexample 2\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = -5 (y-\\cos x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \u003ca 
href=\"/posts/kbhsecond_order_linear_differential_equation/#method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/a\u003e, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{25}{26} \\cos t + \\frac{5}{26} \\sin t + Ce^{-5t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe first parts are fine and not stiff at all, the third part, we realize that we need \\((1-5h)^{i}x_0\\), meaning we need \\(h \u0026lt; \\frac{1}{5}\\).\u003c/p\u003e\n\u003ch4 id=\"motivation\"\u003emotivation\u003c/h4\u003e\n\u003cp\u003eLet\u0026rsquo;s consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = -\\lambda x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe explicit Euler gives out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{t+1} = (1-\\lambda h)x_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, in general:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i} = (1-\\lambda h)^{i} x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe know the function is bound to decay, yet the Explicit Euler will give us that this decays only when:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-1 \u0026lt; 1-\\lambda h \u0026lt; 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#implicit-euler-method\"\u003eImplicit Euler Method\u003c/a\u003e doesn\u0026rsquo;t have this problem\u0026mdash;\u003c/p\u003e\n\u003cp\u003econsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} - \\lambda h x_{i+1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i} = \\frac{1}{(1+\\lambda h)^{i}}x_0\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"implicit-euler-method\"\u003eImplicit Euler Method\u003c/h2\u003e\n\u003cp\u003eA small twist on the \u003ca href=\"#explicit-euler-method\"\u003eExplicit Euler Method\u003c/a\u003e. 
To be able to use this method, we can formulate this as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} - h f(x_{i+1}) = x_i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere we use \u003ca href=\"/posts/kbhnewton_s_method/\"\u003eNewton\u0026rsquo;s Method\u003c/a\u003e to estimate some input \\(i+1\\) for which the above statement gets to \\(x_{i}\\).\u003c/p\u003e\n\u003ch3 id=\"evaluation\"\u003eevaluation\u003c/h3\u003e\n\u003cp\u003eWe actually didn\u0026rsquo;t do that much error; its is still bounded by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"derivation\"\u003eDerivation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x((t+h)-h) - x(t+h)}{-h} \\approx x\u0026rsquo;(t+h)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is first-order Taylor Approximation \u003cstrong\u003ewritten backwards\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eThis also yields:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x((t+h)-h) - x(t+h)}{-h} = \\frac{x(t+h)-x((t+h)-h)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, let \\(t = t_0\\), and therefore we have \\(t_1 = t +h\\), this gives us that:\u003c/p\u003e\n\u003cp\u003eNow, recall that, because \\(f\\) is the ODE:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t_1) = f(x(t_1)) = x\u0026rsquo;(t+h) \\approx \\frac{x(t_1) - x(t_0)}{h}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying \\(h\\) to both sides gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nhf(x(t_1)) = x(t_1) - x(t_0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t_0) = x(t_1) - h f(x(t_1))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe will now attempt to estimate \\(x_1\\) by declaring \\(x_1 := x(t_{1})\\), which will give us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 - h f(x_1) = x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us call \\(G(x_{1}) = 
x_1 - h f(x_1) = x_0\\).\u003c/p\u003e\n\u003cp\u003eFinally, we run \u003ca href=\"/posts/kbhnewton_s_method/\"\u003eNewton\u0026rsquo;s Method\u003c/a\u003e to solve the \\(x_1\\) such that we can obtain \\(x_0\\) by trying to find the zeros of \\(G(x_1) - x_0\\). Because \\(h\\) is small, a good initial guess is actually \\(G(x_0)\\), and then we can optimize.\u003c/p\u003e\n\u003ch2 id=\"trapezoidal-method\"\u003eTrapezoidal Method\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nx_{t+1} = x_t + h \\frac{f(x_{t+1})+f(x_t)}{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;averaging smoothed things out\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x(t+h) - x(t)}{h} \\approx \\frac{f(x(t+h)) + f(x(t))}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{x_1-x_0}{h} = \\frac{f(x_1) + f(x_0)}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich averages our derivatives out.\u003c/p\u003e\n\u003cp\u003eCross-multiplying, this gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 - \\frac{1}{2}h f(x_1) = x_0 + \\frac{1}{2} h f(x_0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich can also be written as, multiplying by some \\(h\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1 = x_0 + h \\frac{f(x_1)+f(x_0)}{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"explicitly\"\u003eexplicitly\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nx_{i} = \\qty( \\frac{(1- \\frac{1}{2}\\lambda h)}{(1+ \\frac{1}{2}\\lambda h)})^{i} x_0\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"evaluation\"\u003eevaluation\u003c/h3\u003e\n\u003cp\u003eImportantly, this gives bounds\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"modified-euler-method\"\u003eModified Euler Method\u003c/h2\u003e\n\u003cp\u003eThis is also called \u0026ldquo;\u003ca 
href=\"#modified-euler-method\"\u003eMidpoint Method\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eThis is one of thee methods which doesn\u0026rsquo;t break during \u0026ldquo;stiff\u0026rdquo; \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003es, and converges \\(h^{N}\\) times quickly.\u003c/p\u003e\n\u003cp\u003eFor some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = f(t,x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} + h f\\qty(t_{i} + \\frac{1}{2}h, x_{i} + \\frac{1}{2}h f(t_{i}, x_{i}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is motivated by the \u003ca href=\"#trapezoidal-method\"\u003eTrapezoidal Method\u003c/a\u003e, but\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003e\u0026ldquo;A thorough introduction to these methods requires additional background in approximation theory and numerical analysis\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThe Book\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"error\"\u003eerror\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003ewe take a half step in front of our original point using its slope, and compute the slope there.\u003c/p\u003e\n\u003ch2 id=\"improved-euler-method\"\u003eImproved Euler Method\u003c/h2\u003e\n\u003cp\u003eThis is also called \u0026ldquo;\u003ca href=\"#improved-euler-method\"\u003eHeun\u0026rsquo;s Method\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} + \\frac{1}{2} h(f(t_{i}, x_{i}) + f(t_{i}+h, x_{i}+hf(t_{i}, x_{i})))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"error\"\u003eerror\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003ewe average the 
slopes of the current location and a full step in front, calculating their slopes, and average them\u003c/p\u003e\n\u003ch2 id=\"runge-kutta-method\"\u003eRunge-Kutta Method\u003c/h2\u003e\n\u003cp\u003ea.k.a. instead of contending with the forward, backward, middle slope, or native slope from \\(f\\), we just ball and average all of them:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nm_1 = f(t_{i}, x_{i}) \\\\\nm_2 = f\\qty(t_{i} + \\frac{h}{2}, x_{i}+\\frac{h}{2}m_{1}) \\\\\nm_3 = f\\qty(t_{i}+\\frac{h}{2}, x_{i}+\\frac{h}{2}m_{2}) \\\\\nm_4 = f\\qty(t_{i} + h, x_{i}+hm_{3})\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{i+1} = x_{i} + \\frac{1}{6}h m_{1} + \\frac{1}{3} h m_{2} + \\frac{1}{3} h m_{3} + \\frac{1}{6} h m_{4}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe coefficients are that from pascal\u0026rsquo;s triangle.\u003c/p\u003e\n\u003ch3 id=\"error\"\u003eerror\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n|x_{N} - x(t_{n}) | \\leq Ch^{4}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"motivation\"\u003emotivation\u003c/h3\u003e\n\u003cp\u003ethis is essentially like \u0026ldquo;fitting a parabola\u0026rdquo; against our curve\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnumerical_approximation_schemes/","tags":null,"title":"Numerical Approximation Schemes"},{"categories":null,"contents":"Here\u0026rsquo;s the characteristic equation again:\n\\begin{equation} \\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x) \\end{equation}\nAfter Fourier decomposition, we have that:\n\\begin{equation} EI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0 \\end{equation}\nLet\u0026rsquo;s solve this!\nE,I,u,f = var(\u0026#34;E I u f\u0026#34;) x, L = var(\u0026#34;x L\u0026#34;) w = function(\u0026#39;w\u0026#39;)(x) _c0, _c1, _c2, _c3 = var(\u0026#34;_C0 _C1 _C2 _C3\u0026#34;) fourier_cantileaver = (E*I*diff(w, x, 2) - u*f^2*w == 0) fourier_cantileaver 
-f^2*u*w(x) + E*I*diff(w(x), x, x) == 0 And now, we can go about solving this result.\nsolution = desolve(fourier_cantileaver, w, ivar=x, algorithm=\u0026#34;fricas\u0026#34;).expand() w = solution \\begin{equation} _{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} \\end{equation}\nWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\nb = var(\u0026#34;b\u0026#34;) top = sqrt(f)*(u/(E*I))**(1/4) w = _c1*e^(b*x) + _c0*e^(i*b*x) + _c2*e^(-i*b*x) + _c3*e^(-b*x) w _C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x) \\begin{equation} _{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)} \\end{equation}\nWe have one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. We know this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\nRecall that:\n\\begin{equation} \\begin{cases} \\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\ \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. 
That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\n\\begin{equation} d_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w \\end{equation}\nNo more imaginaries!!\nSo, let us redefine the expression:\nd0, d1, d2, d3 = var(\u0026#34;d0 d1 d2 d3\u0026#34;) w = d0*cosh(b*x)+d1*sinh(b*x)+d2*cos(b*x)+d3*sin(b*x) w d2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x) Now, we need to move onto solving when there will be valid solutions to this expression. However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\nThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\nwp = diff(w,x,1) wpp = diff(w,x,2) wppp = diff(w,x,3) (wp, wpp, wppp) (b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x), -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x), -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x)) And then, we have a system:\ncond_1 = w.subs(x=0) == 0 cond_2 = wp.subs(x=0) == 0 cond_3 = wpp.subs(x=L) == 0 cond_4 = wppp.subs(x=L) == 0 conds = (cond_1, cond_2, cond_3, cond_4) conds (d0 + d2 == 0, b*d1 + b*d3 == 0, -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0, -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0) solve(conds, d0, d1, d2, d3).full_simplify() Ok so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. 
Therefore, we have the system:\n\\begin{equation} \\begin{cases} d_0 + d_2 = 0 \\\\ d_1 + d_3 = 0 \\\\ -d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\ -d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\ \\end{cases} \\end{equation}\nNow, taking the top expressions, we gather that:\n\\begin{equation} \\begin{cases} d_0 = -d_2 \\\\ d_1 = -d_3 \\end{cases} \\end{equation}\nPerforming these substitutions:\n\\begin{equation} \\begin{cases} d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\end{cases} \\end{equation}\nNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much what the constants are; instead, we care about when a solution exists (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). So:\n\\begin{align} \u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} \\end{align}\nand\n\\begin{align} \u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{align}\ntherefore, we have:\n\\begin{equation} \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{equation}\nMultiplying each side by the other:\n\\begin{equation} (\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2} \\end{equation}\nExpanding both sides now:\n\\begin{equation} (\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb) \\end{equation}\nMoving everything finally to one side:\n\\begin{equation} \\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nOk, this is where the 
satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. Recall pythagoras:\n\\begin{equation} \\begin{cases} \\cosh^{2}x - \\sinh^{2} x = 1 \\\\ \\sin^{2}x + \\cos^{2} x = 1 \\end{cases} \\end{equation}\nTo apply these effectively, multiply both sides by \\(1\\):\n\\begin{equation} -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nFinally, we substitute!\n\\begin{align} \u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0 \\end{align}\nOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\nWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\nintervals = [jj*pi for jj in range(0, 5)] intervals [0, pi, 2*pi, 3*pi, 4*pi] We will now declare \\(x=Lb\\), and create a nonlinear expression in it:\nx = var(\u0026#34;x\u0026#34;) characteristic_eqn = 1 + cos(x)*cosh(x) == 0 characteristic_eqn cos(x)*cosh(x) + 1 == 0 Root finding time!\ncharacteristic_solutions = [characteristic_eqn.find_root(i,j) for (i,j) in zip(intervals,intervals[1:])] characteristic_solutions [1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457] These are possible \\(Lb\\) candidates.\nThe takeaway here is that:\n(4.6940911329739246/1.8751040687120917)^2 6.266893025769125 (see below\u0026rsquo;s derivation for why frequency changes by a square of this root)\nthe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\nRecall now that:\n\\begin{equation} b = \\sqrt{f} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\end{equation}\nSimplifying some:\n\\begin{align} b \u0026amp;= f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\ \u0026amp;= \\qty(f^{2})^{\\frac{1}{4}}\\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\ \u0026amp;= \\qty(\\frac{\\mu f^{2}}{EI})^{\\frac{1}{4}} \\end{align}\nTo solve for \\(f\\), give all other expressions and set one of the above characteristic solutions to \\(Lb\\). 
Then, solve for \\(f\\).\nSolving for frequency to get things to be correct, substituting the fact that \\(bh \\rho = \\mu\\):\n\\begin{align} \u0026amp;Lb = s \\\\ \\Rightarrow\\ \u0026amp; L f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} = s \\\\ \\Rightarrow\\ \u0026amp; f^{\\frac{1}{2}} = \\frac{s}{L} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{4}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\mu})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\rho bh})^{\\frac{1}{2}} \\\\ \\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{Eh^{2}}{12\\rho })^{\\frac{1}{2}} \\end{align}\n_E = 70000000000 # pascals _p = 2666 # kg/m^3 _h = 0.0064 # m # target LENGTH = 0.095 # mode to index nth_mode = 0 _s = characteristic_solutions[nth_mode] (_s^2/LENGTH^2)*((_E*(_h^2))/(12*_p))^(1/2) 3688.17772197722 Also, to get the constant for the elastic modulus from our force measurements, see calculating shear\u0026rsquo;s modulus.\nLet us create a code snippet to do that consistently:\n# constants https://www.mit.edu/~6.777/matprops/aluminum.htm _E = 44062894805 # modulus (pascals) _I = 0.0000000001365333333 # second moment (m^4) https://amesweb.info/section/second-moment-of-area-calculator.aspx _u = 3.521355063 # length mass density (kg/m) # target LENGTH = 0.09573 # length of tine (meters) # mode to index nth_mode = 0 # variable declaration # solution eqn solution_eqn = characteristic_solutions[nth_mode] == (LENGTH*(sqrt(f)*(_u/(_E*_I))^(1/4))) # as frequency is squared, we take the SECOND (the non-negative) result, and round it solve(solution_eqn, f)[0].rhs().n() 501.482272272831 ","html":"\u003cp\u003eHere\u0026rsquo;s the characteristic equation again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu 
\\pdv{w}{t}+q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAfter Fourier decomposition, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s solve this!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E I u f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x L\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;w\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_C0 _C1 _C2 _C3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-f^2*u*w(x) + E*I*diff(w(x), x, x) == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, we can go about solving this result.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" 
data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E 
I}\\right)^{\\frac{1}{4}}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etop\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e_C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. 
We \u003cem\u003eknow\u003c/em\u003e this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNo more imaginaries!!\u003c/p\u003e\n\u003cp\u003eSo, let us redefine the expression:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;d0 d1 d2 d3\u0026#34;\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esinh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ed2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we need to move onto solving when there will be valid solutions to this expression. 
However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\u003c/p\u003e\n\u003cp\u003eThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-10_13-38-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + 
b^3*d0*sinh(b*x))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, we have a system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(d0 + d2 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e b*d1 + b*d3 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOk so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 + d_2 = 0 \\\\\nd_1 + d_3 = 0 \\\\\n-d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\\n-d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, taking the top expressions, we gather that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 = -d_2 \\\\\nd_1 = -d_3\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePerforming these substitutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\nd_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much \u003cem\u003ewhat\u003c/em\u003e the constants are; instead, we care about when a solution \u003cem\u003eexists\u003c/em\u003e (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). 
So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{align}\u003c/p\u003e\n\u003cp\u003etherefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying each side by the other:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding both sides now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMoving everything finally to one side:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, this is where the satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. 
Recall pythagoras:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh^{2}x - \\sinh^{2} x = 1 \\\\\n\\sin^{2}x + \\cos^{2} x = 1\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo apply these effectively, multiply both sides by \\(1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we substitute!\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\u003c/p\u003e\n\u003cp\u003eWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e[0, pi, 2*pi, 3*pi, 4*pi]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now declare \\(x=Lb\\), and create a nonlinear expression in it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ecos(x)*cosh(x) + 1 == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRoot finding time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efind_root\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThese are possible \\(Lb\\) candidates.\u003c/p\u003e\n\u003cp\u003eThe takeaway here is that:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.6940911329739246\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.8751040687120917\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e6.266893025769125\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e(see below\u0026rsquo;s derivation for why frequency changes by a \u003cem\u003esquare\u003c/em\u003e of this root)\u003c/p\u003e\n\u003cp\u003ethe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\u003c/p\u003e\n\u003cp\u003eRecall now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb = \\sqrt{f} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSimplifying some:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nb \u0026amp;= f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\\n\u0026amp;= \\qty(f^{2})^{\\frac{1}{4}}\\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} \\\\\n\u0026amp;= \\qty(\\frac{\\mu f^{2}}{EI})^{\\frac{1}{4}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eTo solve for \\(f\\), give all other expressions and set one of the above characteristic solutions to \\(Lb\\). 
Then, solve for \\(f\\).\u003c/p\u003e\n\u003cp\u003eSolving for frequency to get things to be correct, substituting the fact that \\(bh \\rho = \\mu\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Lb = s \\\\\n\\Rightarrow\\ \u0026amp; L f^{\\frac{1}{2}} \\qty(\\frac{\\mu}{EI})^{\\frac{1}{4}} = s \\\\\n\\Rightarrow\\ \u0026amp; f^{\\frac{1}{2}} = \\frac{s}{L} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{4}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{EI}{\\mu})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\mu})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{E(\\frac{bh^{3}}{12})}{\\rho bh})^{\\frac{1}{2}} \\\\\n\\Rightarrow\\ \u0026amp; f = \\frac{s^{2}}{L^{2}} \\qty(\\frac{Eh^{2}}{12\\rho })^{\\frac{1}{2}}\n\\end{align}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e70000000000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2666\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0.0064\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.095\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e3688.17772197722\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAlso, to get the constant for the elastic modulus from our force measurements, see \u003ca href=\"/posts/kbhcalculating_shear_s_modulus/\"\u003ecalculating shear\u0026rsquo;s modulus.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eLet us create a code snippet to do that consistently:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# constants https://www.mit.edu/~6.777/matprops/aluminum.htm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e44062894805\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# modulus (pascals)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0000000001365333333\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# second moment (m^4) https://amesweb.info/section/second-moment-of-area-calculator.aspx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3.521355063\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# length mass density 
(kg/m)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.09573\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# length of tine (meters)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# variable declaration\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solution eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution_eqn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# as frequency is squared, we take the SECOND (the non-negative) result, and round it\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolution_eqn\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erhs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e501.482272272831\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations-1/","tags":null,"title":"Numerical Cantilever Simulations"},{"categories":null,"contents":"Here\u0026rsquo;s the characteristic equation again:\n\\begin{equation} \\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x) \\end{equation}\nAfter Fourier decomposition, we have that:\n\\begin{equation} EI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0 \\end{equation}\nLet\u0026rsquo;s solve this!\nE,I,u,f = var(\u0026#34;E I u f\u0026#34;) x, L = var(\u0026#34;x L\u0026#34;) w = function(\u0026#39;w\u0026#39;)(x) _c0, _c1, _c2, _c3 = var(\u0026#34;_C0 _C1 _C2 _C3\u0026#34;) fourier_cantileaver = (E*I*diff(w, x, 2) - u*f^2*w == 0) fourier_cantileaver -f^2*u*w(x) + 
E*I*diff(w(x), x, x) == 0 And now, we can go about solving this result.\nsolution = desolve(fourier_cantileaver, w, ivar=x, algorithm=\u0026#34;fricas\u0026#34;).expand() w = solution \\begin{equation} _{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} \\end{equation}\nWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\nb = var(\u0026#34;b\u0026#34;) top = sqrt(f)*(u/(E*I))**(1/4) w = _c1*e^(b*x) + _c0*e^(i*b*x) + _c2*e^(-i*b*x) + _c3*e^(-b*x) w _C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x) \\begin{equation} _{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)} \\end{equation}\nWe have one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. We know this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\nRecall that:\n\\begin{equation} \\begin{cases} \\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\ \\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\ \\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\ \\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\ \\end{cases} \\end{equation}\nWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. 
That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\n\\begin{equation} d_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w \\end{equation}\nNo more imaginaries!!\nSo, let us redefine the expression:\nd0, d1, d2, d3 = var(\u0026#34;d0 d1 d2 d3\u0026#34;) w = d0*cosh(b*x)+d1*sinh(b*x)+d2*cos(b*x)+d3*sin(b*x) w d2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x) Now, we need to move onto solving when there will be valid solutions to this expression. However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\nThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\nwp = diff(w,x,1) wpp = diff(w,x,2) wppp = diff(w,x,3) (wp, wpp, wppp) (b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x), -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x), -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + b^3*d0*sinh(b*x)) And then, we have a system:\ncond_1 = w.subs(x=0) == 0 cond_2 = wp.subs(x=0) == 0 cond_3 = wpp.subs(x=L) == 0 cond_4 = wppp.subs(x=L) == 0 conds = (cond_1, cond_2, cond_3, cond_4) conds (d0 + d2 == 0, b*d1 + b*d3 == 0, -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0, -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0) solve(conds, d0, d1, d2, d3).full_simplify() Ok so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. 
Therefore, we have the system:\n\\begin{equation} \\begin{cases} d_0 + d_2 = 0 \\\\ d_1 + d_3 = 0 \\\\ -d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\ -d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\ \\end{cases} \\end{equation}\nNow, taking the top expressions, we gather that:\n\\begin{equation} \\begin{cases} d_0 = -d_2 \\\\ d_1 = -d_3 \\end{cases} \\end{equation}\nPerforming these substitutions:\n\\begin{equation} \\begin{cases} d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\end{cases} \\end{equation}\nNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much what the constants are; instead, we care about when a solution exists (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). So:\n\\begin{align} \u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} \\end{align}\nand\n\\begin{align} \u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{align}\ntherefore, we have:\n\\begin{equation} \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )} \\end{equation}\nMultiplying each side by the other:\n\\begin{equation} (\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2} \\end{equation}\nExpanding both sides now:\n\\begin{equation} (\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb) \\end{equation}\nMoving everything finally to one side:\n\\begin{equation} \\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nOk, this is where the 
satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. Recall pythagoras:\n\\begin{equation} \\begin{cases} \\cosh^{2}x - \\sinh^{2} x = 1 \\\\ \\sin^{2}x + \\cos^{2} x = 1 \\end{cases} \\end{equation}\nTo apply these effectively, multiply both sides by \\(1\\):\n\\begin{equation} -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\end{equation}\nFinally, we substitute!\n\\begin{align} \u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\ \\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0 \\end{align}\nOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\nWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\nintervals = [jj*pi for jj in range(0, 5)] intervals [0, pi, 2*pi, 3*pi, 4*pi] We will now declare \\(x=Lb\\), and create a nonlinear expression in it:\nx = var(\u0026#34;x\u0026#34;) characteristic_eqn = 1 + cos(x)*cosh(x) == 0 characteristic_eqn cos(x)*cosh(x) + 1 == 0 Root finding time!\ncharacteristic_solutions = [characteristic_eqn.find_root(i,j) for (i,j) in zip(intervals,intervals[1:])] characteristic_solutions [1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457] These are possible \\(Lb\\) candidates.\nThe takeaway here is that:\n(4.6940911329739246/1.8751040687120917)^2 6.266893025769125 (see below\u0026rsquo;s derivation for why frequency changes by a square of this root)\nthe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\n\\begin{equation} \\sqrt{f}\\qty(\\frac{u}{EI})^{\\frac{1}{4}} L = s \\end{equation}\n\\begin{equation} \\sqrt{f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}} \\end{equation}\n_E = 70000000000 # pascals _p = 2766 # kg/m^3 _h = 0.006 # m _I = 0.0000000001302083333 # m^4 _u = 0.10388 # kg/m, approximate # target LENGTH = 0.095 # mode to index nth_mode = 0 _s = characteristic_solutions[nth_mode] ((3.5160)/(LENGTH^2))*((_E*_I)/_u)^(1/2) 3649.25402142506 _E = 70000000000 # pascals _p = 2766 # kg/m^3 _h = 0.006 # m # target LENGTH = 0.095 # mode to index nth_mode = 0 _s = characteristic_solutions[nth_mode] (_s^2/LENGTH^2)*((_E*(_h^2))/(12*_p))^(1/2) 3394.58823149786 ","html":"\u003cp\u003eHere\u0026rsquo;s the characteristic equation 
again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2] x \\qty(EI \\pdv[2]{w}{x}) = -\\mu \\pdv{w}{t}+q(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAfter Fourier decomposition, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEI \\dv[4]{\\hat{w}}{x} - \\mu f^{2}\\hat{w} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s solve this!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;E I u f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x L\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;w\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_C0 _C1 _C2 _C3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-f^2*u*w(x) + E*I*diff(w(x), x, x) == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, we can go about solving this result.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efourier_cantileaver\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{0}} e^{\\left(i \\, \\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{2}} e^{\\left(-i \\, \\sqrt{f} x \\left(\\frac{u}{E 
I}\\right)^{\\frac{1}{4}}\\right)} + _{C_{3}} e^{\\left(-\\sqrt{f} x \\left(\\frac{u}{E I}\\right)^{\\frac{1}{4}}\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will simplify the repeated, constant top of this expression into a single variable \\(b\\):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etop\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_c3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e_C1*e^(b*x) + _C0*e^(I*b*x) + _C2*e^(-I*b*x) + _C3*e^(-b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\n_{C_{1}} e^{\\left(b x\\right)} + _{C_{0}} e^{\\left(i \\, b x\\right)} + _{C_{2}} e^{\\left(-i \\, b x\\right)} + _{C_{3}} e^{\\left(-b x\\right)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have 
one equation, four unknowns. However, we are not yet done. We will make one more simplifying assumption\u0026mdash;try to get the \\(e^{x}\\) into sinusoidal form. We \u003cem\u003eknow\u003c/em\u003e this is supposed to oscillate, and it being in sinusoidal makes the process of solving for periodic solutions easier.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh x = \\frac{e^{x}+e^{-x}}{2} \\\\\n\\cos x = \\frac{e^{ix}+e^{-ix}}{2}\\\\\n\\sinh x = \\frac{e^{x}-e^{-x}}{2} \\\\\n\\sin x = \\frac{e^{ix}-e^{-ix}}{2i}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith a new set of scaling constants \\(d_0\\dots d_3\\), and some rearranging, we can rewrite the above expressions into just a linear combination of those elements. That is, the same expression for \\(w(x)\\) at a specific frequency can be written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_0\\cosh bx +d_1\\sinh bx +d_2\\cos bx +d_3\\sin bx = w\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNo more imaginaries!!\u003c/p\u003e\n\u003cp\u003eSo, let us redefine the expression:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;d0 d1 d2 d3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esinh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ed2*cos(b*x) + d0*cosh(b*x) + d3*sin(b*x) + d1*sinh(b*x)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we need to move onto solving when there will be valid solutions to this expression. 
However, we currently have four unknowns, and only one equation (at \\(x=0\\), \\(w=0\\), because the cantilever is fixed at base); so, to get a system in four elements, we will take some derivatives.\u003c/p\u003e\n\u003cp\u003eThe way that we will go about this is by taking three derivatives and supplying the following initial conditions to get four equations:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-10_13-38-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(b*d3*cos(b*x) + b*d1*cosh(b*x) - b*d2*sin(b*x) + b*d0*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(b*x) + b^2*d0*cosh(b*x) - b^2*d3*sin(b*x) + b^2*d1*sinh(b*x),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(b*x) + b^3*d1*cosh(b*x) + b^3*d2*sin(b*x) + 
b^3*d0*sinh(b*x))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, we have a system:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewpp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewppp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econd_1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003econd_2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econd_4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(d0 + d2 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e b*d1 + b*d3 == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^2*d2*cos(L*b) + b^2*d0*cosh(L*b) - b^2*d3*sin(L*b) + b^2*d1*sinh(L*b) == 0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -b^3*d3*cos(L*b) + b^3*d1*cosh(L*b) + b^3*d2*sin(L*b) + b^3*d0*sinh(L*b) == 0)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efull_simplify\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eOk so, we notice that out of all of these boundary expressions the \\(b^{n}\\) term drop out. Therefore, we have the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 + d_2 = 0 \\\\\nd_1 + d_3 = 0 \\\\\n-d_2 \\cos Lb + d_0 \\cosh Lb - d_3 \\sin Lb + d_1 \\sinh Lb = 0 \\\\\n-d_3 \\cos Lb + d_1 \\cosh Lb + d_2 \\sin Lb + d_0 \\sinh Lb = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, taking the top expressions, we gather that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 = -d_2 \\\\\nd_1 = -d_3\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePerforming these substitutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nd_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\nd_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow we are going to do some cursed algebra to get rid of all the rest of the \\(d\\). We want to do this because we don\u0026rsquo;t really care much \u003cem\u003ewhat\u003c/em\u003e the constants are; instead, we care about when a solution \u003cem\u003eexists\u003c/em\u003e (hopefully, then, telling us what the \\(f\\) baked inside \\(b\\) is). 
So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_0 (\\cos Lb + \\cosh Lb) + d_1 (\\sin Lb + \\sinh Lb) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;d_1 (\\cos Lb + \\cosh Lb) + d_0 (\\sinh Lb- \\sin Lb ) = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{-d_0}{d_1} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{align}\u003c/p\u003e\n\u003cp\u003etherefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{(\\sin Lb + \\sinh Lb)}{(\\cos Lb + \\cosh Lb)} = \\frac{(\\cos Lb + \\cosh Lb)}{(\\sinh Lb- \\sin Lb )}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying each side by the other:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sin Lb + \\sinh Lb)(\\sinh Lb- \\sin Lb ) = (\\cos Lb + \\cosh Lb)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding both sides now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\sinh^{2} Lb-\\sin^{2} Lb) = (\\cos^{2} Lb + 2\\cos Lb\\ \\cosh Lb + \\cosh ^{2}Lb)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMoving everything finally to one side:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sinh^{2} Lb - \\cosh^{2} Lb -\\sin ^{2} Lb - \\cos ^{2}Lb - 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk, this is where the satisfying \u0026ldquo;candy crush\u0026rdquo; begins when things cancel out. 
Recall pythagoras:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\cosh^{2}x - \\sinh^{2} x = 1 \\\\\n\\sin^{2}x + \\cos^{2} x = 1\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo apply these effectively, multiply both sides by \\(1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we substitute!\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; -\\sinh^{2} Lb + \\cosh^{2} Lb +\\sin ^{2} Lb + \\cos ^{2}Lb + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + 1 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 2 + 2\\cos Lb \\cosh Lb = 0 \\\\\n\\Rightarrow\\ \u0026amp; 1 + \\cos Lb \\cosh Lb = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, there is oscillating results here. We will numerically locate them. Why did we subject ourselves to tall of this algebra? No idea. As soon as we got rid of all the \\(d\\) we could have just stopped simplifying and just went to the numerical root solving. 
But here we are.\u003c/p\u003e\n\u003cp\u003eWe will try to locate a root for \\(Lb\\) for every \\(\\pi\\) for two rounds around the circle (until \\(4 \\pi\\))\u0026mdash;there is a solution for every \\(\\pi\\), if you don\u0026rsquo;t believe me, plot it or change the bottom to try to find it for every \\(\\frac{\\pi}{2}\\), sage will crash:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ejj\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e5\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e[0, pi, 2*pi, 3*pi, 4*pi]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now declare \\(x=Lb\\), and create a nonlinear expression in it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;x\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecos\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecosh\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ecos(x)*cosh(x) + 1 == 0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRoot finding time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_eqn\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efind_root\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintervals\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1.8751040687120917, 4.6940911329739246, 7.854757438237603, 10.995540734875457]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThese are possible \\(Lb\\) candidates.\u003c/p\u003e\n\u003cp\u003eThe takeaway here is that:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4.6940911329739246\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1.8751040687120917\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e6.266893025769125\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e(see below\u0026rsquo;s derivation for why frequency changes by a \u003cem\u003esquare\u003c/em\u003e of this root)\u003c/p\u003e\n\u003cp\u003ethe second overtone will be six and a quarter times (\u0026ldquo;much\u0026rdquo;) higher than the fundamental\u0026mdash;so it will be able to dissipate much quicker.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{f}\\qty(\\frac{u}{EI})^{\\frac{1}{4}} L = s\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{f} = \\frac{s}{L} \\qty(\\frac{EI}{u})^{\\frac{1}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e70000000000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2766\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.006\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0000000001302083333\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m^4\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.10388\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m, approximate\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.095\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3.5160\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_I\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_u\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3649.25402142506\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e70000000000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# pascals\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2766\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# kg/m^3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.006\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# 
target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.095\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# mode to index\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echaracteristic_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enth_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLENGTH\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_E\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_h\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e12\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3394.58823149786\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations/","tags":null,"title":"Numerical Cantilever Simulations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_capm_problem_set/","tags":null,"title":"NUS-ECON320 CAPM Problem Set"},{"categories":null,"contents":"Let\u0026rsquo;s 
import some tools.\nimport pandas as pd from scipy.optimize import minimize import numpy as np from datetime import datetime from tqdm import tqdm import torch tqdm.pandas() And load our data:\ndf = pd.read_csv(\u0026#34;./currency_signal.csv\u0026#34;, index_col=0, header=None, parse_dates=[0]) df Let\u0026rsquo;s rename the headers\ndf.index.rename(\u0026#34;date\u0026#34;, True) df.columns = [\u0026#34;value\u0026#34;] Awesome. For the rest of the calculations, we will hide the 2020 data from the model:\ndata = df[df.index \u0026lt; datetime(2020, 1,1)] data value date 2006-03-01 0.000050 2006-03-02 0.001778 2006-03-03 0.000116 2006-03-06 -0.001038 2006-03-07 -0.001197 ... ... 2019-12-25 -0.010659 2019-12-26 -0.000869 2019-12-27 0.000075 2019-12-30 0.000033 2019-12-31 0.000944 [3610 rows x 1 columns] we will add a column of randomness to this, to serve as the seed of our epsilon:\ndata[\u0026#34;epsilon\u0026#34;] = np.random.normal(0,1, data.shape[0]) data value epsilon date 2006-03-01 0.000050 -0.255699 2006-03-02 0.001778 0.157341 2006-03-03 0.000116 0.574378 2006-03-06 -0.001038 -1.319365 2006-03-07 -0.001197 -0.717148 ... ... ... 2019-12-25 -0.010659 0.153559 2019-12-26 -0.000869 -1.066562 2019-12-27 0.000075 0.025730 2019-12-30 0.000033 0.760713 2019-12-31 0.000944 -0.427494 [3610 rows x 2 columns] Awesome, we will now seed three parameter variables. Recall that the GARCH model we are dealing with is:\n\\begin{equation} \\begin{cases} \\eta_t = \\sigma_{t}\\epsilon_{t} \\\\ {\\sigma_{t}}^{2} = \\alpha {\\eta_{t}}^{2} + \\beta {\\sigma_{t-1}}^{2} + \\gamma \\end{cases} \\end{equation}\nSolving for explicit solutions of \\(n_t\\) and \\(\\sigma_t\\), in terms of the others using computer algebra, we have:\n\\begin{equation} \\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1} \\end{equation}\nThe value of \\(\\eta_t\\) is naturally \\(\\sigma_t \\epsilon_t\\) (i.e. 
\\(\\eta^{2} = (\\sigma_{t})^{2}(\\epsilon_{t})^{2}\\)).\nSo, to make the squared results, we want to square both value and epsilon:\ndata[\u0026#34;value2\u0026#34;] = data.value**2 data[\u0026#34;epsilon2\u0026#34;] = data.epsilon**2 data value epsilon value2 epsilon2 date 2006-03-01 0.000050 -0.255699 2.450633e-09 0.065382 2006-03-02 0.001778 0.157341 3.162006e-06 0.024756 2006-03-03 0.000116 0.574378 1.334210e-08 0.329910 2006-03-06 -0.001038 -1.319365 1.076978e-06 1.740723 2006-03-07 -0.001197 -0.717148 1.432477e-06 0.514301 ... ... ... ... ... 2019-12-25 -0.010659 0.153559 1.136119e-04 0.023580 2019-12-26 -0.000869 -1.066562 7.549935e-07 1.137555 2019-12-27 0.000075 0.025730 5.670657e-09 0.000662 2019-12-30 0.000033 0.760713 1.083948e-09 0.578684 2019-12-31 0.000944 -0.427494 8.913486e-07 0.182751 [3610 rows x 4 columns] Now, we can now compute a column of these, based on the data we have. To be able to optimize this symbolically, we will leverage PyTorch.\nLet\u0026rsquo;s seed these constants all at \\(1\\), to be optimized later:\na = torch.tensor(1e-10, requires_grad=True) b = torch.tensor(1e-10, requires_grad=True) y = torch.tensor(1e-10, requires_grad=True) (a,b,y) (tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True)) We use the complex data type here to make the subtract operation work. We will eventually project it down to real space without much trouble.\nAwesome, let us compute this series of \\(\\sigma\\), and optimize for the loss.\nHere is a gradient descent optimizer:\n# we will use the gradient descent scheme optimizer = torch.optim.SGD([a,b,y], lr=3e-3) optimizer SGD ( Parameter Group 0 dampening: 0 differentiable: False foreach: None lr: 0.003 maximize: False momentum: 0 nesterov: False weight_decay: 0 ) And now, for 1000 steps, we will minimize the difference between the computed \\(n\\) and actual value against \\(\\alpha, \\beta, \\gamma\\). 
We will run the scheme for 50 steps.\nfor _ in tqdm(range(500)): prev_sigma_2 = 0 # # for each row for i in range(len(data)): # get previous value, or seed at 0 # if it doesn\u0026#39;t exist sigma_2 = (-(b*prev_sigma_2+y)/(a*data[\u0026#34;epsilon2\u0026#34;].iloc[i]-1)) n_2 = sigma_2*data[\u0026#34;epsilon2\u0026#34;].iloc[i] ((n_2-data[\u0026#34;value2\u0026#34;].iloc[i])**2).backward() prev_sigma_2 = sigma_2.detach() optimizer.step() optimizer.zero_grad() Awesome, now, let\u0026rsquo;s see the fitted results:\n(a,b,y) (tensor(611584.9375, requires_grad=True), tensor(37750.6133, requires_grad=True), tensor(-26.5902, requires_grad=True)) We will now work to validate these results in the entire dataset.\ndata_val = df.copy() data_val value date 2006-03-01 0.000050 2006-03-02 0.001778 2006-03-03 0.000116 2006-03-06 -0.001038 2006-03-07 -0.001197 ... ... 2020-05-18 0.000264 2020-05-19 0.001434 2020-05-20 0.000995 2020-05-21 0.000120 2020-05-22 0.000424 [3713 rows x 1 columns] Now, we will use these values to compute the variance and the predicted variance on the data.\nRecall that:\n\\begin{equation} \\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1} \\end{equation}\nSo:\ndata_val[\u0026#34;epsilon\u0026#34;] = np.random.normal(0,1, data_val.shape[0]) data_val value epsilon date 2006-03-01 0.000050 0.018859 2006-03-02 0.001778 1.943619 2006-03-03 0.000116 0.397312 2006-03-06 -0.001038 1.025379 2006-03-07 -0.001197 0.081920 ... ... ... 
2020-05-18 0.000264 -0.976598 2020-05-19 0.001434 -0.357048 2020-05-20 0.000995 -1.230387 2020-05-21 0.000120 0.972614 2020-05-22 0.000424 -0.199802 [3713 rows x 2 columns] Now, we will generate a column of sigma squared\nfor i, date in enumerate(data_val.index): prev_sigma_2 = 0 sigma_2 = (-(b*prev_sigma_2+y)/(a*(data_val[\u0026#34;epsilon\u0026#34;]**2).iloc[i]-1)).detach().numpy() # get previous value, or seed at 0 # if it doesn\u0026#39;t exist data_val.loc[date, \u0026#34;sigma\u0026#34;] = sigma_2**0.5 prev_sigma_2 = sigma_2 data_val value epsilon sigma date 2006-03-01 0.000050 0.018859 0.350442 2006-03-02 0.001778 1.943619 0.003393 2006-03-03 0.000116 0.397312 0.016596 2006-03-06 -0.001038 1.025379 0.006431 2006-03-07 -0.001197 0.081920 0.080500 ... ... ... ... 2020-05-18 0.000264 -0.976598 0.006752 2020-05-19 0.001434 -0.357048 0.018468 2020-05-20 0.000995 -1.230387 0.005359 2020-05-21 0.000120 0.972614 0.006779 2020-05-22 0.000424 -0.199802 0.033002 [3713 rows x 3 columns] And finally, let us generate the eta column:\nRecall that \\(\\eta_t = \\sigma_{t}\\epsilon_{t}\\), so:\ndata_val[\u0026#34;eta\u0026#34;] = data_val.sigma * data_val.epsilon data_val value epsilon sigma eta date 2006-03-01 0.000050 0.018859 0.350442 0.006609 2006-03-02 0.001778 1.943619 0.003393 0.006594 2006-03-03 0.000116 0.397312 0.016596 0.006594 2006-03-06 -0.001038 1.025379 0.006431 0.006594 2006-03-07 -0.001197 0.081920 0.080500 0.006595 ... ... ... ... ... 
2020-05-18 0.000264 -0.976598 0.006752 -0.006594 2020-05-19 0.001434 -0.357048 0.018468 -0.006594 2020-05-20 0.000995 -1.230387 0.005359 -0.006594 2020-05-21 0.000120 0.972614 0.006779 0.006594 2020-05-22 0.000424 -0.199802 0.033002 -0.006594 [3713 rows x 4 columns] And finally, let us compute the log loss:\ndata_val[\u0026#34;loss\u0026#34;] = (data_val.eta-data_val.value).abs() data_val value epsilon sigma eta loss date 2006-03-01 0.000050 0.018859 0.350442 0.006609 0.006559 2006-03-02 0.001778 1.943619 0.003393 0.006594 0.004816 2006-03-03 0.000116 0.397312 0.016596 0.006594 0.006478 2006-03-06 -0.001038 1.025379 0.006431 0.006594 0.007632 2006-03-07 -0.001197 0.081920 0.080500 0.006595 0.007791 ... ... ... ... ... ... 2020-05-18 0.000264 -0.976598 0.006752 -0.006594 0.006857 2020-05-19 0.001434 -0.357048 0.018468 -0.006594 0.008028 2020-05-20 0.000995 -1.230387 0.005359 -0.006594 0.007589 2020-05-21 0.000120 0.972614 0.006779 0.006594 0.006474 2020-05-22 0.000424 -0.199802 0.033002 -0.006594 0.007018 [3713 rows x 5 columns] Saving the data:\ndata_val.to_csv(\u0026#34;currency_arbitrage.csv\u0026#34;) ","html":"\u003cp\u003eLet\u0026rsquo;s import some tools.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.optimize\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd load our data:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./currency_signal.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eindex_col\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eheader\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eNone\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eparse_dates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s rename the headers\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erename\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;value\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome. 
For the rest of the calculations, we will hide the 2020 data from the model:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2020\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-25 -0.010659\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-26 -0.000869\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-27 0.000075\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-30 0.000033\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-31 0.000944\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3610 rows x 1 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewe will add a column of randomness to this, to serve as the seed of our epsilon:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;epsilon\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 -0.255699\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 
0.157341\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.574378\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 -1.319365\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 -0.717148\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-25 -0.010659 0.153559\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-26 -0.000869 -1.066562\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-27 0.000075 0.025730\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-30 0.000033 0.760713\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-31 0.000944 -0.427494\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3610 rows x 2 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome, we will now seed three parameter variables. 
Recall that the GARCH model we are dealing with is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\eta_t = \\sigma_{t}\\epsilon_{t} \\\\\n{\\sigma_{t}}^{2} = \\alpha {\\eta_{t}}^{2} + \\beta {\\sigma_{t-1}}^{2} + \\gamma\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving for explicit solutions of \\(n_t\\) and \\(\\sigma_t\\), in terms of the others using computer algebra, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe value of \\(\\eta_t\\) is naturally \\(\\sigma_t \\epsilon_t\\) (i.e. \\(\\eta^{2} = (\\sigma_{t})^{2}(\\epsilon_{t})^{2}\\)).\u003c/p\u003e\n\u003cp\u003eSo, to make the squared results, we want to square both value and epsilon:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;value2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalue\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;epsilon2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eepsilon\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon value2 epsilon2\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 -0.255699 2.450633e-09 0.065382\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 0.157341 3.162006e-06 0.024756\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.574378 1.334210e-08 0.329910\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 -1.319365 1.076978e-06 1.740723\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 -0.717148 1.432477e-06 0.514301\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-25 -0.010659 0.153559 1.136119e-04 0.023580\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-26 -0.000869 -1.066562 7.549935e-07 1.137555\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-27 0.000075 0.025730 5.670657e-09 0.000662\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-30 0.000033 0.760713 1.083948e-09 0.578684\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2019-12-31 0.000944 -0.427494 8.913486e-07 0.182751\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3610 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we can now compute a column of these, based on the data we have. 
To be able to optimize this symbolically, we will leverage PyTorch.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s seed these constants all at \\(1\\), to be optimized later:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etensor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e-10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erequires_grad\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True), tensor(1.0000e-10, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe use the 
complex data type here to make the subtract operation work. We will eventually project it down to real space without much trouble.\u003c/p\u003e\n\u003cp\u003eAwesome, let us compute this series of \\(\\sigma\\), and optimize for the loss.\u003c/p\u003e\n\u003cp\u003eHere is a gradient descent optimizer:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we will use the gradient descent scheme\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etorch\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSGD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3e-3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSGD (\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eParameter Group 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e dampening: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e differentiable: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e foreach: None\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e lr: 0.003\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e maximize: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e momentum: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nesterov: False\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e weight_decay: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, for 1000 steps, we will minimize the difference between the computed \\(n\\) and actual value against \\(\\alpha, \\beta, \\gamma\\). 
We will run the scheme for 50 steps.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etqdm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e500\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# # for each row\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# get previous value, or seed at 0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# if it doesn\u0026#39;t exist\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003en_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;value2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebackward\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estep\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eoptimizer\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ezero_grad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAwesome, now, let\u0026rsquo;s see the fitted results:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(tensor(611584.9375, requires_grad=True), tensor(37750.6133, requires_grad=True), tensor(-26.5902, requires_grad=True))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now work to validate these results in the entire dataset.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecopy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 1 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we will use these values to compute the variance and the predicted variance on the data.\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma_{t}^{2} = -\\frac{\\beta \\mathit{\\sigma_{t-1}}^{2} + y}{\\alpha \\epsilon^{2} - 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erandom\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshape\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 2 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, we will generate a column of sigma squared\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edate\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eenumerate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;epsilon\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eiloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edetach\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get previous value, or seed at 0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# if it doesn\u0026#39;t exist\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edate\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;sigma\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eprev_sigma_2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma_2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon sigma\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859 0.350442\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619 0.003393\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312 0.016596\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379 0.006431\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920 
0.080500\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598 0.006752\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048 0.018468\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387 0.005359\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614 0.006779\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802 0.033002\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 3 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd finally, let us generate the eta column:\u003c/p\u003e\n\u003cp\u003eRecall that \\(\\eta_t = \\sigma_{t}\\epsilon_{t}\\), so:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;eta\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eepsilon\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon sigma eta\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859 0.350442 0.006609\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619 0.003393 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312 0.016596 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379 0.006431 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920 0.080500 0.006595\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598 0.006752 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048 0.018468 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387 0.005359 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614 0.006779 0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802 0.033002 -0.006594\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd finally, let us compute the log loss:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;loss\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eeta\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e value epsilon sigma eta loss\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edate\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-01 0.000050 0.018859 0.350442 0.006609 0.006559\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-02 0.001778 1.943619 0.003393 0.006594 0.004816\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-03 0.000116 0.397312 0.016596 0.006594 0.006478\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-06 -0.001038 1.025379 0.006431 0.006594 0.007632\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2006-03-07 -0.001197 0.081920 0.080500 0.006595 0.007791\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-18 0.000264 -0.976598 0.006752 -0.006594 0.006857\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-19 0.001434 -0.357048 0.018468 -0.006594 0.008028\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-20 0.000995 -1.230387 0.005359 -0.006594 0.007589\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-21 0.000120 0.972614 0.006779 0.006594 0.006474\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2020-05-22 0.000424 -0.199802 0.033002 -0.006594 0.007018\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[3713 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSaving the data:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata_val\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;currency_arbitrage.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_currency_arbitrage/","tags":null,"title":"NUS-ECON320 Currency Arbitrage"},{"categories":null,"contents":"We 
want to construct a combined agent\n\\begin{equation} (k_1+k_2)x^{*}(k_1+k_2, \\gamma^{*}) = x^{*}(k_1,\\gamma_{1})k_1+x^{*}(k_2, \\gamma_{2})k_2 \\end{equation}\nwhich combines the relative risk of \\(\\gamma_{1}, \\gamma_{2}\\) into some new \\(\\gamma^{*}\\), which produces the same combined consumption of both agents \\(k_1+k_2\\).\nLet us create some CAS tools to solve the inter-temporal choice problem generically for 10 steps in the past.\nWe do this by solving backwards. We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\nLet us first declare the function for power utility. \\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\n# risk aversion y = var(\u0026#34;y\u0026#34;, latex_name=\u0026#34;\\gamma\u0026#34;, domain=\u0026#39;real\u0026#39;) # discount factor d = var(\u0026#34;d\u0026#34;, latex_name=\u0026#34;\\delta\u0026#34;, domain=\u0026#39;real\u0026#39;) # final value at time t=f k_f = var(\u0026#34;k_f\u0026#34;, latex_name=\u0026#34;k_f\u0026#34;, domain=\u0026#39;real\u0026#39;) # the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1} m = var(\u0026#34;m\u0026#34;, latex_name=\u0026#34;\\mu\u0026#34;, domain=\u0026#39;real\u0026#39;) # boundary conditions assume(y\u0026gt;0) assume(y\u0026lt;1) assume(d\u0026gt;0) assume(d\u0026lt;1) # power utility u(c) = ((c^(1-y)-1)/(1-y)) u c |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1) At the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\n# at the final time, leave nothing for investment I=0; u_total = 0 From every step from here, we will discount this utility by \\(d\\), then solve for the previous step\u0026rsquo;s target consumption that would maximize utility. 
That is, at every step, we desire:\n\\begin{equation} k_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t} \\end{equation}\n\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\nand\n\\(\\max u(c_{t})\\)\nRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\n\\begin{equation} I_{t-1} = \\frac{k_t}{(1+m)} \\end{equation}\nEnough talk, let\u0026rsquo;s get to it:\n# create an dictionary to keep track of all the capital variables k = {} # we will iterate time stamps 1-10 T = 10 # a variable for captial at that time for i in range(T): k_t = var(f\u0026#34;k_{T-i}\u0026#34;, latex_name=f\u0026#34;k_{T-i}\u0026#34;) # t-i becasue we are solving backwards; i0 = T10 # what can be consumed at every time stamp # is the k of the previous timestamp, minus # what needs to be left over # we multiply here by d because we want to # discount future utility u_total = d*u_total + u(k_t-I) # add the current variable to dictionary k[T-i] = k_t # recall again i0=T10 because backwards # solve for the next investment amount I = k_t/(1+m) u_total -(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1) We can now use the scipy numerical optimizer to minimize this target. 
Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\nWe will set some initial conditions:\n_m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less Recall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution:\n# we reverse the k_* variables because it is stored in the dictionary # in reverse, because we knew the reverse condition first optim_variables = list(k.values()) optim_variables.reverse() # this function is also the callback, so it returning # True terminates execution def u_total_loss(x): # the optimizer\u0026#39;s current step # we want to take [1:], because we need to keep k1 the same at _k the # initial value substitution_dict = {key: val for key, val in zip(optim_variables[1:], x)} # initial conditions substitution_dict[m] = _m substitution_dict[y] = _y substitution_dict[d] = _d substitution_dict[d] = _d # we want to keep the initial value k1 the same substitution_dict[k[1]] = _k try: # get value content = (-1*u_total).subs(substitution_dict) # recall we multiply by -1 because we are MINIMIZING, so the loss is # the inverse of the maximization utility target return float(content.n()), False except: return 0, True Finally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. 
do nothing) and have the optimizer work it out from there:\nfrom scipy.optimize import minimize target = minimize(lambda x:u_total_loss(x)[0], [_k for _ in range(T-1)], callback=lambda x:u_total_loss(x)[1]) target fun: -50.71592850322347 hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 4433.87331935, 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017, 1126.51562458], [ 7617.14636381, 14333.33933517, 11251.71278723, 8073.31207641, 7444.53071922, 6481.03236385, 4347.35353474, 2644.39855553, 1359.86586059], [ 5964.42171873, 11251.71278723, 15011.27497355, 10093.46973099, 9229.06386286, 8371.07459024, 5510.14654004, 3480.74298654, 1639.19265606], [ 4433.87331935, 8073.31207641, 10093.46973099, 12434.28059884, 11689.33288295, 10711.57399875, 7440.7461982 , 4810.57094062, 2255.16306648], [ 4253.91810669, 7444.53071922, 9229.06386286, 11689.33288295, 14840.59602968, 12519.06872583, 8708.9160148 , 5688.83339388, 2598.27394651], [ 3528.72923763, 6481.03236385, 8371.07459024, 10711.57399875, 12519.06872583, 14999.44881857, 10630.30739223, 6512.62254338, 2293.45506703], [ 2329.61846616, 4347.35353474, 5510.14654004, 7440.7461982 , 8708.9160148 , 10630.30739223, 12147.11811342, 7149.37937935, 2657.8129831 ], [ 1769.85078017, 2644.39855553, 3480.74298654, 4810.57094062, 5688.83339388, 6512.62254338, 7149.37937935, 7260.90962516, 2422.66762041], [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648, 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041, 2911.30717272]]) jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00, 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06, -5.24520874e-06]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 1360 nit: 130 njev: 136 status: 0 success: True x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591, 361.51493714, 272.10309839, 192.29084196, 120.94057011, 57.12129925]) Awesome! 
We now can recover \\(c\\) at each point by a nice helpful function:\nc(k0, k1) = k0 - k1/(_m+1) \u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\nSo, let us translate our list to the actual values consumed:\ncapital_over_time = [_k]+target.x.tolist() # we need to add the initial condition _k back to the # inventory list consumption_over_time = [c(i,j) for i,j in zip(capital_over_time, capital_over_time[1:])] consumption_over_time [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] Examples of Output _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [167.107941529699, 148.324379643989, 131.608611479784, 116.835018601197, 103.699164584812, 92.1059288329209, 81.7161261514775, 72.5477032414004, 64.3848282779261] _m = 0.1 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.8 # generally risk averse _d = 0.9 # the future matters slightly less [154.860597149863, 152.989432556196, 151.010433069881, 149.201249715528, 147.329750167852, 145.539019666462, 143.739371599600, 141.984228587213, 140.243839963791] _m = 0.01 # 1% period-to-period increase _k = 1000 # $1000 capital _y = 0.2 # generally risky _d = 0.9 # the future matters slightly less [388.525041338376, 241.124420093987, 149.632568775223, 92.8644259086613, 57.6330459746870, 35.7667230511026, 22.1970017374152, 13.7754327365677, 8.54930907023498] _m = -0.01 # this is a loosing stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters fun: 0 hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]) jac: array([0., 0., 0., 
0., 0., 0., 0., 0., 0.]) message: \u0026#39;Optimization terminated successfully.\u0026#39; nfev: 10 nit: 0 njev: 1 status: 0 success: True x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.]) Evidently: do nothing if we have a loosing cause.\n_m = 1.00 # this is SUPER winning stock _k = 1000 # $1000 capital _y = 0.9 # very safe _d = 0.9 # the future matters [125.667556437602, 241.474827418105, 460.068836905327, 868.972817783791, 4540.45893314523, 4219.93058738029, 3988.05775624984, 3996.89431939885, 3615.74982832315] We made so much money that we are spending a lot of it and still spending it.\n","html":"\u003cp\u003eWe want to construct a combined agent\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(k_1+k_2)x^{*}(k_1+k_2, \\gamma^{*}) = x^{*}(k_1,\\gamma_{1})k_1+x^{*}(k_2, \\gamma_{2})k_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich combines the relative risk of \\(\\gamma_{1}, \\gamma_{2}\\) into some new \\(\\gamma^{*}\\), which produces the same combined consumption of both agents \\(k_1+k_2\\).\u003c/p\u003e\n\u003cp\u003eLet us create some CAS tools to solve the inter-temporal choice problem generically for 10 steps in the past.\u003c/p\u003e\n\u003cp\u003eWe do this by solving backwards. We will create a variable \\(k\\) to measure asset, and \\(k_{t}\\) the remaining asset at time \\(t\\).\u003c/p\u003e\n\u003cp\u003eLet us first declare the function for \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e. 
\\(k\\) is our asset holding, \\(\\gamma\\) our relative margin of risk, and \\(U\\) the power utility.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# risk aversion\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;y\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\gamma\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# discount factor\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;d\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\delta\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# final value at time t=f\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek_f\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_f\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e# the instrument\u0026#39;s percentage return over a period: i.e. (1+mu)*I_t = k_{t+1}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;m\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\mu\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edomain\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;real\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# boundary conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eassume\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# power utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ec |--\u0026gt; -(c^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAt the final time stamp, we desire to consume all of our assets. Therefore, we will seed our investment amount at \\(I=0\\). 
We will optimize for eventual global utility, therefore, we will talley our utility; starting this talley at \\(0\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# at the final time, leave nothing for investment\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFrom every step from here, we will discount this utility by \\(d\\), then solve for the \u003cem\u003eprevious\u003c/em\u003e step\u0026rsquo;s target consumption that would maximize utility. That is, at every step, we desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nk_{t-1} = I_{t} + c_{t} \\implies c_{t} = k_{t-1}-I_{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;we want to consume all we have that needed\u0026rsquo;t to be invested\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\(\\max u(c_{t})\\)\u003c/p\u003e\n\u003cp\u003eRecall also that \\((1+\\mu)I_{t} = k_{t+1}\\) (as \\(\\mu\\) is the mean log-return, 1+that times \\(I\\), how much was invested at time \\(t\\), is the expected capital one time period from then.) 
Therefore, to make sure that \\(k_{f}\\) gets back in the final period, we solve for our seed value for \\(I\\), how much to invest would be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI_{t-1} = \\frac{k_t}{(1+m)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEnough talk, let\u0026rsquo;s get to it:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# create an dictionary to keep track of all the capital variables\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we will iterate time stamps 1-10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# a variable for captial at that time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;k_\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# t-i becasue we are solving backwards; i0 = T10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what can be consumed at every time 
stamp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# is the k of the previous timestamp, minus\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# what needs to be left over\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we multiply here by d because we want to\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# discount future utility\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# add the current variable to dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# recall again i0=T10 because backwards\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# solve for the next investment amount\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek_t\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-(((((((d*(d*(k_10^(-y + 1) - 1)/(y - 1) + ((k_9 - k_10/(m + 1))^(-y + 1) - 1)/(y - 1)) + ((k_8 - k_9/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_7 - k_8/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_6 - k_7/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_5 - k_6/(m + 1))^(-y + 1) - 1)/(y 
- 1))*d + ((k_4 - k_5/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_3 - k_4/(m + 1))^(-y + 1) - 1)/(y - 1))*d + ((k_2 - k_3/(m + 1))^(-y + 1) - 1)/(y - 1))*d - ((k_1 - k_2/(m + 1))^(-y + 1) - 1)/(y - 1)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can now use the scipy numerical optimizer to minimize this target. Recall that we can recover the actual value of consumption at each step as \\(c=k-\\frac{k}{m+1}\\).\u003c/p\u003e\n\u003cp\u003eWe will set some initial conditions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that the scipy optimizer MINIMIZES, so we will make the loss the negative of utility. Before we finally start, we need to make the actual, numerical loss function that performs the substitution:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# we reverse the k_* variables because it is stored in the dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# in reverse, because we knew the reverse condition first\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereverse\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# this function is also the callback, so it returning\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# True terminates execution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the optimizer\u0026#39;s current step\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to take [1:], because we need to keep k1 the same at _k the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ekey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eval\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_variables\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initial conditions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003em\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we want to keep the initial value k1 the same\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003etry\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubstitution_dict\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# recall we multiply by -1 because we are MINIMIZING, so the loss is\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# the inverse of the maximization utility target\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econtent\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()),\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eFalse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eexcept\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, we are ready to start. We will now create the other initial conditions k1\u0026hellip;k10 and : we will set the initial value to all be 1000 (i.e. do nothing) and have the optimizer work it out from there:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.optimize\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eminimize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e 
\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecallback\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu_total_loss\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: -50.71592850322347\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[ 9518.97596212, 7617.14636381, 5964.42171873, 
4433.87331935,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4253.91810669, 3528.72923763, 2329.61846616, 1769.85078017,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1126.51562458],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 7617.14636381, 14333.33933517, 11251.71278723, 8073.31207641,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 7444.53071922, 6481.03236385, 4347.35353474, 2644.39855553,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1359.86586059],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 5964.42171873, 11251.71278723, 15011.27497355, 10093.46973099,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 9229.06386286, 8371.07459024, 5510.14654004, 3480.74298654,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 1639.19265606],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 4433.87331935, 8073.31207641, 10093.46973099, 12434.28059884,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 11689.33288295, 10711.57399875, 7440.7461982 , 4810.57094062,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2255.16306648],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 4253.91810669, 7444.53071922, 9229.06386286, 11689.33288295,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 14840.59602968, 12519.06872583, 8708.9160148 , 5688.83339388,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2598.27394651],\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e [ 3528.72923763, 6481.03236385, 8371.07459024, 10711.57399875,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 12519.06872583, 14999.44881857, 10630.30739223, 6512.62254338,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2293.45506703],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 2329.61846616, 4347.35353474, 5510.14654004, 7440.7461982 ,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8708.9160148 , 10630.30739223, 12147.11811342, 7149.37937935,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2657.8129831 ],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1769.85078017, 2644.39855553, 3480.74298654, 4810.57094062,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 5688.83339388, 6512.62254338, 7149.37937935, 7260.90962516,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2422.66762041],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1126.51562458, 1359.86586059, 1639.19265606, 2255.16306648,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2598.27394651, 2293.45506703, 2657.8129831 , 2422.66762041,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 2911.30717272]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([ 0.00000000e+00, -3.81469727e-06, 2.38418579e-06, 0.00000000e+00,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 8.58306885e-06, -5.24520874e-06, 2.86102295e-06, -1.43051147e-06,\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e -5.24520874e-06])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 1360\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 130\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 136\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([841.22097906, 699.82556541, 573.89912346, 461.63474591,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 361.51493714, 272.10309839, 192.29084196, 120.94057011,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.12129925])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cem\u003eAwesome!\u003c/em\u003e We now can recover \\(c\\) at each point by a nice helpful function:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ek1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u0026ldquo;Consumption is how much we have, minus how much we would be investing\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSo, let us translate our list to the actual values consumed:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etarget\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etolist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# we need to add the initial condition _k back to the\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# inventory list\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ec\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ezip\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecapital_over_time\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:])]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econsumption_over_time\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"examples-of-output\"\u003eExamples of Output\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 
capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[167.107941529699,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 148.324379643989,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 131.608611479784,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 116.835018601197,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 103.699164584812,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.1059288329209,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 81.7161261514775,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 72.5477032414004,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
64.3848282779261]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.8\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risk averse\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" 
data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[154.860597149863,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 152.989432556196,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 151.010433069881,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.201249715528,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 147.329750167852,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 145.539019666462,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 143.739371599600,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 141.984228587213,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 140.243839963791]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 1% period-to-period increase\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 
capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.2\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# generally risky\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters slightly less\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[388.525041338376,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.124420093987,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 149.632568775223,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 92.8644259086613,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 57.6330459746870,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 35.7667230511026,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 22.1970017374152,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 13.7754327365677,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
8.54930907023498]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is a loosing stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e fun: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 1, 0, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 1, 0, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 1, 0, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 1, 0, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 1, 0, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 1, 0, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 1, 0],\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e jac: array([0., 0., 0., 0., 0., 0., 0., 0., 0.])\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e message: \u0026#39;Optimization terminated successfully.\u0026#39;\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nfev: 10\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e nit: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e njev: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e status: 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e success: True\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e x: array([1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000., 1000.])\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently: do nothing if we have a loosing cause.\u003c/p\u003e\n\u003chr\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_m\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1.00\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is SUPER winning stock\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_k\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# $1000 capital\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_y\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# very safe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e_d\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.9\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# the future 
matters\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[125.667556437602,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 241.474827418105,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 460.068836905327,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 868.972817783791,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4540.45893314523,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 4219.93058738029,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3988.05775624984,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3996.89431939885,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 3615.74982832315]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe made so much money that we are spending a lot of it and still spending it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_risk_appetite/","tags":null,"title":"NUS-ECON320 Inter-Temporal Choice"},{"categories":null,"contents":"Let\u0026rsquo;s begin. 
We want to create test for the linearity of a few assets, for whether or not they follow the CAPM.\nNote that we will be using the Sharpe-Linter version of CAPM:\n\\begin{equation} E[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})] \\end{equation}\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\nLet us begin. We will create a generic function to analyze some given stock.\nWe will first import our utilities\nimport pandas as pd import numpy as np Let\u0026rsquo;s load the data from our market (NYSE) as well as our 10 year t-bill data.\nt_bill = pd.read_csv(\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;) nyse = pd.read_csv(\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;) nyse.head() Date Close 0 11/7/2013 16:00:00 9924.37 1 11/8/2013 16:00:00 10032.14 2 11/11/2013 16:00:00 10042.95 3 11/12/2013 16:00:00 10009.84 4 11/13/2013 16:00:00 10079.89 Excellent. 
Let\u0026rsquo;s load in the data for that stock.\ndef load_stock(stock): return pd.read_csv(f\u0026#34;./linearity_test_data/{stock}.csv\u0026#34;) load_stock(\u0026#34;LMT\u0026#34;).head() Date Close 0 11/7/2013 16:00:00 136.20 1 11/8/2013 16:00:00 138.11 2 11/11/2013 16:00:00 137.15 3 11/12/2013 16:00:00 137.23 4 11/13/2013 16:00:00 137.26 And now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\n# load data df = { \u0026#34;Date\u0026#34;: nyse.Date, \u0026#34;NYSE\u0026#34;: nyse.Close, \u0026#34;TBill\u0026#34;: t_bill.Close, \u0026#34;LMT\u0026#34;: load_stock(\u0026#34;LMT\u0026#34;).Close, \u0026#34;TWTR\u0026#34;: load_stock(\u0026#34;TWTR\u0026#34;).Close, \u0026#34;MCD\u0026#34;: load_stock(\u0026#34;MCD\u0026#34;).Close } # convert to dataframe df = pd.DataFrame(df) # drop empty df.dropna(inplace=True) df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20 1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01 2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09 3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66 4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21 2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28 2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38 2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36 2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07 [2159 rows x 6 columns] Excellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\nlog_returns = df[[\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]].apply(np.log, inplace=True) df.loc[:, [\u0026#34;NYSE\u0026#34;, \u0026#34;TBill\u0026#34;, \u0026#34;LMT\u0026#34;, \u0026#34;TWTR\u0026#34;, \u0026#34;MCD\u0026#34;]] = log_returns df Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771 1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814 2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638 3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492 4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089 ... ... ... ... ... ... ... 2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262 2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577 2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980 2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879 2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711 [2159 rows x 6 columns] We will now calculate the log daily returns. But before\u0026mdash;the dates are no longer relavent here so we drop them.\ndf Date NYSE TBill LMT TWTR MCD 0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771 1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814 2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638 3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492 4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089 ... ... ... ... ... ... ... 
2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262 2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577 2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980 2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879 2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711 [2159 rows x 6 columns] And now, the log returns! We will shift this data by one column and subtract.\nreturns = df.drop(columns=[\u0026#34;Date\u0026#34;]) - df.drop(columns=[\u0026#34;Date\u0026#34;]).shift(1) returns.dropna(inplace=True) returns NYSE TBill LMT TWTR MCD 1 0.010801 0.049646 0.013926 -0.075136 -0.001957 2 0.001077 0.001819 -0.006975 0.029570 0.000824 3 -0.003302 0.006161 0.000583 -0.023586 0.005854 4 0.006974 -0.015657 0.000219 0.016568 0.004597 5 0.005010 -0.008476 0.007476 0.047896 -0.005622 ... ... ... ... ... ... 2154 0.005785 0.004940 -0.023467 -0.014291 0.001349 2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685 2156 0.006282 0.064420 0.004112 0.015402 0.004403 2157 0.002626 0.034169 0.003575 0.006245 -0.008100 2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168 [2158 rows x 5 columns] We are now ready to run the correlation study.\nLet\u0026rsquo;s now subtract everything by the risk-free rate (dropping the rfr itself):\nrisk_free_excess = returns.drop(columns=\u0026#34;TBill\u0026#34;).apply(lambda x: x-returns.TBill) risk_free_excess NYSE LMT TWTR MCD 1 -0.038846 -0.035720 -0.124783 -0.051603 2 -0.000742 -0.008794 0.027751 -0.000995 3 -0.009463 -0.005577 -0.029747 -0.000307 4 0.022630 0.015875 0.032225 0.020254 5 0.013486 0.015952 0.056372 0.002854 ... ... ... ... ... 
2154 0.000845 -0.028406 -0.019231 -0.003591 2155 -0.021162 -0.037975 -0.043738 -0.047818 2156 -0.058138 -0.060308 -0.049017 -0.060017 2157 -0.031543 -0.030593 -0.027924 -0.042269 2158 0.040964 0.027215 0.010537 0.024422 [2158 rows x 4 columns] Actual Regression It is now time to perform the actual linear regression!\nimport statsmodels.api as sm Let\u0026rsquo;s work with Lockheed Martin first, fitting an ordinary least squares. Remember that the OLS functions reads the endogenous variable first (for us, the return of the asset.)\n# add a column of ones to our input market excess returns nyse_with_bias = sm.add_constant(risk_free_excess.NYSE) # perform linreg lmt_model = sm.OLS(risk_free_excess.LMT, nyse_with_bias).fit() lmt_model.summary() OLS Regression Results ============================================================================== Dep. Variable: LMT R-squared: 0.859 Model: OLS Adj. R-squared: 0.859 Method: Least Squares F-statistic: 1.312e+04 Date: Mon, 31 Oct 2022 Prob (F-statistic): 0.00 Time: 10:39:24 Log-Likelihood: 6318.9 No. Observations: 2158 AIC: -1.263e+04 Df Residuals: 2156 BIC: -1.262e+04 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0004 0.000 1.311 0.190 -0.000 0.001 NYSE 0.9449 0.008 114.552 0.000 0.929 0.961 ============================================================================== Omnibus: 423.969 Durbin-Watson: 1.965 Prob(Omnibus): 0.000 Jarque-Bera (JB): 11575.074 Skew: -0.160 Prob(JB): 0.00 Kurtosis: 14.341 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. 
Based on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being slightly undervarying that the market.\nWe can continue with the other stocks.\n# perform linreg mcd_model = sm.OLS(risk_free_excess.MCD, nyse_with_bias).fit() mcd_model.summary() OLS Regression Results ============================================================================== Dep. Variable: MCD R-squared: 0.887 Model: OLS Adj. R-squared: 0.887 Method: Least Squares F-statistic: 1.697e+04 Date: Mon, 31 Oct 2022 Prob (F-statistic): 0.00 Time: 10:39:24 Log-Likelihood: 6551.1 No. Observations: 2158 AIC: -1.310e+04 Df Residuals: 2156 BIC: -1.309e+04 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0003 0.000 1.004 0.315 -0.000 0.001 NYSE 0.9651 0.007 130.287 0.000 0.951 0.980 ============================================================================== Omnibus: 323.911 Durbin-Watson: 1.988 Prob(Omnibus): 0.000 Jarque-Bera (JB): 3032.550 Skew: 0.395 Prob(JB): 0.00 Kurtosis: 8.753 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Same thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. 
The food industry is probably a tougher business than that in defense.\nLastly, to analyze the recently delisted Twitter!\n# perform linreg twtr_model = sm.OLS(risk_free_excess.TWTR, nyse_with_bias).fit() twtr_model.summary() OLS Regression Results ============================================================================== Dep. Variable: TWTR R-squared: 0.522 Model: OLS Adj. R-squared: 0.522 Method: Least Squares F-statistic: 2357. Date: Mon, 31 Oct 2022 Prob (F-statistic): 0.00 Time: 10:39:24 Log-Likelihood: 4307.1 No. Observations: 2158 AIC: -8610. Df Residuals: 2156 BIC: -8599. Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const -0.0002 0.001 -0.346 0.730 -0.002 0.001 NYSE 1.0173 0.021 48.549 0.000 0.976 1.058 ============================================================================== Omnibus: 661.205 Durbin-Watson: 1.986 Prob(Omnibus): 0.000 Jarque-Bera (JB): 15925.609 Skew: -0.883 Prob(JB): 0.00 Kurtosis: 16.191 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Evidently, Twitter is much more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) Furthermore, we have a positive beta value: that the asset is more variable than the market.\nmanual regression We can also use the betas formula to manually calculate what we expect for the beta values (i.e. 
as if they were one IID random variable.)\nrisk_free_cov = risk_free_excess.cov() risk_free_cov NYSE LMT TWTR MCD NYSE 0.001143 0.001080 0.001163 0.001103 LMT 0.001080 0.001188 0.001116 0.001083 TWTR 0.001163 0.001116 0.002264 0.001155 MCD 0.001103 0.001083 0.001155 0.001200 Finally, to construct the beta values. Recall that:\n\\begin{equation} \\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]} \\end{equation}\nand that:\n\\begin{equation} Var[X] = Cov[X,X], \\forall X \\end{equation}\n# get the market variance (covariance with itself) market_variation = risk_free_cov.NYSE.NYSE # calculate betas betas = {\u0026#34;LMT\u0026#34;: (risk_free_cov.LMT.NYSE/market_variation), \u0026#34;TWTR\u0026#34;: (risk_free_cov.TWTR.NYSE/market_variation), \u0026#34;MCD\u0026#34;: (risk_free_cov.MCD.NYSE/market_variation)} # and make dataframe betas = pd.Series(betas) betas LMT 0.944899 TWTR 1.017294 MCD 0.965081 dtype: float64 Apparently, all of our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\nFund creation We will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\n\u0026ldquo;Equal-Parts Fund\u0026rdquo; (\u0026ldquo;Fund 1\u0026rdquo;) We will now create a fund in equal parts. Here it is:\nfund_1_returns = returns.LMT + returns.TWTR + returns.MCD fund_1_returns 1 -0.063167 2 0.023420 3 -0.017149 4 0.021384 5 0.049750 ... 2154 -0.036409 2155 -0.021132 2156 0.023917 2157 0.001720 2158 -0.014596 Length: 2158, dtype: float64 We will calculate the excess returns of this fund:\nfund_1_excess = fund_1_returns-returns.TBill fund_1_excess 1 -0.112813 2 0.021600 3 -0.023310 4 0.037041 5 0.058226 ... 
2154 -0.041349 2155 -0.057265 2156 -0.040503 2157 -0.032449 2158 0.010994 Length: 2158, dtype: float64 And then perform a regression\n# perform linreg fund_1_model = sm.OLS(fund_1_excess, nyse_with_bias).fit() fund_1_model.summary() OLS Regression Results ============================================================================== Dep. Variable: y R-squared: 0.473 Model: OLS Adj. R-squared: 0.473 Method: Least Squares F-statistic: 1935. Date: Mon, 31 Oct 2022 Prob (F-statistic): 3.01e-302 Time: 10:39:24 Log-Likelihood: 3869.5 No. Observations: 2158 AIC: -7735. Df Residuals: 2156 BIC: -7724. Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P\u0026gt;|t| [0.025 0.975] ------------------------------------------------------------------------------ const 0.0007 0.001 0.841 0.401 -0.001 0.002 NYSE 1.1290 0.026 43.993 0.000 1.079 1.179 ============================================================================== Omnibus: 600.456 Durbin-Watson: 2.022 Prob(Omnibus): 0.000 Jarque-Bera (JB): 8416.514 Skew: -0.914 Prob(JB): 0.00 Kurtosis: 12.501 Cond. No. 29.6 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. Surprisingly, we have now created a significantly riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\nA Better Fund To me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. 
To do this, let\u0026rsquo;s create a generic linear combination of the assets.\nimport sympy as sym x = sym.Symbol(\u0026#39;x\u0026#39;) y = sym.Symbol(\u0026#39;y\u0026#39;) z = sym.Symbol(\u0026#39;z\u0026#39;) fund_2_returns = x*returns.LMT + y*returns.TWTR + z*returns.MCD fund_2_returns 1 0.0139260753744255*x - 0.0751364261353569*y - ... 2 -0.00697525170622448*x + 0.0295704573211193*y ... 3 0.000583132897928884*x - 0.0235859990058791*y ... 4 0.000218587198947517*x + 0.016568426347233*y +... 5 0.00747599199607762*x + 0.0478955096700351*y -... ... 2154 -0.0234665578621085*x - 0.0142913301107561*y +... 2155 -0.00184214468578059*x - 0.0076045993852194*y ... 2156 0.00411172646842317*x + 0.0154024001854269*y +... 2157 0.00357547337231878*x + 0.0062445563228315*y -... 2158 0.00162509910496933*x - 0.0150529686289622*y -... Length: 2158, dtype: object Excellent. We will also calculate the excess returns of this fund:\nfund_2_excess = fund_2_returns-returns.TBill Y = fund_2_excess.to_numpy() Y [0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039 -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 0.00181917459665826 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536 ... 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043] We cast this type to a numpy array because we are about to perform some matrix operations upon it.\nNow, let us perform the actual linear regression ourselves. 
Recall that the pseudoinverse linear regression estimator is:\n\\begin{equation} \\beta = (X^{T}X)^{-1}X^{T}Y \\end{equation}\nWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\nX = nyse_with_bias.to_numpy() X [[ 1.00000000e+00 -3.88457302e-02] [ 1.00000000e+00 -7.42217926e-04] [ 1.00000000e+00 -9.46284244e-03] ... [ 1.00000000e+00 -5.81378271e-02] [ 1.00000000e+00 -3.15429207e-02] [ 1.00000000e+00 4.09643405e-02]] We now have our matrices, let\u0026rsquo;s perform the linear regression!\nlinear_model = np.linalg.inv((X.transpose()@X))@X.transpose()@Y linear_model [0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281] Excellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\nWe can will solve for a combination of solutions to give us specific values of returns vs risk. We will set the asset to learn exactly as much as the market (i.e. no bias).\ndeviance_expr = linear_model[0] deviance_expr 0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344 We will now try to make variance exactly as much as that in the market.\nrisk_expr = linear_model[1] - 1 risk_expr 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719 Let us now calculate the boundary condition of our optimization problem by solving an expression in these two expressions.\nsolution = sym.solvers.solve([deviance_expr, risk_expr], x,y,z) solution {x: 0.412737013327711 - 0.819584899551304*z, y: 0.693765220909132 - 0.24067066980814*z} Excellent. 
Let us recalculate our optimization objective (\u0026ldquo;deviance\u0026rdquo;\u0026mdash;return) in terms of these new solutions. We aim now to maximize this expression by minimizing (i.e. our optimizer minimizes) the negative thereof\u0026mdash;recalling that scypy works as a minimizer.\noptim_objective = deviance_expr.subs(solution)-1e2 optim_objective -5.04831636563563e-19*z - 100.0 We can now use this value to solve for a \\(z\\) value.\noptim_solution = sym.solvers.solve([optim_objective], z) optim_solution {z: -1.98085842402250e+20} Excellent. We can now solve for the rest of our values.\nz0 = float(optim_solution[z]) x0 = solution[x].subs(z, z0) y0 = solution[y].subs(z, z0) (x0,y0,z0) (1.62348165247784e+16, 4.76734523704593e+15, -1.980858424022502e+16) This would create the following plan:\n# solution fund_2_nobias = x0*returns.LMT + y0*returns.TWTR + z0*returns.MCD fund_2_nobias.mean() 0.009168283711770158 Recall that this is the performance of the balanced portfolio:\nfund_1_returns.mean() 0.0009224705380695683 Finally, let\u0026rsquo;s plot the prices of our various funds:\nimport matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns from datetime import datetime sns.set() fund_2_price = x0*df.LMT + y0*df.TWTR + z0*df.MCD fund_1_price = df.LMT + df.TWTR fund_l_price = df.LMT fund_t_price = df.TWTR dates = df.Date.apply(lambda x:datetime.strptime(x, \u0026#34;%m/%d/%Y %H:%M:%S\u0026#34;)) sns.lineplot(x=dates, y=fund_2_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_1_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_l_price.apply(sym.Float).astype(float)) sns.lineplot(x=dates, y=fund_t_price.apply(sym.Float).astype(float)) plt.gca().xaxis.set_major_locator(mdates.YearLocator()) plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(\u0026#39;%Y\u0026#39;)) plt.gca().set_ylabel(\u0026#34;Price\u0026#34;) plt.show() None Recall that we didn\u0026rsquo;t actually buy any 
MacDonald\u0026rsquo;s. So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\nOur portfolio works surprisingly well!\n","html":"\u003cp\u003eLet\u0026rsquo;s begin. We want to create test for the linearity of a few assets, for whether or not they follow the CAPM.\u003c/p\u003e\n\u003cp\u003eNote that we will be using the Sharpe-Linter version of CAPM:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[R_{i}-R_{f}] = \\beta_{im} E[(R_{m}-R_{f})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that we declare \\(R_{f}\\) (the risk-free rate) to be non-stochastic.\u003c/p\u003e\n\u003cp\u003eLet us begin. We will create a generic function to analyze some given stock.\u003c/p\u003e\n\u003cp\u003eWe will first import our utilities\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s load the data from our market (NYSE) 
as well as our 10 year t-bill data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et_bill\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/10yrTBill.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/NYSEComposite.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre 
tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let\u0026rsquo;s load in the data for that stock.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_csv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;./linearity_test_data/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estock\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e}\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e.csv\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ehead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date Close\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 136.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 138.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 137.15\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 137.23\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 
16:00:00 137.26\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, let\u0026rsquo;s load all three stocks, then concatenate them all into a big-ol DataFrame.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# load data\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et_bill\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eload_stock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eClose\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# convert to dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# drop empty\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9924.37 26.13 136.20 44.90 97.20\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 10032.14 27.46 138.11 41.65 97.01\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 10042.95 27.51 137.15 42.90 97.09\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 10009.84 27.68 137.23 41.90 97.66\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 10079.89 27.25 137.26 42.60 98.11\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 14226.11 30.44 440.11 39.60 252.21\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 14440.69 31.56 439.30 39.30 249.28\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 14531.69 33.66 441.11 39.91 250.38\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 14569.90 34.83 442.69 40.16 248.36\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 14795.63 33.95 443.41 39.56 248.07\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
Now, let\u0026rsquo;s convert all of these values into daily log-returns (we don\u0026rsquo;t really care about the actual pricing.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[:,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;NYSE\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now calculate the log daily returns. 
But before\u0026mdash;the dates are no longer relavent here so we drop them.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Date NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 11/7/2013 16:00:00 9.202749 3.263084 4.914124 3.804438 4.576771\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 11/8/2013 16:00:00 9.213549 3.312730 4.928050 3.729301 4.574814\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 11/11/2013 16:00:00 9.214626 3.314550 4.921075 3.758872 4.575638\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 11/12/2013 16:00:00 9.211324 3.320710 4.921658 3.735286 4.581492\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 11/13/2013 16:00:00 9.218298 3.305054 4.921877 3.751854 4.586089\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 10/24/2022 16:00:00 9.562834 3.415758 6.087025 3.678829 5.530262\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 10/25/2022 16:00:00 9.577805 3.451890 6.085183 3.671225 5.518577\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 10/26/2022 16:00:00 9.584087 3.516310 6.089294 3.686627 5.522980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 10/27/2022 16:00:00 9.586713 3.550479 6.092870 3.692871 5.514879\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 10/28/2022 16:00:00 9.602087 3.524889 6.094495 3.677819 5.513711\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2159 rows x 6 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, the log returns! 
We will shift this data by one column and subtract.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Date\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einplace\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE TBill LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.010801 0.049646 0.013926 -0.075136 -0.001957\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.001077 0.001819 -0.006975 0.029570 0.000824\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.003302 0.006161 0.000583 -0.023586 0.005854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.006974 -0.015657 0.000219 0.016568 0.004597\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.005010 -0.008476 0.007476 0.047896 -0.005622\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ... 
...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.005785 0.004940 -0.023467 -0.014291 0.001349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 0.014971 0.036133 -0.001842 -0.007605 -0.011685\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.006282 0.064420 0.004112 0.015402 0.004403\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.002626 0.034169 0.003575 0.006245 -0.008100\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.015374 -0.025590 0.001625 -0.015053 -0.001168\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 5 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe are now ready to run the correlation study.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now subtract everything by the risk-free rate (dropping the rfr itself):\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edrop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecolumns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;TBill\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.038846 -0.035720 -0.124783 -0.051603\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.000742 -0.008794 0.027751 -0.000995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.009463 -0.005577 -0.029747 -0.000307\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.022630 0.015875 0.032225 0.020254\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.013486 0.015952 
0.056372 0.002854\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 0.000845 -0.028406 -0.019231 -0.003591\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021162 -0.037975 -0.043738 -0.047818\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.058138 -0.060308 -0.049017 -0.060017\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.031543 -0.030593 -0.027924 -0.042269\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.040964 0.027215 0.010537 0.024422\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[2158 rows x 4 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"actual-regression\"\u003eActual Regression\u003c/h2\u003e\n\u003cp\u003eIt is now time to perform the actual linear regression!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatsmodels.api\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s work with Lockheed Martin first, fitting an ordinary least squares. 
Remember that the OLS functions reads the \u003cem\u003eendogenous\u003c/em\u003e variable first (for us, the return of the asset.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# add a column of ones to our input market excess returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eadd_constant\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elmt_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: LMT R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. 
R-squared: 0.859\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.312e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 6318.9\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -1.263e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.262e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0004 0.000 1.311 0.190 -0.000 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9449 0.008 114.552 0.000 0.929 0.961\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 423.969 Durbin-Watson: 1.965\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 11575.074\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: -0.160 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 14.341 Cond. No. 29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eBased on the constants row, we can see that\u0026mdash;within \\(95\\%\\) confidence\u0026mdash;the intercept is generally \\(0\\) and CAPM applies. However, we do see a slight positive compared to the market. 
Furthermore, we can see that the regression has a beta value of \\(0.9449\\) \u0026mdash; according the CAPM model, it being \u003cem\u003eslightly\u003c/em\u003e undervarying that the market.\u003c/p\u003e\n\u003cp\u003eWe can continue with the other stocks.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emcd_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: MCD R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.887\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1.697e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 6551.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -1.310e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -1.309e+04\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0003 0.000 1.004 0.315 -0.000 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.9651 0.007 130.287 0.000 0.951 0.980\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 323.911 Durbin-Watson: 1.988\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 3032.550\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: 0.395 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 8.753 Cond. No. 
29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSame thing as before, we are within \\(95\\%\\) confidence having a intercept of \\(0\\) (with a slight positive edge), and it looks like MacDonald\u0026rsquo;s vary a little bit more than Lockheed Martin. The food industry is probably a tougher business than that in defense.\u003c/p\u003e\n\u003cp\u003eLastly, to analyze the recently delisted Twitter!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etwtr_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: TWTR R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. 
R-squared: 0.522\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 2357.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 4307.1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. Observations: 2158 AIC: -8610.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -8599.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst -0.0002 0.001 -0.346 0.730 -0.002 0.001\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.0173 0.021 48.549 0.000 0.976 1.058\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 661.205 Durbin-Watson: 1.986\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 15925.609\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: -0.883 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 16.191 Cond. No. 29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEvidently, Twitter is \u003cem\u003emuch\u003c/em\u003e more variable. It looks like it has a nontrivial bias (the intercept being -0.001 being within the \\(95\\%\\) confidence band \u0026mdash; that the security is possibly significantly underperforming the CAPM expectation in the market.) Furthermore, we have a positive beta value: that the asset is more variable than the market.\u003c/p\u003e\n\u003ch2 id=\"manual-regression\"\u003emanual regression\u003c/h2\u003e\n\u003cp\u003eWe can also use the betas formula to manually calculate what we \u003cem\u003eexpect\u003c/em\u003e for the beta values (i.e. 
as if they were one IID random variable.)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecov\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e NYSE LMT TWTR MCD\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 0.001143 0.001080 0.001163 0.001103\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.001080 0.001188 0.001116 0.001083\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 0.001163 0.001116 0.002264 0.001155\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.001103 0.001083 0.001155 0.001200\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, to construct the beta values. 
Recall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta_{im} := \\frac{Cov[(R_{i}-R_{f}),(R_{m}-R_{f})]}{Var[R_{m}-R_{f}]}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar[X] = Cov[X,X], \\forall X\n\\end{equation}\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# get the market variance (covariance with itself)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# calculate betas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;LMT\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;TWTR\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;MCD\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erisk_free_cov\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eNYSE\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emarket_variation\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# and make dataframe\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSeries\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ebetas\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLMT 0.944899\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTWTR 1.017294\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMCD 0.965081\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003edtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eApparently, all of our assets swing less than the overall NYSE market! Especially Lockheed\u0026mdash;it is only \\(94.4\\%\\) of the market variation. 
Furthermore, it is interesting to see that Twitter swings much more dramatically compared to the market.\u003c/p\u003e\n\u003ch2 id=\"fund-creation\"\u003eFund creation\u003c/h2\u003e\n\u003cp\u003eWe will now create two funds with the three securities, one with equal parts and one which attempts to maximizes the bias (max returns) while minimizing the beta variance value compared to the market.\u003c/p\u003e\n\u003ch3 id=\"equal-parts-fund--fund-1\"\u003e\u0026ldquo;Equal-Parts Fund\u0026rdquo; (\u0026ldquo;Fund 1\u0026rdquo;)\u003c/h3\u003e\n\u003cp\u003eWe will now create a fund in equal parts. Here it is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.063167\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.023420\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.017149\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.021384\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.049750\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.036409\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.021132\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.023917\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.001720\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 -0.014596\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 -0.112813\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.021600\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 -0.023310\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.037041\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.058226\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.041349\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.057265\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 -0.040503\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 -0.032449\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.010994\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: 
float64\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then perform a regression\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# perform linreg\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eOLS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_excess\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_model\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esummary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e OLS Regression Results\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDep. Variable: y R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eModel: OLS Adj. R-squared: 0.473\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eMethod: Least Squares F-statistic: 1935.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDate: Mon, 31 Oct 2022 Prob (F-statistic): 3.01e-302\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eTime: 10:39:24 Log-Likelihood: 3869.5\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNo. 
Observations: 2158 AIC: -7735.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Residuals: 2156 BIC: -7724.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eDf Model: 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eCovariance Type: nonrobust\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e coef std err t P\u0026gt;|t| [0.025 0.975]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e------------------------------------------------------------------------------\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econst 0.0007 0.001 0.841 0.401 -0.001 0.002\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNYSE 1.1290 0.026 43.993 0.000 1.079 1.179\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eOmnibus: 600.456 Durbin-Watson: 2.022\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eProb(Omnibus): 0.000 Jarque-Bera (JB): 8416.514\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSkew: -0.914 Prob(JB): 0.00\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eKurtosis: 12.501 Cond. No. 
29.6\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e==============================================================================\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNotes:\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSurprisingly, we have now created a \u003cstrong\u003esignificantly\u003c/strong\u003e riskier investment that, though riskier, generates a much higher probability of reward (\\(+0.001\\) is now within the \\(99\\%\\) band!)\u003c/p\u003e\n\u003ch3 id=\"a-better-fund\"\u003eA Better Fund\u003c/h3\u003e\n\u003cp\u003eTo me, this is the payoff of this assignment. We will now use CAPM to create the \u0026ldquo;best\u0026rdquo; fund combination\u0026mdash;given some variance, the funds which match CAPM. 
To do this, let\u0026rsquo;s create a generic linear combination of the assets.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esympy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;x\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSymbol\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;z\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.0139260753744255*x - 0.0751364261353569*y - ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 -0.00697525170622448*x + 0.0295704573211193*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.000583132897928884*x - 0.0235859990058791*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 0.000218587198947517*x + 0.016568426347233*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.00747599199607762*x + 0.0478955096700351*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2154 -0.0234665578621085*x - 0.0142913301107561*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2155 -0.00184214468578059*x - 0.0076045993852194*y ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2156 0.00411172646842317*x + 0.0154024001854269*y +...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2157 0.00357547337231878*x + 0.0062445563228315*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2158 0.00162509910496933*x - 0.0150529686289622*y -...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eLength: 2158, dtype: object\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
We will also calculate the excess returns of this fund:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTBill\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efund_2_excess\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.0139260753744255*x - 0.0751364261353569*y - 0.00195664549320096*z - 0.0496463208073039\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -0.00697525170622448*x + 0.0295704573211193*y + 0.000824317408861575*z - 
0.00181917459665826\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.000583132897928884*x - 0.0235859990058791*y + 0.00585367525146019*z - 0.00616055581298536\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00411172646842317*x + 0.0154024001854269*y + 0.00440300114913317*z - 0.0644196927849867\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00357547337231878*x + 0.0062445563228315*y - 0.0081004573348249*z - 0.0341688956152497\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.00162509910496933*x - 0.0150529686289622*y - 0.00116834209450634*z + 0.0255902303732043]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe cast this type to a numpy array because we are about to perform some matrix operations upon it.\u003c/p\u003e\n\u003cp\u003eNow, let us perform the actual linear regression ourselves. 
Recall that the pseudoinverse linear regression estimator is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\beta = (X^{T}X)^{-1}X^{T}Y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have already our \\(Y\\) as a vector (above), and our \\(X\\) is:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enyse_with_bias\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eto_numpy\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[[ 1.00000000e+00 -3.88457302e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -7.42217926e-04]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -9.46284244e-03]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -5.81378271e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 -3.15429207e-02]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e [ 1.00000000e+00 4.09643405e-02]]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe now have our matrices, let\u0026rsquo;s perform the linear regression!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elinalg\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etranspose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@X.transpose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@Y\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z + 0.899115719100281]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. So we now have two rows; the top row represents the \u0026ldquo;bias\u0026rdquo;\u0026mdash;how much deviation there is from CAPM, and the bottom row represents the \u0026ldquo;rate\u0026rdquo;\u0026mdash;the \u0026ldquo;beta\u0026rdquo; value which represents how much excess variance there is.\u003c/p\u003e\n\u003cp\u003eWe can will solve for a combination of solutions to give us specific values of returns vs risk. We will set the asset to learn exactly as much as the market (i.e. 
no bias).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.000544056413840724*x - 6.62061061591867e-5*y + 0.000429966553373172*z - 0.000178620725465344\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will now try to make variance exactly as much as that in the market.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elinear_model\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0457830563134785*x + 0.118178191274045*y + 0.0659651260604729*z - 0.100884280899719\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet us now calculate the boundary condition of our optimization problem by solving an expression in these two expressions.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolvers\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erisk_expr\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{x: 0.412737013327711 - 0.819584899551304*z, y: 0.693765220909132 - 0.24067066980814*z}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. Let us recalculate our optimization objective (\u0026ldquo;deviance\u0026rdquo;\u0026mdash;return) in terms of these new solutions. We aim now to maximize this expression by \u003cem\u003eminimizing\u003c/em\u003e (i.e. 
our optimizer minimizes) the negative thereof\u0026mdash;recalling that scypy works as a minimizer.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_objective\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edeviance_expr\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1e2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_objective\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-5.04831636563563e-19*z - 100.0\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can now use this value to solve for a \\(z\\) value.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eoptim_solution\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolvers\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_objective\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eoptim_solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{z: -1.98085842402250e+20}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent. 
We can now solve for the rest of our values.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eoptim_solution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1.62348165247784e+16, 4.76734523704593e+15, -1.980858424022502e+16)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThis would create the following plan:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# solution\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003efund_2_nobias\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereturns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_nobias\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.009168283711770158\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that this is the performance of the balanced portfolio:\u003c/p\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_returns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0009224705380695683\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFinally, let\u0026rsquo;s plot the \u003cem\u003eprices\u003c/em\u003e of our various funds:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.dates\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003emdates\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eseaborn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMCD\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLMT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eTWTR\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDate\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edatetime\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estrptime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;%m/\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e%d\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e/%Y %H:%M:%S\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_2_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_1_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_l_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edates\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efund_t_price\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eapply\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esym\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eastype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efloat\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_major_locator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eYearLocator\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exaxis\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_major_formatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emdates\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDateFormatter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#39;%Y\u0026#39;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egca\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset_ylabel\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Price\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eNone\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-29_23-33-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eRecall that we didn\u0026rsquo;t actually buy any MacDonald\u0026rsquo;s. So, we have\u0026mdash;blue, being our \u0026ldquo;optimal\u0026rdquo; portfolio, yellow, our balanced portfolio, green, being Lockheed, and red, being Twitter.\u003c/p\u003e\n\u003cp\u003eOur portfolio works surprisingly well!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_linearity_tests/","tags":null,"title":"NUS-ECON320 Linearity Tests"},{"categories":null,"contents":"The code created for this problem can be found here.\nProblem 1 Let\u0026rsquo;s begin with a normal function:\n\\begin{equation} f(x) = (\\sqrt{x}-1)^{2} \\end{equation}\nTaking just a normal Riemann sum, we see that, as expected, it converges to about \\(0.167\\) by the following values between bounds \\([0,1]\\) at different \\(N\\):\nN Value 10 0.23 100 0.172 1000 0.167 10000 0.167 100000 0.167 Problem 2 First, as we are implementing a discrete random walk, here\u0026rsquo;s a fun example; \\(p=0.51\\), \\(\\epsilon=0.001\\).\nWhat is particularly interesting about this case is that, due the probability of change being slightly above \\(50\\%\\), we can see that the sequence has an overall positive growth pattern; however, as far as daily returns is concerned, there is almost no 
value from day-to-day gains in the market.\nTo actually analyze the our expected value for the probability distributions in number of steps \\(T\\) to travel from \\(0\\) to \\(1\\), as a function of \\(p, \\epsilon\\), we perform the following computation:\nExpected Value of T We set:\n\\begin{equation} \\Delta = \\begin{cases} +\\epsilon, P=p\\\\ -\\epsilon, P=1-p \\end{cases} \\end{equation}\nTherefore, for \\(T\\) as a function from \\(0\\) to \\(1\\), we have:\n\\begin{align} E(T)\u0026amp;=\\frac{1}{E\\qty(\\Delta) } \\\\ \u0026amp;= \\frac{1}{p\\epsilon-(1-p)\\epsilon } \\\\ \u0026amp;= \\frac{1}{\\epsilon (2p-1)} \\end{align}\nNow we will calculate the Variance in \\(T\\):\n\\begin{align} Var(T) \u0026amp;= \\frac{1}{Var(\\Delta)} \\end{align}\nWhere, \\(Var(\\Delta)\\) is calculated by:\n\\begin{align} Var(\\Delta) \u0026amp;= E(\\Delta^{2})-E^{2}(\\Delta) \\\\ \u0026amp;= \\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2} \\end{align}\nAnd therefore:\n\\begin{equation} Var(T) = \\frac{1}{\\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2}} \\end{equation}\nProblem 3 Yes, as we expect, that as \\(\\epsilon\\) decreases, the actual steps \\(T\\) it takes to travel from \\([0,1]\\) increases by an order of magnitude. Given \\(10\\) trials, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01\\}\\), we have that:\n\\(\\epsilon\\) Mean \\(T\\) Std. \\(T\\) 0.1 570.8 1051.142 0.01 3848.2 1457.180 We can see this on the expected value calculations as well, that:\n\\begin{equation} \\lim_{\\epsilon \\to 0} E(T) = \\frac{1}{\\epsilon (2p-1)} = \\infty \\end{equation}\nThis is not true for the case of \\(p=0.5\\), where the limit will create an undefined behavior with \\(0\\infty\\), and l\u0026rsquo;hospital\u0026rsquo;s rule upon \\(\\epsilon\\) doesn\u0026rsquo;t apply here.\nProblem 4 Yes, the quadratic variation converges towards \\(0\\). 
Similarly as before, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01,0.001\\}\\), our quadratic variations are:\n\\(\\epsilon\\) quadratic variation 0.1 5.02 0.01 0.32 0.001 0.05 It seems like that, as long as the path terminates and epsilon becomes smaller, the sum of squared difference will converge towards \\(0\\).\nThis means that, for all \\(p\u0026gt;0.5\\), the squared differences will be convergent. However, for \\(p\\leq 0.5\\), the squared differences are arguably still convergent but the sequence doesn\u0026rsquo;t terminate.\nProblem 5 To allow negative values, we changed the function to:\n\\begin{equation} f(x) = ({x}-1)^{2} \\end{equation}\nThe results of running the three expressions with \\(p=0.51\\), \\(\\epsilon=\\{0.1, 0.01, 0.001\\}\\), similarly to before, respectively are as follows:\n\\(\\epsilon\\) \\(f(x_{i})\\) \\(f(x_{i+1})\\) \\(f\\qty(\\frac{x_{i+1}-x_{i}}{2})\\) 0.1 3.03 -2.37 -1.85 0.01 1.7 -1 -0.17 0.001 0.359 0.307 0.938 It seems like\u0026mdash;while all three of these results converge\u0026mdash;they converge to distinctly different limits. 
Of course, this result also depends on \\(p\\), as the probability determines whether the path is even complete in the first place, which will of course affect the convergence here.\n","html":"\u003cp\u003eThe code created for this problem can be found \u003ca href=\"https://github.com/SkoolNotes/ECON320-BrownianMotion/blob/master/brownian.py\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"problem-1\"\u003eProblem 1\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s begin with a normal function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = (\\sqrt{x}-1)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking just a normal Riemann sum, we see that, as expected, it converges to about \\(0.167\\) by the following values between bounds \\([0,1]\\) at different \\(N\\):\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eN\u003c/th\u003e\n\u003cth\u003eValue\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e10\u003c/td\u003e\n\u003ctd\u003e0.23\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e100\u003c/td\u003e\n\u003ctd\u003e0.172\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1000\u003c/td\u003e\n\u003ctd\u003e0.167\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e10000\u003c/td\u003e\n\u003ctd\u003e0.167\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e100000\u003c/td\u003e\n\u003ctd\u003e0.167\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"problem-2\"\u003eProblem 2\u003c/h2\u003e\n\u003cp\u003eFirst, as we are implementing a discrete random walk, here\u0026rsquo;s a fun example; \\(p=0.51\\), \\(\\epsilon=0.001\\).\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-25_22-33-57_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWhat is particularly interesting about this case is that, due the probability of change being slightly above \\(50\\%\\), we 
can see that the sequence has an overall positive growth pattern; however, as far as daily returns is concerned, there is almost no value from day-to-day gains in the market.\u003c/p\u003e\n\u003cp\u003eTo actually analyze the our expected value for the probability distributions in number of steps \\(T\\) to travel from \\(0\\) to \\(1\\), as a function of \\(p, \\epsilon\\), we perform the following computation:\u003c/p\u003e\n\u003ch3 id=\"expected-value-of-t\"\u003eExpected Value of T\u003c/h3\u003e\n\u003cp\u003eWe set:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta = \\begin{cases}\n+\\epsilon, P=p\\\\\n-\\epsilon, P=1-p\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, for \\(T\\) as a function from \\(0\\) to \\(1\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nE(T)\u0026amp;=\\frac{1}{E\\qty(\\Delta) } \\\\\n\u0026amp;= \\frac{1}{p\\epsilon-(1-p)\\epsilon } \\\\\n\u0026amp;= \\frac{1}{\\epsilon (2p-1)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow we will calculate the Variance in \\(T\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(T) \u0026amp;= \\frac{1}{Var(\\Delta)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWhere, \\(Var(\\Delta)\\) is calculated by:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(\\Delta) \u0026amp;= E(\\Delta^{2})-E^{2}(\\Delta) \\\\\n\u0026amp;= \\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar(T) = \\frac{1}{\\qty(\\epsilon^{2} (2p-1)) - \\qty(\\epsilon (2p-1))^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"problem-3\"\u003eProblem 3\u003c/h2\u003e\n\u003cp\u003eYes, as we expect, that as \\(\\epsilon\\) decreases, the actual steps \\(T\\) it takes to travel from \\([0,1]\\) increases by an order of magnitude. 
Given \\(10\\) trials, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01\\}\\), we have that:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(\\epsilon\\)\u003c/th\u003e\n\u003cth\u003eMean \\(T\\)\u003c/th\u003e\n\u003cth\u003eStd. \\(T\\)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.1\u003c/td\u003e\n\u003ctd\u003e570.8\u003c/td\u003e\n\u003ctd\u003e1051.142\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.01\u003c/td\u003e\n\u003ctd\u003e3848.2\u003c/td\u003e\n\u003ctd\u003e1457.180\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe can see this on the expected value calculations as well, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{\\epsilon \\to 0} E(T) = \\frac{1}{\\epsilon (2p-1)} = \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is not true for the case of \\(p=0.5\\), where the limit will create an undefined behavior with \\(0\\infty\\), and l\u0026rsquo;hospital\u0026rsquo;s rule upon \\(\\epsilon\\) doesn\u0026rsquo;t apply here.\u003c/p\u003e\n\u003ch2 id=\"problem-4\"\u003eProblem 4\u003c/h2\u003e\n\u003cp\u003eYes, the quadratic variation converges towards \\(0\\). 
Similarly as before, with \\(p=0.51\\) and \\(\\epsilon = \\{0.1,0.01,0.001\\}\\), our quadratic variations are:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(\\epsilon\\)\u003c/th\u003e\n\u003cth\u003equadratic variation\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.1\u003c/td\u003e\n\u003ctd\u003e5.02\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.01\u003c/td\u003e\n\u003ctd\u003e0.32\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.001\u003c/td\u003e\n\u003ctd\u003e0.05\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eIt seems like that, as long as the path terminates and epsilon becomes smaller, the sum of squared difference will converge towards \\(0\\).\u003c/p\u003e\n\u003cp\u003eThis means that, for all \\(p\u0026gt;0.5\\), the squared differences will be convergent. However, for \\(p\\leq 0.5\\), the squared differences are arguably still convergent but the sequence doesn\u0026rsquo;t terminate.\u003c/p\u003e\n\u003ch2 id=\"problem-5\"\u003eProblem 5\u003c/h2\u003e\n\u003cp\u003eTo allow negative values, we changed the function to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = ({x}-1)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe results of running the three expressions with \\(p=0.51\\), \\(\\epsilon=\\{0.1, 0.01, 0.001\\}\\), similarly to before, respectively are as 
follows:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(\\epsilon\\)\u003c/th\u003e\n\u003cth\u003e\\(f(x_{i})\\)\u003c/th\u003e\n\u003cth\u003e\\(f(x_{i+1})\\)\u003c/th\u003e\n\u003cth\u003e\\(f\\qty(\\frac{x_{i+1}-x_{i}}{2})\\)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.1\u003c/td\u003e\n\u003ctd\u003e3.03\u003c/td\u003e\n\u003ctd\u003e-2.37\u003c/td\u003e\n\u003ctd\u003e-1.85\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.01\u003c/td\u003e\n\u003ctd\u003e1.7\u003c/td\u003e\n\u003ctd\u003e-1\u003c/td\u003e\n\u003ctd\u003e-0.17\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e0.001\u003c/td\u003e\n\u003ctd\u003e0.359\u003c/td\u003e\n\u003ctd\u003e0.307\u003c/td\u003e\n\u003ctd\u003e0.938\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eIt seems like\u0026mdash;while all three of these results converge\u0026mdash;they converge to distinctly different limits. Of course, this result also depends on \\(p\\), as the probability determines whether the path is even complete in the first place, which will of course affect the convergence here.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_stochastic_integration/","tags":null,"title":"NUS-ECON320 Stochastic Integration"},{"categories":null,"contents":"Let \\(X\\) denote price and \\(Y\\) denote volatility. The two objects obey the following process:\n\\begin{equation} \\begin{cases} \\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\ \\dd{Y} = \\sigma Y \\dd{B} \\end{cases} \\end{equation}\nwhere, \\(W\\) and \\(B\\) are correlated Brownian motions with correlation \\(\\rho\\) \u0026mdash; \\(E[(\\dd{W})(\\dd{B})] = \\rho \\dd{t}\\).\nLet\u0026rsquo;s work with \\(Y\\) first. We understand that \\(Y\\) is some continuous variable \\(e^{a}\\). Therefore, \\(\\dv{Y}{t}=ae^{a}\\). Therefore, \\(dY = ae^{a}dt\\). 
Finally, then \\(\\frac{\\dd{Y}}{Y} = \\frac{ae^{a}}{e^{a}}\\dd{t} = a\\).\nFinally, then, because we defined \\(Y=e^{a} \\implies \\ln Y = a = \\frac{\\dd{Y}}{Y}\\).\nSo, we have that:\n\\begin{align} \u0026amp;\\dd{Y} = \\sigma Y\\dd{B} \\\\ \\Rightarrow\\ \u0026amp; \\dd{\\log Y} = \\frac{\\sigma Y\\dd{B}}{Y} = \\sigma \\dd{B} \\end{align}\nThis tells that the change in log returns in \\(Y\\) is normal (as \\(B\\) is a Brownian Motion), with a standard deviation of \\(\\sigma\\). Therefore:\n\\begin{equation} \\dd{\\log Y} \\sim \\mathcal{N}(0, \\sigma^{2} \\dd{t}) \\end{equation}\nWe therefore see that the log-returns of \\(Y\\) is a normal with variance \\(\\sigma^{2}\\), making \\(Y\\) itself a Brownian Motion with center \\(0\\) and variance \\(\\sigma^{2}\\).\nSo now, tackling the expression above in \\(X\\), we will do the same exact thing as above and divide by \\(X\\):\n\\begin{equation} \\dd{\\log X} = \\mu \\dd{t} + Y\\dd{W} \\end{equation}\nSo we can see that \\(X\\) is a Geometric Brownian Motion as a sum of two random variables\u0026mdash;its volatility is determined by \\(Y\\) with a time-drift \\(\\mu \\dd{t}\\).\nWe see that we are almost ready to have an analytical solution here, because the top expression is applying some function \\(f=\\log\\) to a stochastic differential equation by time; however, the right side \\(Y\\) here is not quite a constant (it is itself a stochastic process), so we can\u0026rsquo;t simply apply an Itô Intergral and call it a day.\nSo instead, we will proceed to a Monte-Carlo simulation of the results to verify as much as we can.\nWe will begin by setting the sane values for variances\u0026mdash;having \\(0.1\\%\\) drift and \\(1\\%\\) variance in variance, and the two Brownian motions being inverses of each other \\(\\rho = 0.5\\).\nmu = 0.001 sigma = 0.01 rho = 0.5 (mu,sigma,rho) (0.001, 0.01, 0.5) We will seed a standard Brownian motion; as the two random motions are covariate, we can use the value of one to 
generate another: therefore we will return both at once.\nfrom numpy.random import normal def dbdw(): dB = normal() dW = dB + normal(0, (1-rho)**2) return (dB, dW) dbdw() (-1.0246010237177643, -1.281335746614678) Excellent.\nWe will now simulate the system we were given:\n\\begin{equation} \\begin{cases} \\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\ \\dd{Y} = \\sigma Y \\dd{B} \\end{cases} \\end{equation}\nLet\u0026rsquo;s set the number of trials to \\(10000\\).\nN = 1000 We will measure the convergence of \\(\\bar{\\dd{X}}\\) and \\(\\bar{\\dd{Y}}\\): we will tally each value at each time \\(t\\) as well as compare their expected values over time.\nWe will first seed our systems at \\(1\\%\\) variance and \\(1\\) dollar of price.\nX = 1 Y = 0.01 Now, it\u0026rsquo;s actual simulation time!\n# history of Y and X X_hist = [] Y_hist = [] # history of dx dX_hist = [] dY_hist = [] # current expected value EdX = 0 EdY = 0 # difference in E EdX_diff = 0 EdY_diff = 0 # for n loops, we simulate for _ in range(N): # get a source of randmess dB, dW = dbdw() # get the current dx and dw _dX = mu*X+X*Y*dW _dY = sigma*Y*dB # apply it X += _dX Y += _dY # tally it Y_hist.append(Y) X_hist.append(X) dX_hist.append(_dX) dY_hist.append(_dY) # calculate new expected value # we don\u0026#39;t store it immediately b/c we want to check convergence _EdX = sum(dX_hist)/len(dX_hist) _EdY = sum(dY_hist)/len(dY_hist) EdX_diff = abs(_EdX-EdX) EdY_diff = abs(_EdY-EdY) # store new expected value EdX = _EdX EdY = _EdY Let\u0026rsquo;s observe a few values! For starters, let\u0026rsquo;s measure our new expected values.\nEdX 0.0013333651336800837 EdY -1.225482645599256e-06 And, let\u0026rsquo;s check if we have converged by seeing if the difference is a reasonably small value:\n(EdX_diff, EdY_diff) (2.578663659035343e-05, 1.2183875816528115e-07) Looks like both of our variables have converged. Now, let\u0026rsquo;s plot a few things. 
Let\u0026rsquo;s first build a table with our data.\nimport pandas as pd data = pd.DataFrame({\u0026#34;price\u0026#34;: X_hist, \u0026#34;variance\u0026#34;: Y_hist}) data[\u0026#34;time\u0026#34;] = data.index data price variance time 0 0.998644 0.009974 0 1 0.980393 0.009796 1 2 0.998355 0.009967 2 3 0.994514 0.009913 3 4 1.001363 0.009961 4 .. ... ... ... 995 2.323640 0.008778 995 996 2.321473 0.008715 996 997 2.343427 0.008818 997 998 2.306271 0.008654 998 999 2.333365 0.008775 999 [1000 rows x 3 columns] We will use this to continue the rest of our analysis. For data augmentation, we will also calculate the natural logs of the change to get the rate of change.\nimport numpy as np data[\u0026#34;price_log\u0026#34;] = np.log(data.price) data[\u0026#34;variance_log\u0026#34;] = np.log(data.variance) data[\u0026#34;price_log_change\u0026#34;] = data.price_log - data.price_log.shift(1) data[\u0026#34;variance_log_change\u0026#34;] = data.variance_log - data.variance_log.shift(1) # drop the first row we have w/o change data = data.dropna() data price variance ... price_log_change variance_log_change 1 0.980393 0.009796 ... -0.018444 -0.018005 2 0.998355 0.009967 ... 0.018155 0.017332 3 0.994514 0.009913 ... -0.003855 -0.005443 4 1.001363 0.009961 ... 0.006863 0.004801 5 0.991306 0.009895 ... -0.010094 -0.006639 .. ... ... ... ... ... 995 2.323640 0.008778 ... 0.002148 0.002124 996 2.321473 0.008715 ... -0.000933 -0.007227 997 2.343427 0.008818 ... 0.009413 0.011765 998 2.306271 0.008654 ... -0.015983 -0.018804 999 2.333365 0.008775 ... 
0.011680 0.013827 [999 rows x 7 columns] Let\u0026rsquo;s begin by plotting what we have:\nimport seaborn as sns import matplotlib.pyplot as plt sns.set() We will plot price and variation on two axes.\nplt.gcf().clear() sns.lineplot(x=data.time, y=data.price, color=\u0026#34;g\u0026#34;) ax2 = plt.twinx() sns.lineplot(x=data.time, y=data.variance, color=\u0026#34;b\u0026#34;, ax=ax2) plt.show() Where, the blue line represents the percent variance over time and the green line represents the price. Given the \\(0.1\\%\\) drift we provided, we can see that our simulated market grows steadily in the 1000 data point.\nWe can then plot the log (percent) changes.\nplt.gcf().clear() sns.lineplot(x=data.time, y=data.price_log_change, color=\u0026#34;g\u0026#34;) ax2 = plt.twinx() sns.lineplot(x=data.time, y=data.variance_log_change, color=\u0026#34;b\u0026#34;, ax=ax2) plt.show() As you can see\u0026mdash;we have fairly strong random variables, centered around \\(0\\). Having verified that our drift and variables behave in the way that we expect, we can proceed with analysis.\nWe can use a single-variable \\(t\\) test to figure the \\(99\\%\\) confidence band of the result. To do this, we first need to calculate the mean and standardized deviation of the price percent change (log difference).\nlog_mean, log_std = (data.price_log_change.mean(), data.price_log_change.std()) (log_mean, log_std) (0.0008495184126335735, 0.008471735971085885) And now, we will calculate the\nfrom scipy.stats import t lower_bound, upper_bound = t.interval(0.99, len(data)-1, loc=log_mean, scale=log_std) lower_bound -0.021014037766751738 Therefore, with \\(99\\%\\) confidence, we can say that our asset\u0026mdash;given its current parameters, and an \\(N=1000\\) Monte-Carlo simulation\u0026mdash;will not have a more than \\(2.1\\%\\) drop in value.\nWe will use a hedged option to minimize loss. 
We will use this value to determine the maximum loss for an European put option, maturing in \\(T\\) time, such that the exercise thereof will be hedged against drops of asset price.\nFirst, we will determine the cost of a correctly hedged European put option.\nWe will define \\(S_{0}\\) as the current price of the asset. We will use \\(P\\) as the price of the put option.\nWe desire the strike price of the option to be:\n\\begin{equation} K = S_{0} + P \\end{equation}\nthat is: the price of the put option we desire here will recuperate the price to trade the option and protect against loss. We will symbolically solve for the price of such an option.\nNote that the codeblocks switches here from standard Python to SageMath.\nWe first define the standard normal cumulative distribution.\nfrom sage.symbolic.integration.integral import definite_integral z = var(\u0026#34;z\u0026#34;) N(x) = 1/sqrt(2*pi)*definite_integral(e^(-z^2/2), z, -infinity, x) We will then leverage the Euopean call Black-Scholes model to calculate the optimal put price. We first instantiate variables \\(T\\), and we will set current time to be \\(0\\).\nWe will use \\(v\\) for \\(\\sigma\\), the volatility of the security. We will use \\(S\\) for current price. Lastly, we define \\(P\\) to be our put price. We will call \\(r\\) our risk-free rate.\nTo determine the discount factor, we first implement symbolically our expression for desired strike price.\nv,T,S,P,r = var(\u0026#34;v T S P r\u0026#34;) K = S+P K P + S Great. 
Now we will implement our discount factors.\nd1 = 1/v*sqrt(T) * (ln(S/K) + (r+v^2/2)*(T)) d2 = d1-v*T d1, d2 (1/2*((v^2 + 2*r)*T + 2*log(S/(P + S)))*sqrt(T)/v, -T*v + 1/2*((v^2 + 2*r)*T + 2*log(S/(P + S)))*sqrt(T)/v) And lastly, we will implement the Black-Scholes expression for puts as a logical expression.\nexpr = P == N(-d2)*K*e^(-r*T)-N(-d1)*S expr \\begin{equation} P = \\frac{{\\left({\\left(\\operatorname{erf}\\left(\\frac{\\sqrt{2} {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}}{4 \\, v}\\right) - 1\\right)} e^{\\left(T r\\right)} - \\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 1\\right)} S}{\\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 2 \\, e^{\\left(T r\\right)} - 1} \\end{equation}\nNumerical solutions to this expression\u0026mdash;fitting for each of the values from before\u0026mdash;would then indicate the correct price of the option to generate the hedging effect desired.\n","html":"\u003cp\u003eLet \\(X\\) denote price and \\(Y\\) denote volatility. The two objects obey the following process:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\\n\\dd{Y} = \\sigma Y \\dd{B}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(W\\) and \\(B\\) are correlated Brownian motions with correlation \\(\\rho\\) \u0026mdash; \\(E[(\\dd{W})(\\dd{B})] = \\rho \\dd{t}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eLet\u0026rsquo;s work with \\(Y\\) first. We understand that \\(Y\\) is some continuous variable \\(e^{a}\\). Therefore, \\(\\dv{Y}{t}=ae^{a}\\). Therefore, \\(dY = ae^{a}dt\\). 
Finally, then \\(\\frac{\\dd{Y}}{Y} = \\frac{ae^{a}}{e^{a}}\\dd{t} = a\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, because we defined \\(Y=e^{a} \\implies \\ln Y = a = \\frac{\\dd{Y}}{Y}\\).\u003c/p\u003e\n\u003cp\u003eSo, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dd{Y} = \\sigma Y\\dd{B} \\\\\n\\Rightarrow\\ \u0026amp; \\dd{\\log Y} = \\frac{\\sigma Y\\dd{B}}{Y} = \\sigma \\dd{B}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis tells that the change in log returns in \\(Y\\) is \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal\u003c/a\u003e (as \\(B\\) is a \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e), with a standard deviation of \\(\\sigma\\). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{\\log Y} \\sim \\mathcal{N}(0, \\sigma^{2} \\dd{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe therefore see that the log-returns of \\(Y\\) is a normal with variance \\(\\sigma^{2}\\), making \\(Y\\) itself a \u003ca href=\"/posts/kbhbrownian_motion/\"\u003eBrownian Motion\u003c/a\u003e with center \\(0\\) and variance \\(\\sigma^{2}\\).\u003c/p\u003e\n\u003cp\u003eSo now, tackling the expression above in \\(X\\), we will do the same exact thing as above and divide by \\(X\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{\\log X} = \\mu \\dd{t} + Y\\dd{W}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we can see that \\(X\\) is a \u003ca href=\"/posts/kbhgeometric_brownian_motion/\"\u003eGeometric Brownian Motion\u003c/a\u003e as a sum of two random variables\u0026mdash;its volatility is determined by \\(Y\\) with a time-drift \\(\\mu \\dd{t}\\).\u003c/p\u003e\n\u003cp\u003eWe see that we are \u003cem\u003ealmost\u003c/em\u003e ready to have an analytical solution here, because the top expression is applying some function \\(f=\\log\\) to a stochastic differential equation by time; however, the right side \\(Y\\) here is not quite a constant (it is itself a stochastic 
process), so we can\u0026rsquo;t simply apply an \u003ca href=\"/posts/kbhito_intergral/\"\u003eItô Intergral\u003c/a\u003e and call it a day.\u003c/p\u003e\n\u003cp\u003eSo instead, we will proceed to a Monte-Carlo simulation of the results to verify as much as we can.\u003c/p\u003e\n\u003cp\u003eWe will begin by setting the sane values for variances\u0026mdash;having \\(0.1\\%\\) drift and \\(1\\%\\) variance in variance, and the two Brownian motions being inverses of each other \\(\\rho = 0.5\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emu\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.001\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(0.001, 0.01, 0.5)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will seed a standard Brownian motion; as the two random motions are covariate, we can use the value of one to generate another: therefore we will return both at once.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy.random\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003edbdw\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e():\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003edW\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enormal\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erho\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e**\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edW\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edbdw\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(-1.0246010237177643, 
-1.281335746614678)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent.\u003c/p\u003e\n\u003cp\u003eWe will now simulate the system we were given:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dd{X} = \\mu X \\dd{t} + XY \\dd{W} \\\\\n\\dd{Y} = \\sigma Y \\dd{B}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s set the number of trials to \\(10000\\).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1000\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will measure the convergence of \\(\\bar{\\dd{X}}\\) and \\(\\bar{\\dd{Y}}\\): we will tally each value at each time \\(t\\) as well as compare their expected values over time.\u003c/p\u003e\n\u003cp\u003eWe will first seed our systems at \\(1\\%\\) variance and \\(1\\) dollar of price.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0.01\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow, it\u0026rsquo;s actual simulation time!\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# history of Y and X\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eX_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eY_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# history of dx\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# current expected value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# difference in E\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdX_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdY_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# for n loops, we simulate\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e_\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get a source of randmess\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edW\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edbdw\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# get the current dx and dw\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e_dX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emu\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edW\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e_dY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esigma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edB\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# apply it\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_dX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_dY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# tally it\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eY_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eX_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_dX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eappend\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_dY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new expected value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# we don\u0026#39;t store it immediately b/c we want to check convergence\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003e_EdX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edX_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e_EdY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edY_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdX_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_EdX\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdY_diff\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e_EdY\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# store new expected value\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_EdX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e_EdY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s observe a few values! 
For starters, let\u0026rsquo;s measure our new expected values.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdX\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0.0013333651336800837\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eEdY\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-1.225482645599256e-06\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd, let\u0026rsquo;s check if we have converged by seeing if the difference is a reasonably small value:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" 
data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eEdX_diff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eEdY_diff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(2.578663659035343e-05, 1.2183875816528115e-07)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLooks like both of our variables have converged. Now, let\u0026rsquo;s plot a few things. Let\u0026rsquo;s first build a table with our data.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epandas\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e({\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;price\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eX_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;variance\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY_hist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e})\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;time\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eindex\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e price variance time\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e0 0.998644 0.009974 0\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.980393 0.009796 1\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.998355 0.009967 2\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e3 0.994514 0.009913 3\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 1.001363 0.009961 4\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e995 2.323640 0.008778 995\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e996 2.321473 0.008715 996\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e997 2.343427 0.008818 997\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e998 2.306271 0.008654 998\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e999 2.333365 0.008775 999\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1000 rows x 3 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will use this to continue the rest of our analysis. 
For data augmentation, we will also calculate the natural logs of the change to get the rate of change.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enumpy\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;price_log\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;variance_log\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enp\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;price_log_change\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;variance_log_change\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance_log\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance_log\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshift\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# drop the first row we have w/o change\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edropna\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e price 
variance ... price_log_change variance_log_change\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e1 0.980393 0.009796 ... -0.018444 -0.018005\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e2 0.998355 0.009967 ... 0.018155 0.017332\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e3 0.994514 0.009913 ... -0.003855 -0.005443\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e4 1.001363 0.009961 ... 0.006863 0.004801\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e5 0.991306 0.009895 ... -0.010094 -0.006639\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e.. ... ... ... ... ...\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e995 2.323640 0.008778 ... 0.002148 0.002124\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e996 2.321473 0.008715 ... -0.000933 -0.007227\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e997 2.343427 0.008818 ... 0.009413 0.011765\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e998 2.306271 0.008654 ... -0.015983 -0.018804\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e999 2.333365 0.008775 ... 
0.011680 0.013827\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[999 rows x 7 columns]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eLet\u0026rsquo;s begin by plotting what we have:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eseaborn\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematplotlib.pyplot\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eas\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eset\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will plot price and variation on two axes.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egcf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eclear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;g\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwinx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eax\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-14_22-09-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWhere, the blue line represents the percent variance over time and the green line represents the price. Given the \\(0.1\\%\\) drift we provided, we can see that our simulated market grows steadily in the 1000 data point.\u003c/p\u003e\n\u003cp\u003eWe can then plot the log (percent) changes.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egcf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eclear\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log_change\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;g\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etwinx\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esns\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elineplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etime\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ey\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003evariance_log_change\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eax\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eax2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplt\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-10-14_22-19-02_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAs you can see\u0026mdash;we have fairly strong random variables, centered around \\(0\\). Having verified that our drift and variables behave in the way that we expect, we can proceed with analysis.\u003c/p\u003e\n\u003cp\u003eWe can use a single-variable \\(t\\) test to figure the \\(99\\%\\) confidence band of the result. 
To do this, we first need to calculate the mean and standardized deviation of the price percent change (log difference).\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elog_mean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_std\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log_change\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eprice_log_change\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog_mean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elog_std\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(0.0008495184126335735, 0.008471735971085885)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd now, we will calculate the\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escipy.stats\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elower_bound\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eupper_bound\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einterval\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.99\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edata\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eloc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog_mean\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003escale\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elog_std\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elower_bound\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e-0.021014037766751738\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTherefore, with \\(99\\%\\) confidence, we can say that our asset\u0026mdash;given its current parameters, and an \\(N=1000\\) Monte-Carlo simulation\u0026mdash;will not have a more than \\(2.1\\%\\) drop in value.\u003c/p\u003e\n\u003cp\u003eWe will use a hedged option to minimize loss. We will use this value to determine the maximum loss for an European put option, maturing in \\(T\\) time, such that the exercise thereof will be hedged against drops of asset price.\u003c/p\u003e\n\u003cp\u003eFirst, we will determine the cost of a correctly hedged European put option.\u003c/p\u003e\n\u003cp\u003eWe will define \\(S_{0}\\) as the current price of the asset. 
We will use \\(P\\) as the price of the put option.\u003c/p\u003e\n\u003cp\u003eWe desire the strike price of the option to be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nK = S_{0} + P\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is: the price of the put option we desire here will recuperate the price to trade the option \u003cem\u003eand\u003c/em\u003e protect against loss. We will symbolically solve for the price of such an option.\u003c/p\u003e\n\u003cp\u003eNote that the codeblocks switches here from standard Python to SageMath.\u003c/p\u003e\n\u003cp\u003eWe first define the standard normal cumulative distribution.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esage.symbolic.integration.integral\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edefinite_integral\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;z\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edefinite_integral\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einfinity\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe will then leverage the Euopean call Black-Scholes model to calculate the optimal put price. 
We first instantiate variables \\(T\\), and we will set current time to be \\(0\\).\u003c/p\u003e\n\u003cp\u003eWe will use \\(v\\) for \\(\\sigma\\), the volatility of the security. We will use \\(S\\) for current price. Lastly, we define \\(P\\) to be our put price. We will call \\(r\\) our risk-free rate.\u003c/p\u003e\n\u003cp\u003eTo determine the discount factor, we first implement symbolically our expression for desired strike price.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eP\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003er\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;v T S P r\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eP\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eP + S\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat. Now we will implement our discount factors.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esqrt\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eln\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003er\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ev\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(1/2*((v^2 + 2*r)*T + 2*log(S/(P + S)))*sqrt(T)/v,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e -T*v + 1/2*((v^2 + 2*r)*T + 
2*log(S/(P + S)))*sqrt(T)/v)\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd lastly, we will implement the Black-Scholes expression for puts as a logical expression.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eexpr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eP\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eK\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ee\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e^\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003er\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eS\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eexpr\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\\begin{equation}\nP = \\frac{{\\left({\\left(\\operatorname{erf}\\left(\\frac{\\sqrt{2} {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}}{4 \\, v}\\right) - 1\\right)} e^{\\left(T r\\right)} - \\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 1\\right)} S}{\\operatorname{erf}\\left(-\\frac{\\sqrt{2} {\\left(2 \\, T v^{2} - {\\left(T v^{2} + 2 \\, T r + 2 \\, \\log\\left(\\frac{S}{P + S}\\right)\\right)} \\sqrt{T}\\right)}}{4 \\, v}\\right) + 2 \\, e^{\\left(T r\\right)} - 1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNumerical solutions to this expression\u0026mdash;fitting for each of the values from before\u0026mdash;would then indicate the correct price of the option to generate the hedging effect desired.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_econ320_volatility_hedging/","tags":null,"title":"NUS-ECON320 Volatility Hedging"},{"categories":null,"contents":"The doctor-patient ratio in Haiti is 1 out of 67,000; a combination of malnutrition and malpractice results in a fatality rate of 47%.\nIn Breath, Eyes, Memory, the high rate of fatalities from birth is included as a part of a proverb in Sophie’s village. Ife tells that, of “three children” conceived by an old woman, “one dies in her body.” (Danticat 118)\nNext Steps Token: s_6285_15\nFollow this link for the next step. 
You maybe able to continue to the next phrase of the game; it is also possible that you may have died during birth and would have to restart.\n","html":"\u003cp\u003eThe doctor-patient ratio in Haiti is 1 out of 67,000; a combination of malnutrition and malpractice results in a fatality rate of 47%.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eBreath, Eyes, Memory\u003c/em\u003e, the high rate of fatalities from birth is included as a part of a proverb in Sophie’s village. Ife tells that, of “three children” conceived by an old woman, “one dies in her body.” (Danticat 118)\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_6285_15\u003c/p\u003e\n\u003cp\u003eFollow \u003ca href=\"https://tinyurl.com/nuseng401giftbounceextra1\"\u003ethis link\u003c/a\u003e for the next step. You maybe able to continue to the next phrase of the game; it is also possible that you may have died during birth and would have to restart.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_1/","tags":null,"title":"NUS-ENG401 Childbirth"},{"categories":null,"contents":"Apart from Russia, Central Africa and the Caribbeans have the highest average death rates of regions on Earth. Death is a pretty common occurrence, and—it appears—through the vicissitudes of the game you have died.\nBetter luck next time!\n","html":"\u003cp\u003eApart from Russia, Central Africa and the Caribbeans have the highest average death rates of regions on Earth. Death is a pretty common occurrence, and—it appears—through the vicissitudes of the game you have died.\u003c/p\u003e\n\u003cp\u003eBetter luck next time!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_0/","tags":null,"title":"NUS-ENG401 Death"},{"categories":null,"contents":"Despite having some access to education, actual success through it varies significantly as the resources are scarce. 
For instance, postsecondary education only shares 1% of educational spending in Martinique, so access to it is extremely limited.\nIn Black Shack Alley, José’s quarter scholarship—not enough to support his education—causes his mother to lament that they are “black, poor, and alone in the world” (Zobel 125): their station in Martinican society prevented their access to the already limited resource.\nNext Steps Token: s_5166_12\nResources to advance in education is fickle! To continue to the next step, you must locate Jack L. (\u0026lsquo;23) on the San Mateo campus, and he will provide you with the next steps to complete this track.\n","html":"\u003cp\u003eDespite having some access to education, actual success through it varies significantly as the resources are scarce. For instance, postsecondary education only shares 1% of educational spending in Martinique, so access to it is extremely limited.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eBlack Shack Alley\u003c/em\u003e, José’s quarter scholarship—not enough to support his education—causes his mother to lament that they are “black, poor, and alone in the world” (Zobel 125): their station in Martinican society prevented their access to the already limited resource.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_5166_12\u003c/p\u003e\n\u003cp\u003eResources to advance in education is fickle! To continue to the next step, you must locate Jack L. 
(\u0026lsquo;23) on the San Mateo campus, and he will provide you with the next steps to complete this track.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_5/","tags":null,"title":"NUS-ENG401 Endgame of Education"},{"categories":null,"contents":"Based only on the framing analysis of Sax\u0026rsquo;s Othello, I want to write a piece analyzing collectively grip (lighting) and gaffing (framing) of the films, specifically with a contrast as much as possible to the original text\u0026rsquo;s discussion of framing in, for instance, stage directions. In Sax\u0026rsquo;s Othello, Iago\u0026rsquo;s musings are always framed medium-close from below, with lighting coming from above: the framing helps show a sense of \u0026ldquo;ascension\u0026rdquo; (perhaps not unintentionally biblical, like an ascension to heaven), showing how Iago\u0026rsquo;s schemes are helping him rise through the ranks: a sign of his desire for power. I want to continue to analyses these types of connection between the grip and gaffing throughout the films to help reveal the differing power welded by the various screenwriters\u0026rsquo; differing analysis on Shakespeare\u0026rsquo;s characters.\nVishal Bhardwaj\u0026rsquo;s Omkara Dreary music plays during the misc. Games of marbles\nMopeds?\nsmall bounding songs\nDifferences: Desdemona and Rodrigo were engaged\nEmilia is Othello\u0026rsquo;s sister\nInstances of violence/impending violence underscored by very cheerful music\nIntroductory scene Intro credits: \u0026ldquo;lyrics\u0026rdquo;, \u0026ldquo;music\u0026rdquo;, \u0026ldquo;dialog\u0026rdquo; as the three scripting components call Othello a \u0026ldquo;half-cast\u0026rdquo; \u0026ldquo;abduting\u0026rdquo;\u0026mdash;actually in love or not Marbles scene Robantio (Desdomona\u0026rsquo;s dad) goes and tries to find Othello Ominous music? Monster, Half-Caste, etc. 
\u0026ldquo;never trust what your eyes say, your eyes will betray you Enable 100 Bet Scene Robantio sets the seed of jealousy by explicitly calling Desdemona \u0026ldquo;two-faced\u0026rdquo; Laugh and discussions The music: very cheerful during scenes of violence Omi was able to kill everyone Steps over dead body: washes himself with water Election So Othello\u0026rsquo;s boss During ceremony, Othello chooses Cassio to be the new general \u0026ldquo;How did you get such a light girl in these dark parts\u0026rdquo; \u0026ldquo;dark lord and magic fluit\u0026rdquo; Music swells and Rodrigo cries Rodrigo and Iago Moving the \u0026ldquo;I ahet the moor\u0026rdquo; scene to the middle of the film Iago fires Rodrigo\u0026rsquo;s jealousies, and then Rodrigo fire\u0026rsquo;s Iagos by engaging The Son\u0026rsquo;s BD, etc. Iago listens and watches Camera flashes are the only sound that\u0026rsquo;s present Camera flashing sound is like guns Iago again talks with Rodrigo about Dolly Bianca and Cassio Filmed through a bollywood musical number: very cheerful despite the omen of bad consiquences Cassio is Drunk Typically, HAPPY precededs violence Smoking hurt the music, which causes the Cassio/Rodrigo brawl First Tail Spinning after brawl \u0026ldquo;It was the booze\u0026rdquo; Iago used \u0026ldquo;Only one voice that could work, Dolly\u0026rsquo;s\u0026rdquo; Omi Brother is back Mopeds Iago begins to spin his tale by avoiding talking about Kesu Drum samples + acient vocal chops Symbolism of dissonant music Othello is not fighting back Long songs to illustrate love:w Not enough Blue cololr grading Desdemona very white because of framing compared to all others Father So motivation is given Serious music in the background, replaying the two characters' Tone of the phone number Basil Dearden\u0026rsquo;s All Night Long Timing: 1962, after the war.\nRex + Delia: Othello and Desdemona\nIago want to break off from Othello\u0026rsquo;s band, but his financeeer will only finance 
him if Desdemona will join Iago\u0026rsquo;s new band instead of staying with Othello\u0026rsquo;s band.\nGift: cigerette case.\nBrazillan pistol drums: mark of\nIntroductory scene Stairs; what does rising a lot of stairs mean symbolistically? framing: xylophone; poster; bass \u0026mdash; marrige celebrating poster meaning? \u0026ldquo;Cause I want you, always have\u0026rdquo; \u0026mdash; new angle: Iago is in love Desdemona Cassio introduced Desdemona to Othello \u0026ldquo;Here\u0026rsquo;s to the two men in my life\u0026rdquo; \u0026mdash; Desdemona; does she have affection for Cassio too? Rodrigo and Iago Iago planting in Rodrigo the idea Cassio is in love with Desdemona weed is an important plot point Emilia breaking the fourth wall Iago picks up the cigarette case manually Iago handed MJ to Cassio Cigarette case handed to Cassio too Rex does not want Cassio to smoke Uses the idea of smoke Iago\u0026rsquo;s gaze looks down below Othello when speaking to him Iago looks to the side/in front a lot when talking normally Alcohol and MJ is swapped, but the point is about the same Cassio, instead of fighting Rodrigo, fought a different person First drum solo Contrast: Iago looking very ominously forward towards othello, Othello looks to the side Symbolic meaning of drum solo? Triangle between Othello vs. Desdemona vs. Iago Tape Editing Talk to Delia; \u0026ldquo;ocular proof\u0026rdquo; out of the audio recording \u0026ldquo;I know how you feel\u0026rdquo; \u0026mdash; similar to Ben Jago in the previous film The tape editing as a part of Rodrigo\u0026rsquo;s setup, so Rodrigo funded Iago\u0026rsquo;s things \u0026ldquo;Drop that Sherlock Holmes bit, that hat looks funny\u0026rdquo; All Night Long Song All night long song \u0026mdash; why so sad? 
Othello looks at Delia in a funny way because of the songs \u0026ldquo;I love you both\u0026rdquo;: odd choice of words on Delia\u0026rsquo;s part Ocular Proof Cigarette case + and The tapes were rolling Backroom conversation and recording Camera angle between Othello and Desdemona Othello view from the top looking down, Desdemona/Rodrigo view from the bottom looking up Iago holding door for Othello, submissiveness strangling and throwing out Desdemona is alive? Iago is not attacked and Othello didn\u0026rsquo;t kill himself? Confused about the ending another drum solo: I don\u0026rsquo;t even love Jonny, Iago Repeated shot between the two people Imaginary band playing during Iago\u0026rsquo;s solo Geoffrey Sax\u0026rsquo;s Othello Geoffrey Sax\u0026rsquo;s Othello Symbols\nLight on Iago\u0026rsquo;s face when he is scheming\nExtreme closeup on Othello and Iago is close together\nDrums + violin to build tension; happy violin to show happiness\nIago: \u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\nBOTH interrogation and the scene with Cass Iago: \u0026ldquo;And that\u0026rsquo;s a promise\nThe Iago/Othello closeups are very close\nOthello gave the impetus that he is afraid of marriage falling apart explicitly, and Iago explicitly planted into Cassio that their marrage is failing\nCassio in this version is much more active, it is not entirely a misunderstanding \u0026ldquo;TLC\u0026rdquo;: shot from above\u0026mdash;kind of pressure overpushing\nDessie bought the gown\nmoment whenever light comes across Iago\u0026rsquo;s face\nrepeating violin theme in happiness!!!!\nrepeating vocal theme in sadness!!!!\n\u0026ldquo;It\u0026rsquo;s a shame really, really. He\u0026rsquo;s a broken man.\u0026rdquo;\nWho is Iago talking to? 
Is Othello really rushed to his head?\n\u0026ldquo;I want an example made\u0026rdquo;: kind of a way of cracking\nAdvanced interrogation methods: methods of guaranteeing trust\n\u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\ninterrogation and the scene with Cass and towards Othehllo I don\u0026rsquo;t think I can handle it\nbefore otherllo\u0026rsquo;s \u0026ldquo;death\u0026rdquo; (unraveling) before the constable\u0026rsquo;s actual death Othello realization\nExtreme close ups When Othello was about to kill Iago, lots of light on hisface Death scene, repeat from the beginning, replaying the intro, but with DIFFERENT MUSIC\nSo desdemona looks the same as in life or death\nAlso Emilia didn\u0026rsquo;t die??\nWith remember\n\u0026ldquo;And that\u0026rsquo;s a promise\u0026rdquo;\nloud songs\nlight cast onto Iago\u0026rsquo;s face\nRainy, \u0026ldquo;hello\u0026rdquo;\nThe hankerchief is \u0026ldquo;Dessie\u0026rsquo;s early days\u0026rdquo;\nWhat does the swimming symbolize?\nOpening extreme close up of lips, hands, blackness vs. whiteness background singing: a little bit greek? Intro Cuts The cuts between chaos and peace: riots and looting Othello is a policemen Police: elevated POWER Difference in modern power dynamic, etc. Iago: breaking the 4th wall \u0026ldquo;Yesterday\u0026rsquo;s Man\u0026rdquo; Bathroom scene Both groups secretly conspiring Unbeliving (jelous? in love with? of Othello\u0026rsquo;s strength It was recorded! Creating the difference Justice under the law \u0026ldquo;Justice under the law\u0026rdquo;, \u0026ldquo;unlawfully killed\u0026rdquo;: that is that meaning? 
Did Othello unlawfully kill?\nClear bold statement \u0026ldquo;I know my worth\u0026rdquo; Tell them the status The eyes\u0026rsquo; angle of Ben Iago: Light casting directly cast onto Ben Becomes a Commissioner \u0026ldquo;Something loveable about everyone, even if they turned out to be assholes\u0026rdquo; Iago and Emilia \u0026ldquo;Don\u0026rsquo;t you think John is too good to be true\u0026rdquo; Cassio Doxxing people on a nazi website Michael cass as a form of protection: Story is different Interview \u0026ldquo;They love each other\u0026rdquo; Extreme closeup of Othello and Iago Cassio\u0026rsquo;s character is very indirect: uncomportable angles Shell Marriage \u0026ldquo;Shell marriage\u0026rdquo;: Iago priming Cassio \u0026ldquo;Happy ending\u0026rdquo;: interview suspending as a way of pretense\u0026hellip; of sexual hints? Robe Robe is the hankerchief ","html":"\u003cp\u003eBased only on the framing analysis of Sax\u0026rsquo;s Othello, I want to write a piece analyzing collectively grip (lighting) and gaffing (framing) of the films, specifically with a contrast as much as possible to the original text\u0026rsquo;s discussion of framing in, for instance, stage directions. In Sax\u0026rsquo;s Othello, Iago\u0026rsquo;s musings are always framed medium-close from below, with lighting coming from above: the framing helps show a sense of \u0026ldquo;ascension\u0026rdquo; (perhaps not unintentionally biblical, like an ascension to heaven), showing how Iago\u0026rsquo;s schemes are helping him rise through the ranks: a sign of his desire for power. 
I want to continue to analyses these types of connection between the grip and gaffing throughout the films to help reveal the differing power welded by the various screenwriters\u0026rsquo; differing analysis on Shakespeare\u0026rsquo;s characters.\u003c/p\u003e\n\u003ch2 id=\"vishal-bhardwaj-s-omkara\"\u003eVishal Bhardwaj\u0026rsquo;s Omkara\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDreary music plays during the misc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eGames of marbles\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eMopeds?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esmall bounding songs\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDifferences: Desdemona and Rodrigo were engaged\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEmilia is Othello\u0026rsquo;s sister\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eInstances of violence/impending violence underscored by very cheerful music\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"introductory-scene\"\u003eIntroductory scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIntro credits: \u0026ldquo;lyrics\u0026rdquo;, \u0026ldquo;music\u0026rdquo;, \u0026ldquo;dialog\u0026rdquo; as the three scripting components\u003c/li\u003e\n\u003cli\u003ecall Othello a \u0026ldquo;half-cast\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;abduting\u0026rdquo;\u0026mdash;actually in love or not\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"marbles-scene\"\u003eMarbles scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRobantio (Desdomona\u0026rsquo;s dad) goes and tries to find Othello\u003c/li\u003e\n\u003cli\u003eOminous music?\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eMonster\u003c/strong\u003e, \u003cstrong\u003eHalf-Caste\u003c/strong\u003e, etc.\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;never trust what your eyes say, your eyes will betray 
you\u003c/li\u003e\n\u003cli\u003eEnable\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"100-bet-scene\"\u003e100 Bet Scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRobantio sets the seed of jealousy by explicitly calling Desdemona \u0026ldquo;two-faced\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eLaugh and discussions\u003c/li\u003e\n\u003cli\u003eThe music: \u003cstrong\u003e\u003cstrong\u003every cheerful during scenes of violence\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eOmi was able to kill everyone\u003c/li\u003e\n\u003cli\u003eSteps over dead body: washes himself with water\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"election\"\u003eElection\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSo Othello\u0026rsquo;s \u003cstrong\u003eboss\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eDuring ceremony, Othello chooses Cassio to be the new general\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;How did you get such a light girl in these dark parts\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;dark lord and magic fluit\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eMusic swells and Rodrigo cries\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"rodrigo-and-iago\"\u003eRodrigo and Iago\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eMoving the \u0026ldquo;I ahet the moor\u0026rdquo; scene to the middle of the film\u003c/li\u003e\n\u003cli\u003eIago fires Rodrigo\u0026rsquo;s jealousies, and then Rodrigo fire\u0026rsquo;s Iagos by engaging\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"the-son-s-bd-etc-dot\"\u003eThe Son\u0026rsquo;s BD, etc.\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIago listens and watches\u003c/li\u003e\n\u003cli\u003eCamera flashes are the only sound that\u0026rsquo;s present\u003c/li\u003e\n\u003cli\u003eCamera flashing sound is like guns\u003c/li\u003e\n\u003cli\u003eIago again talks with Rodrigo about Dolly\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bianca-and-cassio\"\u003eBianca and 
Cassio\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFilmed through a bollywood musical number: \u003cstrong\u003every cheerful\u003c/strong\u003e despite the omen of bad consiquences\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cassio-is-drunk\"\u003eCassio is Drunk\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTypically, \u003cstrong\u003eHAPPY\u003c/strong\u003e precededs violence\u003c/li\u003e\n\u003cli\u003eSmoking hurt the music, which causes the Cassio/Rodrigo brawl\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"first-tail-spinning-after-brawl\"\u003eFirst Tail Spinning after brawl\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;It was the booze\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eIago used\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Only one voice that could work, Dolly\u0026rsquo;s\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"omi-brother-is-back\"\u003eOmi Brother is back\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eMopeds\u003c/li\u003e\n\u003cli\u003eIago begins to spin his tale by avoiding talking about Kesu\u003c/li\u003e\n\u003cli\u003eDrum samples + acient vocal chops\u003c/li\u003e\n\u003cli\u003eSymbolism of dissonant music\u003c/li\u003e\n\u003cli\u003eOthello is not fighting back\u003c/li\u003e\n\u003cli\u003eLong songs to illustrate love:w\u003c/li\u003e\n\u003cli\u003eNot enough\u003c/li\u003e\n\u003cli\u003eBlue cololr grading\u003c/li\u003e\n\u003cli\u003eDesdemona very white because of framing compared to all others\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"father\"\u003eFather\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSo motivation is given\u003c/li\u003e\n\u003cli\u003eSerious music in the background, replaying the two characters'\u003c/li\u003e\n\u003cli\u003eTone of the phone number\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basil-dearden-s-all-night-long\"\u003eBasil Dearden\u0026rsquo;s All Night Long\u003c/h2\u003e\n\u003cp\u003eTiming: 1962, after the war.\u003c/p\u003e\n\u003cp\u003eRex + 
Delia: Othello and Desdemona\u003c/p\u003e\n\u003cp\u003eIago want to break off from Othello\u0026rsquo;s band, but his financeeer will only finance him if Desdemona will join Iago\u0026rsquo;s new band instead of staying with Othello\u0026rsquo;s band.\u003c/p\u003e\n\u003cp\u003eGift: cigerette case.\u003c/p\u003e\n\u003cp\u003eBrazillan pistol drums: mark of\u003c/p\u003e\n\u003ch3 id=\"introductory-scene\"\u003eIntroductory scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStairs; what does rising a lot of stairs mean symbolistically?\u003c/li\u003e\n\u003cli\u003eframing: xylophone; poster; bass \u0026mdash; marrige celebrating poster meaning?\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Cause I want you, always have\u0026rdquo; \u0026mdash; new angle: Iago is in love Desdemona\u003c/li\u003e\n\u003cli\u003eCassio introduced Desdemona to Othello\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Here\u0026rsquo;s to the two men in my life\u0026rdquo; \u0026mdash; Desdemona; does she have affection for Cassio too?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"rodrigo-and-iago\"\u003eRodrigo and Iago\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIago planting in Rodrigo the idea Cassio is in love with Desdemona\u003c/li\u003e\n\u003cli\u003eweed is an important plot point\u003c/li\u003e\n\u003cli\u003eEmilia breaking the fourth wall\u003c/li\u003e\n\u003cli\u003eIago picks up the cigarette case manually\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"iago-handed-mj-to-cassio\"\u003eIago handed MJ to Cassio\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCigarette case handed to Cassio too\u003c/li\u003e\n\u003cli\u003eRex does not want Cassio to smoke\u003c/li\u003e\n\u003cli\u003eUses the idea of smoke\u003c/li\u003e\n\u003cli\u003eIago\u0026rsquo;s gaze looks down below Othello when speaking to him\n\u003cul\u003e\n\u003cli\u003eIago looks to the side/in front a lot when talking 
normally\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eAlcohol and MJ is swapped, but the point is about the same\n\u003cul\u003e\n\u003cli\u003eCassio, instead of fighting Rodrigo, fought a different person\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"first-drum-solo\"\u003eFirst drum solo\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eContrast: Iago looking very ominously \u003cstrong\u003eforward\u003c/strong\u003e \u003cstrong\u003etowards\u003c/strong\u003e othello, Othello looks to the \u003cstrong\u003eside\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eSymbolic meaning of drum solo?\u003c/li\u003e\n\u003cli\u003eTriangle between Othello vs. Desdemona vs. Iago\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"tape-editing\"\u003eTape Editing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTalk to Delia; \u0026ldquo;ocular proof\u0026rdquo; out of the audio recording\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I know how you feel\u0026rdquo; \u0026mdash; similar to Ben Jago in the previous film\u003c/li\u003e\n\u003cli\u003eThe tape editing as a part of \u003cstrong\u003eRodrigo\u0026rsquo;s setup\u003c/strong\u003e, so Rodrigo funded Iago\u0026rsquo;s things\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Drop that Sherlock Holmes bit, that hat looks funny\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"all-night-long-song\"\u003eAll Night Long Song\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eAll night long song \u0026mdash; why so sad?\u003c/li\u003e\n\u003cli\u003eOthello looks at Delia in a funny way because of the songs\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I love you both\u0026rdquo;: odd choice of words on Delia\u0026rsquo;s part\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ocular-proof\"\u003eOcular Proof\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCigarette case + and\u003c/li\u003e\n\u003cli\u003eThe tapes were rolling\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"backroom-conversation-and-recording\"\u003eBackroom conversation and recording\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCamera angle between Othello and Desdemona\u003c/li\u003e\n\u003cli\u003eOthello view from the top looking down, Desdemona/Rodrigo view from the bottom looking up\u003c/li\u003e\n\u003cli\u003eIago holding door for Othello, submissiveness\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"strangling-and-throwing-out\"\u003estrangling and throwing out\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDesdemona is alive?\u003c/li\u003e\n\u003cli\u003eIago is not attacked and Othello didn\u0026rsquo;t kill himself?\u003c/li\u003e\n\u003cli\u003eConfused about the ending\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eanother drum solo\u003c/strong\u003e: I don\u0026rsquo;t even love Jonny, Iago\u003c/li\u003e\n\u003cli\u003eRepeated shot between the two people\u003c/li\u003e\n\u003cli\u003eImaginary band playing during Iago\u0026rsquo;s solo\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"geoffrey-sax-s-othello\"\u003eGeoffrey Sax\u0026rsquo;s Othello\u003c/h2\u003e\n\u003cp\u003e\u003cspan class=\"underline\"\u003e\u003cstrong\u003e\u003cstrong\u003eGeoffrey Sax\u0026rsquo;s Othello\u003c/strong\u003e\u003c/strong\u003e\u003c/span\u003e Symbols\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eLight on Iago\u0026rsquo;s face when he is scheming\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eExtreme closeup on Othello and Iago is close together\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDrums + violin to build tension; happy violin to show happiness\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIago: \u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBOTH interrogation\u003c/li\u003e\n\u003cli\u003eand the scene with Cass\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIago: 
\u0026ldquo;And that\u0026rsquo;s a promise\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eThe Iago/Othello closeups are very close\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOthello gave the impetus that he is afraid of marriage falling apart explicitly, and Iago explicitly planted into Cassio that their marrage is failing\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eCassio in this version is much more active, it is not entirely a misunderstanding\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;TLC\u0026rdquo;: shot from above\u0026mdash;kind of pressure overpushing\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003eDessie\u003c/strong\u003e \u003cem\u003ebought\u003c/em\u003e the gown\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emoment whenever light comes across Iago\u0026rsquo;s face\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003erepeating violin theme in happiness\u003c/strong\u003e\u003c/strong\u003e!!!!\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003erepeating vocal theme in sadness\u003c/strong\u003e\u003c/strong\u003e!!!!\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;It\u0026rsquo;s a shame really, really. 
He\u0026rsquo;s a broken man.\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWho is Iago talking to?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIs Othello really rushed to his head?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;I want an example made\u0026rdquo;: kind of a way of cracking\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAdvanced interrogation methods: methods of guaranteeing trust\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003cstrong\u003e\u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo;\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003einterrogation\u003c/li\u003e\n\u003cli\u003eand the scene with Cass\u003c/li\u003e\n\u003cli\u003eand towards Othehllo\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eI don\u0026rsquo;t think I can handle it\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebefore otherllo\u0026rsquo;s \u0026ldquo;death\u0026rdquo; (unraveling)\u003c/li\u003e\n\u003cli\u003ebefore the constable\u0026rsquo;s actual death\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eOthello realization\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eExtreme close ups\u003c/li\u003e\n\u003cli\u003eWhen Othello was about to kill Iago, lots of light on hisface\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDeath scene, repeat from the beginning, replaying the intro, but with DIFFERENT MUSIC\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSo desdemona looks the same as in life or death\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAlso Emilia didn\u0026rsquo;t die??\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWith remember\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;And that\u0026rsquo;s a 
promise\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eloud songs\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elight cast onto Iago\u0026rsquo;s face\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRainy, \u0026ldquo;hello\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eThe hankerchief is \u0026ldquo;Dessie\u0026rsquo;s early days\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhat does the swimming symbolize?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"opening\"\u003eOpening\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eextreme close up of lips, hands, blackness vs. whiteness\u003c/li\u003e\n\u003cli\u003ebackground singing: a little bit greek?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"intro-cuts\"\u003eIntro Cuts\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe cuts between chaos and peace: riots and looting\u003c/li\u003e\n\u003cli\u003eOthello is a policemen\n\u003cul\u003e\n\u003cli\u003ePolice: elevated \u003cstrong\u003ePOWER\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eDifference in modern power dynamic, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eIago: breaking the 4th wall\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Yesterday\u0026rsquo;s Man\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"bathroom-scene\"\u003eBathroom scene\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBoth groups secretly conspiring\u003c/li\u003e\n\u003cli\u003eUnbeliving (jelous? in love with? of Othello\u0026rsquo;s strength\u003c/li\u003e\n\u003cli\u003eIt was recorded! Creating the difference\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"justice-under-the-law\"\u003eJustice under the law\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Justice under the law\u0026rdquo;, \u0026ldquo;unlawfully killed\u0026rdquo;: that is that meaning? 
Did Othello unlawfully kill?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eClear bold statement\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I know my worth\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003eTell them the status\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eThe eyes\u0026rsquo; angle of Ben Iago:\u003c/li\u003e\n\u003cli\u003eLight casting directly cast onto Ben\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"becomes-a-commissioner\"\u003eBecomes a Commissioner\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Something loveable about everyone, even if they turned out to be assholes\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eIago and Emilia\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Don\u0026rsquo;t you think John is too good to be true\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cassio\"\u003eCassio\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDoxxing people on a nazi website\u003c/li\u003e\n\u003cli\u003eMichael cass as a form of protection:\u003c/li\u003e\n\u003cli\u003eStory is different\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"interview\"\u003eInterview\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;They love each other\u0026rdquo; Extreme closeup of Othello and Iago\u003c/li\u003e\n\u003cli\u003eCassio\u0026rsquo;s character is very indirect: uncomportable angles\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"shell-marriage\"\u003eShell Marriage\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Shell marriage\u0026rdquo;: Iago priming Cassio\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Happy ending\u0026rdquo;: interview suspending as a way of pretense\u0026hellip; of sexual hints?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"robe\"\u003eRobe\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRobe is the hankerchief\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_film_analysis/","tags":null,"title":"NUS-ENG401 Film 
Analysis"},{"categories":null,"contents":"\u0026ldquo;Dialogue tends towards minimalism; instead, Bhardwaj relies heavily on extradiegetic and intradiegetic instrumental and vocal music\u0026rdquo;\nWhile both a lust for Othello\u0026rsquo;s power and Rodrigo\u0026rsquo;s provocations of Iago drove him initially to begin his scheme, it is Iago\u0026rsquo;s internal racist hatred of Othello as a person that allowed his plot to fully come to fruition.\nEstablish pattern two othellos: othello the person \u0026ldquo;the moor\u0026rdquo;, and othello the general \u0026ldquo;othello\u0026rdquo; The four texts\u0026rsquo; Iagos can be treated in the same framework of \u0026ldquo;love\u0026rdquo;\u0026ndash;hatred, whether racial or otherwise, for Othello\u0026mdash;versus \u0026ldquo;respect\u0026rdquo;\u0026mdash;deference to the authority of Othello.\nShakesphere\u0026rsquo;s Othello: hates the guy, likes the general Iago pretty much said as much \u0026ldquo;I hate the Moor\u0026rdquo; Understand the role of power Othello has: \u0026ldquo;In following him, I follow but myself\u0026rdquo;; evidently want to be Lieutenant for his power In the work, \u0026ldquo;Othello\u0026rdquo; v. \u0026ldquo;Moor\u0026rdquo; is effectively two different people \u0026ldquo;After some time, to abuse Othello\u0026rsquo;s ear \u0026hellip; The Moor is of a free and open nature,\u0026rdquo;. 
The former, the general, well-respected for his power by Iago, that demoted Cassio; the latter, the outcast person that actually did the betrayal and whom Iago hates.\nSax\u0026rsquo;s Othello: iago\u0026rsquo;s hatred of othello the person is drien by racism Yes, though this lust for power provoked Iago to mess with Othello the person to get the powers of othello the general, he is definitely overtly racist to othello the person starting from the beginning.\nPraises to Andrew Davies, managed to cram in a lot into the I hate the Moor speech.\nHates the guy:\novertly racist \u0026ldquo;You stupid, patronizing ape \u0026hellip; how very quaint, how very d\u0026mdash; sunday school\u0026rdquo;\n\u0026ldquo;d\u0026mdash; sunday school\u0026rdquo;: white mistrel mocking AA worship =\u0026gt; hand gestures of Iago N word he is basically Iago\u0026rsquo;s understudy \u0026ldquo;how very good for you to acknowledge what you owe to me, you owe me everything, everything!\u0026rdquo;\n\u0026mdash; for all the talk about love, it is \u0026ldquo;If I could find any whose brains were as big as their dicks, I’d be a happy man, eh?\u0026rdquo;\nMirrors racism in Shakesphere: \u0026ldquo;The Moor \u0026hellip; will as tenderly be led by the nose \u0026hellip; as asses are.\u0026rdquo;\nAgain, in lust with not with the guy but with his power: \u0026ldquo;It\u0026rsquo;s a shame really, he\u0026rsquo;s a good man. \u0026hellip; It going to take a bit longer, and its all going to end in broken hearts.\u0026rdquo;\n\u0026ldquo;It\u0026rsquo;s love. Simple as that\u0026rdquo; (close reading: love, perhaps against Othello, or perhaps his position)\nDeardon\u0026rsquo;s Iago: hatred actually useful\u0026mdash;-otherwise Iago would not have succeeded Why is this racism necessary.\nDeardon attempts an answer: Perhaps as a product of civil rights, or the jazz counteculture, the race component is functionally erased, and instead Deardon\u0026rsquo;s Iago is lusting over Desdemona. 
The film is one which Iago and Othello were ostensibly friends\u0026mdash;during the subtext perhaps more of a reflection for civil rights sentiment (especially via the counter-culture language of Jazz of the time) \u0026ldquo;apparently natural construct of a racially diverse \u0026hellip; sub-culture: \u0026rsquo;the [diverse racial archetypes] intermingle smoothly and fraternisation creates deep emotional pangs, rather than embarrassment\u0026rsquo;\u0026rdquo; (Kinematograph Weekly) (Burton and O\u0026rsquo;Sullivan)\nCritics hated particularly the soft ending: \u0026ldquo;never for one moment succeeds in achieving anything like the power and persuasion of the original.\u0026rdquo; (Films and Filming) Why is the soft-ending? When Othello almost smoothered Desdemona, three people went and checked immediatley, and there was no Emilia\u0026rsquo;s \u0026ldquo;The Moor\u0026rsquo;s abused by some most villainous knave, Some base notorious knave\u0026rdquo;. She immediately doubted Iago, confusion was cleared, and all was well.\nSo though Rodrigo ignited Iago\u0026rsquo;s scheme, racism dragged Iago\u0026rsquo;s scheme long enough for both to be dead.\nBhardwaj\u0026rsquo;s Othello: Rodrigo Convinced Merge between two Othellos, setting forward motion https://www.youtube.com/watch?v=zzgDHT3inzI\nIago provoked by Rodrigo jeering him: \u0026ldquo;no more of drowning, do you hear?\u0026rdquo; \u0026ldquo;I could jump into this river\u0026rdquo; \u0026hellip; \u0026ldquo;well jump! 
Don\u0026rsquo;t be a sissy [sic].\u0026rdquo; Then Rodrigo provoked Iago about how Iago was slighted by Othello; relegating Iago to a role in \u0026ldquo;Company Garden\u0026rdquo;.\nIt is at this moment that the music swells, ominous vocal tones that reprises from the beginning titles of the film, of \u0026ldquo;title faintly visible images of what appear to be ancient scenes of combat.\u0026rdquo;\n\u0026ldquo;Bhardwaj composes all his film scores and writes music and script simultaneously\u0026rdquo;, so the intentionality here is not to be missed. Motivated more by a masculine sense of revenge against General Othello, Iago decided to take personal action against the hated Moor.\nOdd choice, too, to relegate Iago to \u0026ldquo;company garden.\u0026rdquo; Turns out, Shakesphere defined it for us! \u0026ldquo;Our bodies are gardens, to the which our wills are gardeners; \u0026hellip; we have reason to cool our \u0026hellip; unbitted lusts; \u0026hellip; I take this, that you call love, to be a sect or scion.\u0026rdquo; Iago claims love is a \u0026ldquo;cutting\u0026rdquo; of unbitted lust, which he should cut. 
Metaphorically cutting Othello\u0026rsquo;s the person\u0026rsquo;s love for desdemona as if he\u0026rsquo;s mechanically cutting away some lust; all to achieve a goal for General Othello.\n\u0026ldquo;Othello should go mad\u0026rdquo;\nComparing Iago No Love, No respect | Direct objective: to get Othello\u0026rsquo;s position at the Met Yes Love, Yes Respect | Direct objective: to remove Desdemona from Othello\u0026rsquo;s control and start a band No Love, Yes Respect | Direct objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader \u0026lt;\u0026gt; Shakesphere Missing: Yes Love, Yes Respect\nActual Othello IAGO: \u0026ldquo;No more of drowning, do you hear?\u0026rdquo; \u0026lt;\u0026gt; Omkara Bhardwaj\u0026rsquo;s Iago Direct objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader Hidden objective: \u0026hellip;??? not sure not well motivated Direct Downfall of Othello: \u0026ldquo;ocular proof\u0026rdquo; of jingly heirloom Main Methods \u0026lt;\u0026gt; Shakesphere beer to Cassio \u0026lt;\u0026gt; Shakesphere making Othello overhear conversation with Cassio about Bianca to think its about Desdemona \u0026lt;\u0026gt; Shakesphere convinces Desdemona to soothsay for Cassio Random bad omen about bird/snake? 
\u0026lt;\u0026gt; Shakesphere Heirloom Distinctions Process of manipulation is more toxically masculine instead of weird submissiveness \u0026ldquo;I hate Moor\u0026rdquo; + drowning scene took place somewhat after being provoked by Rodrigo firing his jealousy after Iago fired Rodrigo\u0026rsquo;s jealousy about Desdemona Unique shots/catchphrases/features Instances of violence/impending violence underscored by very cheerful music Cheerful music as Othello beats up everybody in the rival gang Happy cheerful party music as Cassio becomes drunk and gets demoted Sax\u0026rsquo;s Iago Direct objective: to get Othello\u0026rsquo;s position at the Met Hidden objective: [racism for]/[sexual desire for] Othello Direct Downfall of Othello: A, B, and C for intimacy test for the robe Main methods \u0026lt;\u0026gt; Shakesphere conversation to arise suspicion Hinted to Cassio that marriage is not genuine and provoked him Screwed up Othello\u0026rsquo;s investigation of the constable Demanded intimacy test and A, B, AND C Distinctions Wants Othello\u0026rsquo;s position, and probably his love Is more explicitly lying (A, B, AND C) Is more overtly racist: beginning conversation with Sinclair, the racist (\u0026ldquo;I hate the moor\u0026rdquo; proxy) rant Relationship with Emilia seem less strained Unique shots/catchphrases/features Staring up into the sky w/ shaft of light \u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo; prior to manipulation \u0026ldquo;And that\u0026rsquo;s a promise\u0026rdquo; Darden\u0026rsquo;s Iago Direct objective: to remove Desdemona from Othello\u0026rsquo;s control and start a band Hidden objective: to date Desdemona Direct Downfall of Othello: [the cigarette box?] 
+ edited tape recording Main Methods \u0026lt;\u0026gt; Shakesphere gave weed to Cassio to cause him to make a scene \u0026lt;\u0026gt; Shakesphere conversation to arise suspicion re: knowledge from Emilia Engineered Desdemona\u0026rsquo;s song to be specifically pointed \u0026lt;\u0026gt; Shakesphere \u0026ldquo;ocular proof\u0026rdquo;: cigarette case; also insinuating that Cassio\u0026rsquo;s weed came from Desdemona Doctoring the tape to highlight Cassio\u0026rsquo;s supposed infidelity Distinctions In love with Desdemona and had separate business motivations distinct from taking Othello/Cassio\u0026rsquo;s Didn\u0026rsquo;t seem particularly racist, was previously friends with Othello? Is more explicitly lying by doctoring tape, but words closer to Shakesphere\u0026rsquo;s Iago Didn\u0026rsquo;t succeed Most strained relationship with Emilia, \u0026ldquo;I love nobody. Don\u0026rsquo;t even love Jonny\u0026rdquo; Unique shots/catchphrases/features Shot of Cousin Jonny / Rodrigo alone, in his home, isolated with light cast on him Frenzied drum solo Motivated somewhat by Lou, the financier The rising of stairs up and down the main factory as well as to a side building Weed. 
Close reading Iago\u0026rsquo;s first drum solo https://www.youtube.com/watch?v=fA-vKHOVDCw Iago: looks to the side, then at the camera directly Othello: looks generally at the direction of the camera, but as camera pans in closes his eyes and look down and away Desdemona: looks entirely to the side, uncaring Iago: looks down (like Othello\u0026rsquo;s ending shot), then looks up and to the side, cutting to\u0026hellip; Othello: looking at the opposite side, seemingly \u0026ldquo;towards\u0026rdquo; Iago, then looks down Iago: as drum solo becomes more frenzied, (mildly extreme) close up staring dead into the camera frienzied drum soloing Iago: looks down, and then to the side, seemingly contemplating Desdemona: stares dead ahead Othello: looks to the side and down, hand holding head Iago: eyes pans across audience ","html":"\u003cp\u003e\u0026ldquo;Dialogue tends towards minimalism; instead, Bhardwaj relies heavily on extradiegetic and intradiegetic instrumental and vocal music\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eWhile both a lust for Othello\u0026rsquo;s power and Rodrigo\u0026rsquo;s provocations of Iago drove him initially to begin his scheme, it is Iago\u0026rsquo;s internal racist hatred of Othello as a person that allowed his plot to fully come to fruition.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"establish-pattern-two-othellos-othello-the-person-the-moor-and-othello-the-general-othello\"\u003eEstablish pattern two othellos: othello the person \u0026ldquo;the moor\u0026rdquo;, and othello the general \u0026ldquo;othello\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eThe four texts\u0026rsquo; Iagos can be treated in the same framework of \u0026ldquo;love\u0026rdquo;\u0026ndash;hatred, whether racial or otherwise, for Othello\u0026mdash;versus \u0026ldquo;respect\u0026rdquo;\u0026mdash;deference to the authority of Othello.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eShakesphere\u0026rsquo;s Othello: hates the guy, likes the 
general\n\u003cul\u003e\n\u003cli\u003eIago pretty much said as much \u0026ldquo;I hate the Moor\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eUnderstand the role of power Othello has: \u0026ldquo;In following him, I follow but myself\u0026rdquo;; evidently want to be Lieutenant for his power\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn the work, \u0026ldquo;Othello\u0026rdquo; v. \u0026ldquo;Moor\u0026rdquo; is effectively two different people \u0026ldquo;After some time, to abuse Othello\u0026rsquo;s ear \u0026hellip; The Moor is of a free and open nature,\u0026rdquo;. The former, the general, well-respected for his power by Iago, that demoted Cassio; the latter, the outcast \u003cem\u003eperson\u003c/em\u003e that actually did the betrayal and whom Iago hates.\u003c/p\u003e\n\u003ch2 id=\"sax-s-othello-iago-s-hatred-of-othello-the-person-is-drien-by-racism\"\u003eSax\u0026rsquo;s Othello: iago\u0026rsquo;s hatred of othello the person is drien by racism\u003c/h2\u003e\n\u003cp\u003eYes, though this lust for power provoked Iago to mess with Othello the person to get the powers of othello the general, he is definitely overtly racist to othello the person starting from the beginning.\u003c/p\u003e\n\u003cp\u003ePraises to Andrew Davies, managed to cram in a lot into the I hate the Moor speech.\u003c/p\u003e\n\u003cp\u003eHates the guy:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eovertly racist \u0026ldquo;You stupid, patronizing ape \u0026hellip; how very quaint, how very d\u0026mdash; sunday school\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;d\u0026mdash; sunday school\u0026rdquo;: white mistrel mocking AA worship =\u0026gt; hand gestures of Iago\u003c/li\u003e\n\u003cli\u003eN word\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ehe is basically Iago\u0026rsquo;s understudy \u0026ldquo;how very good for you to acknowledge what you owe to me, you owe me 
everything, everything!\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026mdash; for all the talk about love, it is \u0026ldquo;If I could find any whose brains were as big as their dicks, I’d be a happy man, eh?\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMirrors racism in Shakesphere: \u0026ldquo;The Moor \u0026hellip; will as tenderly be led by the nose \u0026hellip; as asses are.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eAgain, in lust with not with the guy but with his power: \u0026ldquo;It\u0026rsquo;s a shame really, he\u0026rsquo;s a good man. \u0026hellip; It going to take a bit longer, and its all going to end in broken hearts.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;It\u0026rsquo;s love. Simple as that\u0026rdquo; (close reading: love, perhaps against Othello, or perhaps his position)\u003c/p\u003e\n\u003ch2 id=\"deardon-s-iago-hatred-actually-useful-otherwise-iago-would-not-have-succeeded\"\u003eDeardon\u0026rsquo;s Iago: hatred actually useful\u0026mdash;-otherwise Iago would not have succeeded\u003c/h2\u003e\n\u003cp\u003eWhy is this racism necessary.\u003c/p\u003e\n\u003cp\u003eDeardon attempts an answer: Perhaps as a product of civil rights, or the jazz counteculture, the race component is functionally erased, and instead Deardon\u0026rsquo;s Iago is lusting over Desdemona. 
The film is one which Iago and Othello were ostensibly friends\u0026mdash;during the subtext perhaps more of a reflection for civil rights sentiment (especially via the counter-culture language of Jazz of the time) \u0026ldquo;apparently natural construct of a racially diverse \u0026hellip; sub-culture: \u0026rsquo;the [diverse racial archetypes] intermingle smoothly and fraternisation creates deep emotional pangs, rather than embarrassment\u0026rsquo;\u0026rdquo; (Kinematograph Weekly) (Burton and O\u0026rsquo;Sullivan)\u003c/p\u003e\n\u003cp\u003eCritics hated particularly the soft ending: \u0026ldquo;never for one moment succeeds in achieving anything like the power and persuasion of the original.\u0026rdquo; (Films and Filming) Why is the soft-ending? When Othello almost smoothered Desdemona, three people went and checked immediatley, and there was no Emilia\u0026rsquo;s \u0026ldquo;The Moor\u0026rsquo;s abused by some most villainous knave, Some base notorious knave\u0026rdquo;. She immediately doubted Iago, confusion was cleared, and all was well.\u003c/p\u003e\n\u003cp\u003eSo though Rodrigo ignited Iago\u0026rsquo;s scheme, racism dragged Iago\u0026rsquo;s scheme long enough for both to be dead.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"bhardwaj-s-othello-rodrigo-convinced-merge-between-two-othellos-setting-forward-motion\"\u003eBhardwaj\u0026rsquo;s Othello: Rodrigo Convinced Merge between two Othellos, setting forward motion\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://www.youtube.com/watch?v=zzgDHT3inzI\"\u003ehttps://www.youtube.com/watch?v=zzgDHT3inzI\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eIago provoked by Rodrigo jeering him: \u0026ldquo;no more of drowning, do you hear?\u0026rdquo; \u0026ldquo;I could jump into this river\u0026rdquo; \u0026hellip; \u0026ldquo;well jump! 
Don\u0026rsquo;t be a sissy [sic].\u0026rdquo; Then Rodrigo provoked Iago about how Iago was slighted by Othello; relegating Iago to a role in \u0026ldquo;Company Garden\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eIt is at this moment that the music swells, ominous vocal tones that reprises from the beginning titles of the film, of \u0026ldquo;title faintly visible images of what appear to be ancient scenes of combat.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Bhardwaj composes all his film scores and writes music and script simultaneously\u0026rdquo;, so the intentionality here is not to be missed. Motivated more by a masculine sense of revenge against \u003cstrong\u003eGeneral Othello\u003c/strong\u003e, Iago decided to take personal action against the hated \u003cstrong\u003eMoor\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eOdd choice, too, to relegate Iago to \u0026ldquo;company garden.\u0026rdquo; Turns out, Shakesphere defined it for us! \u0026ldquo;Our bodies are gardens, to the which our wills are gardeners; \u0026hellip; we have reason to cool our \u0026hellip; unbitted lusts; \u0026hellip; I take this, that you call love, to be a sect or scion.\u0026rdquo; Iago claims love is a \u0026ldquo;cutting\u0026rdquo; of unbitted lust, which he should cut. 
Metaphorically cutting Othello\u0026rsquo;s the person\u0026rsquo;s love for desdemona as if he\u0026rsquo;s mechanically cutting away some lust; all to achieve a goal for General Othello.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Othello should go mad\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"comparing-iago\"\u003eComparing Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNo Love, No respect | Direct objective: to get Othello\u0026rsquo;s position at \u003cstrong\u003ethe Met\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eYes Love, Yes Respect | Direct objective: to remove Desdemona from Othello\u0026rsquo;s control and start a \u003cstrong\u003eband\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eNo Love, Yes Respect | Direct objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader \u0026lt;\u0026gt; Shakesphere\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMissing: Yes Love, Yes Respect\u003c/p\u003e\n\u003ch2 id=\"actual-othello\"\u003eActual Othello\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIAGO: \u0026ldquo;No more of drowning, do you hear?\u0026rdquo; \u0026lt;\u0026gt; Omkara\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bhardwaj-s-iago\"\u003eBhardwaj\u0026rsquo;s Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDirect objective: faithful\u0026mdash;to get Othello to choose him as the direct regional militia leader\u003c/li\u003e\n\u003cli\u003eHidden objective: \u0026hellip;??? 
not sure not well motivated\u003c/li\u003e\n\u003cli\u003eDirect Downfall of Othello: \u0026ldquo;ocular proof\u0026rdquo; of jingly heirloom\u003c/li\u003e\n\u003cli\u003eMain Methods\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere beer to Cassio\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere making Othello overhear conversation with Cassio about Bianca to think its about Desdemona\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere convinces Desdemona to soothsay for Cassio\u003c/li\u003e\n\u003cli\u003eRandom bad omen about bird/snake?\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere Heirloom\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDistinctions\n\u003cul\u003e\n\u003cli\u003eProcess of manipulation is more toxically masculine instead of weird submissiveness\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I hate Moor\u0026rdquo; + drowning scene took place somewhat after being provoked by Rodrigo firing his jealousy after Iago fired Rodrigo\u0026rsquo;s jealousy about Desdemona\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eUnique shots/catchphrases/features\n\u003cul\u003e\n\u003cli\u003eInstances of violence/impending violence underscored by very cheerful music\n\u003cul\u003e\n\u003cli\u003eCheerful music as Othello beats up everybody in the rival gang\u003c/li\u003e\n\u003cli\u003eHappy cheerful party music as Cassio becomes drunk and gets demoted\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"sax-s-iago\"\u003eSax\u0026rsquo;s Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDirect objective: to get Othello\u0026rsquo;s position at \u003cstrong\u003ethe Met\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eHidden objective: [racism for]/[sexual desire for] Othello\u003c/li\u003e\n\u003cli\u003eDirect Downfall of Othello: A, B, and C for intimacy test for the 
\u003cstrong\u003erobe\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eMain methods\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere conversation to arise suspicion\u003c/li\u003e\n\u003cli\u003eHinted to Cassio that marriage is not genuine and provoked him\u003c/li\u003e\n\u003cli\u003eScrewed up Othello\u0026rsquo;s investigation of the constable\u003c/li\u003e\n\u003cli\u003e\u003cem\u003eDemanded intimacy test and A, B, AND C\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDistinctions\n\u003cul\u003e\n\u003cli\u003eWants Othello\u0026rsquo;s position, and probably his love\u003c/li\u003e\n\u003cli\u003eIs more explicitly lying (A, B, AND C)\u003c/li\u003e\n\u003cli\u003eIs more overtly racist: beginning conversation with Sinclair, the racist (\u0026ldquo;I hate the moor\u0026rdquo; proxy) rant\u003c/li\u003e\n\u003cli\u003eRelationship with Emilia seem less strained\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eUnique shots/catchphrases/features\n\u003cul\u003e\n\u003cli\u003eStaring up into the sky w/ shaft of light\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;I know what you are talking about, I\u0026rsquo;ve been there\u0026rdquo; prior to manipulation\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;And that\u0026rsquo;s a promise\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"darden-s-iago\"\u003eDarden\u0026rsquo;s Iago\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDirect objective: to remove Desdemona from Othello\u0026rsquo;s control and start a \u003cstrong\u003eband\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eHidden objective: to date Desdemona\u003c/li\u003e\n\u003cli\u003eDirect Downfall of Othello: [the \u003cstrong\u003ecigarette box\u003c/strong\u003e?] 
+ edited tape recording\u003c/li\u003e\n\u003cli\u003eMain Methods\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere gave weed to Cassio to cause him to make a scene\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere conversation to arise suspicion re: knowledge from Emilia\u003c/li\u003e\n\u003cli\u003eEngineered Desdemona\u0026rsquo;s song to be specifically pointed\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; Shakesphere \u0026ldquo;ocular proof\u0026rdquo;: cigarette case; also insinuating that Cassio\u0026rsquo;s weed came from Desdemona\u003c/li\u003e\n\u003cli\u003eDoctoring the tape to highlight Cassio\u0026rsquo;s supposed infidelity\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eDistinctions\n\u003cul\u003e\n\u003cli\u003eIn love with Desdemona and had separate business motivations distinct from taking Othello/Cassio\u0026rsquo;s\u003c/li\u003e\n\u003cli\u003eDidn\u0026rsquo;t seem particularly racist, was previously friends with Othello?\u003c/li\u003e\n\u003cli\u003eIs more explicitly lying by doctoring tape, but words closer to Shakesphere\u0026rsquo;s Iago\u003c/li\u003e\n\u003cli\u003eDidn\u0026rsquo;t succeed\u003c/li\u003e\n\u003cli\u003eMost strained relationship with Emilia, \u0026ldquo;I love nobody. 
Don\u0026rsquo;t even love Jonny\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eUnique shots/catchphrases/features\n\u003cul\u003e\n\u003cli\u003eShot of Cousin Jonny / Rodrigo alone, in his home, isolated with light cast on him\u003c/li\u003e\n\u003cli\u003eFrenzied drum solo\u003c/li\u003e\n\u003cli\u003eMotivated somewhat by Lou, the financier\u003c/li\u003e\n\u003cli\u003eThe rising of stairs up and down the main factory as well as to a side building\u003c/li\u003e\n\u003cli\u003eWeed.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eClose reading Iago\u0026rsquo;s first drum solo \u003ca href=\"https://www.youtube.com/watch?v=fA-vKHOVDCw\"\u003ehttps://www.youtube.com/watch?v=fA-vKHOVDCw\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eIago: looks to the side, then at the camera directly\u003c/li\u003e\n\u003cli\u003eOthello: looks generally at the direction of the camera, but as camera pans in closes his eyes and look down and away\u003c/li\u003e\n\u003cli\u003eDesdemona: looks entirely to the side, uncaring\u003c/li\u003e\n\u003cli\u003eIago: looks down (like Othello\u0026rsquo;s ending shot), then looks up and to the side, cutting to\u0026hellip;\u003c/li\u003e\n\u003cli\u003eOthello: looking at the opposite side, seemingly \u0026ldquo;towards\u0026rdquo; Iago, then looks down\u003c/li\u003e\n\u003cli\u003eIago: as drum solo becomes more frenzied, (mildly extreme) close up staring dead into the camera\u003c/li\u003e\n\u003cli\u003e\u003cem\u003efrienzied drum soloing\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eIago: looks down, and then to the side, seemingly contemplating\u003c/li\u003e\n\u003cli\u003eDesdemona: stares dead ahead\u003c/li\u003e\n\u003cli\u003eOthello: looks to the side and down, hand holding head\u003c/li\u003e\n\u003cli\u003eIago: eyes pans across audience\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"d41d8c\"\u003e\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_film_analysis_outline/","tags":null,"title":"NUS-ENG401 Film Analysis Outline"},{"categories":null,"contents":"Literacy rates differ significantly between genders in Central Africa. In rural Nigeria, there is a 24.1% difference in literacy rates between men and women.\nIn Joys of Motherhood, Adankwo’s natural and implicitly differentiated treatment of Nhu Ego’s sons and daughters reflects the androcentrism in Nigerian society’s view of education; in the work, she asks Nhu Ego to not “forget the … twin [girl’s] bride prices will help out … boy’s school fees.” (Emecheta 127)\nNext Steps Token: s_4979_1d\nFollow this link for the next step.\n","html":"\u003cp\u003eLiteracy rates differ significantly between genders in Central Africa. In rural Nigeria, there is a 24.1% difference in literacy rates between men and women.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eJoys of Motherhood\u003c/em\u003e, Adankwo’s natural and implicitly differentiated treatment of Nhu Ego’s sons and daughters reflects the androcentrism in Nigerian society’s view of education; in the work, she asks Nhu Ego to not “forget the … twin [girl’s] bride prices will help out … boy’s school fees.” (Emecheta 127)\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_4979_1d\u003c/p\u003e\n\u003cp\u003eFollow \u003ca href=\"https://tinyurl.com/nuseng401giftbounce2\"\u003ethis link\u003c/a\u003e for the next step.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_2/","tags":null,"title":"NUS-ENG401 Gender and Education"},{"categories":null,"contents":"Welcome! The device of the station of birth plays a large part in all four of the works we read over the semester. 
In I, Tituba, the author grants Tituba renewed empowerment through her birth; in Black Shack Alley, Jose’s birth in the Alley forces him to leverage the racially unequal devices of the French regime to gain social advancement; Sophie’s trauma in Breath, Eyes, Memory is propagated by her violent conception—which results in her mother’s forced testing upon her; Joys of Motherhood’s Nnu Ego’s family is loving, yet with conservative values which forces a crippling sense of motherly duty that almost drove her to death. Birth, and challenging the station assigned at birth, is a fundamental value pervasive through the texts.\nThis game aims to explore some of the dynamics found in all four of the works, while exploring some aspects of Haitian, Martinican, or Nigerian culture.\nTo play the game, here are what you need to know\u0026ndash;\nThe game works like a CTF: through the game, you are hunting for game tokens that look like this: s_[numbers]_[numbersletters] You can validate whether or not the token is correct with the tool provided below Validate a Token! To check whether or not a token you received through the game is valid, use the utility below:\nValidate\nplease enter a token The Game Please go ahead to this link to get started.\n","html":"\u003ch2 id=\"welcome\"\u003eWelcome!\u003c/h2\u003e\n\u003cp\u003eThe device of the station of birth plays a large part in all four of the works we read over the semester. In I, Tituba, the author grants Tituba renewed empowerment through her birth; in Black Shack Alley, Jose’s birth in the Alley forces him to leverage the racially unequal devices of the French regime to gain social advancement; Sophie’s trauma in Breath, Eyes, Memory is propagated by her violent conception—which results in her mother’s forced testing upon her; Joys of Motherhood’s Nnu Ego’s family is loving, yet with conservative values which forces a crippling sense of motherly duty that almost drove her to death. 
Birth, and challenging the station assigned at birth, is a fundamental value pervasive through the texts.\u003c/p\u003e\n\u003cp\u003eThis game aims to explore some of the dynamics found in all four of the works, while exploring some aspects of Haitian, Martinican, or Nigerian culture.\u003c/p\u003e\n\u003cp\u003eTo play the game, here are what you need to know\u0026ndash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eThe game works like a CTF: through the game, you are hunting for game tokens that look like this: \u003cstrong\u003es_[numbers]_[numbersletters]\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eYou can validate whether or not the token is correct with the tool provided below\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"validate-a-token\"\u003eValidate a Token!\u003c/h2\u003e\n\u003cp\u003eTo check whether or not a token you received through the game is valid, use the utility below:\u003c/p\u003e\n\u003cp\u003e\u003cinput id=\"token\" placeholder=\"s_0000_0e\"\u003e\u003c/input\u003e \u003cbutton id=\"validate\"\u003eValidate\u003c/button\u003e\u003c/p\u003e\n\u003cdiv id=\"result\" style=\"font-size: 13px\"\u003eplease enter a token\u003c/div\u003e\n\u003cscript\u003e\n function sumDigits(n) {\n let sum = 0;\n while (n) {\n digit = n % 10;\n sum += digit;\n n = (n - digit) / 10;\n }\n return sum;\n }\n\n $(\"#validate\").click(() =\u003e {\n let invalid = \"invalid token, sorry!\";\n let valid = \"valid token, congrats!\";\n let value = $(\"#token\").val().split(\"_\");\n if (value[0] != \"s\") {\n $(\"#result\").html(invalid);\n } else if (!isNaN(value[1])) {\n let sumVal = sumDigits(parseInt(value[1]));\n let mod18_str = (sumVal % 50117).toString(16);\n if (value[2] == mod18_str) $(\"#result\").html(valid);\n else $(\"#result\").html(invalid);\n }\n })\n\u003c/script\u003e\n\u003ch2 id=\"the-game\"\u003eThe Game\u003c/h2\u003e\n\u003cp\u003ePlease go ahead to \u003ca href=\"/posts/kbhnus_eng401_gift_1/\"\u003ethis link\u003c/a\u003e to get 
started.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_utility/","tags":null,"title":"NUS-ENG401 Gift Utility"},{"categories":null,"contents":"General Information Due Date Topic Important Documents 9/29 Lit. Devices I, Tituba Prompt In an interview, Maryse Conde explains, \u0026ldquo;I was attracted to write the particular story of Tituba because this woman was unjustly treated by history. I felt the need to give her a reality that was denied to her because of her color and her gender.\u0026rdquo; Choose one or two literary devices and explain how Conde uses it/them in the novel to give Tituba her subjecthood. Examples could be: narrative voice, allusion, irony, dialogue, etc.\nClaim Synthesis Quotes Bin Birth Determines Capacity That birth determines the capacity for one to do Evil\nThere was one thing, however, that I didn\u0026rsquo;t know: evil is a gift received at birth. There\u0026rsquo;s no acquiring it. Those of us who have not come into this world armed with spurs and fangs are losers in every combat. (73)\nMama Yaya highlights that misfortune lies in the center of life derived from birth\nMisfortune, as you know, is our constant companion. We are born with it, we lie with it, and we squabble with it for the same withered breast. It eats the codfish from our calabash. But we\u0026rsquo;re tough, us n\u0026mdash;! (85)\nBelieves that having choice in birth is what would make it fulfilling\n(Irony between \u0026ldquo;gift\u0026rdquo; and \u0026ldquo;choice\u0026rdquo;)\nI began to doubt seriously Mama Yaya\u0026rsquo;s basic conviction that life is a gift. Life would only be a gift if each of us could choose the womb that carried us. \u0026hellip; If one day I am born again, let it be in the steely army of conquerors! (120)\nTituba believes that she is born as a healer\nThe terror of these people seemed like an injustice to me. 
They should have greeted me with shouts of joy and welcome and presented me with a list of illnesses that I would have tried my utmost to cure. I was born to heal, not to frighten. (12)\n\u0026ldquo;Births\u0026rdquo; Other People Elizabeth Parris \u0026ldquo;reborn\u0026rdquo; after Tituba\u0026rsquo;s Care\nUp till then I had not called on the supernatural to care for Elizabeth Parris. \u0026hellip; hat night I decided to use my powers. \u0026hellip; In the morning the color returned to Goodwife Parris\u0026rsquo;s cheeks. She asked for a little water. Toward midday she managed to feed herself. And in the evening she went to sleep like a newborn babe. (45)\nThe \u0026ldquo;evil\u0026rdquo; of abortion transferred from Tituba into Betsy\nI made her swear not to tell anyone and at dusk I plunged her up to her neck in a liquid to which I had given all the properties of amniotic fluid. \u0026hellip; Plunging Betsey into this scalding hot bath, it seemed to me that these same hands, that not long ago had dealt death were now giving life, and I was purifying myself of the murder of my child. (63)\nHer upper lip curled up into an ugly pout, revealing her sick gums. \u0026ldquo;You, do good? You\u0026rsquo;re a Negress, Tituba! You can only do evil. You are evil itself.\u0026rdquo; \u0026hellip; \u0026ldquo;That bath you had me take; what was in it? The blood of a newborn baby that died from one of your spells?\u0026rdquo; I was aghast. (77)\nRebirth After Death (like the actual book) Tituba\u0026rsquo;s Freeing from Prison into Benjamin is Described an Rebirth\nHe smiled cynically. \u0026ldquo;A man who hasn\u0026rsquo;t got very much money. You know how much slaves are selling for at the present time? Twenty-five pounds!\u0026rdquo; Our conversation stopped there, but now I knew the fate awaiting me. Another master, another bondage. (120)\nThen with one skillful blow of the mallet he smashed my chains to pieces. He did the same thing with my wrists while I screamed. 
\u0026hellip; I screamed, and this scream, the terrified cry of a newborn baby, heralded my return to this world. I had to learn how to walk again. \u0026hellip; Few people have the misfortune to be born twice. (122)\nTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story begins only after death\nAnd that is the story of my life. Such a bitter, bitter story. My real story starts where this one leaves off and it has no end. (175)\nSuccessful rebirth only without birth\nI watched her grow up and stumble around on her shaky legs, exploring the pur- gatory of the plantation, finding her delight in the shape of a cloud, the drooping foliage of an ylang-ylang, or the taste of a bitter orange. \u0026hellip; A child I didn\u0026rsquo;t give birth to but whom I chose! What motherhood could be nobler! (177)\nMisc Book opens with the framing of her being born\nI was born from this act of aggression. From this act of hatred and contempt. (3)\nPackage insert praises death as something positive\n\u0026ldquo;Death is a porte whereby we pass to joye; Lyfe is a lake that drowneth all in payne \u0026ndash;John Harrington\u0026rdquo; (Cover Insert)\nPlans for Abortion\nThere is no happiness in motherhood for a slave. It is little more than the expulsion of an innocent baby, who will have no chance to change its fate, into a world of slavery and abjection\u0026hellip;. That night, my baby was carried out of my womb in a flow of black blood. I saw him wave his arms like a tadpole in distress and I burst into tears. (52)\nSubclaim Development Tituba Realizes Birth is Involuntary Birth into live is a deterministic process for which those being \u0026ldquo;born\u0026rdquo; have no agency over. For African folks, Mama Yaya claims that misfortune is one such deterministic factor of their birth. Although Mama Yaya disagrees, Tituba believes that life is not a gift unless it is deterministic (this is of course ironic, because you don\u0026rsquo;t choose your gifts.) 
Despite the indeterminism, Tituba believes that she is born as a healer She leverages (Re)Birth to change others, to poor results Perhaps as an attempt to help others control (\u0026ldquo;choose\u0026rdquo;) birth, she uses her power to reborn people; like\nElizabeth Parris Betsy Parris But Psych! Both of them turned on her. Especially Betsy Parris.\nAlso her child was aborted.\nThe work literally provides rebirth of Tituba and empowers her to give birth despite her abortion Tituba Herself raised her station against those of Benjamin. Yet, this was not voluntary (see quote in section) and designed by Condé in the story.\nTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story begins only after forcible death/\u0026ldquo;rebirth\u0026rdquo;, and she (author?) considers it nobel to give this new form of birth without giving birth (which didn\u0026rsquo;t happen in real world), but without the request of the born either.\nConclusiony bit So this whole book, beginning at her birth, covered by a celebration of alternative birth illustrates such a process of providing agency.\nThe Claim The motif of birth and rebirth plays an important role in Maryse Condé\u0026rsquo;s work I, Tituba. Despite Tituba\u0026rsquo;s own failed attempt at controlling the (re)birth of herself and others to better their fate in history, Condé offers Tituba a renewed empowerment in birth by both illustrating her \u0026ldquo;rebirth\u0026rdquo; and providing her a chance to elect a descendant she wasn\u0026rsquo;t originally able to bear.\n","html":"\u003ch2 id=\"general-information\"\u003eGeneral Information\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDue Date\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003eImportant Documents\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e9/29\u003c/td\u003e\n\u003ctd\u003eLit. 
Devices\u003c/td\u003e\n\u003ctd\u003eI, Tituba\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"prompt\"\u003ePrompt\u003c/h2\u003e\n\u003cp\u003eIn an interview, Maryse Conde explains, \u0026ldquo;I was attracted to write the particular story of Tituba because this woman was unjustly treated by history. I felt the need to give her a reality that was denied to her because of her color and her gender.\u0026rdquo; Choose one or two literary devices and explain how Conde uses it/them in the novel to \u003cstrong\u003e\u003cstrong\u003egive Tituba her subjecthood\u003c/strong\u003e\u003c/strong\u003e. Examples could be: narrative voice, allusion, irony, dialogue, etc.\u003c/p\u003e\n\u003ch2 id=\"claim-synthesis\"\u003eClaim Synthesis\u003c/h2\u003e\n\u003ch3 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h3\u003e\n\u003ch4 id=\"birth-determines-capacity\"\u003eBirth Determines Capacity\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eThat birth determines the capacity for one to do Evil\u003c/p\u003e\n\u003cp\u003eThere was one thing, however, that I didn\u0026rsquo;t know: evil is a gift received at birth. There\u0026rsquo;s no acquiring it. Those of us who have not come into this world armed with spurs and fangs are losers in every combat. (73)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eMama Yaya highlights that misfortune lies in the center of life derived from birth\u003c/p\u003e\n\u003cp\u003eMisfortune, as you know, is our constant companion. We are born with it, we lie with it, and we squabble with it for the same withered breast. It eats the codfish from our calabash. But we\u0026rsquo;re tough, us n\u0026mdash;! 
(85)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBelieves that having choice in birth is what would make it fulfilling\u003c/p\u003e\n\u003cp\u003e(Irony between \u0026ldquo;gift\u0026rdquo; and \u0026ldquo;choice\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eI began to doubt seriously Mama Yaya\u0026rsquo;s basic conviction that life is a gift. Life would only be a gift if each of us could choose the womb that carried us. \u0026hellip; If one day I am born again, let it be in the steely army of conquerors! (120)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTituba believes that she is born as a healer\u003c/p\u003e\n\u003cp\u003eThe terror of these people seemed like an injustice to me. They should have greeted me with shouts of joy and welcome and presented me with a list of illnesses that I would have tried my utmost to cure. I was born to heal, not to frighten. (12)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"births-other-people\"\u003e\u0026ldquo;Births\u0026rdquo; Other People\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eElizabeth Parris \u0026ldquo;reborn\u0026rdquo; after Tituba\u0026rsquo;s Care\u003c/p\u003e\n\u003cp\u003eUp till then I had not called on the supernatural to care for Elizabeth Parris. \u0026hellip; hat night I decided to use my powers. \u0026hellip; In the morning the color returned to Goodwife Parris\u0026rsquo;s cheeks. She asked for a little water. Toward midday she managed to feed herself. And in the evening she went to sleep like a newborn babe. 
(45)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eThe \u0026ldquo;evil\u0026rdquo; of abortion transferred from Tituba into Betsy\u003c/p\u003e\n\u003cp\u003eI made her swear not to tell anyone and at dusk I plunged her up to her neck in a liquid to which I had given all the properties of amniotic fluid. \u0026hellip; Plunging Betsey into this scalding hot bath, it seemed to me that these same hands, that not long ago had dealt death were now giving life, and I was purifying myself of the murder of my child. (63)\u003c/p\u003e\n\u003cp\u003eHer upper lip curled up into an ugly pout, revealing her sick gums. \u0026ldquo;You, do good? You\u0026rsquo;re a Negress, Tituba! You can only do evil. You are evil itself.\u0026rdquo; \u0026hellip; \u0026ldquo;That bath you had me take; what was in it? The blood of a newborn baby that died from one of your spells?\u0026rdquo; I was aghast. (77)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"rebirth-after-death--like-the-actual-book\"\u003eRebirth After Death (like the actual book)\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTituba\u0026rsquo;s Freeing from Prison into Benjamin is Described an Rebirth\u003c/p\u003e\n\u003cp\u003eHe smiled cynically. \u0026ldquo;A man who hasn\u0026rsquo;t got very much money. You know how much slaves are selling for at the present time? Twenty-five pounds!\u0026rdquo; Our conversation stopped there, but now I knew the fate awaiting me. Another master, another bondage. (120)\u003c/p\u003e\n\u003cp\u003eThen with one skillful blow of the mallet he smashed my chains to pieces. He did the same thing with my wrists while I screamed. \u0026hellip; I screamed, and this scream, the terrified cry of a newborn baby, heralded my return to this world. I had to learn how to walk again. \u0026hellip; Few people have the misfortune to be born twice. 
(122)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story begins only after death\u003c/p\u003e\n\u003cp\u003eAnd that is the story of my life. Such a bitter, bitter story. My real story starts where this one leaves off and it has no end. (175)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eSuccessful rebirth only without birth\u003c/p\u003e\n\u003cp\u003eI watched her grow up and stumble around on her shaky legs, exploring the pur- gatory of the plantation, finding her delight in the shape of a cloud, the drooping foliage of an ylang-ylang, or the taste of a bitter orange. \u0026hellip; A child I didn\u0026rsquo;t give birth to but whom I chose! What motherhood could be nobler! (177)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"misc\"\u003eMisc\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eBook opens with the framing of her being born\u003c/p\u003e\n\u003cp\u003eI was born from this act of aggression. From this act of hatred and contempt. (3)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ePackage insert praises death as something positive\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Death is a porte whereby we pass to joye; Lyfe is a lake that drowneth all in payne \u0026ndash;John Harrington\u0026rdquo; (Cover Insert)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ePlans for Abortion\u003c/p\u003e\n\u003cp\u003eThere is no happiness in motherhood for a slave. It is little more than the expulsion of an innocent baby, who will have no chance to change its fate, into a world of slavery and abjection\u0026hellip;. 
That night, my baby was carried out of my womb in a flow of black blood. I saw him wave his arms like a tadpole in distress and I burst into tears. (52)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"subclaim-development\"\u003eSubclaim Development\u003c/h3\u003e\n\u003ch4 id=\"tituba-realizes-birth-is-involuntary\"\u003eTituba Realizes Birth is Involuntary\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eBirth into live is a \u003ca href=\"#that-birth-determines-the-capacity-for-one-to-do-evil\"\u003edeterministic process\u003c/a\u003e for which those being \u0026ldquo;born\u0026rdquo; have no agency over.\u003c/li\u003e\n\u003cli\u003eFor African folks, Mama Yaya claims that \u003ca href=\"#mama-yaya-highlights-that-misfortune-lies-in-the-center-of-life-derived-from-birth\"\u003emisfortune is one such\u003c/a\u003e deterministic factor of their birth.\u003c/li\u003e\n\u003cli\u003eAlthough \u003ca href=\"#believes-that-having-choice-in-birth-is-what-would-make-it-fulfilling\"\u003eMama Yaya disagrees, Tituba believes that life is not a gift unless it is deterministic\u003c/a\u003e (this is of course ironic, because you don\u0026rsquo;t choose your gifts.)\u003c/li\u003e\n\u003cli\u003eDespite the indeterminism, \u003ca href=\"#tituba-believes-that-she-is-born-as-a-healer\"\u003eTituba believes that she is born as a healer\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"she-leverages--re--birth-to-change-others-to-poor-results\"\u003eShe leverages (Re)Birth to change others, to poor results\u003c/h4\u003e\n\u003cp\u003ePerhaps as an attempt to help others control (\u0026ldquo;choose\u0026rdquo;) birth, she uses her power to reborn people; like\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#elizabeth-parris-reborn-after-tituba-s-care\"\u003eElizabeth Parris\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#the-evil-of-abortion-transferred-from-tituba-into-betsy\"\u003eBetsy 
Parris\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBut Psych! Both of them turned on her. Especially \u003ca href=\"#the-evil-of-abortion-transferred-from-tituba-into-betsy\"\u003eBetsy Parris\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAlso her \u003ca href=\"#plans-for-abortion\"\u003echild was aborted\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"the-work-literally-provides-rebirth-of-tituba-and-empowers-her-to-give-birth-despite-her-abortion\"\u003eThe work literally provides rebirth of Tituba and empowers her to give birth despite her abortion\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#tituba-s-freeing-from-prison-into-benjamin-is-described-an-rebirth\"\u003eTituba Herself\u003c/a\u003e raised her station against those of Benjamin. Yet, this was not voluntary (see quote in section) and designed by Condé in the story.\u003c/p\u003e\n\u003cp\u003eTituba\u0026rsquo;s \u0026ldquo;real\u0026rdquo; story \u003ca href=\"#tituba-s-real-story-begins-only-after-death\"\u003ebegins only after forcible death/\u0026ldquo;rebirth\u0026rdquo;\u003c/a\u003e, and she (author?) considers it nobel to give this new form of birth \u003ca href=\"#successful-rebirth-only-without-birth\"\u003ewithout giving birth\u003c/a\u003e (which didn\u0026rsquo;t happen in real world), but without the request of the born either.\u003c/p\u003e\n\u003ch4 id=\"conclusiony-bit\"\u003eConclusiony bit\u003c/h4\u003e\n\u003cp\u003eSo this whole book, \u003ca href=\"#book-opens-with-the-framing-of-her-being-born\"\u003ebeginning at her birth\u003c/a\u003e, covered by a \u003ca href=\"#package-insert-praises-death-as-something-positive\"\u003ecelebration of alternative birth\u003c/a\u003e illustrates such a process of providing agency.\u003c/p\u003e\n\u003ch3 id=\"the-claim\"\u003eThe Claim\u003c/h3\u003e\n\u003cp\u003eThe motif of birth and rebirth plays an important role in Maryse Condé\u0026rsquo;s work \u003cem\u003eI, Tituba\u003c/em\u003e. 
Despite Tituba\u0026rsquo;s own failed attempt at controlling the (re)birth of herself and others to better their fate in history, Condé offers Tituba a renewed empowerment in birth by both illustrating her \u0026ldquo;rebirth\u0026rdquo; and providing her a chance to elect a descendant she wasn\u0026rsquo;t originally able to bear.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhi_tituba_essay_planning/","tags":null,"title":"NUS-ENG401 I, Tituba Essay Planning"},{"categories":null,"contents":"Joys of Motherhood highlights the plurality of duties for the reader women have to undertake in order to succeed in Nigerian society. Women represent 80% of agricultural labor in Nigeria—a dangerous job, yet is significantly underrepresented in knowledge-based work.\nPrior to gaining ownership to her own stall, Nhu Ego has to “spread her wares on the pavement” (Emecheta 113) selling goods in order to make ends meet—despite Nnaife’s money from employment which he often squanders.\nNext Steps Token: s_2827_13\nThe conditions of this duality of work is harsh. This is where this story leaves off. Tap on this link, and one of two things may happen\u0026mdash;you maybe directed back to this page, or you maybe redirected somewhere else. Unfortunately, neither of these paths lead to further advancement.\n","html":"\u003cp\u003e\u003cem\u003eJoys of Motherhood\u003c/em\u003e highlights the plurality of duties for the reader women have to undertake in order to succeed in Nigerian society. 
Women represent 80% of agricultural labor in Nigeria—a dangerous job, yet is significantly underrepresented in knowledge-based work.\u003c/p\u003e\n\u003cp\u003ePrior to gaining ownership to her own stall, Nhu Ego has to “spread her wares on the pavement” (Emecheta 113) selling goods in order to make ends meet—despite Nnaife’s money from employment which he often squanders.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_2827_13\u003c/p\u003e\n\u003cp\u003eThe conditions of this duality of work is harsh. This is where this story leaves off. Tap on \u003ca href=\"https://tinyurl.com/nuseng401giftbounce6\"\u003ethis link\u003c/a\u003e, and one of two things may happen\u0026mdash;you maybe directed back to this page, or you maybe redirected somewhere else. Unfortunately, neither of these paths lead to further advancement.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_6/","tags":null,"title":"NUS-ENG401 Many Hats"},{"categories":null,"contents":"Even if the education system provides a ticket for its successful students to gain social advancement, it is often difficult or even arbitrary. Access to education is also frequently dependent on race.\nIn Black Shack Alley, Zobel frames the value of schooling as a “gateway … to escape.” (Zobel) Zobel highlights that the main way to escape the oppression in the colonies is by leveraging the itself oppressive systems of education.\nNext Steps Token: s_7776_1b\nThe process of pursuing education takes a lot more effort than the steps before! Please locate the link to the next target by looking under the cabinet from which a puffer-fish hangs in our San Mateo campus.\n","html":"\u003cp\u003eEven if the education system provides a ticket for its successful students to gain social advancement, it is often difficult or even arbitrary. 
Access to education is also frequently dependent on race.\u003c/p\u003e\n\u003cp\u003eIn \u003cem\u003eBlack Shack Alley\u003c/em\u003e, Zobel frames the value of schooling as a “gateway … to escape.” (Zobel) Zobel highlights that the main way to escape the oppression in the colonies is by leveraging the itself oppressive systems of education.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_7776_1b\u003c/p\u003e\n\u003cp\u003eThe process of pursuing education takes a lot more effort than the steps before! Please locate the link to the next target by looking under the cabinet from which a puffer-fish hangs in our San Mateo campus.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_3/","tags":null,"title":"NUS-ENG401 Pursuing Education"},{"categories":null,"contents":" Quote Explanation of quote (\u0026ldquo;understanding lived experience\u0026rdquo;) Implication (\u0026ldquo;understanding Duets/Othello\u0026rdquo;) Sharpe Wake; Sears Duet\nWake, p 16: ANALYZE ON TOP, CONNECT HERE\nTo be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\nAnalysis\n\u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo; hidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form in the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom 
Broader Analysis Modern day opportunities available to African Americans may represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\n[set this up]: how exactly Sharpe and Sears are brought together.\nThe creation of the arbitrarily racial label of \u0026lsquo;Black\u0026rsquo; for people of African descent allows the arbitrary persecution due to the label\u0026rsquo;s unclear boundaries; furthermore, this persecution continues to modern day: \u0026lsquo;holding\u0026rsquo; a person into seemingly self-inflicted cycles of abuse where the only true way out is to embrace and assimilate into white society.\ndefine no-space here\n[define wake in the end]: to be awake is to be aware of this cycle, and be\nothello and mona are going through only one trauma each billy is going through two Duet: musical composition for two performers with equal importance to the piece.\nBilly as mother (incestruous) of Othello Pieta: motherly holding Jesus (p91) Egytion to my mother give (p93); Billy doing the murdering (p100) Adding Black voices to Othello dismantles Othello\nBillie, Amah, and Magi \u0026lt;=\u0026gt; Desdemona, Emilia, Bianca Vogel =\u0026gt; Desdemona dies; in this one Billy kills instead of dies\nCANADA: figure of a black man not leaving her. Lovers not together, paternal love highlighted.\n\u0026ldquo;Wake: unless you deal become awake of the oprressions agaisnt you and relying on each other, you will fall into the white-centric society and join the oppressiors as the only way to get out of the self-sustaining cycle holding you down.\u0026rdquo;\nslave trade engine: dragging forward ship\nRECLAIMING\nDuet, p 56: Black is a term with power to racialist: it has clear influence and no clear category It\u0026rsquo;s because I\u0026rsquo;m Black. 
When a clear won\u0026rsquo;t put the change into my held-out hand, I think it\u0026rsquo;s because I\u0026rsquo;m Black. \u0026hellip; Who called us Black anyway? It\u0026rsquo;s not a country, it\u0026rsquo;s not a racial category, its not even the color of my skin.\nAnalysis Sears highlights the practical constraints being \u0026ldquo;Black\u0026rdquo; caused Billie, yet at the same time cleanly rejects all practical associations of Blackness with actual qualities Practical hindrance of arbitrary label \u0026ldquo;who called us\u0026rdquo;\u0026mdash;us-them dynamic; highlighting the amorphousness of the other group, yet did not provide a racialized labels Broader Analysis The contrast between the explicit label \u0026ldquo;Black\u0026rdquo;, and Billie\u0026rsquo;s more general, amorphous term of \u0026ldquo;who\u0026rdquo; highlights the power of racialization While Black is not a clear descriptor category, Bilie shows that it can actually have much detrimental impact; and yet Bilie herself when referring to her oppressors simply ask \u0026ldquo;who\u0026rdquo; is it\u0026mdash;obverting the same problem for her opressors Wake, p 10 Vigilance, too, because any- and everywhere we are, medical and other professionals treat Black patients differently. \u0026hellip; Because they are believed to be less sensitive to pain, black people are forced to endure more pain.\nAnalysis Points out uneven treatment of African Americans simply because of the arbitrary label of \u0026ldquo;Black\u0026rdquo; \u0026ldquo;sensitive\u0026rdquo;: respond to \u0026ldquo;changes, signals, or influences\u0026rdquo; less \u0026ldquo;responsive\u0026rdquo;/can\u0026rsquo;t be influenced highlights sense of diminishment of the Black intellect, a sense of lethargic primitivism Broader Analysis The label of \u0026ldquo;Black\u0026rdquo; has helped create an image of primitivism, which, due to its indeterminism (i.e. 
there is no specific descriptor for \u0026ldquo;Black\u0026rdquo; or \u0026ldquo;Blackness\u0026rdquo;), makes it very easy to abuse to diminish a group of people.\nDuet, 66: attempts to free of the forced cycle results in separation from Blackness, as it seems Othello has done A black man afflicted with Negrophobia. He\u0026rsquo;s the one that wants to white-wash his life \u0026hellip; Brooker T. Uppermiddleclass III \u0026hellip; found in predominantly White neighborhoods. He refers to other Blacks as \u0026ldquo;them\u0026rdquo;\nAnalysis Brooker T. Washington: educator and orator; born into slavery and was able to eventually lead the Tuskegee institute III =\u0026gt; represents the multi-generation descendant; LAST NAME ERASURE: no longer carrying the herratage Morphing from Washinton\u0026rsquo;s idea of the \u0026ldquo;black elite\u0026rdquo; into white assimilation Broader Analysis Highlights the establishment of us/them dynamic, as a photo of Othello Functional similarity of Othello fighting \u0026ldquo;the Turks\u0026rdquo;, reducing again his enemy to another group and racializing them as well Othello functions as essentially a part of the venetian army, in a \u0026ldquo;predominantly white neighborhood\u0026rdquo; Duet, 31: Billie is forced to be stuck in a vicious cycle, a cycle which is propegated according to Wake only because of her blackness All her money goes up in smokes and writings that tell her she really ain\u0026rsquo;t out of her mind \u0026hellip; [otherwise] all the rot inside her would begin to boil, threaten to shoot out.\nAnalysis Double entere: \u0026ldquo;money goes up in smokes\u0026rdquo; \u0026mdash;- money vanishes + money is used to buy smokes \u0026ldquo;to tell\u0026rdquo;: personifiying money as something that influences Billie Bilie is trapped in a vicious cycle: Linguistically trapped \u0026mdash; Rot =\u0026gt;(boil)=\u0026gt; Smoke =\u0026gt;(prevents)=\u0026gt; Rot.\nBroader Analysis This is an exemplification for the 
\u0026ldquo;bound in no-space\u0026rdquo; which Sharpe highlights. Unlike the Othello in Shakesphere that pretty much brings his own downfall from Iago\u0026rsquo;s manipulation, Billie in the play acts more due to the the inherent constraints of the system.\nWake, p 16: ANALYZE ON TOP, CONNECT HERE To be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\nAnalysis \u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo; hidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form in the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom Broader Analysis Modern day opportunities available to African Americans may represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\nWake p 21 In the wake, the semiotics of the slave ship continue: from the forced movements of the enslaved to the forced movements of the migrant and the refugee, to the regulation of Black people in North American streets and neighborhoods.\nAnalysis \u0026ldquo;semiotics\u0026rdquo;: the interpretation of signs and symbols the repetition of the word forced takes agency out of the actor Sharpe\u0026rsquo;s argument: forcibleness is a symbol for the reminants of the slave ship Broader 
Analysis Billy\u0026rsquo;s being forced by the system to perform her daily, self destructive actions instead of the emotional Othello we see in Shakespeare, Billy is being forced by the system not out of her own volition to continue her act.\n","html":"\u003cul\u003e\n\u003cli\u003eQuote\u003c/li\u003e\n\u003cli\u003eExplanation of quote (\u0026ldquo;understanding lived experience\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eImplication (\u0026ldquo;understanding Duets/Othello\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSharpe Wake; Sears Duet\u003c/p\u003e\n\u003cp\u003eWake, p 16: ANALYZE ON TOP, CONNECT HERE\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003eTo be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003e\u003cstrong\u003eAnalysis\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003ehidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ein the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eBroader Analysis\u003c/strong\u003e\nModern day opportunities available to African Americans may 
represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e[set this up]: how exactly Sharpe and Sears are brought together.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThe creation of the arbitrarily racial label of \u0026lsquo;Black\u0026rsquo; for people of African descent allows the arbitrary persecution due to the label\u0026rsquo;s unclear boundaries; furthermore, this persecution continues to modern day: \u0026lsquo;holding\u0026rsquo; a person into seemingly self-inflicted cycles of abuse where the only true way out is to embrace and assimilate into white society.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003edefine no-space here\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e[define wake in the end]: to be awake is to be aware of this cycle, and be\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eothello and mona are going through only one trauma each\u003c/li\u003e\n\u003cli\u003ebilly is going through two\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDuet: musical composition for two performers with equal importance to the piece.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBilly as mother (incestruous) of Othello\n\u003cul\u003e\n\u003cli\u003ePieta: motherly holding Jesus (p91)\u003c/li\u003e\n\u003cli\u003eEgytion to my mother give (p93);\u003c/li\u003e\n\u003cli\u003eBilly doing the murdering (p100)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAdding Black voices to Othello dismantles Othello\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eBillie, Amah, and Magi \u0026lt;=\u0026gt; Desdemona, Emilia, Bianca\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eVogel =\u0026gt; Desdemona dies; in this one Billy kills instead of dies\u003c/p\u003e\n\u003cp\u003eCANADA: figure of a black man not leaving her. 
Lovers not together, paternal love highlighted.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Wake: unless you deal become awake of the oprressions agaisnt you and relying on each other, you will fall into the white-centric society and join the oppressiors as the only way to get out of the self-sustaining cycle holding you down.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eslave trade engine: dragging forward ship\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eRECLAIMING\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"duet-p-56-black-is-a-term-with-power-to-racialist-it-has-clear-influence-and-no-clear-category\"\u003eDuet, p 56: Black is a term with power to racialist: it has clear influence and no clear category\u003c/h2\u003e\n\u003cblockquote\u003e\n\u003cp\u003eIt\u0026rsquo;s because I\u0026rsquo;m Black. When a clear won\u0026rsquo;t put the change into my held-out hand, I think it\u0026rsquo;s because I\u0026rsquo;m Black. \u0026hellip; Who called us Black anyway? It\u0026rsquo;s not a country, it\u0026rsquo;s not a racial category, its not even the color of my skin.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"analysis\"\u003eAnalysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eSears highlights the practical constraints being \u0026ldquo;Black\u0026rdquo; caused Billie, yet at the same time cleanly rejects all practical associations of Blackness with actual qualities\u003c/li\u003e\n\u003cli\u003ePractical hindrance of arbitrary label\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;who called us\u0026rdquo;\u0026mdash;us-them dynamic; highlighting the amorphousness of the other group, yet did \u003cem\u003enot\u003c/em\u003e provide a racialized labels\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eThe contrast between the explicit label \u0026ldquo;Black\u0026rdquo;, and Billie\u0026rsquo;s more general, amorphous term of \u0026ldquo;who\u0026rdquo; highlights the power of 
racialization\u003c/li\u003e\n\u003cli\u003eWhile Black is not a clear descriptor category, Bilie shows that it can actually have much detrimental impact; and yet Bilie herself when referring to her oppressors simply ask \u0026ldquo;who\u0026rdquo; is it\u0026mdash;obverting the same problem for her opressors\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"wake-p-10\"\u003eWake, p 10\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003eVigilance, too, because any- and everywhere we are, medical and other professionals treat Black patients differently. \u0026hellip; Because they are believed to be less sensitive to pain, black people are forced to endure more pain.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch4 id=\"analysis\"\u003eAnalysis\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ePoints out uneven treatment of African Americans simply because of the arbitrary label of \u0026ldquo;Black\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;sensitive\u0026rdquo;: respond to \u0026ldquo;changes, signals, or influences\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003eless \u0026ldquo;responsive\u0026rdquo;/can\u0026rsquo;t be influenced\u003c/li\u003e\n\u003cli\u003ehighlights sense of diminishment of the Black intellect, a sense of lethargic primitivism\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h4\u003e\n\u003cp\u003eThe label of \u0026ldquo;Black\u0026rdquo; has helped create an image of primitivism, which, due to its indeterminism (i.e. 
there is no specific descriptor for \u0026ldquo;Black\u0026rdquo; or \u0026ldquo;Blackness\u0026rdquo;), makes it very easy to abuse to diminish a group of people.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"duet-66-attempts-to-free-of-the-forced-cycle-results-in-separation-from-blackness-as-it-seems-othello-has-done\"\u003eDuet, 66: attempts to free of the forced cycle results in separation from Blackness, as it seems Othello has done\u003c/h2\u003e\n\u003cblockquote\u003e\n\u003cp\u003eA black man afflicted with Negrophobia. He\u0026rsquo;s the one that wants to white-wash his life \u0026hellip; Brooker T. Uppermiddleclass III \u0026hellip; found in predominantly White neighborhoods. He refers to other Blacks as \u0026ldquo;them\u0026rdquo;\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"analysis\"\u003eAnalysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBrooker T. Washington: educator and orator; born into slavery and was able to eventually lead the Tuskegee institute\n\u003cul\u003e\n\u003cli\u003eIII =\u0026gt; represents the multi-generation descendant; LAST NAME ERASURE: no longer carrying the herratage\u003c/li\u003e\n\u003cli\u003eMorphing from Washinton\u0026rsquo;s idea of the \u0026ldquo;black elite\u0026rdquo; into white assimilation\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eHighlights the establishment of us/them dynamic, as a photo of Othello\u003c/li\u003e\n\u003cli\u003eFunctional similarity of Othello fighting \u0026ldquo;the Turks\u0026rdquo;, reducing again his enemy to another group and racializing them as well\u003c/li\u003e\n\u003cli\u003eOthello functions as essentially a part of the venetian army, in a \u0026ldquo;predominantly white neighborhood\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"duet-31-billie-is-forced-to-be-stuck-in-a-vicious-cycle-a-cycle-which-is-propegated-according-to-wake-only-because-of-her-blackness\"\u003eDuet, 31: Billie is forced to be stuck in a vicious cycle, a cycle which is propegated according to Wake only because of her blackness\u003c/h2\u003e\n\u003cblockquote\u003e\n\u003cp\u003eAll her money goes up in smokes and writings that tell her she really ain\u0026rsquo;t out of her mind \u0026hellip; [otherwise] all the rot inside her would begin to boil, threaten to shoot out.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"analysis\"\u003eAnalysis\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eDouble entere: \u0026ldquo;money goes up in smokes\u0026rdquo; \u0026mdash;- money vanishes + money is used to buy smokes\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;to tell\u0026rdquo;: personifiying money as something that influences Billie\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBilie is trapped in a vicious cycle: Linguistically trapped \u0026mdash; Rot =\u0026gt;(boil)=\u0026gt; Smoke =\u0026gt;(prevents)=\u0026gt; Rot.\u003c/p\u003e\n\u003ch3 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h3\u003e\n\u003cp\u003eThis is an exemplification for the \u0026ldquo;bound in no-space\u0026rdquo; which Sharpe highlights. 
Unlike the Othello in Shakesphere that pretty much brings his own downfall from Iago\u0026rsquo;s manipulation, Billie in the play acts more due to the the inherent constraints of the system.\u003c/p\u003e\n\u003ch3 id=\"wake-p-16-analyze-on-top-connect-here\"\u003eWake, p 16: ANALYZE ON TOP, CONNECT HERE\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003eTo be in the wake is to live in those no’s, to live in the no-space that the law is not bound to respect \u0026hellip; To be in the wake is to recognize \u0026hellip; the ongoing locations of Black being: the wake, the ship, the hold, and the weather.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch4 id=\"analysis\"\u003eAnalysis\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;no-space\u0026rdquo;\u0026mdash;ironic term exemplifying sense of claustrophobia: the contrast between \u0026ldquo;space\u0026rdquo; (the possible/freedom), with \u0026ldquo;no.\u0026rdquo;\n\u003cul\u003e\n\u003cli\u003ehidden meaning about the fact that, though freedom seemingly exists, it is not genuinely there because it is freedom in the \u0026ldquo;no\u0026rdquo;; in fact, these acts that \u0026ldquo;grant freedom\u0026rdquo; may simply grant oppression in a different form\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ein the same vein, the imagery of \u0026ldquo;the hold\u0026rdquo; illustrates this same sense of claustrophobic constraint: one is \u0026ldquo;held\u0026rdquo; by this perceived sense of freedom\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h4\u003e\n\u003cp\u003eModern day opportunities available to African Americans may represent elements of the \u0026ldquo;no-space\u0026rdquo;: the areas by which the seeming granting of freedom becomes a hindering detriment that relegates someone to \u0026ldquo;the hold.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"wake-p-21\"\u003eWake p 21\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003eIn the wake, the 
semiotics of the slave ship continue: from the forced movements of the enslaved to the forced movements of the migrant and the refugee, to the regulation of Black people in North American streets and neighborhoods.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch4 id=\"analysis\"\u003eAnalysis\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;semiotics\u0026rdquo;: the interpretation of signs and symbols\n\u003cul\u003e\n\u003cli\u003ethe repetition of the word forced takes agency out of the actor\u003c/li\u003e\n\u003cli\u003eSharpe\u0026rsquo;s argument: forcibleness is a \u003cem\u003esymbol\u003c/em\u003e for the reminants of the slave ship\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"broader-analysis\"\u003eBroader Analysis\u003c/h4\u003e\n\u003cp\u003eBilly\u0026rsquo;s being forced by the system to perform her daily, self destructive actions instead of the emotional Othello we see in Shakespeare, Billy is being forced by the system not out of her own volition to continue her act.\u003c/p\u003e\n\u003chr\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_racialization_outline/","tags":null,"title":"NUS-ENG401 Racialization Outline"},{"categories":null,"contents":"Traditional values in Caribbean and African societies often place womens’ value in the context of other men. When women pursue independent careers such as midwives and healers, they could be called “witches.”\nMaryse Condé demonstrates this bias in the novel I, Tituba. She writes that “Yao’s love had transformed [Tituba]’s mother”, making her a “young woman.” (Condé 7) In the passage, the womanhood of Tituba’s mother is framed as only being granted when she encounters Yao; in contrast, Mama Yaya’s womanhood exists independently, yet she is viewed as a witch.\nNext Steps Token: s_6780_15\nYou now get to make a choice. 
You may either\u0026hellip;\nPursue independence You may choose to face the consequences of leveraging the harsh education system to attempt to achieve social advancement. To do so, follow this link.\nSeek domestic dependence Or, you may choose to follow domestic roles without continuing to pursue education. If so, follow this link.\n","html":"\u003cp\u003eTraditional values in Caribbean and African societies often place womens’ value in the context of other men. When women pursue independent careers such as midwives and healers, they could be called “witches.”\u003c/p\u003e\n\u003cp\u003eMaryse Condé demonstrates this bias in the novel \u003cem\u003eI, Tituba\u003c/em\u003e. She writes that “Yao’s love had transformed [Tituba]’s mother”, making her a “young woman.” (Condé 7) In the passage, the womanhood of Tituba’s mother is framed as only being granted when she encounters Yao; in contrast, Mama Yaya’s womanhood exists independently, yet she is viewed as a witch.\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cp\u003eToken: s_6780_15\u003c/p\u003e\n\u003cp\u003eYou now get to make a choice. You may either\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"pursue-independence\"\u003ePursue independence\u003c/h3\u003e\n\u003cp\u003eYou may choose to face the consequences of leveraging the harsh education system to attempt to achieve social advancement. To do so, follow \u003ca href=\"/posts/kbhnus_eng401_gift_3/\"\u003ethis link\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"seek-domestic-dependence\"\u003eSeek domestic dependence\u003c/h3\u003e\n\u003cp\u003eOr, you may choose to follow domestic roles without continuing to pursue education. 
If so, follow \u003ca href=\"/posts/kbhnus_eng401_gift_6/\"\u003ethis link\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_eng401_gift_4/","tags":null,"title":"NUS-ENG401 What is a Witch?"},{"categories":null,"contents":"Statement Suppose \\(U_1\\), \\(U_2\\), and \\(W\\) are subspaces of \\(V\\), such that:\n\\begin{equation} \\begin{cases} V = U_1 \\oplus W\\\\ V = U_2 \\oplus W \\end{cases} \\end{equation}\nProve or give a counterexample that \\(U_1=U_2\\)\nIntuition The statement is not true. The definition of direct sums makes it such that, \\(\\forall v \\in V\\), there exists a unique representation of \\(v\\) with \\(u_{1i}+w_{i} = v\\) for \\(u_{1j}\\in U_1, w_{j} \\in W\\) as well as another unique representation \\(u_{2i} + w_{i}=v\\) for \\(u_{2j} \\in U_{2}, w_{j} \\in W\\).\nHowever, the definition of direct sums doesn\u0026rsquo;t guarantee that the distinct unique representations are equivalent; although \\(V\\) can only be represented uniquely by EITHER a sum of \\(U_1+W\\) or \\(U_2+W\\), it does not mean that each \\(v \\in V\\) itself has only one unique representation.\nCounterexample In constructing a counterexample, we turn to the fact that the sums of two variables creates a third free variable; therefore, we can figure two distinct ways of creating a third, final free variable that construct an equivalent space.\nConstructing \\(U_1\\) as a subspace We begin with constructing:\n\\begin{equation} U_1= \\left\\{\\begin{pmatrix} x_1\\\\y_1\\\\2y_1 \\end{pmatrix}, x_1,y_1 \\in \\mathbb{F} \\right\\} \\end{equation}\nBy setting both free variables to \\(0\\), we construct the additive identity. Then:\n\\begin{equation} \\lambda \\begin{pmatrix} x_1 \\\\ y_1 \\\\ 2y_1 \\end{pmatrix} = \\begin{pmatrix} \\lambda x_1 \\\\ \\lambda y_1\\\\ 2(\\lambda y_1) \\end{pmatrix} \\end{equation}\nby multiplication in \\(\\mathbb{F}\\), scalar multiplication, commutativity, and associativity. 
We can show closure under addition by inheriting the operation in \\(\\mathbb{F}\\) as well as applying distributive to the factor of \\(2\\).\nTherefore, we show that \\(U_1\\) is a subspace of \\(\\mathbb{F}^{3}\\).\nConstructing \\(U_2\\) as a subspace Then, we construct:\n\\begin{equation} U_2=\\left\\{\\begin{pmatrix} x_1 \\\\ y_1 \\\\ 0 \\end{pmatrix}, x_1,y_1\\in \\mathbb{F} \\right\\} \\end{equation}\nWe again have \\(0\\) by setting free variables to create the additive identity. Addition and scalar multiplication is closed by inheriting them from \\(\\mathbb{F}\\) (and the fact that \\(0\\) is the additive inverse and therefore \\(\\lambda 0 = 0\\)).\nTherefore, \\(U_2\\) is a subspace as well in \\(\\mathbb{F}^{3}\\).\nConstructing \\(W\\) as a subspace Finally, we have:\n\\begin{equation} W = \\left\\{\\begin{pmatrix} 0 \\\\ 0 \\\\z_1 \\end{pmatrix}, z_1\\in \\mathbb{F} \\right\\} \\end{equation}\nBy setting \\(z_1=0\\), we have the additive identity. As with above, addition and scalar multiplication is closed through inheritance and that \\(\\lambda 0=0\\).\nConstructing Sum of Subsets Let\u0026rsquo;s construct:\n\\begin{equation} U_1+W = V \\end{equation}\nTake \\(u_1 \\in U_1, w \\in W\\), attempting to construct a \\(v\\in V\\), we have that:\n\\begin{equation} \\begin{pmatrix} x_{1} \\\\ y_1 \\\\ 2y_1 \\end{pmatrix} + \\begin{pmatrix} 0 \\\\ 0 \\\\ z_1 \\end{pmatrix} = \\begin{pmatrix} x_1 \\\\ y_1 \\\\ 2y_1+z_1 \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\\\ c \\end{pmatrix} \\end{equation}\nConstructing Direct Sum For all vectors in \\(\\mathbb{F}^{3}\\), this is an equivalence with 3 free variables and 3 expressions\u0026mdash;rendering each vector in \\(\\mathbb{F}^{3}\\) to have a representation by \\(U_1+W\\). 
We can see this also with the unique \\(0\\) test:\nWe see that for:\n\\begin{equation} 0 \\in U_1+W \\end{equation}\nTo solve for some \\(u_1 \\in U, w \\in W : u_1+w = 0\\) we have that:\n\\begin{equation} \\begin{pmatrix} x_{1} \\\\ y_1 \\\\ 2y_1 \\end{pmatrix} + \\begin{pmatrix} 0 \\\\ 0 \\\\ z_1 \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 0 \\\\ 0 \\end{pmatrix} \\end{equation}\nwhere the first vector is in \\(U_1\\) and the second is in \\(W\\). The first two expressions tell us that \\(x_1=y_1=0\\); the final equation requires that \\(2y_1+z_1=0+z_1=0\\Rightarrow z_1=0\\) .\nTherefore, the only way to write \\(0\\) is to take each element in the sum to \\(0\\) (i.e. in this case \\(u_1=w=0 \\implies u_1+w = 0\\)), making the above a direct sum.\nTherefore:\n\\begin{equation} U_1 \\oplus W = V \\end{equation}\nIn almost the same manner, we can show that:\n\\begin{equation} U_2\\oplus W = V \\end{equation}\nThat, for some \\(u_2\\in U_2, w \\in W, v \\in V\\):\n\\begin{equation} \\begin{pmatrix} x_1\\\\y_1\\\\0 \\end{pmatrix} + \\begin{pmatrix} 0 \\\\ 0 \\\\ z_1 \\end{pmatrix} = \\begin{pmatrix} x_1\\\\y_1\\\\z_1 \\end{pmatrix} \\end{equation}\nfor the first vector in \\(U_2\\), the second in \\(W\\). In fact, this is the statement made in example 1.41.\nCreating the Counterexample Finally, we have that:\n\\begin{equation} \\left\\{\\begin{pmatrix} x_1 \\\\ y_1 \\\\ 2y_1 \\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\} \\neq\\left\\{\\begin{pmatrix} x_1 \\\\ y_1 \\\\ 0 \\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\} \\end{equation}\n\\(\\forall y_1 \\neq 0\\) in the first expression. Therefore, \\(U_1 \\neq U_2\\), finishing the counterexample. 
\\(\\blacksquare\\)\n","html":"\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSuppose \\(U_1\\), \\(U_2\\), and \\(W\\) are subspaces of \\(V\\), such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV = U_1 \\oplus W\\\\\nV = U_2 \\oplus W\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProve or give a counterexample that \\(U_1=U_2\\)\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eThe statement is not true. The definition of direct sums makes it such that, \\(\\forall v \\in V\\), there exists a unique representation of \\(v\\) with \\(u_{1i}+w_{i} = v\\) for \\(u_{1j}\\in U_1, w_{j} \\in W\\) as well as another unique representation \\(u_{2i} + w_{i}=v\\) for \\(u_{2j} \\in U_{2}, w_{j} \\in W\\).\u003c/p\u003e\n\u003cp\u003eHowever, the definition of direct sums doesn\u0026rsquo;t guarantee that the distinct unique representations are equivalent; although \\(V\\) can only be represented uniquely by EITHER a sum of \\(U_1+W\\) or \\(U_2+W\\), it does not mean that each \\(v \\in V\\) itself has only one unique representation.\u003c/p\u003e\n\u003ch2 id=\"counterexample\"\u003eCounterexample\u003c/h2\u003e\n\u003cp\u003eIn constructing a counterexample, we turn to the fact that the sums of two variables creates a third free variable; therefore, we can figure two distinct ways of creating a third, final free variable that construct an equivalent space.\u003c/p\u003e\n\u003ch3 id=\"constructing-u-1-as-a-subspace\"\u003eConstructing \\(U_1\\) as a subspace\u003c/h3\u003e\n\u003cp\u003eWe begin with constructing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1= \\left\\{\\begin{pmatrix}\nx_1\\\\y_1\\\\2y_1\n\\end{pmatrix}, x_1,y_1 \\in \\mathbb{F} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy setting both free variables to \\(0\\), we construct the additive identity. 
Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix} = \\begin{pmatrix}\n\\lambda x_1 \\\\ \\lambda y_1\\\\ 2(\\lambda y_1)\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby multiplication in \\(\\mathbb{F}\\), scalar multiplication, commutativity, and associativity. We can show closure under addition by inheriting the operation in \\(\\mathbb{F}\\) as well as applying distributive to the factor of \\(2\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we show that \\(U_1\\) is a subspace of \\(\\mathbb{F}^{3}\\).\u003c/p\u003e\n\u003ch3 id=\"constructing-u-2-as-a-subspace\"\u003eConstructing \\(U_2\\) as a subspace\u003c/h3\u003e\n\u003cp\u003eThen, we construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_2=\\left\\{\\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 0\n\\end{pmatrix}, x_1,y_1\\in \\mathbb{F} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe again have \\(0\\) by setting free variables to create the additive identity. Addition and scalar multiplication is closed by inheriting them from \\(\\mathbb{F}\\) (and the fact that \\(0\\) is the additive inverse and therefore \\(\\lambda 0 = 0\\)).\u003c/p\u003e\n\u003cp\u003eTherefore, \\(U_2\\) is a subspace as well in \\(\\mathbb{F}^{3}\\).\u003c/p\u003e\n\u003ch3 id=\"constructing-w-as-a-subspace\"\u003eConstructing \\(W\\) as a subspace\u003c/h3\u003e\n\u003cp\u003eFinally, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nW = \\left\\{\\begin{pmatrix}\n0 \\\\ 0 \\\\z_1\n\\end{pmatrix}, z_1\\in \\mathbb{F} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy setting \\(z_1=0\\), we have the additive identity. 
As with above, addition and scalar multiplication is closed through inheritance and that \\(\\lambda 0=0\\).\u003c/p\u003e\n\u003ch3 id=\"constructing-sum-of-subsets\"\u003eConstructing Sum of Subsets\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1+W = V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTake \\(u_1 \\in U_1, w \\in W\\), attempting to construct a \\(v\\in V\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_{1} \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix} + \\begin{pmatrix}\n0 \\\\ 0 \\\\ z_1\n\\end{pmatrix} = \\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 2y_1+z_1\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b \\\\ c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"constructing-direct-sum\"\u003eConstructing Direct Sum\u003c/h3\u003e\n\u003cp\u003eFor all vectors in \\(\\mathbb{F}^{3}\\), this is an equivalence with 3 free variables and 3 expressions\u0026mdash;rendering each vector in \\(\\mathbb{F}^{3}\\) to have a representation by \\(U_1+W\\). We can see this also with the unique \\(0\\) test:\u003c/p\u003e\n\u003cp\u003eWe see that for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\in U_1+W\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo solve for some \\(u_1 \\in U, w \\in W : u_1+w = 0\\) we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_{1} \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix} + \\begin{pmatrix}\n0 \\\\ 0 \\\\ z_1\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 0 \\\\ 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the first vector is in \\(U_1\\) and the second is in \\(W\\). The first two expressions tell us that \\(x_1=y_1=0\\); the final equation requires that \\(2y_1+z_1=0+z_1=0\\Rightarrow z_1=0\\) .\u003c/p\u003e\n\u003cp\u003eTherefore, the only way to write \\(0\\) is to take each element in the sum to \\(0\\) (i.e. 
in this case \\(u_1=w=0 \\implies u_1+w = 0\\)), making the above a direct sum.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\oplus W = V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn almost the same manner, we can show that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_2\\oplus W = V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat, for some \\(u_2\\in U_2, w \\in W, v \\in V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_1\\\\y_1\\\\0\n\\end{pmatrix} + \\begin{pmatrix}\n0 \\\\ 0 \\\\ z_1\n\\end{pmatrix} = \\begin{pmatrix}\nx_1\\\\y_1\\\\z_1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor the first vector in \\(U_2\\), the second in \\(W\\). In fact, this is the statement made in example \u003ccode\u003e1.41\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"creating-the-counterexample\"\u003eCreating the Counterexample\u003c/h3\u003e\n\u003cp\u003eFinally, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\left\\{\\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 2y_1\n\\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\} \\neq\\left\\{\\begin{pmatrix}\nx_1 \\\\ y_1 \\\\ 0\n\\end{pmatrix}: x_1,y_1 \\in \\mathbb{F}\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\forall y_1 \\neq 0\\) in the first expression. Therefore, \\(U_1 \\neq U_2\\), finishing the counterexample. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_1_c_proof_preso/","tags":null,"title":"NUS-MATH530 1.C Problem 23"},{"categories":null,"contents":"Claim Proof or give a counter example for the statement that:\n\\begin{align} \\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3) \\end{align}\nCounterexample This statement is false.\nTake the following three subspaces of \\(\\mathbb{F}^{2}\\):\n\\begin{align} U_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\ U_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\ U_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{align}\nsubspace check All \\(U_1\\), \\(U_2\\), \\(U_3\\) are in \\(\\mathbb{F}^{2}\\).\nzero Zero exists in all by setting free variables to \\(0\\)\naddition For \\(U_1\\) \u0026mdash;\n\\begin{equation} \\mqty(a_1 \\\\ 0) + \\mqty(a_2 \\\\ 0) = \\mqty(a_1+a_2 \\\\ 0) \\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\end{equation}\nand, by the same token, addition is closed for \\(U_2\\).\nFor \\(U_3\\) \u0026mdash;\n\\begin{equation} \\mqty(c_1 \\\\ c_1) + \\mqty(c_2 \\\\ c_2) = \\mqty(c_1+c_2 \\\\ c_1+c_2) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{equation}\nscalar multiplication For \\(U_1\\) \u0026mdash;\n\\begin{equation} \\lambda \\mqty(a \\\\ 0) = \\mqty(\\lambda a \\\\ 0) \\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\end{equation}\nand, by the same token, scalar multiplication is closed for \\(U_2\\).\nFor \\(U_3\\) \u0026mdash;\n\\begin{equation} \\lambda \\mqty(c \\\\ c) = \\mqty(\\lambda c \\\\ \\lambda c) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{equation}\nconstructing the counterexample Let us calculate the value of both sides of:\n\\begin{align} \\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) 
- \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3) \\end{align}\nRecall that:\n\\begin{align} U_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\ U_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\ U_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} \\end{align}\nleft side Let\u0026rsquo;s first construct:\n\\begin{equation} U_1 + U_2 + U_3 \\end{equation}\nBy definition:\n\\begin{equation} U_1 + U_2 + U_3 = \\qty{u_1+u_2+u_3: u_j\\in U_j} \\end{equation}\nTherefore, taking a sample from each results as:\n\\begin{equation} u_1+u_2+u_3 = \\mqty(a \\\\ 0) + \\mqty(0 \\\\ b) + \\mqty(c \\\\c) = \\mqty(a+c \\\\ b +c) \\end{equation}\nThis creates two free variables for slots, meaning:\n\\begin{equation} U_1+U_2+U_3 = \\mathbb{F}^{2} \\end{equation}\nSo: \\(\\dim \\qty(U_1+U_2+U_3)=2\\)\nright side dimension of the subspaces\nLet us construct a basis for each of these spaces to figure their dimension.\nFor \\(U_1\\), \\(\\qty{\\mqty(1 \\\\ 0)}\\). We see that scaling the one vector in this basis will construct all vectors in \\(\\mathbb{F}^{2}\\) for which the second coordinate will be \\(0\\) \u0026mdash; spanning \\(U_1\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_1 = 1\\).\nBy almost the same token, for \\(U_2\\), \\(\\qty{\\mqty(0 \\\\ 1)}\\). This makes also \\(\\dim U_2=1\\).\nFor \\(U_3\\), we have \\(\\qty{\\mqty(1 \\\\ 1)}\\). Scaling this one vector will construct all vectors in \\(\\mathbb{F}^{2}\\) for which both coordinates are the same \u0026mdash; spanning \\(U_3\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_3 = 1\\).\nThis renders all three subspaces have dimension \\(1\\).\ndimension of the unions\nThese subspaces were picked because of a surprising convenience. 
Their unions are all the zero vector!\n\\begin{equation} U_1 \\cap U_2 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)} \\end{equation}\nThis is because \\(a=0\\), \\(b=0\\) respectively in order to satisfy both generators.\nSimilarly\n\\begin{equation} U_1 \\cap U_3 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)} \\end{equation}\nTo satisfy both generators, \\(a=c\\) for the top coordinate, \\(c=0\\) for the bottom coordinate, so \\(a=c=0\\).\nBy a similar token:\n\\begin{equation} U_2 \\cap U_3 = \\qty{\\mqty(0 \\\\ 0)} \\end{equation}\nWe established before that the span of \\(\\qty{}\\) (which is declared linearly independent) to be \\(\\qty{0}\\), so we see that the dimensions of all three required unions as \\(0\\) (as an empty list has length \\(0\\).)\nconstructing the expression for the right side\nWe have that:\n\\begin{equation} \\dim U_j = 1, j \\in \\qty{1,2,3} \\end{equation}\nAnd that:\n\\begin{equation} \\dim U_{j} \\cap U_{k} = 0 , j,k \\in \\{1,2,3\\} \\end{equation}\nfrom above.\nThis makes\u0026mdash;\n\\begin{align} \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\ =1\u0026amp;+1+1-0-0-0+0 \\\\ =3 \\end{align}\nshowing the counterexample We have now that:\n\\begin{equation} \\dim(U_1+U_2+U_3) = 2 \\end{equation}\nBut:\n\\begin{align} \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\ =3 \\end{align}\nYet \\(2 \\neq 3\\).\nSo:\n\\begin{align} \\dim(U_1+U_2+U_3) \\neq \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\ \u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\ \u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\ \\end{align}\nFinishing 
the counter example. \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"claim\"\u003eClaim\u003c/h2\u003e\n\u003cp\u003eProof or give a counter example for the statement that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"counterexample\"\u003eCounterexample\u003c/h2\u003e\n\u003cp\u003eThis statement is false.\u003c/p\u003e\n\u003cp\u003eTake the following three subspaces of \\(\\mathbb{F}^{2}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\\nU_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\\nU_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"subspace-check\"\u003esubspace check\u003c/h3\u003e\n\u003cp\u003eAll \\(U_1\\), \\(U_2\\), \\(U_3\\) are in \\(\\mathbb{F}^{2}\\).\u003c/p\u003e\n\u003ch4 id=\"zero\"\u003ezero\u003c/h4\u003e\n\u003cp\u003eZero exists in all by setting free variables to \\(0\\)\u003c/p\u003e\n\u003ch4 id=\"addition\"\u003eaddition\u003c/h4\u003e\n\u003cp\u003eFor \\(U_1\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a_1 \\\\ 0) + \\mqty(a_2 \\\\ 0) = \\mqty(a_1+a_2 \\\\ 0) \\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, by the same token, addition is closed for \\(U_2\\).\u003c/p\u003e\n\u003cp\u003eFor \\(U_3\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(c_1 \\\\ c_1) + \\mqty(c_2 \\\\ c_2) = \\mqty(c_1+c_2 \\\\ c_1+c_2) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"scalar-multiplication\"\u003escalar multiplication\u003c/h4\u003e\n\u003cp\u003eFor \\(U_1\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\mqty(a \\\\ 0) = \\mqty(\\lambda a \\\\ 0) 
\\in \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, by the same token, scalar multiplication is closed for \\(U_2\\).\u003c/p\u003e\n\u003cp\u003eFor \\(U_3\\) \u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda \\mqty(c \\\\ c) = \\mqty(\\lambda c \\\\ \\lambda c) \\in \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"constructing-the-counterexample\"\u003econstructing the counterexample\u003c/h3\u003e\n\u003cp\u003eLet us calculate the value of both sides of:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim\\qty(U_1+U_2+U_3) = \u0026amp;\\dim U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU_1 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}}\\\\\nU_2 = \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}}\\\\\nU_3 = \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}}\n\\end{align}\u003c/p\u003e\n\u003ch4 id=\"left-side\"\u003eleft side\u003c/h4\u003e\n\u003cp\u003eLet\u0026rsquo;s first construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 + U_2 + U_3\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy definition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 + U_2 + U_3 = \\qty{u_1+u_2+u_3: u_j\\in U_j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, taking a sample from each results as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu_1+u_2+u_3 = \\mqty(a \\\\ 0) + \\mqty(0 \\\\ b) + \\mqty(c \\\\c) = \\mqty(a+c \\\\ b +c)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis creates two free variables for slots, meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1+U_2+U_3 = \\mathbb{F}^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo: \\(\\dim \\qty(U_1+U_2+U_3)=2\\)\u003c/p\u003e\n\u003ch4 id=\"right-side\"\u003eright 
side\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edimension of the subspaces\u003c/p\u003e\n\u003cp\u003eLet us construct a basis for each of these spaces to figure their dimension.\u003c/p\u003e\n\u003cp\u003eFor \\(U_1\\), \\(\\qty{\\mqty(1 \\\\ 0)}\\). We see that scaling the one vector in this basis will construct all vectors in \\(\\mathbb{F}^{2}\\) for which the second coordinate will be \\(0\\) \u0026mdash; spanning \\(U_1\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_1 = 1\\).\u003c/p\u003e\n\u003cp\u003eBy almost the same token, for \\(U_2\\), \\(\\qty{\\mqty(0 \\\\ 1)}\\). This makes also \\(\\dim U_2=1\\).\u003c/p\u003e\n\u003cp\u003eFor \\(U_3\\), we have \\(\\qty{\\mqty(1 \\\\ 1)}\\). Scaling this one vector will construct all vectors in \\(\\mathbb{F}^{2}\\) for which both coordinates are the same \u0026mdash; spanning \\(U_3\\). Being a list with one non-zero vector, it is also linearly independent. So \\(\\dim U_3 = 1\\).\u003c/p\u003e\n\u003cp\u003eThis renders all three subspaces have dimension \\(1\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edimension of the unions\u003c/p\u003e\n\u003cp\u003eThese subspaces were picked because of a surprising convenience. 
Their unions are all the zero vector!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\cap U_2 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(0 \\\\ b): b \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is because \\(a=0\\), \\(b=0\\) respectively in order to satisfy both generators.\u003c/p\u003e\n\u003cp\u003eSimilarly\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\cap U_3 = \\qty{\\mqty(a \\\\ 0): a \\in \\mathbb{F}} \\cap \\qty{\\mqty(c \\\\ c): c \\in \\mathbb{F}} = \\qty{\\mqty(0 \\\\ 0)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo satisfy both generators, \\(a=c\\) for the top coordinate, \\(c=0\\) for the bottom coordinate, so \\(a=c=0\\).\u003c/p\u003e\n\u003cp\u003eBy a similar token:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_2 \\cap U_3 = \\qty{\\mqty(0 \\\\ 0)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe established before that the span of \\(\\qty{}\\) (which is declared linearly independent) to be \\(\\qty{0}\\), so we see that the dimensions of all three required unions as \\(0\\) (as an empty list has length \\(0\\).)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003econstructing the expression for the right side\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_j = 1, j \\in \\qty{1,2,3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_{j} \\cap U_{k} = 0 , j,k \\in \\{1,2,3\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efrom above.\u003c/p\u003e\n\u003cp\u003eThis makes\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\\n=1\u0026amp;+1+1-0-0-0+0 
\\\\\n=3\n\\end{align}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"showing-the-counterexample\"\u003eshowing the counterexample\u003c/h3\u003e\n\u003cp\u003eWe have now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim(U_1+U_2+U_3) = 2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBut:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\\n=3\n\\end{align}\u003c/p\u003e\n\u003cp\u003eYet \\(2 \\neq 3\\).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dim(U_1+U_2+U_3) \\neq \\dim \u0026amp;U_1+\\dim U_2+\\dim U_3\\\\\n\u0026amp;-\\dim(U_1 \\cap U_2) - \\dim(U_1 \\cap U_3) - \\dim(U_2 \\cap U_3) \\\\\n\u0026amp;+\\dim(U_1 \\cap U_2 \\cap U_3)\\\\\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinishing the counter example. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_2_c_problem_17/","tags":null,"title":"NUS-MATH530 2.C Problem 17"},{"categories":null,"contents":"Statement Support \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nProof Given injectivity Given an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nWe begin with some statements.\nRecall that, a linear map called injective when \\(Tv=Tu \\implies v=u\\) Recall also that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\) Motivating \\(S\\) We show that we can indeed create a function \\(S\\) by the injectivity of \\(T\\). 
Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\nWLOG consider two vectors \\(a,b \\in V\\).\nCreating \\(S\\) Define a function \\(S:W\\to V\\) in the following manner:\n\\begin{equation} S(v) = a \\mid Ta = v \\end{equation}\nDemonstrating that \\(S\\) is a function\nSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\n\\(Sv = a \\mid Ta=v\\) \\(Su = b \\mid Tb=u\\) If \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\nFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\). From prior, we have \\(v=u\\) From the two statements above, we have \\(v=u \\implies Ta=Tb\\) Lastly, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\) Hence demonstrating\n","html":"\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSupport \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003ch3 id=\"given-injectivity\"\u003eGiven injectivity\u003c/h3\u003e\n\u003cp\u003eGiven an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003cp\u003eWe begin with some statements.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eRecall that, a linear map called injective when \\(Tv=Tu \\implies v=u\\)\u003c/li\u003e\n\u003cli\u003eRecall also that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"motivating-s\"\u003eMotivating \\(S\\)\u003c/h4\u003e\n\u003cp\u003eWe show that we can indeed create a function \\(S\\) by the injectivity of \\(T\\). 
Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\u003c/p\u003e\n\u003cp\u003eWLOG consider two vectors \\(a,b \\in V\\).\u003c/p\u003e\n\u003ch4 id=\"creating-s\"\u003eCreating \\(S\\)\u003c/h4\u003e\n\u003cp\u003eDefine a function \\(S:W\\to V\\) in the following manner:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS(v) = a \\mid Ta = v\n\\end{equation}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eDemonstrating that \\(S\\) is a function\u003c/p\u003e\n\u003cp\u003eSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(Sv = a \\mid Ta=v\\)\u003c/li\u003e\n\u003cli\u003e\\(Su = b \\mid Tb=u\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\).\u003c/li\u003e\n\u003cli\u003eFrom prior, we have \\(v=u\\)\u003c/li\u003e\n\u003cli\u003eFrom the two statements above, we have \\(v=u \\implies Ta=Tb\\)\u003c/li\u003e\n\u003cli\u003eLastly, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHence demonstrating\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20-1/","tags":null,"title":"NUS-MATH530 3.B Problem 20"},{"categories":null,"contents":"Statement Support \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). 
Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nProof Given injectivity Given an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nCreating \\(S\\) Define a relation \\(S:range\\ T\\to V\\) in the following manner:\n\\begin{equation} S(v) = a \\mid Ta = v \\end{equation}\nDemonstrating that \\(S\\) is a function We show that there are no two possible choices for \\(a\\), and therefore that \\(S\\) is a function, by the injectivity of \\(T\\). Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\nSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\n\\(Sv = a \\mid Ta=v\\) \\(Su = b \\mid Tb=u\\) If \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\nFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\). From prior, we have \\(v=u\\) From the two statements above, we have \\(v=u \\implies Ta=Tb\\) Recall now that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\nTherefore, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\). Hence demonstrating the desired quality that shows \\(S\\) as a function.\nDemonstrating that \\(S\\) is a linear map The linearity \\(S\\) actually simply inherits the linearity of \\(T\\), which is defined to be a linear map.\nAdditivity:\n\\begin{align} Sv+Su \u0026amp;= (a \\mid Ta =v) + (b \\mid Tb =u) \\\\ \u0026amp;= (a+b) \\mid Ta+Tb = (v+u) \\\\ \u0026amp;= (a+b) \\mid T(a+b) = (v+u) \\\\ \u0026amp;= x \\mid Tx = (v+u) \\\\ \u0026amp;= S(v+u) \\end{align}\nHomogenity is shown in a similar fashion. We can therefore conclude that \\(S \\in \\mathcal{L}(range\\ T, V)\\).\nNote on the codomain of \\(S\\) Note that we desire \\(S \\in \\mathcal{L}(W,V),\\ i.e.\\ S:W\\to V\\), And yet, as it stands, \\(S: range\\ T \\to W\\). 
Fortunately, as \\(range\\ T\\) is a subspace of \\(W\\) (as ranges are subspaces of the codomain), we can leverage Axler 3.A-E11 (Sasha\u0026rsquo;s Proof, \u0026ldquo;maps to subspaces can be extended to the whole space\u0026rdquo;) to arbitrary extend \\(S\\) to \\(S:W\\to V\\).\nIt turns out that where the \u0026ldquo;extended\u0026rdquo; basis vectors gets mapped doesn\u0026rsquo;t matter. We only care about \\(S\\) insofar as its compositional behavior with \\(T\\).\nDemonstrating that \\(S\\) has the properties we desire We desire that \\(ST = I \\in \\mathcal{L}(V,V)\\).\nRecall that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\). We now show that \\(ST\\) acts like the identity map.\nWLOG take \\(v \\in V\\).\nLet \\(Tv=a\\). Let \\(Sa = u\\). Based on the definition of \\(S\\) (that \\(Sx = y \\mid Ty=x\\), \u0026ldquo;\\(S\\) is the inverse map\u0026rdquo;), we have that \\(Tu=a\\). Recall once again that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\nWe now have that \\(Tu=a=Tv\\), therefore, because \\(T\\) is given injective, \\(u=v\\).\nWe have show WLOG that \\((ST)v = S(Tv) =Sa = u=v\\). Therefore \\((ST)v=v\\), making \\(ST\\) an identity map \\(ST:V\\to V\\). Lastly, as the product of linear maps are themselves a linear map, \\(ST=I\\in \\mathcal{L}(V,V)\\)\nConclusion Having constructed the existence of \\(S\\) based on the required properties of \\(T\\), we show that given an injective \\(T \\in \\mathcal{L}(V,W)\\), have an \\(S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\), as desired.\nGiven \\(S\\) Given some \\(T \\in \\mathcal{L}(V,W)\\) and that \\(\\exists S \\in \\mathcal{L}(W,V): ST=I \\in \\mathcal{L}(V,V)\\), we desire that \\(T\\) is injective. 
Fortunately, we essentially just reverse the logic of the last section in the last part of the proof.\nRecall that a linear map called injective when \\(Tv=Tu \\implies v=u\\). Suppose for the sake of contradiction that \\(\\exists u,v: Tv=Tu\\) but \\(u\\neq v\\).\nLet \\(Tv=Tu=a\\) Let \\(Sa=b\\) Therefore: \\((ST)v=(ST)u=S(a)=b\\). Just to reiterate, this means that we have:\n\\((ST)v=b\\implies Iv=b\\) \\((ST)u=b \\implies Iu=b\\) Therefore, we have that \\(Iv=Iu\\) for distinct \\(v,u\\), which is absurd. Having reached contradiction, we have that \\(Tu=Tv\\implies u=v\\), reaching the definition of injectivity for \\(T\\). \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"statement\"\u003eStatement\u003c/h2\u003e\n\u003cp\u003eSupport \\(W\\) is finite-dimensional, and \\(T \\in \\mathcal{L}(V,W)\\). Prove that \\(T\\) is injective IFF \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003ch3 id=\"given-injectivity\"\u003eGiven injectivity\u003c/h3\u003e\n\u003cp\u003eGiven an injective \\(T \\in \\mathcal{L}(V,W)\\), we desire that \\(\\exists S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003ch4 id=\"creating-s\"\u003eCreating \\(S\\)\u003c/h4\u003e\n\u003cp\u003eDefine a relation \\(S:range\\ T\\to V\\) in the following manner:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS(v) = a \\mid Ta = v\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"demonstrating-that-s-is-a-function\"\u003eDemonstrating that \\(S\\) is a function\u003c/h4\u003e\n\u003cp\u003eWe show that there are no two possible choices for \\(a\\), and therefore that \\(S\\) is a function, by the injectivity of \\(T\\). 
Recall a function is a map has the property that \\(v=u \\implies Fv=Fu\\).\u003c/p\u003e\n\u003cp\u003eSo, given \\(v, u \\in W\\) and \\(v=u\\), we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(Sv = a \\mid Ta=v\\)\u003c/li\u003e\n\u003cli\u003e\\(Su = b \\mid Tb=u\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf \\(Sv=Su\\), then \\(a=b\\). To demonstrate that \\(S\\) is a function, we now desire that \\(a=b\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eFrom the above, we have that \\(Ta=v\\), \\(Tb=u\\).\u003c/li\u003e\n\u003cli\u003eFrom prior, we have \\(v=u\\)\u003c/li\u003e\n\u003cli\u003eFrom the two statements above, we have \\(v=u \\implies Ta=Tb\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eRecall now that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\u003c/p\u003e\n\u003cp\u003eTherefore, from the injectivity of \\(T\\), we have that \\(Ta=Tb \\implies a=b\\). Hence demonstrating the desired quality that shows \\(S\\) as a function.\u003c/p\u003e\n\u003ch4 id=\"demonstrating-that-s-is-a-linear-map\"\u003eDemonstrating that \\(S\\) is a linear map\u003c/h4\u003e\n\u003cp\u003eThe linearity \\(S\\) actually simply inherits the linearity of \\(T\\), which is defined to be a linear map.\u003c/p\u003e\n\u003cp\u003eAdditivity:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nSv+Su \u0026amp;= (a \\mid Ta =v) + (b \\mid Tb =u) \\\\\n\u0026amp;= (a+b) \\mid Ta+Tb = (v+u) \\\\\n\u0026amp;= (a+b) \\mid T(a+b) = (v+u) \\\\\n\u0026amp;= x \\mid Tx = (v+u) \\\\\n\u0026amp;= S(v+u)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eHomogenity is shown in a similar fashion. We can therefore conclude that \\(S \\in \\mathcal{L}(range\\ T, V)\\).\u003c/p\u003e\n\u003ch4 id=\"note-on-the-codomain-of-s\"\u003eNote on the codomain of \\(S\\)\u003c/h4\u003e\n\u003cp\u003eNote that we desire \\(S \\in \\mathcal{L}(W,V),\\ i.e.\\ S:W\\to V\\), And yet, as it stands, \\(S: range\\ T \\to W\\). 
Fortunately, as \\(range\\ T\\) is a subspace of \\(W\\) (as ranges are subspaces of the codomain), we can leverage Axler 3.A-E11 (Sasha\u0026rsquo;s Proof, \u0026ldquo;maps to subspaces can be extended to the whole space\u0026rdquo;) to arbitrary extend \\(S\\) to \\(S:W\\to V\\).\u003c/p\u003e\n\u003cp\u003eIt turns out that where the \u0026ldquo;extended\u0026rdquo; basis vectors gets mapped doesn\u0026rsquo;t matter. We only care about \\(S\\) insofar as its compositional behavior with \\(T\\).\u003c/p\u003e\n\u003ch4 id=\"demonstrating-that-s-has-the-properties-we-desire\"\u003eDemonstrating that \\(S\\) has the properties we desire\u003c/h4\u003e\n\u003cp\u003eWe desire that \\(ST = I \\in \\mathcal{L}(V,V)\\).\u003c/p\u003e\n\u003cp\u003eRecall that the \u0026ldquo;identity map\u0026rdquo; on \\(V\\) is a map \\(I \\in \\mathcal{L}(V,V)\\) such that \\(Iv = v, \\forall v \\in V\\). We now show that \\(ST\\) acts like the identity map.\u003c/p\u003e\n\u003cp\u003eWLOG take \\(v \\in V\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLet \\(Tv=a\\).\u003c/li\u003e\n\u003cli\u003eLet \\(Sa = u\\). Based on the definition of \\(S\\) (that \\(Sx = y \\mid Ty=x\\), \u0026ldquo;\\(S\\) is the inverse map\u0026rdquo;), we have that \\(Tu=a\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eRecall once again that a linear map called injective when \\(Tv=Tu \\implies v=u\\).\u003c/p\u003e\n\u003cp\u003eWe now have that \\(Tu=a=Tv\\), therefore, because \\(T\\) is given injective, \\(u=v\\).\u003c/p\u003e\n\u003cp\u003eWe have show WLOG that \\((ST)v = S(Tv) =Sa = u=v\\). Therefore \\((ST)v=v\\), making \\(ST\\) an identity map \\(ST:V\\to V\\). 
Lastly, as the product of linear maps are themselves a linear map, \\(ST=I\\in \\mathcal{L}(V,V)\\)\u003c/p\u003e\n\u003ch4 id=\"conclusion\"\u003eConclusion\u003c/h4\u003e\n\u003cp\u003eHaving constructed the existence of \\(S\\) based on the required properties of \\(T\\), we show that given an injective \\(T \\in \\mathcal{L}(V,W)\\), have an \\(S \\in \\mathcal{L}(W,V)\\) such that \\(ST = I \\in \\mathcal{L}(V,V)\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"given-s\"\u003eGiven \\(S\\)\u003c/h3\u003e\n\u003cp\u003eGiven some \\(T \\in \\mathcal{L}(V,W)\\) and that \\(\\exists S \\in \\mathcal{L}(W,V): ST=I \\in \\mathcal{L}(V,V)\\), we desire that \\(T\\) is injective. Fortunately, we essentially just reverse the logic of the last section in the last part of the proof.\u003c/p\u003e\n\u003cp\u003eRecall that a linear map called injective when \\(Tv=Tu \\implies v=u\\). Suppose for the sake of contradiction that \\(\\exists u,v: Tv=Tu\\) but \\(u\\neq v\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLet \\(Tv=Tu=a\\)\u003c/li\u003e\n\u003cli\u003eLet \\(Sa=b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore: \\((ST)v=(ST)u=S(a)=b\\). Just to reiterate, this means that we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\((ST)v=b\\implies Iv=b\\)\u003c/li\u003e\n\u003cli\u003e\\((ST)u=b \\implies Iu=b\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTherefore, we have that \\(Iv=Iu\\) for distinct \\(v,u\\), which is absurd. Having reached contradiction, we have that \\(Tu=Tv\\implies u=v\\), reaching the definition of injectivity for \\(T\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20/","tags":null,"title":"NUS-MATH530 3.B Problem 20"},{"categories":null,"contents":"Chapter 4 discussion with Lachlan 4.2 False.\nThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p = m\\}\\) is not closed under addition. 
You can add two \\(m\\) degree polynomials and get something that\u0026rsquo;s not \\(m\\) degrees:\n\\begin{equation} (z^{m} + 1) - z^{m} = 1 \\end{equation}\n4.3 False.\nThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p\\ even\\}\\) is not closed also under addition, for the same reason:\n\\begin{equation} (z^{m} + z^{m-1} + 1) - (z^{m} + 1) = z^{m-1} \\end{equation}\nOne Chapter 5 Exercise 5.A.5 Suppose \\(T \\in \\mathcal{L}(V)\\), prove that the intersection of every collection of \\(V\\) that is invariant under \\(T\\) is invariant under \\(T\\)\nLet \\(U_1 \\dots U_{n}\\) be invariant subspaces under \\(T\\).\nThat is:\n\\begin{equation} T u_{j} \\in U_{j} \\end{equation}\nWe desire that:\n\\begin{align} Tu \\in \\bigcap U_{j}\\ |\\ u \\in \\bigcap U_{j} \\end{align}\nWLOG, treat \\(u \\in \\bigcap U_{j}\\) as \\(u \\in U_{j}\\). Now, \\(Tu \\in U_{j}\\). This holds \\(\\forall U_{j}\\). Therefore, \\(Tu \\in \\forall U_{j}\\). So \\(Tu \\in \\bigcap U_{j}\\).\nHence, the intersection of invariant subspaces are invariant as well.\n","html":"\u003ch2 id=\"chapter-4-discussion-with-lachlan\"\u003eChapter 4 discussion with Lachlan\u003c/h2\u003e\n\u003ch3 id=\"4-dot-2\"\u003e4.2\u003c/h3\u003e\n\u003cp\u003eFalse.\u003c/p\u003e\n\u003cp\u003eThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p = m\\}\\) is not closed under addition. 
You can add two \\(m\\) degree polynomials and get something that\u0026rsquo;s not \\(m\\) degrees:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(z^{m} + 1) - z^{m} = 1\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"4-dot-3\"\u003e4.3\u003c/h3\u003e\n\u003cp\u003eFalse.\u003c/p\u003e\n\u003cp\u003eThe union between \\(\\{0\\} \\cup \\{p \\in \\mathcal{P}(\\mathbb{F}): deg\\ p\\ even\\}\\) is not closed also under addition, for the same reason:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(z^{m} + z^{m-1} + 1) - (z^{m} + 1) = z^{m-1}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"one-chapter-5-exercise\"\u003eOne Chapter 5 Exercise\u003c/h2\u003e\n\u003ch3 id=\"5-dot-a-dot-5\"\u003e5.A.5\u003c/h3\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), prove that the intersection of every collection of \\(V\\) that is invariant under \\(T\\) is invariant under \\(T\\)\u003c/p\u003e\n\u003cp\u003eLet \\(U_1 \\dots U_{n}\\) be invariant subspaces under \\(T\\).\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT u_{j} \\in U_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nTu \\in \\bigcap U_{j}\\ |\\ u \\in \\bigcap U_{j}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWLOG, treat \\(u \\in \\bigcap U_{j}\\) as \\(u \\in U_{j}\\). Now, \\(Tu \\in U_{j}\\). This holds \\(\\forall U_{j}\\). Therefore, \\(Tu \\in \\forall U_{j}\\). So \\(Tu \\in \\bigcap U_{j}\\).\u003c/p\u003e\n\u003cp\u003eHence, the intersection of invariant subspaces are invariant as well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_a_and_discussion/","tags":null,"title":"NUS-MATH530 5.A and Discussion"},{"categories":null,"contents":"Suppose \\(V = U \\oplus W\\), where \\(U\\) and \\(W\\) are nonzero subspaces of \\(V\\). Define \\(P \\in \\mathcal{L}(V)\\) by \\(P(u+w) = u\\) for \\(u \\in U\\), \\(w \\in W\\). 
Find all eigenvalues and eigenvectors of \\(P\\).\nSolutions:\n\\(\\lambda = 1\\), \\(v = u \\in U\\) \\(\\lambda = 0\\), \\(v = w \\in W\\) For \\(\\lambda\\) to be an eigenvalue of \\(P\\), we have to have:\n\\begin{equation} Pv = \\lambda v \\end{equation}\nMeaning, for WLOG \\(v = u+w\\):\n\\begin{align} \u0026amp;Pv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = \\lambda (u+w) \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w \\end{align}\nNow, let\u0026rsquo;s rewrite this expression to equal to \\(0\\) to take advantage of the fact that \\(V = U \\oplus W\\).\n\\begin{align} \u0026amp;u = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; 0 = (\\lambda -1) u + \\lambda w \\end{align}\nNow, recall that a sum of subsets in a direct sum if and only if the only way to write \\(0\\) is for each of the elements of the sums to be \\(0\\). In this case, it means that:\n\\begin{equation} \\begin{cases} (\\lambda -1) u = 0 \\\\ \\lambda w = 0 \\end{cases} \\end{equation}\nWe have two cases here: either \\(w=0\\) or \\(u=0\\).\nAside: why can\u0026rsquo;t \\(u = w = 0\\)? Suppose for the sake of contradiction let\u0026rsquo;s take \\(u=0, w=0\\). Then, \\(Pv = \\lambda v\\), so \\(v=u+w\\), and so \\(v=0\\). This would make \\(v\\) no longer an eigenvector, by definition of eigenvector; this also makes \\(\\lambda\\) no longer an eigenvalue. Hence, one of \\(u\\) or \\(w\\) is not \\(0\\).\n\\(w=0\\) We have that \\(w=0\\). 
Replacing that in the above expression, we have that:\n\\begin{align} \u0026amp;Pv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u + 0 \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u \\end{align}\nFrom this expression, or the top one from before \\((\\lambda -1 ) u = 0\\), we have that \\(\\lambda = 1\\).\nFinally, then, we have:\n\\begin{align} Pv \u0026amp;= \\lambda v \\\\ \u0026amp;= v \\end{align}\nAny valid solution for \\(v\\) is an eigenvector.\nSo:\n\\begin{align} \u0026amp;Pv = v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = v \\\\ \\Rightarrow\\ \u0026amp; u = v \\end{align}\nHence, all \\(u \\in U\\) is an eigenvector of \\(P\\) with eigenvalue \\(1\\).\n\\(u=0\\) We now have that \\(u=0\\). So, we have that:\n\\begin{align} \u0026amp;Pv = \\lambda v \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w \\\\ \\Rightarrow\\ \u0026amp; 0 = \\lambda w \\end{align}\nFrom this expression, or the bottom one from \\(\\lambda w = 0\\), we have that \\(\\lambda = 0\\).\nFinally, then, we have:\n\\begin{align} Pv \u0026amp;= \\lambda v \\\\ \u0026amp;= 0 \\end{align}\nAny valid solution for \\(v\\) is an eigenvector.\nSo:\n\\begin{align} \u0026amp;Pv = 0 \\\\ \\Rightarrow\\ \u0026amp; P(u+w) = 0 \\\\ \\Rightarrow\\ \u0026amp; u = 0 \\end{align}\nRecall now that \\(v = u+w\\), so \\(v = 0 +w\\), making \\(v = w\\).\nHence, all \\(w \\in W\\) is an eigenvector of \\(P\\) with eigenvalue \\(0\\).\n","html":"\u003cp\u003eSuppose \\(V = U \\oplus W\\), where \\(U\\) and \\(W\\) are nonzero subspaces of \\(V\\). Define \\(P \\in \\mathcal{L}(V)\\) by \\(P(u+w) = u\\) for \\(u \\in U\\), \\(w \\in W\\). 
Find all eigenvalues and eigenvectors of \\(P\\).\u003c/p\u003e\n\u003cp\u003eSolutions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\lambda = 1\\), \\(v = u \\in U\\)\u003c/li\u003e\n\u003cli\u003e\\(\\lambda = 0\\), \\(v = w \\in W\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eFor \\(\\lambda\\) to be an eigenvalue of \\(P\\), we have to have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPv = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, for WLOG \\(v = u+w\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = \\lambda (u+w) \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, let\u0026rsquo;s rewrite this expression to equal to \\(0\\) to take advantage of the fact that \\(V = U \\oplus W\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;u = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; 0 = (\\lambda -1) u + \\lambda w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, recall that a sum of subsets in a direct sum if and only if the only way to write \\(0\\) is for each of the elements of the sums to be \\(0\\). In this case, it means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n(\\lambda -1) u = 0 \\\\\n\\lambda w = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have two cases here: either \\(w=0\\) or \\(u=0\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside: why can\u0026rsquo;t \\(u = w = 0\\)? Suppose for the sake of contradiction let\u0026rsquo;s take \\(u=0, w=0\\). Then, \\(Pv = \\lambda v\\), so \\(v=u+w\\), and so \\(v=0\\). This would make \\(v\\) no longer an eigenvector, by definition of eigenvector; this also makes \\(\\lambda\\) no longer an eigenvalue. Hence, one of \\(u\\) or \\(w\\) is not \\(0\\).\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"w-0\"\u003e\\(w=0\\)\u003c/h2\u003e\n\u003cp\u003eWe have that \\(w=0\\). 
Replacing that in the above expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u + 0 \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFrom this expression, or the top one from before \\((\\lambda -1 ) u = 0\\), we have that \\(\\lambda = 1\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nPv \u0026amp;= \\lambda v \\\\\n\u0026amp;= v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAny valid solution for \\(v\\) is an eigenvector.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = v \\\\\n\\Rightarrow\\ \u0026amp; u = v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eHence, all \\(u \\in U\\) is an eigenvector of \\(P\\) with eigenvalue \\(1\\).\u003c/p\u003e\n\u003ch2 id=\"u-0\"\u003e\\(u=0\\)\u003c/h2\u003e\n\u003cp\u003eWe now have that \\(u=0\\). 
So, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = \\lambda v \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; u = \\lambda u + \\lambda w \\\\\n\\Rightarrow\\ \u0026amp; 0 = \\lambda w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFrom this expression, or the bottom one from \\(\\lambda w = 0\\), we have that \\(\\lambda = 0\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nPv \u0026amp;= \\lambda v \\\\\n\u0026amp;= 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAny valid solution for \\(v\\) is an eigenvector.\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;Pv = 0 \\\\\n\\Rightarrow\\ \u0026amp; P(u+w) = 0 \\\\\n\\Rightarrow\\ \u0026amp; u = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall now that \\(v = u+w\\), so \\(v = 0 +w\\), making \\(v = w\\).\u003c/p\u003e\n\u003cp\u003eHence, all \\(w \\in W\\) is an eigenvector of \\(P\\) with eigenvalue \\(0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_14/","tags":null,"title":"NUS-MATH530 5.A Problem 14"},{"categories":null,"contents":"Warmup: 35\nSuppose \\(V\\) is finite dimensional, \\(T \\in \\mathcal{L}(V)\\) and \\(U\\) is invariant under \\(T\\). Prove each eigenvalue of \\(T / U\\) is an eigenvalue of \\(T\\).\nNow, \\(\\lambda\\) is an eigenvalue of \\(T / U\\). That is:\n\\begin{equation} Tv + U = \\lambda v + U \\end{equation}\nMeaning:\n\\begin{equation} (T-\\lambda I) v \\in U, \\forall v \\in V \\end{equation}\nSuppose for the sake of contradiction \\(\\lambda\\) is not an eigenvalue of \\(T\\). This means no \\(\\lambda\\) such that \\(Tv = \\lambda v\\); specifically, that means also no \\(\\lambda\\) such that \\(T|_{u} u = \\lambda u\\). 
Now, that means \\(T|_{u} - \\lambda I\\) is invertible given finite dimensional \\(V\\).\nThe previous statement means that \\((T|_{u} - \\lambda I)\\) is surjective across \\(u\\):\n\\begin{equation} \\forall v, \\exists u: (T-\\lambda I)v = (T|_{u}-\\lambda I) u \\end{equation}\nAnd so:\n\\begin{equation} Tv - \\lambda v = Tu - \\lambda u \\end{equation}\nFinally, then:\n\\begin{equation} T(v-u) = \\lambda (v-u) \\end{equation}\nNow, \\(v + U\\) being an eigenvector of \\(T / U\\) requires that \\(v + U \\neq 0\\), which means \\(v \\not \\in U\\). And so, \\(v \\neq u\\) meaning \\(v-u \\neq 0\\). Hence, the above expression demonstrates \\(\\lambda\\) to be an eigenvalue of \\(T\\), reaching contradiction. \\(\\blacksquare\\)\nNow: 36\nRemoving finite-dimensional from the requirements above, demonstrate the result above breaks.\nLet \\(V = \\mathcal{P}(\\mathbb{F})\\) and let \\(T\\) be differentiation. Now, let \\(U\\) be \\(P_{2}(\\mathbb{F})\\). Now:\n\\begin{equation} T / U (v + U) = \\lambda v + U \\end{equation}\nlet \\(v \\in \\mathcal{P}_{3}(\\mathbb{F})\\). Now, then, \\(T / U (v + U) = Tv + U\\), with \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\). Hence, \\(T / U (v + U) = Tv + U = 0 + U\\). 
This makes \\(0\\) an eigenvalue and \\(u \\in \\mathcal{P}_{2}(\\mathbb{F})\\) eigenvectors.\nOf course this does not hold for \\(T\\) in general as all \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\) are not identically \\(0\\).\nHaving shown a counter-example, \\(\\blacksquare\\)\nDo we have finite-dimensions?\n\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not injective\u0026mdash;\n\\(T\\) being not injective means that \\(null\\ T\\) has more than just the zero vector.\nHence:\n\\begin{equation} \\exists v: Tv = 0 = 0 v \\end{equation}\nThat would make all nonzero \\(v \\in null\\ T\\) eigenvectors and \\(0\\) an eigenvalue.\n\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not surjective\u0026mdash;\n\\(T\\) being not surjective means that \\(range\\ T \\subset V\\) strictly. So then \\(T|_{range\\ T}\\) is an operator so \\(range\\ T\\) is an invariant subspace under \\(T\\).\nEither way, we have that an eigenvalue exist.\n","html":"\u003cp\u003eWarmup: 35\u003c/p\u003e\n\u003cp\u003eSuppose \\(V\\) is finite dimensional, \\(T \\in \\mathcal{L}(V)\\) and \\(U\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\). Prove each eigenvalue of \\(T / U\\) is an eigenvalue of \\(T\\).\u003c/p\u003e\n\u003cp\u003eNow, \\(\\lambda\\) is an eigenvalue of \\(T / U\\). That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv + U = \\lambda v + U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(T-\\lambda I) v \\in U, \\forall v \\in V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSuppose for the sake of contradiction \\(\\lambda\\) is not an eigenvalue of \\(T\\). This means no \\(\\lambda\\) such that \\(Tv = \\lambda v\\); specifically, that means also no \\(\\lambda\\) such that \\(T|_{u} u = \\lambda u\\). 
Now, that means \\(T|_{u} - \\lambda I\\) is invertible given finite dimensional \\(V\\).\u003c/p\u003e\n\u003cp\u003eThe previous statement means that \\((T|_{u} - \\lambda I)\\) is surjective across \\(u\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall v, \\exists u: (T-\\lambda I)v = (T|_{u}-\\lambda I) u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nTv - \\lambda v = Tu - \\lambda u\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(v-u) = \\lambda (v-u)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, \\(v + U\\) being an eigenvector of \\(T / U\\) requires that \\(v + U \\neq 0\\), which means \\(v \\not \\in U\\). And so, \\(v \\neq u\\) meaning \\(v-u \\neq 0\\). Hence, the above expression demonstrates \\(\\lambda\\) to be an eigenvalue of \\(T\\), reaching contradiction. \\(\\blacksquare\\)\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNow: 36\u003c/p\u003e\n\u003cp\u003eRemoving finite-dimensional from the requirements above, demonstrate the result above breaks.\u003c/p\u003e\n\u003cp\u003eLet \\(V = \\mathcal{P}(\\mathbb{F})\\) and let \\(T\\) be differentiation. Now, let \\(U\\) be \\(P_{2}(\\mathbb{F})\\). Now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT / U (v + U) = \\lambda v + U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet \\(v \\in \\mathcal{P}_{3}(\\mathbb{F})\\). Now, then, \\(T / U (v + U) = Tv + U\\), with \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\). Hence, \\(T / U (v + U) = Tv + U = 0 + U\\). 
This makes \\(0\\) an eigenvalue and \\(u \\in \\mathcal{P}_{2}(\\mathbb{F})\\) eigenvectors.\u003c/p\u003e\n\u003cp\u003eOf course this does not hold for \\(T\\) in general as all \\(Tv \\in \\mathcal{P}_{2}(\\mathbb{F})\\) are not identically \\(0\\).\u003c/p\u003e\n\u003cp\u003eHaving shown a counter-example, \\(\\blacksquare\\)\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eDo we have finite-dimensions?\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not injective\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\(T\\) being not injective means that \\(null\\ T\\) has more than just the zero vector.\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\exists v: Tv = 0 = 0 v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat would make all nonzero \\(v \\in null\\ T\\) eigenvectors and \\(0\\) an eigenvalue.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Not invertible\u0026rdquo; =\u0026gt; not surjective\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\(T\\) being not surjective means that \\(range\\ T \\subset V\\) strictly. So then \\(T|_{range\\ T}\\) is an operator so \\(range\\ T\\) is an invariant subspace under \\(T\\).\u003c/p\u003e\n\u003cp\u003eEither way, we have that an eigenvalue exist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_35_36/","tags":null,"title":"NUS-MATH530 5.A Problem 35/36"},{"categories":null,"contents":" Suppose \\(T \\in \\mathcal{L}(V)\\) has a diagonal matrix \\(A\\) w.r.t. some basis of \\(V\\), and that \\(\\lambda \\in \\mathbb{F}\\). 
Prove that \\(\\lambda\\) appears on the diagonal of \\(A\\) precisely \\(\\dim E(\\lambda, T)\\) times.\nAside: \u0026ldquo;to appear on the diagonal \\(n\\) times\u0026rdquo; We want to begin by giving a description for what \u0026ldquo;appearing on the diagonal\u0026rdquo; of a diagonal matrix implies.\nA diagonal matrix is a special-case upper-triangular matrix, so a value being on its diagonal implies it to be an eigenvalue.\nFurthermore, let \\(v_1, \u0026hellip; v_{n}\\) be the eigenvector-basis which gives the diagonal matrix aforementioned for \\(A\\). By calculation (i.e. properties of multiplying some all-but-one-zero \u0026ldquo;one hot\u0026rdquo; vector to the diagonal representation of \\(A\\)), if \\(\\lambda\\) appears \\(j\\) times on the diagonal representation of \\(A\\), \\(j\\) basis vectors of \\(V\\) will belong to the same eigenvector \\(j\\) as they all will produce \\(Tv = \\lambda v\\) when applied to the diagonal representation of \\(A\\)).\nAnd finally, because basis vectors are linearly independent, we have that if a value \\(\\lambda\\) appears on the diagonal of a diagonal matrix of \\(A\\) \\(n\\) times, it implies that \\(A\\) has \\(n\\) linearly independent eigenvectors all belonging to \\(\\lambda\\) which forms the basis for which the diagonal matrix is built.\nProof To complete the proof, we now perform casework.\n\\(\\lambda\\) appears \\(0\\) times Per our discussion above, this implies that there are \\(0\\) (trivially linearly independent) eigenvectors for which \\(\\lambda\\) serves as its eigenvalue. Namely, that means \\(\\lambda\\) is not an eigenvalue of \\(A\\). And therefore, we have that \\(T - \\lambda I\\) is injective, and hence \\(null (T - \\lambda I) = {0}\\). Recall that \\(E(\\lambda, T) = null(T-\\lambda I)\\). 
We now have \\(\\dim\\ E(\\lambda, T) = 0\\), as desired.\n\\(\\lambda\\) appears \\(n\\) times Again from above, we have \\(n\\) linearly-independent eigenvectors belonging to the same eigenvalue \\(\\lambda\\) which forms the basis out of which the diagonal matrix is built. Therefore, one can take at least \\(n\\) linearly independent vectors from \\(E(\\lambda, T)\\) as \\(null(T- \\lambda I)\\) is the space of all eigenvectors belonging to \\(\\lambda\\) and the zero vector. This makes \\(\\dim E(\\lambda, T)\\) at least \\(n\\).\nTo show that \\(\\dim E(\\lambda, T)\\) is exactly \\(n\\), let\u0026rsquo;s suppose the contrary. Let \\(v\\) be another eigenvector belonging to \\(\\lambda\\) linearly independent to the previous \\(n\\) already discussed. \\(v\\) would be linearly independent to all other members of the eigenvector-basis of \\(V\\): as eigenvectors from distinct eigenvalues are linearly independent and we hypothesized that \\(v\\) is linearly independent to the other eigenvectors belonging to \\(\\lambda\\).\nYet, this is not possible: \\(v \\in V\\) cannot create a linearly independent list conjoined to a basis of \\(V\\). Reaching contradiction, we see that \\(\\dim E(\\lambda, T) = n\\) as desired. \\(\\blacksquare\\)\n","html":"\u003chr\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\) has a diagonal matrix \\(A\\) w.r.t. some basis of \\(V\\), and that \\(\\lambda \\in \\mathbb{F}\\). 
Prove that \\(\\lambda\\) appears on the diagonal of \\(A\\) precisely \\(\\dim E(\\lambda, T)\\) times.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"aside-to-appear-on-the-diagonal-n-times\"\u003eAside: \u0026ldquo;to appear on the diagonal \\(n\\) times\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eWe want to begin by giving a description for what \u0026ldquo;appearing on the diagonal\u0026rdquo; of a diagonal matrix implies.\u003c/p\u003e\n\u003cp\u003eA diagonal matrix is a special-case upper-triangular matrix, so a value being on its diagonal implies it to be an eigenvalue.\u003c/p\u003e\n\u003cp\u003eFurthermore, let \\(v_1, \u0026hellip; v_{n}\\) be the eigenvector-basis which gives the diagonal matrix aforementioned for \\(A\\). By calculation (i.e. properties of multiplying some all-but-one-zero \u0026ldquo;one hot\u0026rdquo; vector to the diagonal representation of \\(A\\)), if \\(\\lambda\\) appears \\(j\\) times on the diagonal representation of \\(A\\), \\(j\\) basis vectors of \\(V\\) will belong to the same eigenvector \\(j\\) as they all will produce \\(Tv = \\lambda v\\) when applied to the diagonal representation of \\(A\\)).\u003c/p\u003e\n\u003cp\u003eAnd finally, because basis vectors are linearly independent, we have that if a value \\(\\lambda\\) appears on the diagonal of a diagonal matrix of \\(A\\) \\(n\\) times, it implies that \\(A\\) has \\(n\\) linearly independent eigenvectors all belonging to \\(\\lambda\\) which forms the basis for which the diagonal matrix is built.\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003eProof\u003c/h2\u003e\n\u003cp\u003eTo complete the proof, we now perform casework.\u003c/p\u003e\n\u003ch3 id=\"lambda-appears-0-times\"\u003e\\(\\lambda\\) appears \\(0\\) times\u003c/h3\u003e\n\u003cp\u003ePer our discussion above, this implies that there are \\(0\\) (trivially linearly independent) eigenvectors for which \\(\\lambda\\) serves as its eigenvalue. Namely, that means \\(\\lambda\\) is not an eigenvalue of \\(A\\). 
And therefore, we have that \\(T - \\lambda I\\) is injective, and hence \\(null (T - \\lambda I) = {0}\\). Recall that \\(E(\\lambda, T) = null(T-\\lambda I)\\). We now have \\(\\dim\\ E(\\lambda, T) = 0\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"lambda-appears-n-times\"\u003e\\(\\lambda\\) appears \\(n\\) times\u003c/h3\u003e\n\u003cp\u003eAgain from above, we have \\(n\\) linearly-independent eigenvectors belonging to the same eigenvalue \\(\\lambda\\) which forms the basis out of which the diagonal matrix is built. Therefore, one can take at least \\(n\\) linearly independent vectors from \\(E(\\lambda, T)\\) as \\(null(T- \\lambda I)\\) is the space of all eigenvectors belonging to \\(\\lambda\\) and the zero vector. This makes \\(\\dim E(\\lambda, T)\\) at least \\(n\\).\u003c/p\u003e\n\u003cp\u003eTo show that \\(\\dim E(\\lambda, T)\\) is exactly \\(n\\), let\u0026rsquo;s suppose the contrary. Let \\(v\\) be another eigenvector belonging to \\(\\lambda\\) linearly independent to the previous \\(n\\) already discussed. \\(v\\) would be linearly independent to all other members of the eigenvector-basis of \\(V\\): as eigenvectors from distinct eigenvalues are linearly independent and we hypothesized that \\(v\\) is linearly independent to the other eigenvectors belonging to \\(\\lambda\\).\u003c/p\u003e\n\u003cp\u003eYet, this is not possible: \\(v \\in V\\) cannot create a linearly independent list conjoined to a basis of \\(V\\). Reaching contradiction, we see that \\(\\dim E(\\lambda, T) = n\\) as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_5_c_problem_7/","tags":null,"title":"NUS-MATH530 5.C Problem 7"},{"categories":null,"contents":"Standard Bases Back and Fourth To map the vectors from \\(B_2\\) back to the standard bases, we simply have to construct the map:\n\\begin{equation} \\mqty(2 \u0026amp; 1 \u0026amp; 2 \\\\ 1\u0026amp; 1\u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0) \\end{equation}\nEach of the \u0026ldquo;standard\u0026rdquo; vectors in the new basis, when applied to this matrix, gets moved back to their original representation.\nPresumably, then, moving \u0026ldquo;forward\u0026rdquo; into the new space is simply taking the inverse of this vector, which we will do separately; its inverse is:\n\\begin{equation} \\mqty(\\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; \\frac{3}{7} \\\\ \\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; -\\frac{4}{7} \\\\ \\frac{2}{7} \u0026amp; -\\frac{3}{7} \u0026amp; -\\frac{1}{7}) \\end{equation}\nNow. The former matrix will map vectors in terms of \\(B_2\\) into \\(B_1\\), and the latter \\(B_1\\) into \\(B_2\\).\nMapping Back and Forth We can apply the latter matrix to the vector to change its basis:\n\\begin{equation} \\mqty(1 \\\\ 1 \\\\ 0) \\end{equation}\nThis means that, as a linear combination of \\(B_2\\), we have:\n\\begin{equation} 1 \\mqty(2 \\\\ 1 \\\\ 1) + 1 \\mqty(1 \\\\ 1 \\\\ -1) + 0 \\mqty(2 \\\\ -1 \\\\0) \\end{equation}\nAnd the vector \\(\\mqty(1\\\\1\\\\0)\\) aforementioned is the representation of the vector desired in terms of basis \\(B_2\\). The desired matrix mapping in the second matrix above.\nGenerally For mapping from a new basis to the standard basis, simply arrange the vectors that form the basis as columns of a matrix. To map from the standard basis towards the new ones, invert that map. 
If mappings between two basis are needed, and they are both expressed in terms of the standard basis, compose the maps.\n","html":"\u003ch2 id=\"standard-bases-back-and-fourth\"\u003eStandard Bases Back and Fourth\u003c/h2\u003e\n\u003cp\u003eTo map the vectors from \\(B_2\\) back to the standard bases, we simply have to construct the map:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(2 \u0026amp; 1 \u0026amp; 2 \\\\ 1\u0026amp; 1\u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach of the \u0026ldquo;standard\u0026rdquo; vectors in the new basis, when applied to this matrix, gets moved back to their original representation.\u003c/p\u003e\n\u003cp\u003ePresumably, then, moving \u0026ldquo;forward\u0026rdquo; into the new space is simply taking the inverse of this vector, which we will do separately; its inverse is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(\\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; \\frac{3}{7} \\\\ \\frac{1}{7} \u0026amp; \\frac{2}{7} \u0026amp; -\\frac{4}{7} \\\\ \\frac{2}{7} \u0026amp; -\\frac{3}{7} \u0026amp; -\\frac{1}{7})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow. The former matrix will map vectors in terms of \\(B_2\\) into \\(B_1\\), and the latter \\(B_1\\) into \\(B_2\\).\u003c/p\u003e\n\u003ch2 id=\"mapping-back-and-forth\"\u003eMapping Back and Forth\u003c/h2\u003e\n\u003cp\u003eWe can apply the latter matrix to the vector to change its basis:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(1 \\\\ 1 \\\\ 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means that, as a linear combination of \\(B_2\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 \\mqty(2 \\\\ 1 \\\\ 1) + 1 \\mqty(1 \\\\ 1 \\\\ -1) + 0 \\mqty(2 \\\\ -1 \\\\0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd the vector \\(\\mqty(1\\\\1\\\\0)\\) aforementioned is the representation of the vector desired in terms of basis \\(B_2\\). 
The desired matrix mapping in the second matrix above.\u003c/p\u003e\n\u003ch2 id=\"generally\"\u003eGenerally\u003c/h2\u003e\n\u003cp\u003eFor mapping from a new basis to the standard basis, simply arrange the vectors that form the basis as columns of a matrix. To map from the standard basis towards the new ones, invert that map. If mappings between two basis are needed, and they are both expressed in terms of the standard basis, compose the maps.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_changing_bases/","tags":null,"title":"NUS-MATH530 Changing Bases"},{"categories":null,"contents":"Dot product Calculations Let\u0026rsquo;s calculate some dot products!\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 0 \\end{pmatrix} \\cdot \\begin{pmatrix} 0 \\\\ 1 \\end{pmatrix} = 0 \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\2 \\end{pmatrix} \\cdot \\begin{pmatrix} 2 \\\\1 \\end{pmatrix} = 4 \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 1 \\end{pmatrix} \\cdot \\begin{pmatrix} -1 \\\\1 \\end{pmatrix} = 0 \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\1 \\end{pmatrix} \\cdot \\begin{pmatrix} 2 \\\\ 2 \\end{pmatrix} = 4 \\end{equation}\nInterpretation Geometrically, the intepretation of the dot product is the magnitude that comes from scaling the bottom projected value by the top value. 
This is essentially multiplying the proportion of one vector that\u0026rsquo;s parallel to the other by each other.\nCross Product Calculations Cross products!\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 0 \\\\ 1 \\end{pmatrix} \\times \\begin{pmatrix} -1 \\\\ 0 \\\\ 1 \\end{pmatrix} = \\begin{pmatrix} 1 \\\\ -1 \\\\0 \\end{pmatrix} \\end{equation}\n\\begin{equation} \\begin{pmatrix} 1 \\\\ 1 \\\\ -1 \\end{pmatrix} \\times \\begin{pmatrix} 0 \\\\ 0 \\\\ 2 \\end{pmatrix} = \\begin{pmatrix} 2 \\\\ -2 \\\\0 \\end{pmatrix} \\end{equation}\nThe dot product is the point that is perpendicular to the other two input vectors.\nWhy its not a field We want to check why the multiplication of vectors in \\(\\mathbb{F}^{3}\\) via taking the cross product cannot form a field.\nWe can safely assume that the addition operation of the vectors derive their closure, commutativity, associativity from these properties in \\(\\mathbb{F}\\).\nTherefore, we will verify these properties with multiplication. The only closed multiplication-like operation of vectors is the cross-product. Let\u0026rsquo;s first define the cross-product.\nGiven two vectors in \\(\\mathbb{F}^{3}\\):\n\\begin{equation} \\begin{pmatrix} a \\\\b \\\\ c \\end{pmatrix}, \\begin{pmatrix} d \\\\ e\\\\f \\end{pmatrix} \\end{equation}\nTheir cross product is the vector in \\(\\mathbb{F}^{3}\\) defined by:\n\\begin{equation} \\begin{vmatrix} \\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\ a \u0026amp; b \u0026amp; c \\\\ d \u0026amp; e \u0026amp; f \\end{vmatrix} \\end{equation}\nTaking the actual determinant, we have that:\n\\begin{equation} \\begin{vmatrix} \\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\ a \u0026amp; b \u0026amp; c \\\\ d \u0026amp; e \u0026amp; f \\end{vmatrix} = \\begin{pmatrix} bf-ce \\\\ dc-af \\\\ ac-db \\end{pmatrix} \\end{equation}\nIdentity Let\u0026rsquo;s first figure the identity of this operation. 
We wish to figure some \\((a,b,c)\\) such that the result of the cross product would be \\((d,e,f)\\).\nGeometrically, the perpendicularity of a vector is the resulting value of the cross product; however, no vector (apart from \\(\\vec{0}\\)) can be perfectly perpendicular to itself exactly. This would indicate that no such identities exist.\nWe can also observe that there is no \\(f\\) term on the bottom of the cross product. This would indicate that no combination of \\((a,b,c)\\) can construct the needed \\(f\\) on the last entry.\nFinally, for a more formal proof.\nProof: there can not exist a field-like identity for a cross product.\nFor the sake of contradiction let\u0026rsquo;s say for some nonzero vector \\(\\vec{v} \\in \\mathbb{F}^{3}\\), there exists some identity named \\(\\vec{e} \\in \\mathbb{F}^{3}\\) that follows the properties of identities in a field.\nWe first have that:\n\\begin{equation} \\vec{e} \\times \\vec{v} = \\vec{v} \\end{equation}\nby the definition of the identity.\nAnd also that:\n\\begin{equation} \\vec{v} \\times \\vec{e}= \\vec{v} \\end{equation}\nby the fact that field-like operations commutes.\nWe have also the property of cross products that:\n\\begin{align} \u0026amp;\\vec{a} \\times \\vec{b} = -(\\vec{b} \\times \\vec{a}) \\\\ \\Rightarrow\\ \u0026amp; \\vec{a} \\times \\vec{b} + \\vec{b} \\times \\vec{a} = 0 \\end{align}\nBy applying the inverse of \\(-(\\vec{b}\\times \\vec{a})\\) to both sides, as cross products are closed and therefore an additive inverse exists.\nTherefore, we have that:\n\\begin{equation} \\vec{v} + \\vec{v} = 0 \\end{equation}\nWe see then \\(\\vec{v}\\) is its own additive inverse. Therefore \\(\\vec{v}\\) itself is also \\(0\\). But we established that \\(\\vec{v}\\) can be non-zero. Reaching contradiction, \\(\\blacksquare\\). (this is iffy)\nCommutativity Because of the fact that two-by-two matricies exists on the diagonals, the cross product is also not commutative. 
In fact,\nDeterminants The geometric interpretation of the determinants is the change in area inside a vector which it stretches a given vector.\n","html":"\u003ch2 id=\"dot-product\"\u003eDot product\u003c/h2\u003e\n\u003ch3 id=\"calculations\"\u003eCalculations\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s calculate some dot products!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 0\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n0 \\\\ 1\n\\end{pmatrix} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\2\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n2 \\\\1\n\\end{pmatrix} = 4\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 1\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n-1 \\\\1\n\\end{pmatrix} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\1\n\\end{pmatrix} \\cdot \\begin{pmatrix}\n2 \\\\ 2\n\\end{pmatrix} = 4\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"interpretation\"\u003eInterpretation\u003c/h3\u003e\n\u003cp\u003eGeometrically, the intepretation of the dot product is the magnitude that comes from scaling the bottom projected value by the top value. 
This is essentially multiplying the proportion of one vector that\u0026rsquo;s parallel to the other by each other.\u003c/p\u003e\n\u003ch2 id=\"cross-product\"\u003eCross Product\u003c/h2\u003e\n\u003ch3 id=\"calculations\"\u003eCalculations\u003c/h3\u003e\n\u003cp\u003eCross products!\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 0 \\\\ 1\n\\end{pmatrix} \\times \\begin{pmatrix}\n-1 \\\\ 0 \\\\ 1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \\\\ -1 \\\\0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\ 1 \\\\ -1\n\\end{pmatrix} \\times \\begin{pmatrix}\n0 \\\\ 0 \\\\ 2\n\\end{pmatrix} = \\begin{pmatrix}\n2 \\\\ -2 \\\\0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-06_22-09-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe dot product is the point that is perpendicular to the other two input vectors.\u003c/p\u003e\n\u003ch3 id=\"why-its-not-a-field\"\u003eWhy its not a field\u003c/h3\u003e\n\u003cp\u003eWe want to check why the multiplication of vectors in \\(\\mathbb{F}^{3}\\) via taking the cross product cannot form a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe can safely assume that the addition operation of the vectors derive their closure, \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e from these properties in \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003eTherefore, we will verify these properties with \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e. The only closed multiplication-like operation of vectors is the cross-product. 
Let\u0026rsquo;s first define the cross-product.\u003c/p\u003e\n\u003cp\u003eGiven two vectors in \\(\\mathbb{F}^{3}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \\\\b \\\\ c\n\\end{pmatrix}, \\begin{pmatrix}\nd \\\\ e\\\\f\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTheir cross product is the vector in \\(\\mathbb{F}^{3}\\) defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{vmatrix}\n\\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\\na \u0026amp; b \u0026amp; c \\\\\nd \u0026amp; e \u0026amp; f\n\\end{vmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the actual determinant, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{vmatrix}\n\\hat{i} \u0026amp; \\hat{j} \u0026amp; \\hat{k} \\\\\na \u0026amp; b \u0026amp; c \\\\\nd \u0026amp; e \u0026amp; f\n\\end{vmatrix} = \\begin{pmatrix}\nbf-ce \\\\\ndc-af \\\\\nac-db\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"identity\"\u003eIdentity\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s first figure the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e of this operation. We wish to figure some \\((a,b,c)\\) such that the result of the cross product would be \\((d,e,f)\\).\u003c/p\u003e\n\u003cp\u003eGeometrically, the perpendicularity of a vector is the resulting value of the cross product; however, no vector (apart from \\(\\vec{0}\\)) can be perfectly perpendicular to itself exactly. This would indicate that no such identities exist.\u003c/p\u003e\n\u003cp\u003eWe can also observe that there is no \\(f\\) term on the bottom of the cross product. 
This would indicate that no combination of \\((a,b,c)\\) can construct the needed \\(f\\) on the last entry.\u003c/p\u003e\n\u003cp\u003eFinally, for a more formal proof.\u003c/p\u003e\n\u003cp\u003eProof: there can not exist a field-like \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e for a cross product.\u003c/p\u003e\n\u003cp\u003eFor the sake of contradiction let\u0026rsquo;s say for some nonzero vector \\(\\vec{v} \\in \\mathbb{F}^{3}\\), there exists some identity named \\(\\vec{e} \\in \\mathbb{F}^{3}\\) that follows the properties of identities in a field.\u003c/p\u003e\n\u003cp\u003eWe first have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{e} \\times \\vec{v} = \\vec{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby the definition of the identity.\u003c/p\u003e\n\u003cp\u003eAnd also that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{v} \\times \\vec{e}= \\vec{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby the fact that field-like operations commutes.\u003c/p\u003e\n\u003cp\u003eWe have also the property of cross products that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\vec{a} \\times \\vec{b} = -(\\vec{b} \\times \\vec{a}) \\\\\n\\Rightarrow\\ \u0026amp; \\vec{a} \\times \\vec{b} + \\vec{b} \\times \\vec{a} = 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBy applying the inverse of \\(-(\\vec{b}\\times \\vec{a})\\) to both sides, as cross products are closed and therefore an additive inverse exists.\u003c/p\u003e\n\u003cp\u003eTherefore, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{v} + \\vec{v} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe see then \\(\\vec{v}\\) is its own \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e. Therefore \\(\\vec{v}\\) itself is also \\(0\\). But we established that \\(\\vec{v}\\) can be non-zero. Reaching contradiction, \\(\\blacksquare\\). 
(this is iffy)\u003c/p\u003e\n\u003ch3 id=\"commutativity\"\u003eCommutativity\u003c/h3\u003e\n\u003cp\u003eBecause of the fact that two-by-two \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e exists on the diagonals, the cross product is also not commutative. In fact,\u003c/p\u003e\n\u003ch2 id=\"determinants\"\u003eDeterminants\u003c/h2\u003e\n\u003cp\u003eThe geometric interpretation of the \u003ca href=\"/posts/kbhmatricies/#determinants\"\u003edeterminants\u003c/a\u003e is the change in area inside a vector which it stretches a given vector.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_geometric_intepretations/","tags":null,"title":"NUS-MATH530 Geometric Intepretations"},{"categories":null,"contents":"Let \\(\\lambda_{m}\\) be an eigenvalue for \\(T\\) an operator on complex finite-dimensional \\(V\\). Let \\(m\\) be the geometric multiplicity of \\(\\lambda_{m}\\). We desire that the algebraic multiplicity is at least \\(m\\). Let \\(\\dim v = n\\).\nWe have that \\(m\\) is the geometric multiplicity of \\(\\lambda_{m}\\), meaning:\n\\begin{equation} \\dim E(\\lambda_{m}, T) = m \\end{equation}\nThis means we can take \\(m\\) linearly independent eigenvectors from \\(V\\). Extend this list now to a basis of \\(V\\) with \\(v_1, \u0026hellip;v_{m}, u_{1}, u_{n-m}\\).\nConstruct a matrix via this basis. By construction, the first \\(m \\times m\\) of this matrix would appear diagonal (as each \\(Tv = \\lambda v\\)). Furthermore, the diagonal of this sub-matrix would simply contain \\(\\lambda\\) repeated \\(m\\) times.\nTake now \\(A = \\mathcal{M}(T)-\\lambda I\\).\nTake the determinant of this matrix \\(A\\) now against the first column, yielding a characteristic polynomial with at least \\(m\\) factors with \\(\\lambda\\). Hence, the algebraic multiplicity of \\(\\lambda_{m}\\) is at least \\(m\\), as desired. 
\\(\\blacksquare\\)\n","html":"\u003cp\u003eLet \\(\\lambda_{m}\\) be an eigenvalue for \\(T\\) an operator on complex finite-dimensional \\(V\\). Let \\(m\\) be the geometric multiplicity of \\(\\lambda_{m}\\). We desire that the algebraic multiplicity is at least \\(m\\). Let \\(\\dim v = n\\).\u003c/p\u003e\n\u003cp\u003eWe have that \\(m\\) is the geometric multiplicity of \\(\\lambda_{m}\\), meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim E(\\lambda_{m}, T) = m\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means we can take \\(m\\) linearly independent eigenvectors from \\(V\\). Extend this list now to a basis of \\(V\\) with \\(v_1, \u0026hellip;v_{m}, u_{1}, u_{n-m}\\).\u003c/p\u003e\n\u003cp\u003eConstruct a matrix via this basis. By construction, the first \\(m \\times m\\) of this matrix would appear diagonal (as each \\(Tv = \\lambda v\\)). Furthermore, the diagonal of this sub-matrix would simply contain \\(\\lambda\\) repeated \\(m\\) times.\u003c/p\u003e\n\u003cp\u003eTake now \\(A = \\mathcal{M}(T)-\\lambda I\\).\u003c/p\u003e\n\u003cp\u003eTake the determinant of this matrix \\(A\\) now against the first column, yielding a characteristic polynomial with at least \\(m\\) factors with \\(\\lambda\\). Hence, the algebraic multiplicity of \\(\\lambda_{m}\\) is at least \\(m\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_geometric_multiplicity/","tags":null,"title":"NUS-MATH530 Geometric Multiplicity"},{"categories":null,"contents":" Date Link \u0026lt;2022-09-09 Fri\u0026gt; NUS-MATH530 Solving Systems \u0026lt;2020-09-09 Wed\u0026gt; NUS-MATH530 Geometric Intepretations \u0026lt;2022-09-13 Tue\u0026gt; NUS-MATH530 Linear Vehicles \u0026lt;2022-09-15 Thu\u0026gt; NUS-MATH530 Plane and 1.B \u0026lt;2022-09-27 Tue\u0026gt; NUS-MATH530 1.C Problem 23 \u0026lt;2022-10-29 Sat\u0026gt; NUS-MATH530 2.C Problem 17 \u0026lt;2022-11-16 Wed\u0026gt; NUS-MATH530 3.B Problem 20 \u0026lt;2023-01-23 Mon\u0026gt; NUS-MATH530 3.E Problem 1 \u0026lt;2023-02-14 Tue\u0026gt; NUS-MATH530 5.A and Discussion \u0026lt;2023-02-20 Mon\u0026gt; NUS-MATH530 5.A Problem 14 \u0026lt;2023-03-16 Thu\u0026gt; NUS-MATH530 Changing Bases \u0026lt;2023-03-28 Tue\u0026gt; NUS-MATH530 5.C Problem 7 \u0026lt;2023-04-07 Fri\u0026gt; NUS-MATH530 Geometric Multiplicity \u0026lt;2023-04-12 Wed\u0026gt; NUS-MATH530 Some 6.A Problems \u0026lt;2023-04-14 Fri\u0026gt; NUS-MATH530 Similar to Diagonal \u0026lt;2023-05-04 Thu\u0026gt; NUS-MATH530 Matrix Adjectives ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-09 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_solving_systems/\"\u003eNUS-MATH530 Solving Systems\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2020-09-09 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca 
href=\"/posts/kbhnus_math530_geometric_intepretations/\"\u003eNUS-MATH530 Geometric Intepretations\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-13 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_linear_vehicles/\"\u003eNUS-MATH530 Linear Vehicles\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-15 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_plane_and_1_b/\"\u003eNUS-MATH530 Plane and 1.B\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-09-27 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_1_c_proof_preso/\"\u003eNUS-MATH530 1.C Problem 23\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-10-29 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_2_c_problem_17/\"\u003eNUS-MATH530 2.C Problem 17\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2022-11-16 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_3_b_problem_20/\"\u003eNUS-MATH530 3.B Problem 20\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-01-23 
Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_3_e_problem_1/\"\u003eNUS-MATH530 3.E Problem 1\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-02-14 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_5_a_and_discussion/\"\u003eNUS-MATH530 5.A and Discussion\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-02-20 Mon\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_5_a_problem_14/\"\u003eNUS-MATH530 5.A Problem 14\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-03-16 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_changing_bases/\"\u003eNUS-MATH530 Changing Bases\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-03-28 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_5_c_problem_7/\"\u003eNUS-MATH530 5.C Problem 7\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-04-07 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_geometric_multiplicity/\"\u003eNUS-MATH530 Geometric Multiplicity\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan 
class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-04-12 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_some_6_a_problems/\"\u003eNUS-MATH530 Some 6.A Problems\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-04-14 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eNUS-MATH530 Similar to Diagonal\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-05-04 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnus_math530_matrix_adjectives/\"\u003eNUS-MATH530 Matrix Adjectives\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_homework_index/","tags":null,"title":"NUS-MATH530 Homework Index"},{"categories":null,"contents":"Infinite Plane Two Vehicles Yes. Though the travel of the two vehicles are not entirely independent, the second vehicle can diagonally traverse the plane while the first vehicle cuts across it. Practically, the question asks whether or not a combination of:\n\\begin{equation} \\alpha \\begin{pmatrix} 0 \\\\ 1 \\end{pmatrix} + \\beta \\begin{pmatrix} 1 \\\\ 1 \\end{pmatrix} \\end{equation}\nCan form all vectors in \\(\\mathbb{R}^2\\). 
Expanding that expression out, we have, given some point \\((a,b)\\) that:\n\\begin{equation} \\begin{pmatrix} \\beta \\\\ \\alpha + \\beta \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\end{pmatrix} \\end{equation}\nTherefor, we have expressions:\n\\begin{equation} \\begin{cases} \\beta = a \\\\ \\alpha +\\beta = b \\end{cases} \\end{equation}\nSubstituting the definition of \\(\\beta\\) then:\n\\begin{align} \u0026amp;\\alpha + a = b \\\\ \\Rightarrow\\ \u0026amp;\\alpha = b - a \\end{align}\nTherefore, we have that, for all desired locales \\((a,b)\\) we have a fully determined solution:\n\\begin{equation} \\begin{cases} \\alpha = b-a \\\\ \\beta = a \\end{cases} \\end{equation}\nThis means that some direction of travel in both vehicles will suffice.\nGoing Home Not necessarily. Graphically, after shifting yourself to some location upper-right, its impossible to move horizontally in the vertical-only vehicle.\nPractically, the question is asking that, if you are at some beginning location:\n\\begin{equation} \\begin{pmatrix} a \\\\b \\end{pmatrix} \\end{equation}\nCan we devise some travel that follows:\n\\begin{equation} \\alpha \\begin{pmatrix} 0 \\\\ 1 \\end{pmatrix} + \\begin{pmatrix} a \\\\b \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 0 \\end{pmatrix} \\end{equation}\nExpanding this out, we have expressions:\n\\begin{equation} \\begin{cases} 0 + a = 0 \\\\ \\alpha + b = 0 \\end{cases} \\end{equation}\nNamely, we have that:\n\\begin{equation} \\begin{cases} a = 0 \\\\ \\alpha = -b \\end{cases} \\end{equation}\nWe are therefore under-determined here; there is a solution only \\(\\forall a=0\\), but for no other \\(a\\).\nInfinite Space Pickup and Hoverboard We have:\n\\begin{equation} \\alpha \\begin{pmatrix} 1 \\\\ 1 \\\\ 0 \\end{pmatrix} + \\beta \\begin{pmatrix} 3 \\\\ -2 \\\\1 \\end{pmatrix} \\end{equation}\nto go to all directions \\((a,b,c)\\). 
Let\u0026rsquo;s try solving:\n\\begin{equation} \\begin{cases} \\alpha + 3\\beta = a\\\\ \\alpha -2\\beta = b \\\\ \\beta = c \\end{cases} \\end{equation}\nSubstituting the value for \\(\\beta=c\\), to the above equations, we have:\n\\begin{equation} \\begin{cases} \\alpha + 3c = a \\\\ \\alpha - 2c = b \\\\ \\end{cases} \\end{equation}\nAnd therefore, we have results:\n\\begin{equation} \\begin{cases} \\alpha = a-3c \\\\ \\alpha = b+ 2c \\end{cases} \\end{equation}\nThis equation is again over-determined. Therefore, you cannot get anywhere in your space; you can, however, get to all the points \\((a,b,c)\\) where:\n\\begin{equation} a-3c = b+2c \\end{equation}\nPickup, Hoverboard, AND Jetpack (part 1) We now have:\n\\begin{equation} \\alpha \\begin{pmatrix} 1 \\\\ 1 \\\\ 0 \\end{pmatrix} + \\beta \\begin{pmatrix} 3 \\\\ -2 \\\\ 1 \\end{pmatrix} + \\gamma \\begin{pmatrix} 0 \\\\ 1 \\\\ 1 \\end{pmatrix} \\end{equation}\nto go to all points \\((a,b,c)\\), we now try solving:\n\\begin{equation} \\begin{cases} \\alpha + 3\\beta = a \\\\ \\alpha -2\\beta + \\gamma = b\\\\ \\beta +\\gamma = c \\end{cases} \\end{equation}\nAt this point, it is probably easier to use a matrix to solve this expression. 
Hence, let\u0026rsquo;s solve:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 1 \u0026amp; -2 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\\\c \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s first subtract the first column from the second column:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 0 \u0026amp; -5 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = \\begin{pmatrix} a \\\\ b-a \\\\c \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s now rotate rows \\(2\\) and \\(3\\):\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 1\\\\ 0 \u0026amp; -5 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a \\\\c\\\\ b-a \\end{pmatrix} \\end{equation}\nGreat. Now let\u0026rsquo;s subtract thrice the second row towards the first row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; -3 \\\\ 0 \u0026amp; 1 \u0026amp; 1\\\\ 0 \u0026amp; -5 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c \\\\c\\\\ b-a \\end{pmatrix} \\end{equation}\nAnd add five times the second row to the last row\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; -3 \\\\ 0 \u0026amp; 1 \u0026amp; 1\\\\ 0 \u0026amp; 0 \u0026amp; 6 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c \\\\c\\\\ b-a+5c \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s subtract a sixth of the last row to the second row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; -3 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 6 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c 
\\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c \\end{pmatrix} \\end{equation}\nAnd add a half to the top row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 6 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c \\end{pmatrix} \\end{equation}\nAnd finally divide the bottom row by \\(6\\):\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = \\begin{pmatrix} a-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{{b-a+5c}}{6} \\end{pmatrix} \\end{equation}\nGreat. So now we have a fully determined solution \\(\\forall \\alpha, \\beta, \\gamma\\). Therefore, given a pair of location which you want to reach \\((a,b,c)\\), we can use the expressions above to solve for the values by which we have to move each vehicle.\nPickup, Hoverboard, AND Jetpack (part 2) We have the same problem, but with new numbers.\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 1 \u0026amp; -2 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} \\alpha \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = \\begin{pmatrix} a \\\\ b \\\\c \\end{pmatrix} \\end{equation}\nAt this point, we really want to be checking of the vectors which form this matrix is a spanning set; that is, after performing Gaussian elimination, do we get back a zero-row? 
If so, it will restrict some combination of our input \\((a,b,c)\\) to converge to \\(0\\).\nLet\u0026rsquo;s begin by subtracting the first row from second row\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 0 \u0026amp; -5 \u0026amp; -5 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\end{equation}\nAnd now, let\u0026rsquo;s divide the middle row by \\(\\frac{-1}{5}\\)\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s then subtract the middle row from the last row:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 5 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\\\ 0 \u0026amp; 0 \u0026amp; 0 \\end{pmatrix} \\end{equation}\nAlready we see an \\(0\\) row emerging. That means that some combination of the variables have to be \\(0\\) for a solution to exist: these vectors do not span the space and therefore we can\u0026rsquo;t get everywhere in space.\nPickup Breaks Down We now want to know if the following vectors would reach \\((0,0,0)\\) after driving the pickup some distance \\(d\\); that is, if we started at some \\((a,b,c)\\), can:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 3 \u0026amp; 0 \\\\ 1 \u0026amp; -2 \u0026amp; 1 \\\\ 0 \u0026amp; 1 \u0026amp; 1 \\\\ \\end{pmatrix} \\begin{pmatrix} 0 \\\\ \\beta \\\\ \\gamma \\end{pmatrix} = -\\begin{pmatrix} a \\\\ b \\\\ c \\end{pmatrix} \\end{equation}\nyield a solution? 
We have already performed the Gaussian Elimination above, therefore, we will skip directly to the solution:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0\\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} 0 \\\\ \\gamma \\\\ \\beta \\end{pmatrix} = -\\begin{pmatrix} a-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{{b-a+5c}}{6} \\end{pmatrix} \\end{equation}\nObviously the bottom few rows yield a solution, however, the top row places some limitation on our possible location. Namely, that:\n\\begin{equation} -\\frac{a+b-c}{2} = 0 \\end{equation}\nIf the locations you are at do not behave with these rules, a solution will not be yielded.\n","html":"\u003ch2 id=\"infinite-plane\"\u003eInfinite Plane\u003c/h2\u003e\n\u003ch3 id=\"two-vehicles\"\u003eTwo Vehicles\u003c/h3\u003e\n\u003cp\u003eYes. Though the travel of the two vehicles are not entirely independent, the second vehicle can diagonally traverse the plane while the first vehicle cuts across it. Practically, the question asks whether or not a combination of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n0 \\\\ 1\n\\end{pmatrix} + \\beta \\begin{pmatrix}\n1 \\\\ 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eCan form all vectors in \\(\\mathbb{R}^2\\). 
Expanding that expression out, we have, given some point \\((a,b)\\) that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n\\beta \\\\\n\\alpha + \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefor, we have expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\beta = a \\\\\n\\alpha +\\beta = b\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting the definition of \\(\\beta\\) then:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\alpha + a = b \\\\\n\\Rightarrow\\ \u0026amp;\\alpha = b - a\n\\end{align}\u003c/p\u003e\n\u003cp\u003eTherefore, we have that, for all desired locales \\((a,b)\\) we have a fully determined solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha = b-a \\\\\n\\beta = a\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis means that some direction of travel in both vehicles will suffice.\u003c/p\u003e\n\u003ch3 id=\"going-home\"\u003eGoing Home\u003c/h3\u003e\n\u003cp\u003eNot necessarily. 
Graphically, after shifting yourself to some location upper-right, its impossible to move horizontally in the vertical-only vehicle.\u003c/p\u003e\n\u003cp\u003ePractically, the question is asking that, if you are at some beginning location:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \\\\b\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eCan we devise some travel that follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n0 \\\\ 1\n\\end{pmatrix} + \\begin{pmatrix}\na \\\\b\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExpanding this out, we have expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n0 + a = 0 \\\\\n\\alpha + b = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNamely, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na = 0 \\\\\n\\alpha = -b\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are therefore under-determined here; there is a solution only \\(\\forall a=0\\), but for no other \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"infinite-space\"\u003eInfinite Space\u003c/h2\u003e\n\u003ch3 id=\"pickup-and-hoverboard\"\u003ePickup and Hoverboard\u003c/h3\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n1 \\\\ 1 \\\\ 0\n\\end{pmatrix} + \\beta \\begin{pmatrix}\n3 \\\\ -2 \\\\1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto go to all directions \\((a,b,c)\\). 
Let\u0026rsquo;s try solving:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha + 3\\beta = a\\\\\n\\alpha -2\\beta = b \\\\\n\\beta = c\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting the value for \\(\\beta=c\\), to the above equations, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha + 3c = a \\\\\n\\alpha - 2c = b \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd therefore, we have results:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha = a-3c \\\\\n\\alpha = b+ 2c\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis equation is again over-determined. Therefore, you cannot get anywhere in your space; you can, however, get to all the points \\((a,b,c)\\) where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na-3c = b+2c\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"pickup-hoverboard-and-jetpack--part-1\"\u003ePickup, Hoverboard, AND Jetpack (part 1)\u003c/h3\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\begin{pmatrix}\n1 \\\\ 1 \\\\ 0\n\\end{pmatrix} + \\beta \\begin{pmatrix}\n3 \\\\ -2 \\\\ 1\n\\end{pmatrix} + \\gamma \\begin{pmatrix}\n0 \\\\ 1 \\\\ 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto go to all points \\((a,b,c)\\), we now try solving:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\alpha + 3\\beta = a \\\\\n\\alpha -2\\beta + \\gamma = b\\\\\n\\beta +\\gamma = c\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt this point, it is probably easier to use a matrix to solve this expression. 
Hence, let\u0026rsquo;s solve:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n1 \u0026amp; -2 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b \\\\c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first subtract the first column from the second column:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n0 \u0026amp; -5 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b-a \\\\c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now rotate rows \\(2\\) and \\(3\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 1\\\\\n0 \u0026amp; -5 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\c\\\\ b-a\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. 
Now let\u0026rsquo;s subtract thrice the second row towards the first row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; -3 \\\\\n0 \u0026amp; 1 \u0026amp; 1\\\\\n0 \u0026amp; -5 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na-3c \\\\c\\\\ b-a\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd add five times the second row to the last row\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; -3 \\\\\n0 \u0026amp; 1 \u0026amp; 1\\\\\n0 \u0026amp; 0 \u0026amp; 6\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na-3c \\\\c\\\\ b-a+5c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s subtract a sixth of the last row to the second row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; -3 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 6\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na-3c \\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd add a half to the top row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 6\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ b-a+5c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd finally divide the bottom row by \\(6\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = \\begin{pmatrix}\na-3c + \\frac{b-a+5c}{2} 
\\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{{b-a+5c}}{6}\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGreat. So now we have a fully determined solution \\(\\forall \\alpha, \\beta, \\gamma\\). Therefore, given a pair of location which you want to reach \\((a,b,c)\\), we can use the expressions above to solve for the values by which we have to move each vehicle.\u003c/p\u003e\n\u003ch3 id=\"pickup-hoverboard-and-jetpack--part-2\"\u003ePickup, Hoverboard, AND Jetpack (part 2)\u003c/h3\u003e\n\u003cp\u003eWe have the same problem, but with new numbers.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n1 \u0026amp; -2 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n\\alpha \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = \\begin{pmatrix}\na \\\\ b \\\\c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt this point, we really want to be checking of the vectors which form this matrix is a spanning set; that is, after performing Gaussian elimination, do we get back a zero-row? 
If so, it will restrict some combination of our input \\((a,b,c)\\) to converge to \\(0\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin by subtracting the first row from second row\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n0 \u0026amp; -5 \u0026amp; -5 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, let\u0026rsquo;s divide the middle row by \\(\\frac{-1}{5}\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n0 \u0026amp; 1 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s then subtract the middle row from the last row:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 5 \\\\\n0 \u0026amp; 1 \u0026amp; 1 \\\\\n0 \u0026amp; 0 \u0026amp; 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAlready we see an \\(0\\) row emerging. That means that some combination of the variables have to be \\(0\\) for a solution to exist: these vectors do not span the space and therefore we can\u0026rsquo;t get everywhere in space.\u003c/p\u003e\n\u003ch3 id=\"pickup-breaks-down\"\u003ePickup Breaks Down\u003c/h3\u003e\n\u003cp\u003eWe now want to know if the following vectors would reach \\((0,0,0)\\) after driving the pickup some distance \\(d\\); that is, if we started at some \\((a,b,c)\\), can:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 3 \u0026amp; 0 \\\\\n1 \u0026amp; -2 \u0026amp; 1 \\\\\n0 \u0026amp; 1 \u0026amp; 1 \\\\\n\\end{pmatrix} \\begin{pmatrix}\n0 \\\\ \\beta \\\\ \\gamma\n\\end{pmatrix} = -\\begin{pmatrix}\na \\\\ b \\\\ c\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyield a solution? 
We have already performed the Gaussian Elimination above, therefore, we will skip directly to the solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 0 \u0026amp; 0 \\\\\n0 \u0026amp; 1 \u0026amp; 0\\\\\n0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n0 \\\\ \\gamma \\\\ \\beta\n\\end{pmatrix} = -\\begin{pmatrix}\na-3c + \\frac{b-a+5c}{2} \\\\c-\\frac{b-a+5c}{6}\\\\ \\frac{{b-a+5c}}{6}\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eObviously the bottom few rows yield a solution, however, the top row places some limitation on our possible location. Namely, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-\\frac{a+b-c}{2} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf the locations you are at do not behave with these rules, a solution will not be yielded.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_linear_vehicles/","tags":null,"title":"NUS-MATH530 Linear Vehicles"},{"categories":null,"contents":"Factoids: \\((AB)^{*} = B^{*} A^{*}\\), \\((A+B)^{*} = A^{*} + B^{*}\\) An unitary operator is invertible, and the inverse of its matrix representation is its transpose Take \\(M\\) an unitary square matrix, with orthonormal columns. Note that this matrix, by construction, sends each basis \\(v_{j}\\) to $ej$\u0026mdash;a set of \\(dim\\ V\\) (as there are \\(dim\\ V\\) columns to \\(M\\)) linearly independent (as \\(e_{j}\\), through orthonormality, are linearly independent) vectors. As we have \\(dim\\ V\\) linearly independent vectors, the \\(e_{j}\\) form a basis. As each \\(v_{j}\\) is sent to $ej$\u0026mdash;both a basis of $V$\u0026mdash;we note that the finite-dimensional operator corresponding to \\(M\\) is subjective and hence invertible.\nConstruct now the matrix \\(M^{*}\\). Consider \\(M M^{*}\\). 
Note that the multiplication operation will require taking the inner product of each row of \\(M\\), against each column of \\(M^{*}\\); that is, this operation will result in taking the inner products between each pair of orthonormal columns of \\(M\\).\nRecall that, per the definition of orthonormal vectors, for a pair of vectors \\(e_{i}, e_{j}\\), \\(\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\) and \\(=1\\) for \\(i=j\\). Therefore, this row-column product will be \\(1\\) when row \\(j\\) and column \\(j\\) is multiplied together and \\(0\\) otherwise.\nIn this fashion, \\(M M^{*} =I\\); in a similar fashion, \\(M^{*} M = I\\). Therefore, \\(M^{*} = M^{-1}\\).\nResult 2: unitary matricies are normal Recall the matrix \\(M\\) is normal if \\(A A^{*} = A^{*} A\\). Now, recall that for a unitary operator \\(A^{*} = A^{-1}\\).\nNow, we have that:\n\\begin{equation} A A^{*} = A A^{-1} = I = A^{-1} A = A^{*}A \\end{equation}\nResult 3: self-adjoint matricies are normal Recall a self-adjoint matrix acts like \\(A = A^{*}\\).\nNow:\n\\begin{equation} A A^{*} = A A = A^{*} A \\end{equation}\nObservation 1: some matricies are both self-adjoint and unitary Take the symmetric matrix formed by conjoining each of the standard bases of euclidian space \\(\\mathbb{F}^{n}\\). That is, the identity matrix.\nThe matrix has orthonormal columns (as the standard bases are orthonormal), and self-adjoint as it is symmetric.\nProblem 1: Venn Diagram Per results above.\nProblem 2: Group! Self-Adjoint Not closed under multiplication:\n\\begin{equation} \\mqty(a \u0026amp; b \\\\ b \u0026amp; c) \\mqty(f \u0026amp; g \\\\ g\u0026amp; h) = \\mqty(af + bg \u0026amp; ag+bh \\\\ bf+cg \u0026amp; bg+ch) \\end{equation}\nEvidently, this matrix is no longer symmetric (i.e. not self adjoint).\nUnitary Do form a group!\nNormal There\u0026rsquo;s no inverse for \\(0\\).\nIs this proof taking too much of a shortcut? 
/ Wishywashy.\nBy the complex spectral theorm, \\(T\\) being normal implies that there is an orthonormal bases of eigenvalues of \\(T\\) (i.e. there is a diagonal representation of \\(T\\)). This can be obtained with Schur\u0026rsquo;s theorem, then applying the condition that \\(A A^{*} = A^{*}A\\) to show that the \u0026ldquo;upper-triangular\u0026rdquo; matrix formed by the orthonormal bases is actually diagonal.\nBy calculation, diagonal matricies\u0026rsquo; multiplication is closed.\nWe now inherit the identity and associativity from general matricies.\nSo invertible normal matricies form a group.\n\u0026ldquo;Matrix Adjoint\u0026rdquo; \\(A^{*}\\) is the adjoint of the matrix.\nThat:\n\\begin{equation} \\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle \\end{equation}\n\\(A = A^{*} \\implies \\lambda_{i} \\in \\mathbb{R}\\) \\(A^{*} A = A A^{*} \\implies\\) diagonalizable based on an orthonormal basis of eigenvectors \\(A\\) is orthogonal/unitary \\(\\implies\\) \\(A^{*} = A^{-1}\\) 7.13: E.v. 
of self-adjoint operators are real.\n7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\n7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix\n7.22: eigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal.\nAlso 7.24: the spectral theorem\u0026mdash;that if \\(T\\) is normal, then \\(V\\) has an orthonormal basis of eigenvectors of \\(T\\) and so \\(T\\) is diagonalizable with respect to an orthonormal basis\nRecall \u0026ldquo;normal\u0026rdquo;: \\(A A^{*} = A^{*} A\\)\n","html":"\u003ch2 id=\"factoids\"\u003eFactoids:\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\((AB)^{*} = B^{*} A^{*}\\), \\((A+B)^{*} = A^{*} + B^{*}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"an-unitary-operator-is-invertible-and-the-inverse-of-its-matrix-representation-is-its-transpose\"\u003eAn unitary operator is invertible, and the inverse of its matrix representation is its transpose\u003c/h2\u003e\n\u003cp\u003eTake \\(M\\) an unitary square matrix, with orthonormal columns. Note that this matrix, by construction, sends each basis \\(v_{j}\\) to $e\u003csub\u003ej\u003c/sub\u003e$\u0026mdash;a set of \\(dim\\ V\\) (as there are \\(dim\\ V\\) columns to \\(M\\)) linearly independent (as \\(e_{j}\\), through orthonormality, are linearly independent) vectors. As we have \\(dim\\ V\\) linearly independent vectors, the \\(e_{j}\\) form a basis. As each \\(v_{j}\\) is sent to $e\u003csub\u003ej\u003c/sub\u003e$\u0026mdash;both a basis of $V$\u0026mdash;we note that the finite-dimensional operator corresponding to \\(M\\) is subjective and hence invertible.\u003c/p\u003e\n\u003cp\u003eConstruct now the matrix \\(M^{*}\\). Consider \\(M M^{*}\\). 
Note that the multiplication operation will require taking the inner product of each \u003cem\u003erow\u003c/em\u003e of \\(M\\), against each \u003cem\u003ecolumn\u003c/em\u003e of \\(M^{*}\\); that is, this operation will result in taking the inner products between each pair of orthonormal columns of \\(M\\).\u003c/p\u003e\n\u003cp\u003eRecall that, per the definition of orthonormal vectors, for a pair of vectors \\(e_{i}, e_{j}\\), \\(\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\) and \\(=1\\) for \\(i=j\\). Therefore, this row-column product will be \\(1\\) when row \\(j\\) and column \\(j\\) is multiplied together and \\(0\\) otherwise.\u003c/p\u003e\n\u003cp\u003eIn this fashion, \\(M M^{*} =I\\); in a similar fashion, \\(M^{*} M = I\\). Therefore, \\(M^{*} = M^{-1}\\).\u003c/p\u003e\n\u003ch2 id=\"result-2-unitary-matricies-are-normal\"\u003eResult 2: unitary matricies are normal\u003c/h2\u003e\n\u003cp\u003eRecall the matrix \\(M\\) is normal if \\(A A^{*} = A^{*} A\\). Now, recall that for a unitary operator \\(A^{*} = A^{-1}\\).\u003c/p\u003e\n\u003cp\u003eNow, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA A^{*} = A A^{-1} = I = A^{-1} A = A^{*}A\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"result-3-self-adjoint-matricies-are-normal\"\u003eResult 3: self-adjoint matricies are normal\u003c/h2\u003e\n\u003cp\u003eRecall a self-adjoint matrix acts like \\(A = A^{*}\\).\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA A^{*} = A A = A^{*} A\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"observation-1-some-matricies-are-both-self-adjoint-and-unitary\"\u003eObservation 1: some matricies are both self-adjoint and unitary\u003c/h2\u003e\n\u003cp\u003eTake the symmetric matrix formed by conjoining each of the standard bases of euclidian space \\(\\mathbb{F}^{n}\\). 
That is, the identity matrix.\u003c/p\u003e\n\u003cp\u003eThe matrix has orthonormal columns (as the standard bases are orthonormal), and self-adjoint as it is symmetric.\u003c/p\u003e\n\u003ch2 id=\"problem-1-venn-diagram\"\u003eProblem 1: Venn Diagram\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-04_20-58-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePer results above.\u003c/p\u003e\n\u003ch2 id=\"problem-2-group\"\u003eProblem 2: Group!\u003c/h2\u003e\n\u003ch3 id=\"self-adjoint\"\u003eSelf-Adjoint\u003c/h3\u003e\n\u003cp\u003eNot closed under multiplication:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(a \u0026amp; b \\\\ b \u0026amp; c) \\mqty(f \u0026amp; g \\\\ g\u0026amp; h) = \\mqty(af + bg \u0026amp; ag+bh \\\\ bf+cg \u0026amp; bg+ch)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEvidently, this matrix is no longer symmetric (i.e. not self adjoint).\u003c/p\u003e\n\u003ch3 id=\"unitary\"\u003eUnitary\u003c/h3\u003e\n\u003cp\u003eDo form a group!\u003c/p\u003e\n\u003ch3 id=\"normal\"\u003eNormal\u003c/h3\u003e\n\u003cp\u003eThere\u0026rsquo;s no inverse for \\(0\\).\u003c/p\u003e\n\u003cp\u003eIs this proof taking too much of a shortcut? / Wishywashy.\u003c/p\u003e\n\u003cp\u003eBy the complex spectral theorm, \\(T\\) being normal implies that there is an orthonormal bases of eigenvalues of \\(T\\) (i.e. there is a diagonal representation of \\(T\\)). 
This can be obtained with Schur\u0026rsquo;s theorem, then applying the condition that \\(A A^{*} = A^{*}A\\) to show that the \u0026ldquo;upper-triangular\u0026rdquo; matrix formed by the orthonormal bases is actually diagonal.\u003c/p\u003e\n\u003cp\u003eBy calculation, diagonal matricies\u0026rsquo; multiplication is closed.\u003c/p\u003e\n\u003cp\u003eWe now inherit the identity and associativity from general matricies.\u003c/p\u003e\n\u003cp\u003eSo invertible normal matricies form a group.\u003c/p\u003e\n\u003ch2 id=\"matrix-adjoint\"\u003e\u0026ldquo;Matrix Adjoint\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003e\\(A^{*}\\) is the \u003cstrong\u003eadjoint\u003c/strong\u003e of the matrix.\u003c/p\u003e\n\u003cp\u003eThat:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv,w \\rangle = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-05-08_09-42-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003e\\(A = A^{*} \\implies \\lambda_{i} \\in \\mathbb{R}\\)\u003c/li\u003e\n\u003cli\u003e\\(A^{*} A = A A^{*} \\implies\\) diagonalizable based on an orthonormal basis of eigenvectors\u003c/li\u003e\n\u003cli\u003e\\(A\\) is orthogonal/unitary \\(\\implies\\) \\(A^{*} = A^{-1}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003e\u003cstrong\u003e7.13\u003c/strong\u003e: E.v. 
of self-adjoint operators are real.\u003c/p\u003e\n\u003cp\u003e7.14: Over \\(\\mathbb{C}\\), \\(Tv\\) is orthogonal to all \\(v\\) IFF \\(T\\) is the zero matrix\u003c/p\u003e\n\u003cp\u003e7.16: Over \\(\\mathbb{R}\\), \\(Tv\\) is orthogonal to all \\(v\\) and \\(T\\) is self-adjoint, then \\(T\\) is the zero matrix\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e7.22\u003c/strong\u003e: eigenvectors of \\(T\\) corresponding to distinct eigenvalues are orthogonal if \\(T \\in \\mathcal{L}(V)\\) is normal.\u003c/p\u003e\n\u003cp\u003eAlso \u003cstrong\u003e7.24\u003c/strong\u003e: the spectral theorem\u0026mdash;that if \\(T\\) is normal, then \\(V\\) has an orthonormal basis of eigenvectors of \\(T\\) and so \\(T\\) is diagonalizable with respect to an orthonormal basis\u003c/p\u003e\n\u003cp\u003eRecall \u0026ldquo;normal\u0026rdquo;: \\(A A^{*} = A^{*} A\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_matrix_adjectives/","tags":null,"title":"NUS-MATH530 Matrix Adjectives"},{"categories":null,"contents":"Equation of a Plane We want to determine all points on the plane formed by two vectors.\nLet\u0026rsquo;s take two vectors \\(\\vec{u} \\in V\\) and \\(\\vec{v} \\in V\\). The orthogonal vector to the both of them (i.e. the normal direction of the plane) is:\n\\begin{equation} \\vec{u}\\times \\vec{v} \\end{equation}\nby the definition of the cross product.\nThe points on the plane, therefore, have to be orthogonal themselves to this normal vector. 
This means that the dot product of the candidate vector against these vectors should be \\(0\\):\n\\begin{equation} (\\vec{u} \\times \\vec{v}) \\cdot \\begin{pmatrix} x_{1} \\\\ \\dots \\\\ x_{n} \\end{pmatrix} = 0 \\end{equation}\nThis forms the final equation for a plane given two vectors in \\(\\mathbb{F}^{n}\\).\nA.B Exercises Double Negative We desire that \\(-(-v)=v \\forall v \\in V\\)\nBy distributivity in vector spaces, and the fact that \\(0v=0\\), we have that:\n\\begin{equation} v+(-1)v = (1-1)v = 0v = 0 \\end{equation}\nTherefore, \\((-1)v=-v\\).\nWe now have:\n\\begin{equation} -(-v) = -((-1)v) \\end{equation}\nThe scalar multiple of \\(v\\), by definition, is also \\(\\in V\\) if \\(v \\in V\\). Therefore, it itself holds that:\n\\begin{equation} (-1)((-1)v) \\end{equation}\nBy associativity:\n\\begin{equation} (-1\\cdot -1)v \\end{equation}\nFinally:\n\\begin{equation} (-1\\cdot -1)v = (1v) = v\\ \\blacksquare \\end{equation}\nOne of it is zero If \\(a \\in \\mathbb{F}\\), \\(v \\in V\\), and \\(av=0\\), we desire that \\(a=0\\) or \\(v=0\\). We perform casework.\nCase 1: \\(a=0\\) \u0026ndash; we are done.\nCase 2: \\(a \\neq 0\\): As \\(a \\in \\mathbb{F}\\), and \\(a \\neq 0\\), \\(\\exists \\frac{1}{a}: a\\cdot \\frac{1}{a}=1\\).\nTherefore:\n\\begin{align} \u0026amp;av = 0 \\\\ \\Rightarrow\\ \u0026amp; \\frac{1}{a}av = \\frac{1}{a} 0 \\\\ \\Rightarrow\\ \u0026amp; 1v = \\frac{1}{a} 0 \\\\ \\Rightarrow\\ \u0026amp; 1v = 0 \\\\ \\Rightarrow\\ \u0026amp; v=0\\ \\blacksquare \\end{align}\nExistence and Uniqueness Given Equation Given \\(v,w \\in V\\), we desire a unique \\(x\\in V: v+3x=w\\).\nLet\u0026rsquo;s first check existence. Take the expression:\n\\begin{equation} n = \\frac{1}{3} (w-v) \\end{equation}\nAs both \\(v,w \\in V\\), subtraction (addition) and scalar multiplication are defined. 
Therefore, \\(\\forall w,v \\in V\\), we can construct such an \\(n\\).\nSupplying the expression into \\(v+3x\\) for the definition of \\(x\\):\n\\begin{align} v+3x \u0026amp;= v+3\\qty(\\frac{1}{3}(w-v)) \\\\ \u0026amp;= v+(w-v) \\\\ \u0026amp;= v+w-v \\\\ \u0026amp;= v-v+w \\\\ \u0026amp;= 0+w \\\\ \u0026amp;= w \\end{align}\nby distributivity, associativity, and commutativity in vector spaces, yielding \\(w\\) as desired.\nNow let\u0026rsquo;s check uniqueness.\nSuppose \\(\\exists x_1, x_2: v+3x_1=w\\) and \\(v+3x_2=w\\).\nBy transitivity:\n\\begin{equation} v+3x_1=v+3x_2 \\end{equation}\nApplying \\(-v\\) to both sides:\n\\begin{equation} 3x_1=3x_2 \\end{equation}\nFinally, applying \\(\\frac{1}{3}\\) to both sides:\n\\begin{equation} x_{1}= x_2 \\end{equation}\nTherefore, there only exists one unique \\(x\\) which satisfies the expression. \\(\\blacksquare\\)\nEmpty Set is Not a Vector Space The empty set is not a vector space as it doesn\u0026rsquo;t have an additive identity. \\(\\blacksquare\\)\nAdditive Inverse is also Zero Multiplication We first take the additive inverse expression:\n\\begin{equation} \\forall v \\in V, \\exists -v: v+(-v) = 0 \\end{equation}\nTake now:\n\\begin{equation} 0v \\end{equation}\nWe have that:\n\\begin{align} 0v \u0026amp;= (0+0)v \\\\ \u0026amp;= 0v + 0v \\end{align}\nBy distributivity.\nAs \\(0v \\in V\\), \\(\\exists -0v: 0v+(-0v)=0\\).\n\\begin{align} \u0026amp;0v = 0v+0v \\\\ \\Rightarrow\\ \u0026amp; 0v-0v = 0v+0v-0v \\\\ \\Rightarrow\\ \u0026amp; 0 = 0v \\end{align}\nas desired. 
Now, we will start from this condition and work out way backwards.\nNote that the statement for additive inverse condition is that:\n\\begin{equation} \\forall v \\in V, \\exists -v: v+(-v) = 0 \\end{equation}\nLet us begin with the expression that:\n\\begin{equation} 0=0v \\end{equation}\nWe have that:\n\\begin{equation} 0=(1-1)v \\end{equation}\nThen, we have by distributivity:\n\\begin{equation} 0 = v + (-1)v \\end{equation}\nscalar multiplication is defined on a vector space. Therefore, we have \\(-1v\\) to construct such an additive inverse \\(\\forall v \\in V\\). \\(\\blacksquare\\)\nWeird Vector Space All operations are defined as given.\nTake scalars \\(t_1, t_2 \\in \\mathbb{R}\\).\n\\begin{equation} (t_1-t_2)\\infty = \\infty \\end{equation}\nYet, if we follow the rules of distribution:\n\\begin{equation} (t_1 -t_2)\\infty = \\infty -\\infty =0 \\end{equation}\nTherefore, distribution doesn\u0026rsquo;t hold on this new structure. It is not a vector space. \\(\\blacksquare\\)\n","html":"\u003ch2 id=\"equation-of-a-plane\"\u003eEquation of a Plane\u003c/h2\u003e\n\u003cp\u003eWe want to determine all \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003es on the plane formed by two vectors.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take two vectors \\(\\vec{u} \\in V\\) and \\(\\vec{v} \\in V\\). The orthogonal vector to the both of them (i.e. the normal direction of the plane) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{u}\\times \\vec{v}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby the definition of the \u003ca href=\"/posts/kbhcross_product/\"\u003ecross product\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003es on the plane, therefore, have to be orthogonal themselves to this normal vector. 
This means that the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e of the candidate vector against these vectors should be \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\vec{u} \\times \\vec{v}) \\cdot \\begin{pmatrix}\nx_{1} \\\\ \\dots \\\\ x_{n}\n\\end{pmatrix} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis forms the final equation for a plane given two vectors in \\(\\mathbb{F}^{n}\\).\u003c/p\u003e\n\u003ch2 id=\"a-dot-b-exercises\"\u003eA.B Exercises\u003c/h2\u003e\n\u003ch3 id=\"double-negative\"\u003eDouble Negative\u003c/h3\u003e\n\u003cp\u003eWe desire that \\(-(-v)=v \\forall v \\in V\\)\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e in vector spaces, and the fact that \\(0v=0\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv+(-1)v = (1-1)v = 0v = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, \\((-1)v=-v\\).\u003c/p\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-(-v) = -((-1)v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe scalar multiple of \\(v\\), by definition, is also \\(\\in V\\) if \\(v \\in V\\). Therefore, it itself holds that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-1)((-1)v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-1\\cdot -1)v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(-1\\cdot -1)v = (1v) = v\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"one-of-it-is-zero\"\u003eOne of it is zero\u003c/h3\u003e\n\u003cp\u003eIf \\(a \\in \\mathbb{F}\\), \\(v \\in V\\), and \\(av=0\\), we desire that \\(a=0\\) or \\(v=0\\). 
We perform casework.\u003c/p\u003e\n\u003cp\u003eCase 1: \\(a=0\\) \u0026ndash; we are done.\u003c/p\u003e\n\u003cp\u003eCase 2: \\(a \\neq 0\\):\nAs \\(a \\in \\mathbb{F}\\), and \\(a \\neq 0\\), \\(\\exists \\frac{1}{a}: a\\cdot \\frac{1}{a}=1\\).\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;av = 0 \\\\\n\\Rightarrow\\ \u0026amp; \\frac{1}{a}av = \\frac{1}{a} 0 \\\\\n\\Rightarrow\\ \u0026amp; 1v = \\frac{1}{a} 0 \\\\\n\\Rightarrow\\ \u0026amp; 1v = 0 \\\\\n\\Rightarrow\\ \u0026amp; v=0\\ \\blacksquare\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"existence-and-uniqueness-given-equation\"\u003eExistence and Uniqueness Given Equation\u003c/h3\u003e\n\u003cp\u003eGiven \\(v,w \\in V\\), we desire a unique \\(x\\in V: v+3x=w\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first check existence. Take the expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nn = \\frac{1}{3} (w-v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs both \\(v,w \\in V\\), subtraction (\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e) and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e are defined. 
Therefore, \\(\\forall w,v \\in V\\), we can construct such an \\(n\\).\u003c/p\u003e\n\u003cp\u003eSupplying the expression into \\(v+3x\\) for the definition of \\(x\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nv+3x \u0026amp;= v+3\\qty(\\frac{1}{3}(w-v)) \\\\\n\u0026amp;= v+(w-v) \\\\\n\u0026amp;= v+w-v \\\\\n\u0026amp;= v-v+w \\\\\n\u0026amp;= 0+w \\\\\n\u0026amp;= w\n\\end{align}\u003c/p\u003e\n\u003cp\u003eby \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, and \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e in \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es, yielding \\(w\\) as desired.\u003c/p\u003e\n\u003cp\u003eNow let\u0026rsquo;s check uniqueness.\u003c/p\u003e\n\u003cp\u003eSuppose \\(\\exists x_1, x_2: v+3x_1=w\\) and \\(v+3x_2=w\\).\u003c/p\u003e\n\u003cp\u003eBy transitivity:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv+3x_1=v+3x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying \\(-v\\) to both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n3x_1=3x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, applying \\(\\frac{1}{3}\\) to both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{1}= x_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, there only exists one unique \\(x\\) which satisfies the expression. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"empty-set-is-not-a-vector-space\"\u003eEmpty Set is Not a Vector Space\u003c/h3\u003e\n\u003cp\u003eThe empty set is not a vector space as it doesn\u0026rsquo;t have an \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"additive-inverse-is-also-zero-multiplication\"\u003eAdditive Inverse is also Zero Multiplication\u003c/h3\u003e\n\u003cp\u003eWe first take the additive inverse expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall v \\in V, \\exists -v: v+(-v) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTake now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n0v \u0026amp;= (0+0)v \\\\\n\u0026amp;= 0v + 0v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eBy \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAs \\(0v \\in V\\), \\(\\exists -0v: 0v+(-0v)=0\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;0v = 0v+0v \\\\\n\\Rightarrow\\ \u0026amp; 0v-0v = 0v+0v-0v \\\\\n\\Rightarrow\\ \u0026amp; 0 = 0v\n\\end{align}\u003c/p\u003e\n\u003cp\u003eas desired. Now, we will start from this condition and work out way backwards.\u003c/p\u003e\n\u003cp\u003eNote that the statement for additive inverse condition is that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall v \\in V, \\exists -v: v+(-v) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us begin with the expression that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0=0v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0=(1-1)v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, we have by \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = v + (-1)v\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is defined on a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. Therefore, we have \\(-1v\\) to construct such an additive inverse \\(\\forall v \\in V\\). 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"weird-vector-space\"\u003eWeird Vector Space\u003c/h3\u003e\n\u003cp\u003eAll operations are defined as given.\u003c/p\u003e\n\u003cp\u003eTake scalars \\(t_1, t_2 \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(t_1-t_2)\\infty = \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYet, if we follow the rules of distribution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(t_1 -t_2)\\infty = \\infty -\\infty =0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, distribution doesn\u0026rsquo;t hold on this new structure. It is not a vector space. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_plane_and_1_b/","tags":null,"title":"NUS-MATH530 Plane and 1.B"},{"categories":null,"contents":"Prove but \\(T\\) is diagonalizable if and only if the matrix of \\(T\\) is similar to a diagonal matrix.\nTry 2.\nGiven similarity:\nSo we have that:\n\\begin{equation} D = S^{-1} A S \\end{equation}\nwhere, \\(D\\) is diagonal. We apply \\(S\\) to both sides to yield:\n\\begin{equation} SD = AS \\end{equation}\nNow, note that \\(S\\) is invertible. This means that its column s are linearly independent (as it is an operator, which means it is injective, and hence has a zero null space; that indicates that the dimension of its range is that of the whole space: indicating its columns vectors are spanning; there is \\(dim\\ V\\) such columns, so it is a basis and hence linearly independent).\nLet \\(S = [v_1 | \\dots | v_{n}]\\); now, \\(SD = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\nBy that same definition above course, \\(A[v_1 | \\dots | v_{n}] = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\nFinally, then, by definition, \\(v_1 \\dots v_{n}\\) are eigenvectors of \\(A\\). Note again that, per the above, this is \\(n\\) linearly independent eigenvectors in a space of \\(\\dim n\\) \u0026mdash; this makes them a basis of \\(V\\). 
Having made a basis of eigenvectors of \\(A\\), it is diagonalizable.\nGiven diagonalizability:\nConstruct \\(S= [v_1 | \\dots | v_{n}]\\), a basis of eigenvectors of \\(A\\) which is diagonalizable. Now, \\(AS\\) would send each of the vectors to their corresponding scales, meaning: \\(AS = [\\lambda_{1} v_{1} | \\dots | \\lambda_{n} v_{n}]\\).\nLastly, applying \\(S^{-1}\\) again would send each vector to each of the standard basis encoded in the original space given homogeneity of the \\(\\lambda\\); leaving the vector of \\(\\lambda_{j}\\) scaled by the identity: creating a diagonal \\(D\\) matrix. \\(\\blacksquare\\)\n","html":"\u003cp\u003eProve but \\(T\\) is \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e if and only if the matrix of \\(T\\) is similar to a diagonal matrix.\u003c/p\u003e\n\u003cp\u003eTry 2.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eGiven similarity:\u003c/p\u003e\n\u003cp\u003eSo we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD = S^{-1} A S\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(D\\) is diagonal. We apply \\(S\\) to both sides to yield:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nSD = AS\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, note that \\(S\\) is invertible. 
This means that its column s are linearly independent (as it is an operator, which means it is injective, and hence has a zero null space; that indicates that the dimension of its range is that of the whole space: indicating its columns vectors are spanning; there is \\(dim\\ V\\) such columns, so it is a basis and hence linearly independent).\u003c/p\u003e\n\u003cp\u003eLet \\(S = [v_1 | \\dots | v_{n}]\\); now, \\(SD = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\u003c/p\u003e\n\u003cp\u003eBy that same definition above course, \\(A[v_1 | \\dots | v_{n}] = [\\lambda_{1} v_1 | \\dots | \\lambda_{n} v_{n}]\\).\u003c/p\u003e\n\u003cp\u003eFinally, then, by definition, \\(v_1 \\dots v_{n}\\) are eigenvectors of \\(A\\). Note again that, per the above, this is \\(n\\) linearly independent eigenvectors in a space of \\(\\dim n\\) \u0026mdash; this makes them a basis of \\(V\\). Having made a basis of eigenvectors of \\(A\\), it is diagonalizable.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eGiven diagonalizability:\u003c/p\u003e\n\u003cp\u003eConstruct \\(S= [v_1 | \\dots | v_{n}]\\), a basis of eigenvectors of \\(A\\) which is diagonalizable. Now, \\(AS\\) would send each of the vectors to their corresponding scales, meaning: \\(AS = [\\lambda_{1} v_{1} | \\dots | \\lambda_{n} v_{n}]\\).\u003c/p\u003e\n\u003cp\u003eLastly, applying \\(S^{-1}\\) again would send each vector to each of the standard basis encoded in the original space given homogeneity of the \\(\\lambda\\); leaving the vector of \\(\\lambda_{j}\\) scaled by the identity: creating a diagonal \\(D\\) matrix. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_similar_to_diagonal/","tags":null,"title":"NUS-MATH530 Similar to Diagonal"},{"categories":null,"contents":"Two Variables Let\u0026rsquo;s begin with the equations:\n\\begin{equation} \\begin{cases} 2x+y = 3 \\\\ x - y = 0 \\end{cases} \\end{equation}\nWe will first change this into a matrix equation:\n\\begin{equation} \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\end{pmatrix} = \\begin{pmatrix} 3 \\\\ 0 \\end{pmatrix} \\end{equation}\nWe need to find, then, the inverse of:\n\\begin{equation} \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} \\end{equation}\nNamely, we need the matrix such that:\n\\begin{equation} M \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = I \\end{equation}\nTo do this, we can use row operations on both sides such that the left side becomes the identity, we are essentially inverting the process of reversing a matrix.\n\\begin{equation} \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} 1 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\end{equation}\nLet\u0026rsquo;s begin:\n\\begin{align} \u0026amp; \\begin{pmatrix} 2 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} 1 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\\\ \\Rightarrow\\ \u0026amp; \\begin{pmatrix} 0 \u0026amp; 3 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} 1 \u0026amp; -2 \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\\\ \\Rightarrow\\ \u0026amp; \\begin{pmatrix} 0 \u0026amp; 1 \\\\ 1 \u0026amp; -1 \\end{pmatrix} = \\begin{pmatrix} \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ 0 \u0026amp; 1 \\end{pmatrix} \\\\ \\Rightarrow\\ \u0026amp; \\begin{pmatrix} 0 \u0026amp; 1 \\\\ 1 \u0026amp; 0 \\end{pmatrix} = \\begin{pmatrix} \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ \\frac{1}{3} \u0026amp; \\frac{1}{3} \\end{pmatrix} \\\\ \\Rightarrow\\ 
\u0026amp; \\begin{pmatrix} 1 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \\end{pmatrix} = \\begin{pmatrix} \\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\end{pmatrix} \\end{align}\nFinally, then, we will applying this matrix to the input:\n\\begin{align} \\begin{pmatrix} \\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3} \\end{pmatrix} \\begin{pmatrix} 3 \\\\ 0 \\end{pmatrix} = \\begin{pmatrix} 1 \\\\ 1 \\end{pmatrix} \\end{align}\nThree Variables We do this again, but now with a much larger matrix. Namely:\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0 \\end{pmatrix} \\end{equation}\nI spend a good two hours (yes) trying to invert this. At this point, I know its invertable but I keep making mistakes. However, a solution exists and it is of shape:\n\\begin{equation} \\begin{pmatrix} \\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; \\frac{2}{5} \\\\ \\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; -\\frac{3}{5} \\\\ \\frac{2}{5} \u0026amp; -\\frac{3}{5} \u0026amp; \\frac{4}{5} \\end{pmatrix} \\end{equation}\nAnd, applying the output, we have that:\n\\begin{equation} \\begin{pmatrix} 1 \\\\ -1 \\\\ 1 \\end{pmatrix} \\end{equation}\nSo complicated of an inverse, for such a simple result\u0026hellip;\nMatrix Multiplication Matrix multiplication is not commutative. While you can, for instance, multiply a \\(2\\times 3\\) by a \\(3\\times 3\\), we cannot do it the other way.\nFor an equation with three variables, you need three equations at a minimum to have at least one solution; you can get at most the number of equations number of solutions with fewer equations. 
You probably will have no solutions if you have more equations\u0026mdash;the result is likely to be overdetermined; of course, two equations may be the same relation then in which case one is effectively nulled.\n","html":"\u003ch2 id=\"two-variables\"\u003eTwo Variables\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s begin with the equations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n2x+y = 3 \\\\\nx - y = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will first change this into a matrix equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n2 \u0026amp; 1 \\\\\n1 \u0026amp; -1\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = \\begin{pmatrix}\n3 \\\\ 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe need to find, then, the inverse of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNamely, we need the matrix such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM \\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = I\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo do this, we can use row operations on both sides such that the left side becomes the identity, we are essentially inverting the process of reversing a matrix.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \u0026amp; 0 \\\\ 0 \u0026amp; 1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\begin{pmatrix}\n2 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \u0026amp; 0 \\\\ 0 \u0026amp; 1\n\\end{pmatrix} \\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n0 \u0026amp; 3 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n1 \u0026amp; -2 \\\\ 0 \u0026amp; 1\n\\end{pmatrix} 
\\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n0 \u0026amp; 1 \\\\ 1 \u0026amp; -1\n\\end{pmatrix} = \\begin{pmatrix}\n\\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ 0 \u0026amp; 1\n\\end{pmatrix} \\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n0 \u0026amp; 1 \\\\ 1 \u0026amp; 0\n\\end{pmatrix} = \\begin{pmatrix}\n\\frac{1}{3} \u0026amp; -\\frac{2}{3} \\\\ \\frac{1}{3} \u0026amp; \\frac{1}{3}\n\\end{pmatrix} \\\\\n\\Rightarrow\\ \u0026amp; \\begin{pmatrix}\n1 \u0026amp; 0 \\\\ 0 \u0026amp; 1\n\\end{pmatrix} = \\begin{pmatrix}\n\\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3}\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinally, then, we will applying this matrix to the input:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\n\\frac{1}{3} \u0026amp; \\frac{1}{3} \\\\ \\frac{1}{3} \u0026amp; -\\frac{2}{3}\n\\end{pmatrix} \\begin{pmatrix}\n3 \\\\ 0\n\\end{pmatrix} = \\begin{pmatrix}\n1 \\\\ 1\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"three-variables\"\u003eThree Variables\u003c/h2\u003e\n\u003cp\u003eWe do this again, but now with a much larger matrix. Namely:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eI spend a good two hours (yes) trying to invert this. At this point, I know its invertable but I keep making mistakes. 
However, a solution exists and it is of shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n\\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; \\frac{2}{5} \\\\\n\\frac{1}{5} \u0026amp; \\frac{1}{5} \u0026amp; -\\frac{3}{5} \\\\\n\\frac{2}{5} \u0026amp; -\\frac{3}{5} \u0026amp; \\frac{4}{5}\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd, applying the output, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \\\\\n-1 \\\\\n1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo complicated of an inverse, for such a simple result\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"matrix-multiplication\"\u003eMatrix Multiplication\u003c/h2\u003e\n\u003cp\u003eMatrix multiplication is not commutative. While you can, for instance, multiply a \\(2\\times 3\\) by a \\(3\\times 3\\), we cannot do it the other way.\u003c/p\u003e\n\u003cp\u003eFor an equation with three variables, you need three equations at a minimum to have at least one solution; you can get at most the number of equations number of solutions with fewer equations. You probably will have no solutions if you have more equations\u0026mdash;the result is likely to be overdetermined; of course, two equations may be the same relation then in which case one is effectively nulled.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_solving_systems/","tags":null,"title":"NUS-MATH530 Solving Systems"},{"categories":null,"contents":"Suppose \\(\\mathbb{F} = \\mathbb{R}\\), and \\(V \\neq \\{0\\}\\). Replace the positivity condition with the condition that \\(\\langle v,v \\rangle \u0026gt; 0\\) for some \\(v \\in V\\). 
Show that this change in definition does not change the set of functions from \\(V \\times V\\) to \\(\\mathbb{R}\\) that are inner products on \\(V\\).\nWe hope to show that \\(\\langle v,v \\rangle \u0026gt;0\\) for some \\(v \\in V\\) implies that \\(\\langle v,v \\rangle \\geq 0\\) for all \\(v \\in V\\) in real vector spaces.\nTake some \\(v_0 \\in V\\) such that \\(\\langle v_0,v_0 \\rangle \u0026gt;0\\). Now, WLOG let \\(v \\in V\\) and \\(v = v_0+w\\). So:\n\\begin{align} 0 \u0026amp;\u0026lt; \\langle v_0,v_0 \\rangle \\\\ \u0026amp;= \\langle v-w, v-w \\rangle \\\\ \u0026amp;= \\langle v,v \\rangle + \\langle w,w \\rangle - 2\\langle v,w \\rangle \\end{align}\nNow, the last step is possible because symmetry becomes conjugate symmetry in reals.\nWe now have that:\n\\begin{equation} 2 \\langle v,w \\rangle - \\langle w,w \\rangle \u0026lt; \\langle v,v \\rangle \\end{equation}\n","html":"\u003cp\u003eSuppose \\(\\mathbb{F} = \\mathbb{R}\\), and \\(V \\neq \\{0\\}\\). Replace the positivity condition with the condition that \\(\\langle v,v \\rangle \u0026gt; 0\\) for some \\(v \\in V\\). Show that this change in definition does not change the set of functions from \\(V \\times V\\) to \\(\\mathbb{R}\\) that are inner products on \\(V\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe hope to show that \\(\\langle v,v \\rangle \u0026gt;0\\) for some \\(v \\in V\\) implies that \\(\\langle v,v \\rangle \\geq 0\\) for all \\(v \\in V\\) in real vector spaces.\u003c/p\u003e\n\u003cp\u003eTake some \\(v_0 \\in V\\) such that \\(\\langle v_0,v_0 \\rangle \u0026gt;0\\). Now, WLOG let \\(v \\in V\\) and \\(v = v_0+w\\). 
So:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n0 \u0026amp;\u0026lt; \\langle v_0,v_0 \\rangle \\\\\n\u0026amp;= \\langle v-w, v-w \\rangle \\\\\n\u0026amp;= \\langle v,v \\rangle + \\langle w,w \\rangle - 2\\langle v,w \\rangle\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, the last step is possible because symmetry becomes conjugate symmetry in reals.\u003c/p\u003e\n\u003cp\u003eWe now have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2 \\langle v,w \\rangle - \\langle w,w \\rangle \u0026lt; \\langle v,v \\rangle\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_some_6_a_problems/","tags":null,"title":"NUS-MATH530 Some 6.A Problems"},{"categories":null,"contents":"Proof: identity of a group is unique Assume for contradiction that there exists two identities \\(e_1\\) and \\(e_2\\) which are identities of the group \\(A\\). Take also an \\(a \\in A\\).\nGiven both \\(e_1\\) and \\(e_2\\) are identities, we have that:\n\\begin{equation} a * e_1 = a \\end{equation}\nas well as\n\\begin{equation} a * e_2 = a \\end{equation}\nTherefore, we have by the transitive property that:\n\\begin{equation} a * e_1 = a*e_2 \\end{equation}\nBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\n\\begin{equation} 1/a*a * e_1 = 1/a*a*e_2 \\end{equation}\nTherefore, that:\n\\begin{equation} e_1 = e_2\\ \\blacksquare \\end{equation}\nTherefore, there cannot be two unique identities in a group.\nProof: inverse of an element in a group is unique Take group \\(A\\) and element \\(a\\in A\\), assume for contradiction that there exists two inverses of \\(a\\) named here \\(a\u0026rsquo;_1\\) and \\(a\u0026rsquo;_2\\). 
Given they are both inverses for \\(a\\), we have that:\n\\begin{equation} a * a\u0026rsquo;_1 = 1 \\end{equation}\nas well as\n\\begin{equation} a * a\u0026rsquo;_2 = 1 \\end{equation}\nTherefore, we have by the transitive property that:\n\\begin{equation} a * a\u0026rsquo;_1 = a*a\u0026rsquo;_2 \\end{equation}\nBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\n\\begin{equation} 1/a*a * a\u0026rsquo;_1 = 1/a*a*a\u0026rsquo;_2 \\end{equation}\nTherefore, that:\n\\begin{equation} a\u0026rsquo;_1 = a\u0026rsquo;_2\\ \\blacksquare \\end{equation}\nTherefore, there cannot be two unique inverses for an element in group.\nProof: additive identity in field cannot have multiplicative inverse For some field \\(F\\) take its additive identity \\(0 \\in F\\). Assume for the sake of contradiction there exists a multiplicative inverse for \\(0\\) named \\(0\u0026rsquo; \\in F\\).\nLet\u0026rsquo;s take some \\(a \\in F\\). By definition of the additive identity, we have:\n\\begin{equation} 0 + a = a \\end{equation}\nWe will apply \\(0\u0026rsquo;\\) to both sides, we having that:\n\\begin{equation} 0\u0026rsquo;(0+a) = 0\u0026rsquo;a \\end{equation}\nDistributing \\(0\u0026rsquo;\\) to both sides, we have:\n\\begin{equation} 1 + 0\u0026rsquo;a = 0\u0026rsquo;a \\end{equation}\nGiven \\(a,0\u0026rsquo; \\in F\\), and multiplication is closed in \\(F\\) being a field, \\(0\u0026rsquo;a \\in F\\); applying \\(-0\u0026rsquo;a \\in F\\) the additive inverse of the result of multiplying together to both sides, we have that:\n\\begin{equation} 1 + 0\u0026rsquo;a - 0\u0026rsquo;a = 0\u0026rsquo;a - 0\u0026rsquo;a \\end{equation}\nAnd therefore:\n\\begin{equation} 1 = 0 \\end{equation}\nwhich is absurd, reaching the desired contradiction. 
\\(\\blacksquare\\)\nSystem \\begin{equation} \\begin{cases} x + 2y + z = 0 \\\\ 2x + 0y - z = 1 \\\\ x - y + z = 2 \\\\ \\end{cases} \\end{equation}\nWe will subtract the top and bottom expressions to have that:\n\\begin{equation} 3y = -2 \\end{equation}\nAnd to get:\n\\begin{equation} y = \\frac{-2}{3} \\end{equation}\nManipulating the second expression, we have that:\n\\begin{equation} 2x -1 = z \\end{equation}\nSubstituting this expression and \\(y\\) into the third expression, we have:\n\\begin{equation} x + \\frac{2}{3} + 2x -1 = 2 \\end{equation}\nperforming algebraic manipulations:\n\\begin{align} \u0026amp;3x + \\frac{2}{3} = 3 \\\\ \\Rightarrow\\ \u0026amp;3x = \\frac{7}{3} \\\\ \\Rightarrow\\ \u0026amp;x = \\frac{7}{9} \\end{align}\nAnd finally:\n\\begin{equation} \\frac{14}{9}-1 = z = \\frac{5}{9} \\end{equation}\nMultiply \\begin{equation} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0\\end{pmatrix} \\begin{pmatrix} x \\\\ y\\\\ z \\end{pmatrix} = \\begin{pmatrix} x+2y+z \\\\ 2x-z \\\\ x-y \\end{pmatrix} \\end{equation}\nThe inner dimensions (column vs. 
row) of the matricies have to be the same for them to be multiplied; matrix multiplication is not commutative.\nProof: 2x2 Matrices with Real Entries form a Group Under Addition Closure \\begin{equation} \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp;d \\end{pmatrix} + \\begin{pmatrix} e \u0026amp; f \\\\ g \u0026amp; h \\end{pmatrix} = \\begin{pmatrix} a+e \u0026amp; b+f \\\\ c+g \u0026amp; d+h \\end{pmatrix} \\end{equation}\nIdentity \\begin{equation} \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp;d \\end{pmatrix} + \\begin{pmatrix} 0 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \\end{pmatrix} = \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp; d \\end{pmatrix} \\end{equation}\nInverse \\begin{equation} \\begin{pmatrix} a \u0026amp; b \\\\ c \u0026amp;d \\end{pmatrix} + \\begin{pmatrix} -a \u0026amp; -b \\\\ -c \u0026amp; -d \\end{pmatrix} = \\begin{pmatrix} 0 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \\end{pmatrix} \\end{equation}\nAssociative \\begin{equation} \\left ( \\begin{pmatrix} x_1 \u0026amp; x_2 \\\\ x_3 \u0026amp; x_4 \\end{pmatrix} + \\begin{pmatrix} y_1 \u0026amp; y_2 \\\\ y_3 \u0026amp; y_4 \\end{pmatrix} \\right) + \\begin{pmatrix} z_1 \u0026amp; z_2 \\\\ z_3 \u0026amp; z_4 \\end{pmatrix} = \\begin{pmatrix} (x_1+y_1)+z_1 \u0026amp; (x_2+y_2)+z_2 \\\\ (x_3+y_3)+z_3 \u0026amp; (x_4+y_4)+z_4 \\end{pmatrix} \\end{equation}\nwhich is equal, by associativity in \\(\\mathbb{F}\\), as:\n\\begin{equation} \\begin{pmatrix} x_1+(y_1+z_1) \u0026amp; x_2+(y_2+z_2) \\\\ x_3+(y_3+z_3) \u0026amp; x_4+(y_4+z_4) \\end{pmatrix} \\end{equation}\nAnd finally, this is equal to:\n\\begin{equation} \\begin{pmatrix} x_1 \u0026amp; x_2 \\\\ x_3 \u0026amp; x_4 \\end{pmatrix} + \\left (\\begin{pmatrix} y_1 \u0026amp; y_2 \\\\ y_3 \u0026amp; y_4 \\end{pmatrix} + \\begin{pmatrix} z_1 \u0026amp; z_2 \\\\ z_3 \u0026amp; z_4 \\end{pmatrix} \\right) \\end{equation}\nWe have therefore shown that 2x2 matricies form a group under addition.\nProof: 2x2 Matrices with Real Entries does not from a Group 
Under Multiplication Inverse The matrix\n\\begin{equation} \\begin{pmatrix} 0 \u0026amp; 0 \\\\ 0 \u0026amp;1 \\end{pmatrix} \\end{equation}\nis not invertable. In that, one cannot apply a matrix to this one to result in the multiplicative identity \\(I_2\\).\n","html":"\u003ch2 id=\"proof-identity-of-a-group-is-unique\"\u003eProof: identity of a group is unique\u003c/h2\u003e\n\u003cp\u003eAssume for contradiction that there exists two identities \\(e_1\\) and \\(e_2\\) which are identities of the group \\(A\\). Take also an \\(a \\in A\\).\u003c/p\u003e\n\u003cp\u003eGiven both \\(e_1\\) and \\(e_2\\) are identities, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * e_1 = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas well as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * e_2 = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we have by the transitive property that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * e_1 = a*e_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1/a*a * e_1 = 1/a*a*e_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne_1 = e_2\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, there cannot be two unique identities in a group.\u003c/p\u003e\n\u003ch2 id=\"proof-inverse-of-an-element-in-a-group-is-unique\"\u003eProof: inverse of an element in a group is unique\u003c/h2\u003e\n\u003cp\u003eTake group \\(A\\) and element \\(a\\in A\\), assume for contradiction that there exists two inverses of \\(a\\) named here \\(a\u0026rsquo;_1\\) and \\(a\u0026rsquo;_2\\). 
Given they are both inverses for \\(a\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * a\u0026rsquo;_1 = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas well as\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * a\u0026rsquo;_2 = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we have by the transitive property that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na * a\u0026rsquo;_1 = a*a\u0026rsquo;_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause we are in a group, there exists a \\(1/a\\) the inverse of \\(a\\). Applying this inverse to the expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1/a*a * a\u0026rsquo;_1 = 1/a*a*a\u0026rsquo;_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\u0026rsquo;_1 = a\u0026rsquo;_2\\ \\blacksquare\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, there cannot be two unique inverses for an element in group.\u003c/p\u003e\n\u003ch2 id=\"proof-additive-identity-in-field-cannot-have-multiplicative-inverse\"\u003eProof: additive identity in field cannot have multiplicative inverse\u003c/h2\u003e\n\u003cp\u003eFor some field \\(F\\) take its additive identity \\(0 \\in F\\). Assume for the sake of contradiction there exists a multiplicative inverse for \\(0\\) named \\(0\u0026rsquo; \\in F\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s take some \\(a \\in F\\). 
By definition of the additive identity, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 + a = a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will apply \\(0\u0026rsquo;\\) to both sides, we having that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0\u0026rsquo;(0+a) = 0\u0026rsquo;a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDistributing \\(0\u0026rsquo;\\) to both sides, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 + 0\u0026rsquo;a = 0\u0026rsquo;a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven \\(a,0\u0026rsquo; \\in F\\), and multiplication is closed in \\(F\\) being a field, \\(0\u0026rsquo;a \\in F\\); applying \\(-0\u0026rsquo;a \\in F\\) the additive inverse of the result of multiplying together to both sides, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 + 0\u0026rsquo;a - 0\u0026rsquo;a = 0\u0026rsquo;a - 0\u0026rsquo;a\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n1 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is absurd, reaching the desired contradiction. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"system\"\u003eSystem\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx + 2y + z = 0 \\\\\n2x + 0y - z = 1 \\\\\nx - y + z = 2 \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will subtract the top and bottom expressions to have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n3y = -2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{-2}{3}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eManipulating the second expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2x -1 = z\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting this expression and \\(y\\) into the third expression, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx + \\frac{2}{3} + 2x -1 = 2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eperforming algebraic manipulations:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;3x + \\frac{2}{3} = 3 \\\\\n\\Rightarrow\\ \u0026amp;3x = \\frac{7}{3} \\\\\n\\Rightarrow\\ \u0026amp;x = \\frac{7}{9}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd finally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{14}{9}-1 = z = \\frac{5}{9}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"multiply\"\u003eMultiply\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\\end{pmatrix} \\begin{pmatrix} x \\\\\ny\\\\\nz \\end{pmatrix} = \\begin{pmatrix}\nx+2y+z \\\\\n2x-z \\\\\nx-y\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe inner dimensions (column vs. 
row) of the matricies have to be the same for them to be multiplied; matrix multiplication is not commutative.\u003c/p\u003e\n\u003ch2 id=\"proof-2x2-matrices-with-real-entries-form-a-group-under-addition\"\u003eProof: 2x2 Matrices with Real Entries form a Group Under Addition\u003c/h2\u003e\n\u003ch3 id=\"closure\"\u003eClosure\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp;d\n\\end{pmatrix} + \\begin{pmatrix}\ne \u0026amp; f \\\\\ng \u0026amp; h\n\\end{pmatrix} = \\begin{pmatrix}\na+e \u0026amp; b+f \\\\\nc+g \u0026amp; d+h\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"identity\"\u003eIdentity\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp;d\n\\end{pmatrix} + \\begin{pmatrix}\n0 \u0026amp; 0 \\\\\n0 \u0026amp; 0\n\\end{pmatrix} = \\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp; d\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"inverse\"\u003eInverse\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\na \u0026amp; b \\\\\nc \u0026amp;d\n\\end{pmatrix} + \\begin{pmatrix}\n-a \u0026amp; -b \\\\\n-c \u0026amp; -d\n\\end{pmatrix} = \\begin{pmatrix}\n0 \u0026amp; 0 \\\\\n0 \u0026amp; 0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"associative\"\u003eAssociative\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\left (\n\\begin{pmatrix}\nx_1 \u0026amp; x_2 \\\\\nx_3 \u0026amp; x_4\n\\end{pmatrix} + \\begin{pmatrix}\ny_1 \u0026amp; y_2 \\\\\ny_3 \u0026amp; y_4\n\\end{pmatrix} \\right) + \\begin{pmatrix}\nz_1 \u0026amp; z_2 \\\\\nz_3 \u0026amp; z_4\n\\end{pmatrix} = \\begin{pmatrix}\n(x_1+y_1)+z_1 \u0026amp; (x_2+y_2)+z_2 \\\\\n(x_3+y_3)+z_3 \u0026amp; (x_4+y_4)+z_4\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is equal, by associativity in \\(\\mathbb{F}\\), as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_1+(y_1+z_1) \u0026amp; x_2+(y_2+z_2) \\\\\nx_3+(y_3+z_3) 
\u0026amp; x_4+(y_4+z_4)\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd finally, this is equal to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx_1 \u0026amp; x_2 \\\\\nx_3 \u0026amp; x_4\n\\end{pmatrix} + \\left (\\begin{pmatrix}\ny_1 \u0026amp; y_2 \\\\\ny_3 \u0026amp; y_4\n\\end{pmatrix} + \\begin{pmatrix}\nz_1 \u0026amp; z_2 \\\\\nz_3 \u0026amp; z_4\n\\end{pmatrix} \\right)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have therefore shown that 2x2 matricies form a group under addition.\u003c/p\u003e\n\u003ch2 id=\"proof-2x2-matrices-with-real-entries-does-not-from-a-group-under-multiplication\"\u003eProof: 2x2 Matrices with Real Entries does \u003cem\u003enot\u003c/em\u003e from a Group Under Multiplication\u003c/h2\u003e\n\u003ch3 id=\"inverse\"\u003eInverse\u003c/h3\u003e\n\u003cp\u003eThe matrix\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n0 \u0026amp; 0 \\\\\n0 \u0026amp;1\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis not invertable. In that, one cannot apply a matrix to this one to result in the multiplicative identity \\(I_2\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math530_some_matrix_manipulation/","tags":null,"title":"NUS-MATH530 Some Matrix Manipulation"},{"categories":null,"contents":"We declare known battery voltage \\(E(t)\\).\nHere are the \\(y\\) values.\n\\begin{equation} \\begin{cases} \\dv{x_1}{t} = y_{4}\\\\ \\dv{x_2}{t} = y_{3}\\\\ \\dv{x_3}{t} = y_{1}\\\\ \\dv{x_4}{t} = y_{2}\\\\ \\end{cases} \\end{equation}\nAnd here are some of the \\(x\\) values.\n\\begin{equation} \\begin{cases} \\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\ \\dv{y_1}{t}=-\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\ \\dv{y_4}{t} = -\\frac{R}{L}y_2-\\frac{2E(t)}{L} \\end{cases} \\end{equation}\nRight off the bat, we can see that we can make one substitution. 
That, given:\n\\begin{equation} \\begin{cases} \\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\ \\dv{x_4}{t} = y_{2} \\end{cases} \\end{equation}\nwe have that:\n\\begin{equation} y_2 = -\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\end{equation}\nThis renders the last expression:\n\\begin{align} \\dv{y_4}{t} \u0026amp;= -\\frac{R}{L}y_2-\\frac{2E(t)}{L} \\\\ \u0026amp;= -\\frac{R}{L}\\qty(-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R})-\\frac{2E(t)}{L} \\\\ \u0026amp;= \\qty(\\frac{2}{LC}x_2+\\frac{1}{LC}x_{3}+\\frac{2E(t)}{L})-\\frac{2E(t)}{L} \\\\ \u0026amp;= \\frac{2}{LC}x_2+\\frac{1}{LC}x_{3} \\end{align}\nSo now, we have the final unused expressions:\n\\begin{equation} \\begin{cases} \\dv{x_1}{t} = y_4 \\\\ \\dv{x_2}{t} = y_3 \\\\ \\dv{x_{3}}{t} = y_1 \\\\ \\dv{y_1}{t} = -\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\ \\dv{y_4}{t} = \\frac{2}{LC}x_2+\\frac{1}{LC}x_3 \\end{cases} \\end{equation}\n","html":"\u003cp\u003eWe declare known battery voltage \\(E(t)\\).\u003c/p\u003e\n\u003cp\u003eHere are the \\(y\\) values.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_1}{t} = y_{4}\\\\\n\\dv{x_2}{t} = y_{3}\\\\\n\\dv{x_3}{t} = y_{1}\\\\\n\\dv{x_4}{t} = y_{2}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd here are some of the \\(x\\) values.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\\n\\dv{y_1}{t}=-\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\\n\\dv{y_4}{t} = -\\frac{R}{L}y_2-\\frac{2E(t)}{L}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRight off the bat, we can see that we can make one substitution. 
That, given:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_4}{t}=-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R} \\\\\n\\dv{x_4}{t} = y_{2}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny_2 = -\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis renders the last expression:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv{y_4}{t} \u0026amp;= -\\frac{R}{L}y_2-\\frac{2E(t)}{L} \\\\\n\u0026amp;= -\\frac{R}{L}\\qty(-\\frac{2}{RC}x_2-\\frac{1}{RC}x_{3}-\\frac{2E(t)}{R})-\\frac{2E(t)}{L} \\\\\n\u0026amp;= \\qty(\\frac{2}{LC}x_2+\\frac{1}{LC}x_{3}+\\frac{2E(t)}{L})-\\frac{2E(t)}{L} \\\\\n\u0026amp;= \\frac{2}{LC}x_2+\\frac{1}{LC}x_{3}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eSo now, we have the final unused expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x_1}{t} = y_4 \\\\\n\\dv{x_2}{t} = y_3 \\\\\n\\dv{x_{3}}{t} = y_1 \\\\\n\\dv{y_1}{t} = -\\frac{1}{LC}x_2-\\frac{E(t)}{C} \\\\\n\\dv{y_4}{t} = \\frac{2}{LC}x_2+\\frac{1}{LC}x_3\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_circuts/","tags":null,"title":"NUS-MATH570 Circuits"},{"categories":null,"contents":"We need to solve this system:\n\\begin{equation} \\begin{cases} \\dv{I}{t} = -0.73U + 0.0438 + 0.4 \\dv{M}{t} \\\\ \\dv{U}{t} = 0.4I - 0.012 \\\\ \\dv{G}{t} = \\dv{M}{t}- I \\\\ M(t)=0.02\\sin (1.15t + \\phi) \\end{cases} \\end{equation}\nTo be able to work on this, let us create some functions:\n# variable t, dm = var(\u0026#34;t dm\u0026#34;) # functions I = function(\u0026#34;_I\u0026#34;)(t) # _I because i is imaginary U = function(\u0026#34;U\u0026#34;)(t) G = function(\u0026#34;G\u0026#34;)(t) # parameter phi = var(\u0026#34;phi\u0026#34;, latex_name=\u0026#34;\\phi\u0026#34;) # our equations eqns = [ diff(I,t) == -0.73*U + 0.0438 + 0.4*dm, diff(U,t) == 0.4*I - 0.012, 
diff(G,t) == dm - I ] eqns desolve(eqns, U, ivar=t, algorithm=\u0026#34;fricas\u0026#34;).expand() Great, now, we will run the laplace transform upon these equations:\n# laplace variable s = var(\u0026#34;s\u0026#34;) # laplaced functions Fi = var(\u0026#34;Fi\u0026#34;) Fu = var(\u0026#34;Fu\u0026#34;) Fg = var(\u0026#34;Fg\u0026#34;) Fm = var(\u0026#34;Fm\u0026#34;) # constants I0, U0, G0, M0 = var(\u0026#34;I0 U0 G0 M0\u0026#34;) # substitution dictionary subs = { laplace(I,t,s): Fi, laplace(U,t,s): Fu, laplace(G,t,s): Fg, laplace(M,t,s): Fm, I(0): I0, G(0): G0, U(0): U0, M(0): M0, } # laplace eqns laplace_eqns = [i.laplace(t, s).subs(subs) for i in eqns] laplace_eqns \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:20: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. I(Integer(0)): I0, \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:21: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. G(Integer(0)): G0, \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:22: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. U(Integer(0)): U0, \u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:23: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...) See http://trac.sagemath.org/5930 for details. 
M(Integer(0)): M0, [Fi*s - I0 == 0.4*Fm*s - 0.73*Fu - 0.4*M0 + 0.0438/s, Fu*s - U0 == 0.4*Fi - 0.012/s, Fg*s - G0 == Fm*s - Fi - M0, Fm == (0.02*s*sin(phi) + 0.023*cos(phi))/(s^2 + 1.3225)] And then, let us solve the Laplace solutions:\n# substitute laplace_solutions = solve(laplace_eqns, Fi, Fu, Fg, Fm, solution_dict=True)[0] laplace_solutions {Fi: 1/100*(80000*(125*I0 - 50*M0 + sin(phi))*s^4 - 2000*(3650*U0 - 46*cos(phi) - 219)*s^3 + 200*(66125*I0 - 26450*M0 + 438)*s^2 - 193085*(50*U0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s), Fu: 1/50*(5000000*U0*s^4 + 4000*(500*I0 - 200*M0 + 4*sin(phi) - 15)*s^3 + 100*(66125*U0 + 184*cos(phi) + 876)*s^2 + 26450*(100*I0 - 40*M0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s), Fg: 1/100*(200000*(50*G0 - 50*M0 + sin(phi))*s^5 - 10000*(1000*I0 - 400*M0 - 23*cos(phi) + 8*sin(phi))*s^4 + 200*(80725*G0 - 80725*M0 + 36500*U0 - 460*cos(phi) + 292*sin(phi) - 2190)*s^3 - 40*(330625*I0 - 132250*M0 - 1679*cos(phi) + 2190)*s^2 + 193085*(20*G0 - 20*M0 + 50*U0 - 3)*s - 115851)/(100000*s^6 + 161450*s^4 + 38617*s^2), Fm: 2/5*(20*s*sin(phi) + 23*cos(phi))/(400*s^2 + 529)} Now we inverse Laplace transform:\nI_s(t) = inverse_laplace(laplace_solutions[Fi], s, t) U_s(t) = inverse_laplace(laplace_solutions[Fu], s, t) G_s(t) = inverse_laplace(laplace_solutions[Fg], s, t) M_s(t) = inverse_laplace(laplace_solutions[Fm], s, t) (I_s,U_s,G_s,M_s) (t |--\u0026gt; -1/2061000*sqrt(730)*(103050*U0 + 368*cos(phi) - 6183)*sin(1/50*sqrt(730)*t) + 1/1030500*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*cos(1/50*sqrt(730)*t) + 529/51525*cos(23/20*t)*sin(phi) + 529/51525*cos(phi)*sin(23/20*t) + 3/100, t |--\u0026gt; 1/37613250*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*sin(1/50*sqrt(730)*t) + 1/103050*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) - 184/51525*cos(phi)*cos(23/20*t) + 184/51525*sin(phi)*sin(23/20*t) + 3/50, t |--\u0026gt; -1/15045300*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 
30915)*sin(1/50*sqrt(730)*t) - 1/41220*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) + 1/103050*(920*cos(phi) + 2061*sin(phi))*cos(23/20*t) + 1/103050*(2061*cos(phi) - 920*sin(phi))*sin(23/20*t) + G0 - M0 + 5/2*U0 - 3/100*t - 3/20, t |--\u0026gt; 1/50*cos(23/20*t)*sin(phi) + 1/50*cos(phi)*sin(23/20*t)) Some plots.\nI_specific = I_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) U_specific = U_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) G_specific = G_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) M_specific = M_s.subs(I0=0.024, U0=0.039, M0=0, G0=0, phi=2.35) plot(I_specific, t, 0, 10, color=\u0026#34;blue\u0026#34;) + plot(U_specific, t, 0, 10, color=\u0026#34;orange\u0026#34;) + plot(G_specific, t, 0, 10, color=\u0026#34;green\u0026#34;) + plot(M_specific, t, 0, 10, color=\u0026#34;red\u0026#34;) /Users/houliu/.sage/temp/baboon.jemoka.com/16964/tmp_sei9raar.png ","html":"\u003cp\u003eWe need to solve this system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{I}{t} = -0.73U + 0.0438 + 0.4 \\dv{M}{t} \\\\\n\\dv{U}{t} = 0.4I - 0.012 \\\\\n\\dv{G}{t} = \\dv{M}{t}- I \\\\\nM(t)=0.02\\sin (1.15t + \\phi)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo be able to work on this, let us create some functions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;t dm\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# functions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;_I\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# _I because i is imaginary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;U\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eG\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efunction\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;G\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# parameter\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;phi\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elatex_name\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\phi\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# our equations\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.73\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.0438\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.012\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ediff\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edesolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eivar\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ealgorithm\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;fricas\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexpand\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eGreat, now, we will run the laplace transform upon these equations:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# laplace variable\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;s\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# laplaced functions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fi\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFu\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fu\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fg\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eFm\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Fm\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# constants\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;I0 U0 G0 M0\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# substitution dictionary\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003eFu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eFm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eG\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eU\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# laplace eqns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_eqns\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eeqns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_eqns\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:20: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e I(Integer(0)): I0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:21: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e G(Integer(0)): G0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:22: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e U(Integer(0)): U0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u0026lt;ipython-input-236-2a2ddfe91635\u0026gt;:23: DeprecationWarning: Substitution using function-call syntax and unnamed arguments is deprecated and will be removed from a future release of Sage; you can use named arguments instead, like EXPR(x=..., y=...)\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eSee http://trac.sagemath.org/5930 for details.\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e M(Integer(0)): M0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[Fi*s - I0 == 0.4*Fm*s - 0.73*Fu - 0.4*M0 + 0.0438/s,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fu*s 
- U0 == 0.4*Fi - 0.012/s,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fg*s - G0 == Fm*s - Fi - M0,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fm == (0.02*s*sin(phi) + 0.023*cos(phi))/(s^2 + 1.3225)]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd then, let us solve the Laplace solutions:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# substitute\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolve\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_eqns\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eFu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003eFm\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esolution_dict\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eTrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e{Fi: 1/100*(80000*(125*I0 - 50*M0 + sin(phi))*s^4 - 2000*(3650*U0 - 46*cos(phi) - 219)*s^3 + 200*(66125*I0 - 26450*M0 + 438)*s^2 - 193085*(50*U0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fu: 1/50*(5000000*U0*s^4 + 4000*(500*I0 - 200*M0 + 4*sin(phi) - 15)*s^3 + 100*(66125*U0 + 184*cos(phi) + 876)*s^2 + 26450*(100*I0 - 40*M0 - 3)*s + 115851)/(100000*s^5 + 161450*s^3 + 38617*s),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fg: 1/100*(200000*(50*G0 - 50*M0 + sin(phi))*s^5 - 10000*(1000*I0 - 400*M0 - 23*cos(phi) + 8*sin(phi))*s^4 + 200*(80725*G0 - 80725*M0 + 36500*U0 - 460*cos(phi) + 292*sin(phi) - 2190)*s^3 - 40*(330625*I0 - 132250*M0 - 1679*cos(phi) + 2190)*s^2 + 193085*(20*G0 - 20*M0 + 50*U0 - 3)*s - 115851)/(100000*s^6 + 161450*s^4 + 38617*s^2),\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e Fm: 2/5*(20*s*sin(phi) + 
23*cos(phi))/(400*s^2 + 529)}\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eNow we inverse Laplace transform:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eU_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eFu\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eG_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFg\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einverse_laplace\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elaplace_solutions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eFm\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eG_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM_s\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e(t |--\u0026gt; -1/2061000*sqrt(730)*(103050*U0 + 368*cos(phi) - 6183)*sin(1/50*sqrt(730)*t) + 1/1030500*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*cos(1/50*sqrt(730)*t) + 529/51525*cos(23/20*t)*sin(phi) + 529/51525*cos(phi)*sin(23/20*t) + 3/100,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e t |--\u0026gt; 1/37613250*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*sin(1/50*sqrt(730)*t) + 1/103050*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) - 184/51525*cos(phi)*cos(23/20*t) + 184/51525*sin(phi)*sin(23/20*t) + 3/50,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e t 
|--\u0026gt; -1/15045300*sqrt(730)*(1030500*I0 - 412200*M0 - 2336*sin(phi) - 30915)*sin(1/50*sqrt(730)*t) - 1/41220*(103050*U0 + 368*cos(phi) - 6183)*cos(1/50*sqrt(730)*t) + 1/103050*(920*cos(phi) + 2061*sin(phi))*cos(23/20*t) + 1/103050*(2061*cos(phi) - 920*sin(phi))*sin(23/20*t) + G0 - M0 + 5/2*U0 - 3/100*t - 3/20,\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e t |--\u0026gt; 1/50*cos(23/20*t)*sin(phi) + 1/50*cos(phi)*sin(23/20*t))\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSome plots.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eI_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eI_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eU_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eG_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM_specific\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM_s\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esubs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.024\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eU0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0.039\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eM0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eG0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2.35\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;blue\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eU_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;orange\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eG_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;green\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eplot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM_specific\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003et\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecolor\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;red\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e/Users/houliu/.sage/temp/baboon.jemoka.com/16964/tmp_sei9raar.png\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhnus_math570_finance/","tags":null,"title":"NUS-MATH570 Finance (Laplace)"},{"categories":null,"contents":"We have:\n\\begin{equation} \\frac{2y^{2}}{9-x^{2}} + y \\dv{y}{x} + \\frac{3y}{2-x} = 0 \\end{equation}\nWe want to get rid of things; let\u0026rsquo;s begin by dividing the whole thing by \\(y\\).\n\\begin{equation} \\frac{2y}{9-x^{2}} + \\dv{y}{x} + \\frac{3}{2-x} = 0 \\end{equation}\nFinally, then, moving the right 
expression to the right, we have:\n\\begin{equation} \\frac{2y}{9-x^{2}} + \\dv{y}{x} = \\frac{-3}{2-x} \\end{equation}\nIn this case, we have functions:\n\\begin{equation} \\begin{cases} P(x) = \\frac{2}{9-x^{2}}\\\\ Q(x) = \\frac{-3}{2-x}\\\\ \\end{cases} \\end{equation}\nTaking first the top integral:\n\\begin{equation} \\int \\frac{2}{9-x^{2}} \\dd{x} = \\frac{1}{3} \\log \\qty(\\frac{x+3}{3-x}) \\end{equation}\nRaising \\(e\\) to that power, we have that:\n\\begin{equation} \\sqrt[3]{e\\frac{x+3}{3-x}} \\end{equation}\nMultiplying \\(Q(x)\\) to that expression, we have that:\n\\begin{equation} \\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x} \\end{equation}\nTherefore, our entire answer is defined as the integral function that:\n\\begin{equation} y = \\frac{1}{\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} } \\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x} \\end{equation}\n","html":"\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2y^{2}}{9-x^{2}} + y \\dv{y}{x} + \\frac{3y}{2-x} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to get rid of things; let\u0026rsquo;s begin by dividing the whole thing by \\(y\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2y}{9-x^{2}} + \\dv{y}{x} + \\frac{3}{2-x} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, then, moving the right expression to the right, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2y}{9-x^{2}} + \\dv{y}{x} = \\frac{-3}{2-x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, we have functions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nP(x) = \\frac{2}{9-x^{2}}\\\\\nQ(x) = \\frac{-3}{2-x}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking first the top integral:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{2}{9-x^{2}} \\dd{x} = \\frac{1}{3} \\log \\qty(\\frac{x+3}{3-x})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRaising \\(e\\) to that power, we have 
that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt[3]{e\\frac{x+3}{3-x}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMultiplying \\(Q(x)\\) to that expression, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, our entire answer is defined as the integral function that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\frac{1}{\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} } \\int \\frac{-3}{2-x}\\sqrt[3]{e\\cdot \\frac{x+3}{3-x}} \\dd{x}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_problem_set_1/","tags":null,"title":"NUS-MATH570 Problem Set 1"},{"categories":null,"contents":"Considering the system:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\ \\dv{y}{t} = 3x-y \\\\ \\dv{z}{t} = (3-\\sigma y)x-z\\\\ \\end{cases} \\end{equation}\nwith the initial locations \\((x_0, y_0, z_0)= (-1,1,2)\\).\nWe notice first that the top and bottom expressions as a factor in \\(x\\) multiplied by \\(y\\), which means that our system is not homogenous. Let\u0026rsquo;s expand all the expressions first.\n\\begin{equation} \\begin{cases} \\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\ \\dv{y}{t} = 3x-y \\\\ \\dv{z}{t} = 3x-\\sigma yx-z\\\\ \\end{cases} \\end{equation}\n","html":"\u003cp\u003eConsidering the system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\\n\\dv{y}{t} = 3x-y \\\\\n\\dv{z}{t} = (3-\\sigma y)x-z\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith the initial locations \\((x_0, y_0, z_0)= (-1,1,2)\\).\u003c/p\u003e\n\u003cp\u003eWe notice first that the top and bottom expressions as a factor in \\(x\\) multiplied by \\(y\\), which means that our system is not homogenous. 
Let\u0026rsquo;s expand all the expressions first.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = -2x+y+(1-\\sigma)z \\\\\n\\dv{y}{t} = 3x-y \\\\\n\\dv{z}{t} = 3x-\\sigma yx-z\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_problem_set_2/","tags":null,"title":"NUS-MATH570 Problem Set 2, Problem 1"},{"categories":null,"contents":"Intersects:\n\\begin{equation} f(x) = (x+c)^{2} \\end{equation}\n\\begin{equation} h(x) = c x \\end{equation}\nDoesn\u0026rsquo;t Intersect:\n\\begin{equation} g(x) = c e^{\\frac{x^{4}}{4}}} \\end{equation}\n\\begin{align} \u0026amp;h_1(x)-h_2(x) = c_1x-c_2x \\\\ \\Rightarrow\\ \u0026amp; 0 = c_1x-c_2x \\\\ \\Rightarrow\\ \u0026amp; 0 = x(c_1-c_2) \\end{align}\n\\begin{align} \u0026amp;g_1(x)-g_2(x) = c_1e^{\\frac{x^{4}}{4}} - c_2e^{\\frac{x^{4}}{4}} \\\\ \\Rightarrow\\ \u0026amp; 0 = \\qty(c_1 - c_2)e^{\\frac{x^{4}}{4}} \\\\ \\Rightarrow\\ \u0026amp; 0 = e^{\\frac{x^{4}}{4}}(c_1-c_2) \\end{align}\n\\begin{align} \u0026amp; f_1(x)-f_2(x)=(x+c_1)^{2}-(x+c_2)^{2} \\\\ \\Rightarrow\\ \u0026amp; 0 = (x+c_1)^{2}-(x+c_2)^{2} \\\\ \\Rightarrow\\ \u0026amp; 0 = 2x(c_1-c_2)+{c_1}^{2}+{c_2}^{2} \\end{align}\n\\begin{equation} \\dv{y}{x} + P\u0026rsquo;(x)y = Q\u0026rsquo;(x) \\end{equation}\n\\begin{align} \u0026amp;y = e^{\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)} \\int e^{P(x)}Q\u0026rsquo;(x)\\dd{x} \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)} (\\dots+C) \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)}C + \\dots \\end{align}\n\\begin{equation} h(x) \\in e^{-P(x)}C + \\dots \\end{equation}\n\\begin{equation} g(x) \\in e^{-P(x)}C + \\dots \\end{equation}\n\\begin{align} \u0026amp;0 = (e^{-P(x)}C_1+\\dots)-(e^{-P(x)}C_2 + \\dots) \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)}C_1-e^{-P(x)}C_2 \\\\ \\Rightarrow\\ \u0026amp; e^{-P(x)}(C_1-C_2) = 0 
\\end{align}\n","html":"\u003cp\u003eIntersects:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = (x+c)^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh(x) = c x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDoesn\u0026rsquo;t Intersect:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ng(x) = c e^{\\frac{x^{4}}{4}}}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;h_1(x)-h_2(x) = c_1x-c_2x \\\\\n\\Rightarrow\\ \u0026amp; 0 = c_1x-c_2x \\\\\n\\Rightarrow\\ \u0026amp; 0 = x(c_1-c_2)\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;g_1(x)-g_2(x) = c_1e^{\\frac{x^{4}}{4}} - c_2e^{\\frac{x^{4}}{4}} \\\\\n\\Rightarrow\\ \u0026amp; 0 = \\qty(c_1 - c_2)e^{\\frac{x^{4}}{4}} \\\\\n\\Rightarrow\\ \u0026amp; 0 = e^{\\frac{x^{4}}{4}}(c_1-c_2)\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; f_1(x)-f_2(x)=(x+c_1)^{2}-(x+c_2)^{2} \\\\\n\\Rightarrow\\ \u0026amp; 0 = (x+c_1)^{2}-(x+c_2)^{2} \\\\\n\\Rightarrow\\ \u0026amp; 0 = 2x(c_1-c_2)+{c_1}^{2}+{c_2}^{2}\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} + P\u0026rsquo;(x)y = Q\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;y = e^{\\int P\u0026rsquo;(x)\\dd{x}} \\int e^{\\int P\u0026rsquo;(x)\\dd{x}} Q\u0026rsquo;(x)\\dd{x} \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)} \\int e^{P(x)}Q\u0026rsquo;(x)\\dd{x} \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)} (\\dots+C) \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)}C + \\dots\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{equation}\nh(x) \\in e^{-P(x)}C + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ng(x) \\in e^{-P(x)}C + \\dots\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;0 = (e^{-P(x)}C_1+\\dots)-(e^{-P(x)}C_2 + \\dots) \\\\\n\\Rightarrow\\ \u0026amp; e^{-P(x)}C_1-e^{-P(x)}C_2 \\\\\n\\Rightarrow\\ \u0026amp; 
e^{-P(x)}(C_1-C_2) = 0\n\\end{align}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_research_question_1/","tags":null,"title":"NUS-MATH570 Research Question 1"},{"categories":null,"contents":"We are given a set of expressions:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = \\frac{x}{w}\\\\ \\dv{y}{t} = \\frac{xz}{w} \\\\ \\dv{z}{t} = \\beta y \\\\ \\dv{w}{t} = \\beta y \\end{cases} \\end{equation}\nWe are asked to analyze the solutions to this system, its periodicity, etc.\nStability Analysis The immediate thing to do is to shove all of this into a Jacobian matrix\u0026mdash;not for linearnalization, but to check how the slope changes. We will take the eigenvalues of the matrix at the critical points of the function, which will tell us whether or not the functions converge or diverge from those points.\nLet\u0026rsquo;s go about doing that. Let us declare:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = \\frac{x}{w} = f(x,y,z,w)\\\\ \\dv{y}{t} = \\frac{xz}{w} = g(x,y,z,w)\\\\ \\dv{z}{t} = \\beta y = h(x,y,z,w)\\\\ \\dv{w}{t} = \\beta y = j(x,y,z,w) \\end{cases} \\end{equation}\nThen, the Jacobian is (with each cell being \\(\\dv{row}{column}\\)):\ndx dy dz dw df 1/w 0 0 -x/w^2 dg z/w 0 0 -(xz)/w^2 dh 0 beta 0 0 dj 0 beta 0 0 Properly writing that out, this means that:\n\\begin{equation} J = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{x}{w^{2}} \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{xz}{w^{2}} \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0) \\end{equation}\nNow, let us solve for the critical points of this expression. 
Setting all expressions to \\(0\\):\n\\begin{equation} \\begin{cases} f = 0 \\\\ g = 0 \\\\ h = 0 \\\\ j = 0 \\end{cases} \\end{equation}\nwe have that:\n\\begin{equation} f(\\dots) = \\frac{x}{w} = 0 \\end{equation}\nso \\(x=0\\).\nWe have also:\n\\begin{equation} h(\\dots) = \\beta y= 0 \\end{equation}\ntherefore, \\(y = 0\\).\nEverything else is a free variable.\nSubstituting that into our expressions, we have:\n\\begin{equation} J* = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0) \\end{equation}\nWe are now ready to eigenvalueize. Using technology:\nw,z,b = var(\u0026#34;w z b\u0026#34;) M = matrix([[1/w, 0,0,0], [z/w,0,0,0], [0,b,0,0], [0,b,0,0]]) M [1/w 0 0 0] [z/w 0 0 0] [ 0 b 0 0] [ 0 b 0 0] So, the moment of truth:\nM.eigenvalues() [1/w, 0, 0, 0] Excellent, so we have two eigenvalues: \\(\\frac{1}{w}\\) and \\(0\\). The \\(0\\) eigenvalue indicates to us that the system has \u0026ldquo;neutral stability\u0026rdquo;: that there will be results for which our system: while not exponentially increasing towards asymptotically, does not settle to a stable point.\nBehavior to Extrema The next natural question is then\u0026mdash;even if our system doesn\u0026rsquo;t settle down, does it become larger over time? For this, we turn to the \\(\\frac{1}{w}\\) term. If our initial conditions render negative \\(w\\) eventually at that point, our system will converge to unstable but containable (i.e. does not go to infinity over time); otherwise, it does becomes unstable AND uncontainable (goes to infinity.)\nTo do this, we need to check two things; regrettably, it seems like we could\u0026rsquo;t end up with a properly described solution to evolve our variables analytically. 
However, we can leverage the Lipschitz condition and hand-fisted dimensional analysis to clue us in about the behavior of the system.\nContinuity Recall again that our system is:\nWe are given a set of expressions:\n\\begin{equation} \\begin{cases} \\dv{x}{t} = \\frac{x}{w}\\\\ \\dv{y}{t} = \\frac{xz}{w} \\\\ \\dv{z}{t} = \\beta y \\\\ \\dv{w}{t} = \\beta y \\end{cases} \\end{equation}\nTo check the Lipschitz Continuity is actually not super difficult. Research indicates that the Litpschitz condition extends in the expected manner into multiple dimensions, checking continuity with a partial in each direction.\nThe actual partials of the terms on the right, though, are really only discontinuous in this case when we have something under a fraction\u0026mdash;there is fortunately no weird exponential/log/sinusoidal/radical here. Evidently, then, we loose Lipschitz continuity at \\(w=0\\). As long as we don\u0026rsquo;t cross that line, anything to the left or right of it exists and is unique(?) is each dimension.\nHam-fisting Dimensional Analysis The initial conditions asks us for starting with \\(w(0)=5\\sqrt 5\\). Recall that we are interested in the value of \\(w\\) at \\((x,y)=(0,0)\\).\nFurthermore, recall the Lipschitz condition we discussed above. That the function is Lipschitz continuous at two boundary intervals: between \\((-\\infty, 0)\\) and \\((0, \\infty )\\). Starting at the conditions of \\(w(0) = 5\\sqrt{5}\\) indicates that there will be no way for \\(w\\) to cross into \\(\\frac{1}{w} \u0026lt;0\\) territory.\nNote, again, that the eigenvalues of the Jacobian of the system are \\(\\{0, \\frac{1}{w}\\}\\), therefore, a positive \\(\\frac{1}{w}\\) will indicate that the system tends towards infinity as there is one positive eigenvalue.\nHowever, if we started at a negative \\(w\\) in the first place, we will equally be unable to use the same initial conditions to cross into \\(\\frac{1}{w} \u0026gt; 0\\) territory. 
Because of this, conditions that begin with negative \\(w\\) will be unstable but not asymptotically increasing as there will be no positive eigenvalues of its Jacobian at any given point.\n","html":"\u003cp\u003eWe are given a set of expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = \\frac{x}{w}\\\\\n\\dv{y}{t} = \\frac{xz}{w} \\\\\n\\dv{z}{t} = \\beta y \\\\\n\\dv{w}{t} = \\beta y\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are asked to analyze the solutions to this system, its periodicity, etc.\u003c/p\u003e\n\u003ch2 id=\"stability-analysis\"\u003eStability Analysis\u003c/h2\u003e\n\u003cp\u003eThe immediate thing to do is to shove all of this into a Jacobian matrix\u0026mdash;not for linearnalization, but to check how the slope changes. We will take the eigenvalues of the matrix at the critical points of the function, which will tell us whether or not the functions converge or diverge from those points.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s go about doing that. 
Let us declare:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = \\frac{x}{w} = f(x,y,z,w)\\\\\n\\dv{y}{t} = \\frac{xz}{w} = g(x,y,z,w)\\\\\n\\dv{z}{t} = \\beta y = h(x,y,z,w)\\\\\n\\dv{w}{t} = \\beta y = j(x,y,z,w)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen, the Jacobian is (with each cell being \\(\\dv{row}{column}\\)):\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003edx\u003c/th\u003e\n\u003cth\u003edy\u003c/th\u003e\n\u003cth\u003edz\u003c/th\u003e\n\u003cth\u003edw\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003edf\u003c/td\u003e\n\u003ctd\u003e1/w\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e-x/w^2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edg\u003c/td\u003e\n\u003ctd\u003ez/w\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e-(xz)/w^2\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edh\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003ebeta\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edj\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003ebeta\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eProperly writing that out, this means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{x}{w^{2}} \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; -\\frac{xz}{w^{2}} \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, let us solve for the critical points of this expression. 
Setting all expressions to \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nf = 0 \\\\\ng = 0 \\\\\nh = 0 \\\\\nj = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(\\dots) = \\frac{x}{w} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso \\(x=0\\).\u003c/p\u003e\n\u003cp\u003eWe have also:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh(\\dots) = \\beta y= 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore, \\(y = 0\\).\u003c/p\u003e\n\u003cp\u003eEverything else is a free variable.\u003c/p\u003e\n\u003cp\u003eSubstituting that into our expressions, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ* = \\mqty(\\frac{1}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ \\frac{z}{w} \u0026amp; 0 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; \\beta \u0026amp; 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe are now ready to eigenvalueize. 
Using technology:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003evar\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;w z b\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ematrix\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ez\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ew\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1/w 0 0 
0]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[z/w 0 0 0]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[ 0 b 0 0]\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[ 0 b 0 0]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo, the moment of truth:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-sage\" data-lang=\"sage\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eeigenvalues\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e[1/w, 0, 0, 0]\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eExcellent, so we have two eigenvalues: \\(\\frac{1}{w}\\) and \\(0\\). 
The \\(0\\) eigenvalue indicates to us that the system has \u0026ldquo;\u003ca href=\"/posts/kbhneutral_stability/\"\u003eneutral stability\u003c/a\u003e\u0026rdquo;: that there will be results for which our system: while not exponentially increasing towards asymptotically, does not settle to a stable point.\u003c/p\u003e\n\u003ch2 id=\"behavior-to-extrema\"\u003eBehavior to Extrema\u003c/h2\u003e\n\u003cp\u003eThe next natural question is then\u0026mdash;even if our system doesn\u0026rsquo;t settle down, does it become larger over time? For this, we turn to the \\(\\frac{1}{w}\\) term. If our initial conditions render negative \\(w\\) eventually at that point, our system will converge to unstable but containable (i.e. does not go to infinity over time); otherwise, it does becomes unstable AND uncontainable (goes to infinity.)\u003c/p\u003e\n\u003cp\u003eTo do this, we need to check two things; regrettably, it seems like we could\u0026rsquo;t end up with a properly described solution to evolve our variables analytically. However, we can leverage the Lipschitz condition and hand-fisted dimensional analysis to clue us in about the behavior of the system.\u003c/p\u003e\n\u003ch3 id=\"continuity\"\u003eContinuity\u003c/h3\u003e\n\u003cp\u003eRecall again that our system is:\u003c/p\u003e\n\u003cp\u003eWe are given a set of expressions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = \\frac{x}{w}\\\\\n\\dv{y}{t} = \\frac{xz}{w} \\\\\n\\dv{z}{t} = \\beta y \\\\\n\\dv{w}{t} = \\beta y\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo check the Lipschitz Continuity is actually not super difficult. 
Research indicates that the Litpschitz condition extends in the expected manner into multiple dimensions, checking continuity with a partial in each direction.\u003c/p\u003e\n\u003cp\u003eThe actual partials of the terms on the right, though, are really only discontinuous in this case when we have something under a fraction\u0026mdash;there is fortunately no weird exponential/log/sinusoidal/radical here. Evidently, then, we loose Lipschitz continuity at \\(w=0\\). As long as we don\u0026rsquo;t cross that line, anything to the left or right of it exists and is unique(?) is each dimension.\u003c/p\u003e\n\u003ch3 id=\"ham-fisting-dimensional-analysis\"\u003eHam-fisting Dimensional Analysis\u003c/h3\u003e\n\u003cp\u003eThe initial conditions asks us for starting with \\(w(0)=5\\sqrt 5\\). Recall that we are interested in the value of \\(w\\) at \\((x,y)=(0,0)\\).\u003c/p\u003e\n\u003cp\u003eFurthermore, recall the Lipschitz condition we discussed above. That the function is Lipschitz continuous at two boundary intervals: between \\((-\\infty, 0)\\) and \\((0, \\infty )\\). Starting at the conditions of \\(w(0) = 5\\sqrt{5}\\) indicates that there will be no way for \\(w\\) to cross into \\(\\frac{1}{w} \u0026lt;0\\) territory.\u003c/p\u003e\n\u003cp\u003eNote, again, that the eigenvalues of the Jacobian of the system are \\(\\{0, \\frac{1}{w}\\}\\), therefore, a positive \\(\\frac{1}{w}\\) will indicate that the system tends towards infinity as there is one positive eigenvalue.\u003c/p\u003e\n\u003cp\u003eHowever, if we started at a negative \\(w\\) in the first place, we will equally be unable to use the same initial conditions to cross into \\(\\frac{1}{w} \u0026gt; 0\\) territory. 
Because of this, conditions that begin with negative \\(w\\) will be unstable but not asymptotically increasing as there will be no positive eigenvalues of its Jacobian at any given point.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-11-27_16-28-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhnus_math570_supply_demand/","tags":null,"title":"NUS-MATH570 Supply Demand"},{"categories":null,"contents":" Instruments Effects Feeling Dynamics Process/Production 01/09/2023 ","html":"\u003cul\u003e\n\u003cli\u003eInstruments\u003c/li\u003e\n\u003cli\u003eEffects\u003c/li\u003e\n\u003cli\u003eFeeling\u003c/li\u003e\n\u003cli\u003eDynamics\u003c/li\u003e\n\u003cli\u003eProcess/Production\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"01-09-2023\"\u003e01/09/2023\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_mus150_critical_listening/","tags":null,"title":"NUS-MUS150 Critical Listening"},{"categories":null,"contents":" el plastico nuevo de nopal esta un producto de plástico biodegrable, que substituír plastico tradiciónal\nsi el plasticó ir al mar, los animales pueden simple comerlo\nel proceso de manufactura son sosinable, que la planta puede vivir a producir más ojas del plastico\nproteinsas\ngelinicas\ncorbantes\n","html":"\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eel plastico nuevo de nopal esta un producto de plástico biodegrable, que substituír plastico tradiciónal\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esi el plasticó ir al mar, los animales pueden simple comerlo\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eel proceso de manufactura son sosinable, que la planta puede vivir a producir más ojas del 
plastico\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eproteinsas\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003egelinicas\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecorbantes\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_plastico_biodegrable/","tags":null,"title":"NUS-SPAN502 Plastico Biodegrable"},{"categories":null,"contents":"Vocabulario alejar forzar el fracaso ocultar atemorizarte prescindir exige asegurar Preguntas ¿Hay problemas a largo plazo con el sistema del calificación? ¿Como medimos los éxitos del sistema nueva sin prejuicios las preferencias propias de los estudiantes? ¿Necesita estudiantes motivados para el desarrollo complete del sistema? ¿Hay diferencias sociocultural que puede influir los resultados o el implementación del sistema? ¿Cómo conducta examines del rendimiento de los estudiantes a través escuelas con implementaciones diferencies del sistema? ","html":"\u003ch2 id=\"vocabulario\"\u003eVocabulario\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ealejar\u003c/li\u003e\n\u003cli\u003eforzar\u003c/li\u003e\n\u003cli\u003eel fracaso\u003c/li\u003e\n\u003cli\u003eocultar\u003c/li\u003e\n\u003cli\u003eatemorizarte\u003c/li\u003e\n\u003cli\u003eprescindir\u003c/li\u003e\n\u003cli\u003eexige\u003c/li\u003e\n\u003cli\u003easegurar\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"preguntas\"\u003ePreguntas\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e¿Hay problemas a largo plazo con el sistema del calificación?\u003c/li\u003e\n\u003cli\u003e¿Como medimos los éxitos del sistema nueva sin prejuicios las preferencias propias de los estudiantes?\u003c/li\u003e\n\u003cli\u003e¿Necesita estudiantes motivados para el desarrollo complete del sistema?\u003c/li\u003e\n\u003cli\u003e¿Hay diferencias sociocultural que puede influir los resultados o el implementación del sistema?\u003c/li\u003e\n\u003cli\u003e¿Cómo conducta examines del rendimiento de los 
estudiantes a través escuelas con implementaciones diferencies del sistema?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_tarea_2/","tags":null,"title":"NUS-SPAN502 Tarea 2"},{"categories":null,"contents":"Vocabularios Nuevos creciente jornada exigir desempeño subir Preguntas ¿En el primer lugar, porqué tenemos semanas de cinco días? ¿Para los ciudades con recursos de educación abundante, hay un necesario real de trabar en jornadas de cuatro días? ¿Tenemos de verdad un sistema responsable para examinar los diferencias de cantidad de educación a través de el palmo entero del proceso de educación de un estudiante? ¿Existen presiones políticas que motivó el propósito? ¿En realidad, existe un problema muy fundamental que causó los problemas que vemos hoy? ","html":"\u003ch2 id=\"vocabularios-nuevos\"\u003eVocabularios Nuevos\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecreciente\u003c/li\u003e\n\u003cli\u003ejornada\u003c/li\u003e\n\u003cli\u003eexigir\u003c/li\u003e\n\u003cli\u003edesempeño\u003c/li\u003e\n\u003cli\u003esubir\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"preguntas\"\u003ePreguntas\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e¿En el primer lugar, porqué tenemos semanas de cinco días?\u003c/li\u003e\n\u003cli\u003e¿Para los ciudades con recursos de educación abundante, hay un necesario real de trabar en jornadas de cuatro días?\u003c/li\u003e\n\u003cli\u003e¿Tenemos de verdad un sistema responsable para examinar los diferencias de cantidad de educación a través de el palmo entero del proceso de educación de un estudiante?\u003c/li\u003e\n\u003cli\u003e¿Existen presiones políticas que motivó el propósito?\u003c/li\u003e\n\u003cli\u003e¿En realidad, existe un problema muy fundamental que causó los problemas que vemos hoy?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_tarea_4/","tags":null,"title":"NUS-SPAN502 Tarea 4"},{"categories":null,"contents":" Lógico 
Sentimiento Acuerdo Cancelado/a Cuentas Censurado/a Plataforma Fraces Libertad de Expresión ","html":"\u003cul\u003e\n\u003cli\u003eLógico\u003c/li\u003e\n\u003cli\u003eSentimiento\u003c/li\u003e\n\u003cli\u003eAcuerdo\u003c/li\u003e\n\u003cli\u003eCancelado/a\u003c/li\u003e\n\u003cli\u003eCuentas\u003c/li\u003e\n\u003cli\u003eCensurado/a\u003c/li\u003e\n\u003cli\u003ePlataforma\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fraces\"\u003eFraces\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLibertad de Expresión\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhnus_span502_vocab/","tags":null,"title":"NUS-SPAN502 Vocab"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhobjects/","tags":null,"title":"object"},{"categories":null,"contents":" at time \\(t\\), agent received observation \\(o_{t}\\) agent then chooses \\(a_{t}\\) based on what it knows through some kind of process in order to affect a possibly nondeterministic change on the environment the agent choose an \\(a_{t}\\) under the existance of many types of Uncertainty. 
","html":"\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-26_10-14-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eat time \\(t\\), \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e received observation \\(o_{t}\\)\u003c/li\u003e\n\u003cli\u003eagent then chooses \\(a_{t}\\) based on what it knows through some kind of process in order to affect a possibly nondeterministic change on the environment\n\u003cul\u003e\n\u003cli\u003ethe agent choose an \\(a_{t}\\) under the existance of many \u003ca href=\"/posts/kbhuncertainty/\"\u003etypes of Uncertainty\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhobserve_act_cycle/","tags":null,"title":"observe-act cycle"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhof_our_spiritual_strivings/","tags":null,"title":"Of Our Spiritual Strivings"},{"categories":null,"contents":"\\begin{equation} V = IR \\end{equation}\nwhere, \\(V\\) is the voltage across the resister, \\(I\\) the current, and \\(R\\) the resistance.\npower (physics) the rate at which electrical energy dissipates into heat is called power:\n\\begin{equation} P = IV \\end{equation}\nwhere, \\(P\\) is power, and \\(I\\) the current, and \\(V\\) the voltage.\nAlternate formulation of power:\n\\begin{equation} P = I V = \\frac{V^{2}}{R} = I^{2} R = \\dv{E}{t} \\end{equation}\n","html":"\u003cp\u003e\\begin{equation}\nV = IR\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(V\\) is the voltage across the resister, \\(I\\) the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e, and \\(R\\) the \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"power--physics\"\u003epower (physics)\u003c/h2\u003e\n\u003cp\u003ethe rate at which electrical energy dissipates into heat is called \u003ca 
href=\"#power--physics\"\u003epower\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP = IV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(P\\) is power, and \\(I\\) the \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e, and \\(V\\) the \u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAlternate formulation of power:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP = I V = \\frac{V^{2}}{R} = I^{2} R = \\dv{E}{t}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhohm_s_law/","tags":null,"title":"Ohm's Law"},{"categories":null,"contents":"Everyday, at 11:00 PM exactly, I stop time tracking.\nAnd it feels somehow as the most liberating time of my day. When I truly feels like I have my time back to myself,\n","html":"\u003cp\u003eEveryday, at 11:00 PM exactly, I stop time tracking.\u003c/p\u003e\n\u003cp\u003eAnd it feels somehow as the most liberating time of my day. When I truly feels like I have my time back to myself,\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproductivity/","tags":null,"title":"On the Clock"},{"categories":null,"contents":"We have an expression:\n\\begin{equation} B = \\frac{FL^{3}}{3EI} = \\frac{N m^{3}}{3 p m^{4}} = \\frac{Nm^{3}}{\\frac{N}{m^{2}}m^{4}} = m \\end{equation}\nWith constants:\n\\(B\\): \\(m\\), deflection at the point of force application \\(F\\): \\(N\\), force applied \\(L\\): \\(m\\), distance between fixed point and point of force application \\(E\\): \\(p=\\frac{N}{m^{2}}\\), elastic modulus \\(I\\): \\(m^{4}\\), second moment of area As per measured:\n\\(B\\): \\(9.15 \\cdot 10^{-4} m\\) \\(F\\): \\(20N\\) \\(L\\): \\(9.373 \\cdot 10^{-2} m\\) \\(I\\): \\(1.37 \\cdot 10^{-10} m^{4}\\) = \\(\\frac{WH^{3}}{12}\\) = \\(\\frac{(6.25 \\cdot 10^{-3})(6.4 \\cdot 10^{-3})^{3}}{12}\\) Theoretical:\n\\(E\\): \\(7 \\cdot 10^{10} P\\) As calculated:\n\\(B\\): \\(5.74 \\cdot 10^{-4} m\\) ","html":"\u003cp\u003eWe have 
an expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = \\frac{FL^{3}}{3EI} = \\frac{N m^{3}}{3 p m^{4}} = \\frac{Nm^{3}}{\\frac{N}{m^{2}}m^{4}} = m\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWith constants:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(B\\): \\(m\\), deflection at the point of force application\u003c/li\u003e\n\u003cli\u003e\\(F\\): \\(N\\), force applied\u003c/li\u003e\n\u003cli\u003e\\(L\\): \\(m\\), distance between fixed point and point of force application\u003c/li\u003e\n\u003cli\u003e\\(E\\): \\(p=\\frac{N}{m^{2}}\\), elastic modulus\u003c/li\u003e\n\u003cli\u003e\\(I\\): \\(m^{4}\\), second moment of area\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs per measured:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(B\\): \\(9.15 \\cdot 10^{-4} m\\)\u003c/li\u003e\n\u003cli\u003e\\(F\\): \\(20N\\)\u003c/li\u003e\n\u003cli\u003e\\(L\\): \\(9.373 \\cdot 10^{-2} m\\)\u003c/li\u003e\n\u003cli\u003e\\(I\\): \\(1.37 \\cdot 10^{-10} m^{4}\\) = \\(\\frac{WH^{3}}{12}\\) = \\(\\frac{(6.25 \\cdot 10^{-3})(6.4 \\cdot 10^{-3})^{3}}{12}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTheoretical:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(E\\): \\(7 \\cdot 10^{10} P\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eAs calculated:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(B\\): \\(5.74 \\cdot 10^{-4} m\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhone_shot_deformation/","tags":null,"title":"One-Shot Deformation"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhonline_m/","tags":null,"title":"online m"},{"categories":null,"contents":"For elements with large possible future state space, we can\u0026rsquo;t just iterate over all states to get a for every state, and THEN go about using the to perform actions.\nTherefore, we employ a technique called receding horizon planning: planning from the current state upwards to a maximum horizon \\(d\\), 
figure out what the best SINGLE action would be given that information for only this state, and then replan.\nHere are the main methods of doing this:\nRollout with Lookahead: for each possible next action, sample a transition-weighted random trajectory using some policy, use whatever discounted future reward you got for that as your utility : for each possible next action, search through each possible next action until you hit the depth required, calculate the instantaneous reward at that point, and backup until you have recorded the sequence of actions that maybe best, and then return the first action in that sequence : same algorithm as , but you bound your search based on the theoretical upper-bound of the q-value : same core algorithm as , but instead of calculating a based on the , you sample a set of possible next states and average their future utilities : use function to come up with a bunch of possible actions to try, and try them with discounts as you try them Additional Information generative model we perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\nopen-loop planning vs close-loop planning open loop planning Instead of doing all the methods above, which all requires state information of the future, open loop planning uses an exogenously chosen sequence of actions and tries to simply:\nMaximize: \\(U(a_1, \u0026hellip;, a_{n})\\)\nwhere the choice of actions doesn\u0026rsquo;t change regardless of eventual state is.\nFor high dimensional systems, where is hard to do closed loop systems, this will work better.\n","html":"\u003cp\u003eFor elements with large possible future state space, we can\u0026rsquo;t just iterate over all states to get a for every state, and \u003cstrong\u003eTHEN\u003c/strong\u003e go about using the to perform actions.\u003c/p\u003e\n\u003cp\u003eTherefore, we employ a technique called 
\u003cstrong\u003ereceding horizon planning\u003c/strong\u003e: planning from the current state upwards to a maximum horizon \\(d\\), figure out what the best \u003cstrong\u003eSINGLE action\u003c/strong\u003e would be given that information for only this state, and then replan.\u003c/p\u003e\n\u003cp\u003eHere are the main methods of doing this:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e: for each possible next action, sample a transition-weighted random trajectory using some policy, use whatever discounted future reward you got for that as your utility\u003c/li\u003e\n\u003cli\u003e: for each possible next action, search through each possible next action until you hit the depth required, calculate the instantaneous reward at that point, and backup until you have recorded the sequence of actions that maybe best, and then return the first action in that sequence\u003c/li\u003e\n\u003cli\u003e: same algorithm as , but you bound your search based on the theoretical upper-bound of the q-value\u003c/li\u003e\n\u003cli\u003e: same core algorithm as , but instead of calculating a based on the , you sample a set of possible next states and average their future utilities\u003c/li\u003e\n\u003cli\u003e: use function to come up with a bunch of possible actions to try, and try them with discounts as you try them\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"generative-model\"\u003egenerative model\u003c/h3\u003e\n\u003cp\u003ewe perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\u003c/p\u003e\n\u003ch3 id=\"open-loop-planning-vs-close-loop-planning\"\u003eopen-loop planning vs close-loop planning\u003c/h3\u003e\n\u003ch4 id=\"open-loop-planning\"\u003eopen 
loop planning\u003c/h4\u003e\n\u003cp\u003eInstead of doing all the methods above, which all requires state information of the future, \u003ca href=\"#open-loop-planning\"\u003eopen loop planning\u003c/a\u003e uses an exogenously chosen sequence of actions and tries to simply:\u003c/p\u003e\n\u003cp\u003eMaximize: \\(U(a_1, \u0026hellip;, a_{n})\\)\u003c/p\u003e\n\u003cp\u003ewhere the choice of actions doesn\u0026rsquo;t change regardless of eventual state is.\u003c/p\u003e\n\u003cp\u003eFor high dimensional systems, where is hard to do closed loop systems, this will work better.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhonline_planning/","tags":null,"title":"online planning"},{"categories":null,"contents":"These are basically MDP methods but tweaked. We make some changes:\nfor everywhere that we need a state, we use a belief to sample the next state given an action (random next step), we call our generative model to get a new observation, and call update(b,a,o) with our filter to propegate our belief forward if we need an action-value, we use the one-step lookahead in POMDP: \\begin{equation} Q(b,a) = R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o))) \\end{equation}\nwhere,\n\\begin{equation} R(b,a) = \\sum_{s}^{} R(s,a)b(s) \\end{equation}\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\nand where, if needed (i.e. 
most algorithms estimate this):\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b \\end{equation}\nwe also revise our generative model:\neach step requires belief and action, and we sample from our belief a next state, propegate belief forward, and use a traditional generative model to get the rewards and next states (which we don\u0026rsquo;t use).\nReceeding Horizon: plan to a depth \\(d\\), select action, replan Rollout with Lookahead: simple to implement, no grantees of optimality or even boundedness Forward Search: quite expensive\u0026mdash;exponential given the size of horizon monte-carlo tree search, but instead our counts are stored not in terms of states (which we don\u0026rsquo;t know), but sequences of action observations: \\(h = a_1o_2a_2o_1a_2o_1\\) etc. Then, the counter takes \\(N(h,a)\\) as input: will head towards optimality and it requires a generative model to sample tracks Branch and Bound, but you use the POMDP Approximation methods to estimate the upper and lower bounds of your utility: its Forward Search with pruning Sparse Sampling: its Forward Search, but the next action-value is determined by a finite sampling of next observations and rewards and you average their future utility. that is, the action-value before depth \\(d\\) is obtained by: \\(Q(b,a) = \\frac{1}{m} \\sum_{i=1}^{m} \\qty(r_{a}^{(i)}+\\gammaU_{d-1}(Update(b,a,o_{a}^{(i)})))\\) ","html":"\u003cp\u003eThese are basically \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e methods but tweaked. 
We make some changes:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efor everywhere that we need a state, we use a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eto sample the next state given an action (random next step), we call our generative model to get a new observation, and call \u003ccode\u003eupdate(b,a,o)\u003c/code\u003e with our \u003ca href=\"/posts/kbhfilters/\"\u003efilter\u003c/a\u003e to propegate our \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e forward\u003c/li\u003e\n\u003cli\u003eif we need an \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e, we use the \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e:\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\begin{equation}\nQ(b,a) = R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U^{\\Gamma}(update(b,a,o)))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(b,a) = \\sum_{s}^{} R(s,a)b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand where, if needed (i.e. 
most algorithms estimate this):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe also revise our \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eeach step requires belief and action, and we sample from our belief a next state, propegate \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e forward, and use a traditional \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e to get the rewards and next states (which we don\u0026rsquo;t use).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreceeding_horizon/\"\u003eReceeding Horizon\u003c/a\u003e: plan to a depth \\(d\\), select action, replan\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e: simple to implement, no grantees of optimality or even boundedness\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e: quite expensive\u0026mdash;exponential given the size of horizon\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e, but instead our counts are stored not in terms of states (which we don\u0026rsquo;t know), but sequences of action observations: \\(h = a_1o_2a_2o_1a_2o_1\\) etc. 
Then, the counter takes \\(N(h,a)\\) as input: will head towards optimality and it requires a \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e to sample tracks\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e, but you use the \u003ca href=\"/posts/kbhpomdp_approximation/\"\u003ePOMDP Approximation\u003c/a\u003e methods to estimate the upper and lower bounds of your utility: its \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e with pruning\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsparse_sampling/\"\u003eSparse Sampling\u003c/a\u003e: its \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but the next \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e is determined by a finite sampling of next observations and rewards and you average their future utility. that is, the action-value before depth \\(d\\) is obtained by: \\(Q(b,a) = \\frac{1}{m} \\sum_{i=1}^{m} \\qty(r_{a}^{(i)}+\\gammaU_{d-1}(Update(b,a,o_{a}^{(i)})))\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhonline_pomdp_methods/","tags":null,"title":"Online POMDP Methods"},{"categories":null,"contents":"The Open Voice Brain Model is a audio processing architecture proposed by Laguarta 2021 for audio/biomarker correlation work.\nHere\u0026rsquo;s a fairly self-explanatory figure:\nThe model outputs an AD diagnoses as well as a longitudinal correlation with Memory, Mood, and Respiratory biomarkers.\nThis is then the embedding that they are proposing for use by other tasks.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhopen_voice_brain_model/\"\u003eOpen Voice Brain Model\u003c/a\u003e is a audio processing architecture proposed by \u003ca href=\"/posts/kbhlaguarta_2021/\"\u003eLaguarta 2021\u003c/a\u003e for audio/biomarker correlation 
work.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a fairly self-explanatory figure:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-36-29_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe model outputs an AD diagnoses as well as a longitudinal correlation with Memory, Mood, and Respiratory biomarkers.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_21-38-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis is then the embedding that they are proposing for use by other tasks.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhopen_voice_brain_model/","tags":null,"title":"Open Voice Brain Model"},{"categories":null,"contents":"OpenSMILE is a proprietary audio feature exaction tool.\nSite.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e is a proprietary audio feature exaction tool.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.audeering.com/research/opensmile/\"\u003eSite\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhopensmile/","tags":null,"title":"OpenSMILE"},{"categories":null,"contents":"cs111.stanford.edu\nTopics CS111 leverages CS107 experience to show operating systems and how they function.\nWhat is an operating system operating system sits between hardware and user programs most importantly: manages shared resources to allow the program to run CPU: gets which program to do work and for how long RAM: how much memory to give to a program Hard Drive Main Events concurrency: switch between processes so quickly to only use on core while concurrent access memory: memory addresses are mostly scattered everywhere \u0026mdash; everything include the lowest level including CPU uses only virtual memory, translated by the OS file management i/o devices networking: CS144 security: interactions between users in a system Main Components of the Course File Systems Process and Multiprocess Threads Virtual 
Memory + Paging + limits Modern Technologies/Niceties What\u0026rsquo;s Next SU-CS111 Outline\nContent filesystem How can we design file systems to manage files on disk, and what are the tradeoffs inherent in designing them. How can we interact with the filesystem?\nfilesystem Unix V6 Filesystem Freelist and Block Cache disk crash recovery fsck ordered writes journaling: write-ahead logging syscalls kernel mode files handling file file descriptor multiprocessing How are programs run, how to spawn subprograms, and how they work in general?\nmultiprocessing fork execvp waitpid shell pipe and ipc Multithreading How do we implement a single program within a single program, and how do we not have race conditions\nmultithreading processes vs threads race condition and mutex passing by reference permits model busy waiting condition variable Unique Lock trust how do we trust software?\ntrust by assumption trust by inference trust by substitution patterns monitor pattern dispatches assembly review process control block dispatching trap interrupts context switch scheduling preemption virtual memory \u0026ldquo;how can one set of memory be shared across several processes\u0026rdquo;\nvirtual memory dynamic address translation demand paging clock algorithm model technologies modern OS trust and OS trust An example of a good time:\nvoid main() { // make pipe int fds[2]; pipe(fds); pid_t pidp = fork(); if (pidp == 0) { close(pidp[1]); dup2(pidp[0], STDIN_FILENO); close(pidp[0]); execvp(\u0026#34;\u0026#34;, ...); // throw-a-tantrum exit(1); } close(pidp[0]); return pidp[1]; } ","html":"\u003cp\u003ecs111.stanford.edu\u003c/p\u003e\n\u003ch2 id=\"topics\"\u003eTopics\u003c/h2\u003e\n\u003cp\u003eCS111 leverages CS107 experience to show operating systems and how they function.\u003c/p\u003e\n\u003ch3 id=\"what-is-an-operating-system\"\u003eWhat is an operating system\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eoperating system sits between 
\u003cstrong\u003ehardware\u003c/strong\u003e and \u003cstrong\u003euser programs\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003emost importantly: manages \u003cem\u003eshared resources\u003c/em\u003e to allow the program to run\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eCPU\u003c/strong\u003e: gets which program to do work and for how long\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eRAM\u003c/strong\u003e: how much memory to give to a program\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eHard Drive\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-events\"\u003eMain Events\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003econcurrency: switch between processes so quickly to only use on core while concurrent access\u003c/li\u003e\n\u003cli\u003ememory: memory addresses are mostly scattered everywhere \u0026mdash; everything include the lowest level including CPU uses only virtual memory, translated by the OS\u003c/li\u003e\n\u003cli\u003efile management\u003c/li\u003e\n\u003cli\u003ei/o devices\u003c/li\u003e\n\u003cli\u003enetworking: CS144\u003c/li\u003e\n\u003cli\u003esecurity: interactions between users in a system\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-components-of-the-course\"\u003eMain Components of the Course\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eFile Systems\u003c/li\u003e\n\u003cli\u003eProcess and Multiprocess\u003c/li\u003e\n\u003cli\u003eThreads\u003c/li\u003e\n\u003cli\u003eVirtual Memory + Paging + limits\u003c/li\u003e\n\u003cli\u003eModern Technologies/Niceties\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"what-s-next\"\u003eWhat\u0026rsquo;s Next\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsu_cs111_outline/\"\u003eSU-CS111 Outline\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"content\"\u003eContent\u003c/h2\u003e\n\u003ch3 id=\"filesystem--kbhfilesystem-dot-md\"\u003e\u003ca 
href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eHow can we design file systems to manage files on disk, and what are the tradeoffs inherent in designing them. How can we interact with the filesystem?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003eUnix V6 Filesystem\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/#freelist\"\u003eFreelist\u003c/a\u003e and \u003ca href=\"/posts/kbhunix_v6_filesystem/#block-cache\"\u003eBlock Cache\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilesystem/#disk\"\u003edisk\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/\"\u003ecrash recovery\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/#fsck\"\u003efsck\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/#ordered-writes\"\u003eordered writes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcrash_recovery/#journaling\"\u003ejournaling\u003c/a\u003e: \u003ca href=\"/posts/kbhcrash_recovery/#journaling\"\u003ewrite-ahead logging\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/\"\u003esyscalls\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/#kernel-mode\"\u003ekernel mode\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efiles handling\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/#open\"\u003efile\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"multiprocessing\"\u003emultiprocessing\u003c/h3\u003e\n\u003cp\u003eHow are programs run, how to spawn subprograms, and how they work in general?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiprocessing/\"\u003emultiprocessing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/\"\u003efork\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/#execvp\"\u003eexecvp\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/#waitpid\"\u003ewaitpid\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfork/#shell\"\u003eshell\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e and ipc\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multithreading\"\u003eMultithreading\u003c/h3\u003e\n\u003cp\u003eHow do we implement a single program within a single program, and how do we not have race conditions\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/\"\u003emultithreading\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/#id-cc41feaf-ce09-48ec-84d7-8f98d9ca20ba-process-es-vs-id-b4b86ccc-70f3-4d30-b437-2f5fff63b0e6-thread-s\"\u003eprocesses vs threads\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/#race-condition\"\u003erace condition\u003c/a\u003e and \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/#passing-by-reference\"\u003epassing by reference\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermits_model/\"\u003epermits model\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhpermits_model/#condition-variable\"\u003econdition variable\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunique_lock/\"\u003eUnique Lock\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"trust\"\u003etrust\u003c/h4\u003e\n\u003cp\u003ehow do we \u003ca href=\"/posts/kbhprivacy/#trust\"\u003etrust\u003c/a\u003e software?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust-by-assumption\"\u003etrust by assumption\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust-by-inference\"\u003etrust by inference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust-by-substitution\"\u003etrust by substitution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"patterns\"\u003epatterns\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonitor_pattern/\"\u003emonitor pattern\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"dispatches\"\u003edispatches\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassembly/\"\u003eassembly\u003c/a\u003e review\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/\"\u003edispatching\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupts\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdispatching/#context-switch\"\u003econtext switch\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"virtual-memory\"\u003evirtual memory\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;how can one set of memory be shared across several processes\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvirtual_memory/#dynamic-address-translation\"\u003edynamic address translation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdemand_paging/\"\u003edemand paging\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclock_algorthium/\"\u003eclock algorithm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"model-technologies\"\u003emodel technologies\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodern_os/\"\u003emodern OS\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"trust-and-os\"\u003etrust and OS\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust\"\u003etrust\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eAn example of a good time:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// make 
pipe\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003epipe\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003edup2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSTDIN_FILENO\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// throw-a-tantrum\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eexit\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epidp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhos_index/","tags":null,"title":"Operating Systems Index"},{"categories":null,"contents":" adding multiplying This is object dependent.\n","html":"\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhadding/\"\u003eadding\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplying\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e dependent.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperation/","tags":null,"title":"operation"},{"categories":null,"contents":"Richard Nixon bombs Vietnam for 13 days to beat the VietCong into submission after the Vietnam War.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e bombs \u003ca href=\"/posts/kbhvietnam/\"\u003eVietnam\u003c/a\u003e for 13 days to beat the VietCong into submission after the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperation_linebacker/","tags":null,"title":"Operation Linebacker"},{"categories":null,"contents":"A Linear Map from a vector space to itself is called an operator.\n\\(\\mathcal{L}(V) = \\mathcal{L}(V,V)\\), which is the set of all operators on \\(V\\).\nconstituents a vector space \\(V\\) a Linear Map \\(T \\in \\mathcal{L}(V,V)\\) requirements \\(T\\) is, by the constraints above, an operator additional information injectivity is surjectivity in finite-dimensional operators Suppose \\(V\\) is finite-dimensional and \\(T \\in \\mathcal{L}(V)\\), then, the following statements are equivalent:\n\\(T\\) is invertable \\(T\\) is injective \\(T\\) is surjective THIS IS NOT TRUE IN infinite-demensional vector space OPERATORS! (for instance, backwards shift in \\(\\mathbb{F}^{\\infty}\\) is surjective but not injective.)\nProof:\nFrom the above, \\(1 \\implies 2\\) by definition of invertability.\nThen, we have that \\(T\\) is invertable. We desire that \\(T\\) is surjective. Given invertability, we have that \\(\\null T = \\{0\\}\\). 
By the rank-nullity theorem, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T = \\dim range\\ T +0= \\dim range\\ T\\). Now, given \\(T\\) is an operator, we have that \\(range\\ T \\subset V\\). Attempting to extend a basis of \\(range\\ T\\) (which, given it is a subspace of \\(V\\), is a linearly independent list in \\(V\\)) to a basis of \\(V\\) will be the trivial extension. So \\(range\\ T = V\\), which is also the codomain of \\(T\\). This makes \\(T\\) surjective, as desired. So \\(2 \\implies 3\\).\nNow, we have that \\(T\\) is surjective, we desire that \\(T\\) is invertable. We essentially reverse-engineer the step before. Given rank-nullity theorem, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T\\). Now, given \\(T\\) is surjective, \\(\\dim range\\ T = \\dim V\\). Therefore, we have that \\(\\dim V = \\dim V + \\dim null\\ T \\implies 0 = \\dim null\\ T\\). This makes the null space of \\(T\\) be \\(\\{0\\}\\). This makes \\(T\\) injective. Having shown \\(T\\) to be both surjective and injective, \\(T\\) is invertable, as desired. 
So \\(3 \\implies 1\\).\nHaving shown a loop in the statements, all of them are equivalent.\noperators on complex vector spaces have an eigenvalue See operators on complex vector spaces have an eigenvalue\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e from a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e to itself is called an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\(\\mathcal{L}(V) = \\mathcal{L}(V,V)\\), which is the set of all \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es on \\(V\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(T \\in \\mathcal{L}(V,V)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T\\) is, by the constraints above, an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"injectivity--kbhinjectivity-dot-md--is-surjectivity--kbhsurjectivity-dot-md--in-finite-dimensional--kbhfinite-dimensional-vector-space-dot-md--operator--kbhoperator-dot-md--s\"\u003e\u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjectivity\u003c/a\u003e in \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eSuppose \\(V\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e and \\(T \\in 
\\mathcal{L}(V)\\), then, the following statements are equivalent:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003cstrong\u003eTHIS IS NOT TRUE IN \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003einfinite-demensional vector space\u003c/a\u003e OPERATORS!\u003c/strong\u003e (for instance, backwards shift in \\(\\mathbb{F}^{\\infty}\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e but not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.)\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eFrom the above, \\(1 \\implies 2\\) by definition of \u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThen, we have that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. We desire that \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e. Given \u003ca href=\"/posts/kbhinvertability/\"\u003einvertability\u003c/a\u003e, we have that \\(\\null T = \\{0\\}\\). By the \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T = \\dim range\\ T +0= \\dim range\\ T\\). Now, given \\(T\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e, we have that \\(range\\ T \\subset V\\). 
Attempting to extend a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(range\\ T\\) (which, given it is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\), is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(V\\)) to a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\) will be the trivial extension. So \\(range\\ T = V\\), which is also the codomain of \\(T\\). This makes \\(T\\) \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, as desired. So \\(2 \\implies 3\\).\u003c/p\u003e\n\u003cp\u003eNow, we have that \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, we desire that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. We essentially reverse-engineer the step before. Given \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e, we have that: \\(\\dim V = \\dim range\\ T + \\dim null\\ T\\). Now, given \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e, \\(\\dim range\\ T = \\dim V\\). Therefore, we have that \\(\\dim V = \\dim V + \\dim null\\ T \\implies 0 = \\dim null\\ T\\). This makes the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(T\\) be \\(\\{0\\}\\). This makes \\(T\\) \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e. Having shown \\(T\\) to be both \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e and \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, as desired. 
So \\(3 \\implies 1\\).\u003c/p\u003e\n\u003cp\u003eHaving shown a loop in the statements, all of them are equivalent.\u003c/p\u003e\n\u003ch3 id=\"operators-on-complex-vector-spaces-have-an-eigenvalue--kbhoperators-on-complex-vector-spaces-have-an-eigenvalue-dot-md\"\u003e\u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoperator/","tags":null,"title":"operator"},{"categories":null,"contents":"An opsin is a photo-receptor protein (sensitive to light) that is sensitive to light\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhopsins/\"\u003eopsin\u003c/a\u003e is a photo-receptor protein (sensitive to light) that is sensitive to light\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhopsins/","tags":null,"title":"opsin"},{"categories":null,"contents":"Suppose we have offline statistic regarding wins and losses of each slot machine as our state:\n\\begin{equation} w_1, l_{1}, \\dots, w_{n}, l_{n} \\end{equation}\nWhat if we want to create a policy that maximises exploration?\nWe construct a value function:\n\\begin{equation} U^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) \\end{equation}\nour policy is the greedy policy:\n\\begin{equation} U^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\arg\\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) \\end{equation}\nNow, how do we go about calculating the action-value:\n\\begin{align} Q ([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) =\\ \u0026amp; \\frac{w_{a}+1}{w_{a}+l_{a}+2} (R(w) + U^{*}(\\dots, w_{a}+1, l_{a}, \\dots)) \\\u0026amp;+ \\qty(1-\\frac{w_{a}+1}{w_{a}+l_{a}+2})(R(l) + U^{*}(\\dots, w_{a}, l_{a}+1, 
\\dots)) \\end{align}\n\u0026ldquo;the probability of you win\u0026rdquo; (expectation of Beta Distribution), times the instantaneous reward you win + the utility you gain in terms of information of you doing that.\nTo solve this in a finite horizon, note that at time \\(t=k\\), your \\(U^{*}\\) is \\(0\\) because you have nothing to do anymore.\nThen, you can back up slowly to get each previous state:\ncalculate \\(Q[w_1-1, l_1, \u0026hellip;, 1]\\) calculate \\(Q[w_1, l_1-1, \u0026hellip;,1]\\) and so on, and choosing the utility of each state from there.\n","html":"\u003cp\u003eSuppose we have offline statistic regarding wins and losses of each slot machine as our state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw_1, l_{1}, \\dots, w_{n}, l_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhat if we want to create a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that maximises exploration?\u003c/p\u003e\n\u003cp\u003eWe construct a \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eour policy is the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}]) = \\arg\\max_{a} Q^{*}([w_1, l_{1}, \\dots, w_{n}, l_{n}], a)\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eNow, how do we go about calculating the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nQ ([w_1, l_{1}, \\dots, w_{n}, l_{n}], a) =\\ \u0026amp; \\frac{w_{a}+1}{w_{a}+l_{a}+2} (R(w) + U^{*}(\\dots, w_{a}+1, l_{a}, \\dots)) \\\u0026amp;+ \\qty(1-\\frac{w_{a}+1}{w_{a}+l_{a}+2})(R(l) + 
U^{*}(\\dots, w_{a}, l_{a}+1, \\dots))\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the probability of you win\u0026rdquo; (expectation of \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e), times the instantaneous reward you win + the utility you gain in terms of information of you doing that.\u003c/p\u003e\n\u003cp\u003eTo solve this in a finite horizon, note that at time \\(t=k\\), your \\(U^{*}\\) is \\(0\\) because you have nothing to do anymore.\u003c/p\u003e\n\u003cp\u003eThen, you can back up slowly to get each previous state:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecalculate \\(Q[w_1-1, l_1, \u0026hellip;, 1]\\)\u003c/li\u003e\n\u003cli\u003ecalculate \\(Q[w_1, l_1-1, \u0026hellip;,1]\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eand so on, and choosing the utility of each state from there.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptimal_exploration/","tags":null,"title":"Optimal Exploration Policy"},{"categories":null,"contents":" Shuffle cards Keep revealing cards \u0026ldquo;Stop\u0026rdquo; when there\u0026rsquo;s \u0026gt;50% chance the next card to be revealed is black We can Frequentist Definition of Probability calculate the probability of a given card remaining is black:\n\\begin{equation} pblack(b,r) = \\frac{26-b}{52-(r+b)} \\end{equation}\nnow:\n\\begin{equation} pwin(b,r) = \\begin{cases} 0, b+r = 52 \\\\ \\max \\qty[ \\begin{align}\u0026amp;pblack(p,r), \\\\ \u0026amp;pblack(b,r)pwin(b+1,r) + (1-pblack(b,r)pwin(b, r+1) \\end{align}] \\end{cases} \\end{equation}\n\u0026ldquo;with the theory of the Martingales, this comes out to be 50%\u0026rdquo;\n","html":"\u003col\u003e\n\u003cli\u003eShuffle cards\u003c/li\u003e\n\u003cli\u003eKeep revealing cards\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Stop\u0026rdquo; when there\u0026rsquo;s \u0026gt;50% chance the next card to be revealed is black\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe can 
\u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/a\u003e calculate the probability of a given card remaining is black:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\npblack(b,r) = \\frac{26-b}{52-(r+b)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\npwin(b,r) = \\begin{cases}\n0, b+r = 52 \\\\\n\\max \\qty[ \\begin{align}\u0026amp;pblack(p,r), \\\\ \u0026amp;pblack(b,r)pwin(b+1,r) + (1-pblack(b,r)pwin(b, r+1) \\end{align}]\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;with the theory of the \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale\u003c/a\u003es, this comes out to be 50%\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptimal_stopping_problem/","tags":null,"title":"Optimal Stopping Problem"},{"categories":null,"contents":"optimization is a decision making method:\nidentify a performance measure and a space of possible strategies to try run a bunch of simulations given a particular strategy, and measuring the performance try strategies with the goal of maximizing the performance measured Importantly: model is not used to guide the search, it is only used to run simulations to evaluate performance.\nDisadvantage (or advantage) does not take a advantage of the structure of the problem\nOptimization Steps if you are doing something infrequently, make sure the simplest code If you are doing something very often, and/or on big inputs, make the primary algorithm big-o cost reasonable Make GCC Work! Optimize explicitly as last resort. Main Optimization Techniques constant folding sub-expression elimination dead code elimination \u0026ldquo;strength reduction\u0026rdquo; code motion tail recursion loop unrolling constant folding There are many constants which happens during code writing. 
Therefore, for functions that operate on constant values, they will be folded in and the math done ahead-of-time during compilation.\ncommon sub-instruction elimination If you have the same sub-expression over and over again, we compute it ahead of time and use that result in multiple places.\ndead code elimination Code which doesn\u0026rsquo;t do anything of interest. This maybe subtle:\nif (param == 0) { return 0; } else { return param; } this is (mostly) dead code. It all return 0.\nstrength reduction Multiply can be rounded to a bit shift, and mod can be changed to an AND operation.\n7 * a == 8*a - a vs So you can left shift and then subtract, which is dramatically easier.\nWe can even do this with:\nb / 3 which can be converted to\n(b*3) / 10 which is much easier because its a multiplication\ncode motion if there is a common sub-exppression, it can be pull out of loops\nfor (size_t i = 0; i \u0026lt; strlen(s); i++) { arr[i] = s[i]; } the strlen call can be and will be pulled out.\ntail recursion turn a recursive call into a while loop to save stack frame management time\nloop unrolling A loop can be \u0026ldquo;factored out\u0026rdquo;:\nfor (int i=0; i\u0026lt;=n; i++) { sum += arr[i]; } can turn into\nfor (int i=0; i\u0026lt;=n-4; i+=4) { sum += arr[i]; sum += arr[i+1]; sum += arr[i+2]; sum += arr[i+3]; } // handle ending cases Why don\u0026rsquo;t we unroll all the way? 
We don\u0026rsquo;t know what \\(n\\) is.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e is a \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003edecision making method\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eidentify a performance measure and a space of possible strategies to try\u003c/li\u003e\n\u003cli\u003erun a bunch of simulations given a particular strategy, and measuring the performance\u003c/li\u003e\n\u003cli\u003etry strategies with the goal of maximizing the performance measured\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eImportantly: model is not used to guide the search, it is only used to run simulations to evaluate performance.\u003c/p\u003e\n\u003ch2 id=\"disadvantage--or-advantage\"\u003eDisadvantage (or advantage)\u003c/h2\u003e\n\u003cp\u003edoes \u003cstrong\u003enot\u003c/strong\u003e take a advantage of the structure of the problem\u003c/p\u003e\n\u003ch2 id=\"optimization-steps\"\u003eOptimization Steps\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eif you are doing something infrequently, make sure the simplest code\u003c/li\u003e\n\u003cli\u003eIf you are doing something very often, and/or on big inputs, make the primary algorithm big-o cost reasonable\u003c/li\u003e\n\u003cli\u003eMake GCC Work!\u003c/li\u003e\n\u003cli\u003eOptimize explicitly as last resort.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"main-optimization-techniques\"\u003eMain Optimization Techniques\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003econstant folding\u003c/li\u003e\n\u003cli\u003esub-expression elimination\u003c/li\u003e\n\u003cli\u003edead code elimination\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;strength reduction\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ecode motion\u003c/li\u003e\n\u003cli\u003etail recursion\u003c/li\u003e\n\u003cli\u003eloop unrolling\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"constant-folding\"\u003econstant folding\u003c/h3\u003e\n\u003cp\u003eThere are many constants which happens during code writing. Therefore, for functions that operate on constant values, they will be folded in and the math done ahead-of-time during compilation.\u003c/p\u003e\n\u003ch3 id=\"common-sub-instruction-elimination\"\u003ecommon sub-instruction elimination\u003c/h3\u003e\n\u003cp\u003eIf you have the same sub-expression over and over again, we compute it ahead of time and use that result in multiple places.\u003c/p\u003e\n\u003ch3 id=\"dead-code-elimination\"\u003edead code elimination\u003c/h3\u003e\n\u003cp\u003eCode which doesn\u0026rsquo;t do anything of interest. This maybe subtle:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparam\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eparam\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethis is (mostly) dead code. It all return \u003ccode\u003e0\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"strength-reduction\"\u003estrength reduction\u003c/h3\u003e\n\u003cp\u003eMultiply can be rounded to a bit shift, and mod can be changed to an AND operation.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#ae81ff\"\u003e7\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003evs\nSo you can left shift and then subtract, which is dramatically easier.\u003c/p\u003e\n\u003cp\u003eWe can even do this with:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich can be converted to\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e10\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich is much easier because its a multiplication\u003c/p\u003e\n\u003ch3 id=\"code-motion\"\u003ecode motion\u003c/h3\u003e\n\u003cp\u003eif there is a common sub-exppression, it can be pull out of loops\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003estrlen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e++\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethe \u003ccode\u003estrlen\u003c/code\u003e call can be and will be pulled out.\u003c/p\u003e\n\u003ch3 id=\"tail-recursion\"\u003etail recursion\u003c/h3\u003e\n\u003cp\u003eturn a recursive call into a while loop to save stack frame management time\u003c/p\u003e\n\u003ch3 id=\"loop-unrolling\"\u003eloop unrolling\u003c/h3\u003e\n\u003cp\u003eA loop can be \u0026ldquo;factored out\u0026rdquo;:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e++\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ecan turn into\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003en\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e4\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esum\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// handle ending cases\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWhy don\u0026rsquo;t we unroll all the way? 
We don\u0026rsquo;t know what \\(n\\) is.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptimization/","tags":null,"title":"optimization"},{"categories":null,"contents":"In the event your domain knowledge can help you make decisions about how spark load-balances or stripes data across worker nodes.\nPersistence \u0026ldquo;you should store this data in faster/slower memory\u0026rdquo;\nMEMORY_ONLY, MEMORY_ONLY_SER, MEMORY_AND_DISK, MEMORY_AND_DISK_SER, DISK_ONLY\nrdd.persist(StorageLevel.MEMORY_AND_DISK) # ... do work ... rdd.unpersist() Parallel Programming ","html":"\u003cp\u003eIn the event your domain knowledge can help you make decisions about how spark load-balances or stripes data across worker nodes.\u003c/p\u003e\n\u003ch2 id=\"persistence\"\u003ePersistence\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;you should store this data in faster/slower memory\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eMEMORY_ONLY, MEMORY_ONLY_SER, MEMORY_AND_DISK, MEMORY_AND_DISK_SER, DISK_ONLY\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epersist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eStorageLevel\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eMEMORY_AND_DISK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# ... 
do work ...\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunpersist\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"parallel-programming\"\u003eParallel Programming\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-31_14-30-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhoptimizing_spark/","tags":null,"title":"Optimizing Spark"},{"categories":null,"contents":"options are derivatives which gives you the permission to make a transaction at a particular date.\nThere are two main types of options:\ncall: gives permission to buy a security on or before the \u0026ldquo;exercise\u0026rdquo; date puts: gives permission to sell a security on or before the \u0026ldquo;exercise\u0026rdquo; date For this article, we will define \\(S_{t}\\) to be the stock price at the time \\(t\\), \\(K\\) as the option\u0026rsquo;s strike price, \\(C_{t}\\) to be the price of the \u0026ldquo;call\u0026rdquo; option, and \\(P_{t}\\) to be the price of the \u0026ldquo;put\u0026rdquo; option at strike price \\(K\\); lastly \\(T\\) we define as the maturity date.\nNaturally, the actual values \\(C_{t}\\) and \\(P_{t}\\) are:\n\\begin{equation} \\begin{cases} C_{t} = Max[0, S_{T}-K] \\\\ P_{t} = Max[0, K-S_{T}] \\\\ \\end{cases} \\end{equation}\nyou either make no money from the option (market price is more optimal), or make some difference between the strike price and the market price.\nThe nice thing here is that little \\(Max\\) term. An option, unlike a futures contract, has no buying obligation: you don\u0026rsquo;t have to exercise it. The payoff is always non-negative!\nNOTE!!! 
\\(C_{t}\\) at SMALL \\(t\\) is measured at \\(Max[0,S_{*T*}, K}]\\), using \\(S\\) of LARGE \\(T\\). This is because, even when\u0026mdash;currently\u0026mdash;the stock is trading at $60, the right to buy the stock in \\(T\\) months for $70 is not worthless as the price may go up.\nTo analyze options, we usually use the Black-Scholes Formula.\nAmerican vs European Options American options are excercisable at or before the maturity date. European options are exrcercisable only at the maturity date. Analyze Options as Insurance All insurance contracts are actually a form of an option, so why don\u0026rsquo;t we analyze it as such?\nA put option\u0026mdash;-\nAsset insured: stock Current asset value: \\(S_{0}\\) Term of policy: \\(T\\) Maximum coverage: \\(K\\) Deductible: \\(S_0-K\\) Insurance premium: \\(P_{t}\\) A call option is covariant with a put option; so its isomorphic, and so we will deal with it later.\nA few differences:\nAmerican-style early exercise: (you can\u0026rsquo;t, for normal insurance, exercise it without something happening) Marketability: you can\u0026rsquo;t give normal insurance to other people Dividends: holding a stock pays dividends (an option\u0026rsquo;s value goes down as dividends) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoptions/\"\u003eoptions\u003c/a\u003e are \u003ca href=\"/posts/kbhderivatives/\"\u003ederivatives\u003c/a\u003e which gives you the \u003cem\u003epermission\u003c/em\u003e to make a transaction at a particular date.\u003c/p\u003e\n\u003cp\u003eThere are two main types of \u003ca href=\"/posts/kbhoptions/\"\u003eoptions\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecall: gives permission to \u003cstrong\u003ebuy\u003c/strong\u003e a security on or before the \u0026ldquo;exercise\u0026rdquo; date\u003c/li\u003e\n\u003cli\u003eputs: gives permission to \u003cstrong\u003esell\u003c/strong\u003e a security on or before the \u0026ldquo;exercise\u0026rdquo; 
date\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFor this article, we will define \\(S_{t}\\) to be the stock price at the time \\(t\\), \\(K\\) as the option\u0026rsquo;s strike price, \\(C_{t}\\) to be the price of the \u0026ldquo;call\u0026rdquo; option, and \\(P_{t}\\) to be the price of the \u0026ldquo;put\u0026rdquo; option at strike price \\(K\\); lastly \\(T\\) we define as the maturity date.\u003c/p\u003e\n\u003cp\u003eNaturally, the actual values \\(C_{t}\\) and \\(P_{t}\\) are:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nC_{t} = Max[0, S_{T}-K] \\\\\nP_{t} = Max[0, K-S_{T}] \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou either make no money from the \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e (market price is more optimal), or make some difference between the strike price and the market price.\u003c/p\u003e\n\u003cp\u003eThe nice thing here is that little \\(Max\\) term. An \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e, unlike a futures contract, has no buying obligation: you don\u0026rsquo;t have to exercise it. The payoff is always non-negative!\u003c/p\u003e\n\u003cp\u003eNOTE!!! \\(C_{t}\\) at SMALL \\(t\\) is measured at \\(Max[0,S_{*T*}, K}]\\), using \\(S\\) of LARGE \\(T\\). 
This is because, even when\u0026mdash;currently\u0026mdash;the stock is trading at $60, the right to buy the stock in \\(T\\) months for $70 is not worthless as the price may go up.\u003c/p\u003e\n\u003cp\u003eTo analyze options, we usually use the \u003ca href=\"/posts/kbhblack_scholes_formula/\"\u003eBlack-Scholes Formula\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"american-vs-european-options\"\u003eAmerican vs European Options\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAmerican options are excercisable at or before the maturity date.\u003c/li\u003e\n\u003cli\u003eEuropean options are exrcercisable only at the maturity date.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"analyze-options-as-insurance\"\u003eAnalyze Options as Insurance\u003c/h2\u003e\n\u003cp\u003eAll insurance contracts are actually a form of an \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e, so why don\u0026rsquo;t we analyze it as such?\u003c/p\u003e\n\u003cp\u003eA put option\u0026mdash;-\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAsset insured: stock\u003c/li\u003e\n\u003cli\u003eCurrent asset value: \\(S_{0}\\)\u003c/li\u003e\n\u003cli\u003eTerm of policy: \\(T\\)\u003c/li\u003e\n\u003cli\u003eMaximum coverage: \\(K\\)\u003c/li\u003e\n\u003cli\u003eDeductible: \\(S_0-K\\)\u003c/li\u003e\n\u003cli\u003eInsurance premium: \\(P_{t}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eA call option is covariant with a put \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003e; so its isomorphic, and so we will deal with it later.\u003c/p\u003e\n\u003cp\u003eA few differences:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAmerican-style early exercise: (you can\u0026rsquo;t, for normal insurance, exercise it without something happening)\u003c/li\u003e\n\u003cli\u003eMarketability: you can\u0026rsquo;t give normal insurance to other people\u003c/li\u003e\n\u003cli\u003eDividends: holding a stock pays dividends (an option\u0026rsquo;s value goes down as 
dividends)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptions/","tags":null,"title":"option"},{"categories":null,"contents":"an Option (MDP) represents a high level collection of actions. Big Picture: abstract away your big policy into \\(n\\) small policies, and value-iterate over expected values of the big policies.\nMarkov Option A Markov Option is given by a triple \\((I, \\pi, \\beta)\\)\n\\(I \\subset S\\), the states from which the option maybe started \\(S \\times A\\), the MDP during that option \\(\\beta(s)\\), the probability of the option terminating at state \\(s\\) one-step options You can develop one-shot options, which terminates immediate after one action with underlying probability\n\\(I = \\{s:a \\in A_{s}\\}\\) \\(\\pi(s,a) = 1\\) \\(\\beta(s) = 1\\) option value fuction \\begin{equation} Q^{\\mu}(s,o) = \\mathbb{E}\\qty[r_{t} + \\gamma r_{t+1} + \\dots] \\end{equation}\nwhere \\(\\mu\\) is some option selection process\nsemi-markov decision process a semi-markov decision process is a system over a bunch of options, with time being a factor in option transitions, but the underlying policies still being MDPs.\n\\begin{equation} T(s\u0026rsquo;, \\tau | s,o) \\end{equation}\nwhere \\(\\tau\\) is time elapsed.\nbecause option-level termination induces jumps between large scale states, one backup can propagate to a lot of states.\nintra option q-learning \\begin{equation} Q_{k+1} (s_{i},o) = (1-\\alpha_{k})Q_{k}(S_{t}, o) + \\alpha_{k} \\qty(r_{t+1} + \\gamma U_{k}(s_{t+1}, o)) \\end{equation}\nwhere:\n\\begin{equation} U_{k}(s,o) = (1-\\beta(s))Q_{k}(s,o) + \\beta(s) \\max_{o \\in O} Q_{k}(s,o\u0026rsquo;) \\end{equation}\n","html":"\u003cp\u003ean \u003ca href=\"/posts/kbhoption/\"\u003eOption (MDP)\u003c/a\u003e represents a high level collection of actions. 
Big Picture: abstract away your big policy into \\(n\\) small policies, and value-iterate over expected values of the big policies.\u003c/p\u003e\n\u003ch2 id=\"markov-option\"\u003eMarkov Option\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#markov-option\"\u003eMarkov Option\u003c/a\u003e is given by a triple \\((I, \\pi, \\beta)\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(I \\subset S\\), the states from which the option maybe started\u003c/li\u003e\n\u003cli\u003e\\(S \\times A\\), the MDP during that option\u003c/li\u003e\n\u003cli\u003e\\(\\beta(s)\\), the probability of the option terminating at state \\(s\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"one-step-options\"\u003eone-step options\u003c/h3\u003e\n\u003cp\u003eYou can develop one-shot options, which terminates immediate after one action with underlying probability\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(I = \\{s:a \\in A_{s}\\}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\pi(s,a) = 1\\)\u003c/li\u003e\n\u003cli\u003e\\(\\beta(s) = 1\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"option-value-fuction\"\u003eoption value fuction\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nQ^{\\mu}(s,o) = \\mathbb{E}\\qty[r_{t} + \\gamma r_{t+1} + \\dots]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\mu\\) is some option selection process\u003c/p\u003e\n\u003ch3 id=\"semi-markov-decision-process\"\u003esemi-markov decision process\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"#semi-markov-decision-process\"\u003esemi-markov decision process\u003c/a\u003e is a system over a bunch of \u003ca href=\"/posts/kbhoptions/\"\u003eoption\u003c/a\u003es, with time being a factor in option transitions, but the underlying policies still being \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(s\u0026rsquo;, \\tau | s,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\tau\\) is time 
elapsed.\u003c/p\u003e\n\u003cp\u003ebecause option-level termination induces jumps between large scale states, one backup can propagate to a lot of states.\u003c/p\u003e\n\u003ch3 id=\"intra-option-q-learning\"\u003eintra option q-learning\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nQ_{k+1} (s_{i},o) = (1-\\alpha_{k})Q_{k}(S_{t}, o) + \\alpha_{k} \\qty(r_{t+1} + \\gamma U_{k}(s_{t+1}, o))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{k}(s,o) = (1-\\beta(s))Q_{k}(s,o) + \\beta(s) \\max_{o \\in O} Q_{k}(s,o\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoption/","tags":null,"title":"Option (MDP)"},{"categories":null,"contents":"Optogenetics are a process of neurology circuit investigations:\nevery neuron which expresses as specific change becomes sensitive to light therefore, you can shine a light on the mouse\u0026rsquo;s brain to control it This uses a set of molecules named opsins.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoptogenetics/\"\u003eOptogenetics\u003c/a\u003e are a process of neurology circuit investigations:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eevery neuron which expresses as specific change becomes sensitive to light\u003c/li\u003e\n\u003cli\u003etherefore, you can shine a light on the mouse\u0026rsquo;s brain to control it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis uses a set of molecules named \u003ca href=\"/posts/kbhopsins/\"\u003eopsins\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoptogenetics/","tags":null,"title":"Optogenetics"},{"categories":null,"contents":"oral lexical retrival is a class of discourse tasks which asks the subject to convert some semantic understanding (\u0026ldquo;concept\u0026rdquo;) into lexical expressions (\u0026ldquo;words\u0026rdquo;)\n\u0026ldquo;ask a patient to describe a thing.\u0026rdquo;\nExamples of oral lexical retrieval:\nSVF BNT Source: 
CambridgeCore\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrival\u003c/a\u003e is a class of \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003ediscourse tasks\u003c/a\u003e which asks the subject to convert some semantic understanding (\u0026ldquo;concept\u0026rdquo;) into lexical expressions (\u0026ldquo;words\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;ask a patient to describe a thing.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eExamples of \u003ca href=\"/posts/kbhoral_lexical_retrival/\"\u003eoral lexical retrieval\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboston_naming_test/\"\u003eBNT\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSource: \u003ca href=\"https://www.cambridge.org/core/books/abs/cambridge-handbook-of-biolinguistics/lexical-retrieval-and-its-breakdown-in-aphasia-and-developmental-language-impairment/74D1249BE4923384AF56C2572187E6BF\"\u003eCambridgeCore\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoral_lexical_retrival/","tags":null,"title":"oral lexical retrieval"},{"categories":null,"contents":"ODEs are Differential Equations in one independent variable: \\(y(x)\\).\nMain Content:\nFirst-Order Differential Equations Second-Order Linear Differential Equations Uniqueness and Existance Overarching Categories order of equations the order of an equation is the highest derivative of an equation\nlinear vs. non-linear differential equations A solution of a differential equation is linear when solutions are closed under linear operations.\nWe can spot an ODE by seeing that each of its derivatives are seperated or in separable terms, and only to the first power\u0026mdash;because that ends up being a linear equation (i.e. 
any two solutions satisfying the equation can add and scale to another solution).\nThe RHS doesn\u0026rsquo;t matter. For instance:\n\\begin{equation} xy\u0026rsquo;\u0026rsquo; + e^{x}y\u0026rsquo; + (x^{2}-3)y = x^{2}-x \\end{equation}\nis linear.\nsuperposition principle any linear combination of a homogeneous linear ODE is also a solution to the ODE.\nfunctional linear independence\nRecall linear independence. If we have two solutions \\(y_1\\), \\(y_2\\), are linearly independent or \u0026ldquo;independent\u0026rdquo;, if\n\\begin{equation} c_1 y_1(t) + c_2y_2(t) = 0 \\end{equation}\nimplies \\(c_1 = c_2 = 0\\).\nhomogeneous vs. inhomogeneous equations whether or not, isolating all the DEPENDENT variables to the left side, is the right side zero?\nlinear systems systems of ODEs are groups of ODEs. Linear systems can obtain you a vector-value function:\n\\begin{equation} y\u0026rsquo;(x) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) \\vec{y} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003es are \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e in one independent variable: \\(y(x)\\).\u003c/p\u003e\n\u003cp\u003eMain Content:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"overarching-categories\"\u003eOverarching Categories\u003c/h2\u003e\n\u003ch3 id=\"order-of-equations\"\u003eorder of equations\u003c/h3\u003e\n\u003cp\u003ethe \u003ca 
href=\"#order-of-equations\"\u003eorder\u003c/a\u003e of an equation is the highest derivative of an equation\u003c/p\u003e\n\u003ch3 id=\"linear-vs-dot-non-linear-differential-equations\"\u003elinear vs. non-linear differential equations\u003c/h3\u003e\n\u003cp\u003eA solution of a differential equation is \u003cstrong\u003elinear\u003c/strong\u003e when solutions are \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under linear operations.\u003c/p\u003e\n\u003cp\u003eWe can spot an ODE by seeing that each of its derivatives are seperated or in separable terms, and only to the first power\u0026mdash;because that ends up being a linear equation (i.e. any two solutions satisfying the equation can add and scale to another solution).\u003c/p\u003e\n\u003cp\u003eThe RHS doesn\u0026rsquo;t matter. For instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nxy\u0026rsquo;\u0026rsquo; + e^{x}y\u0026rsquo; + (x^{2}-3)y = x^{2}-x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis linear.\u003c/p\u003e\n\u003ch4 id=\"superposition-principle\"\u003esuperposition principle\u003c/h4\u003e\n\u003cp\u003eany linear combination of a \u003cem\u003ehomogeneous linear\u003c/em\u003e ODE is also a solution to the ODE.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003efunctional linear independence\u003c/p\u003e\n\u003cp\u003eRecall linear independence. If we have two solutions \\(y_1\\), \\(y_2\\), are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e or \u0026ldquo;\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e\u0026rdquo;, if\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 y_1(t) + c_2y_2(t) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eimplies \\(c_1 = c_2 = 0\\).\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"homogeneous-vs-dot-inhomogeneous-equations\"\u003ehomogeneous vs. 
inhomogeneous equations\u003c/h3\u003e\n\u003cp\u003ewhether or not, isolating all the DEPENDENT variables to the left side, is the right side zero?\u003c/p\u003e\n\u003ch3 id=\"linear-systems\"\u003elinear systems\u003c/h3\u003e\n\u003cp\u003esystems of ODEs are groups of ODEs. Linear systems can obtain you a vector-value function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;(x) = \\mqty(3 \u0026amp; -2 \\\\ -1 \u0026amp; 5) \\vec{y}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhordinary_differential_equations/","tags":null,"title":"Ordinary Differential Equation"},{"categories":null,"contents":"Reading notes conservatives in America make less sense because America is supposed to be liberal/new For most Europeans who came to America, the whole purpose of their difficult and dis- ruptive journey to the New World was not to conserve European institutions but to leave them behind and to create something new, often an entirely new life\nThree splits of conservatism in America those who are most concerned about economic or fiscal issues, that is, pro-business or “free-enterprise” conservatives those most concerned with religious or social issues, that is, pro-church or “traditional-values” conservatives those most concerned with national-security or defense issues, that is, pro-military or “patriotic” conservatives Ronald Reagan unified the three conservatism It was the achievement of Ronald Reagan that he was able in the late 1970s to unite these three different kinds of conservatism into one grand coalition.\nThree-in-one conservatism is a part of American \u0026ldquo;fusionist strategy\u0026rdquo; This was the culmination of a “fusionist strategy” that had been developing amongst American conservatives since the early 1960s.\nBusiness and social conservatism should contradict each other, though However, as we shall see, pro-business conservatism has always included a tendency toward the disruption and even dissolution 
of religious ideals and social practices.\nExtreme pro-business should also include globalization and erasure of national identities And in recent decades, pro-business conservatism has also included a tendency toward the dismantling of national boundaries and even dissolution of national identities\n\u0026ldquo;conservatism\u0026rdquo; actually conserved American revolutionary force economically this means that the conservative party in America has always sought to conserve a revolutionary force.\nExtreme economic \u0026ldquo;conservatism\u0026rdquo; should destry social and moral arrangements It destroys religious, social, and ultimately moral arrangements as well.\nReligions conservatism founds on the \u0026ldquo;open-market\u0026rdquo; of protestanism This open market in religious matters, so nicely isomorphic with the open market in economic matters, was a powerful factor gen- erating both a reality and an ideology of free choice in the United States.\nBecause the \u0026ldquo;new\u0026rdquo; became new protestanism, religious conservatism is the re-take over of traditional religions Since these churches were continually being left behind, religious conservatism was associated with once-dominant churches that were now dwindling into a minority, and would later dwindle into marginality\nBecause of mass economic benifit, religous conservatism became subordinated to economic conservatism Even ordinary middle-class Protestants benefited from cheaper labor, in the form of domestic servants. 
And of course it was the businessmen and middle-class Protestants who controlled the political parties, particularly that party which was supposed to be the more conservative one\nBecause there is nothing to conserve about current system, the thing that\u0026rsquo;s conserved is free choice If something were going to be conserved, it would normally be the no-conscription and low-taxation (and free-choice) system.\nEconomic systems propergated the source of American patriotism This meant that people who thought of themselves as American patriots or nationalists, and who sought to conserve the American nation and to promote American national interests\nAmerican conservatism is actually a form of European liberalism we have seen that, from a European perspective, American conservatism was not conservative at all, but actually was a kind of classical lib- eralism.\nwartime strengthened American values and liberalism Moreover, the wartime experience seemed decisively to vindicate and even enhance the strengths of both the traditional American economic system and traditional American moral principles.\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"conservatives-in-america-make-less-sense-because-america-is-supposed-to-be-liberal-new\"\u003econservatives in America make less sense because America is supposed to be liberal/new\u003c/h3\u003e\n\u003cp\u003eFor most Europeans who came to America, the whole purpose of their difficult and dis- ruptive journey to the New World was not to conserve European institutions but to leave them behind and to create something new, often an entirely new life\u003c/p\u003e\n\u003ch3 id=\"three-splits-of-conservatism-in-america\"\u003eThree splits of conservatism in America\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003ethose who are most concerned about economic or fiscal issues, that is, pro-business or “free-enterprise” conservatives\u003c/li\u003e\n\u003cli\u003ethose most concerned with 
religious or social issues, that is, pro-church or “traditional-values” conservatives\u003c/li\u003e\n\u003cli\u003ethose most concerned with national-security or defense issues, that is, pro-military or “patriotic” conservatives\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"ronald-reagan-unified-the-three-conservatism\"\u003eRonald Reagan unified the three conservatism\u003c/h3\u003e\n\u003cp\u003eIt was the achievement of Ronald Reagan that he was able in the late 1970s to unite these three different kinds of conservatism into one grand coalition.\u003c/p\u003e\n\u003ch3 id=\"three-in-one-conservatism-is-a-part-of-american-fusionist-strategy\"\u003eThree-in-one conservatism is a part of American \u0026ldquo;fusionist strategy\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eThis was the culmination of a “fusionist strategy” that had been developing amongst American conservatives since the early 1960s.\u003c/p\u003e\n\u003ch3 id=\"business-and-social-conservatism-should-contradict-each-other-though\"\u003eBusiness and social conservatism should contradict each other, though\u003c/h3\u003e\n\u003cp\u003eHowever, as we shall see, pro-business conservatism has always included a tendency toward the disruption and even dissolution of religious ideals and social practices.\u003c/p\u003e\n\u003ch3 id=\"extreme-pro-business-should-also-include-globalization-and-erasure-of-national-identities\"\u003eExtreme pro-business should also include globalization and erasure of national identities\u003c/h3\u003e\n\u003cp\u003eAnd in recent decades, pro-business conservatism has also included a tendency toward the dismantling of national boundaries and even dissolution of national identities\u003c/p\u003e\n\u003ch3 id=\"conservatism-actually-conserved-american-revolutionary-force-economically\"\u003e\u0026ldquo;conservatism\u0026rdquo; actually conserved American revolutionary force economically\u003c/h3\u003e\n\u003cp\u003ethis means that the conservative party in America has always sought 
to conserve a revolutionary force.\u003c/p\u003e\n\u003ch3 id=\"extreme-economic-conservatism-should-destry-social-and-moral-arrangements\"\u003eExtreme economic \u0026ldquo;conservatism\u0026rdquo; should destry social and moral arrangements\u003c/h3\u003e\n\u003cp\u003eIt destroys religious, social, and ultimately moral arrangements as well.\u003c/p\u003e\n\u003ch3 id=\"religions-conservatism-founds-on-the-open-market-of-protestanism\"\u003eReligions conservatism founds on the \u0026ldquo;open-market\u0026rdquo; of protestanism\u003c/h3\u003e\n\u003cp\u003eThis open market in religious matters, so nicely isomorphic with the open market in economic matters, was a powerful factor gen- erating both a reality and an ideology of free choice in the United States.\u003c/p\u003e\n\u003ch3 id=\"because-the-new-became-new-protestanism-religious-conservatism-is-the-re-take-over-of-traditional-religions\"\u003eBecause the \u0026ldquo;new\u0026rdquo; became new protestanism, religious conservatism is the re-take over of traditional religions\u003c/h3\u003e\n\u003cp\u003eSince these churches were continually being left behind, religious conservatism was associated with once-dominant churches that were now dwindling into a minority, and would later dwindle into marginality\u003c/p\u003e\n\u003ch3 id=\"because-of-mass-economic-benifit-religous-conservatism-became-subordinated-to-economic-conservatism\"\u003eBecause of mass economic benifit, religous conservatism became subordinated to economic conservatism\u003c/h3\u003e\n\u003cp\u003eEven ordinary middle-class Protestants benefited from cheaper labor, in the form of domestic servants. 
And of course it was the businessmen and middle-class Protestants who controlled the political parties, particularly that party which was supposed to be the more conservative one\u003c/p\u003e\n\u003ch3 id=\"because-there-is-nothing-to-conserve-about-current-system-the-thing-that-s-conserved-is-free-choice\"\u003eBecause there is nothing to conserve about current system, the thing that\u0026rsquo;s conserved is free choice\u003c/h3\u003e\n\u003cp\u003eIf something were going to be conserved, it would normally be the no-conscription and low-taxation (and free-choice) system.\u003c/p\u003e\n\u003ch3 id=\"economic-systems-propergated-the-source-of-american-patriotism\"\u003eEconomic systems propergated the source of American patriotism\u003c/h3\u003e\n\u003cp\u003eThis meant that people who thought of themselves as American patriots or nationalists, and who sought to conserve the American nation and to promote American national interests\u003c/p\u003e\n\u003ch3 id=\"american-conservatism-is-actually-a-form-of-european-liberalism\"\u003eAmerican conservatism is actually a form of European liberalism\u003c/h3\u003e\n\u003cp\u003ewe have seen that, from a European perspective, American conservatism was not conservative at all, but actually was a kind of classical lib- eralism.\u003c/p\u003e\n\u003ch3 id=\"wartime-strengthened-american-values-and-liberalism\"\u003ewartime strengthened American values and liberalism\u003c/h3\u003e\n\u003cp\u003eMoreover, the wartime experience seemed decisively to vindicate and even enhance the strengths of both the traditional American economic system and traditional American moral principles.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrise_of_american_conservatism/","tags":null,"title":"Origins of American Conservatism"},{"categories":null,"contents":"Two vectors are considered orthogonal if \\(\\langle u,v \\rangle = 0\\), that is, their inner product is \\(0\\).\nSee also orthogonality test.\northogonality and 
\\(0\\) \\(0\\) is orthogonal to every vector in \\(v\\) because \\(\\langle 0,v \\rangle=0\\) for every \\(v\\) because of the properties of inner product \\(0\\) is the only vector orthogonal to itself as, by inner product definiteness, \\(\\langle v,v \\rangle=0\\) implies \\(v=0\\). ","html":"\u003cp\u003eTwo \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es are considered \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e if \\(\\langle u,v \\rangle = 0\\), that is, their \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e is \\(0\\).\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhdot_product/#orthogonality-test\"\u003eorthogonality test\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"orthogonality-and-0\"\u003eorthogonality and \\(0\\)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0\\) is orthogonal to every vector in \\(v\\) because \\(\\langle 0,v \\rangle=0\\) for every \\(v\\) because of the \u003ca href=\"/posts/kbhinner_product/#properties-of-inner-product\"\u003eproperties of inner product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(0\\) is the only vector orthogonal to itself as, by \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003e definiteness, \\(\\langle v,v \\rangle=0\\) implies \\(v=0\\).\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhorthogonal/","tags":null,"title":"orthogonal"},{"categories":null,"contents":"A list of vectors is orthonormal if each vector is orthogonal to every other vector, and they all have norm 1.\nIn other words:\n\\begin{equation} \\langle e_{j}, e_{k} \\rangle = \\begin{cases} 1, j = k\\\\ 0, j \\neq k \\end{cases} \\end{equation}\nThe vectors should inner-product with itself to \\(1\\), and be orthogonal to all others.\nAdditional Information orthonormal basis See also orthonormal basis\nNorm of an Orthogonal Linear Combination \\begin{equation} \\| a_1e_1 + \\dots + a_{m}e_{m} \\|^{2} = 
|a_1|^{2} + \\dots + |a_{m}|^{2} \\end{equation}\nWhen \\(e_1, \\dots e_{m}\\) are orthonormal vectors in \\(V\\) and \\(a_1, \\dots a_{m} \\in \\mathbb{F}\\).\nProof:\nRecall two facts: \\(e_{j}\\) are orthonormal vectors, so they are 1) orthogonal to each other and have 2) norm 1. Therefore, each \\(a_j e_{j}\\) are also orthogonal and have norm \\(a_{j}\\)\nAnd so, the orthogonal condition guarantees pythagoras, and we know that each vector being added here has norm \\(a_{j}\\).\nAnd so we can just chonk out each of the vectors, apply Pythagoras to the ending bunch and the one removed.\northonormal list is linearly independent Its a corollary of the above is that orthonormal lists are linearly independent.\nProof:\n\\begin{equation} a_1e_1 + \\dots +a_{m}e_{m} = 0 \\end{equation}\nWe desire that each \\(a_{j}=0\\) to show that this list is linearly independent.\nNow, given that the linear combination of these \\(e_{j}\\) adds to \\(0\\), the summed vector is a zero-vector. So:\n\\begin{equation} \\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = \\|0\\| = 0 \\end{equation}\nOF course their norm squared is also \\(0\\).\nApply the above, then, we now have:\n\\begin{equation} |a_1|^{2} + \\dots +|a_{m}|^{2} = \\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = 0 \\end{equation}\nOf course adding a list of positive numbers (\\(|a_{j}|^{2}\\)) together yields not a negative number, so there are no possible additive inverses that will cancel each other out. Hence, \\(a_{j} = 0\\), as desired. 
\\(\\blacksquare\\)\n","html":"\u003cp\u003eA list of vectors is \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e if each vector is \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to every other vector, and they all have \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1.\u003c/p\u003e\n\u003cp\u003eIn other words:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle e_{j}, e_{k} \\rangle = \\begin{cases}\n1, j = k\\\\\n0, j \\neq k\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe vectors should inner-product with itself to \\(1\\), and be orthogonal to all others.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"orthonormal-basis--kbhorthonormal-basis-dot-md\"\u003e\u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"norm-of-an-orthogonal-linear-combination\"\u003eNorm of an Orthogonal Linear Combination\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\| a_1e_1 + \\dots + a_{m}e_{m} \\|^{2} = |a_1|^{2} + \\dots + |a_{m}|^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhen \\(e_1, \\dots e_{m}\\) are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e vectors in \\(V\\) and \\(a_1, \\dots a_{m} \\in \\mathbb{F}\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eRecall two facts: \\(e_{j}\\) are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e vectors, so they are 1) \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e to each other and have 2) \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1. 
Therefore, each \\(a_j e_{j}\\) are also \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e and have norm \\(a_{j}\\)\u003c/p\u003e\n\u003cp\u003eAnd so, the \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e condition guarantees \u003ca href=\"/posts/kbhcornucopia_of_analysis/#pythagorean-theorem\"\u003epythagoras\u003c/a\u003e, and we know that each vector being added here has norm \\(a_{j}\\).\u003c/p\u003e\n\u003cp\u003eAnd so we can just chonk out each of the vectors, apply Pythagoras to the ending bunch and the one removed.\u003c/p\u003e\n\u003ch4 id=\"orthonormal-list-is-linearly-independent\"\u003eorthonormal list is linearly independent\u003c/h4\u003e\n\u003cp\u003eIts a corollary of the above is that \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e lists are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1e_1 + \\dots +a_{m}e_{m} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that each \\(a_{j}=0\\) to show that this list is linearly independent.\u003c/p\u003e\n\u003cp\u003eNow, given that the linear combination of these \\(e_{j}\\) adds to \\(0\\), the summed vector is a zero-vector. So:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = \\|0\\| = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOF course their norm squared is also \\(0\\).\u003c/p\u003e\n\u003cp\u003eApply the above, then, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|a_1|^{2} + \\dots +|a_{m}|^{2} = \\|a_1 e_1 + \\dots +a_{m}e_{m} \\| = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course adding a list of positive numbers (\\(|a_{j}|^{2}\\)) together yields not a negative number, so there are no possible additive inverses that will cancel each other out. Hence, \\(a_{j} = 0\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhorthonormal/","tags":null,"title":"orthonormal"},{"categories":null,"contents":"An Orthonormal basis is defined as a basis of a finite-dimensional vector space that\u0026rsquo;s orthonormal.\nAdditional Information orthonormal list of the right length is a basis An orthonormal list is linearly independent, and linearly independent list of length dim V are a basis of V. \\(\\blacksquare\\)\nWriting a vector as a linear combination of orthonormal basis According to Axler, this result is why there\u0026rsquo;s so much hoopla about orthonormal basis.\nResult and Motivation For any basis of \\(V\\), and a vector \\(v \\in V\\), we by basis spanning have:\n\\begin{equation} v = a_1e_1 + \\dots a_{n}e_{n} \\end{equation}\nYet, for orthonormal basis, we can actually very easily know what the \\(a_{j}\\) are (and not just that some \\(a_{j}\\) exist). Specifically:\n\\begin{equation} a_{j} = \\langle v,e_{j} \\rangle \\end{equation}\nThat is, for orthonormal basis \\(e_{j}\\) of \\(V\\), we have that:\n\\begin{equation} v = \\langle v, e_{1} \\rangle e_{1} + \\dots + \\langle v, e_{n} \\rangle e_{n} \\end{equation}\nfor all \\(v \\in V\\).\nFurthermore:\n\\begin{equation} \\|v\\|^{2} = | \\langle v,e_1 \\rangle|^{2} + \\dots + | \\langle v, e_{n} \\rangle|^{2} \\end{equation}\nProof Given \\(e_{j}\\) are basis (nevermind orthonormal quite yet), we have that:\n\\begin{equation} v = a_1e_{1} + \\dots + a_{n}e_{n} \\end{equation}\nWLOG let\u0026rsquo;s take \\(\\langle v, e_{j} \\rangle\\):\n\\begin{equation} \\langle v,e_{j} \\rangle = \\langle a_1e_1 + \\dots +a_{n}e_{n}, e_{j} \\rangle \\end{equation}\nGiven additivity and homogenity in the first slot, we now have:\n\\begin{equation} \\langle v, e_{j} \\rangle = a_{1}\\langle e_1, e_{j} \\rangle + \\dots +a_{n}\\langle e_{n}, e_{j} \\rangle \\end{equation}\nOf course, each \\(e_{i}\\) and \\(e_{j}\\) are orthogonal, so for the most part 
\\(a_{i}\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\). Except where \\(a_{j} \\langle e_{j}, e_{j} \\rangle = a_{j} 1 = a_{j}\\) because the \\(e\\) vectors are also norm 1.\nTherefore:\n\\begin{equation} \\langle v, e_{j} \\rangle= 0 + \\dots +a_{j} + \\dots +0 = a_{j} \\end{equation}\nWe now have \\(\\langle v,e_{j} \\rangle = a_{j}\\) WLOG for all \\(j\\), as desired.\nPlugging this in for each \\(a_{j}\\) and applying Norm of an Orthogonal Linear Combination yields the \\(\\|v\\|^{2}\\) equation above. \\(\\blacksquare\\)\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eOrthonormal basis\u003c/a\u003e is defined as a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of a finite-dimensional vector space that\u0026rsquo;s \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eAdditional Information\u003c/h2\u003e\n\u003ch3 id=\"orthonormal-list-of-the-right-length-is-a-basis\"\u003eorthonormal list of the right length is a basis\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhorthonormal/#an-orthonormal-list-is-linearly-independent\"\u003eAn orthonormal list is linearly independent\u003c/a\u003e, and \u003ca href=\"/posts/kbhdimension/#linearly-independent-list-of-length-dim-v-are-a-basis-of-v\"\u003elinearly independent list of length dim V are a basis of V\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"writing-a-vector-as-a-linear-combination-of-orthonormal-basis\"\u003eWriting a vector as a linear combination of orthonormal basis\u003c/h3\u003e\n\u003cp\u003eAccording to Axler, this result is why there\u0026rsquo;s so much hoopla about \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"result-and-motivation\"\u003eResult and Motivation\u003c/h4\u003e\n\u003cp\u003eFor any basis of \\(V\\), and a vector \\(v \\in V\\), we by \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e spanning have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1e_1 + \\dots a_{n}e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYet, for \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e, we can actually very easily know what the \\(a_{j}\\) are (and not just that \u003cem\u003esome\u003c/em\u003e \\(a_{j}\\) exist). Specifically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{j} = \\langle v,e_{j} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is, for \u003ca href=\"/posts/kbhorthonormal_basis/\"\u003eorthonormal basis\u003c/a\u003e \\(e_{j}\\) of \\(V\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = \\langle v, e_{1} \\rangle e_{1} + \\dots + \\langle v, e_{n} \\rangle e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eFurthermore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|v\\|^{2} = | \\langle v,e_1 \\rangle|^{2} + \\dots + | \\langle v, e_{n} \\rangle|^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"proof\"\u003eProof\u003c/h4\u003e\n\u003cp\u003eGiven \\(e_{j}\\) are \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e (nevermind \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e quite yet), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = a_1e_{1} + \\dots + 
a_{n}e_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWLOG let\u0026rsquo;s take \\(\\langle v, e_{j} \\rangle\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v,e_{j} \\rangle = \\langle a_1e_1 + \\dots +a_{n}e_{n}, e_{j} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven additivity and homogenity in the first slot, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, e_{j} \\rangle = a_{1}\\langle e_1, e_{j} \\rangle + \\dots +a_{n}\\langle e_{n}, e_{j} \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, each \\(e_{i}\\) and \\(e_{j}\\) are \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e, so for the most part \\(a_{i}\\langle e_{i}, e_{j} \\rangle = 0\\) for \\(i \\neq j\\). Except where \\(a_{j} \\langle e_{j}, e_{j} \\rangle = a_{j} 1 = a_{j}\\) because the \\(e\\) vectors are also \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e 1.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle v, e_{j} \\rangle= 0 + \\dots +a_{j} + \\dots +0 = a_{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now have \\(\\langle v,e_{j} \\rangle = a_{j}\\) WLOG for all \\(j\\), as desired.\u003c/p\u003e\n\u003cp\u003ePlugging this in for each \\(a_{j}\\) and applying \u003ca href=\"/posts/kbhorthonormal/#norm-of-an-orthogonal-linear-combination\"\u003eNorm of an Orthogonal Linear Combination\u003c/a\u003e yields the \\(\\|v\\|^{2}\\) equation above. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhorthonormal_basis/","tags":null,"title":"orthonormal basis"},{"categories":null,"contents":"The OTC Markets/pink sheets are an unregulated group of Financial Markets, where many of the Penny stocks are.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhotc_markets/\"\u003eOTC Markets\u003c/a\u003e/\u003ca href=\"/posts/kbhotc_markets/\"\u003epink sheets\u003c/a\u003e are an unregulated group of \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Market\u003c/a\u003es, where many of the Penny stocks are.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhotc_markets/","tags":null,"title":"OTC Markets"},{"categories":null,"contents":"action outcomes are uncertain\n","html":"\u003cp\u003eaction outcomes are uncertain\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoutcome_uncertainty/","tags":null,"title":"Outcome Uncertainty"},{"categories":null,"contents":"overfitting is the process\nPenalty for large weight errors is a good way of mitigating overfitting\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhoverfitting/\"\u003eoverfitting\u003c/a\u003e is the process\u003c/p\u003e\n\u003cp\u003ePenalty for large weight errors is a good way of mitigating overfitting\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhoverfitting/","tags":null,"title":"overfitting"},{"categories":null,"contents":"We can use the scalars of a polynomial to build a new operator, which scales copies of an operator with the coefficients \\(a_{j}\\) of the polynomial.\nconstituents \\(p(z) = a_{0} + a_{1}z + a_{2}z^{2} + \\cdots + a_{m}z^{m}\\), a polynomial for \\(z \\in \\mathbb{F}\\) \\(T \\in \\mathcal{L}(V)\\) requirements \\(p(T)\\) is an operator refined by:\n\\begin{equation} p(T) = a_{0} I + a_{1} T + a_{2} T^{2} + \\cdots + a_{m} T^{m} \\end{equation}\nwhere, \\(T^{m}\\) is the power of operator\nadditional information \\(p(z) \\to p(T)\\) is a linear 
function additivity: \\((p_{1} + p_2)T = (a_{0}+b_{0})I \u0026hellip; = a_{0} I + b_{0} I \u0026hellip; = p_{1}(T) + p_{2}(T)\\) homogeneity: \\((\\lambda p)T = (\\lambda a_{0})I \u0026hellip; = \\lambda (a_{0} I \\dots) = \\lambda p(T)\\) polynomial of operator is commutative \\((pq)T = p(T) q(T)\\) \\(p(T)q(T) = q(T)p(T)\\) The first result can be shown because the product of polynomials are a result of rote algebra, and when you come across \\(pq\\) trying to combine \\(z^{j+k}\\) at each FOIL part, you just swap that into \\(T^{j+k} = T^{j}T^{k}\\). Then, you re-split the constants towards either side (i.e. if the FOIL gave \\(a_{j} b_{k} T^{j+k} \\implies a_{j} T^{j} b_{k} T^{k}\\)), then you factor the sums out into two separate pieces to get to \\(p(T)\\) and \\(q(T)\\).\nThe second result: \\(p(T) q(T) = (pq)(T) = (qp)T = q(T) p(T)\\), with the middle commutativity because \\(\\mathbb{F}\\) commutes.\n","html":"\u003cp\u003eWe can use the scalars of a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e to build a new \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e, which scales copies of an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e with the coefficients \\(a_{j}\\) of the \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(p(z) = a_{0} + a_{1}z + a_{2}z^{2} + \\cdots + a_{m}z^{m}\\), a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e for \\(z \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\(p(T)\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e refined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(T) = a_{0} I + a_{1} T + a_{2} T^{2} + \\cdots + a_{m} 
T^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T^{m}\\) is the \u003ca href=\"/posts/kbhraising_operators_to_powers/\"\u003epower of operator\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"p--z--to-p--t--is-a-linear-function--kbhfunction-dot-md\"\u003e\\(p(z) \\to p(T)\\) is a linear \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eadditivity: \\((p_{1} + p_2)T = (a_{0}+b_{0})I \u0026hellip; = a_{0} I + b_{0} I \u0026hellip; = p_{1}(T) + p_{2}(T)\\)\u003c/li\u003e\n\u003cli\u003ehomogeneity: \\((\\lambda p)T = (\\lambda a_{0})I \u0026hellip; = \\lambda (a_{0} I \\dots) = \\lambda p(T)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"polynomial-of-operator--kbhpolynomial-operator-dot-md--is-commutative\"\u003e\u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial of operator\u003c/a\u003e is commutative\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\\((pq)T = p(T) q(T)\\)\u003c/li\u003e\n\u003cli\u003e\\(p(T)q(T) = q(T)p(T)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe first result can be shown because the \u003ca href=\"\"\u003eproduct of polynomial\u003c/a\u003es are a result of rote \u003ca href=\"/posts/kbhalgebra/\"\u003ealgebra\u003c/a\u003e, and when you come across \\(pq\\) trying to combine \\(z^{j+k}\\) at each FOIL part, you just swap that into \\(T^{j+k} = T^{j}T^{k}\\). Then, you re-split the constants towards either side (i.e. 
if the FOIL gave \\(a_{j} b_{k} T^{j+k} \\implies a_{j} T^{j} b_{k} T^{k}\\)), then you factor the sums out into two separate pieces to get to \\(p(T)\\) and \\(q(T)\\).\u003c/p\u003e\n\u003cp\u003eThe second result: \\(p(T) q(T) = (pq)(T) = (qp)T = q(T) p(T)\\), with the middle \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e because \\(\\mathbb{F}\\) commutes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolynomial_operator/","tags":null,"title":"p(T)"},{"categories":null,"contents":"PACE is a form of Directed Evolution: which use a bacteriophage with a bit of its gene removed; then, engineer a virus to infect the bacteria, which will only successfully complete infection if the missing area is provided.\nThe mutation of this virus, then, essentially RNG-s mutations of new functions and will only produce successful new generations of bacteriologist when it works.\nPACE is hard The only way to check that PACE worked in the direction you want is by sampling the bacteria and hope that they are evolving in the correct direction\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e is a form of \u003ca href=\"/posts/kbhdirected_evolution/\"\u003eDirected Evolution\u003c/a\u003e: which use a bacteriophage with a bit of its gene removed; then, engineer a virus to infect the bacteria, which will only successfully complete infection if the missing area is provided.\u003c/p\u003e\n\u003cp\u003eThe mutation of this virus, then, essentially RNG-s mutations of new functions and will only produce successful new generations of bacteriologist when it works.\u003c/p\u003e\n\u003ch2 id=\"pace-is-hard\"\u003ePACE is hard\u003c/h2\u003e\n\u003cp\u003eThe only way to check that \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e worked in the direction you want is by sampling the bacteria and hope that they are evolving in the correct 
direction\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpace/","tags":null,"title":"PACE"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpacific_railroad_act/","tags":null,"title":"Pacific Railroad Act"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpagin_q/","tags":null,"title":"pagin:q"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpapyrus/","tags":null,"title":"papyrus"},{"categories":null,"contents":"a parameter of probability distribution govern the probabilities associated with different conditions in that distribution. It is usually a vector:\nFor instance, for uniform \\(Uni(\\alpha, \\beta)\\), parameter \\(\\theta = [\\alpha, \\beta]\\).\nimportantly, for a discrete distribution system with 6 parameters, we only need 5 independent parameters to be able to satisfy the entire system. This is because a probability distribution must sum to 1.\nhowever, for a conditional probability:\n\\begin{equation} p(x|a) \\end{equation}\nwe need to specificity \\((n-1)m\\) parameters, whereby \\(m\\) is the number of states \\(a\\) can take, and \\(n\\) the number of states \\(n\\) can take. Each group of \\(m\\) has to add up to \\(1\\).\nparameter learning see parameter learning\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e of \u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distribution\u003c/a\u003e govern the \u003ca href=\"/posts/kbhprobability/\"\u003eprobabilities\u003c/a\u003e associated with different conditions in that distribution. 
It is usually a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eFor instance, for uniform \\(Uni(\\alpha, \\beta)\\), parameter \\(\\theta = [\\alpha, \\beta]\\).\u003c/p\u003e\n\u003cp\u003eimportantly, for a \u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e system with 6 parameters, we only need 5 \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es to be able to satisfy the entire system. This is because a probability distribution must sum to 1.\u003c/p\u003e\n\u003cp\u003ehowever, for a \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x|a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe need to specificity \\((n-1)m\\) \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es, whereby \\(m\\) is the number of states \\(a\\) can take, and \\(n\\) the number of states \\(n\\) can take. 
Each group of \\(m\\) has to add up to \\(1\\).\u003c/p\u003e\n\u003ch2 id=\"parameter-learning--kbhparameter-learning-dot-md\"\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparameter/","tags":null,"title":"parameter"},{"categories":null,"contents":"We want to learn a Baysian Network\u0026rsquo;s parameters from data.\nunbiased parameter learning Maximum Likelihood Parameter Learning Baysian Parameter Learning If we want to do it in a Bayes Net:\nparameter learning in Baysian Network\n","html":"\u003cp\u003eWe want to learn a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u0026rsquo;s \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es from data.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunbiased_parameter_learning/\"\u003eunbiased parameter learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf we want to do it in a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_network/#id-d01990aa-fcca-42f0-bd6c-7ba13746b6ca-parameter-learning-in-id-5eaa4b96-cbc2-4811-91c7-88ea2e164fc3-baysian-network\"\u003eparameter learning in Baysian Network\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparameter_learning/","tags":null,"title":"parameter learning"},{"categories":null,"contents":" tag EEG by data type (what mental stage does it come from?) 
per region, per data type, we take a band-power series calculate statistics per series shove the results into something interpretable Conclusion N1 results performs the best across brain regions; where the data came from didn\u0026rsquo;t change performance by much.\n","html":"\u003col\u003e\n\u003cli\u003etag EEG by data type (what mental stage does it come from?)\u003c/li\u003e\n\u003cli\u003eper region, per data type, we take a band-power series\u003c/li\u003e\n\u003cli\u003ecalculate statistics per series\u003c/li\u003e\n\u003cli\u003eshove the results into something interpretable\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"conclusion\"\u003eConclusion\u003c/h2\u003e\n\u003cp\u003eN1 results performs the best across brain regions; where the data came from didn\u0026rsquo;t change performance by much.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparkingson_s_classification_with_eeg/","tags":null,"title":"Parkingson's Classification with EEG"},{"categories":null,"contents":"PARRY is if ELIZA had mental states such as fear, anger, and mistrust. Mentions of various things in the user turn increases or decreases each variable\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhparry/\"\u003ePARRY\u003c/a\u003e is if \u003ca href=\"/posts/kbheliza/\"\u003eELIZA\u003c/a\u003e had mental states such as fear, anger, and mistrust. 
Mentions of various things in the user turn increases or decreases each variable\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparry/","tags":null,"title":"PARRY"},{"categories":null,"contents":"Differential Equations in more than one independent variable:\n\\begin{equation} f(x_1, \\dots, x_{n}) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e in more than one independent variable:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x_1, \\dots, x_{n})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpartial_differential_equations/","tags":null,"title":"Partial Differential Equation"},{"categories":null,"contents":"Partially Observable Markov Decision Process is a with .\nComponents:\nstates actions (given state) transition function (given state and actions) reward function Belief System beliefs observations observation model \\(O(o|a,s\u0026rsquo;)\\) As always we desire to find a \\(\\pi\\) such that we can:\n\\begin{equation} \\underset{\\pi \\in \\Pi}{\\text{maximize}}\\ \\mathbb{E} \\qty[ \\sum_{t=0}^{\\infty} \\gamma^{t} R(b_{t}, \\pi(b_{t}))] \\end{equation}\nwhereby our \\(\\pi\\) instead of taking in a state for input takes in a belief (over possible states) as input.\nobservation and states \u0026ldquo;where are we, and how sure are we about that?\u0026rdquo;\nbeliefs and filters\npolicy representations \u0026ldquo;how do we represent a policy\u0026rdquo;\na tree: conditional plan a graph: with utility: + just take the top action of the conditional plan the alpha-vector was computed from policy evaluations \u0026ldquo;how good is our policy / what\u0026rsquo;s the utility?\u0026rdquo;\nconditional plan evaluation policy solutions \u0026ldquo;how do we make that policy better?\u0026rdquo;\nexact solutions optimal value function for POMDP POMDP value-iteration approximate solutions estimate an , and then use a policy representation: 
upper-bounds for s lower-bounds for s online solutions Online POMDP Methods\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePartially Observable Markov Decision Process\u003c/a\u003e is a with .\u003c/p\u003e\n\u003cp\u003eComponents:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cul\u003e\n\u003cli\u003estates\u003c/li\u003e\n\u003cli\u003eactions (given state)\u003c/li\u003e\n\u003cli\u003etransition function (given state and actions)\u003c/li\u003e\n\u003cli\u003ereward function\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBelief System\n\u003cul\u003e\n\u003cli\u003ebeliefs\u003c/li\u003e\n\u003cli\u003eobservations\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#observation-model\"\u003eobservation model\u003c/a\u003e \\(O(o|a,s\u0026rsquo;)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs always we desire to find a \\(\\pi\\) such that we can:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\underset{\\pi \\in \\Pi}{\\text{maximize}}\\ \\mathbb{E} \\qty[ \\sum_{t=0}^{\\infty} \\gamma^{t} R(b_{t}, \\pi(b_{t}))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby our \\(\\pi\\) instead of taking in a state for input takes in a \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e (over possible states) as input.\u003c/p\u003e\n\u003ch2 id=\"observation-and-states\"\u003eobservation and states\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;where are we, and how sure are we about that?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebeliefs\u003c/a\u003e and \u003ca href=\"/posts/kbhfilters/\"\u003efilters\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"policy-representations\"\u003epolicy representations\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;how do we represent a policy\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ea tree: \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional 
plan\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ea graph:\u003c/li\u003e\n\u003cli\u003ewith utility: +\u003c/li\u003e\n\u003cli\u003e\n\u003cul\u003e\n\u003cli\u003ejust take the top action of the conditional plan the alpha-vector was computed from\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"policy-evaluations\"\u003epolicy evaluations\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;how good is our policy / what\u0026rsquo;s the utility?\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/#id-6f19368f-74b5-4606-a882-ec9bc5619873-conditional-plan-evaluation\"\u003econditional plan evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"policy-solutions\"\u003epolicy solutions\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;how do we make that policy better?\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"exact-solutions\"\u003eexact solutions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/#id-9ccda204-0967-44c8-a801-c92d0df154b5-optimal-value-function-for-id-130d5294-0274-422b-b395-7d6f7f75be7d-pomdp\"\u003eoptimal value function for POMDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/#pomdp--kbhpartially-observable-markov-decision-process-dot-md--value-iteration\"\u003ePOMDP value-iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"approximate-solutions\"\u003eapproximate solutions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eestimate an , and then use a policy representation:\n\u003cul\u003e\n\u003cli\u003e\n\u003ch2 id=\"upper-bounds-for-s\"\u003eupper-bounds for s\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003ch2 id=\"lower-bounds-for-s\"\u003elower-bounds for 
s\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"online-solutions\"\u003eonline solutions\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhonline_pomdp_methods/\"\u003eOnline POMDP Methods\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpartially_observable_markov_decision_process/","tags":null,"title":"Partially Observable Markov Decision Process"},{"categories":null,"contents":"A markov game with State Uncertainty solved using POMDPs.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmarkov_game/\"\u003emarkov game\u003c/a\u003e with \u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e solved using \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpartially_observable_markov_game/","tags":null,"title":"partially observable markov game"},{"categories":null,"contents":"DOI: 10.3389/fnagi.2020.605317\nOne-Liner An excercize scheme has had some measured effect on theta/alpha ratio and Brain wave frequency on AD patients; prognosis of AD not controlled for.\nNovelty Leveraged physical training scheme and measured EEG effects by quantifying theta/alpha ratio Notable Methods Used theta/alpha ratio as assay for improvement, and found the exercise scheme did so p\u0026lt;0.05 Only tested patients with AD w/o a control for stage Key Figs Figure 1 This figure tells us th N number of participants through the study\nFigure 2 This figure shows us that the excercize intervention has statistically significant results to both Brain Oscillation frequency and Theta/Alpha ratio. The x-axis shows us the pre-and-post bars for TG (treatment) and CG (control); the y-axis quantifies the value measured in a box plot. 
The subplots are brain oscelation and theta/alpha ratio respectively.\nNew Concepts theta/alpha ratio Notes ","html":"\u003cp\u003eDOI: 10.3389/fnagi.2020.605317\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eAn excercize scheme has had some measured effect on \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e and Brain wave frequency on AD patients; prognosis of AD not controlled for.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLeveraged physical training scheme and measured EEG effects by quantifying \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eUsed \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e as assay for improvement, and found the exercise scheme did so p\u0026lt;0.05\u003c/li\u003e\n\u003cli\u003eOnly tested patients with AD w/o a control for stage\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-1\"\u003eFigure 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-13-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us th N number of participants through the study\u003c/p\u003e\n\u003ch3 id=\"figure-2\"\u003eFigure 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_11-14-06_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure shows us that the excercize intervention has \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003estatistically significant\u003c/a\u003e results to both Brain Oscillation frequency and Theta/Alpha ratio. The x-axis shows us the pre-and-post bars for TG (treatment) and CG (control); the y-axis quantifies the value measured in a box plot. 
The subplots are brain oscelation and \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e respectively.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhparvin_2020/","tags":["ntj"],"title":"Parvin 2020"},{"categories":null,"contents":"Patient Scoring Systems How do we score the status of a patient? Well, we can begin by having a chart\u0026mdash;SpO2, can breath, etc. etc.\nDrawbacks:\nthese systems are quite generic not very representative of some information Method MIMIC-IV 6000 ICU patient stays, 48994 vital signs\u0026mdash;measuring across patient stays dynamic time warping to create a similar matrix clustering post-hoc to correlate patients together ","html":"\u003ch2 id=\"patient-scoring-systems\"\u003ePatient Scoring Systems\u003c/h2\u003e\n\u003cp\u003eHow do we score the status of a patient? Well, we can begin by having a chart\u0026mdash;SpO2, can breath, etc. 
etc.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eDrawbacks\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethese systems are quite generic\u003c/li\u003e\n\u003cli\u003enot very representative of some information\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"method\"\u003eMethod\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eMIMIC-IV 6000 ICU patient stays, 48994 vital signs\u0026mdash;measuring across patient stays\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edynamic time warping to create a similar matrix\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eclustering post-hoc to correlate patients together\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpatient_risk_prediction/","tags":null,"title":"Patient Risk Prediction"},{"categories":null,"contents":" No Demo Day TODO Email need statement template Needfinding Not all patients want to be treated the same way Attitudes towards heathcare system Fostering strong interaction; facilitate interaction Problem: patients have attitudes that physicians can\u0026rsquo;t effectively communicate.\nAction item: interview doctors and patients\nNeed two need statement.\n","html":"\u003cul\u003e\n\u003cli\u003eNo Demo Day\u003c/li\u003e\n\u003cli\u003eTODO Email need statement template\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"needfinding\"\u003eNeedfinding\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNot all patients want to be treated the same way\u003c/li\u003e\n\u003cli\u003eAttitudes towards heathcare system\u003c/li\u003e\n\u003cli\u003eFostering strong interaction; facilitate interaction\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eProblem: patients have \u003cstrong\u003eattitudes\u003c/strong\u003e that physicians can\u0026rsquo;t effectively communicate.\u003c/p\u003e\n\u003cp\u003eAction item: interview doctors \u003cstrong\u003e\u003cstrong\u003eand\u003c/strong\u003e\u003c/strong\u003e patients\u003c/p\u003e\n\u003cp\u003eNeed two need 
statement.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpcp_april_checkin/","tags":null,"title":"PCP April Checkin"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpeft/","tags":null,"title":"PEFT"},{"categories":null,"contents":"Memoryless policy search through fake determinism.\nuses a deterministic simulative function to calculate the value performs policy search by using normal standard optimizations Primary contribution: transforming stochastic POMDP to a deterministic simulative function; foregos alpha vectors.\nSuppose you have \\(m\\) initial states that you sampled, you can then just try to get the set of acions that maximize:\n\\begin{equation} \\arg\\max_{\\theta} \\tilde{V} = \\frac{1}{m} \\sum_{n}^{m} V_{\\theta}(s_{m}) \\end{equation}\nTo actually ensure that \\(V\\) has deterministic transitions\u0026hellip;\ndeterministic simulative function Typically, a generative model takes random actions from the action distribution. 
However, what we do is have a simulator which takes a RANDOM NUMBER as INPUT, and also the action distribution, and DETERMINISTICALLY give an action.\nPegasus procedure We augment the state:\n\\begin{equation} s \\in (S, \\mathbb{R}^{[0,1]}, \\mathbb{R}^{[0,1]}, \\dots) \\end{equation}\nmeaning every state is a state against a series of random numbers between \\(0\\) and \\(1\\):\n\\begin{equation} (s, 0.91, 0.22, \\dots) \\end{equation}\nat every transition, we eat up one of the random numbers to use, and take an action, and use those in our deterministic simulative function to obtain our next state.\ndeterminism The idea is that if we have sampled enough initial states, the correct action trajectory which maximizes the deterministic \\(\\tilde{V}\\) will also maximize that for the real \\(V\\).\n","html":"\u003cp\u003eMemoryless policy search through fake determinism.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003euses a \u003ca href=\"#deterministic-simulative-function\"\u003edeterministic simulative function\u003c/a\u003e to calculate the value\u003c/li\u003e\n\u003cli\u003eperforms policy search by using normal standard optimizations\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ePrimary contribution: transforming \u003cstrong\u003estochastic\u003c/strong\u003e \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e to a \u003ca href=\"#deterministic-simulative-function\"\u003edeterministic simulative function\u003c/a\u003e; foregos \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eSuppose you have \\(m\\) initial states that you sampled, you can then just try to get the set of acions that maximize:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{\\theta} \\tilde{V} = \\frac{1}{m} \\sum_{n}^{m} V_{\\theta}(s_{m})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo actually ensure that \\(V\\) has deterministic transitions\u0026hellip;\u003c/p\u003e\n\u003ch2 
id=\"deterministic-simulative-function\"\u003edeterministic simulative function\u003c/h2\u003e\n\u003cp\u003eTypically, a \u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e takes random actions from the action distribution. However, what we do is have a simulator which takes a \u003cstrong\u003eRANDOM NUMBER\u003c/strong\u003e as \u003cstrong\u003eINPUT\u003c/strong\u003e, and also the action distribution, and \u003cstrong\u003eDETERMINISTICALLY\u003c/strong\u003e give an action.\u003c/p\u003e\n\u003ch2 id=\"pegasus--kbhpegasus-dot-md--procedure\"\u003e\u003ca href=\"/posts/kbhpegasus/\"\u003ePegasus\u003c/a\u003e procedure\u003c/h2\u003e\n\u003cp\u003eWe augment the state:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ns \\in (S, \\mathbb{R}^{[0,1]}, \\mathbb{R}^{[0,1]}, \\dots)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning every state is a state against a series of random numbers between \\(0\\) and \\(1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(s, 0.91, 0.22, \\dots)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eat every transition, we eat up one of the random numbers to use, and take an action, and use those in our \u003ca href=\"#deterministic-simulative-function\"\u003edeterministic simulative function\u003c/a\u003e to obtain our next state.\u003c/p\u003e\n\u003ch2 id=\"determinism\"\u003edeterminism\u003c/h2\u003e\n\u003cp\u003eThe idea is that if we have sampled enough initial states, the correct action trajectory which maximizes the deterministic \\(\\tilde{V}\\) will also maximize that for the real \\(V\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpegasus/","tags":null,"title":"Pegasus"},{"categories":null,"contents":"permits model is a counter for which there is \\(n\\) threads can do a task. For instance, there is \\(n\\) permits; each time it is requested, it needs to be subtracted.\nIdeally, we do this without busy waiting (while loops with lock and unlocks). 
So:\ncondition variable you can call wait on a condition variable, which will block until another thread calls notify_all.\nidentify a single event to wait/notify ensure that there is something to check to represent the event create a condition variable and share it identify who is the notifier, call notify_all when appropriate identify who will wait, and wait until condition variable triggers condition_variable_any permitsCV; // ... thread(ref(permitsCV)) Identify the ISOLATED event to notify: for instance, whenever permit goes from 0=\u0026gt;1, you notify. But, when permits go from 1=\u0026gt;2, there really isn\u0026rsquo;t really a need to notify. If you gave wait an unlocked lock, you UB.\nBut, implementing this is a little tricky: before sleeping on the condition variable, we have to release the underlying lock, but then values are not guaranteed after you unlock. So, the actual implementation:\npermits.lock(); while (permits == 0) { permitsCV.wait(permitsLock); } permits--; permitsLock.unlock(); the condition variable will\u0026hellip;\nstart sleeping FIRST unlock a lock FOR US AFTER the sleeping starts after waiting ends, tries to reaquire lock blocks until we have the lock again this ensures that you don\u0026rsquo;t have to give up the lock before sleeping.\nWe need a \u0026ldquo;while\u0026rdquo; loop here to CHECK whether or not, after sleeping is over, our locked variable needs to be checked again just in case another thread took it: just because we woke up it doesn\u0026rsquo;t mean the condition is true forever.\nWe also need a \u0026ldquo;while\u0026rdquo; loop because condition variables will send false wakeup signal, so we need to check the condition to be extra sure.\nso CALL CONDITION VARIABLES IN A WHILE LOOP.\nimplementation similar to mutexs\nwait should autonomically put the thread to sleep + unlock the given lock when the thread wakes up, it should reacquire the lock + return notify one/all notify_one: should wake up + unblock first 
waiting thread notify_all: should wake up/unblock all waiting threads if no one is waiting, do nothing.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpermits_model/\"\u003epermits model\u003c/a\u003e is a counter for which there is \\(n\\) threads can do a task. For instance, there is \\(n\\) permits; each time it is requested, it needs to be subtracted.\u003c/p\u003e\n\u003cp\u003eIdeally, we do this without busy waiting (while loops with lock and unlocks). So:\u003c/p\u003e\n\u003ch2 id=\"condition-variable\"\u003econdition variable\u003c/h2\u003e\n\u003cp\u003eyou can call \u003cstrong\u003ewait\u003c/strong\u003e on a condition variable, which will block until another thread calls \u003cstrong\u003enotify_all\u003c/strong\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eidentify a single event to wait/notify\u003c/li\u003e\n\u003cli\u003eensure that there is something to check to represent the event\u003c/li\u003e\n\u003cli\u003ecreate a condition variable and share it\u003c/li\u003e\n\u003cli\u003eidentify who is the notifier, call \u003cstrong\u003enotify_all\u003c/strong\u003e when appropriate\u003c/li\u003e\n\u003cli\u003eidentify who will wait, and \u003cstrong\u003ewait\u003c/strong\u003e until condition variable triggers\u003c/li\u003e\n\u003c/ol\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econdition_variable_any\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e// ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eref\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eIdentify the \u003cstrong\u003eISOLATED event\u003c/strong\u003e to notify: for instance, whenever permit goes from 0=\u0026gt;1, you notify. But, when permits go from 1=\u0026gt;2, there really isn\u0026rsquo;t really a need to notify. If you gave wait an unlocked lock, you UB.\u003c/p\u003e\n\u003cp\u003eBut, implementing this is a little tricky: before sleeping on the condition variable, we have to release the underlying lock, but then values are not guaranteed after you unlock. 
So, the actual implementation:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewait\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermitsLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e--\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermitsLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethe condition variable will\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart sleeping \u003cstrong\u003e\u003cstrong\u003eFIRST\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eunlock a lock FOR US \u003cstrong\u003e\u003cstrong\u003eAFTER\u003c/strong\u003e\u003c/strong\u003e the sleeping starts\u003c/li\u003e\n\u003cli\u003eafter waiting ends, tries to reaquire lock\u003c/li\u003e\n\u003cli\u003eblocks until we have the lock again\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethis ensures that you don\u0026rsquo;t have to give up the lock before sleeping.\u003c/p\u003e\n\u003cp\u003eWe need a \u0026ldquo;while\u0026rdquo; loop here to CHECK whether or not, after sleeping is over, our locked variable needs to be checked again just in case another thread took it: \u003cstrong\u003ejust because we woke up it doesn\u0026rsquo;t mean the condition is true forever\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eWe also need a \u0026ldquo;while\u0026rdquo; loop because condition variables will send \u003cstrong\u003efalse\u003c/strong\u003e wakeup signal, so we need to check the condition to be extra sure.\u003c/p\u003e\n\u003cp\u003eso \u003cstrong\u003e\u003cstrong\u003eCALL CONDITION VARIABLES IN A WHILE LOOP\u003c/strong\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003cp\u003esimilar to \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003es\u003c/p\u003e\n\u003ch4 
id=\"wait\"\u003ewait\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003eshould autonomically put the thread to sleep + unlock the given lock\u003c/li\u003e\n\u003cli\u003ewhen the thread wakes up, it should reacquire the lock + return\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"notify-one-all\"\u003enotify one/all\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003enotify_one: should wake up + unblock first waiting thread\u003c/li\u003e\n\u003cli\u003enotify_all: should wake up/unblock all waiting threads\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eif no one is waiting, do nothing.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpermits_model/","tags":null,"title":"permits model"},{"categories":null,"contents":"permittivity of free space is a constant \\(\\epsilon_{0} \\approx 8.85 \\times 10^{-12} \\frac{C^{2}}{N \\cdot m^{2}}\\).\nredefinition of Coulomb\u0026rsquo;s Constant based on permittivity of free space \\begin{equation} k = \\frac{1}{4\\pi \\epsilon_{0}} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e is a constant \\(\\epsilon_{0} \\approx 8.85 \\times 10^{-12} \\frac{C^{2}}{N \\cdot m^{2}}\\).\u003c/p\u003e\n\u003ch2 id=\"redefinition-of-coulomb-s-constant--kbhcoulomb-s-law-dot-md--based-on-permittivity-of-free-space--kbhpermittivity-of-free-space-dot-md\"\u003eredefinition of \u003ca href=\"/posts/kbhcoulomb_s_law/\"\u003eCoulomb\u0026rsquo;s Constant\u003c/a\u003e based on \u003ca href=\"/posts/kbhpermittivity_of_free_space/\"\u003epermittivity of free space\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nk = \\frac{1}{4\\pi \\epsilon_{0}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpermittivity_of_free_space/","tags":null,"title":"permittivity of free space"},{"categories":null,"contents":"A permutation \\(\\pi\\) of some \\(\\{1,2,\u0026hellip;, n\\}\\) is a rearrangement of this list. 
There are \\(n!\\) different permutations of this set.\nA permutation is an ORDERED arrangement of objects.\npermutation with indistinct objects What if you want to order a set with sub-set of indistinct objects? Like, for instance, how many ways are there to order:\n\\begin{equation} 10100 \\end{equation}\nFor every permutation of \\(1\\) in this set, there are two copies being overcounted.\nLet there are \\(n\\) objects. \\(n_1\\) objects are the indistinct, \\(n_2\\) objects are indistinct, \u0026hellip; \\(n_{r}\\) objects are the same. The number of permutations are:\n\\begin{equation} \\frac{n!}{{n_1}!{n_2}! \\dots {n_r}!} \\end{equation}\nYou can use iterators to give you permutations.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e \\(\\pi\\) of some \\(\\{1,2,\u0026hellip;, n\\}\\) is a rearrangement of this list. There are \\(n!\\) different permutations of this set.\u003c/p\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e is an \u003cstrong\u003eORDERED\u003c/strong\u003e arrangement of objects.\u003c/p\u003e\n\u003ch2 id=\"permutation-with-indistinct-objects\"\u003epermutation with indistinct objects\u003c/h2\u003e\n\u003cp\u003eWhat if you want to order a set with sub-set of indistinct objects? Like, for instance, how many ways are there to order:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n10100\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor every permutation of \\(1\\) in this set, there are two copies being overcounted.\u003c/p\u003e\n\u003cp\u003eLet there are \\(n\\) objects. \\(n_1\\) objects are the indistinct, \\(n_2\\) objects are indistinct, \u0026hellip; \\(n_{r}\\) objects are the same. The number of permutations are:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{n!}{{n_1}!{n_2}! 
\\dots {n_r}!}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou can use iterators to give you permutations.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpermutation/","tags":null,"title":"permutation"},{"categories":null,"contents":"perplexity is a measure of a language model\u0026rsquo;s ability to predict words.\nIntuition A good language model should prefer \u0026ldquo;real\u0026rdquo; or otherwise \u0026ldquo;frequently observed\u0026rdquo; sentences. That is, it should assign lower probability to word salad.\nSo a good language model should assign a higher probability to the next word that actually occurs given a sequence of words.\nGenerally, we want the LM to assign high probability to the entire test set. However, big issue is that probability gets smaller by length of the text.\nTo address this, we normalize by number of words.\nExpression \\begin{equation} PP (W) = P(w_1 w_2 \\dots w_{n})^{-\\frac{1}{N}} \\end{equation}\nSpecifically,\n\\begin{equation} PP (W) = N \\sqrt{\\frac{1}{P(w_1, \\dots w_{n)}}} \\end{equation}\nNotably, perplexity is inverse of probability. We want the lowest entropy possible, i.e. the highest likelihood possible. Therefore, the range of perplexity is \\([1, \\infty]\\). We therefore want to minimize perplexity.\nBranching Factor perplexity could be also considered the \u0026ldquo;weighted average Branching Factor\u0026rdquo; of the language. 
That is, the average number of possible next words given each of the words.\nThe Branching Factor is the set of possible next words that can follow a given word.\nSampling Conditioned upon previous words or current n-gram, sample from the next possible word in the distribution.\nMeaning, we sample from the distribution of n-grams whose n-1 characters are known.\nSparsity Out-of-sample ngrams will never be counted, no matter how truly likely.\nAlso, it causes perplexity problems: because you can\u0026rsquo;t divide by 0, perplexity assumes that any sequence of words should have non zero likeliness.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e is a measure of a language model\u0026rsquo;s ability to predict words.\u003c/p\u003e\n\u003ch2 id=\"intuition\"\u003eIntuition\u003c/h2\u003e\n\u003cp\u003eA good language model should prefer \u0026ldquo;real\u0026rdquo; or otherwise \u0026ldquo;frequently observed\u0026rdquo; sentences. That is, it should assign lower probability to word salad.\u003c/p\u003e\n\u003cp\u003eSo a good language model should assign a higher probability to the next word that actually occurs given a sequence of words.\u003c/p\u003e\n\u003cp\u003eGenerally, we want the LM to assign high probability to the entire test set. However, big issue is that \u003cstrong\u003eprobability gets smaller by length of the text\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eTo address this, we normalize by number of words.\u003c/p\u003e\n\u003ch2 id=\"expression\"\u003eExpression\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nPP (W) = P(w_1 w_2 \\dots w_{n})^{-\\frac{1}{N}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSpecifically,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPP (W) = N \\sqrt{\\frac{1}{P(w_1, \\dots w_{n)}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotably, perplexity is inverse of probability. We want the lowest entropy possible, i.e. the highest likelihood possible. 
Therefore, the range of perplexity is \\([1, \\infty]\\). We therefore want to \u003cstrong\u003eminimize perplexity\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"branching-factor\"\u003eBranching Factor\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhperplexity/\"\u003eperplexity\u003c/a\u003e could be also considered the \u0026ldquo;weighted average \u003ca href=\"#branching-factor\"\u003eBranching Factor\u003c/a\u003e\u0026rdquo; of the language. That is, the average number of possible next words given each of the words.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"#branching-factor\"\u003eBranching Factor\u003c/a\u003e is the set of possible next words that can follow a given word.\u003c/p\u003e\n\u003ch2 id=\"sampling\"\u003eSampling\u003c/h2\u003e\n\u003cp\u003eConditioned upon previous words or current n-gram, sample from the next possible word in the distribution.\u003c/p\u003e\n\u003cp\u003eMeaning, we sample from the distribution of n-grams whose n-1 characters are known.\u003c/p\u003e\n\u003ch3 id=\"sparsity\"\u003eSparsity\u003c/h3\u003e\n\u003cp\u003eOut-of-sample ngrams will never be counted, no matter how truly likely.\u003c/p\u003e\n\u003cp\u003eAlso, it causes perplexity problems: because you can\u0026rsquo;t divide by 0, perplexity assumes that any sequence of words should have non zero likeliness.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhperplexity/","tags":null,"title":"perplexity"},{"categories":null,"contents":"PET is a type of plastic.\n","html":"\u003cp\u003ePET is a type of plastic.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpet/","tags":null,"title":"PET"},{"categories":null,"contents":"Consider a family of bacterial:\n\\begin{equation} P\u0026rsquo; = 2P \\end{equation}\nthis is a normal exponential growth situation. However, we know this isn\u0026rsquo;t true. Because the nutrients in the petri dish has a finite amount of nutrients. 
Hopefully this rule succeeds when the population is small, and should stop when the growth is bounded.\nFor instance, say you can never have more than 100 bacteria:\n\\begin{equation} P\u0026rsquo; = 2P(100-P) \\end{equation}\nSee logistic equation for solution\n","html":"\u003cp\u003eConsider a family of bacterial:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP\u0026rsquo; = 2P\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is a normal exponential growth situation. However, we know this isn\u0026rsquo;t true. Because the nutrients in the petri dish has a finite amount of nutrients. Hopefully this rule succeeds when the population is small, and should stop when the growth is bounded.\u003c/p\u003e\n\u003cp\u003eFor instance, say you can never have more than 100 bacteria:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP\u0026rsquo; = 2P(100-P)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhlogistic_equations/\"\u003elogistic equation\u003c/a\u003e for solution\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpetri_dish/","tags":null,"title":"petri dish"},{"categories":null,"contents":"PGA extends controller gradient ascent to cover CPOMDPs\nNotation Recall from controller gradient ascent we have an objective which we will modify for CPOMDPs. For initial controller-states \\(\\beta\\) and utility \\(\\bold{u}_{\\theta}\\):\n\\begin{equation} \\max_{\\theta}\\ \\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta} \\end{equation}\nsubject to:\n\\(\\Psi\\) remains a probably distribution over \\(|A|\\) \\(\\eta\\) remains a probably distribution over \\(|X|\\) and, new for CPOMDP, \\(\\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} C_{i} \\leq \\epsilon_{i}\\ \\forall i\\), that is, each constraint \\(C_{i} \\in \\bold{C}_{i}\\) is satisfied to be lower than the budget \\(\\epsilon_{i}\\). 
where\n\\begin{equation} T_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta (x\u0026rsquo;|x,a,o) \\end{equation}\nand\n\\begin{equation} R_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a) \\end{equation}\nin which\n\\(X\\): a set of nodes (hidden, internal states) \\(\\Psi(a|x)\\): probability of taking an action \\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states Optimization Formulation we formulate policy parameters \\(\\theta\\) as a large stacked vector of the shape:\n\\begin{equation} \\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f)) \\end{equation}\nLet us define block diagonal matricies \\(J_{\\Psi}\\) and \\(J_{\\eta}\\), whereby:\n\\begin{equation} J_{\\Psi} = \\mqty(\\bold{1}_{\\Psi}^{\\top} \u0026amp; \\dots \u0026amp; \\bold{0} \\\\ \u0026amp; \\dots \u0026amp; \\\\ \\bold{0} \u0026amp; \\dots \u0026amp;\\bold{1}_{\\Psi}^{\\top} ) \\end{equation}\nwhere \\(J_{\\Psi} \\in \\mathbb{R}^{|X| \\times (|A| \\times |X|)}\\) and each block part \\(\\bold{1}_{\\Psi} \\in \\mathbb{R}^{|A|}\\) represents a one-vector of length of the action space. You can see how multiplying a vector \\(\\qty[\\Psi(a_0|x_0) \\\\ \\dots \\\\ \\Psi(a_n|x_N)]\\) against this matrix should yield a \\(1\\) vector if each \\(\\Psi(\\cdot | x_{i})\\) is a probability distribution.\nSimilar, we define, \\(J_{\\eta}\\) in a similar fashion to add the distributions over each \\(\\eta(\\cdot | x, a, o)\\).\nThis yields another block-block matrix\n\\begin{equation} J = \\mqty(J_{\\Psi} \u0026amp; 0 \\\\ 0 \u0026amp; J_{\\eta}) \\end{equation}\nfor which we desire \\(J\\theta = 1\\) in order to verify that the probability distributions are valid.\nLastly, let us define \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\). 
For ease of notation in constructing this result, we declare:\n\\begin{equation} f(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta} \\end{equation}\nand\n\\begin{equation} h_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i} \\end{equation}\nFinally, this allows us to formulate the problem as a nonlinear optimization problem:\n\\begin{align} \\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\ \\text{such that}\\ \u0026amp;J\\theta = 1 \\\\ \u0026amp; \\theta \\geq 0 \\\\ \u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{align}\nGradient Ascent Procedure Note that the initial state information \\(\\beta\\) is constant. Therefore, the gradient of the top expression against each field in \\(\\theta\\) becomes, via an rearrangement of the chain rule:\n\\begin{equation} \\pdv{f(\\theta)}{\\theta_{i}} = \\beta^{\\top} \\qty[\\bold{Z}^{-1} \\qty( \\pdv{\\bold{r}_{\\theta}}{\\theta_{i}} + \\pdv{\\bold{Z}}{\\theta_{i}} \\bold{Z}^{-1}\\bold{r}_{\\theta})] \\end{equation}\nThe derivatives of each \\(\\theta\\) against each \\(\\bold{r}\\) and \\(\\bold{Z}\\) is given on pp 485 of Alg4DM.\nAs with all gradient ascent cases, each \u0026ldquo;step\u0026rdquo; takes the rough form of\n\\begin{equation} \\xi = \\theta + \\alpha \\nabla_{\\theta} f(\\theta) \\end{equation}\nhowever, in this implementation, the step size isn\u0026rsquo;t actually fixed. Instead, we do\u0026hellip;\nGolden Section Line Search Instead of taking fixed-step sizes to get to the maxima, PGA proposes Golden Section Line Search as a line-search algorithm to dynamically choose the steps to get to maxima.\nLine search algorithms are typically computationally heavy as it requires evaluating the relative utility (i.e. 
here \\(\\bold{Z}^{-1} \\bold{r}_{\\theta}\\)) a lot of times, which is computationally intractable.\nSo, Golden Section Line Search uses a divide-and-conquer method via the golden ratio to address this issue.\ndef optimize!(CPOMDP, phi=golden_ratio, gamma=discount_factor, eps=minimum_boundary): # C-POMDP Spec f = CPOMDP.objective_function # this is f(theta) T = CPOMDP.transition_matrix R = CPOMDP.reward_vector b = CPOMDP.initial_state_vector # as obtained above nabla = f(theta).grad(theta) # initialize the search bounds based on splitting # search space (full step, no step) via the golden ratio a1, a2, a3, a4 = 0, (1-(1/phi)), (1/phi), 1 # calculate new policies and their utilities theta2, theta3 = theta + a2*nabla, theta + a3*nabla z2, z3 = (I-gamma*T@theta2).inverse(), (I-gamma*T@theta3).inverse() # search until the middle bounds converged while (a4-a1) \u0026lt; eps*(abs(a2) + abs(a3)): # calculate utility vectors over belief u2, u3 = z2@R, z3@R # either relax top or bottom bounds, depending on # which one we had been successfully maximizing if b.dot(u3) \u0026gt;= b.dot(u2): # search \u0026#34;upwards\u0026#34;, set bottom bound to a3 a1, a2, a3 = a2, a3, a2+(1/phi)*(a4-a2) theta3, theta2 = theta + a3*nabla, theta3 z3, z2 = (I-gamma*T@theta2).inverse(), z3 else: # search \u0026#34;downwards\u0026#34; a1, a3, a2 = a2, a2, a3-(1/phi)*(a3-a1) theta2, theta3 = theta + a2*nabla, theta2 z2, z3 = (I-gamma*T@theta2).inverse(), z2 # return the average of our converged results return 0.5*(theta2+theta3), 0.5*(u2+u3) Naive Projection Once we obtain a new set of parameters \\(\\xi\\) from Golden Section Line Search, we can\u0026rsquo;t actually directly punch it into \\(\\theta\\). 
This is because it is likely not going to satisfy the constraints that are given.\nWe can fix this naively with a non-linear programming formulation; that is, we desire to find the closest \\(\\theta\\) to the computed value \\(\\xi\\); we do this by minimizing a L2 norm (sum of squared errors):\n\\begin{align} \\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\ \\text{such that}\\ \u0026amp;J\\theta = 1 \\\\ \u0026amp; \\theta \\geq 0 \\\\ \u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{align}\nThis, for the most part, is computationally intractable and needs to be computed through each iteration. This is especially bad for the \\(h_{i}\\) for all \\(i\\) part. And so, instead of doing this, we formulate instead an approximate proxy objective.\nApproximate Projection The thing that makes the objective above hard is that \\(h_{i}\\) doesn\u0026rsquo;t have nice convex properties. To fix this, we perform a local linearizion of \\(h_{i}\\).\nSpecifically, let\u0026rsquo;s replace \\(h_{i}\\) with its local Taylor expansion.\nFor some step where we started at \\(\\theta_{k}\\), if you wanted to evaluate some next step \\(\\theta_{k+1}\\) from that step, we can write:\n\\begin{equation} h_{i}(\\theta_{k+1}) \\approx h_{i}(\\theta_{k}) + (\\nabla_{\\theta}(\\theta_{0}))(\\theta_{k+1}-\\theta_{k}) \\end{equation}\nUsing this linear decomposition of three parts (i.e. 
parameter difference from original, the gradient of \\(h\\) against the parameter, and the original value of \\(h\\)), we can now split the \\(h_{i}(\\theta)\\) constraint of the non-linear program into a linear decomposition.\nLet\u0026rsquo;s define:\n\\begin{equation} \\nabla_{\\theta} \\bold{h}(\\theta) = \\mqty[ (\\nabla_{\\theta} h_{1}(\\theta))^{\\top} \\\\ \\dots \\\\ (\\nabla_{\\theta} h_{m}(\\theta))^{\\top}] \\end{equation}\nFrom which we write block matriix\n\\begin{equation} \\bold{A} = \\mqty[-\\bold{I}_{n} \\\\ \\nabla_{\\theta}\\bold{h}(\\theta_{k})] \\end{equation}\nwhere \\(\\bold{I}_{n} \\in \\mathbb{R}^{(|A| + |X|) \\times (|A| + |X|)}\\), and vector:\n\\begin{equation} \\bold{b} = \\mqty[\\bold{0}_{n} \\\\ \\epsilon - \\bold{h}(\\theta_{k}) + \\nabla\\bold{h}(\\theta_{k})\\theta_{k}] \\end{equation}\nThese definitions allow us to rewrite two of our objectives:\n\\begin{equation} \\begin{cases} \\theta \\geq 0 \\\\ h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i \\end{cases} \\end{equation}\nturning them instead into simply \\(\\bold{A}\\theta \\leq \\bold{b}\\). The top half of \\(\\bold{A}\\), \\(\\bold{B}\\) is responsible for making sure that all elements of \\(\\theta\\) is positive (specifically, to ensure the negative of the values is smaller than 0); the bottom half ensures that all of them satisfy the cost.\nThese definitions result in a linear formulation of two objectives of our original non-linear program:\n\\begin{align} \\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\ \\text{such that}\\ \u0026amp;J\\theta = 1 \\\\ \u0026amp; \\bold{A}\\theta \\leq \\bold{B} \\end{align}\nand we are done.\nQuick Tip Recall that we have to calculate the inverse of \\(\\bold{Z}\\) quite a lot throughout the computation of \\(h\\) and \\(f\\). 
For each policy parameter \\(\\theta\\), you can cache the value of \\(\\bold{Z}\\), L-U (upper-triangular/lower-triangular factored) and recombine them/invert them as needed to speed up computation. This ensures that you only calculate \\(\\bold{Z}\\) once per step.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e extends \u003ca href=\"/posts/kbhcontroller_gradient_ascent/\"\u003econtroller gradient ascent\u003c/a\u003e to cover \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDPs\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003cp\u003eRecall from \u003ca href=\"/posts/kbhcontroller_gradient_ascent/\"\u003econtroller gradient ascent\u003c/a\u003e we have an objective which we will modify for \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003es. For initial controller-states \\(\\beta\\) and utility \\(\\bold{u}_{\\theta}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\max_{\\theta}\\ \\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esubject to:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\Psi\\) remains a probably distribution over \\(|A|\\)\u003c/li\u003e\n\u003cli\u003e\\(\\eta\\) remains a probably distribution over \\(|X|\\)\u003c/li\u003e\n\u003cli\u003eand, new for \u003ca href=\"/posts/kbhcpomdp/\"\u003eCPOMDP\u003c/a\u003e, \\(\\beta^{\\top} (\\bold{I} - \\gamma \\bold{T}_{\\theta})^{-1} C_{i} \\leq \\epsilon_{i}\\ \\forall i\\), that is, each constraint \\(C_{i} \\in \\bold{C}_{i}\\) is satisfied to be lower than the budget \\(\\epsilon_{i}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT_{\\theta}((x,s), (x\u0026rsquo;,s\u0026rsquo;)) = \\sum_{a} \\Psi(a | x) T(s\u0026rsquo;|s,a) \\sum_{o} O (o|a,s\u0026rsquo;) \\eta 
(x\u0026rsquo;|x,a,o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_{\\theta}((x, s)) = \\sum_{a} \\Psi(a|x) R(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein which\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\): a set of nodes (hidden, internal states)\u003c/li\u003e\n\u003cli\u003e\\(\\Psi(a|x)\\): probability of taking an action\u003c/li\u003e\n\u003cli\u003e\\(\\eta(x\u0026rsquo;|x,a,o)\\) : transition probability between hidden states\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"optimization-formulation\"\u003eOptimization Formulation\u003c/h2\u003e\n\u003cp\u003ewe formulate policy parameters \\(\\theta\\) as a large stacked vector of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta = \\mqty(\\Psi(a_0 | x_0) \\\\ \\Psi(a_1 | x_0) \\\\ \\dots \\\\ \\Psi(a_n | x_N) \\\\ \\eta(x_0 | x_0, a_0, o_0) \\\\ \\eta(x_1 | x_0, a_0, o_0) \\\\ \\dots \\\\ \\eta(x_N | x_N, a_n, o_f))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us define block diagonal matricies \\(J_{\\Psi}\\) and \\(J_{\\eta}\\), whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ_{\\Psi} = \\mqty(\\bold{1}_{\\Psi}^{\\top} \u0026amp; \\dots \u0026amp; \\bold{0} \\\\ \u0026amp; \\dots \u0026amp; \\\\ \\bold{0} \u0026amp; \\dots \u0026amp;\\bold{1}_{\\Psi}^{\\top} )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(J_{\\Psi} \\in \\mathbb{R}^{|X| \\times (|A| \\times |X|)}\\) and each block part \\(\\bold{1}_{\\Psi} \\in \\mathbb{R}^{|A|}\\) represents a one-vector of length of the action space. 
You can see how multiplying a vector \\(\\qty[\\Psi(a_0|x_0) \\\\ \\dots \\\\ \\Psi(a_n|x_N)]\\) against this matrix should yield a \\(1\\) vector if each \\(\\Psi(\\cdot | x_{i})\\) is a probability distribution.\u003c/p\u003e\n\u003cp\u003eSimilar, we define, \\(J_{\\eta}\\) in a similar fashion to add the distributions over each \\(\\eta(\\cdot | x, a, o)\\).\u003c/p\u003e\n\u003cp\u003eThis yields another block-block matrix\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nJ = \\mqty(J_{\\Psi} \u0026amp; 0 \\\\ 0 \u0026amp; J_{\\eta})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor which we desire \\(J\\theta = 1\\) in order to verify that the probability distributions are valid.\u003c/p\u003e\n\u003cp\u003eLastly, let us define \\(\\bold{Z} = (\\bold{I} - \\gamma \\bold{T}_{\\theta})\\). For ease of notation in constructing this result, we declare:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} \\bold{r}_{\\theta}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta) = \\beta^{\\top} \\bold{Z}^{-1} C_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, this allows us to formulate the problem as a nonlinear optimization problem:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\max_{\\theta}\\ \u0026amp;f(\\theta) \\\\\n\\text{such that}\\ \u0026amp;J\\theta = 1 \\\\\n\u0026amp; \\theta \\geq 0 \\\\\n\u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{align}\u003c/p\u003e\n\u003ch2 id=\"gradient-ascent-procedure\"\u003eGradient Ascent Procedure\u003c/h2\u003e\n\u003cp\u003eNote that the initial state information \\(\\beta\\) is constant. 
Therefore, the gradient of the top expression against each field in \\(\\theta\\) becomes, via an rearrangement of the chain rule:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{f(\\theta)}{\\theta_{i}} = \\beta^{\\top} \\qty[\\bold{Z}^{-1} \\qty( \\pdv{\\bold{r}_{\\theta}}{\\theta_{i}} + \\pdv{\\bold{Z}}{\\theta_{i}} \\bold{Z}^{-1}\\bold{r}_{\\theta})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe derivatives of each \\(\\theta\\) against each \\(\\bold{r}\\) and \\(\\bold{Z}\\) is given on pp 485 of Alg4DM.\u003c/p\u003e\n\u003cp\u003eAs with all gradient ascent cases, each \u0026ldquo;step\u0026rdquo; takes the rough form of\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\xi = \\theta + \\alpha \\nabla_{\\theta} f(\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ehowever, in this implementation, the step size isn\u0026rsquo;t actually fixed. Instead, we do\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"golden-section-line-search\"\u003eGolden Section Line Search\u003c/h3\u003e\n\u003cp\u003eInstead of taking fixed-step sizes to get to the maxima, \u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e proposes \u003ca href=\"#golden-section-line-search\"\u003eGolden Section Line Search\u003c/a\u003e as a line-search algorithm to dynamically choose the steps to get to maxima.\u003c/p\u003e\n\u003cp\u003eLine search algorithms are typically computationally heavy as it requires evaluating the relative utility (i.e. 
here \\(\\bold{Z}^{-1} \\bold{r}_{\\theta}\\)) a lot of times, which is computationally intractable.\u003c/p\u003e\n\u003cp\u003eSo, \u003ca href=\"#golden-section-line-search\"\u003eGolden Section Line Search\u003c/a\u003e uses a divide-and-conquer method via the golden ratio to address this issue.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003edef\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eoptimize\u003c/span\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egolden_ratio\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ediscount_factor\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eeps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eminimum_boundary\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#75715e\"\u003e# C-POMDP Spec\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eobjective_function\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# this is f(theta)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etransition_matrix\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eR\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ereward_vector\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eCPOMDP\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einitial_state_vector\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# as obtained 
above\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ef\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egrad\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# initialize the search bounds based on splitting\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search space (full step, no step) via the golden ratio\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea4\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate new policies and their utilities\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search until the middle bounds converged\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eeps\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eabs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# calculate utility vectors over belief\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eu2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eu3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@R\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@R\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e 
\u003cspan style=\"color:#75715e\"\u003e# either relax top or bottom bounds, depending on\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# which one we had been successfully maximizing\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026gt;=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eb\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edot\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search \u0026#34;upwards\u0026#34;, set bottom bound to a3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea4\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# search \u0026#34;downwards\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003ephi\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea3\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ea1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ea2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enabla\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eI\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003egamma\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eT\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003e@theta2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan 
style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einverse\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ez2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# return the average of our converged results\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etheta3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.5\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu2\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eu3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"naive-projection\"\u003eNaive Projection\u003c/h3\u003e\n\u003cp\u003eOnce we obtain a new set of parameters \\(\\xi\\) from \u003ca href=\"#golden-section-line-search\"\u003eGolden Section Line Search\u003c/a\u003e, we can\u0026rsquo;t actually directly punch it into \\(\\theta\\). 
This is because it is likely not going to satisfy the constraints that are given.\u003c/p\u003e\n\u003cp\u003eWe can fix this naively with a non-linear programming formulation; that is, we desire to find the closest \\(\\theta\\) to the computed value \\(\\xi\\); we do this by minimizing a L2 norm (sum of squared errors):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\\n\\text{such that}\\ \u0026amp;J\\theta = 1 \\\\\n\u0026amp; \\theta \\geq 0 \\\\\n\u0026amp; h_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThis, for the most part, is computationally intractable and needs to be computed through each iteration. This is especially bad for the \\(h_{i}\\) for all \\(i\\) part. And so, instead of doing this, we formulate instead an approximate proxy objective.\u003c/p\u003e\n\u003ch3 id=\"approximate-projection\"\u003eApproximate Projection\u003c/h3\u003e\n\u003cp\u003eThe thing that makes the objective above hard is that \\(h_{i}\\) doesn\u0026rsquo;t have nice convex properties. To fix this, we perform a local linearizion of \\(h_{i}\\).\u003c/p\u003e\n\u003cp\u003eSpecifically, let\u0026rsquo;s replace \\(h_{i}\\) with its local Taylor expansion.\u003c/p\u003e\n\u003cp\u003eFor some step where we started at \\(\\theta_{k}\\), if you wanted to evaluate some next step \\(\\theta_{k+1}\\) from that step, we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh_{i}(\\theta_{k+1}) \\approx h_{i}(\\theta_{k}) + (\\nabla_{\\theta}(\\theta_{0}))(\\theta_{k+1}-\\theta_{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eUsing this linear decomposition of three parts (i.e. 
parameter difference from original, the gradient of \\(h\\) against the parameter, and the original value of \\(h\\)), we can now split the \\(h_{i}(\\theta)\\) constraint of the non-linear program into a linear decomposition.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} \\bold{h}(\\theta) = \\mqty[ (\\nabla_{\\theta} h_{1}(\\theta))^{\\top} \\\\ \\dots \\\\ (\\nabla_{\\theta} h_{m}(\\theta))^{\\top}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFrom which we write block matriix\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{A} = \\mqty[-\\bold{I}_{n} \\\\ \\nabla_{\\theta}\\bold{h}(\\theta_{k})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bold{I}_{n} \\in \\mathbb{R}^{(|A| + |X|) \\times (|A| + |X|)}\\), and vector:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{b} = \\mqty[\\bold{0}_{n} \\\\ \\epsilon - \\bold{h}(\\theta_{k}) + \\nabla\\bold{h}(\\theta_{k})\\theta_{k}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThese definitions allow us to rewrite two of our objectives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\theta \\geq 0 \\\\\nh_{i}(\\theta) \\leq \\epsilon_{i},\\ \\forall i\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eturning them instead into simply \\(\\bold{A}\\theta \\leq \\bold{b}\\). 
The top half of \\(\\bold{A}\\), \\(\\bold{B}\\) is responsible for making sure that all elements of \\(\\theta\\) is positive (specifically, to ensure the negative of the values is smaller than 0); the bottom half ensures that all of them satisfy the cost.\u003c/p\u003e\n\u003cp\u003eThese definitions result in a linear formulation of two objectives of our original non-linear program:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\min_{\\theta}\\ \u0026amp; \\frac{1}{2} \\| \\xi - \\theta \\|^{2}_{2} \\\\\n\\text{such that}\\ \u0026amp;J\\theta = 1 \\\\\n\u0026amp; \\bold{A}\\theta \\leq \\bold{B}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand we are done.\u003c/p\u003e\n\u003ch3 id=\"quick-tip\"\u003eQuick Tip\u003c/h3\u003e\n\u003cp\u003eRecall that we have to calculate the inverse of \\(\\bold{Z}\\) quite a lot throughout the computation of \\(h\\) and \\(f\\). For each policy parameter \\(\\theta\\), you can cache the value of \\(\\bold{Z}\\), L-U (\u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e/lower-triangular factored) and recombine them/invert them as needed to speed up computation. 
This ensures that you only calculate \\(\\bold{Z}\\) once per step.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpga/","tags":null,"title":"PGA"},{"categories":null,"contents":"\\begin{equation} y\u0026rsquo; = f(y) \\end{equation}\nfor autonomous ODEs, we can plot a phase line\nbecause autonomouse ODEs, we can plot such a line whereby we can analyze the direction of a solution function\u0026rsquo;s travel\na particle\u0026rsquo;s one-way motion must converge to a stationary value, or \\(\\pm \\infty\\), as \\(t\\) increases\n","html":"\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor autonomous ODEs, we can plot a \u003ca href=\"/posts/kbhphase_line/\"\u003ephase line\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-15_11-35-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ebecause autonomouse ODEs, we can plot such a line whereby we can analyze the direction of a solution function\u0026rsquo;s travel\u003c/p\u003e\n\u003cp\u003ea particle\u0026rsquo;s one-way motion must converge to a stationary value, or \\(\\pm \\infty\\), as \\(t\\) increases\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhphase_line/","tags":null,"title":"phase line"},{"categories":null,"contents":"We will leverage atoms as qubits. So, how do we isolate a qubit from an atom? We will leverage electrons.\nWe will select the lowest energy state as the base state; as there maybe multiple ground states, we will choose \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\) from two of the states.\n","html":"\u003cp\u003eWe will leverage \u003ca href=\"/posts/kbhatoms_as_qubits/\"\u003eatoms as qubits\u003c/a\u003e. So, how do we isolate a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e from an atom? 
We will leverage electrons.\u003c/p\u003e\n\u003cp\u003eWe will select the lowest energy state as the base state; as there maybe multiple ground states, we will choose \\(|u\\big\u0026gt;\\) and \\(|d\\big\u0026gt;\\) from two of the states.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhphysical_qubits/","tags":null,"title":"physical qubits"},{"categories":null,"contents":"physics is the act of explaining what we see in terms of solving for the \u0026ldquo;unseen\u0026rdquo;. For an explanation to be good, it needs to be testable.\nHow exactly does physics work? \u0026ldquo;classical results\u0026rdquo;\nNewton\u0026rsquo;s laws Maxwell\u0026rsquo;s equations General relativity \u0026ldquo;quantum theory\u0026rdquo;\nA new model that actually allows particle inference.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e is the act of explaining what we see in terms of solving for the \u0026ldquo;unseen\u0026rdquo;. For an explanation to be good, it needs to be testable.\u003c/p\u003e\n\u003ch2 id=\"how-exactly-does-physics-work\"\u003eHow exactly does physics work?\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;classical results\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eNewton\u0026rsquo;s laws\u003c/li\u003e\n\u003cli\u003eMaxwell\u0026rsquo;s equations\u003c/li\u003e\n\u003cli\u003eGeneral relativity\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;quantum theory\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eA new model that actually allows particle inference.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhphysics/","tags":null,"title":"physics"},{"categories":null,"contents":"(Pineau, Gordon, and Thrun 2006)\nPBVI\nOne-Liner \u0026ldquo;If we can avoid the curse of history, the curse of dimensionality wouldn\u0026rsquo;t be a problem\u0026rdquo;.\nBasically - most POMDP problems don\u0026rsquo;t reach much of the belief simplex. 
So, can we concetrate planning on more probable beliefs.\nNovelty trajectory based approach to select beliefs belief set is fixed through layers: each backup results in the same number of layers Notable Methods PBVI\nKey Figs New Concepts Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003ePineau, Gordon, and Thrun 2006\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;If we can avoid the curse of history, the curse of dimensionality wouldn\u0026rsquo;t be a problem\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eBasically - most POMDP problems don\u0026rsquo;t reach much of the belief simplex. So, can we concetrate planning on more probable beliefs.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003etrajectory based approach to select beliefs\u003c/li\u003e\n\u003cli\u003ebelief set is \u003cstrong\u003efixed\u003c/strong\u003e through layers: each backup results in the same number of layers\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpineau_2006/","tags":null,"title":"Pineau 2006"},{"categories":null,"contents":"pipe chains the STDOUT of one command and put it to the STDIN of another command. 
Typically, we want to do pipe per direction.\ncommand pipelines span two child processes create a pipe to allow the two processes to communicate connect the first child\u0026rsquo;s STDOUT to the pipe + the second child\u0026rsquo;s STDIN to the pipe pipe() pipe() gives us back two file descriptors, such that whatever is written to one can be read from another.\nInterface:\nint pipes[2]; // create the pipes int ret = pipe(pipes); // an so int read_from_here = ret[0]; int write_to_here = ret[1]; // i.e. ret[1] writes to =\u0026gt; ret[0] read // fork! pid_t pid_p = fork(); if(pid_p == 0) { // child subroutine // because child is READING, and not READINg // we want to close the write close(write_to_here); // we want to then make a buffer char buf[num_bytes]; // if the child reads before the parents write // it will block until some data is available // if the write ends are closed globally, read // will also stop. read(read_from_here, buffer, sizeof(buffer)); close(read_from_here); return 0; } // parent subroutine // because parent is WRITING and not READING // we don\u0026#39;t want the read to block, we will // close the parent immediately. close(read_from_here); // write some data write(write_to_here, \u0026#34;msg\u0026#34;, num_bytes); // close now we are done writing close(write_to_here); // clean up child waitpid(pid_p, NULL, 0); pipes have to be closed twice, and opened before the fork.\ndup2() dup2() lets you REWIRE fire descriptors:\ndup2(scrfd, destft); for instance:\ndup2(fds[0], STDIN_FILENO); close(fds[0]); copy the underlying open file pointer which fds[0] points to the FD STDIN, meaning STDIN will now refer to the underlying file of fds[0].\nstalling if you don\u0026rsquo;t close the right ends of your pipes, it STALLS. read() BLOCKS UNTIL ALL WRITES ARE CLOSED!\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003e chains the STDOUT of one command and put it to the STDIN of another command. 
Typically, we want to do pipe per direction.\u003c/p\u003e\n\u003ch2 id=\"command-pipelines\"\u003ecommand pipelines\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003espan two child processes\u003c/li\u003e\n\u003cli\u003ecreate a pipe to allow the two processes to communicate\u003c/li\u003e\n\u003cli\u003econnect the first child\u0026rsquo;s STDOUT to the pipe + the second child\u0026rsquo;s STDIN to the pipe\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"pipe\"\u003epipe()\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#pipe\"\u003epipe()\u003c/a\u003e gives us back two \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003es, such that whatever is written to one can be read from another.\u003c/p\u003e\n\u003cp\u003eInterface:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epipes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// create the pipes\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003epipe\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epipes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// an so\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// i.e. 
ret[1] writes to =\u0026gt; ret[0] read\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// fork!\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// child subroutine\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// because child is READING, and not READINg\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan 
style=\"color:#75715e\"\u003e// we want to close the write\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we want to then make a buffer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enum_bytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if the child reads before the parents write\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// it will block until some data is available\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// if the write ends are closed globally, read\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// will also stop.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// parent 
subroutine\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// because parent is WRITING and not READING\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// we don\u0026#39;t want the read to block, we will\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// close the parent immediately.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// write some data\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;msg\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum_bytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// close now we are done writing\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// clean up child\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ca href=\"/posts/kbhpipe/\"\u003epipe\u003c/a\u003es have to be closed twice, and opened before the fork.\u003c/p\u003e\n\u003ch2 id=\"dup2\"\u003edup2()\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#dup2\"\u003edup2()\u003c/a\u003e lets you \u003cstrong\u003eREWIRE\u003c/strong\u003e fire descriptors:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003edup2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003escrfd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edestft\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003efor instance:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003edup2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSTDIN_FILENO\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ecopy the underlying open file pointer which 
\u003ccode\u003efds[0]\u003c/code\u003e points to the FD STDIN, meaning STDIN will now refer to the underlying file of \u003ccode\u003efds[0]\u003c/code\u003e.\u003c/p\u003e\n\u003ch2 id=\"stalling\"\u003estalling\u003c/h2\u003e\n\u003cp\u003eif you \u003cstrong\u003edon\u0026rsquo;t close the right ends of your pipes, it STALLS\u003c/strong\u003e. \u003ccode\u003eread()\u003c/code\u003e BLOCKS UNTIL ALL WRITES ARE CLOSED!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpipe/","tags":null,"title":"pipe"},{"categories":null,"contents":"User Story Sejin is the executive administrative assistant at Nueva, working on scheduling Liza, Lee, Terry, and the other admins against the members of the wider community. Sejin spends most of her day scheduling people, of which, the largest time drawn is spent convincing people to move their schedules \u0026ldquo;in favor\u0026rdquo; of that of another person (i.e. manually). The reason why this is done is because her approach to scheduling is one-shot: emailing everybody for general availability, noting in her mind who the high-priority attendees are (say, Liza), and if no times match asking/convincing those in lower priority to move their schedules. Although she enjoys the process of putting events together, she is particularly frustrated that, due to the busy schedules and often back-and-forth emails needed to get and conform everyone\u0026rsquo;s schedule, response rates to complicated scheduling problems are low.\nSejin, during a main brunt of her job of scheduling inter or intra-admin meetings, need a solution to schedule many executives at once with attention to their priority/authority/importance to a meeting as well as the possible fluidity of their schedules. There is an inherit fluidity to scheduling as a master-planner of a few admin\u0026rsquo;s schedules: in that, if needed, she has authority to move entire meetings as long as they are swapped for equivalent times of availability. 
Hence, a previously bad time may suddenly become available if enough scheduling conflicts is generated, thereby creating the incentive for swapping another meeting away for the one being scheduled and rescheduling other attendees of lower priority.\nCurrent scheduling software does not account for either types of fluidity. Tools like Doodle/When2Meet can accommodate for inherent \u0026ldquo;priority\u0026rdquo;\u0026mdash;with Sejin choosing the time-slot that would have the most, highest priority individuals scheduled\u0026mdash;but are one-shot planning tools which do not provide space for swapping entire meetings out to make scheduling work better. Other tools like Calendly or simple iCal does not provide any semblance of priority or \u0026ldquo;multi-possibility\u0026rdquo; for meetings, though does indeed provide the time-blocking capability to swap two meetings at will. These problems result in Sejin needing to just create large email chains to resolve scheduling problems. Also, no scheduling tools provide an opportunity to manually \u0026ldquo;convince\u0026rdquo; or request someone to make time due to the constraints that presented during first-round scheduling. Lastly, scheduling software does not space-block. Sometimes there is a physical capacity/de-duplication limit to spaces, which cannot be accounted for.\nOnce the initial scheduling and emailing processes are automated, Sejin can spend more time focusing on what she actually enjoys: thinking about the process of an event and its details. Schedule can now be an afterthought, an event which happens in the background which is eventually reported to her on the online portal as she is planning the details of the event.\nProposal Fundamentally, this is a fractional knapsack problem. 
\u0026ldquo;How do we maximize the maximum amount of attendance of maximum amounts of important people?\u0026rdquo;\nFrom a target market, I think a good target would be medium organization assistants: Sejin\u0026rsquo;s concerns really only become a problem when you are scheduling for a one (or few) vs. many situation where there is a stable group of people you are scheduling for, who wants to meet with each other or other people outside.\nAs far as UX, this tool should not require log-in except for the master planner (i.e. our user.) Participants in meeting should be able to freely enter their schedules or create evergreen accounts to manage their scheduling. (This is not as well thought out at the moment.)\nLastly, for the tech stack, I don\u0026rsquo;t think I have the ability to finish the entire stack by myself. From a MVP perspective (if we are trying to satisfy all needs), there needs to be a system optimizing a constantly shifting fractional knapsack, a way to put and store availability information, and a way to automate the requesting/convincing of scheduling change (e.g.. \u0026ldquo;MyApp Notification! Liza is not available at the one time you selected, but everyone else is available at this different time. Can you make this time? Y/N\u0026rdquo;) . Ideally, we would also send iCal invites in the end.\n","html":"\u003ch2 id=\"user-story\"\u003eUser Story\u003c/h2\u003e\n\u003cp\u003eSejin is the executive administrative assistant at Nueva, working on scheduling Liza, Lee, Terry, and the other admins against the members of the wider community. Sejin spends most of her day scheduling people, of which, the largest time drawn is spent convincing people to move their schedules \u0026ldquo;in favor\u0026rdquo; of that of another person (i.e. manually). 
The reason why this is done is because her approach to scheduling is one-shot: emailing everybody for general availability, noting in her mind who the high-priority attendees are (say, Liza), and if no times match asking/convincing those in lower priority to move their schedules. Although she enjoys the process of putting events together, she is particularly frustrated that, due to the busy schedules and often back-and-forth emails needed to get and conform everyone\u0026rsquo;s schedule, response rates to complicated scheduling problems are low.\u003c/p\u003e\n\u003cp\u003eSejin, during a main brunt of her job of scheduling inter or intra-admin meetings, need a solution to schedule many executives at once with attention to their priority/authority/importance to a meeting as well as the possible fluidity of their schedules. There is an inherit fluidity to scheduling as a master-planner of a few admin\u0026rsquo;s schedules: in that, if needed, she has authority to move entire meetings as long as they are swapped for equivalent times of availability. Hence, a previously bad time may suddenly become available if enough scheduling conflicts is generated, thereby creating the incentive for swapping another meeting away for the one being scheduled and rescheduling other attendees of lower priority.\u003c/p\u003e\n\u003cp\u003eCurrent scheduling software does not account for either types of fluidity. Tools like Doodle/When2Meet can accommodate for inherent \u0026ldquo;priority\u0026rdquo;\u0026mdash;with Sejin choosing the time-slot that would have the most, highest priority individuals scheduled\u0026mdash;but are one-shot planning tools which do not provide space for swapping entire meetings out to make scheduling work better. Other tools like Calendly or simple iCal does not provide any semblance of priority or \u0026ldquo;multi-possibility\u0026rdquo; for meetings, though does indeed provide the time-blocking capability to swap two meetings at will. 
These problems result in Sejin needing to just create large email chains to resolve scheduling problems. Also, no scheduling tools provide an opportunity to manually \u0026ldquo;convince\u0026rdquo; or request someone to make time due to the constraints that presented during first-round scheduling. Lastly, scheduling software does not space-block. Sometimes there is a physical capacity/de-duplication limit to spaces, which cannot be accounted for.\u003c/p\u003e\n\u003cp\u003eOnce the initial scheduling and emailing processes are automated, Sejin can spend more time focusing on what she actually enjoys: thinking about the process of an event and its details. Schedule can now be an afterthought, an event which happens in the background which is eventually reported to her on the online portal as she is planning the details of the event.\u003c/p\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eFundamentally, this is a fractional knapsack problem. \u0026ldquo;How do we maximize the maximum amount of attendance of maximum amounts of important people?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eFrom a target market, I think a good target would be medium organization assistants: Sejin\u0026rsquo;s concerns really only become a problem when you are scheduling for a one (or few) vs. many situation where there is a stable group of people you are scheduling for, who wants to meet with each other or other people outside.\u003c/p\u003e\n\u003cp\u003eAs far as UX, this tool should not require log-in except for the master planner (i.e. our user.) Participants in meeting should be able to freely enter their schedules or create evergreen accounts to manage their scheduling. (This is not as well thought out at the moment.)\u003c/p\u003e\n\u003cp\u003eLastly, for the tech stack, I don\u0026rsquo;t think I have the ability to finish the entire stack by myself. 
From a MVP perspective (if we are trying to satisfy all needs), there needs to be a system optimizing a constantly shifting fractional knapsack, a way to put and store availability information, and a way to automate the requesting/convincing of scheduling change (e.g.. \u0026ldquo;MyApp Notification! Liza is not available at the one time you selected, but everyone else is available at this different time. Can you make this time? Y/N\u0026rdquo;) . Ideally, we would also send iCal invites in the end.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpitch_a_project/","tags":null,"title":"Pitch a Project"},{"categories":null,"contents":"The PSC is a supercomputing center.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhpsc/\"\u003ePSC\u003c/a\u003e is a supercomputing center.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpsc/","tags":null,"title":"Pittsburgh Supercomputing Center"},{"categories":null,"contents":"A PKM is a tool (like this one!) to help manage your nodes and knowledge.\nstoring AND processing; just storing is just PIM not PKM PKM = PIM (personal info management) + GTD + knowledge management goal: narrowing \u0026ldquo;flood\u0026rdquo; and focus on useful areas move from passive =\u0026gt; active consumption create, not just regurgitate PKM and Context store info based on context Strategy What are you trying to organize what are the inputs? email? lectures?: \u0026ldquo;reference\u0026rdquo; type emails lecture notes tasks? what are you trying to make? test? essays? a time blocking schedule studying for tests (i.e. for linear, etc.) what does your process look like? mishmash of topic based notes and content based notes what parts of it would you like to improve? article capture workflow: how to process random readings topic wise? knowledge capture: how to come across new content and capture them somewhere? todo/notes integration: task management and note taking currently lives separately, how to unify them? 
zettlekasten\nhow to take smart notes: arens progressive summarization progressive summarization is a technique in note taking that Tiago Forte developed to summarize text:\nlayer 0: reading layer 1: copy/pasted parts from the reading layer 2: bold relevant parts layer 3: highlight bold parts to get crux of ideas layer 4: mini summary layer 5: remix (add links, etc.) capture read rewrite and summarize engage by adding questions, thoughts, opinions, etc. connect old and new ideas think about the context of usage, not the topic how to think better reduce cognitive overload \u0026ldquo;offload info\u0026rdquo;\u0026mdash;put it on paper test yourself often: listening and understanding are not the same keep things simple: one task at a time keep an open mind + \u0026ldquo;collective\u0026rdquo; new perspectives and mental models new cycle capture curate cultivate connect create ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpkm/\"\u003ePKM\u003c/a\u003e is a tool (like this one!) to help manage your nodes and knowledge.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estoring AND processing; just storing is just PIM not PKM\u003c/li\u003e\n\u003cli\u003ePKM = PIM (personal info management) + GTD + knowledge management\u003c/li\u003e\n\u003cli\u003egoal: narrowing \u0026ldquo;flood\u0026rdquo; and focus on useful areas\u003c/li\u003e\n\u003cli\u003emove from passive =\u0026gt; active consumption\u003c/li\u003e\n\u003cli\u003ecreate, not just regurgitate\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pkm-and-context\"\u003ePKM and Context\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003estore info based on context\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"strategy\"\u003eStrategy\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhat are you trying to organize\u003c/li\u003e\n\u003cli\u003ewhat are the inputs?\n\u003cul\u003e\n\u003cli\u003eemail? 
lectures?:\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;reference\u0026rdquo; type emails\u003c/li\u003e\n\u003cli\u003electure notes\u003c/li\u003e\n\u003cli\u003etasks?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat are you trying to make?\n\u003cul\u003e\n\u003cli\u003etest? essays?\n\u003cul\u003e\n\u003cli\u003ea time blocking schedule\u003c/li\u003e\n\u003cli\u003estudying for tests (i.e. for linear, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat does your process look like?\n\u003cul\u003e\n\u003cli\u003emishmash of topic based notes and content based notes\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewhat parts of it would you like to improve?\n\u003cul\u003e\n\u003cli\u003earticle capture workflow: how to process random readings topic wise?\u003c/li\u003e\n\u003cli\u003eknowledge capture: how to come across new content and capture them somewhere?\u003c/li\u003e\n\u003cli\u003etodo/notes integration: task management and note taking currently lives separately, how to unify them?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehow to take smart notes: arens\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"progressive-summarization\"\u003eprogressive summarization\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#progressive-summarization\"\u003eprogressive summarization\u003c/a\u003e is a technique in note taking that \u003ca href=\"/posts/kbhtiago_forte/\"\u003eTiago Forte\u003c/a\u003e developed to summarize text:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003elayer 0: reading\u003c/li\u003e\n\u003cli\u003elayer 1: copy/pasted parts from the reading\u003c/li\u003e\n\u003cli\u003elayer 2: bold relevant parts\u003c/li\u003e\n\u003cli\u003elayer 3: highlight bold parts to get crux of 
ideas\u003c/li\u003e\n\u003cli\u003elayer 4: mini summary\u003c/li\u003e\n\u003cli\u003elayer 5: remix (add links, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"capture\"\u003ecapture\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eread\u003c/li\u003e\n\u003cli\u003erewrite and \u003cem\u003esummarize\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003e\u003cem\u003eengage\u003c/em\u003e by adding \u003cstrong\u003equestions\u003c/strong\u003e, \u003cstrong\u003ethoughts\u003c/strong\u003e, \u003cstrong\u003eopinions\u003c/strong\u003e, etc.\u003c/li\u003e\n\u003cli\u003e\u003cem\u003econnect old and new ideas\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003ethink about the \u003cem\u003econtext\u003c/em\u003e of usage, not the topic\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-think-better\"\u003ehow to think better\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ereduce cognitive overload\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;offload info\u0026rdquo;\u0026mdash;put it on paper\u003c/li\u003e\n\u003cli\u003etest yourself often: listening and understanding are not the same\u003c/li\u003e\n\u003cli\u003ekeep things simple: one task at a time\u003c/li\u003e\n\u003cli\u003ekeep an open mind + \u0026ldquo;collective\u0026rdquo; new perspectives and mental models\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-cycle\"\u003enew cycle\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecapture\u003c/li\u003e\n\u003cli\u003ecurate\u003c/li\u003e\n\u003cli\u003ecultivate\u003c/li\u003e\n\u003cli\u003econnect\u003c/li\u003e\n\u003cli\u003ecreate\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpkm/","tags":null,"title":"PKM"},{"categories":null,"contents":"A decision making method using search on a model of the problem to be able tom make decisions.\ncreate a (usually deterministic, but for CS238 we care only about non-deterministic cases) model of the problem or a good approximation thereof use the model to plan for possible next actions to 
yield for a good solution contrast v. explicit programming explicit programming requires you to plan for the action\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003edecision making method\u003c/a\u003e using search on a model of the problem to be able tom make decisions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecreate a (usually deterministic, but for \u003ca href=\"/posts/kbhdecision_making_index/\"\u003eCS238\u003c/a\u003e we care only about non-deterministic cases) model of the problem or a good approximation thereof\u003c/li\u003e\n\u003cli\u003euse the model to plan for possible next actions to yield for a good solution\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"contrast-v-dot-explicit-programming\"\u003econtrast v. explicit programming\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e requires you to plan for the action\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhplanning/","tags":null,"title":"planning"},{"categories":null,"contents":" we start at an initial belief point we do a random Rollout to get to the next belief then collect\n","html":"\u003col\u003e\n\u003cli\u003ewe start at an initial \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e point\u003c/li\u003e\n\u003cli\u003ewe do a random \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e to get to the next \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethen collect\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpoint_selection/","tags":null,"title":"point selection"},{"categories":null,"contents":"we keep track of a bunch of alpha vectors and belief samples (which we get from point selection):\n\\begin{equation} \\Gamma = \\{\\alpha_{1}, \\dots, \\alpha_{m}\\} \\end{equation}\nand\n\\begin{equation} B = 
\\{b_1, \\dots, b_{m}\\} \\end{equation}\nTo preserve the lower-boundedness of these alpha vectors, one should seed the alpha vectors via something like blind lower bound\nWe can estimate our utility function at any belief by looking in the set for the most optimal:\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b \\end{equation}\nWe now define a function named backup (see PBVI Backup), and call it on all of our beliefs to generate a new set of alpha vectors:\n\\begin{equation} \\Gamma^{t+1} = \\{backup(\\Gamma, b) | b \\in B\\} \\end{equation}\nwhere:\n\\begin{equation} \\alpha \\leftarrow backup(\\Gamma, b) \\end{equation}\ntherefore we call backup on each \\(b\\).\nPBVI Backup backup procedure given \\(\\Gamma\\) and $b$\u0026mdash;\nwe want to mint a single new alpha vector by selecting the highest-valued one from the set of good alpha-vectors, one for each action:\n\\begin{equation} \\alpha = \\arg\\max_{\\alpha_{a}} \\alpha_{a}^{\\top} b \\end{equation}\nnow, we define each \\(\\alpha_{a}\\) as:\n\\begin{equation} \\alpha_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;,o}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a)\\alpha_{a,o} (s\u0026rsquo;) \\end{equation}\nwhere we obtain the old \\(\\alpha_{a,o}\\) by computing vector which currently provides the highest value estimate, which we compute over all actions and observations \\(a,o\\) given our \\(\\Gamma\\):\n\\begin{equation} \\alpha_{a,o} = \\arg\\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} update(b,a,o) \\end{equation}\nRandomized PBVI see Perseus\n","html":"\u003cp\u003ewe keep track of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es and \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e samples (which we get from \u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma = \\{\\alpha_{1}, \\dots, 
\\alpha_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = \\{b_1, \\dots, b_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo preserve the lower-boundedness of these \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, one should seed the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es via something like \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eWe can estimate our \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function at any belief by looking in the set for the most optimal:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now define a function named \u003ccode\u003ebackup\u003c/code\u003e (see \u003ca href=\"#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e), and call it on all of our \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es to generate a new set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma^{t+1} = \\{backup(\\Gamma, b) | b \\in B\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha \\leftarrow backup(\\Gamma, b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore we call backup on each \\(b\\).\u003c/p\u003e\n\u003ch2 id=\"pbvi-backup\"\u003ePBVI Backup\u003c/h2\u003e\n\u003cp\u003e\u003ccode\u003ebackup\u003c/code\u003e procedure given \\(\\Gamma\\) and $b$\u0026mdash;\u003c/p\u003e\n\u003cp\u003ewe want to mint a single new \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e by selecting the highest-valued one from the set of good alpha-vectors, one for each action:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha = \\arg\\max_{\\alpha_{a}} \\alpha_{a}^{\\top} 
b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, we define each \\(\\alpha_{a}\\) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;,o}^{} O(o|a,s\u0026rsquo;)T(s\u0026rsquo;|s,a)\\alpha_{a,o} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere we obtain the old \\(\\alpha_{a,o}\\) by computing vector which currently provides the highest value estimate, which we compute over all actions and observations \\(a,o\\) given our \\(\\Gamma\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha_{a,o} = \\arg\\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} update(b,a,o)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"randomized-pbvi--kbhpoint-based-value-iteration-dot-md\"\u003eRandomized \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhperseus/\"\u003ePerseus\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpoint_based_value_iteration/","tags":null,"title":"Point-Based Value Iteration"},{"categories":null,"contents":"A pointer is a variable which stores memory addresses. Because there are no pass-by reference, we use pointers to emulate pass by reference: by sharing addresses with other functions.\nA pointer can identify a single byte OR some large data structures. We can dynamically allocate pointers, and also identify memory generically without types.\nC is always pass-by-copy. Therefore, to pass-by-reference, you basically have to\nint x = 2; // declare object int *xptr = \u0026amp;x; // get location of object (\u0026amp;: address of) printf(\u0026#34;%d\\n\u0026#34;, *xptr); // dereference the pointer address operator You will note, in the line above:\nint *xptr = \u0026amp;x; uses an operator \u0026amp; to get the address of an object. 
That\u0026rsquo;s called an object operator.\npointer memory diagram void myFunct(int *intPtr) { *intPtr = 3; } int main() { int x = 2; myFunct(\u0026amp;x); } ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e is a variable which stores memory addresses. Because there are no pass-by reference, we use pointers to emulate pass by reference: by sharing addresses with other functions.\u003c/p\u003e\n\u003cp\u003eA pointer can identify a single \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebyte\u003c/a\u003e OR some large data structures. We can dynamically allocate pointers, and also identify memory generically without types.\u003c/p\u003e\n\u003cp\u003eC is always pass-by-copy. Therefore, to pass-by-reference, you basically have to\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// declare object\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e 
\u003cspan style=\"color:#75715e\"\u003e// get location of object (\u0026amp;: address of)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;%d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exptr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// dereference the pointer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"address-operator\"\u003eaddress operator\u003c/h2\u003e\n\u003cp\u003eYou will note, in the line above:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003exptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003euses an operator 
\u003ccode\u003e\u0026amp;\u003c/code\u003e to get the address of an object. That\u0026rsquo;s called an \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003e operator.\u003c/p\u003e\n\u003ch2 id=\"pointer--kbhpointer-dot-md--memory-diagram\"\u003e\u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e memory diagram\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-11_11-12-28_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emyFunct\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintPtr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eintPtr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan 
style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003emyFunct\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ex\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhpointer/","tags":null,"title":"pointer"},{"categories":null,"contents":"Let\u0026rsquo;s say we want to know what is the chance of having an event occurring \\(k\\) times in a unit time, on average, this event happens at a rate of \\(\\lambda\\) per unit time.\n\u0026ldquo;What\u0026rsquo;s the probability that there are \\(k\\) earthquakes in the 1 year if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\nwhere:\nevents have to be independent probability of sucess in each trial doesn\u0026rsquo;t vary constituents $λ$\u0026mdash;count of events per time \\(X \\sim Poi(\\lambda)\\) requirements the probability mass function:\n\\begin{equation} P(X=k) = e^{-\\lambda} \\frac{\\lambda^{k}}{k!} \\end{equation}\nadditional information properties of poisson 
distribution expected value: \\(\\lambda\\) variance: \\(\\lambda\\) derivation We divide the event into infinitely small buckets and plug into a binomial distribution, to formulate the question:\n\u0026ldquo;what\u0026rsquo;s the probability of large \\(n\\) number samples getting \\(k\\) events with probability of \\(\\frac{\\lambda}{n}\\) of events\u0026rdquo;\n\\begin{equation} P(X=k) = \\lim_{n \\to \\infty} {n \\choose k} \\qty(\\frac{\\lambda}{n})^{k}\\qty(1- \\frac{\\lambda}{n})^{n-k} \\end{equation}\nand then do algebra.\nAnd because of this, when you have a large \\(n\\) for your binomial distribution, you can just use a poisson distribution, where \\(\\lambda = np\\).\nadding poisson distribution For independent \\(A, B\\)\n\\begin{equation} A+B \\sim Poi(\\lambda_{A}+ \\lambda_{B}) \\end{equation}\nMLE for poisson distribution \\begin{equation} \\lambda = \\frac{1}{n} \\sum_{i=1}^{n} x_{i} \\end{equation}\nyes, that\u0026rsquo;s just the sample mean\n","html":"\u003cp\u003eLet\u0026rsquo;s say we want to know what is the chance of having an event occurring \\(k\\) times in a unit time, on average, this event happens at a rate of \\(\\lambda\\) per unit time.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;What\u0026rsquo;s the probability that there are \\(k\\) earthquakes in the 1 year if there\u0026rsquo;s on average \\(2\\) earthquakes in 1 year?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eevents have to be independent\u003c/li\u003e\n\u003cli\u003eprobability of sucess in each trial doesn\u0026rsquo;t vary\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e$λ$\u0026mdash;count of events per time\u003c/li\u003e\n\u003cli\u003e\\(X \\sim Poi(\\lambda)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability 
mass function\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = e^{-\\lambda} \\frac{\\lambda^{k}}{k!}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"properties-of-poisson-distribution--kbhprobability-of-k-in-x-time-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/strong\u003e: \\(\\lambda\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/strong\u003e: \\(\\lambda\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"derivation\"\u003ederivation\u003c/h3\u003e\n\u003cp\u003eWe divide the event into infinitely small buckets and plug into a \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e, to formulate the question:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of large \\(n\\) number samples getting \\(k\\) events with probability of \\(\\frac{\\lambda}{n}\\) of events\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=k) = \\lim_{n \\to \\infty} {n \\choose k} \\qty(\\frac{\\lambda}{n})^{k}\\qty(1- \\frac{\\lambda}{n})^{n-k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then do algebra.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-13_16-17-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eAnd because of this, when you have a large \\(n\\) for your \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e, you can just use a \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e, where \\(\\lambda = np\\).\u003c/p\u003e\n\u003ch3 
id=\"adding-poisson-distribution--kbhprobability-of-k-in-x-time-dot-md\"\u003eadding \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e \\(A, B\\)\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA+B \\sim Poi(\\lambda_{A}+ \\lambda_{B})\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"mle-for-poisson-distribution--kbhprobability-of-k-in-x-time-dot-md\"\u003eMLE for \u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{1}{n} \\sum_{i=1}^{n} x_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyes, that\u0026rsquo;s just the \u003ca href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_of_k_in_x_time/","tags":null,"title":"poisson distribution"},{"categories":null,"contents":"constituents the history: last states and actions \\(h_{t} = (s_{1:t}, a_{1:t-1})\\)\nrequirements typically:\n\\begin{equation} a_{t} = \\pi_{t}(h_{t}) \\end{equation}\nfor a Markov Decision Process, our past states are d-seperated from our current action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\)\nSome policies can be stochastic:\n\\begin{equation} P(a_{t}) = \\pi_{t}(a_{t} | h_{t}) \\end{equation}\ninstead of telling you something to do at a specific point, it tells you what the probability it chooses of doing \\(a_{t}\\) is given the history.\nadditional information stationary policy For infinite-horizon models, our policy can not care about how many time stamps are left (i.e. we are not optimizing within some box with constrained time) and therefore we don\u0026rsquo;t really care about historical actions. 
So we have:\n\\begin{equation} \\pi(s) \\end{equation}\nthis can be used in infinite-horizon models against stationary Markov Decision Process.\noptimal policy \\begin{equation} \\pi^{*}(s) = \\arg\\max_{\\pi} U^{\\pi}(s) \\end{equation}\n\u0026ldquo;the most optimal policy is the policy that maximizes the expected utility of following \\(\\pi\\) when starting from \\(s\\)\u0026rdquo;\nWe call the utility from the best policy the \u0026ldquo;optimal value function\u0026rdquo;\n\\begin{equation} U^{*} = U^{\\pi^{*}} \\end{equation}\npolicy utility, and value creating a good utility function: either policy evaluation or value iteration creating a policy from a utility function: value-function policy (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;) calculating the utility function a policy currently uses: use policy evaluation See policy evaluation\n","html":"\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003ethe history: last states and actions \\(h_{t} = (s_{1:t}, a_{1:t-1})\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003etypically:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{t} = \\pi_{t}(h_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor a \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e, our past states are \u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed-seperated\u003c/a\u003e from our \u003ca href=\"/posts/kbhcurrent/\"\u003ecurrent\u003c/a\u003e action given knowing the state, so really we have \\(\\pi_{t}(s_{t})\\)\u003c/p\u003e\n\u003cp\u003eSome \u003ca href=\"/posts/kbhpolicy/\"\u003epolicies\u003c/a\u003e can be stochastic:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(a_{t}) = \\pi_{t}(a_{t} | h_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003einstead of telling you something to do at a specific point, it tells you what the probability it chooses of doing 
\\(a_{t}\\) is given the history.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"stationary-policy--kbhpolicy-dot-md\"\u003estationary \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e, our \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e can not care about how many time stamps are left (i.e. we are not optimizing within some box with constrained time) and therefore we don\u0026rsquo;t really care about historical actions. So we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis can be used in \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e against \u003ca href=\"/posts/kbhmarkov_decision_process/#stationary-id-5bb5350e-04e4-46dc-9ea8-cb7bb09edd42-markov-decision-process\"\u003estationary Markov Decision Process\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"optimal-policy\"\u003eoptimal policy\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pi^{*}(s) = \\arg\\max_{\\pi} U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the most \u003ca href=\"#optimal-policy\"\u003eoptimal policy\u003c/a\u003e is the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e that maximizes the \u003ca href=\"/posts/kbhutility_theory/#expected-utility\"\u003eexpected utility\u003c/a\u003e of following \\(\\pi\\) when starting from \\(s\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe call the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e from the best policy the \u0026ldquo;\u003ca href=\"#optimal-policy\"\u003eoptimal value function\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*} = U^{\\pi^{*}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 
id=\"policy-utility-and-value\"\u003epolicy utility, and value\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ecreating a good \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e: either \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ecreating a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from a \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e: \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003ecalculating the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility function\u003c/a\u003e a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e currently uses: use \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy/","tags":null,"title":"policy"},{"categories":null,"contents":"See also Roll-out utility if you don\u0026rsquo;t want to get a vector utility over all states.\nsolving for the utility of a policy We can solve for the utility of the policy given the transitions \\(T\\) and reward \\(R\\) by solving the following equation\n\\begin{equation} \\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi} \\end{equation}\nwhere \\(T\\) is an \\(|S| \\times |S|\\) square matrix where each horizontal row is supposed to add up to \\(1\\) which encodes the probability of transitioning from each horizontal row to the column next rows.\nlookahead equation We begin our derivation from finite-horizon models.\nGives some policy \\(\\pi\\), at the 
base case:\n\\begin{equation} U^{\\pi}_{1} (s) = R(s, \\pi(s)) \\end{equation}\nat time \\(k+1\\) steps remaining:\n\\begin{equation} U^{\\pi}_{k+1}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi}_{k} (s\u0026rsquo;) \\end{equation}\nwe don\u0026rsquo;t know what the next state will be; so for each possible next state, we marginalize the result, multiplying the probability of being in that state (gotten by \\(T(\u0026hellip;)\\)) times the utility of being in that state.\nThis is called the lookahead equation, which represents how much utility any future state can be be if we took action at point \\(k\\).\nlookahead with sampling what if we only want to get \\(m\\) of the next states, instead of all next states?\nBellman Expectation Equation The Bellman Equation states that \u0026ldquo;the expected utility of being in a state is the instantaneous reward of being in that state plus the discounted future utility of all possible future state.\u0026rdquo; It is the fundamental result of RL.\n\\begin{equation} U(s) = \\arg\\max_{a} R(s, a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, a) U (s\u0026rsquo;) \\end{equation}\nIf we are dealing with infinite-horizon models (at \u0026ldquo;convergence\u0026rdquo; of the lookahead equation), we just no longer have a time dependency from the lookahead equation:\nWe only care about some Markovian state \\(s\\), and its next possible states \\(s\u0026rsquo;\\). 
When these pair happened doesn\u0026rsquo;t matter.\nFor a stochastic policy, we have:\n\\begin{equation} U(S) = \\sum_{a}^{} \\pi(a|s) \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)] \\end{equation}\nWe now can go about solving for what \\(U^{\\pi}\\) is:\nProcedure:\nfrom the Bellman Expectation Equation, we actually have a linear equation whereby:\n\\begin{equation} \\bold{U}^{\\pi} = \\bold{R}^{\\pi} + \\gamma T^{\\pi}\\bold{U}^{\\pi} \\end{equation}\nwhere, \\(T^{\\pi}\\) is an \\(n\\times n\\) matrix where \\(T^{\\pi}_{i,j}\\) represents the probability of transitioning from the \\(i\\) th to the \\(j\\) the state; and where, \\(\\bold{U}^{\\pi}\\) and \\(\\bold{R}^{\\pi}\\) are \\(n\\) vectors which represents all possible states and all possible utilities. Note that everything is parametrized on \\(\\pi\\) (so \\(T\\) doesn\u0026rsquo;t need an action dimension because we will be using the policy to calculate all the actoins)\nWe can now solve for the utility of the policy. Now, algebra time on the previous equation to get us:\n\\begin{equation} \\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi} \\end{equation}\nwe know that \\(T\\) is invertable because its a transition matrix. 
And that, folks, is the utility of a policy.\nApproximate Policy Evaluation Instead of having a policy evaluation based on a vector out of the fitness of this policy at all possible states, which really works if our state space is small, what if we made a policy evaluation scheme which estimates the expectation of the utility of our policy based on the possibility of us landing in particular states?\nBackground The utility from following a policy AT A STATE is given by:\n\\begin{equation} U^{\\pi}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi} (s\u0026rsquo;) \\end{equation}\nThe utility of a policy, in general, can be represented by:\n\\begin{equation} U(\\pi) = \\sum_{s}^{} b(s) U^{\\pi}(s) \\end{equation}\nwhere, \\(b(s)\\) is the \u0026ldquo;initial state distribution\u0026rdquo; of being in a particular state.\nOur state space may not be discrete or otherwise small enough to be added up for every case. We therefore can a sampling of Rollout trajectory to perform Approximate Policy Evaluation\nRollout utility Collecting a utility for all \\(s\\) is real hard. Therefore, instead, we perform a bunch of Rollouts and then calculate, for each trajectory \\(\\tau\\) you ended up with:\n\\begin{align} U(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\ \u0026amp;= \\int_{\\tau} p_{\\tau} (\\tau) R(\\tau) d\\tau \\end{align}\nwhere, \\(p(\\tau)\\) is the probability of that trajectory happening, and \\(R(\\tau)\\) is the discounted future reward of that trajectory. That is:\n\\begin{equation} R(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1} \\end{equation}\nmonte-carlo policy evaluation Sometimes, we can\u0026rsquo;t even get all trajectories to add them up, so we simply perform an average of \\(m\\) sample trajectories:\n\\begin{equation} U(\\pi_{\\theta}) = \\frac{1}{m}\\sum_{i=1}^{m} R(\\tau^{i}) \\end{equation}\nWe start each trajectory using a probability-weighted sample of initial states. 
This is the Roll-out utility\n","html":"\u003cp\u003eSee also \u003ca href=\"#rollout-utility\"\u003eRoll-out utility\u003c/a\u003e if you don\u0026rsquo;t want to get a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e over all states.\u003c/p\u003e\n\u003ch2 id=\"solving-for-the-utility-of-a-policy\"\u003esolving for the utility of a policy\u003c/h2\u003e\n\u003cp\u003eWe can solve for the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e given the transitions \\(T\\) and reward \\(R\\) by solving the following equation\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(T\\) is an \\(|S| \\times |S|\\) square matrix where each horizontal row is supposed to add up to \\(1\\) which encodes the probability of transitioning from each horizontal row to the column next rows.\u003c/p\u003e\n\u003ch3 id=\"lookahead-equation\"\u003elookahead equation\u003c/h3\u003e\n\u003cp\u003eWe begin our derivation from \u003ca href=\"/posts/kbhmarkov_decision_process/#finite-horizon-models\"\u003efinite-horizon models\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGives some policy \\(\\pi\\), at the base case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}_{1} (s) = R(s, \\pi(s))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eat time \\(k+1\\) steps remaining:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}_{k+1}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi}_{k} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe don\u0026rsquo;t know what the next state will be; so for each possible next state, we marginalize the result, multiplying the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of being in that state (gotten 
by \\(T(\u0026hellip;)\\)) times the utility of being in that state.\u003c/p\u003e\n\u003cp\u003eThis is called the \u003ca href=\"#lookahead-equation\"\u003elookahead equation\u003c/a\u003e, which represents how much \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e any future state can be be if we took action at point \\(k\\).\u003c/p\u003e\n\u003ch4 id=\"lookahead-with-sampling\"\u003elookahead with sampling\u003c/h4\u003e\n\u003cp\u003ewhat if we only want to get \\(m\\) of the next states, instead of all next states?\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-02_16-45-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"#bellman-expectation-equation\"\u003eBellman Equation\u003c/a\u003e states that \u0026ldquo;the expected utility of being in a state is the instantaneous reward of being in that state plus the discounted future utility of all possible future state.\u0026rdquo; It is the fundamental result of RL.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) = \\arg\\max_{a} R(s, a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, a) U (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we are dealing with \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e (at \u0026ldquo;convergence\u0026rdquo; of the \u003ca href=\"#lookahead-equation\"\u003elookahead equation\u003c/a\u003e), we just no longer have a time dependency from the \u003ca href=\"#lookahead-equation\"\u003elookahead equation\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003eWe only care about some Markovian state \\(s\\), and its next possible states \\(s\u0026rsquo;\\). 
When these pair happened doesn\u0026rsquo;t matter.\u003c/p\u003e\n\u003cp\u003eFor a stochastic policy, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(S) = \\sum_{a}^{} \\pi(a|s) \\qty[R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) U(s\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWe now can go about solving for what \\(U^{\\pi}\\) is:\u003c/p\u003e\n\u003cp\u003eProcedure:\u003c/p\u003e\n\u003cp\u003efrom the \u003ca href=\"#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e, we actually have a linear equation whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{U}^{\\pi} = \\bold{R}^{\\pi} + \\gamma T^{\\pi}\\bold{U}^{\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(T^{\\pi}\\) is an \\(n\\times n\\) matrix where \\(T^{\\pi}_{i,j}\\) represents the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of transitioning from the \\(i\\) th to the \\(j\\) the state; and where, \\(\\bold{U}^{\\pi}\\) and \\(\\bold{R}^{\\pi}\\) are \\(n\\) vectors which represents all possible states and all possible utilities. Note that everything is parametrized on \\(\\pi\\) (so \\(T\\) doesn\u0026rsquo;t need an action dimension because we will be using the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e to calculate all the actoins)\u003c/p\u003e\n\u003cp\u003eWe can now solve for the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of the \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e. Now, algebra time on the previous equation to get us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bold{U}^{\\pi} = (I - \\gamma T^{\\pi})^{-1} \\bold{R}^{\\pi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe know that \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e because its a transition matrix. 
And that, folks, is the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"approximate-policy-evaluation\"\u003eApproximate Policy Evaluation\u003c/h2\u003e\n\u003cp\u003eInstead of having a \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e based on a vector out of the fitness of this \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e at all possible states, which really works if our state space is small, what if we made a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e evaluation scheme which estimates the expectation of the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of our policy based on the possibility of us landing in particular states?\u003c/p\u003e\n\u003ch3 id=\"background\"\u003eBackground\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e from following a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e AT A STATE is given by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\pi}(s) = R(s, \\pi(s)) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s, \\pi(s)) U^{\\pi} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of a policy, in general, can be represented by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(\\pi) = \\sum_{s}^{} b(s) U^{\\pi}(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(b(s)\\) is the \u0026ldquo;initial state distribution\u0026rdquo; of being in a particular state.\u003c/p\u003e\n\u003cp\u003eOur state space may not be discrete or otherwise small enough to be added up for every case. 
We therefore can a sampling of \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003etrajectory\u003c/a\u003e to perform \u003ca href=\"#approximate-policy-evaluation\"\u003eApproximate Policy Evaluation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"rollout-utility\"\u003eRollout utility\u003c/h3\u003e\n\u003cp\u003eCollecting a utility for all \\(s\\) is real hard. Therefore, instead, we perform a bunch of \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003es and then calculate, for each trajectory \\(\\tau\\) you ended up with:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\\n\u0026amp;= \\int_{\\tau} p_{\\tau} (\\tau) R(\\tau) d\\tau\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, \\(p(\\tau)\\) is the probability of that trajectory happening, and \\(R(\\tau)\\) is the discounted future reward of that trajectory. That is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"monte-carlo-policy-evaluation\"\u003emonte-carlo policy evaluation\u003c/h4\u003e\n\u003cp\u003eSometimes, we can\u0026rsquo;t even get all trajectories to add them up, so we simply perform an average of \\(m\\) sample trajectories:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(\\pi_{\\theta}) = \\frac{1}{m}\\sum_{i=1}^{m} R(\\tau^{i})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe start each trajectory using a probability-weighted sample of initial states. 
This is the \u003ca href=\"#rollout-utility\"\u003eRoll-out utility\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_evaluation/","tags":null,"title":"policy evaluation"},{"categories":null,"contents":"Two steps:\nobtaining a function for the gradient of policy against some parameters \\(\\theta\\) making them more based than they are right now by optimization Thoughout all of this, \\(U(\\theta)\\) is \\(U(\\pi_{\\theta})\\).\nObtaining a policy gradient Finite-Difference Gradient Estimation We want some expression for:\n\\begin{equation} \\nabla U(\\theta) = \\qty[\\pdv{U}{\\theta_{1}} (\\theta), \\dots, \\pdv{U}{\\theta_{n}}] \\end{equation}\nwe can estimate that with the finite-difference \u0026ldquo;epsilon trick\u0026rdquo;:\n\\begin{equation} \\nabla U(\\theta) = \\qty[ \\frac{U(\\theta + \\delta e^{1}) - U(\\theta)}{\\delta} , \\dots, \\frac{U(\\theta + \\delta e^{n}) - U(\\theta)}{\\delta} ] \\end{equation}\nwhere \\(e^{j}\\) is the standard basis vector at position \\(j\\). 
We essentially add a small \\(\\delta\\) to the \\(j\\) th slot of each parameter \\(\\theta_{j}\\), and divide to get an estimate of the gradient.\nLinear Regression Gradient Estimate We perform \\(m\\) random perturbations of \\(\\theta\\) parameters, and lay each resulting parameter vector flat onto a matrix:\n\\begin{equation} \\Delta \\theta = \\mqty[(\\Delta \\theta^{1})^{T} \\\\ \\dots \\\\ (\\Delta \\theta^{m})^{T}] \\end{equation}\nFor \\(\\theta\\) that contains \\(n\\) parameters, this is a matrix \\(m\\times n\\).\nWe can now write out the \\(\\Delta U\\) with:\n\\begin{equation} \\Delta U = \\qty[U(\\theta+ \\Delta \\theta^{1}) - U(\\theta), \\dots, U(\\theta+ \\Delta \\theta^{m}) - U(\\theta)] \\end{equation}\nWe have to compute Roll-out utility for each \\(U(\\theta + \u0026hellip;)\\)\nWe now want to fit a function between \\(\\Delta \\theta\\) to \\(\\Delta U\\), because from the definition of the gradient we have:\n\\begin{equation} \\Delta U = \\nabla_{\\theta} U(\\theta)\\ \\Delta \\theta \\end{equation}\n(that is \\(y = mx\\))\nRearranging the expression above\n\\begin{equation} \\nabla_{\\theta} U(\\theta) \\approx \\Delta \\theta^{\\dagger} \\Delta U \\end{equation}\nwhere \\(\\Delta \\theta^{\\dagger}\\) is the pseudoinverse of \\(\\Delta \\theta\\) matrix.\nTo end up at a gradient estimate.\nLikelyhood Ratio Gradient This is likely good, but requires a few things:\nan explicit transition model that you can compute over you being able to take the gradient of the policy this is what people usually refers to as \u0026ldquo;Policy Gradient\u0026rdquo;.\nRecall:\n\\begin{align} U(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\ \u0026amp;= \\int_{\\tau} p_{\\pi} (\\tau) R(\\tau) d\\tau \\end{align}\nNow consider:\n\\begin{align} \\nabla_{\\theta} U(\\theta) \u0026amp;= \\int_{\\tau} \\nabla_{\\theta} p_{\\pi}(\\tau) R(\\tau) d \\tau \\\\ \u0026amp;= \\int_{\\tau} \\frac{p_{\\pi} (\\tau)}{p_{\\pi} (\\tau)} \\nabla_{\\tau} p_{\\tau}(\\tau) 
R(\\tau) d \\tau \\end{align}\nAside 1:\nNow, consider the expression:\n\\begin{equation} \\nabla \\log p_{\\pi} (\\tau) = \\frac{\\nabla p_{\\pi}(\\tau)}{p_{\\pi} \\tau} \\end{equation}\nThis is just out of calculus. Consider the derivative chain rule; now, the derivative of \\(\\log (x) = \\frac{1}{x}\\) , and the derivative of the inside is \\(\\nabla x\\).\nRearranging that, we have:\n\\begin{equation} \\nabla p_{\\pi}(\\tau) = (\\nabla \\log p_{\\pi} (\\tau))(p_{\\pi} \\tau) \\end{equation}\nSubstituting that in, one of our \\(p_{\\pi}(\\tau)\\) cancels out, and, we have:\n\\begin{equation} \\int_{\\tau} p_{\\pi}(\\tau) \\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau) \\dd{\\tau} \\end{equation}\nYou will note that this is the definition of the expectation of the right half (everything to the right of \\(\\nabla_{\\theta}\\)) vis a vi all \\(\\tau\\) (multiplying it by \\(p(\\tau)\\)). Therefore:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} [\\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau)] \\end{equation}\nAside 2:\nRecall that \\(\\tau\\) a trajectory is a pair of \\(s_1, a_1, \u0026hellip;, s_{n}, a_{d}\\).\nWe want to come up with some \\(p_{\\pi}(\\tau)\\), \u0026ldquo;what\u0026rsquo;s the probability of a trajectory happening given a policy\u0026rdquo;.\n\\begin{equation} p_{\\pi}(\\tau) = p(s^{1}) \\prod_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) \\pi_{\\theta} (a^{k}|s^{k}) \\end{equation}\n(\u0026ldquo;probably of being at a state, times probability of the transition happening, times the probability of the action happening, so on, so on\u0026rdquo;)\nNow, taking the log of it causes the product to become a summation:\n\\begin{equation} \\log p_{\\pi}(\\tau) = p(s^{1}) + \\sum_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) + \\pi_{\\theta} (a^{k}|s^{k}) \\end{equation}\nPlugging this into our expectation equation:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\nabla_{\\theta} \\qty(p(s^{1}) + \\sum_{k=1}^{d} 
p(s^{k+1} | s^{k}, a^{k}) + \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)] \\end{equation}\nThis is an important result. You will note that \\(p(s^{1})\\) and \\(p(s^{k+1}|s^{k},a^{k})\\) doesn\u0026rsquo;t have a \\(\\theta\\) term in them!!!!. Therefore, taking term in them!!!!*. Therefore, taking the \\(\\nabla_{\\theta}\\) of them becomes\u0026hellip; ZERO!!! Therefore:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\qty(0 + \\sum_{k=1}^{d} 0 + \\nabla_{\\theta} \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)] \\end{equation}\nSo based. We now have:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) R(\\tau)] \\end{equation}\nwhere,\n\\begin{equation} R(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1} \\end{equation}\n\u0026ldquo;this is very nice\u0026rdquo; because we do not need to know anything regarding the transition model. This means we don\u0026rsquo;t actually need to know what \\(p(s^{k+1}|s^{k}a^{k})\\) because that term just dropped out of the gradient.\nWe can simulate a few trajectories; calculate the gradient, and average them to end up with our overall gradient.\nReward-to-Go Variance typically increases with Rollout depth. We don\u0026rsquo;t want that. We want to correct for the causality of action/reward. 
Action in the FUTURE do not influence reward in the PAST.\nRecall:\n\\begin{equation} R(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1} \\end{equation}\nLet us plug this into the policy gradient expression:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1})] \\end{equation}\nLet us split this reward into two piece; one piece for the past (up to \\(k-1\\)), and one for the future:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{l=1}^{k-1} r_{l}\\ \\gamma^{l-1} + \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1})] \\end{equation}\nWe now want to ignore all the past rewards (i.e. the first half of the internal summation). Again, this is because action in the future shouldn\u0026rsquo;t care about what reward was gather in the past.\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1}] \\end{equation}\nWe now factor out \\(\\gamma^{k-1}\\) to make the expression look like:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k})] \\end{equation}\nWe call the right term Reward-to-Go:\n\\begin{equation} r_{togo}(k) = \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k} \\end{equation}\nwhere \\(d\\) is the depth of your trajectory and \\(k\\) is your current state. 
Finally, then:\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} r_{togo}(k))] \\end{equation}\nBaseline subtraction Sometimes, we want to subtract a baseline reward to show how much actually better an action is (instead of blindly summing all future rewards). This could be the average reward at all actions at that state, this could be any other thing of your choosing.\n\\begin{equation} \\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} (r_{togo}(k) - r_{baseline}(k)))] \\end{equation}\nFor instance, if you have a system where each action all gave \\(+1000\\) reward, taking any particular action isn\u0026rsquo;t actually very good. Hence:\nOptimizing the Policy Gradient We want to make \\(U(\\theta)\\) real big. We have two knobs: what is our objective function, and what is your restriction?\nPolicy Gradient Ascent good \u0026lsquo;ol fashioned\n\\begin{equation} \\theta \\leftarrow \\theta + \\alpha \\nabla U(\\theta) \\end{equation}\nwhere \\(\\alpha\\) is learning rate/step factor. This is not your STEP SIZE. 
If you want to specify a step size, see Restricted Step Method.\nRestricted Step Method Policy Gradient Ascent can take very large steps if the gradient is too large.\nOne by which we can optimize the gradient, ensuring that we don\u0026rsquo;t take steps larger than:\n\\begin{equation} \\frac{1}{2}(\\theta\u0026rsquo; - \\theta)^{T} I(\\theta\u0026rsquo; - \\theta) \\leq \\epsilon \\end{equation}\nis through Restricted Gradient:\n\\begin{equation} \\theta \\leftarrow \\theta + \\sqrt{2 \\epsilon} \\frac{\\nabla U(\\theta)}{|| \\nabla U(\\theta)||} \\end{equation}\nOccasionally, if a step-size is directly given to you in terms of euclidean distance, then you would replace the entirety of \\(\\sqrt{2 \\epsilon}\\) with your provided step size.\nTrust Region Policy Optimization Using a different way of restricting the update.\nProximal Policy Optimization Clipping the gradients.\n","html":"\u003cp\u003eTwo steps:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eobtaining a function for the gradient of policy against some parameters \\(\\theta\\)\u003c/li\u003e\n\u003cli\u003emaking them more based than they are right now by optimization\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThoughout all of this, \\(U(\\theta)\\) is \\(U(\\pi_{\\theta})\\).\u003c/p\u003e\n\u003ch2 id=\"obtaining-a-policy-gradient\"\u003eObtaining a policy gradient\u003c/h2\u003e\n\u003ch3 id=\"finite-difference-gradient-estimation\"\u003eFinite-Difference Gradient Estimation\u003c/h3\u003e\n\u003cp\u003eWe want some expression for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla U(\\theta) = \\qty[\\pdv{U}{\\theta_{1}} (\\theta), \\dots, \\pdv{U}{\\theta_{n}}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can estimate that with the finite-difference \u0026ldquo;epsilon trick\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla U(\\theta) = \\qty[ \\frac{U(\\theta + \\delta e^{1}) - U(\\theta)}{\\delta} , \\dots, \\frac{U(\\theta + \\delta e^{n}) - U(\\theta)}{\\delta} 
]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(e^{j}\\) is the standard \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e at position \\(j\\). We essentially add a small \\(\\delta\\) to the \\(j\\) th slot of each parameter \\(\\theta_{j}\\), and divide to get an estimate of the gradient.\u003c/p\u003e\n\u003ch3 id=\"linear-regression-gradient-estimate\"\u003eLinear Regression Gradient Estimate\u003c/h3\u003e\n\u003cp\u003eWe perform \\(m\\) random perturbations of \\(\\theta\\) parameters, and lay each resulting \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e vector flat onto a matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta \\theta = \\mqty[(\\Delta \\theta^{1})^{T} \\\\ \\dots \\\\ (\\Delta \\theta^{m})^{T}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor \\(\\theta\\) that contains \\(n\\) parameters, this is a matrix \\(m\\times n\\).\u003c/p\u003e\n\u003cp\u003eWe can now write out the \\(\\Delta U\\) with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta U = \\qty[U(\\theta+ \\Delta \\theta^{1}) - U(\\theta), \\dots, U(\\theta+ \\Delta \\theta^{m}) - U(\\theta)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have to compute \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e for each \\(U(\\theta + \u0026hellip;)\\)\u003c/p\u003e\n\u003cp\u003eWe now want to fit a function between \\(\\Delta \\theta\\) to \\(\\Delta U\\), because from the definition of the gradient we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta U = \\nabla_{\\theta} U(\\theta)\\ \\Delta \\theta\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(that is \\(y = mx\\))\u003c/p\u003e\n\u003cp\u003eRearranging the expression above\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) \\approx \\Delta \\theta^{\\dagger} \\Delta U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\Delta 
\\theta^{\\dagger}\\) is the \u003ca href=\"\"\u003epseudoinverse\u003c/a\u003e of \\(\\Delta \\theta\\) matrix.\u003c/p\u003e\n\u003cp\u003eTo end up at a gradient estimate.\u003c/p\u003e\n\u003ch3 id=\"likelyhood-ratio-gradient--kbhpolicy-gradient-dot-md\"\u003eLikelyhood Ratio \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003eGradient\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThis is likely good, but requires a few things:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ean explicit transition model that you can compute over\u003c/li\u003e\n\u003cli\u003eyou being able to take the gradient of the policy\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethis is what people usually refers to as \u0026ldquo;\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nU(\\pi_{\\theta}) \u0026amp;= \\mathbb{E}[R(\\tau)] \\\\\n\u0026amp;= \\int_{\\tau} p_{\\pi} (\\tau) R(\\tau) d\\tau\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow consider:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\nabla_{\\theta} U(\\theta) \u0026amp;= \\int_{\\tau} \\nabla_{\\theta} p_{\\pi}(\\tau) R(\\tau) d \\tau \\\\\n\u0026amp;= \\int_{\\tau} \\frac{p_{\\pi} (\\tau)}{p_{\\pi} (\\tau)} \\nabla_{\\tau} p_{\\tau}(\\tau) R(\\tau) d \\tau\n\\end{align}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside 1:\u003c/p\u003e\n\u003cp\u003eNow, consider the expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla \\log p_{\\pi} (\\tau) = \\frac{\\nabla p_{\\pi}(\\tau)}{p_{\\pi} \\tau}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is just out of calculus. 
Consider the derivative chain rule; now, the derivative of \\(\\log (x) = \\frac{1}{x}\\) , and the derivative of the inside is \\(\\nabla x\\).\u003c/p\u003e\n\u003cp\u003eRearranging that, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla p_{\\pi}(\\tau) = (\\nabla \\log p_{\\pi} (\\tau))(p_{\\pi} \\tau)\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eSubstituting that in, one of our \\(p_{\\pi}(\\tau)\\) cancels out, and, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{\\tau} p_{\\pi}(\\tau) \\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau) \\dd{\\tau}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that this is the definition of the \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e of the right half (everything to the right of \\(\\nabla_{\\theta}\\)) vis a vi all \\(\\tau\\) (multiplying it by \\(p(\\tau)\\)). Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} [\\nabla_{\\theta} \\log p_{\\pi}(\\tau) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside 2:\u003c/p\u003e\n\u003cp\u003eRecall that \\(\\tau\\) a trajectory is a pair of \\(s_1, a_1, \u0026hellip;, s_{n}, a_{d}\\).\u003c/p\u003e\n\u003cp\u003eWe want to come up with some \\(p_{\\pi}(\\tau)\\), \u0026ldquo;what\u0026rsquo;s the probability of a trajectory happening given a policy\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{\\pi}(\\tau) = p(s^{1}) \\prod_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) \\pi_{\\theta} (a^{k}|s^{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;probably of being at a state, times probability of the transition happening, times the probability of the action happening, so on, so on\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eNow, taking the log of it causes the product to become a summation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log p_{\\pi}(\\tau) = p(s^{1}) + \\sum_{k=1}^{d} p(s^{k+1} | s^{k}, 
a^{k}) + \\pi_{\\theta} (a^{k}|s^{k})\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003ePlugging this into our expectation equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\nabla_{\\theta} \\qty(p(s^{1}) + \\sum_{k=1}^{d} p(s^{k+1} | s^{k}, a^{k}) + \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is an important result. You will note that \\(p(s^{1})\\) and \\(p(s^{k+1}|s^{k},a^{k})\\) \u003cstrong\u003edoesn\u0026rsquo;t have a \\(\\theta\\) term in them!!!!\u003c/strong\u003e. Therefore, taking term in them!!!!*. Therefore, taking the \\(\\nabla_{\\theta}\\) of them becomes\u0026hellip; ZERO!!! Therefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\qty(0 + \\sum_{k=1}^{d} 0 + \\nabla_{\\theta} \\pi_{\\theta} (a^{k}|s^{k})) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo based. We now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) R(\\tau)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;this is very nice\u0026rdquo; because we do not need to know anything regarding the transition model. This means we don\u0026rsquo;t actually need to know what \\(p(s^{k+1}|s^{k}a^{k})\\) because that term just dropped out of the gradient.\u003c/p\u003e\n\u003cp\u003eWe can simulate a few trajectories; calculate the gradient, and average them to end up with our overall gradient.\u003c/p\u003e\n\u003ch3 id=\"reward-to-go\"\u003eReward-to-Go\u003c/h3\u003e\n\u003cp\u003eVariance typically increases with \u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e depth. 
We don\u0026rsquo;t want that. We want to correct for the causality of action/reward. Action in the FUTURE do not influence reward in the PAST.\u003c/p\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(\\tau) = \\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us plug this into the \u003ca href=\"#linear-regression-gradient-estimate\"\u003epolicy gradient\u003c/a\u003e expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{k=1}^{d} r_{k}\\ \\gamma^{k-1})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet us split this reward into two piece; one piece for the past (up to \\(k-1\\)), and one for the future:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\sum_{l=1}^{k-1} r_{l}\\ \\gamma^{l-1} + \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now want to ignore all the past rewards (i.e. the first half of the internal summation). 
Again, this is because action in the future shouldn\u0026rsquo;t care about what reward was gather in the past.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-1}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now factor out \\(\\gamma^{k-1}\\) to make the expression look like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k})]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe call the right term \u003ca href=\"#reward-to-go\"\u003eReward-to-Go\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{togo}(k) = \\sum_{l=k}^{d} r_{l}\\ \\gamma^{l-k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(d\\) is the depth of your trajectory and \\(k\\) is your current state. Finally, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} r_{togo}(k))]\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"baseline-subtraction\"\u003eBaseline subtraction\u003c/h3\u003e\n\u003cp\u003eSometimes, we want to subtract a baseline reward to show how much actually better an action is (instead of blindly summing all future rewards). 
This could be the average reward at all actions at that state, this could be any other thing of your choosing.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\nabla_{\\theta} U(\\theta) = \\mathbb{E}_{\\tau} \\qty[\\sum_{k=1}^{d} \\nabla_{\\theta} \\log \\pi_{\\theta}(a^{k}|s^{k}) \\qty(\\gamma^{k-1} (r_{togo}(k) - r_{baseline}(k)))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor instance, if you have a system where each action all gave \\(+1000\\) reward, taking any particular action isn\u0026rsquo;t actually very good. Hence:\u003c/p\u003e\n\u003ch2 id=\"optimizing-the-policy-gradient--kbhpolicy-gradient-dot-md\"\u003eOptimizing the \u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWe want to make \\(U(\\theta)\\) real big. We have two knobs: what is our objective function, and what is your restriction?\u003c/p\u003e\n\u003ch3 id=\"policy-gradient-ascent\"\u003ePolicy Gradient Ascent\u003c/h3\u003e\n\u003cp\u003egood \u0026lsquo;ol fashioned\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\alpha \\nabla U(\\theta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\alpha\\) is learning rate/step \u003cstrong\u003efactor\u003c/strong\u003e. This is not your STEP SIZE. 
If you want to specify a step size, see \u003ca href=\"#restricted-step-method\"\u003eRestricted Step Method\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"restricted-step-method\"\u003eRestricted Step Method\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#policy-gradient-ascent\"\u003ePolicy Gradient Ascent\u003c/a\u003e can take very large steps if the gradient is too large.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-02_16-23-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOne by which we can optimize the gradient, ensuring that we don\u0026rsquo;t take steps larger than:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{2}(\\theta\u0026rsquo; - \\theta)^{T} I(\\theta\u0026rsquo; - \\theta) \\leq \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis through \u003ca href=\"#restricted-step-method\"\u003eRestricted Gradient\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\theta \\leftarrow \\theta + \\sqrt{2 \\epsilon} \\frac{\\nabla U(\\theta)}{|| \\nabla U(\\theta)||}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOccasionally, if a step-size is directly given to you in terms of euclidean distance, then you would replace the entirety of \\(\\sqrt{2 \\epsilon}\\) with your provided step size.\u003c/p\u003e\n\u003ch3 id=\"trust-region-policy-optimization\"\u003eTrust Region Policy Optimization\u003c/h3\u003e\n\u003cp\u003eUsing a different way of restricting the update.\u003c/p\u003e\n\u003ch3 id=\"proximal-policy-optimization\"\u003eProximal Policy Optimization\u003c/h3\u003e\n\u003cp\u003eClipping the gradients.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_gradient/","tags":null,"title":"Policy Gradient"},{"categories":null,"contents":"policy iteration will allow us to get an optimal policy.\nstart with some initial policy \\(\\pi\\) (this scheme converges to an optimal policy regardless of where you start) solve for \\(U^{\\pi}\\) create a new policy \\(\\pi\u0026rsquo;\\) by 
creating a value-function policy on \\(U^{\\pi}\\) repeat 2-3 Since there are a finite policies, this will eventually converge.\nAt each point, the utility of the policy increases.\nAt each step, the utility of the resulting policy will necessarily be larger or equal to than the previous one as we are greedily choosing \u0026ldquo;better\u0026rdquo; (or equivalent) actions as measured by the utility of the previous policy.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e will allow us to get an \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart with some initial \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e \\(\\pi\\) (this scheme converges to an \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e regardless of where you start)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#solving-for-the-utility-of-a-policy\"\u003esolve for \\(U^{\\pi}\\)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ecreate a new policy \\(\\pi\u0026rsquo;\\) by creating a \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e on \\(U^{\\pi}\\)\u003c/li\u003e\n\u003cli\u003erepeat 2-3\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSince there are a finite policies, this will eventually converge.\u003c/p\u003e\n\u003cp\u003eAt each point, the utility of the policy increases.\u003c/p\u003e\n\u003cp\u003eAt each step, the utility of the resulting policy will necessarily be larger or equal to than the previous one as we are greedily choosing \u0026ldquo;better\u0026rdquo; (or equivalent) actions as measured by the utility of the previous policy.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_iteration/","tags":null,"title":"policy iteration"},{"categories":null,"contents":"Policy Optimization deals with algorithms 
that, unlike value iteration/policy iteration/online planning which uses a surrogate (like value function or some future discounted reward) to calculate a policy, directly optimizes against policy parameters \\(\\theta\\) for a policy \\(\\pi_{\\theta}\\).\nLocal Policy Search (aka Hooke-Jeeves Policy Search) Genetic Policy Search Cross Entropy Method Policy Gradient, Regression Gradient and Likelyhood Ratio Gradient Reward-to-Go ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e deals with algorithms that, unlike \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e/\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e/\u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e which uses a surrogate (like \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e or some future discounted reward) to calculate a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e, directly optimizes against policy parameters \\(\\theta\\) for a policy \\(\\pi_{\\theta}\\).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e (aka \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eHooke-Jeeves Policy Search\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_method/\"\u003eCross Entropy Method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e, \u003ca href=\"/posts/kbhpolicy_gradient/#linear-regression-gradient-estimate\"\u003eRegression Gradient\u003c/a\u003e and \u003ca 
href=\"/posts/kbhpolicy_gradient/#likelyhood-ratio-id-2765155a-ba00-4014-b2a7-cf2f4f184178-gradient\"\u003eLikelyhood Ratio Gradient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#reward-to-go\"\u003eReward-to-Go\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolicy_optimization/","tags":null,"title":"Policy Optimization"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpolio/","tags":null,"title":"Polio"},{"categories":null,"contents":"A polynomial is a polynomial\nconstituents a function \\(p: \\mathbb{F} \\to \\mathbb{F}\\) coefficient \\(a_0, \\dots, a_{m} \\in \\mathbb{F}\\) requirements A polynomial is defined by:\n\\begin{equation} p(z)=a_0+a_1z+a_2z^{2}+\\dots +a_{m}z^{m} \\end{equation}\nfor all \\(z \\in \\mathbb{F}\\)\nadditional information degree of a polynomial \\(\\deg p\\) A polynomial\u0026rsquo;s degree is the value of the highest non-zero exponent. That is, for a polynomial:\n\\begin{equation} p(z) = a_0+a_1z+\\dots +a_{m}z^{m} \\end{equation}\nwith \\(a_{m} \\neq 0\\), the degree of it is \\(m\\). We write \\(\\deg p = m\\).\nA polynomial \\(=0\\) is defined to have degree \\(-\\infty\\)\nOf course, a polynomial with degree \\(n\\), times a polynomial of degree \\(m\\), has degree \\(mn\\). We see that:\n\\begin{equation} x^{n}x^{m} = x^{n+m} \\end{equation}\n\\(\\mathcal{P}(\\mathbb{F})\\) \\(\\mathcal{P}(\\mathbb{F})\\) is the set of all polynomials with coefficients in \\(\\mathbb{F}\\).\n\\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\) We first see that polynomials are functions from \\(\\mathbb{F}\\to \\mathbb{F}\\). 
We have shown previously that F^s is a Vector Space Over F.\nTherefore, we can first say that \\(\\mathcal{P}(\\mathbb{F}) \\subset \\mathbb{F}^{\\mathbb{F}}\\).\nLastly, we simply have to show that \\(\\mathcal{P}(\\mathbb{F})\\) is a subspace.\nzero exists by taking all \\(a_{m} = 0\\) addition is closed by inheriting commutativity and distributivity in \\(\\mathbb{F}\\) scalar multiplication is closed by distributivity Having satisfied the conditions of subspace, \\(\\mathcal{P}(\\mathbb{F})\\) is a vector space. \\(\\blacksquare\\)\n\\(\\mathcal{P}_{m}(\\mathbb{F})\\) For \\(m\\geq 0\\), \\(\\mathcal{P}_{m}(\\mathbb{F})\\) denotes the set of all polynomials with coefficients \\(\\mathbb{F}\\) and degree at most \\(m\\).\nproduct of polynomials see product of polynomials\npolynomial of operator see polynomial of operator\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e is a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea function \\(p: \\mathbb{F} \\to \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e \\(a_0, \\dots, a_{m} \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eA polynomial is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(z)=a_0+a_1z+a_2z^{2}+\\dots +a_{m}z^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(z \\in \\mathbb{F}\\)\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"degree-of-a-polynomial-deg-p\"\u003edegree of a polynomial \\(\\deg p\\)\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e\u0026rsquo;s \u003ca href=\"#degree-of-a-polynomial-deg-p\"\u003edegree\u003c/a\u003e is the value of the 
highest non-zero exponent. That is, for a polynomial:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(z) = a_0+a_1z+\\dots +a_{m}z^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(a_{m} \\neq 0\\), the \u003ca href=\"#degree-of-a-polynomial-deg-p\"\u003edegree\u003c/a\u003e of it is \\(m\\). We write \\(\\deg p = m\\).\u003c/p\u003e\n\u003cp\u003eA polynomial \\(=0\\) is defined to have degree \\(-\\infty\\)\u003c/p\u003e\n\u003cp\u003eOf course, a polynomial with \u003ca href=\"#degree-of-a-polynomial-deg-p\"\u003edegree\u003c/a\u003e \\(n\\), times a \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003e of degree \\(m\\), has degree \\(mn\\). We see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{n}x^{m} = x^{n+m}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"mathcal-p--mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\)\u003c/h3\u003e\n\u003cp\u003e\\(\\mathcal{P}(\\mathbb{F})\\) is the set of all \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es with \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003es in \\(\\mathbb{F}\\).\u003c/p\u003e\n\u003ch3 id=\"mathcal-p--mathbb-f--is-a-vector-space-over-mathbb-f\"\u003e\\(\\mathcal{P}(\\mathbb{F})\\) is a vector space over \\(\\mathbb{F}\\)\u003c/h3\u003e\n\u003cp\u003eWe first see that \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es are functions from \\(\\mathbb{F}\\to \\mathbb{F}\\). 
We have shown previously that \u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003eF^s is a Vector Space Over F\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTherefore, we can first say that \\(\\mathcal{P}(\\mathbb{F}) \\subset \\mathbb{F}^{\\mathbb{F}}\\).\u003c/p\u003e\n\u003cp\u003eLastly, we simply have to show that \\(\\mathcal{P}(\\mathbb{F})\\) is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e exists by taking all \\(a_{m} = 0\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e by inheriting \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e and \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e in \\(\\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e by \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHaving satisfied the conditions of \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e, \\(\\mathcal{P}(\\mathbb{F})\\) is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"mathcal-p-m--mathbb-f\"\u003e\\(\\mathcal{P}_{m}(\\mathbb{F})\\)\u003c/h3\u003e\n\u003cp\u003eFor \\(m\\geq 0\\), \\(\\mathcal{P}_{m}(\\mathbb{F})\\) denotes the set of all \u003ca href=\"/posts/kbhpolynomial/\"\u003epolynomial\u003c/a\u003es with \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003es \\(\\mathbb{F}\\) and degree at most \\(m\\).\u003c/p\u003e\n\u003ch3 id=\"product-of-polynomials--kbhproduct-of-polynomial-dot-md\"\u003e\u003ca href=\"\"\u003eproduct of polynomials\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"\"\u003eproduct of polynomials\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"polynomial-of-operator--kbhpolynomial-operator-dot-md\"\u003e\u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial of operator\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhpolynomial_operator/\"\u003epolynomial of operator\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpolynomial/","tags":null,"title":"polynomial"},{"categories":null,"contents":"Previous monte-carlo tree search methods which are not competitive to PBVI, SARSOP, etc., but those are affected by close-up history.\nkey point: monte-cargo roll outs best-first tree search + unweighted particle filter (instead of categorical beliefs)\nBackground History: a trajectory of some \\(h = \\{a_1, o_1, \u0026hellip;\\}\\) generative model: we perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state Rollout: keep sampling at each point, rolling out and calculating future reward monte-carlo tree search loop: sample \\(s\\) from the belief distribution \\(B(h)\\) for each node and call that the node state loop until we reach a leaf: sample exploratino using UCB 1 via the belief get observation, reward, next state add leaf node, add node for 
each available action Rollout backpropegate the obtained value with discounts backwards via POMDP Bellman Backup During runtime, we choose the action with the best action, prune the tree given what you observed, and do this again in a different.\n","html":"\u003cp\u003ePrevious \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e methods which are not competitive to \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, \u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e, etc., but those are affected by close-up history.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ekey point\u003c/strong\u003e: monte-cargo roll outs best-first tree search + unweighted particle filter (instead of categorical beliefs)\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eHistory: a trajectory of some \\(h = \\{a_1, o_1, \u0026hellip;\\}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/#generative-model\"\u003egenerative model\u003c/a\u003e: we perform a random sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\u003c/li\u003e\n\u003cli\u003eRollout: keep sampling at each point, rolling out and calculating future reward\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"monte-carlo-tree-search--kbhmonte-carlo-tree-search-dot-md\"\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eloop:\n\u003col\u003e\n\u003cli\u003esample \\(s\\) from the belief distribution \\(B(h)\\) for each node and call that the node state\u003c/li\u003e\n\u003cli\u003eloop until we reach a leaf:\n\u003col\u003e\n\u003cli\u003esample exploratino using \u003ca href=\"/posts/kbhdirected_exploration/#ucb-1\"\u003eUCB 1\u003c/a\u003e via the 
belief\u003c/li\u003e\n\u003cli\u003eget observation, reward, next state\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eadd leaf node, add node for each available action\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout\"\u003eRollout\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebackpropegate the obtained value with discounts backwards via \u003ca href=\"/posts/kbhvalue_iteration/#pomdp-bellman-update\"\u003ePOMDP Bellman Backup\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eDuring runtime, we choose the action with the best action, prune the tree given what you observed, and do this again in a different.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomcp/","tags":null,"title":"POMCP"},{"categories":null,"contents":"POMDPs with continuous actions are hard. So POMCP or (belief update + MCTS).\nSo instead, let\u0026rsquo;s try improving that. Unlike just POMCP, not only do we have \\(B(h)\\), we also have \\(W(h)\\), which is the weight of a specific state sampled. Naively applying POMCP on continuous states will give a wide-ass tree because each sampled state will not be the same as before.\ndouble progressive widening We want to use sampling to sample from observation. This will eventually lead to a suboptimal QMDP policy\u0026mdash;this is because there are no state uncertainty?\nPOMCPOW get an action from ActionProgressiveWiden function Get an observation, if the observation we got has to many children we prune discard the observation and stick the next state onto previous observation weighted by the observation likelihood system \\(Z(o|s,a,s\u0026rsquo;)\\) \\(k, \\alpha, C\\)\nPFTDTW MCTS Particle filters Double Progressive Widening ","html":"\u003cp\u003ePOMDPs with continuous actions are hard. 
So \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e or (belief update + \u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003eMCTS\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eSo instead, let\u0026rsquo;s try improving that. Unlike just \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e, not only do we have \\(B(h)\\), we also have \\(W(h)\\), which is the weight of a specific state sampled. Naively applying \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e on continuous states will give a wide-ass tree because each sampled state will not be the same as before.\u003c/p\u003e\n\u003ch2 id=\"double-progressive-widening\"\u003edouble progressive widening\u003c/h2\u003e\n\u003cp\u003eWe want to use sampling to sample from observation. This will eventually lead to a suboptimal \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e policy\u0026mdash;this is because there are no state uncertainty?\u003c/p\u003e\n\u003ch2 id=\"pomcpow--kbhpomcpow-dot-md\"\u003e\u003ca href=\"/posts/kbhpomcpow/\"\u003ePOMCPOW\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eget an action from ActionProgressiveWiden function\u003c/li\u003e\n\u003cli\u003eGet an observation, if the observation we got has to many children we prune\u003c/li\u003e\n\u003cli\u003ediscard the observation and stick the next state onto previous observation weighted by the observation likelihood system \\(Z(o|s,a,s\u0026rsquo;)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\\(k, \\alpha, C\\)\u003c/p\u003e\n\u003ch2 id=\"pftdtw\"\u003ePFTDTW\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eMCTS\u003c/li\u003e\n\u003cli\u003eParticle filters\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdouble_progressive_widening/\"\u003eDouble Progressive Widening\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomcpow/","tags":null,"title":"POMCPOW"},{"categories":null,"contents":"Upper bounds of alpha vectors QMDP and FIB represents an 
upper bound of the true optimal alpha vector values.\nQMDP Fast Informed Bound FIB is a generally lower bound than QMDP.\nLower bounds of alpha vectors BAWS and blind lower bound represents\nFaster:\nbest-action worst-state blind lower bound Slower:\nPoint-Based Value Iteration \u0026ldquo;Perseus\u0026rdquo;: Randomized PBVI HSVI SARSOP point selection see point selection\n","html":"\u003ch2 id=\"upper-bounds-of-alpha-vector--kbhalpha-vector-dot-md--s\"\u003eUpper bounds of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e and \u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFIB\u003c/a\u003e represents an \u003cstrong\u003eupper bound\u003c/strong\u003e of the true optimal \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e values.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFast Informed Bound\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFIB\u003c/a\u003e is a generally lower bound than \u003ca href=\"/posts/kbhqmdp/\"\u003eQMDP\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"lower-bounds-of-alpha-vector--kbhalpha-vector-dot-md--s\"\u003eLower bounds of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003eBAWS\u003c/a\u003e and \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e represents\u003c/p\u003e\n\u003cp\u003eFaster:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower 
bound\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSlower:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePoint-Based Value Iteration\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Perseus\u0026rdquo;: \u003ca href=\"/posts/kbhpoint_based_value_iteration/#randomized-pbvi--kbhpoint-based-value-iteration-dot-md\"\u003eRandomized PBVI\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"point-selection\"\u003epoint selection\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomdp_approximation/","tags":null,"title":"POMDP Approximation"},{"categories":null,"contents":"What if our initial state never change or is deterministically changing? For instance, say, for localization. This should make solving a POMDP easier.\nPOMDP-lite \\(X\\) fully observable states \\(\\theta\\) hidden parameter: finite amount of values \\(\\theta_{1 \\dots N}\\) where \\(S = X \\times \\theta\\) we then assume conditional independence between \\(x\\) and \\(\\theta\\). So: \\(T = P(x\u0026rsquo;|\\theta, x, a)\\), where \\(P(\\theta\u0026rsquo;|\\theta,x,a) = 1\\) (\u0026ldquo;our hidden parameter is known or deterministically changing\u0026rdquo;)\nSolving Main Idea: if that\u0026rsquo;s the case, then we can split our models into a set of MDPs. Because \\(\\theta_{j}\\) change deterministically, we can have a MDP solved ONLINE over \\(X\\) and \\(T\\) for each possible initial \\(\\theta\\). 
Then, you just take the believe over \\(\\theta\\) and sample over the MDPs based on that belief.\nReward bonus To help coordination, we introduce a reward bonus\nexploration reward bonus, which encourages exploration (this helps coordinate) maintain a value \\(\\xi(b,x,a)\\) which is the number of times b,x,a is visited\u0026mdash;if it exceeds a number of times, clip reward bonus Whereby:\n\\begin{equation} RB(b,s,a) = \\beta \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) || b_{s} - b ||_{1} \\end{equation}\nwhich encourages information gain by encouraging exploring states with more \\(L_{1}\\) divergence in belief compared to our current belief.\nThen, we can formulate an augmented reward function \\(\\tilde{R}(b,s,a) = R(s,a) + RB(b,s,a)\\).\nSolution Finally, at each timestamp, we look at our observation and assume it does not change. This gives an MDP:\n\\begin{equation} \\tilde{V}^{*} (b,s) = \\max_{a} \\left\\{ \\tilde{R}(b,s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) \\tilde{V}^{*} (b,s\u0026rsquo;)\\right\\} \\end{equation}\nwhich we solve however we\u0026rsquo;d like. Authors used UCT.\nUCT ","html":"\u003cp\u003eWhat if our initial state never change or is deterministically changing? For instance, say, for localization. This should make solving a POMDP easier.\u003c/p\u003e\n\u003ch2 id=\"pomdp-lite\"\u003ePOMDP-lite\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) fully observable states\u003c/li\u003e\n\u003cli\u003e\\(\\theta\\) hidden parameter: finite amount of values \\(\\theta_{1 \\dots N}\\)\u003c/li\u003e\n\u003cli\u003ewhere \\(S = X \\times \\theta\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe then assume conditional independence between \\(x\\) and \\(\\theta\\). 
So: \\(T = P(x\u0026rsquo;|\\theta, x, a)\\), where \\(P(\\theta\u0026rsquo;|\\theta,x,a) = 1\\) (\u0026ldquo;our hidden parameter is known or deterministically changing\u0026rdquo;)\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003eSolving\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMain Idea\u003c/strong\u003e\u003c/strong\u003e: if that\u0026rsquo;s the case, then we can split our models into a set of \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003es. Because \\(\\theta_{j}\\) change deterministically, we can have a MDP solved \u003cstrong\u003eONLINE\u003c/strong\u003e over \\(X\\) and \\(T\\) for each possible initial \\(\\theta\\). Then, you just take the believe over \\(\\theta\\) and sample over the MDPs based on that belief.\u003c/p\u003e\n\u003ch3 id=\"reward-bonus\"\u003eReward bonus\u003c/h3\u003e\n\u003cp\u003eTo help coordination, we introduce a reward bonus\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eexploration reward bonus, which encourages exploration (this helps coordinate)\u003c/li\u003e\n\u003cli\u003emaintain a value \\(\\xi(b,x,a)\\) which is the number of times b,x,a is visited\u0026mdash;if it exceeds a number of times, clip reward bonus\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWhereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nRB(b,s,a) = \\beta \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) || b_{s} - b ||_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich encourages information gain by encouraging exploring states with more \\(L_{1}\\) divergence in belief compared to our current belief.\u003c/p\u003e\n\u003cp\u003eThen, we can formulate an augmented reward function \\(\\tilde{R}(b,s,a) = R(s,a) + RB(b,s,a)\\).\u003c/p\u003e\n\u003ch3 id=\"solution\"\u003eSolution\u003c/h3\u003e\n\u003cp\u003eFinally, at each timestamp, we look at our observation and assume it does not change. 
This gives an \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\tilde{V}^{*} (b,s) = \\max_{a} \\left\\{ \\tilde{R}(b,s,a) + \\gamma \\sum_{s\u0026rsquo;}^{} P(s\u0026rsquo;|b,s,a) \\tilde{V}^{*} (b,s\u0026rsquo;)\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich we solve however we\u0026rsquo;d like. Authors used \u003ca href=\"#uct\"\u003eUCT\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"uct\"\u003eUCT\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-06_09-54-45_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhpomdp_lite/","tags":null,"title":"POMDP-lite"},{"categories":null,"contents":"a class about POMDPs\nTheme Topics Robot dogs NeBula, AISR NeBula Applications POMDSoar, Offline Solvers PBVI, HSVI, Perseus Offline Solvers SARSOP, E-PCA, CALP Policy Graphs Hansen, MCVI, PGA Online Solvers AEMS, POMCP, DESPOT Moar Online Methods IS-DESPOT, POMCPOW, AdaOPS POMDPish MOMDP, POMDP-lite, rho-POMDPs Memoryless + Policy Search Sarsa (Lambda), JSJ, Pegasus Hierarchical Decomposition Option, MaxQ, LTRDP Hybrid Planning HybPlan, LetsDrive, BetaZero LQR + Shared Autonomy iLQR, Hindsight, TrustPOMDP Multi-Agent Factored MDPs, FV-POMCPs, G-DICE Other Content Research Tips STRIPS-style planning Temperal Abstraction Linear-Quadratic Regulator ","html":"\u003cp\u003ea class about \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eTheme\u003c/th\u003e\n\u003cth\u003eTopics\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eRobot dogs\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhnebula/\"\u003eNeBula\u003c/a\u003e, \u003ca href=\"/posts/kbhnebula/#aisr-nebula--kbhnebula-dot-md\"\u003eAISR 
NeBula\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eApplications\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhkolobov_2018/#pomdsoar\"\u003ePOMDSoar\u003c/a\u003e,\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOffline Solvers\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e, \u003ca href=\"/posts/kbhperseus/\"\u003ePerseus\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOffline Solvers\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsarsop/\"\u003eSARSOP\u003c/a\u003e, \u003ca href=\"/posts/kbhe_pca/\"\u003eE-PCA\u003c/a\u003e, \u003ca href=\"/posts/kbhcalp/\"\u003eCALP\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ePolicy Graphs\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhhansen/\"\u003eHansen\u003c/a\u003e, \u003ca href=\"/posts/kbhmcvi/\"\u003eMCVI\u003c/a\u003e, \u003ca href=\"/posts/kbhpga/\"\u003ePGA\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eOnline Solvers\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhaems/\"\u003eAEMS\u003c/a\u003e, \u003ca href=\"/posts/kbhpomcp/\"\u003ePOMCP\u003c/a\u003e, \u003ca href=\"/posts/kbhdespot/\"\u003eDESPOT\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMoar Online Methods\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhis_despot/\"\u003eIS-DESPOT\u003c/a\u003e, \u003ca href=\"/posts/kbhpomcpow/\"\u003ePOMCPOW\u003c/a\u003e, \u003ca href=\"/posts/kbhadaops/\"\u003eAdaOPS\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003ePOMDPish\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhmomdp/\"\u003eMOMDP\u003c/a\u003e, \u003ca href=\"/posts/kbhpomdp_lite/\"\u003ePOMDP-lite\u003c/a\u003e, \u003ca 
href=\"/posts/kbhrho_pomdps/\"\u003erho-POMDPs\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMemoryless + Policy Search\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsarsa_lambda/\"\u003eSarsa (Lambda)\u003c/a\u003e, \u003ca href=\"/posts/kbhjsj/\"\u003eJSJ\u003c/a\u003e, \u003ca href=\"/posts/kbhpegasus/\"\u003ePegasus\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eHierarchical Decomposition\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhoption/\"\u003eOption\u003c/a\u003e, \u003ca href=\"/posts/kbhmaxq/\"\u003eMaxQ\u003c/a\u003e, \u003ca href=\"/posts/kbhltrdp/\"\u003eLTRDP\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eHybrid Planning\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhhybplan/\"\u003eHybPlan\u003c/a\u003e, \u003ca href=\"/posts/kbhletsdrive/\"\u003eLetsDrive\u003c/a\u003e, \u003ca href=\"/posts/kbhbetazero/\"\u003eBetaZero\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLQR + Shared Autonomy\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhilqr/\"\u003eiLQR\u003c/a\u003e, \u003ca href=\"/posts/kbhhindsight_optimization/\"\u003eHindsight\u003c/a\u003e, \u003ca href=\"/posts/kbhtrustpomdp/\"\u003eTrustPOMDP\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eMulti-Agent\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhfactored_mdps/\"\u003eFactored MDPs\u003c/a\u003e, \u003ca href=\"/posts/kbhfv_pomcps/\"\u003eFV-POMCPs\u003c/a\u003e, \u003ca href=\"/posts/kbhg_dice/\"\u003eG-DICE\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"other-content\"\u003eOther Content\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhresearch_tips/\"\u003eResearch Tips\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstrips_style_planning/\"\u003eSTRIPS-style 
planning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtemperal_abstraction/\"\u003eTemperal Abstraction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_quadratic_regulator/\"\u003eLinear-Quadratic Regulator\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpomdps_index/","tags":null,"title":"POMDPs Index"},{"categories":null,"contents":" closed class words - words with fixed memberships (prepositions, conjunctivas, etc.); not being created or added much, used for grammatical function open class words - words that are set as content, and are focused on content ","html":"\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eclosed class\u003c/strong\u003e words - words with fixed memberships (prepositions, conjunctivas, etc.); not being created or added much, used for grammatical function\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eopen class\u003c/strong\u003e words - words that are set as content, and are focused on content\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpos_tagging/","tags":null,"title":"POS Tagging"},{"categories":null,"contents":"For some \\(a \\in \\mathbb{F}\\), we define \\(a^m\\) to be \\(a\\) multiplied with itself \\(m\\) times.\nadditional information \\((a^m)^n = a^{mn}\\) \\((ab)^m = a^mb^m\\) ","html":"\u003cp\u003eFor some \\(a \\in \\mathbb{F}\\), we define \\(a^m\\) to be \\(a\\) multiplied with itself \\(m\\) times.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\((a^m)^n = a^{mn}\\)\u003c/li\u003e\n\u003cli\u003e\\((ab)^m = a^mb^m\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_math/","tags":null,"title":"power (math)"},{"categories":null,"contents":"a power series centered at \\(a\\) is defined with \\(c_{n} \\in \\mathbb{R}\\), whereby:\n\\begin{equation} f(x) = \\sum_{n=0}^{\\infty} 
c_{n}(x-a)^{n} \\end{equation}\nmeaning it is written as \\(c_0 + c_1(x-a) + c_2(x-a)^{2} + c_3 (x-a)^{3} + \\cdots\\)\nradius of convergence there is a radius of convergence \\(R \\geq 0\\) for any power series, possibly infinite, by which the series is absolutely convergent where \\(|x-a| \u0026lt; R\\), and it does not converge when \\(|x-a| \u0026gt; R\\) , the case where \\(|x-a| = R\\) is uncertain ratio test: if all coefficients \\(c_{n}\\) are nonzero, and some \\(\\lim_{n \\to \\infty} \\left| \\frac{c_{n}}{c_{n+1}} \\right|\\) evaluates to some \\(c\\) \u0026mdash; if \\(c\\) is positive or \\(+\\infty\\), then that limit is equivalent to the radius of convergence Taylor\u0026rsquo;s Formula: a power series \\(f(x)\\) can be differentiated, integrated on the bounds of \\((a-R, a+R)\\), the derivatives and integrals will have radius of convergence \\(R\\) and \\(c_{n} = \\frac{f^{(n)}(a)}{n!}\\) to construct the series linear combinations of power series When \\(\\sum_{n=0}^{\\infty} a_{n}\\) and \\(\\sum_{n=0}^{\\infty} b_{n}\\) are both convergent, linear combinations of them can be described in the usual fashion:\n\\begin{equation} c_1 \\sum_{n=0}^{\\infty} a_{n}+ c_2 \\sum_{n=0}^{\\infty} b_{n} = \\sum_{n=0}^{\\infty} c_1 a_{n} + c_2 b_{n} \\end{equation}\nsome power series geometric series \\begin{equation} 1 + r + r^{2} + r^{3} + \\dots = \\sum_{n=0}^{\\infty} r^{n} = \\frac{1}{1-r} \\end{equation}\nwhich converges \\(-1 \u0026lt; r \u0026lt; 1\\), and diverges otherwise.\nexponential series \\begin{equation} 1 + x + \\frac{x^{2}}{2!} + \\frac{x^{3}}{3!} + \\dots = \\sum_{n=0}^{\\infty} \\frac{x^{n}}{n!} = e^{x} \\end{equation}\nwhich converges for all \\(x \\in \\mathbb{R}\\).\nabsolutely convergent If:\n\\begin{equation} \\sum_{n=0}^{\\infty} |a_{n}| \\end{equation}\nconverges, then:\n\\begin{equation} \\sum_{n=0}^{\\infty} a_{n} \\end{equation}\nalso converges.\nThis situation is called absolutely convergent.\n","html":"\u003cp\u003ea \u003ca 
href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e centered at \\(a\\) is defined with \\(c_{n} \\in \\mathbb{R}\\), whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{n=0}^{\\infty} c_{n}(x-a)^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning it is written as \\(c_0 + c_1(x-a) + c_2(x-a)^{2} + c_3 (x-a)^{3} + \\cdots\\)\u003c/p\u003e\n\u003ch2 id=\"radius-of-convergence\"\u003eradius of convergence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethere is a \u003ca href=\"#radius-of-convergence\"\u003eradius of convergence\u003c/a\u003e \\(R \\geq 0\\) for any \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e, possibly infinite, by which the series is absolutely convergent where \\(|x-a| \u0026lt; R\\), and it does not converge when \\(|x-a| \u0026gt; R\\) , the case where \\(|x-a| = R\\) is uncertain\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#radius-of-convergence\"\u003eratio test\u003c/a\u003e: if all coefficients \\(c_{n}\\) are nonzero, and some \\(\\lim_{n \\to \\infty} \\left| \\frac{c_{n}}{c_{n+1}} \\right|\\) evaluates to some \\(c\\) \u0026mdash; if \\(c\\) is positive or \\(+\\infty\\), then that limit is equivalent to the radius of convergence\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#radius-of-convergence\"\u003eTaylor\u0026rsquo;s Formula\u003c/a\u003e: a power series \\(f(x)\\) can be differentiated, integrated on the bounds of \\((a-R, a+R)\\), the derivatives and integrals will have radius of convergence \\(R\\) and \\(c_{n} = \\frac{f^{(n)}(a)}{n!}\\) to construct the series\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"linear-combination--kbhlinear-combination-dot-md--s-of-power-series--kbhpower-series-o-dot-md\"\u003e\u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es of \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eWhen \\(\\sum_{n=0}^{\\infty} a_{n}\\) and \\(\\sum_{n=0}^{\\infty} 
b_{n}\\) are \u003cstrong\u003eboth convergent\u003c/strong\u003e, linear combinations of them can be described in the usual fashion:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 \\sum_{n=0}^{\\infty} a_{n}+ c_2 \\sum_{n=0}^{\\infty} b_{n} = \\sum_{n=0}^{\\infty} c_1 a_{n} + c_2 b_{n}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"some-power-series--kbhpower-series-o-dot-md\"\u003esome \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e\u003c/h2\u003e\n\u003ch3 id=\"geometric-series\"\u003egeometric series\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n1 + r + r^{2} + r^{3} + \\dots = \\sum_{n=0}^{\\infty} r^{n} = \\frac{1}{1-r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich converges \\(-1 \u0026lt; r \u0026lt; 1\\), and diverges otherwise.\u003c/p\u003e\n\u003ch3 id=\"exponential-series\"\u003eexponential series\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n1 + x + \\frac{x^{2}}{2!} + \\frac{x^{3}}{3!} + \\dots = \\sum_{n=0}^{\\infty} \\frac{x^{n}}{n!} = e^{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich converges for all \\(x \\in \\mathbb{R}\\).\u003c/p\u003e\n\u003ch2 id=\"absolutely-convergent\"\u003eabsolutely convergent\u003c/h2\u003e\n\u003cp\u003eIf:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} |a_{n}|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003econverges, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} a_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ealso converges.\u003c/p\u003e\n\u003cp\u003eThis situation is called \u003ca href=\"#absolutely-convergent\"\u003eabsolutely convergent\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_series_o/","tags":null,"title":"power series"},{"categories":null,"contents":"We can now use power series to also solve differential equations.\n\\begin{equation} \\dv{x}{t} = 0; x(0)=1 \\end{equation}\nWe wish to have a power-series solution of shape:\n\\begin{equation} x(t) = \\sum_{k=0}^{\\infty 
}a_{k}t^{k} \\end{equation}\nWe want to find the coefficients \\(a_{k}\\). If you can find such a function that fits this form, they both 1) converge and 20 behave the same way as \\(e^{x}\\) does in Simple Differential Equations.\nanalytic functions Functions which can be described with a power series are called analytic functions.\n","html":"\u003cp\u003eWe can now use \u003ca href=\"/posts/kbhpower_series/\"\u003epower series\u003c/a\u003e to also solve \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = 0; x(0)=1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe wish to have a power-series solution of shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = \\sum_{k=0}^{\\infty }a_{k}t^{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to find the coefficients \\(a_{k}\\). If you can find such a function that fits this form, they both 1) converge and 20 behave the same way as \\(e^{x}\\) does in \u003ca href=\"/posts/kbhsimple_differential_equations/\"\u003eSimple Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"analytic-functions\"\u003eanalytic functions\u003c/h2\u003e\n\u003cp\u003eFunctions which can be described with a \u003ca href=\"/posts/kbhpower_series/\"\u003epower series\u003c/a\u003e are called \u003ca href=\"#analytic-functions\"\u003eanalytic functions\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_series/","tags":null,"title":"power series to solve differential equations"},{"categories":null,"contents":"power utility, or isoelastic utility, is a financial econometric is a utility that results absolute, constant relative risk aversion. 
i.e.: you tell me how risk averse you are exogenously, I tell you how much utility some consumption is.\nconstituents some relative risk coefficient \\(\\gamma \\in (0,1)\\), higher more risk averse consumption of some asset \\(C\\) requirements Utility \\(U( C)\\) is defined by:\n\\begin{equation} U( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma} \\end{equation}\nadditional information As you can see, the higher \\(\\gamma\\), the lower utility some consumption brings.\nlog utility log utility is a special case of power utility whereby:\n\\begin{equation} U(x) = \\log x \\end{equation}\nwhich converges to power utility where \\(\\lambda \\to 1\\).\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e, or \u003ca href=\"/posts/kbhpower_utility/\"\u003eisoelastic utility\u003c/a\u003e, is a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003efinancial econometric\u003c/a\u003e is a utility that results absolute, constant relative risk aversion. i.e.: you tell me how risk averse you are exogenously, I tell you how much utility some consumption is.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esome relative risk coefficient \\(\\gamma \\in (0,1)\\), higher more risk averse\u003c/li\u003e\n\u003cli\u003econsumption of some asset \\(C\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eUtility \\(U( C)\\) is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU( C) = \\frac{c^{1-\\gamma}-1}{1-\\gamma}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eAs you can see, the higher \\(\\gamma\\), the lower utility some consumption brings.\u003c/p\u003e\n\u003ch3 id=\"log-utility\"\u003elog utility\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#log-utility\"\u003elog utility\u003c/a\u003e is a special case of \u003ca 
href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(x) = \\log x\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich converges to \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e where \\(\\lambda \\to 1\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpower_utility/","tags":null,"title":"power utility"},{"categories":null,"contents":"We use interrupts to implement preemption, \u0026ldquo;preempting\u0026rdquo; threads in order to swap on another thread to CPU. This enables scheduling to happen.\npreempting into a brand new thread IMPORTANT: because interrupts are disabled at the beginning of the interrupt handler, and re-enabled by the end, new threads (which starts not at the interrupt handle) will not re-enable interrupts.\nvoid interrupt_handler() { /* disables interupts, automatically by timer handler */ // future spawns start here context_switch(...); /* enables interupts, automatically by timer handler */ } void threadfunc_wrapper() { // manually enable interrupts before first run intr_enable(true); // start thread\u0026#39;s actual business threadfunc(); } ","html":"\u003cp\u003eWe use \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003es to implement \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e, \u0026ldquo;\u003ca href=\"/posts/kbhpreemption/\"\u003epreempting\u003c/a\u003e\u0026rdquo; threads in order to swap on another thread to CPU. 
This enables \u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e to happen.\u003c/p\u003e\n\u003ch2 id=\"preempting-into-a-brand-new-thread\"\u003epreempting into a brand new thread\u003c/h2\u003e\n\u003cp\u003eIMPORTANT: because \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003es are disabled at the beginning of the interrupt handler, and re-enabled by the \u003cstrong\u003eend\u003c/strong\u003e, new threads (which starts not at the interrupt handle) will not re-enable interrupts.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003einterrupt_handler\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e/* disables interupts, automatically by timer handler */\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// future spawns start here\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econtext_switch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e/* enables interupts, automatically by timer handler */\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ethreadfunc_wrapper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// manually enable interrupts before first run\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// start thread\u0026#39;s actual business\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreadfunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhpreemption/","tags":null,"title":"preemption"},{"categories":null,"contents":"Problems of pre-training data pre-training influence downstream capabilities \u0026hellip;and therefore can escape into model generation real world users expect novelty Changes in Distribution Big Pretraining Data GPT2 deduplicated data Removed Wikipedia (to prevent data leak) Heuristic based cleaning GPT3 Deduplicated based on leaked data Llama the usual spheal\nremoved high perplexity data using wiki n-gram model removed non-English deduplicated Llama 2 removed high volue of PII Removed non-english Pretraining Curation Decisions what to include what is the timestamp being scraped heuristic based cleaning? data cleaning? etc. language filtering (only take English?) PII removal dedup Toxicity + SafeURL filtering \u0026ldquo;quality filtering\u0026rdquo; sampling distributions Change in Model Age Good alignment shown between validation year and pre-training year, even mixing in older data.\nImplication: \u0026ldquo;fine-tuned T5 may still be worse than fine-tuned llama, because T5 was pretrained using older data\u0026mdash;despite even if FTing is newer\u0026rdquo;\nChange in Toxicity Filtering toxicity made the model worst at spotting toxicity.\nChange in Data Distribution out of domain answers do worse on out of domain results\nReduce Memorization de-duplication using approximate matching think carefully for multiple-epoch training (what is ok to memorize?) 
remove sensitive memorization from pre-training data Two iffy strategies:\nCheck for memorization Trivial style transfers can get around safety checks \u0026ldquo;do the [copyrighted thing] in French\u0026rdquo;; \u0026ldquo;do the [copyrighted thing] with double the spaces\u0026rdquo;.\nUse RLHF or something \u0026ldquo;hide flaws, and not eliminate them\u0026rdquo;\u0026mdash;edge case problems doesn\u0026rsquo;t eliminate the underlying vulnerability.\n","html":"\u003ch2 id=\"problems-of-pre-training-data\"\u003eProblems of pre-training data\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003epre-training influence downstream capabilities\u003c/li\u003e\n\u003cli\u003e\u0026hellip;and therefore can escape into model generation\u003c/li\u003e\n\u003cli\u003ereal world users expect novelty\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"changes-in-distribution\"\u003eChanges in Distribution\u003c/h2\u003e\n\u003ch3 id=\"big-pretraining-data\"\u003eBig Pretraining Data\u003c/h3\u003e\n\u003ch4 id=\"gpt2\"\u003eGPT2\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ededuplicated data\u003c/li\u003e\n\u003cli\u003eRemoved Wikipedia (to prevent data leak)\u003c/li\u003e\n\u003cli\u003eHeuristic based cleaning\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"gpt3\"\u003eGPT3\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eDeduplicated\u003c/li\u003e\n\u003cli\u003ebased on leaked data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"llama\"\u003eLlama\u003c/h4\u003e\n\u003cp\u003ethe usual spheal\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eremoved high perplexity data using wiki n-gram model\u003c/li\u003e\n\u003cli\u003eremoved non-English\u003c/li\u003e\n\u003cli\u003ededuplicated\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"llama-2\"\u003eLlama 2\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eremoved high volue of PII\u003c/li\u003e\n\u003cli\u003eRemoved non-english\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pretraining-curation-decisions\"\u003ePretraining Curation 
Decisions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewhat to include\u003c/li\u003e\n\u003cli\u003ewhat is the timestamp being scraped\u003c/li\u003e\n\u003cli\u003eheuristic based cleaning? data cleaning? etc.\u003c/li\u003e\n\u003cli\u003elanguage filtering (only take English?)\u003c/li\u003e\n\u003cli\u003ePII removal\u003c/li\u003e\n\u003cli\u003ededup\u003c/li\u003e\n\u003cli\u003eToxicity + SafeURL filtering\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;quality filtering\u0026rdquo;\u003c/li\u003e\n\u003cli\u003esampling distributions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"change-in-model-age\"\u003eChange in Model Age\u003c/h3\u003e\n\u003cp\u003eGood alignment shown between validation year and pre-training year, even mixing in older data.\u003c/p\u003e\n\u003cp\u003eImplication: \u0026ldquo;fine-tuned T5 may still be worse than fine-tuned llama, because T5 was \u003cstrong\u003epretrained\u003c/strong\u003e using older data\u0026mdash;despite even if FTing is newer\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"change-in-toxicity\"\u003eChange in Toxicity\u003c/h3\u003e\n\u003cp\u003eFiltering toxicity made the model worst at spotting toxicity.\u003c/p\u003e\n\u003ch3 id=\"change-in-data-distribution\"\u003eChange in Data Distribution\u003c/h3\u003e\n\u003cp\u003eout of domain answers do worse on out of domain results\u003c/p\u003e\n\u003ch2 id=\"reduce-memorization\"\u003eReduce Memorization\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ede-duplication using \u003cstrong\u003eapproximate matching\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ethink carefully for multiple-epoch training (what is ok to memorize?)\u003c/li\u003e\n\u003cli\u003eremove sensitive memorization from pre-training data\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTwo iffy strategies:\u003c/p\u003e\n\u003ch3 id=\"check-for-memorization\"\u003eCheck for memorization\u003c/h3\u003e\n\u003cp\u003eTrivial style transfers can get around safety checks \u0026ldquo;do the [copyrighted 
thing] in French\u0026rdquo;; \u0026ldquo;do the [copyrighted thing] with double the spaces\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"use-rlhf-or-something\"\u003eUse RLHF or something\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;hide flaws, and not eliminate them\u0026rdquo;\u0026mdash;edge case problems doesn\u0026rsquo;t eliminate the underlying vulnerability.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpretraining_data/","tags":null,"title":"Pretraining Data"},{"categories":null,"contents":"Unfortunately, this note is not published online.\n","html":"\u003cp\u003eUnfortunately, this note is not published online.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpretraining_long_transformers/","tags":null,"title":"Pretraining Long Transformers"},{"categories":null,"contents":"The price\n","html":"\u003cp\u003eThe price\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprice/","tags":null,"title":"price"},{"categories":null,"contents":"An integer \\(p \u0026gt; 1\\) is prime if it has no positive divisors other than \\(1\\) and itself.\nNo even number, except \\(2\\), is prime. Because 2\nadditional information There are infinitely many primes Credit: Euler.\nProof:\nAssume to the contrary that there are finitely many primes. \\(p_1, \u0026hellip;, p_{n}\\). We desire to make a new prime to reach contradiction.\nConsider:\n\\begin{equation} N = p_1 \\times \\dots \\times p_{n} + 1 \\end{equation}\nNote that \\(p_1 \\times \u0026hellip; \\times p_{n}\\) is divisible by each of the \\(p_{j}\\). If some \\(p_i |N\\), \\(p_{i}|1\\), which is impossible as \\(1\\) is not divisible by anything. So, no \\(p_{i}\\) divides \\(N\\).\nIf \\(N\\) is now prime, we are done as it is not in the list of \\(p_{j}\\). If not, pick any prime divisor \\(p\\) of \\(N\\). We will note that given no \\(p_{j}\\) divides \\(N\\), therefore any prime divisor is a new prime.\nHaving made a new prime, we reach contradiction. 
\\(\\blacksquare\\)\ncoprime Two integers \\(a, b\\) is considered coprime if \\(\\gcd (a,b) = 1\\). Therefore, because greatest common divisor is a linear combination\n","html":"\u003cp\u003eAn \u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e \\(p \u0026gt; 1\\) is \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e if it has no positive \u003ca href=\"/posts/kbhdivide/\"\u003edivisor\u003c/a\u003es other than \\(1\\) and itself.\u003c/p\u003e\n\u003cp\u003eNo even \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003e, except \\(2\\), is prime. Because 2\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"there-are-infinitely-many-primes\"\u003eThere are infinitely many primes\u003c/h3\u003e\n\u003cp\u003eCredit: Euler.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eAssume to the contrary that there are finitely many \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003es. \\(p_1, \u0026hellip;, p_{n}\\). We desire to make a new prime to reach contradiction.\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN = p_1 \\times \\dots \\times p_{n} + 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote that \\(p_1 \\times \u0026hellip; \\times p_{n}\\) is divisible by each of the \\(p_{j}\\). If some \\(p_i |N\\), \\(p_{i}|1\\), which is impossible as \\(1\\) is not divisible by anything. So, no \\(p_{i}\\) divides \\(N\\).\u003c/p\u003e\n\u003cp\u003eIf \\(N\\) is now prime, we are done as it is not in the list of \\(p_{j}\\). If not, pick any \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e divisor \\(p\\) of \\(N\\). We will note that given no \\(p_{j}\\) divides \\(N\\), therefore any \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e divisor is a new prime.\u003c/p\u003e\n\u003cp\u003eHaving made a new prime, we reach contradiction. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"coprime\"\u003ecoprime\u003c/h3\u003e\n\u003cp\u003eTwo integers \\(a, b\\) is considered \u003ca href=\"#coprime\"\u003ecoprime\u003c/a\u003e if \\(\\gcd (a,b) = 1\\). Therefore, because \u003ca href=\"/posts/kbhgreatest_common_divisor/#greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprime/","tags":null,"title":"prime"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhprime_factorization/","tags":null,"title":"prime factorization"},{"categories":null,"contents":"The principle of induction is a technique used to prove the relationship between a smaller subset\nThe following three statements are equivalent.\nstandard induction Suppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(n \\in S \\implies n+1 \\in S\\). Then, \\(S = \\mathbb{N}\\).\nstrong induction Suppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(\\{0, \\dots, n\\} \\in S \\implies n+1 \\in S\\). Then, \\(S = \\mathbb{N}\\).\nwell-ordering principle If \\(S \\subset \\mathbb{N}\\) is non empty, then it has a smallest element\nPROOF: assume well-ordering principle, prove standard induction Given \\(S \\in \\mathbb{N}\\), such that \\(0 \\in S\\), whenever \\(n \\in S\\), then \\(n+1\\) is also in \\(S\\). We desire that that \\(S\\) is the natural numbers.\nAssume for the sake of contradiction \\(S \\neq \\mathbb{N}\\). Define \\(T = \\mathbb{N} \\setminus S\\).\nAssume \\(T\\) is non-empty. The WOP tells us that \\(T\\) has a smallest element \\(t \\in T\\). We know that \\(t \\neq 0\\), because \\(0 \\in S\\). Therefore, \\(t-1 \\in \\mathbb{N}\\). 
But, \\(t-1 \u0026lt; t\\), which means that \\(t-1 \\in S\\). But by the statement of the givens, \\((t-1) \\in S \\implies (t-1)+1 = t \\in S\\). Reaching contradiction. \\(\\blacksquare\\)\nassuming strong induction, proof well-ordering principle Assume \\(S\\) has no smallest element. Create some \\(T = \\mathbb{N} \\setminus S\\). Now, \\(0 \\in T\\) because otherwise \\(0 \\in S\\) would be the smallest element. Now, consider \\(0, 1, \u0026hellip; n \\in T\\), we notice that \\(n+1\\) must be in \\(T\\) as well. By strong induction, we have that \\(T = \\mathbb{N}\\) and \\(S\\) is empty.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e is a technique used to prove the relationship between a smaller subset\u003c/p\u003e\n\u003cp\u003eThe following three statements are equivalent.\u003c/p\u003e\n\u003ch2 id=\"standard-induction--kbhprinciple-of-induction-dot-md\"\u003estandard \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003einduction\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(n \\in S \\implies n+1 \\in S\\). Then, \\(S = \\mathbb{N}\\).\u003c/p\u003e\n\u003ch2 id=\"strong-induction--kbhstrong-induction-dot-md\"\u003e\u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(S \\subset \\mathbb{N}\\), which is non-empty. If \\(S\\) is a non-empty subset such that \\(0 \\in S\\), and for all \\(n \\in \\mathbb{N}\\), \\(\\{0, \\dots, n\\} \\in S \\implies n+1 \\in S\\). 
Then, \\(S = \\mathbb{N}\\).\u003c/p\u003e\n\u003ch2 id=\"well-ordering-principle\"\u003ewell-ordering principle\u003c/h2\u003e\n\u003cp\u003eIf \\(S \\subset \\mathbb{N}\\) is non empty, then it has a smallest element\u003c/p\u003e\n\u003ch2 id=\"proof\"\u003ePROOF:\u003c/h2\u003e\n\u003ch3 id=\"assume-well-ordering-principle--org0837e14--prove-standard-induction\"\u003eassume \u003ca href=\"#well-ordering-principle\"\u003ewell-ordering principle\u003c/a\u003e, prove standard induction\u003c/h3\u003e\n\u003cp\u003eGiven \\(S \\in \\mathbb{N}\\), such that \\(0 \\in S\\), whenever \\(n \\in S\\), then \\(n+1\\) is also in \\(S\\). We desire that that \\(S\\) is the \u003ca href=\"/posts/kbhnatural_numbers/\"\u003enatural number\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction \\(S \\neq \\mathbb{N}\\). Define \\(T = \\mathbb{N} \\setminus S\\).\u003c/p\u003e\n\u003cp\u003eAssume \\(T\\) is non-empty. The \u003ca href=\"#well-ordering-principle\"\u003eWOP\u003c/a\u003e tells us that \\(T\\) has a smallest element \\(t \\in T\\). We know that \\(t \\neq 0\\), because \\(0 \\in S\\). Therefore, \\(t-1 \\in \\mathbb{N}\\). But, \\(t-1 \u0026lt; t\\), which means that \\(t-1 \\in S\\). But by the statement of the givens, \\((t-1) \\in S \\implies (t-1)+1 = t \\in S\\). Reaching contradiction. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"assuming-strong-induction--kbhstrong-induction-dot-md--proof-well-ordering-principle--org0837e14\"\u003eassuming \u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e, proof \u003ca href=\"#well-ordering-principle\"\u003ewell-ordering principle\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eAssume \\(S\\) has no smallest element. Create some \\(T = \\mathbb{N} \\setminus S\\). Now, \\(0 \\in T\\) because otherwise \\(0 \\in S\\) would be the smallest element. Now, consider \\(0, 1, \u0026hellip; n \\in T\\), we notice that \\(n+1\\) must be in \\(T\\) as well. 
By \u003ca href=\"/posts/kbhstrong_induction/\"\u003estrong induction\u003c/a\u003e, we have that \\(T = \\mathbb{N}\\) and \\(S\\) is empty.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprinciple_of_induction/","tags":null,"title":"principle of induction"},{"categories":null,"contents":"printf(\u0026#34;text %s\\n\u0026#34;, formatting, text, here); %s (string) %d (integer) %f (double) ","html":"\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c\" data-lang=\"c\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;text %s\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eformatting\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etext\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ehere\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003e\u003ccode\u003e%s\u003c/code\u003e (string)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e%d\u003c/code\u003e (integer)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e%f\u003c/code\u003e (double)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhc_basic_operations/","tags":null,"title":"printf"},{"categories":null,"contents":"\u0026ldquo;privacy as an individual right\u0026rdquo;\nprivacy is a control of information: controlling our private 
information shared with others free choice with alternatives and informed understanding of what\u0026rsquo;s offered control over personal data collection and aggregation privacy as autonomy: your agency to decide for what\u0026rsquo;s valuable autonomy over our own lives, and our ability to lead them do you have agency? \u0026ldquo;privacy as a social group\u0026rdquo;\nprivacy as social good: social life would be severely compromised without privacy privacy allows social privacy as a display of trust: privacy enables trusting relationships \u0026ldquo;fiduciary\u0026rdquo;: proxy between you and a company \u0026ldquo;should anyone who has access to personal info have a fiduciary responsibility?\u0026rdquo; key trust questions who/what do we trust? what do we do if trust isn\u0026rsquo;t upheald? how to approach building trust trust trust: to stop questioning the responsibility of something\nintentions dependence extensions of agency We mostly don\u0026rsquo;t trust software; instead, we trust the people that developed the software.\naccountability a lot of people who are accountable in this chain:\nhardware designer (intel) OS developer (iOS, ec.) app developer users stakeholder direct stakeholders (people who are operating, technicians, etc.) 
indirect stakeholders: patients purchase = long-term support \u0026mdash;- what do you do to get it fixed/repaired.\ntime support duration obsolescence (how long is the end of support) products/services may not be garanteed forever problems with halting use\u0026mdash;requires deleting entire pentagram account meltdown vulnerability meltdown: hardware vulnerability that allows an user program to access kernel level pages of system memory.\npotential ways of fixing a vulnerability/violation of trust:\nhttps://www.cs.cmu.edu/~rdriley/487/papers/Thompson_1984_ReflectionsonTrustingTrust.pdf\nloss of privacy aggregation Through the loss of privacy, information can be piecemeal built up to understand somebody\u0026rsquo;s profile.\nexclusion Not knowing or understanding or control how our information being used.\nsecondary use Using information for purposes not intended without permission.\ntrust trust exposes people to the risk of being betrayed/let down. Differential privacy is used to anonomyze information. 
especially, for operation systems, each bug can have a massive impact because it impacts billions of users.\n\u0026ldquo;trust means to stop questioning the dependability of something; you become vulnerable to it\u0026rdquo;\ntrusting software is the task of extending your own AGENCY to a piece of software: \u0026ldquo;agential gullibility\u0026rdquo;.\nexamples:\nios bug: alams didn\u0026rsquo;t go off printnightmare: printing caused remote code execution 2017 admin access without password eternalblue (caused wannacry) key points trust between different stakeholders are intertwined trust is about extending agency trust emerges through various pathways we can design ways to partially substitute the need for trust pathways to trust trust by assumption trust absent any clues to warrent it due to timing trust because there is imminent danger trust by inference trust based on information you had before brands affiliation past performance trust in prior version of software trust by substitution trust something, but having a fallback plan trust a system because there would be a backup system protecting you scales of trust scale of impact a bug in an OS can be tremendously bad \u0026ldquo;root access\u0026rdquo; \u0026mdash; privileged aces scale of longevity people maybe on very very old OS it requires keeping older OSes secure against modern technologies ","html":"\u003cp\u003e\u0026ldquo;privacy as an individual right\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e is a control of information: controlling our private information shared with others\n\u003cul\u003e\n\u003cli\u003efree choice with alternatives and informed understanding of what\u0026rsquo;s offered\u003c/li\u003e\n\u003cli\u003econtrol over personal data collection and aggregation\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e as autonomy: your agency to 
decide for what\u0026rsquo;s valuable\n\u003cul\u003e\n\u003cli\u003eautonomy over our own lives, and our ability to lead them\u003c/li\u003e\n\u003cli\u003edo you have agency?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;privacy as a social group\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e as social good: social life would be severely compromised without privacy\n\u003cul\u003e\n\u003cli\u003eprivacy allows social\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e as a display of trust: privacy enables trusting relationships\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;fiduciary\u0026rdquo;: proxy between you and a company\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;should anyone who has access to personal info have a fiduciary responsibility?\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-trust-questions\"\u003ekey trust questions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewho/what do we trust?\u003c/li\u003e\n\u003cli\u003ewhat do we do if trust isn\u0026rsquo;t upheald?\u003c/li\u003e\n\u003cli\u003ehow to approach building trust\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"trust\"\u003etrust\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003etrust\u003c/strong\u003e: to stop questioning the responsibility of something\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eintentions\u003c/li\u003e\n\u003cli\u003edependence\u003c/li\u003e\n\u003cli\u003eextensions of agency\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe mostly don\u0026rsquo;t trust software; instead, we trust the people that developed the software.\u003c/p\u003e\n\u003ch2 id=\"accountability\"\u003eaccountability\u003c/h2\u003e\n\u003cp\u003ea lot of people who are accountable in this chain:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehardware designer 
(intel)\u003c/li\u003e\n\u003cli\u003eOS developer (iOS, ec.)\u003c/li\u003e\n\u003cli\u003eapp developer\u003c/li\u003e\n\u003cli\u003eusers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stakeholder\"\u003estakeholder\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003edirect stakeholders\u003c/strong\u003e (people who are operating, technicians, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eindirect stakeholders\u003c/strong\u003e: patients\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003epurchase = long-term support \u0026mdash;- what do you do to get it fixed/repaired.\u003c/p\u003e\n\u003ch2 id=\"time\"\u003etime\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esupport duration\u003c/li\u003e\n\u003cli\u003eobsolescence (how long is the end of support)\n\u003cul\u003e\n\u003cli\u003eproducts/services may not be garanteed forever\u003c/li\u003e\n\u003cli\u003eproblems with halting use\u0026mdash;requires deleting entire pentagram account\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"meltdown-vulnerability\"\u003emeltdown vulnerability\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003emeltdown\u003c/strong\u003e: hardware vulnerability that allows an user program to access kernel level pages of system memory.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003epotential ways of fixing a vulnerability/violation of trust\u003c/strong\u003e:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.cs.cmu.edu/~rdriley/487/papers/Thompson_1984_ReflectionsonTrustingTrust.pdf\"\u003ehttps://www.cs.cmu.edu/~rdriley/487/papers/Thompson_1984_ReflectionsonTrustingTrust.pdf\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"loss-of-privacy\"\u003eloss of privacy\u003c/h2\u003e\n\u003ch3 id=\"aggregation\"\u003eaggregation\u003c/h3\u003e\n\u003cp\u003eThrough the loss of privacy, information can be piecemeal built up to understand somebody\u0026rsquo;s profile.\u003c/p\u003e\n\u003ch3 
id=\"exclusion\"\u003eexclusion\u003c/h3\u003e\n\u003cp\u003eNot knowing or understanding or control how our information being used.\u003c/p\u003e\n\u003ch3 id=\"secondary-use\"\u003esecondary use\u003c/h3\u003e\n\u003cp\u003eUsing information for purposes not intended without permission.\u003c/p\u003e\n\u003ch2 id=\"trust\"\u003etrust\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#trust\"\u003etrust\u003c/a\u003e exposes people to the risk of being betrayed/let down. Differential privacy is used to anonomyze information. especially, for operation systems, each bug can have a massive impact because it impacts billions of users.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"#trust\"\u003etrust\u003c/a\u003e means to stop questioning the dependability of something; you become vulnerable to it\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#trust\"\u003etrust\u003c/a\u003eing software is the task of extending your own \u003cstrong\u003eAGENCY\u003c/strong\u003e to a piece of software: \u0026ldquo;agential gullibility\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eexamples:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eios bug: alams didn\u0026rsquo;t go off\u003c/li\u003e\n\u003cli\u003eprintnightmare: printing caused remote code execution\u003c/li\u003e\n\u003cli\u003e2017 admin access without password\u003c/li\u003e\n\u003cli\u003eeternalblue (caused wannacry)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"key-points\"\u003ekey points\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003etrust between different stakeholders are intertwined\u003c/li\u003e\n\u003cli\u003etrust is about extending agency\u003c/li\u003e\n\u003cli\u003etrust emerges through \u003ca href=\"#pathways-to-trust\"\u003evarious pathways\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe can design ways to partially substitute the need for trust\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"pathways-to-trust\"\u003epathways to trust\u003c/h3\u003e\n\u003ch4 id=\"trust-by-assumption\"\u003etrust by 
assumption\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003etrust absent any clues to warrent it due to timing\u003c/li\u003e\n\u003cli\u003etrust because there is imminent danger\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"trust-by-inference\"\u003etrust by inference\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003etrust based on information you had before\n\u003cul\u003e\n\u003cli\u003ebrands\u003c/li\u003e\n\u003cli\u003eaffiliation\u003c/li\u003e\n\u003cli\u003epast performance\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003etrust in prior version of software\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"trust-by-substitution\"\u003etrust by substitution\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003etrust something, but having a fallback plan\u003c/li\u003e\n\u003cli\u003etrust a system because there would be a backup system protecting you\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"scales-of-trust\"\u003escales of trust\u003c/h3\u003e\n\u003ch4 id=\"scale-of-impact\"\u003escale of impact\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ea bug in an OS can be tremendously bad\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;root access\u0026rdquo; \u0026mdash; privileged aces\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"scale-of-longevity\"\u003escale of longevity\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003epeople maybe on very very old OS\u003c/li\u003e\n\u003cli\u003eit requires keeping older OSes secure against modern technologies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprivacy/","tags":null,"title":"privacy"},{"categories":null,"contents":"probability of an event is the proportion of times the event occurs in many repeated trials. It is \u0026ldquo;our belief that an event \\(E\\) occurs\u0026rdquo;.\nFrequentist Definition of Probability That is, it is a number between \\(0-1\\). 
Whereby:\n\\begin{equation} P(E) = \\lim_{n \\to \\infty} \\frac{n(E)}{n} \\end{equation}\n\u0026ldquo;frequentist definition of probability\u0026rdquo;\nprobability is the ratio between the number of times \\(E\\) occurring \\(n(E)\\) divided by the number of times you did the thing \\(n\\). This system converge because of the law of large numbers.\nuncertainty and probability Say you are training some kind of model. When it says \\(0.8\\) for motorcycle, its not that there are \\(80\\%\\) chance that there\u0026rsquo;s a motorcycle there. Its that the model is \\(80\\%\\) confident that there\u0026rsquo;s a motorcycle.\nProbability can not only represent the world, but our understanding of the world\naxiom of probability \\(0 \\leq P(E) \\leq 1\\) \\(P(S) = 1\\), where \\(S\\) is the sample space if \\(E\\) and \\(F\\) are mutually exclusive, \\(P(E) + P(F) = P(E \\cup F)\\) This last axiom can be chained\nThis results in three correlaries:\n\\(P(E^{C}) = 1- P(E)\\) Proof: We know that \\(E^{C}, E\\) are mutually exclusive.\n\\begin{equation} P(E^{C} \\cup E) = P(E) + P(E^{C}) \\end{equation}\nNow, recall the fact that something happening OR not happening is \\(1\\).\nSo we have:\n\\(P(E \\cup F) = P(E) + P(F) - P(E \\cap F)\\) if \\(E \\subset F\\), \\(P(E) \\leq P(F)\\) conditional probability \u0026ldquo;What is the new belief that something \\(E\\) happened, conditioned upon the fact that we know that \\(F\\) already happened.\u0026rdquo;\nWritten as: \\(P(E|F)\\).\nFurthermore, we have:\n\\begin{equation} P (X, Y) = P(X\\mid Y) \\cdot P(Y) \\end{equation}\nIn this case, we call \\(Y\\) the \u0026ldquo;evidence\u0026rdquo;. 
this allows us to find \u0026ldquo;what is the chance of \\(x\\) given \\(y\\)\u0026rdquo;.\nWe can continue this to develop the probability chain rule:\n\\begin{equation} P(A_1, A_2 \\dots, A_{n}) = P(A_{n} \\mid A_1, A_2 \\dots A_{n-1})P(A_1, A_2 \\dots A_{n-1}) \\end{equation}\nand so:\n\\begin{equation} P(E_1) \\cdot P(E_2 | E_1) \\cdot E(E_3 | E_1E_2) \\cdot P(E_4 | E_1E_2E_3) \\cdot \\dots \\cdot \\end{equation}\nand so on.\nIf you are performing the chain rule on something that\u0026rsquo;s already conditioned:\n\\begin{equation} P(X,Y|A) \\end{equation}\nyou can break it up just remembering that \\(A\\) needs to be preserved as a condition, so:\n\\begin{equation} P(X,Y|A) = P(X|Y,A) P(Y|A) \\end{equation}\nNow:\n\\begin{equation} \\sum_{x}^{} p(x \\mid y) = 1 \\end{equation}\nbecause this is still a probability over \\(x\\).\nlaw of total probability say you have two variables \\(x, y\\).\n\u0026ldquo;what\u0026rsquo;s the probablity of \\(x\\)\u0026rdquo;\n\\begin{equation} P(x) = \\sum_{Y} P(x,y) \\end{equation}\na.k.a.:\n\\begin{equation} p(x) = p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n} \\end{equation}\nby applying conditional probability formula upon each term\nThis is because:\n\\begin{align} p(x) \u0026amp;= p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n} \\\\ \u0026amp;= p(x, y_1) + \\dots + p(x, y_{n}) \\end{align}\nIf its not conditional, it holds too:\n\\begin{equation} p(AB^{C}) + p(AB) \\end{equation}\nBayes rule See: Bayes Theorem\nindependence If \\(X\\) and \\(Y\\) are independent (written as \\(X \\perp Y\\)), we know that \\(P(x,y) = P(x)P(y)\\) for all \\(x, y\\).\nFormally:\n\\begin{equation} P(A) = P(A|B) \\end{equation}\nif \\(A\\) and \\(B\\) is independent. That is, \\(P(AB) = P(A) \\cdot P(B)\\). You can check either of these statements (the latter is usually easier).\nIndependence is bidirectional. If \\(A\\) is independent of \\(B\\), then \\(B\\) is independent of \\(A\\). 
To show this, invoke the Bayes Theorem.\nThis is generalized:\n\\begin{equation} P(x_1, \\dots, x_n) = P(x_1) \\dots p(x_{n}) \\end{equation}\nand this tells us that subset of \\(x_{j}\\) is independent against each other.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of an event is the proportion of times the event occurs in many repeated trials. It is \u0026ldquo;our belief that an event \\(E\\) occurs\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/h2\u003e\n\u003cp\u003eThat is, it is a number between \\(0-1\\). Whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E) = \\lim_{n \\to \\infty} \\frac{n(E)}{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;frequentist definition of probability\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eprobability is the ratio between the number of times \\(E\\) occurring \\(n(E)\\) divided by the number of times you did the thing \\(n\\). This system converge because of the \u003ca href=\"/posts/kbhlaw_of_large_numbers/\"\u003elaw of large numbers\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"uncertainty--kbhuncertainty-dot-md--and-probability--kbhprobability-dot-md\"\u003e\u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e and \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSay you are training some kind of model. When it says \\(0.8\\) for motorcycle, its not that there are \\(80\\%\\) chance that there\u0026rsquo;s a motorcycle there. 
Its that the model is \\(80\\%\\) confident that there\u0026rsquo;s a motorcycle.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eProbability can not only represent the world, but our understanding of the world\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"axiom-of-probability\"\u003eaxiom of probability\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0 \\leq P(E) \\leq 1\\)\u003c/li\u003e\n\u003cli\u003e\\(P(S) = 1\\), where \\(S\\) is the \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eif \\(E\\) and \\(F\\) are mutually exclusive, \\(P(E) + P(F) = P(E \\cup F)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis last axiom can be chained\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThis results in three correlaries:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P(E^{C}) = 1- P(E)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eProof:\nWe know that \\(E^{C}, E\\) are mutually exclusive.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E^{C} \\cup E) = P(E) + P(E^{C})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall the fact that something happening OR not happening is \\(1\\).\u003c/p\u003e\n\u003cp\u003eSo we have:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(P(E \\cup F) = P(E) + P(F) - P(E \\cap F)\\)\u003c/li\u003e\n\u003cli\u003eif \\(E \\subset F\\), \\(P(E) \\leq P(F)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"conditional-probability\"\u003econditional probability\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;What is the new belief that something \\(E\\) happened, conditioned upon the fact that we know that \\(F\\) already happened.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWritten as: \\(P(E|F)\\).\u003c/p\u003e\n\u003cp\u003eFurthermore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP (X, Y) = P(X\\mid Y) \\cdot P(Y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, we call \\(Y\\) the \u0026ldquo;evidence\u0026rdquo;. 
this allows us to find \u0026ldquo;what is the chance of \\(x\\) given \\(y\\)\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eWe can continue this to develop the \u003ca href=\"#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A_1, A_2 \\dots, A_{n}) = P(A_{n} \\mid A_1, A_2 \\dots A_{n-1})P(A_1, A_2 \\dots A_{n-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E_1) \\cdot P(E_2 | E_1) \\cdot E(E_3 | E_1E_2) \\cdot P(E_4 | E_1E_2E_3) \\cdot \\dots \\cdot\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so on.\u003c/p\u003e\n\u003cp\u003eIf you are performing the chain rule on something that\u0026rsquo;s already conditioned:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X,Y|A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou can break it up just remembering that \\(A\\) needs to be preserved as a condition, so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X,Y|A) = P(X|Y,A) P(Y|A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{x}^{} p(x \\mid y) = 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause this is \u003cstrong\u003estill\u003c/strong\u003e a probability over \\(x\\).\u003c/p\u003e\n\u003ch2 id=\"law-of-total-probability\"\u003elaw of total probability\u003c/h2\u003e\n\u003cp\u003esay you have two variables \\(x, y\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probablity of \\(x\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x) = \\sum_{Y} P(x,y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ea.k.a.:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x) = p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby applying \u003ca href=\"#conditional-probability\"\u003econditional probability\u003c/a\u003e formula upon each term\u003c/p\u003e\n\u003cp\u003eThis is 
because:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\np(x) \u0026amp;= p(x|y_1)p(y_1) + \\dots + p(x|y_{n})y_{n} \\\\\n\u0026amp;= p(x, y_1) + \\dots + p(x, y_{n})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eIf its not conditional, it holds too:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(AB^{C}) + p(AB)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"bayes-rule\"\u003eBayes rule\u003c/h2\u003e\n\u003cp\u003eSee: \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"independence\"\u003eindependence\u003c/h2\u003e\n\u003cp\u003eIf \\(X\\) and \\(Y\\) are independent (written as \\(X \\perp Y\\)), we know that \\(P(x,y) = P(x)P(y)\\) for all \\(x, y\\).\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(A) = P(A|B)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(A\\) and \\(B\\) is \u003ca href=\"#independence\"\u003eindependent\u003c/a\u003e. That is, \\(P(AB) = P(A) \\cdot P(B)\\). You can check either of these statements (the latter is usually easier).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#independence\"\u003eIndependence\u003c/a\u003e is bidirectional. If \\(A\\) is independent of \\(B\\), then \\(B\\) is independent of \\(A\\). To show this, invoke the \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThis is generalized:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x_1, \\dots, x_n) = P(x_1) \\dots p(x_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand this tells us that subset of \\(x_{j}\\) is independent against each other.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability/","tags":null,"title":"probability"},{"categories":null,"contents":"probability distributions \u0026ldquo;assigns probability to outcomes\u0026rdquo;\n\\(X\\) follows distribution \\(D\\). 
\\(X\\) is a \u0026ldquo;\\(D\\) random variable\u0026rdquo;, where \\(D\\) is some distribution (normal, gaussian, etc.)\nsyntax: \\(X \\sim D\\).\nEach distribution has three properties:\nvariables (what is being modeled) values (what values can they take on) parameters (how many degrees of freedom do we have) Methods of Compressing the Parameters of a Distribution So, for instance, for a binary distribution with \\(n\\) variables which we know nothing about, we have:\n\\begin{equation} 2^{n} - 1 \\end{equation}\nparameters (\\(2^{n}\\) different possibilities of combinations, and \\(1\\) non-free variables to ensure that the distribution add up)\nassuming independence HOWEVER, if the variables were independent, this becomes much easier. Because the variables are independent, we can claim that:\n\\begin{equation} p(x_{1\\dots n}) = \\prod_{i}^{} p(x_{i)) \\end{equation}\ndecision tree For instance, you can have a decision tree which you selectively ignore some combinations.\nIn this case, we ignored \\(z\\) if both \\(x\\) and \\(y\\) are \\(0\\).\nBaysian networks see Baysian Network\ntypes of probability distributions discrete distribution continuous distribution joint probability distribution distribution of note uniform distribution gaussian distributions Gaussian distribution Truncated Gaussian distribution Gaussian mixture model uniform distribution \\begin{equation} X \\sim Uni(\\alpha, \\beta) \\end{equation}\n\\begin{equation} f(x) = \\begin{cases} \\frac{1}{\\beta -\\alpha }, 0\\leq x \\leq 10 \\\\0 \\end{cases} \\end{equation}\n\\begin{equation} E[x] = \\frac{1}{2}(\\alpha +\\beta) \\end{equation}\n\\begin{equation} Var(X) = \\frac{1}{12}(\\beta -\\alpha )^{2} \\end{equation}\nGaussian Things Truncated Gaussian distribution Sometimes, we don\u0026rsquo;t want to use a Gaussian distribution for values above or below a threshold (say if they are physically impossible). 
In those cases, we have some:\n\\begin{equation} X \\sim N(\\mu, \\sigma^{2}, a, b) \\end{equation}\nbounded within the interval of \\((a,b)\\). The PDF of this function is given by:\n\\begin{equation} N(\\mu, \\sigma^{2}, a, b) = \\frac{\\frac{1}{\\sigma} \\phi \\qty(\\frac{x-\\mu }{\\sigma })}{\\Phi \\qty(\\frac{b-\\mu }{\\sigma }) - \\Phi \\qty(\\frac{a-\\mu}{\\sigma})} \\end{equation}\nwhere:\n\\begin{equation} \\Phi = \\int_{-\\infty}^{x} \\phi (x\u0026rsquo;) \\dd{x\u0026rsquo;} \\end{equation}\nand where \\(\\phi\\) is the standard normal density function.\nGaussian mixture model Gaussian models are typically unimodal, meaning they have one peak (things decrease to the left of that peak, increases to the right of it).\nTherefore, in order to model something more complex with multiple peaks, we just weighted average multiple gaussian models\n\\begin{equation} p(x | \\dots ) = \\sum_{i-1}^{n}p_i \\mathcal{N}(x | u_{i}, {\\sigma_{i}}^{2}) \\end{equation}\nwhereby,\nthree ways of analysis probability density function PDFs is a function that maps continuous random variables to the corresponding probability.\n\\begin{equation} P(a \u0026lt; X \u0026lt; b) = \\int_{x=a}^{b} f(X=x)\\dd{x} \\end{equation}\nnote: \\(f\\) is no longer in units of probability!!! it is in units of probability scaled by units of \\(X\\). That is, they are DERIVATIVES of probabilities. That is, the units of \\(f\\) should be \\(\\frac{prob}{unit\\ X}\\). 
So, it can be greater than \\(1\\).\nWe have two important properties:\nif you integrate over any bounds over a probability density function, you get a probability if you integrate over infinity, the result should be \\(1\\) getting exact values from PDF There is a calculus definition for \\(P(X=x)\\), if absolutely needed:\n\\begin{equation} P(X=x) = \\epsilon f(x) \\end{equation}\nmixing discrete and continuous random variables\nLet\u0026rsquo;s say \\(X\\) is continuous, and \\(N\\) is discrete.\nWe desire:\n\\begin{equation} P(N=n|X=x) = \\frac{P(X=x|N=n)P(N=n)}{P(X=x)} \\end{equation}\nnow, to get a specific value for \\(P(X=x)\\), we can just multiply its PMF by a small epsilon:\n\\begin{align} P(N=n|X=x) \u0026amp;= \\lim_{\\epsilon \\to 0} \\frac{\\epsilon f(X=x|N=n)P(N=n)}{\\epsilon f(X=x)} \\\\ \u0026amp;= \\frac{f(X=x|N=n)P(N=n)}{f(X=x)} \\end{align}\nthis same trick works pretty much everywhere\u0026mdash;whenever we need to get the probability of a continuous random variable with\ncumulative distribution function What is the probability that a random variable takes on value less tha\n\\begin{equation} cdf_{x}(x) = P(X\u0026lt;x) = \\int_{-\\infty}^{x} p(x\u0026rsquo;) dx' \\end{equation}\nsometimes written as:\n\\begin{equation} F(x) = P(X \u0026lt; x) \\end{equation}\nRecall that, with\nquantile function \\begin{equation} \\text{quantile}_{X}(\\alpha) \\end{equation}\nis the value \\(x\\) such that:\n\\begin{equation} P(X \\leq x) = \\alpha \\end{equation}\nThat is, the quantile function returns the minimum value of \\(x\\) at which point a certain cumulative distribution value desired is achieved.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distributions\u003c/a\u003e \u0026ldquo;assigns probability to outcomes\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\(X\\) follows distribution \\(D\\). 
\\(X\\) is a \u0026ldquo;\\(D\\) random variable\u0026rdquo;, where \\(D\\) is some distribution (\u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e, gaussian, etc.)\u003c/p\u003e\n\u003cp\u003esyntax: \\(X \\sim D\\).\u003c/p\u003e\n\u003cp\u003eEach distribution has three properties:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003evariables (what is being modeled)\u003c/li\u003e\n\u003cli\u003evalues (what values can they take on)\u003c/li\u003e\n\u003cli\u003eparameters (how many degrees of freedom do we have)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"methods-of-compressing-the-parameters-of-a-distribution\"\u003eMethods of Compressing the Parameters of a Distribution\u003c/h2\u003e\n\u003cp\u003eSo, for instance, for a binary distribution with \\(n\\) variables which we know nothing about, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2^{n} - 1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eparameters (\\(2^{n}\\) different possibilities of combinations, and \\(1\\) non-free variables to ensure that the distribution add up)\u003c/p\u003e\n\u003ch3 id=\"assuming-independence\"\u003eassuming independence\u003c/h3\u003e\n\u003cp\u003eHOWEVER, if the variables were \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e, this becomes much easier. 
Because the variables are independent, we can claim that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x_{1\\dots n}) = \\prod_{i}^{} p(x_{i))\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"decision-tree\"\u003edecision tree\u003c/h3\u003e\n\u003cp\u003eFor instance, you can have a decision tree which you selectively ignore some combinations.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-09-28_10-13-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIn this case, we ignored \\(z\\) if both \\(x\\) and \\(y\\) are \\(0\\).\u003c/p\u003e\n\u003ch3 id=\"baysian-networks\"\u003eBaysian networks\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"types-of-probability-distributions\"\u003etypes of probability distributions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontinuous_distribution/\"\u003econtinuous distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"distribution-of-note\"\u003edistribution of note\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003egaussian distributions\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#truncated-gaussian-distribution\"\u003eTruncated Gaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#gaussian-mixture-model\"\u003eGaussian mixture model\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 
id=\"uniform-distribution\"\u003euniform distribution\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim Uni(\\alpha, \\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\begin{cases}\n\\frac{1}{\\beta -\\alpha }, 0\\leq x \\leq 10 \\\\0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE[x] = \\frac{1}{2}(\\alpha +\\beta)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nVar(X) = \\frac{1}{12}(\\beta -\\alpha )^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"gaussian-things\"\u003eGaussian Things\u003c/h3\u003e\n\u003ch4 id=\"truncated-gaussian-distribution\"\u003eTruncated Gaussian distribution\u003c/h4\u003e\n\u003cp\u003eSometimes, we don\u0026rsquo;t want to use a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003eGaussian distribution\u003c/a\u003e for values above or below a threshold (say if they are physically impossible). In those cases, we have some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX \\sim N(\\mu, \\sigma^{2}, a, b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebounded within the interval of \\((a,b)\\). 
The \u003ca href=\"#probability-density-function\"\u003ePDF\u003c/a\u003e of this function is given by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN(\\mu, \\sigma^{2}, a, b) = \\frac{\\frac{1}{\\sigma} \\phi \\qty(\\frac{x-\\mu }{\\sigma })}{\\Phi \\qty(\\frac{b-\\mu }{\\sigma }) - \\Phi \\qty(\\frac{a-\\mu}{\\sigma})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Phi = \\int_{-\\infty}^{x} \\phi (x\u0026rsquo;) \\dd{x\u0026rsquo;}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand where \\(\\phi\\) is the \u003ca href=\"/posts/kbhgaussian_distribution/#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"gaussian-mixture-model\"\u003eGaussian mixture model\u003c/h4\u003e\n\u003cp\u003eGaussian models are typically \u003ca href=\"/posts/kbhunimodal/\"\u003eunimodal\u003c/a\u003e, meaning they have one peak (things decrease to the left of that peak, increases to the right of it).\u003c/p\u003e\n\u003cp\u003eTherefore, in order to model something more complex with multiple peaks, we just weighted average multiple gaussian models\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np(x | \\dots ) = \\sum_{i-1}^{n}p_i \\mathcal{N}(x | u_{i}, {\\sigma_{i}}^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby,\u003c/p\u003e\n\u003ch2 id=\"three-ways-of-analysis\"\u003ethree ways of analysis\u003c/h2\u003e\n\u003ch3 id=\"probability-density-function\"\u003eprobability density function\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#probability-density-function\"\u003ePDF\u003c/a\u003es is a function that maps continuous random variables to the corresponding probability.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(a \u0026lt; X \u0026lt; b) = \\int_{x=a}^{b} f(X=x)\\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote: \\(f\\) is no longer in units of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e!!! 
it is in units of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e scaled by units of \\(X\\). That is, they are DERIVATIVES of probabilities. That is, the units of \\(f\\) should be \\(\\frac{prob}{unit\\ X}\\). So, it can be greater than \\(1\\).\u003c/p\u003e\n\u003cp\u003eWe have two important properties:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif you integrate over any bounds over a \u003ca href=\"#probability-density-function\"\u003eprobability density function\u003c/a\u003e, you get a \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eif you integrate over infinity, the result should be \\(1\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"getting-exact-values-from-pdf--orge11a5fa\"\u003egetting exact values from \u003ca href=\"#probability-density-function\"\u003ePDF\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eThere is a calculus definition for \\(P(X=x)\\), if absolutely needed:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X=x) = \\epsilon f(x)\n\\end{equation}\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003emixing discrete and continuous random variables\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s say \\(X\\) is continuous, and \\(N\\) is discrete.\u003c/p\u003e\n\u003cp\u003eWe desire:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(N=n|X=x) = \\frac{P(X=x|N=n)P(N=n)}{P(X=x)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, to get a specific value for \\(P(X=x)\\), we can just multiply its \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e by a small epsilon:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(N=n|X=x) \u0026amp;= \\lim_{\\epsilon \\to 0} \\frac{\\epsilon f(X=x|N=n)P(N=n)}{\\epsilon f(X=x)} \\\\\n\u0026amp;= \\frac{f(X=x|N=n)P(N=n)}{f(X=x)}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ethis same trick works pretty much everywhere\u0026mdash;whenever we need to get the probability of a continuous 
random variable with\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cumulative-distribution-function\"\u003ecumulative distribution function\u003c/h3\u003e\n\u003cp\u003eWhat is the probability that a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e takes on value less tha\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ncdf_{x}(x) = P(X\u0026lt;x) = \\int_{-\\infty}^{x} p(x\u0026rsquo;) dx'\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esometimes written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nF(x) = P(X \u0026lt; x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that, with\u003c/p\u003e\n\u003ch3 id=\"quantile-function\"\u003equantile function\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\text{quantile}_{X}(\\alpha)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the value \\(x\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X \\leq x) = \\alpha\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThat is, the \u003ca href=\"#quantile-function\"\u003equantile function\u003c/a\u003e returns the minimum value of \\(x\\) at which point a certain \u003ca href=\"#cumulative-distribution-function\"\u003ecumulative distribution\u003c/a\u003e value desired is achieved.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_distributions/","tags":null,"title":"probability distribution"},{"categories":null,"contents":"PMF is a function that maps possible outcomes of a discrete random variables to the corresponding probability.\nFor random variable \\(Y\\), we have:\n\\begin{equation} f(k) = P(Y=k) \\end{equation}\nand \\(f\\) is a function that is the PMF, which is the mapping between a random variable and a value it takes on to the probability that the random variable takes on that value.\nShorthand \\begin{equation} P(Y=k) = p(y), where\\ y=k \\end{equation}\nits written smaller \\(y\\) represents a case of \\(Y\\) where \\(Y=y\\).\nShorthand For this to be correct, we have 
to\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e is a function that maps possible outcomes of a discrete \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es to the corresponding probability.\u003c/p\u003e\n\u003cp\u003eFor random variable \\(Y\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(k) = P(Y=k)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(f\\) is a function that is the \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e, which is the mapping between a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e and a value it takes on to the probability that the \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e takes on that value.\u003c/p\u003e\n\u003ch2 id=\"shorthand\"\u003eShorthand\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nP(Y=k) = p(y), where\\ y=k\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eits written smaller \\(y\\) represents a \u003cem\u003ecase\u003c/em\u003e of \\(Y\\) where \\(Y=y\\).\u003c/p\u003e\n\u003ch2 id=\"shorthand\"\u003eShorthand\u003c/h2\u003e\n\u003cp\u003eFor this to be correct, we have to\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobability_mass_function/","tags":null,"title":"probability mass function"},{"categories":null,"contents":"multinomial distribution A probability distribution to model specific outcomes like a binomial distribution but for multiple variables.\nlike binomial distribution, we have to assume independence and same probability per trial.\n\u0026ldquo;what\u0026rsquo;s the probability that you get some set of assignments xj=nj\u0026rdquo;:\n\\begin{equation} P(X_1=c_1, X_2=c_2, \\dots, X_{m}=c_{m}) = {n \\choose c_1, c_2, \\dots, c_{m} } p_{1}^{c_1} \\cdot \\dots \\cdot p_{m}^{c_{m}} \\end{equation}\nwhere the big choose is a multinomial coefficient, and \\(n\\) is the number of different outcomes, and 
\\(p_{j}\\) is the probably of the $j$th outcome.\nIMPORTANT: \\(\\sum_{j=0}^{m} c_{j} = n\\): that is, you MUST provide an assignment for each type of outcome.\n","html":"\u003ch2 id=\"multinomial-distribution\"\u003emultinomial distribution\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e distribution to model specific outcomes like a \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e but for multiple variables.\u003c/p\u003e\n\u003cp\u003elike \u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e, we have to assume independence and same probability per trial.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability that you get some set of assignments xj=nj\u0026rdquo;:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X_1=c_1, X_2=c_2, \\dots, X_{m}=c_{m}) = {n \\choose c_1, c_2, \\dots, c_{m} } p_{1}^{c_1} \\cdot \\dots \\cdot p_{m}^{c_{m}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the big choose is a \u003ca href=\"/posts/kbhmultinomial_coefficient/\"\u003emultinomial coefficient\u003c/a\u003e, and \\(n\\) is the number of different outcomes, and \\(p_{j}\\) is the probably of the $j$th outcome.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eIMPORTANT\u003c/strong\u003e\u003c/strong\u003e: \\(\\sum_{j=0}^{m} c_{j} = n\\): that is, you MUST provide an assignment for each type of outcome.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprobablistic_model/","tags":null,"title":"probablistic models"},{"categories":null,"contents":"gravity sucks.\ngeneral relativity claims that our best theory of how gravity work does not work with non-\n","html":"\u003cp\u003egravity sucks.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhgeneral_relativity/\"\u003egeneral relativity\u003c/a\u003e claims that our best theory of how gravity work does not work with 
non-\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproblem_with_gravity/","tags":null,"title":"problem with gravity"},{"categories":null,"contents":"Each process is controlled by a struct which contain information about the process.\nmemory used by the process file descriptor table thread state other accounting file descriptor table Within each process, we have a file descriptor table (and the ints we get are indicies into this table), for which each entry stores points to the open file table.\nWhen a process forks, the child doesn\u0026rsquo;t get more open file entries, instead, we simply clone the file descriptor table (i.e. parent and child will share the same underlying open file table entries); this is how we can share pipes.\nThis is why you need to CLOSE all open file descriptors once every PROCESS, including forked child.\nthread state Recall that threads are the unit of execution. The process control block keeps track of the *stack pointer* of the thread %rsp, which means if a thread is put to sleep the state can be stored somewhere on the stack.\nrunning blockde - waiting for an event like disk, network, etc. ready - able to run, but not on CPU yet IO vs. 
CPU bound I/O Bound Thread is a thread that needs to wait for disk events, and don\u0026rsquo;t need CPU that much CPU Thread is a thread that needs CPU time ","html":"\u003cp\u003eEach \u003ca href=\"/posts/kbhmultiprocessing/#process\"\u003eprocess\u003c/a\u003e is controlled by a struct which contain information about the process.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ememory used by the process\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#file-descriptor-table\"\u003efile descriptor table\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#thread--kbhmultithreading-dot-md--state\"\u003ethread state\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eother accounting\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"file-descriptor-table\"\u003efile descriptor table\u003c/h2\u003e\n\u003cp\u003eWithin each process, we have a \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e table (and the ints we get are indicies into this table), for which each entry stores points to the \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWhen a process forks, the child doesn\u0026rsquo;t get more open file entries, instead, we simply clone the \u003ca href=\"/posts/kbhsyscalls/#file-descriptor\"\u003efile descriptor\u003c/a\u003e table (i.e. 
parent and child will share the same underlying \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e entries); this is how we can share pipes.\u003c/p\u003e\n\u003cp\u003eThis is why you need to \u003cstrong\u003eCLOSE\u003c/strong\u003e all open file descriptors once every \u003cstrong\u003ePROCESS\u003c/strong\u003e, including forked child.\u003c/p\u003e\n\u003ch2 id=\"thread--kbhmultithreading-dot-md--state\"\u003e\u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e state\u003c/h2\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es are the \u003cstrong\u003eunit of execution\u003c/strong\u003e. The \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e keeps track of the \u003ca href=\"/posts/kbhassembly/#stack-pointer\"\u003e*stack pointer\u003c/a\u003e* of the thread \u003ccode\u003e%rsp\u003c/code\u003e, which means if a thread is put to sleep the state can be stored somewhere on the stack.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003erunning\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eblockde\u003c/strong\u003e - waiting for an event like disk, network, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eready\u003c/strong\u003e - able to run, but not on CPU yet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-21_13-50-23_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"io-vs-dot-cpu-bound\"\u003eIO vs. 
CPU bound\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e is a thread that needs to wait for disk events, and don\u0026rsquo;t need CPU that much\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#io-vs-dot-cpu-bound\"\u003eCPU Thread\u003c/a\u003e is a thread that needs CPU time\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprocess_control_block/","tags":null,"title":"process control block"},{"categories":null,"contents":"Take two linear maps \\(T \\in \\mathcal{L}(U,V)\\) and \\(S \\in \\mathcal{L}(V,W)\\), then \\(ST \\in \\mathcal{L}(U,W)\\) is defined by:\n\\begin{equation} (ST)(u) = S(Tu) \\end{equation}\nIndeed the \u0026ldquo;product\u0026rdquo; of Linear Maps is just function composition. Of course, \\(ST\\) is defined only when \\(T\\) maps to something in the domain of \\(S\\).\nThe following there properties hold on linear-map products (note that commutativity isn\u0026rsquo;t one of them!):\nassociativity \\begin{equation} (T_1T_2)T_3 = T_1(T_2T_3) \\end{equation}\nidentity \\begin{equation} TI = IT = T \\end{equation}\nfor \\(T \\in \\mathcal{L}(V,W)\\) and \\(I \\in \\mathcal{L}(V,V)\\) (OR \\(I \\in \\mathcal{L}(W,W)\\) depending on the order) is the identity map in \\(V\\).\nidentity commutes, as always.\ndistributive in both directions\u0026mdash;\n\\begin{equation} (S_1+S_2)T = S_1T + S_2T \\end{equation}\nand\n\\begin{equation} S(T_1+T_2) = ST_{1}+ST_{2} \\end{equation}\n","html":"\u003cp\u003eTake two linear maps \\(T \\in \\mathcal{L}(U,V)\\) and \\(S \\in \\mathcal{L}(V,W)\\), then \\(ST \\in \\mathcal{L}(U,W)\\) is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(ST)(u) = S(Tu)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIndeed the \u0026ldquo;product\u0026rdquo; of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es is just function composition. 
Of course, \\(ST\\) is defined only when \\(T\\) maps to something in the domain of \\(S\\).\u003c/p\u003e\n\u003cp\u003eThe following there properties hold on linear-map products (\u003cem\u003enote that commutativity isn\u0026rsquo;t one of them!\u003c/em\u003e):\u003c/p\u003e\n\u003ch2 id=\"associativity\"\u003eassociativity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n(T_1T_2)T_3 = T_1(T_2T_3)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"identity\"\u003eidentity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nTI = IT = T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(T \\in \\mathcal{L}(V,W)\\) and \\(I \\in \\mathcal{L}(V,V)\\) (OR \\(I \\in \\mathcal{L}(W,W)\\) depending on the order) is the \u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e map in \\(V\\).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhidentity/\"\u003eidentity\u003c/a\u003e commutes, as always.\u003c/p\u003e\n\u003ch2 id=\"distributive\"\u003edistributive\u003c/h2\u003e\n\u003cp\u003ein both directions\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(S_1+S_2)T = S_1T + S_2T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS(T_1+T_2) = ST_{1}+ST_{2}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduct_of_linear_maps/","tags":null,"title":"Product of Linear Maps"},{"categories":null,"contents":"A product of vector spaces is a vector space formed by putting an element from each space into an element of the vector.\nconstituents Suppose \\(V_1 \\dots V_{m}\\) are vector spaces over the same field \\(\\mathbb{F}\\)\nrequirements Product between \\(V_1 \\dots V_{m}\\) is defined:\n\\begin{equation} V_1 \\times \\dots \\times V_{m} = \\{(v_1, \\dots, v_{m}): v_1 \\in V_1 \\dots v_{m} \\in V_{m}\\} \\end{equation}\n\u0026ldquo;chain an element from each space into another vector\u0026rdquo;\nadditional information operations on Product of Vector Spaces The operations on the 
product of vector spaces are defined in the usual way.\nAddition: \\((u_1, \\dots, u_{m})+(v_1, \\dots, v_{m}) = (u_1+v_1, \\dots, u_{m}+v_{m})\\)\nScalar multiplication: \\(\\lambda (v_1 \\dots v_{m}) = (\\lambda v_1, \\dots, \\lambda v_{m})\\)\nProduct of Vector Spaces is a vector space The operations defined above inherits closure from their respective vector spaces.\nadditive identity: \\((0, \\dots, 0)\\), taking the zero from each vector space additive inverse: \\((-v_1, \\dots, -v_{m})\\), taking the additive inverse from each vector space scalar multiplicative identity: \\(1\\) operations: commutativity, associativity, distributivity \u0026mdash; inheriting from vector spaces \\(\\blacksquare\\)\ndimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension Proof:\nTake each \\(V_{j}\\); construct a list such that, for each basis vector in the basis of \\(V_{j}\\), we have an element of the list such that we have that basis vector in the \\(j^{th}\\) slot and \\(0\\) in all others.\nThis list is linearly independent; and, a linear combination thereof span all of \\(V_1 \\times \\dots \\times V_{m}\\). The length of this is the sum of the number of basis vectors of each space, as desired. 
\\(\\blacksquare\\)\nproduct summation map See: product summation map\n","html":"\u003cp\u003eA product of vector spaces is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e formed by putting an element from each space into an element of the vector.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eSuppose \\(V_1 \\dots V_{m}\\) are \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es over the same field \\(\\mathbb{F}\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eProduct\u003c/strong\u003e between \\(V_1 \\dots V_{m}\\) is defined:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1 \\times \\dots \\times V_{m} = \\{(v_1, \\dots, v_{m}): v_1 \\in V_1 \\dots v_{m} \\in V_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;chain an element from each space into another vector\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"operation--kbhoperation-dot-md--s-on-product-of-vector-space--kbhproduct-of-vector-spaces-dot-md--s\"\u003e\u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es on \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003eThe operations on the product of vector spaces are defined in the usual way.\u003c/p\u003e\n\u003cp\u003eAddition: \\((u_1, \\dots, u_{m})+(v_1, \\dots, v_{m}) = (u_1+v_1, \\dots, u_{m}+v_{m})\\)\u003c/p\u003e\n\u003cp\u003eScalar multiplication: \\(\\lambda (v_1 \\dots v_{m}) = (\\lambda v_1, \\dots, \\lambda v_{m})\\)\u003c/p\u003e\n\u003ch3 id=\"product-of-vector-space--kbhproduct-of-vector-spaces-dot-md--s-is-a-vector-space--kbhvector-space-dot-md\"\u003e\u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector 
space\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eThe operations defined above inherits \u003ca href=\"/posts/kbhclosed/\"\u003eclosure\u003c/a\u003e from their respective vector spaces.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e: \\((0, \\dots, 0)\\), taking the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e from each vector space\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e: \\((-v_1, \\dots, -v_{m})\\), taking the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e from each \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003escalar multiplicative identity: \\(1\\)\u003c/li\u003e\n\u003cli\u003eoperations: \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e \u0026mdash; inheriting from \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"dimension-of-the-product-of-vector-space--kbhproduct-of-vector-spaces-dot-md--s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es is the sum of the spaces\u0026rsquo; dimension\u003c/h3\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eTake each \\(V_{j}\\); construct a list such that, for each \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e vector in the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V_{j}\\), we have an element of the list such that we have that basis vector in the \\(j^{th}\\) slot and \\(0\\) in all others.\u003c/p\u003e\n\u003cp\u003eThis list is \u003ca 
href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e; and, a linear combination thereof span all of \\(V_1 \\times \\dots \\times V_{m}\\). The length of this is the sum of the number of \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e vectors of each space, as desired. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"product-summation-map\"\u003eproduct summation map\u003c/h3\u003e\n\u003cp\u003eSee: \u003ca href=\"/posts/kbhproduct_summation_map/\"\u003eproduct summation map\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduct_of_vector_spaces/","tags":null,"title":"Product of Vector Space"},{"categories":null,"contents":"Let \\(U_1, \\dots, U_{m}\\) be subspaces of \\(V\\); we define a linear\nWe define \\(\\Gamma\\) to be a map \\(U_1 \\times \\dots U_{m} \\to U_1 + \\dots + U_{m}\\) such that:\n\\begin{equation} \\Gamma (u_1, \\dots, u_{m}) = u_1 + \\dots + u_{m} \\end{equation}\nEssentially, \\(\\Gamma\\) is the sum operation of the elements of the tuple made by the Product of Vector Spaces.\n\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective Proof:\nGiven \\(\\Gamma\\) is injective: Given injectivity, we have that injectivity implies that null space is \\(\\{0\\}\\). Now, because the only way to produce \\(0\\) is to have the input product/tuple be 0, \\(u_1 \\dots u_{m} = 0\\). So, given a sum of subsets is a direct sum IFF there is only one way to write \\(0\\), the sum is a direct sum.\nGiven direct sum: Reverse the logic of above directly. Given its a direct sum, the only way to be in the null space of \\(\\Gamma\\) (i.e. have the sum of the elements of tuple by \\(0\\)) is by taking each \\(u_1 \\dots u_{m}\\) to \\(0\\). Now, injectivity implies that null space is \\(\\{0\\}\\), so \\(\\Gamma\\) is injective. 
\\(\\blacksquare\\)\nAside: \\(\\Gamma\\) is surjective because product of vector-spaces is simply the pre-combined version of the sum.\nSo a corollary of the above result is that: \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is invertable, because injectivity and surjectivity implies invertability.\n\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\) \\(\\Gamma\\) is surjective for all cases because product of vector-spaces is simply the pre-combined version of the sum.\nSo, by rank-nullity theorem, \\(\\dim (U_1 \\times \\dots U_{m}) = \\dim null\\ \\Gamma + \\dim (U_1 + \\dots + U_{m})\\).\nNow, \\(\\dim null\\ \\Gamma = 0\\) IFF \\(\\dim (U_1 \\times \\dots U_{m}) = 0 + \\dim (U_1 + \\dots + U_{m})\\).\nNow, dimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension.\nSo: \\(\\dim null\\ \\Gamma = 0\\) IFF \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\nNow, \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective, and from above \\(\\dim null\\ \\Gamma = 0\\) (that \\(\\Gamma\\) is injective) IFF \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\nSo, \\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\), as desired. 
\\(\\blacksquare\\)\n(Note that this proof is built out of a series of IFFs, so it goes in both directions.)\n","html":"\u003cp\u003eLet \\(U_1, \\dots, U_{m}\\) be \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\); we define a linear\u003c/p\u003e\n\u003cp\u003eWe define \\(\\Gamma\\) to be a map \\(U_1 \\times \\dots U_{m} \\to U_1 + \\dots + U_{m}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma (u_1, \\dots, u_{m}) = u_1 + \\dots + u_{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEssentially, \\(\\Gamma\\) is the sum operation of the elements of the tuple made by the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"u-1-plus-dots-plus-u-m-is-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--gamma-is-injective--kbhinjectivity-dot-md\"\u003e\\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eGiven \\(\\Gamma\\) is injective:\nGiven \u003ca href=\"/posts/kbhinjectivity/\"\u003einjectivity\u003c/a\u003e, we have that \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity implies that null space is \\(\\{0\\}\\)\u003c/a\u003e. Now, because the only way to produce \\(0\\) is to have the input product/tuple be 0, \\(u_1 \\dots u_{m} = 0\\). 
So, given \u003ca href=\"/posts/kbhdirect_sum/#a-id-1b800658-2f83-4802-acfd-2d15cf5a1d74-sum-of-subsets-is-a-id-4e586014-c91f-4d52-98bb-a2fe11a75007-direct-sum-id-fddf0648-91ea-4c5b-8298-fa0a30637cb7-iff-there-is-only-one-way-to-write-0\"\u003ea sum of subsets is a direct sum IFF there is only one way to write \\(0\\)\u003c/a\u003e, the sum is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGiven \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e:\nReverse the logic of above directly. Given its a direct sum, the only way to be in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(\\Gamma\\) (i.e. have the sum of the elements of tuple by \\(0\\)) is by taking each \\(u_1 \\dots u_{m}\\) to \\(0\\). Now, \u003ca href=\"/posts/kbhinjectivity/#injectivity-implies-that-id-767a441d-4931-4fad-aa8e-c6b001e8b507-null-space-is-0\"\u003einjectivity implies that null space is \\(\\{0\\}\\)\u003c/a\u003e, so \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"aside\"\u003eAside:\u003c/h3\u003e\n\u003cp\u003e\\(\\Gamma\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e because product of vector-spaces is simply the pre-combined version of the sum.\u003c/p\u003e\n\u003cp\u003eSo a corollary of the above result is that: \\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, because \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"u-1-plus-dots-plus-u-m-is-a-direct-sum--kbhdirect-sum-dot-md--iff-dim--u-1-plus-dots-plus-u-m--dim-u-1-plus-dots-plus-dim-u-m\"\u003e\\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\)\u003c/h2\u003e\n\u003cp\u003e\\(\\Gamma\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e for all cases because product of vector-spaces is simply the pre-combined version of the sum.\u003c/p\u003e\n\u003cp\u003eSo, by \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e, \\(\\dim (U_1 \\times \\dots U_{m}) = \\dim null\\ \\Gamma + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eNow, \\(\\dim null\\ \\Gamma = 0\\) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\dim (U_1 \\times \\dots U_{m}) = 0 + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003ca 
href=\"/posts/kbhproduct_of_vector_spaces/#dimension-of-the-id-a45b05c0-3e01-4c27-bc3b-543ff3606c66-product-of-vector-space-s-is-the-sum-of-the-spaces-dimension\"\u003edimension of the Product of Vector Spaces is the sum of the spaces\u0026rsquo; dimension\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo: \\(\\dim null\\ \\Gamma = 0\\) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003ca href=\"#u-1-plus-dots-plus-u-m-is-a-direct-sum--kbhdirect-sum-dot-md--iff--kbhequivalence-dot-md--gamma-is-injective--kbhinjectivity-dot-md\"\u003e\\(U_1 + \\dots + U_{m}\\) is a direct sum IFF \\(\\Gamma\\) is injective\u003c/a\u003e, and from above \\(\\dim null\\ \\Gamma = 0\\) (that \\(\\Gamma\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e) \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\(\\dim U_1 + \\dots + \\dim U_{m} = 0 + \\dim (U_1 + \\dots + U_{m})\\).\u003c/p\u003e\n\u003cp\u003eSo, \\(U_1 + \\dots + U_{m}\\) is a \u003ca href=\"/posts/kbhdirect_sum/\"\u003edirect sum\u003c/a\u003e IFF \\(\\dim (U_1 + \\dots + U_{m}) = \\dim U_1 + \\dots + \\dim U_{m}\\), as desired. 
\\(\\blacksquare\\)\u003c/p\u003e\n\u003cp\u003e(Note that this proof is built out of a series of \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003es, so it goes in both directions.)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduct_summation_map/","tags":null,"title":"product summation map"},{"categories":null,"contents":"This is a work-in-progress page listing all of my production projects.\nFireside: Blog Fireside Index\nYappin: Podcast https://anchor.fm/yappin/\n20MinuteRants: Blog https://medium.com/20minuterants\nProject80: Podcast See Project80.\nNorman Stories: Fiction https://hidonipothan.substack.com/\n(left) Director - Hillview Broadcasting: Production Studio https://hillview.tv/\n","html":"\u003cp\u003eThis is a work-in-progress page listing all of my production projects.\u003c/p\u003e\n\u003ch2 id=\"fireside-blog\"\u003eFireside: Blog\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfireside/\"\u003eFireside Index\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"yappin-podcast\"\u003eYappin: Podcast\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://anchor.fm/yappin/\"\u003ehttps://anchor.fm/yappin/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"20minuterants-blog\"\u003e20MinuteRants: Blog\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://medium.com/20minuterants\"\u003ehttps://medium.com/20minuterants\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"project80-podcast\"\u003eProject80: Podcast\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"norman-stories-fiction\"\u003eNorman Stories: Fiction\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://hidonipothan.substack.com/\"\u003ehttps://hidonipothan.substack.com/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"left--director-hillview-broadcasting-production-studio\"\u003e(left) Director - Hillview Broadcasting: Production Studio\u003c/h2\u003e\n\u003cp\u003e\u003ca 
href=\"https://hillview.tv/\"\u003ehttps://hillview.tv/\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproduction_index/","tags":["index"],"title":"Production Index"},{"categories":null,"contents":"So you wanted to be productive?\nGo do stuff. Stop reading. Get crap done.\n\u0026hellip; \u0026hellip; \u0026hellip;\nWait, you are still here? Well, given that you are sticking around, we might as well discuss some tooling that may help you in organizing your work. By all means I don\u0026rsquo;t think this is a complete list, many of these have intersecting features; think of this as more a survey of the field\u0026mdash;\nTooling, Knowledge Management https://obsidian.md/ https://www.notion.so/ https://evernote.com/ https://logseq.com/ what I use, which is generally not recommended because not very beginner friendly https://www.gnu.org/software/emacs/ + https://www.orgroam.com/ https://www.zotero.org/ https://getdrafts.com/ https://notability.com/ Tooling, Calendaring and Email https://flexibits.com/fantastical https://sparkmailapp.com/ https://www.thunderbird.net/en-US/ Google Calendar (I\u0026rsquo;m not kidding, its very powerful) Tooling, To-Do List https://todoist.com/ https://todo.microsoft.com/ https://www.omnigroup.com/omnifocus/ https://culturedcode.com/things/ Shameless plug: https://www.condution.com/ https://orgmode.org/manual/Agenda-Views.html again, Notion. See above. 
https://workflowy.com/ https://www.rememberthemilk.com/ Methodologies https://gettingthingsdone.com/ https://marshallgoldsmith.com/book-page-triggers/ https://zettelkasten.de/posts/overview/ https://www.taskade.com/blog/personal-knowledge-management-pkm-guide/ https://docs.google.com/document/d/1PXYTxKmhf8Kb7emJMGPA1qCaPEossCgyE9WmLVSxIwU/edit Media Personalities with Interesting Opinions https://www.relay.fm/cortex https://daringfireball.net/ https://www.relay.fm/connected https://www.macstories.net/ https://www.youtube.com/watch?v=ALaTm6VzTBw ","html":"\u003cp\u003eSo you wanted to be productive?\u003c/p\u003e\n\u003cp\u003eGo do stuff. Stop reading. Get crap done.\u003c/p\u003e\n\u003cp\u003e\u0026hellip;\n\u0026hellip;\n\u0026hellip;\u003c/p\u003e\n\u003cp\u003eWait, you are still here? Well, given that you are sticking around, we might as well discuss some tooling that may help you in organizing your work. By all means I don\u0026rsquo;t think this is a complete list, many of these have intersecting features; think of this as more a survey of the field\u0026mdash;\u003c/p\u003e\n\u003ch2 id=\"tooling-knowledge-management\"\u003eTooling, Knowledge Management\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://obsidian.md/\"\u003ehttps://obsidian.md/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.notion.so/\"\u003ehttps://www.notion.so/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://evernote.com/\"\u003ehttps://evernote.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://logseq.com/\"\u003ehttps://logseq.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewhat I use, which is generally not recommended because not very beginner friendly \u003ca href=\"https://www.gnu.org/software/emacs/\"\u003ehttps://www.gnu.org/software/emacs/\u003c/a\u003e + \u003ca href=\"https://www.orgroam.com/\"\u003ehttps://www.orgroam.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"https://www.zotero.org/\"\u003ehttps://www.zotero.org/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://getdrafts.com/\"\u003ehttps://getdrafts.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://notability.com/\"\u003ehttps://notability.com/\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tooling-calendaring-and-email\"\u003eTooling, Calendaring and Email\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://flexibits.com/fantastical\"\u003ehttps://flexibits.com/fantastical\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://sparkmailapp.com/\"\u003ehttps://sparkmailapp.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.thunderbird.net/en-US/\"\u003ehttps://www.thunderbird.net/en-US/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eGoogle Calendar (I\u0026rsquo;m not kidding, its very powerful)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tooling-to-do-list\"\u003eTooling, To-Do List\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://todoist.com/\"\u003ehttps://todoist.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://todo.microsoft.com/\"\u003ehttps://todo.microsoft.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.omnigroup.com/omnifocus/\"\u003ehttps://www.omnigroup.com/omnifocus/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://culturedcode.com/things/\"\u003ehttps://culturedcode.com/things/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eShameless plug: \u003ca href=\"https://www.condution.com/\"\u003ehttps://www.condution.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://orgmode.org/manual/Agenda-Views.html\"\u003ehttps://orgmode.org/manual/Agenda-Views.html\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eagain, Notion. 
See above.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://workflowy.com/\"\u003ehttps://workflowy.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.rememberthemilk.com/\"\u003ehttps://www.rememberthemilk.com/\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"methodologies\"\u003eMethodologies\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://gettingthingsdone.com/\"\u003ehttps://gettingthingsdone.com/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://marshallgoldsmith.com/book-page-triggers/\"\u003ehttps://marshallgoldsmith.com/book-page-triggers/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://zettelkasten.de/posts/overview/\"\u003ehttps://zettelkasten.de/posts/overview/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.taskade.com/blog/personal-knowledge-management-pkm-guide/\"\u003ehttps://www.taskade.com/blog/personal-knowledge-management-pkm-guide/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://docs.google.com/document/d/1PXYTxKmhf8Kb7emJMGPA1qCaPEossCgyE9WmLVSxIwU/edit\"\u003ehttps://docs.google.com/document/d/1PXYTxKmhf8Kb7emJMGPA1qCaPEossCgyE9WmLVSxIwU/edit\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"media-personalities-with-interesting-opinions\"\u003eMedia Personalities with Interesting Opinions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://www.relay.fm/cortex\"\u003ehttps://www.relay.fm/cortex\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://daringfireball.net/\"\u003ehttps://daringfireball.net/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.relay.fm/connected\"\u003ehttps://www.relay.fm/connected\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://www.macstories.net/\"\u003ehttps://www.macstories.net/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"https://www.youtube.com/watch?v=ALaTm6VzTBw\"\u003ehttps://www.youtube.com/watch?v=ALaTm6VzTBw\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproductivity_starter_pack/","tags":null,"title":"Productivity Starter Pack"},{"categories":null,"contents":"We mentioned this in class, and I figured we should write it down.\nSo, if you think about the Product of Vector Space:\n\\begin{equation} \\mathbb{R} \\times \\mathbb{R} \\end{equation}\nyou are essentially taking the \\(x\\) axis straight line and \u0026ldquo;duplicating\u0026rdquo; it along the \\(y\\) axis.\nNow, the opposite of this is the quotient space:\n\\begin{equation} \\mathbb{R}^{2} / \\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\} \\end{equation}\nWhere, we are essentially taking the line in the \\(x\\) axis and squish it down, leaving us only the \\(y\\) component freedom to play with (as each element is \\(v +\\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\}\\)).\nThis also gets us the result that two affine subsets parallel to \\(U\\) are either equal or disjoint; specifically the conclusion that \\(v-w \\in U \\implies v+U = w+U\\): for our example, only shifting up and down should do different things; if two shifts\u0026rsquo; up-down shift is \\(0\\) (i.e. 
it drops us back into \\(\\mqty(a \\\\0)\\) land), well then it will not move us anywhere different.\n","html":"\u003cp\u003eWe mentioned this in class, and I figured we should write it down.\u003c/p\u003e\n\u003cp\u003eSo, if you think about the \u003ca href=\"/posts/kbhproduct_of_vector_spaces/\"\u003eProduct of Vector Space\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{R} \\times \\mathbb{R}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou are essentially taking the \\(x\\) axis straight line and \u0026ldquo;duplicating\u0026rdquo; it along the \\(y\\) axis.\u003c/p\u003e\n\u003cp\u003eNow, the opposite of this is the \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{R}^{2} / \\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere, we are essentially taking the line in the \\(x\\) axis and squish it down, leaving us only the \\(y\\) component freedom to play with (as each element is \\(v +\\left\\{\\mqty(a \\\\ 0): a \\in \\mathbb{R} \\right\\}\\)).\u003c/p\u003e\n\u003cp\u003eThis also gets us the result that \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e; specifically the conclusion that \\(v-w \\in U \\implies v+U = w+U\\): for our example, only shifting up and down should do different things; if two shifts\u0026rsquo; up-down shift is \\(0\\) (i.e. 
it drops us back into \\(\\mqty(a \\\\0)\\) land), well then it will not move us anywhere different.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproducts_and_quotients_the_intuition/","tags":null,"title":"products and quotients, the intuition"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhprof_xin_liu/","tags":null,"title":"Prof. Xin Liu"},{"categories":null,"contents":"Introduction Recent advances of language models (LMs) introduced the possibility of in-context, few or zero-shot reasoning (Brown et al. 2020) using LMs without much or any fine tuning.\nYet, classically, LM decoding takes place in a left-to-right fashion, auto-regressively resolving one token at a time by sampling from the output distribution of possible next words without multi-step planning.\nWork in LM agents have taken steps to solve more complex problems that would typically require multi-step reasoning even while using this direct decoding approach. The simplest idea, named \u0026ldquo;chain-of-thoughts\u0026rdquo; (CoT), involves forcing the LM at decode time to begin the decoding process with natural language reasoning about its actions (Wei et al. 2022). The method has contributed to the creation of powerful language agents (Yao, Zhao, et al. 2023) that can reason about complex actions.\nDespite the relative success of CoT, the scheme still does not support any kind of backtracking as it samples directly from the LM\u0026rsquo;s posterior distribution. When a problem requires a significantly large number of steps to solve, issues relating to \u0026ldquo;de-generation\u0026rdquo; (Holtzman et al. 
2020) become increasingly prevalent: whereby, naive maximizations of sequence likelihood result in a most likely sub-phrase being repeated which does not contribute to increased information or progress on a problem.\nRecent theoretical work suggests these types of degeneration arise due to the distortion of output probability density caused by the last-layer softmax projection into the probability simplex (Finlayson et al. 2023): due to the lower degrees of freedom offered by a probability syntax, both high and low tails of the latent next-word distribution become emphasized in the output probability distribution.\nTo address this, recent approaches such as Tree of Thoughts (ToT) (Yao, Yu, et al. 2023) have separated the process of next-step proposal (\u0026ldquo;thinking\u0026rdquo;) and choosing the actual step to take given a situation (\u0026ldquo;reasoning\u0026rdquo;). This separation allows the representation of a problem through only short decoding sequences that are less prone to degeneration, while allowing a separate LM call to score the value of being at any given partial solution through a maximum-likely single-token output that is less likely to be distorted.\nIn this work, we extend the ToT prompting scheme to formalize this process of \u0026ldquo;thinking\u0026rdquo; and \u0026ldquo;reasoning\u0026rdquo; via a Language Model as a Partially Observable Markov Decision Process (POMDP). We call this decoding scheme the Lookahead Sampler (LS).\nThe key underlying assumption of the proposed LS scheme involves the claim that LMs are able to make judgments about the value of a subsequence towards solving a problem by analyzing the likelihood of a particular sequence against a judgment of value. This assumption is supported by the existence of reinforcement learning formulations of LM-on-LM output verification\u0026mdash;both for reasoning ((Verma et al. 2022)) and hallucination ((Liu et al. 
2022))\u0026ndash;as well as the use of LM-inferred state value heuristics in the ToS approach.\nWe leverage this assumption by, similar to ToT, using the LM\u0026rsquo;s evaluation of the likelihood of a sequence (similar to LM \u0026ldquo;scoring\u0026rdquo; of a \u0026ldquo;thought\u0026rdquo; in ToT) as a heuristic for the coherence and reasoning within a subsequence of LM output\u0026mdash;forming a \u0026ldquo;self reflection\u0026rdquo; scheme similar to other LM-scoring schemes previously proposed (Paul et al. 2023; Shinn, Labash, and Gopinath 2023). Yet, differing from ToT, we explicitly formulate this scoring by an LM as an \u0026ldquo;observation\u0026rdquo; of an unobservable underlying latent understanding of the input sequence.\nBy solving the LS problem with the anytime POMCP solver (Silver and Veness 2010), we further demonstrate that LS exhibits stronger anytime characteristics on the Game of 24 task as compared to ToT while maintaining performance that is comparable to ToT and superior to CoT. Lastly, we were able to obtain these results at lower costs to ToT evaluations by using a hybrid language modeling approach by using a larger language model, GPT-4, for posterior sampling and evaluation while using a smaller language model, GPT-3.5-Turbo-Instruct, as the \u0026ldquo;thought\u0026rdquo; generator.\nTree of Thoughts We provide here a short summary of the Tree of Thoughts (ToT) (Yao, Yu, et al. 2023) approach that is relevant to our model. ToT offers a scheme to enable multi-step reasoning with LMs by presenting a decomposition of multi-step LM reasoning into individual steps which is then combined through classic approaches in search and planning.\nSpecifically, ToT represents a given problem as a finite-horizon planning problem which it then solves in four broad steps.\nThought Decomposition: by leveraging problem-specific characteristics, each problem is decomposed into distinct, incremental steps towards a solution. 
For the \u0026ldquo;Game of 24\u0026rdquo; task, for instance, each \u0026ldquo;thought\u0026rdquo; is considered a line of equation which contributes to the overall task of combining four numbers to reach 24.\nNow, let \(p_{\theta}\) be our language model, \(s_{j}^{(i)}\) be thought candidate \(j\) of step \(i\) of a decomposed problem, \(s_{*}^{(i)}\) the optimal thought to continue from at step \(i\), \(\tau_{ *} = \qty[s^{(1)}_{ *}, \u0026hellip;, s^{(n)}_{ *}]\) a \u0026ldquo;solution\u0026rdquo; to a given problem.\nThought Generation: multiple, initial short decodings of an LM\u0026mdash;sampling from \(s\u0026rsquo; \sim p_{\theta}^{thought}\qty(s^{(i+1)} | s^{(i)})\) are obtained which form a series of next states (\u0026ldquo;thoughts\u0026rdquo;) which encode a partial step towards the solution which is reachable at any given state.\nThought Evaluation: another LM call rates each of the possible next states for their chance in reaching the solution; specifically, we ask the LM to reason about a given state by calculating the posterior probabilities of predicting a specific judgement of value (the words \u0026ldquo;sure\u0026rdquo;, \u0026ldquo;likely\u0026rdquo;, \u0026ldquo;impossible\u0026rdquo;) given that state; that is: \(V(s_{j}) = \arg\max_{o} \{p^{value}_\theta(o|s_{j}), o \in \{ sure, likely, impossible\}, \forall j \in 1 \u0026hellip; n\}\).\nProblem Solving: finally, given this heuristic, solving a specific problem in ToT involves using a search-and-planning scheme (specifically, DFS or BFS) to cycle between generation and evaluation of thoughts until a terminal thought is reached. Branches on the DFS tree are pruned if they are voted as \u0026ldquo;impossible\u0026rdquo;.\nBy combining explicit planning and LM reasoning, this approach achieved state-of-the-art results on the Game of 24 and other difficult natural-language tasks such as a crossword. 
However, the ToT approach does not incorporate any form of heuristic-guided preferential planning between different \u0026ldquo;possible\u0026rdquo; states\u0026mdash;in contrast to dynamic approaches which preferentially explore sequences of high probability of success.\nMethods Problem Formulation Our work formalizes and augments the stepwise decoding scheme proposed by ToT as a Partially Observable Markov Decision Process (POMDP) (Kaelbling, Littman, and Cassandra 1998). A POMDP is a search and planning formulation which emphasizes the uncertain nature of intermediate steps by formalizing each problem into a tuple \\((\\mathcal{S}, \\mathcal{A}, \\mathcal{O}, \\mathcal{T}, \\mathcal{R}, \\gamma, s_0)\\).\nWe specifically define our problem formulation as follows:\n\\(\\mathcal{S} = S \\times U\\), where \\(s \\in S\\) is each sub-step of our decomposed problem and \\(u \\in U\\) representing the unmeasurable, true underlying value being estimated by \\(V(s)\\) in ToT representing the usefulness of a particular thought \\(\\mathcal{A} = [a_0, a_1, a_2]\\), a discrete set of possible problem-solving actions\u0026mdash;to \u0026ldquo;continue\u0026rdquo; expanding a particular branch \\(a_0\\), to \u0026ldquo;rollback\u0026rdquo; to previous branch \\(a_1\\), or to \u0026ldquo;think\u0026rdquo; a new thought at the current branch \\(a_2\\). 
\\(\\mathcal{O} \\in S \\times U\\), exactly the same as \\(\\mathcal{S}\\), but instead of the unobservable underlying value of a given \\(s\\) we obtain \\(V(s)\\) instead from the language model by asking the language model for its judgement regarding a state; importantly, because the observations are simply a sample on the distribution, we can directly use \\(V(s_{j}) \\sim p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\) \\(\\mathcal{T}\\) is given deterministically given the state and action\u0026mdash;\u0026ldquo;continue\u0026rdquo; appends the current state to the solution trajectory, yielding a new subproblem, and calculates a new thought; \u0026ldquo;rollback\u0026rdquo; pops the last item in the solution trajectory back into the current state and reverts to the previous subproblem; \u0026ldquo;think\u0026rdquo; reformulates a new state given the current subproblem \\(\\mathcal{R}\\) is given by a language model evaluator given a final trajectory, where: \\(r_{\\max}\\) is given if the LM believes a trajectory successfully solves the problem, and \\(r_{\\min}\\) is given if the LM believes a trajectory failed to solve the problem Lastly, for this problem, we set discount \\(\\gamma\\) to \\(1\\) to maximize joint reward, and \\(s_0\\) would be the initial, unsolved problem.\nModified POMCP To solve the formalization given above in an actual problem, we chose the POMCP solver (Silver and Veness 2010). 
This solver is chosen for three primary reasons.\nFirst, by only needing actions and observation sequences as input, the solver requires no explicit distribution on observation and transitions, meaning that simply making concrete samples from the language model posterior is enough to take advantage of its distributional nature.\nSecond, the POMCP solver has excellent anytime performance characteristics; the search tree for possible solutions will prioritize the most promising solutions as rated by intermediate value, but can expand to (in the worst case) an exhaustive search of all possible intermediate states. In particular, easier problems will have stronger heuristic signals, which typically will take less time to solve; this means that a cutoff could be specified by the user to control the speed/accuracy trade-off in solving a given problem.\nSpecifically, a POMCP solver collects a tree based on sequences of actions and their resulting observations \(h = \{a_1, o_1, \u0026hellip;\}\). 
When planning for a specific action, the scheme samples a series of possible next states from a generative model given your current state and action \(s\u0026rsquo; \sim T(\cdot | s,a)\) and calculates reward \(R(s,a)\) from current state.\nOnce this procedure grows the tree to a certain depth, a point-wise value estimate is calculated from a roll-out.\nFor this specific problem, we modify the typical \u0026ldquo;rollout\u0026rdquo; procedure by essentially performing CoT reasoning with a weighted average of the possible rewards obtained in the end:\n\begin{algorithm} \caption{obtain a value estimate at some leaf state $s_{f}$}\label{alg:cap} \begin{algorithmic} \Ensure $d \u0026gt; f$ \State $s = s_{f}$ \State $L \gets d-n$ \Comment{Depth Remaining in Rollout} \State $\tau \gets \{s_0, \dots, s_{f}\}$ \While{$L \neq 0$} \State $\tau \gets \tau \cup \left\{\arg\max_{s\u0026rsquo;} \qty(p^{thought}_{\theta}(s\u0026rsquo;|s))\right\}$ \State $s = s\u0026rsquo;$ \State $L \gets L-1$ \EndWhile \State $V = \frac{R_{\max} \cdot p_{\theta}^{evaluate}(\tau^{*}|\tau)+R_{\min} \cdot p_{\theta}^{evaluate}(\neg\tau^{*}|\tau)}{R_{\max}+R_{\min}}$\Comment{LM-Posterior Weighted Average of Possible Reward} \State \Return $V$ \end{algorithmic} \end{algorithm}\nwhere, \(p_{\theta}^{thought}\) is the \u0026ldquo;thought\u0026rdquo; generation prompt previously discussed, and \(p_{\theta}^{evaluate}\) is the evaluation prompt to check if a particular trajectory truly solves the target task which is also used in reward calculation. 
Recall that \(\tau^{*}\) represents a trajectory which answers the given question correctly.\nAs CoT is a reasonably performant reasoning procedure that is relatively lightweight to compute, we believe it would serve to raise the lower bound of possible values, and therefore aid the speed of solution in POMCP.\nTask Setup Similar to ToT, we are going to use the Game of 24 as a difficult multi-step reasoning task with which to test the scheme proposed.\nThe Game of 24 is a mathematical reasoning game, which uses four numbers and basic arithmetic operations to obtain a value of 24. For instance, for the problem of 4 9 10 13, a solution trajectory may look as follows:\n\(s_0\): subproblem: 4 9 10 13 \(s_1\): \(13-9=4\), subproblem: 4 4 10 \(s_2\): \(10-4 = 6\), subproblem: 4 6 \(s_3\): \(4 \cdot 6 = 24\), subproblem: 24 which concludes the solution.\nData Source: in order to maintain comparability to ToT, we leverage the exact dataset curated by Yao et al. scraped from 4nums.com as our problem set as well. Importantly, the data is sorted by rate of difficulty (as measured by weighted average time for solution).\nBenchmark: the \u0026ldquo;success rate\u0026rdquo; metric reported involves the success rate across 100 games, corresponding to the metric reported by ToT. Additionally, we also report time-to-solve metrics as measured by the time between the initialization of an empty POMCP tree and obtaining a proposed solution from the scheme.\nLanguage Modeling: distinct from ToT, to perform language modeling, we use two separate language models. \(p_{\theta}^{evaluate}\) and \(p_{\theta}^{value}\) (for \(\mathcal{R}\) and \(\mathcal{O}\) respectively) were computed using GPT-4-Turbo (1106), and \(p_{\theta}^{thought}\) was computed using GPT-3.5-Turbo-Instruct (0914). 
This hybrid approach allows for single-token only inference on the larger GPT-4 models, affording dramatic performance improvements.\nSolving: we performed language model inference through the OpenAI Azure Cognitive Services API and used the POMDPs.jl, BasicPOMCP.jl Julia packages for the orchestration of the solver.\nResults Method Success CoT 4.0% ToT (b=1) 45% ToT (b=5) 74% LS (ours) TODO [plot of dificulty vs time]\nAs shown in [the table], we have [results]. Specifically, we have [these results, which are hopefull ygood]\u0026mdash;far exceeding results from CoT (Wei et al. 2022) and are compatible with the approach in ToT.\nFurthermore, Figure [figure] shows the anytime nature of the proposed solver. As problem difficulty (as rated by solution time-weighted percentage of 4nums.com users\u0026rsquo; solutions) increases, the time it requires for our solver to identify the correct answer increases as well.\nConclusion In this work, we propose Lookahead Sampler (LS), a novel language model decoding scheme that extends ToS (Yao, Yu, et al. 2023) which leverages a large language model\u0026rsquo;s self-reflective reasoning capabilities (Paul et al. 2023; Shinn, Labash, and Gopinath 2023) to guide multi-hop reasoning about a problem.\nWe formalize our approach through the POMDP framework (Kaelbling, Littman, and Cassandra 1998), and demonstrate comparable performance of our approach on the Game of 24 problem to ToT through using the online POMCP (Silver and Veness 2010) solver. Because of the anytime behavior of POMCP, we are able to demonstrate anytime scaling properties of our solver\u0026rsquo;s behavior: more difficult problems takes longer and more LM inferences to solve. Taken together, these properties makes LS a more flexible approach to solving basic multi-step reasoning tasks as compared to previous approaches\u0026mdash;allowing for contemporary LMs to solve more complex problems.\nIn its current form, this work has two key limitations. 
First, similar to that proposed by ToT, the approach is still significantly more computationally expensive than CoT or other direct decoding approaches; therefore, these search techniques is likely unnecessary for problems which can be solved with high accuracy using simpler techniques. Second, posterior distributions\u0026mdash;even when taking only top-k samples for extremely small k\u0026mdash;are still meaningful only on billion-parameter models if used without additional fine tuning (Hu and Levy 2023): making the heuristic-driven performance improvements of LS limited in scope. With additional fine-tuning of surrogate value models, LS could likely perform dramatically more efficiently while obtaining its positive characteristics in solution quality.\nBrown, Tom B., Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, et al. 2020. “Language Models Are Few-Shot Learners.” arXiv. http://arxiv.org/abs/2005.14165. Finlayson, Matthew, John Hewitt, Alexander Koller, Swabha Swayamdipta, and Ashish Sabharwal. 2023. “Closing the Curious Case of Neural Text Degeneration.” arXiv. http://arxiv.org/abs/2310.01693. Holtzman, Ari, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. “The Curious Case of Neural Text Degeneration.” arXiv. http://arxiv.org/abs/1904.09751. Hu, Jennifer, and Roger P Levy. 2023. “Prompting Is Not a Substitute for Probability Measurements in Large Language Models.” In The 2023 Conference on Empirical Methods in Natural Language Processing. Kaelbling, Leslie Pack, Michael L. Littman, and Anthony R. Cassandra. 1998. “Planning and Acting in Partially Observable Stochastic Domains.” Artificial Intelligence 101 (1): 99–134. doi:10.1016/S0004-3702(98)00023-X. Liu, Tianyu, Yizhe Zhang, Chris Brockett, Yi Mao, Zhifang Sui, Weizhu Chen, and Bill Dolan. 2022. 
“A Token-Level Reference-Free Hallucination Detection Benchmark for Free-Form Text Generation.” In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 6723–37. Dublin, Ireland: Association for Computational Linguistics. doi:10.18653/v1/2022.acl-long.464. Paul, Debjit, Mete Ismayilzada, Maxime Peyrard, Beatriz Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. “Refiner: Reasoning Feedback on Intermediate Representations.” arXiv Preprint arXiv:2304.01904. Shinn, Noah, Beck Labash, and Ashwin Gopinath. 2023. “Reflexion: An Autonomous Agent with Dynamic Memory and Self-Reflection.” arXiv Preprint arXiv:2303.11366. Silver, David, and Joel Veness. 2010. “Monte-Carlo Planning in Large POMDPs.” Advances in Neural Information Processing Systems 23. Verma, Siddharth, Justin Fu, Mengjiao Yang, and Sergey Levine. 2022. “CHAI: A CHatbot AI for Task-Oriented Dialogue with Offline Reinforcement Learning.” arXiv. http://arxiv.org/abs/2204.08426. Wei, Jason, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H Chi, Quoc V Le, and Denny Zhou. 2022. “Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.” Yao, Shunyu, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. “Tree of Thoughts: Deliberate Problem Solving with Large Language Models.” arXiv. http://arxiv.org/abs/2305.10601. Yao, Shunyu, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. “ReAct: Synergizing Reasoning and Acting in Language Models.” arXiv. http://arxiv.org/abs/2210.03629. ","html":"\u003ch2 id=\"introduction\"\u003eIntroduction\u003c/h2\u003e\n\u003cp\u003eRecent advances of language models (LMs) introduced the possibility of in-context, few or zero-shot reasoning (\u003ca href=\"#citeproc_bib_item_1\"\u003eBrown et al. 
2020\u003c/a\u003e) using LMs without much or any fine tuning.\u003c/p\u003e\n\u003cp\u003eYet, classically, LM decoding takes place in a left-to-right fashion, auto-regressively resolving one token at a time by sampling from the output distribution of possible next words without multi-step planning.\u003c/p\u003e\n\u003cp\u003eWork in LM agents have taken steps to solve more complex problems that would typically require multi-step reasoning even while using this direct decoding approach. The simplest idea, named \u0026ldquo;chain-of-thoughts\u0026rdquo; (CoT), involves forcing the LM at decode time to begin the decoding process with natural language reasoning about its actions (\u003ca href=\"#citeproc_bib_item_11\"\u003eWei et al. 2022\u003c/a\u003e). The method has contributed to the creation of powerful language agents (\u003ca href=\"#citeproc_bib_item_13\"\u003eYao, Zhao, et al. 2023\u003c/a\u003e) that can reason about complex actions.\u003c/p\u003e\n\u003cp\u003eDespite the relative success of CoT, the scheme still does not support any kind of backtracking as it samples directly from the LM\u0026rsquo;s posterior distribution. When a problem requires a significantly large number of steps to solve, issues relating to \u0026ldquo;de-generation\u0026rdquo; (\u003ca href=\"#citeproc_bib_item_3\"\u003eHoltzman et al. 2020\u003c/a\u003e) becomes increasingly prevalent: whereby, naive maximizations of sequence likelihood results in a most likely sub-phrase being repeated which does not contribute to increased information or progress on a problem.\u003c/p\u003e\n\u003cp\u003eRecent theoretical work suggests these types of degeneration arises due to the distortion of output probability density caused by the last-layer softmax projection into the probability simplex (\u003ca href=\"#citeproc_bib_item_2\"\u003eFinlayson et al. 
2023\u003c/a\u003e): due the lower degrees of freedom offered by a probability syntax, both high and low tails of the latent next-word distribution becomes emphasized in the output probability distribution.\u003c/p\u003e\n\u003cp\u003eTo address this, recent approaches such as Tree of Thoughts (ToT) (\u003ca href=\"#citeproc_bib_item_12\"\u003eYao, Yu, et al. 2023\u003c/a\u003e) have separated the process of next-step proposal (\u0026ldquo;thinking\u0026rdquo;) and choosing the actual step to take given a situation (\u0026ldquo;reasoning\u0026rdquo;). This separate allows the representation of a problem through only short decoding sequences that are less prone to degeneration, while allowing a separate LM call to score the value of being at any given partial solution through a maximum-likely single-token output that is less likely to be distorted.\u003c/p\u003e\n\u003cp\u003eIn this work, we extend the ToT prompting scheme to formalize this process of \u0026ldquo;thinking\u0026rdquo; and \u0026ldquo;reasoning\u0026rdquo; via a Language Model as a Partially Observable Markov Decision Process (POMDP). We call this decoding scheme the Lookahead Sampler (LS).\u003c/p\u003e\n\u003cp\u003eThe key underlying assumption of the proposed LS scheme involves the claim that LMs are able to make judgments about the value of a subsequence towards solving a problem by analyzing the likelihood of a particular sequence against a judgment of value. This assumption is supported by the existence of reinforcement learning formulations of LM-on-LM output verification\u0026mdash;both for reasoning ((\u003ca href=\"#citeproc_bib_item_10\"\u003eVerma et al. 2022\u003c/a\u003e)) and hallucination ((\u003ca href=\"#citeproc_bib_item_6\"\u003eLiu et al. 
2022\u003c/a\u003e))\u0026ndash;as well as the use of LM-inferred state value heuristics in the ToS approach.\u003c/p\u003e\n\u003cp\u003eWe leverage this assumption by, similar to ToT, using the LM\u0026rsquo;s evaluation of the likelihood of a sequence (similar to LM \u0026ldquo;scoring\u0026rdquo; of a \u0026ldquo;thought\u0026rdquo; in ToT) as a heuristic for the coherence and reasoning within a subsequence of LM output\u0026mdash;forming a \u0026ldquo;self reflection\u0026rdquo; scheme similar to other LM-scoring schemes previously proposed (\u003ca href=\"#citeproc_bib_item_7\"\u003ePaul et al. 2023\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_8\"\u003eShinn, Labash, and Gopinath 2023\u003c/a\u003e). Yet, differing from ToT, we explicitly formulate this scoring by an LM as an \u0026ldquo;observation\u0026rdquo; of an unobservable underlying latent understanding of the input sequence.\u003c/p\u003e\n\u003cp\u003eBy solving the LS problem with the anytime POMCP solver (\u003ca href=\"#citeproc_bib_item_9\"\u003eSilver and Veness 2010\u003c/a\u003e), we further demonstrate that LS exhibits stronger anytime characteristics on the Game of 24 task as compared to ToT while maintaining performance that is comparable to ToT and superior to CoT. Lastly, we were able to obtain these results at lower costs to ToT evaluations by using a hybrid language modeling approach by using a larger language model, GPT-4, for posterior sampling and evaluation while using a smaller language model, GPT-3.5-Turbo-Instruct, as the \u0026ldquo;thought\u0026rdquo; generator.\u003c/p\u003e\n\u003ch2 id=\"tree-of-thoughts\"\u003eTree of Thoughts\u003c/h2\u003e\n\u003cp\u003eWe provide here a short summary of the Tree of Thoughts (ToT) (\u003ca href=\"#citeproc_bib_item_12\"\u003eYao, Yu, et al. 2023\u003c/a\u003e) approach that is relevant to our model. 
ToT offers a scheme to enable multi-step reasoning with LMs by presenting a decomposition of multi-step LM reasoning into individual steps which is then combined through classic approaches in search and planning.\u003c/p\u003e\n\u003cp\u003eSpecifically, ToT represents a given problem as a finite-horizon planning problem which it then solves in four broad steps.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThought Decomposition\u003c/strong\u003e: by leveraging problem-specific characteristics, each problem is decomposed into distinct, incremental steps towards a solution. For the \u0026ldquo;Game of 24\u0026rdquo; task, for instance, each \u0026ldquo;thought\u0026rdquo; is considered a line of equation which contributes to the overall task of combining four numbers to reach 24.\u003c/p\u003e\n\u003cp\u003eNow, let \\(p_{\\theta}\\) be our language model, \\(s_{j}^{(i)}\\) be thought candidate \\(j\\) of step \\(i\\) of a decomposed problem, \\(s_{*}^{(i)}\\) the optimal thought to continue from at step \\(i\\), \\(\\tau_{ *} = \\qty[s^{(1)}_{ *}, \u0026hellip;, s^{(n)}_{ *}]\\) a \u0026ldquo;solution\u0026rdquo; to a given problem.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThought Generation\u003c/strong\u003e: multiple, initial short decodings of a LM\u0026mdash;sampling from \\(s\u0026rsquo; \\sim p_{\\theta}^{thought}\\qty(s^{(i+1)} | s^{(i)})\\) is obtained which forms a series of next states (\u0026ldquo;thoughts\u0026rdquo;) which encodes a partial step towards the solution which is reachable at any given state.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThought Evaluation\u003c/strong\u003e: another LM call rates each of the possible next states for their chance in reaching the solution; specifically, we ask the LM to reason about a given state by calculating the posterior probabilities of predicting a specific judgement of value (the words 
\u0026ldquo;sure\u0026rdquo;\u003cem\u003e\u0026ldquo;likely\u0026rdquo;\u003c/em\u003e\u0026ldquo;impossible\u0026rdquo;) given that state; that is: \\(V(s_{j}) = \\arg\\max_{o} \\{p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eProblem Solving\u003c/strong\u003e: finally, given this heuristic, solving a specific problem in ToT involves using a search-and-planning scheme (specifically, DFS or BFS) to cycle between generation and evaluation of thoughts until a terminal thought is reached. Branches on the DFS tree is pruned if they are voted as \u0026ldquo;impossible\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eBy combining explicit planning and LM reasoning, this approach achieved state-of-the-art results on the Game of 24 and other difficult natural-language tasks such as a crossword. However, the ToT approach does not incorporate any form of heuristic-guided preferential planning between different \u0026ldquo;possible\u0026rdquo; states\u0026mdash;in contrast to dynamic approaches which preferentially explore sequences of high probability of success.\u003c/p\u003e\n\u003ch2 id=\"methods\"\u003eMethods\u003c/h2\u003e\n\u003ch3 id=\"problem-formulation\"\u003eProblem Formulation\u003c/h3\u003e\n\u003cp\u003eOur work formalizes and augments the stepwise decoding scheme proposed by ToT as a Partially Observable Markov Decision Process (POMDP) (\u003ca href=\"#citeproc_bib_item_5\"\u003eKaelbling, Littman, and Cassandra 1998\u003c/a\u003e). 
A POMDP is a search and planning formulation which emphasizes the uncertain nature of intermediate steps by formalizing each problem into a tuple \\((\\mathcal{S}, \\mathcal{A}, \\mathcal{O}, \\mathcal{T}, \\mathcal{R}, \\gamma, s_0)\\).\u003c/p\u003e\n\u003cp\u003eWe specifically define our problem formulation as follows:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{S} = S \\times U\\), where \\(s \\in S\\) is each sub-step of our decomposed problem and \\(u \\in U\\) representing the unmeasurable, true underlying value being estimated by \\(V(s)\\) in ToT representing the usefulness of a particular thought\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{A} = [a_0, a_1, a_2]\\), a discrete set of possible problem-solving actions\u0026mdash;to \u0026ldquo;continue\u0026rdquo; expanding a particular branch \\(a_0\\), to \u0026ldquo;rollback\u0026rdquo; to previous branch \\(a_1\\), or to \u0026ldquo;think\u0026rdquo; a new thought at the current branch \\(a_2\\).\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{O} \\in S \\times U\\), exactly the same as \\(\\mathcal{S}\\), but instead of the unobservable underlying value of a given \\(s\\) we obtain \\(V(s)\\) instead from the language model by asking the language model for its judgement regarding a state; importantly, because the observations are simply a sample on the distribution, we can directly use \\(V(s_{j}) \\sim p^{value}_\\theta(o|s_{j}), o \\in \\{ sure, likely, impossible\\}, \\forall j \\in 1 \u0026hellip; n\\)\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{T}\\) is given deterministically given the state and action\u0026mdash;\u0026ldquo;continue\u0026rdquo; appends the current state to the solution trajectory, yielding a new subproblem, and calculates a new thought; \u0026ldquo;rollback\u0026rdquo; pops the last item in the solution trajectory back into the current state and reverts to the previous subproblem; \u0026ldquo;think\u0026rdquo; reformulates a new state given the current 
subproblem\u003c/li\u003e\n\u003cli\u003e\\(\\mathcal{R}\\) is given by a language model evaluator given a final trajectory, where: \\(r_{\\max}\\) is given if the LM believes a trajectory successfully solves the problem, and \\(r_{\\min}\\) is given if the LM believes a trajectory failed to solve the problem\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLastly, for this problem, we set discount \\(\\gamma\\) to \\(1\\) to maximize joint reward, and \\(s_0\\) would be the initial, unsolved problem.\u003c/p\u003e\n\u003ch3 id=\"modified-pomcp\"\u003eModified POMCP\u003c/h3\u003e\n\u003cp\u003eTo solve the formalization given above in an actual problem, we chose the POMCP solver (\u003ca href=\"#citeproc_bib_item_9\"\u003eSilver and Veness 2010\u003c/a\u003e). This solver is chosen for three primary reasons.\u003c/p\u003e\n\u003cp\u003eFirst, by only needing actions and observation sequences as input, the solver requires no explicit distribution on observation and transitions, meaning that simply making concrete samples from the language model posterior is enough to take advantage of its distributional nature.\u003c/p\u003e\n\u003cp\u003eSecond, the POMCP solver has excellent anytime performance characteristics; the search tree for possible solutions will prioritize most possible solutions as rated by intermediate value, but can expand to (at the worst case) an exhaustive search of all possible intermediate states. In particular, easier problems will have stronger heuristic signals, which typically will take less time to solve; this means that a cutoff could be specified by the user to control the speed/accuracy trade-off in solving a given problems.\u003c/p\u003e\n\u003cp\u003eSpecifically, a POMCP solver collects a tree based on sequences of actions and their resulting observations \\(h = \\{a_1, o_1, \u0026hellip;\\}\\). 
When planning for a specific action, the scheme samples a series of possible next states from a generative model given your current state and action \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\) and calculates reward \\(R(s,a)\\) from current state.\u003c/p\u003e\n\u003cp\u003eOnce this procedure grows the tree to a certain depth, a point-wise value estimate is calculated from a roll-out.\u003c/p\u003e\n\u003cp\u003eFor this specific problem, we modify the typical \u0026ldquo;rollout\u0026rdquo; rollout procedure by essentially performing CoT reasoning with a weighted average of the possible rewards in obtained in the end:\u003c/p\u003e\n\u003cp\u003e\\begin{algorithm}\n\\caption{obtain a value estimate at some leaf state $s_{f}$}\\label{alg:cap}\n\\begin{algorithmic}\n\\Ensure $d \u0026gt; f$\n\\State $s = s_{f}$\n\\State $L \\gets d-n$ \\Comment{Depth Remaning in Rollout}\n\\State $\\tau \\gets \\{s_0, \\dots, s_{f}\\}$\n\\While{$L \\neq 0$}\n\\State $\\tau \\gets \\tau \\cup \\left\\{\\arg\\max_{s\u0026rsquo;} \\qty(p^{thought}_{\\theta}(s\u0026rsquo;|s))\\right\\}$\n\\State $s = s\u0026rsquo;$\n\\State $L \\gets L-1$\n\\EndWhile\n\\State $V = \\frac{R_{\\max} \\cdot p_{\\theta}^{evaluate}(\\tau^{*}|\\tau)+R_{\\min} \\cdot p_{\\theta}^{evaluate}(\\neg\\tau^{*}|\\tau)}{R_{\\max}+R_{\\min}}$\\Comment{LM-Posterior Weighted Average of Possible Reward}\n\\State \\Return $V$\n\\end{algorithmic}\n\\end{algorithm}\u003c/p\u003e\n\u003cp\u003ewhere, \\(p_{\\theta}^{thought}\\) is the \u0026ldquo;thought\u0026rdquo; generation prompt previously discussed, and \\(p_{\\theta}^{evaluate}\\) is the evaluation prompt to check if a particular trajectory truly solves the target task which is also used in reward calculation. 
Recall that \\(\\tau^{*}\\) represents a trajectory which answers the given question correctly.\u003c/p\u003e\n\u003cp\u003eAs CoT is a reasonably performant reasoning procedure that is relatively lightweight to compute, we believe it would serve to \u003cem\u003eraise\u003c/em\u003e the lower bound of possible values, and therefore aid the speed of solution in POMCP.\u003c/p\u003e\n\u003ch3 id=\"task-setup\"\u003eTask Setup\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-13_22-38-50_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSimilar to ToT, we are going to use the Game of 24 as a difficult multi-step reasoning task with which to test the scheme proposed.\u003c/p\u003e\n\u003cp\u003eThe Game of 24 is a mathematical reasoning game, which uses four numbers and basic arithmetic operations to obtain a value of 24. For instance, for the problem of \u003ccode\u003e4 9 10 13\u003c/code\u003e, a solution trajectory may look as follows:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(s_0\\): subproblem: 4 9 10 13\u003c/li\u003e\n\u003cli\u003e\\(s_1\\): \\(13-9=4\\), subproblem: 4 4 10\u003c/li\u003e\n\u003cli\u003e\\(s_2\\): \\(10-6 = 6\\), subproblem: 4 6\u003c/li\u003e\n\u003cli\u003e\\(s_3\\): \\(4 \\cdot 6\\), subproblem: 24\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewhich concludes the solution.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eData Source\u003c/strong\u003e: in order to maintain comparability to ToT, we leverage the exact dataset curated by Yao et. al. scraped from 4nums.com as our problem set as well. Importantly, the data is sorted by rate of difficulty (as measured by weighted average time for solution)\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eBenchmark\u003c/strong\u003e: the \u0026ldquo;success rate\u0026rdquo; metric reported involves the success rate across 100 games, corresponding to the metric reported by ToT. 
Additionally, we also report time-to-solve metrics as measured by the time between the initialization of an empty POMCP tree to obtaining a proposed solution from the scheme.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eLanguage Modeling\u003c/strong\u003e: distinct from ToT, to perform language model, we use \u003cem\u003etwo separate language models\u003c/em\u003e. \\(p_{\\theta}^{evaluate}\\) and \\(p_{\\theta}^{value}\\) (for \\(\\mathcal{R}\\) and \\(\\mathcal{O}\\) respectively) were computed using GPT-4-Turbo (1106), and \\(p_{\\theta}^{thought}\\) was computed using GPT-3.5-Turbo-Instruct (0914). This hybrid approach allows for single-token only inference on the larger GPT-4 models, affording dramatic performance improvements.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eSolving\u003c/strong\u003e: we performed language model inference through the OpenAI Azure Cognitive Services API and used the POMDPs.jl, BasicPOMCP.jl Julia packages for the orchestration of the solver.\u003c/p\u003e\n\u003ch3 id=\"results\"\u003eResults\u003c/h3\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eMethod\u003c/th\u003e\n\u003cth\u003eSuccess\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eCoT\u003c/td\u003e\n\u003ctd\u003e4.0%\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eToT (b=1)\u003c/td\u003e\n\u003ctd\u003e45%\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eToT (b=5)\u003c/td\u003e\n\u003ctd\u003e74%\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLS (ours)\u003c/td\u003e\n\u003ctd\u003eTODO\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e[plot of dificulty vs time]\u003c/p\u003e\n\u003cp\u003eAs shown in [the table], we have [results]. Specifically, we have [these results, which are hopefull ygood]\u0026mdash;far exceeding results from CoT (\u003ca href=\"#citeproc_bib_item_11\"\u003eWei et al. 
2022\u003c/a\u003e) and are compatible with the approach in ToT.\u003c/p\u003e\n\u003cp\u003eFurthermore, Figure [figure] shows the anytime nature of the proposed solver. As problem difficulty (as rated by solution time-weighted percentage of 4nums.com users\u0026rsquo; solutions) increases, the time it requires for our solver to identify the correct answer increases as well.\u003c/p\u003e\n\u003ch2 id=\"conclusion\"\u003eConclusion\u003c/h2\u003e\n\u003cp\u003eIn this work, we propose Lookahead Sampler (LS), a novel language model decoding scheme that extends ToS (\u003ca href=\"#citeproc_bib_item_12\"\u003eYao, Yu, et al. 2023\u003c/a\u003e) which leverages a large language model\u0026rsquo;s self-reflective reasoning capabilities (\u003ca href=\"#citeproc_bib_item_7\"\u003ePaul et al. 2023\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_8\"\u003eShinn, Labash, and Gopinath 2023\u003c/a\u003e) to guide multi-hop reasoning about a problem.\u003c/p\u003e\n\u003cp\u003eWe formalize our approach through the POMDP framework (\u003ca href=\"#citeproc_bib_item_5\"\u003eKaelbling, Littman, and Cassandra 1998\u003c/a\u003e), and demonstrate comparable performance of our approach on the Game of 24 problem to ToT through using the online POMCP (\u003ca href=\"#citeproc_bib_item_9\"\u003eSilver and Veness 2010\u003c/a\u003e) solver. Because of the anytime behavior of POMCP, we are able to demonstrate anytime scaling properties of our solver\u0026rsquo;s behavior: more difficult problems takes longer and more LM inferences to solve. Taken together, these properties makes LS a more flexible approach to solving basic multi-step reasoning tasks as compared to previous approaches\u0026mdash;allowing for contemporary LMs to solve more complex problems.\u003c/p\u003e\n\u003cp\u003eIn its current form, this work has two key limitations. 
First, similar to that proposed by ToT, the approach is still \u003cem\u003esignificantly\u003c/em\u003e more computationally expensive than CoT or other direct decoding approaches; therefore, these search techniques is likely unnecessary for problems which can be solved with high accuracy using simpler techniques. Second, posterior distributions\u0026mdash;even when taking only top-k samples for extremely small k\u0026mdash;are still meaningful only on billion-parameter models if used without additional fine tuning (\u003ca href=\"#citeproc_bib_item_4\"\u003eHu and Levy 2023\u003c/a\u003e): making the heuristic-driven performance improvements of LS limited in scope. With additional fine-tuning of surrogate value models, LS could likely perform dramatically more efficiently while obtaining its positive characteristics in solution quality.\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eBrown, Tom B., Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, et al. 2020. “Language Models Are Few-Shot Learners.” arXiv. \u003ca href=\"http://arxiv.org/abs/2005.14165\"\u003ehttp://arxiv.org/abs/2005.14165\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eFinlayson, Matthew, John Hewitt, Alexander Koller, Swabha Swayamdipta, and Ashish Sabharwal. 2023. “Closing the Curious Case of Neural Text Degeneration.” arXiv. \u003ca href=\"http://arxiv.org/abs/2310.01693\"\u003ehttp://arxiv.org/abs/2310.01693\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eHoltzman, Ari, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. “The Curious Case of Neural Text Degeneration.” arXiv. 
\u003ca href=\"http://arxiv.org/abs/1904.09751\"\u003ehttp://arxiv.org/abs/1904.09751\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eHu, Jennifer, and Roger P Levy. 2023. “Prompting Is Not a Substitute for Probability Measurements in Large Language Models.” In \u003ci\u003eThe 2023 Conference on Empirical Methods in Natural Language Processing\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eKaelbling, Leslie Pack, Michael L. Littman, and Anthony R. Cassandra. 1998. “Planning and Acting in Partially Observable Stochastic Domains.” \u003ci\u003eArtificial Intelligence\u003c/i\u003e 101 (1): 99–134. doi:\u003ca href=\"https://doi.org/10.1016/S0004-3702(98)00023-X\"\u003e10.1016/S0004-3702(98)00023-X\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eLiu, Tianyu, Yizhe Zhang, Chris Brockett, Yi Mao, Zhifang Sui, Weizhu Chen, and Bill Dolan. 2022. “A Token-Level Reference-Free Hallucination Detection Benchmark for Free-Form Text Generation.” In \u003ci\u003eProceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\u003c/i\u003e, 6723–37. Dublin, Ireland: Association for Computational Linguistics. doi:\u003ca href=\"https://doi.org/10.18653/v1/2022.acl-long.464\"\u003e10.18653/v1/2022.acl-long.464\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003ePaul, Debjit, Mete Ismayilzada, Maxime Peyrard, Beatriz Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. 
“Refiner: Reasoning Feedback on Intermediate Representations.” \u003ci\u003earXiv Preprint arXiv:2304.01904\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eShinn, Noah, Beck Labash, and Ashwin Gopinath. 2023. “Reflexion: An Autonomous Agent with Dynamic Memory and Self-Reflection.” \u003ci\u003earXiv Preprint arXiv:2303.11366\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003eSilver, David, and Joel Veness. 2010. “Monte-Carlo Planning in Large POMDPs.” \u003ci\u003eAdvances in Neural Information Processing Systems\u003c/i\u003e 23.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_10\"\u003e\u003c/a\u003eVerma, Siddharth, Justin Fu, Mengjiao Yang, and Sergey Levine. 2022. “CHAI: A CHatbot AI for Task-Oriented Dialogue with Offline Reinforcement Learning.” arXiv. \u003ca href=\"http://arxiv.org/abs/2204.08426\"\u003ehttp://arxiv.org/abs/2204.08426\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_11\"\u003e\u003c/a\u003eWei, Jason, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H Chi, Quoc V Le, and Denny Zhou. 2022. “Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.”\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_12\"\u003e\u003c/a\u003eYao, Shunyu, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. “Tree of Thoughts: Deliberate Problem Solving with Large Language Models.” arXiv. \u003ca href=\"http://arxiv.org/abs/2305.10601\"\u003ehttp://arxiv.org/abs/2305.10601\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_13\"\u003e\u003c/a\u003eYao, Shunyu, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. 
“ReAct: Synergizing Reasoning and Acting in Language Models.” arXiv. \u003ca href=\"http://arxiv.org/abs/2210.03629\"\u003ehttp://arxiv.org/abs/2210.03629\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhloop_of_thoughts/","tags":null,"title":"Project Proposal: Lookahead Sampler"},{"categories":null,"contents":"Project80 is a podcast hosted by Houjun Liu, Anoushka Krishnan, Micah Brown, Mia Tavares, among others.\nCollege Application w.r.t. Project80 Cheese mission statement: Project80 is a good way of creating a self-propegating set of learning that would serve to benefit and educate future generations in hopes of creating a more equitable planet.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e is a podcast hosted by \u003ca href=\"/posts/kbhhoujun_liu/\"\u003eHoujun Liu\u003c/a\u003e, \u003ca href=\"/posts/kbhanoushka_krishnan/\"\u003eAnoushka Krishnan\u003c/a\u003e, \u003ca href=\"/posts/kbhmicah_brown/\"\u003eMicah Brown\u003c/a\u003e, \u003ca href=\"/posts/kbhmia_tavares/\"\u003eMia Tavares\u003c/a\u003e, among others.\u003c/p\u003e\n\u003ch2 id=\"college-application--kbhcollege-application-dot-md--w-dot-r-dot-t-dot-project80--kbhproject80-dot-md\"\u003e\u003ca href=\"/posts/kbhcollege_application/\"\u003eCollege Application\u003c/a\u003e w.r.t. \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eCheese mission statement: \u003ca href=\"/posts/kbhproject80/\"\u003eProject80\u003c/a\u003e is a good way of creating a self-propegating set of learning that would serve to benefit and educate future generations in hopes of creating a more equitable planet.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproject80/","tags":null,"title":"Project80"},{"categories":null,"contents":"Natural science education resources traditionally teach only codified theory. 
While theory education is crucial, much of academic science takes place via scrutinizing contested scientific discourse. Due to such resources’ content complexity, high school students are rarely exposed to current, debatable, and relevant science. In response, we introduce Project80: a systemic, student-run protocol to synthesize the latest primary literature in a sub-field into approachable, produced multimedia educational content. The protocol is run by a team of 7 students over the course of 1 month. Students running the protocol consume complex scientific literature, distill relevant data and findings, and synthesize a culminating product of audiovisual content to supplement existing biology and chemistry pedagogy. The system runs independently with limited faculty involvement. Our analysis indicates that the multimedia content created by this protocol will be relevant to roughly 30 courses locally at our institution and will have further extensions in secondary education beyond.\n","html":"\u003cp\u003eNatural science education resources traditionally teach only codified theory. While theory education is crucial, much of academic science takes place via scrutinizing contested scientific discourse. Due to such resources’ content complexity, high school students are rarely exposed to current, debatable, and relevant science. In response, we introduce Project80: a systemic, student-run protocol to synthesize the latest primary literature in a sub-field into approachable, produced multimedia educational content. The protocol is run by a team of 7 students over the course of 1 month. Students running the protocol consume complex scientific literature, distill relevant data and findings, and synthesize a culminating product of audiovisual content to supplement existing biology and chemistry pedagogy. The system runs independently with limited faculty involvement. 
Our analysis indicates that the multimedia content created by this protocol will be relevant to roughly 30 courses locally at our institution and will have further extensions in secondary education beyond.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproject80_abstract/","tags":null,"title":"Project80 Abstract"},{"categories":null,"contents":"Projects Index is a index that contains a list of almost all projects for which I have ever worked on. Major categories are highlighted from chapter titles.\nResearch Projects I end up doing a lot of research these days, and so have isolated that to a different, academic homepage.\nFor a list of my recent research, please head to the Research Index.\nMedia Production Projects I produce a lot of media (videos, podcasts, blogs, live events/talks) as a part of publicizing my work or for other purposes. For those types of projects, head on over to Production Index.\nLarge-Scale Endeavors Condution An open-source task management app. Website.\nMotivation: I got really tired with most other to-do apps after swapping them out over and over again, until I got fed up and built one with some friends.\nRole: Co-Founder, Lead Developer. Technologies: React, Ionic, Firebase, Typescript, Swift, PostgreSQL Key facts: 10,000+ users, 8-person team, featured in the Bay Area almanac, praised by Asana’s head of developer relations for “open-source advocacy” MODAP A R\u0026amp;D team for fireline safety during emergency fires. Repository.\nMotivation: a friend approached me with an opportunity to help our local community, especially with the increased influx of fires.\nRole: Team Lead Technologies: Rust, Torch, ARM, electronics (i2C, UART, messaging protocols, etc.) Key facts: coordinated 5 engineers in developing new technology, supported by Dr. Robert G. 
Gann, Deputy Director, Center of Excellence for Advanced Technology Aerial Firefighting at the state of Colorado as well as Captain Mason of CalFire CMU batchalign A pipeline for the automated preparation of annotated CHAT transcripts from raw audio. Repository.\nMotivation: my work over the summer.\nRole: Author Technologies: Torch, Huggingface, NLTK, CLAN, computational linguistics Key facts: work developed with and maintained under Prof. Brian MacWhinney at CMU\u0026rsquo;s psycolinguistics department. AIBridge A bootcamp for non-CS students in data science. Website\nMotivation:\nRole: Co-Founder, Lecturer Technologies: Python, ScyPy, Scikit-learn, Pandas Key facts: worked with Prof. Xin Liu at UC Davis to develop an introductary one-week bootcamp in ML. We piloted the program this summer at Davis to an in-person group of 20 PhD students in food science sponsored by AIFS. Full-Stack Projects Simon Augmenting the functionality of large-language-models with Elastic. Repository.\nMotivation: LLMs have become more and more prominent, and frameworks like ReAct is finally mature enough to produce coherent, well reasoned responses.\nRole: Author Technologies: Huggingface, GPT-3.5, ElasticSearch tractotato CommonLisp macroset for time tracking. Repo.\nMotivation: I wanted to learn CommonLisp macros syntax after reading the Land of Lisp book.\nRole: author Technologies: CommonLisp Scratchathon Portal Portal to submit projects for a scratch hackathon I hosted. Repo.\nMotivation: my friends McGuy and fuelvin, both content creators on Scratch on YouTube, put together a Scratch hackathon summer of 2020. This is the submission portal.\nRole: author Technologies: React, Vercel, Firebase syzygy Library rethinking to-do list dating to be more flexible and powerful. 
Repo.\nMotivation: a friend and I wanted to innovate beyond the scope of Condution to see how we can abstract away a to-do list system to its bare minimum.\nRole: co-founder, co-author Technologies: Rust positron Library for building lightweight native apps using web tech. Repo.\nMotivation: I wanted to re-make electron to be more lightweight using Suckless\u0026rsquo; Surf browser concept.\nRole: author Technologies: C++, GTK OS/Driver Development Broadcom Wifi/Bluetooth 4377 Chip Linux Driver A driver patchset to support cutting-edge Broadcom 4377 Wifi/Bluetooth chips. Repo.\nMotivation: I needed to be able to use Wifi on my laptop while running Arch Linux.\nRole: author Technologies: C, (small amounts of) Assembly Key facts: integrated into the t2linux pipeline used to make WiFi possible on Linux for most MacBooks released after 2018 Distributed Algorithms and Parallel Computing coveather An encrypted, anonymized system for protected health information verification. Preprint, Repo, and internal note.\nMotivation: I wanted to be able to make vaccine passports more feasible because the current COVID testing/vaccine verification scheme is really bad.\nRole: author Technologies: Clojure, core.async concurrency, Monte-Carlo simulations, blockchain, PGP Key facts: project won first place at the California STEM Fair, and got special recognition from the Yale Science and Engineering assoc. Total award $3000. multischedule A multiple-asynchronous scheduling and delegation algorithm. Repo.\nMotivation: (didn\u0026rsquo;t even come close to getting there) I wanted to create a way to solve or simplify debugging loop overrun problems in robotics codebases.\nRole: author Technologies: Clojure, core.async concurrency rotifer A work-in-progress distributed algorithm for taproot. Repo.\nMotivation: I wanted to make taproot even more distributed if possible.\nRole: author Technologies: Clojure, XML, UDP, ICE simian Exploring OT/CRDT and collaborative text editing for taproot. 
Repo.\nMotivation: I wanted to learn about how apps like Google Docs work, and explore Operational Transformation/CRDT, in hopes of putting it into taproot.\nRole: author Technologies: Clojure, OT, CRDT aron A distributed multi-dimensional optimization tool. Repo.\nMotivation: Nueva\u0026rsquo;s course scheduling was quite a mess, and I wanted to help. It is a very complex problem and this project is in the freezer at the moment.\nRole: author Technologies: CommonLisp mitte Easy UDP sockets. Repo, Docs.\nMotivation: a friend and I wanted to explore UDP.\nRole: co-author Technologies: Rust, UDP, ICE (connection) Cryptography and security See also: coveather.\njrainbow An implementation of a MD5 rainbow table. Repo, Crate.\nMotivation: I wanted to understand how Rainbow Tables worked.\nRole: author Technologies: Rust, MD5 Note-taking Systems and \\(\\LaTeX\\) improvements taproot A shared zettlekasten of notes and learning resources put together by some friends and I. there has been a few iterations. Current Repo, Current Site, Legacy Site, Even More Legacy Site.\nMotivation: I started writing nice \\(\\LaTeX\\) PDFs of my homework, and some friends wanted to have access to it. Later when I mentioned it, another friend had a similar need; so we asked many people to pool our notes and work together to share.\nRole: co-founder, co-lead, developer Technologies: Next.JS, XeLaTeX, GNU Make, Firn, Hugo, Emacs Org, Org-Publish, Markdown blag The zettlekasten you are currently in! My currently maintained personal knowledgebase. Repo, Site.\nMotivation: I wanted to experiment with more advanced note-taking techniques after developing taproot, and it ended up superseeding the note-taking abilities of taproot.\nRole: author Technologies: Next.js, Emacs Org, Hugo gdoc.el A utility to enable GNU Emacs to edit Google Doc documents based on the gdrive utility. 
Repo.\nMotivation: I wanted to edit Google Docs in Emacs!\nRole: author Technologies: GNU Emacs, elisp interesting Things that my friends and I find interesting, chucked on the web and builds itself. Repo, Site. No longer maintained.\nMotivation: many text channels were too clogged with stuff my friend group found interesting, so I wanted to take initiative to collect them.\nRole: co-founder, author Technologies: Next.js, Vercel, remark, CommonMark Markdown Public Configurations borg Automatically configure terminals. Repo.\nMotivation: I needed a way to copy my system terminal config onto a system quickly.\nRole: author Technologies: Bash, Zsh, OhMyZsh .config A group of sane configuration files. Repo.\nMotivation: some Redditors asked for my Config, and I thought I\u0026rsquo;d share it to benefit the community; also for personal backup.\nRole: author, maintainer Technologies: Unix administration, Perl, Ruby, LISP .emacs.d Simple, powerful, and semantic GNU Emacs configuration for personal use. Repo.\nMotivation: I wanted to track my progress in developing a working Emacs config.\nRole: author, maintainer Technologies: GNU Emacs, elisp ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprojects/\"\u003eProjects Index\u003c/a\u003e is a \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003eindex\u003c/a\u003e that contains a list of almost all projects for which I have ever worked on. 
Major categories are highlighted from chapter titles.\u003c/p\u003e\n\u003ch2 id=\"research-projects\"\u003eResearch Projects\u003c/h2\u003e\n\u003cp\u003eI end up doing a lot of research these days, and so have isolated that to a different, academic homepage.\u003c/p\u003e\n\u003cp\u003eFor a list of my recent research, please head to the \u003ca href=\"/posts/kbhresearch_index/\"\u003eResearch Index\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"media-production-projects\"\u003eMedia Production Projects\u003c/h2\u003e\n\u003cp\u003eI produce a lot of media (videos, podcasts, blogs, live events/talks) as a part of publicizing my work or for other purposes. For those types of projects, head on over to \u003ca href=\"/posts/kbhproduction_index/\"\u003eProduction Index\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"large-scale-endeavors\"\u003eLarge-Scale Endeavors\u003c/h2\u003e\n\u003ch3 id=\"condution\"\u003eCondution\u003c/h3\u003e\n\u003cp\u003eAn open-source task management app. \u003ca href=\"https://www.condution.com/\"\u003eWebsite\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I got really tired with most other to-do apps after swapping them out over and over again, until I got fed up and built one with some friends.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Co-Founder, Lead Developer.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: React, Ionic, Firebase, Typescript, Swift, PostgreSQL\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: 10,000+ users, 8-person team, \u003ca href=\"https://www.almanacnews.com/print/story/2021/02/26/community-briefs\"\u003efeatured\u003c/a\u003e in the Bay Area almanac, praised by Asana’s head of developer relations for “open-source 
advocacy”\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"modap\"\u003eMODAP\u003c/h3\u003e\n\u003cp\u003eA R\u0026amp;D team for fireline safety during emergency fires. \u003ca href=\"https://github.com/MODAP/stack\"\u003eRepository\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: a friend approached me with an opportunity to help our local community, especially with the increased influx of fires.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Team Lead\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust, Torch, ARM, electronics (i2C, UART, messaging protocols, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: coordinated 5 engineers in developing new technology, supported by Dr. Robert G. Gann, Deputy Director, Center of Excellence for Advanced Technology Aerial Firefighting at the state of Colorado as well as Captain Mason of CalFire\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cmu-batchalign--kbhbatchalign-dot-md\"\u003eCMU \u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eA pipeline for the automated preparation of annotated CHAT transcripts from raw audio. 
\u003ca href=\"https://github.com/talkbank/batchalign\"\u003eRepository\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: my work over the summer.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Torch, Huggingface, NLTK, CLAN, computational linguistics\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: work developed with and maintained under Prof. Brian MacWhinney at CMU\u0026rsquo;s psycolinguistics department.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"aibridge\"\u003eAIBridge\u003c/h3\u003e\n\u003cp\u003eA bootcamp for non-CS students in data science. \u003ca href=\"/posts/kbhaibridge_course_website/\"\u003eWebsite\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Co-Founder, Lecturer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Python, ScyPy, Scikit-learn, Pandas\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: worked with Prof. Xin Liu at UC Davis to develop an introductary one-week bootcamp in ML. 
We piloted the program this summer at Davis to an in-person group of 20 PhD students in food science sponsored by \u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"full-stack-projects\"\u003eFull-Stack Projects\u003c/h2\u003e\n\u003ch3 id=\"simon\"\u003eSimon\u003c/h3\u003e\n\u003cp\u003eAugmenting the functionality of large-language-models with Elastic. \u003ca href=\"https://github.com/shabang-systems/simon\"\u003eRepository\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: LLMs have become more and more prominent, and frameworks like ReAct is finally mature enough to produce coherent, well reasoned responses.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: Author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Huggingface, GPT-3.5, ElasticSearch\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"tractotato\"\u003etractotato\u003c/h3\u003e\n\u003cp\u003eCommonLisp macroset for time tracking. \u003ca href=\"https://github.com/Jemoka/tractotato\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to learn CommonLisp macros syntax after reading the \u003ca href=\"http://landoflisp.com/\"\u003eLand of Lisp\u003c/a\u003e book.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: CommonLisp\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"scratchathon-portal\"\u003eScratchathon Portal\u003c/h3\u003e\n\u003cp\u003ePortal to submit projects for a scratch hackathon I hosted. 
\u003ca href=\"https://github.com/Jemoka/ScratchathonPortal\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: my friends \u003ca href=\"https://www.youtube.com/channel/UC2MtlTiLxWNQAjHyFZt95Vw\"\u003eMcGuy\u003c/a\u003e and \u003ca href=\"https://www.youtube.com/watch?v=1Fll6uaz5Kk\"\u003efuelvin\u003c/a\u003e, both content creators on Scratch on YouTube, put together a Scratch hackathon summer of 2020. This is the submission portal.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: React, Vercel, Firebase\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"syzygy\"\u003esyzygy\u003c/h3\u003e\n\u003cp\u003eLibrary rethinking to-do list dating to be more flexible and powerful. \u003ca href=\"https://github.com/jklsnt/syzygy\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: a friend and I wanted to innovate beyond the scope of Condution to see how we can abstract away a to-do list system to its bare minimum.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-founder, co-author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"positron\"\u003epositron\u003c/h3\u003e\n\u003cp\u003eLibrary for building lightweight native apps using web tech. 
\u003ca href=\"https://github.com/jklsnt/positron\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to re-make electron to be more lightweight using Suckless\u0026rsquo; Surf browser concept.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: C++, GTK\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"os-driver-development\"\u003eOS/Driver Development\u003c/h2\u003e\n\u003ch3 id=\"broadcom-wifi-bluetooth-4377-chip-linux-driver\"\u003eBroadcom Wifi/Bluetooth 4377 Chip Linux Driver\u003c/h3\u003e\n\u003cp\u003eA driver patchset to support cutting-edge Broadcom 4377 Wifi/Bluetooth chips. \u003ca href=\"https://github.com/Jemoka/linux-mbp-wifi\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I needed to be able to use Wifi on my laptop while running Arch Linux.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: C, (small amounts of) Assembly\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: integrated into the \u003ca href=\"https://wiki.t2linux.org/\"\u003et2linux\u003c/a\u003e pipeline used to make WiFi possible on Linux for most MacBooks released after 2018\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"distributed-algorithms-and-parallel-computing\"\u003eDistributed Algorithms and Parallel Computing\u003c/h2\u003e\n\u003ch3 id=\"coveather\"\u003ecoveather\u003c/h3\u003e\n\u003cp\u003eAn encrypted, anonymized system for 
protected health information verification. \u003ca href=\"https://arxiv.org/abs/2205.02753\"\u003ePreprint\u003c/a\u003e, \u003ca href=\"https://github.com/Jemoka/coveather\"\u003eRepo\u003c/a\u003e, and \u003ca href=\"/posts/kbhcoveather/\"\u003einternal note\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to be able to make vaccine passports more feasible because the current COVID testing/vaccine verification scheme is really bad.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, \u003ccode\u003ecore.async\u003c/code\u003e concurrency, Monte-Carlo simulations, blockchain, PGP\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eKey facts\u003c/strong\u003e\u003c/strong\u003e: project won first place at the California STEM Fair, and got special recognition from the Yale Science and Engineering assoc. Total award $3000.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multischedule\"\u003emultischedule\u003c/h3\u003e\n\u003cp\u003eA multiple-asynchronous scheduling and delegation algorithm. 
\u003ca href=\"https://github.com/Jemoka/multischedule\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: (didn\u0026rsquo;t even come close to getting there) I wanted to create a way to solve or simplify debugging loop overrun problems in robotics codebases.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, \u003ccode\u003ecore.async\u003c/code\u003e concurrency\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"rotifer\"\u003erotifer\u003c/h3\u003e\n\u003cp\u003eA work-in-progress distributed algorithm for \u003ca href=\"#taproot\"\u003etaproot\u003c/a\u003e. \u003ca href=\"https://github.com/jklsnt/rotifer\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to make taproot even more distributed if possible.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, XML, UDP, ICE\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"simian\"\u003esimian\u003c/h3\u003e\n\u003cp\u003eExploring OT/CRDT and collaborative text editing for taproot. 
\u003ca href=\"https://github.com/jklsnt/simian\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to learn about how apps like Google Docs work, and explore Operational Transformation/CRDT, in hopes of putting it into \u003ca href=\"#taproot\"\u003etaproot\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Clojure, OT, CRDT\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"aron\"\u003earon\u003c/h3\u003e\n\u003cp\u003eA distributed multi-dimensional optimization tool. \u003ca href=\"https://github.com/Jemoka/aron\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: \u003ca href=\"/posts/kbhnueva_courses_index/\"\u003eNueva\u003c/a\u003e\u0026rsquo;s course scheduling was quite a mess, and I wanted to help. It is a very complex problem and this project is in the freezer at the moment.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: CommonLisp\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"mitte\"\u003emitte\u003c/h3\u003e\n\u003cp\u003eEasy UDP sockets. 
\u003ca href=\"https://github.com/jklsnt/mitte\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://jklsnt.github.io/mitte/mitte/\"\u003eDocs\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: a friend and I wanted to explore UDP.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust, UDP, ICE (connection)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"cryptography-and-security\"\u003eCryptography and security\u003c/h2\u003e\n\u003cp\u003eSee also: \u003ca href=\"#coveather\"\u003ecoveather\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"jrainbow\"\u003ejrainbow\u003c/h3\u003e\n\u003cp\u003eAn implementation of a MD5 rainbow table. \u003ca href=\"https://github.com/Jemoka/rainbow\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://crates.io/crates/jrainbow\"\u003eCrate\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to understand how Rainbow Tables worked.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Rust, MD5\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"note-taking-systems-and-latex-improvements\"\u003eNote-taking Systems and \\(\\LaTeX\\) improvements\u003c/h2\u003e\n\u003ch3 id=\"taproot\"\u003etaproot\u003c/h3\u003e\n\u003cp\u003eA shared \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e of notes and learning resources put together by some friends and I. there has been a few iterations. 
\u003ca href=\"https://github.com/jklsnt/taproot3\"\u003eCurrent Repo\u003c/a\u003e, \u003ca href=\"https://taproot3.jklsnt.com/\"\u003eCurrent Site\u003c/a\u003e, \u003ca href=\"https://taproot2.shabang.cf/\"\u003eLegacy Site\u003c/a\u003e, \u003ca href=\"https://taproot.shabang.cf/\"\u003eEven More Legacy Site\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I started writing nice \\(\\LaTeX\\) PDFs of my homework, and some friends wanted to have access to it. Later when I mentioned it, another friend had a similar need; so we asked many people to pool our notes and work together to share.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-founder, co-lead, developer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Next.JS, XeLaTeX, GNU Make, Firn, Hugo, Emacs Org, Org-Publish, Markdown\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"blag\"\u003eblag\u003c/h3\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e you are currently in! My currently maintained personal knowledgebase. 
\u003ca href=\"https://github.com/jemoka/blag\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://www.jemoka.com/\"\u003eSite\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to experiment with more advanced note-taking techniques after developing taproot, and it ended up superseeding the note-taking abilities of taproot.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Next.js, Emacs Org, Hugo\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"gdoc-dot-el\"\u003egdoc.el\u003c/h3\u003e\n\u003cp\u003eA utility to enable GNU Emacs to edit Google Doc documents based on the \u003ccode\u003egdrive\u003c/code\u003e utility. \u003ca href=\"https://github.com/Jemoka/gdoc.el\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to edit Google Docs in Emacs!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: GNU Emacs, elisp\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"interesting\"\u003einteresting\u003c/h3\u003e\n\u003cp\u003eThings that my friends and I find interesting, chucked on the web and builds itself. \u003ca href=\"https://github.com/Jemoka/interesting\"\u003eRepo\u003c/a\u003e, \u003ca href=\"https://interesting-blue.vercel.app/\"\u003eSite\u003c/a\u003e. 
No longer maintained.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: many text channels were too clogged with stuff my friend group found interesting, so I wanted to take initiative to collect them.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: co-founder, author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Next.js, Vercel, remark, CommonMark Markdown\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"public-configurations\"\u003ePublic Configurations\u003c/h2\u003e\n\u003ch3 id=\"borg\"\u003eborg\u003c/h3\u003e\n\u003cp\u003eAutomatically configure terminals. \u003ca href=\"https://github.com/Jemoka/Borg\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I needed a way to copy my system terminal config onto a system quickly.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Bash, Zsh, OhMyZsh\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dot-config\"\u003e.config\u003c/h3\u003e\n\u003cp\u003eA group of sane configuration files. 
\u003ca href=\"https://github.com/Jemoka/.config\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: some Redditors asked for my Config, and I thought I\u0026rsquo;d share it to benefit the community; also for personal backup.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author, maintainer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: Unix administration, Perl, Ruby, LISP\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dot-emacs-dot-d\"\u003e.emacs.d\u003c/h3\u003e\n\u003cp\u003eSimple, powerful, and semantic GNU Emacs configuration for personal use. \u003ca href=\"https://github.com/Jemoka/.emacs.d\"\u003eRepo\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eMotivation\u003c/strong\u003e\u003c/strong\u003e: I wanted to track my progress in developing a working Emacs config.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eRole\u003c/strong\u003e\u003c/strong\u003e: author, maintainer\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eTechnologies\u003c/strong\u003e\u003c/strong\u003e: GNU Emacs, elisp\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprojects/","tags":["index"],"title":"Projects Index"},{"categories":null,"contents":"a type of cell\n","html":"\u003cp\u003ea type of \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprokateotic_cell/","tags":null,"title":"prokateotic cell"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhproof/","tags":null,"title":"proof"},{"categories":null,"contents":"A proof structure that uses induction.\nbase case Prove some base case \\(n_0\\)\ninductive step Prove that, given 
\\(n\\), \\(n_{j} \\implies n_{j+1}\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhproof/\"\u003eproof\u003c/a\u003e structure that uses \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003einduction\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"base-case\"\u003ebase case\u003c/h2\u003e\n\u003cp\u003eProve some base case \\(n_0\\)\u003c/p\u003e\n\u003ch2 id=\"inductive-step\"\u003einductive step\u003c/h2\u003e\n\u003cp\u003eProve that, given \\(n\\), \\(n_{j} \\implies n_{j+1}\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproof_by_induction/","tags":null,"title":"proof by induction"},{"categories":null,"contents":"Based on the wise words of a crab, I will start writing down some Proof Design Patterns I saw over Axler.\ninheriting properties (splitting, doing, merging) \u0026ldquo;complex numbers inherit commutativity via real numbers\u0026rdquo;\nconstruct then generalize for uniqueness and existence\ntry to remember to go backwards\nto prove IFF\nzero is cool, and here too!, also \\(1-1=0\\)\n\\(0v = 0\\) \\(1-1 = 0\\) \\(v-v=0\\) a.k.a. 
\\(v+(-v)=0\\) \\(v+0 = v\\) distributivity is epic: it is essentially the only tool to connect scalar multiplication and addition in a vector space\n\u0026ldquo;smallest\u0026rdquo; double containement proofs to show set equivalence: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\ncouple hints\nstep 1: identify hypothesis (assumptions) desired conclusion (results, trying/to/proof) step 2: define write down precise, mathematical notations ","html":"\u003cp\u003eBased on the wise words of a crab, I will start writing down some \u003ca href=\"/posts/kbhproof_design_patterns/\"\u003eProof Design Patterns\u003c/a\u003e I saw over \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eAxler\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-combining-and-splitting\"\u003einheriting properties (splitting, doing, merging)\u003c/a\u003e \u0026ldquo;complex numbers inherit \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e via \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-construct-then-generalize\"\u003econstruct then generalize\u003c/a\u003e for uniqueness and existence\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-try-to-remember-to-go-backwards\"\u003etry to remember to go backwards\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhequivalence/\"\u003eto prove IFF\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003ezero is cool\u003c/a\u003e, \u003ca href=\"/posts/kbhzero_times_vector/\"\u003eand here too!\u003c/a\u003e, also 
\\(1-1=0\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0v = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(1-1 = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(v-v=0\\) a.k.a. \\(v+(-v)=0\\)\u003c/li\u003e\n\u003cli\u003e\\(v+0 = v\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e is epic: it is essentially the only tool to connect scalar multiplication and addition in a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003e\u0026ldquo;smallest\u0026rdquo; double containement proofs\u003c/a\u003e to show set \u003ca href=\"/posts/kbhequivalence/\"\u003eequivalence\u003c/a\u003e: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecouple hints\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estep 1: identify\n\u003cul\u003e\n\u003cli\u003ehypothesis (assumptions)\u003c/li\u003e\n\u003cli\u003edesired conclusion (results, trying/to/proof)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003estep 2: define\n\u003cul\u003e\n\u003cli\u003ewrite down precise, mathematical notations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproof_design_patterns-1/","tags":null,"title":"Proof Design Patterns"},{"categories":null,"contents":"Based on the wise words of a crab, I will start writing down some Proof Design Patterns I saw over Axler.\ninheriting properties (splitting, doing, merging) \u0026ldquo;complex numbers inherit commutativity via real numbers\u0026rdquo;\nconstruct then generalize for uniqueness and existence\ntry to remember to go backwards\nto prove IFF\nzero is cool, and here 
too!, also \\(1-1=0\\)\n\\(0v = 0\\) \\(1-1 = 0\\) \\(v-v=0\\) a.k.a. \\(v+(-v)=0\\) \\(v+0 = v\\) distributivity is epic: it is essentially the only tool to connect scalar multiplication and addition in a vector space\n\u0026ldquo;smallest\u0026rdquo; double containement proofs to show set equivalence: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\ncouple hints\nstep 1: identify hypothesis (assumptions) desired conclusion (results, trying/to/proof) step 2: define write down precise, mathematical notations proving uniqueness: set up two distinct results, show that they are the same\nproving negation: if the \u0026ldquo;negative\u0026rdquo; is distinct, but the direct case is more nebulous, use proves by contradiction\nproof by induction\nespecially if you are dealing with polynomials, try factoring tools to help includes length of linearly-independent list \\(\\leq\\) length of spanning list Uniqueness by construction: uniqueness part of basis of domain\npick one element that does exist pick arbitrary elements and construct a result if we are trying to prove equivalence, double-containment is a good bet\nsee fundamental theorem of linear maps: but basically wehnever you need to construct basis of things start with an arbiturary basis of the subspace and expand into that of the whole space\na loop in the statements makes them all equivalent\nworking with the square of the norm is often easier\n","html":"\u003cp\u003eBased on the wise words of a crab, I will start writing down some \u003ca href=\"/posts/kbhproof_design_patterns/\"\u003eProof Design Patterns\u003c/a\u003e I saw over \u003ca href=\"/posts/kbhlinear_algebra_index/\"\u003eAxler\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-combining-and-splitting\"\u003einheriting properties (splitting, doing, merging)\u003c/a\u003e \u0026ldquo;complex numbers inherit \u003ca 
href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e via \u003ca href=\"/posts/kbhreal_number/\"\u003ereal number\u003c/a\u003es\u0026rdquo;\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-construct-then-generalize\"\u003econstruct then generalize\u003c/a\u003e for uniqueness and existence\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcomplex_number/#insights-try-to-remember-to-go-backwards\"\u003etry to remember to go backwards\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhequivalence/\"\u003eto prove IFF\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadditive_inverse_is_unique_in_a_vector_space/\"\u003ezero is cool\u003c/a\u003e, \u003ca href=\"/posts/kbhzero_times_vector/\"\u003eand here too!\u003c/a\u003e, also \\(1-1=0\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(0v = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(1-1 = 0\\)\u003c/li\u003e\n\u003cli\u003e\\(v-v=0\\) a.k.a. 
\\(v+(-v)=0\\)\u003c/li\u003e\n\u003cli\u003e\\(v+0 = v\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e is epic: it is essentially the only tool to connect scalar multiplication and addition in a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsum_of_subsets/#sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003e\u0026ldquo;smallest\u0026rdquo; double containement proofs\u003c/a\u003e to show set \u003ca href=\"/posts/kbhequivalence/\"\u003eequivalence\u003c/a\u003e: prove one way, then prove the converse (\\(a \\subset b, b\\subset a \\Rightarrow a=b\\))\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecouple hints\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estep 1: identify\n\u003cul\u003e\n\u003cli\u003ehypothesis (assumptions)\u003c/li\u003e\n\u003cli\u003edesired conclusion (results, trying/to/proof)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003estep 2: define\n\u003cul\u003e\n\u003cli\u003ewrite down precise, mathematical notations\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eproving uniqueness: set up two distinct results, show that they are the same\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eproving negation: if the \u0026ldquo;negative\u0026rdquo; is distinct, but the direct case is more nebulous, use proves by contradiction\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhproof_by_induction/\"\u003eproof by induction\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eespecially if you are dealing with polynomials, try factoring\u003c/li\u003e\n\u003cli\u003etools to help includes \u003ca 
href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eUniqueness by construction: uniqueness part of \u003ca href=\"/posts/kbhbasis_of_domain/\"\u003ebasis of domain\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003epick one element that does exist\u003c/li\u003e\n\u003cli\u003epick arbitrary elements and construct a result\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eif we are trying to prove equivalence, double-containment is a good bet\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003efundamental theorem of linear maps\u003c/a\u003e: but basically wehnever you need to construct \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of things start with an arbiturary \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the subspace and expand into that of the whole space\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003ea loop in the statements makes them all equivalent\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhnorm/#properties-of-the-norm\"\u003eworking with the square of the norm is often easier\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhproof_design_patterns/","tags":null,"title":"Proof Design 
Patterns"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhproof_of_work/","tags":null,"title":"proof of work"},{"categories":null,"contents":"propaganda is a form of advertising which:\npropaganda persuades people into believe in a cause often defies reason to reach into ?? See examples:\nUS WWII Propaganda techniques for propaganda Name calling Generalities Transferring of authority Public testimonial Attachment to plane folks Bandwagoning (FOMO) Fear Bad logic Unwanted extrapolation ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpropaganda/\"\u003epropaganda\u003c/a\u003e is a form of \u003ca href=\"/posts/kbhadvertising/\"\u003eadvertising\u003c/a\u003e which:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpropaganda/\"\u003epropaganda\u003c/a\u003e persuades people into believe in a cause\u003c/li\u003e\n\u003cli\u003eoften defies reason to reach into ??\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee examples:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhus_wwii_propaganda/\"\u003eUS WWII Propaganda\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"techniques-for-propaganda\"\u003etechniques for propaganda\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eName calling\u003c/li\u003e\n\u003cli\u003eGeneralities\u003c/li\u003e\n\u003cli\u003eTransferring of authority\u003c/li\u003e\n\u003cli\u003ePublic testimonial\u003c/li\u003e\n\u003cli\u003eAttachment to plane folks\u003c/li\u003e\n\u003cli\u003eBandwagoning (FOMO)\u003c/li\u003e\n\u003cli\u003eFear\u003c/li\u003e\n\u003cli\u003eBad logic\u003c/li\u003e\n\u003cli\u003eUnwanted extrapolation\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpropaganda/","tags":null,"title":"propaganda"},{"categories":null,"contents":"protease helps viruses replication\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprotease/\"\u003eprotease\u003c/a\u003e helps viruses 
replication\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprotease/","tags":null,"title":"protease"},{"categories":null,"contents":"protected groups are features that one shouldn\u0026rsquo;t use: as in, these cannot be used:\nrace color national origin religion age sex and gender sexual orientation physical or mental disability reprisal (grudges) ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003es are features that one shouldn\u0026rsquo;t use: as in, these cannot be used:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003erace\u003c/li\u003e\n\u003cli\u003ecolor\u003c/li\u003e\n\u003cli\u003enational origin\u003c/li\u003e\n\u003cli\u003ereligion\u003c/li\u003e\n\u003cli\u003eage\u003c/li\u003e\n\u003cli\u003esex and gender\u003c/li\u003e\n\u003cli\u003esexual orientation\u003c/li\u003e\n\u003cli\u003ephysical or mental disability\u003c/li\u003e\n\u003cli\u003ereprisal (grudges)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprotected_group/","tags":null,"title":"protected group"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhprotons/","tags":null,"title":"proton"},{"categories":null,"contents":"the fast you are willing to prototype, the more willing you are to fail, the faster you will get to a successful partial solution you can refine and repeat.\nhow to prototype faster? 
In order of decreasing slowness\u0026mdash;-\nbuild out the whole product\u0026hellip; building the minimum viable product\u0026hellip; skeleton prototyping (Figma)\u0026hellip; Pen and paper\u0026hellip; Talking about it The trade-off: each level gives increased fidelity: its closer to what will actually ship, so you can get better+detailed feedback.\n","html":"\u003cp\u003ethe fast you are willing to \u003ca href=\"/posts/kbhprototyping/\"\u003eprototype\u003c/a\u003e, the more willing you are to fail, the faster you will get to a successful partial solution you can refine and repeat.\u003c/p\u003e\n\u003ch2 id=\"how-to-prototype-faster\"\u003ehow to prototype faster?\u003c/h2\u003e\n\u003cp\u003eIn order of decreasing slowness\u0026mdash;-\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebuild out the whole product\u0026hellip;\u003c/li\u003e\n\u003cli\u003ebuilding the minimum viable product\u0026hellip;\u003c/li\u003e\n\u003cli\u003eskeleton prototyping (Figma)\u0026hellip;\u003c/li\u003e\n\u003cli\u003ePen and paper\u0026hellip;\u003c/li\u003e\n\u003cli\u003eTalking about it\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe trade-off: each level gives increased \u003cem\u003efidelity\u003c/em\u003e: its closer to what will actually ship, so you can get better+detailed feedback.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhprototyping/","tags":null,"title":"Prototyping"},{"categories":null,"contents":"A workshop hosted by PSC about Spark.\nContents:\nBig Data ","html":"\u003cp\u003eA workshop hosted by \u003ca href=\"/posts/kbhpsc/\"\u003ePSC\u003c/a\u003e about \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eContents:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbig_data/\"\u003eBig Data\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpsc_big_data_workshop_july_2023/","tags":null,"title":"PSC Big Data Workshop July 2023 
Index"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_pset_1/","tags":null,"title":"PSet 1"},{"categories":null,"contents":"Chapter 3 Problem 3.10 Part a Notably, the slope field is symmetric across the \\(y\\) axis, and repeats with every \\(m\\pi\\) interval about the line \\(\\frac{\\pi}{4}\\).\nPart b We have a stationary value at \\(y = \\frac{\\pi}{4}\\). Beyond that, as initial \\(x\u0026gt;0, y\u0026lt;\\frac{\\pi}{4}\\), solutions will all trend towards \\(y=\\frac{\\pi}{4}\\) as \\(t \\to \\infty\\), because the derivative is positive for that entire region. For \\(x\u0026gt;0, \\frac{\\pi}{2}\u0026gt;y\u0026gt; \\frac{\\pi}{4}\\), the function will also trend towards \\(\\frac{\\pi}{4}\\), as the slope is negative for that entire region. This pattern repeats for all \\(y_0+m\\pi\\). That is, for instance, for \\(y\\) between \\(m\\pi+\\frac{\\pi}{4} \u0026lt; y \u0026lt; m\\pi + \\frac{\\pi}{2}\\), \\(y\\) will trend towards \\(m\\pi + \\frac{\\pi}{4}\\). For initial \\(t\u0026lt;0, y \u0026lt; \\frac{\\pi}{4}\\), most solutions will trend towards \\(-\\infty\\) as the region has negative slope. Yet, as \\(t_0 \\approx 0\\), the function will never hit the singularity point of \\(y = -\\frac{\\pi}{2}\\) before traveling to the \\(t\u0026gt;0\\) side, resulting in it trending towards \\(+\\infty\\). 
Finally, for initial \\(y\u0026gt;\\frac{\\pi}{4}, t\u0026lt;0\\), the function will reach \\(+\\infty\\) because it will hit the positive singularity at \\(\\frac{\\pi}{2}\\).\nChapter 4 Problem 4.1, part a Problem 4.2 Part a Part b Problem 4.3 Part a Part b Problem 4.7 Part a We have:\n\\begin{equation} \\dv{V}{t} = rV \\ln \\qty(\\frac{K}{V}) \\end{equation}\nWe see that when \\(V=K\\), we have \\(\\ln(1) = 0\\) on the right hand side, meaning \\(K\\) is a stationary value.\nNow, we desire that this value is unique in the positive half-line; so, for \\(V \u0026gt; 0\\), we have \\(0 = rV \\ln (\\frac{K}{V})\\), and we desire \\(V=K\\) exactly. Note that \\(V=0\\) would not work, because \\(V\u0026gt;0\\). Therefore, we now have:\n\\begin{equation} \\ln (\\frac{K}{V}) = 0 \\end{equation}\nmeaning \\(\\frac{K}{V} = 1\\). Finally, we have \\(V=K\\), as desired.\nImportantly, note now that:\n\\begin{align} \\dv V rV\\ln (\\frac{K}{V}) \u0026amp;= r(\\ln \\qty(\\frac{K}{V}) + V \\qty(\\frac{V}{K}\\cdot\\qty(K(-1)V^{2})) \\\\ \u0026amp;= r \\qty(\\ln\\qty(\\frac{K}{V}) - V\\qty(\\frac{1}{V})) \\\\ \u0026amp;= r\\qty(\\ln \\qty(\\frac{K}{V}) -1) \\end{align}\nNow, we see at \\(V=K\\), this expression yields \\(r(0-1)\\), meaning \\(-r\\). 
As we have \\(r\u0026gt;0\\) given in the problem, we see that the stationary value is stable.\nPart b We again have:\n\\begin{equation} \\dv{V}{t} = rV\\ln \\qty(\\frac{K}{V}) \\end{equation}\nthat is:\n\\begin{equation} \\dv{V}{t} = -r V \\ln \\qty( \\frac{V}{K}) \\end{equation}\nTaking an integral on both sides using the division method:\n\\begin{equation} \\int \\frac{1}{V\\ln \\qty(\\frac{V}{K})} \\dd{V} = -\\int r \\dd{t} \\end{equation}\nNow, let us treat:\n\\begin{equation} u = \\ln \\qty(\\frac{V}{K}) \\end{equation}\nwe note that \\(\\dd{u} = \\frac{1}{V} \\dd{V}\\).\nHence:\n\\begin{equation} \\int \\frac{1}{u} \\dd{u} = -rt +C \\end{equation}\ntherefore:\n\\begin{equation} \\ln (u) = -rt+C = \\ln \\qty(\\ln \\qty(\\frac{V}{K})) \\end{equation}\nNow, this means that:\n\\begin{equation} \\ln \\qty(\\frac{V}{K}) = Ce^{-rt} \\end{equation}\nPlugging in our initial conditions at \\(t=0\\), we have:\n\\begin{equation} \\ln \\qty(\\frac{V_0}{K}) = C \\end{equation}\nSubstituting that in, we have:\n\\begin{align} \\ln \\qty(\\frac{V}{K}) \u0026amp;= \\ln \\qty(\\frac{V_0}{K})e^{-rt} \\\\ \u0026amp;= \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}}) \\end{align}\nFinally, then, we see that:\n\\begin{equation} \\frac{V}{K} = \\qty(\\frac{V_0}{K})^{e^{-rt}} \\end{equation}\nwhich means:\n\\begin{equation} V = K\\qty(\\frac{V_0}{K})^{e^{-rt}} \\end{equation}\nas desired.\nPart c We want to perform the inverse operation of the previous question.\n\\begin{equation} V(t) = K\\qty(\\frac{V_0}{K})e^{-rt} \\end{equation}\nNow, that means that:\n\\begin{align} V\u0026rsquo;(t) \u0026amp; = K \\qty( \\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) \\qty(e^{-rt}(-r)) \\\\ \u0026amp;= -rk \\qty(\\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) e^{-rt} \\\\ \u0026amp;= -r v(t) \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}}) \\\\ \u0026amp;= -r v(t) \\qty(\\frac{V}{K}) = r V(\\frac{K}{V}) \\end{align}\nas desired\nPart d Problem 4.10 Part a When the right side of the ODE 
\u0026ldquo;vanishes\u0026rdquo;, we have:-\n\\begin{equation} ax \\qty(1- \\frac{x}{b}) - \\frac{x^{2}}{1+x^{2}} = 0 \\end{equation}\nwhich means:\n\\begin{equation} x\\qty(a \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}}) = 0 \\end{equation}\nNow, we have that \\(x = c \u0026gt; 0\\), meaning \\(x\\neq 0\\). Hence, for the top to hold, we have:\n\\begin{equation} a \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}} = 0 \\end{equation}\nMeaning:\n\\begin{equation} a \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}} \\end{equation}\nthat is, the graphs of \\(a \\qty(1- \\frac{x}{b})\\) and \\(\\frac{x}{1+x^{2}}\\) intersect, as desired.\nPart b We know that solutions to the expression given in part a), that\n\\begin{equation} a \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}} \\end{equation}\nare the only locations where positive stationary values exists. Visually, if \\(a(1-\\frac{x}{b})\\) is below the location at \\(x = \\sqrt{3}\\), and the function increases as \\(x\\) decreases, we know that the function is bound to intersect at one point only with the curve of \\(\\frac{x}{1+x^{2}}\\) given which is concave down at values \\(x\u0026lt;\\sqrt{3}\\). We know this intersection point is positive because at \\(x=0\\), which is where the visualized graph of \\(\\frac{x}{(1+x^{2}}\\) changes signs, we have \\(a(1-\\frac{x}{b}) = a\\), and we have \\(a \u0026gt; 0\\).\nPart c If the functions cross, the left term will start out before crossing being large than the right term (because its cross \u0026ldquo;from above\u0026rdquo;, visually). This means that prior to the stationary point, the ODE\u0026rsquo;s right hand side is positive. 
After crossing, the situation in b) gives that the RHS of the ODE is negative now as the second term is given to be larger than the first term.\nThis means that the stationary point is an attractor (positive slope coming from below, negative slope coming from above), so it is stationary.\nChapter 5 Problem 5.2, Part b LHS:\n\\begin{equation} (2+3i)(4-i) = 8 - 2i + 12i +3 = 11 + 10i \\end{equation}\n\\begin{equation} (11+10i)(1-i) = 11 -11i+10i + 10 = 21 - i \\end{equation}\nRHS:\n\\begin{equation} (4-i)(1-i) = 4 -5i -1 = 3 - 5i \\end{equation}\n\\begin{equation} (2+3i)(3-5i) = 6 - 10i + 9i +15 = 21 -i \\end{equation}\nand finally:\n\\begin{equation} 21-i = 21-i \\end{equation}\nProblem 5.3, Part c \\begin{align} \\frac{1-11i}{1-i} \u0026amp;= \\frac{1-11i}{1-i}\\frac{1+i}{1+i} \\\\ \u0026amp;= \\frac{1+i-11i+11}{1+1} \\\\ \u0026amp;= \\frac{12 -10i}{2} \\\\ \u0026amp;= 6 - 5i \\end{align}\nProblem 5.5, Part c \\begin{align} \\qty | \\frac{8-i}{4+7i}| \u0026amp;= |8-i|/|4+7i| \\\\ \u0026amp;= \\sqrt{\\frac{64+1}{16+49}} \\\\ \u0026amp;= \\sqrt{ \\frac{65}{65}} \\\\ \u0026amp;= 1 \\end{align}\nProblem 5.6 Part c We have:\n\\begin{equation} (3+i)(2+i) = (5 + 5i) \\end{equation}\nWriting it in polar form gives:\n\\begin{equation} \\sqrt{50}e^{i \\arctan (1)} = \\sqrt{50}e^{i \\frac{\\pi}{4}} \\end{equation}\nNow, we have:\n\\begin{equation} (50)^{\\frac{1}{4}} e^{i \\frac{\\pi}{8}} \\end{equation}\nFinally, writing this out in rectangular gives:\n\\begin{equation} (50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{8}) + i\\sin \\qty(\\frac{\\pi}{8})) \\end{equation}\nRecall now that:\n\\begin{equation} \\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}} \\end{equation}\nand\n\\begin{equation} \\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}} \\end{equation}\nFinally, that:\n\\begin{equation} \\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2} \\end{equation}\nNow, notice that:\n\\begin{equation} (50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{2 
\\cdot 4}) + i\\sin \\qty(\\frac{\\pi}{2 \\cdot 4})) \\end{equation}\nwhich is equal to, based on the identities above:\n\\begin{equation} (50)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} + i\\sqrt{\\frac{2-\\sqrt{2}}{4}}) \\end{equation}\nPart d We have:\n\\begin{equation} \\frac{2+2i}{i} \\end{equation}\nWriting the top and bottom separately in polar, we have:\n\\begin{equation} \\sqrt{8} e^{i \\arctan (1)} = \\sqrt{8} e^{i \\frac{\\pi}{4}} \\end{equation}\n\\begin{equation} e^{i \\frac{\\pi}{2}} \\end{equation}\nDividing the two expressions gives:\n\\begin{equation} \\sqrt{8} e^{i -\\frac{\\pi}{4}} \\end{equation}\nTaking the square root gives\n\\begin{equation} (8)^{\\frac{1}{4}}e^{i \\frac{-\\pi}{8}} \\end{equation}\nFinally, converting the expression back to polar is almost the same as in part C). Recall that:\nRecall now that:\n\\begin{equation} \\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}} \\end{equation}\nand\n\\begin{equation} \\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}} \\end{equation}\nFinally, that:\n\\begin{equation} \\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2} \\end{equation}\nNow, notice that:\n\\begin{equation} (8)^{\\frac{1}{4}} \\qty(\\cos \\qty(-\\frac{\\pi}{2 \\cdot 4}) + i\\sin \\qty(-\\frac{\\pi}{2 \\cdot 4})) \\end{equation}\nnote that \\(\\sin (-x) = -\\sin (x)\\), while \\(\\cos (-x) = \\cos (x)\\). 
This results in:\n\\begin{equation} (8)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} - i\\sqrt{\\frac{2-\\sqrt{2}}{4}}) \\end{equation}\n","html":"\u003ch2 id=\"chapter-3\"\u003eChapter 3\u003c/h2\u003e\n\u003ch3 id=\"problem-3-dot-10\"\u003eProblem 3.10\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-30-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNotably, the slope field is symmetric across the \\(y\\) axis, and repeats with every \\(m\\pi\\) interval about the line \\(\\frac{\\pi}{4}\\).\u003c/p\u003e\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cp\u003eWe have a stationary value at \\(y = \\frac{\\pi}{4}\\). Beyond that, as initial \\(x\u0026gt;0, y\u0026lt;\\frac{\\pi}{4}\\), solutions will all trend towards \\(y=\\frac{\\pi}{4}\\) as \\(t \\to \\infty\\), because the derivative is positive for that entire region. For \\(x\u0026gt;0, \\frac{\\pi}{2}\u0026gt;y\u0026gt; \\frac{\\pi}{4}\\), the function will also trend towards \\(\\frac{\\pi}{4}\\), as the slope is negative for that entire region. This pattern repeats for all \\(y_0+m\\pi\\). That is, for instance, for \\(y\\) between \\(m\\pi+\\frac{\\pi}{4} \u0026lt; y \u0026lt; m\\pi + \\frac{\\pi}{2}\\), \\(y\\) will trend towards \\(m\\pi + \\frac{\\pi}{4}\\). For initial \\(t\u0026lt;0, y \u0026lt; \\frac{\\pi}{4}\\), most solutions will trend towards \\(-\\infty\\) as the region has negative slope. Yet, as \\(t_0 \\approx 0\\), the function will never hit the singularity point of \\(y = -\\frac{\\pi}{2}\\) before traveling to the \\(t\u0026gt;0\\) side, resulting in it trending towards \\(+\\infty\\). 
Finally, for initial \\(y\u0026gt;\\frac{\\pi}{4}, t\u0026lt;0\\), the function will reach \\(+\\infty\\) because it will hit the positive singularity at \\(\\frac{\\pi}{2}\\).\u003c/p\u003e\n\u003ch2 id=\"chapter-4\"\u003eChapter 4\u003c/h2\u003e\n\u003ch3 id=\"problem-4-dot-1-part-a\"\u003eProblem 4.1, part a\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-31-16_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-2\"\u003eProblem 4.2\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-31-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-32-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-3\"\u003eProblem 4.3\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-32-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-34-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-7\"\u003eProblem 4.7\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{V}{t} = rV \\ln \\qty(\\frac{K}{V})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe see that when \\(V=K\\), we have \\(\\ln(1) = 0\\) on the right hand side, meaning \\(K\\) is a stationary value.\u003c/p\u003e\n\u003cp\u003eNow, we desire that this value is unique in the positive half-line; so, for \\(V \u0026gt; 0\\), we have \\(0 = rV \\ln (\\frac{K}{V})\\), and we desire \\(V=K\\) exactly. Note that \\(V=0\\) would not work, because \\(V\u0026gt;0\\). 
Therefore, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln (\\frac{K}{V}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning \\(\\frac{K}{V} = 1\\). Finally, we have \\(V=K\\), as desired.\u003c/p\u003e\n\u003cp\u003eImportantly, note now that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\dv V rV\\ln (\\frac{K}{V}) \u0026amp;= r(\\ln \\qty(\\frac{K}{V}) + V \\qty(\\frac{V}{K}\\cdot\\qty(K(-1)V^{2})) \\\\\n\u0026amp;= r \\qty(\\ln\\qty(\\frac{K}{V}) - V\\qty(\\frac{1}{V})) \\\\\n\u0026amp;= r\\qty(\\ln \\qty(\\frac{K}{V}) -1)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, we see at \\(V=K\\), this expression yields \\(r(0-1)\\), meaning \\(-r\\). As we have \\(r\u0026gt;0\\) given in the problem, we see that the stationary value is stable.\u003c/p\u003e\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cp\u003eWe again have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{V}{t} = rV\\ln \\qty(\\frac{K}{V})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{V}{t} = -r V \\ln \\qty( \\frac{V}{K})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking an integral on both sides using the division method:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{V\\ln \\qty(\\frac{V}{K})} \\dd{V} = -\\int r \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, let us treat:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = \\ln \\qty(\\frac{V}{K})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe note that \\(\\dd{u} = \\frac{1}{V} \\dd{V}\\).\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{u} \\dd{u} = -rt +C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003etherefore:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln (u) = -rt+C = \\ln \\qty(\\ln \\qty(\\frac{V}{K}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, this means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln \\qty(\\frac{V}{K}) = 
Ce^{-rt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ePlugging in our initial conditions at \\(t=0\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln \\qty(\\frac{V_0}{K}) = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting that in, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\ln \\qty(\\frac{V}{K}) \u0026amp;= \\ln \\qty(\\frac{V_0}{K})e^{-rt} \\\\\n\u0026amp;= \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eFinally, then, we see that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{V}{K} = \\qty(\\frac{V_0}{K})^{e^{-rt}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = K\\qty(\\frac{V_0}{K})^{e^{-rt}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas desired.\u003c/p\u003e\n\u003ch4 id=\"part-c\"\u003ePart c\u003c/h4\u003e\n\u003cp\u003eWe want to perform the inverse operation of the previous question.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV(t) = K\\qty(\\frac{V_0}{K})e^{-rt}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, that means that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nV\u0026rsquo;(t) \u0026amp; = K \\qty( \\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) \\qty(e^{-rt}(-r)) \\\\\n\u0026amp;= -rk \\qty(\\frac{V_0}{K})^{e^{-rt}} \\ln \\qty(\\frac{V_0}{K}) e^{-rt} \\\\\n\u0026amp;= -r v(t) \\ln \\qty(\\qty(\\frac{V_0}{K})^{e^{-rt}}) \\\\\n\u0026amp;= -r v(t) \\qty(\\frac{V}{K}) = r V(\\frac{K}{V})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eas desired\u003c/p\u003e\n\u003ch4 id=\"part-d\"\u003ePart d\u003c/h4\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-18_22-34-36_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"problem-4-dot-10\"\u003eProblem 4.10\u003c/h3\u003e\n\u003ch4 id=\"part-a\"\u003ePart a\u003c/h4\u003e\n\u003cp\u003eWhen the right side of the ODE \u0026ldquo;vanishes\u0026rdquo;, we have:-\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nax \\qty(1- \\frac{x}{b}) - 
\\frac{x^{2}}{1+x^{2}} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\\qty(a \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we have that \\(x = c \u0026gt; 0\\), meaning \\(x\\neq 0\\). Hence, for the top to hold, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\qty(1- \\frac{x}{b}) - \\frac{x}{1+x^{2}} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is, the graphs of \\(a \\qty(1- \\frac{x}{b})\\) and \\(\\frac{x}{1+x^{2}}\\) intersect, as desired.\u003c/p\u003e\n\u003ch4 id=\"part-b\"\u003ePart b\u003c/h4\u003e\n\u003cp\u003eWe know that solutions to the expression given in part a), that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\qty(1- \\frac{x}{b}) = \\frac{x}{1+x^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eare the only locations where positive stationary values exists. Visually, if \\(a(1-\\frac{x}{b})\\) is below the location at \\(x = \\sqrt{3}\\), and the function increases as \\(x\\) decreases, we know that the function is bound to intersect at one point only with the curve of \\(\\frac{x}{1+x^{2}}\\) given which is concave down at values \\(x\u0026lt;\\sqrt{3}\\). We know this intersection point is positive because at \\(x=0\\), which is where the visualized graph of \\(\\frac{x}{(1+x^{2}}\\) changes signs, we have \\(a(1-\\frac{x}{b}) = a\\), and we have \\(a \u0026gt; 0\\).\u003c/p\u003e\n\u003ch4 id=\"part-c\"\u003ePart c\u003c/h4\u003e\n\u003cp\u003eIf the functions cross, the left term will start out before crossing being large than the right term (because its cross \u0026ldquo;from above\u0026rdquo;, visually). This means that prior to the stationary point, the ODE\u0026rsquo;s right hand side is positive. 
After crossing, the situation in b) gives that the RHS of the ODE is negative now as the second term is given to be larger than the first term.\u003c/p\u003e\n\u003cp\u003eThis means that the stationary point is an attractor (positive slope coming from below, negative slope coming from above), so it is stationary.\u003c/p\u003e\n\u003ch2 id=\"chapter-5\"\u003eChapter 5\u003c/h2\u003e\n\u003ch3 id=\"problem-5-dot-2-part-b\"\u003eProblem 5.2, Part b\u003c/h3\u003e\n\u003cp\u003eLHS:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(2+3i)(4-i) = 8 - 2i + 12i +3 = 11 + 10i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(11+10i)(1-i) = 11 -11i+10i + 10 = 21 - i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRHS:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(4-i)(1-i) = 4 -5i -1 = 3 - 5i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(2+3i)(3-5i) = 6 - 10i + 9i +15 = 21 -i\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand finally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n21-i = 21-i\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"problem-5-dot-3-part-c\"\u003eProblem 5.3, Part c\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n\\frac{1-11i}{1-i} \u0026amp;= \\frac{1-11i}{1-i}\\frac{1+i}{1+i} \\\\\n\u0026amp;= \\frac{1+i-11i+11}{1+1} \\\\\n\u0026amp;= \\frac{12 -10i}{2} \\\\\n\u0026amp;= 6 - 5i\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"problem-5-dot-5-part-c\"\u003eProblem 5.5, Part c\u003c/h3\u003e\n\u003cp\u003e\\begin{align}\n\\qty | \\frac{8-i}{4+7i}| \u0026amp;= |8-i|/|4+7i| \\\\\n\u0026amp;= \\sqrt{\\frac{64+1}{16+49}} \\\\\n\u0026amp;= \\sqrt{ \\frac{65}{65}} \\\\\n\u0026amp;= 1\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"problem-5-dot-6\"\u003eProblem 5.6\u003c/h3\u003e\n\u003ch4 id=\"part-c\"\u003ePart c\u003c/h4\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(3+i)(2+i) = (5 + 5i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWriting it in polar form 
gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{50}e^{i \\arctan (1)} = \\sqrt{50}e^{i \\frac{\\pi}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} e^{i \\frac{\\pi}{8}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, writing this out in rectangular gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{8}) + i\\sin \\qty(\\frac{\\pi}{8}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, notice that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} \\qty(\\cos \\qty(\\frac{\\pi}{2 \\cdot 4}) + i\\sin \\qty(\\frac{\\pi}{2 \\cdot 4}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is equal to, based on the identities above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(50)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} + i\\sqrt{\\frac{2-\\sqrt{2}}{4}})\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"part-d\"\u003ePart d\u003c/h4\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{2+2i}{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWriting the top and bottom separately in polar, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{8} e^{i \\arctan (1)} = \\sqrt{8} e^{i \\frac{\\pi}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{i \\frac{\\pi}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eDividing the two expressions 
gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{8} e^{i -\\frac{\\pi}{4}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaking the square root gives\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(8)^{\\frac{1}{4}}e^{i \\frac{-\\pi}{8}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, converting the expression back to polar is almost the same as in part C). Recall that:\u003c/p\u003e\n\u003cp\u003eRecall now that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{x}{2} = \\sqrt{\\frac{1- \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos \\frac{x}{2} = \\sqrt{\\frac{1+ \\cos x}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sin \\frac{\\pi}{4} = \\cos \\frac{\\pi}{4} = \\frac{\\sqrt{2}}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, notice that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(8)^{\\frac{1}{4}} \\qty(\\cos \\qty(-\\frac{\\pi}{2 \\cdot 4}) + i\\sin \\qty(-\\frac{\\pi}{2 \\cdot 4}))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enote that \\(\\sin (-x) = -\\sin (x)\\), while \\(\\cos (-x) = \\cos (x)\\). 
This results in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(8)^{\\frac{1}{4}} \\qty(\\sqrt{\\frac{2+\\sqrt{2}}{4}} - i\\sqrt{\\frac{2-\\sqrt{2}}{4}})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpset_2/","tags":null,"title":"PSet 2"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_3/","tags":null,"title":"PSet 3"},{"categories":null,"contents":"o\n","html":"\u003cp\u003eo\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpset_4/","tags":null,"title":"PSet 4"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_5/","tags":null,"title":"PSet 5"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_6/","tags":null,"title":"PSet 6"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_7/","tags":null,"title":"PSet 7"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_8/","tags":null,"title":"PSet 8"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhpset_9/","tags":null,"title":"PSet 9"},{"categories":null,"contents":"psychoacoustics is the study of sound perception and cognition\nhow does sound work how we perceive it why? and what are its applications? ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpsycoacoustics/\"\u003epsychoacoustics\u003c/a\u003e is the study of \u003ca href=\"/posts/kbhsound/\"\u003esound\u003c/a\u003e perception and cognition\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehow does sound work\u003c/li\u003e\n\u003cli\u003ehow we perceive it\u003c/li\u003e\n\u003cli\u003ewhy? 
and what are its applications?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpsycoacoustics/","tags":null,"title":"psychoacoustics"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhptsd/","tags":null,"title":"PTSD"},{"categories":null,"contents":" titles should have proper noun\ncultural context, summary of the text, thesis\u0026mdash;highlight Kairos (American context)\nconclusion: what\u0026rsquo;s significant of this rhetoric?\naccess credibility\npeer review\npublication (editor and publisher should be named), is there a special interest group?\nauthor\u0026rsquo;s training and publicatino record\nquality of argument (reasoning + evidence, quality of works cited)\ncan you find some of the info else where in reputable sources?\ncite one additional source\nFor instance, talk about Karios specifically\n","html":"\u003col\u003e\n\u003cli\u003e\n\u003cp\u003etitles should have proper noun\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecultural context, summary of the text, thesis\u0026mdash;highlight Kairos (American context)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003econclusion: what\u0026rsquo;s significant of this rhetoric?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eaccess credibility\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003epeer review\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003epublication (editor and publisher should be named), is there a special interest group?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eauthor\u0026rsquo;s training and publicatino record\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003equality of argument (reasoning + evidence, quality of works cited)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecan you find some of the info else where in reputable sources?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ecite one additional 
source\u003c/p\u003e\n\u003cp\u003eFor instance, talk about Karios specifically\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr_notes/","tags":null,"title":"PWR Notes"},{"categories":null,"contents":"Dual influence framework:\nrequires political involvement requires diverse media diet Proposal: based on feedback on TIC-focus on one case study and isolate it well\nQuotes Social media as a means of exposure to the modern world “Daniel Lerner (1958) saw mass media as the main catalyst for social change. Lerner argued that media exposed people who possess traditional values to the “modern” world, and that exposure in turn produced a desire to live in it.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\nCriticism of the argument of social-media driven modernity because of unequal access “Lerner’s arguments were expectedly later criticized. For some, they did not consider the fact that access to mass communication can be highly unequal in some countries in the global South. Work on Latin America, for example, showed that, in rural areas, media are often dominated by elites (Beltrán 1976)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\nIncreased exposure on SM results in increased support “relationship b between Internet use and levels of support for SSM” [is strong] [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 477)\nEconomic development is correlated with strong adoption of self expression “Equipped with reliable longitudinal data newly available, this scholarship demonstrates that there exists an association between levels of economic development and the adoption of “self-expression” values, such as support for gender equality and tolerance for homosexuality.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
468]\nSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot; “Based on social contact theory, which suggests that individuals become more tolerant of groups as they interact with them, some scholars have shown that contact with “imagined” or “vicarious” communities that are diffused through mass media can have an effect on lowering prejudices and improving attitudes toward gay people (Riggle 1996; Schiappa, Gregg, and Hewes 2006).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 468]\nPeople who \u0026ldquo;pay attention to news\u0026rdquo; + \u0026ldquo;use internet daily\u0026rdquo; more likely to support SSM “As depicted in Figure 3, the predicted probabilities of supporting SSM are consistent with our expectation that those who pay attention to the news and use the Internet daily are much more likely to support SSM. We believe this is because those who both pay attention to the news and use the Internet daily are likely to encounter news online that helps diffuse global attitudes about SSM” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 475]\nPeople who just consume internet are not exposed nearly as much to SSM “Meanwhile, for those that use the Internet often, but who do not pay attention to the news, the Internet is likely to be a source of entertainment or social interaction, which are not necessarily associated with encountering new information related to SSM. This interaction between Internet use and news consumption is also evident and significant when the data are disaggregated by year (see appendix).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
475]\nincreased exposure and increased discussion normalizes attitudes\u0026mdash;use of word normalize, depolarize “The results lend credence to the argument, derived from social contact theory, that increased exposure to gays and lesbians as well as to discussions about homosexuality and the merits of SSM may have a normalizing effect on individuals’ attitudes. Research also shows that such interaction can take place through mass media (Berggren and Nilsson 2015)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 478]\nssm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization “we find evidence of moral culture wars between ideologies and show that constituencies that express higher levels of emotion and have fewer actively engaged participants often precede legalization efforts that fail” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 2603]\nsustained interest and overall engagement precede legalization success “We found that policies that passed had a greater percentage of people with sustained interest over time, had greater overall engagement levels, and had significantly higher levels of language related to fairness and openness before the decision. On the other hand, states with policies that failed had higher levels of anxiety, religion, and tentativeness. These findings align with previous research characterizing the same-sex marriage debate as a “culture war” [1], where proponents advocate for it in terms of fairness morality, while opponents argue against it in terms of religious morality.” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 
2610]\nmedia diversity is important, otherwise one side may drown out the other “if one message is able to drown out the other, it may allow said message to dictate the terms of the debate and change minds (and eventually laws) accordingly \u0026hellip; This inquiry argues that media coverage (in terms of both specific frames and the competition between said frames) of same-sex marriage and civil unions shapes levels of opposition to these policies.” [Johnson, 2012, p. 1056]\nparadigm shift happens through tipping the scale, not pushing it over (i.e. less effort) “Morality framing in-and-of itself did not seem to have as much power as equality framing when it came to changing minds between 2004 and 2011, but those interested in spreading a message of morality and rolling back same-sex marriage and civil unions efforts should realize that, should they tip the balance of the competition between these two frames back in their favor, levels of opposition have the potential to shift once more in a way that benefits their policy goals.” [Johnson, 2012, p. 1074]\nexpanding generics is a common attack surface in morality arguments “Experience has shown that it is not possible to debate the recognition of same-sex marriage without opponents making a normative connection between homosexuality and other forms of human relationships such as polygamy.” [Ball, p. 
1900]\nlack of engegement \u0026ldquo;the lack of spirited political engagement \u0026hellip; Comparatively little is offered in support of gay and lesbian rights\u0026rdquo; (95)\ntolerance tolerance among adults toward homosexuals and toward people of different race, collected from the World Values Survey and European Values Study.31 Both cross-sectional and panel results (available on request) suggest that globalization does not generally affect these tolerance measures.32 In other words, increasing economic, social and political integration does not seem to influence the contemporary level of tolerance in the adult population,” [⁨Berggren⁩ and ⁨Nilsson⁩, 2015, p. 384]\npower Thus, participation in itself is an expression of some degree of (enabled) power [Dahlgren, 2016, p. 26]\nSub-claim Development Define polarization and state of play in SSM Yoink from TIC:\ndefine polarization motivate the study of polarization through extreme cases One salient case of depolarization in recent years is SSM. Debate transition from SSM into other intersectionality / rights (CITE). Direct measurements too: gallup\u0026mdash;Republican vs. Democracts gap closed on the topic.\nSSM offers a salient case study of what depolarization can look like. Having gone through the cycle, we are afforded post-hoc analysis of what worked and didn\u0026rsquo;t work.\nWe know what worked: ssm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization, whereas sustained interest and overall engagement precede legalization success\u0026mdash;tied to the depolarization.\nBut why? and specifically, why these things? How can we generalize this?\nA careful study of polarization helps inform why these things worked. We are going to do this from social media because (from the TIC social media is easy). 
The framework will show that [TiC thesis].\nAfter developing such a framework, we will develop this analysis on two cases:\nbriefly on trolls (this is polarizing), and SSM (this is depolarizing), which, given that\u0026rsquo;s what we\u0026rsquo;re after, we are going to exand and learn from to further investigate the motivating dynamics of the framework in context of SSM. \u0026lt;\u0026gt;all of the lit review Begin by developing the framework.\nyipeee\nSSM has all the parts that\u0026rsquo;s needed for this to work. [vocab note: homophilic is not the same thing as MSM. We use it in the network dynamics sense. also not talking about the rest of queer community, which has a much more complex/intersectional set of issues.]\nSo while trolls polarize, SSM is an example of successful polarization.\nRecall previous efforts: 1) high emotion 2) low engagement\u0026mdash;-highly homophilic interactions.\nThe depolarization of SSM follows an extremely similar pattern to the depolarization framework developed through studying social media in general: post-hoc studies highlights that depolarization of SSM is characterized by both increased exposure to the community on social media as well as increased active political engagement with the community. The close correspondence between SSM and the framework, therefore offers an opportunity to directly use the underlying motivations of SSM legalization as a study of the motivation of the framework in general.\nMedia needs to be diverse for SSM because otherwise it creates vicarious communities. SSM\u0026rsquo;s success tied to sustained, diverse interest largely through social media.\nSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot;. One such group: social media as a means of exposure SSM practice. 
Indeed, we notice this before overall support: Increased exposure on SM results in increased support.\nYet, these results are tempered: media diversity is important, otherwise one side may drown out the other and may even be counter-productive. This counter-productivity is created by as given by Novoa, generics\u0026mdash;whereby the counter-party will frame SSM along with other factors to criticize as a group. Therefore, true exposure can only happen if no one side drowns out the other; otherwise it will be vicarious \u0026ldquo;totem\u0026rdquo; effect.\nSSM debates cannot succeed with diverse media alone, it requires active engagement with the topic too SSM is an act of \u0026ldquo;self expression\u0026rdquo;, \u0026ldquo;expression\u0026rdquo; is an active verb of engagement. Active engagement doesn\u0026rsquo;t come from simply being on the internet, which is shown to not be a factor enough to expose to SSM. ⁨Díez⁩ and ⁨Dion⁩ frames this engagement as a process of normalization of these attitudes: use of word normalize is the crucial factor to depolarize.\nConclusion + Discussion Two-prong framework + evidence from SSM:\ndiverse media diet is needed to prevent totem creation + generics active engagement is needed to solidify normalization SSM is perhaps the first because a paradigm shift happens through tipping the scale, not pushing it over (i.e. less effort). 
So its the easiest to tip over.\n","html":"\u003cp\u003eDual influence framework:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003erequires political involvement\u003c/li\u003e\n\u003cli\u003erequires diverse media diet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eProposal: based on feedback on TIC-focus on \u003cstrong\u003eone case study\u003c/strong\u003e and isolate it well\u003c/p\u003e\n\u003ch2 id=\"quotes\"\u003eQuotes\u003c/h2\u003e\n\u003ch3 id=\"social-media-as-a-means-of-exposure-to-the-modern-world\"\u003eSocial media as a means of exposure to the modern world\u003c/h3\u003e\n\u003cp\u003e“Daniel Lerner (1958) saw mass media as the main catalyst for social change. Lerner argued that media exposed people who possess traditional values to the “modern” world, and that exposure in turn produced a desire to live in it.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\u003c/p\u003e\n\u003ch3 id=\"criticism-of-the-argument-of-social-media-driven-modernity-because-of-unequal-access\"\u003eCriticism of the argument of social-media driven modernity because of unequal access\u003c/h3\u003e\n\u003cp\u003e“Lerner’s arguments were expectedly later criticized. For some, they did not consider the fact that access to mass communication can be highly unequal in some countries in the global South. Work on Latin America, for example, showed that, in rural areas, media are often dominated by elites (Beltrán 1976)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 467]\u003c/p\u003e\n\u003ch3 id=\"increased-exposure-on-sm-results-in-increased-support\"\u003eIncreased exposure on SM results in increased support\u003c/h3\u003e\n\u003cp\u003e“relationship b between Internet use and levels of support for SSM” [is strong] [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
477)\u003c/p\u003e\n\u003ch3 id=\"economic-development-is-correlated-with-strong-adoption-of-self-expression\"\u003eEconomic development is correlated with strong adoption of self expression\u003c/h3\u003e\n\u003cp\u003e“Equipped with reliable longitudinal data newly available, this scholarship demonstrates that there exists an association between levels of economic development and the adoption of “self-expression” values, such as support for gender equality and tolerance for homosexuality.” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 468]\u003c/p\u003e\n\u003ch3 id=\"social-contract-theory-indicates-that-people-become-more-tolerant-due-to-interaction-diminishing-the-power-of-vicarious-communities\"\u003eSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot;\u003c/h3\u003e\n\u003cp\u003e“Based on social contact theory, which suggests that individuals become more tolerant of groups as they interact with them, some scholars have shown that contact with “imagined” or “vicarious” communities that are diffused through mass media can have an effect on lowering prejudices and improving attitudes toward gay people (Riggle 1996; Schiappa, Gregg, and Hewes 2006).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 468]\u003c/p\u003e\n\u003ch3 id=\"people-who-pay-attention-to-news-plus-use-internet-daily-more-likely-to-support-ssm\"\u003ePeople who \u0026ldquo;pay attention to news\u0026rdquo; + \u0026ldquo;use internet daily\u0026rdquo; more likely to support SSM\u003c/h3\u003e\n\u003cp\u003e“As depicted in Figure 3, the predicted probabilities of supporting SSM are consistent with our expectation that those who pay attention to the news and use the Internet daily are much more likely to support SSM. 
We believe this is because those who both pay attention to the news and use the Internet daily are likely to encounter news online that helps diffuse global attitudes about SSM” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 475]\u003c/p\u003e\n\u003ch3 id=\"people-who-just-consume-internet-are-not-exposed-nearly-as-much-to-ssm\"\u003ePeople who just consume internet are not exposed nearly as much to SSM\u003c/h3\u003e\n\u003cp\u003e“Meanwhile, for those that use the Internet often, but who do not pay attention to the news, the Internet is likely to be a source of entertainment or social interaction, which are not necessarily associated with encountering new information related to SSM. This interaction between Internet use and news consumption is also evident and significant when the data are disaggregated by year (see appendix).” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 475]\u003c/p\u003e\n\u003ch3 id=\"increased-exposure-and-increased-discussion-normalizes-attitudes-use-of-word-normalize-depolarize\"\u003eincreased exposure and increased discussion \u003cstrong\u003enormalizes\u003c/strong\u003e attitudes\u0026mdash;use of word normalize, depolarize\u003c/h3\u003e\n\u003cp\u003e“The results lend credence to the argument, derived from social contact theory, that increased exposure to gays and lesbians as well as to discussions about homosexuality and the merits of SSM may have a normalizing effect on individuals’ attitudes. Research also shows that such interaction can take place through mass media (Berggren and Nilsson 2015)” [⁨Díez⁩ and ⁨Dion⁩, 2022, p. 
478]\u003c/p\u003e\n\u003ch3 id=\"ssm-debates-preceded-by-1-higher-emotion-and-2-few-active-engagements-results-in-failed-legalization\"\u003essm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization\u003c/h3\u003e\n\u003cp\u003e“we find evidence of moral culture wars between ideologies and show that constituencies that express higher levels of emotion and have fewer actively engaged participants often precede legalization efforts that fail” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 2603]\u003c/p\u003e\n\u003ch3 id=\"sustained-interest-and-overall-engagement-precede-legalization-success\"\u003esustained interest and overall engagement precede legalization success\u003c/h3\u003e\n\u003cp\u003e“We found that policies that passed had a greater percentage of people with sustained interest over time, had greater overall engagement levels, and had significantly higher levels of language related to fairness and openness before the decision. On the other hand, states with policies that failed had higher levels of anxiety, religion, and tentativeness. These findings align with previous research characterizing the same-sex marriage debate as a “culture war” [1], where proponents advocate for it in terms of fairness morality, while opponents argue against it in terms of religious morality.” [⁨Zhang⁩ and ⁨Counts⁩, 2015, p. 2610]\u003c/p\u003e\n\u003ch3 id=\"media-diversity-is-important-otherwise-one-side-may-drown-out-the-other\"\u003emedia diversity is important, otherwise one side may drown out the other\u003c/h3\u003e\n\u003cp\u003e“if one message is able to drown out the other, it may allow said message to dictate the terms of the debate and change minds (and eventually laws) accordingly \u0026hellip; This inquiry argues that media coverage (in terms of both specific frames and the competition between said frames) of same-sex marriage and civil unions shapes levels of opposition to these policies.” [Johnson, 2012, p. 
1056]\u003c/p\u003e\n\u003ch3 id=\"paradigm-shift-happens-through-tipping-the-scale-not-pushing-it-over--i-dot-e-dot-less-effort\"\u003eparadigm shift happens through \u003cstrong\u003etipping\u003c/strong\u003e the scale, not pushing it over (i.e. less effort)\u003c/h3\u003e\n\u003cp\u003e“Morality framing in-and-of itself did not seem to have as much power as equality framing when it came to changing minds between 2004 and 2011, but those interested in spreading a message of morality and rolling back same-sex marriage and civil unions efforts should realize that, should they tip the balance of the competition between these two frames back in their favor, levels of opposition have the potential to shift once more in a way that benefits their policy goals.” [Johnson, 2012, p. 1074]\u003c/p\u003e\n\u003ch3 id=\"expanding-generics-is-a-common-attack-surface-in-morality-arguments\"\u003eexpanding generics is a common attack surface in morality arguments\u003c/h3\u003e\n\u003cp\u003e“Experience has shown that it is not possible to debate the recognition of same-sex marriage without opponents making a normative connection between homosexuality and other forms of human relationships such as polygamy.” [Ball, p. 
1900]\u003c/p\u003e\n\u003ch3 id=\"lack-of-engegement\"\u003elack of engegement\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;the lack of spirited political engagement \u0026hellip; Comparatively little is offered in support of gay and lesbian rights\u0026rdquo; (95)\u003c/p\u003e\n\u003ch3 id=\"tolerance\"\u003etolerance\u003c/h3\u003e\n\u003cp\u003etolerance among adults toward homosexuals and toward people of different race, collected from the World Values Survey and European Values Study.31 Both cross-sectional and panel results (available on request) suggest that globalization does not generally affect these tolerance measures.32 In other words, increasing economic, social and political integration does not seem to influence the contemporary level of tolerance in the adult population,” [⁨Berggren⁩ and ⁨Nilsson⁩, 2015, p. 384]\u003c/p\u003e\n\u003ch3 id=\"power\"\u003epower\u003c/h3\u003e\n\u003cp\u003eThus, participation in itself is an expression of some degree of (enabled) power [Dahlgren, 2016, p. 26]\u003c/p\u003e\n\u003ch2 id=\"sub-claim-development\"\u003eSub-claim Development\u003c/h2\u003e\n\u003ch3 id=\"define-polarization-and-state-of-play-in-ssm\"\u003eDefine polarization and state of play in SSM\u003c/h3\u003e\n\u003cp\u003eYoink from TIC:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edefine polarization\u003c/li\u003e\n\u003cli\u003emotivate the study of polarization through extreme cases\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eOne salient case of depolarization in recent years is SSM. Debate transition from SSM into other intersectionality / rights (CITE). Direct measurements too: gallup\u0026mdash;Republican vs. Democracts gap closed on the topic.\u003c/p\u003e\n\u003cp\u003eSSM offers a salient case study of what depolarization can look like. 
Having gone through the cycle, we are afforded post-hoc analysis of what worked and didn\u0026rsquo;t work.\u003c/p\u003e\n\u003cp\u003eWe know \u003cstrong\u003ewhat\u003c/strong\u003e worked: \u003ca href=\"3880B92C-F887-41CD-8281-A5B2C053D773\"\u003essm debates preceded by 1) higher emotion and 2) few active engagements results in failed legalization\u003c/a\u003e, whereas \u003ca href=\"#sustained-interest-and-overall-engagement-precede-legalization-success\"\u003esustained interest and overall engagement precede legalization success\u003c/a\u003e\u0026mdash;tied to the depolarization.\u003c/p\u003e\n\u003cp\u003eBut \u003cstrong\u003ewhy\u003c/strong\u003e? and specifically, why these things? How can we generalize this?\u003c/p\u003e\n\u003cp\u003eA careful study of polarization helps inform why these things worked. We are going to do this from social media because (from the TIC social media is easy). The framework will show that [TiC thesis].\u003c/p\u003e\n\u003cp\u003eAfter developing such a framework, we will develop this analysis on two cases:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ebriefly on trolls (this is \u003cstrong\u003epolarizing\u003c/strong\u003e), and\u003c/li\u003e\n\u003cli\u003eSSM (this is \u003cstrong\u003edepolarizing\u003c/strong\u003e), which, given that\u0026rsquo;s what we\u0026rsquo;re after, we are going to exand and learn from to further investigate the motivating dynamics of the framework in context of SSM.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"all-of-the-lit-review\"\u003e\u0026lt;\u0026gt;all of the lit review\u003c/h3\u003e\n\u003cp\u003eBegin by developing the framework.\u003c/p\u003e\n\u003cp\u003eyipeee\u003c/p\u003e\n\u003ch3 id=\"ssm-has-all-the-parts-that-s-needed-for-this-to-work-dot\"\u003eSSM has all the parts that\u0026rsquo;s needed for this to work.\u003c/h3\u003e\n\u003cp\u003e[vocab note: homophilic is not the same thing as MSM. We use it in the network dynamics sense. 
also not talking about the rest of queer community, which has a much more complex/intersectional set of issues.]\u003c/p\u003e\n\u003cp\u003eSo while trolls polarize, SSM is an example of successful polarization.\u003c/p\u003e\n\u003cp\u003eRecall previous efforts: 1) high emotion 2) low engagement\u0026mdash;-highly homophilic interactions.\u003c/p\u003e\n\u003cp\u003eThe depolarization of SSM follows an extremely similar pattern to the depolarization framework developed through studying social media in general: post-hoc studies highlights that depolarization of SSM is characterized by both increased exposure to the community on social media as well as increased active political engagement with the community. The close correspondence between SSM and the framework, therefore offers an opportunity to directly use the underlying motivations of SSM legalization as a study of the motivation of the framework in general.\u003c/p\u003e\n\u003ch3 id=\"media-needs-to-be-diverse-for-ssm-because-otherwise-it-creates-vicarious-communities-dot\"\u003eMedia needs to be diverse for SSM because otherwise it creates vicarious communities.\u003c/h3\u003e\n\u003cp\u003eSSM\u0026rsquo;s success tied to sustained, diverse interest largely through social media.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#social-contract-theory-indicates-that-people-become-more-tolerant-due-to-interaction-diminishing-the-power-of-vicarious-communities\"\u003eSocial contract theory indicates that people become more tolerant due to interaction, diminishing the power of \u0026ldquo;vicarious\u0026rdquo; communities\u0026quot;\u003c/a\u003e. One such group: \u003ca href=\"#social-media-as-a-means-of-exposure-to-the-modern-world\"\u003esocial media as a means of exposure SSM practice\u003c/a\u003e. 
Indeed, we notice this before overall support: \u003ca href=\"#increased-exposure-on-sm-results-in-increased-support\"\u003eIncreased exposure on SM results in increased support\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eYet, these results are tempered: \u003ca href=\"#media-diversity-is-important-otherwise-one-side-may-drown-out-the-other\"\u003emedia diversity is important, otherwise one side may drown out the other and may even be counter-productive.\u003c/a\u003e This counter-productivity is created by\n\u003ca href=\"#expanding-generics-is-a-common-attack-surface-in-morality-arguments\"\u003eas given by Novoa, generics\u003c/a\u003e\u0026mdash;whereby the counter-party will frame SSM along with other factors to criticize as a group. Therefore, true exposure can only happen if no one side drowns out the other; otherwise it will be vicarious \u0026ldquo;totem\u0026rdquo; effect.\u003c/p\u003e\n\u003ch3 id=\"ssm-debates-cannot-succeed-with-diverse-media-alone-it-requires-active-engagement-with-the-topic-too\"\u003eSSM debates cannot succeed with diverse media alone, it requires active engagement with the topic too\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#economic-development-is-correlated-with-strong-adoption-of-self-expression\"\u003eSSM is an act of \u0026ldquo;self expression\u0026rdquo;, \u0026ldquo;expression\u0026rdquo; is an active verb of engagement.\u003c/a\u003e Active engagement doesn\u0026rsquo;t come from simply being on the internet, which is shown to \u003ca href=\"#people-who-just-consume-internet-are-not-exposed-nearly-as-much-to-ssm\"\u003enot be a factor enough to expose to SSM\u003c/a\u003e. 
⁨Díez⁩ and ⁨Dion⁩ frames this engagement as a process of \u003ca href=\"#increased-exposure-and-increased-discussion-normalizes-attitudes-use-of-word-normalize-depolarize\"\u003e\u003cstrong\u003enormalization\u003c/strong\u003e of these attitudes: use of word normalize is the crucial factor to depolarize\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"conclusion-plus-discussion\"\u003eConclusion + Discussion\u003c/h3\u003e\n\u003cp\u003eTwo-prong framework + evidence from SSM:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ediverse media diet is needed to prevent totem creation + generics\u003c/li\u003e\n\u003cli\u003eactive engagement is needed to solidify normalization\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSSM is perhaps the first because a \u003ca href=\"#paradigm-shift-happens-through-tipping-the-scale-not-pushing-it-over--i-dot-e-dot-less-effort\"\u003eparadigm shift happens through \u003cstrong\u003etipping\u003c/strong\u003e the scale, not pushing it over (i.e. less effort)\u003c/a\u003e. So its the easiest to tip over.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr1_rba_planning/","tags":null,"title":"PWR1 RBA Planning"},{"categories":null,"contents":"General Information Due Date Topic Important Documents Sunday, Jan 21 AI Alignment Yejin Choi Talk Claim Synthesis Quotes Bin Double entendre which frames the study of AI safety as \u0026ldquo;intellectual\u0026rdquo; (in contrast to War) “And then there are these additional intellectual questions. Can AI, without robust common sense, be truly safe for humanity?”\ndouble intentre: \u0026ldquo;intellectual\u0026rdquo; as in interesting but also \u0026ldquo;intellectual\u0026rdquo; as in worth asking\nLanguage of Extremes: AI safety is a \u0026ldquo;bruce force\u0026rdquo; problem which requires \u0026ldquo;extreme scale\u0026rdquo; And is brute-force scale really the only way and even the correct way to teach AI? 
So I’m often asked these days whether it\u0026rsquo;s even feasible to do any meaningful research without extreme-scale compute.\nFraming AI safety as a war, us-them dynamic between you and AI “Perhaps we can \u0026hellip; seek inspiration from an old-time classic, \u0026ldquo;The Art of War,\u0026rdquo; which tells us, in my interpretation, know your enemy, choose your battles, and innovate your weapons.”\nImporting the language of war\u0026mdash;raising urgency and stakes. Animating: more strongly dynamic.\nAdding scale-optimists to be a part of the antagonized group “Some scale optimists might say, “Don’t worry about this. All of these can be easily fixed by adding similar examples as yet more training data for AI.\u0026quot; But the real question is this. Why should we even do that?”\ncontrast: some \u0026hellip; we—us them dynamic\nWe can\u0026rsquo;t dissect AI models because of big tech \u0026ldquo;we are now at the mercy of those few tech companies because researchers in the larger community do not have the means to truly inspect and dissect these models. 
\u0026quot;\nLanguage of extremes in both directions, taking agency out of the AI too “AI today is unbelievably intelligent and then shockingly stupid.” (pdf) phrasing: contrast\nadv adj and then adv adj\nRaises stakes: uses the language of extremes\nPassive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy “We’ve been told that it’s a research topic of ’70s and ’80s; shouldn’t work on it because it will never work; in fact, don\u0026rsquo;t even say the word to be taken seriously.” (pdf) dropping subject; \u0026ldquo;we\u0026rdquo;—raises urgency by highlighting contrast\nCalls to action; parallel structure associates \u0026ldquo;we\u0026rdquo; for the first time with, :shocked picachu face: organizations “We don\u0026rsquo;t know what\u0026rsquo;s in this, but it should be open and publicly available so that we can inspect and ensure [it supports] diverse norms and values. \u0026hellip; So for this reason, my teams at UW and AI2 have been working on commonsense knowledge graphs ” (pdf) repeated usage of \u0026ldquo;we\u0026rdquo;—bilding a tride\nZoomorphise AI as a new \u0026ldquo;species\u0026rdquo;, in effect animating it “We\u0026rsquo;re now entering a new era in which AI is almost like a new intellectual species with unique strengths and weaknesses compared to humans. 
In order to make this powerful AI sustainable and humanistic, we need to teach AI common sense, norms and values.” (pdf) anthropormorphising\nGiving AI agency, using the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal “And that AI decided to kill humans to utilize them as additional resources, to turn you into paper clips.” (pdf) humans \u0026hellip; you—increase urgency\nSub-Claim Development Victimze AI: AI is a new species with blunt force that\u0026rsquo;s teachable AI has extreme raw power, but is somewhat of an anthropomorphizing new \u0026ldquo;species\u0026rdquo;, which \u0026ldquo;we\u0026rdquo; can \u0026ldquo;teach\u0026rdquo;. This breaks the audiences\u0026rsquo; traditional framing of AI as a blind tool, and empowers the audience to imagine what \u0026ldquo;teaching it\u0026rdquo; looks like. However, Choi uses the language of extremes in both directions, framing AI as \u0026ldquo;stupid\u0026rdquo; to victimize it. [She spent much of the talk giving copious examples]. This places the audience at a position of power, wanting to help the subjugated AI.\nVictimize Audience wanting help AI: pitting the audience against the AI deveolpers After victimizing AI and inspiring audience to teach it, Choi then victimizes audience wanting to help. Choi introduces that big tech disenfranchises the audience\u0026rsquo;s goals of teaching \u0026ldquo;AI\u0026rdquo;, who we are at the \u0026ldquo;mercy of\u0026rdquo;. Establishes an us-them dynamic that Choi continues to develop: passive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy, objectifying it, further establishing and antagonizing the two groups and highlighting how the audience, and by proxy Choi, is disenfranchised.\nWe are at war, and Choi is the savior This two-sided dynamic comes to a head with the language of war. 
AI can kill \u0026ldquo;you\u0026rdquo;-the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal, further justifying the \u0026ldquo;war\u0026rdquo;. With a parallel structure, Choi frames herself and her research as the leader of the \u0026ldquo;audience\u0026rsquo;s side\u0026rdquo;, wading into the unknown in this war. She frames this war as an intellectual one, which she has the credibility ethos to lead.\nThe Claim Choi uses the language of antagonism to victimize AI language models and, by proxy, the audience wanting the help the victimized AI\u0026mdash;placing both groups at a \u0026ldquo;war\u0026rdquo; of disenfranchisement against big-tech model developers. This dynamic allowing Choi to justify her research as the intellectual savior which empowers the fight of humanity in this \u0026ldquo;war\u0026rdquo;.\n","html":"\u003ch2 id=\"general-information\"\u003eGeneral Information\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDue Date\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003eImportant Documents\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSunday, Jan 21\u003c/td\u003e\n\u003ctd\u003eAI Alignment\u003c/td\u003e\n\u003ctd\u003eYejin Choi Talk\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"claim-synthesis\"\u003eClaim Synthesis\u003c/h2\u003e\n\u003ch3 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h3\u003e\n\u003ch4 id=\"double-entendre-which-frames-the-study-of-ai-safety-as-intellectual--in-contrast-to-war\"\u003eDouble entendre which frames the study of AI safety as \u0026ldquo;intellectual\u0026rdquo; (in contrast to War)\u003c/h4\u003e\n\u003cp\u003e“And then there are these additional intellectual questions. 
Can AI, without robust common sense, be truly safe for humanity?”\u003c/p\u003e\n\u003cp\u003edouble intentre: \u0026ldquo;intellectual\u0026rdquo; as in interesting but also \u0026ldquo;intellectual\u0026rdquo; as in worth asking\u003c/p\u003e\n\u003ch4 id=\"language-of-extremes-ai-safety-is-a-bruce-force-problem-which-requires-extreme-scale\"\u003eLanguage of Extremes: AI safety is a \u0026ldquo;bruce force\u0026rdquo; problem which requires \u0026ldquo;extreme scale\u0026rdquo;\u003c/h4\u003e\n\u003cp\u003eAnd is brute-force scale really the only way and even the correct way to teach AI? So I’m often asked these days whether it\u0026rsquo;s even feasible to do any meaningful research without extreme-scale compute.\u003c/p\u003e\n\u003ch4 id=\"framing-ai-safety-as-a-war-us-them-dynamic-between-you-and-ai\"\u003eFraming AI safety as a war, us-them dynamic between you and AI\u003c/h4\u003e\n\u003cp\u003e“Perhaps we can \u0026hellip; seek inspiration from an old-time classic, \u0026ldquo;The Art of War,\u0026rdquo; which tells us, in my interpretation, know your enemy, choose your battles, and innovate your weapons.”\u003c/p\u003e\n\u003cp\u003eImporting the language of war\u0026mdash;raising urgency and stakes. Animating: more strongly dynamic.\u003c/p\u003e\n\u003ch4 id=\"adding-scale-optimists-to-be-a-part-of-the-antagonized-group\"\u003eAdding scale-optimists to be a part of the antagonized group\u003c/h4\u003e\n\u003cp\u003e“Some scale optimists might say, “Don’t worry about this. All of these can be easily fixed by adding similar examples as yet more training data for AI.\u0026quot; But the real question is this. 
Why should we even do that?”\u003c/p\u003e\n\u003cp\u003econtrast: some \u0026hellip; we—us them dynamic\u003c/p\u003e\n\u003ch4 id=\"we-can-t-dissect-ai-models-because-of-big-tech\"\u003eWe can\u0026rsquo;t dissect AI models because of big tech\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;we are now at the mercy of those few tech companies because researchers in the larger community do not have the means to truly inspect and dissect these models. \u0026quot;\u003c/p\u003e\n\u003ch4 id=\"language-of-extremes-in-both-directions-taking-agency-out-of-the-ai-too\"\u003eLanguage of extremes in both directions, taking agency out of the AI too\u003c/h4\u003e\n\u003cp\u003e“AI today is unbelievably intelligent and then shockingly stupid.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=3\u0026amp;annotation=35JSUAXU\"\u003epdf\u003c/a\u003e) phrasing: contrast\u003c/p\u003e\n\u003cp\u003eadv adj and then adv adj\u003c/p\u003e\n\u003cp\u003eRaises stakes: uses the language of extremes\u003c/p\u003e\n\u003ch4 id=\"passive-voice-takes-agency-out-of-us-and-into-the-hands-of-the-enemy\"\u003ePassive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy\u003c/h4\u003e\n\u003cp\u003e“We’ve been told that it’s a research topic of ’70s and ’80s; shouldn’t work on it because it will never work; in fact, don\u0026rsquo;t even say the word to be taken seriously.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=4\u0026amp;annotation=97H3RG7L\"\u003epdf\u003c/a\u003e) dropping subject; \u0026ldquo;we\u0026rdquo;—raises urgency by highlighting contrast\u003c/p\u003e\n\u003ch4 id=\"calls-to-action-parallel-structure-associates-we-for-the-first-time-with-shocked-picachu-face-organizations\"\u003eCalls to action; parallel structure associates \u0026ldquo;we\u0026rdquo; for the first time with, :shocked picachu face: organizations\u003c/h4\u003e\n\u003cp\u003e“We don\u0026rsquo;t know what\u0026rsquo;s in this, but it should be open and 
publicly available so that we can inspect and ensure [it supports] diverse norms and values. \u0026hellip; So for this reason, my teams at UW and AI2 have been working on commonsense knowledge graphs\n” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=5\u0026amp;annotation=8ENT9PEL\"\u003epdf\u003c/a\u003e) repeated usage of \u0026ldquo;we\u0026rdquo;—bilding a tride\u003c/p\u003e\n\u003ch4 id=\"zoomorphise-ai-as-a-new-species-in-effect-animating-it\"\u003eZoomorphise AI as a new \u0026ldquo;species\u0026rdquo;, in effect animating it\u003c/h4\u003e\n\u003cp\u003e“We\u0026rsquo;re now entering a new era in which AI is almost like a new intellectual species with unique strengths and weaknesses compared to humans. In order to make this powerful AI sustainable and humanistic, we need to teach AI common sense, norms and values.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=6\u0026amp;annotation=DLXMRF8U\"\u003epdf\u003c/a\u003e) anthropormorphising\u003c/p\u003e\n\u003ch4 id=\"giving-ai-agency-using-the-second-person-singular-makes-the-killing-feel-more-personal\"\u003eGiving AI agency, using the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal\u003c/h4\u003e\n\u003cp\u003e“And that AI decided to kill humans to utilize them as additional resources, to turn you into paper clips.” (\u003ca href=\"zotero://open-pdf/library/items/W8C8UZWS?page=3\u0026amp;annotation=25H9C3MG\"\u003epdf\u003c/a\u003e) humans \u0026hellip; you—increase urgency\u003c/p\u003e\n\u003ch3 id=\"sub-claim-development\"\u003eSub-Claim Development\u003c/h3\u003e\n\u003ch4 id=\"victimze-ai-ai-is-a-new-species-with-blunt-force-that-s-teachable\"\u003eVictimze AI: AI is a new species with blunt force that\u0026rsquo;s teachable\u003c/h4\u003e\n\u003cp\u003eAI has \u003ca href=\"#language-of-extremes-ai-safety-is-a-bruce-force-problem-which-requires-extreme-scale\"\u003eextreme raw power\u003c/a\u003e, but is somewhat of an 
anthropomorphizing \u003ca href=\"#zoomorphise-ai-as-a-new-species-in-effect-animating-it\"\u003enew \u0026ldquo;species\u0026rdquo;, which \u0026ldquo;we\u0026rdquo; can \u0026ldquo;teach\u0026rdquo;\u003c/a\u003e. This breaks the audiences\u0026rsquo; traditional framing of AI as a blind tool, and empowers the audience to imagine what \u0026ldquo;teaching it\u0026rdquo; looks like. However, Choi uses the \u003ca href=\"#language-of-extremes-in-both-directions-taking-agency-out-of-the-ai-too\"\u003elanguage of extremes in both directions, framing AI as \u0026ldquo;stupid\u0026rdquo; to victimize it\u003c/a\u003e. [She spent much of the talk giving copious examples]. This places the audience at a position of power, wanting to help the subjugated AI.\u003c/p\u003e\n\u003ch4 id=\"victimize-audience-wanting-help-ai-pitting-the-audience-against-the-ai-deveolpers\"\u003eVictimize Audience wanting help AI: pitting the audience against the AI deveolpers\u003c/h4\u003e\n\u003cp\u003eAfter victimizing AI and inspiring audience to teach it, Choi then victimizes audience wanting to help. Choi introduces that \u003ca href=\"#we-can-t-dissect-ai-models-because-of-big-tech\"\u003ebig tech disenfranchises the audience\u0026rsquo;s goals of teaching \u0026ldquo;AI\u0026rdquo;\u003c/a\u003e, who we are at the \u0026ldquo;mercy of\u0026rdquo;. 
Establishes an us-them dynamic that Choi continues to develop: \u003ca href=\"#passive-voice-takes-agency-out-of-us-and-into-the-hands-of-the-enemy\"\u003epassive voice takes agency out of \u0026ldquo;us\u0026rdquo; and into the hands of the enemy, objectifying it\u003c/a\u003e, further establishing and antagonizing the two groups and highlighting how the audience, and by proxy Choi, is disenfranchised.\u003c/p\u003e\n\u003ch4 id=\"we-are-at-war-and-choi-is-the-savior\"\u003eWe are at war, and Choi is the savior\u003c/h4\u003e\n\u003cp\u003eThis two-sided dynamic comes to a head with the \u003ca href=\"#framing-ai-safety-as-a-war-us-them-dynamic-between-you-and-ai\"\u003elanguage of war\u003c/a\u003e. \u003ca href=\"#giving-ai-agency-using-the-second-person-singular-makes-the-killing-feel-more-personal\"\u003eAI can kill \u0026ldquo;you\u0026rdquo;-the second person \u0026ldquo;singular\u0026rdquo; makes the killing feel more personal, further justifying the \u0026ldquo;war\u0026rdquo;\u003c/a\u003e. With a parallel structure, Choi \u003ca href=\"#calls-to-action-parallel-structure-associates-we-for-the-first-time-with-shocked-picachu-face-organizations\"\u003eframes herself and her research as the leader of the \u0026ldquo;audience\u0026rsquo;s side\u0026rdquo;, wading into the unknown\u003c/a\u003e in this war. She frames this war as an \u003ca href=\"#double-entendre-which-frames-the-study-of-ai-safety-as-intellectual--in-contrast-to-war\"\u003eintellectual one\u003c/a\u003e, which she has the credibility \u003cem\u003eethos\u003c/em\u003e to lead.\u003c/p\u003e\n\u003ch3 id=\"the-claim\"\u003eThe Claim\u003c/h3\u003e\n\u003cp\u003eChoi uses the language of antagonism to victimize AI language models and, by proxy, the audience wanting the help the victimized AI\u0026mdash;placing both groups at a \u0026ldquo;war\u0026rdquo; of disenfranchisement against big-tech model developers. 
This dynamic allowing Choi to justify her research as the intellectual savior which empowers the fight of humanity in this \u0026ldquo;war\u0026rdquo;.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr1_rhetorical_analysis_planning/","tags":null,"title":"PWR1 Rhetorical Analysis Essay Planning"},{"categories":null,"contents":"Quotes Bin polarization distorts beliefs about others “Recent years have seen a sharp increase in political polarization in the United States (1–7), leading to deadlock in Congress (8), distorted beliefs about fellow Americans (9, 10), and distrust, hostility, and even violence toward outgroup members (11–13)” [Novoa et al., 2023, p. 1]\ngenerics about particular group is a way that polarized languages manifest “Specifically, we focus on expressions that make claims about a category as a whole (e.g., “Democrats want to defund the police” makes a claim about the category of “Democrats”), also known as generics (34–38).” [Novoa et al., 2023, p. 2]\ngenerics are much more readily remembered “A second distinctive signature of generics documented in prior research is that they tend to be how generalizations are later recalled, even when generalizations are stated in more precise, quantified ways” [Novoa et al., 2023, p. 2]\ngenerics are strongly rejected or accepted based on parity affiliation =\u0026gt; echo chamber “Respondents showed a strong pattern of accepting generics for the target party and rejecting generics for the opposite party.” [Novoa et al., 2023, p. 3]\npeople perceive higher polarization than actually are present due to use of generics “We found that perceived polarization was greater than actual polarization, in two key respects: 1) for nearly every issue, people believed that the two parties were further apart than they actually are, and 2) patterns of generic endorsement were more polarized (i.e., revealed a greater gap between the two parties) than perceptions of prevalence.” [Novoa et al., 2023, p. 
5]\npolarized language is present far more in generic statements “The results of this study support the conclusion that generic language leads to polarized judgments regarding political parties, and does so more than nongeneric language. We obtained three key results: 1) for generic statements (e.g., “Democrats \u0026hellip;”), prevalence estimates were larger for the named party (e.g., Democrats, when the generic statement was about Democrats) than for the unnamed party (e.g., Republicans, when the generic statement was about Democrats); 2) for generic statements, prevalence estimates were above 50% for the named party and below 50% for the unnamed party; and 3) the gap between named and unnamed prevalence estimates was larger for generic statements than for nongeneric statements (such as “Many Democrats support House Bill 858” or “Some Democrats support House Bill 858”).” [Novoa et al., 2023, p. 9]\npolitical science generally believes that political polarization and citizen polarization is different “The predominant view in political science is that the current polarization in Congress has not diffused much into the citizenry” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 5]\npolitical polarization and its prevalence has been discussed since founding of the country “The idea that U.S. politics is necessarily polarized, owing to the intrinsic diversity and size of the country, goes back at least to James Madison and the divergence between Hamiltonian and Jeffersonian economic philosophies” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 
5]\npartisanship switches to minority party when the house switches control “the partisanship of language tends to switch when House control switches, but in the direction of the new minority party.” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 25]\none type of polarization is where people disengage with those with opposite views “The first aspect of political polarization, which we call “interactional polarization,” focuses on a process whereby participants in a debate increasingly interact with likeminded individuals, while disengaging from interactions with others who hold opposing viewpoints.” [Yarchi et al., 2021, p. 101]\nthere\u0026rsquo;s a difference between filter bubbles and increased polarization “Despite the recent salience of theories regarding fragmented “echo chambers” or “filter bubbles,” it remains contentious whether social media do indeed drive such interactional polarization” [Yarchi et al., 2021, p. 101]\nalready known groups typically bring strong agreements, and strangers typically bring disperate views “Heterophilic interactions appear to be more common along so-called “weak ties” occasional communications that are not underfed by strong social bonds such as friendship or sustained collaboration – while most “strong ties” (among friends, within teams, etc.) are predominantly homophilic” [Yarchi et al., 2021, p. 101]\nsocial media users agree more over time “H2 (Interactional Polarization): Interaction patterns on social media become increasingly homophilic over time.” [Yarchi et al., 2021, p. 102]\nisolation results in more extreme contributions “Individuals embedded within more homophilic interaction networks subsequently express more extreme positions in their contributions.” [Yarchi et al., 2021, p. 
102]\nFacebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions “As the world’s foremost social media platform, Facebook’s popularity is arguably derived largely from its capacity to immerse its users in a feed of contents that cater to their personal interests and leanings. To do this, the platform relies heavily on users’ self curated networks of friends, but also on an algorithm that prioritizes content based on users’ interests and support for similar posts, displaying only a small share of predominantly congenial, supportive contents” [Yarchi et al., 2021, p. 104]\nFacebook creates supportive echo chambers “Facebook has become the prime suspect for the creation of homophilic echo chambers” [Yarchi et al., 2021, p. 104]\nTwitter creates asymmetric, non-friend dynamics “Twitter is defined primarily by its unrestricted publicness. Anyone, even non-users, can read any tweet, and any user can respond to any contribution. Users can follow others without a need for permission, enabling asymmetric, non-reciprocated ties.” [Yarchi et al., 2021, p. 104]\nDebate exists between whether Twitter creates or dismantles homogenization, and therefore polarization “Reflecting Twitter’s ambiguous profile, the existing literature yields conflicting findings regarding its tendency toward homophily and polarization (e.g., Kwak et al., 2010 detected little homophily; Weng et al., 2010; Hong \u0026amp; Kim, 2016; Colleoni et al., 2014 found the opposite).” [Yarchi et al., 2021, p. 105]\nCITE: defines social media “social media platform in a narrow sense (following Ellison and Boyd (2013) definition)” [Yarchi et al., 2021, p. 
105]\nUsers tend to express more extreme views if surrounded by likeminded users “Considering the effect of homophilic interactions on expressed positions, our data confirm users’ tendency to express more extreme views if interactions with likeminded users take in a larger share of their social media communications (H3).” [Yarchi et al., 2021, p. 111]\ncannot study only one social media as they have different properties “Beyond questioning the widespread reliance on Twitter (and limited public Facebook) data to draw conclusions about social media as a whole, our study also highlights the perils of inferring dynamic properties from static data.” [Yarchi et al., 2021, p. 114]\ntendency to associate with like-minded people increases echo chambers “That social psychology has long shown this tendency to associate with like-minded others is common cross-culturally. However, there is new fear that the current media system is helping people enter echo chambers more easily than ever before.” [Dubois and Blank, 2018, p. 731]\nTwitter is an isolated slice of the population “Twitter itself is used by a relatively small proportion of the population, about one-quarter of the UK, which is younger, wealthier, and better-educated than Britain as a whole” [Dubois and Blank, 2018, p. 732]\nin the UK, going to a news source like BBC is still more common “going directly to a news source such as the BBC remains more common in the UK (Newman et al., 2017).” [Dubois and Blank, 2018, p. 733]\nstudies don\u0026rsquo;t study the aggregate effect of diverse media “A core problem with this line of research is that most studies select only one or a few media to focus on and so the comparative utility or effects of use of media in a diverse media environment are unclear.” [Dubois and Blank, 2018, p. 
733]\npeople with strong partisanship report consuming a diverse media digest “First, even individuals who have strong partisan affiliation report using both general news sites (which are largely non-partisan and include a variety of issues) and niche news sites (which may be partisan or focused on specific issues) – Republicans and Democrats have media diets which are quite similar” [Dubois and Blank, 2018, p. 734]\nconsumption of mixed media results in incidental exposure to a variety of news sources “While one might receive primarily left-leaning political content on Twitter, they may be incidentally exposed to a right-leaning perspective from a family member on Facebook or they might hear a debate between representatives from various perspectives on a television news broadcast.” [Dubois and Blank, 2018, p. 734]\nthose who are politically aware are going to encounter more perspectives “As Prior argues, political ‘junkies’ are likely to consume a lot of information and therefore may encounter more perspectives and arguments” [Dubois and Blank, 2018, p. 734]\nincreased involvement in politics results actually in less echo chamber “H2: The higher a person’s level of political interest the less likely they are to be in an echo chamber” [Dubois and Blank, 2018, p. 735]\nPeople who are actually disinterested in politics are in an echo chamber “First, that respondents with no political interest are in an echo chamber. We examine this possibility using the regressions in Table 3. The results in this table are based only on the respondents who said they had ‘No interest at all’ in politics, N = 243.” [Dubois and Blank, 2018, p. 
739]\nHigh choice in media doesn\u0026rsquo;t mean a high degree of ability to reconsiliate “A high-choice media environment does not simply mean that individuals develop strategies to deal with the many media options available, though of course they do so as they develop their news and political information repertoires” [Dubois and Blank, 2018, p. 740]\ndiversity in media AND engagement in politics matters “Our results suggest that people who are both not politically interested and who do not use diverse media are more likely to be in an echo chamber. They are less likely check multiple sources or to discover things that change their minds.” [Dubois and Blank, 2018, p. 741]\npolarized language results in greater engagement: but only to trolls and politically engaged users “We also find that polarized language is associated with greater engagement, but this association only holds for politically engaged users (both trolls and regular users). This research clarifies how trolls leverage polarized language and provides an open-source, simple tool for exploration of polarized communications on social media.” [Simchon et al., 2022, p. 1]\nRussian trolls used more polarized language “Again, we find that politically oriented Russian trolls use significantly more polarized language than their politically matched American sample (Russian trolls: M = 5.16, SD = 8.00, and N = 55,726; American controls: M = 2.91, SD = 6.84, and N = 55,726), t(108,836) = 50.61, P \u0026lt; 0.001, and Cohen’s d = 0.30 (for a robustness check, see Supplementary Materials).” [Simchon et al., 2022, p. 4]\nforeign agents increase in their polarization and posting frequency “foreign agents from various countries strategically used polarized language in social media communications, and in a majority of cases we see an increase over time in these attempts.” [Simchon et al., 2022, p. 6]\ndistinction between polarization on issues vs. 
polarization of anger “Scholars have made the conceptual distinction between issue polarization—an ideological, policy-based political divide, and affective polarization, i.e. dislike, distrust, and general animosity of political partisans toward the other political side” [Simchon et al., 2022, p. 6]\nsmall amount of trolls can polarize lots of people “Questions remain as to the extent of influence of trolls’ social media presence on real people. However, it is important to note that even a small number of agents with aggressive attitudes can have a substantial influence on the majority view, a process called “information gerrymandering”” [Simchon et al., 2022, p. 9]\ninteraction with trolls didn\u0026rsquo;t seem to change partisanship “The authors found that only a small fraction of users interacted with Russian trolls, and they did not observe any change in partisan attitude during that time among these users (68).” [Simchon et al., 2022, p. 9]\nsubclaim organization Polarization comes from congenial echo-chambers driven by generic language, which social media is prone to create due to their curation Interaction with in-group only results in more extreme contributions. This is what we typically call an \u0026ldquo;echo chamber\u0026rdquo; (1, 2). One such chamber enviroment is social media, a particularly salient case of this is Facebook, which creates supportive echo chambers.\nNovoa proposes one analysis through linguistics by which such an echo chamber can get created\u0026mdash;generics: easy to remember generalisations. polarized language is present far more in generic statements, and generics about particular group is a way that polarized languages manifest.\nGenerics only function when deployed within a homegenous environment. 
Yet, others have noted that Facebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions\u0026mdash;displaying \u0026ldquo;congenial\u0026rdquo; content that are likely to be homegenous.\nThe congenial environment itself, however, is not enough to create or disrupt polarization; breaking echo chambers requires both a diversity of opinions as well as actual engagement Unlike Facebook\u0026rsquo;s congeniality, Twitter creates asymmetric, non-friend dynamics. Though it shows that it helps dismantle some echo chambers, its not conclusive. Yarachi notes this as the difference between filter bubbles and increased polarization. A \u0026ldquo;filter bubble\u0026rdquo; itself isn\u0026rsquo;t polarization, so what is?\nDubois solves this mystery by arguing that it is people who are actually disinterested in politics are in an echo chamber. Through consumption of mixed media results in incidental exposure to a variety of news sources one has to participate in the conversation to get out the echo chamber.\nMeaning, high choice in media itself (i.e. having facebook AND twitter) doesn\u0026rsquo;t mean a high degree of ability to reconsiliate. It is diversity in media AND engagement in politics matters.\nother notes those who are politically aware are going to encounter more perspectives people with strong partisanship report consuming a diverse media digest By using polarized language to target only politically active users, trolls essentially disrupt the ability to dismantle echo chambers Interaction with trolls didn\u0026rsquo;t seem to change partisanship, yet previous work establishes that a small amount of trolls can polarize lots of people\u0026mdash;so the manner by which trolls work is confusing.\nSimchon notes that Russian trolls used more polarized language. 
Our previous analysis concludes that political activism is an important and inseperable part of breaking an echo chamber; trolls, then take advantage of this fact to disrupt the process of breaking away from polarization by capturing already politically active users, which trolls take part.\nBIN polarization distorts beliefs about others one type of polarization is where people disengage with those with opposite views in the UK, going to a news source like BBC is still more common CITE: defines social media already known groups typically bring strong agreements, and strangers typically bring disperate views cannot study only one social media as they have different properties Twitter is an isolated slice of the population studies don\u0026rsquo;t study the aggregate effect of diverse media political science generally believes that political polarization and citizen polarization is different distinction between polarization on issues vs. polarization of anger language induces perception about polarization foreign agents increase in their polarization and posting frequency partisanship is constantly switching polarization is a long-standing topic ","html":"\u003ch2 id=\"quotes-bin\"\u003eQuotes Bin\u003c/h2\u003e\n\u003ch3 id=\"polarization-distorts-beliefs-about-others\"\u003epolarization distorts beliefs about others\u003c/h3\u003e\n\u003cp\u003e“Recent years have seen a sharp increase in political polarization in the United States (1–7), leading to deadlock in Congress (8), distorted beliefs about fellow Americans (9, 10), and distrust, hostility, and even violence toward outgroup members (11–13)” [Novoa et al., 2023, p. 
1]\u003c/p\u003e\n\u003ch3 id=\"generics-about-particular-group-is-a-way-that-polarized-languages-manifest\"\u003egenerics about particular group is a way that polarized languages manifest\u003c/h3\u003e\n\u003cp\u003e“Specifically, we focus on expressions that make claims about a category as a whole (e.g., “Democrats want to defund the police” makes a claim about the category of “Democrats”), also known as generics (34–38).” [Novoa et al., 2023, p. 2]\u003c/p\u003e\n\u003ch3 id=\"generics-are-much-more-readily-remembered\"\u003egenerics are much more readily remembered\u003c/h3\u003e\n\u003cp\u003e“A second distinctive signature of generics documented in prior research is that they tend to be how generalizations are later recalled, even when generalizations are stated in more precise, quantified ways” [Novoa et al., 2023, p. 2]\u003c/p\u003e\n\u003ch3 id=\"generics-are-strongly-rejected-or-accepted-based-on-parity-affiliation-echo-chamber\"\u003egenerics are strongly rejected or accepted based on parity affiliation =\u0026gt; echo chamber\u003c/h3\u003e\n\u003cp\u003e“Respondents showed a strong pattern of accepting generics for the target party and rejecting generics for the opposite party.” [Novoa et al., 2023, p. 3]\u003c/p\u003e\n\u003ch3 id=\"people-perceive-higher-polarization-than-actually-are-present-due-to-use-of-generics\"\u003epeople perceive higher polarization than actually are present due to use of generics\u003c/h3\u003e\n\u003cp\u003e“We found that perceived polarization was greater than actual polarization, in two key respects: 1) for nearly every issue, people believed that the two parties were further apart than they actually are, and 2) patterns of generic endorsement were more polarized (i.e., revealed a greater gap between the two parties) than perceptions of prevalence.” [Novoa et al., 2023, p. 
5]\u003c/p\u003e\n\u003ch3 id=\"polarized-language-is-present-far-more-in-generic-statements\"\u003epolarized language is present far more in generic statements\u003c/h3\u003e\n\u003cp\u003e“The results of this study support the conclusion that generic language leads to polarized judgments regarding political parties, and does so more than nongeneric language. We obtained three key results: 1) for generic statements (e.g., “Democrats \u0026hellip;”), prevalence estimates were larger for the named party (e.g., Democrats, when the generic statement was about Democrats) than for the unnamed party (e.g., Republicans, when the generic statement was about Democrats); 2) for generic statements, prevalence estimates were above 50% for the named party and below 50% for the unnamed party; and 3) the gap between named and unnamed prevalence estimates was larger for generic statements than for nongeneric statements (such as “Many Democrats support House Bill 858” or “Some Democrats support House Bill 858”).” [Novoa et al., 2023, p. 9]\u003c/p\u003e\n\u003ch3 id=\"political-science-generally-believes-that-political-polarization-and-citizen-polarization-is-different\"\u003epolitical science generally believes that political polarization and citizen polarization is different\u003c/h3\u003e\n\u003cp\u003e“The predominant view in political science is that the current polarization in Congress has not diffused much into the citizenry” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 5]\u003c/p\u003e\n\u003ch3 id=\"political-polarization-and-its-prevalence-has-been-discussed-since-founding-of-the-country\"\u003epolitical polarization and its prevalence has been discussed since founding of the country\u003c/h3\u003e\n\u003cp\u003e“The idea that U.S. 
politics is necessarily polarized, owing to the intrinsic diversity and size of the country, goes back at least to James Madison and the divergence between Hamiltonian and Jeffersonian economic philosophies” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 5]\u003c/p\u003e\n\u003ch3 id=\"partisanship-switches-to-minority-party-when-the-house-switches-control\"\u003epartisanship switches to minority party when the house switches control\u003c/h3\u003e\n\u003cp\u003e“the partisanship of language tends to switch when House control switches, but in the direction of the new minority party.” [“Political Polarization and the Dynamics of Political Language: Evidence from 130 Years of Partisan Speech [with Comments and Discussion]”, 2024, p. 25]\u003c/p\u003e\n\u003ch3 id=\"one-type-of-polarization-is-where-people-disengage-with-those-with-opposite-views\"\u003eone type of polarization is where people disengage with those with opposite views\u003c/h3\u003e\n\u003cp\u003e“The first aspect of political polarization, which we call “interactional polarization,” focuses on a process whereby participants in a debate increasingly interact with likeminded individuals, while disengaging from interactions with others who hold opposing viewpoints.” [Yarchi et al., 2021, p. 101]\u003c/p\u003e\n\u003ch3 id=\"there-s-a-difference-between-filter-bubbles-and-increased-polarization\"\u003ethere\u0026rsquo;s a difference between filter bubbles and increased polarization\u003c/h3\u003e\n\u003cp\u003e“Despite the recent salience of theories regarding fragmented “echo chambers” or “filter bubbles,” it remains contentious whether social media do indeed drive such interactional polarization” [Yarchi et al., 2021, p. 
101]\u003c/p\u003e\n\u003ch3 id=\"already-known-groups-typically-bring-strong-agreements-and-strangers-typically-bring-disperate-views\"\u003ealready known groups typically bring strong agreements, and strangers typically bring disperate views\u003c/h3\u003e\n\u003cp\u003e“Heterophilic interactions appear to be more common along so-called “weak ties” occasional communications that are not underfed by strong social bonds such as friendship or sustained collaboration – while most “strong ties” (among friends, within teams, etc.) are predominantly homophilic” [Yarchi et al., 2021, p. 101]\u003c/p\u003e\n\u003ch3 id=\"social-media-users-agree-more-over-time\"\u003esocial media users agree more over time\u003c/h3\u003e\n\u003cp\u003e“H2 (Interactional Polarization): Interaction patterns on social media become increasingly homophilic over time.” [Yarchi et al., 2021, p. 102]\u003c/p\u003e\n\u003ch3 id=\"isolation-results-in-more-extreme-contributions\"\u003eisolation results in more extreme contributions\u003c/h3\u003e\n\u003cp\u003e“Individuals embedded within more homophilic interaction networks subsequently express more extreme positions in their contributions.” [Yarchi et al., 2021, p. 102]\u003c/p\u003e\n\u003ch3 id=\"facebook-s-chief-innovation-is-to-leverage-the-in-group-homogeneity-to-create-supportive-opinions\"\u003eFacebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions\u003c/h3\u003e\n\u003cp\u003e“As the world’s foremost social media platform, Facebook’s popularity is arguably derived largely from its capacity to immerse its users in a feed of contents that cater to their personal interests and leanings. To do this, the platform relies heavily on users’ self curated networks of friends, but also on an algorithm that prioritizes content based on users’ interests and support for similar posts, displaying only a small share of predominantly congenial, supportive contents” [Yarchi et al., 2021, p. 
104]\u003c/p\u003e\n\u003ch3 id=\"facebook-creates-supportive-echo-chambers\"\u003eFacebook creates supportive echo chambers\u003c/h3\u003e\n\u003cp\u003e“Facebook has become the prime suspect for the creation of homophilic echo chambers” [Yarchi et al., 2021, p. 104]\u003c/p\u003e\n\u003ch3 id=\"twitter-creates-asymmetric-non-friend-dynamics\"\u003eTwitter creates asymmetric, non-friend dynamics\u003c/h3\u003e\n\u003cp\u003e“Twitter is defined primarily by its unrestricted publicness. Anyone, even non-users, can read any tweet, and any user can respond to any contribution. Users can follow others without a need for permission, enabling asymmetric, non-reciprocated ties.” [Yarchi et al., 2021, p. 104]\u003c/p\u003e\n\u003ch3 id=\"debate-exists-between-whether-twitter-creates-or-dismantles-homogenization-and-therefore-polarization\"\u003eDebate exists between whether Twitter creates or dismantles homogenization, and therefore polarization\u003c/h3\u003e\n\u003cp\u003e“Reflecting Twitter’s ambiguous profile, the existing literature yields conflicting findings regarding its tendency toward homophily and polarization (e.g., Kwak et al., 2010 detected little homophily; Weng et al., 2010; Hong \u0026amp; Kim, 2016; Colleoni et al., 2014 found the opposite).” [Yarchi et al., 2021, p. 105]\u003c/p\u003e\n\u003ch3 id=\"cite-defines-social-media\"\u003eCITE: defines social media\u003c/h3\u003e\n\u003cp\u003e“social media platform in a narrow sense (following Ellison and Boyd (2013) definition)” [Yarchi et al., 2021, p. 
105]\u003c/p\u003e\n\u003ch3 id=\"users-tend-to-express-more-extreme-views-if-surrounded-by-likeminded-users\"\u003eUsers tend to express more extreme views if surrounded by likeminded users\u003c/h3\u003e\n\u003cp\u003e“Considering the effect of homophilic interactions on expressed positions, our data confirm users’ tendency to express more extreme views if interactions with likeminded users take in a larger share of their social media communications (H3).” [Yarchi et al., 2021, p. 111]\u003c/p\u003e\n\u003ch3 id=\"cannot-study-only-one-social-media-as-they-have-different-properties\"\u003ecannot study only one social media as they have different properties\u003c/h3\u003e\n\u003cp\u003e“Beyond questioning the widespread reliance on Twitter (and limited public Facebook) data to draw conclusions about social media as a whole, our study also highlights the perils of inferring dynamic properties from static data.” [Yarchi et al., 2021, p. 114]\u003c/p\u003e\n\u003ch3 id=\"tendency-to-associate-with-like-minded-people-increases-echo-chambers\"\u003etendency to associate with like-minded people increases echo chambers\u003c/h3\u003e\n\u003cp\u003e“That social psychology has long shown this tendency to associate with like-minded others is common cross-culturally. However, there is new fear that the current media system is helping people enter echo chambers more easily than ever before.” [Dubois and Blank, 2018, p. 731]\u003c/p\u003e\n\u003ch3 id=\"twitter-is-an-isolated-slice-of-the-population\"\u003eTwitter is an isolated slice of the population\u003c/h3\u003e\n\u003cp\u003e“Twitter itself is used by a relatively small proportion of the population, about one-quarter of the UK, which is younger, wealthier, and better-educated than Britain as a whole” [Dubois and Blank, 2018, p. 
732]\u003c/p\u003e\n\u003ch3 id=\"in-the-uk-going-to-a-news-source-like-bbc-is-still-more-common\"\u003ein the UK, going to a news source like BBC is still more common\u003c/h3\u003e\n\u003cp\u003e“going directly to a news source such as the BBC remains more common in the UK (Newman et al., 2017).” [Dubois and Blank, 2018, p. 733]\u003c/p\u003e\n\u003ch3 id=\"studies-don-t-study-the-aggregate-effect-of-diverse-media\"\u003estudies don\u0026rsquo;t study the aggregate effect of diverse media\u003c/h3\u003e\n\u003cp\u003e“A core problem with this line of research is that most studies select only one or a few media to focus on and so the comparative utility or effects of use of media in a diverse media environment are unclear.” [Dubois and Blank, 2018, p. 733]\u003c/p\u003e\n\u003ch3 id=\"people-with-strong-partisanship-report-consuming-a-diverse-media-digest\"\u003epeople with strong partisanship report consuming a diverse media digest\u003c/h3\u003e\n\u003cp\u003e“First, even individuals who have strong partisan affiliation report using both general news sites (which are largely non-partisan and include a variety of issues) and niche news sites (which may be partisan or focused on specific issues) – Republicans and Democrats have media diets which are quite similar” [Dubois and Blank, 2018, p. 734]\u003c/p\u003e\n\u003ch3 id=\"consumption-of-mixed-media-results-in-incidental-exposure-to-a-variety-of-news-sources\"\u003econsumption of mixed media results in incidental exposure to a variety of news sources\u003c/h3\u003e\n\u003cp\u003e“While one might receive primarily left-leaning political content on Twitter, they may be incidentally exposed to a right-leaning perspective from a family member on Facebook or they might hear a debate between representatives from various perspectives on a television news broadcast.” [Dubois and Blank, 2018, p. 
734]\u003c/p\u003e\n\u003ch3 id=\"those-who-are-politically-aware-are-going-to-encounter-more-perspectives\"\u003ethose who are politically aware are going to encounter more perspectives\u003c/h3\u003e\n\u003cp\u003e“As Prior argues, political ‘junkies’ are likely to consume a lot of information and therefore may encounter more perspectives and arguments” [Dubois and Blank, 2018, p. 734]\u003c/p\u003e\n\u003ch3 id=\"increased-involvement-in-politics-results-actually-in-less-echo-chamber\"\u003eincreased involvement in politics results actually in \u003cstrong\u003eless\u003c/strong\u003e echo chamber\u003c/h3\u003e\n\u003cp\u003e“H2: The higher a person’s level of political interest the less likely they are to be in an echo chamber” [Dubois and Blank, 2018, p. 735]\u003c/p\u003e\n\u003ch3 id=\"people-who-are-actually-disinterested-in-politics-are-in-an-echo-chamber\"\u003ePeople who are actually \u003cstrong\u003edisinterested\u003c/strong\u003e in politics are in an echo chamber\u003c/h3\u003e\n\u003cp\u003e“First, that respondents with no political interest are in an echo chamber. We examine this possibility using the regressions in Table 3. The results in this table are based only on the respondents who said they had ‘No interest at all’ in politics, N = 243.” [Dubois and Blank, 2018, p. 739]\u003c/p\u003e\n\u003ch3 id=\"high-choice-in-media-doesn-t-mean-a-high-degree-of-ability-to-reconsiliate\"\u003eHigh choice in media doesn\u0026rsquo;t mean a high degree of ability to reconsiliate\u003c/h3\u003e\n\u003cp\u003e“A high-choice media environment does not simply mean that individuals develop strategies to deal with the many media options available, though of course they do so as they develop their news and political information repertoires” [Dubois and Blank, 2018, p. 
740]\u003c/p\u003e\n\u003ch3 id=\"diversity-in-media-and-engagement-in-politics-matters\"\u003ediversity in media AND engagement in politics matters\u003c/h3\u003e\n\u003cp\u003e“Our results suggest that people who are both not politically interested and who do not use diverse media are more likely to be in an echo chamber. They are less likely check multiple sources or to discover things that change their minds.” [Dubois and Blank, 2018, p. 741]\u003c/p\u003e\n\u003ch3 id=\"polarized-language-results-in-greater-engagement-but-only-to-trolls-and-politically-engaged-users\"\u003epolarized language results in greater engagement: but only to trolls and politically engaged users\u003c/h3\u003e\n\u003cp\u003e“We also find that polarized language is associated with greater engagement, but this association only holds for politically engaged users (both trolls and regular users). This research clarifies how trolls leverage polarized language and provides an open-source, simple tool for exploration of polarized communications on social media.” [Simchon et al., 2022, p. 1]\u003c/p\u003e\n\u003ch3 id=\"russian-trolls-used-more-polarized-language\"\u003eRussian trolls used more polarized language\u003c/h3\u003e\n\u003cp\u003e“Again, we find that politically oriented Russian trolls use significantly more polarized language than their politically matched American sample (Russian trolls: M = 5.16, SD = 8.00, and N = 55,726; American controls: M = 2.91, SD = 6.84, and N = 55,726), t(108,836) = 50.61, P \u0026lt; 0.001, and Cohen’s d = 0.30 (for a robustness check, see Supplementary Materials).” [Simchon et al., 2022, p. 
4]\u003c/p\u003e\n\u003ch3 id=\"foreign-agents-increase-in-their-polarization-and-posting-frequency\"\u003eforeign agents increase in their polarization and posting frequency\u003c/h3\u003e\n\u003cp\u003e“foreign agents from various countries strategically used polarized language in social media communications, and in a majority of cases we see an increase over time in these attempts.” [Simchon et al., 2022, p. 6]\u003c/p\u003e\n\u003ch3 id=\"distinction-between-polarization-on-issues-vs-dot-polarization-of-anger\"\u003edistinction between polarization on issues vs. polarization of anger\u003c/h3\u003e\n\u003cp\u003e“Scholars have made the conceptual distinction between issue polarization—an ideological, policy-based political divide, and affective polarization, i.e. dislike, distrust, and general animosity of political partisans toward the other political side” [Simchon et al., 2022, p. 6]\u003c/p\u003e\n\u003ch3 id=\"small-amount-of-trolls-can-polarize-lots-of-people\"\u003esmall amount of trolls can polarize lots of people\u003c/h3\u003e\n\u003cp\u003e“Questions remain as to the extent of influence of trolls’ social media presence on real people. However, it is important to note that even a small number of agents with aggressive attitudes can have a substantial influence on the majority view, a process called “information gerrymandering”” [Simchon et al., 2022, p. 9]\u003c/p\u003e\n\u003ch3 id=\"interaction-with-trolls-didn-t-seem-to-change-partisanship\"\u003einteraction with trolls didn\u0026rsquo;t seem to change partisanship\u003c/h3\u003e\n\u003cp\u003e“The authors found that only a small fraction of users interacted with Russian trolls, and they did not observe any change in partisan attitude during that time among these users (68).” [Simchon et al., 2022, p. 
9]\u003c/p\u003e\n\u003ch2 id=\"subclaim-organization\"\u003esubclaim organization\u003c/h2\u003e\n\u003ch3 id=\"polarization-comes-from-congenial-echo-chambers-driven-by-generic-language-which-social-media-is-prone-to-create-due-to-their-curation\"\u003ePolarization comes from congenial echo-chambers driven by generic language, which social media is prone to create due to their curation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#isolation-results-in-more-extreme-contributions\"\u003eInteraction with in-group only results in more extreme contributions\u003c/a\u003e. This is what we typically call an \u0026ldquo;echo chamber\u0026rdquo; (\u003ca href=\"#users-tend-to-express-more-extreme-views-if-surrounded-by-likeminded-users\"\u003e1\u003c/a\u003e, \u003ca href=\"#tendency-to-associate-with-like-minded-people-increases-echo-chambers\"\u003e2\u003c/a\u003e). One such chamber enviroment is \u003ca href=\"#social-media-users-agree-more-over-time\"\u003esocial media\u003c/a\u003e, a particularly salient case of this is \u003ca href=\"#facebook-creates-supportive-echo-chambers\"\u003eFacebook, which creates supportive echo chambers\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eNovoa proposes one analysis through linguistics by which such an echo chamber can get created\u0026mdash;\u003ca href=\"#generics-are-much-more-readily-remembered\"\u003egenerics: easy to remember generalisations\u003c/a\u003e. 
\u003ca href=\"#polarized-language-is-present-far-more-in-generic-statements\"\u003epolarized language is present far more in generic statements\u003c/a\u003e, and \u003ca href=\"#generics-about-particular-group-is-a-way-that-polarized-languages-manifest\"\u003egenerics about particular group is a way that polarized languages manifest\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eGenerics only \u003ca href=\"#generics-are-strongly-rejected-or-accepted-based-on-parity-affiliation-echo-chamber\"\u003efunction when deployed within a homegenous environment.\u003c/a\u003e Yet, others have noted that \u003ca href=\"#facebook-s-chief-innovation-is-to-leverage-the-in-group-homogeneity-to-create-supportive-opinions\"\u003eFacebook\u0026rsquo;s chief innovation is to leverage the in-group homogeneity to create supportive opinions\u003c/a\u003e\u0026mdash;displaying \u0026ldquo;congenial\u0026rdquo; content that are likely to be homegenous.\u003c/p\u003e\n\u003ch3 id=\"the-congenial-environment-itself-however-is-not-enough-to-create-or-disrupt-polarization-breaking-echo-chambers-requires-both-a-diversity-of-opinions-as-well-as-actual-engagement\"\u003eThe congenial environment itself, however, is not enough to create or disrupt polarization; breaking echo chambers requires both a diversity of opinions as well as actual engagement\u003c/h3\u003e\n\u003cp\u003eUnlike Facebook\u0026rsquo;s congeniality, \u003ca href=\"#twitter-creates-asymmetric-non-friend-dynamics\"\u003eTwitter creates asymmetric, non-friend dynamics\u003c/a\u003e. Though it shows \u003ca href=\"#debate-exists-between-whether-twitter-creates-or-dismantles-homogenization-and-therefore-polarization\"\u003ethat it helps dismantle some echo chambers, its not conclusive\u003c/a\u003e. Yarachi notes this as \u003ca href=\"#there-s-a-difference-between-filter-bubbles-and-increased-polarization\"\u003ethe difference between filter bubbles and increased polarization\u003c/a\u003e. 
A \u0026ldquo;filter bubble\u0026rdquo; itself isn\u0026rsquo;t polarization, so what is?\u003c/p\u003e\n\u003cp\u003eDubois solves this mystery by arguing that it is \u003ca href=\"#people-who-are-actually-disinterested-in-politics-are-in-an-echo-chamber\"\u003epeople who are actually \u003cstrong\u003edisinterested\u003c/strong\u003e in politics are in an echo chamber\u003c/a\u003e. Through \u003ca href=\"#consumption-of-mixed-media-results-in-incidental-exposure-to-a-variety-of-news-sources\"\u003econsumption of mixed media results in incidental exposure to a variety of news sources\u003c/a\u003e one has to \u003ca href=\"#increased-involvement-in-politics-results-actually-in-less-echo-chamber\"\u003eparticipate\u003c/a\u003e in the conversation to get out the echo chamber.\u003c/p\u003e\n\u003cp\u003eMeaning, \u003ca href=\"#high-choice-in-media-doesn-t-mean-a-high-degree-of-ability-to-reconsiliate\"\u003ehigh choice in media itself (i.e. having facebook AND twitter) doesn\u0026rsquo;t mean a high degree of ability to reconsiliate\u003c/a\u003e. 
It is \u003ca href=\"#diversity-in-media-and-engagement-in-politics-matters\"\u003ediversity in media AND engagement in politics matters\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"other-notes\"\u003eother notes\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#those-who-are-politically-aware-are-going-to-encounter-more-perspectives\"\u003ethose who are politically aware are going to encounter more perspectives\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#people-with-strong-partisanship-report-consuming-a-diverse-media-digest\"\u003epeople with strong partisanship report consuming a diverse media digest\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"by-using-polarized-language-to-target-only-politically-active-users-trolls-essentially-disrupt-the-ability-to-dismantle-echo-chambers\"\u003eBy using polarized language to target only politically active users, trolls essentially disrupt the ability to dismantle echo chambers\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#interaction-with-trolls-didn-t-seem-to-change-partisanship\"\u003eInteraction with trolls didn\u0026rsquo;t seem to change partisanship\u003c/a\u003e, yet previous work establishes that a \u003ca href=\"#small-amount-of-trolls-can-polarize-lots-of-people\"\u003esmall amount of trolls can polarize lots of people\u003c/a\u003e\u0026mdash;so the manner by which trolls work is confusing.\u003c/p\u003e\n\u003cp\u003eSimchon notes that \u003ca href=\"#russian-trolls-used-more-polarized-language\"\u003eRussian trolls used more polarized language\u003c/a\u003e. 
Our previous analysis concludes that political activism is an important and inseperable part of breaking an echo chamber; trolls, then take advantage of this fact to disrupt the process of breaking away from polarization by \u003ca href=\"#polarized-language-results-in-greater-engagement-but-only-to-trolls-and-politically-engaged-users\"\u003ecapturing \u003cstrong\u003ealready politically active\u003c/strong\u003e users, which trolls take part.\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"bin\"\u003eBIN\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#polarization-distorts-beliefs-about-others\"\u003epolarization distorts beliefs about others\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#one-type-of-polarization-is-where-people-disengage-with-those-with-opposite-views\"\u003eone type of polarization is where people disengage with those with opposite views\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#in-the-uk-going-to-a-news-source-like-bbc-is-still-more-common\"\u003ein the UK, going to a news source like BBC is still more common\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#cite-defines-social-media\"\u003eCITE: defines social media\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#already-known-groups-typically-bring-strong-agreements-and-strangers-typically-bring-disperate-views\"\u003ealready known groups typically bring strong agreements, and strangers typically bring disperate views\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#cannot-study-only-one-social-media-as-they-have-different-properties\"\u003ecannot study only one social media as they have different properties\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#twitter-is-an-isolated-slice-of-the-population\"\u003eTwitter is an isolated slice of the population\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#studies-don-t-study-the-aggregate-effect-of-diverse-media\"\u003estudies don\u0026rsquo;t study 
the aggregate effect of diverse media\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#political-science-generally-believes-that-political-polarization-and-citizen-polarization-is-different\"\u003epolitical science generally believes that political polarization and citizen polarization is different\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#distinction-between-polarization-on-issues-vs-dot-polarization-of-anger\"\u003edistinction between polarization on issues vs. polarization of anger\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#people-perceive-higher-polarization-than-actually-are-present-due-to-use-of-generics\"\u003elanguage induces perception about polarization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#foreign-agents-increase-in-their-polarization-and-posting-frequency\"\u003eforeign agents increase in their polarization and posting frequency\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#partisanship-switches-to-minority-party-when-the-house-switches-control\"\u003epartisanship is constantly switching\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#political-polarization-and-its-prevalence-has-been-discussed-since-founding-of-the-country\"\u003epolarization is a long-standing topic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhpwr1_texts_in_conversation/","tags":null,"title":"PWR1 Texts in Conversation Planning"},{"categories":null,"contents":"One alpha vector per action:\n\\begin{equation} \\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) \\max_{a\u0026rsquo;} \\alpha^{(k)}_{a\u0026rsquo;} (s\u0026rsquo;) \\end{equation}\nThis is going to give you a set of alpha vectors, one corresponding to each action.\ntime complexity: \\(O(|S|^{2}|A|^{2})\\)\nyou will note we don\u0026rsquo;t ever actually use anything partially-observable in this. 
Once we get the alpha vector, we need to use one-step lookahead in POMDP (which does use transitions) to actually turn this alpha vector into a policy, which then does create you\nWe can deal with continuous state space by using some estimation of the value function (instead of alpha-vectors, we will just use a value-function estimate like q learning)\n","html":"\u003cp\u003eOne \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e per action:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha^{(k+1)}_{a}(s) = R(s,a) + \\gamma \\sum_{s\u0026rsquo;}^{}T(s\u0026rsquo;|s,a) \\max_{a\u0026rsquo;} \\alpha^{(k)}_{a\u0026rsquo;} (s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is going to give you a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, one corresponding to each action.\u003c/p\u003e\n\u003cp\u003etime complexity: \\(O(|S|^{2}|A|^{2})\\)\u003c/p\u003e\n\u003cp\u003eyou will note we don\u0026rsquo;t ever actually use anything partially-observable in this. 
Once we get the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e, we need to use \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e (which does use transitions) to actually turn this \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e into a policy, which then does create\nyou\u003c/p\u003e\n\u003cp\u003eWe can deal with continuous state space by using some estimation of the \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e (instead of alpha-vectors, we will just use a value-function estimate like q learning)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhqmdp/","tags":null,"title":"QMDP"},{"categories":null,"contents":"system does not work as well for one type/group of people compared to another\ntraining data really does matter: it may make generalized predictions based on a majority/minority class.\nBecause IID characteristic of input data, the majority will be over represented\n","html":"\u003cp\u003esystem \u003cstrong\u003edoes not work as well\u003c/strong\u003e for one type/group of people compared to another\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-12-01_15-56-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003etraining data really does matter: it may make generalized predictions based on a majority/minority class.\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e characteristic of input data, the majority will be over represented\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquality_of_service_harm/","tags":null,"title":"quality of service harm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhcorrelation/","tags":null,"title":"quantum correlation"},{"categories":null,"contents":" 
Viewing computational linguistics from the length across linear algebra and linear structure Quantum algorithms and the necessary infra were being developed; and in the 2010s programmable quantum computers became showing up Quantum is done over the complexes, which makes the normal linguistics done with the reals more powerful.\nwant to infer the probability distribution of words based on their letters\nLinearity breaks down: letter combinations in not commutative; and P(letter C) + P(letter A) != P(letters CA) instead of encoding letters as one-hot vectors; we encode these letters with matrices: adds more dimensions\nimmediate benefits: noncommutivity of matricies is a PLUS words is just the composed results into another 2x2 matricies then, to map into probability distrubtion, we map the matrix into a partial trace things create bounds from the problem: letters\nimprove upon optimization scheme in a quantum rhelm\nimplement this scheme on a quantum computer: https://arxiv.org/pdf/1710.10248.pdf\ntask: NTJ reading; come up with the needed novelty\n","html":"\u003cul\u003e\n\u003cli\u003eViewing computational linguistics from the length across linear algebra and linear structure\u003c/li\u003e\n\u003cli\u003eQuantum algorithms and the necessary infra were being developed; and in the 2010s programmable quantum computers became showing up\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eQuantum is done over the complexes, which makes the normal linguistics done with the reals more powerful.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ewant to infer the probability distribution of words based on their letters\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLinearity breaks down: letter combinations in not commutative; and P(letter C) + P(letter A) != P(letters CA)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003einstead of encoding letters as one-hot vectors; we encode these letters with matrices: adds more 
dimensions\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eimmediate benefits:\n\u003cul\u003e\n\u003cli\u003enoncommutivity of matricies is a PLUS\u003c/li\u003e\n\u003cli\u003ewords is just the composed results into another 2x2 matricies\u003c/li\u003e\n\u003cli\u003e\n\u003ch2 id=\"then-to-map-into-probability-distrubtion-we-map-the-matrix-into-a-partial-trace\"\u003ethen, to map into probability distrubtion, we map the matrix into a partial trace\u003c/h2\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things\"\u003ethings\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ecreate bounds from the problem: letters\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eimprove upon optimization scheme in a quantum rhelm\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eimplement this scheme on a quantum computer: \u003ca href=\"https://arxiv.org/pdf/1710.10248.pdf\"\u003ehttps://arxiv.org/pdf/1710.10248.pdf\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003etask: NTJ reading; come up with the needed novelty\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantum_group_project/","tags":null,"title":"Quantum Group Project"},{"categories":null,"contents":"The information theory computational model behind quantum theory. It proposes quantum computers, proposed during the 80s. Theoretically, quantum computers have quantum supremacy, which is exciting. 
It is a theory that works with counterfactual information.\nquantum computer A quantum computer is a computer that uses quantum effects to perform Turing-like computations\nquantum supremacy That a quantum computer outperforms all classical computers\nuniversal computer \u0026ldquo;a programmable system whose repertoire includes all physically possible computations\u0026rdquo; \u0026mdash; Turing.\nYou will realize that modern computers are not actually capable of all computations\u0026mdash;apparently, they can\u0026rsquo;t make itself.\nTherefore, to actually achieve this, we have to make a more general type of computer: a constructor \u0026mdash; a universal quantum constructor.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhinformation_theory/\"\u003einformation theory\u003c/a\u003e computational model behind \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e. It proposes \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003es, proposed during the 80s. Theoretically, \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003es have \u003ca href=\"#quantum-supremacy\"\u003equantum supremacy\u003c/a\u003e, which is exciting. 
It is a theory that works with \u003ca href=\"/posts/kbhcounterfactual/\"\u003ecounterfactual\u003c/a\u003e information.\u003c/p\u003e\n\u003ch2 id=\"quantum-computer\"\u003equantum computer\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003e is a computer that uses \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum effects\u003c/a\u003e to perform Turing-like computations\u003c/p\u003e\n\u003ch2 id=\"quantum-supremacy\"\u003equantum supremacy\u003c/h2\u003e\n\u003cp\u003eThat a \u003ca href=\"#quantum-computer\"\u003equantum computer\u003c/a\u003e outperforms all classical computers\u003c/p\u003e\n\u003ch2 id=\"universal-computer\"\u003euniversal computer\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;a programmable system whose repertoire includes all physically possible computations\u0026rdquo; \u0026mdash; Turing.\u003c/p\u003e\n\u003cp\u003eYou will realize that modern computers are not actually capable of all computations\u0026mdash;apparently, they can\u0026rsquo;t make itself.\u003c/p\u003e\n\u003cp\u003eTherefore, to actually achieve this, we have to make a more general type of computer: a \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003e \u0026mdash; a \u003ca href=\"/posts/kbhuniversal_quantum_constructor/\"\u003euniversal quantum constructor.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantum_information_theory/","tags":null,"title":"quantum information theory"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhquantum_supremecy/","tags":null,"title":"quantum supremecy"},{"categories":null,"contents":"quantum theory allows us to understand physics; it reconciliations the classical world with the quantum world.\nClassical particles, in the double slit experiment, would just straight go through and bounce off Actual particles (quantum) like light, under quantum theory, would actually exhibit interference via 
wave-like hebahior The measurement of quantum theory is done via quantum information theory.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e allows us to understand \u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e; it reconciliations the classical world with the quantum world.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eClassical particles, in the \u003ca href=\"/posts/kbhdouble_slit_experiment/\"\u003edouble slit experiment\u003c/a\u003e, would just straight go through and bounce off\u003c/li\u003e\n\u003cli\u003eActual particles (quantum) like light, under \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e, would actually exhibit interference via wave-like hebahior\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe measurement of \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e is done via \u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantum_theory/","tags":null,"title":"quantum theory"},{"categories":null,"contents":"A little endeavor to learn about Lambek Calculus, quantum information theory, and linguistics I guess.\nCourses to Take for QNLP\nCategorical Grammars Index\n","html":"\u003cp\u003eA little endeavor to learn about \u003ca href=\"/posts/kbhlambek_calculus/\"\u003eLambek Calculus\u003c/a\u003e, \u003ca href=\"/posts/kbhquantum_information_theory/\"\u003equantum information theory\u003c/a\u003e, and linguistics I guess.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcourses_to_take_for_qnlp/\"\u003eCourses to Take for QNLP\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcategorical_grammars_index/\"\u003eCategorical Grammars Index\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquantumnlp/","tags":null,"title":"QuantumNLP Index"},{"categories":null,"contents":"A 
qubit is a two-layer quantum theory system.\nA classical bit is something that can be set between two values, a qubit between a much higher dimension.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e is a two-layer \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e system.\u003c/p\u003e\n\u003cp\u003eA classical bit is something that can be set between two values, a \u003ca href=\"/posts/kbhqubits/\"\u003equbit\u003c/a\u003e between a much higher dimension.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhqubits/","tags":null,"title":"qubit"},{"categories":null,"contents":"a quotient group is a group which is the product of mapping things out.\nsubgroups The set of integers \\(\\mathbb{Z}\\) is obviously a group. You can show it to yourself that multiples of any number in the group is a subgroup of that group.\nFor instance:\n\\(3 \\mathbb{Z}\\), the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) is a subgroup\nactual quotient groups We can use the subgroup above to mask out a group. The resulting product is NOT a subgroup, but its a new group with individual elements being subsets of our original group.\nFor instance, the \\(\\mod 3\\) quotient group is written as:\n\\begin{equation} \\mathbb{Z}} / 3 \\mathbb{Z} \\end{equation}\nEach element in this new group is a set; for instance, in \\(\\mathbb{Z} / 3\\mathbb{Z}\\), \\(0\\) is actually the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) (i.e. the subgroup that we were masking by). Other elements in the quotient space (\u0026ldquo;1\u0026rdquo;, a.k.a. \\(\\{ \\dots, -2, 1, 4, 7 \\dots \\}\\), or \u0026ldquo;2\u0026rdquo;, a.k.a. \\(\\{\\dots, -1, 2, 5, 8 \\dots \\}\\)) are called \u0026ldquo;cosets\u0026rdquo; of \\(3 \\mathbb{Z}\\). 
You will notice they are not a subgroups.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003e is a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e which is the product of mapping things out.\u003c/p\u003e\n\u003ch2 id=\"subgroup--kbhsubgroup-dot-md--s\"\u003e\u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eThe set of integers \\(\\mathbb{Z}\\) is obviously a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e. You can show it to yourself that multiples of any number in the \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e of that group.\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cp\u003e\\(3 \\mathbb{Z}\\), the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) is a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"actual-quotient-group--kbhquotient-group-dot-md--s\"\u003eactual \u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eWe can use the \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e above to mask out a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e. The resulting product is \u003cstrong\u003eNOT\u003c/strong\u003e a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e, but its a new \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e with individual elements being subsets of our original group.\u003c/p\u003e\n\u003cp\u003eFor instance, the \\(\\mod 3\\) quotient group is written as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Z}} / 3 \\mathbb{Z}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eEach element in this new group is a set; for instance, in \\(\\mathbb{Z} / 3\\mathbb{Z}\\), \\(0\\) is actually the set \\(\\{\\dots -6, -3, 0, 3, 6, \\dots\\}\\) (i.e. 
the \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003e that we were masking by). Other elements in the quotient space (\u0026ldquo;1\u0026rdquo;, a.k.a. \\(\\{ \\dots, -2, 1, 4, 7 \\dots \\}\\), or \u0026ldquo;2\u0026rdquo;, a.k.a. \\(\\{\\dots, -1, 2, 5, 8 \\dots \\}\\)) are called \u0026ldquo;cosets\u0026rdquo; of \\(3 \\mathbb{Z}\\). You will notice they are not a \u003ca href=\"/posts/kbhsubgroup/\"\u003esubgroup\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_group/","tags":null,"title":"quotient group"},{"categories":null,"contents":"The quotient map \\(\\pi\\) is the Linear Map \\(V \\to V / U\\) such that:\n\\begin{equation} \\pi(v) = v+U \\end{equation}\nfor \\(v \\in V\\).\nI.e.: the quotient map is affine subsetification map given a vector.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e \\(\\pi\\) is the \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003e \\(V \\to V / U\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pi(v) = v+U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor \\(v \\in V\\).\u003c/p\u003e\n\u003cp\u003eI.e.: the \u003ca href=\"/posts/kbhquotient_map/\"\u003equotient map\u003c/a\u003e is \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003eification map given a vector.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_map/","tags":null,"title":"quotient map"},{"categories":null,"contents":"Suppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an invariant subspace under \\(T\\). Then:\n\\begin{equation} (T / U)(v+U) = Tv+U, \\forall v \\in V \\end{equation}\nwhere \\(T / U \\in \\mathcal{L}(V / U)\\)\n\u0026ldquo;if you can operator on \\(V\\), you can operator on \\(V / U\\) in the same way.\u0026rdquo; Yes I just verbed operator.\nquotient operator is well-defined Why is this not possible for any subspace of \\(V\\)? 
This is because we need \\(T\\) to preserve the exact structure of the subspace we are quotienting out by; otherwise our affine subset maybe squished to various unexpected places. The technical way to show that this is well-defined leverages the property of two affine subsets being equal:\nSuppose \\(v +U = w+U\\), we desire that \\(T / U (v+U) = T / U (w+U)\\). That is, we desire that \\(Tv +U = Tw +U\\).\nIf \\(v+U = w+U\\) , then, \\(v-w \\in U\\). Now, this means that \\(T(v-w) \\in U\\) only because \\(U\\) is invariant under \\(T\\) (otherwise it could be sent to anywhere in \\(V\\) as \\(T \\in \\mathcal{L}(V)\\) not \\(\\mathcal{L}(U)\\)). Therefore, \\(Tv-Tw \\in U\\), and so \\(Tv +U = Tw+U\\), as desired. \\(\\blacksquare\\)\n","html":"\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), and \\(U \\subset V\\), an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e under \\(T\\). Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(T / U)(v+U) = Tv+U, \\forall v \\in V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(T / U \\in \\mathcal{L}(V / U)\\)\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;if you can \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(V\\), you can \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e on \\(V / U\\) in the same way.\u0026rdquo; Yes I just verbed \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"quotient-operator--kbhquotient-operator-dot-md--is-well-defined\"\u003e\u003ca href=\"/posts/kbhquotient_operator/\"\u003equotient operator\u003c/a\u003e is well-defined\u003c/h2\u003e\n\u003cp\u003eWhy is this not possible for any subspace of \\(V\\)? 
This is because we need \\(T\\) to preserve the exact structure of the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e we are quotienting out by; otherwise our \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e maybe squished to various unexpected places. The technical way to show that this is well-defined leverages the property of two \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es being equal:\u003c/p\u003e\n\u003cp\u003eSuppose \\(v +U = w+U\\), we desire that \\(T / U (v+U) = T / U (w+U)\\). That is, we desire that \\(Tv +U = Tw +U\\).\u003c/p\u003e\n\u003cp\u003eIf \\(v+U = w+U\\) , then, \\(v-w \\in U\\). Now, this means that \\(T(v-w) \\in U\\) \u003cstrong\u003eonly because \\(U\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\)\u003c/strong\u003e (otherwise it could be sent to anywhere in \\(V\\) as \\(T \\in \\mathcal{L}(V)\\) not \\(\\mathcal{L}(U)\\)). Therefore, \\(Tv-Tw \\in U\\), and so \\(Tv +U = Tw+U\\), as desired. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_operator/","tags":null,"title":"quotient operator"},{"categories":null,"contents":"A quotient space is the set of all affine subsets of \\(V\\) parallel to some subspace \\(U\\). 
This should be reminiscent of quotient groups.\nconstituents vector space \\(V\\) a subspace \\(U \\subset V\\) requirements \\begin{equation} V / U = \\{v+U : v \\in V \\} \\end{equation}\nadditional information operations on quotient space Addition and scalar multiplication on the quotient space is defined in the expected way:\ngiven \\((v+U), (w+U) \\in V / U\\), and \\(\\lambda \\in \\mathbb{F}\\):\n\\begin{equation} \\begin{cases} (v+U) + (w+U) = ((v+w)+U) \\\\ \\lambda (v+U) = ((\\lambda v)+U) \\end{cases} \\end{equation}\nquotient space operations behave uniformly on equivalent affine subsets The tricky thing about quotient space operations is that there are multiple ways of representing a single affine subset parallel to \\(U\\); the one-liner about this is that if you think about shifting a parallel line with a vector: shifting the line along any perpendicular vector to the line with the same magnitude will get you the same shifted line.\nFor the operations above to work, we have to make sure that they behave in the same way on distinct representations of the same affine subset, which we endeavor to proof here:\nSuppose we have \\(v,w \\in V\\), \\(v\u0026rsquo;,w\u0026rsquo; \\in V\\), and that \\(v+U = v\u0026rsquo;+U\\); \\(w+U = w\u0026rsquo;+U\\). We desire that the operations above behave the same way to any addition groupings: that WLOG \\((v+U)+(w+U) = (v\u0026rsquo;+U)+(w\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\).\nBy the fact that two affine subsets parallel to \\(U\\) are either equal or disjoint, we have that \\(v-v\u0026rsquo;, w-w\u0026rsquo; \\in U\\). And so, \\((v-v\u0026rsquo;)+(w-w\u0026rsquo;) \\in U\\). Commuting these things under \\(V\\), we now have that \\((v+w)-(v\u0026rsquo;+w\u0026rsquo;) \\in U\\). Therefore, invoking the same result again, \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\), as desired.\nThe same logic can be used for scalar multiplication. 
Suppose we have \\(v, v\u0026rsquo; \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and that \\(v+U = v\u0026rsquo;+U\\). We desire that WLOG \\(\\lambda (v+U) = \\lambda (v\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((\\lambda v)+U = (\\lambda v\u0026rsquo;)+U\\).\nAgain invoking the two affine subsets parallel to \\(U\\) are either equal or disjoint result, we have that \\(v-v\u0026rsquo; \\in U\\). Now, this means that \\(\\lambda (v-v\u0026rsquo;) = \\lambda v-\\lambda v\u0026rsquo; \\in U\\) because closure of scalar multiplication in \\(U\\). Invoking the result again, we now have that \\(\\lambda v + U = \\lambda v\u0026rsquo; +U\\), as desired.\nHaving shown both operations make sense, we can declare that they make sense indeed. \\(\\blacksquare\\)\nquotient space is a vector space Given the name! (jk)\nBleh I give up just prove it yourself given the above operations and the fact that the additive identity is \\(0+U = U\\), the additive inverse is \\(-v+U\\).\n\u0026ldquo;instead of the elements single vectors, we fuse the whole affine subset together. instead of counting the contents, we count the bucket.\u0026rdquo;\ndimension of a quotient space is the difference between dimensions of its constituents that is,\n\\begin{equation} \\dim V / U = \\dim V - \\dim U \\end{equation}\nfor finite dimensional \\(V\\).\nProof:\nLet \\(\\pi: V \\to V /U\\). By definition, \\(null\\ \\pi =U\\); and, given the input is any \\(v \\in V\\), \\(range\\ \\pi = V / U\\). rank-nullity theorem then tells us that:\n\\begin{equation} \\dim V = \\dim U + \\dim V / U \\end{equation}\nnow subtract and get \\(\\dim V /U\\) by itself. 
\\(\\blacksquare\\)\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is the set of all \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es of \\(V\\) \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eparallel\u003c/a\u003e to some \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U\\). This should be reminiscent of \u003ca href=\"/posts/kbhquotient_group/\"\u003equotient group\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U \\subset V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nV / U = \\{v+U : v \\in V \\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"operations-on-quotient-space--kbhquotient-space-dot-md\"\u003eoperations on \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhadding/\"\u003eAddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e on the \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is defined in the expected way:\u003c/p\u003e\n\u003cp\u003egiven \\((v+U), (w+U) \\in V / U\\), and \\(\\lambda \\in \\mathbb{F}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n(v+U) + (w+U) = ((v+w)+U) \\\\\n\\lambda (v+U) = ((\\lambda v)+U)\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"quotient-space--kbhquotient-space-dot-md--operations-behave-uniformly-on-equivalent-affine-subset--kbhparallel-linear-algebra-dot-md--s\"\u003e\u003ca 
href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e operations behave uniformly on equivalent \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003es\u003c/h4\u003e\n\u003cp\u003eThe tricky thing about \u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es is that there are multiple ways of representing a single \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e parallel to \\(U\\); the one-liner about this is that if you think about shifting a parallel line with a vector: shifting the line along \u003cstrong\u003eany\u003c/strong\u003e perpendicular vector to the line with the same magnitude will get you the same shifted line.\u003c/p\u003e\n\u003cp\u003eFor the \u003ca href=\"/posts/kbhoperation/\"\u003eoperation\u003c/a\u003es above to work, we have to make sure that they behave in the same way on distinct representations of the same \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e, which we endeavor to proof here:\u003c/p\u003e\n\u003cp\u003eSuppose we have \\(v,w \\in V\\), \\(v\u0026rsquo;,w\u0026rsquo; \\in V\\), and that \\(v+U = v\u0026rsquo;+U\\); \\(w+U = w\u0026rsquo;+U\\). We desire that the operations above behave the same way to any addition groupings: that WLOG \\((v+U)+(w+U) = (v\u0026rsquo;+U)+(w\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\).\u003c/p\u003e\n\u003cp\u003eBy the fact that \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e, we have that \\(v-v\u0026rsquo;, w-w\u0026rsquo; \\in U\\). And so, \\((v-v\u0026rsquo;)+(w-w\u0026rsquo;) \\in U\\). 
Commuting these things under \\(V\\), we now have that \\((v+w)-(v\u0026rsquo;+w\u0026rsquo;) \\in U\\). Therefore, invoking the same result again, \\((v+w)+U = (v\u0026rsquo;+w\u0026rsquo;)+U\\), as desired.\u003c/p\u003e\n\u003cp\u003eThe same logic can be used for scalar multiplication. Suppose we have \\(v, v\u0026rsquo; \\in V\\), \\(\\lambda \\in \\mathbb{F}\\), and that \\(v+U = v\u0026rsquo;+U\\). We desire that WLOG \\(\\lambda (v+U) = \\lambda (v\u0026rsquo;+U)\\) \u0026mdash; that is, we have to show that \\((\\lambda v)+U = (\\lambda v\u0026rsquo;)+U\\).\u003c/p\u003e\n\u003cp\u003eAgain invoking the \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e result, we have that \\(v-v\u0026rsquo; \\in U\\). Now, this means that \\(\\lambda (v-v\u0026rsquo;) = \\lambda v-\\lambda v\u0026rsquo; \\in U\\) because \u003ca href=\"/posts/kbhclosed/\"\u003eclosure\u003c/a\u003e of \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e in \\(U\\). Invoking the result again, we now have that \\(\\lambda v + U = \\lambda v\u0026rsquo; +U\\), as desired.\u003c/p\u003e\n\u003cp\u003eHaving shown both operations make sense, we can declare that they make sense indeed. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"quotient-space--kbhquotient-space-dot-md--is-a-vector-space--kbhvector-space-dot-md\"\u003e\u003ca href=\"/posts/kbhquotient_space/\"\u003equotient space\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eGiven the name! 
(jk)\u003c/p\u003e\n\u003cp\u003eBleh I give up just prove it yourself given the above operations and the fact that the \u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e is \\(0+U = U\\), the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e is \\(-v+U\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;instead of the elements single vectors, we fuse the whole \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e together. instead of counting the contents, we count the bucket.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"dimension-of-a-quotient-space-is-the-difference-between-dimensions-of-its-constituents\"\u003edimension of a quotient space is the difference between dimensions of its constituents\u003c/h3\u003e\n\u003cp\u003ethat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V / U = \\dim V - \\dim U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor finite dimensional \\(V\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet \\(\\pi: V \\to V /U\\). By definition, \\(null\\ \\pi =U\\); and, given the input is any \\(v \\in V\\), \\(range\\ \\pi = V / U\\). \u003ca href=\"/posts/kbhfundamental_theorem_of_linear_maps/\"\u003erank-nullity theorem\u003c/a\u003e then tells us that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim V = \\dim U + \\dim V / U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow subtract and get \\(\\dim V /U\\) by itself. \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhquotient_space/","tags":null,"title":"quotient space"},{"categories":null,"contents":"Most high-school science programs have a strong focus on scientific theory and do not train students to conduct independent research. Previous work has demonstrated the efficacy of a mentor-supported, student-driven teaching program to effectively introduce research-specific skills in a classroom context. 
Despite the effectiveness of such programs, their class-based formats and requirements for multiple full-time faculty mentors limit their throughput, and the finite expertise of full-time mentors requires participants to focus on specific research subjects.\nTo address these limitations, we introduce R@N, an extracurricular, student-led, and student-driven program for the independent acquisition of research-specific skills through the self-guided completion of a series of formative checkpoints (“nodes”) for mastery. Students in the program can choose specific subsets of nodes to be trained in research in subjects of their interest. The program is developed and moderated by a small team of students in consultation with skill-specific faculty mentors through regular meetings. Students meet weekly to create, update, and revise nodes in collaboration with mentors in order to enable and supplement the learning of students participating in the program.\nThe program offers a few key results: it electively allows the student body (approximately 400 in our institution) to asynchronously acquire the skills of independent research and enables a group of around 20 students to develop and codify tools and skills for research pedagogy. The program can be sustained with limited faculty involvement, requiring one dedicated faculty mentor working in conjunction with a larger pool of research mentors who commit around 2 hours per month.\n","html":"\u003cp\u003eMost high-school science programs have a strong focus on scientific theory and do not train students to conduct independent research. Previous work has demonstrated the efficacy of a mentor-supported, student-driven teaching program to effectively introduce research-specific skills in a classroom context. 
Despite the effectiveness of such programs, their class-based formats and requirements for multiple full-time faculty mentors limit their throughput, and the finite expertise of full-time mentors requires participants to focus on specific research subjects.\u003c/p\u003e\n\u003cp\u003eTo address these limitations, we introduce R@N, an extracurricular, student-led, and student-driven program for the independent acquisition of research-specific skills through the self-guided completion of a series of formative checkpoints (“nodes”) for mastery. Students in the program can choose specific subsets of nodes to be trained in research in subjects of their interest. The program is developed and moderated by a small team of students in consultation with skill-specific faculty mentors through regular meetings. Students meet weekly to create, update, and revise nodes in collaboration with mentors in order to enable and supplement the learning of students participating in the program.\u003c/p\u003e\n\u003cp\u003eThe program offers a few key results: it electively allows the student body (approximately 400 in our institution) to asynchronously acquire the skills of independent research and enables a group of around 20 students to develop and codify tools and skills for research pedagogy. The program can be sustained with limited faculty involvement, requiring one dedicated faculty mentor working in conjunction with a larger pool of research mentors who commit around 2 hours per month.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhr_n_abstract/","tags":null,"title":"R@N Abstract"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhr_n_meeting_with_angi/","tags":null,"title":"R@N Meeting with Angi"},{"categories":null,"contents":"Let\u0026rsquo;s compute what \\(e^{tA}\\) should look like, where \\(t\\) is some scalar and \\(A\\) is a diagonalizable matrix. 
This is a supplement to Second-Order Linear Differential Equations.\nLet \\(v_1\\dots v_{m}\\) be the eigenvectors of \\(A\\). Let \\(\\lambda_{1}\\dots\\lambda_{m}\\) be the eigenvalues.\nRecall that we can therefore diagonalize \\(A\\) as:\n\\begin{equation} A = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nread: change of choordinates into the eigenbases, scale by the eigenvalues, then change back to normal choordinates.\nNow, imagine if we are multiplying \\(A\\) by itself manymany times; what will that look like?\n\\begin{equation} A^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots \\end{equation}\nThe middle parts, nicely, cancels out! Its a matrix applied to its inverse! So, we get rid of it\n\\begin{equation} A^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots \\end{equation}\nNow, we are multiplying diagonal matricies against itself! If you work out the mechanics of matrix multiplication, you will note that each element simply gets scaled to higher powers (the matricies are diagonal!)! So then, we have:\n\\begin{equation} A^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{n}, \\dots, {\\lambda_{m}}^{n}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nNice.\nRecall also the Tayler expasion of \\(e^{x}\\); we will apply it to to \\(e^{tA}\\):\n\\begin{equation} e^{tA} = \\sum_{k=0}^{\\infty} \\frac{1}{k!}(tA)^{k} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}A^{k} \\end{equation}\nOk. 
We now apply our definition of \\(A^{n}\\) derived above:\n\\begin{equation} e^{tA} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nSee now that \\(\\mqty(v_1 \u0026amp; \\dots \u0026amp;v_{m})\\) and its inverse is both constant in the sum, so we take it out:\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\qty(\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} \\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}}))\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nAnd now, the actual mechanics of adding a matrix is just adding it elementwise, so we will put the summations into the matrix:\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{1}}^{k}, \\dots, \\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\nNote now that each value in that matrix is just the Tayler expansion of \\(e^{k_{\\lambda_{j}}}\\) (take a moment to pause if this is not immediately obvious; think about what each element in that diagonal matrix look like and what the Tayler polynomial \\(e^{x}\\) should look like. Perhaps what some arbitrary \\(e^{ab}\\) should looks like.\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\n","html":"\u003cp\u003eLet\u0026rsquo;s compute what \\(e^{tA}\\) should look like, where \\(t\\) is some scalar and \\(A\\) is a diagonalizable matrix. 
This is a supplement to \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet \\(v_1\\dots v_{m}\\) be the eigenvectors of \\(A\\). Let \\(\\lambda_{1}\\dots\\lambda_{m}\\) be the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eRecall that we can therefore diagonalize \\(A\\) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eread: change of choordinates into the eigenbases, scale by the eigenvalues, then change back to normal choordinates.\u003c/p\u003e\n\u003cp\u003eNow, imagine if we are multiplying \\(A\\) by itself manymany times; what will that look like?\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe middle parts, nicely, cancels out! Its a matrix applied to its inverse! So, we get rid of it\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(\\dmat{\\lambda_{1}, \\dots, \\lambda_{m}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, we are \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplying\u003c/a\u003e diagonal \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e against itself! 
If you work out the mechanics of \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e, you will note that each element simply gets scaled to higher powers (the \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e are diagonal!)! So then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{n} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{n}, \\dots, {\\lambda_{m}}^{n}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNice.\u003c/p\u003e\n\u003cp\u003eRecall also the Tayler expasion of \\(e^{x}\\); we will apply it to to \\(e^{tA}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\sum_{k=0}^{\\infty} \\frac{1}{k!}(tA)^{k} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}A^{k}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOk. We now apply our definition of \\(A^{n}\\) derived above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\sum_{k=0}^{\\infty} \\frac{t^{k}}{k!}\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSee now that \\(\\mqty(v_1 \u0026amp; \\dots \u0026amp;v_{m})\\) and its inverse is both constant in the sum, so we take it out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\qty(\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} \\mqty(\\dmat{{\\lambda_{1}}^{k}, \\dots, {\\lambda_{m}}^{k}}))\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd now, the actual mechanics of adding a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is just adding it elementwise, so we will put the summations into the matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; 
v_{m})\\mqty(\\dmat{\\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{1}}^{k}, \\dots, \\sum_{k=0}^{\\infty}\\frac{t^{k}}{k!} {\\lambda_{m}}^{k}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote now that each value in that matrix is just the Tayler expansion of \\(e^{k_{\\lambda_{j}}}\\) (take a moment to pause if this is not immediately obvious; think about what each element in that diagonal matrix look like and what the Tayler polynomial \\(e^{x}\\) should look like. Perhaps what some arbitrary \\(e^{ab}\\) should looks like.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhraising_e_to_a_matrix/","tags":null,"title":"raising e to a matrix"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrandom/","tags":null,"title":"random"},{"categories":null,"contents":"A random variable is a variable that has a value, but there are uncertainty with respect to what that value is.\ndiscrete: finite number of values continuous: infinitely many possible values probability mass function A discrete random variable is encoded as a probability mass function\nprobability density function A continuous random variable is represented as a probability density function.\nsummary statistics probability mass function is a description for the random variable: and random variables are usually communicated via probability mass functions expected value adding random variables \u0026ldquo;what\u0026rsquo;s the probability of \\(X + Y = n\\) with IID \\(X\\) and \\(Y\\)?\u0026rdquo; \u0026ldquo;what\u0026rsquo;s the probability of two independent samples from the same exact distribution adding up to \\(n\\)?\u0026rdquo;\n\\begin{equation} \\sum_{i=-\\infty}^{\\infty} 
P(X=i, Y=n-i) \\end{equation}\nor integrals and PDFs, as appropriate for continuous cases\nfor every single outcome, we want to create every possible operation which causes the two variables to sum to \\(n\\).\nWe can use convolution to figure out every combination of assignments to random variables which add to a value, and sum their probabilities together.\nadding binomial distribution adding Gaussian distributions adding poisson distribution If you add a bunch of IID things together\u0026hellip;. central limit theorem\naveraging random variables adding random variables + linear transformers on Gaussian\nYou end up with:\n\\begin{equation} \\mathcal{N}\\qty(\\mu, \\frac{1}{n} \\sigma^{2}) \\end{equation}\nyou note: as you sum together many things that is IID, the average is pretty the same; but the variance gets smaller as you add more.\nmaxing random variables Gumbel distribution: fisher tripplett gedembo theorem???\nsampling statistics We assume that there\u0026rsquo;s some underlying distribution with some true mean \\(\\mu\\) and true variance \\(\\sigma^{2}\\). 
We would like to model it with some confidence.\nConsider a series of measured samples \\(x_1, \u0026hellip;, x_{n}\\), each being an instantiation of a IID random variable drawn from the underlying distribution each being \\(X_1, \u0026hellip;, X_{n}\\).\nsample mean Let us estimate the true population mean\u0026hellip; by creating a random variable representing the the averaging \\(n\\) measured random variables representing the observations:\n\\begin{equation} \\bar{X} = \\frac{1}{N} \\sum_{i=1}^{n} X_{i} \\end{equation}\nwe can do this because we really would like to know \\(\\mathbb{E}[\\bar{X}] = \\mathbb{E}[\\frac{1}{N} \\sum_{i=1}^{n} X_i] = \\frac{1}{N}\\sum_{i=1}^{n} \\mathbb{E}[X_{i}] = \\frac{1}{N} N \\mu = \\mu\\) and so as long as each of the underlying variables have the same expected mean (they do because IID) drawn, we can use the sample mean to estimate the population mean.\nsample variance We can\u0026rsquo;t just calculate the sample variance with the variance of the sample. This is because the sample mean will be by definition by closer to each of the sampled points than the actual value. So we correct for it. 
This is a random variable too:\n\\begin{equation} S^{2} = \\frac{1}{n-1} \\sum_{i=1}^{N} (X_{i} - \\bar{X})^{2} \\end{equation}\nstandard error of the mean \\begin{equation} Var(\\bar{X}) = \\frac{S^{2}}{n} \\end{equation}\nthis is the ERROR OF the mean given what you measured because of the central limit theorem\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e is a variable that has a value, but there are \u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e with respect to what that value is.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ediscrete\u003c/strong\u003e: finite number of values\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econtinuous\u003c/strong\u003e: infinitely many possible values\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"probability-mass-function--kbhprobability-mass-function-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eA discrete random variable is encoded as a \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"probability-density-function--kbhprobability-distributions-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eA continuous \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e is represented as a \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"summary-statistics\"\u003esummary statistics\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e is a description for the random variable: and \u003ca 
href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es are usually communicated via \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpected value\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"adding-random-variables\"\u003eadding random variables\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;what\u0026rsquo;s the probability of \\(X + Y = n\\) with \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e \\(X\\) and \\(Y\\)?\u0026rdquo;\n\u0026ldquo;what\u0026rsquo;s the probability of two independent samples from the same exact distribution adding up to \\(n\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{i=-\\infty}^{\\infty} P(X=i, Y=n-i)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor integrals and \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003es, as appropriate for \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e cases\u003c/p\u003e\n\u003cp\u003efor every single outcome, we want to create every possible operation which causes the two variables to sum to \\(n\\).\u003c/p\u003e\n\u003cp\u003eWe can use \u003ca href=\"#adding-random-variables\"\u003econvolution\u003c/a\u003e to figure out every combination of assignments to random variables which add to a value, and sum their probabilities together.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinomial_distribution/#adding-id-6ef4a641-135c-45f5-9c71-efd1fe34166c-binomial-distribution\"\u003eadding binomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgaussian_distribution/#adding-id-8194b001-e4a1-43c9-9409-cd07bf1f00d4-gaussian-distribution-s\"\u003eadding Gaussian distributions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability_of_k_in_x_time/#adding-id-58a7600a-5169-4473-8ddc-f286534fc1f4-poisson-distribution\"\u003eadding poisson distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf you add a bunch of \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e things together\u0026hellip;. \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"averaging-random-variables\"\u003eaveraging random variables\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#adding-random-variables\"\u003eadding random variables\u003c/a\u003e + \u003ca href=\"/posts/kbhgaussian_distribution/#linear-transformations-on-gaussian\"\u003elinear transformers on Gaussian\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eYou end up with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{N}\\qty(\\mu, \\frac{1}{n} \\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou note: as you sum together many things that is \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, the average is pretty the same; but the \u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e gets smaller as you add more.\u003c/p\u003e\n\u003ch2 id=\"maxing-random-variables\"\u003emaxing random variables\u003c/h2\u003e\n\u003cp\u003eGumbel distribution: fisher tripplett gedembo theorem???\u003c/p\u003e\n\u003ch2 id=\"sampling-statistics\"\u003esampling statistics\u003c/h2\u003e\n\u003cp\u003eWe assume that there\u0026rsquo;s some underlying distribution with some true mean \\(\\mu\\) and true variance \\(\\sigma^{2}\\). 
We would like to model it with some confidence.\u003c/p\u003e\n\u003cp\u003eConsider a series of measured samples \\(x_1, \u0026hellip;, x_{n}\\), each being an instantiation of a \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e drawn from the underlying distribution each being \\(X_1, \u0026hellip;, X_{n}\\).\u003c/p\u003e\n\u003ch3 id=\"sample-mean\"\u003esample mean\u003c/h3\u003e\n\u003cp\u003eLet us estimate the true population mean\u0026hellip; by creating a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e representing the the averaging \\(n\\) measured \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es representing the observations:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\bar{X} = \\frac{1}{N} \\sum_{i=1}^{n} X_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can do this because we really would like to know \\(\\mathbb{E}[\\bar{X}] = \\mathbb{E}[\\frac{1}{N} \\sum_{i=1}^{n} X_i] = \\frac{1}{N}\\sum_{i=1}^{n} \\mathbb{E}[X_{i}] = \\frac{1}{N} N \\mu = \\mu\\) and so as long as each of the underlying variables have the same expected mean (they do because \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e) drawn, we can use the \u003ca href=\"#sample-mean\"\u003esample mean\u003c/a\u003e to estimate the population mean.\u003c/p\u003e\n\u003ch3 id=\"sample-variance\"\u003esample variance\u003c/h3\u003e\n\u003cp\u003eWe can\u0026rsquo;t just calculate the \u003ca href=\"#sample-variance\"\u003esample variance\u003c/a\u003e with the variance of the sample. This is because the \u003ca href=\"#sample-mean\"\u003esample mean\u003c/a\u003e will be by definition by closer to each of the sampled points than the actual value. So we correct for it. 
This is a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e too:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS^{2} = \\frac{1}{n-1} \\sum_{i=1}^{N} (X_{i} - \\bar{X})^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"standard-error-of-the-mean\"\u003estandard error of the mean\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nVar(\\bar{X}) = \\frac{S^{2}}{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is the \u003cstrong\u003eERROR OF the mean\u003c/strong\u003e given what you measured because of the central limit theorem\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrandom_variables/","tags":null,"title":"random variable"},{"categories":null,"contents":"The Random Walk Hypothesis is a financial econometric hypothesis that stocks have the same distribution and independent of each other: that stocks are a random variable and not predictable in a macro space.\nTo set up the random walk hypothesis, let\u0026rsquo;s begin with some time \\(t\\), an asset return \\(r_t\\), some time elapsed \\(k\\), and some future asset return \\(r_{t+k}\\).\nWe will create two random variables \\(f(r_t)\\) and \\(g(r_{t+k})\\), which \\(f\\) and \\(g\\) are arbitrary functions we applied to analyze the return at that time.\nThe Random Walk Hypothesis tells us that, at any two unrelated given time, you cannot use the behavior of \\(r_t\\) to predict anything about \\(r_{t+k}\\), under any kind of analysis \\(f\\) or \\(g\\), that:\n\\begin{equation} Cov[f(r_t), g(r_{t+k})] = 0 \\end{equation}\nSo, all of the Random Walk Hypothesis models would leverage the above result, that the two time info don\u0026rsquo;t evolve together and they are independently, randomly distributed: they are random variables.\nFor the market to be a typical Random Walk, the central limit theorem has to hold on the value of return. 
This usually possible, but if the variance of the return is not finite, the return will not hold the central limit theorem which means that the return will not be normal. Of course the return does not have to hold central limit theorem, then we use other convergence distributions but still model it in the Random Walk Hypothesis as a random variable.\nreturn (FinMetrics) Importantly: its not the price that follows the random walk; it is the RETURN that follows the walk; if it was the price, then its possible for price to become negative. Return, technically, is defined by:\n\\begin{equation} R_t = \\frac{p_t-p_{t-1}}{p_{t-1}} \\end{equation}\nHowever, we really are interested in the natural log of the prices:\n\\begin{equation} r_t = log(p_t) - log(p_{t-1}) \\approx R_t \\end{equation}\nWe can do this is because, for small \\(x\\), \\(log\\ x \\approx x-1\\).\nWe do this is because, if we were wanting to add the returns over the last \\(n\\) days, in \\(R_t\\) you\u0026rsquo;d have to multiply them:\n\\begin{equation} \\frac{p_{t+1}}{p_t} \\cdot \\frac{p_t}{p_{t-1}} = \\frac{p_{t+1}}{p_{t-1}} \\end{equation}\nThis is bad, because of the central limit theorem. To make a random variable built of normalizing \\(n\\) items, you have to add and not multiply them together over a time range. We want to be able to add.\nTherefore, \\(r_t\\) can achieve the same division by adding (see the log laws).\nBut either way, with enough, we know that \\(r_t\\) is independently, identity distributed.\ntime series analysis Over some days \\(k\\), we have:\n\\begin{equation} Y_{k} = \\sum_{i=1}^{k} x_{i} \\end{equation}\nGiven that \\(x_{i}\\) is distributed randomly: \\(\\{x_{i}\\}_{i=1}^{N}\\). This becomes the foundation of time series analysis. The problem of course becomes harder when the values drift against each other, is nonindependent, etc. 
We can use the Martingale Model to take generic random walk to a more dependent model.\nCJ test If you have some amount of volacitity measurement, we first know that, by the Random Walk Hypothesis, we have:\n\\begin{equation} X_{k} \\sim N(0,\\sigma^{2}) \\end{equation}\nGiven some future return, you hope that:\n\\begin{equation} Y_{k}=\\sum_{i=1}^{k}X_{k}\\sim N(0,\\sigma^{2}) \\end{equation}\nIf so, if you have like \\(20\\%\\) of log returns, to have a statistically significant return, we have that:\n\\begin{equation} \\sigma =\\frac{0.2}{\\sqrt{12}} \\end{equation}\ngetting a statistically significant difference from it is hard.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e is a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003efinancial econometric\u003c/a\u003e hypothesis that stocks have the same distribution and independent of each other: that stocks are a random variable and not predictable in a macro space.\u003c/p\u003e\n\u003cp\u003eTo set up the random walk hypothesis, let\u0026rsquo;s begin with some time \\(t\\), an asset return \\(r_t\\), some time elapsed \\(k\\), and some future asset return \\(r_{t+k}\\).\u003c/p\u003e\n\u003cp\u003eWe will create two random variables \\(f(r_t)\\) and \\(g(r_{t+k})\\), which \\(f\\) and \\(g\\) are arbitrary functions we applied to analyze the return at that time.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e tells us that, at any two unrelated given time, you cannot use the behavior of \\(r_t\\) to predict anything about \\(r_{t+k}\\), under any kind of analysis \\(f\\) or \\(g\\), that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nCov[f(r_t), g(r_{t+k})] = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, all of the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e models would leverage the above result, that the two time info 
don\u0026rsquo;t evolve together and they are independently, \u003ca href=\"/posts/kbhrandom/\"\u003erandom\u003c/a\u003ely distributed: they are \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eFor the market to be a typical \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk\u003c/a\u003e, the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e has to hold on the value of \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e. This usually possible, but if the variance of the return is not finite, the \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e will not hold the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e which means that the \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e will not be \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal\u003c/a\u003e. Of course the \u003ca href=\"#return--finmetrics\"\u003ereturn\u003c/a\u003e does not have to hold \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e, then we use other convergence distributions but still model it in the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e as a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"return--finmetrics\"\u003ereturn (FinMetrics)\u003c/h2\u003e\n\u003cp\u003eImportantly: its not the \u003cem\u003eprice\u003c/em\u003e that follows the random walk; it is the \u003cem\u003eRETURN\u003c/em\u003e that follows the walk; if it was the price, then its possible for price to become negative. 
Return, technically, is defined by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR_t = \\frac{p_t-p_{t-1}}{p_{t-1}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHowever, we really are interested in the natural log of the prices:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_t = log(p_t) - log(p_{t-1}) \\approx R_t\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can do this is because, for small \\(x\\), \\(log\\ x \\approx x-1\\).\u003c/p\u003e\n\u003cp\u003eWe do this is because, if we were wanting to add the returns over the last \\(n\\) days, in \\(R_t\\) you\u0026rsquo;d have to multiply them:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{p_{t+1}}{p_t} \\cdot \\frac{p_t}{p_{t-1}} = \\frac{p_{t+1}}{p_{t-1}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is bad, because of the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e. To make a random variable built of normalizing \\(n\\) items, you have to \u003cem\u003eadd\u003c/em\u003e and not \u003cem\u003emultiply\u003c/em\u003e them together over a time range. We want to be able to add.\u003c/p\u003e\n\u003cp\u003eTherefore, \\(r_t\\) can achieve the same division by adding (see the \u003ca href=\"/posts/kbhlog_laws/\"\u003elog laws\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eBut either way, with enough, we know that \\(r_t\\) is independently, identity distributed.\u003c/p\u003e\n\u003ch2 id=\"time-series-analysis\"\u003etime series analysis\u003c/h2\u003e\n\u003cp\u003eOver some days \\(k\\), we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY_{k} = \\sum_{i=1}^{k} x_{i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven that \\(x_{i}\\) is distributed randomly: \\(\\{x_{i}\\}_{i=1}^{N}\\). This becomes the foundation of \u003ca href=\"#time-series-analysis\"\u003etime series analysis\u003c/a\u003e. The problem of course becomes harder when the values drift against each other, is nonindependent, etc. 
We can use the \u003ca href=\"/posts/kbhmartingale_model/\"\u003eMartingale Model\u003c/a\u003e to take generic \u003ca href=\"/posts/kbhrandom_walk/\"\u003erandom walk\u003c/a\u003e to a more dependent model.\u003c/p\u003e\n\u003ch2 id=\"cj-test\"\u003eCJ test\u003c/h2\u003e\n\u003cp\u003eIf you have some amount of volacitity measurement, we first know that, by the \u003ca href=\"/posts/kbhrandom_walk/\"\u003eRandom Walk Hypothesis\u003c/a\u003e, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX_{k} \\sim N(0,\\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven some future return, you hope that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY_{k}=\\sum_{i=1}^{k}X_{k}\\sim N(0,\\sigma^{2})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf so, if you have like \\(20\\%\\) of log returns, to have a statistically significant return, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma =\\frac{0.2}{\\sqrt{12}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egetting a statistically significant difference from it is \u003cem\u003ehard.\u003c/em\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrandom_walk/","tags":null,"title":"Random Walk Hypothesis"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrandom_wol/","tags":null,"title":"random wol"},{"categories":null,"contents":"randomized algorithm is a type of algorithm, similar to relaxation.\nMake a hard problem easier by changing the problem What if, instead of guaranteeing we find the best/correct answer, we only provide some chance of finding the best/correct answer? primality testing primality testing is very important for modern crypto systems; we need to be able to find large prime numbers, and be able to generate them quickly.\ntraditional primality testing We can divide every prime number below \\(\\sqrt x\\). 
In theory, this is pretty fast, but we need to know all the primes we need to test.\nThis would therefore take \\(O(\\sqrt{x})\\) time.\nmiller-rabin primality testing miller-rabin primality testing is a primality testing randomized algorithm.\nConstruct a set of equations, each one requiring an exponentiation and a division If any of them is false, the number is composite If they are all true, the probability that the number is composite is reduced to \\(\\frac{1}{4}\\). If we run miller-rabin 10 times \\(O(10)=O(1)\\), the number is \\(1-\\left(\\frac{1}{4}\\right)^{10}\\) chance of being prime.\nThis is of course much much faster than traditional primality testing.\nModern cryptographic system uses this.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrandomized_algorithum/\"\u003erandomized algorithm\u003c/a\u003e is a type of algorithm, similar to \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMake a hard problem easier by changing the problem\u003c/li\u003e\n\u003cli\u003eWhat if, instead of guaranteeing we find the best/correct answer, we only provide some chance of finding the best/correct answer?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"primality-testing\"\u003eprimality testing\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#primality-testing\"\u003eprimality testing\u003c/a\u003e is very important for modern crypto systems; we need to be able to find large prime numbers, and be able to generate them quickly.\u003c/p\u003e\n\u003ch3 id=\"traditional-primality-testing\"\u003etraditional primality testing\u003c/h3\u003e\n\u003cp\u003eWe can divide every prime number below \\(\\sqrt x\\). 
In theory, this is pretty fast, but we need to know all the primes we need to test.\u003c/p\u003e\n\u003cp\u003eThis would therefore take \\(O(\\sqrt{x})\\) time.\u003c/p\u003e\n\u003ch3 id=\"miller-rabin-primality-testing\"\u003emiller-rabin primality testing\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#miller-rabin-primality-testing\"\u003emiller-rabin primality testing\u003c/a\u003e is a \u003ca href=\"#primality-testing\"\u003eprimality testing\u003c/a\u003e \u003ca href=\"/posts/kbhrandomized_algorithum/\"\u003erandomized algorithm\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eConstruct a set of equations, each one requiring an exponentiation and a division\u003c/li\u003e\n\u003cli\u003eIf any of them is false, the number is composite\u003c/li\u003e\n\u003cli\u003eIf they are all true, the probability that the number is composite is reduced to \\(\\frac{1}{4}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf we run miller-rabin 10 times \\(O(10)=O(1)\\), the number is \\(1-\\left(\\frac{1}{4}\\right)^{10}\\) chance of being prime.\u003c/p\u003e\n\u003cp\u003eThis is of course much much faster than \u003ca href=\"#traditional-primality-testing\"\u003etraditional primality testing\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eModern cryptographic system uses this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrandomized_algorithum/","tags":null,"title":"randomized algorithm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrandomized_pbvi/","tags":null,"title":"Randomized PBVI"},{"categories":null,"contents":"The number of alpha vectors needed to perform PBVI is one for each of your belief sample. Which is a bad idea. 
Perseus is essentially PBVI, where this idea is explored slightly.\nThe preamble is the same as PBVI:\nwe keep track of a bunch of alpha vectors and belief samples (which we get from point selection):\n\\begin{equation} \\Gamma = \\{\\alpha_{1}, \\dots, \\alpha_{m}\\} \\end{equation}\nand\n\\begin{equation} B = \\{b_1, \\dots, b_{m}\\} \\end{equation}\nTo preserve the lower-boundedness of these alpha vectors, one should seed the alpha vectors via something like blind lower bound\nWe can estimate our utility function at any belief by looking in the set for the most optimal:\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b \\end{equation}\nWe now define a function named backup (see PBVI Backup), and call it on ONLY ONE belief:\nlet us sample\u0026mdash;\n\\begin{equation} b \\in B \\end{equation}\nand call backup to get:\n\\begin{equation} \\alpha\u0026rsquo; = backup(\\Gamma, b) \\end{equation}\nwhere,\n\\begin{equation} backup(\\Gamma, b) \\rightarrow \\alpha_{t+1} \\end{equation}\nNow, if \\(b \\cdot a\u0026rsquo; \u0026gt; \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\\) (i.e. we just increased our value floor because our new alpha vector indicates a higher value at \\(b\\)), we add our new vector to the set \\(\\Gamma\\). Otherwise, we add \\(a\u0026rsquo; = \\arg\\max_{\\alpha \\in \\Gamma} b \\cdot \\alpha\\), the alpha vector which previously got the highest value for \\(b\\).\nAfter this, we pull a Perseus-core funni:\nPerseus Belief Pruning let us define:\n\\begin{equation} V_{t}(b) = \\max_{\\alpha \\in \\Gamma_{t}} \\alpha \\cdot b \\end{equation}\nand\n\\begin{equation} V_{t+1}(b) = \\max_{\\alpha \\in \\Gamma_{t+1}} \\alpha \\cdot b \\end{equation}\nnamely, the expected value of \\(b\\) before and after belief updates. 
Then, we:\n\\begin{equation} B_{t+1} = \\{b \\in B, \\text{if}\\ V_{t+1}(b) \u0026lt; V(b)\\} \\end{equation}\nthat is, if updating our sampled belief\u0026rsquo;s alpha vector improved the value of another belief in the set by accident already, we don\u0026rsquo;t need to update that belief again.\nRepeat this process until we are out of beliefs to update, that is, when \\(B = \\emptyset\\).\nSlight Variation? then,\n","html":"\u003cp\u003eThe number of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es needed to perform \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e is one for each of your belief sample. Which is a bad idea. \u003ca href=\"/posts/kbhperseus/\"\u003ePerseus\u003c/a\u003e is essentially \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, where this idea is explored slightly.\u003c/p\u003e\n\u003cp\u003eThe preamble is the same as \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003ewe keep track of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es and \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e samples (which we get from \u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Gamma = \\{\\alpha_{1}, \\dots, \\alpha_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB = \\{b_1, \\dots, b_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo preserve the lower-boundedness of these \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es, one should seed the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es via something like \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eWe can estimate our \u003ca 
href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function at any belief by looking in the set for the most optimal:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now define a function named \u003ccode\u003ebackup\u003c/code\u003e (see \u003ca href=\"/posts/kbhpoint_based_value_iteration/#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e), and call it on ONLY ONE belief:\u003c/p\u003e\n\u003cp\u003elet us sample\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb \\in B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand call \u003ccode\u003ebackup\u003c/code\u003e to get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\alpha\u0026rsquo; = backup(\\Gamma, b)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nbackup(\\Gamma, b) \\rightarrow \\alpha_{t+1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, if \\(b \\cdot a\u0026rsquo; \u0026gt; \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top}b\\) (i.e. we just increased our value floor because our new \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e indicates a higher value at \\(b\\)), we add our new vector to the set \\(\\Gamma\\). 
Otherwise, we add \\(a\u0026rsquo; = \\arg\\max_{\\alpha \\in \\Gamma} b \\cdot \\alpha\\), the \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e which previously got the highest value for \\(b\\).\u003c/p\u003e\n\u003cp\u003eAfter this, we pull a Perseus-core funni:\u003c/p\u003e\n\u003ch2 id=\"perseus-belief-pruning\"\u003ePerseus Belief Pruning\u003c/h2\u003e\n\u003cp\u003elet us define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{t}(b) = \\max_{\\alpha \\in \\Gamma_{t}} \\alpha \\cdot b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_{t+1}(b) = \\max_{\\alpha \\in \\Gamma_{t+1}} \\alpha \\cdot b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enamely, the expected value of \\(b\\) before and after \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e updates. Then, we:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB_{t+1} = \\{b \\in B, \\text{if}\\ V_{t+1}(b) \u0026lt; V(b)\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat is, if updating our sampled belief\u0026rsquo;s \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e improved the value of another belief in the set by accident already, we don\u0026rsquo;t need to update that belief again.\u003c/p\u003e\n\u003cp\u003eRepeat this process until we are out of beliefs to update, that is, when \\(B = \\emptyset\\).\u003c/p\u003e\n\u003ch2 id=\"slight-variation\"\u003eSlight Variation?\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_20-15-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ethen,\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_20-15-24_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhperseus/","tags":null,"title":"Randomized Point-Based Value Iteration"},{"categories":null,"contents":"The range (image, column space) is the set that some function \\(T\\) maps to.\nconstituents some \\(T: V\\to 
W\\)\nrequirements The range is just the space the map maps to:\n\\begin{equation} range\\ T = \\{Tv: v \\in V\\} \\end{equation}\nadditional information range is a subspace of the codomain This result is hopefully not super surprising.\nzero \\begin{equation} T0 = 0 \\end{equation}\nas linear maps take \\(0\\) to \\(0\\), so \\(0\\) is definitely in the range.\naddition and scalar multiplication inherits from additivity and homogeneity of Linear Maps.\nGiven \\(T v_1 = w_1,\\ T v_2=w_2\\), we have that \\(w_1, w_2 \\in range\\ T\\).\n\\begin{equation} T(v_1 + v_2) = w_1 + w_2 \\end{equation}\n\\begin{equation} T(\\lambda v_1) = \\lambda w_1 \\end{equation}\nSo closed under addition and scalar multiplication. Having shown the zero and closure, we have that the range is a subspace of the codomain. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e (\u003ca href=\"/posts/kbhrange/\"\u003eimage\u003c/a\u003e, \u003ca href=\"/posts/kbhrange/\"\u003ecolumn space\u003c/a\u003e) is the set that some \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e \\(T\\) maps to.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003esome \\(T: V\\to W\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe range is just the space the map maps to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nrange\\ T = \\{Tv: v \\in V\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"range-is-a-subspace-of-the-codomain\"\u003erange is a subspace of the codomain\u003c/h3\u003e\n\u003cp\u003eThis result is hopefully not super surprising.\u003c/p\u003e\n\u003ch4 id=\"zero\"\u003ezero\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nT0 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas \u003ca href=\"/posts/kbhlinear_map/#linear-maps-take-0-to-0\"\u003elinear maps take \\(0\\) 
to \\(0\\)\u003c/a\u003e, so \\(0\\) is definitely in the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"addition-and-scalar-multiplication\"\u003eaddition and scalar multiplication\u003c/h4\u003e\n\u003cp\u003einherits from additivity and \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneity\u003c/a\u003e of \u003ca href=\"/posts/kbhlinear_map/\"\u003eLinear Map\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eGiven \\(T v_1 = w_1,\\ T v_2=w_2\\), we have that \\(w_1, w_2 \\in range\\ T\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(v_1 + v_2) = w_1 + w_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT(\\lambda v_1) = \\lambda w_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e. Having shown the \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e and closure, we have that the \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of the codomain. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrange/","tags":null,"title":"range"},{"categories":null,"contents":"Most users are incapable of writing good Boolean Retrieval queries.\nfeast or famine problem Boolean Retrieval either returns too few or too many results: AND queries return often too few results \\(\\min (x,y)\\), and OR queries return too many results \\(x+y\\).\nThis is not a problem with Ranked Information Retrieval because a large result set doesn\u0026rsquo;t matter: top results just needs to be good results.\nfree text query Instead of using a series of Boolean Retrieval, we instead give free text to the user.\nscore To do Ranked Information Retrieval, we need a way of asigning a score to a query-document pair.\nthe more frequently the query term appears in the doc, the higher the score should be if the word doesn\u0026rsquo;t appear, we score as 0 Jaccard Coefficient \\begin{equation} jaccard(A,B) = |A \\cap B | / |A \\cup B| \\end{equation}\nwhere \\(A\\) and \\(B\\) are vocab, (i.e. 
no frequency).\nlimitation doesn\u0026rsquo;t consider frequency rare terms are more informative than frequent terms the normalization isn\u0026rsquo;t quite right, ideally we should use \\(\\sqrt{A\\cup B}\\), which can be obtained via cosine-similarity log-frequency weighting \u0026ldquo;Relevance does not increase proportionally with term frequency\u0026rdquo;\u0026mdash;a document with 10 occurrences of a term is more relevant than that with 1, but its not 10 times more relevant.\n\\begin{equation} w_{t,d} = \\begin{cases} 1 + \\log_{10} (tf_{t,d}), \\text{if } tf_{t,d} \u0026gt; 0 \\\\ 0 \\end{cases} \\end{equation}\nthis gives less-than-linear growth.\nto score, we add up all the terms which intersect.\ndocument frequency the document frequency is the number of documents in which a term occur.\nIts an INVERSE MEASURE of the informativeness of a word\u0026mdash;the more times a word appears, the less informative it is.\n\\begin{equation} idf_{t} = \\log_{10} (N / df_{t}) \\end{equation}\nwhere \\(N\\) is the number of documents, and \\(df_{t}\\) is the number of documents in which the term appears. We take the log in the motivation as log-frequency weighting.\n\u0026ldquo;a word that occurs in every document has a weight of \\(0\\)\u0026rdquo;.\nThere is no effect to one-term queries.\nWe don\u0026rsquo;t use collection frequencies (i.e. we never consider COUNTS in CORPUS, because collection frequencies would score commonly found words equally because it doesn\u0026rsquo;t consider distribution).\nTF-IDF multiply log-frequency weighting TIMES document frequency.\n\\begin{equation} score(q,d) = \\sum_{t \\in q \\cap d} (1+\\log_{10}(tf_{t,d})) \\times \\log_{10}\\qty(\\frac{N}{df_{t}}) \\end{equation}\nif \\(tf = 0\\), set the entire TF score to \\(0\\) without adding 1.\nusing this, we can now construct a weight-matrix. 
Each document is a vector of the TFIDF score for each term against each document.\nThere are a series of approaches that you can use as a possible approach to compute tfidf: various ways to normalizing, variable document frequency counts (or not use it), etc.\nSMART notation ddd.qqq, where the first three letters represent the document weighting scheme, and the second three letter represents the query weighting scheme.\nvector-space model after creating a matrix where each column is a document and each row is a term, and the cells are TF-IDF of the words against the documents, we can consider each document as a vector over the team.\nwe can treat queries as ALSO a document in the space, and therefore use proximity of the vectors as a searching system.\n(Euclidian distance is bad: because its too large for vectors of different lengths. Instead, we should use angle instead of distance.)\ncosine similarity \\begin{equation} \\cos(q,d) = \\frac{q \\cdot d}{|q| |d|} \\end{equation}\nbecause the dot product becomes just the angle between the two vectors after you normalize by length.\ntypically, you may want to normalize the length of the vectors in advance.\ncosine is a little flatten ontop\nltc.lnn weighting document: logarithm + idf + normed query: logarithm + 1 + 1\nmeaning, we don\u0026rsquo;t weight or normalize query vectors\n","html":"\u003cp\u003eMost users are incapable of writing good \u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e queries.\u003c/p\u003e\n\u003ch2 id=\"feast-or-famine-problem\"\u003efeast or famine problem\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e either returns too few or too many results: AND queries return often too few results \\(\\min (x,y)\\), and OR queries return too many results \\(x+y\\).\u003c/p\u003e\n\u003cp\u003eThis is not a problem with \u003ca 
href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e because a large result set doesn\u0026rsquo;t matter: top results just needs to be good results.\u003c/p\u003e\n\u003ch2 id=\"free-text-query\"\u003efree text query\u003c/h2\u003e\n\u003cp\u003eInstead of using a series of \u003ca href=\"/posts/kbhinverted_index/#boolean-retrieval\"\u003eBoolean Retrieval\u003c/a\u003e, we instead give free text to the user.\u003c/p\u003e\n\u003ch2 id=\"score\"\u003escore\u003c/h2\u003e\n\u003cp\u003eTo do \u003ca href=\"/posts/kbhranked_information_retrieval/\"\u003eRanked Information Retrieval\u003c/a\u003e, we need a way of asigning a \u003ca href=\"#score\"\u003escore\u003c/a\u003e to a query-document pair.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe more frequently the query term appears in the doc, the higher the score should be\u003c/li\u003e\n\u003cli\u003eif the word doesn\u0026rsquo;t appear, we score as 0\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"jaccard-coefficient\"\u003eJaccard Coefficient\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\njaccard(A,B) = |A \\cap B | / |A \\cup B|\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(A\\) and \\(B\\) are vocab, (i.e. 
no frequency).\u003c/p\u003e\n\u003ch4 id=\"limitation\"\u003elimitation\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003edoesn\u0026rsquo;t consider \u003cstrong\u003efrequency\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erare terms are more informative than frequent terms\u003c/li\u003e\n\u003cli\u003ethe normalization isn\u0026rsquo;t quite right, ideally we should use \\(\\sqrt{A\\cup B}\\), which can be obtained via cosine-similarity\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"log-frequency-weighting\"\u003elog-frequency weighting\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Relevance does not increase proportionally with term frequency\u0026rdquo;\u0026mdash;a document with 10 occurrences of a term is more relevant than that with 1, but its not 10 times more relevant.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw_{t,d} = \\begin{cases}\n1 + \\log_{10} (tf_{t,d}), \\text{if } tf_{t,d} \u0026gt; 0 \\\\\n0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis gives less-than-linear growth.\u003c/p\u003e\n\u003cp\u003eto score, we add up all the terms which intersect.\u003c/p\u003e\n\u003ch3 id=\"document-frequency\"\u003edocument frequency\u003c/h3\u003e\n\u003cp\u003ethe \u003ca href=\"#document-frequency\"\u003edocument frequency\u003c/a\u003e is the number of documents in which a term occur.\u003c/p\u003e\n\u003cp\u003eIts an \u003cstrong\u003eINVERSE MEASURE\u003c/strong\u003e of the informativeness of a word\u0026mdash;the more times a word appears, the less informative it is.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nidf_{t} = \\log_{10} (N / df_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(N\\) is the number of documents, and \\(df_{t}\\) is the number of documents in which the term appears. 
We take the log in the motivation as \u003ca href=\"#log-frequency-weighting\"\u003elog-frequency weighting\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a word that occurs in every document has a weight of \\(0\\)\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eThere is no effect to one-term queries.\u003c/p\u003e\n\u003cp\u003eWe don\u0026rsquo;t use \u003cstrong\u003ecollection frequencies\u003c/strong\u003e (i.e. we never consider COUNTS in CORPUS, because collection frequencies would score commonly found words equally because it doesn\u0026rsquo;t consider distribution).\u003c/p\u003e\n\u003ch3 id=\"tf-idf\"\u003eTF-IDF\u003c/h3\u003e\n\u003cp\u003emultiply \u003ca href=\"#log-frequency-weighting\"\u003elog-frequency weighting\u003c/a\u003e TIMES \u003ca href=\"#document-frequency\"\u003edocument frequency\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nscore(q,d) = \\sum_{t \\in q \\cap d} (1+\\log_{10}(tf_{t,d})) \\times \\log_{10}\\qty(\\frac{N}{df_{t}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(tf = 0\\), set the entire TF score to \\(0\\) without adding 1.\u003c/p\u003e\n\u003cp\u003eusing this, we can now construct a weight-matrix. 
Each document is a vector of the TFIDF score for each term against each document.\u003c/p\u003e\n\u003cp\u003eThere are a series of approaches that you can use as a possible approach to compute tfidf: various ways to normalizing, variable document frequency counts (or not use it), etc.\u003c/p\u003e\n\u003ch4 id=\"smart-notation\"\u003eSMART notation\u003c/h4\u003e\n\u003cp\u003e\u003ccode\u003eddd.qqq\u003c/code\u003e, where the first three letters represent the document weighting scheme, and the second three letter represents the query weighting scheme.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-24_21-04-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"vector-space-model\"\u003evector-space model\u003c/h2\u003e\n\u003cp\u003eafter creating a matrix where each column is a document and each row is a term, and the cells are \u003ca href=\"#tf-idf\"\u003eTF-IDF\u003c/a\u003e of the words against the documents, we can consider each document as a vector over the team.\u003c/p\u003e\n\u003cp\u003ewe can treat queries as \u003cstrong\u003eALSO\u003c/strong\u003e a document in the space, and therefore use proximity of the vectors as a searching system.\u003c/p\u003e\n\u003cp\u003e(Euclidian distance is bad: because its too large for vectors of different lengths. 
Instead, we should use angle instead of distance.)\u003c/p\u003e\n\u003ch3 id=\"cosine-similarity\"\u003ecosine similarity\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\cos(q,d) = \\frac{q \\cdot d}{|q| |d|}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the dot product becomes just the angle between the two vectors after you normalize by length.\u003c/p\u003e\n\u003cp\u003etypically, you may want to normalize the length of the vectors in advance.\u003c/p\u003e\n\u003cp\u003ecosine is a little flatten ontop\u003c/p\u003e\n\u003ch3 id=\"ltc-dot-lnn-weighting\"\u003eltc.lnn weighting\u003c/h3\u003e\n\u003cp\u003edocument: logarithm + idf + normed\nquery: logarithm + 1 + 1\u003c/p\u003e\n\u003cp\u003emeaning, we don\u0026rsquo;t weight or normalize query vectors\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhranked_information_retrieval/","tags":null,"title":"Ranked Information Retrieval"},{"categories":null,"contents":"rational numbers are ratios:\n\\begin{equation} \\mathbb{Q} = \\left\\{\\frac{a}{b} \\middle| a,b\\in \\mathbb{Z}, b\\neq 0\\right\\} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrational_number/\"\u003erational number\u003c/a\u003es are ratios:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Q} = \\left\\{\\frac{a}{b} \\middle| a,b\\in \\mathbb{Z}, b\\neq 0\\right\\}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrational_number/","tags":null,"title":"rational number"},{"categories":null,"contents":"Motivation Suppose we would like to say that \u0026ldquo;we prefer all to well \\(A\\) more than bad blood \\(B\\)\u0026rdquo;\n\\begin{equation} A \\succ B \\end{equation}\nNo right or wrong answers in this statement by itself, but we can check whether or not your preferences are inconsistent with itself.\nvon Neumann and Morgenstern Axioms Axioms for checking if a set of preferences are rational. 
The axioms allow you to check if a set of decisions are Rational Preferences.\nFor three conditions \\(A, B, C\\), we have:\ncompleteness \u0026ldquo;universal comparability\u0026rdquo;\neither \\(A \\succ B\\), \\(A \\prec B\\), \\(A \\sim B\\) (you have to like either better, or be indifferent)\ntransitivity If \\(A \\succeq B\\), \\(B \\succeq C\\), then \\(A \\succeq C\\)\ncontinuity If \\(A \\succeq C \\succeq B\\), then there exists some probability \\(p\\) such that we can form a lottery of shape \\([A:p; B:1-p] \\sim C\\)\nThat is, if \\(C\\) is between \\(A, B\\), then we can create a situation where we mix the chance of \\(A\\) and \\(B\\) happening such that selecting from that situation feels equally as good as selecting from \\(C\\)\nindependence for \\(A \\succ B\\), then for any \\(C\\) and probability \\(b\\) and any probability \\(p\\), then the lotteries \\([A:p; c:1-p] \\geq [B:p; C:1-p]\\)\nAs in, if you swap out a component of a lottery with something less desirable, your new lottery should be more undesirable as well.\n","html":"\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eSuppose we would like to say that \u0026ldquo;we prefer all to well \\(A\\) more than bad blood \\(B\\)\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA \\succ B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNo right or wrong answers in this statement by itself, but we can check whether or not your preferences are \u003cstrong\u003einconsistent\u003c/strong\u003e with itself.\u003c/p\u003e\n\u003ch2 id=\"von-neumann-and-morgenstern-axioms\"\u003evon Neumann and Morgenstern Axioms\u003c/h2\u003e\n\u003cp\u003eAxioms for checking if a set of preferences are rational. 
The axioms allow you to check if a set of decisions are \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preference\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eFor three conditions \\(A, B, C\\), we have:\u003c/p\u003e\n\u003ch3 id=\"completeness\"\u003ecompleteness\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhprobability_theory/#universal-comparability\"\u003euniversal comparability\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eeither \\(A \\succ B\\), \\(A \\prec B\\), \\(A \\sim B\\) (you have to like either better, or be indifferent)\u003c/p\u003e\n\u003ch3 id=\"transitivity--kbhprobability-theory-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability_theory/#transitivity\"\u003etransitivity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eIf \\(A \\succeq B\\), \\(B \\succeq C\\), then \\(A \\succeq C\\)\u003c/p\u003e\n\u003ch3 id=\"continuity--kbhuniqueness-and-existance-dot-md\"\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuity\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eIf \\(A \\succeq C \\succeq B\\), then there exists some probability \\(p\\) such that we can form a \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e of shape \\([A:p; B:1-p] \\sim C\\)\u003c/p\u003e\n\u003cp\u003eThat is, if \\(C\\) is between \\(A, B\\), then we can create a situation where we mix the chance of \\(A\\) and \\(B\\) happening such that selecting from that situation feels equally as good as selecting from \\(C\\)\u003c/p\u003e\n\u003ch3 id=\"independence--kbhprobability-dot-md\"\u003e\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003efor \\(A \\succ B\\), then for any \\(C\\) and probability \\(b\\) and any probability \\(p\\), then the \u003ca href=\"/posts/kbhlottery/\"\u003elotteries\u003c/a\u003e \\([A:p; c:1-p] \\geq [B:p; C:1-p]\\)\u003c/p\u003e\n\u003cp\u003eAs in, if you swap out a component of a \u003ca 
href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e with something less desirable, your new \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e should be more undesirable as well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrational_preference/","tags":null,"title":"rational preference"},{"categories":null,"contents":"\\(\\mathbb{R}\\) real numbers are numbers generatable by a possibly infinite sum of powers of 10.\n","html":"\u003cp\u003e\\(\\mathbb{R}\\) real numbers are \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003es generatable by a possibly infinite sum of powers of 10.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreal_number/","tags":null,"title":"real number"},{"categories":null,"contents":"plan to depth \\(d\\), take action, replan\n","html":"\u003cp\u003eplan to depth \\(d\\), take action, replan\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreceeding_horizon/","tags":null,"title":"Receeding Horizon"},{"categories":null,"contents":"Recommender System is a system that provide recommendations due to search; it combines Information Retrival with another goal:\nEditorial/Hand-Curated lists: \u0026ldquo;list of favorites\u0026rdquo;, \u0026ldquo;essential items\u0026rdquo;, etc. Aggregates: top 10 lists, most popular, recent uploads (hardest) Individual tailors: user-based recommendation Formal Model \\(X\\) the set of users \\(S\\) the set of things to recommend \\(R\\) the set of distinct and totally ordered ratings (stars 1-5, real number 0-1, etc.) Utility function: \\(U:X \\times S \\to R\\) (\u0026ldquo;how much Three key problems:\nobtain \\(U\\) as much as possible, leaving something blank extrapolate blank entries in \\(U\\) which maybe high (\u0026ldquo;recommend something\u0026rdquo;) evaluate our recommendation method obtaining \\(U\\) ask people (rate!) 
implicit signals (buying book, picking song, watching video, etc.)\u0026mdash;this will create a binary matrix extrapolating \\(U\\) \\(U\\) is sparse (people can\u0026rsquo;t rate everything).\nCold Start problem:\nnew items have no ratings new users have no history Three Main Approaches:\ncontent based filtering Recommend \\(s\\) to \\(x\\) if \\(s \\sim s\u0026rsquo;\\) based on content where \\(s\u0026rsquo;\\) is already rated highly by \\(x\\)\n(\u0026ldquo;if the user likes Jazz, given them more Jazz\u0026rdquo;)\ncreate profile of each item (movie: genre, actor, years; lexicon: important words by TF-IDF; etc) create profile of user, say by averaging ratings of the things the user marked as high cosine similarity Advantages:\nno need for data on other users (no user sparsity) able to tailor to unique tastes able to recommend new and unpopular things transparent Disadvantages:\nneed to build a profile for user overspecialization (never recommend outside of user\u0026rsquo;s preferences) unable to exploit other users\u0026rsquo; judgments finding good features is hard collaborative filtering Instead of using content features of items to recommend, we find user instead.\nuser-user collaborative filtering Consider a user \\(x\\), and some set of unrated items \\(i\\).\nLet\u0026rsquo;s find \\(N\\) other users with similar ratings: 1) find similar users and 2) recommend items they like.\nThen, we estimate \\(x\\)\u0026rsquo;s ratings for \\(i\\) based on the similar users\u0026rsquo; ratings for \\(i\\).\nproblem\nbecause the sparsity of the user vectors which we treat as \\(0\\), cosine gets confused. Cosine doesn\u0026rsquo;t really capture the \u0026ldquo;oppositeness\u0026rdquo; of a 5 star vs a 1 star rating.\nsolution: mean center each user\u0026mdash;subtracting each user\u0026rsquo;s score from their mean rating (ignoring missing values, and do not subtract anything to the missing values). 
This allows opposite opinions to have opposite signs as well.\nsparsity\nwe prevent computing values for which one user does not rate; as in, we chop the vectors such that the comparison between \\(x\\) and \\(x_{n} \\in X\\) are both dense (i.e. if one of the two users don\u0026rsquo;t rate something, we do not include that in the vector).\nafter this, we can compute our normal cosine similarity; remember to normalise.\nprediction\nfinally, after we got our \\(N\\), we can return our prediction for \\(I\\) either based on an average score of the similar users retrieved in \\(N\\) or average weighted of scores in \\(N\\) weighted by similarity to our target user \\(x\\).\n\\begin{equation} r_{xi} = \\frac{1}{N} \\sum_{}^{} r_{yi} \\end{equation}\nor\n\\begin{equation} r_{xi} = \\sum_{}^{} \\frac{sim(x,y) r_{yi}}{sim(x,y)} \\end{equation}\nitem-item collaborative filtering For item \\(i\\), we want to find other similar items to our item \\(i\\), and average the user\u0026rsquo;s own ratings on those similar items onto \\(i\\).\nthis tends to work better because items are easier to classify than users.\nproblem\ncold start (we need initial data to seed the rating) sparsity (user ratings are sparse) popularity bias\u0026mdash;creates filter bubbles and hard to generalize over unique tastes latent factor (neural) systems represent each video and user as an embedding collaborative filtering. 
YouTube obtains this embedding by predicting what video user is going to watch\nevaluation RMSE between held out ratings:\n\\begin{equation} \\sqrt{\\frac{\\sum_{xi}^{}(r_{xi} - r^{*}_{xi})^{2}}{N}} \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrecommender_system/\"\u003eRecommender System\u003c/a\u003e is a system that provide recommendations due to search; it combines \u003ca href=\"/posts/kbhinformation_retrival/\"\u003eInformation Retrival\u003c/a\u003e with another goal:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eEditorial/Hand-Curated lists\u003c/strong\u003e: \u0026ldquo;list of favorites\u0026rdquo;, \u0026ldquo;essential items\u0026rdquo;, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eAggregates\u003c/strong\u003e: top 10 lists, most popular, recent uploads\u003c/li\u003e\n\u003cli\u003e(hardest) \u003cstrong\u003eIndividual tailors\u003c/strong\u003e: user-based recommendation\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"formal-model\"\u003eFormal Model\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) the set of users\u003c/li\u003e\n\u003cli\u003e\\(S\\) the set of things to recommend\u003c/li\u003e\n\u003cli\u003e\\(R\\) the set of distinct and totally ordered ratings (stars 1-5, real number 0-1, etc.)\u003c/li\u003e\n\u003cli\u003eUtility function: \\(U:X \\times S \\to R\\) (\u0026ldquo;how much\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThree key problems:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eobtain\u003c/strong\u003e \\(U\\) as much as possible, leaving something blank\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eextrapolate\u003c/strong\u003e blank entries in \\(U\\) which maybe high (\u0026ldquo;recommend something\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eevaluate\u003c/strong\u003e our recommendation method\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"obtaining-u\"\u003eobtaining \\(U\\)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eask 
people (rate!)\u003c/li\u003e\n\u003cli\u003eimplicit signals (buying book, picking song, watching video, etc.)\u0026mdash;this will create a binary matrix\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"extrapolating-u\"\u003eextrapolating \\(U\\)\u003c/h2\u003e\n\u003cp\u003e\\(U\\) is sparse (people can\u0026rsquo;t rate everything).\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eCold Start problem\u003c/strong\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003enew items have no ratings\u003c/li\u003e\n\u003cli\u003enew users have no history\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eThree Main Approaches\u003c/strong\u003e:\u003c/p\u003e\n\u003ch3 id=\"content-based-filtering\"\u003econtent based filtering\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003eRecommend \\(s\\) to \\(x\\) if \\(s \\sim s\u0026rsquo;\\) based on content where \\(s\u0026rsquo;\\) is already rated highly by \\(x\\)\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e(\u0026ldquo;if the user likes Jazz, given them more Jazz\u0026rdquo;)\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecreate profile of each item (movie: genre, actor, years; lexicon: important words by \u003ca href=\"/posts/kbhranked_information_retrieval/#tf-idf\"\u003eTF-IDF\u003c/a\u003e; etc)\u003c/li\u003e\n\u003cli\u003ecreate profile of user, say by averaging ratings of the things the user marked as high\u003c/li\u003e\n\u003cli\u003ecosine similarity\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eAdvantages:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eno need for data on other users (no user sparsity)\u003c/li\u003e\n\u003cli\u003eable to tailor to unique tastes\u003c/li\u003e\n\u003cli\u003eable to recommend new and unpopular things\u003c/li\u003e\n\u003cli\u003etransparent\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eDisadvantages:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eneed to build a profile for user\u003c/li\u003e\n\u003cli\u003eoverspecialization (never recommend outside of 
user\u0026rsquo;s preferences)\u003c/li\u003e\n\u003cli\u003eunable to exploit other users\u0026rsquo; judgments\u003c/li\u003e\n\u003cli\u003efinding good features is hard\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"collaborative-filtering\"\u003ecollaborative filtering\u003c/h3\u003e\n\u003cp\u003eInstead of using content features of items to recommend, we find user instead.\u003c/p\u003e\n\u003ch4 id=\"user-user-collaborative-filtering--orga6897ce\"\u003euser-user \u003ca href=\"#collaborative-filtering\"\u003ecollaborative filtering\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eConsider a user \\(x\\), and some set of unrated items \\(i\\).\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s find \\(N\\) other users with similar ratings: 1) find similar users and 2) recommend items they like.\u003c/p\u003e\n\u003cp\u003eThen, we estimate \\(x\\)\u0026rsquo;s ratings for \\(i\\) based on the similar users\u0026rsquo; ratings for \\(i\\).\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eproblem\u003c/p\u003e\n\u003cp\u003ebecause the sparsity of the user vectors which we treat as \\(0\\), cosine gets confused. Cosine doesn\u0026rsquo;t really capture the \u0026ldquo;oppositeness\u0026rdquo; of a 5 star vs a 1 star rating.\u003c/p\u003e\n\u003cp\u003esolution: \u003cstrong\u003emean center\u003c/strong\u003e each user\u0026mdash;subtracting each user\u0026rsquo;s score from their mean rating (ignoring missing values, and do not subtract anything to the missing values). This allows opposite opinions to have opposite signs as well.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003esparsity\u003c/p\u003e\n\u003cp\u003ewe prevent computing values for which one user does not rate; as in, we chop the vectors such that the comparison between \\(x\\) and \\(x_{n} \\in X\\) are both dense (i.e. 
if one of the two users don\u0026rsquo;t rate something, we do not include that in the vector).\u003c/p\u003e\n\u003cp\u003eafter this, we can compute our normal cosine similarity; remember to normalise.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eprediction\u003c/p\u003e\n\u003cp\u003efinally, after we got our \\(N\\), we can return our prediction for \\(I\\) either based on an average score of the similar users retrieved in \\(N\\) or average weighted of scores in \\(N\\) weighted by similarity to our target user \\(x\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{xi} = \\frac{1}{N} \\sum_{}^{} r_{yi}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr_{xi} = \\sum_{}^{} \\frac{sim(x,y) r_{yi}}{sim(x,y)}\n\\end{equation}\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"item-item-collaborative-filtering--orga6897ce\"\u003eitem-item \u003ca href=\"#collaborative-filtering\"\u003ecollaborative filtering\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eFor item \\(i\\), we want to find other similar \u003cstrong\u003eitems\u003c/strong\u003e to our item \\(i\\), and average the user\u0026rsquo;s own ratings on those similar items onto \\(i\\).\u003c/p\u003e\n\u003cp\u003ethis tends to work better because items are easier to classify than users.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eproblem\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecold start (we need initial data to seed the rating)\u003c/li\u003e\n\u003cli\u003esparsity (user ratings are sparse)\u003c/li\u003e\n\u003cli\u003epopularity bias\u0026mdash;creates filter bubbles and hard to generalize over unique tastes\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"latent-factor--neural--systems\"\u003elatent factor (neural) systems\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003erepresent 
each video and user as an embedding\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"#collaborative-filtering\"\u003ecollaborative filtering\u003c/a\u003e.\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYouTube obtains this embedding by predicting what video user is going to watch\u003c/p\u003e\n\u003ch2 id=\"evaluation\"\u003eevaluation\u003c/h2\u003e\n\u003cp\u003eRMSE between held out ratings:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sqrt{\\frac{\\sum_{xi}^{}(r_{xi} - r^{*}_{xi})^{2}}{N}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrecommender_system/","tags":null,"title":"Recommender System"},{"categories":null,"contents":"reduce reduces compute time for list-based operations: it changes a linear series of events to divide-and-conquer so you can parallelize it\nin theory reduce only works for associative operations?\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhreduce/\"\u003ereduce\u003c/a\u003e reduces compute time for list-based operations: it changes a linear series of events to divide-and-conquer so you can parallelize it\u003c/p\u003e\n\u003cp\u003ein theory \u003ca href=\"/posts/kbhreduce/\"\u003ereduce\u003c/a\u003e only works for associative operations?\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreduce/","tags":null,"title":"reduce"},{"categories":null,"contents":"in NSM, reductive paraphrase is the act of reducing all utterances in a language into semantic primes.\nThis is usually done with the application of an inherent, universal grammar: the conceptual grammar of semantic primes.\nproblems with reductive paraphrasing In the experiment conducted by (Labov 1973), Labov (according to (Geeraerts 2009), manuscript not found) showed that the boundaries of cup vs. 
mug are not clearly delineated.\n","html":"\u003cp\u003ein \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e, \u003ca href=\"/posts/kbhreductive_paraphrase/\"\u003ereductive paraphrase\u003c/a\u003e is the act of reducing all utterances in a language into \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eThis is usually done with the application of an inherent, universal grammar: the \u003ca href=\"/posts/kbhconceptual_grammar/\"\u003econceptual grammar\u003c/a\u003e of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n\u003ch2 id=\"problems-with-reductive-paraphrasing\"\u003eproblems with reductive paraphrasing\u003c/h2\u003e\n\u003cp\u003eIn the experiment conducted by (\u003ca href=\"#citeproc_bib_item_2\"\u003eLabov 1973\u003c/a\u003e), Labov (according to (\u003ca href=\"#citeproc_bib_item_1\"\u003eGeeraerts 2009\u003c/a\u003e), manuscript not found) showed that the boundaries of cup vs. mug are not clearly delineated.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreductive_paraphrase/","tags":null,"title":"reductive paraphrase"},{"categories":null,"contents":"Thanks for opening Jack\u0026rsquo;s long rambly PDF. Please read all of it; I wanted to get this out there before anything else so I apologize in advance for a letter that\u0026rsquo;s on the longer side and I didn\u0026rsquo;t have time to write a shorter one.\nBefore you begin, please read Michael\u0026rsquo;s AMAZING notes on our pitch to get the context. It\u0026rsquo;s amazing. I will not repeat here anything mentioned there.\nPat yourself on the back Oh god was that a difficult semester. We got through many a challenges and worked together to solve most of them. That\u0026rsquo;s cool. 
We also built a thing that the XRT team liked; so that\u0026rsquo;s cool too.\nSome of you (in the meeting) will already have known, but we are greenlit to go into phase -1! What does that mean? What changes? How can you help? Will meetings finally end on time? When will Jack finish asking silly questions? Find out more\u0026hellip; below.\nBut not too hard Just to reiterate our master deliverable as a team (like how this pitch is culminating the deliverable assigned to us on 1/6), we have until July 8th, 2022 to pitch, again:\nWhat exactly are we doing, in one line, in laymen\u0026rsquo;s terms? Why is it helpful? Clarify the roles and responsibilities for the \u0026ldquo;master faculty member\u0026rdquo;, what time commitments and value they add, and what they have to drop to support the program How can we derive legitimacy for what we are doing? (see below) For me, he also added the derivable of talking more slowly. Presumably, De wants us to come with a glossy pitch too.\nBe legit Why do we need \u0026ldquo;legitimacy\u0026rdquo;? We need motivation for kids to do this, and Nueva\u0026rsquo;s rubber stamp would be a good way to do so. this is the focus of how we are asking Lisa to greenlight phase 2 (see below)\nA valid answer for \u0026ldquo;legitimacy\u0026rdquo; is \u0026ldquo;adding the list of skills students achieved on their transcript.\u0026rdquo; Is this a good answer? Not at the moment. Its very unmotivated (this response does not pass the \u0026ldquo;why is that helpful?\u0026rdquo; test).\nAnd follow the yellow-brick road There is going to be a three stage roadmap.\nPhase -1: developing answers to PREPARE to pitch to Liza the idea, asking her to give feedback WITHOUT any of the \u0026ldquo;asks\u0026rdquo; (legitimacy, faculty time, etc.) Phase 1: building a down-scaled version of the program somewhere. Ted has mentioned interest in this, so we maybe able to co-opt some or all of his classes. 
Developing details and proof-of-feasibility to pitch to Liza again, this time WITH the asks to roll out to the whole school Phase 2: roll out to the whole school and prey to the Research Gods But not the leader I can\u0026rsquo;t be around forever. We are in phase -1; I will probably be gone in the middle of phase 1. We will probably have to have a faculty supporting this program unofficially for sometime, which will be a big ask.\nThis means we have to make some program changes in anticipation\u0026mdash;\nSeek a corpus callosotomy \u0026ldquo;R@N\u0026rdquo; is now separated form \u0026ldquo;Nueva Research Program.\u0026rdquo; \u0026ldquo;R@N\u0026rdquo;\u0026rsquo;s purpose is a working group to build the \u0026ldquo;Nueva Research Program.\u0026rdquo;\nWe need to separate the two as soon as possible, so that means soon. As soon as after the 7/8 deadline, I hope to make this happen. This means changes changes to our leadership structure.\nAs node A.2 outlines, \u0026ldquo;Nueva Research Program\u0026rdquo; meetings have three stable positions.\nTeams’ Stable — Responsible for managing the count, content, and quality of active Research at Nueva projects, as well as the proces of matching team members to teams. (2-3 hrs/wk) Content Stable — Responsible for managing the content of the training program and review teams. Responsible for updating nodes. Runs meetings. (1-2 hrs/wk) Participant Stable — Responsible for managing the count and recruitment of new students into the program, and identifying key experts and mentors to help build new nodes or support the program. Responsible for participant sheet (1-2 hrs/wk) As well as three review teams\nHypothesis Sciences (key mentor: TBD) Non-Hypothesis Sciences (key mentor: Ted) Literacy, Soft Skills, and Development (key mentor: TBD) In a meeting (TBD) before 7/8, we will organize ourselves into three pairs again. 
Each pair will choose one \u0026ldquo;stable\u0026rdquo; role and one \u0026ldquo;review team\u0026rdquo; role\u0026mdash;essentially acting as a joint-power head for the new program and a review team in itself.\nWe will split our meetings from then on in half; the first bit dealing with R@N, which I will run; the second, ACTUALLY DOING Nueva Research Programs\u0026rsquo; work, lead by the \u0026ldquo;content stable\u0026rdquo; team. This also means that we will separate the two work docs.\nOh, yeah, also, if you have gotten this far; the headings of this document forms a pretty bad poem. Please send this poem to me privately on a direct message. Thank you.\nPresumably, much of the early \u0026ldquo;nueva research program\u0026rdquo; meetings will be solely the participant stable thinking about recruiting metrics and content stable voting on new nodes. That\u0026rsquo;s OK. The protocol\u0026rsquo;s there to be changed if needed.\nBut not without your consent Although we want each and every one of you on the team (evidenced by the fact that we will be pretty screwed if anyone leaves), your main academics comes first. Please talk to me privately if you have any concerns, no harm no foul.\nAlrighty. Let\u0026rsquo;s find a time to meet.\nhttps://www.when2meet.com/?15887080-XHXI8\nI kinda want to meet y\u0026rsquo;all physically over coffee if you want; but if not virtual is all good.\nThanks again for everything!\n\u0026mdash;Jack\n","html":"\u003cp\u003eThanks for opening Jack\u0026rsquo;s long rambly PDF. Please read all of it; I wanted to get this out there before anything else so I apologize in advance for a letter that\u0026rsquo;s on the longer side and I didn\u0026rsquo;t have time to write a shorter one.\u003c/p\u003e\n\u003cp\u003eBefore you begin, please read \u003ca href=\"https://docs.google.com/document/d/1ZvE4QGFjhR6VeujNzejh1AcmILkLUiYLY0DwKSGsLPs/edit\"\u003eMichael\u0026rsquo;s AMAZING notes on our pitch\u003c/a\u003e to get the context. 
It\u0026rsquo;s amazing. I will not repeat here anything mentioned there.\u003c/p\u003e\n\u003ch2 id=\"pat-yourself-on-the-back\"\u003ePat yourself on the back\u003c/h2\u003e\n\u003cp\u003eOh god was that a difficult semester. We got through many a challenges and worked together to solve most of them. That\u0026rsquo;s cool. We also built a thing that the XRT team liked; so that\u0026rsquo;s cool too.\u003c/p\u003e\n\u003cp\u003eSome of you (in the meeting) will already have known, but we are greenlit to go into phase -1! What does that mean? What changes? How can you help? Will meetings finally end on time? When will Jack finish asking silly questions? Find out more\u0026hellip; below.\u003c/p\u003e\n\u003ch2 id=\"but-not-too-hard\"\u003eBut not too hard\u003c/h2\u003e\n\u003cp\u003eJust to reiterate our master deliverable as a team (like how this pitch is culminating the deliverable assigned to us on 1/6), we have until \u003cstrong\u003e\u003cstrong\u003eJuly 8th, 2022\u003c/strong\u003e\u003c/strong\u003e to pitch, again:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWhat exactly are we doing, in one line, in laymen\u0026rsquo;s terms? Why is it helpful?\u003c/li\u003e\n\u003cli\u003eClarify the roles and responsibilities for the \u0026ldquo;master faculty member\u0026rdquo;, what time commitments and value they add, and what they have to drop to support the program\u003c/li\u003e\n\u003cli\u003eHow can we derive legitimacy for what we are doing? (see below)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFor me, he also added the derivable of talking more slowly. Presumably, De wants us to come with a glossy pitch too.\u003c/p\u003e\n\u003ch2 id=\"be-legit\"\u003eBe legit\u003c/h2\u003e\n\u003cp\u003eWhy do we need \u0026ldquo;legitimacy\u0026rdquo;? We need motivation for kids to do this, and Nueva\u0026rsquo;s rubber stamp would be a good way to do so. 
\u003cstrong\u003e\u003cstrong\u003ethis is the focus of how we are asking Lisa to greenlight phase 2\u003c/strong\u003e\u003c/strong\u003e (see below)\u003c/p\u003e\n\u003cp\u003eA valid answer for \u0026ldquo;legitimacy\u0026rdquo; is \u0026ldquo;adding the list of skills students achieved on their transcript.\u0026rdquo; Is this a good answer? Not at the moment. Its very unmotivated (this response does not pass the \u0026ldquo;why is that helpful?\u0026rdquo; test).\u003c/p\u003e\n\u003ch2 id=\"and-follow-the-yellow-brick-road\"\u003eAnd follow the yellow-brick road\u003c/h2\u003e\n\u003cp\u003eThere is going to be a three stage roadmap.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ePhase -1\u003c/strong\u003e: developing answers to PREPARE to pitch to Liza the idea, asking her to give feedback WITHOUT any of the \u0026ldquo;asks\u0026rdquo; (legitimacy, faculty time, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ePhase 1\u003c/strong\u003e: building a down-scaled version of the program somewhere. Ted has mentioned interest in this, so we maybe able to co-opt some or all of his classes. Developing details and proof-of-feasibility to pitch to Liza again, this time WITH the asks to roll out to the whole school\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ePhase 2\u003c/strong\u003e: roll out to the whole school and prey to the Research Gods\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"but-not-the-leader\"\u003eBut not the leader\u003c/h2\u003e\n\u003cp\u003eI can\u0026rsquo;t be around forever. We are in phase -1; I will probably be gone in the middle of phase 1. 
We will probably have to have a faculty supporting this program unofficially for sometime, which will be a big ask.\u003c/p\u003e\n\u003cp\u003eThis means we have to make some program changes in anticipation\u0026mdash;\u003c/p\u003e\n\u003ch2 id=\"seek-a-corpus-callosotomy\"\u003eSeek a corpus callosotomy\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;R@N\u0026rdquo; is now separated form \u0026ldquo;Nueva Research Program.\u0026rdquo; \u0026ldquo;R@N\u0026rdquo;\u0026rsquo;s purpose is a working group to build the \u0026ldquo;Nueva Research Program.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe need to separate the two as soon as possible, so that means soon. As soon as after the 7/8 deadline, I hope to make this happen. This means changes changes to our leadership structure.\u003c/p\u003e\n\u003cp\u003eAs \u003ca href=\"https://docs.google.com/document/d/1UgOiVyKE0iixSNyrbh35Y3zDHfI7-eGgBoAZ0tMlDK0/edit\"\u003enode A.2\u003c/a\u003e outlines, \u0026ldquo;Nueva Research Program\u0026rdquo; meetings have three stable positions.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTeams’ Stable — Responsible for managing the count, content, and quality of active Research at Nueva projects, as well as the proces of matching team members to teams. (2-3 hrs/wk)\u003c/li\u003e\n\u003cli\u003eContent Stable — Responsible for managing the content of the training program and review teams. Responsible for updating nodes. Runs meetings. (1-2 hrs/wk)\u003c/li\u003e\n\u003cli\u003eParticipant Stable — Responsible for managing the count and recruitment of new students into the program, and identifying key experts and mentors to help build new nodes or support the program. 
Responsible for participant sheet (1-2 hrs/wk)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAs well as three review teams\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eHypothesis Sciences (key mentor: TBD)\u003c/li\u003e\n\u003cli\u003eNon-Hypothesis Sciences (key mentor: Ted)\u003c/li\u003e\n\u003cli\u003eLiteracy, Soft Skills, and Development (key mentor: TBD)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn a meeting (TBD) before 7/8, we will organize ourselves into three pairs again. Each pair will choose one \u0026ldquo;stable\u0026rdquo; role and one \u0026ldquo;review team\u0026rdquo; role\u0026mdash;essentially acting as a joint-power head for the new program and a review team in itself.\u003c/p\u003e\n\u003cp\u003eWe will split our meetings from then on in half; the first bit dealing with R@N, which I will run; the second, ACTUALLY DOING Nueva Research Programs\u0026rsquo; work, lead by the \u0026ldquo;content stable\u0026rdquo; team. This also means that we will separate the two work docs.\u003c/p\u003e\n\u003cp\u003eOh, yeah, also, if you have gotten this far; the headings of this document forms a pretty bad poem. Please send this poem to me privately on a direct message. Thank you.\u003c/p\u003e\n\u003cp\u003ePresumably, much of the early \u0026ldquo;nueva research program\u0026rdquo; meetings will be solely the participant stable thinking about recruiting metrics and content stable voting on new nodes. That\u0026rsquo;s OK. The protocol\u0026rsquo;s there to be changed if needed.\u003c/p\u003e\n\u003ch2 id=\"but-not-without-your-consent\"\u003eBut not without your consent\u003c/h2\u003e\n\u003cp\u003eAlthough we want each and every one of you on the team (evidenced by the fact that we will be pretty screwed if anyone leaves), your main academics comes first. 
Please talk to me privately if you have any concerns, no harm no foul.\u003c/p\u003e\n\u003ch2 id=\"alrighty-dot\"\u003eAlrighty.\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s find a time to meet.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.when2meet.com/?15887080-XHXI8\"\u003ehttps://www.when2meet.com/?15887080-XHXI8\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eI kinda want to meet y\u0026rsquo;all physically over coffee if you want; but if not virtual is all good.\u003c/p\u003e\n\u003cp\u003eThanks again for everything!\u003c/p\u003e\n\u003cp\u003e\u0026mdash;Jack\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch_at_nueva_notes_06_09_2022/","tags":null,"title":"Regarding R@N"},{"categories":null,"contents":"regular expressions\ndid you know you can do matching inline too matching equivalent statements: test (\\w+) \\1; non-capture group (?:test)\nlookaheads (?=pattern) true if pattern matches, but doesn\u0026rsquo;t touch the character pointer (?!pattern) true if pattern doesn\u0026rsquo;t match; also doesn\u0026rsquo;t advance pointer (?:pattern) will advance character pointer but will not create a capture group ^beginning of line end of line$ ","html":"\u003cp\u003eregular expressions\u003c/p\u003e\n\u003cp\u003edid you know you can do matching inline too matching equivalent statements: \u003ccode\u003etest (\\w+) \\1\u003c/code\u003e; non-capture group \u003ccode\u003e(?:test)\u003c/code\u003e\u003c/p\u003e\n\u003ch2 id=\"lookaheads\"\u003elookaheads\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e(?=pattern) true if pattern matches, but doesn\u0026rsquo;t touch the character pointer\u003c/li\u003e\n\u003cli\u003e(?!pattern) true if pattern doesn\u0026rsquo;t match; also doesn\u0026rsquo;t advance pointer\u003c/li\u003e\n\u003cli\u003e(?:pattern) will advance character pointer but will not create a capture group\u003c/li\u003e\n\u003cli\u003e^beginning of line\u003c/li\u003e\n\u003cli\u003eend of 
line$\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhregex/","tags":null,"title":"regex"},{"categories":null,"contents":" zinc binds to zur zur inhibits zinc uptake channels zinc uptake channel gets zoped ","html":"\u003col\u003e\n\u003cli\u003ezinc binds to zur\u003c/li\u003e\n\u003cli\u003ezur inhibits zinc uptake channels\u003c/li\u003e\n\u003cli\u003ezinc uptake channel gets zoped\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhregulating_zinc_uptake/","tags":null,"title":"regulating zinc uptake"},{"categories":null,"contents":"reinforcement learning is a decision making method with no known model of the environment at all.\nagent interacts with environment directly designer provide a performance measure of the agent in the environment agent tries to optimize the decision making algorithm to maximise the performance measure Note: agent\u0026rsquo;s own choice of action, in this case, actually influences how the environment works (and what futures the agent sees). So the agent\u0026rsquo;s actions will influence the environment outcomes\ncontrast v. explicit programming v. planning Note 2: look ma, no model! unlike optimization, reinforcement learning tasks does not require an optimization objective connected to a model of the environment where we know what knobs to turn. 
Instead, the objective is a literal performance of how the agent is doing in the actual environment.\ncontents model-based reinforcement learning model-free reinforcement learning ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e is a \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003edecision making method\u003c/a\u003e with no known model of the environment at all.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e interacts with environment directly\u003c/li\u003e\n\u003cli\u003edesigner provide a performance measure of the agent in the environment\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e tries to optimize the \u003ca href=\"/posts/kbhdecision_making/\"\u003edecision making\u003c/a\u003e algorithm to maximise the performance measure\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eNote: \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e\u0026rsquo;s own choice of action, in this case, actually influences how the environment works (and what futures the agent sees). So the agent\u0026rsquo;s actions will influence the environment outcomes\u003c/p\u003e\n\u003ch2 id=\"contrast-v-dot-explicit-programming-v-dot-planning\"\u003econtrast v. explicit programming v. planning\u003c/h2\u003e\n\u003cp\u003eNote 2: \u003cstrong\u003elook ma, no model!\u003c/strong\u003e unlike \u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e, \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e tasks does not require an optimization objective connected to a model of the environment where we know what knobs to turn. 
Instead, the objective is a literal performance of how the \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e is doing in the actual environment.\u003c/p\u003e\n\u003ch2 id=\"contents\"\u003econtents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/\"\u003emodel-free reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreinforcement_learning/","tags":null,"title":"reinforcement learning"},{"categories":null,"contents":"Sample a ton; perform factor conditioning; then count the observation you\u0026rsquo;d like.\nby rejecting those who don\u0026rsquo;t match\n","html":"\u003cp\u003eSample a ton; perform \u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e; then count the observation you\u0026rsquo;d like.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-11-01_23-25-20_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eby rejecting those who don\u0026rsquo;t match\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrejection_sampling/","tags":null,"title":"Rejection Sampling"},{"categories":null,"contents":"Let \\(X \\sim \\mathcal{N}\\).\n\u0026ldquo;How much more likely is \\(x=10\\) than \\(x=5\\)?\u0026rdquo;\nWe note that \\(P(x=value) = 0\\) for any value if \\(X\\) is continuous. However, we can still get an answer:\n\\begin{equation} \\frac{\\dd{X} P(x=10)}{\\dd{X} P(x=5)} \\end{equation}\nthese two things cancel out. 
Therefore, you can just divide the PDF:\n\\begin{equation} \\frac{f(x=10)}{f(x=5)} \\end{equation}\n","html":"\u003cp\u003eLet \\(X \\sim \\mathcal{N}\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;How much more likely is \\(x=10\\) than \\(x=5\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe note that \\(P(x=value) = 0\\) for any value if \\(X\\) is continuous. However, we can still get an answer:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\dd{X} P(x=10)}{\\dd{X} P(x=5)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethese two things cancel out. Therefore, you can just divide the \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{f(x=10)}{f(x=5)}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrelative_probability/","tags":null,"title":"relative probability"},{"categories":null,"contents":"background info Recall asymtotic analysis. We remember that:\nconstant time \u0026lt; logarithmic time \u0026lt; linear time \u0026lt; polynomial time \u0026lt; exponential time The question? What happens if dynamic programming is too slow/not good enough for the problem? What if dynamic programming is not needed; instead, why don\u0026rsquo;t we just settle for a pretty good solution?\nTake, for instance, Nueva Courses. The optimal solution is \u0026ldquo;most students get their highest possible preferences.\u0026rdquo; However, this is impractical and pretty much impossible. Instead, what if we endeavor to figure a schedule that generally maximize happiness?\nrelaxation methods constraint relaxation constraint relaxation is a relaxation method to remove extra constraints.\nMotivating problem: traveling salesman problem\nVisit all towns in a given location Travel the minimal distance to do so Cannot visit any town more than once Calculating the basic, naive solution to find all roads is \\(O(n!)\\). 
Best known solution is \\(O(2^nn^2)\\), which is still slow. Its also an \\(NP\\) hard problem.\nHence, to actually solve it in a reasonable time, we are going to make two relaxations.\nThe salesmen can visit a town more than once The salesmen can teleport to visited towns By these two relations, we convert traveling salesmen to the minimum spanning tree problem.\nWe now (how?) that solving MST is no worse than optimal TSP. We will solve MST, then use that problem as the upper bound of solution to TSP.\ncontinuous relaxation continuous relaxation is a relaxation method to convert difficult discrete problems into continuous ones.\nMotivating problem: set cover\nYou are having a party, and you want your friends to get a nice paper invite.\nyou will send invitations to some subsets of your friends tell them to send invitations to all your mutual friends with them What\u0026rsquo;s the minimum number of friends to invite, and who?\nSet-cover is also hard, and also NP hard. The problem is that sending invitation is discrete.\nHence, to solve, we make it possible to solve for fractions of invitations. Hence, we can prove that our solution is guaranteed to be within bounds\nLagrangian relaxation Lagrangian relaxation is a relaxation method to convert hard-limit constrains into flexible penalization (negative values).\nMotivating problem: shortest paths problem with a constraint.\nYou need to drive the shortest number of miles as well as doing it in a hard constraint to complete the solution in a certain time.\nWe can instead relax the problem into overtime driving being a negative value in the solution.\n","html":"\u003ch2 id=\"background-info\"\u003ebackground info\u003c/h2\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhasymtotic_analysis/\"\u003easymtotic analysis\u003c/a\u003e. 
We remember that:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003econstant time \u0026lt; logarithmic time \u0026lt; linear time \u0026lt; polynomial time \u0026lt; exponential time\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe question? What happens if \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e is too slow/not good enough for the problem? What if \u003ca href=\"/posts/kbhdynamic_programming/\"\u003edynamic programming\u003c/a\u003e is not needed; instead, why don\u0026rsquo;t we just settle for a pretty good solution?\u003c/p\u003e\n\u003cp\u003eTake, for instance, \u003ca href=\"/posts/kbhnueva_courses_index/\"\u003eNueva Courses\u003c/a\u003e. The optimal solution is \u0026ldquo;most students get their highest possible preferences.\u0026rdquo; However, this is impractical and pretty much impossible. 
Instead, what if we endeavor to figure a schedule that generally maximize happiness?\u003c/p\u003e\n\u003ch2 id=\"relaxation-methods\"\u003erelaxation methods\u003c/h2\u003e\n\u003ch3 id=\"constraint-relaxation\"\u003econstraint relaxation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#constraint-relaxation\"\u003econstraint relaxation\u003c/a\u003e is a \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e method to remove extra constraints.\u003c/p\u003e\n\u003cp\u003eMotivating problem: traveling salesman problem\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eVisit all towns in a given location\u003c/li\u003e\n\u003cli\u003eTravel the minimal distance to do so\u003c/li\u003e\n\u003cli\u003eCannot visit any town more than once\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCalculating the basic, naive solution to find all roads is \\(O(n!)\\). Best known solution is \\(O(2^nn^2)\\), which is still slow. Its also an \\(NP\\) hard problem.\u003c/p\u003e\n\u003cp\u003eHence, to actually solve it in a reasonable time, we are going to make two relaxations.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eThe salesmen can visit a town more than once\u003c/li\u003e\n\u003cli\u003eThe salesmen can teleport to visited towns\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBy these two relations, we convert traveling salesmen to the \u003ca href=\"/posts/kbhminimum_spanning_tree/\"\u003eminimum spanning tree\u003c/a\u003e problem.\u003c/p\u003e\n\u003cp\u003eWe now (how?) that solving MST is no worse than optimal TSP. 
We will solve MST, then use that problem as the upper bound of solution to TSP.\u003c/p\u003e\n\u003ch3 id=\"continuous-relaxation\"\u003econtinuous relaxation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#continuous-relaxation\"\u003econtinuous relaxation\u003c/a\u003e is a \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e method to convert difficult discrete problems into continuous ones.\u003c/p\u003e\n\u003cp\u003eMotivating problem: set cover\u003c/p\u003e\n\u003cp\u003eYou are having a party, and you want your friends to get a nice paper invite.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eyou will send invitations to some subsets of your friends\u003c/li\u003e\n\u003cli\u003etell them to send invitations to all your mutual friends with them\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWhat\u0026rsquo;s the minimum number of friends to invite, and who?\u003c/p\u003e\n\u003cp\u003eSet-cover is also hard, and also NP hard. The problem is that sending invitation is discrete.\u003c/p\u003e\n\u003cp\u003eHence, to solve, we make it possible to solve for fractions of invitations. 
Hence, we can prove that our solution is guaranteed to be within bounds\u003c/p\u003e\n\u003ch3 id=\"lagrangian-relaxation\"\u003eLagrangian relaxation\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#lagrangian-relaxation\"\u003eLagrangian relaxation\u003c/a\u003e is a \u003ca href=\"/posts/kbhrelaxation_algorithums/\"\u003erelaxation\u003c/a\u003e method to convert hard-limit constrains into flexible penalization (negative values).\u003c/p\u003e\n\u003cp\u003eMotivating problem: shortest paths problem with a constraint.\u003c/p\u003e\n\u003cp\u003eYou need to drive the shortest number of miles as well as doing it in a hard constraint to complete the solution in a certain time.\u003c/p\u003e\n\u003cp\u003eWe can instead relax the problem into overtime driving being a negative value in the solution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrelaxation_algorithums/","tags":null,"title":"relaxation (algorithms)"},{"categories":null,"contents":"replication is the exact copying of cell information\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhreplication/\"\u003ereplication\u003c/a\u003e is the exact copying of cell information\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreplication/","tags":null,"title":"replication"},{"categories":null,"contents":"In this experiment, a model was devised, trained, and evaluated to automate psychotherapist/client text conversations through the use of state-of-the-art, Seq2Seq Transformer-based Natural Language Generation (NLG) systems. 
Through training the model upon a mix of the Cornell Movie Dialogue Corpus for language understanding and an open-source, anonymized, and public licensed psychotherapeutic dataset, the model achieved statistically significant performance in published, standardized qualitative benchmarks against human-written validation data - meeting or exceeding human-written responses\u0026rsquo; performance in 59.7% and 67.1% of the test set for two independent test methods respectively. Although the model cannot replace the work of psychotherapists entirely, its ability to synthesize human-appearing utterances for the majority of the test set serves as a promising step towards communizing and easing stigma at the psychotherapeutic point-of-care.\n","html":"\u003cp\u003eIn this experiment, a model was devised, trained, and evaluated to automate psychotherapist/client text conversations through the use of state-of-the-art, Seq2Seq Transformer-based Natural Language Generation (NLG) systems. Through training the model upon a mix of the Cornell Movie Dialogue Corpus for language understanding and an open-source, anonymized, and public licensed psychotherapeutic dataset, the model achieved statistically significant performance in published, standardized qualitative benchmarks against human-written validation data - meeting or exceeding human-written responses\u0026rsquo; performance in 59.7% and 67.1% of the test set for two independent test methods respectively. 
Although the model cannot replace the work of psychotherapists entirely, its ability to synthesize human-appearing utterances for the majority of the test set serves as a promising step towards communizing and easing stigma at the psychotherapeutic point-of-care.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreplier_abstract/","tags":null,"title":"Replier Abstract"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhrepresentation_learning/","tags":null,"title":"representation learning"},{"categories":null,"contents":"Instead of calculating:\n\\begin{equation} \\qty( \\frac{52! -1}{52!} ) \\end{equation}\nWe calculate the log of it because then you are able to write:\n\\begin{equation} \\log \\qty( \\frac{52! -1}{52!} ) = \\log (52! - 1) - \\log(52!) \\end{equation}\nwhich won\u0026rsquo;t be rounded to \\(0\\).\n","html":"\u003cp\u003eInstead of calculating:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\qty( \\frac{52! -1}{52!} )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe calculate the log of it because then you are able to write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log \\qty( \\frac{52! -1}{52!} ) = \\log (52! 
- 1) - \\log(52!)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich won\u0026rsquo;t be rounded to \\(0\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrepresenting_large_computation/","tags":null,"title":"Representing Large Computation"},{"categories":null,"contents":"Requirements Analysis is how to satisfy your persnickety users while keeping your fucking app simple.\nGoal The broad goal of Requirements Analysis is to come up with a spec that is:\nDocumented Actionable Measurable Testable Traceable Defined with details Satisfies business goals Timing Requirements Analysis should be performed when\nCalculating costs Setting priorities Creating breakdowns Including specialists Steps Gather requirements by doing User Interviews Analyze the requirements for clarity, completeness, consistency, and lack of conflicts Write them down and implement them Tools Gap Analysis Analyze the difference between where the business is at and its stated goals; figure out how the goals can be closed.\nBusiness Motivation Model (BMM) Does it matter?\nhttps://www.omg.org/spec/BMM/1.3/PDF\nSomeone decided to throw XML to running a company. 
Why.\nCustomer Journey Modeling See Customer Journey Map\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e is how to satisfy your persnickety users while keeping your fucking app simple.\u003c/p\u003e\n\u003ch2 id=\"goal\"\u003eGoal\u003c/h2\u003e\n\u003cp\u003eThe broad goal of \u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e is to come up with a spec that is:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eDocumented\u003c/li\u003e\n\u003cli\u003eActionable\u003c/li\u003e\n\u003cli\u003eMeasurable\u003c/li\u003e\n\u003cli\u003eTestable\u003c/li\u003e\n\u003cli\u003eTraceable\u003c/li\u003e\n\u003cli\u003eDefined with details\u003c/li\u003e\n\u003cli\u003eSatisfies business goals\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"timing\"\u003eTiming\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e should be performed when\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCalculating costs\u003c/li\u003e\n\u003cli\u003eSetting priorities\u003c/li\u003e\n\u003cli\u003eCreating breakdowns\u003c/li\u003e\n\u003cli\u003eIncluding specialists\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"steps\"\u003eSteps\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eGather requirements by doing \u003ca href=\"/posts/kbhuser_interviews/\"\u003eUser Interviews\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eAnalyze the requirements for clarity, completeness, consistency, and lack of conflicts\u003c/li\u003e\n\u003cli\u003eWrite them down and implement them\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"tools\"\u003eTools\u003c/h2\u003e\n\u003ch3 id=\"gap-analysis\"\u003eGap Analysis\u003c/h3\u003e\n\u003cp\u003eAnalyze the difference between where the business is at and its stated goals; figure out how the goals can be closed.\u003c/p\u003e\n\u003ch3 id=\"business-motivation-model--bmm\"\u003eBusiness Motivation Model 
(BMM)\u003c/h3\u003e\n\u003cp\u003eDoes it matter?\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://www.omg.org/spec/BMM/1.3/PDF\"\u003ehttps://www.omg.org/spec/BMM/1.3/PDF\u003c/a\u003e\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-09_21-32-44_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSomeone decided to throw XML to running a company. Why.\u003c/p\u003e\n\u003ch3 id=\"customer-journey-modeling\"\u003eCustomer Journey Modeling\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhcustomer_journey_map/\"\u003eCustomer Journey Map\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrequirements_analysis/","tags":null,"title":"Requirements Analysis"},{"categories":null,"contents":"Learning to ask questions in a reasonable way. Changing you from a consumer of knowledge to a producer of researcher.\nThe Why of Research from Brian Thomas Your discipline is not your topic You should do research to find out whether or not you have chosen the right area of focus You can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise Research is the act of taming unruly problems ","html":"\u003cp\u003eLearning to ask questions in a reasonable way. 
Changing you from a consumer of knowledge to a producer of researcher.\u003c/p\u003e\n\u003ch2 id=\"the-why-of-research-from-brian-thomas--kbhstanford-ug-research-program-dot-md\"\u003eThe Why of Research from \u003ca href=\"/posts/kbhstanford_ug_research_program/\"\u003eBrian Thomas\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eYour discipline is not your topic\u003c/li\u003e\n\u003cli\u003eYou should do research to find out whether or not you have chosen the right area of focus\u003c/li\u003e\n\u003cli\u003eYou can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise\u003c/li\u003e\n\u003cli\u003eResearch is the act of taming unruly problems\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch/","tags":null,"title":"Research"},{"categories":null,"contents":"Mykel\u0026rsquo;s Research Tips!\n\u0026ldquo;we are not trying to sell our ideas, we are trying to sell understanding\u0026rdquo;\nLevels of Critique high level: \u0026ldquo;is this problem important/can it scale?\u0026rdquo; mid level: \u0026ldquo;do the experiments show what is claimed\u0026rdquo; low level: typography, grammar, style State contributions before and after.\nGood Presentations not too stuffy nor casual frequent use of graphics you don\u0026rsquo;t want bullets with more than 2 lines clear, upfront objective of the paper everything was understanding during presentation: timing presentations such that its digestible as drinking down Time Management Randy Pausch\u0026rsquo;s time management lecture.\noptimize for fun \u0026ldquo;why am I doing this?\u0026rdquo; have \u0026ldquo;you can always change your plan, but only if you have one\u0026rdquo; SEPARATE: email, to-do list, calendar\nWriting a Paper Jennifer Widom: how to write a paper\nPackages to Use Plots \u0026ldquo;using matlab to screenshot a plot is\u0026hellip; an automatic F. 
If you want to have A quality work, you can use pgfplot. Or you can use a pgfplots backend.\u0026rdquo;\nimport tikzplotlib Tables No vertical linens\nhttp://people.inf.ethz.ch/markusp/teaching/guides/guide-tables.pdf\n\\toprule\nPossibly: PGFPlotsTable. TikZ.\nAlgos algorithmicx\nCaptions subcaptions\nUnits siunitx\nCode minted, or\u0026mdash;code executation\u0026mdash;pythontex\nReferences \u0026ldquo;cleveref\u0026rdquo;: tell you what it is, and give you back with the \u0026ldquo;Fig. #N\u0026rdquo; informatino.\nGood Presentation powerpoint tedx talk give strong technical presentations by Markus Puschel General Tips separate the problem from the solution before presenting the solution number slides! also include total number of slides one slide per minute one liners are best, two liners are ok, three + are bad Transitions are hard: don\u0026rsquo;t tap on a slide and go \u0026ldquo;woah\u0026rdquo;; pre-cache first sentence of each slide.\nOverview AFTER the motivation.\nReference Handling biblatex: bibtex with postprocessing the .tex sislstrings.bib: mykel\u0026rsquo;s conference list for .bib JabRef PhD Thesis http://www.feron.org/Eric/PhD_characterization_2.htm\n\u0026ldquo;Cool Theorems and New Methods\u0026rdquo; \u0026ldquo;Cool Methods and Predictions\u0026rdquo; \u0026ldquo;Beautiful Demonstrations\u0026rdquo; \u0026ldquo;Cool engineering ideas\u0026rdquo; \u0026ldquo;How to Write a Paper\u0026rdquo; https://cs.stanford.edu/people/widom/paper-writing.html\nwhat\u0026rsquo;s the problem why is it interesting and important? why is it hard? why hasn\u0026rsquo;t been solved before/what\u0026rsquo;s wrong with previous solutions? what are the key components of my approach and results? You want the intro to end near the end of the first page or near the end of the second page. 
Always lead with the problem.\nMathematical Writing \u0026ldquo;CS209 mathematical writing\u0026rdquo;\nDon\u0026rsquo;t start a sentence with a symbol.\nDon\u0026rsquo;t use \u0026ldquo;utilize\u0026rdquo;.\nAuthorship talk about it early universities have authorship inclusion deadline Complexity complexity should be justified (why does simpler method\u0026rsquo;s not work?) ","html":"\u003cp\u003eMykel\u0026rsquo;s Research Tips!\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;we are not trying to sell our ideas, we are trying to sell understanding\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"levels-of-critique\"\u003eLevels of Critique\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehigh level: \u0026ldquo;is this problem important/can it scale?\u0026rdquo;\u003c/li\u003e\n\u003cli\u003emid level: \u0026ldquo;do the experiments show what is claimed\u0026rdquo;\u003c/li\u003e\n\u003cli\u003elow level: typography, grammar, style\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eState contributions before and after.\u003c/p\u003e\n\u003ch2 id=\"good-presentations\"\u003eGood Presentations\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003enot too stuffy nor casual\u003c/li\u003e\n\u003cli\u003efrequent use of graphics\u003c/li\u003e\n\u003cli\u003eyou don\u0026rsquo;t want bullets with more than 2 lines\u003c/li\u003e\n\u003cli\u003eclear, upfront objective of the paper\u003c/li\u003e\n\u003cli\u003eeverything was understanding \u003cstrong\u003eduring\u003c/strong\u003e presentation: timing presentations such that its digestible as drinking down\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"time-management\"\u003eTime Management\u003c/h2\u003e\n\u003cp\u003eRandy Pausch\u0026rsquo;s time management lecture.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eoptimize for fun\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;why am I doing this?\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ehave \u0026ldquo;you can always change your plan, but only if you have 
one\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSEPARATE: email, to-do list, calendar\u003c/p\u003e\n\u003ch2 id=\"writing-a-paper\"\u003eWriting a Paper\u003c/h2\u003e\n\u003cp\u003eJennifer Widom: how to write a paper\u003c/p\u003e\n\u003ch2 id=\"packages-to-use\"\u003ePackages to Use\u003c/h2\u003e\n\u003ch3 id=\"plots\"\u003ePlots\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;using matlab to screenshot a plot is\u0026hellip; an automatic F. If you want to have A quality work, you can use pgfplot. Or you can use a pgfplots backend.\u0026rdquo;\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etikzplotlib\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"tables\"\u003eTables\u003c/h3\u003e\n\u003cp\u003eNo vertical linens\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"http://people.inf.ethz.ch/markusp/teaching/guides/guide-tables.pdf\"\u003ehttp://people.inf.ethz.ch/markusp/teaching/guides/guide-tables.pdf\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\\toprule\u003c/p\u003e\n\u003cp\u003ePossibly: PGFPlotsTable. 
TikZ.\u003c/p\u003e\n\u003ch3 id=\"algos\"\u003eAlgos\u003c/h3\u003e\n\u003cp\u003ealgorithmicx\u003c/p\u003e\n\u003ch3 id=\"captions\"\u003eCaptions\u003c/h3\u003e\n\u003cp\u003esubcaptions\u003c/p\u003e\n\u003ch3 id=\"units\"\u003eUnits\u003c/h3\u003e\n\u003cp\u003esiunitx\u003c/p\u003e\n\u003ch3 id=\"code\"\u003eCode\u003c/h3\u003e\n\u003cp\u003eminted, or\u0026mdash;code executation\u0026mdash;pythontex\u003c/p\u003e\n\u003ch3 id=\"references\"\u003eReferences\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;cleveref\u0026rdquo;: tell you what it is, and give you back with the \u0026ldquo;Fig. #N\u0026rdquo; informatino.\u003c/p\u003e\n\u003ch3 id=\"good-presentation\"\u003eGood Presentation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003epowerpoint tedx talk\u003c/li\u003e\n\u003cli\u003egive strong technical presentations by Markus Puschel\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"general-tips\"\u003eGeneral Tips\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eseparate the problem from the solution before presenting the solution\u003c/li\u003e\n\u003cli\u003enumber slides! 
also include total number of slides\u003c/li\u003e\n\u003cli\u003eone slide per minute\u003c/li\u003e\n\u003cli\u003eone liners are best, two liners are ok, three + are bad\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTransitions are hard: don\u0026rsquo;t tap on a slide and go \u0026ldquo;woah\u0026rdquo;; pre-cache first sentence of each slide.\u003c/p\u003e\n\u003cp\u003eOverview \u003cstrong\u003eAFTER\u003c/strong\u003e the motivation.\u003c/p\u003e\n\u003ch2 id=\"reference-handling\"\u003eReference Handling\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebiblatex: bibtex with postprocessing the .tex\u003c/li\u003e\n\u003cli\u003esislstrings.bib: mykel\u0026rsquo;s conference list for .bib\u003c/li\u003e\n\u003cli\u003eJabRef\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"phd-thesis\"\u003ePhD Thesis\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"http://www.feron.org/Eric/PhD_characterization_2.htm\"\u003ehttp://www.feron.org/Eric/PhD_characterization_2.htm\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Cool Theorems and New Methods\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Cool Methods and Predictions\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Beautiful Demonstrations\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Cool engineering ideas\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-to-write-a-paper\"\u003e\u0026ldquo;How to Write a Paper\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://cs.stanford.edu/people/widom/paper-writing.html\"\u003ehttps://cs.stanford.edu/people/widom/paper-writing.html\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewhat\u0026rsquo;s the problem\u003c/li\u003e\n\u003cli\u003ewhy is it interesting and important?\u003c/li\u003e\n\u003cli\u003ewhy is it hard?\u003c/li\u003e\n\u003cli\u003ewhy hasn\u0026rsquo;t been solved before/what\u0026rsquo;s wrong with previous solutions?\u003c/li\u003e\n\u003cli\u003ewhat are the key components of my 
approach and results?\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYou want the intro to end near the end of the first page or near the end of the second page. \u003cstrong\u003eAlways lead with the problem.\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"mathematical-writing\"\u003eMathematical Writing\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;CS209 mathematical writing\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eDon\u0026rsquo;t start a sentence with a symbol.\u003c/p\u003e\n\u003cp\u003eDon\u0026rsquo;t use \u0026ldquo;utilize\u0026rdquo;.\u003c/p\u003e\n\u003ch2 id=\"authorship\"\u003eAuthorship\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etalk about it early\u003c/li\u003e\n\u003cli\u003euniversities have authorship inclusion deadline\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"complexity\"\u003eComplexity\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecomplexity should be justified (why does simpler method\u0026rsquo;s not work?)\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresearch_tips/","tags":null,"title":"Research Tips"},{"categories":null,"contents":"Importantly, we have to keep our data under something that can be called RDD: \u0026ldquo;Resilient Distributed Dataset\u0026rdquo;; it is a theoretical dataset, but you don\u0026rsquo;t actually load it.\nRDDs are has a single vector datastore under, but there are special RDDs that store key-value info. 
For Spark, RDDs are stored as operational graphs which is backtraced eventually during computational steps.\nPair RDD A Pair RDD is an RDD that stores two pairs of vectors: you have a key and you have an value per entry.\n","html":"\u003cp\u003eImportantly, we have to keep our data under something that can be called \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhspark/#rdd-api\"\u003eRDD\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e: \u0026ldquo;Resilient Distributed Dataset\u0026rdquo;; it is a theoretical dataset, but you don\u0026rsquo;t actually load it.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es are has a single vector datastore under, but there are special \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es that store key-value info. For \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e, \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es are stored as operational graphs which is backtraced eventually during computational steps.\u003c/p\u003e\n\u003ch2 id=\"pair-rdd\"\u003ePair RDD\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#pair-rdd\"\u003ePair RDD\u003c/a\u003e is an \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e that stores two pairs of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es: you have a key and you have an value per entry.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrdd/","tags":null,"title":"Resilient Distributed Dataset"},{"categories":null,"contents":"resistors do resistor stuff.\nresistors in series Their resistance add!\n\\begin{equation} R_{eq} = R_1 + R_2 + \\dots \\end{equation}\nCURRENT remains the same through the resistors.\nresistors in parallel Their resistance add by inverse fraction!\n\\begin{equation} \\frac{1}{R_{eq}} = \\frac{1}{R_1} + \\frac{1}{R_2} + \\dots \\end{equation}\nVOLTAGE remains the same through the resistors.\n","html":"\u003cp\u003e\u003ca 
href=\"/posts/kbhresistors/\"\u003eresistors\u003c/a\u003e do resistor stuff.\u003c/p\u003e\n\u003ch2 id=\"resistors-in-series\"\u003eresistors in series\u003c/h2\u003e\n\u003cp\u003eTheir \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e add!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-21_21-53-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\nR_{eq} = R_1 + R_2 + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eCURRENT\u003c/strong\u003e remains the same through the resistors.\u003c/p\u003e\n\u003ch2 id=\"resistors-in-parallel\"\u003eresistors in parallel\u003c/h2\u003e\n\u003cp\u003eTheir \u003ca href=\"/posts/kbhcurrent/#resistance-of-a-wire\"\u003eresistance\u003c/a\u003e add by inverse fraction!\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-03-21_21-54-07_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e\\begin{equation}\n\\frac{1}{R_{eq}} = \\frac{1}{R_1} + \\frac{1}{R_2} + \\dots\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eVOLTAGE\u003c/strong\u003e remains the same through the resistors.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhresistors/","tags":null,"title":"resistor"},{"categories":null,"contents":"A reticle is a photomask/template for a lithography system (like a negative). KLA was the first company to automatically inspect wafers and reticles.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhreticle/\"\u003ereticle\u003c/a\u003e is a photomask/template for a lithography system (like a negative). 
\u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e was the first company to automatically inspect wafers and \u003ca href=\"/posts/kbhreticle/\"\u003ereticle\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhreticle/","tags":null,"title":"reticle"},{"categories":null,"contents":" Starting with random residue noise: coordinates + backbones Diffusion happens: train like diffusion, with the goal of increasing binding affinities Eventually resolves to valid protein structures given the binding environments Basically, start with only the desired substraight, and the diffuse the sequence around that small sequence with the goal of higher affinity binding: i.e. allow only the binding site to stay and regenerating the rest.\nRFDiffusion is available starting THIS WEEK!\nadvantages over RoseTTAFold2 inpainting The starting point is random for diffusion, so if you do it multiple times you will get new results instead of the same thing.\n","html":"\u003col\u003e\n\u003cli\u003eStarting with random residue noise: coordinates + backbones\u003c/li\u003e\n\u003cli\u003eDiffusion happens: train like diffusion, with the goal of increasing binding affinities\u003c/li\u003e\n\u003cli\u003eEventually resolves to valid protein structures given the binding environments\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBasically, start with only the desired substraight, and the diffuse the sequence around that small sequence with the goal of higher affinity binding: i.e. 
allow only the binding site to stay and regenerating the rest.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrfdiffusion/\"\u003eRFDiffusion\u003c/a\u003e is available starting THIS WEEK!\u003c/p\u003e\n\u003ch2 id=\"advantages-over-rosettafold2--kbhrosettafold2-dot-md--inpainting\"\u003eadvantages over \u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e inpainting\u003c/h2\u003e\n\u003cp\u003eThe starting point is random for diffusion, so if you do it multiple times you will get new results instead of the same thing.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrfdiffusion/","tags":null,"title":"RFDiffusion"},{"categories":null,"contents":"POMDPs to solve Active Sensing Problem: where gathering information is the explicit goal and not a means to do something. Meaning, we can\u0026rsquo;t train them using state-only reward functions (i.e. reward is based on belief and not state).\nDirectly reward the reduction of uncertainty: belief-based reward framework which you can just tack onto the existing solvers.\nTo do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:\n\\begin{equation} r(b,a) = \\rho(b,a) \\end{equation}\n\\(\\rho\\) should be some measure of uncertainty, like entropy.\nkey question: how does our POMDP formulations change given this change?\nDon\u0026rsquo;t worry about the Value Function result: if reward function is convex, then Bellman updates should preserve the convexity of the value function\nSo, we now just need to make sure that however we compute our rewards the reward function \\(\\rho\\) has to be piecewise linear convex.\nPWLC rewards One simple PWLC rewards are alpha vectors:\n\\begin{equation} \\rho(b,a) = \\max_{\\alpha in \\Gamma} \\qty[\\sum_{ss}^{} b(s) \\alpha(s)] \\end{equation}\nWe want to use \\(R\\) extra alpha-vectors to compute the value at a state.\nThis makes our Belman updates:\nnon-PWLC objectives As long as 
\\(\\rho\\) is convex and stronger-than Lipschitz continuous, we can use a modified version of the Bellman updates to force our non PWLC \\(\\rho\\) into pretty much PWLC:\n\\begin{equation} \\hat{\\rho}(b) = \\max_{b\u0026rsquo;} \\qty[\\rho(b\u0026rsquo;) + (b-b\u0026rsquo;) \\cdot \\nabla p(b\u0026rsquo;)] \\end{equation}\nTaylor never fails to disappoint.\nFancy math gives that the error in this would be bounded:\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es to solve \u003ca href=\"/posts/kbhrho_pomdps/\"\u003eActive Sensing Problem\u003c/a\u003e: where \u003cstrong\u003egathering information\u003c/strong\u003e is the explicit goal and not a means to do something. Meaning, we can\u0026rsquo;t train them using state-only reward functions (i.e. reward is based on belief and not state).\u003c/p\u003e\n\u003cp\u003eDirectly reward the \u003cstrong\u003ereduction of uncertainty\u003c/strong\u003e: \u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e-based reward framework which you can just tack onto the existing solvers.\u003c/p\u003e\n\u003cp\u003eTo do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nr(b,a) = \\rho(b,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\rho\\) should be some measure of uncertainty, like entropy.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ekey question\u003c/strong\u003e: how does our \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e formulations change given this change?\u003c/p\u003e\n\u003ch2 id=\"don-t-worry-about-the-value-function\"\u003eDon\u0026rsquo;t worry about the Value Function\u003c/h2\u003e\n\u003cp\u003eresult: if \u003cstrong\u003ereward function\u003c/strong\u003e is convex, then Bellman updates should \u003cstrong\u003epreserve the convexity of the value 
function\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eSo, we now just need to make sure that however we compute our rewards the reward function \\(\\rho\\) has to be piecewise linear convex.\u003c/p\u003e\n\u003ch2 id=\"pwlc--kbhrho-pomdps-dot-md--rewards\"\u003e\u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e rewards\u003c/h2\u003e\n\u003cp\u003eOne simple \u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e rewards are \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\rho(b,a) = \\max_{\\alpha in \\Gamma} \\qty[\\sum_{ss}^{} b(s) \\alpha(s)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to use \\(R\\) extra alpha-vectors to compute the value at a state.\u003c/p\u003e\n\u003cp\u003eThis makes our Belman updates:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-25_19-56-49_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"non-pwlc--kbhrho-pomdps-dot-md--objectives\"\u003enon-\u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e objectives\u003c/h2\u003e\n\u003cp\u003eAs long as \\(\\rho\\) is convex and stronger-than \u003ca href=\"/posts/kbhuniqueness_and_existance/#lipschitz-condition\"\u003eLipschitz continuous\u003c/a\u003e, we can use a modified version of the Bellman updates to force our non \u003ca href=\"/posts/kbhrho_pomdps/\"\u003ePWLC\u003c/a\u003e \\(\\rho\\) into pretty much PWLC:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{\\rho}(b) = \\max_{b\u0026rsquo;} \\qty[\\rho(b\u0026rsquo;) + (b-b\u0026rsquo;) \\cdot \\nabla p(b\u0026rsquo;)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTaylor never fails to disappoint.\u003c/p\u003e\n\u003cp\u003eFancy math gives that the error in this would be bounded:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2024-02-25_19-59-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhrho_pomdps/","tags":null,"title":"rho-POMDPs"},{"categories":null,"contents":"Richard Nixon is an American president, but pretty much is the watergate guy.\nServed in House and Senate Eisenhower\u0026rsquo;s VP for 8 years Lost first to JFK Richard Nixon is a pragmatist; he pushes economy out of presession via Keynsian Politics.\nRichard Nixon also realized that the large southern population can be motivated via racist policies, so he shifted the .\npolitical positions of Richard Nixon Richard Nixon\u0026rsquo;s Treatment against the Vietnam War Richard Nixon\u0026rsquo;s Foreign Policy ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e is an American president, but pretty much is the \u003ca href=\"/posts/kbhwatergate/\"\u003ewatergate\u003c/a\u003e guy.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eServed in House and Senate\u003c/li\u003e\n\u003cli\u003eEisenhower\u0026rsquo;s VP for 8 years\u003c/li\u003e\n\u003cli\u003eLost first to JFK\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e is a pragmatist; he pushes economy out of presession via \u003ca href=\"/posts/kbhkeynsian_politics/\"\u003eKeynsian Politics\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e also realized that the large southern population can be motivated via racist policies, so he shifted the .\u003c/p\u003e\n\u003ch2 id=\"political-positions-of-richard-nixon\"\u003epolitical positions of Richard Nixon\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrichard_nixon_s_treatment_against_the_vietnam_war/\"\u003eRichard Nixon\u0026rsquo;s Treatment against the Vietnam War\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhrichard_nixon_s_foreign_policy/\"\u003eRichard Nixon\u0026rsquo;s Foreign Policy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrichard_nixon/","tags":null,"title":"Richard Nixon"},{"categories":null,"contents":"Richard Nixon\u0026rsquo;s foreign policy is marked by the \u0026ldquo;Nixon Doctrine\u0026rdquo;: shifting the burden of military containment to allies.\nSupports China as a means against USSR Negotiate with the USSR to lower tension Shifts focus into building and supporting allies ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e\u0026rsquo;s foreign policy is marked by the \u0026ldquo;\u003ca href=\"/posts/kbhrichard_nixon_s_foreign_policy/\"\u003eNixon Doctrine\u003c/a\u003e\u0026rdquo;: shifting the burden of military containment to allies.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSupports China as a means against \u003ca href=\"\"\u003eUSSR\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eNegotiate with the \u003ca href=\"\"\u003eUSSR\u003c/a\u003e to lower tension\u003c/li\u003e\n\u003cli\u003eShifts focus into building and supporting allies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrichard_nixon_s_foreign_policy/","tags":null,"title":"Richard Nixon's Foreign Policy"},{"categories":null,"contents":"Richard Nixon proposed the strategy of vietnamization as a treatment to the Vietnam War. He also expanded to Cambodia. To beat the Viet Cong into submission, he initialized the Operation Linebacker campaign.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e proposed the strategy of \u003ca href=\"/posts/kbhvietnamization/\"\u003evietnamization\u003c/a\u003e as a treatment to the \u003ca href=\"/posts/kbhcold_war_in_vietnam/\"\u003eVietnam War\u003c/a\u003e. He also expanded to Cambodia. 
To beat the Viet Cong into submission, he initialized the \u003ca href=\"/posts/kbhoperation_linebacker/\"\u003eOperation Linebacker\u003c/a\u003e campaign.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrichard_nixon_s_treatment_against_the_vietnam_war/","tags":null,"title":"Richard Nixon's Treatment against the Vietnam War"},{"categories":null,"contents":"Rick Wallace is the CEO of KLA.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrick_wallace/\"\u003eRick Wallace\u003c/a\u003e is the CEO of \u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrick_wallace/","tags":null,"title":"Rick Wallace"},{"categories":null,"contents":"a ring is\u0026hellip; something; its kind of a field but it doesn\u0026rsquo;t have inverses for multiplication.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhring/\"\u003ering\u003c/a\u003e is\u0026hellip; something; its kind of a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e but it doesn\u0026rsquo;t have inverses for \u003ca href=\"/posts/kbhmultiplying/\"\u003emultiplication\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhring/","tags":null,"title":"ring"},{"categories":null,"contents":"Wealth is a much more complex utility than others because given the different levels of wealth you have the marginal benefit of having that wealth decreases.\nThat is, let \\(A\\) be the fact that you are given $50, and let \\(B\\) be there being \\(0.5\\) chance of winning $100.\nrisk neutral: the utility is linear\u0026mdash;therefore \\(A \\sim B\\) risk seeking: utility is convex (derivative increases as reward increases), so \\(A \\prec B\\) risk averse: utility is concave (derivate decreases as reward decreases), so \\(A \\succ B\\) ","html":"\u003cp\u003eWealth is a much more complex \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e than others because given the different levels of wealth you have the 
marginal benefit of having that wealth decreases.\u003c/p\u003e\n\u003cp\u003eThat is, let \\(A\\) be the fact that you are given $50, and let \\(B\\) be there being \\(0.5\\) chance of winning $100.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk neutral\u003c/a\u003e: the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is linear\u0026mdash;therefore \\(A \\sim B\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk seeking\u003c/a\u003e: \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is convex (derivative increases as reward increases), so \\(A \\prec B\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk averse\u003c/a\u003e: \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e is concave (derivate decreases as reward decreases), so \\(A \\succ B\\)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhexpected_utility_of_wealth/","tags":null,"title":"risk aversion"},{"categories":null,"contents":"Make PACE better: no need to check the bacteriophage population in PACE yourself; just check it automatically! https://github.com/dgretton/pyhamilton https://www.chorylab.com/\ntake a constant plate measurement of the culture check the growth grade use the grid of materials to test the environmental combinations; checking if certain factors worked better PyLabRobotic to automatically handle the materials \u0026ldquo;run PACE sweeps, adjust parameters as needed to promote mutation replication\u0026rdquo;\n\u0026ldquo;cheaters\u0026rdquo; some molecules create specific increases in population without the need of any mutation at all, \u0026ldquo;cheating\u0026rdquo; the evolutionary process. 
We don\u0026rsquo;t know why, and the lab seem to have given up on them until an in vivo test is needed.\nso actually applying the above Given the changes in distribution of the replication process, measure schot Adjust selection pressure to promote mutation ","html":"\u003cp\u003eMake \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e better: no need to check the bacteriophage population in \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e yourself; just check it automatically! \u003ca href=\"https://github.com/dgretton/pyhamilton\"\u003ehttps://github.com/dgretton/pyhamilton\u003c/a\u003e \u003ca href=\"https://www.chorylab.com/\"\u003ehttps://www.chorylab.com/\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etake a constant plate measurement of the culture\u003c/li\u003e\n\u003cli\u003echeck the growth grade\u003c/li\u003e\n\u003cli\u003euse the grid of materials to test the environmental combinations; checking if certain factors worked better\u003c/li\u003e\n\u003cli\u003ePyLabRobotic to automatically handle the materials\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u0026ldquo;run \u003ca href=\"/posts/kbhpace/\"\u003ePACE\u003c/a\u003e sweeps, adjust parameters as needed to promote mutation replication\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"cheaters\"\u003e\u0026ldquo;cheaters\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003esome molecules create specific increases in population without the need of any mutation at all, \u0026ldquo;cheating\u0026rdquo; the evolutionary process. 
We don\u0026rsquo;t know why, and the lab seem to have given up on them until an in vivo test is needed.\u003c/p\u003e\n\u003ch2 id=\"so-actually-applying-the-above\"\u003eso actually applying the above\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eGiven the changes in distribution of the replication process, measure schot\u003c/li\u003e\n\u003cli\u003eAdjust selection pressure to promote mutation\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrobotics_assisted_directed_evolution/","tags":null,"title":"Robotics-Assisted Directed Evolution"},{"categories":null,"contents":"Ingredients:\n\\(\\mathcal{P}\\) problem (states, transitions, etc.) \\(\\pi\\) a Rollout Policy \\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower Use the greedy policy at each state by using the Rollout procedure to estimate your value function at any given state.\nRollout Rollout works by hallucinating a trajectory and calculating the reward.\nFor some state, Rollout Policy, and depth\u0026hellip;\nlet ret be 0; for i in range depth take action following the Rollout Policy obtain a sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state ret += gamma^i * r return ret Rollout Policy A Rollout Policy is a default policy used for lookahead. 
Usually this policy should be designed with domain knowledge; if not, we just use a uniform random next steps.\n","html":"\u003cp\u003eIngredients:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\mathcal{P}\\) problem (states, transitions, etc.)\u003c/li\u003e\n\u003cli\u003e\\(\\pi\\) a \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(d\\) depth (how many next states to look into)\u0026mdash;more is more accurate but slower\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eUse the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e at each state by using the \u003ca href=\"#rollout\"\u003eRollout\u003c/a\u003e procedure to estimate your \u003ca href=\"/posts/kbhaction_value_function/#id-0b1509e0-4d88-44d1-b6fa-fe8e86d200bb-value-function\"\u003evalue function\u003c/a\u003e at any given state.\u003c/p\u003e\n\u003ch2 id=\"rollout\"\u003eRollout\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#rollout\"\u003eRollout\u003c/a\u003e works by hallucinating a trajectory and calculating the reward.\u003c/p\u003e\n\u003cp\u003eFor some state, \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e, and depth\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003elet ret be 0; for i in range depth\n\u003col\u003e\n\u003cli\u003etake action following the \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eobtain a sample of possible next state (weighted by the action you took, meaning an instantiation of \\(s\u0026rsquo; \\sim T(\\cdot | s,a)\\)) and reward \\(R(s,a)\\) from current state\u003c/li\u003e\n\u003cli\u003eret += gamma^i * r\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ereturn ret\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"rollout-policy\"\u003eRollout Policy\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#rollout-policy\"\u003eRollout Policy\u003c/a\u003e is a default \u003ca 
href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e used for lookahead. Usually this \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e should be designed with domain knowledge; if not, we just use a uniform random next steps.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrollout_with_lookahead/","tags":null,"title":"Rollout with Lookahead"},{"categories":null,"contents":"Ronald Reagan is a president of the United States. He rises a wave of the New Right.\nComes out of Hollywood and was CA governor Reagan was a democrat, but McCarthyism lead him Reagan was an FBI informer for McCarthyism investigations Reagan was the first two-term president since 1961, was able to maintain more power compared to others \u0026ldquo;The Great Communicator\u0026rdquo; Reagan politics \u0026ldquo;Government isn\u0026rsquo;t the solution to the problem, its the problem.\u0026rdquo;\nwished for limited politics states rights condemned welfare and \u0026ldquo;welfare cheats\u0026rdquo; (the undertone of racist appeal) Evangelical undertones, family values, moral majority Against affirmative action Supply-side economics: \u0026ldquo;getting rid of taxes will allow more people to spend\u0026rdquo; Anti-Soviet rhetoric Creates the largest increase in welfare spending, gutting about $1.5Bn.\nReagan policy changes Lowering taxes: 70% of tax to 28% of taxes Increase defense budget: 1 trillion to 3 trillion Rising inequality, 1% controlled 40% of wealth (double from the 1970s) Reagan Foreign Policy Ronald Reagan creates the largest military build-up in history (larger than Korea and Vietnam.)\nReasserted Command-in-Chief abilities Creates the National Security Council (for whom the ) Comitted the US to supporting the anti-Marxist insurrections around the world Credited with falling the USSR Supreme Court Interview Process A new interview process for the supreme court designed by Ronald Reagan, creating an extensive process to vet conservative. 
Reagan swapped out 50% of the Federal judicial process.\nReagan\u0026rsquo;s Legacy Inflation dropped\nUSSR Collapse\nMilitary complex expanded\nIncomes rose\nInequality widened\nWelfare slashed\nDebt\nConcentrated power in the white house\nCentralized conservative agenda\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e is a president of the \u003ca href=\"\"\u003eUnited States\u003c/a\u003e. He rises a wave of the \u003ca href=\"/posts/kbhnew_right/\"\u003eNew Right\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eComes out of Hollywood and was CA governor\u003c/li\u003e\n\u003cli\u003eReagan was a democrat, but McCarthyism lead him\u003c/li\u003e\n\u003cli\u003eReagan was an FBI informer for McCarthyism investigations\u003c/li\u003e\n\u003cli\u003eReagan was the first two-term president since 1961, was able to maintain more power compared to others\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The Great Communicator\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"reagan-politics\"\u003eReagan politics\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Government isn\u0026rsquo;t the solution to the problem, its the problem.\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewished for limited politics\u003c/li\u003e\n\u003cli\u003estates rights\u003c/li\u003e\n\u003cli\u003econdemned welfare and \u0026ldquo;welfare cheats\u0026rdquo; (the undertone of racist appeal)\u003c/li\u003e\n\u003cli\u003eEvangelical undertones, family values, moral majority\u003c/li\u003e\n\u003cli\u003eAgainst affirmative action\u003c/li\u003e\n\u003cli\u003eSupply-side economics: \u0026ldquo;getting rid of taxes will allow more people to spend\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eAnti-Soviet rhetoric\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCreates the largest increase in welfare spending, gutting about $1.5Bn.\u003c/p\u003e\n\u003ch2 id=\"reagan-policy-changes\"\u003eReagan policy 
changes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eLowering taxes: 70% of tax to 28% of taxes\u003c/li\u003e\n\u003cli\u003eIncrease defense budget: 1 trillion to 3 trillion\u003c/li\u003e\n\u003cli\u003eRising inequality, 1% controlled 40% of wealth (double from the 1970s)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"reagan-foreign-policy\"\u003eReagan Foreign Policy\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e creates the largest military build-up in history (larger than Korea and Vietnam.)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eReasserted Command-in-Chief abilities\u003c/li\u003e\n\u003cli\u003eCreates the National Security Council (for whom the )\u003c/li\u003e\n\u003cli\u003eComitted the US to supporting the anti-Marxist insurrections around the world\u003c/li\u003e\n\u003cli\u003eCredited with falling the USSR\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"supreme-court-interview-process\"\u003eSupreme Court Interview Process\u003c/h2\u003e\n\u003cp\u003eA new interview process for the supreme court designed by \u003ca href=\"/posts/kbhronald_raegan/\"\u003eRonald Reagan\u003c/a\u003e, creating an extensive process to vet conservative. 
Reagan swapped out 50% of the Federal judicial process.\u003c/p\u003e\n\u003ch2 id=\"reagan-s-legacy\"\u003eReagan\u0026rsquo;s Legacy\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eInflation dropped\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eUSSR Collapse\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eMilitary complex expanded\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eIncomes rose\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eInequality widened\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWelfare slashed\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eDebt\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eConcentrated power in the white house\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eCentralized conservative agenda\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhronald_raegan/","tags":null,"title":"Ronald Reagan"},{"categories":null,"contents":"The Rosa Parks bus incident is the instigator which needed to act on an issue to challenge the civil rights movement.\nShe participated in many civil rights agitations, and became the instigator .\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhrosa_parks/\"\u003eRosa Parks\u003c/a\u003e bus incident is the instigator which needed to act on an issue to challenge the civil rights movement.\u003c/p\u003e\n\u003cp\u003eShe participated in many civil rights agitations, and became the instigator .\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrosa_parks/","tags":null,"title":"Rosa Parks"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhroseta/","tags":null,"title":"roseta"},{"categories":null,"contents":"Rosetta is a set of physical-based protein folding models.\nprotein binding with Rosetta check a protein surface check how protein side-chains interact with the binding surface peptide 
binding with Rosetta The difficulty with this is that we don\u0026rsquo;t know what the overall tertiary structure of a group of peptides are; unlike whole protein binding.\nsequence-specific DNA binding ???\nmore! You take something like a trimer; you shove a peptide between each \u0026ldquo;point\u0026rdquo;, and boom structal change to a quadromer\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e is a set of physical-based protein folding models.\u003c/p\u003e\n\u003ch2 id=\"protein-binding-with-rosetta--kbhrosetta-dot-md\"\u003eprotein binding with \u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003echeck a protein surface\u003c/li\u003e\n\u003cli\u003echeck how protein side-chains interact with the binding surface\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"peptide-binding-with-rosetta--kbhrosetta-dot-md\"\u003epeptide binding with \u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe difficulty with this is that we don\u0026rsquo;t know what the overall tertiary structure of a group of peptides are; unlike whole \u003ca href=\"#protein-binding-with-rosetta--kbhrosetta-dot-md\"\u003eprotein binding\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"sequence-specific-dna-binding\"\u003esequence-specific DNA binding\u003c/h2\u003e\n\u003cp\u003e???\u003c/p\u003e\n\u003ch2 id=\"more\"\u003emore!\u003c/h2\u003e\n\u003cp\u003eYou take something like a trimer; you shove a peptide between each \u0026ldquo;point\u0026rdquo;, and boom structal change to a quadromer\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrosetta/","tags":null,"title":"Rosetta"},{"categories":null,"contents":"RoseTTAFold2 is a three-track folding tool, which also handles multimer!\ninputs: amino acid sequence + CHEMICAL structure (WOAH! how?) \u0026ldquo;RF2 all-atom embedding\u0026rdquo; fold! 
The model does really well!\napplication: de-novo luciferase design come up with the correct shaped scaffolds use old Rosetta to jam a residue sequence into the scaffold refold application: RoseTTAFold2 in-painting Train the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e is a three-track folding tool, which also handles multimer!\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003einputs: amino acid sequence + CHEMICAL structure (WOAH! how?)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;RF2 all-atom embedding\u0026rdquo;\u003c/li\u003e\n\u003cli\u003efold!\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe model does really well!\u003c/p\u003e\n\u003ch2 id=\"application-de-novo-luciferase-design\"\u003eapplication: de-novo luciferase design\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecome up with the correct shaped scaffolds\u003c/li\u003e\n\u003cli\u003euse old \u003ca href=\"/posts/kbhrosetta/\"\u003eRosetta\u003c/a\u003e to jam a residue sequence into the scaffold\u003c/li\u003e\n\u003cli\u003erefold\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"application-rosettafold2--kbhrosettafold2-dot-md--in-painting\"\u003eapplication: \u003ca href=\"/posts/kbhrosettafold2/\"\u003eRoseTTAFold2\u003c/a\u003e in-painting\u003c/h2\u003e\n\u003cp\u003eTrain the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrosettafold2/","tags":null,"title":"RoseTTAFold2"},{"categories":null,"contents":"On the dynamics of Tuning Forks. (Rossing, Russell, and Brown 1992)\nCharacterizing Tuning Forks Aluminum, tines 10mm apart. 
Four main groups of vibration:\nSymmetrical In-Plane Antisymmetrical In-Plane Symmetrical Out-Of-Plane Antisymmetrical Out-Of-Plane (a) and (c) are in the first group; (b) is in the second group, where the fork just warps.\nDeriving Tuning Forks\u0026rsquo; Frequency As per before, we can treat tuning forks acting in clang and fundamental modes as a good\u0026rsquo;ol fashioned cantilever beam.\nThe frequency action of a cantilever beam is defined as follows:\nOtherwise, for asymmetric modes, we can use the same exact expression but with uniform rods unfixed at either end:\nNote that density is not uniform at this point (because the bottom handle-y bit.)\n","html":"\u003cp\u003eOn the dynamics of \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003es. (\u003ca href=\"#citeproc_bib_item_1\"\u003eRossing, Russell, and Brown 1992\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"characterizing-tuning-forks\"\u003eCharacterizing Tuning Forks\u003c/h2\u003e\n\u003cp\u003eAluminum, tines 10mm apart. 
Four main groups of vibration:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eSymmetrical In-Plane\u003c/li\u003e\n\u003cli\u003eAntisymmetrical In-Plane\u003c/li\u003e\n\u003cli\u003eSymmetrical Out-Of-Plane\u003c/li\u003e\n\u003cli\u003eAntisymmetrical Out-Of-Plane\u003c/li\u003e\n\u003c/ol\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-20_22-49-48_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003e(a) and (c) are in the first group; (b) is in the second group, where the fork just warps.\u003c/p\u003e\n\u003ch2 id=\"deriving-tuning-forks-frequency\"\u003eDeriving Tuning Forks\u0026rsquo; Frequency\u003c/h2\u003e\n\u003cp\u003eAs per before, we can treat tuning forks acting in clang and fundamental modes as a good\u0026rsquo;ol fashioned \u003ca href=\"/posts/kbhcantilever_beam/\"\u003ecantilever beam\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThe frequency action of a \u003ca href=\"/posts/kbhcantilever_beam/\"\u003ecantilever beam\u003c/a\u003e is defined as follows:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-20_23-01-55_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eOtherwise, for asymmetric modes, we can use the same exact expression but with uniform rods unfixed at either end:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-20_23-11-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eNote that density is not uniform at this point (because the bottom handle-y bit.)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrossing_1990/","tags":null,"title":"Rossing 1990"},{"categories":null,"contents":"total kinetic energy \\begin{equation} KE_{rigid} = \\frac{1}{2} M{V_{cm}}^2 + \\frac{1}{2} I_{CM}{\\omega_{CM}}^2 \\end{equation}\ntorque from gravity For even non rigid bodies, the following follows:\n\\begin{equation} \\vec{\\tau}_g = \\vec{R}_{CM} \\times M\\vec{g} \\end{equation}\nActually, this follows for any \\(f\\) (like \\(g\\)) evenly applied across point 
masses.\npotential energy \\begin{equation} \\Delta PE_g = mg\\Delta h \\end{equation}\nwhere, \\(\\Delta h\\) is the travel of center of mass. Regardless of whether or not its point.\n","html":"\u003ch2 id=\"total-kinetic-energy\"\u003etotal kinetic energy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nKE_{rigid} = \\frac{1}{2} M{V_{cm}}^2 + \\frac{1}{2} I_{CM}{\\omega_{CM}}^2\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"torque-from-gravity\"\u003etorque from gravity\u003c/h2\u003e\n\u003cp\u003eFor even non rigid bodies, the following follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\vec{\\tau}_g = \\vec{R}_{CM} \\times M\\vec{g}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eActually, this follows for any \\(f\\) (like \\(g\\)) evenly applied across point masses.\u003c/p\u003e\n\u003ch2 id=\"potential-energy\"\u003epotential energy\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta PE_g = mg\\Delta h\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\Delta h\\) is the \u003cem\u003etravel of center of mass\u003c/em\u003e. Regardless of whether or not its point.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrotational_energy/","tags":null,"title":"rotational energy theorem"},{"categories":null,"contents":"Rural Electrification Administration create electrification throughout cities. Most of American infrastructure still 1930s.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrural_electrification_administration/\"\u003eRural Electrification Administration\u003c/a\u003e create electrification throughout cities. 
Most of American infrastructure still 1930s.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrural_electrification_administration/","tags":null,"title":"Rural Electrification Administration"},{"categories":null,"contents":"Observations from studying the comedian Russel Howard.\nStretching analogies Using language/motion/figure do describe something on the opposite end of the spectrum Take, for instance, age: 5Y/O: \u0026ldquo;cheers mum, wasen\u0026rsquo;t on my to-do list\u0026rdquo; A surprisingly sentimental dog: \u0026ldquo;because when I wake up tomorrow I want to see you, and I want to go for a lovely walk\u0026rdquo; Large motions + deadpan after Endless extrapolations of a normal setup: setup: Russian hackers were controlling people\u0026rsquo;s toys; punchline: \u0026ldquo;5 men were dildo\u0026rsquo;d to death, we don\u0026rsquo;t have a recording but here are their final words \u0026mdash; \u0026lsquo;oh yeaaah\u0026rsquo;, \u0026lsquo;oh fuck yeaaah\u0026rsquo;\u0026rdquo; Setup: gweneth paltro Punchline: \u0026ldquo;put an egg up there, you will feel more femenine. no! 
you will feel like a chicken\u0026rdquo;\nMultiple use of setups: \u0026ldquo;happy birthday too you\u0026rdquo; Peach ","html":"\u003cp\u003eObservations from studying the comedian \u003ca href=\"/posts/kbhrussel_howard/\"\u003eRussel Howard\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eStretching analogies\u003c/li\u003e\n\u003cli\u003eUsing language/motion/figure do describe something on the opposite end of the spectrum\n\u003cul\u003e\n\u003cli\u003eTake, for instance, age: 5Y/O: \u0026ldquo;cheers mum, wasen\u0026rsquo;t on my to-do list\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eA surprisingly sentimental dog: \u0026ldquo;because when I wake up tomorrow I want to see you, and I want to go for a lovely walk\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eLarge motions + deadpan after\u003c/li\u003e\n\u003cli\u003eEndless extrapolations of a normal setup: setup: Russian hackers were controlling people\u0026rsquo;s toys; punchline: \u0026ldquo;5 men were dildo\u0026rsquo;d to death, we don\u0026rsquo;t have a recording but here are their final words \u0026mdash; \u0026lsquo;oh yeaaah\u0026rsquo;, \u0026lsquo;oh fuck yeaaah\u0026rsquo;\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSetup: gweneth paltro\nPunchline: \u0026ldquo;put an egg up there, you will feel more femenine. no! you will feel like a chicken\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMultiple use of setups: \u0026ldquo;happy birthday too you\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ePeach\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhrussel_howard/","tags":null,"title":"Russel Howard"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624594\n(Sadeghian, Schaffer, and Zahorian 2021)\nOne-Liner Using a genetic algorithm, picked features to optimize fore; achieved \\(94\\%\\) with just MMSE data alone (ok like duh me too). 
Developed ASR tool to aid.\nNovelty Developed an ASR methodology for speech, complete with punctuations Used a genetic algorithm to do feature selection; NNs performed worse because \u0026ldquo;space is smaller???\u0026rdquo; Notable Methods Used a GRU to insert punctuations The paper leveraged the nuke that is a bidirectional GRU, ATTENTION,\nKey Figs Fully automated ANN transcript does pretty well in terms of classifier AD/NL.\nNew Concepts fusion genetic algorithm MMSE Notes very confusing (too many things going on at once)\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624594\u003c/p\u003e\n\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eSadeghian, Schaffer, and Zahorian 2021\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUsing a \u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003egenetic algorithm\u003c/a\u003e, picked features to optimize fore; achieved \\(94\\%\\) with just \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e data alone (ok like duh me too). 
Developed \u003ca href=\"/posts/kbhasr/\"\u003eASR\u003c/a\u003e tool to aid.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDeveloped an \u003ca href=\"/posts/kbhasr/\"\u003eASR\u003c/a\u003e methodology for speech, complete with punctuations\u003c/li\u003e\n\u003cli\u003eUsed a \u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003egenetic algorithm\u003c/a\u003e to do feature selection; NNs performed worse because \u0026ldquo;space is smaller???\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch3 id=\"used-a-gru-to-insert-punctuations\"\u003eUsed a GRU to insert punctuations\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-44-59_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThe paper leveraged the nuke that is a bidirectional GRU, ATTENTION,\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_00-00-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFully automated ANN transcript does pretty well in terms of classifier AD/NL.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfusion/\"\u003efusion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_algorithum/\"\u003egenetic algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003every confusing (too many things going on at once)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsadeghian_2021/","tags":["ntj"],"title":"Sadeghian 2021"},{"categories":null,"contents":"Challenge of speech anonymization: cannot develop a model which both preserves speech features well but also effectively anonymizes the 
speech.\nMethodology Separate content and speech encoders Results in highly concentrated + effective speech content, but with very widespread voiceprint ","html":"\u003cp\u003eChallenge of speech anonymization: cannot develop a model which both preserves speech features well but also effectively anonymizes the speech.\u003c/p\u003e\n\u003ch2 id=\"methodology\"\u003eMethodology\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSeparate content and speech encoders\u003c/li\u003e\n\u003cli\u003eResults in highly concentrated + effective speech content, but with very widespread voiceprint\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsaic_speech_anonomyzation/","tags":null,"title":"SAIC: Speech Anonomyzation"},{"categories":null,"contents":"Demo day No value add for demo-day winner Competition makes you want to prepare more \u0026ldquo;this much budget for an enriching experience\u0026rdquo; Mentor Conversations None yet\nIntegration Integration into soundscape Hiring Need help designing a PCB\n","html":"\u003ch2 id=\"demo-day\"\u003eDemo day\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNo value add for demo-day winner\u003c/li\u003e\n\u003cli\u003eCompetition makes you want to prepare more\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;this much budget for an enriching experience\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mentor-conversations\"\u003eMentor Conversations\u003c/h2\u003e\n\u003cp\u003eNone yet\u003c/p\u003e\n\u003ch2 id=\"integration\"\u003eIntegration\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIntegration into soundscape\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"hiring\"\u003eHiring\u003c/h2\u003e\n\u003cp\u003eNeed help designing a PCB\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsalus_april_checkin/","tags":null,"title":"Salus April Checkin"},{"categories":null,"contents":"sample space \\(S\\) is the set of all possible outcomes of an experiment. 
It could be continuous or distinct.\nequally likely outcomes Some sample spaces have equally likely outcomes:\ncoin flip flipping two coins rolling a fair die If we have equally likely outcomes, \\(P(outcome)\\) = \\(\\frac{1}{S}\\).\nIf your sample space has equally likely outcomes, the probability is juts counting:\n\\begin{equation} P(E) = \\frac{count(E)}{count(S)} \\end{equation}\nWhenever you use this tool, you have to think about whether or not your outcomes are equally likely. For instance, the \u0026ldquo;sum of two dice rolling\u0026rdquo; is NOT equally likely.\nDistinct counting makes things equally likely.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e \\(S\\) is the set of all possible outcomes of an experiment. It could be continuous or distinct.\u003c/p\u003e\n\u003ch2 id=\"equally-likely-outcomes\"\u003eequally likely outcomes\u003c/h2\u003e\n\u003cp\u003eSome \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003es have equally likely outcomes:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecoin flip\u003c/li\u003e\n\u003cli\u003eflipping two coins\u003c/li\u003e\n\u003cli\u003erolling a fair die\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf we have \u003ca href=\"#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e, \\(P(outcome)\\) = \\(\\frac{1}{S}\\).\u003c/p\u003e\n\u003cp\u003eIf your sample space has \u003ca href=\"#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e, the probability is juts counting:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(E) = \\frac{count(E)}{count(S)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhenever you use this tool, you have to think about whether or not your outcomes are \u003cstrong\u003eequally likely\u003c/strong\u003e. 
For instance, the \u0026ldquo;sum of two dice rolling\u0026rdquo; is \u003cstrong\u003eNOT\u003c/strong\u003e equally likely.\u003c/p\u003e\n\u003cp\u003eDistinct \u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e makes things equally likely.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsample_space/","tags":null,"title":"sample space"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsars_cov2/","tags":null,"title":"SARS-COV2"},{"categories":null,"contents":"SARS-COV2\ntraditional stain techniques to analyze the epitopes being targeted uses cyro-EM structural analysis to figure structural points of neutralization predict correct antibodies binding to force certain structures to neutralize covid-19 analyze mRNA-vax elicited antibodies to see similarity between those that are useful predicted in 3) Study identified three epitopes: C1520, C1791, C1717, which changes the structure/activity of all three variants of concern as identified using methods above, and are inpervious to the mutation to the main supersite.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsars_cov2/\"\u003eSARS-COV2\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etraditional stain techniques to analyze the \u003ca href=\"/posts/kbhepitophs/\"\u003eepitopes\u003c/a\u003e being targeted\u003c/li\u003e\n\u003cli\u003euses \u003ca href=\"/posts/kbhcyro_em/\"\u003ecyro-EM\u003c/a\u003e structural analysis to figure structural points of neutralization\u003c/li\u003e\n\u003cli\u003epredict correct antibodies binding to force certain structures to neutralize covid-19\u003c/li\u003e\n\u003cli\u003eanalyze mRNA-vax elicited antibodies to see similarity between those that are useful predicted in 3)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eStudy identified three \u003ca href=\"/posts/kbhepitophs/\"\u003eepitopes\u003c/a\u003e: C1520, C1791, C1717, which changes the structure/activity of all three variants of concern as 
identified using methods above, and are inpervious to the mutation to the main \u003ca href=\"/posts/kbhspersite/\"\u003esupersite\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsars_cov2_structural_analysis/","tags":null,"title":"SARS-COV2 Structural Analysis"},{"categories":null,"contents":"Sarsa (Lambda) is SARSA with Eligibility Traces (\\(\\lambda\\)).\nPrevious approaches to deal with Partially Observable Markov Decision Process:\nmemory-based state estimation (beliefs) special planning methods Key question: Can we use MDP reinforcement learning to deal with POMDPs?\nBackground Recall MDP SARSA:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)] \\end{equation}\nRecall that, sparse rewards with SARSA can take a long time to learn because it takes time to backpropgate.\nHence, we use Eligibility Traces, which keeps track of what\u0026rsquo;s \u0026ldquo;eligible\u0026rdquo; for updates:\nlet \\(\\lambda\\) be some decay parameter, we have:\n\\begin{equation} \\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a) \\end{equation}\nand, we can write:\n\\begin{equation} Q(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a) \\end{equation}\nwhere by the visit counts are discounted such that:\n\\begin{equation} N(s,a) \\leftarrow \\gamma \\lambda N(s,a) \\end{equation}\nWorry Inability of fully observing the state seems to invalidate the point of \\(Q\\) learning, SARSA, etc.\nApplying Eligibility Traces to POMDPs Instead of \\(N(s,a)\\) a visitation count, we initialize some \\(\\eta(x,a)\\) for observation + action and work on the rest of it in the same way.\nAt each step, we sample some reward \\(r_{t+1}\\) and observation \\(x_{t+1}\\) (and remember our current \\(r_{t}, x_{t}\\)). 
Then in which case:\n\\begin{equation} \\eta(x_{t},a_{t}) = 1\\ \\text{(reset decay)} \\end{equation}\nand\n\\begin{equation} \\forall (x\\neq x_{t}, a \\neq a_{t}): \\eta(x,a) = \\gamma \\lambda \\eta_{t-1}(x,a)\\ \\text{(decay others)} \\\\ \\end{equation}\nUsing the new \\(\\eta\\) values, we update \\(Q(x, a)\\) in the usualish manner:\n\\begin{equation} \\delta_{t} := r_{t} + \\gamma Q (x_{t+1}, a_{t+1}) - Q(x_{t}, a_{t}) \\end{equation}\n\\begin{equation} \\forall (x,a): Q(x,a) = Q(x,a)+a \\delta_{t} \\eta(x,a) \\end{equation}\nall values of \\(\\eta\\) are established to \\(0\\) per episode.\nNotably, we formulate \\(x\\) as a tuple of TWO observations in the past\u0026mdash;meaning we have a single step of memory in the past and optimise over those.\nThis requires no belief propagation!! And note how the \u0026ldquo;eligibility\u0026rdquo; of each observation decays over time, such that the influence of that observation decays until we see the corresponding observation again.\naliasing an important failure of this is aliasing\u0026ndash;where you maybe in one of two different places that has similar properties observationally, but taking actions at those states results in very different places.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsarsa_lambda/\"\u003eSarsa (Lambda)\u003c/a\u003e is \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e with \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e (\\(\\lambda\\)).\u003c/p\u003e\n\u003cp\u003ePrevious approaches to deal with \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePartially Observable Markov Decision Process\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ememory-based state estimation (\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003es)\u003c/li\u003e\n\u003cli\u003especial planning 
methods\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eKey question: \u003cstrong\u003eCan we use MDP \u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e to deal with \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es?\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eRecall MDP \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\alpha \\qty [(r + \\gamma Q(s\u0026rsquo;, a\u0026rsquo;)) - Q(s,a)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that, sparse rewards with \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e can take a long time to learn because it takes time to backpropgate.\u003c/p\u003e\n\u003cp\u003eHence, we use \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e, which keeps track of what\u0026rsquo;s \u0026ldquo;eligible\u0026rdquo; for updates:\u003c/p\u003e\n\u003cp\u003elet \\(\\lambda\\) be some decay parameter, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta = r + \\gamma Q(s\u0026rsquo;,a\u0026rsquo;) - Q(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand, we can write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nQ(s,a) \\leftarrow Q(s,a) + \\lambda \\delta N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere by the visit counts are discounted such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nN(s,a) \\leftarrow \\gamma \\lambda N(s,a)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"worry\"\u003eWorry\u003c/h2\u003e\n\u003cp\u003eInability of fully observing the state seems to invalidate the point of \\(Q\\) learning, \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#sarsa\"\u003eSARSA\u003c/a\u003e, etc.\u003c/p\u003e\n\u003ch2 
id=\"applying-eligibility-traces--kbhmodel-free-reinforcement-learning-dot-md--to-pomdp--kbhpartially-observable-markov-decision-process-dot-md--s\"\u003eApplying \u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/#eligibility-traces\"\u003eEligibility Traces\u003c/a\u003e to \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eInstead of \\(N(s,a)\\) a visitation count, we initialize some \\(\\eta(x,a)\\) for observation + action and work on the rest of it in the same way.\u003c/p\u003e\n\u003cp\u003eAt each step, we sample some reward \\(r_{t+1}\\) and observation \\(x_{t+1}\\) (and remember our current \\(r_{t}, x_{t}\\)). Then in which case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\eta(x_{t},a_{t}) = 1\\ \\text{(reset decay)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall (x\\neq x_{t}, a \\neq a_{t}): \\eta(x,a) = \\gamma \\lambda \\eta_{t-1}(x,a)\\ \\text{(decay others)} \\\\\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eUsing the new \\(\\eta\\) values, we update \\(Q(x, a)\\) in the usualish manner:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\delta_{t} := r_{t} + \\gamma Q (x_{t+1}, a_{t+1}) - Q(x_{t}, a_{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall (x,a): Q(x,a) = Q(x,a)+a \\delta_{t} \\eta(x,a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eall values of \\(\\eta\\) are established to \\(0\\) per episode.\u003c/p\u003e\n\u003cp\u003eNotably, we formulate \\(x\\) as a tuple of \u003cstrong\u003e\u003cstrong\u003eTWO\u003c/strong\u003e\u003c/strong\u003e observations in the past\u0026mdash;meaning we have a single step of memory in the past and optimise over those.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThis requires no belief propagation\u003c/strong\u003e!! 
And note how the \u0026ldquo;eligibility\u0026rdquo; of each observation decays over time, such that the influence of that observation decays until we see the corresponding observation again.\u003c/p\u003e\n\u003ch2 id=\"aliasing\"\u003ealiasing\u003c/h2\u003e\n\u003cp\u003ean important failure of this is \u003ca href=\"#aliasing\"\u003ealiasing\u003c/a\u003e\u0026ndash;where you maybe in one of two different places that has similar properties observationally, but taking actions at those states results in very different places.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsarsa_lambda/","tags":null,"title":"Sarsa (Lambda)"},{"categories":null,"contents":"Big problem: curse of dimensionality and the curse of history.\nPBVI and HSVI tries to sample the belief simplex generally. But instead we should try to sample OPTIMAL REACHABLE SET.\nBackground Recall one-step lookahead in POMDP. The difficulty here is that the sum over all of the alpha-vectors is still very hard. So, in PBVI, we only do this to a small set of beliefs\nSARSOP sample \\(R^{*}\\) backup prune Initialization choose an initial belief, action, and observation using \u0026ldquo;suitable heuristics\u0026rdquo;. Initialize a set of alpha vectors corresponding to this belief.\nSampling compute \\(b\u0026rsquo; = update(b,a,o)\\) add node \\(b\u0026rsquo;\\) to the tree So far, this is just PBVI, HSVI. 
The point is that we only want to update the reachable set.\nTo do this, we now take the new \\(b\u0026rsquo;\\), we give an upper bound via FIB, and a lower bound with blind lower bound over the alpha vectors you already got.\nNow:\nwhere \\(\\mathcal{R}^{*}\\) is a reachable space tree set from \\(b_0\\).\nBackup PBVI Backup on the beliefs you sampled to update your alpha vectors.\nPruning We can prune anything that\u0026rsquo;s suboptimal: every step, we perform alpha vector pruning at every step.\nLimitations HSVI is better at handling systems with lower uncertainty.\nDoes not make an attempt at challenges of dimensionality Make unproven theoretical claims Don\u0026rsquo;t compare to domain contraction Compare algorithm to a single alternative Compared to continuous state spaces Subsection headings ","html":"\u003cp\u003eBig problem: curse of \u003cstrong\u003edimensionality\u003c/strong\u003e and the curse of \u003cstrong\u003ehistory\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e and \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e tries to sample the belief simplex generally. But instead we should try to sample \u003cstrong\u003eOPTIMAL REACHABLE SET\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eRecall \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e. The difficulty here is that the sum over all of the alpha-vectors is still very hard. 
So, in \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, we only do this to a small set of beliefs\u003c/p\u003e\n\u003ch2 id=\"sarsop\"\u003eSARSOP\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esample \\(R^{*}\\)\u003c/li\u003e\n\u003cli\u003ebackup\u003c/li\u003e\n\u003cli\u003eprune\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"initialization\"\u003eInitialization\u003c/h3\u003e\n\u003cp\u003echoose an initial belief, action, and observation using \u0026ldquo;suitable heuristics\u0026rdquo;. Initialize a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es corresponding to this belief.\u003c/p\u003e\n\u003ch3 id=\"sampling\"\u003eSampling\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ecompute \\(b\u0026rsquo; = update(b,a,o)\\)\u003c/li\u003e\n\u003cli\u003eadd node \\(b\u0026rsquo;\\) to the tree\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSo far, this is just \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e, \u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e. 
The point is that we only want to update the reachable set.\u003c/p\u003e\n\u003cp\u003eTo do this, we now take the new \\(b\u0026rsquo;\\), we give an upper bound via \u003ca href=\"/posts/kbhfast_informed_bound/\"\u003eFIB\u003c/a\u003e, and a lower bound with \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e over the alpha vectors you already got.\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-01-27_22-11-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere \\(\\mathcal{R}^{*}\\) is a reachable space tree set from \\(b_0\\).\u003c/p\u003e\n\u003ch3 id=\"backup\"\u003eBackup\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/#pbvi-backup\"\u003ePBVI Backup\u003c/a\u003e on the beliefs you sampled to update your \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"pruning\"\u003ePruning\u003c/h3\u003e\n\u003cp\u003eWe can prune anything that\u0026rsquo;s suboptimal: every step, we perform \u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e at every step.\u003c/p\u003e\n\u003ch2 id=\"limitations\"\u003eLimitations\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e is better at handling systems with lower uncertainty.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDoes not make an attempt at challenges of dimensionality\u003c/li\u003e\n\u003cli\u003eMake unproven theoretical claims\u003c/li\u003e\n\u003cli\u003eDon\u0026rsquo;t compare to domain contraction\u003c/li\u003e\n\u003cli\u003eCompare algorithm to a single alternative\u003c/li\u003e\n\u003cli\u003eCompared to continuous state spaces\u003c/li\u003e\n\u003cli\u003eSubsection 
headings\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsarsop/","tags":null,"title":"SARSOP"},{"categories":null,"contents":" adding emails need sequential typing hitting enter should move on to the next page date selection \u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP reading calendar output isn\u0026rsquo;t sorted bugs reading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\nneed some information about that the heck is actually happening on the scheduling\nshow other people\u0026rsquo;s overall availibliity in the scheduling page\nthe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work\nhave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message; also allow the message owner to change to alternate schedules on that date\nalso scheduling multiple people is broken. ah. ","html":"\u003cul\u003e\n\u003cli\u003eadding emails need sequential typing\u003c/li\u003e\n\u003cli\u003ehitting enter should move on to the next page\u003c/li\u003e\n\u003cli\u003edate selection\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP\u003c/li\u003e\n\u003cli\u003ereading calendar output isn\u0026rsquo;t sorted\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bugs\"\u003ebugs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ereading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eneed some information about that the heck is actually happening on the scheduling\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eshow other people\u0026rsquo;s overall availibliity in the scheduling page\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ethe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number 
work\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ehave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message; also allow the message owner to change to alternate schedules on that date\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"also\"\u003ealso\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003escheduling multiple people is broken. ah.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalander_notes-1/","tags":null,"title":"scalander notes"},{"categories":null,"contents":" adding emails need sequential typing hitting enter should move on to the next page date selection \u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP reading calendar output isn\u0026rsquo;t sorted bugs reading other people\u0026rsquo;s calendars isn\u0026rsquo;t working need some information about that the heck is actually happening on the scheduling show other people\u0026rsquo;s overall availibliity in the scheduling page idea of \u0026ldquo;budget\u0026rdquo; next actions scheduling multiple people is broken. ah. 
have the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date the weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work ","html":"\u003cul\u003e\n\u003cli\u003eadding emails need sequential typing\u003c/li\u003e\n\u003cli\u003ehitting enter should move on to the next page\u003c/li\u003e\n\u003cli\u003edate selection\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP\u003c/li\u003e\n\u003cli\u003ereading calendar output isn\u0026rsquo;t sorted\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bugs\"\u003ebugs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ereading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\u003c/li\u003e\n\u003cli\u003eneed some information about that the heck is actually happening on the scheduling\u003c/li\u003e\n\u003cli\u003eshow other people\u0026rsquo;s overall availibliity in the scheduling page\u003c/li\u003e\n\u003cli\u003eidea of \u0026ldquo;budget\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-actions\"\u003enext actions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003escheduling multiple people is broken. 
ah.\u003c/li\u003e\n\u003cli\u003ehave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date\u003c/li\u003e\n\u003cli\u003ethe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalander_notes-2/","tags":null,"title":"scalander notes"},{"categories":null,"contents":" adding emails need sequential typing hitting enter should move on to the next page date selection \u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP reading calendar output isn\u0026rsquo;t sorted bugs reading other people\u0026rsquo;s calendars isn\u0026rsquo;t working need some information about that the heck is actually happening on the scheduling show other people\u0026rsquo;s overall availibliity in the scheduling page idea of \u0026ldquo;budget\u0026rdquo; next actions scheduling multiple people is broken. ah. have the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date (have default notifications in the iCal invite) somehow remind people after the fact that the meeting is scheduled tie in evite abilities (this will be nice for your party, etc.) event planning built in? type in a budget and find vendors for the party. age range? 
the weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work home page \u0026ldquo;the front page looks like that for an OB-GYN\u0026rdquo; \u0026mdash; feels like ZocDoc it is also not that fun maybe some kind of memphis design ","html":"\u003cul\u003e\n\u003cli\u003eadding emails need sequential typing\u003c/li\u003e\n\u003cli\u003ehitting enter should move on to the next page\u003c/li\u003e\n\u003cli\u003edate selection\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Monday next week\u0026rdquo; doesn\u0026rsquo;t NLP\u003c/li\u003e\n\u003cli\u003ereading calendar output isn\u0026rsquo;t sorted\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"bugs\"\u003ebugs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ereading other people\u0026rsquo;s calendars isn\u0026rsquo;t working\u003c/li\u003e\n\u003cli\u003eneed some information about that the heck is actually happening on the scheduling\u003c/li\u003e\n\u003cli\u003eshow other people\u0026rsquo;s overall availibliity in the scheduling page\u003c/li\u003e\n\u003cli\u003eidea of \u0026ldquo;budget\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-actions\"\u003enext actions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003escheduling multiple people is broken. ah.\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ehave the idea of a \u0026ldquo;meeting owner\u0026rdquo;, and we only reach out to them to confirm final date + have an ability to add a message (with templates); also allow the message owner to change to alternate schedules on that date (have default notifications in the iCal invite)\u003c/strong\u003e\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003esomehow remind people after the fact that the meeting is scheduled\u003c/li\u003e\n\u003cli\u003etie in evite abilities (this will be nice for your party, etc.)\n\u003cul\u003e\n\u003cli\u003eevent planning built in? 
type in a budget and find vendors for the party.\u003c/li\u003e\n\u003cli\u003eage range?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ethe weight number doesn\u0026rsquo;t make sense\u0026mdash;correct alt-text and make the number work\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"home-page\"\u003ehome page\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;the front page looks like that for an OB-GYN\u0026rdquo; \u0026mdash; feels like ZocDoc\u003c/li\u003e\n\u003cli\u003eit is also not that fun\u003c/li\u003e\n\u003cli\u003emaybe some kind of memphis design\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalander_notes/","tags":null,"title":"scalander notes"},{"categories":null,"contents":"Scalar multiplication is the process of multiplying a scalar to an element in a set.\nconstituents A set \\(V\\) Some \\(\\lambda \\in \\mathbb{F}\\) Each \\(v \\in V\\) requirements scalar multiplication is defined by a function that results in \\(\\lambda v \\in V\\) (maps back to the space!) to each \\(\\lambda \\in \\mathbb{F}\\) and each \\(v \\in V\\).\nadditional information See also scalar multiplication in \\(\\mathbb{F}^n\\).\n","html":"\u003cp\u003eScalar multiplication is the process of multiplying a scalar to an element in a set.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA set \\(V\\)\u003c/li\u003e\n\u003cli\u003eSome \\(\\lambda \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003eEach \\(v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is defined by a function that results in \\(\\lambda v \\in V\\) (maps back to the space!) 
to each \\(\\lambda \\in \\mathbb{F}\\) and each \\(v \\in V\\).\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhlists_over_fields/#scalar-multiplication-in-mathbb-f-n\"\u003escalar multiplication in \\(\\mathbb{F}^n\\)\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscalar_multiplication/","tags":null,"title":"scalar multiplication"},{"categories":null,"contents":"scheduling is the tool to figure out which thread can run. Because threads exist in different thread states:\nrunning blockde - waiting for an event like disk, network, etc. ready - able to run, but not on CPU yet a scheduler needs to do the task of ordering ready threads to run, moving running threads to ready when it has ran for enough time. Possible pathways:\nready =\u0026gt; running blocked =\u0026gt; running blocked =\u0026gt; ready =\u0026gt; running You can\u0026rsquo;t go from ready to blocked because you have to do something to know you are blocked.\nscheduling \u0026ldquo;ready\u0026rdquo; threads The following assumes one core.\nTradeoffs:\nminimize time to a useful result\u0026mdash;(assumption: a \u0026ldquo;useful result\u0026rdquo; = a thread blocking or completes) using resources efficiently (keeping cores/disks busy) fairness (multiple users / many jobs for one users) Typically, we focus on (1); approaches that maximize useful results quickly is unfair beacuse you are prioritizing. 
We can measure this based on \u0026ldquo;average completion time\u0026rdquo;: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.\nfirst-come first-serve keep all threads in ready in a queue run the first thread on the front until it finishes/it blocks for however long repeat Problem: a thread can run away with the entire system, accidentally, through infinite loops\nround robin keep all threads in a round robin each thread can run for a set amount of time called a time slice (10ms or so) if a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin Problem: what\u0026rsquo;s a good time slice?\ntoo small: the overhead of context switching is higher than the overhead of running the program too large: threads can monopolize cores, can\u0026rsquo;t handle user input, etc. Linux uses 4ms. Generally, you want 5-10ms range.\nYou can think about this as dividing each time slot by time slices, and add as fcfs\nshortest remaining processing time Run first the thread in queue that will finish the most quickly and run it fully to competition.\nIt gives preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.\nTHIS IS not implementable\u0026mdash;-we can\u0026rsquo;t build this beacuse we have to know which thread will finish the most quickly, which we can\u0026rsquo;t because you have to solve the halting problem to know.\nOur goal, then is to get as close as possible to the performance of SRPT.\nProblem:\nwe don\u0026rsquo;t know which one will finish the most quickly if we have many threads and one long-running thread, the long running thread won\u0026rsquo;t be able to run ever priority based scheduling Key idea: behavior tends to be consistent in a thread. 
We build multiple priority queues to address this.\npriority based scheduling is an approximation of SRPT, using the past performance of the thread to estimate the running time of the thread. Over time, threads will move between priority queues, and we run the topmost thread from the highest priority queue\nthreads that aren\u0026rsquo;t using much CPU stay in higher priority queue threads that are using much CPU gets bumped down to lower priority queues Similar to SRPT, this also has the good property of giving preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.\nimplement based on time slice usage a thread always enters in the highest priority queue\nif the thread uses all of its time slice and didn\u0026rsquo;t exit, bump them down a priority queue if a thread blocked before it used all of its time slice, bump them up a priority queue implement based on aggregate time used: fixing neglect a thread has a number for \u0026ldquo;how much time did you use on the CPU recently\u0026rdquo;? The priories are sorted by that value, and the smallest time use will be ran.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e is the tool to figure out which thread can run. 
Because \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es exist in different \u003ca href=\"/posts/kbhprocess_control_block/#id-b4b86ccc-70f3-4d30-b437-2f5fff63b0e6-thread-state\"\u003ethread state\u003c/a\u003es:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003erunning\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eblockde\u003c/strong\u003e - waiting for an event like disk, network, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eready\u003c/strong\u003e - able to run, but not on CPU yet\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ea scheduler needs to do the task of ordering ready threads to run, moving running threads to ready when it has ran for enough time. Possible pathways:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eready =\u0026gt; running\u003c/li\u003e\n\u003cli\u003eblocked =\u0026gt; running\u003c/li\u003e\n\u003cli\u003eblocked =\u0026gt; ready =\u0026gt; running\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eYou can\u0026rsquo;t go from \u003cstrong\u003eready\u003c/strong\u003e to \u003cstrong\u003eblocked\u003c/strong\u003e because you have to \u003cem\u003edo something\u003c/em\u003e to know you are blocked.\u003c/p\u003e\n\u003ch2 id=\"scheduling--kbhscheduling-dot-md--ready-threads\"\u003e\u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e \u0026ldquo;ready\u0026rdquo; threads\u003c/h2\u003e\n\u003cp\u003eThe following assumes one core.\u003c/p\u003e\n\u003cp\u003eTradeoffs:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eminimize time to a useful result\u0026mdash;(\u003cstrong\u003eassumption\u003c/strong\u003e: a \u0026ldquo;useful result\u0026rdquo; = a thread blocking or completes)\u003c/li\u003e\n\u003cli\u003eusing resources efficiently (keeping cores/disks busy)\u003c/li\u003e\n\u003cli\u003efairness (multiple users / many jobs for one users)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTypically, we focus on (1); approaches that maximize useful results 
quickly is unfair beacuse you are prioritizing. We can measure this based on \u0026ldquo;average completion time\u0026rdquo;: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.\u003c/p\u003e\n\u003ch3 id=\"first-come-first-serve\"\u003efirst-come first-serve\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ekeep all threads in ready in a \u003cstrong\u003equeue\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erun the first thread on the front until it finishes/it blocks for however long\u003c/li\u003e\n\u003cli\u003erepeat\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e: a thread can run away with the entire system, accidentally, through infinite loops\u003c/p\u003e\n\u003ch3 id=\"round-robin\"\u003eround robin\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ekeep all threads in a \u003cstrong\u003eround robin\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eeach thread can run for a set amount of time called a \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e (10ms or so)\u003c/li\u003e\n\u003cli\u003eif a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e: what\u0026rsquo;s a good \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etoo small: the overhead of context switching is higher than the overhead of running the program\u003c/li\u003e\n\u003cli\u003etoo large: threads can monopolize cores, can\u0026rsquo;t handle user input, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLinux uses 4ms. 
Generally, you want 5-10ms range.\u003c/p\u003e\n\u003cp\u003eYou can think about this as dividing each time slot by time slices, and add as fcfs\u003c/p\u003e\n\u003ch3 id=\"shortest-remaining-processing-time\"\u003eshortest remaining processing time\u003c/h3\u003e\n\u003cp\u003eRun first the thread in queue that will finish the \u003cstrong\u003emost quickly\u003c/strong\u003e and run it \u003cstrong\u003efully to competition\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eIt \u003cstrong\u003egives preference to those that need it the least\u003c/strong\u003e: a good side effect is that it gives preference to \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e first, so we can wait on them during disk operations while \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eCPU Thread\u003c/a\u003es run after the \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e has ran.\u003c/p\u003e\n\u003cp\u003eTHIS IS \u003cstrong\u003enot implementable\u003c/strong\u003e\u0026mdash;-we can\u0026rsquo;t build this beacuse we have to know which thread will finish the most quickly, which we can\u0026rsquo;t because you have to solve the halting problem to know.\u003c/p\u003e\n\u003cp\u003eOur goal, then is to get as close as possible to the performance of \u003ca href=\"#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe don\u0026rsquo;t know which one will finish the most quickly\u003c/li\u003e\n\u003cli\u003eif we have many threads and one long-running thread, the long running thread won\u0026rsquo;t be able to run ever\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"priority-based-scheduling\"\u003epriority based scheduling\u003c/h3\u003e\n\u003cp\u003eKey idea: \u003cstrong\u003ebehavior tends to be consistent in a 
thread\u003c/strong\u003e. We build multiple \u003cstrong\u003epriority queues\u003c/strong\u003e to address this.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#priority-based-scheduling\"\u003epriority based scheduling\u003c/a\u003e is an approximation of \u003ca href=\"#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e, using the past performance of the thread to estimate the running time of the thread. Over time, \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es will move between priority queues, and we \u003cstrong\u003erun the topmost thread from the highest priority queue\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethreads that aren\u0026rsquo;t using much CPU stay in higher priority queue\u003c/li\u003e\n\u003cli\u003ethreads that are using much CPU gets bumped down to lower priority queues\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSimilar to \u003ca href=\"#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e, this also has the good property of \u003cstrong\u003egiving preference to those that need it the least\u003c/strong\u003e: a good side effect is that it gives preference to \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e first, so we can wait on them during disk operations while \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eCPU Thread\u003c/a\u003es run after the \u003ca href=\"/posts/kbhprocess_control_block/#io-vs-dot-cpu-bound\"\u003eI/O Bound Thread\u003c/a\u003e has ran.\u003c/p\u003e\n\u003ch4 id=\"implement-based-on-time-slice--org04deec4--usage\"\u003eimplement based on \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e usage\u003c/h4\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e always enters in the \u003cstrong\u003ehighest\u003c/strong\u003e priority queue\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif the \u003ca 
href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e uses all of its \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e and didn\u0026rsquo;t exit, bump them down a priority queue\u003c/li\u003e\n\u003cli\u003eif a \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e blocked before it used all of its \u003ca href=\"#round-robin\"\u003etime slice\u003c/a\u003e, bump them up a priority queue\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"implement-based-on-aggregate-time-used-fixing-neglect\"\u003eimplement based on aggregate time used: fixing neglect\u003c/h4\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e has a number for \u0026ldquo;how much time did you use on the CPU recently\u0026rdquo;? The priories are sorted by that value, and the smallest time use will be ran.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhscheduling/","tags":null,"title":"scheduling"},{"categories":null,"contents":"Search the knowledgbase.\n","html":"\u003cp\u003eSearch the knowledgbase.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/search/","tags":null,"title":"Search Results"},{"categories":null,"contents":"The second moment of area is a value which\u0026mdash;given an origin\u0026mdash;describes how point masses are distributed around that origin. (i.e. a number for how point masses are distributed). It is in units \\(m^{4}\\).\nTake, for instance, the following picture:\nWe have defined an origin at \\((0,0)\\) of the figure above. 
Furthermore, we have some \\(\\rho_{i}\\) which is the distance from that origin to each of the infinitesimal areas \\(\\dd{A}\\).\nThen, the second moment of area is defined as:\n\\begin{equation} I = \\iint_{R} \\rho^{2} \\dd{A} \\end{equation}\nThis\u0026hellip; would make sense.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e is a value which\u0026mdash;given an origin\u0026mdash;describes how point masses are distributed around that origin. (i.e. a number for how point masses are distributed). It is in units \\(m^{4}\\).\u003c/p\u003e\n\u003cp\u003eTake, for instance, the following picture:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-05_22-46-56_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eWe have defined an origin at \\((0,0)\\) of the figure above. Furthermore, we have some \\(\\rho_{i}\\) which is the distance from that origin to each of the infinitesimal areas \\(\\dd{A}\\).\u003c/p\u003e\n\u003cp\u003eThen, the \u003ca href=\"/posts/kbhsecond_moment_of_area/\"\u003esecond moment of area\u003c/a\u003e is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = \\iint_{R} \\rho^{2} \\dd{A}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis\u0026hellip; would make sense.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecond_moment_of_area/","tags":null,"title":"second moment of area"},{"categories":null,"contents":"the trick Here is a pretty ubiquitous trick to solve differential equations of the second order differential equations. 
It is used to change a second order differential equation to a First-Order Differential Equations.\nIf you have a differential equation of the shape:\n\\begin{equation} x^{\u0026rsquo;\u0026rsquo;} = f(x,x\u0026rsquo;) \\end{equation}\nthat, the second derivative is strictly a function between the first derivative value and the current value.\nWe are going to define a notation \\(x\u0026rsquo; = v\\), which makes sense.\nSo, we will describe:\n\\begin{equation} x^{\u0026rsquo;\u0026rsquo;} = \\dv{v}{t} = \\dv{v}{x} \\dv{x}{t} = v\\dv{v}{x} \\end{equation}\nSo therefore, we have:\n\\begin{equation} x^{\u0026rsquo;\u0026rsquo;} = v\\dv{v}{x} = f(x,v) \\end{equation}\nSo turns out, the original input \\(t\\) is, given a specific equation above, we have no need to know it.\nTo actually go about solving it, see solving homogeneous higher-order differential equations.\n","html":"\u003ch2 id=\"the-trick\"\u003ethe trick\u003c/h2\u003e\n\u003cp\u003eHere is a pretty ubiquitous trick to solve \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003es of the \u003ca href=\"/posts/kbhsecond_order_differential_equations/\"\u003esecond order differential equations\u003c/a\u003e. 
It is used to change a \u003ca href=\"/posts/kbhsecond_order_differential_equations/\"\u003esecond order differential equation\u003c/a\u003e to a \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf you have a \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{\u0026rsquo;\u0026rsquo;} = f(x,x\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat, the second derivative is strictly a function between the first derivative value and the current value.\u003c/p\u003e\n\u003cp\u003eWe are going to define a notation \\(x\u0026rsquo; = v\\), which makes sense.\u003c/p\u003e\n\u003cp\u003eSo, we will describe:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{\u0026rsquo;\u0026rsquo;} = \\dv{v}{t} = \\dv{v}{x} \\dv{x}{t} = v\\dv{v}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo therefore, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx^{\u0026rsquo;\u0026rsquo;} = v\\dv{v}{x} = f(x,v)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo turns out, the original input \\(t\\) is, given a specific equation above, we have no need to know it.\u003c/p\u003e\n\u003cp\u003eTo actually go about solving it, see \u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#solving-homogeneous-higher-order-differential-equations\"\u003esolving homogeneous higher-order differential equations\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecond_order_differential_equations/","tags":null,"title":"second order differential equation"},{"categories":null,"contents":"Here\u0026rsquo;s a general form:\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t) \\end{equation}\nsee:\nsolving homogeneous constant coefficient higher-order differential equations and more generally, using matrix exponentiation, solving homogeneous 
higher-order differential equations solving homogeneous higher-order differential equations This problem because easier if the right side is \\(0\\).\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0 \\end{equation}\nThe general goal to solve in this case is to make this a system of First-Order Differential Equations.\nTo do this, we begin by making:\n\\begin{equation} y = \\dv{x}{t} \\end{equation}\nTherefore, we can change the first equation:\n\\begin{equation} a \\dv{y}{t} + by + cx = 0 \\end{equation}\nSolving both of these conditions, we form a system of linear equations:\n\\begin{align} \u0026amp;\\dv{x}{t}=y \\\\ \u0026amp;\\dv{y}{t} = \\frac{-c}{a}x-\\frac{b}{a}y \\end{align}\nWe are now first-order, so we can put this into a matrix equation:\n\\begin{equation} \\dv t \\begin{pmatrix} x \\\\ y \\end{pmatrix} = \\begin{pmatrix} 0 \u0026amp; 1 \\\\ -\\frac{c}{a} \u0026amp; \\frac{-b}{a} \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \\end{equation}\nNow! We have an equation:\n\\begin{equation} \\dv{t}v = Av \\end{equation}\nThe result above shows that the transformations \\(\\dv{t}\\) and \\(A\\) are isomorphic. Therefore, we now attempt to characterize \\(A\\) to solve this expression.\nLet\u0026rsquo;s begin. We will first shove that \\(v\\) on top of the differential for aesthetics:\n\\begin{equation} \\dv{v}{t} = Av \\end{equation}\nThis expression is actually nicely seperable, so we shall endeavor to separate it:\n\\begin{equation} \\dd{v} = Av\\dd{t} \\end{equation}\nOf course, \\(v\\) is a function of \\(t\\). Therefore, the right side would be woefully complicated. Therefore, we shall do this handwavy thing where we go:\n\\begin{equation} \\frac{1}{v}\\dd{v} = A\\dd{t} \\end{equation}\nNow, \\(A\\) is not a function in \\(t\\) \u0026mdash; its just some constants! 
So, we can integrate this safely without much trouble:\n\\begin{equation} \\int \\frac{1}{v}\\dd{v} =\\int A\\dd{t} \\end{equation}\nTo get:\n\\begin{equation} \\ln v = t A + C \\end{equation}\nNote the order as \\(t\\) is a constant. Finally, we will invert the natural log and get \\(v\\) back:\n\\begin{equation} v = e^{tA+C} \\end{equation}\nExcellent. We will now apply some log/exponent laws:\n\\begin{equation} v = e^{tA}e^{C} = e^{tA}C \\end{equation}\nthis is so very handwavy. \\(C\\) is technically a vector here\u0026hellip; long story and iffy understanding\nOk, how do we go about solving \\(x\\)?\nNote now that \\(v=(x\\ y)\\), so we will expand that:\n\\begin{equation} \\begin{pmatrix} x \\\\ y \\end{pmatrix} = e^{tA}\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} \\end{equation}\nwhere, as we defined above \\(y=\\dv{x}{t}\\) (each integral needing a different constant.)\nNow. remember that \\(A\\) is diagonalizable; and so will \\(tA\\) (citation needed, but intuition is that scaling eigenvalues do nothing anyways). So, to make this exponentiation easier, we will diagonalize it.\nWe now have that\n\\begin{equation} e^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} \\end{equation}\n(how?)\nOk. Finally, we will take the binroller that is \u0026ldquo;constancy\u0026rdquo; and apply it to \\(e^{tA}\\). 
This took quite a bit of time for me to get, so feel free to take some time to get it too.\nThis all hinges upon the fact that \\(C\\) is a constant, so multiplying any constant to it still makes it \\(C\\).\nSo far, we have that:\n\\begin{equation} \\begin{pmatrix} x \\\\ y \\end{pmatrix} = e^{tA}\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} = \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} \\end{equation}\nRemember, now, that \\(v_1\\dots v_{}\\) and its inverses are nothing but vectors filled with a lot of scalars. And any scalar \\(\\alpha\\) times a constant still results in the (a new) constant: \\(\\alpha C =C\\). So, we will steamroll \\(\\mqty(x_0\u0026amp;y_0)\\) over the right side eigenbases matrix (multiplying a constant vector to any\u0026rsquo;ol matrix will just get a new set of constants back) to get:\n\\begin{align} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \u0026amp;= \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix} x_0 \\\\ y_0 \\end{pmatrix} \\\\ \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix} C_1 \\\\ C_2 \\end{pmatrix} \\end{align}\nNow, the middle thing has \\(t\\) in it! (the input!) So, we can\u0026rsquo;t just steamroll now. 
We have to preserve the middle part.\n\\begin{align} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix} C_1 \\\\ C_2 \\end{pmatrix} \\\\ \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix} C_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}} \\end{pmatrix} \\end{align}\nAnd finally, we keep steamrolling:\n\\begin{align} \\begin{pmatrix} x \\\\ y \\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix} C_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}} \\end{pmatrix}\\\\ \u0026amp;= \\mqty({C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}} \\\\ {C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}}) \\end{align}\nThere is absolutely no difference in nature between \\(C_{j_{x}}\\) and \\(C_{j_{y}}\\) except for the fact that they are different constants (which we got by multiplying \\(v_1 \\dots v_{m}\\)) to it.\nOk so:\n\\begin{equation} \\begin{cases} x = C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}\\\\ y = C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}\\\\ \\end{cases} \\end{equation}\nconstructing the characteristic equation, as desired.\nsolving homogeneous constant coefficient higher-order differential equations in the homogeneous case, we have some:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nand it arises that there\u0026rsquo;s a pair of solutions \\(y_1(t)\\) and \\(y_2(t)\\) whose linear combinations span the entire space of solutions. in fact, it arises as a solution to some functional quadratic equation \\(\\lambda^{2} + a\\lambda + b = 0\\).\nThe specific coefficients \\(c_1\\) and \\(c_2\\) of the linear combination arises out of the initial conditons, which is the same measurement given at the initial time and its derivative: \\(y(t_0)\\) and \\(y\u0026rsquo;(t_0)\\). 
It comes out of Linear Algebra why there is exactly two initial values.\nSpecifically, it arises out of solutions of the shape:\n\\begin{equation} y(t) = c_1 e^{\\lambda_{1}t} + c_2e^{\\lambda_{2}t} \\end{equation}\nwhere \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) are solutions to the characteristic polynomial above. For why exactly this is, see method of undetermined coefficients.\nfinding independent solutions of second-order constant-coefficient linear ODEs Given some:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nwe desire to find two independent solutions. After which, by superposition principle, we know that any linear combinations will yield a solution.\nAside, consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; = y \\end{equation}\nwe see that both \\(y=e^{t}\\) and \\(y=e^{-t}\\) are solutions. We can see that this is independent by setting up:\n\\begin{equation} c_1 e^{t} + c_2 e^{-t} = 0 \\end{equation}\nwhich, multiplying through by \\(e^{t}\\) and dividing, we obtain:\n\\begin{equation} e^{2t} = -\\frac{c_2}{c_1} \\end{equation}\nNow, the right side is constant, and the left is not. 
So the only way this can be true is if the right side is identically zero.\nLinear Shifts Consider the case where you are given initial conditions:\n\\begin{equation} \\begin{cases} y\u0026rsquo;\u0026rsquo; - y = 0 \\\\ y(5) = -2 \\\\ y\u0026rsquo;(5) = 5 \\end{cases} \\end{equation}\ninstead of bothering to solve this, we define:\n\\begin{equation} Y(t) = y(t+5) \\end{equation}\nand it still hold that:\n\\begin{equation} Y\u0026rsquo;\u0026rsquo; - Y = 0 \\end{equation}\nbecause the derivatives don\u0026rsquo;t actually change.\nThen, after solving, we can just translate it back:\n\\begin{equation} y(t) = Y(t-5) \\end{equation}\nSolution, more generally Consider:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0 \\end{equation}\nlet us guess that \\(y = e^{\\lambda t}\\)\nrecall that, in that case:\n\\begin{equation} \\begin{cases} y\u0026rsquo; = \\lambda e^{\\lambda t} \\\\ y\u0026rsquo;\u0026rsquo; = \\lambda^{2} e^{\\lambda t} \\end{cases} \\end{equation}\nplugging this back in:\n\\begin{equation} \\lambda^{2} e^{\\lambda t} + a \\lambda e^{\\lambda t} + b e^{\\lambda t} = 0 \\end{equation}\nwhich is:\n\\begin{equation} (\\lambda^{2} + a\\lambda +b ) e^{\\lambda t} = 0 \\end{equation}\nbecause the right side is never zero, we need the left side \\((\\lambda^{2} + a\\lambda +b )\\) is zero.\nNote that there exists three separate cases:\n\\(a^{2}-4b \u0026gt; 0\\), two exact solutions: \\(e^{\\lambda_{1}t}\\) and \\(e^{\\lambda_{2} t}\\), these two are independent functions as long as \\(\\lambda_{1} \\neq \\lambda_{2}\\) \\(a^{2}-4b \u0026lt; 0\\), which will yield imaginary solutions, recall Euler\u0026rsquo;s Equation, you can split \\(e^{ikx}\\) into a superposition of \\(\\cos (x) + i\\sin (x)\\), each of which individually is a solution. 
You can break this up into the case of some real \\(e^{-at}\\) multiplied by sinusoidal functions.\u0026mdash; whereby \\(e^{at} (\\cos(bt) \\pm i\\sin(bt))\\), we can break into two functions \\(y_1 = e^{at}\\cos (bt), y_2= e^{at}i \\sin (bt)\\). for \\(a^{2}-4b = 0\\), we yield some solution \\(e^{-\\frac{a}{2} t}\\), and the solution is \\(t e^{-\\frac{a}{2}t}\\). because this is the limit of the first solution \\(\\lim_{\\lambda_{2} \\to \\lambda_{1}}\\frac{e^{\\lambda_{2}t} - e^{\\lambda_{1}t}}{\\lambda_{2} - \\lambda_{1}}\\) All 2nd order solution is a linear combination\nIn fact, all solutions carry the form of the two solutions:\n\\begin{equation} c_1 y_1(t) + c_2 y_2(t) = y(t) \\end{equation}\nThis is because, consider the initial form \\(y_1(t_0)\\), and \\(y_2(t_0)\\):\n\\begin{equation} \\begin{cases} y_1(t_0) c_1 + y_2(t_0) c_2 = y(t_0) \\\\ y_1\u0026rsquo;(t_0) c_1 + y_2\u0026rsquo;(t_0) c_2 = y\u0026rsquo;(t_0) \\\\ \\end{cases} \\end{equation}\nThis is the same as the matrix equation:\n\\begin{equation} \\mqty(y_1(t_0) \u0026amp; y_2(t_0) \\\\ y_1\u0026rsquo;(t_0) \u0026amp; y_2\u0026rsquo;(t_0)) \\mqty(c_1 \\\\ c_2) = \\mqty(y(t_0) \\\\ y\u0026rsquo;(t_0)) \\end{equation}\nSo, this map is surjective.\nUniqueness and Existance of second order The uniqueness is also guaranteed with one and exactly one solution exist for every initial condition of an IVP. Unlike first order ODE, solutions can cross: because the uniq and exi. is only guaranteed for the same point AND slope (i.e. the initial condition).\nSo solutions can cross, they just can\u0026rsquo;t be tangent.\nmethod of undetermined coefficients Ok. This mechanism hinges upon the fact that linear combinations of differential equation solutions are solutions themselves. 
You can show this to yourself by illustrating diffeq solutions as subspaces of F^S, which are linear objects.\nTherefore, for a non-homogeneous second-order linear equation, we attempt to find two sets of solutions\u0026mdash;\nnamely, the general solution to the homogeneous case (using method above):\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0 \\end{equation}\nas well attempting to fit particular solutions to the general case:\n\\begin{equation} a\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t) \\end{equation}\nthe linear combination of both solutions would construct the final solution space.\nWe already know how to do step 1\u0026mdash;solve homogeneous higher-order differential equations\u0026mdash;so we won\u0026rsquo;t harp on it here. However, how do we find particular solutions to the general equations?\nWell, we guess! Here\u0026rsquo;s a general table to help illustrate how:\n\\(f(t)\\) \\(x(t)\\) \\(ae^{bt}\\) \\(Ae^{bt}\\) \\(a \\cos (ct) + b\\sin (ct)\\) \\(A\\cos(ct) + B\\sin (ct)\\) \\(kt^{n}\\) \\(A_{n}t^{n} + A_{n-1}t^{n-1} \\dots + A_{0}\\) you can show these to yourself by taking derivatives. \\(a,b,c, k,A,B\\) are distinct constants.\nNow, once you make an educated guess for what \\(x(t)\\) is, perhaps aided by the homogeneous solution, you would take the number of derivatives needed to plug it back to the original expression. 
Then, equate the left expression and right \\(f(t)\\) and match coefficients of equal-degree terms to solve for the final constants \\(A\\), \\(B\\), etc.\nAfter you finally got the specific solution for \\(A\\) and \\(B\\) , we add the degree of freedom back by adding the homogeneous solution in.\nLook for \u0026ldquo;Example 1 (again)\u0026rdquo; on this page (silly, I know, but worth it) to see end-to-end such a solution.\n","html":"\u003cp\u003eHere\u0026rsquo;s a general form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esee:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"#solving-homogeneous-constant-coefficient-higher-order-differential-equations\"\u003esolving homogeneous constant coefficient higher-order differential equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eand more generally, using \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e, \u003ca href=\"#solving-homogeneous-higher-order-differential-equations\"\u003esolving homogeneous higher-order differential equations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"solving-homogeneous-higher-order-differential-equations\"\u003esolving homogeneous higher-order differential equations\u003c/h2\u003e\n\u003cp\u003eThis problem becomes easier if the right side is \\(0\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe general goal to solve in this case is to make this a system of \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eTo do this, we begin by making:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\dv{x}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, we can change the first 
equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na \\dv{y}{t} + by + cx = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSolving both of these conditions, we form a system of linear equations:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\dv{x}{t}=y \\\\\n\u0026amp;\\dv{y}{t} = \\frac{-c}{a}x-\\frac{b}{a}y\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe are now first-order, so we can put this into a matrix equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t \\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = \\begin{pmatrix}\n0 \u0026amp; 1 \\\\ -\\frac{c}{a} \u0026amp; \\frac{-b}{a}\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow! We have an equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{t}v = Av\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe result above shows that the transformations \\(\\dv{t}\\) and \\(A\\) are isomorphic. Therefore, we now attempt to characterize \\(A\\) to solve this expression.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s begin. We will first shove that \\(v\\) on top of the differential for aesthetics:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{v}{t} = Av\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis expression is actually nicely \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/#solving-separable-differential-equations\"\u003eseperable\u003c/a\u003e, so we shall endeavor to separate it:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dd{v} = Av\\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOf course, \\(v\\) is a function of \\(t\\). Therefore, the right side would be woefully complicated. Therefore, we shall do this handwavy thing where we go:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{v}\\dd{v} = A\\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, \\(A\\) is not a function in \\(t\\) \u0026mdash; its just some constants! 
So, we can integrate this safely without much trouble:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{v}\\dd{v} =\\int A\\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln v = t A + C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNote the order as \\(t\\) is a constant. Finally, we will invert the natural log and get \\(v\\) back:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = e^{tA+C}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eExcellent. We will now apply some \u003ca href=\"/posts/kbhlog_laws/\"\u003elog/exponent laws\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv = e^{tA}e^{C} = e^{tA}C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cdel\u003ethis is so very handwavy. \\(C\\) is technically a vector here\u0026hellip; long story and iffy understanding\u003c/del\u003e\u003c/p\u003e\n\u003cp\u003eOk, how do we go about solving \\(x\\)?\u003c/p\u003e\n\u003cp\u003eNote now that \\(v=(x\\ y)\\), so we will expand that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = e^{tA}\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, as we defined above \\(y=\\dv{x}{t}\\) (each integral needing a different constant.)\u003c/p\u003e\n\u003cp\u003eNow. remember that \\(A\\) is diagonalizable; and so will \\(tA\\) (citation needed, but intuition is that scaling eigenvalues do nothing anyways). So, to make this exponentiation easier, we will diagonalize it.\u003c/p\u003e\n\u003cp\u003eWe now have that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{tA} = \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\u003ca href=\"/posts/kbhraising_e_to_a_matrix/\"\u003ehow?\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eOk. 
Finally, we will take the binroller that is \u0026ldquo;constancy\u0026rdquo; and apply it to \\(e^{tA}\\). This took quite a bit of time for me to get, so feel free to take some time to get it too.\u003c/p\u003e\n\u003cp\u003eThis all hinges upon the fact that \\(C\\) is a constant, so multiplying any constant to it still makes it \\(C\\).\u003c/p\u003e\n\u003cp\u003eSo far, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} = e^{tA}\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix} = \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRemember, now, that \\(v_1\\dots v_{}\\) and its inverses are nothing but vectors filled with a lot of scalars. And any scalar \\(\\alpha\\) times a constant still results in the (a new) constant: \\(\\alpha C =C\\). So, we will steamroll \\(\\mqty(x_0\u0026amp;y_0)\\) over the right side eigenbases matrix (multiplying a constant vector to any\u0026rsquo;ol matrix will just get a new set of constants back) to get:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} \u0026amp;= \\qty(\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}})\\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})^{-1} )\\begin{pmatrix}\nx_0 \\\\ y_0\n\\end{pmatrix} \\\\\n\u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix}\nC_1 \\\\ C_2\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eNow, the middle thing has \\(t\\) in it! (the input!) So, we can\u0026rsquo;t just steamroll now. 
We have to preserve the middle part.\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m})\\mqty(\\dmat{e^{t\\lambda_{1}}, \\dots, e^{t\\lambda_{m}}}) \\begin{pmatrix}\nC_1 \\\\ C_2\n\\end{pmatrix} \\\\\n\u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix}\nC_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}}\n\\end{pmatrix}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd finally, we keep steamrolling:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\begin{pmatrix}\nx \\\\ y\n\\end{pmatrix} \u0026amp;= \\mqty(v_1\u0026amp; \\dots\u0026amp; v_{m}) \\begin{pmatrix}\nC_1 e^{t\\lambda_{1}} \\\\ C_2 e^{t\\lambda_{2}} \\end{pmatrix}\\\\\n\u0026amp;= \\mqty({C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}} \\\\ {C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}})\n\\end{align}\u003c/p\u003e\n\u003cp\u003eThere is absolutely no difference in nature between \\(C_{j_{x}}\\) and \\(C_{j_{y}}\\) except for the fact that they are \u003cem\u003edifferent\u003c/em\u003e constants (which we got by multiplying \\(v_1 \\dots v_{m}\\)) to it.\u003c/p\u003e\n\u003cp\u003eOk so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx = C_{1_{x}} e^{t\\lambda_{1}} + C_{2_{x}} e^{t\\lambda_{2}}\\\\\ny = C_{1_{y}} e^{t\\lambda_{1}} + C_{2_{y}} e^{t\\lambda_{2}}\\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003econstructing the characteristic equation, as desired.\u003c/p\u003e\n\u003ch2 id=\"solving-homogeneous-constant-coefficient-higher-order-differential-equations\"\u003esolving homogeneous constant coefficient higher-order differential equations\u003c/h2\u003e\n\u003cp\u003ein the homogeneous case, we have some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand it arises that there\u0026rsquo;s a pair of solutions \\(y_1(t)\\) and \\(y_2(t)\\) whose 
linear combinations span the entire space of solutions. in fact, it arises as a solution to some functional quadratic equation \\(\\lambda^{2} + a\\lambda + b = 0\\).\u003c/p\u003e\n\u003cp\u003eThe specific coefficients \\(c_1\\) and \\(c_2\\) of the linear combination arises out of the initial conditons, which is the same measurement given at the initial time and its derivative: \\(y(t_0)\\) and \\(y\u0026rsquo;(t_0)\\). It comes out of \u003ca href=\"/posts/kbhlinear_algebra_errors/\"\u003eLinear Algebra\u003c/a\u003e why there is exactly two initial values.\u003c/p\u003e\n\u003cp\u003eSpecifically, it arises out of solutions of the shape:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = c_1 e^{\\lambda_{1}t} + c_2e^{\\lambda_{2}t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) are solutions to the characteristic polynomial above. For why exactly this is, see \u003ca href=\"#method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"finding-independent--kbhprobability-dot-md--solutions-of-second-order-constant-coefficient-linear-odes\"\u003efinding \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e solutions of second-order constant-coefficient linear ODEs\u003c/h3\u003e\n\u003cp\u003eGiven some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe desire to find two \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e solutions. 
After which, by \u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e, we know that any \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es will yield a solution.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAside, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; = y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe see that both \\(y=e^{t}\\) and \\(y=e^{-t}\\) are solutions. We can see that this is independent by setting up:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 e^{t} + c_2 e^{-t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich, multiplying through by \\(e^{t}\\) and dividing, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ne^{2t} = -\\frac{c_2}{c_1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, the right side is constant, and the left is not. So the only way this can be true is if the right side is identically zero.\u003c/p\u003e\n\u003ch4 id=\"linear-shifts\"\u003eLinear Shifts\u003c/h4\u003e\n\u003cp\u003eConsider the case where you are given initial conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo;\u0026rsquo; - y = 0 \\\\\ny(5) = -2 \\\\\ny\u0026rsquo;(5) = 5\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003einstead of bothering to solve this, we define:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY(t) = y(t+5)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand it still hold that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY\u0026rsquo;\u0026rsquo; - Y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the derivatives don\u0026rsquo;t actually change.\u003c/p\u003e\n\u003cp\u003eThen, after solving, we can just translate it back:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = Y(t-5)\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"solution-more-generally\"\u003eSolution, more 
generally\u003c/h4\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; + ay\u0026rsquo; + by = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003elet us guess that \\(y = e^{\\lambda t}\\)\u003c/p\u003e\n\u003cp\u003erecall that, in that case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo; = \\lambda e^{\\lambda t} \\\\\ny\u0026rsquo;\u0026rsquo; = \\lambda^{2} e^{\\lambda t}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eplugging this back in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda^{2} e^{\\lambda t} + a \\lambda e^{\\lambda t} + b e^{\\lambda t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(\\lambda^{2} + a\\lambda +b ) e^{\\lambda t} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the right side is never zero, we need the left side \\((\\lambda^{2} + a\\lambda +b )\\) is zero.\u003c/p\u003e\n\u003cp\u003eNote that there exists three separate cases:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(a^{2}-4b \u0026gt; 0\\), two exact solutions: \\(e^{\\lambda_{1}t}\\) and \\(e^{\\lambda_{2} t}\\), these two are independent functions as long as \\(\\lambda_{1} \\neq \\lambda_{2}\\)\u003c/li\u003e\n\u003cli\u003e\\(a^{2}-4b \u0026lt; 0\\), which will yield imaginary solutions, recall \u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e, you can split \\(e^{ikx}\\) into a superposition of \\(\\cos (x) + i\\sin (x)\\), each of which individually is a solution. You can break this up into the case of some real \\(e^{-at}\\) multiplied by sinusoidal functions.\u0026mdash; whereby \\(e^{at} (\\cos(bt) \\pm i\\sin(bt))\\), we can break into two functions \\(y_1 = e^{at}\\cos (bt), y_2= e^{at}i \\sin (bt)\\).\u003c/li\u003e\n\u003cli\u003efor \\(a^{2}-4b = 0\\), we yield some solution \\(e^{-\\frac{a}{2} t}\\), and the solution is \\(t e^{-\\frac{a}{2}t}\\). 
because this is the limit of the first solution \\(\\lim_{\\lambda_{2} \\to \\lambda_{1}}\\frac{e^{\\lambda_{2}t} - e^{\\lambda_{1}t}}{\\lambda_{2} - \\lambda_{1}}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eAll 2nd order solution is a linear combination\u003c/p\u003e\n\u003cp\u003eIn fact, all solutions carry the form of the two solutions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_1 y_1(t) + c_2 y_2(t) = y(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is because, consider the initial form \\(y_1(t_0)\\), and \\(y_2(t_0)\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny_1(t_0) c_1 + y_2(t_0) c_2 = y(t_0) \\\\\ny_1\u0026rsquo;(t_0) c_1 + y_2\u0026rsquo;(t_0) c_2 = y\u0026rsquo;(t_0) \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is the same as the matrix equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(y_1(t_0) \u0026amp; y_2(t_0) \\\\ y_1\u0026rsquo;(t_0) \u0026amp; y_2\u0026rsquo;(t_0)) \\mqty(c_1 \\\\ c_2) = \\mqty(y(t_0) \\\\ y\u0026rsquo;(t_0))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, this map is surjective.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"uniqueness-and-existance--kbhuniqueness-and-existance-dot-md--of-second-order\"\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e of second order\u003c/h4\u003e\n\u003cp\u003eThe uniqueness is also guaranteed with \u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-initial-condition-of-an-ivp\"\u003eone and exactly one solution exist for every initial condition of an IVP\u003c/a\u003e. Unlike first order ODE, solutions can cross: because the uniq and exi. is only guaranteed for the same \u003cstrong\u003epoint\u003c/strong\u003e AND \u003cstrong\u003eslope\u003c/strong\u003e (i.e. 
the initial condition).\u003c/p\u003e\n\u003cp\u003eSo solutions can cross, they just can\u0026rsquo;t be tangent.\u003c/p\u003e\n\u003ch2 id=\"method-of-undetermined-coefficients\"\u003emethod of undetermined coefficients\u003c/h2\u003e\n\u003cp\u003eOk. This mechanism hinges upon the fact that \u003cstrong\u003elinear combinations of differential equation solutions are solutions themselves\u003c/strong\u003e. You can show this to yourself by illustrating diffeq solutions as \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \u003ca href=\"/posts/kbhfs_is_a_vector_space/\"\u003eF^S\u003c/a\u003e, which are linear objects.\u003c/p\u003e\n\u003cp\u003eTherefore, for a non-homogeneous second-order linear equation, we attempt to find two sets of solutions\u0026mdash;\u003c/p\u003e\n\u003cp\u003enamely, the general solution to the homogeneous case (using method above):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas well attempting to fit \u003cstrong\u003eparticular\u003c/strong\u003e solutions to the general case:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na\\dv[2]{x}{t} + b \\dv{x}{t} + cx = f(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe linear combination of both solutions would construct the final solution space.\u003c/p\u003e\n\u003cp\u003eWe already know how to do step 1\u0026mdash;\u003ca href=\"#solving-homogeneous-higher-order-differential-equations\"\u003esolve homogeneous higher-order differential equations\u003c/a\u003e\u0026mdash;so we won\u0026rsquo;t harp on it here. However, how do we find \u003cem\u003eparticular\u003c/em\u003e solutions to the general equations?\u003c/p\u003e\n\u003cp\u003eWell, we guess! 
Here\u0026rsquo;s a general table to help illustrate how:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\\(f(t)\\)\u003c/th\u003e\n\u003cth\u003e\\(x(t)\\)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\\(ae^{bt}\\)\u003c/td\u003e\n\u003ctd\u003e\\(Ae^{bt}\\)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\\(a \\cos (ct) + b\\sin (ct)\\)\u003c/td\u003e\n\u003ctd\u003e\\(A\\cos(ct) + B\\sin (ct)\\)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\\(kt^{n}\\)\u003c/td\u003e\n\u003ctd\u003e\\(A_{n}t^{n} + A_{n-1}t^{n-1} \\dots + A_{0}\\)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eyou can show these to yourself by taking derivatives. \\(a,b,c, k,A,B\\) are distinct constants.\u003c/p\u003e\n\u003cp\u003eNow, once you make an educated guess for what \\(x(t)\\) is, perhaps aided by the homogeneous solution, you would take the number of derivatives needed to plug it back to the original expression. 
Then, equate the left expression and right \\(f(t)\\) and match \u003ca href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003es of equal-degree terms to solve for the final constants \\(A\\), \\(B\\), etc.\u003c/p\u003e\n\u003cp\u003eAfter you finally got the specific solution for \\(A\\) and \\(B\\) , we add the degree of freedom back by adding the homogenous solution in.\u003c/p\u003e\n\u003cp\u003eLook for \u0026ldquo;Example 1 (again)\u0026rdquo; on \u003ca href=\"https://www.mathsisfun.com/calculus/differential-equations-undetermined-coefficients.html\"\u003ethis page\u003c/a\u003e (silly, I know, but worth it) to see end-to-end such a solution.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsecond_order_linear_differential_equation/","tags":null,"title":"Second-Order Linear Differential Equations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhselective_service_system/","tags":null,"title":"Selective Service System"},{"categories":null,"contents":"The principle of semantic accountability claims that a good grammar should be able to say \u0026ldquo;something explicit about how the abstractions of the grammar match with actual meaning\u0026rdquo;\n","html":"\u003cp\u003eThe principle of \u003ca href=\"/posts/kbhsemantic_accountability/\"\u003esemantic accountability\u003c/a\u003e claims that a good \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e should be able to say \u0026ldquo;something explicit about how the abstractions of the \u003ca href=\"/posts/kbhgrammar/\"\u003egrammar\u003c/a\u003e match with actual meaning\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_accountability/","tags":null,"title":"semantic accountability"},{"categories":null,"contents":"Represent health information in terms of rule-based ontologies.\n","html":"\u003cp\u003eRepresent health information in terms of rule-based 
ontologies.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_health_risk_prediction/","tags":null,"title":"Semantic Health Risk Prediction"},{"categories":null,"contents":"In NSM, semantic primes are the most fundamental \u0026ldquo;lexical units\u0026rdquo; (so they can be words, or morphemes, etc. the size doesn\u0026rsquo;t matter) across languages.\nThey are the \u0026ldquo;core of a universal mental lexicon\u0026rdquo;.\nThere are\u0026hellip;\nguidelines for identifying semantic primes A semantic prime has to be found in every(ish?) natural language A semantic prime has to be indefinable by other primes proof for the existence of semantic primes Proof: given if the Strong Lexicalization Hypothesis holds, semantic primes must exist.\nAssume for the sake of contradiction no semantic primes exist.\nBecause Strong Lexicalization Hypothesis holds, there does not exist syntactic transformations which can take original single words and transform them into newly lexicalized words to express a different meaning.\nAt the same time, again because of the Strong Lexicalization Hypothesis, one must only leverage syntactic transformation on syntactic constituents when forming ideas.\nTherefore, given a word to lexicalize, it has to be defined by a syntactic transformation on a set of previously lexicalized words.\n(by definition) there are no words lexicalizable from the empty set of words.\nTherefore, there exists some word that needs to be lexicalized by words that are not previously defined, which is absurd. (instead, these words are lexicalized via semantic primes.)\nQED\nproblems with semantic primes the list has grown over time the problem of allolexy: formal restrictions of a language resulting in the same concept needing to be radicalized multiple times (I vs. me) finding semantic primes According to (Geeraerts 2009), (Goddard 2009) provides a \u0026ldquo;practical\u0026rdquo; (though flawed) way of establishing primes. 
Something to do with large-scale comparisons in \u0026ldquo;whole metalanguage studies\u0026rdquo;, which requires pairwise language comparison\nLocating primes are seen as an enforcement of NSM theories (Vanhatalo, Tissari, and Idström, n.d.). Recent prime locations: in Amharic (Amberber 2008), East Cree (Junker 2008), French (Peeters 1994), Japanese (Onishi 1994), Korean (Yoon 2008), Lao (Enfield 2002), Mandarin (Chappell 2002), Mangaaba-Mbula (Bugenhagen 2002), Malay (Goddard 2002), Polish (Wierzbicka 2002), Russian (Gladkova 2010, for the latest set, see the NSM home page), Spanish (Travis 2002), and Thai (Diller 1994).\n","html":"\u003cp\u003eIn \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e, \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic primes\u003c/a\u003e are the most fundimental \u0026ldquo;lexical units\u0026rdquo; (so they can be words, or morphemes, etc. the \u003cem\u003esize\u003c/em\u003e doesn\u0026rsquo;t matter) across languages.\u003c/p\u003e\n\u003cp\u003eThey are the \u0026ldquo;core of a universal mental lexicon\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eThere are\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"guidelines-for-identifying-semantic-primes--kbhsemantic-primes-dot-md\"\u003eguidelines for identifying \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic primes\u003c/a\u003e\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003e has to be found in every(ish?) 
natural language\u003c/li\u003e\n\u003cli\u003eA \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003e has to be indefinable by other primes\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"proof-for-the-existence-of-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003eproof for the existence of \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eProof: given if the \u003ca href=\"/posts/kbhlexicalization_hypothesis/#strong-id-09e74661-d561-4cec-af0f-84c82c0367a1-lexicalization-hypothesis\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e holds, \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es must exist.\u003c/p\u003e\n\u003cp\u003eAssume for the sake of contradiction no \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es exist.\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhlexicalization_hypothesis/#strong-id-09e74661-d561-4cec-af0f-84c82c0367a1-lexicalization-hypothesis\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e holds, there does not exist syntactic transformations which can take original single words and transform them into newly lexicalized words to express a different meaning.\u003c/p\u003e\n\u003cp\u003eAt the same time, again because of the \u003ca href=\"/posts/kbhlexicalization_hypothesis/#strong-id-09e74661-d561-4cec-af0f-84c82c0367a1-lexicalization-hypothesis\"\u003eStrong Lexicalization Hypothesis\u003c/a\u003e, one must only leverage syntactic transformation on \u003ca href=\"\"\u003esyntatic constituents\u003c/a\u003e when forming ideas.\u003c/p\u003e\n\u003cp\u003eTherefore, given a word to lexicalize, it has to be defined by an syntatic transformation on a set of previously lexicalized words.\u003c/p\u003e\n\u003cp\u003e(by definition) there are no words lexicalizable from the empty set of words.\u003c/p\u003e\n\u003cp\u003eTherefore, there exists some word that needs to be lexicalized by 
words that are not previously defined, which is absurd. (instead, these words are lexicalized via semantic primes.)\u003c/p\u003e\n\u003cp\u003eQED\u003c/p\u003e\n\u003ch2 id=\"problems-with-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003eproblems with \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ethe list has grown over time\u003c/li\u003e\n\u003cli\u003ethe problem of \u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"#problems-with-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003eallolexy\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e: formal restrictions of a language resulting in the same concept needing to be radicalized multiple times (I vs. me)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"finding-semantic-prime--kbhsemantic-primes-dot-md--s\"\u003efinding \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es\u003c/h2\u003e\n\u003cp\u003eAccording to (\u003ca href=\"#citeproc_bib_item_1\"\u003eGeeraerts 2009\u003c/a\u003e), (\u003ca href=\"#citeproc_bib_item_2\"\u003eGoddard 2009\u003c/a\u003e) provides a \u0026ldquo;practical\u0026rdquo; (though flawed) way of establishing primes. Something to do with large-scale comparisons in \u0026ldquo;\u003ca href=\"/posts/kbhwhole_metalanguage_study/\"\u003ewhole metalanguage studies\u003c/a\u003e\u0026rdquo;, which requires pairwise language comparison\u003c/p\u003e\n\u003cp\u003eLocating primes are seen as an enforcement of \u003ca href=\"/posts/kbhnatural_semantic_metalanguage/\"\u003eNSM\u003c/a\u003e theories (\u003ca href=\"#citeproc_bib_item_3\"\u003eVanhatalo, Tissari, and Idström, n.d.\u003c/a\u003e). 
Recent prime locations: in Amharic (Amberber 2008), East Cree (Junker 2008), French (Peeters 1994), Japanese (Onishi 1994), Korean (Yoon 2008), Lao (Enfield 2002), Mandarin (Chappell 2002), Mangaaba-Mbula (Bugenhagen 2002), Malay (Goddard 2002), Polish (Wierzbicka 2002), Russian (Gladkova 2010, for the latest set, see the NSM home page), Spanish (Travis 2002), and Thai (Diller 1994).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_primes/","tags":null,"title":"semantic prime"},{"categories":null,"contents":"SVF is a standardized Discourse-Completion Task for verbal recall and fluency. It is administered by asking the participant to recall a bunch of words from within a category within 60 seconds.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsemantic_verbal_fluency/\"\u003eSVF\u003c/a\u003e is a standardized \u003ca href=\"/posts/kbhdiscourse_completion_task/\"\u003eDiscourse-Completion Task\u003c/a\u003e for verbal recall and fluency. It is administered by asking the participant to recall a bunch of words from within a category within 60 seconds.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemantic_verbal_fluency/","tags":null,"title":"Semantic Verbal Fluency"},{"categories":null,"contents":"The semiconductor industry is a growing industry, the beginning of the semiconductor industry was actually in the silicon valley.\nWe are now taking a look at a reticle.\nalgorithms used in the semiconductor industry Per KLA \u0026mdash;\nClassification Random forest Boosted decision trees MLPs CNNs Reference generation GANs (WAT) VAEs Natural Grouping and Clustering auto-encoders manual feature extractors ","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsemiconductor/\"\u003esemiconductor\u003c/a\u003e industry is a growing industry, the beginning of the \u003ca href=\"/posts/kbhsemiconductor/\"\u003esemiconductor\u003c/a\u003e industry was actually in the silicon valley.\u003c/p\u003e\n\u003cp\u003eWe are now taking a look at a 
\u003ca href=\"/posts/kbhreticle/\"\u003ereticle\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"algorithms-used-in-the-semiconductor-industry\"\u003ealgorithms used in the semiconductor industry\u003c/h2\u003e\n\u003cp\u003ePer \u003ca href=\"/posts/kbhkla/\"\u003eKLA\u003c/a\u003e \u0026mdash;\u003c/p\u003e\n\u003ch3 id=\"classification\"\u003eClassification\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eRandom forest\u003c/li\u003e\n\u003cli\u003eBoosted decision trees\u003c/li\u003e\n\u003cli\u003eMLPs\u003c/li\u003e\n\u003cli\u003eCNNs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"reference-generation\"\u003eReference generation\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eGANs (WAT)\u003c/li\u003e\n\u003cli\u003eVAEs\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"natural-grouping-and-clustering\"\u003eNatural Grouping and Clustering\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eauto-encoders\u003c/li\u003e\n\u003cli\u003emanual feature extractors\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsemiconductor/","tags":null,"title":"semiconductor"},{"categories":null,"contents":"sense is the meaning of the word.\nsynonymy synonymy\u0026mdash;using synonyms as a proxy for word meaning.\nBut! There are probably no examples of perfect synonymy: two synonyms have slightly different sense (\u0026ldquo;my big sister\u0026rdquo; != \u0026ldquo;my large sister\u0026rdquo;)\nword relatedness not synonyms, but closeness in utility or semantic frame:\ncoffee + tea \u0026mdash; similar coffee + cup \u0026mdash; related, but not similar semantic field words that relate by covering a similar semantic domain:\ne.g.: hospital - surgeon, scapel, nurse\nantonyms antonyms can be binaries at opposite ends of a related semantic field\naffective meaning word meaning that relate to the affect, emotion, etc. of the speaker which doesn\u0026rsquo;t relate to literal meaning (replica vs. 
knockoff)\nvalence: pleasantness of stimulus arousal: intensity of the emotion provoked by stimulus dominance: degree of control exerted by the stimulus principle of contrast \u0026ldquo;a difference in form results in a difference in meaning\u0026rdquo;\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e is the meaning of the word.\u003c/p\u003e\n\u003ch2 id=\"synonymy\"\u003esynonymy\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#synonymy\"\u003esynonymy\u003c/a\u003e\u0026mdash;using synonyms as a proxy for word meaning.\u003c/p\u003e\n\u003cp\u003eBut! There are probably no examples of perfect \u003ca href=\"#synonymy\"\u003esynonymy\u003c/a\u003e: two synonyms have slightly different \u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e (\u0026ldquo;my big sister\u0026rdquo; != \u0026ldquo;my large sister\u0026rdquo;)\u003c/p\u003e\n\u003ch2 id=\"word-relatedness\"\u003eword relatedness\u003c/h2\u003e\n\u003cp\u003enot synonyms, but closeness in utility or semantic frame:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecoffee + tea \u0026mdash; similar\u003c/li\u003e\n\u003cli\u003ecoffee + cup \u0026mdash; related, but not similar\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"semantic-field\"\u003esemantic field\u003c/h3\u003e\n\u003cp\u003ewords that relate by covering a similar semantic domain:\u003c/p\u003e\n\u003cp\u003ee.g.: \u003cstrong\u003ehospital\u003c/strong\u003e - surgeon, scapel, nurse\u003c/p\u003e\n\u003ch2 id=\"antonyms\"\u003eantonyms\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#antonyms\"\u003eantonyms\u003c/a\u003e can be binaries at opposite ends of a related \u003ca href=\"#semantic-field\"\u003esemantic field\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"affective-meaning\"\u003eaffective meaning\u003c/h2\u003e\n\u003cp\u003eword meaning that relate to the affect, emotion, etc. of the speaker which doesn\u0026rsquo;t relate to literal meaning (replica vs. 
knockoff)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003evalence\u003c/strong\u003e: pleasantness of stimulus\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003earousal\u003c/strong\u003e: intensity of the emotion provoked by stimulus\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edominance\u003c/strong\u003e: degree of control exerted by the stimulus\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"principle-of-contrast\"\u003eprinciple of contrast\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;a difference in form results in a difference in meaning\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsense/","tags":null,"title":"sense"},{"categories":null,"contents":"Common algorithm\ntokenization classier for whether a period token is a part of a word or the sentence boundary ","html":"\u003cp\u003eCommon algorithm\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eclassier for whether a period token is a part of a word or the sentence boundary\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsentence_segmentation/","tags":null,"title":"sentence segmentation"},{"categories":null,"contents":"\\begin{equation} \\dv{y}{t} = a(t)f(y) \\end{equation}\nare a class of functions are called seperable. 
We can solve them using the division method\ndivision method the division method involves solving autonomous ODEs by dividing and treating it normally:\n\\begin{equation} y\u0026rsquo; = 8y \\end{equation}\n\\begin{equation} \\frac{y\u0026rsquo;}{8} = y \\end{equation}\nwe now write something fishy:\n\\begin{equation} \\frac{\\dd{y}}{y} = 8 \\dd{t} \\end{equation}\nwe now take the antiderivative of this:\n\\begin{equation} \\int \\frac{1}{y} \\dd{y} = \\int 8 \\dd{t} \\end{equation}\nWe will get that:\n\\begin{equation} \\ln |y| = 8t + C \\end{equation}\nwe finally get:\n\\begin{equation} |y| = e^{C} e^{8t} \\end{equation}\ngetting rid of that absolute value:\n\\begin{align} y \u0026amp;= \\pm e^{C} e^{8t} \\\\ \u0026amp;= K e^{8t} \\end{align}\nplaces where this breaks down sometimes, \\(\\frac{1}{f(y)}\\) may not have a nice antiderivative sometimes, \\(G(y)\\), the antidepressant, may not be nicely invertible general solution to y\u0026rsquo;(t) = ry(t) generally, for \\(r \\in \\mathbb{R}\\), the solution to \\(y\u0026rsquo;(t) = ry(t)\\) is at \\(y(t)=y_0e^{rt}\\), where \\(y_0 = y(0)\\).\nfor autonomous ODEs for which \\(ry(t) = f(y)\\), we have that:\n\\begin{equation} \\dv{y}{x} = ry(x) \\end{equation}\nwhich means:\n\\begin{equation} \\frac{1}{y(x)} \\dd{y} = r\\dd{x} \\end{equation}\nand so:\n\\begin{equation} \\ln \\qty| y(x) | = rx +C \\end{equation}\nand hence:\n\\begin{equation} y(x) = K e^{rx} \\end{equation}\nplugging in \\(x=0\\), yields \\(y(0) = Ke^{0} = K\\).\n","html":"\u003cp\u003e\\begin{equation}\n\\dv{y}{t} = a(t)f(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eare a class of functions are called \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e. 
We can solve them using the \u003ca href=\"#division-method\"\u003edivision method\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"division-method\"\u003edivision method\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"#division-method\"\u003edivision method\u003c/a\u003e involves solving \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e by dividing and treating it normally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = 8y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{y\u0026rsquo;}{8} = y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now write something fishy:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{\\dd{y}}{y} = 8 \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe now take the antiderivative of this:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int \\frac{1}{y} \\dd{y} = \\int 8 \\dd{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will get that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln |y| = 8t + C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe finally get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y| = e^{C} e^{8t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egetting rid of that absolute value:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\ny \u0026amp;= \\pm e^{C} e^{8t} \\\\\n\u0026amp;= K e^{8t}\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"places-where-this-breaks-down\"\u003eplaces where this breaks down\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003esometimes, \\(\\frac{1}{f(y)}\\) may not have a nice antiderivative\u003c/li\u003e\n\u003cli\u003esometimes, \\(G(y)\\), the antidepressant, may not be nicely invertible\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"general-solution-to-y--t--ry--t\"\u003egeneral solution to y\u0026rsquo;(t) = ry(t)\u003c/h3\u003e\n\u003cp\u003egenerally, for \\(r \\in \\mathbb{R}\\), the solution to \\(y\u0026rsquo;(t) = ry(t)\\) is at \\(y(t)=y_0e^{rt}\\), where \\(y_0 = 
y(0)\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003efor \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e for which \\(ry(t) = f(y)\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{y}{x} = ry(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{y(x)} \\dd{y} = r\\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\ln \\qty| y(x) | = rx +C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand hence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(x) = K e^{rx}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eplugging in \\(x=0\\), yields \\(y(0) = Ke^{0} = K\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhseperable_diffequ/","tags":null,"title":"seperable diffequ"},{"categories":null,"contents":"server-clients is a command in Emacs LISP which is used to get the clients of the current running emacsclient server\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhserver_clients/\"\u003eserver-clients\u003c/a\u003e is a command in Emacs LISP which is used to get the clients of the current running emacsclient server\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhserver_clients/","tags":null,"title":"server-clients"},{"categories":null,"contents":"A set is an unordered collection of objects, which maybe infinitely long. It is generated with \\(\\{, \\}\\). For instance, most numbers are sets.\nconstituents a collection of objects requirements repetition does not matter order does not matter additional information ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhset/\"\u003eset\u003c/a\u003e is an unordered collection of \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es, which maybe infinitely long. It is generated with \\(\\{, \\}\\). 
For instance, most \u003ca href=\"/posts/kbhnumber/\"\u003enumber\u003c/a\u003es are sets.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ea collection of \u003ca href=\"/posts/kbhobjects/\"\u003eobject\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003erepetition does not matter\u003c/li\u003e\n\u003cli\u003eorder does not matter\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhset/","tags":null,"title":"set"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsets/","tags":null,"title":"sets"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624659\nOne-Liner Multi-feature late fusion of NLP results (by normalizing text and n-gram processing) with OpenSMILE embedding results.\nNovelty NLP transcript normalization (see methods) and OpenSMILE; otherwise similar to Martinc 2021. Same gist but different data-prep.\nNotable Methods N-gram processed the input features Used WordNet to replace words with roots Key Figs New Concepts OpenSMILE ","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624659\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eMulti-feature \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of NLP results (by normalizing text and n-gram processing) with \u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e embedding results.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eNLP transcript normalization (see methods) and \u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e; otherwise similar to \u003ca href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e. 
Same gist but different data-prep.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eN-gram processed the input features\u003c/li\u003e\n\u003cli\u003eUsed WordNet to replace words with roots\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_22-28-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhopensmile/\"\u003eOpenSMILE\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhshah_2021/","tags":["ntj"],"title":"Shah 2021"},{"categories":null,"contents":"Short selling involves betting against the stock.\nProcess of Short Selling the trader borrows a number of shares from a third party the trader sells them immediately for cash when the security dips, the debt is repaid by repurchasing the same amount of shares of the borrowed security at the lower price traders nets the profit from the negative price differential If the person shorting\nshort squeeze \u0026ldquo;what happened to GameStock\u0026rdquo;\nA short squeeze is a situation in which a bunch of people try to drive the price of the up by buying enough shares such that the short sellers are forced to sell high\u0026mdash;driving up the price.\n","html":"\u003cp\u003eShort selling involves betting against the stock.\u003c/p\u003e\n\u003ch2 id=\"process-of-short-selling\"\u003eProcess of Short Selling\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ethe trader borrows a number of shares from a third party\u003c/li\u003e\n\u003cli\u003ethe trader sells them immediately for cash\u003c/li\u003e\n\u003cli\u003ewhen the security dips, the debt is repaid by repurchasing the same amount of shares of the borrowed security at the lower price\u003c/li\u003e\n\u003cli\u003etraders nets the profit from the negative 
price differential\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eIf the person \u003ca href=\"/posts/kbhshort_selling/\"\u003eshorting\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"short-squeeze\"\u003eshort squeeze\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;what happened to GameStock\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eA \u003ca href=\"#short-squeeze\"\u003eshort squeeze\u003c/a\u003e is a situation in which a bunch of people try to drive the \u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e of the up by buying enough shares such that the \u003ca href=\"/posts/kbhshort_selling/\"\u003eshort sellers\u003c/a\u003e are forced to sell high\u0026mdash;driving up the price.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhshort_selling/","tags":null,"title":"short selling"},{"categories":null,"contents":"sigmoid function is used to squash your data between \\(0\\) and \\(1\\). Sigmoid is symmetric. It could take any number and squash it to look like a probability between 0 and 1.\n\\begin{equation} \\sigma(z) = \\frac{1}{1+ e^{-z}} \\end{equation}\nSay you have one discrete variable \\(X\\), and one continuous variable \\(Y\\), and you desire to express \\(p(x|y)\\).\nThe simplest way to do this, of course, is to say something like:\n\\begin{equation} P(x^{j} \\mid y) = \\begin{cases} P(x^{j} \\mid y) = 0, y \u0026lt; \\theta \\\\ P(x^{j} \\mid y) = 1, y \u0026gt; \\theta \\end{cases} \\end{equation}\nwhereby if \\(y\\) is above or below a value, \\(x^{j}|y\\) behaves differently. 
But we often don\u0026rsquo;t want a card cap.\nTo soften this, we can use a sigmoid model:\n\\begin{equation} P(x^{1} \\mid y) = \\frac{1}{1 + \\exp \\qty(-2 \\frac{y-\\theta_{1}}{\\theta_{2}})} \\end{equation}\nwhereby, \\(\\theta_{1}\\) is where the threshold of activation is, and \\(\\theta_{2}\\) is how soft you want the spread to be.\nThe derivative of this function is also dead simple:\n\\begin{equation} \\dv{\\sigma(z)}{z} = \\sigma(z) (1-\\sigma(z)) \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function is used to squash your data between \\(0\\) and \\(1\\). Sigmoid is symmetric. It could take any number and squash it to look like a probability between 0 and 1.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sigma(z) = \\frac{1}{1+ e^{-z}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSay you have one discrete variable \\(X\\), and one continuous variable \\(Y\\), and you desire to express \\(p(x|y)\\).\u003c/p\u003e\n\u003cp\u003eThe simplest way to do this, of course, is to say something like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x^{j} \\mid y) = \\begin{cases}\nP(x^{j} \\mid y) = 0, y \u0026lt; \\theta \\\\\nP(x^{j} \\mid y) = 1, y \u0026gt; \\theta\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby if \\(y\\) is above or below a value, \\(x^{j}|y\\) behaves differently. 
But we often don\u0026rsquo;t want a card cap.\u003c/p\u003e\n\u003cp\u003eTo soften this, we can use a \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e model:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(x^{1} \\mid y) = \\frac{1}{1 + \\exp \\qty(-2 \\frac{y-\\theta_{1}}{\\theta_{2}})}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby, \\(\\theta_{1}\\) is where the threshold of activation is, and \\(\\theta_{2}\\) is how soft you want the spread to be.\u003c/p\u003e\n\u003cp\u003eThe derivative of this function is also dead simple:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{\\sigma(z)}{z} = \\sigma(z) (1-\\sigma(z))\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsigmoid/","tags":null,"title":"sigmoid"},{"categories":null,"contents":"Here is the most simple Differential Equation one could imagine:\n\\begin{equation} \\dv{x}{t} = f(t,x) \\end{equation}\nOr, perhaps, we have a second order differential equation which is the same thing but in the second degree:\n\\begin{equation} \\dv[2]{x}{t} = f\\qty(t,x,\\dv{x}{t}) \\end{equation}\nThen in which case, we have that the first most simple type of differential equation to be as follows:\n\\begin{equation} \\dv{x}{t} = x(t) \\end{equation}\nIf we can solve this, we can generalize this to most of other First-Order Differential Equations.\nwhere, the function \\(f(t,x)=x(t)\\).\n\\begin{align} \u0026amp; \\dv{x}{t} = x(t) \\\\ \\Rightarrow\\ \u0026amp; \\frac{1}{x(t)}\\dd{x} = \\dd{t} \\end{align}\nAt this point, you may ask yourself, why not construct it such that we have \\(\\dd{x} = x(t)\\dd{t}\\)? Well, its because our \\(x\\) is a variable in \\(t\\), so if we constructed it that way we\u0026rsquo;d have to integrate a function \\(\\dd{t}\\) with usub and the reverse chain rule, etc. etc. 
If we are instead integrating it on \\(\\dd{x}\\), it becomes much easier because our variable of interest no longer considers the \\(t\\).\nContinuing on, then:\n\\begin{align} \u0026amp;\\frac{1}{x(t)}\\dd{x} = \\dd{t} \\\\ \\Rightarrow\\ \u0026amp;\\int \\frac{1}{x(t)}\\dd{x} = \\int \\dd{t} \\\\ \\Rightarrow\\ \u0026amp; \\ln (x(t)) = t \\\\ \\Rightarrow\\ \u0026amp; x(t) = e^{t} \\end{align}\nAwesome. It should\u0026rsquo;t be hard also to see that, generally:\n\\begin{equation} x(t) = e^{ct} \\end{equation}\nis the solution to all equations \\(\\dv{x}{t} = cx\\).\nTurns out (not proven in the book), this holds for complex valued equations as well. So, we have some:\n\\begin{align} \u0026amp;x(t) = e^{it} \\\\ \\Rightarrow\\ \u0026amp; \\dv{x}{t} = ix \\end{align}\nOf course, from elementary calculus we also learned the fact that \\(e^{x}\\) can be represented as a power series; so check that out for now we connect it.\nThis equation leads us to solve:\n\\begin{equation} \\dv{x}{t} + ax = b(t) \\end{equation}\nIn order to do this, we neeed to find a replacement of the property that:\n\\begin{equation} \\dv t\\qty(e^{at}x) = e^{at}\\qty(\\dv{x}{t} +at) \\end{equation}\nA more general result of the above form is\n\\begin{equation} \\dv{x}{t} + a(t)x = b(t) \\end{equation}\nThis is fine, but now we need to leverage to chain rule to have \\(\\dv t a(t)\\) would be simply changing the above result to \\(a\u0026rsquo;(t)\\).\nBut anyways through this we will end up with the same solution we get from solving differential equations.\n","html":"\u003cp\u003eHere is the most simple \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e one could imagine:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = f(t,x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOr, perhaps, we have a second order \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e which is the same thing but in the second 
degree:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{x}{t} = f\\qty(t,x,\\dv{x}{t})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThen in which case, we have that the first most simple type of \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e to be as follows:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = x(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we can solve this, we can generalize this to most of other \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ewhere, the function \\(f(t,x)=x(t)\\).\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dv{x}{t} = x(t) \\\\\n\\Rightarrow\\ \u0026amp; \\frac{1}{x(t)}\\dd{x} = \\dd{t}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAt this point, you may ask yourself, why not construct it such that we have \\(\\dd{x} = x(t)\\dd{t}\\)? Well, its because our \\(x\\) is a variable in \\(t\\), so if we constructed it that way we\u0026rsquo;d have to integrate a function \\(\\dd{t}\\) with usub and the reverse chain rule, etc. etc. If we are instead integrating it on \\(\\dd{x}\\), it becomes much easier because our variable of interest no longer considers the \\(t\\).\u003c/p\u003e\n\u003cp\u003eContinuing on, then:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\frac{1}{x(t)}\\dd{x} = \\dd{t} \\\\\n\\Rightarrow\\ \u0026amp;\\int \\frac{1}{x(t)}\\dd{x} = \\int \\dd{t} \\\\\n\\Rightarrow\\ \u0026amp; \\ln (x(t)) = t \\\\\n\\Rightarrow\\ \u0026amp; x(t) = e^{t}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAwesome. It should\u0026rsquo;t be hard also to see that, generally:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = e^{ct}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eis the solution to all equations \\(\\dv{x}{t} = cx\\).\u003c/p\u003e\n\u003cp\u003eTurns out (not proven in the book), this holds for complex valued equations as well. 
So, we have some:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;x(t) = e^{it} \\\\\n\\Rightarrow\\ \u0026amp; \\dv{x}{t} = ix\n\\end{align}\u003c/p\u003e\n\u003cp\u003eOf course, from elementary calculus we also learned the fact that \\(e^{x}\\) can be represented as a \u003ca href=\"/posts/kbhpower_series/\"\u003epower series\u003c/a\u003e; so check that out for now we connect it.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eThis equation leads us to solve:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} + ax = b(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn order to do this, we neeed to find a replacement of the property that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t\\qty(e^{at}x) = e^{at}\\qty(\\dv{x}{t} +at)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eA more general result of the above form is\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} + a(t)x = b(t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis is fine, but now we need to leverage to chain rule to have \\(\\dv t a(t)\\) would be simply changing the above result to \\(a\u0026rsquo;(t)\\).\u003c/p\u003e\n\u003cp\u003eBut anyways through this we will end up with the same solution we get from \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003esolving differential equations\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsimple_differential_equations/","tags":null,"title":"Simple Differential Equations"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsimple_game/","tags":null,"title":"simple game"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsingle_party_control/","tags":null,"title":"single party control"},{"categories":null,"contents":"Singular value decomposition is a factorization of a matrix, which is a generalization of the eigendecomposition of normal matricies (i.e. 
where \\(A = V^{-1} D V\\) when \\(A\\) is diagonalizable, i.e. by the spectral theorem possible when matricies are normal).\nDefinitions Singular value decomposition Every \\(m \\times n\\) matrix has a factorization of the form:\n\\begin{equation} M = U D^{\\frac{1}{2}} V^{*} \\end{equation}\nwhere, \\(U\\) is an unitary matrix, \\(D^{\\frac{1}{2}}\\) a diagonalish (i.e. rectangular diagonal) matrix with non-negative numbers on its diagonal called singular values, which are the positive square roots of eigenvalues of \\(M^{* }M\\) \u0026mdash; meaning the diagonal of \\(D^{\\frac{1}{2}}\\) is non-negative (\\(\\geq 0\\)). Finally, \\(V\\) is formed columns of orthonormal bases of eigenvectors of \\(M^{*}M\\).\nSVD is not technically unique, but we like to force a specific (convenient, see proof for why) ordering: where \\(D^{\\frac{1}{2}}\\) (and the corresponding values in \\(V^{*}\\)) is sorted such that the zero values are to the right.\nDoing It Doing SVD is not actually super duper hard, but it takes some thinking on why it works, which we shall do below.\nRecall that \\(V^{* }\\) is the conjugate transpose of the orthonormal eigenvectors of \\(M^{*} M\\). Then, we construct the square roots of the corresponding eigenvalues and arrange them into \\(D^{\\frac{1}{2}}\\).\nTangent:\nWhy is it we can take square roots of these values (i.e. the eigenvalues are guaranteed positive or zero?) Recall the definition of adjoint:\n\\begin{equation} \\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle \\end{equation}\nApplying it here, we have\n\\begin{equation} \\langle M^{*}M v, v \\rangle = \\langle M v, M v \\rangle \\end{equation}\nAnd recall that, by definition of inner product, \\(\\langle Mv, Mv \\rangle \\geq 0\\), and so \\(\\|Mv\\|^{2} \\geq 0\\) and so \\(\\|Mv\\| \\geq 0\\) so \\(\\| \\lambda v \\| \\geq 0\\).\nAnd so you can take the square roots of those singular values (i.e. square roots of eigenvalues of \\(M^{*}M\\)).\nHow do we get \\(U\\)? 
Well recall:\n\\begin{equation} M = U D^{\\frac{1}{2}} V^{*} \\end{equation}\nAnd \\(V\\) is an operator lined with orthornomal eigenbases so it is unitary and so \\(V = (V^{*})^{-1}\\).\nAnd therefore, we apply \\(V\\) on both sides:\n\\begin{equation} MV = UD^{\\frac{1}{2}} \\end{equation}\nAs \\(D\\) is diagonal, and we know the left side, we can then easily recover \\(U\\) by staring at it (and norming the vectors).\nMotivation and Proof Beginning Motivation We have a matrix \\(M\\) of shape \\(m \\times n\\), it sucks: it may not be normal, it may not even be an operator.\nSo consider now:\n\\begin{equation} M^{*} M \\end{equation}\nyou will note that this is now an operator (\\((n \\times m)(m \\times n) = n \\times n\\))!! Not only that, \\(M^{*}M\\) is self-adjoint (\\((M^{*}M)^{*} = M^{*}(M^{*})^{*} = M^{*}M\\)). Of course self-adjoint matricies are normal, which is nice, so spectral theorem applies here (even the real version because self-adjoint!)\nEigendecomposition of \\(M^{*}M\\) So, by the spectral theorem, there are a basis of orthonormal eigenvectors \\(v_1, \\dots v_{n}\\) of \\(M^{*}M\\) such that:\nGiven:\n\\begin{equation} V = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n}) \\end{equation}\nwe have\n\\begin{equation} M^{*}M = V D_0 V^{-1} \\end{equation}\ni.e. this is the eigendecomposition (\u0026ldquo;similar to diagonal\u0026rdquo;) result we had from before, where \\(D_0\\) is a Diagonal Matrix of eigenvalues on the diagonal.\nSwapping the direction of conjugation, to expose the diagonal matrix by itself, we have:\n\\begin{equation} D_0 = V^{-1} M^{*} M V \\end{equation}\nYou will NOTICE! The spectral theorem gives us that \\(v_1, \u0026hellip; v_{n}\\) is not only a basis of eigenvectors, but an ORTHONORMAL basis of eigenvectors. So \\(V\\) is an operator with orthogonal columns. 
And so, because of this result, we have that: \\(V^{*} = V^{-1}\\).\nSubstituting this in, we have:\n\\begin{equation} D_0 = V^{*} M^{*} M V \\end{equation}\nAside #1: zero-eigenvalue eigenvector ordering To make this better, we can order \\(v_1, \\dots v_{n}\\) such that eigenvectors vectors corresponding to \\(\\lambda = 0\\) comes last.\nAnd so we make a \\(V\\):\n\\begin{equation} V = \\mqty(v_1 \u0026amp;\\dots \u0026amp;v_{n-p} \u0026amp; v_{n-p+1} \u0026amp;\\dots \u0026amp;v_{n}) \\end{equation}\nSo we have two sub-matricies: an matrix \\(V_1\\) of shape \\((n, n-p)\\) which is filled by eigenvectors corresponding to eigenvalues not \\(=0\\), and the other matrix \\(V_2\\) of shape \\((n,p)\\) which is made of eigenvectors corresponding to zero eigenvalues.\nThat is:\n\\begin{equation} \\begin{cases} V_1 = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n-p}) \\\\ V_1 = \\mqty(v_{n-p+1} \u0026amp; \\dots \u0026amp; v_{n}) \\\\ \\end{cases} \\end{equation}\nand\n\\begin{equation} V = \\mqty(V_1 \u0026amp; V_2) \\end{equation}\nwhere, \\(v_1, \u0026hellip;, v_{n-p}\\) are orthonormal eigenvectors corresponding to non-zero eigenvalues, and \\(v_{n-p+1}, \u0026hellip;, v_{n}\\) are that corresponding to zero eigenvalue.\nFurthermore, this ordering of the eigenvectors can help us better clarify what \\(D_0\\) is:\n\\begin{equation} D_0 = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{equation}\nWhere, \\(D\\) is a Diagonal Matrix with a strictly positive diagonal as the non-diagonals are zero by definition, the lower-right quadrant is \\(0\\) because the sub-part of \\(V_2\\) are eigenvectors corresponding to the zero eigenvalue.\nApplying \\(V_1, V_2\\) breakup from aside above Ok, recall where we were:\n\\begin{equation} D_0 = V^{*} M^{*} M V \\end{equation}\nApplying the substitutions from above:\n\\begin{equation} \\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{equation}\nNow, recall how matricies 
multiply:\n\\begin{align} \u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\\\\ \\Rightarrow\\ \u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) \\mqty(M^{*} M V_1\\ M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\\\ \\Rightarrow\\ \u0026amp; \\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{align}\nAside #2: \\(A^{*} A = 0 \\implies A=0\\) Take the construction:\n\\begin{equation} A^{*} A = 0 \\end{equation}\nwe desire that \\(A = 0\\).\nRecall the definition of \\(A^{*}\\):\n\\begin{equation} \\langle Av, w \\rangle = \\langle v, A^{*}w \\rangle \\end{equation}\nfor all \\(v,w\\).\nNow, consider:\n\\begin{equation} \\langle A^{*} Av, w \\rangle = \\langle A^{*} (Av), w \\rangle = \\langle Av, (A^{*})^{*}w \\rangle = \\langle Av, Aw \\rangle \\end{equation}\nApplying the above, finally, consider:\n\\begin{equation} \\|Av\\|^{2} = \\langle Av, Av \\rangle = \\langle A^{*}A v, v \\rangle \\end{equation}\nRecall that \\(A^{*}A = 0\\), so:\n\\begin{equation} \\|Av\\|^{2} = \\langle A^{*}A v, v \\rangle = \\langle 0v,v \\rangle = 0 \\end{equation}\nSo, the norm of \\(Av = 0\\) for all \\(v \\in V\\), which means \\(A\\) produces only \\(0\\) vectors, which means \\(A=0\\), as desired.\nBreaking \\(V_{j}^{*} M^{*}M V_{j}\\) up Recall where we ended up at:\n\\begin{equation} \\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\end{equation}\nConsider its diagonals:\n\\begin{equation} \\begin{cases} V_1^{*} M^{*} M V_1 = D \\\\ V_2^{*} M^{*} M V_2 = 0 \\end{cases} \\end{equation}\nNow, for the second expression, we have: \\(V_2^{*}M^{*}MV_{2} = (M V_2)^{*} (M V_2) = 0\\). 
So, from the result above (that \\(A^{*}A = 0 \\implies A=0\\)), we have that \\(MV_{2} = 0\\).\nAside #3: \\(V_1 V_1^{*} + V_2V_2^{*} = I\\) Consider:\n\\begin{equation} V_1 V_1^{*} \\end{equation}\nThe matrix \\(V_1\\) has shape \\((n, n-p)\\), and this makes \\(V_1^{* }\\) have shape \\((n-p, n)\\). You will, therefore, note that \\(V_{1}^{* }\\) is a map from a vector space of dimension \\(n\\) to that in a dimension \\(n-p\\). This map, then, is not injective when \\(p\\neq 0\\). Therefore, the overall operator \\(V_1 V_1^{* }\\) is also not going to be injective because non-zero is going to be sent by \\(V_1^{* }\\) to \\(0\\), then sent still by \\(V_1\\) to \\(0\\). This also means that \\(V_1 V_1^{*}\\) is not invertable.\nYet, we are trying to show \\(V_1 V_1^{*} + V_2 V_2^{*} = I\\), which is the sum of these two noninvertible map, is \\(I\\): the grandaddy of all invertible maps. What gives?\nRecall that:\n\\begin{equation} \\begin{cases} \\mqty(V_1 \u0026amp; V_2) = V \\\\ V V^{*} = I \\end{cases} \\end{equation}\nThe first result is by definition, the second because \\(V\\) is an orthonormal operator so it is unitary.\nLet us begin with:\n\\begin{align} I \u0026amp;= V V^{*} \\\\ \u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1 \u0026amp; V_2)^{*} \\\\ \u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1^{*} \\\\ V_2^{*}) \\\\ \u0026amp;= V_1V_1^{*} + V_2 V_2^{*} \\end{align}\nAnd the last equation simply comes from how matrices multiply: row by column. And so, weirdly, we can confirm that adding non-full rank matricies and end up to be the identity. So, again:\n\\begin{equation} V_1 V_1^{*} + V_2V_2^{*} = I \\end{equation}\nConstructing \\(U_1\\) With the result above, we are finally close to doing what we want to do. 
Recall our last set of conclusions:\none, that:\n\\begin{equation} \\begin{cases} V_1^{*} M^{*} M V_1 = D \\\\ V_2^{*} M^{*} M V_2 = 0 \\end{cases} \\end{equation}\nand specifically, that \\(MV_{2} = 0\\).\nand two, that:\n\\begin{align} \u0026amp;V_1 V_1^{* } + V_2V_2^{* } = I \\\\ \\Rightarrow\\ \u0026amp; V_1 V_1^{* } = I - V_2V_2^{* } \\end{align}\nLet\u0026rsquo;s now turn our attention to \\(D\\) above. It has all non-zero diagonals, because we cropped out the zero already (see above during the definition of \\(D\\) vis a vi \\(D_0\\)). This means it is invertible because operator is only invertible if diagonal of its upper-triangular matrix is nonzero. For a diagonal matrix, this is particularly easy; let us construct:\n\\begin{equation} D = D^{\\frac{1}{2}} D^{\\frac{1}{2}} \\end{equation}\nwhere, \\(D^{\\frac{1}{2}}\\) is just the same diagonal matrix as \\(D\\) except we take the square root of everything in the diagonal. The above could be shown then to be true by calculation (\\(\\sqrt{a}\\sqrt{a} = a\\) on every element in the diagonal).\nLet us also make:\n\\begin{equation} I = D^{-\\frac{1}{2}} D^{\\frac{1}{2}} \\end{equation}\nwhere, \\(D^{-\\frac{1}{2}}\\) is \\(\\frac{1}{\\sqrt{a}}\\) for event element \\(a\\) in the diagonal. 
Again, the above could be shown to be true by calculation by \\(\\sqrt{a} \\frac{1}{\\sqrt{a}} = 1\\).\nGiven the diagonal of \\(D\\) contains the eigenvalues of \\(M^{*}M\\), by calculation \\(D^{\\frac{1}{2}}\\) contains the square roots of these eigenvalues, which means that it should contain on its diagonal the singular values of \\(M\\), which is rather nice (because we have corollaries below that show concordance between singular values of \\(M\\) and its eigenvalues, see below).\nConsider, finally, the matrix \\(M\\):\n\\begin{align} M \u0026amp;= M - 0 \\\\ \u0026amp;= M - 0 V_2^{* } \\\\ \u0026amp;= M - (M V_2) V_2^{* } \\\\ \u0026amp;= M (I - V_2 V_2^{* }) \\\\ \u0026amp;= M V_1V_1^{*} \\\\ \u0026amp;= M V_1 I V_1^{*} \\\\ \u0026amp;= M V_1 (D^{-\\frac{1}{2}} D^{\\frac{1}{2}}) V_1^{*} \\\\ \u0026amp;= (M V_1 D^{-\\frac{1}{2}}) D^{\\frac{1}{2}} V_1^{*} \\end{align}\nWe now define a matrix \\(U_1\\):\n\\begin{equation} U_1 = M V_1 D^{-\\frac{1}{2}} \\end{equation}\nWe now have:\n\\begin{equation} M = U_1 D^{\\frac{1}{2}} V_1^{*} \\end{equation}\nWere \\(U_1\\) is a matrix of shape \\((m \\times n)(n \\times n-p)(n-p \\times n-p) = m \\times n-p\\), \\(D^{\\frac{1}{2}}\\) is a diagonal matrix of shape \\(n-p \\times n-p\\) with singular values on the diagonal, and \\(V_1^{*}\\) is a matrix with orthonormal rows of shape \\(n-p \\times n\\).\nThis is a compact svd. We are sandwitching a diagonal matrix of singular values between two rectangular matricies to recover \\(M\\). Ideally, we want the left and right matricies too to have nice properties (like, say, be an operator or have unitarity). So we work harder.\nAside #4: \\(U_1\\) is orthonormal We can\u0026rsquo;t actually claim \\(U_1\\) is unitary because its not an operator. However, we like to show its columns are orthonormal so far so we can extend it into a fully, actually, unitary matrix.\nOne sign that a matrix is orthonormal is if \\(T^{*}T = I\\). 
Because of the way that matricies multiply, this holds IFF each column yields a \\(1\\) when its own conjugate transpose is applied, and \\(0\\) otherwise. This is also the definition of orthonormality.\nTherefore, we desire \\(U_{1}^{*} U_1 = I\\). We hence consider:\n\\begin{equation} U_1^{*} U_1 \\end{equation}\nWe have by substitution of \\(U_1 = M V_1 D^{-\\frac{1}{2}}\\):\n\\begin{equation} (M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}}) \\end{equation}\nGiven the property that \\((AB)^{*} = B^{*}A^{*}\\), we have that:\n\\begin{equation} (M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}}) = {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \\end{equation}\nRecall now that, from way before, we have:\n\\begin{equation} V_1^{*} M^{*} M V_1 = D \\end{equation}\nSubstituting that in:\n\\begin{align} {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} (V_1^{*} M^{*}M V_1) D^{-\\frac{1}{2}} \\\\ \u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}} \\end{align}\nRecall now that the multiplication of diagonal matricies are commutative (by calculation), and that diagonal real matricies are self-adjoint (try conjugate-transposing a real diagonal matrix). We know that \\(D^{-\\frac{1}{2}}\\) is real (because its filled with the square roots of the eigenvalues of \\(M^{*}M\\), which is self-adjoint, and eigenvalues of self-adjoint matricies are real) and is by definition diagonal. 
So we have that \\(D^{-\\frac{1}{2}}\\) is self-adjoint.\nTaking those facts in mind, we can now rewrite this expression:\n\\begin{align} {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}} \\\\ \u0026amp;= D^{-\\frac{1}{2}} D D^{-\\frac{1}{2}} \\\\ \u0026amp;= D^{-\\frac{1}{2}}D^{-\\frac{1}{2}} D \\\\ \u0026amp;= D^{-1} D \\\\ \u0026amp;= I \\end{align}\nTherefore, \\(U_1^{*} U_1 = I\\) as desired, so the columns of \\(U_1\\) is orthonormal.\nSVD, fully Recall that, so far, we have:\n\\begin{equation} M = U_1 D^{\\frac{1}{2}} V_1^{*} \\end{equation}\nwhere\n\\begin{equation} U_1 = M V_1 D^{-\\frac{1}{2}} \\end{equation}\nSo far, \\(U_1\\) and \\(V_1^{*}\\) are both disappointingly not operators. However, we know that \\(U_1\\) and \\(V_1\\) are both orthonormal (the former per aside #4 above, the latter by the spectral theorem and construction above). So wouldn\u0026rsquo;t it be doubleplusgood for both of them to be unitary operators?\nTo make this happen, we need to change the shapes of things a little without changing how the matricies behave. That is: we want \\(U\\) and \\(V^{* }\\) to both be operators, and yet still have \\(U D^{\\frac{1}{2}} V^{*} = M\\).\nPadding out \\(D\\) and \\(V\\) There are immediate and direct ways of padding out \\(D^{\\frac{1}{2}}\\) and \\(V_{1}^{*}\\): let us replace \\(V_1 \\implies V\\), and just shove enough zeros into \\(D\\) such that the dimensions work out (changing it from shape \\(n-p \\times n-p\\) to \\(m \\times n\\), but do this by just adding enough zeros on the edges until it works).\nSo first, \\(D^{\\frac{1}{2}}\\) becomes:\n\\begin{equation} D^{\\frac{1}{2}}_{new} = \\mqty(D^{\\frac{1}{2}}_{orig} \u0026amp; 0 \u0026amp;\\dots \\\\ 0 \u0026amp; 0 \u0026amp;\\dots \\\\ 0\u0026amp;0\u0026amp;\\dots) \\end{equation}\n(the number of zeros on the edge vary based on the proportions of \\(n, p, m\\)).\nWhy would this always be padding? i.e. 
why is \\(n-p \\leq m\\)? Here\u0026rsquo;s a hand-wavy proof that the reader can choose to fill in the gaps of: consider the fact that \\(M\\)\u0026rsquo;s shape is \\(m \\times n\\). Specifically, this means that \\(M: V \\to W\\) where \\(\\dim V = n\\) and \\(\\dim W = m\\). Say for the sake of argument \\(n\u0026gt; m\\) (otherwise naturally \\(n-p \\leq m\\) because \\(n\\leq m\\)). Consider \\(null\\ M\\); given it is a map from a larger space to a smaller space, there\u0026rsquo;s going to be a non-trivial null space. This non-trivial null space is going to be as large or larger than \\(m-n\\); and the null space of \\(M^{*}M\\) will be at least as large as \\(m-n\\) as well because everything is sent through \\(M\\) first. And then applying rank nullity can show that \\(m \\geq \\dim\\ range\\ M^{ *}M\\). Therefore, the number of non-zero eigenvalues of \\(M^{ *}M\\), which also corresponds to the number of non-zero columns of \\(D\\), which also is \\(n-p\\), must be smaller than or equal to \\(m\\) because otherwise the diagonal representation would have too many linearly independent columns (i.e. more lin. indp. columns that the rank which is impossible).\nNow, we have\n\\begin{equation} V = \\mqty(V_1 \u0026amp; V_2) \\end{equation}\nwhere \\(V_1\\) is a matrix whose columns are the non-zero eigenvalue correlated eigenvectors, and the columns of \\(V_1\\) the zero-eigenvalue related ones.\nNote, now that:\n\\(D^{\\frac{1}{2}}_{new} V^{* }\\) is an \\(m \\times n\\) matrix that behaves almost exactly like \\(D^{\\frac{1}{2}}_{orig} V_1^{*}\\), a \\(n-p \\times n\\) matrix. 
The last \\(m-(n-p)\\) (as we established before, \\(m \\geq n-p\\)) dimensions of the new, padded matrix\u0026rsquo;s output will simply be \\(0\\): because recall that \\(DT\\) for some diagonal matrix \\(D\\) scales the rows of \\(T\\): and the first \\(n-p\\) rows (corresponding to the columns of \\(V_1\\), because recall we are applying \\(V\\) not \\(V^{ *}\\) to \\(D\\)) will be scaled normally, and the last \\(m-(n-p)\\) rows will be scaled by \\(0\\) as they are a part of the padded zero-diagonal.\nPadding out \\(U\\) With \\(D\\) and \\(V\\) padded, its time to deal with \\(U\\). Fortunately, recall that the last bit of the output of \\(DV\\) will just be \\(0\\): so whatever we stick in terms of columns of \\(V\\) for those slots will never actually be added to the final output. In a sense, they don\u0026rsquo;t really matter.\nThe first \\(n-p\\) of \\(U\\) (i.e. \\(U_{1}\\)) we already have a well-defined answer: recall from before \\(U_1 = M D^{-\\frac{1}{2}} V_{1}^{*}\\). So the last bit we can just stick on literally whatever to make it work.\nAnd by \u0026ldquo;making it work\u0026rdquo;, we literally just mean extending the columns of \\(U_1\\) until you have \\(m\\) linearly-independent of them, then Gram-Schmidtting to make it all orthonormal. 
The first \\(n-p\\) columns will not be affected by Gram-Schmidtting, as we have established before \\(U_1\\) is orthonormal.\nAgain, these are basically arbitrary: no one cares because these columns will always be scaled by \\(0\\) as they are part of the \u0026ldquo;padding columns\u0026rdquo; from padding \\(D^{\\frac{1}{2}}\\) out.\nand so, finally Finally, we now have:\n\\begin{equation} M = U D^{\\frac{1}{2}} V^{*} \\end{equation}\nwhere, \\(U\\) is an \\(m \\times m\\) unitary operator, \\(D^{\\frac{1}{2}}\\) is an \\(m \\times n\\) semidiagonal matrix (diagonal \\(n-p \\times n-p\\) part, then \\(0\\) padding all around) filled with singular values of \\(M\\) on the diagonal, and \\(V^{*}\\) is an \\(n \\times n\\) unitary operator filled with orthonormal rows of right singular-vectors (i.e. eigenvectors of \\(M^{ *}M\\)).\nUseful corollaries If \\(\\lambda\\) is an non-negative real eigenvalue of \\(M\\), then \\(\\lambda\\) is sometimes a singular value of \\(M\\) Consider the matrix:\n\\begin{equation} \\mqty(1 \u0026amp; 1 \\\\0 \u0026amp; 0) \\end{equation}\nsingular values: \\(\\sqrt{2},0\\) eigenvalues: \\(1,0\\) However, the statement is the case if \\(M\\) is already diagonalizable, then in which case you can imagine constructing \\(M^{* }M\\) to be vis a vi the eigenbasis of \\(M\\), which means that the resulting diagonal representation of \\(M^{*}M\\) would just be the eigenvalues of \\(M\\) squared as you are multiplying a diagonal matrix by itself.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003eSingular value decomposition\u003c/a\u003e is a factorization of a \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e, which is a generalization of the \u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eeigendecomposition\u003c/a\u003e of \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e (i.e. 
where \\(A = V^{-1} D V\\) when \\(A\\) is \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e, i.e. by the \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e possible when matricies are \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e).\u003c/p\u003e\n\u003ch2 id=\"definitions\"\u003eDefinitions\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003eSingular value decomposition\u003c/strong\u003e Every \\(m \\times n\\) matrix has a factorization of the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U D^{\\frac{1}{2}} V^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(U\\) is an \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e matrix, \\(D^{\\frac{1}{2}}\\) a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003eish (i.e. rectangular diagonal) matrix with non-negative numbers on its diagonal called \u003cstrong\u003esingular values\u003c/strong\u003e, which are the positive square roots of eigenvalues of \\(M^{* }M\\) \u0026mdash; meaning the diagonal of \\(D^{\\frac{1}{2}}\\) is non-negative (\\(\\geq 0\\)). Finally, \\(V\\) is formed columns of orthonormal bases of eigenvectors of \\(M^{*}M\\).\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003eSVD\u003c/a\u003e is not technically unique, but we like to force a specific (convenient, see proof for why) ordering: where \\(D^{\\frac{1}{2}}\\) (and the corresponding values in \\(V^{*}\\)) is sorted such that the zero values are to the right.\u003c/p\u003e\n\u003ch2 id=\"doing-it\"\u003eDoing It\u003c/h2\u003e\n\u003cp\u003eDoing SVD is not actually super duper hard, but it takes some thinking on why it works, which we shall do below.\u003c/p\u003e\n\u003cp\u003eRecall that \\(V^{* }\\) is the conjugate transpose of the orthonormal eigenvectors of \\(M^{*} M\\). 
Then, we construct the square roots of the corresponding eigenvalues and arrange them into \\(D^{\\frac{1}{2}}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003e\u003cstrong\u003eTangent\u003c/strong\u003e:\u003c/p\u003e\n\u003cp\u003eWhy is it we can take square roots of these values (i.e. the eigenvalues are guaranteed positive or zero?) Recall the definition of adjoint:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Tv, w \\rangle = \\langle v, T^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying it here, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle M^{*}M v, v \\rangle = \\langle M v, M v \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd recall that, by definition of inner product, \\(\\langle Mv, Mv \\rangle \\geq 0\\), and so \\(\\|Mv\\|^{2} \\geq 0\\) and so \\(\\|Mv\\| \\geq 0\\) so \\(\\| \\lambda v \\| \\geq 0\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eAnd so you can take the square roots of those singular values (i.e. square roots of eigenvalues of \\(M^{*}M\\)).\u003c/p\u003e\n\u003cp\u003eHow do we get \\(U\\)? 
Well recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U D^{\\frac{1}{2}} V^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd \\(V\\) is an operator lined with orthornomal eigenbases so it is unitary and so \\(V = (V^{*})^{-1}\\).\u003c/p\u003e\n\u003cp\u003eAnd therefore, we apply \\(V\\) on both sides:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nMV = UD^{\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAs \\(D\\) is diagonal, and we know the left side, we can then easily recover \\(U\\) by staring at it (and norming the vectors).\u003c/p\u003e\n\u003ch2 id=\"motivation-and-proof\"\u003eMotivation and Proof\u003c/h2\u003e\n\u003ch3 id=\"beginning-motivation\"\u003eBeginning Motivation\u003c/h3\u003e\n\u003cp\u003eWe have a matrix \\(M\\) of shape \\(m \\times n\\), it sucks: it may not be \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e, it may not even be an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo consider now:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM^{*} M\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that this is now an \u003cem\u003eoperator (\\((n \\times m)(m \\times n) = n \\times n\\))!!\u003c/em\u003e Not only that, \\(M^{*}M\\) is \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e (\\((M^{*}M)^{*} = M^{*}(M^{*})^{*} = M^{*}M\\)). 
Of course \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e matricies are \u003ca href=\"/posts/kbhaxler_7_a/#normal\"\u003enormal\u003c/a\u003e, which is nice, so \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e applies here (even the real version because \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e!)\u003c/p\u003e\n\u003ch3 id=\"eigendecomposition--kbhnus-math530-similar-to-diagonal-dot-md--of-m-m\"\u003e\u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eEigendecomposition\u003c/a\u003e of \\(M^{*}M\\)\u003c/h3\u003e\n\u003cp\u003eSo, by the \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e, there are a basis of orthonormal \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es \\(v_1, \\dots v_{n}\\) of \\(M^{*}M\\) such that:\u003c/p\u003e\n\u003cp\u003eGiven:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM^{*}M = V D_0 V^{-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ei.e. this is the \u003ca href=\"/posts/kbhnus_math530_similar_to_diagonal/\"\u003eeigendecomposition\u003c/a\u003e (\u0026ldquo;similar to diagonal\u0026rdquo;) result we had from before, where \\(D_0\\) is a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003eDiagonal Matrix\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es on the diagonal.\u003c/p\u003e\n\u003cp\u003eSwapping the direction of conjugation, to expose the diagonal matrix by itself, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = V^{-1} M^{*} M V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will NOTICE! 
The \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e gives us that \\(v_1, \u0026hellip; v_{n}\\) is not only a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, but an \u003cstrong\u003eORTHONORMAL\u003c/strong\u003e basis of eigenvectors. So \\(V\\) is an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e with \u003ca href=\"/posts/kbhorthogonal/\"\u003eorthogonal\u003c/a\u003e columns. And so, because of \u003ca href=\"/posts/kbhnus_math530_matrix_adjectives/#an-unitary-operator-is-invertible-and-the-inverse-of-its-matrix-representation-is-its-transpose\"\u003ethis result\u003c/a\u003e, we have that: \\(V^{*} = V^{-1}\\).\u003c/p\u003e\n\u003cp\u003eSubstituting this in, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = V^{*} M^{*} M V\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"aside-1-zero-eigenvalue-eigenvector-ordering\"\u003eAside #1: zero-eigenvalue eigenvector ordering\u003c/h3\u003e\n\u003cp\u003eTo make this better, we can order \\(v_1, \\dots v_{n}\\) such that eigenvectors vectors corresponding to \\(\\lambda = 0\\) comes last.\u003c/p\u003e\n\u003cp\u003eAnd so we make a \\(V\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(v_1 \u0026amp;\\dots \u0026amp;v_{n-p} \u0026amp; v_{n-p+1} \u0026amp;\\dots \u0026amp;v_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo we have two sub-matricies: an matrix \\(V_1\\) of shape \\((n, n-p)\\) which is filled by \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es corresponding to \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es not \\(=0\\), and the other matrix \\(V_2\\) of shape \\((n,p)\\) which is made of \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es corresponding to zero \u003ca 
href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eThat is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV_1 = \\mqty(v_1 \u0026amp; \\dots \u0026amp; v_{n-p}) \\\\\nV_1 = \\mqty(v_{n-p+1} \u0026amp; \\dots \u0026amp; v_{n}) \\\\\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(V_1 \u0026amp; V_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(v_1, \u0026hellip;, v_{n-p}\\) are orthonormal \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es corresponding to non-zero \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, and \\(v_{n-p+1}, \u0026hellip;, v_{n}\\) are that corresponding to \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eFurthermore, this ordering of the eigenvectors can help us better clarify what \\(D_0\\) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhere, \\(D\\) is a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003eDiagonal Matrix\u003c/a\u003e with a strictly positive \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e as the non-diagonals are zero by definition, the lower-right quadrant is \\(0\\) because the sub-part of \\(V_2\\) are eigenvectors corresponding to the zero eigenvalue.\u003c/p\u003e\n\u003ch3 id=\"applying-v-1-v-2-breakup-from-aside-above\"\u003eApplying \\(V_1, V_2\\) breakup from aside above\u003c/h3\u003e\n\u003cp\u003eOk, recall where we were:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD_0 = V^{*} M^{*} M V\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying the substitutions from above:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 
\u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, recall how matricies multiply:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) M^{*} M \\mqty(V_1\\ V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\\\\\n\\Rightarrow\\ \u0026amp;\\mqty(V_1^{*} \\\\ V_2^{*}) \\mqty(M^{*} M V_1\\ M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0) \\\\\n\\Rightarrow\\ \u0026amp; \\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\n\\end{align}\u003c/p\u003e\n\u003ch3 id=\"aside-2-a-a-0-implies-a-0\"\u003eAside #2: \\(A^{*} A = 0 \\implies A=0\\)\u003c/h3\u003e\n\u003cp\u003eTake the construction:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA^{*} A = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe desire that \\(A = 0\\).\u003c/p\u003e\n\u003cp\u003eRecall the definition of \\(A^{*}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle Av, w \\rangle = \\langle v, A^{*}w \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(v,w\\).\u003c/p\u003e\n\u003cp\u003eNow, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle A^{*} Av, w \\rangle = \\langle A^{*} (Av), w \\rangle = \\langle Av, (A^{*})^{*}w \\rangle = \\langle Av, Aw \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eApplying the above, finally, consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|Av\\|^{2} = \\langle Av, Av \\rangle = \\langle A^{*}A v, v \\rangle\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(A^{*}A = 0\\), so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\|Av\\|^{2} = \\langle A^{*}A v, v \\rangle = \\langle 0v,v \\rangle = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, the norm of \\(Av = 0\\) for all \\(v \\in V\\), which means \\(A\\) produces only \\(0\\) vectors, which means \\(A=0\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"breaking-v-j-m-m-v-j-up\"\u003eBreaking \\(V_{j}^{*} 
M^{*}M V_{j}\\) up\u003c/h3\u003e\n\u003cp\u003eRecall where we ended up at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(V_1^{*} M^{*} M V_1 \u0026amp; V_1^{*} M^{*} M V_2 \\\\ V_2^{*}M^{*} M V_1 \u0026amp; V_2^{*} M^{*} M V_2) = \\mqty(D \u0026amp; 0 \\\\ 0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsider its diagonals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV_1^{*} M^{*} M V_1 = D \\\\\nV_2^{*} M^{*} M V_2 = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, for the second expression, we have: \\(V_2^{*}M^{*}MV_{2} = (M V_2)^{*} (M V_2) = 0\\). So, from the result above (that \\(A^{*}A = 0 \\implies A=0\\)), we have that \\(MV_{2} = 0\\).\u003c/p\u003e\n\u003ch3 id=\"aside-3-v-1-v-1-plus-v-2v-2-i\"\u003eAside #3: \\(V_1 V_1^{*} + V_2V_2^{*} = I\\)\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1 V_1^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe matrix \\(V_1\\) has shape \\((n, n-p)\\), and this makes \\(V_1^{* }\\) have shape \\((n-p, n)\\). You will, therefore, note that \\(V_{1}^{* }\\) is a map from a vector space of dimension \\(n\\) to that in a dimension \\(n-p\\). This map, then, is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e when \\(p\\neq 0\\). Therefore, the overall operator \\(V_1 V_1^{* }\\) is also not going to be \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e because non-zero is going to be sent by \\(V_1^{* }\\) to \\(0\\), then sent still by \\(V_1\\) to \\(0\\). This also means that \\(V_1 V_1^{*}\\) is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eYet, we are trying to show \\(V_1 V_1^{*} + V_2 V_2^{*} = I\\), which is the sum of these two noninvertible map, is \\(I\\): the grandaddy of all invertible maps. 
What gives?\u003c/p\u003e\n\u003cp\u003eRecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\mqty(V_1 \u0026amp; V_2) = V \\\\\nV V^{*} = I\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe first result is by definition, the second because \\(V\\) is an orthonormal operator so it is \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet us begin with:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nI \u0026amp;= V V^{*} \\\\\n\u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1 \u0026amp; V_2)^{*} \\\\\n\u0026amp;= \\mqty(V_1 \u0026amp; V_2) \\mqty(V_1^{*} \\\\ V_2^{*}) \\\\\n\u0026amp;= V_1V_1^{*} + V_2 V_2^{*}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAnd the last equation simply comes from how matrices multiply: row by column. And so, weirdly, we can confirm that adding non-full rank matricies and end up to be the identity. So, again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1 V_1^{*} + V_2V_2^{*} = I\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"constructing-u-1\"\u003eConstructing \\(U_1\\)\u003c/h3\u003e\n\u003cp\u003eWith the result above, we are finally close to doing what we want to do. Recall our last set of conclusions:\u003c/p\u003e\n\u003cp\u003eone, that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nV_1^{*} M^{*} M V_1 = D \\\\\nV_2^{*} M^{*} M V_2 = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand specifically, that \\(MV_{2} = 0\\).\u003c/p\u003e\n\u003cp\u003eand two, that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp;V_1 V_1^{* } + V_2V_2^{* } = I \\\\\n\\Rightarrow\\ \u0026amp; V_1 V_1^{* } = I - V_2V_2^{* }\n\\end{align}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s now turn our attention to \\(D\\) above. 
It has all non-zero diagonals, because we cropped out the zero already (\u003ca href=\"#aside-1-zero-eigenvalue-eigenvector-ordering\"\u003esee above\u003c/a\u003e during the definition of \\(D\\) vis a vi \\(D_0\\)). This means it is invertible because \u003ca href=\"/posts/kbhupper_triangular_matrix/#operator-is-only-invertible-if-id-c38ed162-6861-420c-a812-6d25ac539ea9-diagonal-of-its-id-af53dbd7-0421-4039-a9f9-9080ea6e1c42-upper-triangular-matrix-is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e. For a diagonal matrix, this is particularly easy; let us construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD = D^{\\frac{1}{2}} D^{\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(D^{\\frac{1}{2}}\\) is just the same \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e matrix as \\(D\\) except we take the square root of everything in the diagonal. The above could be shown then to be true by calculation (\\(\\sqrt{a}\\sqrt{a} = a\\) on every element in the diagonal).\u003c/p\u003e\n\u003cp\u003eLet us also make:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI = D^{-\\frac{1}{2}} D^{\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(D^{-\\frac{1}{2}}\\) is \\(\\frac{1}{\\sqrt{a}}\\) for event element \\(a\\) in the diagonal. 
Again, the above could be shown to be true by calculation by \\(\\sqrt{a} \\frac{1}{\\sqrt{a}} = 1\\).\u003c/p\u003e\n\u003cp\u003eGiven the diagonal of \\(D\\) contains the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(M^{*}M\\), by calculation \\(D^{\\frac{1}{2}}\\) contains the square roots of these \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, which means that it should contain on its \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e the \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es of \\(M\\), which is rather nice (because we have corollaries below that show concordance between \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es of \\(M\\) and its \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es, see below).\u003c/p\u003e\n\u003cp\u003eConsider, finally, the matrix \\(M\\):\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nM \u0026amp;= M - 0 \\\\\n\u0026amp;= M - 0 V_2^{* } \\\\\n\u0026amp;= M - (M V_2) V_2^{* } \\\\\n\u0026amp;= M (I - V_2 V_2^{* }) \\\\\n\u0026amp;= M V_1V_1^{*} \\\\\n\u0026amp;= M V_1 I V_1^{*} \\\\\n\u0026amp;= M V_1 (D^{-\\frac{1}{2}} D^{\\frac{1}{2}}) V_1^{*} \\\\\n\u0026amp;= (M V_1 D^{-\\frac{1}{2}}) D^{\\frac{1}{2}} V_1^{*}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eWe now define a matrix \\(U_1\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 = M V_1 D^{-\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U_1 D^{\\frac{1}{2}} V_1^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWere \\(U_1\\) is a matrix of shape \\((m \\times n)(n \\times n-p)(n-p \\times n-p) = m \\times n-p\\), \\(D^{\\frac{1}{2}}\\) is a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e matrix of shape \\(n-p \\times n-p\\) with \u003ca 
href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular values\u003c/a\u003e on the diagonal, and \\(V_1^{*}\\) is a matrix with orthonormal rows of shape \\(n-p \\times n\\).\u003c/p\u003e\n\u003cp\u003eThis is a \u003cstrong\u003ecompact svd\u003c/strong\u003e. We are sandwitching a \u003ca href=\"/posts/kbhdiagonal_matrix/\"\u003ediagonal\u003c/a\u003e matrix of \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular values\u003c/a\u003e between two rectangular matricies to recover \\(M\\). Ideally, we want the left and right matricies too to have nice properties (like, say, be an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e or have \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitarity\u003c/a\u003e). So we work harder.\u003c/p\u003e\n\u003ch3 id=\"aside-4-u-1-is-orthonormal\"\u003eAside #4: \\(U_1\\) is orthonormal\u003c/h3\u003e\n\u003cp\u003eWe can\u0026rsquo;t actually claim \\(U_1\\) is \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e because its not an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e. However, we like to show its columns are \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e so far so we can extend it into a fully, actually, \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e matrix.\u003c/p\u003e\n\u003cp\u003eOne sign that a matrix is \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003e is if \\(T^{*}T = I\\). Because of the way that matricies multiply, this holds IFF each column yields a \\(1\\) when its own conjugate transpose is applied, and \\(0\\) otherwise. This is also the definition of \u003ca href=\"/posts/kbhorthonormal/\"\u003eorthonormal\u003c/a\u003eity.\u003c/p\u003e\n\u003cp\u003eTherefore, we desire \\(U_{1}^{*} U_1 = I\\). 
We hence consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1^{*} U_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have by substitution of \\(U_1 = M V_1 D^{-\\frac{1}{2}}\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven the property that \\((AB)^{*} = B^{*}A^{*}\\), we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(M V_1 D^{-\\frac{1}{2}})^{*}(M V_1 D^{-\\frac{1}{2}}) = {D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall now that, from way before, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV_1^{*} M^{*} M V_1 = D\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSubstituting that in:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n{D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} (V_1^{*} M^{*}M V_1) D^{-\\frac{1}{2}} \\\\\n\u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eRecall now that the multiplication of diagonal matricies are commutative (by calculation), and that diagonal real matricies are self-adjoint (try conjugate-transposing a real diagonal matrix). We know that \\(D^{-\\frac{1}{2}}\\) is real (because its filled with the square roots of the \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es of \\(M^{*}M\\), which is \u003ca href=\"/posts/kbhaxler_7_a/#self-adjoint\"\u003eself-adjoint\u003c/a\u003e, and \u003ca href=\"/posts/kbhaxler_7_a/#eigenvalues-of-id-04577953-b953-4ac0-8102-fe9b804bdfc9-self-adjoint-matricies-are-real\"\u003eeigenvalues of self-adjoint matricies are real\u003c/a\u003e) and is by definition diagonal. 
So we have that \\(D^{-\\frac{1}{2}}\\) is self-adjoint.\u003c/p\u003e\n\u003cp\u003eTaking those facts in mind, we can now rewrite this expression:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n{D^{-\\frac{1}{2}}}^{*} V_1^{*} M^{*}M V_1 D^{-\\frac{1}{2}} \u0026amp;= {D^{-\\frac{1}{2}}}^{*} D D^{-\\frac{1}{2}} \\\\\n\u0026amp;= D^{-\\frac{1}{2}} D D^{-\\frac{1}{2}} \\\\\n\u0026amp;= D^{-\\frac{1}{2}}D^{-\\frac{1}{2}} D \\\\\n\u0026amp;= D^{-1} D \\\\\n\u0026amp;= I\n\\end{align}\u003c/p\u003e\n\u003cp\u003eTherefore, \\(U_1^{*} U_1 = I\\) as desired, so the columns of \\(U_1\\) is orthonormal.\u003c/p\u003e\n\u003ch3 id=\"svd-fully\"\u003eSVD, fully\u003c/h3\u003e\n\u003cp\u003eRecall that, so far, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U_1 D^{\\frac{1}{2}} V_1^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 = M V_1 D^{-\\frac{1}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo far, \\(U_1\\) and \\(V_1^{*}\\) are both disappointingly not \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es. However, we know that \\(U_1\\) and \\(V_1\\) are both orthonormal (the former per aside #4 above, the latter by the \u003ca href=\"/posts/kbhaxler_7_a/#complex-spectral-theorem\"\u003espectral theorem\u003c/a\u003e and \u003ca href=\"#aside-1-zero-eigenvalue-eigenvector-ordering\"\u003econstruction above\u003c/a\u003e). So wouldn\u0026rsquo;t it be doubleplusgood for both of them to be \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es?\u003c/p\u003e\n\u003cp\u003eTo make this happen, we need to change the shapes of things a little without changing how the matricies behave. 
That is: we want \\(U\\) and \\(V^{* }\\) to both be \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003es, and yet still have \\(U D^{\\frac{1}{2}} V^{*} = M\\).\u003c/p\u003e\n\u003ch4 id=\"padding-out-d-and-v\"\u003ePadding out \\(D\\) and \\(V\\)\u003c/h4\u003e\n\u003cp\u003eThere are immediate and direct ways of padding out \\(D^{\\frac{1}{2}}\\) and \\(V_{1}^{*}\\): let us replace \\(V_1 \\implies V\\), and just shove enough zeros into \\(D\\) such that the dimensions work out (changing it from shape \\(n-p \\times n-p\\) to \\(m \\times n\\), but do this by just adding enough zeros on the edges until it works).\u003c/p\u003e\n\u003cp\u003eSo first, \\(D^{\\frac{1}{2}}\\) becomes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nD^{\\frac{1}{2}}_{new} = \\mqty(D^{\\frac{1}{2}}_{orig} \u0026amp; 0 \u0026amp;\\dots \\\\ 0 \u0026amp; 0 \u0026amp;\\dots \\\\ 0\u0026amp;0\u0026amp;\\dots)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(the number of zeros on the edge vary based on the proportions of \\(n, p, m\\)).\u003c/p\u003e\n\u003cp\u003eWhy would this always be padding? i.e. why is \\(n-p \\leq m\\)? Here\u0026rsquo;s a hand-wavy proof that the reader can choose to fill in the gaps of: consider the fact that \\(M\\)\u0026rsquo;s shape is \\(m \\times n\\). Specifically, this means that \\(M: V \\to W\\) where \\(\\dim V = n\\) and \\(\\dim W = m\\). Say for the sake of argument \\(n\u0026gt; m\\) (otherwise naturally \\(n-p \\leq m\\) because \\(n\\leq m\\)). Consider \\(null\\ M\\); given it is a map from a larger space to a smaller space, \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003ethere\u0026rsquo;s going to be a non-trivial null space\u003c/a\u003e. This non-trivial null space is going to be as large or larger than \\(m-n\\); and the null space of \\(M^{*}M\\) will be at least as large as \\(m-n\\) as well because everything is sent through \\(M\\) first. 
And then applying rank nullity can show that \\(m \\geq \\dim\\ range\\ M^{ *}M\\). Therefore, the number of non-zero eigenvalues of \\(M^{ *}M\\), which also corresponds to the number of non-zero columns of \\(D\\), which also is \\(n-p\\), must be smaller than or equal to \\(m\\) because otherwise the diagonal representation would have too many linearly independent columns (i.e. more lin. indp. columns that the rank which is impossible).\u003c/p\u003e\n\u003cp\u003eNow, we have\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\mqty(V_1 \u0026amp; V_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(V_1\\) is a matrix whose columns are the non-zero \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e correlated \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, and the columns of \\(V_1\\) the zero-eigenvalue related ones.\u003c/p\u003e\n\u003cp\u003eNote, now that:\u003c/p\u003e\n\u003cp\u003e\\(D^{\\frac{1}{2}}_{new} V^{* }\\) is an \\(m \\times n\\) matrix that behaves almost exactly like \\(D^{\\frac{1}{2}}_{orig} V_1^{*}\\), a \\(n-p \\times n\\) matrix. The last \\(m-(n-p)\\) (as we established before, \\(m \\geq n-p\\)) dimensions of the new, padded matrix\u0026rsquo;s output will simply be \\(0\\): because recall that \\(DT\\) for some diagonal matrix \\(D\\) scales the \u003cem\u003erows\u003c/em\u003e of \\(T\\): and the first \\(n-p\\) rows (corresponding to the columns of \\(V_1\\), because recall we are applying \\(V\\) not \\(V^{ *}\\) to \\(D\\)) will be scaled normally, and the last \\(m-(n-p)\\) rows will be scaled by \\(0\\) as they are a part of the padded zero-diagonal.\u003c/p\u003e\n\u003ch4 id=\"padding-out-u\"\u003ePadding out \\(U\\)\u003c/h4\u003e\n\u003cp\u003eWith \\(D\\) and \\(V\\) padded, its time to deal with \\(U\\). 
Fortunately, recall that the last bit of the output of \\(DV\\) will just be \\(0\\): so whatever we stick in terms of columns of \\(V\\) for those slots will never actually be added to the final output. In a sense, they don\u0026rsquo;t really matter.\u003c/p\u003e\n\u003cp\u003eThe first \\(n-p\\) of \\(U\\) (i.e. \\(U_{1}\\)) we already have a well-defined answer: recall from before \\(U_1 = M D^{-\\frac{1}{2}} V_{1}^{*}\\). So the last bit we can just stick on literally whatever to make it work.\u003c/p\u003e\n\u003cp\u003eAnd by \u0026ldquo;making it work\u0026rdquo;, we literally just mean extending the columns of \\(U_1\\) until you have \\(m\\) linearly-independent of them, then \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting to make it all orthonormal. The first \\(n-p\\) columns will not be affected by \u003ca href=\"/posts/kbhgram_schmidt/\"\u003eGram-Schmidt\u003c/a\u003eting, as we have established before \u003ca href=\"#aside-4-u-1-is-orthonormal\"\u003e\\(U_1\\) is orthonormal\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eAgain, these are basically arbitrary: no one cares because these columns will always be scaled by \\(0\\) as they are part of the \u0026ldquo;padding columns\u0026rdquo; from padding \\(D^{\\frac{1}{2}}\\) out.\u003c/p\u003e\n\u003ch4 id=\"and-so-finally\"\u003eand so, finally\u003c/h4\u003e\n\u003cp\u003eFinally, we now have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nM = U D^{\\frac{1}{2}} V^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(U\\) is an \\(m \\times m\\) \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e, \\(D^{\\frac{1}{2}}\\) is an \\(m \\times n\\) semidiagonal matrix (diagonal \\(n-p \\times n-p\\) part, then \\(0\\) padding all around) filled with \u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es of \\(M\\) on the diagonal, and \\(V^{*}\\) is an 
\\(n \\times n\\) \u003ca href=\"/posts/kbhaxler_7_a/#unitary\"\u003eunitary\u003c/a\u003e \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e filled with orthonormal rows of right singular-vectors (i.e. \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es of \\(M^{ *}M\\)).\u003c/p\u003e\n\u003ch2 id=\"useful-corollaries\"\u003eUseful corollaries\u003c/h2\u003e\n\u003ch3 id=\"if-lambda-is-an-non-negative-real-eigenvalue-of-m-then-lambda-is-sometimes-a-singular-value-of-m\"\u003eIf \\(\\lambda\\) is an non-negative real eigenvalue of \\(M\\), then \\(\\lambda\\) is sometimes a singular value of \\(M\\)\u003c/h3\u003e\n\u003cp\u003eConsider the matrix:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(1 \u0026amp; 1 \\\\0 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsingular_value_decomposition/\"\u003esingular value\u003c/a\u003es: \\(\\sqrt{2},0\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003es: \\(1,0\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eHowever, the statement is the case if \\(M\\) is already \u003ca href=\"/posts/kbhdiagonal_matrix/#properties-of-diagonal-matrices\"\u003ediagonalizable\u003c/a\u003e, then in which case you can imagine constructing \\(M^{* }M\\) to be vis a vi the eigenbasis of \\(M\\), which means that the resulting diagonal representation of \\(M^{*}M\\) would just be the eigenvalues of \\(M\\) squared as you are multiplying a diagonal matrix by itself.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsingular_value_decomposition/","tags":null,"title":"singular value decomposition"},{"categories":null,"contents":"The SIR Model is a model to show how diseases spread.\nSusceptible \u0026ndash; # of susceptible people Infectious \u0026mdash; # of infectious people Removed \u0026mdash; # of removed people Compartmental SIR model S =\u0026gt; I =\u0026gt; R [ =\u0026gt; S]\nSo then, 
the question is: what is the transfer rate between populations between these compartments?\nParameters:\n\\(R_0\\) \u0026ldquo;reproductive rate\u0026rdquo;: the number of people that one infectious person will infect over the duration of their entire infectious period, if the rest of the population is entirely susceptible (only appropriate for a short duration) \\(D\\) \u0026ldquo;duration\u0026rdquo;: duration of the infectious period \\(N\\) \u0026ldquo;number\u0026rdquo;: population size (fixed) Transition I to R:\n\\begin{equation} \\frac{I}{D} \\end{equation}\n\\(I\\) is the number of infectious people, and \\(\\frac{1}{D}\\) is the number of people that recover/remove per day (i.e. because the duration is \\(D\\).)\nTransition from S to I:\n\\begin{equation} I \\frac{R_0}{D} \\frac{S}{N} \\end{equation}\nSo for \\(\\frac{R_0}{D}\\) is the number of people able to infect per day, \\(\\frac{S}{N}\\) is the percentage of population that\u0026rsquo;s able to infect, and \\(I\\) are the number of people doing the infecting.\nAnd so therefore\u0026mdash;\n\\(\\dv{S}{T} = -\\frac{SIR_{0}}{DN}\\) \\(\\dv{I}{T} = \\frac{SIR_{0}}{DN}\\) \\(\\dv{I}{T} = \\frac{I}{D}\\) Evolutionary Game Theory Suppose that we have two strategies, \\(A\\) and \\(B\\), and they have some payoff matrix:\nA B A (a,a) (b,c) B (c,b) (d,d) and we have some values:\n\\begin{equation} \\mqty(x_{a} \\\\x_{b}) \\end{equation}\nare the relative abundances (i.e. 
that \\(xa+xb\\)).\nThe finesses (\u0026ldquo;how much are you going to reproduce\u0026rdquo;) of the strategies are determined by\u0026mdash;\n\\(f_{A}(x_{A}, x_{B}) = ax_{A} + bx_{B}\\) \\(f_{B}(x_{A}, x_{B}) = cx_{A} + dx_{B}\\) Except for payoff constants \\((a,b,c,d)\\), everything else is a function of time.\nThe mean fitness, then:\n\\begin{equation} q = x_{A}f_{A} + x_{B}f_{B} \\end{equation}\nLet\u0026rsquo;s have the actual, absolute number of individuals:\n\\begin{equation} \\mqty(N_{A}\\\\ N_{B}) \\end{equation}\nSo, we can talk about the change is individuals using strategy \\(A\\):\n\\begin{equation} \\dv t x_{A} = \\dv t \\frac{N_{A}}{N} = X_{A}(f_{a}) \\end{equation}\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsir_model/\"\u003eSIR Model\u003c/a\u003e is a model to show how diseases spread.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSusceptible \u0026ndash; # of susceptible people\u003c/li\u003e\n\u003cli\u003eInfectious \u0026mdash; # of infectious people\u003c/li\u003e\n\u003cli\u003eRemoved \u0026mdash; # of removed people\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"compartmental-sir-model\"\u003eCompartmental SIR model\u003c/h2\u003e\n\u003cp\u003eS =\u0026gt; I =\u0026gt; R [ =\u0026gt; S]\u003c/p\u003e\n\u003cp\u003eSo then, the question is: what is the transfer rate between populations between these compartments?\u003c/p\u003e\n\u003cp\u003eParameters:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(R_0\\) \u0026ldquo;reproductive rate\u0026rdquo;: the number of people that one infectious person will infect over the duration of their entire infectious period, if the rest of the population is entirely susceptible (only appropriate for a short duration)\u003c/li\u003e\n\u003cli\u003e\\(D\\) \u0026ldquo;duration\u0026rdquo;: duration of the infectious period\u003c/li\u003e\n\u003cli\u003e\\(N\\) \u0026ldquo;number\u0026rdquo;: population size (fixed)\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cp\u003eTransition I to 
R:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{I}{D}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(I\\) is the number of infectious people, and \\(\\frac{1}{D}\\) is the number of people that recover/remove per day (i.e. because the duration is \\(D\\).)\u003c/p\u003e\n\u003cp\u003eTransition from S to I:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nI \\frac{R_0}{D} \\frac{S}{N}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo for \\(\\frac{R_0}{D}\\) is the number of people able to infect per day, \\(\\frac{S}{N}\\) is the percentage of population that\u0026rsquo;s able to infect, and \\(I\\) are the number of people doing the infecting.\u003c/p\u003e\n\u003cp\u003eAnd so therefore\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(\\dv{S}{T} = -\\frac{SIR_{0}}{DN}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\dv{I}{T} = \\frac{SIR_{0}}{DN}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\dv{I}{T} = \\frac{I}{D}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"evolutionary-game-theory\"\u003eEvolutionary Game Theory\u003c/h2\u003e\n\u003cp\u003eSuppose that we have two strategies, \\(A\\) and \\(B\\), and they have some payoff matrix:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003eA\u003c/th\u003e\n\u003cth\u003eB\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eA\u003c/td\u003e\n\u003ctd\u003e(a,a)\u003c/td\u003e\n\u003ctd\u003e(b,c)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eB\u003c/td\u003e\n\u003ctd\u003e(c,b)\u003c/td\u003e\n\u003ctd\u003e(d,d)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eand we have some values:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(x_{a} \\\\x_{b})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eare the relative abundances (i.e. 
that \\(xa+xb\\)).\u003c/p\u003e\n\u003cp\u003eThe finesses (\u0026ldquo;how much are you going to reproduce\u0026rdquo;) of the strategies are determined by\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(f_{A}(x_{A}, x_{B}) = ax_{A} + bx_{B}\\)\u003c/li\u003e\n\u003cli\u003e\\(f_{B}(x_{A}, x_{B}) = cx_{A} + dx_{B}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eExcept for payoff constants \\((a,b,c,d)\\), everything else is a function of time.\u003c/p\u003e\n\u003cp\u003eThe mean fitness, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nq = x_{A}f_{A} + x_{B}f_{B}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s have the actual, absolute number of individuals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(N_{A}\\\\ N_{B})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, we can talk about the change is individuals using strategy \\(A\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv t x_{A} = \\dv t \\frac{N_{A}}{N} = X_{A}(f_{a})\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsir_model/","tags":null,"title":"SIR Model"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhslopes/","tags":null,"title":"slope (statistics)"},{"categories":null,"contents":"HSVI\nOne-Liner \u0026ldquo;impact of approximation decreases as steps from the root node\u0026rdquo;\nNovelty combined alpha-vector and forward heuristics to guide search of belief states before backup 100x times faster in PBVI scales to huge environments Goal: minimize \u0026ldquo;regret\u0026rdquo; (difference until optimal policy)\nNovelty HSVI 2 Projected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection) uses blind lower bound Notable Methods Key Figs New Concepts Notes ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhhsvi/\"\u003eHSVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;impact of approximation 
decreases as steps from the root node\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecombined alpha-vector and forward heuristics to guide search of belief states before backup\u003c/li\u003e\n\u003cli\u003e100x times faster in \u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePBVI\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003escales to \u003cem\u003ehuge\u003c/em\u003e environments\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eGoal: minimize \u0026ldquo;regret\u0026rdquo; (difference until optimal policy)\u003c/p\u003e\n\u003ch2 id=\"novelty-hsvi-2\"\u003eNovelty HSVI 2\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eProjected the upper bound onto a convex hull (HSVI2: via approximate convex hull projection)\u003c/li\u003e\n\u003cli\u003euses \u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsmith/","tags":null,"title":"Smith 20??"},{"categories":null,"contents":"a function is called smoo\n","html":"\u003cp\u003ea function is called smoo\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsmooth_function/","tags":null,"title":"smooth function"},{"categories":null,"contents":"A Social Network is a scheme for studying the relationships and interactions amongst groups of people.\npeople: \\(V\\) relationship: \\(E\\) system: a network \\(G(V,E)\\) Importantly, the \u0026ldquo;labels\u0026rdquo; of \\(E\\) often do not matter as we frequently want to study only the graphical structure of the Social Network.\ndegree (node) 
The degree of a node is the number of edges that are touching that node (whether in or out, or undirected).\nThe in-degree and out-degree are the number of edges touching that node (going in or out) respectively.\ndegree of node many nodes on the internet have fairly low degree, whereas some hubs have very high degree. Consider a function \\(P(k)\\), representing the number of nodes with degree \\(k\\). This follows a power law:\n\\begin{equation} P(k) \\propto k^{-a} \\end{equation}\nmeaning:\n\\begin{equation} P(k) = ck^{-a} \\end{equation}\nwhereby as degree increases, the percentage of nodes with that number of degree drops of exponentially.\nA power law distribution is log-log linear, and is \u0026ldquo;scale free\u0026rdquo;: meaning no matter how the input \\(x\\) is scaled its simply resulting in a multiplicative constant under the output: shape does NOT change.\nZipf\u0026rsquo;s Law \\begin{equation} freq(w_{r}) \\prop \\frac{1}{r^{\\beta}} \\end{equation}\nwhere \\(\\beta\\) is close to \\(1\\) and \\(w_{r}\\) is the r-th most frequent word.\nbetweenness the betweenness of a target node is calculated as: for all pairs of nodes on the graph that is not our target node, what\u0026rsquo;s the ratio between the number of shortest paths between the two nodes and the number that goes through \\(j\\).\nFormally:\nfor some node \\(j\\) for which we want to calculate betweenness, and \\(s_{ik}(j)\\) being the number of shortest paths between \\(i\\) and \\(k\\) that goes through \\(j\\) and \\(s_{ik}\\) being the number of shortest paths there are in general, we have:\n\\begin{equation} b_{j} = \\frac{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}(j)}{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}} \\end{equation}\nwhere \\(i \\neq j \\neq k\\)\nRecall that with directed graphs we may need to double count.\nclustering coefficient for some node \\(A\\), the clustering coefficient measures the percentage of nodes directly adjacent to \\(A\\) which are also directly adjacent with 
each other.\nrecall that, if a node has \\(n\\) friends, the total possible edges is \\(\\frac{n\\qty(n-1)}{2}\\).\nMilgram Small-World experiment made 300 people in Omaha NE to mail a thing to somebody in Boston by passing it through only people they knew by first-name basis.\nSmall World Graph The world is a Small World Graph: networks of friends is large, sparse, decentralized, and extremely clustered. Yet, people mostly seem to be about 5-6 degrees of separation away.\nThis characterizes a Small World Graph:\nhigh clustering coefficient low average shortest path Watts and Strogatz Watts and Strogatz proposes a way to build a Small World Graph:\nstart with a ring and connect each node to the next \\(z\\) nodes with probability \\(p\\) on each node, rewire every edge/add a shortcut to a random node as long as \\(0 \u0026lt; p \u0026lt; 1\\), we get a Small World Graph\nintuition: a single random connection builds a shortcut through highly centralized clusters\u0026mdash;high \\(C\\), low \\(L\\).\nweak link most job referrals were through personal contacts, but they are often WEAK LINKS.\nTriadic Closure If two people have a common friend, its likely that they become friends eventually too. This increases cluster coefficient.\nStrong Triadic Closure If there is a strong tie between \\(A - B\\), and \\(B - C\\), then there must be a strong tie between \\(A - C\\).\nIf this property is satisfied, then any Local Bridge must be a weak tie. Otherwise:\nif there is a strong \\(A-B\\) tie and it is a local bridge, then \\(C-B\\) must be a connection under Strong Triadic Closure. yet, \\(A-B\\) is a local bridge.\nBy contradiction, \\(A-B\\) is a weak tie.\nLocal Bridge A Local Bridge is an edge \\(x-y\\) which bridges two \u0026ldquo;local components\u0026rdquo; together. 
More formally, an edge between \\(A,B\\) is a Local Bridge if it does not live on any triangle of \\(A\\) or \\(B\\).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhsocial_network/\"\u003eSocial Network\u003c/a\u003e is a scheme for studying the relationships and interactions amongst groups of people.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003epeople\u003c/strong\u003e: \\(V\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003erelationship\u003c/strong\u003e: \\(E\\)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esystem\u003c/strong\u003e: a network \\(G(V,E)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eImportantly, the \u0026ldquo;labels\u0026rdquo; of \\(E\\) often do not matter as we frequently want to study only the graphical structure of the \u003ca href=\"/posts/kbhsocial_network/\"\u003eSocial Network\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"degree--node\"\u003edegree (node)\u003c/h2\u003e\n\u003cp\u003eThe degree of a node is the number of edges that are touching that node (whether in or out, or undirected).\u003c/p\u003e\n\u003cp\u003eThe in-degree and out-degree are the number of edges touching that node (going in or out) respectively.\u003c/p\u003e\n\u003ch3 id=\"degree-of-node\"\u003edegree of node\u003c/h3\u003e\n\u003cp\u003emany nodes on the internet have fairly low degree, whereas some hubs have very high degree. Consider a function \\(P(k)\\), representing the number of nodes with degree \\(k\\). 
This follows a power law:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k) \\propto k^{-a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k) = ck^{-a}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby as degree increases, the percentage of nodes with that number of degree drops of exponentially.\u003c/p\u003e\n\u003cp\u003eA power law distribution is log-log linear, and is \u0026ldquo;scale free\u0026rdquo;: meaning no matter how the input \\(x\\) is scaled its simply resulting in a multiplicative constant under the output: shape does NOT change.\u003c/p\u003e\n\u003ch3 id=\"zipf-s-law\"\u003eZipf\u0026rsquo;s Law\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nfreq(w_{r}) \\prop \\frac{1}{r^{\\beta}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\beta\\) is close to \\(1\\) and \\(w_{r}\\) is the r-th most frequent word.\u003c/p\u003e\n\u003ch2 id=\"betweenness\"\u003ebetweenness\u003c/h2\u003e\n\u003cp\u003ethe \u003ca href=\"#betweenness\"\u003ebetweenness\u003c/a\u003e of a target node is calculated as: for all pairs of nodes on the graph that is not our target node, what\u0026rsquo;s the ratio between the number of shortest paths between the two nodes and the number that goes through \\(j\\).\u003c/p\u003e\n\u003cp\u003eFormally:\u003c/p\u003e\n\u003cp\u003efor some node \\(j\\) for which we want to calculate \u003ca href=\"#betweenness\"\u003ebetweenness\u003c/a\u003e, and \\(s_{ik}(j)\\) being the number of shortest paths between \\(i\\) and \\(k\\) that goes through \\(j\\) and \\(s_{ik}\\) being the number of shortest paths there are in general, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{j} = \\frac{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}(j)}{\\sum_{i=1}^{n} \\sum_{k=1}^{n} s_{ik}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(i \\neq j \\neq k\\)\u003c/p\u003e\n\u003cp\u003eRecall that with directed graphs we may need to double count.\u003c/p\u003e\n\u003ch2 
id=\"clustering-coefficient\"\u003eclustering coefficient\u003c/h2\u003e\n\u003cp\u003efor some node \\(A\\), the \u003ca href=\"#clustering-coefficient\"\u003eclustering coefficient\u003c/a\u003e measures the percentage of nodes directly adjacent to \\(A\\) which are also directly adjacent with each other.\u003c/p\u003e\n\u003cp\u003erecall that, if a node has \\(n\\) friends, the total possible edges is \\(\\frac{n\\qty(n-1)}{2}\\).\u003c/p\u003e\n\u003ch2 id=\"milgram-small-world-experiment\"\u003eMilgram Small-World experiment\u003c/h2\u003e\n\u003cp\u003emade 300 people in Omaha NE to mail a thing to somebody in Boston by passing it through only people they knew by first-name basis.\u003c/p\u003e\n\u003ch2 id=\"small-world-graph\"\u003eSmall World Graph\u003c/h2\u003e\n\u003cp\u003eThe world is a \u003ca href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e: networks of friends is large, sparse, decentralized, and extremely clustered. Yet, people mostly seem to be about 5-6 degrees of separation away.\u003c/p\u003e\n\u003cp\u003eThis characterizes a \u003ca href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehigh \u003ca href=\"#clustering-coefficient\"\u003eclustering coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elow average shortest path\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"watts-and-strogatz\"\u003eWatts and Strogatz\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#watts-and-strogatz\"\u003eWatts and Strogatz\u003c/a\u003e proposes a way to build a \u003ca href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart with a ring and connect each node to the next \\(z\\) nodes\u003c/li\u003e\n\u003cli\u003ewith probability \\(p\\) on each node, rewire every edge/add a shortcut to a random node\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eas long as \\(0 \u0026lt; p \u0026lt; 1\\), we get a \u003ca 
href=\"#small-world-graph\"\u003eSmall World Graph\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eintuition\u003c/strong\u003e: a single random connection builds a shortcut through highly centralized clusters\u0026mdash;high \\(C\\), low \\(L\\).\u003c/p\u003e\n\u003ch2 id=\"weak-link\"\u003eweak link\u003c/h2\u003e\n\u003cp\u003emost job referrals were through personal contacts, but they are often \u003cstrong\u003eWEAK LINKS\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"triadic-closure\"\u003eTriadic Closure\u003c/h3\u003e\n\u003cp\u003eIf two people have a common friend, its likely that they become friends eventually too. This increases cluster coefficient.\u003c/p\u003e\n\u003ch4 id=\"strong-triadic-closure\"\u003eStrong Triadic Closure\u003c/h4\u003e\n\u003cp\u003eIf there is a strong tie between \\(A - B\\), and \\(B - C\\), then there must be a strong tie between \\(A - C\\).\u003c/p\u003e\n\u003cp\u003eIf this property is satisfied, then any \u003ca href=\"#local-bridge\"\u003eLocal Bridge\u003c/a\u003e must be a weak tie. Otherwise:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-06_20-12-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eif there is a strong \\(A-B\\) tie and it is a local bridge, then \\(C-B\\) must be a connection under \u003ca href=\"#strong-triadic-closure\"\u003eStrong Triadic Closure\u003c/a\u003e. yet, \\(A-B\\) is a local bridge.\u003c/p\u003e\n\u003cp\u003eBy contradiction, \\(A-B\\) is a weak tie.\u003c/p\u003e\n\u003ch3 id=\"local-bridge\"\u003eLocal Bridge\u003c/h3\u003e\n\u003cp\u003eA \u003ca href=\"#local-bridge\"\u003eLocal Bridge\u003c/a\u003e is an edge \\(x-y\\) which bridges two \u0026ldquo;local components\u0026rdquo; together. 
More formally, an edge between \\(A,B\\) is a \u003ca href=\"#local-bridge\"\u003eLocal Bridge\u003c/a\u003e if it does not live on any triangle of \\(A\\) or \\(B\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsocial_network/","tags":null,"title":"Social Network"},{"categories":null,"contents":"Social Security Administration is a welfare program to directly give cash to those who are in need.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsocial_security_administration/\"\u003eSocial Security Administration\u003c/a\u003e is a welfare program to directly give cash to those who are in need.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsocial_security_administration/","tags":null,"title":"Social Security Administration"},{"categories":null,"contents":":clap: What. Does. The. Client. Want.\nWeb Applications vs Local Application scale\u0026mdash;what levels of functionality and access do we want training speed SOLID principles SOLID principles is a set of OOP principles; its kinda famous but encourages mindless braindead Java devs.\nSingle Responsibility: that a class should have only one clearly defined thing it represents, and the class should only change IFF the underlying spec regarding that thing changes Easy pitfalls: mixing PERSISTENCE LOGIC with BUSINESS LOGIC (db should be moved to a separate class like ThingProvider/ThingPersistence) Open-Close Principle: classes should be easily extendable and closed to modification \u0026ldquo;we should be able to add new functionality without touching what\u0026rsquo;s written\u0026rdquo; so like interfaces are nice Liskov Substitution Principle: subclasses should act like base classes (and more); good inheritance systems should have this built in Interface Segregation Principle: you should build lots of interfaces + sub-interfaces based on what clients are and will need, such that a client only has to extend precisely the amount needed to do their job Dependency Inversion Principle: 
when possible, depend on abstract classes or interfaces and not their implementations Dependency Injection \u0026ldquo;Dependency Injection\u0026rdquo; is a 25-dollar term for a 5-cent concept. [\u0026hellip;] Dependency injection means giving an object its instance variables. [\u0026hellip;].\nBlame this for all the fucking Factory classes.\nBasically having a factory (or just a fancy-enough constructor) to give a class all the right instantiations of the things it needs instead of having the class construct them inside.\nYou do this because 1) the class can then depend on more abstract interfaces 2) you can test shit easier by constructing all the necessary parts\nShared-nothing architecture A shared-nothing architecture is a type of distributed computing software architecture which ensures that no single shard shares/overlaps resources with others (i.e. needing shared memory, etc.)\nSo no mutexes; and no single-points of failure (i.e. we don\u0026rsquo;t depend on a central node always working).\n","html":"\u003cp\u003e:clap: What. Does. The. Client. 
Want.\u003c/p\u003e\n\u003ch2 id=\"web-applications-vs-local-application\"\u003eWeb Applications vs Local Application\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003escale\u0026mdash;what levels of functionality and access do we want\u003c/li\u003e\n\u003cli\u003etraining\u003c/li\u003e\n\u003cli\u003espeed\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"solid-principles\"\u003eSOLID principles\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#solid-principles\"\u003eSOLID principles\u003c/a\u003e is a set of OOP principles; its kinda famous but encourages mindless braindead Java devs.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eSingle Responsibility\u003c/strong\u003e: that a class should have only one clearly defined thing it represents, and the class should only change IFF the underlying spec regarding that thing changes\n\u003cul\u003e\n\u003cli\u003eEasy pitfalls: mixing PERSISTENCE LOGIC with BUSINESS LOGIC (db should be moved to a separate class like ThingProvider/ThingPersistence)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eOpen-Close Principle\u003c/strong\u003e: classes should be easily extendable and closed to modification\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;we should be able to add new functionality without touching what\u0026rsquo;s written\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eso like interfaces are nice\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLiskov Substitution Principle\u003c/strong\u003e: subclasses should act like base classes (and more); good inheritance systems should have this built in\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eInterface Segregation Principle\u003c/strong\u003e: you should build lots of interfaces + sub-interfaces based on what clients are and will need, such that a client only has to extend precisely the amount needed to do their job\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eDependency Inversion 
Principle\u003c/strong\u003e: when possible, depend on abstract classes or interfaces and not their implementations\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"dependency-injection\"\u003eDependency Injection\u003c/h3\u003e\n\u003cblockquote\u003e\n\u003cp\u003e\u0026ldquo;Dependency Injection\u0026rdquo; is a 25-dollar term for a 5-cent concept. [\u0026hellip;] Dependency injection means giving an object its instance variables. [\u0026hellip;].\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003eBlame this for all the fucking Factory classes.\u003c/p\u003e\n\u003cp\u003eBasically having a factory (or just a fancy-enough constructor) to give a class all the right instantiations of the things it needs instead of having the class construct them inside.\u003c/p\u003e\n\u003cp\u003eYou do this because 1) the class can then depend on more abstract interfaces 2) you can test shit easier by constructing all the necessary parts\u003c/p\u003e\n\u003ch2 id=\"shared-nothing-architecture\"\u003eShared-nothing architecture\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#shared-nothing-architecture\"\u003eshared-nothing architecture\u003c/a\u003e is a type of distributed computing software architecture which ensures that no single shard shares/overlaps resources with others (i.e. needing shared memory, etc.)\u003c/p\u003e\n\u003cp\u003eSo no mutexes; and no single-points of failure (i.e. we don\u0026rsquo;t dependent on a central node always working).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_design_and_architecture_patterns/","tags":null,"title":"Software Design and Architecture Patterns"},{"categories":null,"contents":"Here\u0026rsquo;s a bit of a guide to start in software development. It is mostly links to other resources that would help.\nIntroductory Remarks Nobody \u0026ldquo;learns\u0026rdquo; software development. Even in job interviews, people expect you to have \u0026ldquo;worked\u0026rdquo; in software development. 
The industry, as a whole, drives via \u0026ldquo;learn-by-doing\u0026rdquo;, so its best to start thinking about what you want to achieve with software dev in terms of projects, then look specifically for resources to help you achieve those. Once you Google enough, et viola! You will have the skills needed to tackle another project.\nCommon Tooling There are some common tooling that is standard across all of software development.\nGoogle Google it! 99.98% of programming skills center around google-fu. Learn to Google unknown terms and get a better sense of the picture. The same rule applies through this guide as well.\nStackExchange A group of very mean people put together a very helpful family of websites which are essentially vicious forum boards. They are the StackExchange family of boards.\nThe most famous of which, and the one focused on programming, is called StackOverflow. StackOverflow (\u0026ldquo;SO\u0026rdquo;) is an extremely helpful resource for browsing any question you may have. For instance, if your code crashes with a stack trace, Googling the error and set site:stackoverflow.com will get you pretty far.\nIf you ask a question, though, be prepared to get yelled at though, the likely reason is that your question is already answered.\nmacOS For the quick-start type of hardware fitting for this guide: get a macBook. Even the cheapest one.\nDevelopment on Windows is like cooking on campfire. Doable, useful for specific things, but not great overall. If you have a PC, I would (and recommend! its great for advanced users especially) to put Debian/Ubuntu/some easy to use Linux on it. Windows is just terrible.\nI should add that Microsoft started doing Windows Subsystem for Linux: https://docs.microsoft.com/en-us/windows/wsl/install, which apparently have been pretty good. 
So worth taking a shot if you are stuck on Windows.\n*nix Terminal BSD/UNIX terminal is a tool that essentially skips the fancy user interface (UI) which your operating system draws and directly runs things \u0026ldquo;organically.\u0026rdquo; If you see something in a guide that says like:\n\u0026ldquo;please execute\u0026rdquo;\npython3 test.py or perhaps\nwget https://wandb.ai/jemoka \u0026gt;\u0026gt; test they are probably asking you to type it (\u0026ldquo;execute it\u0026rdquo;) into the Terminal and hit enter.\nRead this guide put together by the Ubuntu people, it\u0026rsquo;s very good. To open the terminal on your macOS device, open an app called Terminal.app. On Ubuntu, I believe its also an app called terminal.\nIDE An \u0026ldquo;IDE\u0026rdquo; is an Integrated Development Environment. It is where code is written. Fortunately, this is an easy one: use VSCode. There is literally no better tool out there for beginners and advanced users; no wonder it has 70% market share.\nSidenote: But Jack? What do you use? I use something called emacs for very specific reasons. Please don\u0026rsquo;t unless you really want misery and to learn a whole language to configure it.\nComputer Language Architecture This is how an idea turns into \u0026ldquo;stuff\u0026rdquo; on your screen.\nHuman programming languages (\u0026ldquo;Python\u0026rdquo;), are a very readable sort of code. No computers can actually read it. 
Usually, code you write goes through a three-step process before its able to be ran.\nFirst, the language you write gets converted by a \u0026ldquo;compiler\u0026rdquo; or \u0026ldquo;interpreter\u0026rdquo;, specialized pieces of code that takes human programming languages into a more machine-readable form of code named \u0026ldquo;assembly\u0026rdquo; or \u0026ldquo;byte code\u0026rdquo; respectively, called the \u0026ldquo;intermediate\u0026rdquo;.\nFor now, think of the difference between compilers and interpreters as translating code either all-at-once (compilers) or line-by-line (interpreters). Because the former has a grander view of the whole, languages that use a compiler (\u0026ldquo;compiled languages\u0026rdquo;) are faster. Although, many programmers find languages that use in interpreter (\u0026ldquo;interpreted language\u0026rdquo;) easier because they can spot problems line by line.\nBut wait! There\u0026rsquo;s more. Assembly and byte-code (what compilers and interpreters generate) are not actually runnable by a computer. Yet another piece of software called a \u0026ldquo;runtime\u0026rdquo; takes the reasonably-machine-readable code and actually performs the required operations.\nSome runtimes for languages like C++ uses the raw x86 CPU, which is the stereotypical \u0026ldquo;binary\u0026rdquo; zeros-and-ones. Some other languages, say Java, uses horribly complex runtimes that amounts to a whole virtual machine.\nHere\u0026rsquo;s a bit of a table.\nLanguage C/I Compiler/Interpreter Intermediate Runtime Python I python python bytecode python Java C javac java object java VM JavaScript I V8 (literally) js bytecode web browser! C/C++ C gcc/clang x86 asm x86 cpu Wonder what the runtimes for languages like Java are built in? C/C++. Eventually it all becomes x86 cpu instructions but its like a layer cake. 
This is why Python and friends are called a \u0026ldquo;higher level language\u0026rdquo;.\ngit Git is where all the code is!\nGit is a decentralized \u0026ldquo;version-control\u0026rdquo; system. It is basically a timestamp-backup system of code with messages and branching.\nGitHub is a website where people like to back up their code. Here\u0026rsquo;s my profile on GitHub.\nManaging Git is pretty\u0026hellip; Involved. It, for instance, assumes familiarity with the Terminal as described above. I suggest learning it, though. Here are some good resources:\nMorgan\u0026rsquo;s very very good git tutorial\u0026hellip; On GitHub! And this article on some commands you should know! Industry-Specific Skills What you start with doesn\u0026rsquo;t matter, but start with something Its easiest to learn programming if you have a project in mind. So, find a project in mind\u0026mdash;what it is, though, doesn\u0026rsquo;t matter. The concepts across programming are highly transferable, but the actual skill is easiest to learn if you are learning w.r.t. a project.\nData science, prototyping, and machine learning Python would be your friend for all things of programming where the act of programming is a means to an end. That is: if you are writing code to do something that\u0026rsquo;s not inherently software (data science, machine learning, heck, also manipulating quantum qubits), Python is your friend.\nIts a language that\u0026rsquo;s designed to be easy to write: is a very do-as-I-say language that sacrifices efficiency and elegance for getting crap done. This is how I started programming. This is the book I started with. It teaches Python through programming a series of small projects that are mostly Terminal games.\nTo learn data science, Nueva\u0026rsquo;s very own data science course give very good conceptual framework. A typical first project is to recognize pictures of handwritten digits, for which there is a good guide. 
I also started something called AIBridge with AIFS, so if we ever publish the recordings I will put them here.\nGoogle also: pip, ipython, Jupyter.\nBackend engineering Backend engineering is the science of dealing with databases and writing API (application programming interfaces). I don\u0026rsquo;t suggest starting with this, but if you are particularly interested in databases, you could!\nTo master backend engineering, first learn a database manipulation language. For 99.98% of the industry, this means mysql. The link directs to a pretty good guide.\nFurthermore, the language with which backend is written is Java. I hate to say it, but despite Java\u0026rsquo;s terribleness (don\u0026rsquo;t worry about it ;)) its very dependable. Here\u0026rsquo;s a book on Java. In general, I really like all of the books from no starch press.\nFrontend and Web engineering Do you like making stuff move? Do you like drawing buttons? Front end maybe for you. The most basic type of front-end engineering is making websites.\nStart by making a \u0026ldquo;vanilla website\u0026rdquo;: HTML (what\u0026rsquo;s on the page), CSS (what colours and sizes), JavaScript (how it moves) is the standard trio of languages to start. freeCodeCamp (a great Medium blog, check their stuff out) has a good guide on the matter.\nHowever, as you progress in your journey, you will find these tools woefully inadequate. Hence, most people writing web front end move on to something called a \u0026ldquo;JavaScript Framework\u0026rdquo;, a tool to generate a \u0026ldquo;vanilla\u0026rdquo; website from some more easily manipulable JS (changing the text on the page moves from a four line operation (indexing, selecting, grabbing, changing) to a one-liner (state.text=new text)).\nA popular JS framework is ReactJS. Check them out.\nFullstack Engineering Frontend + Backend.\nGame development Game development is honestly one of the most horribly complicated and richly science-y part of CS. 
I am not super experience in game development but learning C++ and mastering Unity, the game engine. Oh, right, game dev is the only, and I repeat only (with invisible footnotes and qualifications) reason why you should be writing code on Windows.\nA friend is good at game dev, I can make an intro if needed.\nGood Luck! Remember: Google-fu and project-based curiosity is your friend. Let me know if you have questions.\n","html":"\u003cp\u003eHere\u0026rsquo;s a bit of a guide to start in software development. It is mostly links to other resources that would help.\u003c/p\u003e\n\u003ch2 id=\"introductory-remarks\"\u003eIntroductory Remarks\u003c/h2\u003e\n\u003cp\u003eNobody \u0026ldquo;learns\u0026rdquo; software development. Even in job interviews, people expect you to have \u0026ldquo;worked\u0026rdquo; in software development. The industry, as a whole, drives via \u0026ldquo;learn-by-doing\u0026rdquo;, so its best to start thinking about \u003cem\u003ewhat you want to achieve\u003c/em\u003e with software dev in terms of projects, then look specifically for resources to help you achieve those. Once you Google enough, et viola! You will have the skills needed to tackle another project.\u003c/p\u003e\n\u003ch2 id=\"common-tooling\"\u003eCommon Tooling\u003c/h2\u003e\n\u003cp\u003eThere are some common tooling that is standard across all of software development.\u003c/p\u003e\n\u003ch3 id=\"google\"\u003eGoogle\u003c/h3\u003e\n\u003cp\u003eGoogle it! 99.98% of programming skills center around google-fu. Learn to Google unknown terms and get a better sense of the picture. The same rule applies through this guide as well.\u003c/p\u003e\n\u003ch3 id=\"stackexchange\"\u003eStackExchange\u003c/h3\u003e\n\u003cp\u003eA group of very mean people put together a very helpful family of websites which are essentially vicious forum boards. 
They are the StackExchange family of boards.\u003c/p\u003e\n\u003cp\u003eThe most famous of which, and the one focused on programming, is called \u003ca href=\"https://stackoverflow.com/\"\u003eStackOverflow\u003c/a\u003e. StackOverflow (\u0026ldquo;SO\u0026rdquo;) is an \u003cem\u003eextremely\u003c/em\u003e helpful resource for browsing any question you may have. For instance, if your code crashes with a \u003ca href=\"/posts/kbhstack_trace/\"\u003estack trace\u003c/a\u003e, Googling the error and set \u003ccode\u003esite:stackoverflow.com\u003c/code\u003e will get you pretty far.\u003c/p\u003e\n\u003cp\u003eIf you ask a question, though, \u003ca href=\"https://www.reddit.com/r/learnprogramming/comments/7w5bm4/why_on_people_on_stack_overflow_so_rude/\"\u003ebe prepared to get yelled at\u003c/a\u003e though, the likely reason is that your question is already answered.\u003c/p\u003e\n\u003ch3 id=\"macos\"\u003emacOS\u003c/h3\u003e\n\u003cp\u003eFor the quick-start type of hardware fitting for this guide: \u003ca href=\"https://www.apple.com/macbook-air/\"\u003eget a macBook\u003c/a\u003e. Even the cheapest one.\u003c/p\u003e\n\u003cp\u003eDevelopment on Windows is like cooking on campfire. Doable, useful for specific things, but not great overall. If you have a PC, I would (and recommend! its great for advanced users especially) to put \u003ca href=\"https://ubuntu.com/tutorials/install-ubuntu-desktop#1-overview\"\u003eDebian/Ubuntu/some easy to use Linux\u003c/a\u003e on it. Windows is just terrible.\u003c/p\u003e\n\u003cp\u003eI should add that Microsoft started doing Windows Subsystem for Linux: \u003ca href=\"https://docs.microsoft.com/en-us/windows/wsl/install\"\u003ehttps://docs.microsoft.com/en-us/windows/wsl/install\u003c/a\u003e, which apparently have been pretty good. 
So worth taking a shot if you are stuck on Windows.\u003c/p\u003e\n\u003ch3 id=\"nix-terminal\"\u003e*nix Terminal\u003c/h3\u003e\n\u003cp\u003eBSD/UNIX terminal is a tool that essentially skips the fancy user interface (UI) which your operating system draws and directly runs things \u0026ldquo;organically.\u0026rdquo; If you see something in a guide that says like:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;please execute\u0026rdquo;\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003epython3 test.py\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eor perhaps\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003ewget https://wandb.ai/jemoka \u0026gt;\u0026gt; \u003cspan style=\"color:#111\"\u003etest\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethey are probably asking you to type it (\u0026ldquo;execute it\u0026rdquo;) into the Terminal and hit enter.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://ubuntu.com/tutorials/command-line-for-beginners#1-overview\"\u003eRead this guide\u003c/a\u003e put together by the Ubuntu people, it\u0026rsquo;s very good. To open the terminal on your macOS device, open an app called \u003ccode\u003eTerminal.app\u003c/code\u003e. 
On Ubuntu, I believe its also an app called \u003ccode\u003eterminal\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"ide\"\u003eIDE\u003c/h3\u003e\n\u003cp\u003eAn \u0026ldquo;IDE\u0026rdquo; is an \u003ca href=\"https://en.wikipedia.org/wiki/Integrated_development_environment\"\u003eIntegrated Development Environment\u003c/a\u003e. It is where code is written. Fortunately, this is an easy one: \u003ca href=\"https://code.visualstudio.com/\"\u003euse VSCode\u003c/a\u003e. There is literally no better tool out there for beginners and advanced users; no wonder it has 70% market share.\u003c/p\u003e\n\u003cblockquote\u003e\n\u003cp\u003eSidenote: But Jack? What do you use? I use something called \u003ca href=\"https://www.gnu.org/software/emacs/\"\u003eemacs\u003c/a\u003e for very specific reasons. Please don\u0026rsquo;t unless you really want misery and to \u003ca href=\"https://www.gnu.org/software/emacs/manual/html_node/eintr/\"\u003elearn a whole language\u003c/a\u003e to configure it.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003ch3 id=\"computer-language-architecture\"\u003eComputer Language Architecture\u003c/h3\u003e\n\u003cp\u003eThis is how an idea turns into \u0026ldquo;stuff\u0026rdquo; on your screen.\u003c/p\u003e\n\u003cp\u003eHuman programming languages (\u0026ldquo;Python\u0026rdquo;), are a very readable sort of code. No computers can actually read it. 
Usually, code you write goes through a three-step process before its able to be ran.\u003c/p\u003e\n\u003cp\u003eFirst, the language you write gets converted by a \u0026ldquo;compiler\u0026rdquo; or \u0026ldquo;interpreter\u0026rdquo;, specialized pieces of code that takes human programming languages into a more machine-readable form of code named \u0026ldquo;assembly\u0026rdquo; or \u0026ldquo;byte code\u0026rdquo; respectively, called the \u0026ldquo;intermediate\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor now, think of the difference between compilers and interpreters as translating code either all-at-once (compilers) or line-by-line (interpreters). Because the former has a grander view of the whole, languages that use a compiler (\u0026ldquo;compiled languages\u0026rdquo;) are faster. Although, many programmers find languages that use in interpreter (\u0026ldquo;interpreted language\u0026rdquo;) easier because they can spot problems line by line.\u003c/p\u003e\n\u003cp\u003eBut wait! There\u0026rsquo;s more. Assembly and byte-code (what compilers and interpreters generate) are not actually runnable by a computer. Yet another piece of software called a \u0026ldquo;runtime\u0026rdquo; takes the reasonably-machine-readable code and actually performs the required operations.\u003c/p\u003e\n\u003cp\u003eSome runtimes for languages like C++ uses the raw x86 CPU, which is the stereotypical \u0026ldquo;binary\u0026rdquo; zeros-and-ones. 
Some other languages, say Java, uses horribly complex runtimes that amounts to a whole virtual machine.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a bit of a table.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eLanguage\u003c/th\u003e\n\u003cth\u003eC/I\u003c/th\u003e\n\u003cth\u003eCompiler/Interpreter\u003c/th\u003e\n\u003cth\u003eIntermediate\u003c/th\u003e\n\u003cth\u003eRuntime\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ePython\u003c/td\u003e\n\u003ctd\u003eI\u003c/td\u003e\n\u003ctd\u003epython\u003c/td\u003e\n\u003ctd\u003epython bytecode\u003c/td\u003e\n\u003ctd\u003epython\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJava\u003c/td\u003e\n\u003ctd\u003eC\u003c/td\u003e\n\u003ctd\u003ejavac\u003c/td\u003e\n\u003ctd\u003ejava object\u003c/td\u003e\n\u003ctd\u003ejava VM\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eJavaScript\u003c/td\u003e\n\u003ctd\u003eI\u003c/td\u003e\n\u003ctd\u003eV8 (literally)\u003c/td\u003e\n\u003ctd\u003ejs bytecode\u003c/td\u003e\n\u003ctd\u003eweb browser!\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eC/C++\u003c/td\u003e\n\u003ctd\u003eC\u003c/td\u003e\n\u003ctd\u003egcc/clang\u003c/td\u003e\n\u003ctd\u003ex86 asm\u003c/td\u003e\n\u003ctd\u003ex86 cpu\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWonder what the runtimes for languages like Java are built in? C/C++. Eventually it all becomes x86 cpu instructions but its like a layer cake. This is why Python and friends are called a \u0026ldquo;higher level language\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"git\"\u003egit\u003c/h3\u003e\n\u003cp\u003eGit is where all the code is!\u003c/p\u003e\n\u003cp\u003eGit is a decentralized \u0026ldquo;version-control\u0026rdquo; system. 
It is basically a timestamp-backup system of code with messages and branching.\u003c/p\u003e\n\u003cp\u003eGitHub is a website where people like to back up their code. \u003ca href=\"https://github.com/Jemoka/\"\u003eHere\u0026rsquo;s my profile on GitHub.\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eManaging Git is pretty\u0026hellip; Involved. It, for instance, assumes familiarity with the Terminal as described above. I suggest learning it, though. Here are some good resources:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMorgan\u0026rsquo;s \u003ca href=\"https://github.com/morgansierrasnyder/git-going\"\u003every very good\u003c/a\u003e git tutorial\u0026hellip; On GitHub!\u003c/li\u003e\n\u003cli\u003eAnd \u003ca href=\"https://www.freecodecamp.org/news/10-important-git-commands-that-every-developer-should-know/\"\u003ethis article\u003c/a\u003e on some commands you should know!\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"industry-specific-skills\"\u003eIndustry-Specific Skills\u003c/h2\u003e\n\u003ch3 id=\"what-you-start-with-doesn-t-matter-but-start-with-something\"\u003eWhat you start with doesn\u0026rsquo;t matter, but start with something\u003c/h3\u003e\n\u003cp\u003eIts easiest to learn programming if you have a project in mind. So, find a project in mind\u0026mdash;what it is, though, \u003cem\u003edoesn\u0026rsquo;t matter.\u003c/em\u003e The concepts across programming are highly transferable, but the actual skill is easiest to learn if you are learning w.r.t. a project.\u003c/p\u003e\n\u003ch3 id=\"data-science-prototyping-and-machine-learning\"\u003eData science, prototyping, and machine learning\u003c/h3\u003e\n\u003cp\u003ePython would be your friend for all things of programming where the act of programming is a means to an end. 
That is: if you are writing code to \u003cem\u003edo something\u003c/em\u003e that\u0026rsquo;s not inherently software (data science, machine learning, heck, also manipulating quantum qubits), Python is your friend.\u003c/p\u003e\n\u003cp\u003eIts a language that\u0026rsquo;s designed to be easy to write: is a very do-as-I-say language that sacrifices efficiency and elegance for getting crap done. This is how I started programming. \u003ca href=\"https://www.amazon.com/Python-Programming-Absolute-Beginner-3rd/dp/1435455002\"\u003eThis is the book I started with.\u003c/a\u003e It teaches Python through programming a series of small projects that are mostly Terminal games.\u003c/p\u003e\n\u003cp\u003eTo learn data science, \u003ca href=\"https://jennselby.github.io/MachineLearningCourseNotes/\"\u003eNueva\u0026rsquo;s very own data science course\u003c/a\u003e give very good conceptual framework. A typical first project is to recognize pictures of handwritten digits, \u003ca href=\"https://towardsdatascience.com/handwritten-digit-mnist-pytorch-977b5338e627\"\u003efor which there is a good guide\u003c/a\u003e. I also started something called \u003ca href=\"/posts/kbhaibridge_course_website/\"\u003eAIBridge\u003c/a\u003e with \u003ca href=\"/posts/kbhaifs/\"\u003eAIFS\u003c/a\u003e, so if we ever publish the recordings I will put them here.\u003c/p\u003e\n\u003cp\u003eGoogle also: pip, ipython, Jupyter.\u003c/p\u003e\n\u003ch3 id=\"backend-engineering\"\u003eBackend engineering\u003c/h3\u003e\n\u003cp\u003eBackend engineering is the science of dealing with databases and writing API (application programming interfaces). I don\u0026rsquo;t suggest starting with this, but if you are particularly interested in databases, you could!\u003c/p\u003e\n\u003cp\u003eTo master backend engineering, first learn a database manipulation language. For 99.98% of the industry, this means \u003ca href=\"https://www.mysqltutorial.org/getting-started-with-mysql/\"\u003emysql\u003c/a\u003e. 
The link directs to a pretty good guide.\u003c/p\u003e\n\u003cp\u003eFurthermore, the language with which backend is written is Java. I hate to say it, but despite Java\u0026rsquo;s terribleness (don\u0026rsquo;t worry about it ;)) its very dependable. \u003ca href=\"https://nostarch.com/learnjava\"\u003eHere\u0026rsquo;s a book\u003c/a\u003e on Java. In general, I really like all of the books from no starch press.\u003c/p\u003e\n\u003ch3 id=\"frontend-and-web-engineering\"\u003eFrontend and Web engineering\u003c/h3\u003e\n\u003cp\u003eDo you like making stuff move? Do you like drawing buttons? Front end maybe for you. The most basic type of front-end engineering is making websites.\u003c/p\u003e\n\u003cp\u003eStart by making a \u0026ldquo;vanilla website\u0026rdquo;: HTML (what\u0026rsquo;s on the page), CSS (what colours and sizes), JavaScript (how it moves) is the standard trio of languages to start. freeCodeCamp (a great Medium blog, check their stuff out) has a \u003ca href=\"https://www.freecodecamp.org/news/html-css-and-javascript-explained-for-beginners/\"\u003egood guide\u003c/a\u003e on the matter.\u003c/p\u003e\n\u003cp\u003eHowever, as you progress in your journey, you will find these tools woefully inadequate. Hence, most people writing web front end move on to something called a \u0026ldquo;JavaScript Framework\u0026rdquo;, a tool to generate a \u0026ldquo;vanilla\u0026rdquo; website from some more easily manipulable JS (changing the text on the page moves from a four line operation (indexing, selecting, grabbing, changing) to a one-liner (state.text=new text)).\u003c/p\u003e\n\u003cp\u003eA popular JS framework is \u003ca href=\"https://reactjs.org/tutorial/tutorial.html\"\u003eReactJS\u003c/a\u003e. 
Check them out.\u003c/p\u003e\n\u003ch3 id=\"fullstack-engineering\"\u003eFullstack Engineering\u003c/h3\u003e\n\u003cp\u003eFrontend + Backend.\u003c/p\u003e\n\u003ch3 id=\"game-development\"\u003eGame development\u003c/h3\u003e\n\u003cp\u003eGame development is honestly one of the most horribly complicated and richly science-y part of CS. I am not super experience in game development but learning C++ and mastering \u003ca href=\"https://docs.unity3d.com/560/Documentation/Manual/UnityBasics.html\"\u003eUnity\u003c/a\u003e, the game engine. Oh, right, game dev is the \u003cem\u003eonly\u003c/em\u003e, and I repeat \u003cem\u003eonly\u003c/em\u003e (with invisible footnotes and qualifications) reason why you should be writing code on Windows.\u003c/p\u003e\n\u003cp\u003eA friend \u003ca href=\"https://github.com/Radbuglet/\"\u003eis good at game dev\u003c/a\u003e, I can make an intro if needed.\u003c/p\u003e\n\u003ch2 id=\"good-luck\"\u003eGood Luck!\u003c/h2\u003e\n\u003cp\u003eRemember: Google-fu and project-based curiosity is your friend. Let me know if you have questions.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_dev_starter_pack/","tags":null,"title":"software dev starter pack"},{"categories":null,"contents":"The software development models, or Software Development Life-cycles (SDLCs), are methodologies to approach organizing a software project.\nWaterfall The waterfall specification gets written before any code written. We hand off spec and code directly to tester, and code should behave like spec.\nCode specification exactly Spec does not update Code happens only after stuff is done\nAgile Agile are designed to work with minimum specification before code. 
Spec is updated constantly as code changes and get user feedback.\nSpiral (Software Development) The Spiral model is as SDLC that combines the iterative development approach of Agile and the structure of Waterfall.\nIt focuses on Risk to mitigate it.\nWaterfall style requirements detailing Preliminary design First prototype: scaled down system Second prototype Mitigates strengths, weaknesses, and risks of 1st prototype Augmented requirements that got scaled down during the firts prototype \u0026ldquo;The entire project can be aborted if the risk is deemed too great.\u0026rdquo; Budget Operating cost Repeat until customer likes it Construct final system using the prototype as a spec Other Non-Canonical SDLCs Test-Driven Development See Test-Driven Development\nExtreme Programming TDD + continually integrating code and pair programming to review code\n","html":"\u003cp\u003eThe software development models, or Software Development Life-cycles (\u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eSDLC\u003c/a\u003es), are methodologies to approach organizing a software project.\u003c/p\u003e\n\u003ch2 id=\"waterfall\"\u003eWaterfall\u003c/h2\u003e\n\u003cp\u003eThe waterfall specification gets written before any code written. We hand off spec and code directly to tester, and code should behave like spec.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eCode specification exactly\u003c/li\u003e\n\u003cli\u003eSpec does not update\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eCode happens only after stuff is done\u003c/p\u003e\n\u003ch2 id=\"agile\"\u003eAgile\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#agile\"\u003eAgile\u003c/a\u003e are designed to work with minimum specification before code. 
Spec is updated constantly as code changes and get user feedback.\u003c/p\u003e\n\u003ch2 id=\"spiral--software-development\"\u003eSpiral (Software Development)\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#spiral--software-development\"\u003eSpiral\u003c/a\u003e model is as \u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eSDLC\u003c/a\u003e that combines the iterative development approach of \u003ca href=\"#agile\"\u003eAgile\u003c/a\u003e and the structure of \u003ca href=\"#waterfall\"\u003eWaterfall\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIt focuses on Risk to mitigate it.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-07-08_17-30-42_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eWaterfall style requirements detailing\u003c/li\u003e\n\u003cli\u003ePreliminary design\u003c/li\u003e\n\u003cli\u003eFirst prototype: scaled down system\u003c/li\u003e\n\u003cli\u003eSecond prototype\n\u003col\u003e\n\u003cli\u003eMitigates strengths, weaknesses, and risks of 1st prototype\u003c/li\u003e\n\u003cli\u003eAugmented requirements that got scaled down during the firts prototype\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;The entire project can be aborted if the risk is deemed too great.\u0026rdquo;\n\u003col\u003e\n\u003cli\u003eBudget\u003c/li\u003e\n\u003cli\u003eOperating cost\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eRepeat until customer likes it\u003c/li\u003e\n\u003cli\u003eConstruct final system using the prototype as a spec\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"other-non-canonical-sdlcs\"\u003eOther Non-Canonical SDLCs\u003c/h2\u003e\n\u003ch3 id=\"test-driven-development--kbhtesting-dot-md\"\u003e\u003ca href=\"/posts/kbhtesting/#test-driven-development\"\u003eTest-Driven Development\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhtesting/#test-driven-development\"\u003eTest-Driven 
Development\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"extreme-programming\"\u003eExtreme Programming\u003c/h3\u003e\n\u003cp\u003eTDD + continually integrating code and pair programming to review code\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_development_methodologies/","tags":null,"title":"Software Development Methodologies"},{"categories":null,"contents":"process of Engineering: chronological order User Interviews + User Stories Requirements Analysis Documentation and Specification Build the damned thing Project Management and Development Methodology (SDLC) Task Estimation Software Design and Architecture Patterns Testing Build and Release engineering (TODO) Other topics Query optimization (TODO) Fucking acronyms to know AAA Method SOLID principles STAR method: state behaviorals in Situation, Task, Action, Results fundamental trade-off of Software Engineering The MIT vs. New Jersey problem: in Software Engineering, you can only choose one of FAST or ROBUST.\nProblem Fast (\u0026ldquo;Bell Labs/NJ\u0026rdquo;) Robust (\u0026ldquo;MIT\u0026rdquo;) Specs Whatever it looks like screens, states, UI elements documented; transitions Time \u0026ldquo;whenever\u0026rdquo; precise projections, track work and dependencies Testing \u0026ldquo;ran it + didn\u0026rsquo;t crash\u0026rdquo; black, white box, code overage, edge/adv. cases Modular Giant function object/data model, grouped function, abstraction barriers Failure Unpredictable + silent Graceful, noisy, error reporting + logging Language Scripting, high level Low-level, assembly/bare metal, control, can be difficult Proto. Many/Quickly Few/Slowly Being Done Now Later Source: here.\nhow to choose? Which is the better approach? There isn\u0026rsquo;t one. However, here are some critical questions for you to answer:\nDeadline: what happens if you don\u0026rsquo;t finish today? Release cycle: if you ship a bug, how long can you fix it? 
Consequences: if the software malfunctions, how bad is it? Life-cycle: how long will the software get used? So\u0026mdash;\nAs consequences for deadline gets worse, trend towards fast; as consequences for failure gets worse, trend towards robust.\n","html":"\u003ch2 id=\"process-of-engineering-chronological-order\"\u003eprocess of Engineering: chronological order\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuser_interviews/\"\u003eUser Interviews\u003c/a\u003e + \u003ca href=\"/posts/kbhuser_interviews/#user-story\"\u003eUser Stories\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrequirements_analysis/\"\u003eRequirements Analysis\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdocumentation_and_specification/\"\u003eDocumentation and Specification\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eBuild the damned thing\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsoftware_development_methodologies/\"\u003eProject Management and Development Methodology (SDLC)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtask_estimation/\"\u003eTask Estimation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsoftware_design_and_architecture_patterns/\"\u003eSoftware Design and Architecture Patterns\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtesting/\"\u003eTesting\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBuild and Release engineering (TODO)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-topics\"\u003eOther topics\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eQuery optimization (TODO)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fucking-acronyms-to-know\"\u003eFucking acronyms to know\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtesting/#arrange-act-assert\"\u003eAAA Method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhsoftware_design_and_architecture_patterns/#solid-principles\"\u003eSOLID principles\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eSTAR method: state behaviorals in Situation, Task, Action, Results\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fundamental-trade-off-of-software-engineering--kbhsoftware-engineering-dot-md\"\u003efundamental trade-off of \u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#fundamental-trade-off-of-software-engineering--kbhsoftware-engineering-dot-md\"\u003eMIT vs. New Jersey\u003c/a\u003e problem: in \u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e, you can only choose one of FAST or ROBUST.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eProblem\u003c/th\u003e\n\u003cth\u003eFast (\u0026ldquo;Bell Labs/NJ\u0026rdquo;)\u003c/th\u003e\n\u003cth\u003eRobust (\u0026ldquo;MIT\u0026rdquo;)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eSpecs\u003c/td\u003e\n\u003ctd\u003eWhatever it looks like\u003c/td\u003e\n\u003ctd\u003escreens, states, UI elements documented; transitions\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eTime\u003c/td\u003e\n\u003ctd\u003e\u0026ldquo;whenever\u0026rdquo;\u003c/td\u003e\n\u003ctd\u003eprecise projections, track work and dependencies\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eTesting\u003c/td\u003e\n\u003ctd\u003e\u0026ldquo;ran it + didn\u0026rsquo;t crash\u0026rdquo;\u003c/td\u003e\n\u003ctd\u003eblack, white box, code overage, edge/adv. 
cases\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eModular\u003c/td\u003e\n\u003ctd\u003eGiant function\u003c/td\u003e\n\u003ctd\u003eobject/data model, grouped function, abstraction barriers\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eFailure\u003c/td\u003e\n\u003ctd\u003eUnpredictable + silent\u003c/td\u003e\n\u003ctd\u003eGraceful, noisy, error reporting + logging\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eLanguage\u003c/td\u003e\n\u003ctd\u003eScripting, high level\u003c/td\u003e\n\u003ctd\u003eLow-level, assembly/bare metal, control, can be difficult\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eProto.\u003c/td\u003e\n\u003ctd\u003eMany/Quickly\u003c/td\u003e\n\u003ctd\u003eFew/Slowly\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eBeing Done\u003c/td\u003e\n\u003ctd\u003eNow\u003c/td\u003e\n\u003ctd\u003eLater\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eSource: \u003ca href=\"https://www.dreamsongs.com/RiseOfWorseIsBetter.html\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"how-to-choose\"\u003ehow to choose?\u003c/h3\u003e\n\u003cp\u003eWhich is the better approach? There isn\u0026rsquo;t one. 
However, here are some critical questions for you to answer:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDeadline: what happens if you don\u0026rsquo;t finish today?\u003c/li\u003e\n\u003cli\u003eRelease cycle: if you ship a bug, how long can you fix it?\u003c/li\u003e\n\u003cli\u003eConsequences: if the software malfunctions, how bad is it?\u003c/li\u003e\n\u003cli\u003eLife-cycle: how long will the software get used?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-07_13-00-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo\u0026mdash;\u003c/p\u003e\n\u003cp\u003eAs consequences for deadline gets worse, trend towards fast; as consequences for failure gets worse, trend towards robust.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoftware_engineering/","tags":["index"],"title":"Software Engineering Index"},{"categories":null,"contents":"This will have no explicit boundary conditions in \\(x\\)!\nAssume \\(|U(t,x)|\\) decays quickly as \\(|x| \\to \\infty\\).\nApply Fourier Transform Step one is to apply the Fourier Transform on our PDE\n\\begin{equation} \\hat{U}(t, \\lambda) = \\int_{R} U(t,x) e^{-i\\lambda x} \\dd{x} \\end{equation}\nLeveraging the fact that Derivative of Fourier Transform is a multiplication, we can simply our Fourier transform in terms of one expression in \\(x\\).\nApply a Fourier Transform on \\(f(x)\\) This allows you to plug the initial conditions into your transformed expression above.\nSolve for \\(\\hat{U}(t,\\lambda)\\), and then convert back This uses the inverse Fourier transform.\n","html":"\u003cp\u003eThis will have no explicit boundary conditions in \\(x\\)!\u003c/p\u003e\n\u003cp\u003eAssume \\(|U(t,x)|\\) decays quickly as \\(|x| \\to \\infty\\).\u003c/p\u003e\n\u003ch2 id=\"apply-fourier-transform\"\u003eApply Fourier Transform\u003c/h2\u003e\n\u003cp\u003eStep one is to apply the Fourier Transform on our 
PDE\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t, \\lambda) = \\int_{R} U(t,x) e^{-i\\lambda x} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLeveraging the fact that \u003ca href=\"/posts/kbhfourier_transform/#derivative-of-fourier-transform\"\u003eDerivative of Fourier Transform\u003c/a\u003e is a multiplication, we can simply our Fourier transform in terms of one expression in \\(x\\).\u003c/p\u003e\n\u003ch2 id=\"apply-a-fourier-transform--kbhfourier-transform-dot-md--on-f--x\"\u003eApply a \u003ca href=\"/posts/kbhfourier_transform/\"\u003eFourier Transform\u003c/a\u003e on \\(f(x)\\)\u003c/h2\u003e\n\u003cp\u003eThis allows you to plug the initial conditions into your transformed expression above.\u003c/p\u003e\n\u003ch2 id=\"solve-for-hat-u--t-lambda--and-then-convert-back\"\u003eSolve for \\(\\hat{U}(t,\\lambda)\\), and then convert back\u003c/h2\u003e\n\u003cp\u003eThis uses the inverse Fourier transform.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsolving_pdes_via_fourier_transform/","tags":null,"title":"Solving PDEs via Fourier Transform"},{"categories":null,"contents":"So let\u0026rsquo;s say given a system:\n\\begin{equation} \\begin{cases} x + 2y + z = 0 \\\\ 2x + 0y - z = 1 \\\\ x - y + 0z = 2 \\end{cases} \\end{equation}\nWe can represent this using a matricies.\n\\begin{equation} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\\\ z \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 1 \\\\ 2 \\end{pmatrix} \\end{equation}\nWe will use Gaussian elimination. 
We will begin by multiplying the top row by \\(-2\\).\n\\begin{equation} \\begin{pmatrix} -2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} 1 \u0026amp; 2 \u0026amp; 1 \\\\ 2 \u0026amp; 0 \u0026amp; -1 \\\\ 1 \u0026amp; -1 \u0026amp; 0 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\\\ z \\end{pmatrix} =\\begin{pmatrix} -2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1 \\end{pmatrix} \\begin{pmatrix} 0 \\\\ 1 \\\\2 \\end{pmatrix} \\end{equation}\nAnd then we add row one to row two; we will not write out the transformation matrix:\n\\begin{equation} \\begin{pmatrix} -2 \u0026amp;-4 \u0026amp;-2 \\\\ 2 \u0026amp;-0 \u0026amp;-1 \\\\ 1 \u0026amp;-1 \u0026amp;0 \\end{pmatrix} \\begin{pmatrix} x \\\\ y \\\\ z \\end{pmatrix} = \\begin{pmatrix} 0 \\\\ 1 \\\\2 \\end{pmatrix} \\end{equation}\n","html":"\u003cp\u003eSo let\u0026rsquo;s say given a system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx + 2y + z = 0 \\\\\n2x + 0y - z = 1 \\\\\nx - y + 0z = 2\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can represent this using a \u003ca href=\"/posts/kbhmatricies/\"\u003ematricies\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y \\\\ z\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 1 \\\\ 2\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe will use \u003ca href=\"/posts/kbhmatricies/#gaussian-elimination\"\u003eGaussian elimination\u003c/a\u003e. 
We will begin by multiplying the top row by \\(-2\\).\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n-2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n1 \u0026amp; 2 \u0026amp; 1 \\\\\n2 \u0026amp; 0 \u0026amp; -1 \\\\\n1 \u0026amp; -1 \u0026amp; 0\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y \\\\ z\n\\end{pmatrix} =\\begin{pmatrix}\n-2 \u0026amp; 0 \u0026amp; 0 \\\\ 0 \u0026amp; 1 \u0026amp; 0 \\\\ 0 \u0026amp; 0 \u0026amp; 1\n\\end{pmatrix} \\begin{pmatrix}\n0 \\\\ 1 \\\\2\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd then we add row one to row two; we will not write out the transformation \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{pmatrix}\n-2 \u0026amp;-4 \u0026amp;-2 \\\\ 2 \u0026amp;-0 \u0026amp;-1 \\\\ 1 \u0026amp;-1 \u0026amp;0\n\\end{pmatrix} \\begin{pmatrix}\nx \\\\ y \\\\ z\n\\end{pmatrix} = \\begin{pmatrix}\n0 \\\\ 1 \\\\2\n\\end{pmatrix}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsolving_systems/","tags":null,"title":"solving systems"},{"categories":null,"contents":" \u0026ldquo;laws.als\u0026rdquo;: \u0026ldquo;drumuomup\u0026rdquo; \u0026ldquo;ping.als\u0026rdquo;: \u0026ldquo;walking down the street, eating children\u0026rdquo;\u0026quot;\u0026quot; \u0026ldquo;planets.als\u0026rdquo;: \u0026ldquo;sing a song among the starlight\u0026rdquo; \u0026ldquo;songs.als\u0026rdquo;: \u0026ldquo;thank you klint for your discussion\u0026rdquo; Other things I have to finish \u0026ldquo;Tunel2.als\u0026rdquo; ","html":"\u003cul\u003e\n\u003cli\u003e\u003cdel\u003e\u0026ldquo;laws.als\u0026rdquo;: \u0026ldquo;drumuomup\u0026rdquo;\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;ping.als\u0026rdquo;: \u0026ldquo;walking down the street, eating 
children\u0026rdquo;\u0026quot;\u0026quot;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;planets.als\u0026rdquo;: \u0026ldquo;sing a song among the starlight\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;songs.als\u0026rdquo;: \u0026ldquo;thank you klint for your discussion\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"other-things-i-have-to-finish\"\u003eOther things I have to finish\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Tunel2.als\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsongs_that_need_lyrics/","tags":null,"title":"Songs that need Lyrics"},{"categories":null,"contents":"qsort: sort an array of any type\nbsearch binary search of an array of any type\nlfind: linear search in a array of any find\nlsearch: lfind, but perform insertion as well\n","html":"\u003cp\u003e\u003ccode\u003eqsort\u003c/code\u003e: sort an array of any type\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003ebsearch\u003c/code\u003e binary search of an array of any type\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003elfind\u003c/code\u003e: linear search in a array of any find\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003elsearch\u003c/code\u003e: lfind, but perform insertion as well\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsorting_functions/","tags":null,"title":"sorting functions"},{"categories":null,"contents":"sound is the compression of air molecules: high/low pressure air. \u0026ldquo;This is your brain on music.\u0026rdquo;\n\u0026ldquo;Dynamic EQ\u0026rdquo;: to attenuate certain frequencies to preventing things from happening.\nSoothe audio\nhow we hear sound the way that sound is deflected as it enter our ear is important:\nsound bounce around our pinna it echos in the ear canal then it gets processed anechoic chamber an anechoic chamber is a room that blocks all forms of reflection. 
In the room, people experience hallucinations as the brain is trying to complete information but it can\u0026rsquo;t confirm it using sensory input.\nYou r brain is always trying to inteperate what\u0026rsquo;s going on.\nBasilar Membrane The Basilar Membrane sits after the eardrums; it is a liquid in which a membrane + some hair sits. Depending on the frequency of the sound, the hairs vibrate at different shapes.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsound/\"\u003esound\u003c/a\u003e is the compression of air molecules: high/low pressure air. \u0026ldquo;This is your brain on music.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Dynamic EQ\u0026rdquo;: to attenuate certain frequencies to preventing things from happening.\u003c/p\u003e\n\u003cp\u003eSoothe audio\u003c/p\u003e\n\u003ch2 id=\"how-we-hear-sound\"\u003ehow we hear sound\u003c/h2\u003e\n\u003cp\u003ethe way that \u003ca href=\"/posts/kbhsound/\"\u003esound\u003c/a\u003e is deflected as it enter our ear is important:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esound bounce around our \u003ca href=\"\"\u003epinna\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eit echos in the ear canal\u003c/li\u003e\n\u003cli\u003ethen it gets processed\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"anechoic-chamber\"\u003eanechoic chamber\u003c/h2\u003e\n\u003cp\u003ean \u003ca href=\"#anechoic-chamber\"\u003eanechoic chamber\u003c/a\u003e is a room that blocks all forms of reflection. In the room, people experience hallucinations as the brain is trying to complete information but it can\u0026rsquo;t confirm it using sensory input.\u003c/p\u003e\n\u003cp\u003eYou r brain is always trying to inteperate what\u0026rsquo;s going on.\u003c/p\u003e\n\u003ch2 id=\"basilar-membrane\"\u003eBasilar Membrane\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"#basilar-membrane\"\u003eBasilar Membrane\u003c/a\u003e sits after the eardrums; it is a liquid in which a membrane + some hair sits. 
Depending on the frequency of the sound, the hairs vibrate at different shapes.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsound/","tags":null,"title":"sound"},{"categories":null,"contents":"Reading notes Because feeling for self-endowment, they wish to build socialist society As Communists considered themselves as a vanguard of the revolutionary proletariat – their “aim” was to build socialist society in the whole world.\nSocialist had necesity against capitalist aggression The Soviet approaches towards historical descriptions of the twentieth century showed that with the emergence of the new type of state – socialist one – it became a target for capitalist aggression.\nSocialist revolution requires the creation of socialist society against the world It was first positive move towards realization of the Soviet foreign policy main idea: the world socialist revolution and creation of the socialist society in the whole world.\nThe Soviets believe that the US wants to take over world The US had plans to dominate in the entire world.\nThat the US was intentionally sturggling with socialism All US post-war foreign policy doctrines were aimed on the struggle with socialism\nthat soviets believed that US was exclusivly fighting socialism We can summarize – that on Soviet point of view all American presidents of Cold War period were creating their own doctrines, and all of them were anti-communist and anti-Soviet\nSoviets believes that the US made the first move Soviet concept first vivid steps, which signalized about the start of the confrontation between East and West, were steps made by the West.\nbelieves its a fight against imperialism bipolar confrontation had western roots and the Cold War was the policy of the US and other imperialistic countries against socialist countries.\ncommunism is working towards revolution mankind is a process of revolutionary changes\nthe soviet union believes only it can stop American aggression the Soviet Union was 
the only power in the world able to stop American ambitions of superpower.\nUSSR believes that itself was the only defender The Soviet Union considered itself as the only defender of the interests of the working class all over the world because it was the first socialist state in history.\nDefinding US and defending imperialism The Imperialistic was the system of capitalist countries: they had a lot of contradictions in their “camp” where each wanted to solve their problems and to defend their own interests by using the others.\nBlack and white view of the world prevailed USSR The entire world was separated into two main categories: friends and enemies. Such black and white world-view was a distinctive feature of Stalin’s way of seeing the world (outside as well as inside the USSR), but even after his death,\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"because-feeling-for-self-endowment-they-wish-to-build-socialist-society\"\u003eBecause feeling for self-endowment, they wish to build socialist society\u003c/h3\u003e\n\u003cp\u003eAs Communists considered themselves as a vanguard of the revolutionary proletariat – their “aim” was to build socialist society in the whole world.\u003c/p\u003e\n\u003ch3 id=\"socialist-had-necesity-against-capitalist-aggression\"\u003eSocialist had necesity against capitalist aggression\u003c/h3\u003e\n\u003cp\u003eThe Soviet approaches towards historical descriptions of the twentieth century showed that with the emergence of the new type of state – socialist one – it became a target for capitalist aggression.\u003c/p\u003e\n\u003ch3 id=\"socialist-revolution-requires-the-creation-of-socialist-society-against-the-world\"\u003eSocialist revolution requires the creation of socialist society against the world\u003c/h3\u003e\n\u003cp\u003eIt was first positive move towards realization of the Soviet foreign policy main idea: the world socialist revolution and creation of the socialist society in 
the whole world.\u003c/p\u003e\n\u003ch3 id=\"the-soviets-believe-that-the-us-wants-to-take-over-world\"\u003eThe Soviets believe that the US wants to take over world\u003c/h3\u003e\n\u003cp\u003eThe US had plans to dominate in the entire world.\u003c/p\u003e\n\u003ch3 id=\"that-the-us-was-intentionally-sturggling-with-socialism\"\u003eThat the US was intentionally sturggling with socialism\u003c/h3\u003e\n\u003cp\u003eAll US post-war foreign policy doctrines were aimed on the struggle with socialism\u003c/p\u003e\n\u003ch3 id=\"that-soviets-believed-that-us-was-exclusivly-fighting-socialism\"\u003ethat soviets believed that US was exclusivly fighting socialism\u003c/h3\u003e\n\u003cp\u003eWe can summarize – that on Soviet point of view all American presidents of Cold War period were creating their own doctrines, and all of them were anti-communist and anti-Soviet\u003c/p\u003e\n\u003ch3 id=\"soviets-believes-that-the-us-made-the-first-move\"\u003eSoviets believes that the US made the first move\u003c/h3\u003e\n\u003cp\u003eSoviet concept first vivid steps, which signalized about the start of the confrontation between East and West, were steps made by the West.\u003c/p\u003e\n\u003ch3 id=\"believes-its-a-fight-against-imperialism\"\u003ebelieves its a fight against imperialism\u003c/h3\u003e\n\u003cp\u003ebipolar confrontation had western roots and the Cold War was the policy of the US and other imperialistic countries against socialist countries.\u003c/p\u003e\n\u003ch3 id=\"communism-is-working-towards-revolution\"\u003ecommunism is working towards revolution\u003c/h3\u003e\n\u003cp\u003emankind is a process of revolutionary changes\u003c/p\u003e\n\u003ch3 id=\"the-soviet-union-believes-only-it-can-stop-american-aggression\"\u003ethe soviet union believes only it can stop American aggression\u003c/h3\u003e\n\u003cp\u003ethe Soviet Union was the only power in the world able to stop American ambitions of superpower.\u003c/p\u003e\n\u003ch3 
id=\"ussr-believes-that-itself-was-the-only-defender\"\u003eUSSR believes that itself was the only defender\u003c/h3\u003e\n\u003cp\u003eThe Soviet Union considered itself as the only defender of the interests of the working class all over the world because it was the first socialist state in history.\u003c/p\u003e\n\u003ch3 id=\"definding-us-and-defending-imperialism\"\u003eDefinding US and defending imperialism\u003c/h3\u003e\n\u003cp\u003eThe Imperialistic was the system of capitalist countries: they had a lot of contradictions in their “camp” where each wanted to solve their problems and to defend their own interests by using the others.\u003c/p\u003e\n\u003ch3 id=\"black-and-white-view-of-the-world-prevailed-ussr\"\u003eBlack and white view of the world prevailed USSR\u003c/h3\u003e\n\u003cp\u003eThe entire world was separated into two main categories: friends and enemies. Such black and white world-view was a distinctive feature of Stalin’s way of seeing the world (outside as well as inside the USSR), but even after his death,\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsoviet_perspective_on_cold_war/","tags":null,"title":"Soviet Perspective on Cold War"},{"categories":null,"contents":"(Spaan and Vlassis 2005)\nRandomized PBVI\nOne-Liner PVBI, faster.\nNovelty \u0026ldquo;Is it necessary to maintain an alpha vector for each belief?\u0026rdquo;\nNotable Methods Don\u0026rsquo;t update all beliefs; backup only belief that didn\u0026rsquo;t yet confer an improvement\nKey Figs New Concepts Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eSpaan and Vlassis 2005\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/#randomized\"\u003eRandomized PBVI\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003ePVBI, faster.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;Is it necessary to maintain an 
alpha vector for each belief?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003eDon\u0026rsquo;t update all beliefs; backup only belief that didn\u0026rsquo;t yet confer an improvement\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspaan_2005/","tags":null,"title":"Spaan 2005"},{"categories":null,"contents":"The span of a bunch of vectors is the set of all linear combinations of that bunch of vectors. We denote it as \\(span(v_1, \\dots v_{m)}\\).\nconstituents for constructing a linear combination a list of vectors \\(v_1,\\dots,v_{m}\\) and scalars \\(a_1, a_2, \\dots, a_{m} \\in \\mathbb{F}\\) requirements \\begin{equation} span(v_{1}..v_{m}) = \\{a_1v_1+\\dots +a_{m}v_{m}:a_1\\dots a_{m} \\in \\mathbb{F}\\} \\end{equation}\nadditional information span is the smallest subspace containing all vectors in the list Part 1: that a span of a list of vectors is a subspace containing those vectors\nBy taking all \\(a_{n}\\) as \\(0\\), we show that the additive identity exists.\nTaking two linear combinations and adding them (i.e. adding two members of the span) is still in the span by commutativity and distributivity (reorganize each constant \\(a_{1}\\) together)\u0026mdash;creating another linear combination and therefore a member of the span.\nScaling a linear combination, by distributivity, just scales the scalars and create yet another linear combination.\nPart 2: a subspace containing the list of vectors contain the span\nsubspaces are closed under scalar multiplication and addition. 
Therefore, we can just construct every linear combination.\nBy double-containment, a subspace is the smallest subspace containing all vectors. \\(\\blacksquare\\)\nspans If \\(span(v_1, \\dots v_{m})\\) equals \\(V\\), we say that \\(v_1, \\dots, v_{m}\\) spans \\(V\\).\nNOTE! the two things have to be equal\u0026mdash;if the span of a set of vectors is larger than \\(V\\), they do not span \\(V\\).\nlength of linearly-independent list \\(\\leq\\) length of spanning list see here.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of a bunch of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es is the set of all \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es of that bunch of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es. We denote it as \\(span(v_1, \\dots v_{m)}\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor constructing a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003ea \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es \\(v_1,\\dots,v_{m}\\)\u003c/li\u003e\n\u003cli\u003eand scalars \\(a_1, a_2, \\dots, a_{m} \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nspan(v_{1}..v_{m}) = \\{a_1v_1+\\dots +a_{m}v_{m}:a_1\\dots a_{m} \\in \\mathbb{F}\\}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"span-is-the-smallest-subspace-containing-all-vectors-in-the-list\"\u003espan is the smallest subspace containing all vectors in the list\u003c/h3\u003e\n\u003cp\u003ePart 1: that a \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of a list of vectors is a 
\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e containing those \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es\u003c/p\u003e\n\u003cp\u003eBy taking all \\(a_{n}\\) as \\(0\\), we show that the additive identity exists.\u003c/p\u003e\n\u003cp\u003eTaking two \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003es and \u003ca href=\"/posts/kbhadding/\"\u003eadding\u003c/a\u003e them (i.e. adding two members of the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e) is still in the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e by \u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e and \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e (reorganize each constant \\(a_{1}\\) together)\u0026mdash;creating another \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e and therefore a member of the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eScaling a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e, by \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e, just scales the scalars and create yet another \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003ePart 2: a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e containing the list of vectors contain the span\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es are closed under \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e and \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e. 
Therefore, we can just construct every \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eBy double-containment, a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e is the smallest \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e containing all vectors. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"spans\"\u003espans\u003c/h3\u003e\n\u003cp\u003eIf \\(span(v_1, \\dots v_{m})\\) equals \\(V\\), we say that \\(v_1, \\dots, v_{m}\\) \u003ca href=\"#spans\"\u003espans\u003c/a\u003e \\(V\\).\u003c/p\u003e\n\u003cp\u003eNOTE! the two things have to be equal\u0026mdash;if the \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e of a set of \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es is \u003cem\u003elarger\u003c/em\u003e than \\(V\\), they do \u003cstrong\u003enot\u003c/strong\u003e span \\(V\\).\u003c/p\u003e\n\u003ch3 id=\"length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003esee here\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspan/","tags":null,"title":"span (linear algebra)"},{"categories":null,"contents":" Órale pues: confirmando No hay pedo: no hay problema Ponte la de puebla: dividirlo Qué padre: sopresa positiva De a grapa: gratis De poca madre: júbilo y aceptación Te vas a dar un ranazo: nos vamos a hacer daño (hurt) Mamá: ¡necesitamos limpiar sus cuartos! Me: órale pues, no hay pedo. Voy a limpiarlo mañana.\nMi plan está simple. Voy a dividir mi cuarto a media, y contrata mi amiga para ayudarme. ¡Ponte la de puebla!\nDos días después\u0026hellip;\nMamá: Oye, ¿su habitación? ¡De poca madre, qué padre! 
Qué limpia.\nMe: Sí, de a grapa con mi amigo, también.\n","html":"\u003col\u003e\n\u003cli\u003eÓrale pues: confirmando\u003c/li\u003e\n\u003cli\u003eNo hay pedo: no hay problema\u003c/li\u003e\n\u003cli\u003ePonte la de puebla: dividirlo\u003c/li\u003e\n\u003cli\u003eQué padre: sopresa positiva\u003c/li\u003e\n\u003cli\u003eDe a grapa: gratis\u003c/li\u003e\n\u003cli\u003eDe poca madre: júbilo y aceptación\u003c/li\u003e\n\u003cli\u003eTe vas a dar un ranazo: nos vamos a hacer daño (hurt)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eMamá: ¡necesitamos limpiar sus cuartos! Me: órale pues, no hay pedo. Voy a limpiarlo mañana.\u003c/p\u003e\n\u003cp\u003eMi plan está simple. Voy a dividir mi cuarto a media, y contrata mi amiga para ayudarme. ¡Ponte la de puebla!\u003c/p\u003e\n\u003cp\u003eDos días después\u0026hellip;\u003c/p\u003e\n\u003cp\u003eMamá: Oye, ¿su habitación? ¡De poca madre, qué padre! Qué limpia.\u003c/p\u003e\n\u003cp\u003eMe: Sí, de a grapa con mi amigo, también.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspanish/","tags":null,"title":"spanish"},{"categories":null,"contents":"Spark is not a database. Importantly, its a \u0026ldquo;framework\u0026rdquo; of data:\nProgramming platform Distributed file system Prallel execution environment Software ecosystem It gives you the \u0026ldquo;parallel\u0026rdquo; search/sort needed to navigate a large database. It is based on the Hadoop ecosystem. 
Spark operates on RDDs to do lazy-evaluation.\nQuickstart When we start up Spark Shell, it will build you a sc variable which is appropriate for your supercomputer; if you are not, you need to set up the context yourself using the 3 lines noted below to make sc variable:\n# build context, IF NOT in a SHELL from myspark import SparkConf, SparkContext conf = SparkConf().setMaster(\u0026#34;local\u0026#34;).setaAppName(\u0026#34;Test_App\u0026#34;) sc = SparkContext(conf=conf) # do shit \u0026#34;transform\u0026#34; my_rdd = sc.textFile(\u0026#34;whatever\u0026#34;) # create an RDD from a datastore filtered_rdd = my_rdd.filter(lambda Line: \u0026#34;hubble\u0026#34; in line) # perform action in rdd # actually perform actions filtered_rdd.count() # 47 filtered_rdd.first() Then, you can submit this script. If you are in a shell its REPL so you don\u0026rsquo;t.\nspark-submit Test_App.py Main types of files Spark can handle:\nText (CSV, XML, JSON, etc.) SQL stuff Other NoSQL Stuff Parquet Hive Hadoopy things JDBC, Cassandra, Mongo, etc. etc. etc. Compressed files: tarballs, gzip Main types of filesystems:\nyours (local files) HDFS Lustre AWS S3 See also Common Spark Actions, Common Spark Transformations\nSpark RDD API The base level API of Spark is one which interacts with vectors or tuples via RDDs. It deals with manipulating unordered sets (i.e. not dataframes).\nCreate/Load RDD Transform RDD] (performing any operations in theory, lazily, without loading data) Apply some more transforms (filtering, etc.) Perform Actions (actually get data) Transformations move one RDD to another, and Actions load the data from RDD.\nSpark is smart to not do anything that\u0026rsquo;s not needed; removing entire stores which isn\u0026rsquo;t needed, caching, dynamically getting them back etc.\nSpark can read from whole databases, text files, etc. and move them into an RDD.\nTips and Tricks \u0026ldquo;kv\u0026rdquo; values are kinda like Pandas .groupby without groupyby. 
You can do most mapping operations by key instead, which means you can always do group.\nif you want a certain form, use the .map function to turn it into something else; for instance, if you want to turn something into a kv pair, you can thing.map(lambda x: x-key, x-value) if you need to work with KV data, you should think about swapping key and value if you so desire Spark Anti-Patterns You should never do heavy-duty compute in Python (i.e. if you end up with a for loop somewhere you are probably not using map reduce right) You should never take an action until you absolutely, seriously, need all the data Optimizations See Optimizing Spark\nSpark DataFrame API DataFrames are type-safe collections of tables built out of RDDs; they are collections of tuples, where columns have the same type\nrow_rdd = sc.parallelize([(\u0026#34;thing1\u0026#34;, \u0026#34;thing2\u0026#34;, 3), (\u0026#34;thing1\u0026#34;, \u0026#34;thing2\u0026#34;, 3)]) row_dataframe = spark.createDataFrame(row_rdd, [\u0026#34;col1\u0026#34;, \u0026#34;col2\u0026#34;, \u0026#34;col3\u0026#34;]) row_dataframe.show() You can load entire structured data via:\ndf = spark.read.json(\u0026#34;jsonfile.json\u0026#34;) And it will create the right schema on your behalf in the DataFrame.\nDataFrame Limitations Unlike Pandas, you can\u0026rsquo;t manipulate structured DataFrame well. Hence, you should like. Go back to RDDs if you are manipulating specific data into unstructured form. ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e is not a database. 
Importantly, its a \u0026ldquo;framework\u0026rdquo; of data:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eProgramming platform\u003c/li\u003e\n\u003cli\u003eDistributed file system\u003c/li\u003e\n\u003cli\u003ePrallel execution environment\u003c/li\u003e\n\u003cli\u003eSoftware ecosystem\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIt gives you the \u0026ldquo;parallel\u0026rdquo; search/sort needed to navigate a large database. It is based on the \u003ca href=\"\"\u003eHadoop\u003c/a\u003e ecosystem. Spark operates on \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es to do lazy-evaluation.\u003c/p\u003e\n\u003ch2 id=\"quickstart\"\u003eQuickstart\u003c/h2\u003e\n\u003cp\u003eWhen we start up Spark Shell, it will build you a \u003ccode\u003esc\u003c/code\u003e variable which is appropriate for your supercomputer; if you are not, you need to set up the context yourself using the 3 lines noted below to make \u003ccode\u003esc\u003c/code\u003e variable:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# build context, IF NOT in a SHELL\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003efrom\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emyspark\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eimport\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkConf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkContext\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econf\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkConf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esetMaster\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;local\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esetaAppName\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;Test_App\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003esc\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSparkContext\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econf\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan style=\"color:#111\"\u003econf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# do shit \u0026#34;transform\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emy_rdd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003etextFile\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;whatever\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# create an RDD from a datastore\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efiltered_rdd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emy_rdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efilter\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003elambda\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLine\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;hubble\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eline\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# perform action in rdd\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e# actually perform actions\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efiltered_rdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# 
47\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003efiltered_rdd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efirst\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThen, you can submit this script. If you are in a shell its REPL so you don\u0026rsquo;t.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003espark-submit Test_App.py\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eMain types of files Spark can handle:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eText (CSV, XML, JSON, etc.)\u003c/li\u003e\n\u003cli\u003eSQL stuff\u003c/li\u003e\n\u003cli\u003eOther NoSQL Stuff\n\u003cul\u003e\n\u003cli\u003eParquet\u003c/li\u003e\n\u003cli\u003eHive\u003c/li\u003e\n\u003cli\u003eHadoopy things\u003c/li\u003e\n\u003cli\u003eJDBC, Cassandra, Mongo, etc. etc. 
etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eCompressed files: tarballs, gzip\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eMain types of filesystems:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eyours (local files)\u003c/li\u003e\n\u003cli\u003eHDFS\u003c/li\u003e\n\u003cli\u003eLustre\u003c/li\u003e\n\u003cli\u003eAWS S3\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhcommon_spark_actions/\"\u003eCommon Spark Actions\u003c/a\u003e, \u003ca href=\"/posts/kbhcommon_spark_transformations/\"\u003eCommon Spark Transformations\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"spark-rdd-api\"\u003eSpark RDD API\u003c/h2\u003e\n\u003cp\u003eThe base level API of \u003ca href=\"/posts/kbhspark/\"\u003eSpark\u003c/a\u003e is one which interacts with \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es or tuples via \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003es. It deals with manipulating unordered sets (i.e. \u003cstrong\u003enot\u003c/strong\u003e dataframes).\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eCreate/Load \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eTransform \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e] (performing any operations \u003cem\u003ein theory, lazily, without loading data\u003c/em\u003e)\u003c/li\u003e\n\u003cli\u003eApply some more transforms (filtering, etc.)\u003c/li\u003e\n\u003cli\u003ePerform Actions (actually get data)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eTransformations\u003c/strong\u003e\u003c/strong\u003e move one \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e to another, and \u003cstrong\u003e\u003cstrong\u003eActions\u003c/strong\u003e\u003c/strong\u003e load the data from \u003ca href=\"/posts/kbhrdd/\"\u003eRDD\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSpark is smart to not do anything that\u0026rsquo;s not needed; removing entire stores which isn\u0026rsquo;t 
needed, caching, dynamically getting them back etc.\u003c/p\u003e\n\u003cp\u003eSpark can read from whole databases, text files, etc. and move them into an RDD.\u003c/p\u003e\n\u003ch3 id=\"tips-and-tricks\"\u003eTips and Tricks\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;kv\u0026rdquo; values are kinda like Pandas \u003ccode\u003e.groupby\u003c/code\u003e without groupyby. You can do most mapping operations by key instead, which means you can always do group.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif you want a certain form, use the \u003ccode\u003e.map\u003c/code\u003e function to turn it into something else; for instance, if you want to turn something into a kv pair, you can \u003ccode\u003ething.map(lambda x: x-key, x-value)\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif you need to work with KV data, you should think about swapping key and value if you so desire\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"spark-anti-patterns\"\u003eSpark Anti-Patterns\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eYou should never do heavy-duty compute in Python (i.e. 
if you end up with a for loop somewhere you are probably not using map reduce right)\u003c/li\u003e\n\u003cli\u003eYou should never take an \u003cstrong\u003eaction\u003c/strong\u003e until you absolutely, seriously, need all the data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"optimizations\"\u003eOptimizations\u003c/h4\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhoptimizing_spark/\"\u003eOptimizing Spark\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"spark-dataframe-api\"\u003eSpark DataFrame API\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#spark-dataframe-api\"\u003eDataFrame\u003c/a\u003es are type-safe collections of tables built out of RDDs; they are collections of tuples, where columns have the same type\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erow_rdd\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esc\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eparallelize\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e([(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;thing1\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;thing2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e),\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;thing1\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;thing2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erow_dataframe\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003espark\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecreateDataFrame\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003erow_rdd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;col1\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;col2\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;col3\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003erow_dataframe\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eshow\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can load entire structured data via:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003espark\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejson\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;jsonfile.json\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eAnd it will create the right schema on your behalf in the DataFrame.\u003c/p\u003e\n\u003ch3 id=\"dataframe-limitations\"\u003eDataFrame Limitations\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eUnlike Pandas, you can\u0026rsquo;t manipulate structured DataFrame well.\u003c/li\u003e\n\u003cli\u003eHence, you should like. 
Go back to RDDs if you are manipulating specific data into unstructured form.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspark/","tags":null,"title":"Spark"},{"categories":null,"contents":"Same core algorithm as Forward Search, but instead of calculating a utility based on the action-value over all possible next states, you make \\(m\\) different samples of next state, action, and reward, and average them\n","html":"\u003cp\u003eSame core algorithm as \u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e, but instead of calculating a \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e based on the \u003ca href=\"/posts/kbhaction_value_function/\"\u003eaction-value\u003c/a\u003e over all possible next states, you make \\(m\\) different \u003cstrong\u003esamples\u003c/strong\u003e of next state, action, and reward, and average them\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsparse_sampling/","tags":null,"title":"Sparse Sampling"},{"categories":null,"contents":" take audio\ncalculate Mel Scale representation\napply a series of Filter Banks which attenuates the input to highlight groups of frequencies\nwe then run a discrete-cosine transform to obtain MFCCs, because much of the output results will still correlate with each other\n","html":"\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003etake audio\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecalculate \u003ca href=\"/posts/kbhmel_scale/\"\u003eMel Scale\u003c/a\u003e representation\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eapply a series of \u003ca href=\"/posts/kbhfilter_bank/\"\u003eFilter Bank\u003c/a\u003es which attenuates the input to highlight groups of frequencies\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-20_14-50-55_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewe then run a discrete-cosine transform to obtain 
\u003ca href=\"/posts/kbhspeech_feature_extraction/\"\u003eMFCC\u003c/a\u003es, because much of the output results will still correlate with each other\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspeech_feature_extraction/","tags":null,"title":"Speech Feature Extraction"},{"categories":null,"contents":"Group Meetings ","html":"\u003ch2 id=\"group-meetings\"\u003eGroup Meetings\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspeech_processing_index/","tags":null,"title":"Speech Processing Index"},{"categories":null,"contents":"A spinal tap is a medical procedure whereby cerebralspinal fluid is collected by puncturing the lumbar; used to diagnose problems where biomakers from the brain are needed.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhspinal_tap/\"\u003espinal tap\u003c/a\u003e is a medical procedure whereby cerebralspinal fluid is collected by puncturing the lumbar; used to diagnose problems where biomakers from the brain are needed.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspinal_tap/","tags":null,"title":"spinal tap"},{"categories":null,"contents":"A stationary point of an ODE is considered \u0026ldquo;stable\u0026rdquo; if, at the stationary point \\(y=c\\), the function with initial condition.\nIf you start near a stationary point, the function will either diverge \\(t\\to \\infty\\) to that stationary point, or converge to a stationary point. Whether the functions done that makes it \u0026ldquo;stable\u0026rdquo;/\u0026ldquo;unstable\u0026rdquo;.\nFor an autonomous ODEs \\(y\u0026rsquo;(t) = f(y(t))\\), suppose \\(y(t) = c\\) is a stationary solutiona:\n\\(c\\) is stable (i.e. \\(t\\to \\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from positive to negative; that is, when \\(f\u0026rsquo;( c) \u0026lt; 0\\) \\(c\\) is unstable (i.e. 
\\(t\\to -\\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from negative to positive; that is, when \\(f\u0026rsquo;(t) \u0026gt; 0\\) \\(c\\) is semi-stable (i.e. stable on one side, unstable on the other) if the graph of \\(f\\) near \\(c\\) has the same sign on both sides; meaning \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c)\\neq 0\\) if \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c) \\neq 0\\), we are sad and should investigate more away from zeros, the concavity of \\(y(t)\\) could be checked for \\(f f\u0026rsquo;\\). when its positive, \\(y(t)\\) is concave up; when its negative \\(y(t)\\) is concave down.\n","html":"\u003cp\u003eA stationary point of an \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e is considered \u0026ldquo;stable\u0026rdquo; if, at the stationary point \\(y=c\\), the function with initial condition.\u003c/p\u003e\n\u003cp\u003eIf you start near a stationary point, the function will either diverge \\(t\\to \\infty\\) to that stationary point, or converge to a stationary point. Whether the functions done that makes it \u0026ldquo;stable\u0026rdquo;/\u0026ldquo;unstable\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor an \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e \\(y\u0026rsquo;(t) = f(y(t))\\), suppose \\(y(t) = c\\) is a stationary solutiona:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(c\\) is stable (i.e. \\(t\\to \\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from positive to negative; that is, when \\(f\u0026rsquo;( c) \u0026lt; 0\\)\u003c/li\u003e\n\u003cli\u003e\\(c\\) is unstable (i.e. \\(t\\to -\\infty, y \\to c\\) for \\(y_0 \\approx c\\)) if the graph of \\(f\\) near \\(c\\) crosses from negative to positive; that is, when \\(f\u0026rsquo;(t) \u0026gt; 0\\)\u003c/li\u003e\n\u003cli\u003e\\(c\\) is semi-stable (i.e. 
stable on one side, unstable on the other) if the graph of \\(f\\) near \\(c\\) has the same sign on both sides; meaning \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c)\\neq 0\\)\u003c/li\u003e\n\u003cli\u003eif \\(f\u0026rsquo;( c) = 0\\) and \\(f\u0026rsquo;\u0026rsquo;( c) \\neq 0\\), we are sad and should investigate more\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eaway from zeros, the concavity of \\(y(t)\\) could be checked for \\(f f\u0026rsquo;\\). when its positive, \\(y(t)\\) is concave up; when its negative \\(y(t)\\) is concave down.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstability/","tags":null,"title":"stability (ODEs)"},{"categories":null,"contents":"stack is where all local variables and parameters live for a function. The stack frame goes away when the function returns.\nstack grows downwards in memory; each function call sets aside some space in stack regardless if local variables are used.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e is where all local variables and parameters live for a \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e. The stack frame goes away when the function returns.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e grows downwards in memory; each function call sets aside some space in stack regardless if local variables are used.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstack/","tags":null,"title":"stack"},{"categories":null,"contents":"A stack trace is the output of failing code by the runtime to indicate the location of the fault. 
For instance, in Python:\n--------------------------------------------------------------------------- TypeError Traceback (most recent call last) \u0026lt;ipython-input-1-0b766d7d4bc7\u0026gt; in \u0026lt;module\u0026gt; ----\u0026gt; 1 0+\u0026#34;\u0026#34; TypeError: unsupported operand type(s) for +: \u0026#39;int\u0026#39; and \u0026#39;str\u0026#39; ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhstack_trace/\"\u003estack trace\u003c/a\u003e is the output of failing code by the runtime to indicate the location of the fault. For instance, in Python:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003e---------------------------------------------------------------------------\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eTypeError\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eTraceback\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emost\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erecent\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecall\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elast\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eipython\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einput\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eb766d7d4bc7\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emodule\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#f92672\"\u003e----\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eTypeError\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eunsupported\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoperand\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etype\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003es\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#39;int\u0026#39;\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003eand\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#39;str\u0026#39;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhstack_trace/","tags":null,"title":"stack trace"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhstandard_error/","tags":null,"title":"standard error"},{"categories":null,"contents":"Stanford is an university.\nStuff https://knight-hennessy.stanford.edu/ Stanford UG Courses Index ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstanford/\"\u003eStanford\u003c/a\u003e is an university.\u003c/p\u003e\n\u003ch2 id=\"stuff\"\u003eStuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://knight-hennessy.stanford.edu/\"\u003ehttps://knight-hennessy.stanford.edu/\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstanford_courses_index/\"\u003eStanford UG Courses Index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford/","tags":null,"title":"Stanford"},{"categories":null,"contents":"Locales of Interest 5th flr Green Library: Albert Bender Room 380-382T, 2nd floor: UG Math Student Lounge Classes of Interest Classes that Were Good Resources tutoring.stanford.edu hume center skills coaching: academicskills.stanford.edu Tips ask your UAD about research opportunities and networking with faculty connections show up for office hours ","html":"\u003ch2 id=\"locales-of-interest\"\u003eLocales of Interest\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e5th flr Green Library: Albert Bender Room\u003c/li\u003e\n\u003cli\u003e380-382T, 2nd floor: UG Math Student Lounge\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"classes-of-interest\"\u003eClasses of Interest\u003c/h2\u003e\n\u003ch2 id=\"classes-that-were-good\"\u003eClasses that Were Good\u003c/h2\u003e\n\u003ch2 id=\"resources\"\u003eResources\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-09-20_13-14-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003etutoring.stanford.edu\u003c/li\u003e\n\u003cli\u003ehume center\u003c/li\u003e\n\u003cli\u003eskills coaching: academicskills.stanford.edu\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tips\"\u003eTips\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eask your UAD about research opportunities and networking with faculty connections\u003c/li\u003e\n\u003cli\u003eshow up for office hours\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford_factoids_index/","tags":null,"title":"Stanford Factoids Index"},{"categories":null,"contents":"Stanford UG Y1, Aut Decision Making Index Computer Systems Index CS Probability Index Speech Processing Index Stanford UG Y1, Win ODEs Index OS Index POMDPs Index Language Information Index UG Other Duties Here are a list of random indicies which may end up being helpful!\nCLRS Index AI Master Class Software Engineering Stanford UG Talks Date Topic Presenter Link \u0026lt;2023-09-20 Wed\u0026gt; UG Research Program Brian Thomas Stanford UG Research Program \u0026lt;2023-09-28 Thu\u0026gt; Bld an Ecosystem, Not Monolith Colin Raffel Build a System \u0026lt;2023-10-05 Thu\u0026gt; Training Helpful CHatbots Nazeen Rajani Training Helpful Chatbots \u0026lt;2023-10-26 Thu\u0026gt; AI Intepretability for Bio Gasper Begus AI Intepretability \u0026lt;2023-11-02 Thu\u0026gt; PT Transformers on Long Seqs Mike Lewis Pretraining Long Transformers \u0026lt;2023-11-07 Tue\u0026gt; Transformers! A. 
Vaswani Transformers \u0026lt;2023-11-09 Thu\u0026gt; Towards Interactive Agents Jessy Lin Interactive Agent \u0026lt;2023-11-16 Thu\u0026gt; Dissociating Language and Thought Anna Ivanova Dissociating Language and Thought \u0026lt;2024-01-11 Thu\u0026gt; Language Agents Karthik Narasimhan Language Agents with Karthik \u0026lt;2024-02-01 Thu\u0026gt; Pretraining Data \u0026lt;2024-02-08 Thu\u0026gt; value alignment Been Kim LM Alignment \u0026lt;2024-02-15 Thu\u0026gt; model editing Peter Hase Knowledge Editing Contacts Talk Contacts\n","html":"\u003ch2 id=\"stanford-ug-y1-aut\"\u003eStanford UG Y1, Aut\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_making_index/\"\u003eDecision Making Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomputer_systems_index/\"\u003eComputer Systems Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcs_probability_index/\"\u003eCS Probability Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhspeech_processing_index/\"\u003eSpeech Processing Index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stanford-ug-y1-win\"\u003eStanford UG Y1, Win\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhodes_index/\"\u003eODEs Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhos_index/\"\u003eOS Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpomdps_index/\"\u003ePOMDPs Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlanguage_information_index/\"\u003eLanguage Information Index\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ug-other-duties\"\u003eUG Other Duties\u003c/h2\u003e\n\u003cp\u003eHere are a list of random indicies which may end up being helpful!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclrs_index/\"\u003eCLRS Index\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhai_master_class/\"\u003eAI Master Class\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsoftware_engineering/\"\u003eSoftware Engineering\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stanford-ug-talks\"\u003eStanford UG Talks\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eDate\u003c/th\u003e\n\u003cth\u003eTopic\u003c/th\u003e\n\u003cth\u003ePresenter\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-09-20 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eUG Research Program\u003c/td\u003e\n\u003ctd\u003eBrian Thomas\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhstanford_ug_research_program/\"\u003eStanford UG Research Program\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-09-28 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eBld an Ecosystem, Not Monolith\u003c/td\u003e\n\u003ctd\u003eColin Raffel\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhbuild_a_system_not_a_monolyth/\"\u003eBuild a System\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-05 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eTraining Helpful CHatbots\u003c/td\u003e\n\u003ctd\u003eNazeen Rajani\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtraining_helpful_chatbots/\"\u003eTraining Helpful Chatbots\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-10-26 
Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eAI Intepretability for Bio\u003c/td\u003e\n\u003ctd\u003eGasper Begus\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhai_intepretability/\"\u003eAI Intepretability\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-02 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003ePT Transformers on Long Seqs\u003c/td\u003e\n\u003ctd\u003eMike Lewis\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpretraining_long_transformers/\"\u003ePretraining Long Transformers\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-07 Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eTransformers!\u003c/td\u003e\n\u003ctd\u003eA. Vaswani\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhtransformers/\"\u003eTransformers\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-09 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eTowards Interactive Agents\u003c/td\u003e\n\u003ctd\u003eJessy Lin\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhinteractive_agent/\"\u003eInteractive Agent\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2023-11-16 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eDissociating Language and Thought\u003c/td\u003e\n\u003ctd\u003eAnna Ivanova\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhdissociating_language_and_thought/\"\u003eDissociating Language and 
Thought\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-11 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003eLanguage Agents\u003c/td\u003e\n\u003ctd\u003eKarthik Narasimhan\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlanguage_agents/\"\u003eLanguage Agents with Karthik\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-01 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpretraining_data/\"\u003ePretraining Data\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-08 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003evalue alignment\u003c/td\u003e\n\u003ctd\u003eBeen Kim\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhlm_alignment/\"\u003eLM Alignment\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-15 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003emodel editing\u003c/td\u003e\n\u003ctd\u003ePeter Hase\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhknowledge_editing/\"\u003eKnowledge Editing\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"contacts\"\u003eContacts\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhtalk_contacts/\"\u003eTalk Contacts\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford_courses_index/","tags":["index"],"title":"Stanford UG Courses 
Index"},{"categories":null,"contents":"Brian Thomas, the research guy. Don\u0026rsquo;t start research at Autumn Frosh Freshmen Year.\nGetting Started Think about the Institutes (many of them do not have an UG major) Stanford HAI Stanford HCI? Find faculty Don\u0026rsquo;t just ask for a job Research the person\u0026rsquo;s publications and ask some questions about it: TRY TO ASK FOR A OFFICE HOUR MEETING WITH QUESTIONS: \u0026ldquo;I read your thing, and I would love to talk more about it\u0026rdquo; (there is coaching from Brian Thomas\u0026rsquo;s office, and coaching from UAD) OR, use a program, but still talk HB-ref? CURIS Stanford\u0026rsquo;s grant programs can pay for research needs. There will be people talking about grants later. Don\u0026rsquo;t worry about them. Get to the point where you need money and then figure it out.\nThe UADs The UADs are PhD+ or Profs They review UG research grants They know the program that are available In general: talk to UADs (when Kristin is not sick).\nDeliverables undergradresearch.stanford.edu Reach out to professors Look into UAD workshops + talk to UADs Find the edges of your textbook/courses (identify \u0026ldquo;where the trail seems to end\u0026rdquo;) SURPS: symposium of UG research and public service. Thursday 10/19, 4P. Burnham Pavilion Large groups have \u0026ldquo;Student Services Officers\u0026rdquo;, reach out ","html":"\u003cp\u003eBrian Thomas, the \u003ca href=\"/posts/kbhresearch/\"\u003eresearch\u003c/a\u003e guy. 
Don\u0026rsquo;t start research at Autumn Frosh Freshmen Year.\u003c/p\u003e\n\u003ch2 id=\"getting-started\"\u003eGetting Started\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eThink about the Institutes (many of them do not have an UG major)\n\u003cul\u003e\n\u003cli\u003eStanford HAI\u003c/li\u003e\n\u003cli\u003eStanford HCI?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eFind faculty\n\u003cul\u003e\n\u003cli\u003eDon\u0026rsquo;t just ask for a job\u003c/li\u003e\n\u003cli\u003eResearch the person\u0026rsquo;s publications and ask some questions about it: \u003cstrong\u003e\u003cstrong\u003eTRY TO ASK FOR A OFFICE HOUR MEETING WITH QUESTIONS\u003c/strong\u003e\u003c/strong\u003e: \u0026ldquo;I read your thing, and I would love to talk more about it\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e(there is coaching from \u003ca href=\"/posts/kbhstanford_ug_research_program/\"\u003eBrian Thomas\u003c/a\u003e\u0026rsquo;s office, and coaching from UAD)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eOR, use a program, but still talk\n\u003cul\u003e\n\u003cli\u003eHB-ref?\u003c/li\u003e\n\u003cli\u003eCURIS\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eStanford\u0026rsquo;s grant programs can pay for research needs. There will be people talking about grants later. Don\u0026rsquo;t worry about them. 
Get to the point where you need money and then figure it out.\u003c/p\u003e\n\u003ch2 id=\"the-uads\"\u003eThe UADs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe UADs are PhD+ or Profs\u003c/li\u003e\n\u003cli\u003eThey review UG research grants\u003c/li\u003e\n\u003cli\u003eThey know the program that are available\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIn general: talk to UADs (when Kristin is not sick).\u003c/p\u003e\n\u003ch2 id=\"deliverables\"\u003eDeliverables\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eundergradresearch.stanford.edu\u003c/li\u003e\n\u003cli\u003eReach out to professors\u003c/li\u003e\n\u003cli\u003eLook into UAD workshops + talk to UADs\u003c/li\u003e\n\u003cli\u003eFind the edges of your textbook/courses (identify \u0026ldquo;where the trail seems to end\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eSURPS: symposium of UG research and public service. Thursday 10/19, 4P. Burnham Pavilion\u003c/li\u003e\n\u003cli\u003eLarge groups have \u0026ldquo;Student Services Officers\u0026rdquo;, reach out\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstanford_ug_research_program/","tags":null,"title":"Stanford UG Research Program"},{"categories":null,"contents":"Everyone and their dog has a blog at this point. Why not me? You see, I don\u0026rsquo;t really like the idea of blogging, but I do enjoy taking notes. I take a crap tonnes of notes, and sometimes people want to see a copy of them.\nIn order to facilitate this, some friends and I created taproot, a collective note-taking effort which also automatically compiled pretty cool previews and an internet site. I still am one of the primary maintainers of taproot.\nWhile working on the project, however, we noticed that the loop-based architecture (instead of being based on events/triggers), lack of duplicity, and requirement of a central build server made it difficult.\nIn this vein, quantumish (also with his own lovely set of notes, tap on the link!) 
and I were discussing if the essentials of taproot can be built into a static site generator. Hence, this is an experiment (to hopefully be merged with the taproot group) to facilitate this.\n","html":"\u003cp\u003eEveryone and their dog has a blog at this point. Why not me? You see, I don\u0026rsquo;t really like the idea of blogging, but I \u003cem\u003edo\u003c/em\u003e enjoy taking notes. I take a crap tonnes of notes, and sometimes people want to see a copy of them.\u003c/p\u003e\n\u003cp\u003eIn order to facilitate this, some friends and I created \u003ca href=\"https://taproot3.sanity.gq/\"\u003etaproot\u003c/a\u003e, a collective note-taking effort which also automatically compiled pretty cool previews and an internet site. I still am one of the primary maintainers of taproot.\u003c/p\u003e\n\u003cp\u003eWhile working on the project, however, we noticed that the loop-based architecture (instead of being based on events/triggers), lack of duplicity, and requirement of a central build server made it difficult.\u003c/p\u003e\n\u003cp\u003eIn this vein, \u003ca href=\"https://quantumish.github.io/\"\u003equantumish\u003c/a\u003e (also with his own lovely set of notes, tap on the link!) and I were discussing if the essentials of taproot can be built into a static site generator. Hence, this is an experiment (to hopefully be merged with the taproot group) to facilitate this.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstarting_with_why_the_knowledgebase/","tags":null,"title":"Starting With Why: The Knowledgebase"},{"categories":null,"contents":"Smol companies\nTips A well-run startup should have 18 month of cash planned, and have a runway of 6 months to ensure you can always get acq-hired and \u0026ldquo;bail out\u0026rdquo; Myths of Startups \u0026ldquo;Joining Big Tech\u0026rdquo; vs. 
\u0026ldquo;Starting a Startup\u0026rdquo; are not binary options In between these poles: joining an existing startup Myth: \u0026ldquo;90% of startups of fail\u0026rdquo; True statement: 90% of SMALL BUSINESSES fail. Venture backed tech startups are very different world: only 1% of small businesses are venture backed.\nRoughly: 1/3 of VC backed startups \u0026ldquo;fail\u0026rdquo;\n1/3 fail 1/2 return the money (nothing happens) 1/6 exits + drive returns Myth (kinda): \u0026ldquo;you are under paid\u0026rdquo; If you JOIN a startup, the small amount of compensation corresponds to betting on yourself in a similar way.\nIf you are negotiating your compensation, you should try to get MORE EQUITY and less cash.\nAnatomy of a Startup \u0026ldquo;Fail\u0026rdquo; Most startup failures looks like an acqui-hire. Acq-hiring results in investors\nInvestors loose money Employees get a minor payoff ","html":"\u003cp\u003eSmol companies\u003c/p\u003e\n\u003ch2 id=\"tips\"\u003eTips\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA well-run startup should have \u003cstrong\u003e18 month of cash planned\u003c/strong\u003e, and have a \u003cstrong\u003erunway of 6 months\u003c/strong\u003e to ensure you can always get acq-hired and \u0026ldquo;bail out\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"myths-of-startups\"\u003eMyths of Startups\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Joining Big Tech\u0026rdquo; vs. \u0026ldquo;Starting a Startup\u0026rdquo; are \u003cstrong\u003enot\u003c/strong\u003e binary options\u003c/li\u003e\n\u003cli\u003eIn between these poles: joining an existing startup\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"myth-90-of-startups-of-fail\"\u003eMyth: \u0026ldquo;90% of startups of fail\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eTrue statement: 90% of \u003cstrong\u003eSMALL BUSINESSES\u003c/strong\u003e fail. 
Venture backed tech startups are \u003cstrong\u003every different world\u003c/strong\u003e: only 1% of small businesses are venture backed.\u003c/p\u003e\n\u003cp\u003eRoughly: 1/3 of VC backed startups \u0026ldquo;fail\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e1/3 fail\u003c/li\u003e\n\u003cli\u003e1/2 return the money (nothing happens)\u003c/li\u003e\n\u003cli\u003e1/6 exits + drive returns\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"myth--kinda--you-are-under-paid\"\u003eMyth (kinda): \u0026ldquo;you are under paid\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003eIf you JOIN a startup, the small amount of compensation corresponds to betting on yourself in a similar way.\u003c/p\u003e\n\u003cp\u003eIf you are negotiating your compensation, you should try to get MORE EQUITY and less cash.\u003c/p\u003e\n\u003ch2 id=\"anatomy-of-a-startup-fail\"\u003eAnatomy of a Startup \u0026ldquo;Fail\u0026rdquo;\u003c/h2\u003e\n\u003cp\u003eMost startup failures looks like an acqui-hire. Acq-hiring results in investors\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInvestors loose money\u003c/li\u003e\n\u003cli\u003eEmployees get a minor payoff\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstartup/","tags":null,"title":"Startup"},{"categories":null,"contents":"The stationary-action principle states that, in a dynamic system, the equations of motion of that system is yielded as the \u0026ldquo;stationary points\u0026rdquo; of the system\u0026rsquo;s action. i.e. the points of \u0026ldquo;least\u0026rdquo; action. (i.e. 
a ball sliding down a ramp is nice, but you don\u0026rsquo;t expect it\u0026mdash;in that system\u0026mdash;to fly off the ramp, do a turn, and then fly down.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhstationary_action_principle/\"\u003estationary-action principle\u003c/a\u003e states that, in a dynamic system, the equations of motion of that system is yielded as the \u0026ldquo;stationary points\u0026rdquo; of the system\u0026rsquo;s action. i.e. the points of \u0026ldquo;least\u0026rdquo; action. (i.e. a ball sliding down a ramp is nice, but you don\u0026rsquo;t expect it\u0026mdash;in that system\u0026mdash;to fly off the ramp, do a turn, and then fly down.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstationary_action_principle/","tags":null,"title":"stationary-action principle"},{"categories":null,"contents":"A statistic is a measure of something\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e is a measure of something\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstastistic/","tags":null,"title":"statistic"},{"categories":null,"contents":"To put some math behind that very, extremely simple Dyson\u0026rsquo;s Model, we will declare a vector space \\(K\\) which encodes the possible set of states that our \u0026ldquo;cell\u0026rdquo; can be in. 
Now, declare a transition matrix \\(M \\in \\mathcal{L}(K)\\) which maps from one state to another.\nFinally, then, we can define a function \\(P(k)\\) for the \\(k\\) th state of our cell.\nThat is, then:\n\\begin{equation} P(k+1) = M P(k) \\end{equation}\n(as the \u0026ldquo;next\u0026rdquo; state is simply \\(M\\) applied onto the previous state).\nRolling that out, we have:\n\\begin{equation} P(k) = M^{k} P(0) \\end{equation}\n","html":"\u003cp\u003eTo put some math behind that \u003cem\u003every, extremely\u003c/em\u003e simple \u003ca href=\"/posts/kbhdyson_s_model_of_life/\"\u003eDyson\u0026rsquo;s Model\u003c/a\u003e, we will declare a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(K\\) which encodes the possible set of states that our \u0026ldquo;\u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e\u0026rdquo; can be in. Now, declare a transition \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e \\(M \\in \\mathcal{L}(K)\\) which maps from one state to another.\u003c/p\u003e\n\u003cp\u003eFinally, then, we can define a function \\(P(k)\\) for the \\(k\\) th state of our \u003ca href=\"/posts/kbhcell/\"\u003ecell\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eThat is, then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k+1) = M P(k)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(as the \u0026ldquo;next\u0026rdquo; state is simply \\(M\\) applied onto the previous state).\u003c/p\u003e\n\u003cp\u003eRolling that out, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(k) = M^{k} P(0)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstepwise_evolution/","tags":null,"title":"Stepwise Evolution"},{"categories":null,"contents":"This is a theory that come back to CAPM.\n","html":"\u003cp\u003eThis is a theory that come back to \u003ca 
href=\"/posts/kbhcapm/\"\u003eCAPM\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstochastic_discount_factor/","tags":null,"title":"Stochastic Discount Factor"},{"categories":null,"contents":"\\begin{equation} \\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} L(f_{\\theta}(x), y) \\end{equation}\nthis terminates when theta differences becomes small, or when progress halts: like when \\(\\theta\\) begins going up instead.\nwe update the weights in SGD by taking a single random sample and moving weights to that direction.\nbatch gradient descent stochastic gradient descent gives choppy movements because it does one sample at once.\nbatch gradient descent does it over the entire dataset, which is fine but its slow.\nmini-batch gradient mini-batches helps take advantage of both by training over groups of \\(m\\) samples\nregularization regularization penalize large weights to reduce over-fitting\n","html":"\u003cp\u003e\\begin{equation}\n\\theta^{t+1} = \\theta^{t} - \\eta \\nabla_{\\theta} L(f_{\\theta}(x), y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis terminates when theta differences becomes small, or when progress halts: like when \\(\\theta\\) begins going up instead.\u003c/p\u003e\n\u003cp\u003ewe update the weights in SGD by taking a \u003cstrong\u003esingle random sample\u003c/strong\u003e and moving weights to that direction.\u003c/p\u003e\n\u003ch2 id=\"batch-gradient-descent\"\u003ebatch gradient descent\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhstochastic_gradient_descent/\"\u003estochastic gradient descent\u003c/a\u003e gives choppy movements because it does one sample at once.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#batch-gradient-descent\"\u003ebatch gradient descent\u003c/a\u003e does it over the entire dataset, which is fine but its slow.\u003c/p\u003e\n\u003ch2 id=\"mini-batch-gradient\"\u003emini-batch gradient\u003c/h2\u003e\n\u003cp\u003emini-batches helps take advantage of both by training 
over groups of \\(m\\) samples\u003c/p\u003e\n\u003ch2 id=\"regularization\"\u003eregularization\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#regularization\"\u003eregularization\u003c/a\u003e penalize large weights to reduce over-fitting\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstochastic_gradient_descent/","tags":null,"title":"stochastic gradient descent"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhstochat/","tags":null,"title":"stochat"},{"categories":null,"contents":"the stock indicies\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhstock_indicies/\"\u003estock indicies\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstock_indicies/","tags":null,"title":"stock indicies"},{"categories":null,"contents":"Stock Issues are policy debate doctrines which divides the debate into 5 subtopical ideas.\nWikipedia\nHarms: what are the problems in the status quo?\nInherency: what are these problems not already being solved? (Or not already being solved in the best way?)\nSignificancy: comparing the advantages and disadvantages of the status quo and your proposed solution, why is the proposed solution more worthy than the status quo?\nThe Ws:\nWhy this? Why is your proposed solution the best (most effective, or most feasible, or fastest, etc.) one?\nWhy now? Why is now the best time to build this solution?\nWhy you? 
Why are you (and your team) the best builders of this solution?\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstock_issues_debate/\"\u003eStock Issues\u003c/a\u003e are policy debate doctrines which divides the debate into 5 subtopical ideas.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Stock_issues\"\u003eWikipedia\u003c/a\u003e\u003c/p\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eHarms: what are the problems in the status quo?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eInherency: what are these problems not already being solved? (Or not already being solved in the best way?)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSignificancy: comparing the advantages and disadvantages of the status quo and your proposed solution, why is the proposed solution more worthy than the status quo?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe Ws:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eWhy this? Why is your proposed solution the best (most effective, or most feasible, or fastest, etc.) one?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhy now? Why is now the best time to build this solution?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhy you? 
Why are you (and your team) the best builders of this solution?\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstock_issues_debate/","tags":null,"title":"Stock Issues (Debate)"},{"categories":null,"contents":" Around 20,000 stocks valued at $47 Trillion Only about 2,000 matter Transaction frequency is high, liquidity is generally low \u0026mdash; grade sizes are small Roughly 59 places to trade stock (exchanges + darkpools) ","html":"\u003cul\u003e\n\u003cli\u003eAround 20,000 stocks valued at $47 Trillion\u003c/li\u003e\n\u003cli\u003eOnly about 2,000 matter\u003c/li\u003e\n\u003cli\u003eTransaction frequency is high, liquidity is generally low \u0026mdash; grade sizes are small\u003c/li\u003e\n\u003cli\u003eRoughly 59 places to trade stock (exchanges + \u003ca href=\"/posts/kbhdarkpool/\"\u003edarkpool\u003c/a\u003es)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstock_market_survey/","tags":null,"title":"stock market survey"},{"categories":null,"contents":"strain is the proportional deformation of a material given some stress applied\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhstrain/\"\u003estrain\u003c/a\u003e is the proportional \u003cstrong\u003edeformation\u003c/strong\u003e of a material given some \u003ca href=\"/posts/kbhstress/\"\u003estress\u003c/a\u003e applied\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrain/","tags":null,"title":"strain"},{"categories":null,"contents":"revising:\ncharacter as subjects, actions as verbs old before new (connect sentences\u0026rsquo; subjects from tail to head) short before long: (say the short phrase before the long phrase, move the verb up front if you can) topic then stress (the last position is being stressed—the thing that\u0026rsquo;s most important to communicate is the end of an utterance) The above principles apply to all units—as in, each sentences paragraphs, and arguments should all follow a similar 
principles ","html":"\u003cp\u003erevising:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003echaracter as subjects, actions as verbs\u003c/li\u003e\n\u003cli\u003eold before new (connect sentences\u0026rsquo; subjects from tail to head)\u003c/li\u003e\n\u003cli\u003eshort before long: (say the short phrase before the long phrase, move the verb up front if you can)\u003c/li\u003e\n\u003cli\u003etopic then stress (the last position is being stressed—the thing that\u0026rsquo;s most important to communicate is the end of an utterance)\u003c/li\u003e\n\u003cli\u003eThe above principles apply to all units—as in, each sentences paragraphs, and arguments should all follow a similar principles\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrategies_to_revise_an_essay/","tags":null,"title":"Strategies to Revise an Essay"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhstress/","tags":null,"title":"stress"},{"categories":null,"contents":"In C, string is an array of chars. C strings don\u0026rsquo;t track their length; each C string always end in an null-terminating character: \\0. This is represents the zero byte.\nThere\u0026rsquo;s a built in function strlen which checks the length of a string without the null-terminating character. 
This function is O(n)!!!\nString Pointer Syntax Sugar Synonyms char str[6]; // these are equivalent char *ptr = str; char *ptr = \u0026amp;str[0]; char *ptr = \u0026amp;str; // DON\u0026#39;T DO THIS // these are equivalent char thirdLetter = str[3]; char thirdLetter = *(str + 3); seven commandments of c strings if we create a string as char[], we can modify its characters because its memory lives in our stack instead of living in a global data segment we can\u0026rsquo;t set char[] as equaling to something, because its not strictly a pointer and instead it refers to an entire block of memory instead of a pointer to the first element (in a same vein, an array\u0026rsquo;s size is fixed and travels with the variable) if we pass char[] as a parameter, it is converted to a char * if we create a string with new string literal as char *thing = \u0026quot;thing\u0026quot;, we can\u0026rsquo;t modify it because its on the global data segment we can set char * equaling to another value because its a pointer adding an offset to a c string gives a substring that\u0026rsquo;s places past the first character if we change characters in a string parameter, these changes will persist passing strings around Strings are passed as a pointer to their first character.\nvoid foo(char *str) { // do string things } char string[6]; // THIS IS A STRING OF LENGTH 5!!!! (beacuse there\u0026#39;s a null terminator) foo(string); // pass the syntax sugar pointer foo(\u0026amp;string[0]); // pass the actual first pointer you won\u0026rsquo;t know whether or not this is the address to a string or a pointer to a single character; so good practice to call it something_str if you\u0026rsquo;d like a string.\ncharacter manipulation checker #include \u0026lt;ctype.h\u0026gt; int main() { isalpha(ch); islower(ch); ... } string manipulations #include \u0026lt;string.h\u0026gt; strcmp When you comparing strings, you can\u0026rsquo;t use == or \u0026lt; or \u0026gt;. 
Instead:\n#include \u0026lt;string.h\u0026gt; int main() { int cmp = strcmp(str1, str2); if (cmp == 0) { // if str1 is equal to str2 } else if (cmp \u0026lt; 0) { // if str1 comes before str2 lexographically } else { // if str2 comes before str1 lexographically } } strcpy Copying strings, dangerously, because buffer overflows are fun.\nThis function does NOT care about buffer overflows, and WILL put in a null terminator.\nstrncopy This function optimize against buffer overflow, but it may not write a null terminator.\nstrcat strncat always puts in a null terminator.\npointer arithmetic with strings Fortunatly, each char is\nstrspn Count the number of characters that are \u0026ldquo;cool\u0026rdquo;: contained within the end\n","html":"\u003cp\u003eIn C, \u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e is an array of \u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003es. C strings don\u0026rsquo;t track their length; each C string always end in an null-terminating character: \u003ccode\u003e\\0\u003c/code\u003e. This is represents the zero byte.\u003c/p\u003e\n\u003cp\u003eThere\u0026rsquo;s a built in function \u003ccode\u003estrlen\u003c/code\u003e which checks the length of a string without the null-terminating character. 
This function is \u003ccode\u003eO(n)\u003c/code\u003e!!!\u003c/p\u003e\n\u003ch2 id=\"string-pointer-syntax-sugar-synonyms\"\u003eString Pointer Syntax Sugar Synonyms\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// these are equivalent\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eptr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// DON\u0026#39;T DO THIS\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// these are equivalent\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethirdLetter\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethirdLetter\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"seven-commandments-of-c-string--kbhstring-dot-md--s\"\u003eseven commandments of c \u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003es\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eif we create a string as \u003ccode\u003echar[]\u003c/code\u003e, we can modify its characters because its memory lives in our stack instead of living in a global data segment\u003c/li\u003e\n\u003cli\u003ewe can\u0026rsquo;t set \u003ccode\u003echar[]\u003c/code\u003e as equaling to something, because its not strictly a pointer and instead it refers to an entire block of memory instead of a pointer to the first element (in a same vein, an array\u0026rsquo;s size is fixed and travels with the variable)\u003c/li\u003e\n\u003cli\u003eif we pass \u003ccode\u003echar[]\u003c/code\u003e as a parameter, it is converted to a \u003ccode\u003echar *\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eif we create a string with new string literal as \u003ccode\u003echar *thing = \u0026quot;thing\u0026quot;\u003c/code\u003e, we can\u0026rsquo;t modify it because its on the global data segment\u003c/li\u003e\n\u003cli\u003ewe can set \u003ccode\u003echar *\u003c/code\u003e equaling to another value because its a pointer\u003c/li\u003e\n\u003cli\u003eadding an offset to a c string gives a substring that\u0026rsquo;s places past the first character\u003c/li\u003e\n\u003cli\u003eif we change characters in a string parameter, these changes will persist\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"passing-strings-around\"\u003epassing strings around\u003c/h2\u003e\n\u003cp\u003eStrings are passed as a pointer to their first 
character.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efoo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do string things\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e6\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// THIS IS A STRING OF LENGTH 5!!!! 
(beacuse there\u0026#39;s a null terminator)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003efoo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// pass the syntax sugar pointer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003efoo\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// pass the actual first pointer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eyou won\u0026rsquo;t know whether or not this is the address to a string or a pointer to a single character; so good practice to call it \u003ccode\u003esomething_str\u003c/code\u003e if you\u0026rsquo;d like a string.\u003c/p\u003e\n\u003ch2 id=\"character-manipulation-checker\"\u003echaracter manipulation checker\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan 
style=\"color:#75715e\"\u003e\u0026lt;ctype.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eisalpha\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ech\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eislower\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ech\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e...\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-08-28_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"string-manipulations\"\u003estring manipulations\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode 
class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;string.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-17-37_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"strcmp\"\u003estrcmp\u003c/h3\u003e\n\u003cp\u003eWhen you comparing \u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003es, you can\u0026rsquo;t use == or \u0026lt; or \u0026gt;. Instead:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e#include\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e\u0026lt;string.h\u0026gt;\u003c/span\u003e\u003cspan style=\"color:#75715e\"\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emain\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecmp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#75af00\"\u003estrcmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estr1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estr2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecmp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if str1 is equal to str2\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecmp\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if str1 comes before str2 
lexographically\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if str2 comes before str1 lexographically\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"strcpy\"\u003estrcpy\u003c/h3\u003e\n\u003cp\u003eCopying strings, dangerously, because buffer \u003ca href=\"/posts/kbhbinary_number_system/#overflow\"\u003eoverflow\u003c/a\u003es are fun.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-06_11-22-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis function does NOT care about buffer overflows, and \u003cstrong\u003eWILL\u003c/strong\u003e put in a null terminator.\u003c/p\u003e\n\u003ch3 id=\"strncopy\"\u003estrncopy\u003c/h3\u003e\n\u003cp\u003eThis function optimize \u003cstrong\u003eagainst\u003c/strong\u003e buffer overflow, but it may not write a null terminator.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-09_10-42-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"strcat\"\u003estrcat\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-10-09_10-45-17_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003estrncat always puts in a null terminator.\u003c/p\u003e\n\u003ch3 id=\"pointer-arithmetic-with-strings\"\u003epointer arithmetic with strings\u003c/h3\u003e\n\u003cp\u003eFortunatly, each \u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e is\u003c/p\u003e\n\u003ch3 id=\"strspn\"\u003estrspn\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-09_11-16-40_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eCount the number of characters that are \u0026ldquo;cool\u0026rdquo;: contained within the end\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstring/","tags":null,"title":"string"},{"categories":null,"contents":"This is a precursor to MDP planning:\nstates: conjunction of \u0026ldquo;fluents\u0026rdquo; (which are state) actions: transition between fulents transitions: deleting of older, changed parts of fluents, adding new parts Planning Domain Definition Language A LISP used to specify a STRIPS-style planning problem.\nHierarchical Task Network Decompose classical planning into a hierarchy of actions Leverage High level actions to generate a coarse plan Refine to smaller problems ","html":"\u003cp\u003eThis is a precursor to \u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMDP\u003c/a\u003e planning:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003estates: conjunction of \u0026ldquo;fluents\u0026rdquo; (which are state)\u003c/li\u003e\n\u003cli\u003eactions: transition between fulents\u003c/li\u003e\n\u003cli\u003etransitions: deleting of older, changed parts of fluents, adding new parts\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"planning-domain-definition-language\"\u003ePlanning Domain Definition Language\u003c/h2\u003e\n\u003cp\u003eA LISP used to specify a \u003ca href=\"/posts/kbhstrips_style_planning/\"\u003eSTRIPS-style planning\u003c/a\u003e problem.\u003c/p\u003e\n\u003ch2 
id=\"hierarchical-task-network\"\u003eHierarchical Task Network\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eDecompose classical planning into a hierarchy of actions\u003c/li\u003e\n\u003cli\u003eLeverage High level actions to generate a coarse plan\u003c/li\u003e\n\u003cli\u003eRefine to smaller problems\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrips_style_planning/","tags":null,"title":"STRIPS-style planning"},{"categories":null,"contents":"Reading Notes Strong Free Will vs. Weak Free Will \u0026mdash; \u0026ldquo;will\u0026rdquo; and \u0026ldquo;bells inequality\u0026rdquo; is a demonstration of indeterminism/randomness between particles \u0026mdash; but indeterminism and randomness a demonstration of will.\nThat if humans have free will, it should be spawened from the indeterminism of elementary particles It asserts, roughly, that if indeed we humans have free will, then elementary particles already have their own small share of this valuable commodity.\nSPIN Axiom SPIN Axiom: Measurements of the squared (components of) spin of a spin 1 particle in three orthogonal directions always give the answers 1, 0, 1 in some order.\nTWIN Axiom Paired particles will come up with same measurements if measured in the same way\nThe TWIN Axiom: For twinned spin 1 particles, suppose experimenter A performs a triple experiment of measuring the squared spin component of particle a in three orthogonal directions x, y, z, while experimenter B measures the twinned par- ticle b in one direction, w . 
Then if w happens to be in the same direction as one of x, y, z, experimenter B’s measurement will necessarily yield the same answer as the corresponding measurement by A.\nFree as something that cannot be an uncurried function of previous states To say that A’s choice of x, y, z is free means more precisely that it is not determined by (i.e., is not a function of) what has happened at earlier times (in any inertial frame).\nMIN Axiom Choice of direction of measurement of one twinned qubit does not influence the results of the current qubit (unless they happen to align.)\nThe MIN Axiom: Assume that the experiments performed by A and B are space-like separated. Then experimenter B can freely choose any one of the 33 particular directions w , and a’s response is independent of this choice. Similarly and inde- pendently, A can freely choose any one of the 40 triples x, y, z, and b’s response is independent of that choice.\n","html":"\u003ch2 id=\"reading-notes\"\u003eReading Notes\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhstrong_free_will/#reading-notes\"\u003eStrong Free Will\u003c/a\u003e vs. 
Weak Free Will \u0026mdash; \u0026ldquo;will\u0026rdquo; and \u0026ldquo;bells inequality\u0026rdquo; is a demonstration of indeterminism/randomness between particles \u0026mdash; but indeterminism and randomness a demonstration of will.\u003c/p\u003e\n\u003ch3 id=\"that-if-humans-have-free-will-it-should-be-spawened-from-the-indeterminism-of-elementary-particles\"\u003eThat if humans have free will, it should be spawened from the indeterminism of elementary particles\u003c/h3\u003e\n\u003cp\u003eIt asserts, roughly, that if indeed we humans have free will, then elementary particles already have their own small share of this valuable commodity.\u003c/p\u003e\n\u003ch3 id=\"spin-axiom\"\u003eSPIN Axiom\u003c/h3\u003e\n\u003cp\u003eSPIN Axiom: Measurements of the squared (components of) spin of a spin 1 particle in three orthogonal directions always give the answers 1, 0, 1 in some order.\u003c/p\u003e\n\u003ch3 id=\"twin-axiom\"\u003eTWIN Axiom\u003c/h3\u003e\n\u003cp\u003ePaired particles will come up with same measurements if measured in the same way\u003c/p\u003e\n\u003cp\u003eThe TWIN Axiom: For twinned spin 1 particles, suppose experimenter A performs a triple experiment of measuring the squared spin component of particle a in three orthogonal directions x, y, z, while experimenter B measures the twinned par- ticle b in one direction, w . 
Then if w happens to be in the same direction as one of x, y, z, experimenter B’s measurement will necessarily yield the same answer as the corresponding measurement by A.\u003c/p\u003e\n\u003ch3 id=\"free-as-something-that-cannot-be-an-uncurried-function-of-previous-states\"\u003eFree as something that cannot be an uncurried function of previous states\u003c/h3\u003e\n\u003cp\u003eTo say that A’s choice of x, y, z is free means more precisely that it is not determined by (i.e., is not a function of) what has happened at earlier times (in any inertial frame).\u003c/p\u003e\n\u003ch3 id=\"min-axiom\"\u003eMIN Axiom\u003c/h3\u003e\n\u003cp\u003eChoice of direction of measurement of one twinned qubit does not influence the results of the current qubit (unless they happen to align.)\u003c/p\u003e\n\u003cp\u003eThe MIN Axiom: Assume that the experiments performed by A and B are space-like separated. Then experimenter B can freely choose any one of the 33 particular directions w , and a’s response is independent of this choice. 
Similarly and inde- pendently, A can freely choose any one of the 40 triples x, y, z, and b’s response is independent of that choice.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrong_free_will/","tags":null,"title":"Strong Free Will"},{"categories":null,"contents":"proof by induction but assuming that all \\(k \u0026lt; n\\) is given.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhproof_by_induction/\"\u003eproof by induction\u003c/a\u003e but assuming that all \\(k \u0026lt; n\\) is given.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstrong_induction/","tags":null,"title":"strong induction"},{"categories":null,"contents":"We learn a Bayes Net grphical structure by following Bayes rule:\n\\begin{align} P(G|D) \u0026amp;\\propto P(D|G) P(G) \\\\ \u0026amp;= P(G) \\int P(D | \\theta, G) P(\\theta|G) d\\theta \\\\ \u0026amp;= P(G) \\prod_{i=1}^{n} \\prod_{j=1}^{q_{i}} \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0} + m_{i,j,0})} \\prod_{k=1}^{r_{i}} \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})} \\end{align}\nwhere, we define: \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\nThe actual integration process is not provided, but mostly uninteresting. See Beta Distribution for a flavour of how it came about.\nThis is hard. We are multiply many gammas together, which is computationally lame. So instead, we use\nBaysian Network Scoring Log Bayesian Score is a score for measure of well-fittingness of a Baysian Network against some data. 
We sometimes call this the Baysian Score.\nLet:\n\\(x_{1:n}\\) be variables \\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took \\(G\\) is the graph \\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\)) \\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\) \\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator) Let us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\).\nWe aim to compute:\n\\begin{equation} \\log P(G|D) = \\log P(G) + \\sum_{i=1}^{n} \\sum_{j=1}^{q_{i}} \\qty[\\qty(\\log \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0}+ m_{i,j,0})}) + \\sum_{k=1}^{r_{i}} \\log \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})}] \\end{equation}\nIn practice, uniform prior of the graph is mostly used always. Assuming uniform priors, so \\(P(G)=1\\) and therefore we can drop the first term. Recall that \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\nWe can effectively take a prior structure, and blindly compute the Baysian Score vis a vi your data, and you will get an answer which whether or not something is the simplest model.\nOf course, we can\u0026rsquo;t just try all graphs to get a graph structure. Instead, we use some search algorithm:\nK2 Algorithm Runs in polynomial time, but doesn\u0026rsquo;t grantee an optimal structure. 
Let us create a network with a sequence of variables with some ordering:\n\\begin{equation} x_1, x_2, x_3, x_4 \\end{equation}\nFor K2 Algorithm, we assume a uniform distribution initially before the graph is learned.\nwe lay down \\(x_1\\) onto the graph we then try to lay down \\(x_{2}\\): compute the Baysian Scores of two networks: \\(x_1 \\to x_2\\) OR \\(x_1\\ x_2\\) (see if connecting \\(x_2\\) to \\(x_1\\) helps). keep the structure with the maximum score we then try to lay down \\(x_{3}\\): compute the Baysian Score of \\(x_1 \\to x_3\\) (plus whatever decision you made about \\(x_2\\)) OR \\(x_1, x_3\\); keep the one that works the best. Then, try the same to decide whether to connect \\(x_2\\) to \\(x_3\\) as well Repeat until you considered all nodes After you try out one ordering, you should try out another one. Because you can only add parents from elements before you in the list, you will never get a cycle.\nLocal Graph Search Start with an uncorrected graph. Search on the following actions:\nbasic graph operations:\nadd edge remove edge flip edge A graph\u0026rsquo;s neighborhood is the graphs for whicthey are one basic graph operation away.\nCreate a cycle detection scheme.\nNow, just try crap. Keep computing a Baysian Score after you tried something, if its good, keep it. If its not, don\u0026rsquo;t.\nTo prevent you from being stuck in a local minimum:\nperform random restarts perform K2 Algorithm, and then try things out simulated annealing: take a step that\u0026rsquo;s worse for optimizing Baysian Scores genetic algorithms: random population which reproduces at a rate proportional to their score Partially Directed Graph Search We first formulate a partially-directed graph, which is a graph which has some edges, but some edges left to be decided:\nIn this case, edges \\(C \\to D\\) and \\(D \\leftarrow E\\) are both defined. 
\\(A,B,C\\) are left as undirected nodes available to be searched on.\nWe now try out all combinations of arrows that may fit between \\(A,B,C\\), with the constraint of all objects you search on being Markov Equivalent (so, you can\u0026rsquo;t remove or introduce new immoral v-structures).\n","html":"\u003cp\u003eWe learn a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e grphical structure by following \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes rule\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(G|D) \u0026amp;\\propto P(D|G) P(G) \\\\\n\u0026amp;= P(G) \\int P(D | \\theta, G) P(\\theta|G) d\\theta \\\\\n\u0026amp;= P(G) \\prod_{i=1}^{n} \\prod_{j=1}^{q_{i}} \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0} + m_{i,j,0})} \\prod_{k=1}^{r_{i}} \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhere, we define: \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\u003c/p\u003e\n\u003cp\u003eThe actual integration process is not provided, but mostly uninteresting. See \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e for a flavour of how it came about.\u003c/p\u003e\n\u003cp\u003eThis is hard. We are multiply many gammas together, which is computationally lame. So instead, we use\u003c/p\u003e\n\u003ch2 id=\"baysian-network-scoring\"\u003eBaysian Network Scoring\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#baysian-network-scoring\"\u003eLog Bayesian Score\u003c/a\u003e is a score for measure of well-fittingness of a \u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e against some data. 
We sometimes call this the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x_{1:n}\\) be variables\u003c/li\u003e\n\u003cli\u003e\\(o_1, \u0026hellip;, o_{m}\\) be the \\(m\\) observations we took\u003c/li\u003e\n\u003cli\u003e\\(G\\) is the graph\u003c/li\u003e\n\u003cli\u003e\\(r_{i}\\) is the number of instantiations in \\(X_{i}\\) (for boolean variables, this would be \\(2\\))\u003c/li\u003e\n\u003cli\u003e\\(q_{i}\\) is the number of parental instantiations of \\(X_{i}\\) (if parent 1 can take on 10 values, parent 2 can take 3 values, then child\u0026rsquo;s \\(q_{i}=10\\cdot 3=30\\)) \u0026mdash; if a node has no parents it has a \\(q_{i}\\) is \\(1\\)\u003c/li\u003e\n\u003cli\u003e\\(\\pi_{i,j}\\) is \\(j\\) instantiation of parents of \\(x_{i}\\) (the \\(j\\) th combinator)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLet us first make some observations. We use \\(m_{i,j,k}\\) to denote the COUNT of the number of times \\(x_{i}\\) took a value \\(k\\) when \\(x_{i}\\) parents took instantiation \\(j\\).\u003c/p\u003e\n\u003cp\u003eWe aim to compute:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\log P(G|D) = \\log P(G) + \\sum_{i=1}^{n} \\sum_{j=1}^{q_{i}} \\qty[\\qty(\\log \\frac{\\Gamma(\\alpha_{i,j,0})}{\\Gamma(\\alpha_{i,j,0}+ m_{i,j,0})}) + \\sum_{k=1}^{r_{i}} \\log \\frac{\\Gamma(\\alpha_{i,j,k} + m_{i,j,k})}{\\Gamma(\\alpha_{i,j,k})}]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn practice, uniform prior of the graph is mostly used always. Assuming uniform priors, so \\(P(G)=1\\) and therefore we can drop the first term. 
Recall that \\(\\alpha_{i,j,0} = \\sum_{k} \\alpha_{i,j,k}\\).\u003c/p\u003e\n\u003cp\u003eWe can effectively take a prior structure, and blindly compute the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e vis a vi your data, and you will get an answer which whether or not something is the simplest model.\u003c/p\u003e\n\u003cp\u003eOf course, we can\u0026rsquo;t just try all graphs to get a graph structure. Instead, we use some search algorithm:\u003c/p\u003e\n\u003ch2 id=\"k2-algorithm\"\u003eK2 Algorithm\u003c/h2\u003e\n\u003cp\u003eRuns in polynomial time, but doesn\u0026rsquo;t grantee an optimal structure. Let us create a network with a sequence of variables with some ordering:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_1, x_2, x_3, x_4\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor \u003ca href=\"#k2-algorithm\"\u003eK2 Algorithm\u003c/a\u003e, we assume a \u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e initially before the graph is learned.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe lay down \\(x_1\\) onto the graph\u003c/li\u003e\n\u003cli\u003ewe then try to lay down \\(x_{2}\\): compute the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003es of two networks: \\(x_1 \\to x_2\\) OR \\(x_1\\ x_2\\) (see if connecting \\(x_2\\) to \\(x_1\\) helps). keep the structure with the maximum score\u003c/li\u003e\n\u003cli\u003ewe then try to lay down \\(x_{3}\\): compute the \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e of \\(x_1 \\to x_3\\) (plus whatever decision you made about \\(x_2\\)) OR \\(x_1, x_3\\); keep the one that works the best. Then, try the same to decide whether to connect \\(x_2\\) to \\(x_3\\) as well\u003c/li\u003e\n\u003cli\u003eRepeat until you considered all nodes\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAfter you try out one ordering, you should try out another one. 
Because you can only add parents from elements before you in the list, you will never get a cycle.\u003c/p\u003e\n\u003ch2 id=\"local-graph-search\"\u003eLocal Graph Search\u003c/h2\u003e\n\u003cp\u003eStart with an uncorrected graph. Search on the following actions:\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#local-graph-search\"\u003ebasic graph operation\u003c/a\u003es:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eadd edge\u003c/li\u003e\n\u003cli\u003eremove edge\u003c/li\u003e\n\u003cli\u003eflip edge\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eA graph\u0026rsquo;s \u003ca href=\"#local-graph-search\"\u003eneighborhood\u003c/a\u003e is the graphs for whicthey are one basic graph operation away.\u003c/p\u003e\n\u003cp\u003eCreate a cycle detection scheme.\u003c/p\u003e\n\u003cp\u003eNow, just try crap. Keep computing a \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003e after you tried something, if its good, keep it. If its not, don\u0026rsquo;t.\u003c/p\u003e\n\u003cp\u003eTo prevent you from being stuck in a local minimum:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eperform random restarts\u003c/li\u003e\n\u003cli\u003eperform \u003ca href=\"#k2-algorithm\"\u003eK2 Algorithm\u003c/a\u003e, and then try things out\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003esimulated annealing\u003c/a\u003e: take a step that\u0026rsquo;s worse for optimizing \u003ca href=\"#baysian-network-scoring\"\u003eBaysian Score\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003egenetic algorithms: random population which reproduces at a rate proportional to their score\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"partially-directed-graph-search\"\u003ePartially Directed Graph Search\u003c/h2\u003e\n\u003cp\u003eWe first formulate a partially-directed graph, which is a graph which has some edges, but some edges left to be decided:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg 
src=\"/ox-hugo/2023-10-12_11-09-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eIn this case, edges \\(C \\to D\\) and \\(D \\leftarrow E\\) are both defined. \\(A,B,C\\) are left as undirected nodes available to be searched on.\u003c/p\u003e\n\u003cp\u003eWe now try out all combinations of arrows that may fit between \\(A,B,C\\), with the constraint of all objects you search on being \u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalent\u003c/a\u003e (so, you can\u0026rsquo;t remove or introduce new \u003ca href=\"/posts/kbhimmoral_v_structure/\"\u003eimmoral v-structure\u003c/a\u003es).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstructure_learning/","tags":null,"title":"structure learning"},{"categories":null,"contents":"Goal: using protein-protein interfaces and docking to learn about the polymerase behavior\nToo bio-y and I\u0026rsquo;m literally not sure how to make of it\n","html":"\u003cp\u003eGoal: using protein-protein interfaces and docking to learn about the polymerase behavior\u003c/p\u003e\n\u003cp\u003eToo bio-y and I\u0026rsquo;m literally not sure how to make of it\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhstructure_of_covid_replication/","tags":null,"title":"Structure of COVID Replication"},{"categories":null,"contents":"Key Sequence Notation New Concepts Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_dec012023/","tags":null,"title":"SU-CS107 DEC012023"},{"categories":null,"contents":"Not published 
to prevent AIV.\n","html":"\u003cp\u003eNot published to prevent AIV.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_midterm_sheet/","tags":null,"title":"SU-CS107 Midterm Sheet"},{"categories":null,"contents":" privacy ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_nov102023/","tags":null,"title":"SU-CS107 NOV102023"},{"categories":null,"contents":"Key Sequence Notation New Concepts privacy memory allocation optimization caching Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmemory_allocation/\"\u003ememory allocation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcaching/\"\u003ecaching\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_nov132023/","tags":null,"title":"SU-CS107 NOV132023"},{"categories":null,"contents":" optimization ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimization/\"\u003eoptimization\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_nov272023/","tags":null,"title":"SU-CS107 NOV272023"},{"categories":null,"contents":"Notation New Concepts computer number system 
bits and bytes base 10, base 2, base 16 integers unsigned integers signed integers and two\u0026rsquo;s complement Important Results / Claims conversion from base 10 to base 2 min and max of binary \u0026ldquo;Which bit is missing\u0026rdquo; two\u0026rsquo;s complement Questions Interesting Factoids ","html":"\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/\"\u003ecomputer number system\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#bit\"\u003ebits\u003c/a\u003e and \u003ca href=\"/posts/kbhbinary_number_system/#byte\"\u003ebytes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#base-10\"\u003ebase 10\u003c/a\u003e, \u003ca href=\"/posts/kbhbinary_number_system/#base-2\"\u003ebase 2\u003c/a\u003e, \u003ca href=\"/posts/kbhbinary_number_system/#base-16\"\u003ebase 16\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eintegers\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#unsigned-integers\"\u003eunsigned integers\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#signed-integers\"\u003esigned integers\u003c/a\u003e and \u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#conversion-from-base-10-to-base-2\"\u003econversion from base 10 to base 2\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#min-and-max-of-binary\"\u003emin and max of binary\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhbinary_number_system/#which-bit-is-missing\"\u003e\u0026ldquo;Which bit is missing\u0026rdquo;\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct022023/","tags":null,"title":"SU-CS107 OCT022023"},{"categories":null,"contents":"Key Sequence Notation New Concepts two\u0026rsquo;s complement overflow casting Important Results / Claims mnemonic for remembering where overflows happened automatic signed promotion automated type size promotion Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_s_complement/\"\u003etwo\u0026rsquo;s complement\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#overflow\"\u003eoverflow\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/\"\u003ecasting\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_s_complement/#mnemonic-for-remembering-where-overflows-happened\"\u003emnemonic for remembering where overflows happened\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#automatic-signed-promotion\"\u003eautomatic signed promotion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#automated-type-size-promotion\"\u003eautomated type size promotion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct032023/","tags":null,"title":"SU-CS107 OCT032023"},{"categories":null,"contents":"Key Sequence Notation New Concepts casting sign promotion type size trunctaion bitwise operations bitmask GDB Important Results / Claims sizes of stuff Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/\"\u003ecasting\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#sign-promotion\"\u003esign promotion\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcasting/#type-size-promotion\"\u003etype size trunctaion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbitwise_operations/\"\u003ebitwise operations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbitmask/\"\u003ebitmask\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgdb/\"\u003eGDB\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinary_number_system/#sizes-of-stuff\"\u003esizes of stuff\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct042023/","tags":null,"title":"SU-CS107 OCT042023"},{"categories":null,"contents":"Key Sequence Notation New Concepts char ASCII string string manpulations strcmp strcpy Important Results / Claims Absolute Value 
Function Questions Interesting Factoids eigenvalue\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhchar/\"\u003echar\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhascii/\"\u003eASCII\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estring manpulations\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcmp\"\u003estrcmp\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#strcpy\"\u003estrcpy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhabsolute_value_function/\"\u003eAbsolute Value Function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct062023/","tags":null,"title":"SU-CS107 OCT062023"},{"categories":null,"contents":"Key Sequence Notation New Concepts string Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct092023/","tags":null,"title":"SU-CS107 OCT092023"},{"categories":null,"contents":"Key Sequence Notation New Concepts buffer overflow valgrind pointer memory Important Results / Claims identifying buffer overflows Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbuffer_overflow/\"\u003ebuffer overflow\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbuffer_overflow/#valgrind\"\u003evalgrind\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmemory/\"\u003ememory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbuffer_overflow/#identifying-buffer-overflow--kbhbuffer-overflow-dot-md--s\"\u003eidentifying buffer overflows\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct112023/","tags":null,"title":"SU-CS107 OCT112023"},{"categories":null,"contents":"Key Sequence Notation New Concepts pointer and address operator string Important Results / Claims seven commandments of c strings String Pointer Syntax Sugar Synonyms Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 
id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpointer/\"\u003epointer\u003c/a\u003e and \u003ca href=\"/posts/kbhpointer/#address-operator\"\u003eaddress operator\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#seven-commandments-of-c-id-11f0accb-37d9-4785-afea-1aeb53c8823f-string-s\"\u003eseven commandments of c strings\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/#string-pointer-syntax-sugar-synonyms\"\u003eString Pointer Syntax Sugar Synonyms\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct132023/","tags":null,"title":"SU-CS107 OCT132023"},{"categories":null,"contents":"Key Sequence Notation New Concepts strings array Pointer Arithmetic Important Results / Claims REMEMBER: you CANNOT change strings in the data segment Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstring/\"\u003estring\u003c/a\u003es\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharray/\"\u003earray\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbharray/#pointer-arithmetic\"\u003ePointer Arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eREMEMBER: you \u003cstrong\u003eCANNOT\u003c/strong\u003e 
change strings in the data segment\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct162023/","tags":null,"title":"SU-CS107 OCT162023"},{"categories":null,"contents":"Key Sequence Notation New Concepts stack heap malloc and calloc and free strdup realloc generic Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/\"\u003eheap\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/#malloc\"\u003emalloc\u003c/a\u003e and \u003ca href=\"/posts/kbhheap/#calloc\"\u003ecalloc\u003c/a\u003e and \u003ca href=\"/posts/kbhheap/#free\"\u003efree\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/#strdup\"\u003estrdup\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhheap/#realloc\"\u003erealloc\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/\"\u003egeneric\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct182023/","tags":null,"title":"SU-CS107 OCT182023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Little Endian generics memcpy, memmove Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey 
Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlittle_endian/\"\u003eLittle Endian\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/\"\u003egenerics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/#memcpy\"\u003ememcpy\u003c/a\u003e, \u003ca href=\"/posts/kbhgeneric/#memmove\"\u003ememmove\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct2023/","tags":null,"title":"SU-CS107 OCT202023"},{"categories":null,"contents":"Key Sequence Notation New Concepts generics memcpy, memmove Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/\"\u003egenerics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneric/#memcpy\"\u003ememcpy\u003c/a\u003e, \u003ca href=\"/posts/kbhgeneric/#memmove\"\u003ememmove\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct232023/","tags":null,"title":"SU-CS107 OCT232023"},{"categories":null,"contents":"Key Sequence Notation New Concepts function 
pointers Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfunction/#function-pointers\"\u003efunction pointers\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct252023/","tags":null,"title":"SU-CS107 OCT252023"},{"categories":null,"contents":"Key Sequence Notation New Concepts sorting functions assembly Registers Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsorting_functions/\"\u003esorting functions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassembly/\"\u003eassembly\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassembly/#register\"\u003eRegister\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_oct272023/","tags":null,"title":"SU-CS107 OCT272023"},{"categories":null,"contents":"Core Themes of CS107 how and why of 107:\nhow is program data represented in the hardware how does the heap work and how is it implemented how does a 
computer know how run code how does an executable map onto computer systems why is my program doing one thing when I expect it to do something else \u0026ldquo;why is this broken system behaving the way it does?\u0026rdquo;\nCore Goals of CS107 fluency pointers and memory, and how to make use of them an executable\u0026rsquo;s address space + runtime behavior competency the translation of C to and from assembly implement programs with limits of computer arithmetic identify bottlenecks and improve runtime performance navigate Unix ethical frameworks to design and implement software exposure computer architecture\nContent of CS107 bits and bytes chars and c strings pointers stacks and heaps generics: use them assembly: reverse an engineering of binary heap allocators: implement malloc and free ","html":"\u003ch2 id=\"core-themes-of-cs107\"\u003eCore Themes of CS107\u003c/h2\u003e\n\u003cp\u003e\u003cstrong\u003ehow and why\u003c/strong\u003e of 107:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e is program data represented in the hardware\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e does the heap work and \u003cstrong\u003ehow\u003c/strong\u003e is it implemented\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e does a computer know how run code\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ehow\u003c/strong\u003e does an executable map onto computer systems\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ewhy\u003c/strong\u003e is my program doing one thing when I expect it to do something else\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;why is this broken system behaving the way it does?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"core-goals-of-cs107\"\u003eCore Goals of CS107\u003c/h2\u003e\n\u003ch3 id=\"fluency\"\u003efluency\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003epointers and memory, and how to make use of them\u003c/li\u003e\n\u003cli\u003ean 
executable\u0026rsquo;s address space + runtime behavior\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"competency\"\u003ecompetency\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ethe translation of C to and from assembly\u003c/li\u003e\n\u003cli\u003eimplement programs with limits of computer arithmetic\u003c/li\u003e\n\u003cli\u003eidentify bottlenecks and improve runtime performance\u003c/li\u003e\n\u003cli\u003enavigate Unix\u003c/li\u003e\n\u003cli\u003eethical frameworks to design and implement software\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exposure\"\u003eexposure\u003c/h3\u003e\n\u003cp\u003ecomputer architecture\u003c/p\u003e\n\u003ch2 id=\"content-of-cs107\"\u003eContent of CS107\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebits and bytes\u003c/li\u003e\n\u003cli\u003echars and c strings\u003c/li\u003e\n\u003cli\u003epointers stacks and heaps\u003c/li\u003e\n\u003cli\u003egenerics: use them\u003c/li\u003e\n\u003cli\u003eassembly: reverse an engineering of binary\u003c/li\u003e\n\u003cli\u003eheap allocators: implement malloc and free\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_sep272023/","tags":null,"title":"SU-CS107 SEP272023"},{"categories":null,"contents":"New Concepts Unix C printf bool integer Important Results / Claims principles of C C limitations ","html":"\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc/\"\u003eC\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc_basic_operations/\"\u003eprintf\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbool/\"\u003ebool\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinteger/\"\u003einteger\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / 
Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc/#principles-of-c\"\u003eprinciples of C\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhc/#c-limitations\"\u003eC limitations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs107_sep292023/","tags":null,"title":"SU-CS107 SEP292023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Mencius Philosophy types of harm protected group Important Results / Claims Procedural vs. Distributive Fairness Questions Interesting Factoids logistic regression is a linear classifier Naive Bayes is a linear classier: there is literally no interaction between input features ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmencius_philosophy/\"\u003eMencius Philosophy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtypes_of_harm/\"\u003etypes of harm\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprotected_group/\"\u003eprotected group\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprocedural_vs_distributive_fairness/\"\u003eProcedural vs. 
Distributive Fairness\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003elogistic regression is a linear classifier\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e is a linear classier: there is literally no interaction between input features\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_dec012023/","tags":null,"title":"SU-CS109 DEC012023"},{"categories":null,"contents":"Diffusion Models We can consider a model between random noise and trees.\nFor every step, we sample Gaussian noise and add it to the image. The original approach adds Gaussian to the pixels, and nowadays people replace the pixel.\nUsually, there is a few thousand steps of noising.\nWhy is it that we can\u0026rsquo;t have a one-step policy from noise to pictures? Because of a physics result that says the stability of diffusion becomes intractable at too large steps.\nloss function One way we can model our objective is as a MLE. Because we are continuously adding noise, we can assume that\n\\begin{equation} y \\sim \\mathcal{N}(\\mu = \\hat{y}(\\theta), \\sigma^{2}=k) \\end{equation}\nIf you compute MLE over the choice of \\(\\hat{y}(\\theta)\\), you get the squared error.\nELBO A cool loss function that diffusion actually uses that leverages the fact above but considers the entire diffusion process.\nLSTMs Big text generation flaw with LSTMs: the latent state vector has to contain information about the ENTIRE sentence and have the information propagated through recursion. 
Information\nCross Entropy its MLE over a multinomials; the counts of everything that\u0026rsquo;s not the one-hot thing just so happens to be 0.\nWe are essentially computing the derivative of:\n\\begin{equation} \\arg\\max_{p_{correct}} p_{correct} \\end{equation}\nwhich is trying to maximize the categorical of only the correct element.\n","html":"\u003ch2 id=\"diffusion-models\"\u003eDiffusion Models\u003c/h2\u003e\n\u003cp\u003eWe can consider a model between random noise and trees.\u003c/p\u003e\n\u003cp\u003eFor every step, we sample Gaussian noise and \u003cstrong\u003eadd\u003c/strong\u003e it to the image. The original approach adds Gaussian to the pixels, and nowadays people replace the pixel.\u003c/p\u003e\n\u003cp\u003eUsually, there is a few thousand steps of noising.\u003c/p\u003e\n\u003cp\u003eWhy is it that we can\u0026rsquo;t have a one-step policy from noise to pictures? Because of a physics result that says the stability of diffusion becomes intractable at too large steps.\u003c/p\u003e\n\u003ch3 id=\"loss-function\"\u003eloss function\u003c/h3\u003e\n\u003cp\u003eOne way we can model our objective is as a \u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMLE\u003c/a\u003e. Because we are continuously adding noise, we can assume that\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny \\sim \\mathcal{N}(\\mu = \\hat{y}(\\theta), \\sigma^{2}=k)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf you compute MLE over the choice of \\(\\hat{y}(\\theta)\\), you get the squared error.\u003c/p\u003e\n\u003ch3 id=\"elbo\"\u003eELBO\u003c/h3\u003e\n\u003cp\u003eA cool loss function that diffusion actually uses that leverages the fact above but considers the entire diffusion process.\u003c/p\u003e\n\u003ch3 id=\"lstms\"\u003eLSTMs\u003c/h3\u003e\n\u003cp\u003eBig text generation flaw with LSTMs: the latent state vector has to contain information about the ENTIRE sentence and have the information propagated through recursion. 
Information\u003c/p\u003e\n\u003ch3 id=\"cross-entropy\"\u003eCross Entropy\u003c/h3\u003e\n\u003cp\u003eits MLE over a multinomials; the counts of everything that\u0026rsquo;s not the one-hot thing just so happens to be 0.\u003c/p\u003e\n\u003cp\u003eWe are essentially computing the derivative of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\arg\\max_{p_{correct}} p_{correct}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich is trying to maximize the categorical of only the correct element.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_dec042023/","tags":null,"title":"SU-CS109 DEC042023"},{"categories":null,"contents":" standard normal density function, and formula for phi for ARBITURARY normals (x-u/sigma) practice using inverse phi BEWARE that sigma is not sigma squared PDF AND CDF, Mean, Var, params, Generative Story for: uniform distribution and the normal distribution all of these continuity correction permutation and combinations formula counting formulas: binning, stars and bars, and the counting methods tree from beginning of class probability theorems: law of total probabaly, baysian, demorgans, counting with and and or multinomial distribution binomial coefficient (i.e. 
combinations formula), multinomial coefficient joint probability distribution and their table Naive Bayes divison ratio trick relative probability ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgaussian_distribution/#standard-normal-density-function\"\u003estandard normal density function\u003c/a\u003e, and formula for phi for ARBITURARY normals (x-u/sigma)\n\u003cul\u003e\n\u003cli\u003epractice using \u003cstrong\u003einverse phi\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eBEWARE that sigma is not sigma squared\u003c/li\u003e\n\u003cli\u003ePDF AND CDF, Mean, Var, params, Generative Story for:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e and the \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcs_probability_index/#what-random-variable-should-i-use\"\u003eall of these\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontinuity_correction/\"\u003econtinuity correction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e and \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es formula\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e formulas: binning, stars and bars, and the counting methods tree from beginning of class\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e theorems: law of total probabaly, baysian, demorgans, counting with and and or\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobablistic_model/#multinomial-distribution\"\u003emultinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ebinomial \u003ca 
href=\"/posts/kbhpolynomial/\"\u003ecoefficient\u003c/a\u003e (i.e. \u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003es formula), \u003ca href=\"/posts/kbhmultinomial_coefficient/\"\u003emultinomial coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e and their table\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e divison ratio trick\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrelative_probability/\"\u003erelative probability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_midterm/","tags":null,"title":"SU-CS109 Midterm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_midterm_sheet/","tags":null,"title":"SU-CS109 Midterm Sheet"},{"categories":null,"contents":"What if you don\u0026rsquo;t know about a probability of success?\nBeta Distribution time!!!\nMulti-Arm Bandit See Multi-Arm Bandit\nStrategies:\nupper confidence bound: take the action with theh highest n-tn-thonfidence bound Posterior Sampling: take a sample from each Beta Distributions distribution; take the action that has a higher probability of success based on their r ","html":"\u003cp\u003eWhat if you don\u0026rsquo;t know about a probability of success?\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003e time!!!\u003c/p\u003e\n\u003ch2 id=\"multi-arm-bandit--kbhexploration-and-exploitation-dot-md\"\u003e\u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eMulti-Arm Bandit\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eMulti-Arm Bandit\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eStrategies:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhdirected_exploration/#quantile-exploration\"\u003eupper confidence bound\u003c/a\u003e: take the action with theh highest n-tn-thonfidence bound\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/#posterior-sampling\"\u003ePosterior Sampling\u003c/a\u003e: take a sample from each \u003ca href=\"/posts/kbhbaysian_parameter_learning/#beta-distribution\"\u003eBeta Distribution\u003c/a\u003es distribution; take the action that has a higher probability of success based on their r\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov012023/","tags":null,"title":"SU-CS109 NOV012023"},{"categories":null,"contents":"Key Sequence Notation New Concepts IID Zero-Sum Game central limit theorem bootstrap Important Results / Claims Sum of Two Dice adding random variables Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhzero_sum_game/\"\u003eZero-Sum Game\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_of_two_dice/\"\u003eSum of Two Dice\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#adding-random-variables\"\u003eadding random variables\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 
id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov032023/","tags":null,"title":"SU-CS109 NOV032023"},{"categories":null,"contents":"Key Sequence Notation New Concepts central limit theorem sampling statistics Important Results / Claims sample mean sample variance standard error of the mean: \u0026ldquo;variance of the mean\u0026rdquo;: \u0026ldquo;how wrong is your meassured mean\u0026rdquo; Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sampling-statistics\"\u003esampling statistics\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#standard-error-of-the-mean\"\u003estandard error of the mean\u003c/a\u003e: \u0026ldquo;variance of the mean\u0026rdquo;: \u0026ldquo;how wrong is your meassured mean\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov062023/","tags":null,"title":"SU-CS109 NOV062023"},{"categories":null,"contents":"Key Sequence Notation New Concepts central limit theorem sampling statistics sample mean, sample variance, standard 
error of the mean bootstrap Important Results / Claims p-value from bootstrap Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sampling-statistics\"\u003esampling statistics\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/#sample-mean\"\u003esample mean\u003c/a\u003e, \u003ca href=\"/posts/kbhrandom_variables/#sample-variance\"\u003esample variance\u003c/a\u003e, \u003ca href=\"/posts/kbhrandom_variables/#standard-error-of-the-mean\"\u003estandard error of the mean\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboostrap/\"\u003ebootstrap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhboostrap/#p-value-from-bootstrap\"\u003ep-value from bootstrap\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov082023/","tags":null,"title":"SU-CS109 NOV082023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Bernoulli distribution as an indicator conditional expectation law of total expectation Important Results / Claims Review: expectation. Expectation of the sums of random variables are linear regardless of whether or not the variables are IID, independent, whatever.\n\u0026ldquo;expectation of the sum is the sum of the expectations\u0026rdquo;. 
\u0026ldquo;Can I write the expectation I want to calculate as the sum of something else?\u0026rdquo;\nQuestions Interesting Factoids \\(\\mathbb{E}[Y]\\) =\n\\(x=1\\): 3 \\(x=2\\): 5 + Y \\(x=3\\): 7 + Y \\begin{equation} \\mathbb{E}[Y] = 3 \\cdot \\frac{1}{3} + (5+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3} + (7+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3} \\end{equation}\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e as an indicator\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/#conditional-expectation\"\u003econditional expectation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/#law-of-total-expectation\"\u003elaw of total expectation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cp\u003eReview: \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e. \u003ca href=\"/posts/kbhexpectation/\"\u003eExpectation\u003c/a\u003e of the sums of \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es are \u003ca href=\"/posts/kbhexpectation/#properties-of-id-24e5fb5b-b0b2-4872-adf2-398e91c3ee0e-expectation\"\u003elinear\u003c/a\u003e regardless of whether or not the variables are \u003ca href=\"/posts/kbhindependently_and_identically_distributed/\"\u003eIID\u003c/a\u003e, \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e, whatever.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;expectation of the sum is the sum of the expectations\u0026rdquo;. 
\u0026ldquo;Can I write the expectation I want to calculate as the sum of something else?\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\\(\\mathbb{E}[Y]\\) =\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(x=1\\): 3\u003c/li\u003e\n\u003cli\u003e\\(x=2\\): 5 + Y\u003c/li\u003e\n\u003cli\u003e\\(x=3\\): 7 + Y\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{E}[Y] = 3 \\cdot \\frac{1}{3} + (5+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3} + (7+ \\mathbb{E}[Y]) \\cdot \\frac{1}{3}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov102023/","tags":null,"title":"SU-CS109 NOV102023"},{"categories":null,"contents":"Key Sequence Notation New Concepts modeling parameter parameter learning argmax Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodeling/\"\u003emodeling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhargmax/\"\u003eargmax\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov132023/","tags":null,"title":"SU-CS109 NOV132023"},{"categories":null,"contents":"Key Sequence Notation New Concepts 
likelyhood Maximum Likelihood Parameter Learning argmax maximum a posteriori estimate Important Results / Claims Double Envelope Problem Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlikelyhood/\"\u003elikelyhood\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhargmax/\"\u003eargmax\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_a_posteriori_estimate/\"\u003emaximum a posteriori estimate\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdouble_envelope_problem/\"\u003eDouble Envelope Problem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov152023/","tags":null,"title":"SU-CS109 NOV152023"},{"categories":null,"contents":"Key Sequence Notation For some feature input matrix, where each row is the data samples, and columns are the input features; we write\n\\begin{equation} x_{j}^{(i)} \\end{equation}\nwhere \\(i\\) are rows (datapoints), and \\(j\\) are columns (features)\nNew Concepts Naive Bayes Important Results / Claims Naive Bayes assumption Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003cp\u003eFor some feature input matrix, where each row is the data samples, 
and columns are the input features; we write\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{j}^{(i)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(i\\) are rows (datapoints), and \\(j\\) are columns (features)\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/#naive-bayes--kbhnaive-bayes-dot-md--assumption\"\u003eNaive Bayes assumption\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov172023/","tags":null,"title":"SU-CS109 NOV172023"},{"categories":null,"contents":"Key Sequence Notation New Concepts machine learning Naive Bayes sigmoid logistic regression Important Results / Claims logistic regression assumption Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmachine_learning/\"\u003emachine learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhlogistic_regression/#logistic-regression-assumption\"\u003elogistic regression assumption\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov272023/","tags":null,"title":"SU-CS109 NOV272023"},{"categories":null,"contents":"Key Sequence Notation New Concepts logistic regression deep learning Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_regression/\"\u003elogistic regression\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdeep_learning/\"\u003edeep learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_nov292023/","tags":null,"title":"SU-CS109 NOV292023"},{"categories":null,"contents":"Key Sequence Notation New Concepts probability + Frequentist Definition of Probability sample space event equally likely outcomes sample space Important Results / Claims uncertainty and probability axiom of probability with making elements INDISTINCT during picking your sample space, your sample space may not have equally likely outcomes Questions Interesting Factoids Doing probability\nconsider your cards as being distinct create a generative story ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 
id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e + \u003ca href=\"/posts/kbhprobability/#frequentist-definition-of-probability\"\u003eFrequentist Definition of Probability\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhevent/\"\u003eevent\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsample_space/#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e sample space\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#id-c5f09e8a-9c3f-4874-a1c1-79d156801208-uncertainty-and-id-744af885-3e9c-4130-bd2b-2f41bcc0440e-probability\"\u003euncertainty and probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003ewith making elements INDISTINCT during picking your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e, your \u003ca href=\"/posts/kbhsample_space/\"\u003esample space\u003c/a\u003e may not have \u003ca href=\"/posts/kbhsample_space/#equally-likely-outcomes\"\u003eequally likely outcomes\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003eDoing probability\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003econsider your cards as being distinct\u003c/li\u003e\n\u003cli\u003ecreate a generative 
story\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct022023/","tags":null,"title":"SU-CS109 OCT022023"},{"categories":null,"contents":"Key Sequence Notation And Or Given P(E and F) P(E or F) P(E \\bar F) P(E,F) P(E ∪ F) New Concepts Review: axiom of probability conditional probability law of total probability Bayes Theorem Important Results / Claims Representing Large Computation probability chain rule Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eAnd\u003c/th\u003e\n\u003cth\u003eOr\u003c/th\u003e\n\u003cth\u003eGiven\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(E and F)\u003c/td\u003e\n\u003ctd\u003eP(E or F)\u003c/td\u003e\n\u003ctd\u003eP(E \\bar F)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003eP(E,F)\u003c/td\u003e\n\u003ctd\u003eP(E ∪ F)\u003c/td\u003e\n\u003ctd\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eReview: \u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrepresenting_large_computation/\"\u003eRepresenting Large 
Computation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003eprobability chain rule\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct042023/","tags":null,"title":"SU-CS109 OCT042023"},{"categories":null,"contents":"Key Sequence Notation New Concepts mutually exclusive easy \u0026ldquo;or\u0026rdquo; independence \u0026ldquo;and\u0026rdquo; Important Results / Claims inclusion exclusion counting for independent events, AND questions are easier for mutually exclusive events, OR questions are easier if you didn\u0026rsquo;t get the condition you want, use DeMorgan\u0026rsquo;s Law to flip them over Questions Interesting Factoids eigenvalue\nQuality Investing\n","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e easy \u0026ldquo;or\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e \u0026ldquo;and\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_rule_of_counting/\"\u003einclusion exclusion counting\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e events, AND questions are easier\u003c/li\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e events, OR questions are easier\u003c/li\u003e\n\u003cli\u003eif you 
didn\u0026rsquo;t get the condition you want, use \u003ca href=\"/posts/kbhdemorgan_s_law/\"\u003eDeMorgan\u0026rsquo;s Law\u003c/a\u003e to flip them over\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfundimental_investing/#quality-investing\"\u003eQuality Investing\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct062023/","tags":null,"title":"SU-CS109 OCT062023"},{"categories":null,"contents":"Key Sequence Notation New Concepts conditional independence random variable probability mass function expectation Important Results / Claims When you are using conditional probability, if you are consistently within your condition, you can effectively just leave it there conditioning on something CHANGES whether or not two things are independent Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_mass_function/\"\u003eprobability mass function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhen you are using \u003ca 
href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e, if you are consistently within your condition, you can effectively just leave it there\u003c/li\u003e\n\u003cli\u003econditioning on something CHANGES whether or not two things are \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependent\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct092023/","tags":null,"title":"SU-CS109 OCT092023"},{"categories":null,"contents":"Key Sequence Notation New Concepts random variables of interest binomial distribution Bernoulli distribution Important Results / Claims New problem solving recipe recognize classic random variable define a random variable of the correct parameters use their PMF and solve Galton Board Questions Debugging probability: put in large values of \\(p\\) for things; if you end up with a number higher than \\(1\\) for the probability, you probably tried to add not mutually exclusive events\nInteresting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es of interest\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbinomial_distribution/\"\u003ebinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbernoulli_random_variable/\"\u003eBernoulli distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eNew problem solving recipe\n\u003cul\u003e\n\u003cli\u003erecognize 
classic \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003edefine a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e of the correct parameters\u003c/li\u003e\n\u003cli\u003euse their \u003ca href=\"/posts/kbhprobability_mass_function/\"\u003ePMF\u003c/a\u003e and solve\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgalton_board/\"\u003eGalton Board\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cp\u003eDebugging probability: put in large values of \\(p\\) for things; if you end up with a number higher than \\(1\\) for the probability, you probably tried to add not \u003ca href=\"/posts/kbhmutually_exclusive/\"\u003emutually exclusive\u003c/a\u003e events\u003c/p\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct112023/","tags":null,"title":"SU-CS109 OCT112023"},{"categories":null,"contents":"Key Sequence Notation New Concepts e variance poisson distribution Important Results / Claims probability of k in x time Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhe/\"\u003ee\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003epoisson distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_of_k_in_x_time/\"\u003eprobability of k in x 
time\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_109_oct132023/","tags":null,"title":"SU-CS109 OCT132023"},{"categories":null,"contents":"Key Sequence Notation New Concepts support more discrete random variables! geometric random variable negative binomial distribution hypergeometric: drawing without replacement probability density function continuous random variables uniform distribution exponential distribution cumulative distribution function Important Results / Claims PDFs are derivatives of probability Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsupport/\"\u003esupport\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003emore discrete \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es!\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeometric_random_variable/\"\u003egeometric random variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnegative_binomial_distribution/\"\u003enegative binomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ehypergeometric: drawing without replacement\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexponential_distribution/\"\u003eexponential distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003ecumulative distribution function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003ePDF\u003c/a\u003es are derivatives of \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct162023/","tags":null,"title":"SU-CS109 OCT162023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct182023/","tags":null,"title":"SU-CS109 OCT182023"},{"categories":null,"contents":"Key Sequence Notation New Concepts multinomial coefficient joint probability distribution probablistic models multinomial distribution text Naive Bayes Important Results / Claims for Naive Bayes against some multinomial distribution, its often a good 
time to find the ratios of the results because the combination in the beginning cancels out if you are analyzing the same samples upon different prior probabilities \\begin{equation} \\frac{P(H|D)}{P(M|D)} = \\frac{\\prod_{i} h_{i}^{c_{i}}}{\\prod_{i} m_{i}^{c_{i}}} \\end{equation}\nQuestions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultinomial_coefficient/\"\u003emultinomial coefficient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobablistic_model/\"\u003eprobablistic models\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobablistic_model/#multinomial-distribution\"\u003emultinomial distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003etext \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e against some \u003ca href=\"/posts/kbhprobablistic_model/#multinomial-distribution\"\u003emultinomial distribution\u003c/a\u003e, its often a good time to find the ratios of the results because the combination in the beginning cancels out if you are analyzing the same samples upon different prior probabilities\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{P(H|D)}{P(M|D)} = \\frac{\\prod_{i} h_{i}^{c_{i}}}{\\prod_{i} m_{i}^{c_{i}}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 
id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct202023/","tags":null,"title":"SU-CS109 OCT202023"},{"categories":null,"contents":"Key Sequence Notation New Concepts relative probability probability density function inference Important Results / Claims getting exact values from PDF Bayes Normalization Constant Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrelative_probability/\"\u003erelative probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-function\"\u003eprobability density function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#getting-exact-values-from-pdf--kbhprobability-distributions-dot-md\"\u003egetting exact values from PDF\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbayes_normalization_constant/\"\u003eBayes Normalization Constant\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct232023/","tags":null,"title":"SU-CS109 OCT232023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Bayes Theorem Over Random Variable sigmoid Important Results / Claims not all beliefs are able to be written down as a function it is ok to discretize things Item Response Theory Questions Interesting 
Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbayes_theorem_over_random_variable/\"\u003eBayes Theorem Over Random Variable\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003enot all beliefs are able to be written down as a \u003ca href=\"/posts/kbhfunction/\"\u003efunction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eit is ok to discretize things\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhitem_response_theory/\"\u003eItem Response Theory\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct252023/","tags":null,"title":"SU-CS109 OCT252023"},{"categories":null,"contents":"Key Sequence Notation New Concepts General Inference (i.e. inference in general) Important Results / Claims Methods of Compressing the Parameters of a Distribution Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgeneral_inference/\"\u003eGeneral Inference\u003c/a\u003e (i.e. 
\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e in general)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#methods-of-compressing-the-parameters-of-a-distribution\"\u003eMethods of Compressing the Parameters of a Distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_oct272023/","tags":null,"title":"SU-CS109 OCT272023"},{"categories":null,"contents":"Key Sequence Notation New Concepts counting Important Results / Claims thinking by steps step rule of counting aka product rule of counting inclusion exclusion counting (\u0026ldquo;counting with or\u0026rdquo;) Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcounting/\"\u003ecounting\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethinking by steps\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcounting/#step-rule-of-counting\"\u003estep rule of counting\u003c/a\u003e aka \u003ca href=\"/posts/kbhcounting/#step-rule-of-counting\"\u003eproduct rule of counting\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsum_rule_of_counting/\"\u003einclusion exclusion counting (\u0026ldquo;counting with or\u0026rdquo;)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 
id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_sep272023/","tags":null,"title":"SU-CS109 SEP272023"},{"categories":null,"contents":"Key Sequence Notation New Concepts permutation combination grouping Important Results / Claims permutation with indistinct objects grouping with entirely indistinct objects (divider method) Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcombination/\"\u003ecombination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgrouping/\"\u003egrouping\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/#permutation-with-indistinct-objects\"\u003epermutation with indistinct objects\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgrouping/#grouping-with-entirely-indistinct-objects\"\u003egrouping with entirely indistinct objects\u003c/a\u003e (\u003ca href=\"/posts/kbhgrouping/#grouping-with-entirely-indistinct-objects\"\u003edivider method\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs109_sep292023/","tags":null,"title":"SU-CS109 SEP292023"},{"categories":null,"contents":"FS main challenges naming: how do users name files reliability: surviving OS crashes and hardware failures protection: isolation between users, controlled sharing disk space management: minimize seeks, 
sharing space (\u0026ldquo;preventing fragmentation\u0026rdquo;) seeks to wait until the platter go under the arm and read.\ninternal v. external fragmentation internal: a file can be no less than a single block of text. external: no space is available even if the space in aggregate is available main designs contiguous allocation IBM used this? puts files and meta-data together + implement an explicit free list allocator. benefit: simple; drawback: 1) external fragmentation 2) hard to grow files\nlinked files in every block, store the location of the next block; don\u0026rsquo;t store files continuously\u0026mdash;instead, store a pointer to where the next block of the file is. benefit: solves fragmentation and file growth; drawback: 1) huge seek time 2) random access from the middle is hard (i.e. O(n))\nWindows FAT linked files, but cached the file links in memory when using it. benefits: same as linked files, and a bit faster drawback: data still fragmented and now you have a whole ass table to deal with! 
but its at least faster\nFile Payload Data Kind of what we do\u0026mdash;instead of storing file data in order OR using links, store the file BLOCK information contiguously.\nmulti-level index: store all block numbers for a given file down a tree (EXT2/3, Unix V6, NTFS)\nUnix V6 + MLI Sector Size Block Size Inode Size Inodes Per Block Address Type 512 512 32 16 Short, 2 bytes block const size_t INODE_PER_BLOCK = SECTOR_SIZE / sizeof(struct inode); struct inode inodes[INODE_PER_BLOCK]; char buf[SECTOR_SIZE]; readsector(2, \u0026amp;inodes); // recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode printf(\u0026#34;addr: %d\\n\u0026#34;, inodes[0].i_add); ino struct inode { uint16_t i_addr[8]; uint16_t i_mode[8]; uint16_t file_size; } inodes have two modes\nif ((inode.i_mode \u0026amp; ILARG) != 0) == // node is in \u0026#34;large mode\u0026#34; in small mode, the inode stores in i_addr the block numbers to the data in large mode, the inode stores in the first seven numbers in i_addr block numbers to blocks that contain block numbers (512/2 = 256 block numbers, which are chars); the eighth number points to doubly indirect blocks that contain block numbers that point to other blocks The inode table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. inodes are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. usually this packs 16 inodes per block\nin large mode, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which means we are fine now.\nsizing\nsmall: \\(512\\) bytes per block, and \\(8\\) block storable, so \\(8 \\cdot 512 = 4096\\) bytes large: \\(512\\) bytes per block pointed to by i_addr, each containing \\(\\frac{512}{2} = 256\\) block numbers. The first seven in total would therefore address \\(256 \\times 7 = 1792\\) blocks of memory. The last eight would each address \\(256 \\cdot 256 = 65536\\) blocks of memory. 
In total, that addresses \\(1792+65536 = 67328\\) blocks of memory. Finally, that means we can address \\(67328 \\cdot 512 = 34471936\\) bytes. dir struct dirent { uint16_t d_inumber; // inode number of this file char d_name[14]; // the name; *NOT NECESSARILY NULL TERMINATED* } THE NAME MAY NOT BE NULL TERMINATED to cram max things. You have to use strncmp\nstrcmp/strncmp: stops comparing after \\(n\\) characters; \u0026lt;0 if str1 comes before str2 alphabetically; \u0026gt;0 if str1 comes after str2; 0 if equal\nStart at the root directory, /. We want to go to the root directory, and find the entry named /classes/, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\nA directory is basically just a file whose payload is a list of dirent.\nThe inode tells you whether something is a file or a directory. They can be small or large, as usual. Root directory always have inode number 1; 0 is reserved to NULL.\nfile Recall that read doesn\u0026rsquo;t read the whole thing. So, we it in parts.\nvoid copyContents(int sourceFD, int destinationFD) { char buffer[INCREMENT]; while (true) { ssize_t bytesRead = read(sourceFD, buffer, sizeof(buffer)); if (bytesRead == 0) break; size_t bytesWritten = 0; while (bytesWritten \u0026lt; bytesRead) { ssize_t count = write(destinationFD, buffer + bytesWritten, bytesRead - bytesWritten); bytesWritten += count; } } } int open(const char *pathname, int flags); Flags are a bitwise OR operations: you have to open with O_RDONLY (read only), O_WRONLY (write only), or O_RDWR (both read and write). 
This returns \\(-1\\) if the reading fails.\nOther flags:\nO_TRUNC (truncate file) O_CREAT (creating a file if not exist), which will require a mode_t mode parameter to set the permission O_EXCL (file must not exist) open file table open-file table is system wide: mentioning what mode an opening is + the cursor to the open file + the number of file-descriptors pointing to it to a refcount.\nwhy is refcount ever higher than 1? because forks.\nBlock Cache We will use part of the main memory to retain recently-accessed disk blocks. This is NOT at the granularity of individual files.\nLeast Recently Used (LRU) Cache When you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\nBlock Cache Modification we can either write asap, or delay.\nwrite asap: safer: less risk of data loss, written as soon as possible; slow: program must wait to proceed until disk I/O completes\nwrite delay: dangerous: may loose data after crash; efficient: memory writes is faster\nCrash Recovery main challenges data loss: crashes can happen, and not all data could be saved to disk inconsistency: crashes can happen in the middle of operations Ideally, filesystem operations should be atomic. Every operation should happen or not happen at all\u0026mdash;but not halfway.\nfsck Check whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn\u0026rsquo;t set there isn\u0026rsquo;t a clean shutdown. 
If it wasn\u0026rsquo;t a clean shutdown, identify inconsistencies Scans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations\u0026mdash; block in an inode and in free list; solution: pull the block off of free list block is a part of two inodes; solution: give to newest, random, copy, remove (bad idea) inode claims one dirent refers to it, but there are no such dirent; solution: put in lost and found limitations takes long because can\u0026rsquo;t restart until done doesn\u0026rsquo;t prevent loss of actual file info filesystem may still be unusable (core files moved to lost+found) a block could migrate during recovery, leaking info ordered writes Always initialize the TARGET before initializing the REFERENCE Initialize inode before initalize directory entry to it Never reuse a resource before NULLIFYING all existing REFERENCES Remove the inode reference before putting a block on the free list Never clear the LAST REFERENCE to a live resource before setting a NEW REFERENCE (\u0026ldquo;its better to have 2 copies instead of none\u0026rdquo;) Make the new directory entry before get rid of the old one limitations performance: we need to do operations synchronously if we really want to do caching async, we can track dependencies circular dependencies are possible leak: it could leak resources (reference nullification happens but resource not added) We can run fsck in the background journaling journaling keeps a paper trail of disk appertains in the event of a crash. We have an append-only log on disk that stores disk operations.\nbefore performing an operation, record its info in the log and write that to disk The log will always record what\u0026rsquo;s happening ahead. The actual block updates can eventually be carried out in any order.\nwhat do we log? we only log metadata changes (inodes, moving stuff around, etc.) 
payload operations are not saved structure We typically have a LSN: log serial number, operations, and metadata.\nLogPatch: changes something LogBlockFree: mark something as free LogBlockAlloc: mark something as allocated, optionally zeroing data if its a data block (DO NOT zero if its a dirent or ino) [offset 335050] LSN 18384030 operation = \u0026#34;LogBlockAlloc\u0026#34; blockno = 1027 zero_on_replay = 0 [offset 23232] LSN N operation = \u0026#34;LogPatch\u0026#34; blockno = 8 offset = 137 bytes = 0.04 inode = 52 limitations and fixes multiple log entries: each atomic operation will be wrapped into a unit transaction to make idempotent checkpoints: we can truncate the log occasionally at a checkpoint\u0026mdash;when it is no longer needed where do we start replaying: log entries should be idempotent\u0026mdash;doing something multiple times should have the same effect of doing them once. Logs cannot have external dependencies log entries may take time: when finally we write stuff to disk, we write the logs first. So no problems there. 
tradeoffs durability - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.)) performance - it needs to be fast (which may mean less error checking) consistency - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked) MP Multiprocessing: processes, PIDs, fork, execution order, copy on write, waitpid, zombie processes, execvp, pipes and pipe / pipe2, I/O redirection\nforking pid_t child_pid = fork(); fork returns the child PID if parent; returns 0 if child.\nThe arguments list have to BEGIN WITH EXECUTABLE NAME and END WITH NULL.\nchar *args[] = { \u0026#34;/bin/ls\u0026#34;, \u0026#34;-l\u0026#34;, \u0026#34;~/hewo\u0026#34;, NULL }; execvp(args[0], args); execvp LEAVES THE FILE DESCRIPTOR TABLE.\nevery fork has to be waited on by waitpid:\npid_t waitpid(pid_t pid, int *status, int options); pid status: pointer to store return about the child options (0 for now) if the PID has died, this returns immediately. Otherwise, this blocks.\nthe status int is a bitmap with a bunch of stuff, which we can check with a series of macros\nint status; int pid_act = waitpid(pid, \u0026amp;status, 0); if (WIFEXISTED(status)) { // child normal exit int statuscode = WEXITSTATUS(status); } else { // abnormal exist } the returned PID is the PID that got waited on; if the input PID is -1, it will wayt on any process\nfork mechanics The act of copying stack and heap sounds really really expensive. So\u0026hellip;. Whapppens?\nThe child will map the parent\u0026rsquo;s memory addresses to different physical addresses than for the parent. The copies are LAZY\u0026mdash;if the child writes to an area in memory, its virtual address are mapped to different addresses. 
If no writes by the child happen, the virtual address are mapped to the same address.\nduring file reading, the file descriptors gets cloned, the underlying open file table doesn\u0026rsquo;t close.\npipes int pipes[2]; // create the pipes int ret = pipe(pipes); /* int ret = pipe2(pipes, O_CLOEXEC); */ // an so int read_from_here = ret[0]; int write_to_here = ret[1]; // i.e. ret[1] writes to =\u0026gt; ret[0] read // fork! pid_t pid_p = fork(); if(pid_p == 0) { // child subroutine // because child is READING, and not READINg // we want to close the write close(write_to_here); // we want to then make a buffer char buf[num_bytes]; // if the child reads before the parents write // it will block until some data is available // if the write ends are closed globally, read // will also stop. read(read_from_here, buffer, sizeof(buffer)); close(read_from_here); return 0; } // parent subroutine // because parent is WRITING and not READING // we don\u0026#39;t want the read to block, we will // close the parent immediately. 
close(read_from_here); // write some data write(write_to_here, \u0026#34;msg\u0026#34;, num_bytes); // close now we are done writing close(write_to_here); // clean up child waitpid(pid_p, NULL, 0); Recall that dup2 exists:\ndup2(fds[0], STDIN_FILENO); close(fds[0]); it will close the second file descriptor, if already in use, before binding the first file descriptor to it.\nshell while (true) { char *command = { \u0026#34;ls\u0026#34;, \u0026#34;things\u0026#34; }; pid_t child_pid = fork(); if (!child_pid) { // this is the child; execvp will check PATH for you execvp(command.argv[0], command.argv); // if we got here, the PID didn\u0026#39;t do well throw STSHException(string(command.argv[0])+\u0026#34;: not found or didn\u0026#39;t succeed to fork.\u0026#34;); } waitpid(child_pid); // do cleanup } MT // now the thread can execute at any time: once a thread is made, it will run in any order thread myThread(function_to_run, arg1, arg2, ...); // threads run AS SOON AS SPAWENED: so We can wait for a thread:\nmyThread.join() You can also start a bunch on a loop:\nthread threads[3]; for (thread\u0026amp; cf : threads) { cf = thread(func, ...); } passing by reference threading doesn\u0026rsquo;t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.\nstatic void mythingref(int \u0026amp;pbr); thread(myfunc, ref(myint)); Remember: ref will SHARE MEMORY, and you have no control over when the thread runs. 
So once a pointer is passed all bets are off in terms of what values things take on.\nmutex it would be nice if a critical section can only be executed once; a mutex can be shared across threads, but can only be \u0026ldquo;owned\u0026rdquo; by a single thread at once.\nmutex tmp; tmp.lock(); tmp.unlock(); importantly, if multiple threads are waiting on a mutex, the next thread that\u0026rsquo;s going to get the mutex\nwhen there are multiple threads writing to a value when there is a thread writing and one or more threads reading if you are no writes, you don\u0026rsquo;t need a mutex int locked = 0; Queue blocked_queue; void Lock::Lock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 IntrGuard grd; if (!locked) { // if our thread is not locked, just lock it locked = 1; } else { // if our thread is locked, we need to prevent our current // thread from going to the ready queue, and push it to the current thread blocked_queue.push(CURRENT_THREAD); // remember this isn\u0026#39;t an issue even if IntrGuard // didn\u0026#39;t yet go out of scope; because it will either // land on a context_switch which will enable interrupts for you // or land on the beginning of a threadfunc helper, which // is also going to enable interrupts for you // nicely, the interrupts are here are *off* as required because switching // to another thread always will result in reenabling (either by new thread, // by timer handler, or by IntrGuard) mark_block_and_call_schedule(CURRENT_THREAD); } } void Lock::Unlock() { // disable interrupts: otherwise multiple threads // could come and lock the mutex (such as between // the locked check and lock =1 IntrGuard grd; // if our thread is locked and nobody is waiting for it if (q.empty()) { locked = 0; } else { unblock_thread(q.pop()); // we do not switch to the unblocked thread, just add it to the // ready queue. 
we are entrusting the scheduler to start this thread // whenever we feel right } } CV condition_variable_any permitsCV; // ... thread(ref(permitsCV)) Identify the ISOLATED event to notify; notify absolutely only when needed. To notify:\npermitsCV.notify_all(); To listen:\npermits.lock(); while (permits == 0) { permitsCV.wait(permitsLock); } permits--; permitsLock.unlock(); the condition variable will\u0026hellip;\nstart sleeping FIRST unlock a lock FOR US AFTER the sleeping starts after waiting ends, tries to reaquire lock blocks until we have the lock again unique_lock void my_scope(mutex \u0026amp;mut, condition_variable_any \u0026amp;cv) { unique_lock\u0026lt;mutex\u0026gt; lck(mut); // do stuff, you can even pass it to a condition variable! cv.wait(lck); } Thread States and Contexts Recall that threads are the unit of execution. The process control block keeps track of the *stack pointer* of the thread %rsp, which means if a thread is put to sleep the state can be stored somewhere on the stack.\nThree states:\nrunning (could switch to ready/blocked) ready able to run, but not on CPU yet (could switch to running only) blocked eating for something (could switch to ready/running) trap a trap is a user request for OS attention explicitly from the user thread, swapping the user process off the CPU.\nsystem calls errors page fault (memory errors) interrupt a interrupt takes place outside the current thread, it forces the OS\u0026rsquo; attention even if the user thread isn\u0026rsquo;t asking for it\ncharacter typed at keyboard completion of a disk operations a hardware timer that fires an interrupt what if a timer goes off during an interrupt interrupts are disabled during interrupt handling, otherwise, this causes an infinite loop.\npreemption We use interrupts to implement preemption, \u0026ldquo;preempting\u0026rdquo; threads in order to swap on another thread to CPU. 
This enables scheduling to happen.\n// brand new thread void interrupt_handler() { /* disables interupts, automatically by timer handler */ // future spawns start here context_switch(...); /* enables interupts, automatically by timer handler */ } void threadfunc_wrapper() { // manually enable interrupts before first run intr_enable(true); // start thread\u0026#39;s actual business threadfunc(); } Scheduling main challenges minimize time to a useful result\u0026mdash;(assumption: a \u0026ldquo;useful result\u0026rdquo; = a thread blocking or completes) using resources efficiently (keeping cores/disks busy) fairness (multiple users / many jobs for one users) We can measure 1) based on \u0026ldquo;average completion time\u0026rdquo;: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.\nmain designs first-come first-serve keep all threads in ready in a queue run the first thread on the front until it finishes/it blocks for however long repeat Problem: a thread can run away with the entire system, accidentally, through infinite loops\nround robin keep all threads in a round robin each thread can run for a set amount of time called a time slice (10ms or so) if a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin Problem: what\u0026rsquo;s a good time slice?\ntoo small: the overhead of context switching is higher than the overhead of running the program too large: threads can monopolize cores, can\u0026rsquo;t handle user input, etc. Linux uses 4ms. Generally, you want 5-10ms range.\ngold: shortest remaining processing time Run first the thread in queue that will finish the most quickly and run it fully to competition.\nIt gives preference to those that need it the least (i.e. 
because it runs the smalest one); of course THIS IS not implementable without oracle time guess.\nOur goal, then is to get as close as possible to the performance of SRPT.\nProblem:\nwe don\u0026rsquo;t know which one will finish the most quickly if we have many threads and one long-running thread, the long running thread won\u0026rsquo;t be able to run ever priority based scheduling Key idea: behavior tends to be consistent in a thread. We build multiple priority queues to address this.\npriority based scheduling is an approximation of SRPT, using the past performance of the thread to estimate the running time of the thread. Over time, threads will move between priority queues, and we run the topmost thread from the highest priority queue\nimplement based on time slice usage a thread always enters in the highest priority queue\nif the thread uses all of its time slice and didn\u0026rsquo;t exit, bump them down a priority queue if a thread blocked before it used all of its time slice, bump them up a priority queue implement based on aggregate time used: fixing neglect a thread has a number for \u0026ldquo;how much time did you use on the CPU recently\u0026rdquo;? 
The priories are sorted by that value, and the smallest time use will be ran.\ncontext switch (in asm) push all callee saved registers except %rsp into the bottom of the old thread\u0026rsquo;s stack store the stack pointer %rsp into the process control block for that process corresponding to thread read the new thread\u0026rsquo;s stack pointer from the process control block, and load that into %rsp (in asm) pop all callee saved registers stored on the bottom of our new stack back onto the registers To deal with new threads, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls context_switch normally.\nVirtual Memory main challenges multitasking: multiple processes should be able to use memory transparency: no process need to know that memory is shared; each process should be able to run regardless of the number/locations of processes isolation: processes shouldn\u0026rsquo;t be able to corrupt other processes\u0026rsquo; memory efficiency: shouldn\u0026rsquo;t be degraded by sharing crappy designs with no DMT single tasking: assume there\u0026rsquo;s one process 1) no isolation 2) no multitasking 3) bad fragmentation load time relocation: move the entire program somewhere on load time 1) no isolation 2) can\u0026rsquo;t grow memory after load 3) external fragmentation after frees main designs base and bound load time relocation + virtual memory\nassign a location in physical memory, call the base; during translation, we just add every virtual address by the base we can cap the virtual address space for each process by a bound, we can raise a bus error/segfault if it goes above the highest allowable last possible address: is (bound - 1)+base\ncompare virtual address to bound, trap and raise if \u0026gt;= bound then, return virtual address + base tradeoffs: good - 1) inexpensive 2) doesn\u0026rsquo;t need more space 3) ritualized; bad - 1) can\u0026rsquo;t really move either (i.e. 
need to allocate) 2) fragmentation 3) no read only memory\nmultiple segments break stack, heap, etc. into multiple segments; then do base and bound for each segment\ntradeoffs: good - 1) you can now recycle segments 2) you can not map the middle 3) you can grow the heap (but not the stack, because it moves downwards); bad - 1) you need to decide segment size and location ahead of time\ngoal design paging: fixed segment size, and just split each thing.\nwe map each page independently, and keep the offset. If a page is unused, internal fragmentation but not too bad. The stack can now grow downwards: because if it reaches into lower page numbers we can just map that page somewhere too.\nFor instance, typically page sizes are 4kb\nPage Size Offset Number Digits 4096 bytes (16^3) 3 then the rest of the address would just be the page number.\nIntel\u0026rsquo;s implementation Virtual Addresses\nUnused (16 bits) Virtual page number (36 bits) Offset (12 bits) Physical Addresses\nPage number (40 bits) Offset (12 bits) translation chop off page number and offset translate the page number concat the two together implementation Index Physical Address Writable Present/Mapped? Last Access Kernel Dirty 0 0x2023 1 0 0 0 0 1 0x0023 1 1 1 0 0 Swap pick a page to kick out write kicked page to disk mark the old page entry as not present give the physical address to the new virtual page choosing what to swap randomly! 
(works apparently kinda fine) First-in-first out (fair, bust bad \u0026mdash; throw out the page in memory longest; but what if its very used) least recently used - clock algorithm clock algorithm rotate through all pages until we find one that hasn\u0026rsquo;t been referenced since last time\nwe add a reference bit to the page table\u0026mdash;its set to \\(1\\) if the program wrote or read each page, otherwise its set to \\(0\\) when page kick is needed, clock algorithm starts where it left off before and scan through physical pages each page it checks with reference bit 1, it sets the reference bit as 0 if it checked a page and its reference bit is 0, we kick it out (because we\u0026rsquo;ve gone through two ) We now save the position of the hand\u0026mdash;we want to begin checking with the page that hasn\u0026rsquo;t been checked for the longest time. If every page has a reference bit is one, running this algorithm doesn\u0026rsquo;t break because it would set its immediately next bit of memory.\npage replacement we don\u0026rsquo;t use per process replacement because we need to allocate max pages per process we use global replacement to maximise usage demand fetching most modern OSes start with no pages loaded\u0026mdash;load pages only when referenced; this is tempered by the type of page that\u0026rsquo;s needed:\nPage Type Need Content on First Load Save to Swap (\u0026ldquo;Swap?\u0026rdquo;) code yes no (read from exe) data yes yes stack/heap no yes We only write to disk if its dirty.\nMulticore + Flash Scheduling Multi-Core CPUs main approaches one queue for everyone 1) need to figure out what is the priory of things on that queue (for preemption) one queue per core: 1) where do we put a thread? 2) how do we move between cores? One Ready Queue per Core where do we put a given thread? 
moving core between threads is expensive Big tension:\nWork Stealing: if one core is free (even if there is things in the ready queue), check other cores\u0026rsquo; ready queues and try to do thread communism. Core Affinity ideally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core. Gang Scheduling When you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.\nLocking Multi-Core CPUs disabling interrupts are not enough\nhardware atomic operation exchange + busy waiting, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:\nclass Lock { std::automic\u0026lt;int\u0026gt; sync(0); } void Lock::lock() { while (sync.exchange(1)) {} // we are now the only one using it // do work .... sync = 0; } The exchange function returns the old value.\nFlash Storage writing You have two operation.\nerase: You can set ALL SEGMENT of an \u0026ldquo;erase unit\u0026rdquo; to \\(1\\) (\u0026ldquo;erase unit\u0026rdquo; size is usually 256k) write: You can modify one \u0026ldquo;page\u0026rdquo; at a time (which is smaller than a erase unit)\u0026mdash;but you can ONLY set individual bits in the page into 0 (\u0026ldquo;page\u0026rdquo; size is usually 512 bytes or 4k bytes) wear-out wear leveling: make sure that the drive wears out at roughly the same rate as other parts of the drive. 
Moving commonly written data (\u0026ldquo;hot\u0026rdquo; data) around\nFTL limitations no hardware access (can\u0026rsquo;t optimize around flash storage) sacrifices performances for performance wasts capacity (to look like hard drive) many layers Ethics trusting software is the task of extending your own AGENCY to a piece of software: \u0026ldquo;agential gullibility\u0026rdquo;.\npathways to trust trust by assumption: 1) trust absent any clues to warrent it due to timing 2) trust because there is imminent danger trust by inference: trust based on information you had before (brands, affiliation, performance) trust by substitution: having a backup plan accountability accountability is in a chain\nhardware designer (intel) OS developer (iOS, ec.) app developer users stakeholder direct stakeholders (people who are operating, technicians, etc.) indirect stakeholders: patients purchase = long-term support \u0026mdash;- what do you do to get it fixed/repaired.\nscales of trust scale of impact a bug in an OS can be tremendously bad \u0026ldquo;root access\u0026rdquo; \u0026mdash; privileged aces scale of longevity people maybe on very very old OS it requires keeping older OSes secure against modern technologies ","html":"\u003ch2 id=\"fs\"\u003eFS\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003enaming\u003c/strong\u003e: how do users name files\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ereliability\u003c/strong\u003e: surviving OS crashes and hardware failures\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eprotection\u003c/strong\u003e: isolation between users, controlled sharing\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edisk space management\u003c/strong\u003e: minimize seeks, sharing space (\u0026ldquo;preventing fragmentation\u0026rdquo;)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"seeks\"\u003eseeks\u003c/h4\u003e\n\u003cp\u003eto wait until the platter go under the 
arm and read.\u003c/p\u003e\n\u003ch4 id=\"internal-v-dot-external-fragmentation\"\u003einternal v. external fragmentation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einternal\u003c/strong\u003e: a file can be no less than a single block of text.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eexternal\u003c/strong\u003e: no space is available even if the space in aggregate is available\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch4 id=\"contiguous-allocation\"\u003econtiguous allocation\u003c/h4\u003e\n\u003cp\u003eIBM used this? puts files and meta-data together + implement an explicit free list allocator. \u003cstrong\u003ebenefit\u003c/strong\u003e: simple; \u003cstrong\u003edrawback\u003c/strong\u003e: 1) external fragmentation 2) hard to grow files\u003c/p\u003e\n\u003ch4 id=\"linked-files\"\u003elinked files\u003c/h4\u003e\n\u003cp\u003ein every block, store the location of the next block; don\u0026rsquo;t store files continuously\u0026mdash;instead, store a pointer to where the next block of the file is. \u003cstrong\u003ebenefit\u003c/strong\u003e: solves fragmentation and file growth; \u003cstrong\u003edrawback\u003c/strong\u003e: 1) huge seek time 2) random access from the middle is hard (i.e. O(n))\u003c/p\u003e\n\u003ch4 id=\"windows-fat\"\u003eWindows FAT\u003c/h4\u003e\n\u003cp\u003elinked files, but cached the file links in memory when using it. \u003cstrong\u003ebenefits\u003c/strong\u003e: same as linked files, and a bit faster \u003cstrong\u003edrawback\u003c/strong\u003e: data \u003cem\u003estill\u003c/em\u003e fragmented and now you have a whole ass table to deal with! 
but its at least faster\u003c/p\u003e\n\u003ch4 id=\"file-payload-data\"\u003eFile Payload Data\u003c/h4\u003e\n\u003cp\u003eKind of what we do\u0026mdash;instead of storing file data in order OR using links, store the file BLOCK information contiguously.\u003c/p\u003e\n\u003cp\u003e\u003cem\u003emulti-level index\u003c/em\u003e: store all block numbers for a given file down a tree (EXT2/3, Unix V6, NTFS)\u003c/p\u003e\n\u003ch3 id=\"unix-v6-plus-mli\"\u003eUnix V6 + MLI\u003c/h3\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSector Size\u003c/th\u003e\n\u003cth\u003eBlock Size\u003c/th\u003e\n\u003cth\u003eInode Size\u003c/th\u003e\n\u003cth\u003eInodes Per Block\u003c/th\u003e\n\u003cth\u003eAddress Type\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e32\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003ctd\u003eShort, 2 bytes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"block\"\u003eblock\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003ereadsector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;addr: %d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e].\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_add\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"ino\"\u003eino\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_addr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efile_size\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es have two modes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eILARG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e!=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// node is in \u0026#34;large mode\u0026#34;\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003ein \u003cstrong\u003esmall mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in 
\u003ccode\u003ei_addr\u003c/code\u003e the block numbers to the data\u003c/li\u003e\n\u003cli\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in the \u003cstrong\u003efirst seven\u003c/strong\u003e numbers in \u003ccode\u003ei_addr\u003c/code\u003e block numbers to \u003cem\u003eblocks that contain block numbers\u003c/em\u003e (512/2 = 256 block numbers, which are chars); the \u003cstrong\u003eeighth number\u003c/strong\u003e points to \u003cstrong\u003edoubly indirect\u003c/strong\u003e \u003cem\u003eblocks that contain block numbers that point to other blocks\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. usually this packs 16 inodes per block\u003c/p\u003e\n\u003cp\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which means we are fine now.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003esizing\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003esmall: \\(512\\) bytes per block, and \\(8\\) block storable, so \\(8 \\cdot 512 = 4096\\) bytes\u003c/li\u003e\n\u003cli\u003elarge: \\(512\\) bytes per block pointed to by i_addr, each containing \\(\\frac{512}{2} = 256\\) block numbers. The first seven in total would therefore address \\(256 \\times 7 = 1792\\) blocks of memory. The last eight would each address \\(256 \\cdot 256 = 65536\\) blocks of memory. In total, that addresses \\(1792+65536 = 67328\\) blocks of memory. 
Finally, that means we can address \\(67328 \\cdot 512 = 34471936\\) bytes.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"dir\"\u003edir\u003c/h4\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edirent\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_inumber\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// inode number of this file\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_name\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e14\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the name; *NOT NECESSARILY NULL TERMINATED*\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cstrong\u003eTHE NAME MAY NOT BE NULL TERMINATED\u003c/strong\u003e to cram max things. 
You have to use \u003cstrong\u003estrncmp\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003estrcmp/strncmp\u003c/strong\u003e: stops comparing after \\(n\\) characters; \u0026lt;0 if str1 comes before str2 alphabetically; \u0026gt;0 if str1 comes after str2; 0 if equal\u003c/p\u003e\n\u003cp\u003eStart at the root directory, \u003ccode\u003e/\u003c/code\u003e. We want to go to the root directory, and find the entry named \u003ccode\u003e/classes/\u003c/code\u003e, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\u003c/p\u003e\n\u003cp\u003eA directory is basically just a \u003cstrong\u003efile whose payload is a list of \u003ccode\u003edirent\u003c/code\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThe inode tells you whether something is a file or a directory. They can be small or large, as usual. Root directory always have inode number \u003ccode\u003e1\u003c/code\u003e; \u003ccode\u003e0\u003c/code\u003e is reserved to NULL.\u003c/p\u003e\n\u003ch4 id=\"file\"\u003efile\u003c/h4\u003e\n\u003cp\u003eRecall that \u003ccode\u003eread\u003c/code\u003e doesn\u0026rsquo;t read the whole thing. 
So, we it in parts.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ecopyContents\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esourceFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edestinationFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINCREMENT\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesRead\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003esourceFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebytesRead\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ebreak\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ebytesRead\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003edestinationFD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ebytesRead\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ebytesWritten\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eopen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epathname\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eflags\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFlags are a bitwise OR operations: you have to open with \u003ccode\u003eO_RDONLY\u003c/code\u003e (read only), \u003ccode\u003eO_WRONLY\u003c/code\u003e (write only), or \u003ccode\u003eO_RDWR\u003c/code\u003e (both read and write). 
This returns \\(-1\\) if the reading fails.\u003c/p\u003e\n\u003cp\u003eOther flags:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eO_TRUNC\u003c/code\u003e (truncate file)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_CREAT\u003c/code\u003e (creating a file if not exist), which will require a \u003ccode\u003emode_t mode\u003c/code\u003e parameter to set the permission\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_EXCL\u003c/code\u003e (file must not exist)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"open-file-table\"\u003eopen file table\u003c/h4\u003e\n\u003cp\u003eopen-file table is \u003cstrong\u003esystem wide\u003c/strong\u003e: mentioning what mode an opening is + the cursor to the open file + the number of file-descriptors pointing to it to a \u003ccode\u003erefcount\u003c/code\u003e.\u003c/p\u003e\n\u003cp\u003ewhy is \u003ccode\u003erefcount\u003c/code\u003e ever higher than 1? because forks.\u003c/p\u003e\n\u003ch3 id=\"block-cache\"\u003eBlock Cache\u003c/h3\u003e\n\u003cp\u003eWe will use part of the main memory to retain recently-accessed disk \u003cstrong\u003eblocks\u003c/strong\u003e. 
This is \u003cstrong\u003eNOT\u003c/strong\u003e at the granularity of individual files.\u003c/p\u003e\n\u003ch4 id=\"least-recently-used--lru--cache\"\u003eLeast Recently Used (LRU) Cache\u003c/h4\u003e\n\u003cp\u003eWhen you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\u003c/p\u003e\n\u003ch4 id=\"block-cache-modification\"\u003eBlock Cache Modification\u003c/h4\u003e\n\u003cp\u003ewe can either \u003cstrong\u003ewrite asap\u003c/strong\u003e, or \u003cstrong\u003edelay\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ewrite asap\u003c/strong\u003e\u003c/strong\u003e: \u003cem\u003esafer\u003c/em\u003e: less risk of data loss, written as soon as possible; \u003cem\u003eslow\u003c/em\u003e: program must wait to proceed until disk I/O completes\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003ewrite delay\u003c/strong\u003e\u003c/strong\u003e: \u003cem\u003edangerous\u003c/em\u003e: may loose data after crash; \u003cem\u003eefficient\u003c/em\u003e: memory writes is faster\u003c/p\u003e\n\u003ch2 id=\"crash-recovery\"\u003eCrash Recovery\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edata loss\u003c/strong\u003e: crashes can happen, and not all data could be saved to disk\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003einconsistency\u003c/strong\u003e: crashes can happen in the middle of operations\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIdeally, filesystem operations should be \u003cstrong\u003eatomic\u003c/strong\u003e. 
Every operation should happen or not happen at all\u0026mdash;but not halfway.\u003c/p\u003e\n\u003ch3 id=\"fsck\"\u003efsck\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eCheck whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn\u0026rsquo;t set there isn\u0026rsquo;t a clean shutdown.\u003c/li\u003e\n\u003cli\u003eIf it wasn\u0026rsquo;t a clean shutdown, identify inconsistencies\u003c/li\u003e\n\u003cli\u003eScans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations\u0026mdash;\n\u003col\u003e\n\u003cli\u003eblock in an inode and in free list; solution: pull the block off of free list\u003c/li\u003e\n\u003cli\u003eblock is a part of two inodes; solution: give to newest, random, copy, remove (bad idea)\u003c/li\u003e\n\u003cli\u003einode claims one dirent refers to it, but there are no such dirent; solution: put in lost and found\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"limitations\"\u003elimitations\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003etakes long because can\u0026rsquo;t restart until done\u003c/li\u003e\n\u003cli\u003edoesn\u0026rsquo;t prevent loss of actual file info\u003c/li\u003e\n\u003cli\u003efilesystem may still be unusable (core files moved to lost+found)\u003c/li\u003e\n\u003cli\u003ea block could migrate during recovery, leaking info\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ordered-writes\"\u003eordered writes\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eAlways initialize the \u003cstrong\u003eTARGET\u003c/strong\u003e before initializing the \u003cstrong\u003eREFERENCE\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003eInitialize inode before initalize directory entry to it\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever reuse a resource before \u003cstrong\u003eNULLIFYING\u003c/strong\u003e all existing 
\u003cstrong\u003e\u003cstrong\u003eREFERENCES\u003c/strong\u003e\u003c/strong\u003e\n\u003cul\u003e\n\u003cli\u003eRemove the inode reference before putting a block on the free list\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eNever clear the \u003cstrong\u003e\u003cstrong\u003eLAST REFERENCE\u003c/strong\u003e\u003c/strong\u003e to a live resource before setting a \u003cstrong\u003e\u003cstrong\u003eNEW REFERENCE\u003c/strong\u003e\u003c/strong\u003e (\u0026ldquo;its better to have 2 copies instead of none\u0026rdquo;)\n\u003cul\u003e\n\u003cli\u003eMake the new directory entry before get rid of the old one\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"limitations\"\u003elimitations\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eperformance\u003c/strong\u003e: we need to do operations synchronously\n\u003cul\u003e\n\u003cli\u003eif we really want to do caching async, we can track dependencies\u003c/li\u003e\n\u003cli\u003ecircular dependencies are possible\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eleak\u003c/strong\u003e\u003c/strong\u003e: it could leak resources (reference nullification happens but resource not added)\n\u003cul\u003e\n\u003cli\u003eWe can run fsck in the background\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"journaling\"\u003ejournaling\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcrash_recovery/#journaling\"\u003ejournaling\u003c/a\u003e keeps a paper trail of disk appertains in the event of a crash. We have an append-only log on disk that stores disk operations.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebefore performing an operation, record its info in the log\u003c/li\u003e\n\u003cli\u003eand write that to disk\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe log will always record what\u0026rsquo;s happening ahead. 
The actual block updates can eventually be carried out in any order.\u003c/p\u003e\n\u003ch4 id=\"what-do-we-log\"\u003ewhat do we log?\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ewe only log \u003cstrong\u003emetadata\u003c/strong\u003e changes (inodes, moving stuff around, etc.)\u003c/li\u003e\n\u003cli\u003epayload operations are not saved\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"structure\"\u003estructure\u003c/h4\u003e\n\u003cp\u003eWe typically have a LSN: log serial number, operations, and metadata.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eLogPatch\u003c/strong\u003e: changes something\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLogBlockFree\u003c/strong\u003e: mark something as free\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eLogBlockAlloc\u003c/strong\u003e: mark something as allocated, optionally zeroing data if its a data block (DO NOT zero if its a dirent or ino)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-toml\" data-lang=\"toml\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eoffset\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e335050\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eLSN\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e18384030\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eoperation\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;LogBlockAlloc\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eblockno\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1027\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003ezero_on_replay\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eoffset\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e23232\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eLSN\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eN\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eoperation\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;LogPatch\u0026#34;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eblockno\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eoffset\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e137\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003ebytes\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0.04\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e52\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch4 id=\"limitations-and-fixes\"\u003elimitations and fixes\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emultiple log entries\u003c/strong\u003e: each atomic operation will be wrapped into a unit \u003cstrong\u003etransaction\u003c/strong\u003e to make \u003cstrong\u003eidempotent\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003echeckpoints\u003c/strong\u003e: we can truncate the log occasionally at a \u003cstrong\u003echeckpoint\u003c/strong\u003e\u0026mdash;when it is no longer needed\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ewhere do we start replaying\u003c/strong\u003e: log entries should be \u003cstrong\u003eidempotent\u003c/strong\u003e\u0026mdash;doing something multiple times should have the same effect of doing them once. Logs cannot have external dependencies\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elog entries may take time\u003c/strong\u003e: when finally we write stuff to disk, we write the logs first. 
So no problems there.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"tradeoffs\"\u003etradeoffs\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edurability\u003c/strong\u003e - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.))\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eperformance\u003c/strong\u003e - it needs to be fast (which may mean less error checking)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econsistency\u003c/strong\u003e - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mp\"\u003eMP\u003c/h2\u003e\n\u003cp\u003eMultiprocessing: processes, PIDs, fork, execution order, copy on write, waitpid, zombie processes, execvp, pipes and pipe / pipe2, I/O redirection\u003c/p\u003e\n\u003ch3 id=\"forking\"\u003eforking\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003efork returns the child PID if parent; returns 0 if child.\u003c/p\u003e\n\u003cp\u003eThe arguments list have to \u003cstrong\u003eBEGIN WITH EXECUTABLE NAME\u003c/strong\u003e and \u003cstrong\u003eEND WITH NULL\u003c/strong\u003e.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;/bin/ls\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;-l\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;~/hewo\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eargs\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cstrong\u003eexecvp LEAVES THE FILE DESCRIPTOR TABLE\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eevery fork has to be waited on by \u003ccode\u003ewaitpid\u003c/code\u003e:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eoptions\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003epid\u003c/li\u003e\n\u003cli\u003estatus: pointer to store return about the child\u003c/li\u003e\n\u003cli\u003eoptions (0 for now)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eif the PID has died, this returns immediately. 
Otherwise, this blocks.\u003c/p\u003e\n\u003ch4 id=\"the-status-int\"\u003ethe \u003ccode\u003estatus\u003c/code\u003e int\u003c/h4\u003e\n\u003cp\u003eis a bitmap with a bunch of stuff, which we can check with a series of macros\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid_act\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eWIFEXISTED\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e))\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// child normal exit\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003estatuscode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eWEXITSTATUS\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estatus\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// abnormal exist\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethe returned PID is the PID that got waited on; if the input PID is \u003ccode\u003e-1\u003c/code\u003e, it will wayt on any process\u003c/p\u003e\n\u003ch4 id=\"fork-mechanics\"\u003efork mechanics\u003c/h4\u003e\n\u003cp\u003eThe act of copying stack and heap sounds really really expensive. So\u0026hellip;. 
Whapppens?\u003c/p\u003e\n\u003cp\u003eThe child will map the parent\u0026rsquo;s memory addresses to \u003cstrong\u003edifferent\u003c/strong\u003e physical addresses than for the parent. The copies are \u003cstrong\u003eLAZY\u003c/strong\u003e\u0026mdash;if the child writes to an area in memory, its virtual address are mapped to different addresses. If no writes by the child happen, the virtual address are mapped to the same address.\u003c/p\u003e\n\u003cp\u003eduring file reading, the file descriptors gets cloned, the underlying \u003ca href=\"/posts/kbhmultiprocessing/#open-file-table\"\u003eopen file table\u003c/a\u003e doesn\u0026rsquo;t close.\u003c/p\u003e\n\u003ch3 id=\"pipes\"\u003epipes\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epipes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// create the pipes\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003epipe\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003epipes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e/* int ret = pipe2(pipes, O_CLOEXEC); */\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// an so\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eret\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// i.e. 
ret[1] writes to =\u0026gt; ret[0] read\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// fork!\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// child subroutine\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// because child is READING, and not READINg\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan 
style=\"color:#75715e\"\u003e// we want to close the write\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we want to then make a buffer\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enum_bytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if the child reads before the parents write\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// it will block until some data is available\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// if the write ends are closed globally, read\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// will also stop.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuffer\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ereturn\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// parent 
subroutine\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// because parent is WRITING and not READING\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// we don\u0026#39;t want the read to block, we will\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// close the parent immediately.\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eread_from_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// write some data\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;msg\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003enum_bytes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// close now we are done writing\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewrite_to_here\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// clean up child\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ewaitpid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epid_p\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eNULL\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRecall that dup2 exists:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003edup2\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSTDIN_FILENO\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efds\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eit will close the second file descriptor, if already in use, before binding the first file descriptor to it.\u003c/p\u003e\n\u003ch3 id=\"shell\"\u003eshell\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;ls\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;things\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e};\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003epid_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efork\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// this is the child; execvp will check PATH for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eexecvp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan 
style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e],\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if we got here, the PID didn\u0026#39;t do well\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethrow\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eSTSHException\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003estring\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecommand\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eargv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e])\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;: not found or didn\u0026#39;t succeed to fork.\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ewaitpid\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003echild_pid\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do cleanup\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"mt\"\u003eMT\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// now the thread can execute at any time: once a thread is made, it will run in any order\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emyThread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunction_to_run\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earg1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003earg2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// threads run AS SOON AS SPAWENED: so\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can wait for a thread:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emyThread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ejoin\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eYou can also start a bunch on a loop:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreads\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecf\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e:\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003ethreads\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ecf\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003efunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"passing-by-reference\"\u003epassing by reference\u003c/h3\u003e\n\u003cp\u003ethreading doesn\u0026rsquo;t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estatic\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emythingref\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epbr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emyfunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eref\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emyint\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e));\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eRemember: ref will \u003cstrong\u003e\u003cstrong\u003eSHARE MEMORY\u003c/strong\u003e\u003c/strong\u003e, and you have no control over when the thread runs. So once a pointer is passed all bets are off in terms of what values things take on.\u003c/p\u003e\n\u003ch3 id=\"mutex\"\u003emutex\u003c/h3\u003e\n\u003cp\u003eit would be nice if a \u003ca href=\"/posts/kbhmultithreading/#critical-section\"\u003ecritical section\u003c/a\u003e can only be executed once; a \u003ca href=\"/posts/kbhmultithreading/#mutex\"\u003emutex\u003c/a\u003e can be shared across threads, but can only be \u0026ldquo;owned\u0026rdquo; by a single thread at once.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e \u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" 
data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003etmp\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eimportantly, if multiple \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es are waiting on a mutex, the next thread that\u0026rsquo;s going to get the mutex\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ewhen there are multiple threads \u003cstrong\u003ewriting\u003c/strong\u003e to a value\u003c/li\u003e\n\u003cli\u003ewhen there is a thread \u003cstrong\u003ewriting\u003c/strong\u003e and one or more threads \u003cstrong\u003ereading\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eif you are no writes, you don\u0026rsquo;t need a mutex\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-c++\" data-lang=\"c++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003eQueue\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eIntrGuard\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egrd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e!\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is not locked, just lock it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked, we need to prevent our current\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// thread from going to the ready queue, and push it to the current thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eblocked_queue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epush\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// remember this isn\u0026#39;t an issue even if IntrGuard\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// didn\u0026#39;t yet go out of scope; because it will either\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// land on a context_switch which will enable interrupts for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// or land on the beginning of a threadfunc helper, which\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// is also going to enable interrupts for you\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// nicely, the interrupts are here are *off* as required because switching\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// to another thread always will result in reenabling (either by new thread,\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// by timer handler, or by IntrGuard)\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emark_block_and_call_schedule\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eCURRENT_THREAD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eUnlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// disable interrupts: otherwise multiple threads\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// could come and lock the mutex (such as between\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the locked check and lock =1\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eIntrGuard\u003c/span\u003e \u003cspan style=\"color:#111\"\u003egrd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// if our thread is locked and nobody is waiting for it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eempty\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e())\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003elocked\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunblock_thread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eq\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epop\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e());\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we do not switch to the unblocked thread, just add it to the\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// ready queue. 
we are entrusting the scheduler to start this thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// whenever we feel right\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"cv\"\u003eCV\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003econdition_variable_any\u003c/span\u003e \u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// ...\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ethread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eref\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eIdentify the \u003cstrong\u003eISOLATED event\u003c/strong\u003e to notify; notify absolutely only when needed. To notify:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003enotify_all\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eTo listen:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan 
style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003epermitsCV\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewait\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epermitsLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermits\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e--\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003epermitsLock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eunlock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethe condition variable will\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003estart sleeping \u003cstrong\u003e\u003cstrong\u003eFIRST\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eunlock a lock FOR US \u003cstrong\u003e\u003cstrong\u003eAFTER\u003c/strong\u003e\u003c/strong\u003e the sleeping starts\u003c/li\u003e\n\u003cli\u003eafter waiting ends, tries to reaquire lock\u003c/li\u003e\n\u003cli\u003eblocks until we have the lock again\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"unique-lock\"\u003eunique_lock\u003c/h3\u003e\n\u003cdiv 
class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emy_scope\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emut\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econdition_variable_any\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunique_lock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elck\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emut\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do stuff, you can even pass it to a condition variable!\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecv\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewait\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elck\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"thread-states-and-contexts\"\u003eThread States and Contexts\u003c/h2\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es are the \u003cstrong\u003eunit of execution\u003c/strong\u003e. The \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e keeps track of the \u003ca href=\"/posts/kbhassembly/#stack-pointer\"\u003e*stack pointer\u003c/a\u003e* of the thread \u003ccode\u003e%rsp\u003c/code\u003e, which means if a thread is put to sleep the state can be stored somewhere on the stack.\u003c/p\u003e\n\u003cp\u003eThree states:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003erunning\u003c/strong\u003e (could switch to ready/blocked)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eready\u003c/strong\u003e able to run, but not on CPU yet (could switch to running \u003cstrong\u003eonly\u003c/strong\u003e)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eblocked\u003c/strong\u003e eating for something (could switch to ready/running)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"trap\"\u003etrap\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e is a user request for OS attention explicitly from the user thread, swapping the user process off the CPU.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003esystem calls\u003c/li\u003e\n\u003cli\u003eerrors\u003c/li\u003e\n\u003cli\u003epage fault (memory 
errors)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"interrupt\"\u003einterrupt\u003c/h3\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003e takes place outside the current thread, it forces the OS\u0026rsquo; attention even if the user thread isn\u0026rsquo;t asking for it\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echaracter typed at keyboard\u003c/li\u003e\n\u003cli\u003ecompletion of a disk operations\u003c/li\u003e\n\u003cli\u003ea hardware timer that fires an interrupt\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"what-if-a-timer-goes-off-during-an-interrupt--kbhdispatching-dot-md\"\u003ewhat if a timer goes off during an \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003einterrupts are disabled during interrupt handling\u003c/strong\u003e, otherwise, this causes an infinite loop.\u003c/p\u003e\n\u003ch4 id=\"preemption\"\u003epreemption\u003c/h4\u003e\n\u003cp\u003eWe use \u003ca href=\"/posts/kbhdispatching/#interrupt\"\u003einterrupt\u003c/a\u003es to implement \u003ca href=\"/posts/kbhpreemption/\"\u003epreemption\u003c/a\u003e, \u0026ldquo;\u003ca href=\"/posts/kbhpreemption/\"\u003epreempting\u003c/a\u003e\u0026rdquo; threads in order to swap on another thread to CPU. 
This enables \u003ca href=\"/posts/kbhscheduling/\"\u003escheduling\u003c/a\u003e to happen.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e// brand new thread\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003einterrupt_handler\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e/* disables interupts, automatically by timer handler */\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// future spawns start here\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econtext_switch\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(...);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e/* enables interupts, automatically by timer handler 
*/\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ethreadfunc_wrapper\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// manually enable interrupts before first run\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eintr_enable\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003etrue\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// start thread\u0026#39;s actual business\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ethreadfunc\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e();\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"scheduling\"\u003eScheduling\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain 
challenges\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003eminimize time to a useful result\u0026mdash;(\u003cstrong\u003eassumption\u003c/strong\u003e: a \u0026ldquo;useful result\u0026rdquo; = a thread blocking or completes)\u003c/li\u003e\n\u003cli\u003eusing resources efficiently (keeping cores/disks busy)\u003c/li\u003e\n\u003cli\u003efairness (multiple users / many jobs for one users)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe can measure 1) based on \u0026ldquo;average completion time\u0026rdquo;: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.\u003c/p\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch4 id=\"first-come-first-serve\"\u003efirst-come first-serve\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ekeep all threads in ready in a \u003cstrong\u003equeue\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003erun the first thread on the front until it finishes/it blocks for however long\u003c/li\u003e\n\u003cli\u003erepeat\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e: a thread can run away with the entire system, accidentally, through infinite loops\u003c/p\u003e\n\u003ch4 id=\"round-robin\"\u003eround robin\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ekeep all threads in a \u003cstrong\u003eround robin\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003eeach thread can run for a set amount of time called a \u003ca href=\"/posts/kbhscheduling/#round-robin\"\u003etime slice\u003c/a\u003e (10ms or so)\u003c/li\u003e\n\u003cli\u003eif a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e: what\u0026rsquo;s a good \u003ca href=\"/posts/kbhscheduling/#round-robin\"\u003etime slice\u003c/a\u003e?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003etoo 
small: the overhead of context switching is higher than the overhead of running the program\u003c/li\u003e\n\u003cli\u003etoo large: threads can monopolize cores, can\u0026rsquo;t handle user input, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eLinux uses 4ms. Generally, you want 5-10ms range.\u003c/p\u003e\n\u003ch3 id=\"gold-shortest-remaining-processing-time\"\u003egold: shortest remaining processing time\u003c/h3\u003e\n\u003cp\u003eRun first the thread in queue that will finish the \u003cstrong\u003emost quickly\u003c/strong\u003e and run it \u003cstrong\u003efully to competition\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eIt \u003cstrong\u003egives preference to those that need it the least\u003c/strong\u003e (i.e. because it runs the smalest one); of course THIS IS \u003cstrong\u003enot implementable\u003c/strong\u003e without oracle time guess.\u003c/p\u003e\n\u003cp\u003eOur goal, then is to get as close as possible to the performance of \u003ca href=\"/posts/kbhscheduling/#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eProblem\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe don\u0026rsquo;t know which one will finish the most quickly\u003c/li\u003e\n\u003cli\u003eif we have many threads and one long-running thread, the long running thread won\u0026rsquo;t be able to run ever\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"priority-based-scheduling\"\u003epriority based scheduling\u003c/h3\u003e\n\u003cp\u003eKey idea: \u003cstrong\u003ebehavior tends to be consistent in a thread\u003c/strong\u003e. 
We build multiple \u003cstrong\u003epriority queues\u003c/strong\u003e to address this.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscheduling/#priority-based-scheduling\"\u003epriority based scheduling\u003c/a\u003e is an approximation of \u003ca href=\"/posts/kbhscheduling/#shortest-remaining-processing-time\"\u003eSRPT\u003c/a\u003e, using the past performance of the thread to estimate the running time of the thread. Over time, \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003es will move between priority queues, and we \u003cstrong\u003erun the topmost thread from the highest priority queue\u003c/strong\u003e\u003c/p\u003e\n\u003ch4 id=\"implement-based-on-time-slice--kbhscheduling-dot-md--usage\"\u003eimplement based on \u003ca href=\"/posts/kbhscheduling/#round-robin\"\u003etime slice\u003c/a\u003e usage\u003c/h4\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e always enters in the \u003cstrong\u003ehighest\u003c/strong\u003e priority queue\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif the \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e uses all of its \u003ca href=\"/posts/kbhscheduling/#round-robin\"\u003etime slice\u003c/a\u003e and didn\u0026rsquo;t exit, bump them down a priority queue\u003c/li\u003e\n\u003cli\u003eif a \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e blocked before it used all of its \u003ca href=\"/posts/kbhscheduling/#round-robin\"\u003etime slice\u003c/a\u003e, bump them up a priority queue\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"implement-based-on-aggregate-time-used-fixing-neglect\"\u003eimplement based on aggregate time used: fixing neglect\u003c/h4\u003e\n\u003cp\u003ea \u003ca href=\"/posts/kbhmultithreading/#thread\"\u003ethread\u003c/a\u003e has a number for \u0026ldquo;how much time did you use on the CPU recently\u0026rdquo;? 
The priories are sorted by that value, and the smallest time use will be ran.\u003c/p\u003e\n\u003ch3 id=\"context-switch\"\u003econtext switch\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e(in asm) push \u003cstrong\u003eall callee saved \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es\u003c/strong\u003e except \u003ccode\u003e%rsp\u003c/code\u003e into the bottom of the old thread\u0026rsquo;s \u003ca href=\"/posts/kbhstack/\"\u003estack\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003estore the \u003ca href=\"/posts/kbhassembly/#stack-pointer\"\u003estack pointer\u003c/a\u003e \u003ccode\u003e%rsp\u003c/code\u003e into the \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e for that process corresponding to thread\u003c/li\u003e\n\u003cli\u003eread the new thread\u0026rsquo;s stack pointer from the \u003ca href=\"/posts/kbhprocess_control_block/\"\u003eprocess control block\u003c/a\u003e, and load that into \u003ccode\u003e%rsp\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003e(in asm) pop \u003cstrong\u003eall callee saved \u003ca href=\"/posts/kbhassembly/#register\"\u003eregister\u003c/a\u003es\u003c/strong\u003e stored on the bottom of our new stack back onto the registers\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTo deal with new threads, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls \u003ccode\u003econtext_switch\u003c/code\u003e normally.\u003c/p\u003e\n\u003ch2 id=\"virtual-memory\"\u003eVirtual Memory\u003c/h2\u003e\n\u003ch3 id=\"main-challenges\"\u003emain challenges\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emultitasking\u003c/strong\u003e: multiple processes should be able to use memory\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etransparency\u003c/strong\u003e: no process need to know that memory is shared; each process should be able to run regardless of the 
number/locations of processes\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eisolation\u003c/strong\u003e: processes shouldn\u0026rsquo;t be able to corrupt other processes\u0026rsquo; memory\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eefficiency\u003c/strong\u003e: shouldn\u0026rsquo;t be degraded by sharing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"crappy-designs-with-no-dmt\"\u003ecrappy designs with no DMT\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esingle tasking\u003c/strong\u003e: assume there\u0026rsquo;s one process 1) no isolation 2) no multitasking 3) bad fragmentation\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eload time relocation\u003c/strong\u003e: move the entire program somewhere on load time 1) no isolation 2) can\u0026rsquo;t grow memory after load 3) external fragmentation after frees\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"main-designs\"\u003emain designs\u003c/h3\u003e\n\u003ch4 id=\"base-and-bound\"\u003ebase and bound\u003c/h4\u003e\n\u003cp\u003eload time relocation + virtual memory\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eassign a location in physical memory, call the \u003cstrong\u003ebase\u003c/strong\u003e; during translation, we just add every virtual address by the \u003cstrong\u003ebase\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ewe can cap the virtual address space for each process by a \u003cstrong\u003ebound\u003c/strong\u003e, we can raise a bus error/segfault if it goes above the highest allowable\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003elast possible address\u003c/strong\u003e: is (bound - 1)+base\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecompare virtual address to bound, trap and raise if \u0026gt;= \u003cstrong\u003ebound\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ethen, return virtual address + \u003cstrong\u003ebase\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003etradeoffs: good - 1) inexpensive 2) doesn\u0026rsquo;t need more 
space 3) ritualized; bad - 1) can\u0026rsquo;t really move either (i.e. need to allocate) 2) fragmentation 3) no read only memory\u003c/p\u003e\n\u003ch4 id=\"multiple-segments\"\u003emultiple segments\u003c/h4\u003e\n\u003cp\u003ebreak stack, heap, etc. into multiple segments; then do base and bound for each segment\u003c/p\u003e\n\u003cp\u003etradeoffs: good - 1) you can now recycle segments 2) you can not map the middle 3) you can grow the heap (but not the stack, because it moves downwards); bad - 1) you need to decide segment size and location ahead of time\u003c/p\u003e\n\u003ch3 id=\"goal-design\"\u003egoal design\u003c/h3\u003e\n\u003cp\u003epaging: \u003cstrong\u003efixed\u003c/strong\u003e segment size, and just split each thing.\u003c/p\u003e\n\u003cp\u003ewe map each page independently, and keep the offset. If a page is unused, \u003cstrong\u003einternal fragmentation\u003c/strong\u003e but not too bad. The \u003cstrong\u003estack can now grow downwards\u003c/strong\u003e: because if it reaches into lower page numbers we can just map that page somewhere too.\u003c/p\u003e\n\u003cp\u003eFor instance, typically page sizes are 4kb\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePage Size\u003c/th\u003e\n\u003cth\u003eOffset Number Digits\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4096 bytes (16^3)\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ethen the rest of the address would just be the page number.\u003c/p\u003e\n\u003ch4 id=\"intel-s-implementation\"\u003eIntel\u0026rsquo;s implementation\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003eVirtual 
Addresses\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eUnused (16 bits)\u003c/td\u003e\n\u003ctd\u003eVirtual page number (36 bits)\u003c/td\u003e\n\u003ctd\u003eOffset (12 bits)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003ePhysical Addresses\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ePage number (40 bits)\u003c/td\u003e\n\u003ctd\u003eOffset (12 bits)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003echop off page number and offset\u003c/li\u003e\n\u003cli\u003etranslate the page number\u003c/li\u003e\n\u003cli\u003econcat the two together\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"implementation\"\u003eimplementation\u003c/h3\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eIndex\u003c/th\u003e\n\u003cth\u003ePhysical Address\u003c/th\u003e\n\u003cth\u003eWritable\u003c/th\u003e\n\u003cth\u003ePresent/Mapped?\u003c/th\u003e\n\u003cth\u003eLast 
Access\u003c/th\u003e\n\u003cth\u003eKernel\u003c/th\u003e\n\u003cth\u003eDirty\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0x2023\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0x0023\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e1\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003ctd\u003e0\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch2 id=\"swap\"\u003eSwap\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003epick a page to kick out\u003c/li\u003e\n\u003cli\u003ewrite kicked page to disk\u003c/li\u003e\n\u003cli\u003emark the old page entry as not present\u003c/li\u003e\n\u003cli\u003egive the physical address to the new virtual page\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"choosing-what-to-swap\"\u003echoosing what to swap\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003erandomly! 
(works apparently kinda fine)\u003c/li\u003e\n\u003cli\u003eFirst-in-first out (fair, bust bad \u0026mdash; throw out the page in memory longest; but what if its very used)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eleast recently used\u003c/strong\u003e - clock algorithm\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"clock-algorithm\"\u003eclock algorithm\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003erotate through all pages until we find one that hasn\u0026rsquo;t been referenced since last time\u003c/strong\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewe add a \u003cstrong\u003ereference bit\u003c/strong\u003e to the \u003ca href=\"/posts/kbhvirtual_memory/#paging\"\u003epage table\u003c/a\u003e\u0026mdash;its set to \\(1\\) if the program wrote or read each page, otherwise its set to \\(0\\)\u003c/li\u003e\n\u003cli\u003ewhen page kick is needed, clock algorithm starts where it left off before and scan through physical pages\n\u003col\u003e\n\u003cli\u003eeach page it checks with reference bit 1, it sets the \u003cstrong\u003ereference bit\u003c/strong\u003e as 0\u003c/li\u003e\n\u003cli\u003eif it checked a page and its reference bit is 0, we kick it out (because we\u0026rsquo;ve gone through two )\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eWe now \u003cstrong\u003esave the position of the hand\u003c/strong\u003e\u0026mdash;we want to begin checking with the page that hasn\u0026rsquo;t been checked for the longest time. 
If every page has a \u003cstrong\u003ereference bit\u003c/strong\u003e is one, running this algorithm doesn\u0026rsquo;t break because it would set its immediately next bit of memory.\u003c/p\u003e\n\u003ch3 id=\"page-replacement\"\u003epage replacement\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe \u003cem\u003edon\u0026rsquo;t use\u003c/em\u003e \u003cstrong\u003eper process replacement\u003c/strong\u003e because we need to allocate max pages per process\u003c/li\u003e\n\u003cli\u003ewe use \u003cstrong\u003eglobal replacement\u003c/strong\u003e to maximise usage\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"demand-fetching\"\u003edemand fetching\u003c/h3\u003e\n\u003cp\u003emost modern OSes start with \u003cstrong\u003eno pages loaded\u003c/strong\u003e\u0026mdash;load pages only when referenced; this is tempered by the type of page that\u0026rsquo;s needed:\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePage Type\u003c/th\u003e\n\u003cth\u003eNeed Content on First Load\u003c/th\u003e\n\u003cth\u003eSave to Swap (\u0026ldquo;Swap?\u0026rdquo;)\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ecode\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eno (read from exe)\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003edata\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003estack/heap\u003c/td\u003e\n\u003ctd\u003eno\u003c/td\u003e\n\u003ctd\u003eyes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eWe only write to disk if its \u003cstrong\u003edirty\u003c/strong\u003e.\u003c/p\u003e\n\u003ch2 id=\"multicore-plus-flash\"\u003eMulticore + Flash\u003c/h2\u003e\n\u003ch3 id=\"scheduling-multi-core-cpus\"\u003eScheduling Multi-Core CPUs\u003c/h3\u003e\n\u003ch4 id=\"main-approaches\"\u003emain 
approaches\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eone queue for everyone 1) need to figure out what is the priory of things on that queue (for preemption)\u003c/li\u003e\n\u003cli\u003eone queue per core: 1) where do we put a thread? 2) how do we move between cores?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"one-ready-queue-per-core\"\u003eOne Ready Queue per Core\u003c/h4\u003e\n\u003col\u003e\n\u003cli\u003ewhere do we put a given thread?\u003c/li\u003e\n\u003cli\u003emoving core between threads is expensive\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBig tension:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eWork Stealing\u003c/strong\u003e: if one core is free (even if there is things in the ready queue), check other cores\u0026rsquo; ready queues and try to do thread communism.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eCore Affinity\u003c/strong\u003e ideally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"gang-scheduling\"\u003eGang Scheduling\u003c/h4\u003e\n\u003cp\u003eWhen you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.\u003c/p\u003e\n\u003ch3 id=\"locking-multi-core-cpus\"\u003eLocking Multi-Core CPUs\u003c/h3\u003e\n\u003cp\u003edisabling interrupts are not enough\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ehardware atomic operation\u003c/strong\u003e \u003ccode\u003eexchange\u003c/code\u003e + \u003cstrong\u003ebusy waiting\u003c/strong\u003e, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" 
style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eclass\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eLock\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003estd\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eautomic\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003esync\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eLock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e::\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elock\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e()\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003ewhile\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003esync\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eexchange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e))\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// we are now the only one using it\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// do work ....\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003esync\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eThe exchange function returns the old value.\u003c/p\u003e\n\u003ch3 id=\"flash-storage\"\u003eFlash Storage\u003c/h3\u003e\n\u003ch4 id=\"writing\"\u003ewriting\u003c/h4\u003e\n\u003cp\u003eYou have two operation.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eerase\u003c/strong\u003e: You can set \u003cstrong\u003eALL SEGMENT\u003c/strong\u003e of an \u0026ldquo;erase 
unit\u0026rdquo; to \\(1\\) (\u0026ldquo;erase unit\u0026rdquo; size is usually 256k)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ewrite\u003c/strong\u003e: You can modify one \u0026ldquo;page\u0026rdquo; at a time (which is smaller than a erase unit)\u0026mdash;but you can ONLY set individual bits in the page into 0 (\u0026ldquo;page\u0026rdquo; size is usually 512 bytes or 4k bytes)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"wear-out\"\u003ewear-out\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003ewear leveling\u003c/strong\u003e: make sure that the drive wears out at roughly the same rate as other parts of the drive. Moving commonly written data (\u0026ldquo;hot\u0026rdquo; data) around\u003c/p\u003e\n\u003ch4 id=\"ftl--kbhmodern-os-dot-md--limitations\"\u003e\u003ca href=\"/posts/kbhmodern_os/#flash-storage\"\u003eFTL\u003c/a\u003e limitations\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003eno hardware access (can\u0026rsquo;t optimize around flash storage)\u003c/li\u003e\n\u003cli\u003esacrifices performances for performance\u003c/li\u003e\n\u003cli\u003ewasts capacity (to look like hard drive)\u003c/li\u003e\n\u003cli\u003emany layers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ethics\"\u003eEthics\u003c/h2\u003e\n\u003cp\u003etrusting software is the task of extending your own \u003cstrong\u003eAGENCY\u003c/strong\u003e to a piece of software: \u0026ldquo;agential gullibility\u0026rdquo;.\u003c/p\u003e\n\u003ch3 id=\"pathways-to-trust\"\u003epathways to trust\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003etrust by assumption\u003c/strong\u003e: 1) trust absent any clues to warrent it due to timing 2) trust because there is imminent danger\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etrust by inference\u003c/strong\u003e: trust based on information you had before (brands, affiliation, performance)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etrust by substitution\u003c/strong\u003e: having a backup 
plan\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"accountability\"\u003eaccountability\u003c/h3\u003e\n\u003cp\u003eaccountability is in a \u003cstrong\u003echain\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ehardware designer (intel)\u003c/li\u003e\n\u003cli\u003eOS developer (iOS, ec.)\u003c/li\u003e\n\u003cli\u003eapp developer\u003c/li\u003e\n\u003cli\u003eusers\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"stakeholder\"\u003estakeholder\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003edirect stakeholders\u003c/strong\u003e (people who are operating, technicians, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eindirect stakeholders\u003c/strong\u003e: patients\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003epurchase = long-term support \u0026mdash;- what do you do to get it fixed/repaired.\u003c/p\u003e\n\u003ch3 id=\"scales-of-trust\"\u003escales of trust\u003c/h3\u003e\n\u003ch4 id=\"scale-of-impact\"\u003escale of impact\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ea bug in an OS can be tremendously bad\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;root access\u0026rdquo; \u0026mdash; privileged aces\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"scale-of-longevity\"\u003escale of longevity\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003epeople maybe on very very old OS\u003c/li\u003e\n\u003cli\u003eit requires keeping older OSes secure against modern technologies\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs111_final_sheet/","tags":null,"title":"SU-CS111 Final Sheet"},{"categories":null,"contents":"KEY IDEAS:\nfilesystems - how do we design filesystems to manage files on disk multiprocessing - how does programs interact with one another, coordinating, etc. multithreading - how can we have single-process concurrency virtual memory - how can one set of memory can be shared among several processes modern technologies - busy waiting locking, Flash Storage, etc. 
interplay between tech + OS: OS at the hardware, software boundary designing with tradeoffs: not always one \u0026ldquo;best\u0026rdquo; way - evaluating pros/cons/priorities virtualization: make one thing look like something else, or many of them concurrency: synchronization is hard locality: predicting the future (scheduling, paging, block cache, etc.)\u0026mdash;try to estimate the future with priority queues, etc. atomics: collections of operations that make them appear as a single, indivisible operation \u0026mdash; synchronization + file system consistency (log transactions) layering: building higher level abstractions to hide details (monitors, fs layers, file descriptors, etc.) system builders wrangling complexity: solving complex problems with simple interfaces that others can build on (virtual memory, filesystems, etc.) trust: we have to trust something or someone\u0026mdash;evaluating what to trust and how systems can incorporate trust understanding justifies how complex systems work elegant ideas of computing (concurrency, virtualization, etc.) take advantage of hardware and OS software that\u0026rsquo;s available OS aren\u0026rsquo;t standing still: OS changing and encountering new challenges Massive Review\nFS Design filesystems to manage files, what are the tradeoffs in designing them? How can we interact with the filesystem?\nmultiple approaches (continuous allocation, linked files, FAT, multi-level index\u0026mdash;file access, metadata) crash recovery designs file descriptions Why? large system design manipulate files in programs + what is a file design challenges and limitatinos MP How can our program create and interact other programs?\nfork/waitpid/execvp/pipe: coordinating and run other programs and erpcosses process control block information + running processes in any order Why? challenges of concurrency shells! 
chrome site isolation MT Concurrency within a single process.\ndining philosopher problem and its solution OS\u0026rsquo; tracking of threads (and not processes) to run, and when to switch between them scheduling (round robin, SRPT, priority based scheduling, etc.), preemption, and dispatching Concurrency Management Why? maximally take advantage of hardware through multi cores many applications in modern software (Excel\u0026rsquo;s threads, for instance) understand the behavior of computers\u0026mdash;single core machines may also multi-task! concurrency challenges + synchronization: this is hard Concurrency Management synchronization/race conditions/deadlock\nprocesses and threads creating and dispatching sync primitive and their implementation: mutexes, CVs, monitor pattern scheduling interrupts deadlock races and inconsistency VMem How can one set of memory be shared among several processes? How do we manage access to a limited amount of system memory?\ngives each process isolated virtual address space OS maps what\u0026rsquo;s needed to real physical memory OS can manage physical memory however it wants, including swapping pages to disk Why? virtualization - virtual world does not need to know about the complexities of where to run programmer: we always assume tones of contiguous memory thrashing, swapping, etc. Modern Technologies How do hardware impact design of OSes?\nmulti-core scheduling + locks how to schedule multi-core threads\u0026mdash;Gang Scheduling + Work Stealing + Core Affinity locking between cores: busy waiting and atomics flash-storage: impacts on file systems with wear-out and Flash Translation Layer Why? 
OSes sitting at software-hardware boundary: system changes can change OSes Can more fully understand how modern technologies impact our devices\u0026mdash;we can understanding their impact at the OS level Ethics and Trust Who/what do we trust, how do we decided, what do we do when the thrust is not upheald, how gcan we factor trust?\nagential gullibility privacy + trust pathways to trust accountability, stakeholder Why OS has extreme scale: high amount of trust we must trust some things, improtant to reflect what we trust and what we value reflect on what to do when trust is violated, how can we incorporate considerations of trust into what we build Next Steps ","html":"\u003cp\u003e\u003cstrong\u003eKEY IDEAS\u003c/strong\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003es - how do we design filesystems to manage files on disk\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiprocessing/\"\u003emultiprocessing\u003c/a\u003e - how does programs interact with one another, coordinating, etc.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultithreading/\"\u003emultithreading\u003c/a\u003e - how can we have single-process concurrency\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e - how can one set of memory can be shared among several processes\u003c/li\u003e\n\u003cli\u003emodern technologies - \u003ca href=\"/posts/kbhpermits_model/\"\u003ebusy waiting\u003c/a\u003e locking, \u003ca href=\"/posts/kbhmodern_os/#flash-storage\"\u003eFlash Storage\u003c/a\u003e, etc.\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einterplay between tech + OS\u003c/strong\u003e: OS at the hardware, software boundary\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edesigning with tradeoffs\u003c/strong\u003e: not always one \u0026ldquo;best\u0026rdquo; way - evaluating 
pros/cons/priorities\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evirtualization\u003c/strong\u003e: make one thing look like something else, or many of them\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003econcurrency\u003c/strong\u003e: synchronization is hard\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elocality\u003c/strong\u003e: predicting the future (scheduling, paging, block cache, etc.)\u0026mdash;try to estimate the future with priority queues, etc.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eatomics\u003c/strong\u003e: collections of operations that make them appear as a single, indivisible operation \u0026mdash; synchronization + file system consistency (log transactions)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elayering\u003c/strong\u003e: building higher level abstractions to hide details (monitors, fs layers, file descriptors, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esystem builders wrangling complexity\u003c/strong\u003e: solving complex problems with simple interfaces that others can build on (virtual memory, filesystems, etc.)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etrust\u003c/strong\u003e: we have to trust \u003cem\u003esomething\u003c/em\u003e or \u003cem\u003esomeone\u003c/em\u003e\u0026mdash;evaluating what to trust and how systems can incorporate trust\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003col\u003e\n\u003cli\u003eunderstanding justifies how complex systems work\u003c/li\u003e\n\u003cli\u003eelegant ideas of computing (\u003cstrong\u003econcurrency\u003c/strong\u003e, \u003cstrong\u003evirtualization\u003c/strong\u003e, etc.)\u003c/li\u003e\n\u003cli\u003etake advantage of hardware and OS software that\u0026rsquo;s available\u003c/li\u003e\n\u003cli\u003eOS aren\u0026rsquo;t standing still: OS changing and encountering new challenges\u003c/li\u003e\n\u003c/ol\u003e\n\u003chr\u003e\n\u003cp\u003eMassive Review\u003c/p\u003e\n\u003ch2 id=\"fs\"\u003eFS\u003c/h2\u003e\n\u003cp\u003eDesign 
filesystems to manage files, what are the tradeoffs in designing them? How can we interact with the filesystem?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emultiple approaches (continuous allocation, linked files, FAT, multi-level index\u0026mdash;file access, metadata)\u003c/li\u003e\n\u003cli\u003ecrash recovery designs\u003c/li\u003e\n\u003cli\u003efile descriptions\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003elarge system design\u003c/li\u003e\n\u003cli\u003emanipulate files in programs + what is a file\u003c/li\u003e\n\u003cli\u003edesign challenges and limitatinos\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mp\"\u003eMP\u003c/h2\u003e\n\u003cp\u003eHow can our program create and interact other programs?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003efork/waitpid/execvp/pipe\u003c/strong\u003e: coordinating and run other programs and erpcosses\u003c/li\u003e\n\u003cli\u003eprocess control block information + running processes in any order\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003echallenges of concurrency\u003c/li\u003e\n\u003cli\u003eshells!\u003c/li\u003e\n\u003cli\u003echrome site isolation\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mt\"\u003eMT\u003c/h2\u003e\n\u003cp\u003eConcurrency within a single process.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003edining philosopher problem and its solution\u003c/li\u003e\n\u003cli\u003eOS\u0026rsquo; tracking of threads (and not processes) to run, and when to switch between them\u003c/li\u003e\n\u003cli\u003escheduling (round robin, SRPT, \u003ca href=\"/posts/kbhscheduling/#priority-based-scheduling\"\u003epriority based scheduling\u003c/a\u003e, etc.), preemption, and dispatching\u003c/li\u003e\n\u003cli\u003eConcurrency Management\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003emaximally take 
advantage of hardware through multi cores\u003c/li\u003e\n\u003cli\u003emany applications in modern software (Excel\u0026rsquo;s threads, for instance)\u003c/li\u003e\n\u003cli\u003eunderstand the behavior of computers\u0026mdash;single core machines may also multi-task!\u003c/li\u003e\n\u003cli\u003econcurrency challenges + synchronization: this is \u003cstrong\u003ehard\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"concurrency-management\"\u003eConcurrency Management\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003esynchronization/race conditions/deadlock\u003c/strong\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eprocesses and threads\u003c/li\u003e\n\u003cli\u003ecreating and dispatching\u003c/li\u003e\n\u003cli\u003esync primitive and their implementation: mutexes, CVs, monitor pattern\u003c/li\u003e\n\u003cli\u003escheduling\u003c/li\u003e\n\u003cli\u003einterrupts\u003c/li\u003e\n\u003cli\u003edeadlock\u003c/li\u003e\n\u003cli\u003eraces and inconsistency\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"vmem\"\u003eVMem\u003c/h2\u003e\n\u003cp\u003eHow can one set of memory be shared among several processes? 
How do we manage access to a limited amount of system memory?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003egives each process isolated virtual address space\u003c/li\u003e\n\u003cli\u003eOS maps what\u0026rsquo;s needed to real physical memory\u003c/li\u003e\n\u003cli\u003eOS can manage physical memory however it wants, including swapping pages to disk\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003evirtualization\u003c/strong\u003e - virtual world does not need to know about the complexities of where to run\n\u003cul\u003e\n\u003cli\u003eprogrammer: we always assume tones of contiguous memory\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ethrashing, swapping, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"modern-technologies\"\u003eModern Technologies\u003c/h2\u003e\n\u003cp\u003eHow do hardware impact design of OSes?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emulti-core scheduling + locks\n\u003cul\u003e\n\u003cli\u003ehow to schedule multi-core threads\u0026mdash;\u003ca href=\"/posts/kbhmodern_os/#gang-scheduling\"\u003eGang Scheduling\u003c/a\u003e + \u003ca href=\"/posts/kbhmodern_os/#work-stealing\"\u003eWork Stealing\u003c/a\u003e + \u003ca href=\"/posts/kbhmodern_os/#core-affinity\"\u003eCore Affinity\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003elocking between cores: busy waiting and atomics\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eflash-storage: impacts on file systems with wear-out and \u003ca href=\"/posts/kbhmodern_os/#flash-storage\"\u003eFlash Translation Layer\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eOSes sitting at software-hardware boundary: system changes can change OSes\u003c/li\u003e\n\u003cli\u003eCan more fully understand how modern technologies impact our devices\u0026mdash;we can understanding their impact at the OS 
level\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"ethics-and-trust\"\u003eEthics and Trust\u003c/h2\u003e\n\u003cp\u003eWho/what do we trust, how do we decided, what do we do when the thrust is not upheald, how gcan we factor trust?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#trust\"\u003eagential gullibility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/\"\u003eprivacy\u003c/a\u003e + \u003ca href=\"/posts/kbhprivacy/#trust\"\u003etrust\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#pathways-to-trust\"\u003epathways to trust\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprivacy/#accountability\"\u003eaccountability\u003c/a\u003e, \u003ca href=\"/posts/kbhprivacy/#stakeholder\"\u003estakeholder\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"why\"\u003eWhy\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eOS has extreme \u003cstrong\u003escale\u003c/strong\u003e: high amount of trust\u003c/li\u003e\n\u003cli\u003ewe must trust \u003cstrong\u003esome\u003c/strong\u003e things, improtant to reflect what we trust and what we value\u003c/li\u003e\n\u003cli\u003ereflect on what to do when trust is violated, how can we incorporate considerations of trust into what we build\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-03-13_14-03-04_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs111_outline/","tags":null,"title":"SU-CS111 Outline"},{"categories":null,"contents":"Key Sequence Notation New Concepts Optimal Stopping Problem reinforcement learning model-based reinforcement learning model-free reinforcement learning Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 
id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoptimal_stopping_problem/\"\u003eOptimal Stopping Problem\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhreinforcement_learning/\"\u003ereinforcement learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_based_reinforcement_learning/\"\u003emodel-based reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodel_free_reinforcement_learning/\"\u003emodel-free reinforcement learning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov022023/","tags":null,"title":"SU-CS238 NOV022023"},{"categories":null,"contents":"Key Sequence Notation New Concepts POMDP belief observation model error model discrete state filter Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#observation-model\"\u003eobservation model\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#error-model\"\u003eerror model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/#discrete-state-filter\"\u003ediscrete state 
filter\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov092023/","tags":null,"title":"SU-CS238 NOV092023"},{"categories":null,"contents":"Key Sequence Notation New Concepts belief filters Kalman Filter Particle Filter POMDP belief-state MDP conditional plan alpha vector Important Results / Claims conditional plan evaluation Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief/\"\u003ebelief\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilters/\"\u003efilter\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilters/#kalman-filter\"\u003eKalman Filter\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfilters/#particle-filter\"\u003eParticle Filter\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / 
Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_plan/#conditional-plan--kbhconditional-plan-dot-md--evaluation\"\u003econditional plan evaluation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov142023/","tags":null,"title":"SU-CS238 NOV142023"},{"categories":null,"contents":"Key Sequence Notation New Concepts belief-state MDP optimal value function for POMDP with alpha vector one-step lookahead in POMDP alpha vector pruning Important Results / Claims POMDP value-iteration Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbelief_state_mdp/\"\u003ebelief-state MDP\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#with\"\u003eoptimal value function for POMDP with alpha vector\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/#value-iteration\"\u003ePOMDP value-iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting 
Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov162023/","tags":null,"title":"SU-CS238 NOV162023"},{"categories":null,"contents":"Key Sequence Notation New Concepts POMDP Approximation lower bound BAWS blind lower bound Point-Based Value Iteration Randomized PBVI Online POMDP Methods Important Results / Claims point selection Questions how do you use alpha-vectors in Rollout with Lookahead? forward search? in a sense: how do you get the value function what is best-action worst-state reward converting into alpha vectors? bandit quiz question Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpomdp_approximation/\"\u003ePOMDP Approximation lower bound\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhworst_possible_state/\"\u003eBAWS\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhblind_lower_bound/\"\u003eblind lower bound\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/\"\u003ePoint-Based Value Iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_based_value_iteration/#randomized-id-ab745ce0-4282-44bc-91ab-823458060df7-pbvi\"\u003eRandomized PBVI\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_pomdp_methods/\"\u003eOnline POMDP Methods\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpoint_selection/\"\u003epoint selection\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehow do you use alpha-vectors in \u003ca 
href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e? forward search? in a sense: how do you get the value function\u003c/li\u003e\n\u003cli\u003ewhat is \u003ca href=\"/posts/kbhworst_possible_state/\"\u003ebest-action worst-state\u003c/a\u003e reward converting into alpha vectors?\u003c/li\u003e\n\u003cli\u003ebandit quiz question\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov282023/","tags":null,"title":"SU-CS238 NOV282023"},{"categories":null,"contents":"Key Sequence Notation New Concepts controller finite state controller multiagent reasoning prisoner\u0026rsquo;s dilemma traveler\u0026rsquo;s dilemma response model Dominant Strategy Equilibrium Nash Equilibrium Important Results / Claims finite state controller evaluation joint policy agent utility Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller/\"\u003econtroller\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller/#finite-state-controller\"\u003efinite state controller\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/\"\u003emultiagent reasoning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#prisoner-s-dilemma\"\u003eprisoner\u0026rsquo;s dilemma\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#traveler-s-dilemma\"\u003etraveler\u0026rsquo;s dilemma\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#response-model\"\u003eresponse 
model\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#dominant-strategy-equilibrium\"\u003eDominant Strategy Equilibrium\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#nash-equilibrium\"\u003eNash Equilibrium\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcontroller/#finite-state-controller-evaluation\"\u003efinite state controller evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiagent_reasoning/#joint-policy-agent-utility\"\u003ejoint policy agent utility\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_nov302023/","tags":null,"title":"SU-CS238 NOV302023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Bayes Net + conditional independence Baysian Net inference factor factor operations factor product factor marginalization factor conditioning Naive Bayes + inference with Naive Bayes Direct Sampling and Likelihood Weighted Sampling Important Results / Claims parameter checking for conditional independence sum-product elimination Direct Sampling a Baysian Network Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/\"\u003eBayes Net\u003c/a\u003e + \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinference/\"\u003eBaysian Net inference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/\"\u003efactor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-operations\"\u003efactor operations\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-product\"\u003efactor product\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-marginalization\"\u003efactor marginalization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfactor/#factor-conditioning\"\u003efactor conditioning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnaive_bayes/\"\u003eNaive Bayes\u003c/a\u003e + \u003ca href=\"/posts/kbhnaive_bayes/#inference--kbhinference-dot-md--with-naive-bayes--kbhnaive-bayes-dot-md\"\u003einference with Naive Bayes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e and \u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003echecking for conditional independence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference/#sum-product-elimination\"\u003esum-product elimination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/#direct-sampling-a-id-5eaa4b96-cbc2-4811-91c7-88ea2e164fc3-baysian-network\"\u003eDirect Sampling a Baysian Network\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct032023/","tags":null,"title":"SU-CS238 OCT032023"},{"categories":null,"contents":"Key Sequence Notation New Concepts inference Inference for Gaussian Models approximate inference Direct Sampling Likelihood Weighted Sampling parameter learning Maximum Likelihood Parameter Learning Baysian Parameter Learning Dirichlet Distribution Important Results / Claims \u0026ldquo;there is usually a tradeoff between the computational time you are willing to devote, and th Bayesian Learning on Binary Distributions Questions for Likelihood Weighted Sampling, where do the conditional probability values come from Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference/\"\u003einference\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinference_for_gaussian_models/\"\u003eInference for Gaussian Models\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhapproximate_inference/\"\u003eapproximate inference\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/\"\u003eDirect Sampling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter_learning/\"\u003eparameter learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmaximum_likelihood_parameter_learning/\"\u003eMaximum Likelihood Parameter Learning\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhbaysian_parameter_learning/\"\u003eBaysian Parameter Learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#dirichlet-distribution\"\u003eDirichlet Distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;there is usually a tradeoff between the computational time you are willing to devote, and th\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#bayesian-parameter-learning-on-binary-distributions\"\u003eBayesian Learning on Binary Distributions\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhdirect_sampling/#likelihood-weighted-sampling\"\u003eLikelihood Weighted Sampling\u003c/a\u003e, where do the conditional probability values come from\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct052023/","tags":null,"title":"SU-CS238 OCT052023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Beta Distribution Important Results / Claims Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_parameter_learning/#non-uniform-prior\"\u003eBeta Distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting 
Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct102023/","tags":null,"title":"SU-CS238 OCT102023"},{"categories":null,"contents":"New Concepts structure learning K2 Algorithm Local Search Markov Equivalence Classes Partially Directed Graph Search utility and Rational Preferences lottery utility elicitation expected utility risk aversion decision network utility functions quadratic utility exponential utility power utility and its special case log utility maximum expected utility principle value of information Important Results / Claims checking Markov Equivalence utility of Rational Preference von Neumann and Morgenstern Axioms: Axioms for checking rationality never have a utility function that\u0026rsquo;s infinite utility of a lottery process of observation selection Questions ","html":"\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/\"\u003estructure learning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/#k2-algorithm\"\u003eK2 Algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/#local-search\"\u003eLocal Search\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_equivalence_classes/\"\u003eMarkov Equivalence Classes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhstructure_learning/#partially-directed-graph-search\"\u003ePartially Directed Graph Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e and \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preferences\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility 
elicitation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#expected-utility\"\u003eexpected utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision network\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_function/#quadratic-utility\"\u003equadratic utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_function/#exponential-utility\"\u003eexponential utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e and its special case \u003ca href=\"/posts/kbhpower_utility/#log-utility\"\u003elog utility\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003emaximum expected utility principle\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_of_information/\"\u003evalue of information\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_equivalence_classes/#checking-markov-equivalence--kbhmarkov-equivalence-classes-dot-md\"\u003echecking Markov Equivalence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#utility-of-id-3ed9f842-7fa3-4244-ab09-58b088b9c27e-rational-preference\"\u003eutility of Rational Preference\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrational_preference/#von-neumann-and-morgenstern-axioms\"\u003evon Neumann and Morgenstern 
Axioms\u003c/a\u003e: Axioms for checking rationality\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhutility_theory/#never-have-a-utility-function-that-s-infinite\"\u003enever have a utility function that\u0026rsquo;s infinite\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlottery/#utility-of-a-lottery\"\u003eutility of a lottery\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_of_information/#process-of-observation-selection\"\u003eprocess of observation selection\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct122023/","tags":null,"title":"SU-CS238 OCT122023"},{"categories":null,"contents":"Notation \u0026ldquo;state variables\u0026rdquo; represent the contents of the state; \u0026ldquo;state\u0026rdquo; is a complete assignment of state variables.\nNew Concepts Markov Decision Process stationary Markov Decision Process finite-horizon models + infinite-horizon models policy stationary policy optimal policy and optimal value function policy evaluation and policy iteration lookahead equation Bellman Expectation Equation action-value function and value-function policy advantage function Important Results / Claims policy evaluation methods solving for the utility of a policy finding the best policy policy iteration Questions why is it d seperated Interesting Factoids ","html":"\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;state variables\u0026rdquo; represent the contents of the state; \u0026ldquo;state\u0026rdquo; is a complete assignment of state variables.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhmarkov_decision_process/#stationary-markov-decision-process--kbhmarkov-decision-process-dot-md\"\u003estationary Markov Decision Process\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/#finite-horizon-models\"\u003efinite-horizon models\u003c/a\u003e + \u003ca href=\"/posts/kbhmarkov_decision_process/#infinite-horizon-models\"\u003einfinite-horizon models\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy/#stationary-policy--kbhpolicy-dot-md\"\u003estationary policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal policy\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal value function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#lookahead-equation\"\u003elookahead equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#action-value-function\"\u003eaction-value function\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadvantage_function/\"\u003eadvantage function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e methods\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#solving-for-the-utility-of-a-policy\"\u003esolving for the utility of a policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#value-function-policy\"\u003efinding the best policy\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_iteration/\"\u003epolicy iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy is it d seperated\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct172023/","tags":null,"title":"SU-CS238 OCT172023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Markov Decision Process value iteration Bellman Residual for continuous state spaces: Approximate Value Function use global approximation or local approximation methods Important Results / Claims policy and utility creating a good utility function / policy from instantaneous rewards: either policy evaluation or value iteration creating a policy from a utility function: value-function policy (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;) calculating the utility function a policy currently uses: use policy evaluation kernel smoothing value iteration, in practice Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmarkov_decision_process/\"\u003eMarkov Decision Process\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue 
iteration\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration/#bellman-residual\"\u003eBellman Residual\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efor \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuous\u003c/a\u003e state spaces: \u003ca href=\"/posts/kbhapproximate_value_function/\"\u003eApproximate Value Function\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003euse \u003ca href=\"/posts/kbhapproximate_value_function/#global-approximation\"\u003eglobal approximation\u003c/a\u003e or \u003ca href=\"/posts/kbhapproximate_value_function/#local-approximation\"\u003elocal approximation\u003c/a\u003e methods\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003epolicy and utility\n\u003cul\u003e\n\u003cli\u003ecreating a good \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e / \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from instantaneous rewards: either \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003evalue iteration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ecreating a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e from a \u003ca href=\"/posts/kbhutility_function/\"\u003eutility function\u003c/a\u003e: \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003evalue-function policy\u003c/a\u003e (\u0026ldquo;choose the policy that takes the best valued action\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003ecalculating the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility function\u003c/a\u003e a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e currently uses: use \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy 
evaluation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhkernel_smoothing/\"\u003ekernel smoothing\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvalue_iteration_in_practice/\"\u003evalue iteration, in practice\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct192023/","tags":null,"title":"SU-CS238 OCT192023"},{"categories":null,"contents":"Key Sequence Notation New Concepts global approximation online planning Rollout with Lookahead Forward Search Branch and Bound monte-carlo tree search open loop planning Important Results / Claims Rollout Policy monte-carlo exploration open-loop planning vs close-loop planning Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhapproximate_value_function/#global-approximation\"\u003eglobal approximation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/\"\u003eonline planning\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/\"\u003eRollout with Lookahead\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhforward_search/\"\u003eForward Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbranch_and_bound/\"\u003eBranch and Bound\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/\"\u003emonte-carlo tree search\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/#open-loop-planning\"\u003eopen loop 
planning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhrollout_with_lookahead/#rollout-policy\"\u003eRollout Policy\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmonte_carlo_tree_search/#monte-carlo-exploration\"\u003emonte-carlo exploration\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhonline_planning/#open-loop-planning-vs-close-loop-planning\"\u003eopen-loop planning vs close-loop planning\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct242023/","tags":null,"title":"SU-CS238 OCT242023"},{"categories":null,"contents":"Big day. Policy Gradient.\nNew Concepts Approximate Policy Evaluation and Roll-out utility Policy Optimization methods: Local Policy Search (aka Hooke-Jeeves Policy Search) Genetic Policy Search Cross Entropy Method Policy Gradient, Regression Gradient and Likelyhood Ratio Gradient Reward-to-Go Important Results / Claims monte-carlo policy evaluation Finite-Difference Gradient Estimation Linear Regression Gradient Estimate Questions for next office hour ","html":"\u003cp\u003eBig day. 
\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#approximate-policy-evaluation\"\u003eApproximate Policy Evaluation\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_evaluation/#roll-out-utility\"\u003eRoll-out utility\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_optimization/\"\u003ePolicy Optimization\u003c/a\u003e methods:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eLocal Policy Search\u003c/a\u003e (aka \u003ca href=\"/posts/kbhlocal_policy_search/\"\u003eHooke-Jeeves Policy Search\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgenetic_policy_search/\"\u003eGenetic Policy Search\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcross_entropy_method/#cross-entropy-method\"\u003eCross Entropy Method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/\"\u003ePolicy Gradient\u003c/a\u003e, \u003ca href=\"/posts/kbhpolicy_gradient/#linear-regression-gradient-estimate\"\u003eRegression Gradient\u003c/a\u003e and \u003ca href=\"/posts/kbhpolicy_gradient/#likelyhood-ratio-gradient--kbhpolicy-gradient-dot-md\"\u003eLikelyhood Ratio Gradient\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#reward-to-go\"\u003eReward-to-Go\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_evaluation/#monte-carlo-policy-evaluation\"\u003emonte-carlo policy evaluation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#finite-difference-gradient-estimation\"\u003eFinite-Difference Gradient 
Estimation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#linear-regression-gradient-estimate\"\u003eLinear Regression Gradient Estimate\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-next-office-hour\"\u003eQuestions for next office hour\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct262023/","tags":null,"title":"SU-CS238 OCT262023"},{"categories":null,"contents":"Key Sequence Notation New Concepts Restricted Gradient and Actor-Critic Exploration and Exploitation with Binary Bandit Undirected Exploration (Explore-then-commit) Directed Exploration (Softmax Method, Quantile Exploration, UCB 1, Posterior Sampling) Important Results / Claims Bayesian Model Estimation and greedy action epsilon-greedy exploration with decay Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpolicy_gradient/#restricted-gradient\"\u003eRestricted Gradient\u003c/a\u003e and \u003ca href=\"/posts/kbhactor_critic/\"\u003eActor-Critic\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexploration_and_exploitation/\"\u003eExploration and Exploitation\u003c/a\u003e with \u003ca href=\"/posts/kbhexploration_and_exploitation/#binary-bandit\"\u003eBinary Bandit\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/\"\u003eUndirected Exploration\u003c/a\u003e (\u003ca href=\"/posts/kbhundirected_exploration/#explore-then-commit\"\u003eExplore-then-commit\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdirected_exploration/\"\u003eDirected Exploration\u003c/a\u003e (\u003ca href=\"/posts/kbhdirected_exploration/#softmax-method\"\u003eSoftmax Method\u003c/a\u003e, \u003ca 
href=\"/posts/kbhdirected_exploration/#quantile-exploration\"\u003eQuantile Exploration\u003c/a\u003e, \u003ca href=\"/posts/kbhdirected_exploration/#ucb-1\"\u003eUCB 1\u003c/a\u003e, \u003ca href=\"/posts/kbhdirected_exploration/#posterior-sampling\"\u003ePosterior Sampling\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhexploration_and_exploitation/#bayesian-model-estimation\"\u003eBayesian Model Estimation\u003c/a\u003e and \u003ca href=\"/posts/kbhexploration_and_exploitation/#bayesian-model-estimation\"\u003egreedy action\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhundirected_exploration/#epsilon-greedy-exploration-with-decay\"\u003eepsilon-greedy exploration with decay\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_oct212023/","tags":null,"title":"SU-CS238 OCT312023"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_q0q3/","tags":null,"title":"SU-CS238 Q0Q3"},{"categories":null,"contents":"See Double Envelope Problem\nKey Sequence we introduced Decision Making, models of decision making and gave some examples we introduced different types of uncertainty in the reading, we introduced the Course Outline New Definitions Decision Making uncertainty Questions for Jana how are planning methods different from explicit programming methods? 
\u0026ldquo;Sequential Decision Process\u0026rdquo; Partially observable Markov decision process (POMDP) ","html":"\u003cp\u003eSee \u003ca href=\"/posts/kbhdouble_envelope_problem/#double-envelope-problem\"\u003eDouble Envelope Problem\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe introduced \u003ca href=\"/posts/kbhdecision_making/\"\u003eDecision Making\u003c/a\u003e, \u003ca href=\"/posts/kbhdecision_making/#id-9075c44f-4b26-4f5a-9236-bc7a5c10f4ee-decision-making-methods\"\u003emodels of decision making\u003c/a\u003e and gave some examples\u003c/li\u003e\n\u003cli\u003ewe introduced different \u003ca href=\"/posts/kbhuncertainty/\"\u003etypes of uncertainty\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ein the reading, we introduced the \u003ca href=\"/posts/kbhdecision_making_index/#course-outline\"\u003eCourse Outline\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_making/\"\u003eDecision Making\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehow are \u003ca href=\"/posts/kbhplanning/\"\u003eplanning\u003c/a\u003e methods different from \u003ca href=\"/posts/kbhexplicit_programming/\"\u003eexplicit programming\u003c/a\u003e methods?\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Sequential Decision Process\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ePartially observable Markov decision process (POMDP)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_sep262023/","tags":null,"title":"SU-CS238 SEP262023"},{"categories":null,"contents":"definitions probability and random variable uniform distribution Gaussian distribution 
probability distributions conditional probability Bayes Theorem results axiom of probability support = range\n","html":"\u003ch2 id=\"definitions\"\u003edefinitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e and \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distributions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#bayes-theorem\"\u003eBayes Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results\"\u003eresults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003esupport = range\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_sep272023/","tags":null,"title":"SU-CS238 SEP272023"},{"categories":null,"contents":"Notation shorthand for probability Take\n\\begin{equation} P(X = 1) = \\frac{1}{6} \\end{equation}\nWe can write this in short hand like:\n\\begin{equation} P(X^{1}) = P(X=1) = \\frac{1}{6} \\end{equation}\n\\(P\\) vs \\(p\\) Upper case \\(P\\) for probability mass function (one shot chance), lower case \\(p\\) for probability density functions (integral)\nNew Concepts degrees of belief and describing them 
using the language of probability discrete distribution and continuous distribution and joint probability distribution important tools: parameters of a distribution probability density functions cumulative distribution function quantile function fun probability distributions Gaussian distribution + Truncated Gaussian distribution uniform distribution conditional probability and Bayes Theorem unique models that leverage conditional probability conditional Gaussian models linear gaussian model conditional linear Gaussian models: use your big brain to add up 1) and 2), with continuous random variables \\(X, Y\\), and a discrete \\(Z\\), where \\(p(x \\mid y, z)\\). sigmoid model Baysian Network and conditional independence d seperation Important Results / Claims history and impact of decision making law of total probability fun axioms belief axioms: universal comparability transitivity probability axioms: axiom of probability Methods of Compressing the Parameters of a Distribution assuming independence using a decision tree checking for conditional independence Questions ","html":"\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch3 id=\"shorthand-for-probability\"\u003eshorthand for probability\u003c/h3\u003e\n\u003cp\u003eTake\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X = 1) = \\frac{1}{6}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can write this in short hand like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(X^{1}) = P(X=1) = \\frac{1}{6}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"p-vs-p\"\u003e\\(P\\) vs \\(p\\)\u003c/h3\u003e\n\u003cp\u003eUpper case \\(P\\) for \u003ca href=\"/posts/kbhprobability_distributions/#probability-mass-function\"\u003eprobability mass function\u003c/a\u003e (one shot chance), lower case \\(p\\) for \u003ca href=\"/posts/kbhprobability_distributions/#probability-density-functions\"\u003eprobability density functions\u003c/a\u003e (integral)\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew 
Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_theory/\"\u003edegrees of belief\u003c/a\u003e and \u003ca href=\"/posts/kbhprobability_theory/#language-of-probability\"\u003edescribing them using the language of probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdiscrete_distribution/\"\u003ediscrete distribution\u003c/a\u003e and \u003ca href=\"/posts/kbhcontinuous_distribution/\"\u003econtinuous distribution\u003c/a\u003e and \u003ca href=\"/posts/kbhjoint_probability_distribution/\"\u003ejoint probability distribution\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eimportant tools\u003c/strong\u003e:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhparameter/\"\u003eparameters of a distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#probability-density-functions\"\u003eprobability density functions\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#cumulative-distribution-function\"\u003ecumulative distribution function\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#quantile-function\"\u003equantile function\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003efun \u003ca href=\"/posts/kbhprobability_distributions/\"\u003eprobability distributions\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#gaussian-distribution\"\u003eGaussian distribution\u003c/a\u003e + \u003ca href=\"/posts/kbhprobability_distributions/#truncated-gaussian-distribution\"\u003eTruncated Gaussian distribution\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#uniform-distribution\"\u003euniform distribution\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e and \u003ca href=\"/posts/kbhbayes_theorem/\"\u003eBayes Theorem\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eunique models that leverage \u003ca href=\"/posts/kbhprobability/#conditional-probability\"\u003econditional probability\u003c/a\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhconditional_gaussian_models/\"\u003econditional Gaussian models\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlinear_gaussian_model/\"\u003elinear gaussian model\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003econditional linear Gaussian models: use your big brain to add up 1) and 2), with continuous \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003es \\(X, Y\\), and a discrete \\(Z\\), where \\(p(x \\mid y, z)\\).\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid model\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/\"\u003eBaysian Network\u003c/a\u003e and \u003ca href=\"/posts/kbhbaysian_network/#conditional-independence\"\u003econditional independence\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003ed seperation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdecision_making_history/\"\u003ehistory and impact of decision making\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#law-of-total-probability\"\u003elaw of total probability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003efun axioms\n\u003cul\u003e\n\u003cli\u003ebelief axioms:\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhprobability_theory/#universal-comparability\"\u003euniversal comparability\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_theory/#transitivity\"\u003etransitivity\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eprobability axioms:\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability/#axiom-of-probability\"\u003eaxiom of probability\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprobability_distributions/#methods-of-compressing-the-parameters-of-a-distribution\"\u003eMethods of Compressing the Parameters of a Distribution\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eassuming \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eusing a \u003ca href=\"/posts/kbhprobability_distributions/#decision-tree\"\u003edecision tree\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbaysian_network/#checking-for-conditional-independence\"\u003echecking for conditional independence\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs238_sep282023/","tags":null,"title":"SU-CS238 SEP282023"},{"categories":null,"contents":"Tips ","html":"\u003ch2 id=\"tips\"\u003eTips\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs239_jan092023/","tags":null,"title":"SU-CS239 JAN092023"},{"categories":null,"contents":"Of course I\u0026rsquo;m not committing my midterm.\n","html":"\u003cp\u003eOf course I\u0026rsquo;m not committing my midterm.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_cs239_midterm_1/","tags":null,"title":"SU-CS239 Midterm 
1"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math109_problem_set_1/","tags":null,"title":"SU-MATH109 Problem Set 1"},{"categories":null,"contents":"Key Sequence New Definitions division + division algorithm greatest common divisor prime Euclidean Algorithm Results and Their Proofs principle of induction primes there are infinitely many primes division and greatest common divisor division algorithm properties of the gcd Euclidean Algorithm and some euclid lemma linked below in fundamental theorem of arithmetic Questions for Jana Why does something being prime require that \\(p\u0026gt;1\\). that is, why is \\(1\\) not defined as prime? Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e + \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheuclidean_algorithm/\"\u003eEuclidean Algorithm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eprimes\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/#there-are-infinitely-many-primes\"\u003ethere are infinitely many primes\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e and \u003ca 
href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgreatest_common_divisor/#properties-of-the-id-0ccf3a9f-e788-4485-b49a-b54906e52fe4-gcd\"\u003eproperties of the gcd\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheuclidean_algorithm/\"\u003eEuclidean Algorithm\u003c/a\u003e and some euclid lemma linked below in \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/\"\u003efundamental theorem of arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhy does something being \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e require that \\(p\u0026gt;1\\). that is, why is \\(1\\) not defined as \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math109_sep272023/","tags":null,"title":"SU-MATH109 SEP272023"},{"categories":null,"contents":"\\begin{equation} \\mathbb{N} = \\{0, 1,2,3 \\dots \\} \\end{equation}\nthe set of natural numbers. start from 0.\n\\begin{equation} \\mathbb{Z} = \\{\\dots, -2, -1, 0,1,2, \\dots \\} \\end{equation}\nthe set of integers. 
natural language and their negatives\nKey Sequence first, we built the ground work of principle of induction in order to construct the WOP we defined division, and formalized the algorithm for doing so we then defined the greatest common divisor, and the fact that greatest common divisor is a linear combination we then constructed the idea of prime numbers, coprimes, and showed that There are infinitely many primes Finally, we used yet another lemma from Euler to build the fundamental theorem of arithmetic New Definitions division division algorithm greatest common divisor prime coprime Results and Their Proofs principle of induction well-ordering principle There are infinitely many primes division algorithm greatest common divisor is a linear combination fundamental theorem of arithmetic and its factorization motivator lemma ","html":"\u003cp\u003e\\begin{equation}\n\\mathbb{N} = \\{0, 1,2,3 \\dots \\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe set of natural numbers. \u003cstrong\u003estart from 0\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathbb{Z} = \\{\\dots, -2, -1, 0,1,2, \\dots \\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe set of integers. 
natural language and their negatives\u003c/p\u003e\n\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003efirst, we built the ground work of \u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e in order to construct the \u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003eWOP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe defined \u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e, and formalized \u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003ethe algorithm for doing so\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then defined the \u003ca href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e, and the fact that \u003ca href=\"/posts/kbhgreatest_common_divisor/#greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003ewe then constructed the idea of \u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e numbers, \u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003es, and showed that \u003ca href=\"/posts/kbhprime/#there-are-infinitely-many-primes\"\u003eThere are infinitely many primes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eFinally, we used \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/#factorization-motivator\"\u003eyet another lemma from Euler\u003c/a\u003e to build the \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/\"\u003efundamental theorem of arithmetic\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/\"\u003edivision\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhgreatest_common_divisor/\"\u003egreatest common divisor\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/\"\u003eprime\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/#coprime\"\u003ecoprime\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprinciple_of_induction/\"\u003eprinciple of induction\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprinciple_of_induction/#well-ordering-principle\"\u003ewell-ordering principle\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhprime/#there-are-infinitely-many-primes\"\u003eThere are infinitely many primes\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdivide/#division-algorithm\"\u003edivision algorithm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgreatest_common_divisor/#greatest-common-divisor-is-a-linear-combination\"\u003egreatest common divisor is a linear combination\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/\"\u003efundamental theorem of arithmetic\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003eand its \u003ca href=\"/posts/kbhfundamental_theorem_of_arithmetic/#factorization-motivator\"\u003efactorization motivator lemma\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math109_sep272023_exp/","tags":null,"title":"SU-MATH109 SEP272023"},{"categories":null,"contents":"Key Sequence New Definitions modular arithmetic + basic modular arithmetic operations permutation groups left cancellation, right cancellation Results and Their Proofs Chinese Remainder Theorem Questions for Jana why is it 
that adding all the digits work for \\(\\ \\text{mod}\\ 9\\). I still don\u0026rsquo;t get it. in re Chinese Remainder Theorem: is there any case in which \\(a\\ \\text{mod}\\ b\\) is not unique? Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"new-definitions\"\u003eNew Definitions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodular_arithmetic/\"\u003emodular arithmetic\u003c/a\u003e + \u003ca href=\"/posts/kbhmodular_arithmetic/#basic-id-85873ac2-491c-4cef-916b-2b409fac0b47-modular-arithmetic-operations\"\u003ebasic modular arithmetic operations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpermutation/\"\u003epermutation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003es\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhgroup/\"\u003eleft cancellation\u003c/a\u003e, \u003ca href=\"/posts/kbhgroup/\"\u003eright cancellation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results-and-their-proofs\"\u003eResults and Their Proofs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmodular_arithmetic/#chinese-remainder-theorem\"\u003eChinese Remainder Theorem\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-jana\"\u003eQuestions for Jana\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy is it that adding all the digits work for \\(\\ \\text{mod}\\ 9\\). 
I still don\u0026rsquo;t get it.\u003c/li\u003e\n\u003cli\u003ein re \u003ca href=\"/posts/kbhmodular_arithmetic/#chinese-remainder-theorem\"\u003eChinese Remainder Theorem\u003c/a\u003e: is there any case in which \\(a\\ \\text{mod}\\ b\\) is \u003cstrong\u003enot\u003c/strong\u003e unique?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math109_sep292023/","tags":null,"title":"SU-MATH109 SEP292023"},{"categories":null,"contents":"2nd order linear inhomogeneous: non-homogeneous linear differential equation\n","html":"\u003cp\u003e2nd order linear inhomogeneous: \u003ca href=\"/posts/kbhnon_homogeneous_linear_differential_equation/\"\u003enon-homogeneous linear differential equation\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb022024/","tags":null,"title":"SU-MATH53 FEB022024"},{"categories":null,"contents":"Sensitivity to Initial Conditions + Parameters.\nODE Existence and Uniqueness We can recast all high order systems into a first-order vector-valued system. 
So, for any system:\n\\begin{equation} x\u0026rsquo; = g(t,x, a) \\end{equation}\nif \\(g\\) is differentiable across \\(t,x\\) and \\(a\\), the IVP given by \\(x\u0026rsquo; = g(t,x,a)\\) and \\(x(0) = x_0\\), has the property has that:\nthe ODE has a solution \\(x(t_0) = x_0\\) for any \\(t_0\\), and any two solutions on the interval coincide as the same solution The only way for a solution to fail to extend temporally is due to the bounds\u0026rsquo; \\(||x(t)||\\) becomes unbounded as \\(t\\) approaches the endpoints On any interval \\(t_0 \\leq t \\leq T\\) the solution \\(y_{a,y_0}\\) depends continuously on \\(a, y_0\\), \u0026ldquo;if I look at my solution sometime later, it would be a non-discontinuous change on the choice of initial condition\u0026rdquo; Example Let\u0026rsquo;s consider:\n\\begin{equation} y\u0026rsquo; = -y \\end{equation}\nand take the initial value at:\n\\begin{equation} y(0) = y_0 \\end{equation}\nwe have a solution such that:\n\\begin{equation} y(t) = y_0e^{-t} \\end{equation}\nwhich, at \\(y(10)\\), we obtain:\n\\begin{equation} y(10) = y_0e^{-10} \\end{equation}\nWhich brings the question: \u0026ldquo;how close should \\(y_0\u0026rsquo;\\) be such that \\(|y\u0026rsquo;(10) - y(10)| \\leq 10^{-5}\\)?\u0026rdquo;\nWe can recast this as:\n\\begin{equation} |y_0\u0026rsquo; e^{-10} - y_0 e^{-10} | \u0026lt; 10^{-5} \\end{equation}\nmeaning:\n\\begin{equation} |y_0\u0026rsquo; - y_0| \u0026lt; \\frac{10^{-5}}{e^{-10}} \\approx \\frac{1}{4} \\end{equation}\nIf you flip it over, you will have extreme instability.\nExample \\begin{equation} \\begin{cases} \\dv{x}{t} = a(y-x) \\\\ \\dv{y}{t} = (b-z)x-y \\\\ \\dv{z}{t} = xy-cz \\end{cases} \\end{equation}\nthis seems innocuous, but no. 
If we set our parameters to be weirdly specific values:\n\\begin{equation} \\begin{cases} a \\approx 10 \\\\ b \\approx 28 \\\\ c \\approx \\frac{8}{3} \\end{cases} \\end{equation}\nThese attractors spins across two separate spheres, and the number of times the system spins around a particular area is unknown. It is called\u0026hellip;\nDeterministic Chaos Deterministic Chaos is a hard problems which there is a bounded region in which the behavior happens, but the system is bounded.\nAnother Example Logistic expression:\n\\begin{equation} y\u0026rsquo; = ry\\qty(1-\\frac{y}{k}) -h \\end{equation}\nYou can get solutions of this form for some carrying capacity \\(k\\) and a constant rate of removal \\(h\\). You can observe that we can build a phase line of this system, and observe. This behavior is called bifurcation: when some \\(h\\) is high enough, our whole system dies out.\n\u0026ldquo;if the finish rate is too high over other parameters, you just die out.\u0026rdquo;\nYou can also draw a plot, where the \\(x\\) axis is some parameter \\(p\\), and phase plot can be drawn sideways.\nCauchy Stability Suppose \\(x(t)\\) satisfies:\n\\begin{equation} x\u0026rsquo;(t) = g(t,x(t)), x(t_0) = x_0 \\end{equation}\nFor some interval \\(t \\in I\\) where the IVP is satisfied; for any time interval \\([t_1, t_2]\\) inside \\(I\\) and any \\(x_0\u0026rsquo;\\) near to \\(x_0\\), the associated \\(x(t_0) = x_0\u0026rsquo;\\) should exist for the same interval \\([t_1, t_2]\\) and \\(|| x\u0026rsquo;(t) - x(t) ||\\) is small for \\(t\\).\nThis extends for not just initial conditions, but also parameters as well. 
For function parameters \\(a_0\\) and \\(a_0\u0026rsquo;\\).\nNewtonian 3-body problem \\begin{equation} m_1 x_1\u0026rsquo;\u0026rsquo; = \\frac{-Gm_{1}m_2}{|x_1-x_2|^{2}}- \\frac{Gm_{1}m_3}{|x_1-x_3|^{2}} \\end{equation}\nyou will note that this expression has no close form solution, so you can\u0026rsquo;t do the Cauchy Stability thing to it.\n","html":"\u003cp\u003eSensitivity to Initial Conditions + Parameters.\u003c/p\u003e\n\u003ch2 id=\"ode-existence-and-uniqueness\"\u003eODE Existence and Uniqueness\u003c/h2\u003e\n\u003cp\u003eWe can recast all high order systems into a first-order vector-valued system. So, for any system:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = g(t,x, a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(g\\) is differentiable across \\(t,x\\) and \\(a\\), the \u003ca href=\"/posts/kbhinitial_value_problems/\"\u003eIVP\u003c/a\u003e given by \\(x\u0026rsquo; = g(t,x,a)\\) and \\(x(0) = x_0\\), has the property has that:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe ODE has a solution \\(x(t_0) = x_0\\) for any \\(t_0\\), and any two solutions on the interval coincide as the same solution\u003c/li\u003e\n\u003cli\u003eThe only way for a solution to fail to extend temporally is due to the bounds\u0026rsquo; \\(||x(t)||\\) becomes unbounded as \\(t\\) approaches the endpoints\u003c/li\u003e\n\u003cli\u003eOn any interval \\(t_0 \\leq t \\leq T\\) the solution \\(y_{a,y_0}\\) depends continuously on \\(a, y_0\\), \u0026ldquo;if I look at my solution sometime later, it would be a non-discontinuous change on the choice of initial condition\u0026rdquo;\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = -y\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand take the initial value at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(0) = y_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe 
have a solution such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(t) = y_0e^{-t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich, at \\(y(10)\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny(10) = y_0e^{-10}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich brings the question: \u0026ldquo;how close should \\(y_0\u0026rsquo;\\) be such that \\(|y\u0026rsquo;(10) - y(10)| \\leq 10^{-5}\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe can recast this as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y_0\u0026rsquo; e^{-10} - y_0 e^{-10} | \u0026lt; 10^{-5}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|y_0\u0026rsquo; - y_0| \u0026lt; \\frac{10^{-5}}{e^{-10}} \\approx \\frac{1}{4}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf you flip it over, you will have extreme instability.\u003c/p\u003e\n\u003ch3 id=\"example\"\u003eExample\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\dv{x}{t} = a(y-x) \\\\\n\\dv{y}{t} = (b-z)x-y \\\\\n\\dv{z}{t} = xy-cz\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis seems innocuous, but no. If we set our parameters to be weirdly specific values:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\na \\approx 10 \\\\\nb \\approx 28 \\\\\nc \\approx \\frac{8}{3}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThese attractors spins across two separate spheres, and the number of times the system spins around a particular area is unknown. 
It is called\u0026hellip;\u003c/p\u003e\n\u003ch4 id=\"deterministic-chaos\"\u003eDeterministic Chaos\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#deterministic-chaos\"\u003eDeterministic Chaos\u003c/a\u003e is a hard problems which there is a bounded region in which the behavior happens, but the system is bounded.\u003c/p\u003e\n\u003ch3 id=\"another-example\"\u003eAnother Example\u003c/h3\u003e\n\u003cp\u003eLogistic expression:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = ry\\qty(1-\\frac{y}{k}) -h\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou can get solutions of this form for some carrying capacity \\(k\\) and a constant rate of removal \\(h\\). You can observe that we can build a \u003ca href=\"/posts/kbhphase_line/\"\u003ephase line\u003c/a\u003e of this system, and observe. This behavior is called \u003ca href=\"#ode-existence-and-uniqueness\"\u003ebifurcation\u003c/a\u003e: when some \\(h\\) is high enough, our whole system dies out.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;if the finish rate is too high over other parameters, you just die out.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eYou can also draw a plot, where the \\(x\\) axis is some parameter \\(p\\), and phase plot can be drawn sideways.\u003c/p\u003e\n\u003ch2 id=\"cauchy-stability\"\u003eCauchy Stability\u003c/h2\u003e\n\u003cp\u003eSuppose \\(x(t)\\) satisfies:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t) = g(t,x(t)), x(t_0) = x_0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some interval \\(t \\in I\\) where the IVP is satisfied; for any time interval \\([t_1, t_2]\\) inside \\(I\\) and any \\(x_0\u0026rsquo;\\) near to \\(x_0\\), the associated \\(x(t_0) = x_0\u0026rsquo;\\) should exist for the same interval \\([t_1, t_2]\\) and \\(|| x\u0026rsquo;(t) - x(t) ||\\) is small for \\(t\\).\u003c/p\u003e\n\u003cp\u003eThis extends for not just initial conditions, but also parameters as well. 
For function parameters \\(a_0\\) and \\(a_0\u0026rsquo;\\).\u003c/p\u003e\n\u003ch2 id=\"newtonian-3-body-problem\"\u003eNewtonian 3-body problem\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nm_1 x_1\u0026rsquo;\u0026rsquo; = \\frac{-Gm_{1}m_2}{|x_1-x_2|^{2}}- \\frac{Gm_{1}m_3}{|x_1-x_3|^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that this expression has no close form solution, so you can\u0026rsquo;t do the \u003ca href=\"#cauchy-stability\"\u003eCauchy Stability\u003c/a\u003e thing to it.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb052024/","tags":null,"title":"SU-MATH53 FEB052024"},{"categories":null,"contents":"Non-Linear ODE\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhnon_linear_ode/\"\u003eNon-Linear ODE\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb072024/","tags":null,"title":"SU-MATH53 FEB072024"},{"categories":null,"contents":"Still Non-Linear ODE\n","html":"\u003cp\u003eStill \u003ca href=\"/posts/kbhnon_linear_ode/\"\u003eNon-Linear ODE\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb092024/","tags":null,"title":"SU-MATH53 FEB092024"},{"categories":null,"contents":"How would we solve equations like:\n\\begin{equation} \\begin{cases} y\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0 \\\\ y\u0026rsquo;\u0026rsquo; - xy = 0 \\end{cases} \\end{equation}\nTaylor Series Its time to have a blast from the past! 
Taylor Series time.\n\\begin{equation} p_{n}(x) = \\sum_{i=0}^{n} \\frac{f^{(n)}(0) x^{n}}{n!} \\end{equation}\nTaylor\u0026rsquo;s Theorem with Remainder gives us that, at some \\(n\\), \\(|f(x) - p_{n}(x)|\\) is bounded.\n\\begin{equation} |x(t+h) - (x(t) + h x\u0026rsquo;(t))| \\leq Ch \\end{equation}\nTwo constraints:\nneed \\(f^{(n)}\\) to exist infinitely and there\u0026rsquo;s a set of functions that are representable by Taylor Series (even if differentiable; such as \\(e^{-\\frac{1}{|x|}}\\) variable-coefficient ODEs \\begin{equation} \\dv[2]{y}{x} + a(x) \\dv{y}{x} + b(x) y = 0 \\end{equation}\nWe can no longer use any linearizion facilities we have developed before because matrix exponentiation (i.e. the eigenvalue trick) no longer work very well as squaring independent variable within the expression actually have consequences now.\nSolving ODEs via power series if \\(a_0(t), \u0026hellip;, a_{n}(t), f(t)\\) are all convergent power series on an interval centered at \\(t_0\\) then, solutions of \\(a_{n}(t)y^{(n)} + \u0026hellip; a_0(t)y = f(t)\\) is also a convergent power series on an interval at \\(t_{0}\\), provided that \\(a_{n}(t)\\) doesn\u0026rsquo;t go to \\(0\\) on that interval.\nwrite down solutions in terms of \\(y(t) = \\sum_{n=0}^{\\infty} c_{n}(t-t_0)^{n}\\) take enough derivatives of that expression \\(y(t)\\) above solve for \\(c_0\\), \\(c_1\\), etc. by using the fact that \\(c_{n} = \\frac{y^{(n)}(t_0)}{n!}\\) (i.e. plug in the given \\(y^{(n)}\\) from the IVP and solve for \\(c_{j}\\)) plug what you have in terms of derivatives as well as the initial coefficients, and relate to a general power series notice patterns Case Study Take \\(y\u0026rsquo; = 2y\\). 
Consider:\n\\begin{equation} y = \\sum_{n=0}^{\\infty} a_{n}x^{n} \\end{equation}\nWe hope that our solution function can be fit to this form.\nIf we differentiate:\n\\begin{equation} y\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n} n x^{n-1} \\end{equation}\nWe want to line up powers of \\(x\\), which makes life earlier. Because this is an infinite series, and at \\(n=0\\) the whole differentiated term looks like \\(0\\), we can actually just shift \\(n\\) one over and we\u0026rsquo;d be good.\n\\begin{equation} y\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n} \\end{equation}\nWe can now plug the whole thing into our original equation:\n\\begin{equation} \\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n} = \\sum_{n=0}^{\\infty} 2a_{n}x^{n} \\end{equation}\nBecause these are two polynomials that equal, corresponding coefficients should match:\n\\begin{equation} a_{n+1}(n+1) = 2a_{n} \\end{equation}\nSo, we have:\n\\begin{equation} a_{n+1} = \\frac{2a_{n}}{n+1} \\end{equation}\nAt \\(y(0)=a_{0}\\), so we can start the recursion relationship at any initial condition we\u0026rsquo;d like.\nWe notice that the value:\n\\begin{equation} a_{n} = \\frac{2^{n}}{n!} a_{0} \\end{equation}\nsatisfies the system above. 
Which means we can write out the general answer as \\(a_0 \\sum_{i=0}^{\\infty} \\frac{2^{n}x^{n}}{n!}\\)\nCase Study 2 We have:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0 \\end{equation}\nLet\u0026rsquo;s calculate our Taylor series:\n\\begin{equation} y = \\sum_{i=0}^{\\infty} a_{n} x^{n} \\end{equation}\n\\begin{equation} y\u0026rsquo; = \\sum_{i=0}^{\\infty} n a_{n}x^{n-1} \\end{equation}\n\\begin{equation} y\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} n(n-1)a_{n}x^{n-2} \\end{equation}\nReindexing:\n\\begin{equation} y\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} (n+1)(n+1) a_{n+2} x^{n} \\end{equation}\nBecause \\(2xy\u0026rsquo;\\) appears in the equation, we can actually write:\n\\begin{equation} -2xy\u0026rsquo; = -\\sum_{i=0}^{\\infty} 2n a_{n} x^{n} \\end{equation}\nand the final term:\n\\begin{equation} 2\\lambda = \\sum_{n=0}^{\\infty} a_{n} x^{n} \\end{equation}\nAdding the whole thing up, we obtain that:\n\\begin{equation} \\sum_{n=0}^{\\infty} \\qty[(n+2)(n+1) a_{n+2} - 2_{n}a_{n} + 2\\lambda a_{n}] x^{n} = 0 \\end{equation}\nFor each term, we get a recursion relationship in:\n\\begin{equation} a_{n+2} = \\frac{2(n-\\lambda)}{(n+2)(n+1)} a_{n} \\end{equation}\n","html":"\u003cp\u003eHow would we solve equations like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\ny\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0 \\\\\ny\u0026rsquo;\u0026rsquo; - xy = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"taylor-series\"\u003eTaylor Series\u003c/h2\u003e\n\u003cp\u003eIts time to have a blast from the past! 
\u003ca href=\"#taylor-series\"\u003eTaylor Series\u003c/a\u003e time.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np_{n}(x) = \\sum_{i=0}^{n} \\frac{f^{(n)}(0) x^{n}}{n!}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#taylor-series\"\u003eTaylor\u0026rsquo;s Theorem with Remainder\u003c/a\u003e gives us that, at some \\(n\\), \\(|f(x) - p_{n}(x)|\\) is bounded.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|x(t+h) - (x(t) + h x\u0026rsquo;(t))| \\leq Ch\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTwo constraints:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eneed \\(f^{(n)}\\) to exist infinitely\u003c/li\u003e\n\u003cli\u003eand there\u0026rsquo;s a set of functions that are representable by Taylor Series (even if differentiable; such as \\(e^{-\\frac{1}{|x|}}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"variable-coefficient-odes\"\u003evariable-coefficient ODEs\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\dv[2]{y}{x} + a(x) \\dv{y}{x} + b(x) y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can no longer use any linearizion facilities we have developed before because \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e (i.e. 
the eigenvalue trick) no longer work very well as squaring independent variable within the expression actually have consequences now.\u003c/p\u003e\n\u003ch2 id=\"solving-odes-via-power-series--kbhpower-series-o-dot-md\"\u003eSolving ODEs via \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eif \\(a_0(t), \u0026hellip;, a_{n}(t), f(t)\\) are all convergent \u003ca href=\"/posts/kbhpower_series_o/\"\u003epower series\u003c/a\u003e on an interval centered at \\(t_0\\) then, solutions of \\(a_{n}(t)y^{(n)} + \u0026hellip; a_0(t)y = f(t)\\) is also a convergent power series on an interval at \\(t_{0}\\), provided that \\(a_{n}(t)\\) doesn\u0026rsquo;t go to \\(0\\) on that interval.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ewrite down solutions in terms of \\(y(t) = \\sum_{n=0}^{\\infty} c_{n}(t-t_0)^{n}\\)\u003c/li\u003e\n\u003cli\u003etake enough derivatives of that expression \\(y(t)\\) above\u003c/li\u003e\n\u003cli\u003esolve for \\(c_0\\), \\(c_1\\), etc. by using the fact that \\(c_{n} = \\frac{y^{(n)}(t_0)}{n!}\\) (i.e. plug in the given \\(y^{(n)}\\) from the IVP and solve for \\(c_{j}\\))\u003c/li\u003e\n\u003cli\u003eplug what you have in terms of derivatives as well as the initial coefficients, and relate to a general power series\u003c/li\u003e\n\u003cli\u003enotice patterns\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"case-study\"\u003eCase Study\u003c/h3\u003e\n\u003cp\u003eTake \\(y\u0026rsquo; = 2y\\). Consider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\sum_{n=0}^{\\infty} a_{n}x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe hope that our solution function can be fit to this form.\u003c/p\u003e\n\u003cp\u003eIf we differentiate:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n} n x^{n-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to line up powers of \\(x\\), which makes life earlier. 
Because this is an infinite series, and at \\(n=0\\) the whole differentiated term looks like \\(0\\), we can actually just shift \\(n\\) one over and we\u0026rsquo;d be good.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can now plug the whole thing into our original equation:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} a_{n+1} (n+1) x^{n} = \\sum_{n=0}^{\\infty} 2a_{n}x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause these are two polynomials that equal, corresponding coefficients should match:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n+1}(n+1) = 2a_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n+1} = \\frac{2a_{n}}{n+1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt \\(y(0)=a_{0}\\), so we can start the recursion relationship at any initial condition we\u0026rsquo;d like.\u003c/p\u003e\n\u003cp\u003eWe notice that the value:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n} = \\frac{2^{n}}{n!} a_{0}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003esatisfies the system above. 
Which means we can write out the general answer as \\(a_0 \\sum_{i=0}^{\\infty} \\frac{2^{n}x^{n}}{n!}\\)\u003c/p\u003e\n\u003ch3 id=\"case-study-2\"\u003eCase Study 2\u003c/h3\u003e\n\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; - 2xy\u0026rsquo; + 2\\lambda y = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s calculate our Taylor series:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny = \\sum_{i=0}^{\\infty} a_{n} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo; = \\sum_{i=0}^{\\infty} n a_{n}x^{n-1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} n(n-1)a_{n}x^{n-2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eReindexing:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ny\u0026rsquo;\u0026rsquo; = \\sum_{n=0}^{\\infty} (n+1)(n+1) a_{n+2} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause \\(2xy\u0026rsquo;\\) appears in the equation, we can actually write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-2xy\u0026rsquo; = -\\sum_{i=0}^{\\infty} 2n a_{n} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand the final term:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n2\\lambda = \\sum_{n=0}^{\\infty} a_{n} x^{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAdding the whole thing up, we obtain that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{n=0}^{\\infty} \\qty[(n+2)(n+1) a_{n+2} - 2_{n}a_{n} + 2\\lambda a_{n}] x^{n} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor each term, we get a recursion relationship in:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{n+2} = \\frac{2(n-\\lambda)}{(n+2)(n+1)} a_{n}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb122024/","tags":null,"title":"SU-MATH53 
FEB122024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb142024/","tags":null,"title":"SU-MATH53 FEB142024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb162024/","tags":null,"title":"SU-MATH53 FEB162024"},{"categories":null,"contents":"A Partial Differential Equation is a Differential Equation which has more than one independent variable: $u(x,y), u(t,x,y), \u0026hellip;$\nFor instance:\n\\begin{equation} \\pdv{U}{t} = \\alpha \\pdv[2]{U}{x} \\end{equation}\nKey Intuition PDEs may have no solutions (unlike Uniqueness and Existance for ODEs) yet, usually, there are too many solutions\u0026mdash;so\u0026hellip; how do you describe all solutions? usually, there are no explicit formulas Laplacian of \\(u(x,y)\\) Laplacian of \\(u(x,y)\\)\nExamples Heat Equation See Heat Equation\nWave Equation see Wave Equation\nTransport Equation \\begin{equation} \\pdv{u}{t} = \\pdv{u}{x} \\end{equation}\ngenerally any \\(u = w(x+t)\\) should solve this\nSchrodinger Equation We have some:\n\\begin{equation} u(x,t) \\end{equation}\nand its a complex-valued function:\n\\begin{equation} i \\pdv{u}{t} = \\pdv[2]{u}{x} \\end{equation}\nwhich results in a superposition in linear equations\nNonlinear Example \\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} + u(1-u) \\end{equation}\nthis is a PDE variant of the logistic equation: this is non-linear\nMonge-Ampere Equations \\begin{equation} u(x,y) \\end{equation}\nHessian \\begin{equation} Hess(u) = \\mqty(\\pdv[2]{u}{x} \u0026amp; \\frac{\\partial^{2} u}{\\partial x \\partial y} \\\\ \\frac{\\partial^{2} u}{\\partial x \\partial y} \u0026amp; \\pdv[2]{u}{y}) \\end{equation}\nIf we take its determinant, we obtain:\n\\begin{equation} \\pdv[2]{u}{x} \\pdv[2]{u}{y} - \\qty(\\frac{\\partial^{2} u}{\\partial x \\partial y})^{2} \\end{equation}\nTraveling Wave For two-variable PDEs, it is called a Traveling Wave if solutions to \\(u\\) 
takes on the form:\n\\begin{equation} u(t,x) = w(x-ct) \\end{equation}\nfor some constant \\(c\\), and where \\(w(x)\\) is a function which depends on only one of the two variables.\nBell Curves See also Bell Curves\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePartial Differential Equation\u003c/a\u003e is a \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equation\u003c/a\u003e which has more than one \u003cstrong\u003eindependent variable\u003c/strong\u003e: $u(x,y), u(t,x,y), \u0026hellip;$\u003c/p\u003e\n\u003cp\u003eFor instance:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{U}{t} = \\alpha \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"key-intuition\"\u003eKey Intuition\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003es may have no solutions (unlike \u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e for \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003es)\u003c/li\u003e\n\u003cli\u003eyet, usually, there are too many solutions\u0026mdash;so\u0026hellip; how do you describe all solutions?\u003c/li\u003e\n\u003cli\u003eusually, there are no explicit formulas\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"laplacian-of-u--x-y----kbhlaplacian-of-u-x-y-dot-md\"\u003e\u003ca href=\"\"\u003eLaplacian of \\(u(x,y)\\)\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"\"\u003eLaplacian of \\(u(x,y)\\)\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"examples\"\u003eExamples\u003c/h2\u003e\n\u003ch3 id=\"heat-equation--kbhheat-equation-dot-md\"\u003e\u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"wave-equation--kbhwave-equation-dot-md\"\u003e\u003ca 
href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"transport-equation\"\u003eTransport Equation\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003egenerally any \\(u = w(x+t)\\) should solve this\u003c/p\u003e\n\u003ch3 id=\"schrodinger-equation\"\u003eSchrodinger Equation\u003c/h3\u003e\n\u003cp\u003eWe have some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(x,t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand its a complex-valued function:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ni \\pdv{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich results in a superposition in linear equations\u003c/p\u003e\n\u003ch3 id=\"nonlinear-example\"\u003eNonlinear Example\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x} + u(1-u)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is a \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003e variant of the \u003ca href=\"/posts/kbhlogistic_equations/\"\u003elogistic equation\u003c/a\u003e: this is \u003cstrong\u003enon-linear\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"monge-ampere-equations\"\u003eMonge-Ampere Equations\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nu(x,y)\n\\end{equation}\u003c/p\u003e\n\u003ch4 id=\"hessian\"\u003eHessian\u003c/h4\u003e\n\u003cp\u003e\\begin{equation}\nHess(u) = \\mqty(\\pdv[2]{u}{x} \u0026amp; \\frac{\\partial^{2} u}{\\partial x \\partial y} \\\\ \\frac{\\partial^{2} u}{\\partial x \\partial y} \u0026amp; \\pdv[2]{u}{y})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we take its determinant, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{x} \\pdv[2]{u}{y} - \\qty(\\frac{\\partial^{2} u}{\\partial x \\partial y})^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 
id=\"traveling-wave\"\u003eTraveling Wave\u003c/h2\u003e\n\u003cp\u003eFor two-variable \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003es, it is called a \u003ca href=\"#traveling-wave\"\u003eTraveling Wave\u003c/a\u003e if solutions to \\(u\\) takes on the form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = w(x-ct)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some constant \\(c\\), and where \\(w(x)\\) is a function which depends on only one of the two variables.\u003c/p\u003e\n\u003ch2 id=\"bell-curves--kbhbell-curves-dot-md\"\u003e\u003ca href=\"\"\u003eBell Curves\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSee also \u003ca href=\"\"\u003eBell Curves\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb212024/","tags":null,"title":"SU-MATH53 FEB212024"},{"categories":null,"contents":"Boundary Value Problem A BVP for an ODE is defined at two different points \\(x_0\\) and \\(x_1\\) at two different values of \\(l\\), whereby we are given:\n\\begin{equation} X_0 = a, X(L) = b \\end{equation}\nwhich we use to further specify a PDE. 
BVPs can either have no or lots of solutions.\nTo aid in the discovery of solutions, for:\n\\begin{equation} X\u0026rsquo;\u0026rsquo; = \\lambda X \\end{equation}\nwe have:\n\\begin{equation} X = \\begin{cases} c_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x}, \\lambda \u0026gt; 0 \\\\ c_1 x + c_2, \\lambda =0 \\\\ c_1 \\cos \\qty(\\sqrt{|\\lambda|}x) +c_2 \\sin \\qty(\\sqrt{|\\lambda|}x), \\lambda \u0026lt; 0 \\end{cases} \\end{equation}\nWhich specific solution arises out of which initial condition you use.\nDirichlet Conditions Initial conditions:\n\\begin{equation} \\begin{cases} u(t,0) = 0 \\\\ u(t, l) = 0 \\end{cases} \\end{equation}\nThis tells us that we are holding the ends of the rod at a constant temperature.\nSolutions For:\n\\begin{equation} X\u0026rsquo;\u0026rsquo; = \\lambda X \\end{equation}\nin the vanishing Case (\\(X(0) = 0 = X(L)\\)):\n\\begin{equation} X = c \\sin \\qty( \\frac{k \\pi x}{L}) \\end{equation}\nwhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\nwhich gives rise to:\n\\begin{equation} \\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}} \\end{equation}\nNeumann Conditions \\begin{equation} \\begin{cases} \\pdv{u}{x}(t,0) = 0 \\\\ \\pdv{u}{x}(t, l) = 0 \\end{cases} \\end{equation}\nthis tells us there is no heat flux across the boundary (i.e. 
heat doesn\u0026rsquo;t escape).\nSolutions For:\n\\begin{equation} X\u0026rsquo;\u0026rsquo; = \\lambda X \\end{equation}\nin the vanishing Case (\\(X\u0026rsquo;(0) = 0 = X\u0026rsquo;(L)\\)):\n\\begin{equation} X = c \\cos \\qty( \\frac{k \\pi x}{L}) \\end{equation}\nwhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\nwhich gives rise to:\n\\begin{equation} \\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}} \\end{equation}\nExamples See Heat Equation, and its worked solution.\n","html":"\u003ch2 id=\"boundary-value-problem\"\u003eBoundary Value Problem\u003c/h2\u003e\n\u003cp\u003eA \u003ca href=\"#boundary-value-problem\"\u003eBVP\u003c/a\u003e for an \u003ca href=\"/posts/kbhordinary_differential_equations/\"\u003eODE\u003c/a\u003e is defined at two different points \\(x_0\\) and \\(x_1\\) at two different values of \\(l\\), whereby we are given:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX_0 = a, X(L) = b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich we use to further specify a \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003e. 
\u003ca href=\"#boundary-value-problem\"\u003eBVP\u003c/a\u003es can either have \u003cstrong\u003eno\u003c/strong\u003e or \u003cstrong\u003elots\u003c/strong\u003e of solutions.\u003c/p\u003e\n\u003cp\u003eTo aid in the discovery of solutions, for:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo; = \\lambda X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = \\begin{cases}\nc_1 e^{\\sqrt{\\lambda}x} + c_2 e^{-\\sqrt{\\lambda}x}, \\lambda \u0026gt; 0 \\\\\nc_1 x + c_2, \\lambda =0 \\\\\nc_1 \\cos \\qty(\\sqrt{|\\lambda|}x) +c_2 \\sin \\qty(\\sqrt{|\\lambda|}x), \\lambda \u0026lt; 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich specific solution arises out of which initial condition you use.\u003c/p\u003e\n\u003ch3 id=\"dirichlet-conditions\"\u003eDirichlet Conditions\u003c/h3\u003e\n\u003cp\u003eInitial conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nu(t,0) = 0 \\\\\nu(t, l) = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis tells us that we are holding the ends of the rod at a constant temperature.\u003c/p\u003e\n\u003ch4 id=\"solutions\"\u003eSolutions\u003c/h4\u003e\n\u003cp\u003eFor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo; = \\lambda X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein the vanishing Case (\\(X(0) = 0 = X(L)\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = c \\sin \\qty( \\frac{k \\pi x}{L})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\u003c/p\u003e\n\u003cp\u003ewhich gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"neumann-conditions\"\u003eNeumann Conditions\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\n\\pdv{u}{x}(t,0) = 0 \\\\\n\\pdv{u}{x}(t, l) = 
0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis tells us there is no heat \u003ca href=\"/posts/kbhflux/\"\u003eflux\u003c/a\u003e across the boundary (i.e. heat doesn\u0026rsquo;t escape).\u003c/p\u003e\n\u003ch4 id=\"solutions\"\u003eSolutions\u003c/h4\u003e\n\u003cp\u003eFor:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo; = \\lambda X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ein the vanishing Case (\\(X\u0026rsquo;(0) = 0 = X\u0026rsquo;(L)\\)):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX = c \\cos \\qty( \\frac{k \\pi x}{L})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c \\neq 0\\), and the solutions quantized \\(k = 1, 2, 3, \\ldots\\).\u003c/p\u003e\n\u003cp\u003ewhich gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{-n^{2}\\pi^{2}}{L^{2}}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"examples\"\u003eExamples\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e, and its \u003ca href=\"/posts/kbhheat_equation/#solution-in-full\"\u003eworked solution\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb232024/","tags":null,"title":"SU-MATH53 FEB232024"},{"categories":null,"contents":"Fourier Decomposition Main idea, any induction \\(f(x)\\) on an interval \\([0, L]\\) can be written as a sum:\n\\begin{equation} f(x) = a_0 + \\sum_{k=1}^{\\infty} a_{k} \\cos \\qty( \\frac{2\\pi k}{L} x) + \\sum_{k=1}^{\\infty} b_{k} \\sin \\qty( \\frac{2\\pi k}{L} x) \\end{equation}\nL-periodicity A function is $L$-periodic if \\(f(x+L) = f(x)\\). In that case, it has period \\(L\\).\n$L$-periodicity is preserved across\u0026hellip;\ntranslation we are just moving it to the right/left\ndilation Suppose \\(f(x)\\) is \\(L\\) periodic and let \\(g(x) = f(kx)\\), then, \\(g\\) is also \\(L\\) periodic.\nProof:\n\\(g(x+L) = f(k(x+L)) = f(kx + kL) = f(kx) = g(x)\\). 
So \\(g\\) would also be \\(L\\) periodic. However, importantly, \\(g\\) would also be \\(\\frac{L}{k}\\) periodic (verified by using the same sketch as before)\nlinear combinations Suppose \\(f,g\\) are \\(L\\) periodic and \\(h(x) = af(x) + bg(x)\\), then \\(h\\) is also \\(L\\) periodic.\nProof:\n\\begin{equation} h(x+L) = af(x+L) + bg(x+L) = af(x) + bg(x) = h(x) \\end{equation}\nFourier Series see Fourier Series\n","html":"\u003ch2 id=\"fourier-decomposition\"\u003eFourier Decomposition\u003c/h2\u003e\n\u003cp\u003eMain idea, any induction \\(f(x)\\) on an interval \\([0, L]\\) can be written as a sum:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = a_0 + \\sum_{k=1}^{\\infty} a_{k} \\cos \\qty( \\frac{2\\pi k}{L} x) + \\sum_{k=1}^{\\infty} b_{k} \\sin \\qty( \\frac{2\\pi k}{L} x)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"l-periodicity\"\u003eL-periodicity\u003c/h2\u003e\n\u003cp\u003eA function is $L$-periodic if \\(f(x+L) = f(x)\\). In that case, it has period \\(L\\).\u003c/p\u003e\n\u003cp\u003e$L$-periodicity is preserved across\u0026hellip;\u003c/p\u003e\n\u003ch3 id=\"translation\"\u003etranslation\u003c/h3\u003e\n\u003cp\u003ewe are just moving it to the right/left\u003c/p\u003e\n\u003ch3 id=\"dilation\"\u003edilation\u003c/h3\u003e\n\u003cp\u003eSuppose \\(f(x)\\) is \\(L\\) periodic and let \\(g(x) = f(kx)\\), then, \\(g\\) is also \\(L\\) periodic.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\(g(x+L) = f(k(x+L)) = f(kx + kL) = f(kx) = g(x)\\). So \\(g\\) would also be \\(L\\) periodic. 
However, importantly, \\(g\\) would also be \\(\\frac{L}{k}\\) periodic (verified by using the same sketch as before)\u003c/p\u003e\n\u003ch3 id=\"linear-combinations\"\u003elinear combinations\u003c/h3\u003e\n\u003cp\u003eSuppose \\(f,g\\) are \\(L\\) periodic and \\(h(x) = af(x) + bg(x)\\), then \\(h\\) is also \\(L\\) periodic.\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nh(x+L) = af(x+L) + bg(x+L) = af(x) + bg(x) = h(x)\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"fourier-series--kbhfourier-series-dot-md\"\u003e\u003ca href=\"/posts/kbhfourier_series/#fourier-series\"\u003eFourier Series\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhfourier_series/#fourier-series\"\u003eFourier Series\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb252024/","tags":null,"title":"SU-MATH53 FEB252024"},{"categories":null,"contents":"more on Fourier Series.\ndecomposition of functions to even and odd Suppose we have any function with period \\(L\\) over \\([-\\frac{L}{2}, \\frac{L}{2}]\\), we can write this as a sum of even and odd functions:\n\\begin{equation} f(x) = \\frac{1}{2} (f(x) - f(-x)) + \\frac{1}{2} (f(x) + f(-x)) \\end{equation}\nAnd because of this fact, we can actually take each part and break it down individually as a Fourier Series because sin and cos are even and odd parts.\nSo we can take the first part, which is odd, and break it down using \\(a_{n} \\sin (k\\omega x)\\).\nWe can take the second part, which is odd, and break it down using \\(b_{n} \\cos (k\\omega x)\\).\nIf you then assume periodicity over the interval you care about \\(L\\), suddenly you can decompose it to a Fourier Series.\n","html":"\u003cp\u003emore on \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"decomposition-of-functions-to-even-and-odd\"\u003edecomposition of functions to even and 
odd\u003c/h2\u003e\n\u003cp\u003eSuppose we have any function with period \\(L\\) over \\([-\\frac{L}{2}, \\frac{L}{2}]\\), we can write this as a sum of even and odd functions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\frac{1}{2} (f(x) - f(-x)) + \\frac{1}{2} (f(x) + f(-x))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd because of this fact, we can actually take each part and break it down individually as a \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e because \u003ca href=\"/posts/kbhfourier_series/#sin-and-cos-are-even-and-odd-parts\"\u003esin and cos are even and odd parts\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSo we can take the first part, which is odd, and break it down using \\(a_{n} \\sin (k\\omega x)\\).\u003c/p\u003e\n\u003cp\u003eWe can take the second part, which is odd, and break it down using \\(b_{n} \\cos (k\\omega x)\\).\u003c/p\u003e\n\u003cp\u003eIf you then assume periodicity over the interval you care about \\(L\\), suddenly you can decompose it to a \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_feb282024/","tags":null,"title":"SU-MATH53 FEB282024"},{"categories":null,"contents":" Week Link \u0026lt;2024-01-16 Tue\u0026gt; PSet 1 \u0026lt;2024-01-18 Thu\u0026gt; PSet 2 \u0026lt;2024-01-25 Thu\u0026gt; PSet 3 \u0026lt;2024-02-02 Fri\u0026gt; PSet 4 \u0026lt;2024-02-07 Wed\u0026gt; PSet 5 \u0026lt;2024-02-14 Wed\u0026gt; PSet 6 \u0026lt;2024-02-23 Fri\u0026gt; PSet 7 \u0026lt;2024-03-02 Sat\u0026gt; PSet 8 \u0026lt;2024-03-09 Sat\u0026gt; PSet 9 ","html":"\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eWeek\u003c/th\u003e\n\u003cth\u003eLink\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-16 
Tue\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhsu_math53_pset_1/\"\u003ePSet 1\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-18 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_2/\"\u003ePSet 2\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-01-25 Thu\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_3/\"\u003ePSet 3\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-02 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_4/\"\u003ePSet 4\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-07 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_5/\"\u003ePSet 5\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-14 Wed\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_6/\"\u003ePSet 6\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-02-23 Fri\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_7/\"\u003ePSet 
7\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-03-02 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_8/\"\u003ePSet 8\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd\u003e\u003cspan class=\"timestamp-wrapper\"\u003e\u003cspan class=\"timestamp\"\u003e\u0026lt;2024-03-09 Sat\u0026gt;\u003c/span\u003e\u003c/span\u003e\u003c/td\u003e\n\u003ctd\u003e\u003ca href=\"/posts/kbhpset_9/\"\u003ePSet 9\u003c/a\u003e\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_homework_index/","tags":null,"title":"SU-MATH53 Homework Index"},{"categories":null,"contents":"Key Sequence Notation New Concepts First Order ODEs order of equations linear vs. non-linear equations homogeneous vs. inhomogeneous equations linear systems Newton\u0026rsquo;s Law of Cooling Important Results / Claims superposition principle Fundamental Theorem of Calculus Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#order-of-equations\"\u003eorder of equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#linear-vs-dot-non-linear-equations\"\u003elinear vs. non-linear equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#homogeneous-vs-dot-inhomogeneous-equations\"\u003ehomogeneous vs. 
inhomogeneous equations\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#linear-systems\"\u003elinear systems\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnewton_s_law_of_cooling/\"\u003eNewton\u0026rsquo;s Law of Cooling\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfundamental_theorem_of_calculus/\"\u003eFundamental Theorem of Calculus\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan082023/","tags":null,"title":"SU-MATH53 JAN082024"},{"categories":null,"contents":"Key Sequence Notation New Concepts First Order ODEs autonomous ODEs seperable ODEs initial value problems interval principle Important Results / Claims division method general solution to y\u0026rsquo;(t) = ry(t) IMPORTANT: one and exactly one solution exist for every point of an IVP Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable ODEs\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbhinitial_value_problems/\"\u003einitial value problems\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-point-of-an-ivp\"\u003einterval principle\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/#division-method\"\u003edivision method\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/#general-solution-to-y--t--ry--t\"\u003egeneral solution to y\u0026rsquo;(t) = ry(t)\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eIMPORTANT: \u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-point-of-an-ivp\"\u003eone and exactly one solution exist for every point of an IVP\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan102023/","tags":null,"title":"SU-MATH53 JAN102024"},{"categories":null,"contents":"Key Sequence Notation New Concepts level set Newton\u0026rsquo;s Law of Cooling logistic equations Important Results / Claims case study: pietri dish Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlevel_set/\"\u003elevel set\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnewton_s_law_of_cooling/\"\u003eNewton\u0026rsquo;s Law of Cooling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlogistic_equations/\"\u003elogistic equations\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhpetri_dish/\"\u003ecase study: pietri dish\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan122023/","tags":null,"title":"SU-MATH53 JAN122024"},{"categories":null,"contents":"Key Sequence Notation New Concepts tying into Separated Equations: \\(y\u0026rsquo; = f(t,y)\\) which are the most nicest. Recall that there was two special cases: seperable and autonomous ODEs. if we can write in terms of elementary function, good times if we can\u0026rsquo;t do it in terms of elementary functions, we can use qualitative analysis t(slope field, etc.) recall again Newton\u0026rsquo;s Law of Cooling phase line and stability (ODEs) Important Results / Claims autonomous First Order ODEs\u0026rsquo; solutions do not cross; as in, if there are two solutinos \\(y_1\\) and \\(y_2\\), their curves never intersect. one and exactly one solution exist for every point of an IVP autonomous ODEs level off at stationary curves Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003etying into \u003ca href=\"/posts/kbhfirst_order_odes/#separated-equations\"\u003eSeparated Equations\u003c/a\u003e: \\(y\u0026rsquo; = f(t,y)\\) which are the most nicest. 
Recall that there was two special cases: \u003ca href=\"/posts/kbhseperable_diffequ/\"\u003eseperable\u003c/a\u003e and \u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous ODEs\u003c/a\u003e.\n\u003cul\u003e\n\u003cli\u003eif we can write in terms of elementary function, good times\u003c/li\u003e\n\u003cli\u003eif we can\u0026rsquo;t do it in terms of elementary functions, we can use qualitative analysis t(slope field, etc.)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003erecall again \u003ca href=\"/posts/kbhnewton_s_law_of_cooling/\"\u003eNewton\u0026rsquo;s Law of Cooling\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhphase_line/\"\u003ephase line\u003c/a\u003e and \u003ca href=\"/posts/kbhstability/\"\u003estability (ODEs)\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/\"\u003eautonomous\u003c/a\u003e \u003ca href=\"/posts/kbhfirst_order_odes/\"\u003eFirst Order ODEs\u003c/a\u003e\u0026rsquo; solutions do not cross; as in, if there are two solutinos \\(y_1\\) and \\(y_2\\), their curves never intersect.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinitial_value_problems/#one-and-exactly-one-solution-exist-for-every-point-of-an-ivp\"\u003eone and exactly one solution exist for every point of an IVP\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhautonomous_odes/#autonomous-odes-level-off-at-stationary-curves\"\u003eautonomous ODEs level off at stationary curves\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan172024/","tags":null,"title":"SU-MATH53 JAN172024"},{"categories":null,"contents":"Key Sequence Notation New Concepts complex number 
Recall also \\(|z| = \\sqrt{\\bar{z}z}\\)\nEuler\u0026rsquo;s Equation Important Results / Claims complex numbers, fundamentally, are a way of *multiplying in \\(\\mathbb{R}^{2}\\) scaling by reals will result in scaling up, and multiplying by complex will result in rotation. Questions Interesting Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eRecall also \\(|z| = \\sqrt{\\bar{z}z}\\)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbheuler_s_equation/\"\u003eEuler\u0026rsquo;s Equation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecomplex numbers, fundamentally, are a way of *multiplying in \\(\\mathbb{R}^{2}\\)\u003c/li\u003e\n\u003cli\u003escaling by reals will result in \u003cstrong\u003escaling up\u003c/strong\u003e, and multiplying by complex will result in \u003cstrong\u003erotation\u003c/strong\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan192023/","tags":null,"title":"SU-MATH53 JAN192024"},{"categories":null,"contents":"Key Sequence Notation New Concepts Second-Order Linear Differential Equations superposition principle and functional independence Newton\u0026rsquo;s First Law of Motion Important Results / Claims finding independent solutions of second-order constant-coefficient linear ODEs homogeneous constant-coefficient second order linear ODE Uniqueness and Existance of second order superposition principle Questions Interesting 
Factoids ","html":"\u003ch2 id=\"key-sequence\"\u003eKey Sequence\u003c/h2\u003e\n\u003ch2 id=\"notation\"\u003eNotation\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/\"\u003eSecond-Order Linear Differential Equations\u003c/a\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e and functional \u003ca href=\"/posts/kbhprobability/#independence\"\u003eindependence\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhnewton_s_first_law_of_motion/\"\u003eNewton\u0026rsquo;s First Law of Motion\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"important-results-claims\"\u003eImportant Results / Claims\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#finding-independent--kbhprobability-dot-md--solutions-of-second-order-constant-coefficient-linear-odes\"\u003efinding independent solutions of second-order constant-coefficient linear ODEs\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#homogeneous-constant-coefficient-second-order-linear-ode\"\u003ehomogeneous constant-coefficient second order linear ODE\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhsecond_order_linear_differential_equation/#id-24ec5541-9b12-4521-b698-014711a2c762-uniqueness-and-existance-of-second-order\"\u003eUniqueness and Existance of second order\u003c/a\u003e\u003c/strong\u003e\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhordinary_differential_equations/#superposition-principle\"\u003esuperposition principle\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"questions\"\u003eQuestions\u003c/h2\u003e\n\u003ch2 id=\"interesting-factoids\"\u003eInteresting Factoids\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan202024/","tags":null,"title":"SU-MATH53 JAN222024"},{"categories":null,"contents":"Underdetermined ODEs Complex ODE System Matrix Exponentiation Finding eigenvectors \\(A = n \\times n\\) matrix, the task of finding eigenvalues and eigenvectors is a linear algebra problem:\n\\begin{equation} A v = \\lambda v \\end{equation}\nFinding specific solutions to IVPs with special substitution For some:\n\\begin{equation} \\begin{cases} x\u0026rsquo; = Ax \\\\ x(t_0) = x_0 \\end{cases} \\end{equation}\nwe can leverage the first task:\nfind \\(v\\), \\(\\lambda\\) for \\(A\\) guess \\(x = u(t)v\\), this is \u0026ldquo;magical substitution\u0026rdquo; and now, we can see that \\(x\u0026rsquo; = u\u0026rsquo;v = A(uv) = \\lambda u v\\) meaning \\(u\u0026rsquo; = \\lambda u\\) finaly, \\(u(t) = ce^{\\lambda} t\\) Eigenbasis case Suppose \\(A\\) has a basis of eigenvectors, and real eigenvalues. 
We can write its entire solution set in terms of these basis eigenvectors:\n\\begin{equation} x(t) = u_1(t) v_1 + \\dots + u_{n}(t) v_{n} \\end{equation}\nthis means:\n\\begin{equation} x\u0026rsquo;(t) = Ax = u_1\u0026rsquo; v_1 + \\dots +u_{n} \u0026rsquo; v_{n} = \\lambda_{1} u_{1} v_1 + \\dots + \\lambda_{n} u_{n} v_{n} \\end{equation}\nBecause \\(v\\) forms a basis, each \\(u_j\u0026rsquo; = \\lambda_{j} u_{j}\\).\nWe thereby decomposed our entangled expression seperably by changing into eigenbasis.\nAfter solving each \\(u\\), we obtain:\n\\begin{equation} x(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n} \\end{equation}\nWe can identify \\(c_{j}\\) by noting, that \\(x(0)\\) resolves to:\n\\begin{equation} x(0) = c_1v_1 + \\dots + c_{n}v_{n} \\end{equation}\nFinally, we can write this as:\n\\begin{equation} x(0) = x_0 = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] c \\end{equation}\nMeaning, we can solve for initial conditions as:\n\\begin{equation} \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}]^{-1} x_0 = c \\end{equation}\nPractice Solving Let:\n\\begin{equation} A = \\mqty(0 \u0026amp; 1 \u0026amp; 1 \\\\ 1 \u0026amp; 0 \u0026amp; 1 \\\\ 1 \u0026amp; 1 \u0026amp; 0) \\end{equation}\nWe have two eigenspaces:\n\\begin{equation} \\lambda = -1, v = \\left\\{\\mqty(-1 \\\\ 1 \\\\ 0), \\mqty(0 \\\\ 1 \\\\ -1)\\right\\} \\end{equation}\nand\n\\begin{equation} \\lambda = 2, v = \\left\\{\\mqty(1 \\\\ 1 \\\\ 1)\\right\\} \\end{equation}\nThis gives rise to a basis of eigenvectors with all three vectors. 
We obtain:\n\\begin{equation} x(t) = c_1 e^{-t} \\mqty(-1 \\\\ 1\\\\0) + c_2 \\mqty(0 \\\\ 1 \\\\ -1) e^{-t} + c_3 \\mqty(1 \\\\ 1 \\\\ 1) e^{2t} \\end{equation}\n","html":"\u003ch2 id=\"underdetermined-odes\"\u003eUnderdetermined ODEs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunderdetermined_ode_system/\"\u003eComplex ODE System\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003eMatrix Exponentiation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"finding-eigenvector--kbheigenvalue-dot-md--s\"\u003eFinding \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es\u003c/h3\u003e\n\u003cp\u003e\\(A = n \\times n\\) matrix, the task of finding eigenvalues and eigenvectors is a linear algebra problem:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA v = \\lambda v\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"finding-specific-solutions-to-ivp--kbhinitial-value-problems-dot-md--s-with-special-substitution\"\u003eFinding specific solutions to \u003ca href=\"/posts/kbhinitial_value_problems/\"\u003eIVP\u003c/a\u003es with special substitution\u003c/h3\u003e\n\u003cp\u003eFor some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nx\u0026rsquo; = Ax \\\\\nx(t_0) = x_0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can leverage the first task:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003efind \\(v\\), \\(\\lambda\\) for \\(A\\)\u003c/li\u003e\n\u003cli\u003eguess \\(x = u(t)v\\), this is \u0026ldquo;magical substitution\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eand now, we can see that \\(x\u0026rsquo; = u\u0026rsquo;v = A(uv) = \\lambda u v\\)\u003c/li\u003e\n\u003cli\u003emeaning \\(u\u0026rsquo; = \\lambda u\\)\u003c/li\u003e\n\u003cli\u003efinaly, \\(u(t) = ce^{\\lambda} t\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"eigenbasis-case\"\u003eEigenbasis case\u003c/h3\u003e\n\u003cp\u003eSuppose \\(A\\) has a basis of \u003ca 
href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, and \u003cstrong\u003ereal\u003c/strong\u003e eigenvalues. We can write its entire solution set in terms of these basis eigenvectors:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = u_1(t) v_1 + \\dots + u_{n}(t) v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo;(t) = Ax = u_1\u0026rsquo; v_1 + \\dots +u_{n} \u0026rsquo; v_{n} = \\lambda_{1} u_{1} v_1 + \\dots + \\lambda_{n} u_{n} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBecause \\(v\\) forms a basis, each \\(u_j\u0026rsquo; = \\lambda_{j} u_{j}\\).\u003c/p\u003e\n\u003cp\u003eWe thereby decomposed our entangled expression seperably by changing into eigenbasis.\u003c/p\u003e\n\u003cp\u003eAfter solving each \\(u\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = c_1 e^{\\lambda_{1}} v_1 + \\dots + c_{n} e^{\\lambda_{n}} v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can identify \\(c_{j}\\) by noting, that \\(x(0)\\) resolves to:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(0) = c_1v_1 + \\dots + c_{n}v_{n}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFinally, we can write this as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(0) = x_0 = \\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}] c\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, we can solve for initial conditions as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty[v_1 \u0026amp; \\dots \u0026amp; v_{n}]^{-1} x_0 = c\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"practice-solving\"\u003ePractice Solving\u003c/h2\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA = \\mqty(0 \u0026amp; 1 \u0026amp; 1 \\\\ 1 \u0026amp; 0 \u0026amp; 1 \\\\ 1 \u0026amp; 1 \u0026amp; 0)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have two eigenspaces:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = -1, v = \\left\\{\\mqty(-1 \\\\ 1 \\\\ 0), \\mqty(0 \\\\ 
1 \\\\ -1)\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = 2, v = \\left\\{\\mqty(1 \\\\ 1 \\\\ 1)\\right\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to a basis of eigenvectors with all three vectors. We obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t) = c_1 e^{-t} \\mqty(-1 \\\\ 1\\\\0) + c_2 \\mqty(0 \\\\ 1 \\\\ -1) e^{-t} + c_3 \\mqty(1 \\\\ 1 \\\\ 1) e^{2t}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan262023/","tags":null,"title":"SU-MATh53 JAN262023"},{"categories":null,"contents":"Review For Linear Constant-Coefficient Equation that are homogeneous, we can solve it generally in terms of some matrix \\(A\\) as:\n\\begin{equation} x\u0026rsquo; = Ax \\end{equation}\nif \\(A\\) has enough eigenvectors, we can just write out \\(y(t) = c_1 e^{\\lambda_{1}t} v_1 + \u0026hellip; + c_{n}e^{\\lambda_{n}t} v_{2}\\)\nBut, if we don\u0026rsquo;t, we can use matrix exponentiation\nContent eigensolutions ","html":"\u003ch2 id=\"review\"\u003eReview\u003c/h2\u003e\n\u003cp\u003eFor \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/\"\u003eLinear Constant-Coefficient Equation\u003c/a\u003e that are \u003ca href=\"/posts/kbhhomogeneity/\"\u003ehomogeneous\u003c/a\u003e, we can solve it generally in terms of some matrix \\(A\\) as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx\u0026rsquo; = Ax\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif \\(A\\) has enough \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003es, we can just write out \\(y(t) = c_1 e^{\\lambda_{1}t} v_1 + \u0026hellip; + c_{n}e^{\\lambda_{n}t} v_{2}\\)\u003c/p\u003e\n\u003cp\u003eBut, if we don\u0026rsquo;t, we can use \u003ca href=\"/posts/kbhmatrix_exponentiation/\"\u003ematrix exponentiation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"content\"\u003eContent\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca 
href=\"/posts/kbheigensolutions/\"\u003eeigensolutions\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan292024/","tags":null,"title":"SU-MATH53 JAN292024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_jan312024/","tags":null,"title":"SU-MATH53 JAN312024"},{"categories":null,"contents":"We\u0026rsquo;ve gone over Heat Equation, Wave Equation, and let\u0026rsquo;s talk about some more stuff.\ndamped heat equation damped wave equation two-dimensional heat equation ","html":"\u003cp\u003eWe\u0026rsquo;ve gone over \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e, \u003ca href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e, and let\u0026rsquo;s talk about some more stuff.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdamped_heat_equation/#damped-heat-equation\"\u003edamped heat equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003edamped wave equation\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtwo_dimensional_heat_equation/\"\u003etwo-dimensional heat equation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar012024/","tags":null,"title":"SU-MATH53 MAR012024"},{"categories":null,"contents":"What if, Fourier Series, but exponential?\nThis also motivates Discrete Fourier Transform.\nAlso Complex Exponential.\nReview Recall again that if we have a periodic function, we\u0026rsquo;ve got:\n\\begin{equation} f(x) = \\sum_{k=0}^{\\infty} a_{k} \\sin \\qty( \\frac{2\\pi k}{l} x) + b_{n} \\cos \\qty( \\frac{2\\pi k x}{L}) \\end{equation}\nWe note that this breaks individually into the sign and cosine series depending of the function\u0026rsquo;s oddness.\nComplex Fourier Series This will begin by feeling like a notation rewrite:\n\\begin{equation} f(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x 
i} \\end{equation}\nwhere \\(\\omega = \\frac{2\\pi}{L}\\).\nWhy is this summing from negative to positive?\nConsider:\n\\begin{equation} \\cos \\qty(nx) = \\frac{e^{inx}+e^{-inx}}{2} \\end{equation}\nYou will note that summing \\(n \\in 0 \u0026hellip; \\infty\\), plugging it into above, will result in summing from both \\(n \\in -\\infty \u0026hellip; \\infty\\).\nFinding \\(c_{n}\\) Recall that complex exponentials are orthonormal + inner product over complex-valued functions\nBecause most cancels except one thing, we get:\n\\begin{equation} \\langle f, e^{i\\omega n x} \\rangle = c_{n} L \\end{equation}\nmeaning:\n\\begin{equation} c_{n} = \\frac{1}{L} \\int_{0}^{L} f(x) e^{-i\\omega n x} \\dd{x} = \\frac{1}{L} \\int_{\\frac{-L}{2}}^{\\frac{L}{2}} f(x) e^{-i\\omega n x} \\dd{x} \\end{equation}\nif our function is \\(L\\) periodic.\nNOTE: this integral has a NEGATIVE power vs the series has a POSITIVE power!!\nComplex Exponentials with Sawtooth Consider:\n\\begin{equation} f(x) = x-n \\end{equation}\nwhere this function is periodic over \\(n \\leq x \\leq n+1\\), so\u0026mdash;\n\\begin{equation} c_{n} = \\int_{0}^{1} x e^{-2\\pi i n x} \\dd{x} = -\\frac{1}{2\\pi i n} e^{-2 \\pi i n} \\end{equation}\n","html":"\u003cp\u003eWhat if, \u003ca href=\"/posts/kbhfourier_series/\"\u003eFourier Series\u003c/a\u003e, but exponential?\u003c/p\u003e\n\u003cp\u003eThis also motivates Discrete Fourier Transform.\u003c/p\u003e\n\u003cp\u003eAlso \u003ca href=\"/posts/kbhcomplex_exponential/\"\u003eComplex Exponential\u003c/a\u003e.\u003c/p\u003e\n\u003chr\u003e\n\u003ch2 id=\"review\"\u003eReview\u003c/h2\u003e\n\u003cp\u003eRecall again that if we have a periodic function, we\u0026rsquo;ve got:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{k=0}^{\\infty} a_{k} \\sin \\qty( \\frac{2\\pi k}{l} x) + b_{n} \\cos \\qty( \\frac{2\\pi k x}{L})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe note that this breaks individually into the sign and cosine series depending 
of the function\u0026rsquo;s oddness.\u003c/p\u003e\n\u003ch2 id=\"complex-fourier-series\"\u003eComplex Fourier Series\u003c/h2\u003e\n\u003cp\u003eThis will begin by feeling like a notation rewrite:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\sum_{-\\infty}^{\\infty} c_{n} e^{n \\omega x i}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\omega = \\frac{2\\pi}{L}\\).\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eWhy is this summing from negative to positive?\u003c/p\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\cos \\qty(nx) = \\frac{e^{inx}+e^{-inx}}{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eYou will note that summing \\(n \\in 0 \u0026hellip; \\infty\\), plugging it into above, will result in summing from both \\(n \\in -\\infty \u0026hellip; \\infty\\).\u003c/p\u003e\n\u003chr\u003e\n\u003ch3 id=\"finding-c-n\"\u003eFinding \\(c_{n}\\)\u003c/h3\u003e\n\u003cp\u003eRecall that \u003ca href=\"/posts/kbhcomplex_exponential/#complex-exponentials-are-orthonormal\"\u003ecomplex exponentials are orthonormal\u003c/a\u003e + \u003ca href=\"/posts/kbhcomplex_exponential/#over-complex-valued-functions\"\u003einner product over complex-valued functions\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003eBecause most cancels except one thing, we get:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\langle f, e^{i\\omega n x} \\rangle = c_{n} L\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{n} = \\frac{1}{L} \\int_{0}^{L} f(x) e^{-i\\omega n x} \\dd{x} = \\frac{1}{L} \\int_{\\frac{-L}{2}}^{\\frac{L}{2}} f(x) e^{-i\\omega n x} \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif our function is \\(L\\) periodic.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eNOTE: this integral has a NEGATIVE power vs the series has a POSITIVE power\u003c/strong\u003e\u003c/strong\u003e!!\u003c/p\u003e\n\u003ch3 id=\"complex-exponentials-with-sawtooth\"\u003eComplex 
Exponentials with Sawtooth\u003c/h3\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = x-n\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere this function is periodic over \\(n \\leq x \\leq n+1\\), so\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc_{n} = \\int_{0}^{1} x e^{-2\\pi i n x} \\dd{x} = -\\frac{1}{2\\pi i n} e^{-2 \\pi i n}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar042024/","tags":null,"title":"SU-MATH53 MAR042024"},{"categories":null,"contents":"Fourier Transform\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfourier_transform/#fourier-transform\"\u003eFourier Transform\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar062024/","tags":null,"title":"SU-MATH53 MAR062024"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar082024/","tags":null,"title":"SU-MATH53 MAR082024"},{"categories":null,"contents":"heat equation on the entire line \\begin{equation} \\pdv{u}{t} = \\frac{1}{2} \\pdv[2]{u}{x} \\end{equation}\nWe can try to find a:\n\\begin{equation} U(0,x) = f(x) \\end{equation}\nif we write:\n\\begin{equation} \\hat{U}(t,\\lambda) = \\int e^{-i x \\lambda} U(t,x) \\dd{x} \\end{equation}\nwhich means we can write, with initial condtions:\n\\begin{equation} \\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}} \\end{equation}\nWe want to reach a close form:\n\\begin{equation} U (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nSteps: recall we ended up at\n\\begin{equation} \\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}} \\end{equation}\nLet\u0026rsquo;s call:\n\\begin{equation} \\hat{g}(\\lambda) = e^{- t \\frac{\\lambda^{2}}{2}} \\end{equation}\nso we have:\n\\begin{equation} \\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) \\hat{g}(\\lambda) 
\\end{equation}\nwe can use convolution to figure \\(U(t,x)\\).\nRecall that the Fourier transform of a Gaussian:\n\\begin{equation} \\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = \\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}} \\end{equation}\nLet\u0026rsquo;s first set:\n\\begin{equation} a = \\frac{1}{t} \\end{equation}\nWhich will give us that:\n\\begin{equation} g(x) = \\frac{1}{\\sqrt{2\\pi t} } e^{-\\frac{x^{2}}{2t}} \\end{equation}\nMeaning, with convolution:\n\\begin{equation} \\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g \\end{equation}\nwhy does this make sense We are convolving a Gaussian against \\(f(x)\\). Meaning, at very small \\(t\\) , we are taking a very small window of size \\(1\\) against.\nHeavyside function \\begin{equation} f(x) = \\begin{cases} 1, x\\geq 0 \\\\ 0, x\u0026lt;0 \\end{cases} \\end{equation}\nThis gives: if we split the room by \\(x\\). Recall:\n\\begin{equation} U (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nGiven our \\(f\\), this becomes:\n\\begin{equation} U (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{0}^{\\infty} e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y} \\end{equation}\nIf we change variables:\n\\begin{align} \\frac{(x-y)^{2}}{2t} - \\qty( \\frac{x}{\\sqrt{2t}} - \\frac{y}{\\sqrt{2t}})^{2} \\end{align}\nwhich means:\n\\begin{equation} z = \\frac{y}{2\\sqrt{t}} \\end{equation}\n\\begin{equation} \\frac{1}{\\sqrt{\\pi}} \\int_{0}^{\\infty} e^{^{-\\qty(\\frac{x}{\\sqrt{2t}} - z)^{2}}} \\dd{z} \\end{equation}\nand we will also apply:\n\\begin{equation} w = z - \\frac{x}{\\sqrt{2t}} \\end{equation}\nwhich will give:\n\\begin{equation} \\frac{1}{\\sqrt{\\pi}} \\int_{-\\frac{x}{\\sqrt{2t}}}^{\\infty} e^{-w^{2}} \\dd{w} \\end{equation}\nnotice, as \\(x\\) increases, we are integrating more of a Gaussian, which will be exceedingly close to \\(1\\); as \\(x\\) decreases, we\u0026rsquo;ll get closer to \\(0\\). 
And also, \\(t\\) smoothed \\(x\\) out, which means as \\(t\\) increases the interface between \\(0\\) and \\(1\\) becomes smoother.\nerf erf\nconvolution see convolution\n","html":"\u003ch2 id=\"heat-equation-on-the-entire-line\"\u003eheat equation on the entire line\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\frac{1}{2} \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can try to find a:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(0,x) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eif we write:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U}(t,\\lambda) = \\int e^{-i x \\lambda} U(t,x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means we can write, with initial condtions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe want to reach a close form:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eSteps: recall we ended up at\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) e^{- t \\frac{\\lambda^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s call:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{g}(\\lambda) = e^{- t \\frac{\\lambda^{2}}{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\hat{U} (t, \\lambda) = \\hat{f}(\\lambda) \\hat{g}(\\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can use \u003ca href=\"/posts/kbhconvolution/#convolution\"\u003econvolution\u003c/a\u003e to figure \\(U(t,x)\\).\u003c/p\u003e\n\u003cp\u003eRecall that the Fourier transform of a Gaussian:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}\\qty(e^{-\\frac{ax^{2}}{2}}) = 
\\sqrt{\\frac{2\\pi}{a}}e^{-\\frac{\\lambda^{2}}{2a}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s first set:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na = \\frac{1}{t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich will give us that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\ng(x) = \\frac{1}{\\sqrt{2\\pi t} } e^{-\\frac{x^{2}}{2t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, with convolution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mathcal{F}^{-1}(\\hat{f} \\hat{g}) = f * g\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"why-does-this-make-sense\"\u003ewhy does this make sense\u003c/h3\u003e\n\u003cp\u003eWe are convolving a Gaussian against \\(f(x)\\). Meaning, at very small \\(t\\) , we are taking a very small window of size \\(1\\) against.\u003c/p\u003e\n\u003ch3 id=\"heavyside-function\"\u003eHeavyside function\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nf(x) = \\begin{cases}\n1, x\\geq 0 \\\\\n0, x\u0026lt;0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives: if we split the room by \\(x\\). 
Recall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{-\\infty}^{\\infty} f(y) e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven our \\(f\\), this becomes:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU (t, x) = \\frac{1}{\\sqrt{2\\pi} t} \\int_{0}^{\\infty} e^{-\\frac{(x-y)^{2}}{2t}} \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIf we change variables:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\frac{(x-y)^{2}}{2t} - \\qty( \\frac{x}{\\sqrt{2t}} - \\frac{y}{\\sqrt{2t}})^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ewhich means:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = \\frac{y}{2\\sqrt{t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{\\sqrt{\\pi}} \\int_{0}^{\\infty} e^{^{-\\qty(\\frac{x}{\\sqrt{2t}} - z)^{2}}} \\dd{z}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand we will also apply:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nw = z - \\frac{x}{\\sqrt{2t}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich will give:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{1}{\\sqrt{\\pi}} \\int_{-\\frac{x}{\\sqrt{2t}}}^{\\infty} e^{-w^{2}} \\dd{w}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enotice, as \\(x\\) increases, we are integrating more of a Gaussian, which will be exceedingly close to \\(1\\); as \\(x\\) decreases, we\u0026rsquo;ll get closer to \\(0\\). 
And also, \\(t\\) smoothed \\(x\\) out, which means as \\(t\\) increases the interface between \\(0\\) and \\(1\\) becomes smoother.\u003c/p\u003e\n\u003ch2 id=\"erf\"\u003eerf\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#erf\"\u003eerf\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"convolution--kbhconvolution-dot-md\"\u003e\u003ca href=\"/posts/kbhconvolution/#convolution\"\u003econvolution\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhconvolution/#convolution\"\u003econvolution\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_mar112024/","tags":null,"title":"SU-MATH53 MAR112024"},{"categories":null,"contents":"This is the staging file for the midterm sheet, which I don\u0026rsquo;t usually publicise.\n","html":"\u003cp\u003eThis is the staging file for the midterm sheet, which I don\u0026rsquo;t usually publicise.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_midterm_sheet/","tags":null,"title":"SU-MATH53 Midterm Sheet"},{"categories":null,"contents":"We have:\n\\begin{equation} \\pdv[2]{u}{x} + \\pdv[2]{u}{y} = 0 \\end{equation}\nIgnoring the boundary conditions when \\(u(0,y)\\), we know that we have Dirichlet boundaries in \\(y\\). 
This gives:\n\\begin{equation} u(x,0) = u(x,\\pi) = 0 \\end{equation}\nAssuming our solution takes on the shape of \\(u=X(x)Y(y)\\), we obtain:\n\\begin{equation} X\u0026rsquo;\u0026rsquo;(x)Y(y) + Y\u0026rsquo;\u0026rsquo;(y)X(x) = 0 \\end{equation}\nby plugging in derivatives of that assumption; meaning:\n\\begin{equation} X\u0026rsquo;\u0026rsquo;(x)Y(y) = -Y\u0026rsquo;\u0026rsquo;(y)X(x) \\end{equation}\nThis gives rise to:\n\\begin{align} \\frac{X\u0026rsquo;\u0026rsquo;(x)}{X(x)} = -\\frac{Y\u0026rsquo;\u0026rsquo;(y)}{Y(y)} = c \\end{align}\n[you know why \\(c\u0026gt;0\\), so let\u0026rsquo;s skip to]\nWe have \\(c\u0026gt;0\\), meaning:\n\\begin{equation} X\u0026rsquo;\u0026rsquo;(x) = cX(x) \\end{equation}\nfor some positive \\(c\\); this will result in a linear combination of exponentials:\n\\begin{equation} X(x) = a_{1} e^{\\sqrt{c}x} + a_2 e^{-\\sqrt{c}x} \\end{equation}\nthis is because\u0026hellip; try it! try solving \\(X\u0026rsquo;\u0026rsquo;(x) = cX(x)\\).\nNow, importantly, let\u0026rsquo;s declare:\n\\begin{equation} \\lambda = \\sqrt{c} \\end{equation}\nThis gives:\n\\begin{equation} c = \\lambda^{2} \\end{equation}\nMeaning, we have:\n\\begin{equation} \\frac{Y\u0026rsquo;\u0026rsquo;(y)}{Y(y)} = -\\lambda^{2} \\end{equation}\nmeaning:\n\\begin{equation} Y\u0026rsquo;\u0026rsquo;(y) = -\\lambda^{2} Y(y) \\end{equation}\nNow, given we now have a negative sign in front of our second order ODE, we can see that this falls into the sinusoid case, whereby:\n\\begin{equation} Y = a_3 \\cos \\qty(\\lambda x) + a_4 \\sin \\qty(\\lambda x) \\end{equation}\nOur boundary condition gives:\n\\begin{equation} Y_0 = Y_{\\pi} = 0 = a_3 = 0 \\end{equation}\nmeaning\n\\begin{equation} Y = a_4 \\sin \\qty(\\lambda x) \\end{equation}\nand so on. 
You multiply them together and all\u0026rsquo;s well that ends well.\n","html":"\u003cp\u003eWe have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{x} + \\pdv[2]{u}{y} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIgnoring the boundary conditions when \\(u(0,y)\\), we know that we have Dirichlet boundaries in \\(y\\). This gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(x,0) = u(x,\\pi) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAssuming our solution takes on the shape of \\(u=X(x)Y(y)\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo;(x)Y(y) + Y\u0026rsquo;\u0026rsquo;(y)X(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby plugging in derivatives of that assumption; meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo;(x)Y(y) = -Y\u0026rsquo;\u0026rsquo;(y)X(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives rise to:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\frac{X\u0026rsquo;\u0026rsquo;(x)}{X(x)} = -\\frac{Y\u0026rsquo;\u0026rsquo;(y)}{Y(y)} = c\n\\end{align}\u003c/p\u003e\n\u003cp\u003e[you know why \\(c\u0026gt;0\\), so let\u0026rsquo;s skip to]\u003c/p\u003e\n\u003cp\u003eWe have \\(c\u0026gt;0\\), meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX\u0026rsquo;\u0026rsquo;(x) = cX(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor some positive \\(c\\); this will result in a linear combination of exponentials:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nX(x) = a_{1} e^{\\sqrt{c}x} + a_2 e^{-\\sqrt{c}x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis is because\u0026hellip; try it! 
try solving \\(X\u0026rsquo;\u0026rsquo;(x) = cX(x)\\).\u003c/p\u003e\n\u003cp\u003eNow, \u003cstrong\u003eimportantly\u003c/strong\u003e, let\u0026rsquo;s declare:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\sqrt{c}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nc = \\lambda^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{Y\u0026rsquo;\u0026rsquo;(y)}{Y(y)} = -\\lambda^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY\u0026rsquo;\u0026rsquo;(y) = -\\lambda^{2} Y(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, given we now have a negative sign in front of our second order ODE, we can see that this falls into the sinusoid case, whereby:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY = a_3 \\cos \\qty(\\lambda x) + a_4 \\sin \\qty(\\lambda x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eOur boundary condition gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY_0 = Y_{\\pi} = 0 = a_3 = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY = a_4 \\sin \\qty(\\lambda x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand so on. You multiply them together and all\u0026rsquo;s well that ends well.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_practice_1_problem_4/","tags":null,"title":"SU-MATH53 Practice 1 Problem 4"},{"categories":null,"contents":"L-Periodic Functions So, we have:\n\\begin{equation} f(x+L) = f(x) \\end{equation}\nThe integral is equivalent for any:\n\\begin{equation} \\int_{a}^{a+L} f(x) \\end{equation}\nfor any \\(a\\).\nHeat Equation Recipe are we on a finite interval? then, decompose into product-type solution \\(A(t)B(x)\\) and solve. are we not? Fourier transform on the space variable and solve. 
What if \\(\\lambda \\in \\mathbb{C} \\backslash \\mathbb{R}\\) Shush.\nWhy can we guess \\(A(t)B(x)\\) Because we were able to find solutions. Believe that the solution set spans.\nFourier Transform on Three-Variable Expressions We have better Fourier transforms on n-space rather than on a line. Use those.\nExistence and Uniqueness + Superposition ","html":"\u003ch2 id=\"l-periodic-functions\"\u003eL-Periodic Functions\u003c/h2\u003e\n\u003cp\u003eSo, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x+L) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe integral is equivalent for any:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\int_{a}^{a+L} f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor any \\(a\\).\u003c/p\u003e\n\u003ch2 id=\"heat-equation-recipe\"\u003eHeat Equation Recipe\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eare we on a finite interval? then, decompose into product-type solution \\(A(t)B(x)\\) and solve.\u003c/li\u003e\n\u003cli\u003eare we not? Fourier transform on the space variable and solve.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"what-if-lambda-in-mathbb-c-backslash-mathbb-r\"\u003eWhat if \\(\\lambda \\in \\mathbb{C} \\backslash \\mathbb{R}\\)\u003c/h3\u003e\n\u003cp\u003eShush.\u003c/p\u003e\n\u003ch3 id=\"why-can-we-guess-a--t--b--x\"\u003eWhy can we guess \\(A(t)B(x)\\)\u003c/h3\u003e\n\u003cp\u003eBecause we were able to find solutions. Believe that the solution set spans.\u003c/p\u003e\n\u003ch2 id=\"fourier-transform-on-three-variable-expressions\"\u003eFourier Transform on Three-Variable Expressions\u003c/h2\u003e\n\u003cp\u003eWe have better Fourier transforms on n-space rather than on a line. 
Use those.\u003c/p\u003e\n\u003ch2 id=\"existence-and-uniqueness-plus-superposition\"\u003eExistence and Uniqueness + Superposition\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsu_math53_problem_session/","tags":null,"title":"SU-MATH53 Problem Session"},{"categories":null,"contents":"a\n","html":"\u003cp\u003ea\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsubgroup/","tags":null,"title":"subgroup"},{"categories":null,"contents":"A subspace is a vector space which is a subset of a vector space, using the same addition and scalar multiplication operations. Intuitively, a subspace of \\(\\mathbb{R}^{2}\\) are all the lines through the origin as well as \\(\\{0\\}\\); a subspace of \\(\\mathbb{R}^{3}\\) are all the planes through the origin as well as \\(\\{0\\}\\), etc. etc.\nconstituents vector space \\(V\\) A subset \\(U \\subset V\\) which is itself a vector space requirements You check if \\(U\\) is a subspace of \\(V\\) by checking IFF the following three conditions:\nadditive identity: \\(0 \\in U\\) closed under the same addition as in \\(V\\): \\(u,w \\in U: u+w \\in U\\) closed under scalar multiplication as in \\(V\\): \\(a \\in \\mathbb{F}\\) and \\(u \\in U\\) means \\(au \\in U\\) Yes, by only checking three you can prove everything else.\nadditional information simplified check for subspace commutativity, associativity, distributivity These properties are inherited from \\(V\\) as they hold for every element in \\(V\\) so they will hold for \\(U \\subset V\\).\nadditive inverse Because scalar multiplication is defined, and we proved in Axler 1.B that \\(-1v=-v\\) (proof: \\(v+(-1)v = (1+(-1))v = 0v = 0\\)).\nmultiplicative identity Its still \\(1\\).\n\\(\\blacksquare\\)\nfinite-dimensional subspaces Every subspace of a finite-dimensional vector space is a finite-dimensional vector space.\nWe prove this result again via induction.\nbase case If \\(U=\\{0\\}\\), we know \\(U\\) is finite-dimensional and are done. 
If not, take some \\(v_1 \\in U\\) and create a list with only \\(v_1\\) thus far; the invariant here is that the list is linearly independent as we see that a list containing this one element as indeed linearly independent.\ncase \\(j\\) If the linearly independent list we created \\(v_1, \\dots v_{j-1}\\) spans \\(U\\), we are done. We have created a finite list which spans \\(U\\), making \\(U\\) finite-dimensional.\nIf not, that means that we can pick some \\(u \\in U\\) that cannot be written as a linear combination of the invariantly linearly independent vectors \\(v_1, \\dots v_{j-1}\\). We append \\(u\\) to the list, naming it \\(v_{j}\\). As \\(v_{j}\\) cannot be written as a linear combination of the original list, appending it to the list doesn\u0026rsquo;t make the list dependent. This means that the list is still linearly independent.\ninduction Therefore, we have constructed a list of increasing length that is linearly independent. By the fact that length of linearly-independent list \\(\\leq\\) length of spanning list, and the fact that the spanning list of \\(V\\) has finite length (it is given that \\(V\\) is a finite-dimensional vector space), the increasingly longer linearly independent list\u0026mdash;building upwards to eventually span \\(U\\) in finite length.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e is a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e which is a subset of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e, using the same \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e operations. 
Intuitively, a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(\\mathbb{R}^{2}\\) are all the lines through the origin as well as \\(\\{0\\}\\); a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(\\mathbb{R}^{3}\\) are all the planes through the origin as well as \\(\\{0\\}\\), etc. etc.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e \\(V\\)\u003c/li\u003e\n\u003cli\u003eA subset \\(U \\subset V\\) which is itself a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eYou check if \\(U\\) is a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of \\(V\\) by checking \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e the following three conditions:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e: \\(0 \\in U\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under the same \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e as in \\(V\\): \\(u,w \\in U: u+w \\in U\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e as in \\(V\\): \\(a \\in \\mathbb{F}\\) and \\(u \\in U\\) means \\(au \\in U\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYes, by only checking three you can prove everything else.\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"simplified-check-for-subspace\"\u003esimplified check for subspace\u003c/h3\u003e\n\u003ch4 
id=\"commutativity--kbhcommutivity-dot-md--associativity--kbhassociative-dot-md--distributivity--kbhdistributivity-dot-md\"\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e, \u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e, \u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eThese properties are inherited from \\(V\\) as they hold for every element in \\(V\\) so they will hold for \\(U \\subset V\\).\u003c/p\u003e\n\u003ch4 id=\"additive-inverse--kbhinverses-dot-md\"\u003e\u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e is defined, and we proved in \u003ca href=\"/posts/kbhaxler_1_b/\"\u003eAxler 1.B\u003c/a\u003e that \\(-1v=-v\\) (proof: \\(v+(-1)v = (1+(-1))v = 0v = 0\\)).\u003c/p\u003e\n\u003ch4 id=\"multiplicative-identity--kbhmultiplicative-identity-dot-md\"\u003e\u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003eIts still \\(1\\).\u003c/p\u003e\n\u003cp\u003e\\(\\blacksquare\\)\u003c/p\u003e\n\u003ch3 id=\"finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/h3\u003e\n\u003cp\u003eEvery \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e of a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e is a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eWe prove this result again via induction.\u003c/p\u003e\n\u003ch4 id=\"base-case\"\u003ebase case\u003c/h4\u003e\n\u003cp\u003eIf \\(U=\\{0\\}\\), we know \\(U\\) is \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e and are done. 
If not, take some \\(v_1 \\in U\\) and create a list with only \\(v_1\\) thus far; the invariant here is that the list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e as we see that a list containing this one element as indeed \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"case-j\"\u003ecase \\(j\\)\u003c/h4\u003e\n\u003cp\u003eIf the \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list we created \\(v_1, \\dots v_{j-1}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U\\), we are done. We have created a finite list which \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U\\), making \\(U\\) \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eIf not, that means that we can pick some \\(u \\in U\\) that cannot be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the invariantly \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e vectors \\(v_1, \\dots v_{j-1}\\). We append \\(u\\) to the list, naming it \\(v_{j}\\). As \\(v_{j}\\) cannot be written as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of the original list, appending it to the list doesn\u0026rsquo;t make the list dependent. This means that the list is still \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003ch4 id=\"induction\"\u003einduction\u003c/h4\u003e\n\u003cp\u003eTherefore, we have constructed a list of increasing length that is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. 
By the fact that \u003ca href=\"/posts/kbhlinear_independence/#length-of-linearly-independent-list-leq-length-of-spanning-list\"\u003elength of linearly-independent list \\(\\leq\\) length of spanning list\u003c/a\u003e, and the fact that the \u003ca href=\"/posts/kbhspan/#spans\"\u003espanning\u003c/a\u003e list of \\(V\\) has finite length (it is given that \\(V\\) is a \u003ca href=\"/posts/kbhfinite_dimensional_vector_space/\"\u003efinite-dimensional vector space\u003c/a\u003e), the increasingly longer \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list\u0026mdash;building upwards to eventually \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e \\(U\\) in finite length.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsubspace/","tags":null,"title":"subspace"},{"categories":null,"contents":"the pocket at which the ligand binds to the enzyme\n","html":"\u003cp\u003ethe pocket at which the ligand binds to the enzyme\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsubtrait_envelope/","tags":null,"title":"substrate envelope"},{"categories":null,"contents":"The sum of subsets is the definition of addition upon two subsets.\nApparently, the unions of subsets are almost never subspaces (they don\u0026rsquo;t produce linearity?) Therefore, we like to work with sum of subsets more.\nRemember this has arbitrarily many things!! as a part of the content. 
When defining, remember to open that possibility.\nconstituents Sub-sets of \\(V\\) named \\(U_1, U_2, \\dots, U_{m}\\)\nrequirements The sum of subsets \\(U_1, \\dots, U_{m}\\) is defined as:\n\\begin{equation} U_1, \\dots, U_{m} = \\{u_1+\\dots+u_{m}: u_1\\in U_1, \\dots, u_{m} \\in U_{m}\\} \\end{equation}\n\u0026ldquo;all elements formed by taking one element from each and add it.\u0026rdquo;\nadditional information sum of subspaces is the smallest subspace with both subspaces Suppose \\(U_1, \\dots U_{m}\\) are subspaces of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is the smallest subspace of \\(V\\) containing \\(U_1, \\dots, U_{m}\\).\nProof:\nIs a subspace\u0026mdash;\nclearly \\(0\\) is in the sum. (taking \\(0\\) from each subspace and adding) addition and scalar multiplication inherits (closed in each subspace, then, reapplying definition of sum of subsets) Smallest containing subspace\u0026mdash;\nBecause a subspace is closed under addition, if a subspace contains \\(U_{1}, \\dots, U_{m}\\) you can always add each of the constituent elements manually to form every \\(U_1+\\dots+U_{m}\\).\nConversely, the subspace \\(U_1+\\dots +U_{m}\\) should contain \\(U_1, \\dots, U_{m}\\) by simply setting the coefficients except for the one you are interested in to \\(0\\).\nTherefore, as both subsets contain each other; they are equivalent.\ndimension of sums Let there be two finite-dimensional subspaces: \\(U_1\\) and \\(U_2\\). Then:\n\\begin{equation} \\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2) \\end{equation}\nProof:\nlet us form an basis of \\(U_1 \\cap U_{2}\\): \\(u_1, \\dots u_{m}\\); this indicates to us that \\(\\dim(U_1 \\cap U_{2}) = m\\). Being a basis of \\(U_1 \\cap U_{2}\\), it is linearly independent in \\(U_1\\) (which forms a part of the intersection.\nAs any linearly independent list (in this case, in \\(U_1\\)) can be expanded into a basis of \\(U_1\\). Let\u0026rsquo;s say by some vectors \\(v_1 \\dots v_{j}\\). 
Therefore, we have that:\nThe new basis is \\(u_1, \\dots u_{m}, v_1, \\dots v_{m}\\), and so:\n\\begin{equation} \\dim U_1 = m+j \\end{equation}\nBy the same token, let\u0026rsquo;s just say some \\(w_1, \\dots w_{k}\\) can be used to extend \\(u_1, \\dots u_{m}\\) into a basis of \\(U_2\\) (as \\(u_1, \\dots u_{m}\\) is also an linearly independent list in \\(U_2\\)). So:\n\\begin{equation} \\dim U_{2} = m+k \\end{equation}\nWe desire that \\(\\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2)\\). Having constructed all three of the elements, we desire to find a list that is length \\((m+j)+(m+k)-m = m+j+k\\) that forms a basis of \\(U_1+U_2\\), which will complete the proof.\nConveniently, \\(u_1, \\dots u_{m}, v_1, \\dots v_{j}, w_1, \\dots w_{k}\\) nicely is list of length \\(m+j+k\\). Therefore, we desire that that list forms a basis of \\(U_1+U_{2}\\).\nAs pairwise in this list are the basis of \\(U_1\\) and \\(U_2\\), this list can span both \\(U_1\\) and \\(U_2\\) (just zero out the \u0026ldquo;other\u0026rdquo; sublist\u0026mdash;zero \\(w\\) if desiring a basis of \\(U_1\\), \\(v\\) if \\(U_2\\) \u0026mdash;and you have a basis of each space. As \\(U_1+U_2\\) requires plucking a member from each and adding, as this list spans \\(U_1\\) and \\(U_2\\) separately (again, it forms the basis of the each space), we can just use this list to construct individually each component of \\(U_1+U_2\\) then adding it together. Hence, that long combo list spans \\(U_1+U_2\\).\nThe only thing left is to show that the giant list there is linearly independent. 
Let\u0026rsquo;s construct:\n\\begin{equation} a_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} + c_1w_1 + \\dots + c_{k}w_{k} = 0 \\end{equation}\nto demonstrate linearly independence,\nMoving the \\(w\\) to the right, we have that:\n\\begin{equation} a_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} =-(c_1w_1 + \\dots + c_{k}w_{k}) \\end{equation}\nRecall that \\(u_1 \\dots v_{j}\\) are all vectors in \\(U_1\\). Having written \\(-(c_1w_1 + \\dots + c_{k}w_{k})\\) as a linear combination thereof, we say that \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1\\) due to closure. But also, \\(w_1 \\dots w_{k} \\in U_2\\) as they form a basis of \\(U_2\\). Hence, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_2\\). So, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1 \\cap U_2\\).\nAnd we said that \\(u_1, \\dots u_{m}\\) are a basis for \\(U_1 \\cap U_{2}\\). Therefore, we can write the \\(c_{i}\\) sums as a linear combination of $u$s:\n\\begin{equation} d_1u_1 \\dots + \\dots + d_{m}u_{m} = (c_1w_1 + \\dots + c_{k}w_{k}) \\end{equation}\nNow, moving the right to the left again:\n\\begin{equation} d_1u_1 \\dots + \\dots + d_{m}u_{m} - (c_1w_1 + \\dots + c_{k}w_{k}) = 0 \\end{equation}\nWe have established before that \\(u_1 \\dots w_{k}\\) is a linearly independent list (it is the basis of \\(U_2\\).) So, to write \\(0\\), \\(d_1 = \\dots = c_{k} = 0\\).\nSubstituting back to the original:\n\\begin{align} a_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} \u0026amp;=-(c_1w_1 + \\dots + c_{k}w_{k}) \\\\ \u0026amp;= 0 \\end{align}\nrecall \\(u_1 \\dots v_{j}\\) is the basis of \\(U_1\\), meaning they are linearly independent. The above expression makes \\(a_1 = \\dots b_{j} = 0\\). 
Having shown that, to write \\(0\\) via \\(u, v, \\dots w\\) requires all scalars \\(a,b,c=0\\), the list is linearly independent.\nHaving shown that the list of \\(u_1, \\dots v_1, \\dots w_1 \\dots w_{k}\\) spans \\(U_1+U_2\\) and is linearly independent within it, it is a basis.\nIt does indeed have length \\(m+j+k\\), completing the proof. \\(\\blacksquare\\)\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e is the definition of \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e upon two subsets.\u003c/p\u003e\n\u003cp\u003eApparently, the unions of subsets are almost never \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es (they don\u0026rsquo;t produce linearity?) Therefore, we like to work with \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e more.\u003c/p\u003e\n\u003cp\u003eRemember this has \u003cstrong\u003e\u003cstrong\u003earbitrarily many things!!\u003c/strong\u003e\u003c/strong\u003e as a part of the content. 
When defining, remember to open that possibility.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cp\u003eSub-\u003cstrong\u003e\u003cstrong\u003esets\u003c/strong\u003e\u003c/strong\u003e of \\(V\\) named \\(U_1, U_2, \\dots, U_{m}\\)\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e \\(U_1, \\dots, U_{m}\\) is defined as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1, \\dots, U_{m} = \\{u_1+\\dots+u_{m}: u_1\\in U_1, \\dots, u_{m} \\in U_{m}\\}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;all elements formed by taking one element from each and add it.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"sum-of-subspaces-is-the-smallest-subspace-with-both-subspaces\"\u003esum of subspaces is the smallest subspace with both subspaces\u003c/h3\u003e\n\u003cp\u003eSuppose \\(U_1, \\dots U_{m}\\) are \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003es of \\(V\\), then \\(U_1+\\dots +U_{m}\\) is the smallest subspace of \\(V\\) containing \\(U_1, \\dots, U_{m}\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eIs a subspace\u0026mdash;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eclearly \\(0\\) is in the sum. 
(taking \\(0\\) from each subspace and adding)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e and \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e inherits (closed in each subspace, then, reapplying definition of \u003ca href=\"/posts/kbhsum_of_subsets/\"\u003esum of subsets\u003c/a\u003e)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eSmallest containing subspace\u0026mdash;\u003c/p\u003e\n\u003cp\u003eBecause a subspace is \u003ca href=\"/posts/kbhclosed/\"\u003eclosed\u003c/a\u003e under \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e, if a \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e contains \\(U_{1}, \\dots, U_{m}\\) you can always add each of the constituent elements manually to form every \\(U_1+\\dots+U_{m}\\).\u003c/p\u003e\n\u003cp\u003eConversely, the \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e \\(U_1+\\dots +U_{m}\\) should contain \\(U_1, \\dots, U_{m}\\) by simply setting the coefficients except for the one you are interested in to \\(0\\).\u003c/p\u003e\n\u003cp\u003eTherefore, as both subsets contain each other; they are equivalent.\u003c/p\u003e\n\u003ch3 id=\"dimension--kbhdimension-dot-md--of-sums\"\u003e\u003ca href=\"/posts/kbhdimension/\"\u003edimension\u003c/a\u003e of sums\u003c/h3\u003e\n\u003cp\u003eLet there be two \u003ca href=\"/posts/kbhsubspace/#finite-dimensional-subspaces\"\u003efinite-dimensional subspaces\u003c/a\u003e: \\(U_1\\) and \\(U_2\\). Then:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003elet us form an \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1 \\cap U_{2}\\): \\(u_1, \\dots u_{m}\\); this indicates to us that \\(\\dim(U_1 \\cap U_{2}) = m\\). 
Being a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1 \\cap U_{2}\\), it is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e in \\(U_1\\) (which forms a part of the intersection.\u003c/p\u003e\n\u003cp\u003eAs any \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list (in this case, in \\(U_1\\)) can be \u003ca href=\"/posts/kbhbasis/#a-id-45384b28-f1e3-4fb1-aeb2-21c875834744-linearly-independent-list-expends-to-a-id-f88170b1-08b5-48a7-a7b5-1ace768e7b28-basis\"\u003eexpanded into a basis\u003c/a\u003e of \\(U_1\\). Let\u0026rsquo;s say by some vectors \\(v_1 \\dots v_{j}\\). Therefore, we have that:\u003c/p\u003e\n\u003cp\u003eThe new basis is \\(u_1, \\dots u_{m}, v_1, \\dots v_{m}\\), and so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_1 = m+j\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBy the same token, let\u0026rsquo;s just say some \\(w_1, \\dots w_{k}\\) can be used to extend \\(u_1, \\dots u_{m}\\) into a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_2\\) (as \\(u_1, \\dots u_{m}\\) is \u003cem\u003ealso\u003c/em\u003e an \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list in \\(U_2\\)). So:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dim U_{2} = m+k\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe desire that \\(\\dim(U_1+U_2)=\\dim U_1+\\dim U_{2} - \\dim(U_1 \\cap U_2)\\). Having constructed all three of the elements, we desire to find a list that is length \\((m+j)+(m+k)-m = m+j+k\\) that forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1+U_2\\), which will complete the proof.\u003c/p\u003e\n\u003cp\u003eConveniently, \\(u_1, \\dots u_{m}, v_1, \\dots v_{j}, w_1, \\dots w_{k}\\) nicely is list of length \\(m+j+k\\). 
Therefore, we desire that that list forms a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1+U_{2}\\).\u003c/p\u003e\n\u003cp\u003eAs pairwise in this list are the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1\\) and \\(U_2\\), this list can \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e both \\(U_1\\) and \\(U_2\\) (just \u003ca href=\"/posts/kbhzero/\"\u003ezero\u003c/a\u003e out the \u0026ldquo;other\u0026rdquo; sublist\u0026mdash;zero \\(w\\) if desiring a basis of \\(U_1\\), \\(v\\) if \\(U_2\\) \u0026mdash;and you have a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of each space. As \\(U_1+U_2\\) requires plucking a member from each and adding, as this \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U_1\\) and \\(U_2\\) separately (again, it forms the basis of the each space), we can just use this list to construct individually each component of \\(U_1+U_2\\) then adding it together. Hence, that long combo list \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U_1+U_2\\).\u003c/p\u003e\n\u003cp\u003eThe only thing left is to show that the giant list there is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. 
Let\u0026rsquo;s construct:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} + c_1w_1 + \\dots + c_{k}w_{k} = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eto demonstrate \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independence\u003c/a\u003e,\u003c/p\u003e\n\u003cp\u003eMoving the \\(w\\) to the right, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} =-(c_1w_1 + \\dots + c_{k}w_{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eRecall that \\(u_1 \\dots v_{j}\\) are all \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es in \\(U_1\\). Having written \\(-(c_1w_1 + \\dots + c_{k}w_{k})\\) as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e thereof, we say that \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1\\) due to closure. But also, \\(w_1 \\dots w_{k} \\in U_2\\) as they form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_2\\). Hence, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_2\\). So, \\(-(c_1w_1 + \\dots + c_{k}w_{k}) \\in U_1 \\cap U_2\\).\u003c/p\u003e\n\u003cp\u003eAnd we said that \\(u_1, \\dots u_{m}\\) are a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e for \\(U_1 \\cap U_{2}\\). 
Therefore, we can write the \\(c_{i}\\) sums as a \u003ca href=\"/posts/kbhlinear_combination/\"\u003elinear combination\u003c/a\u003e of $u$s:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_1u_1 \\dots + \\dots + d_{m}u_{m} = (c_1w_1 + \\dots + c_{k}w_{k})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, moving the right to the left again:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nd_1u_1 \\dots + \\dots + d_{m}u_{m} - (c_1w_1 + \\dots + c_{k}w_{k}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe have established before that \\(u_1 \\dots w_{k}\\) is a \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e list (it is the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_2\\).) So, to write \\(0\\), \\(d_1 = \\dots = c_{k} = 0\\).\u003c/p\u003e\n\u003cp\u003eSubstituting back to the original:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\na_1u_1+ \\dots + a_{m}u_{m} + b_1v_1 + \\dots + b_{j}v_{j} \u0026amp;=-(c_1w_1 + \\dots + c_{k}w_{k}) \\\\\n\u0026amp;= 0\n\\end{align}\u003c/p\u003e\n\u003cp\u003erecall \\(u_1 \\dots v_{j}\\) is the \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U_1\\), meaning they are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e. The above expression makes \\(a_1 = \\dots b_{j} = 0\\). Having shown that, to write \\(0\\) via \\(u, v, \\dots w\\) requires all scalars \\(a,b,c=0\\), the list is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHaving shown that the list of \\(u_1, \\dots v_1, \\dots w_1 \\dots w_{k}\\) \u003ca href=\"/posts/kbhspan/#spans\"\u003espans\u003c/a\u003e \\(U_1+U_2\\) and is \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e within it, it is a basis.\u003c/p\u003e\n\u003cp\u003eIt does indeed have length \\(m+j+k\\), completing the proof. 
\\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsum_of_subsets/","tags":null,"title":"sum of subsets"},{"categories":null,"contents":"Consider \u0026ldquo;what\u0026rsquo;s the variable representing the sum of the result of 2 dice?\u0026rdquo;\n\\begin{equation} Y = \\sum_{i=1}^{2} X \\end{equation}\nwhere \\(X\\) is a random variable representing the result of once dice.\n","html":"\u003cp\u003eConsider \u0026ldquo;what\u0026rsquo;s the variable representing the sum of the result of 2 dice?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nY = \\sum_{i=1}^{2} X\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(X\\) is a \u003ca href=\"/posts/kbhrandom_variables/\"\u003erandom variable\u003c/a\u003e representing the result of once dice.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsum_of_two_dice/","tags":null,"title":"Sum of Two Dice, Random Variable Edition"},{"categories":null,"contents":"Suppose \\(v \\in V\\), and \\(U \\subset V\\). Then, \\(v+U\\) is the subset (not a subspace, obviously):\n\\begin{equation} v + U = \\{v+u : u \\in U\\} \\end{equation}\n","html":"\u003cp\u003eSuppose \\(v \\in V\\), and \\(U \\subset V\\). 
Then, \\(v+U\\) is the \u003cem\u003esubset\u003c/em\u003e (not a subspace, obviously):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nv + U = \\{v+u : u \\in U\\}\n\\end{equation}\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-20_22-01-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhsum_of_vector_and_subspace/","tags":null,"title":"sum of vector and subspace"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhspersite/","tags":null,"title":"supersite"},{"categories":null,"contents":"Supervised learning (also known as behavioral cloning) if the agent is learning what to do in an observe-act cycle) is a type of decision making method.\nprovide the agent with some examples use an automated learning algorithm to generalize from the example This is good for typically representative situations, but if you are throwing an agent into a completely unfamiliar situation, supervised learning cannot perform better.\nDisadvantages the labeled data is finite limited by the quality of performance in the training data interpolation between states are finite ","html":"\u003cp\u003eSupervised learning (also known as \u003ca href=\"/posts/kbhsupervised_learning/\"\u003ebehavioral cloning\u003c/a\u003e) if the agent is learning what to do in an \u003ca href=\"/posts/kbhobserve_act_cycle/\"\u003eobserve-act cycle\u003c/a\u003e) is a type of \u003ca href=\"/posts/kbhdecision_making/\"\u003edecision making\u003c/a\u003e method.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eprovide the \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e with some examples\u003c/li\u003e\n\u003cli\u003euse an automated learning algorithm to generalize from the example\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is good for typically representative situations, but if you are throwing an \u003ca href=\"/posts/kbhagent/\"\u003eagent\u003c/a\u003e into a completely unfamiliar situation, 
supervised learning cannot perform better.\u003c/p\u003e\n\u003ch2 id=\"disadvantages\"\u003eDisadvantages\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ethe labeled data is finite\u003c/li\u003e\n\u003cli\u003elimited by the quality of performance in the training data\u003c/li\u003e\n\u003cli\u003einterpolation between states are finite\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsupervised_learning/","tags":null,"title":"supervised learning"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhsupport/","tags":null,"title":"support"},{"categories":null,"contents":"A function \\(T: V\\to W\\) is surjective if its range equals its codomain \\(W\\). \u0026ldquo;onto\u0026rdquo;\n\u0026ldquo;For any possible output, \\(w \\in W\\) for \\(T \\in \\mathcal{L}(V,W)\\), there is at LEAST one input \\(T\\) that maps \\(Tv \\to w\\). \u0026quot;\n\\begin{equation} \\forall w \\in W, \\exists v \\in V:Tv=W \\end{equation}\nmap to bigger space is not surjective See map to bigger space is not surjective\n","html":"\u003cp\u003eA function \\(T: V\\to W\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e if its \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e equals its codomain \\(W\\). \u0026ldquo;onto\u0026rdquo;\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;For any possible output, \\(w \\in W\\) for \\(T \\in \\mathcal{L}(V,W)\\), there is at LEAST one input \\(T\\) that maps \\(Tv \\to w\\). 
\u0026quot;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\forall w \\in W, \\exists v \\in V:Tv=W\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"map-to-bigger-space-is-not-surjective--kbhlinear-map-dot-md\"\u003e\u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSee \u003ca href=\"/posts/kbhlinear_map/#map-to-bigger-space-is-not-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjective\"\u003emap to bigger space is not surjective\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsurjectivity/","tags":null,"title":"surjectivity"},{"categories":null,"contents":"syscalls are public functions that allow user land operations to access system-level services (such as reading a sector) which otherwise is locked in kernel mode because they require special privileges.\nThese functions are called completely isolated to another function: 1) private stack frame 2) private memory, etc.\nopen, close, read, write\nkernel mode kernel mode allows superuser function access such as reading sectors, etc. which would be dangerous if public.\nfile open int open(const char *pathname, int flags, mode_t mode); Flags are a bitwise OR operations: you have to open with O_RDONLY (read only), O_WRONLY (write only), or O_RDWR (both read and write). This returns \\(-1\\) if the reading fails.\nOther flags:\nO_TRUNC (truncate file) O_CREAT (creating a file if not exist), which will require a mode_t mode parameter to set the permission O_EXCL (file must not exist) close int close(int fd); ssize_t is a type that is a size_t which accepts -1.\nread get a block of a file\nssize_t read(int fd, void *buf, size_t count); Returns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). 
\\(0\\) if EOF, \\(-1\\) on error.\nread my nat read all the bytes the OS keeps track of where you are reading from write writes a block of a file\nssize_t write(int fd, void *buf, size_t count); Returns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). \\(0\\) if EOF, \\(-1\\) on error.\nfile descriptor After we open a file, file descriptors, which are ints, which track where the reading head is in the file; so you can have multiple descriptors each with a different location\nfile descriptor is used to model access to a variety of resources:\nnetwork connections printers/services and special file descriptors:\n0: STDIN_FILENO \u0026mdash; input from the terminal 1: STDOUT_FILENO \u0026mdash; output to the terminal 2: STDERR_FILENO \u0026mdash; error to the terminal ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhsyscalls/\"\u003esyscalls\u003c/a\u003e are public functions that allow user land operations to access system-level services (such as reading a sector) which otherwise is locked in \u003ca href=\"#kernel-mode\"\u003ekernel mode\u003c/a\u003e because they require special privileges.\u003c/p\u003e\n\u003cp\u003eThese functions are called completely isolated to another function: 1) private stack frame 2) private memory, etc.\u003c/p\u003e\n\u003cp\u003e\u003ccode\u003eopen\u003c/code\u003e, \u003ccode\u003eclose\u003c/code\u003e, \u003ccode\u003eread\u003c/code\u003e, \u003ccode\u003ewrite\u003c/code\u003e\u003c/p\u003e\n\u003ch2 id=\"kernel-mode\"\u003ekernel mode\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#kernel-mode\"\u003ekernel mode\u003c/a\u003e allows superuser function access such as reading \u003ca href=\"/posts/kbhfilesystem/#disk\"\u003esector\u003c/a\u003es, etc. 
which would be dangerous if public.\u003c/p\u003e\n\u003ch2 id=\"file\"\u003efile\u003c/h2\u003e\n\u003ch3 id=\"open\"\u003eopen\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eopen\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003epathname\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eflags\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003emode_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eFlags are a bitwise OR operations: you have to open with \u003ccode\u003eO_RDONLY\u003c/code\u003e (read only), \u003ccode\u003eO_WRONLY\u003c/code\u003e (write only), or \u003ccode\u003eO_RDWR\u003c/code\u003e (both read and write). 
This returns \\(-1\\) if the reading fails.\u003c/p\u003e\n\u003cp\u003eOther flags:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003eO_TRUNC\u003c/code\u003e (truncate file)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_CREAT\u003c/code\u003e (creating a file if not exist), which will require a \u003ccode\u003emode_t mode\u003c/code\u003e parameter to set the permission\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003eO_EXCL\u003c/code\u003e (file must not exist)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"close\"\u003eclose\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eclose\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003ccode\u003essize_t\u003c/code\u003e is a type that is a \u003ccode\u003esize_t\u003c/code\u003e which accepts \u003ccode\u003e-1\u003c/code\u003e.\u003c/p\u003e\n\u003ch3 id=\"read\"\u003eread\u003c/h3\u003e\n\u003cp\u003eget a block of a file\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003eread\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan 
style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eReturns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). \\(0\\) if EOF, \\(-1\\) on error.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eread my nat read all the bytes\u003c/li\u003e\n\u003cli\u003ethe OS keeps track of where you are reading from\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"write\"\u003ewrite\u003c/h3\u003e\n\u003cp\u003ewrites a block of a file\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003essize_t\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ewrite\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003eint\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efd\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e*\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan 
style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecount\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eReturns the number of bytes actually read (for instance, if count is too large, it will only return the number of bytes read). \\(0\\) if EOF, \\(-1\\) on error.\u003c/p\u003e\n\u003ch2 id=\"file-descriptor\"\u003efile descriptor\u003c/h2\u003e\n\u003cp\u003eAfter we open a file, file descriptors, which are ints, which track where the reading head is in the file; so you can have multiple descriptors each with a different location\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#file-descriptor\"\u003efile descriptor\u003c/a\u003e is used to model access to a variety of resources:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003enetwork connections\u003c/li\u003e\n\u003cli\u003eprinters/services\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eand special file descriptors:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e0: \u003ccode\u003eSTDIN_FILENO\u003c/code\u003e \u0026mdash; input from the terminal\u003c/li\u003e\n\u003cli\u003e1: \u003ccode\u003eSTDOUT_FILENO\u003c/code\u003e \u0026mdash; output to the terminal\u003c/li\u003e\n\u003cli\u003e2: \u003ccode\u003eSTDERR_FILENO\u003c/code\u003e \u0026mdash; error to the terminal\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhsyscalls/","tags":null,"title":"syscalls"},{"categories":null,"contents":"Suppose \\(T \\in \\mathcal{L}(V,W)\\). Define a \\(\\widetilde{T}: V / (null\\ T) \\to W\\) such that:\n\\begin{align} \\widetilde{T}(v+ null\\ T) = Tv \\end{align}\nso \\(\\widetilde{T}\\) is the map that recovers the mapped result from an affine subset from the null space of the map.\n\\(\\widetilde{T}\\) is well defined Same problem as that with operations on quotient space. 
We need to make sure that \\(\\widetilde{T}\\) behave the same way on distinct but equivalent representations of the same affine subset.\nSuppose \\(u,v \\in V\\) such that \\(u+null\\ T = v+null\\ T\\). Because two affine subsets parallel to \\(U\\) are either equal or disjoint, we have that \\(u-v \\in null\\ T\\). This means that \\(Tu-Tv = 0 \\implies Tu= Tv\\). So applying \\(\\widetilde{T}\\) on equivalent representations of the same affine subset would yield the same result, as desired. \\(\\blacksquare\\)\nproperties of \\(\\widetilde{T}\\) it is a linear map TBD proof. Basically just like do it inheriting operations from the operations on quotient space.\nit is injective We desire here that \\(null\\ \\widetilde{T} = \\{0\\}\\) which will tell us that \\(\\widetilde{T}\\) is injective.\nSuppose some \\(v + null\\ T\\) is in the null space of \\(\\widetilde{T}\\). So, we have that:\n\\begin{equation} \\widetilde{T}(v+null\\ T) = Tv = 0 \\end{equation}\nSo, we have that \\(v \\in null\\ T\\). Now, this means that \\(v-0 \\in null\\ T\\). Because two affine subsets parallel to \\(U\\) are either equal or disjoint, \\(v + null\\ T = 0 + null\\ T\\) WLOG \\(\\forall v+null\\ T \\in null\\ \\widetilde{T}\\). This means that \\(null\\ \\widetilde{T}=\\{0\\}\\), as desired.\nits range is equal to the map\u0026rsquo;s range \\begin{equation} range\\ \\widetilde{T} = range\\ T \\end{equation}\nby definition of everything.\n\\(V / null\\ T\\) is isomorphic to \\(range\\ T\\) \u0026hellip;.is this the point of this whole thing?\nShown by the two sub-results above, and that injectivity and surjectivity implies invertability.\n","html":"\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V,W)\\). 
Define a \\(\\widetilde{T}: V / (null\\ T) \\to W\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\\widetilde{T}(v+ null\\ T) = Tv\n\\end{align}\u003c/p\u003e\n\u003cp\u003eso \\(\\widetilde{T}\\) is the map that recovers the mapped result from an \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e from the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of the map.\u003c/p\u003e\n\u003ch2 id=\"widetilde-t-is-well-defined\"\u003e\\(\\widetilde{T}\\) is well defined\u003c/h2\u003e\n\u003cp\u003eSame problem as that with \u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on quotient space\u003c/a\u003e. We need to make sure that \\(\\widetilde{T}\\) behave the same way on distinct but equivalent representations of the same \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuppose \\(u,v \\in V\\) such that \\(u+null\\ T = v+null\\ T\\). Because \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e, we have that \\(u-v \\in null\\ T\\). This means that \\(Tu-Tv = 0 \\implies Tu= Tv\\). So applying \\(\\widetilde{T}\\) on equivalent representations of the same \u003ca href=\"/posts/kbhparallel_linear_algebra/\"\u003eaffine subset\u003c/a\u003e would yield the same result, as desired. \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"properties-of-widetilde-t\"\u003eproperties of \\(\\widetilde{T}\\)\u003c/h2\u003e\n\u003ch3 id=\"it-is-a-linear-map\"\u003eit is a linear map\u003c/h3\u003e\n\u003cp\u003eTBD proof. 
Basically just like do it inheriting operations from the \u003ca href=\"/posts/kbhquotient_space/#operations-on-id-53548f85-b3c8-42ce-81e7-9016ed7bd280-quotient-space\"\u003eoperations on quotient space\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"it-is-injective\"\u003eit is injective\u003c/h3\u003e\n\u003cp\u003eWe desire here that \\(null\\ \\widetilde{T} = \\{0\\}\\) which will tell us that \\(\\widetilde{T}\\) is \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eSuppose some \\(v + null\\ T\\) is in the \u003ca href=\"/posts/kbhnull_space/\"\u003enull space\u003c/a\u003e of \\(\\widetilde{T}\\). So, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\widetilde{T}(v+null\\ T) = Tv = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, we have that \\(v \\in null\\ T\\). Now, this means that \\(v-0 \\in null\\ T\\). Because \u003ca href=\"/posts/kbhparallel_linear_algebra/#two-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-affine-subset-s-id-4c9e8fea-cd23-4a41-b85e-bb5be3867c96-parallel-to-u-are-either-equal-or-disjoint\"\u003etwo affine subsets parallel to \\(U\\) are either equal or disjoint\u003c/a\u003e, \\(v + null\\ T = 0 + null\\ T\\) WLOG \\(\\forall v+null\\ T \\in null\\ \\widetilde{T}\\). 
This means that \\(null\\ \\widetilde{T}=\\{0\\}\\), as desired.\u003c/p\u003e\n\u003ch3 id=\"its-range-is-equal-to-the-map-s-range\"\u003eits range is equal to the map\u0026rsquo;s range\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nrange\\ \\widetilde{T} = range\\ T\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eby definition of everything.\u003c/p\u003e\n\u003ch3 id=\"v-null-t-is-isomorphic--kbhisomorphism-dot-md--to-range-t\"\u003e\\(V / null\\ T\\) is \u003ca href=\"/posts/kbhisomorphism/\"\u003eisomorphic\u003c/a\u003e to \\(range\\ T\\)\u003c/h3\u003e\n\u003cp\u003e\u0026hellip;.is this the point of this whole thing?\u003c/p\u003e\n\u003cp\u003eShown by the two sub-results above, and that \u003ca href=\"/posts/kbhinvertability/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-and-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-implies-id-ff05739c-6e70-46ba-9d56-0958ef847f57-invertability\"\u003einjectivity and surjectivity implies invertability\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbht_twiddle/","tags":null,"title":"T twiddle"},{"categories":null,"contents":"confidence intervals, a review:\n\\begin{equation} statistic \\pm z^*\\sigma_{statistic} \\end{equation}\nFrequently, we don\u0026rsquo;t have access to \\(\\sigma\\) and hence have to guestimate. When we have a sample means and a proportion, we have ways of guestimating it from the standard error (available on the single-sample section of the AP Statistics formula sheet.)\nHowever, for means, the standard error involves! \\(\\sigma\\). How do we figure \\(\\sigma\\) when we don\u0026rsquo;t know it? We could use \\(s\\), sample standard deviation, but then we have to adjust \\(z^*\\) otherwise we will have underestimation. 
Hence, we have to use a statistic called \\(t^*\\).\nWe can use t-values to perform t-test, a hypothesis test of means.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhconfidence_interval/\"\u003econfidence interval\u003c/a\u003es, a review:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nstatistic \\pm z^*\\sigma_{statistic}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFrequently, we don\u0026rsquo;t have access to \\(\\sigma\\) and hence have to guestimate. When we have a sample means and a proportion, we have ways of guestimating it from the standard error (available on the single-sample section of the \u003ca href=\"/posts/kbhapstats/\"\u003eAP Statistics\u003c/a\u003e formula sheet.)\u003c/p\u003e\n\u003cp\u003eHowever, for means, the standard error \u003cem\u003einvolves!\u003c/em\u003e \\(\\sigma\\). How do we figure \\(\\sigma\\) when we don\u0026rsquo;t know it? We \u003cem\u003ecould\u003c/em\u003e use \\(s\\), sample standard deviation, but then we have to adjust \\(z^*\\) otherwise we will have underestimation. Hence, we have to use a statistic called \\(t^*\\).\u003c/p\u003e\n\u003cp\u003eWe can use t-values to perform \u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e, a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e of means.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbht_statistics/","tags":null,"title":"t-statistics"},{"categories":null,"contents":"A t-test is a hypothesis test for statistical significance between two sample means based on t-statistics. Before it can be conducted, it must meet the conditions for inference.\nconditions for inference (t-test) To use t-statistics, you have to meet three conditions just like the conditions for inference used in z-score.\nrandom sampling normal (sample size larger than 30, or if original distribution is confirmed as roughly symmetric about the mean) Independence use a z-statistic to find a p-value Begin by finding a \\(t\\) statistic. 
Remember that:\n\\begin{equation} t = \\frac{statistic-parameter}{std\\ err} \\end{equation}\nIn this case, when we are dealing with sample means, then, we have:\n\\begin{equation} t = \\frac{\\bar{x}-\\mu_0}{\\frac{S_x}{\\sqrt{n}}} \\end{equation}\nwhere \\(\\bar{x}\\) is the measured mean, \\(\\mu_0\\) is the null hypothesis mean, and \\(S_x\\) the sample\u0026rsquo;s sample standard deviation.\nQuick note:\n\\(SE = \\frac{S}{\\sqrt{n}}\\) because the central limit theorem states that sample means for their own distribution, whose variance equals the original variance divided by the sample size. Hence, the standard deviation of the means would be the sample standard deviation divided by the square root of the sample size.\nOnce you have a \\(t\\) value, you look at the test and what its asking (above the mean? below the mean? etc.) and add up the tail probabilities.\npaired vs two-sample tests A paired t-test looks at pairs of values as statistic in itself (i.e. substracts directly, etc.) Think about it as a compound statistic, so you are doing a \\(t\\) test on one value, it just happened to be composed/calculated by a pair of values. (for instance, \u0026ldquo;difference between mother-father glucose levels.\u0026rdquo;)\nA two-staple t-test looks at two independent events and compares them. 
Hence, they are two random variables and should be manipulated as such.\nt-tests for regression lines regression lines can be imbibed with predictive power and confidence intervals:\n\\begin{equation} m \\pm t^* SE_b \\end{equation}\nwhere \\(m\\) is the slope and \\(SE_b\\) is the standard error of the regression line.\nNote that the degrees of freedom used for \\(t^*\\) is the number of data points, minus two.\nconditions for inference (slops) Acronym: LINEAR\nLinear Independent (observations are independent or \\(\u0026lt;10\\%\\)) Normal (for a given \\(x\\), \\(y\\) is normally distributed) Equal variance (for any given \\(x\\), it should have a roughly equal standard deviation in \\(y\\)) Random ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbht_test/\"\u003et-test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e for statistical significance between two sample means based on \u003ca href=\"/posts/kbht_statistics/\"\u003et-statistics\u003c/a\u003e. 
Before it can be conducted, it must meet the \u003ca href=\"#conditions-for-inference--t-test\"\u003econditions for inference\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"conditions-for-inference--t-test\"\u003econditions for inference (t-test)\u003c/h2\u003e\n\u003cp\u003eTo use \u003ca href=\"/posts/kbht_statistics/\"\u003et-statistics\u003c/a\u003e, you have to meet three conditions just like the \u003ca href=\"/posts/kbhz_test/#conditions-for-inference--z-test\"\u003econditions for inference\u003c/a\u003e used in \u003ca href=\"/posts/kbhz_score/\"\u003ez-score.\u003c/a\u003e\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003erandom sampling\u003c/li\u003e\n\u003cli\u003enormal (sample size larger than 30, or if original distribution is confirmed as roughly symmetric about the mean)\u003c/li\u003e\n\u003cli\u003eIndependence\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"use-a-z-statistic-to-find-a-p-value\"\u003euse a z-statistic to find a p-value\u003c/h2\u003e\n\u003cp\u003eBegin by finding a \\(t\\) statistic. Remember that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = \\frac{statistic-parameter}{std\\ err}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIn this case, when we are dealing with sample means, then, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nt = \\frac{\\bar{x}-\\mu_0}{\\frac{S_x}{\\sqrt{n}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\bar{x}\\) is the measured mean, \\(\\mu_0\\) is the \u003ca href=\"/posts/kbhhypothesis_testing/#null-hypothesis\"\u003enull hypothesis\u003c/a\u003e mean, and \\(S_x\\) the sample\u0026rsquo;s sample standard deviation.\u003c/p\u003e\n\u003cp\u003eQuick note:\u003c/p\u003e\n\u003cp\u003e\\(SE = \\frac{S}{\\sqrt{n}}\\) because the \u003ca href=\"/posts/kbhcentral_limit_theorem/\"\u003ecentral limit theorem\u003c/a\u003e states that sample means for their own distribution, whose variance equals the original variance divided by the sample size. 
Hence, the standard deviation of the means would be the sample standard deviation divided by the square root of the sample size.\u003c/p\u003e\n\u003cp\u003eOnce you have a \\(t\\) value, you look at the test and what its asking (above the mean? below the mean? etc.) and add up the tail probabilities.\u003c/p\u003e\n\u003ch2 id=\"paired-vs-two-sample-tests\"\u003epaired vs two-sample tests\u003c/h2\u003e\n\u003cp\u003eA paired t-test looks at pairs of values as \u003ca href=\"/posts/kbhstastistic/\"\u003estatistic\u003c/a\u003e in itself (i.e. substracts directly, etc.) Think about it as a compound statistic, so you are doing a \\(t\\) test on one value, it just happened to be composed/calculated by a pair of values. (for instance, \u0026ldquo;difference between mother-father glucose levels.\u0026rdquo;)\u003c/p\u003e\n\u003cp\u003eA two-staple t-test looks at two independent events and compares them. Hence, they are two random variables and should be manipulated as such.\u003c/p\u003e\n\u003ch2 id=\"t-tests-for-regression-lines\"\u003et-tests for regression lines\u003c/h2\u003e\n\u003cp\u003eregression lines can be imbibed with predictive power and confidence intervals:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nm \\pm t^* SE_b\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(m\\) is the slope and \\(SE_b\\) is the \u003ca href=\"/posts/kbhstandard_error/\"\u003estandard error\u003c/a\u003e of the regression line.\u003c/p\u003e\n\u003cp\u003eNote that the degrees of freedom used for \\(t^*\\) is the number of data points, minus \u003cstrong\u003etwo\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"conditions-for-inference--slops\"\u003econditions for inference (slops)\u003c/h3\u003e\n\u003cp\u003eAcronym: LINEAR\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLinear\u003c/li\u003e\n\u003cli\u003eIndependent (observations are independent or \\(\u0026lt;10\\%\\))\u003c/li\u003e\n\u003cli\u003eNormal (for a given \\(x\\), \\(y\\) is \u003ca 
href=\"/posts/kbhnormal_distribution/\"\u003enormally distributed\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eEqual variance (for any given \\(x\\), it should have a roughly equal standard deviation in \\(y\\))\u003c/li\u003e\n\u003cli\u003eRandom\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbht_test/","tags":null,"title":"t-test"},{"categories":null,"contents":"For an operator \\(T \\in \\mathcal{L}(V)\\), \\(T^{n}\\) would make sense. Instead of writing \\(TTT\\dots\\), then, we just write \\(T^{n}\\).\nconstituents operator \\(T \\in \\mathcal{L}(V)\\) requirements \\(T^{m} = T \\dots T\\) additional information \\(T^{0}\\) \\begin{equation} T^{0} := I \\in \\mathcal{L}(V) \\end{equation}\n\\(T^{-1}\\) \\begin{equation} T^{-m} = (T^{-1})^{m} \\end{equation}\nif \\(T\\) is invertable\nusual rules of squaring \\begin{equation} \\begin{cases} T^{m}T^{n} = T^{m+n} \\\\ (T^{m})^{n} = T^{mn} \\end{cases} \\end{equation}\nThis can be shown by counting the number of times \\(T\\) is repeated by writing each \\(T^{m}\\) out.\n","html":"\u003cp\u003eFor an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\), \\(T^{n}\\) would make sense. 
Instead of writing \\(TTT\\dots\\), then, we just write \\(T^{n}\\).\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(T^{m} = T \\dots T\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"t-0\"\u003e\\(T^{0}\\)\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT^{0} := I \\in \\mathcal{L}(V)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"t-1\"\u003e\\(T^{-1}\\)\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\nT^{-m} = (T^{-1})^{m}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eif\u003c/strong\u003e \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"usual-rules-of-squaring\"\u003eusual rules of squaring\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nT^{m}T^{n} = T^{m+n} \\\\\n(T^{m})^{n} = T^{mn}\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis can be shown by counting the number of times \\(T\\) is repeated by writing each \\(T^{m}\\) out.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhraising_operators_to_powers/","tags":null,"title":"T^m"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtalk_contacts/","tags":null,"title":"Talk Contacts"},{"categories":null,"contents":"Moved much of this to Drafts instead\nphonbank: poor articulation\ndisfluent kids\nlate talkers\nWrite a review about ASR benchmark methods\nREV would be our benchmark What corpora we use? Has anyone used disordered speech? Or really seriously accented speech vis a vi CORALL (how was CORALL sampled?) What samples? How do we sample? What are the benchmarks? 
ASR model + WER\ntildes and noprompt swapped\nWER\nmissing words\ncorrect alignment\nthings swap noprompt backwards apostrophies for quotes the word separation error put tilde BETWEEN specific symbols with connection symbols jemoka becomes batchalign 2 Extended UD? combining bash script to run batchalign multiple times throughout the directories Removing removing non-auditory SBCA corpus area Diarization Diarization as a Bi-product of ASR humans at the end do speaker ID in the end DO TO BATCHALIGN allow people to reject files runhouse meeting \u0026lt;\u0026gt;Donny Greenberg: ADNE, nurses\u0026rsquo; health implementation at Google: grantees of canniniminty Remaining questions but we can\u0026rsquo;t provide SSH function.save() remote running through hashicorp vault? serializing ssh key remote? RUNHOUSE call into remote! headscale take wave2vec and hubert and GSLM questions? ask about inter-turn pauses, where INV: something something something \u0026lt;- PAR: WWW \u0026lt;- INV: somethingsomething else \u0026lt;- PAR: words words word no bullets are given for PAR, so do we skip it? do we count the time for WWW all as an inter-turn pause between INV and PAR? etc. Per Turn Turn level analysis Rename tier to Silence duration? does it include inter-utterance pauses?\nwithin-utterance pause\nfluency, mechanistic between-utterance pause\npause between utterances also: between-speaker pause!\nleaves room for the speaker to take the floor BETWEEN speaker pauses: \u0026ldquo;I don\u0026rsquo;t know what you are asking me\u0026rdquo;, etc.: \u0026ldquo;breakdown!\u0026rdquo; add features: STOPPA, TRESTLE, Wang\nhttps://coryshain.github.io/\nfeaturize saturnino fausa Questions What features? Where to put them? TalkBankDB How to encode the features? 
\u0026ldquo;How informative are your features\u0026rdquo; Start coming up with features (TRESTLE, perhaps) Encode them into xarray \u0026lt;\u0026gt; saturnino stuff make Spanish names list name, city, countries corpuses SABSAE: santa barbara english CABNC: British english next ignore any words that goes wrong in the pipeline ~change: noun =\u0026gt; n; verb =\u0026gt; v, etc.~ DET: ignore \u0026ldquo;DEF\u0026rdquo;, or perhaps the entir featureless unbulleted VAD exprimentents errors! line 1492\n*PAR:\tso ‡ anyway I tiptoe to the front door , open the front door and walk in . •1045194_1050644• %mor:\tco|so beg|beg adv|anyway pro:sub|I v|+n|tip+n|toe prep|to det:art|the n|front n|door cm|cm adj|open det:art|the n|front n|door coord|and n|walk adv|in . %gra:\t1|0|BEG 2|1|BEGP 3|5|JCT 4|5|SUBJ 5|0|ROOT 6|5|JCT 7|9|DET 8|9|MOD 9|6|POBJ 10|5|LP 11|14|MOD 12|14|DET 13|14|MOD 14|5|OBJ 15|14|CONJ 16|15|COORD 17|16|NJCT 18|5|PUNCT\nerrors? words without features needs to be correctly handled (done in the middle of meeting) 04111 (me ma SOS) nouns shouldn\u0026rsquo;t mark if it is Com,Neut, should\u0026rsquo;nt mark if its Com fix PASTP =\u0026gt; PAST and does past participles exist? more Move shua to d(e) Include instructions on how to recreate a broken Conda environment Update the package to conda somehow move next steps deal with `n` +\u0026hellip; fix remove bullets results ~ contraction \u0026amp; fused suffix getting rid of punkt in mor , =\u0026gt; cm . 
=\u0026gt; no PUNKT, stays stuff chocolaty (noadmin, https://docs.chocolatey.org/en-us/choco/setup#non-administrative-install) miniconda setx path \u0026ldquo;%path%;C:\\tools\\miniconda3\\condabin\u0026rdquo; curl env first, the install (Windows can\u0026rsquo;t do it from a URL) readme conda init zsh (close shell, open again) .mp4 mfa model downloading what\u0026rsquo;s the difference between online docker install and manual install NLTK Huggingface transformers tokenizers (versining) /opt/homebrew/Caskroom/miniforge/base/envs/aligner/lib/python3.9/site-packages/montreal_forced_aligner/corpus/text_corpus.py; getattr(self, k).update(error_dict[k]) AttributeError: \u0026rsquo;list\u0026rsquo; object has no attribute \u0026lsquo;update\u0026rsquo; FileArgumentNotFoundError: ; line 139\nDBA See the data on the frequency of haphax legomina vs. COCA ESPNet need to talk to Ji Yang Andrew\u0026rsquo;s Features Collapse two PAR tiers down Checkpoint per file One corpus prompt per run Handle empty tiers I/P selection crashes! contingency preview the LONGEST segment instead of the top one -i kill in the middle fixes \u0026ldquo;my mom\u0026rsquo;s cryin(g)\u0026rdquo; [\u0026lt;] mm [l648] (also themmm after) \u0026ldquo;made her a nice dress\u0026rdquo; [\u0026lt;] mhm [l1086] \u0026ldquo;when I was a kid I\u0026rdquo; \u0026amp;=laughs [l1278] Others chstring (for uh, mm-hmm)\nretrace (asr\u0026amp;fa folder)\nlowcase (caps)\nrep-join.cut (fixes/)\nnumbers \u0026lt;affirmative\u0026gt; \u0026lsquo;mo data! CallFriend/CallHome (ca-data) ISL? SBCSAE Aphasia + MICASE TBI data Providing a Two-Pass Solution Writing Big description of the pipeline Notion of the pipeline Better tokenization? 
8/18 Initial segment repetition Extracting studdering Gramatically problematic mar mar has done a thing and its phoneme level We did it, now automated LEAP data next actions Aphasia (-apraxia?): classification Child data (EllisWeismer) Dementia a ~Multiple @Begin/CHECK problem~\n~Placement of @Options~\n~Strange, missing period~\n~Bracket comments should FOLLOW words instead of PRECEEDING them~\n~%xwor: line~\nSTICK TO DASHES WHEN DISTRIBUTING BATCHALIGN\nend the utterance when it ends (incl. inter-utterance pauses)\n\u0026ldquo;I\u0026rdquo; need to be capitalized\n11005 (LT)\nAlign EllisWeismer\nAlso cool to align:\nfluency IISRP/*\nhttps://en.wikipedia.org/wiki/Speaker_diarisation\nhttps://universaldependencies.org/\nAlzheimer\u0026rsquo;s Project https://dementia.talkbank.org/\nhttps://luzs.gitlab.io/adresso-2021/\nSpecifically: https://dementia.talkbank.org/access/English/Pitt.html\nReview Kathleen Fraser: https://drive.google.com/drive/u/1/folders/1lYTIzzXLXw3LlDG9ZQ7k4RayDiP6eLs1\nHere are the review papers: https://drive.google.com/drive/u/1/folders/1pokU75aKt6vNdeSMpc-HfN9fkLvRyutt\nRead this first: https://drive.google.com/drive/u/1/folders/0B3XZtiQwQW4XMnlFN0ZGUndUamM?resourcekey=0-AlOCZb4q9TyG4KpaMQpeoA\nSome PITT data have 3-4 recordings\nThe best way to diagnosing alzhimers\u0026rsquo; is from language.\nWhy this field is needed: to analyze a pre-post test metric.\nDesired output: existence of dementia (a.k.a alzheimer\u0026rsquo;s\u0026rsquo;).\nOther research to read:\nPenn (julia parish something but they don\u0026rsquo;t stare their data but they smile and things with Mark Libermann type of thing) Learning more about speech text https://my.clevelandclinic.org/health/diagnostics/22327-differential-diagnosis python3 ~/mfa_data/batchalign-dist/batchalign.py ~/mfa_data/my_corpus ~/mfa_data/my_corpus_aligned\nchristan marr paper on MFA on child data\n","html":"\u003cp\u003eMoved much of this to Drafts 
instead\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ephonbank: poor articulation\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003edisfluent kids\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elate talkers\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWrite a review about ASR benchmark methods\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eREV would be our benchmark\n\u003cul\u003e\n\u003cli\u003eWhat corpora we use?\u003c/li\u003e\n\u003cli\u003eHas anyone used \u003cstrong\u003edisordered speech\u003c/strong\u003e?\u003c/li\u003e\n\u003cli\u003eOr really seriously accented speech vis a vi CORALL (how was CORALL sampled?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eWhat samples? How do we sample? What are the benchmarks?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-06-01_16-32-05_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eASR model + WER\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003etildes and noprompt swapped\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWER\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003emissing words\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecorrect alignment\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things\"\u003ethings\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eswap noprompt backwards\u003c/li\u003e\n\u003cli\u003eapostrophies for quotes\u003c/li\u003e\n\u003cli\u003ethe word separation error\n\u003cul\u003e\n\u003cli\u003eput tilde BETWEEN specific symbols with connection symbols\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ejemoka becomes batchalign 2\u003c/li\u003e\n\u003cli\u003eExtended UD?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"combining\"\u003ecombining\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebash script to run 
batchalign multiple times throughout the directories\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"removing\"\u003eRemoving\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eremoving non-auditory SBCA corpus area\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"diarization\"\u003eDiarization\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDiarization as a Bi-product of ASR\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"humans-at-the-end\"\u003ehumans at the end\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edo speaker ID in the end\u003c/li\u003e\n\u003cli\u003eDO TO BATCHALIGN\u003c/li\u003e\n\u003cli\u003eallow people to reject files\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"runhouse-meeting\"\u003erunhouse meeting\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026lt;\u0026gt;Donny Greenberg: ADNE, nurses\u0026rsquo; health\u003c/li\u003e\n\u003cli\u003eimplementation at Google: grantees of canniniminty\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"remaining-questions\"\u003eRemaining questions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ebut \u003cstrong\u003ewe can\u0026rsquo;t provide SSH\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003efunction.save()\u003c/li\u003e\n\u003cli\u003eremote\n\u003cul\u003e\n\u003cli\u003erunning through hashicorp vault?\u003c/li\u003e\n\u003cli\u003eserializing ssh key remote?\u003c/li\u003e\n\u003cli\u003eRUNHOUSE call into remote!\u003c/li\u003e\n\u003cli\u003e\u003cem\u003eheadscale\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003etake wave2vec and hubert and GSLM\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions\"\u003equestions?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eask about inter-turn pauses, where\n\u003cul\u003e\n\u003cli\u003eINV: something something something \u0026lt;-\u003c/li\u003e\n\u003cli\u003ePAR: WWW \u0026lt;-\u003c/li\u003e\n\u003cli\u003eINV: somethingsomething else \u0026lt;-\u003c/li\u003e\n\u003cli\u003ePAR: words words 
word\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eno bullets are given for PAR, so do we skip it? do we count the time for WWW all as an inter-turn pause between INV and PAR? etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"per-turn\"\u003ePer Turn\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eTurn\u003c/strong\u003e level analysis\u003c/li\u003e\n\u003cli\u003eRename tier to\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"silence-duration\"\u003eSilence duration?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edoes it include inter-utterance pauses?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewithin-utterance pause\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003efluency, mechanistic\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ebetween-utterance pause\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003epause between utterances\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ealso: between-speaker pause!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eleaves room for the speaker to take the floor\u003c/li\u003e\n\u003cli\u003eBETWEEN speaker pauses: \u0026ldquo;I don\u0026rsquo;t know what you are asking me\u0026rdquo;, etc.: \u0026ldquo;breakdown!\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eadd features: STOPPA, TRESTLE, Wang\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"https://coryshain.github.io/\"\u003ehttps://coryshain.github.io/\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"featurize\"\u003efeaturize\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003esaturnino\u003c/li\u003e\n\u003cli\u003efausa\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"questions\"\u003eQuestions\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhat features?\u003c/li\u003e\n\u003cli\u003eWhere to put them?\u003c/li\u003e\n\u003cli\u003eTalkBankDB\u003c/li\u003e\n\u003cli\u003eHow to encode the 
features?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"how-informative-are-your-features\"\u003e\u0026ldquo;How informative are your features\u0026rdquo;\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eStart coming up with features (TRESTLE, perhaps)\u003c/li\u003e\n\u003cli\u003eEncode them into xarray\u003c/li\u003e\n\u003cli\u003e\u0026lt;\u0026gt; saturnino\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stuff\"\u003estuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emake Spanish names list\u003c/li\u003e\n\u003cli\u003ename, city, countries\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"corpuses\"\u003ecorpuses\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eSABSAE\u003c/strong\u003e: santa barbara english\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eCABNC\u003c/strong\u003e: British english\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next\"\u003enext\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eignore any words that goes wrong in the pipeline\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003e~change: noun =\u0026gt; n; verb =\u0026gt; v, etc.~\u003c/code\u003e\u003c/li\u003e\n\u003cli\u003eDET: ignore \u0026ldquo;DEF\u0026rdquo;, or perhaps the entir featureless\u003c/li\u003e\n\u003cli\u003eunbulleted VAD exprimentents\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"errors\"\u003eerrors!\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-23_20-22-46_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eline 1492\u003c/p\u003e\n\u003cp\u003e*PAR:\tso ‡ anyway I tiptoe to the front door , open the front door and\nwalk in . 
•1045194_1050644•\n%mor:\tco|so beg|beg adv|anyway pro:sub|I v|+n|tip+n|toe prep|to\ndet:art|the n|front n|door cm|cm adj|open det:art|the n|front n|door\ncoord|and n|walk adv|in .\n%gra:\t1|0|BEG 2|1|BEGP 3|5|JCT 4|5|SUBJ 5|0|ROOT 6|5|JCT 7|9|DET 8|9|MOD\n9|6|POBJ 10|5|LP 11|14|MOD 12|14|DET 13|14|MOD 14|5|OBJ 15|14|CONJ\n16|15|COORD 17|16|NJCT 18|5|PUNCT\u003c/p\u003e\n\u003ch2 id=\"errors\"\u003eerrors?\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewords without features needs to be correctly handled (done in the middle of meeting)\u003c/li\u003e\n\u003cli\u003e04111 (me ma SOS)\u003c/li\u003e\n\u003cli\u003enouns shouldn\u0026rsquo;t mark if it is Com,Neut, should\u0026rsquo;nt mark if its Com\u003c/li\u003e\n\u003cli\u003efix PASTP =\u0026gt; PAST\u003c/li\u003e\n\u003cli\u003eand does past participles exist?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"more\"\u003emore\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eMove shua to d(e)\u003c/li\u003e\n\u003cli\u003eInclude instructions on how to recreate a broken Conda environment\u003c/li\u003e\n\u003cli\u003eUpdate the package to conda somehow\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"move\"\u003emove\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-12-03_00-17-14_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"next-steps\"\u003enext steps\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edeal with `n`\u003c/li\u003e\n\u003cli\u003e+\u0026hellip; fix\u003c/li\u003e\n\u003cli\u003eremove bullets\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"results\"\u003eresults\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e~ contraction\u003c/li\u003e\n\u003cli\u003e\u0026amp; fused\u003c/li\u003e\n\u003cli\u003e\n\u003cul\u003e\n\u003cli\u003esuffix\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003egetting rid of punkt in mor\n\u003cul\u003e\n\u003cli\u003e, =\u0026gt; cm\u003c/li\u003e\n\u003cli\u003e. 
=\u0026gt; no PUNKT, stays\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"stuff\"\u003estuff\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003echocolaty (noadmin, \u003ca href=\"https://docs.chocolatey.org/en-us/choco/setup#non-administrative-install\"\u003ehttps://docs.chocolatey.org/en-us/choco/setup#non-administrative-install\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eminiconda\u003c/li\u003e\n\u003cli\u003esetx path \u0026ldquo;%path%;C:\\tools\\miniconda3\\condabin\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ecurl env first, the install (Windows can\u0026rsquo;t do it from a URL)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"readme\"\u003ereadme\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cdel\u003econda init zsh (close shell, open again)\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e.mp4\u003c/li\u003e\n\u003cli\u003emfa model downloading\u003c/li\u003e\n\u003cli\u003ewhat\u0026rsquo;s the difference between online docker install and manual install\u003c/li\u003e\n\u003cli\u003eNLTK Huggingface transformers tokenizers (versining)\u003c/li\u003e\n\u003cli\u003e/opt/homebrew/Caskroom/miniforge/base/envs/aligner/lib/python3.9/site-packages/montreal_forced_aligner/corpus/text_corpus.py; getattr(self, k).update(error_dict[k])\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAttributeError: \u0026rsquo;list\u0026rsquo; object has no attribute \u0026lsquo;update\u0026rsquo;\nFileArgumentNotFoundError: ; line 139\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"dba\"\u003eDBA\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSee the data on the frequency of haphax legomina vs. 
COCA\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"espnet\"\u003eESPNet\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eneed to talk to Ji Yang\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"andrew-s-features\"\u003eAndrew\u0026rsquo;s Features\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eCollapse two PAR tiers down\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eCheckpoint per file\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eOne corpus prompt per run\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eHandle empty tiers\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003eI/P selection crashes! contingency\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003epreview the LONGEST segment instead of the top one\u003c/del\u003e\u003c/li\u003e\n\u003cli\u003e\u003cdel\u003e-i kill in the middle\u003c/del\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fixes\"\u003efixes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;my mom\u0026rsquo;s cryin(g)\u0026rdquo; [\u0026lt;] mm [l648] (also themmm after)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;made her a nice dress\u0026rdquo; [\u0026lt;] mhm [l1086]\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;when I was a kid I\u0026rdquo; \u0026amp;=laughs [l1278]\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"others\"\u003eOthers\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003echstring (for uh, mm-hmm)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eretrace (asr\u0026amp;fa folder)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003elowcase (caps)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003erep-join.cut (fixes/)\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-08-02_12-55-55_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cul\u003e\n\u003cli\u003enumbers\u003c/li\u003e\n\u003cli\u003e\u0026lt;affirmative\u0026gt;\u003c/li\u003e\n\u003cli\u003e\u0026lsquo;mo 
data!\n\u003cul\u003e\n\u003cli\u003eCallFriend/CallHome (ca-data)\u003c/li\u003e\n\u003cli\u003eISL?\u003c/li\u003e\n\u003cli\u003eSBCSAE\u003c/li\u003e\n\u003cli\u003eAphasia + MICASE\u003c/li\u003e\n\u003cli\u003eTBI data\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003eProviding a Two-Pass Solution\u003c/li\u003e\n\u003cli\u003eWriting\n\u003cul\u003e\n\u003cli\u003eBig description of the pipeline\u003c/li\u003e\n\u003cli\u003eNotion of the pipeline\u003c/li\u003e\n\u003cli\u003eBetter tokenization?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e8/18\u003c/li\u003e\n\u003c/ul\u003e\n\u003chr\u003e\n\u003cul\u003e\n\u003cli\u003eInitial segment repetition\u003c/li\u003e\n\u003cli\u003eExtracting studdering\u003c/li\u003e\n\u003cli\u003eGramatically problematic\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mar\"\u003emar\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emar has done a thing and its phoneme level\u003c/li\u003e\n\u003cli\u003eWe did it, now automated\u003c/li\u003e\n\u003cli\u003eLEAP data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"next-actions\"\u003enext actions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eAphasia (-apraxia?): classification\u003c/li\u003e\n\u003cli\u003eChild data (EllisWeismer)\u003c/li\u003e\n\u003cli\u003eDementia\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"a\"\u003ea\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Multiple @Begin/CHECK problem~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Placement of @Options~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Strange, missing period~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~Bracket comments should FOLLOW words instead of PRECEEDING them~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ccode\u003e~%xwor: 
line~\u003c/code\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSTICK TO DASHES WHEN DISTRIBUTING BATCHALIGN\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eend the utterance when it ends (incl. inter-utterance pauses)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;I\u0026rdquo; need to be capitalized\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e11005 (LT)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAlign EllisWeismer\u003c/p\u003e\n\u003cp\u003eAlso cool to align:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003efluency IISRP/*\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Speaker_diarisation\"\u003ehttps://en.wikipedia.org/wiki/Speaker_diarisation\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://universaldependencies.org/\"\u003ehttps://universaldependencies.org/\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"alzheimer-s-project\"\u003eAlzheimer\u0026rsquo;s Project\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://dementia.talkbank.org/\"\u003ehttps://dementia.talkbank.org/\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"https://luzs.gitlab.io/adresso-2021/\"\u003ehttps://luzs.gitlab.io/adresso-2021/\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSpecifically: \u003ca href=\"https://dementia.talkbank.org/access/English/Pitt.html\"\u003ehttps://dementia.talkbank.org/access/English/Pitt.html\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eReview Kathleen Fraser: \u003ca 
href=\"https://drive.google.com/drive/u/1/folders/1lYTIzzXLXw3LlDG9ZQ7k4RayDiP6eLs1\"\u003ehttps://drive.google.com/drive/u/1/folders/1lYTIzzXLXw3LlDG9ZQ7k4RayDiP6eLs1\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eHere are the review papers: \u003ca href=\"https://drive.google.com/drive/u/1/folders/1pokU75aKt6vNdeSMpc-HfN9fkLvRyutt\"\u003ehttps://drive.google.com/drive/u/1/folders/1pokU75aKt6vNdeSMpc-HfN9fkLvRyutt\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRead this first: \u003ca href=\"https://drive.google.com/drive/u/1/folders/0B3XZtiQwQW4XMnlFN0ZGUndUamM?resourcekey=0-AlOCZb4q9TyG4KpaMQpeoA\"\u003ehttps://drive.google.com/drive/u/1/folders/0B3XZtiQwQW4XMnlFN0ZGUndUamM?resourcekey=0-AlOCZb4q9TyG4KpaMQpeoA\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eSome PITT data have 3-4 recordings\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003eThe best way to diagnosing alzhimers\u0026rsquo; is from language.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eWhy this field is needed: to analyze a pre-post test metric.\u003c/p\u003e\n\u003cp\u003eDesired output: existence of dementia (a.k.a alzheimer\u0026rsquo;s\u0026rsquo;).\u003c/p\u003e\n\u003cp\u003eOther research to read:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ePenn (julia parish something but they don\u0026rsquo;t stare their data but they smile and things with Mark Libermann type of thing)\u003c/li\u003e\n\u003cli\u003eLearning more about speech text\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://my.clevelandclinic.org/health/diagnostics/22327-differential-diagnosis\"\u003ehttps://my.clevelandclinic.org/health/diagnostics/22327-differential-diagnosis\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003epython3 ~/mfa_data/\u003ca href=\"/posts/kbhbatchalign/\"\u003ebatchalign\u003c/a\u003e-dist/batchalign.py ~/mfa_data/my_corpus 
~/mfa_data/my_corpus_aligned\u003c/p\u003e\n\u003cp\u003echristan marr paper on MFA on child data\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtalkbank/","tags":null,"title":"talkbank"},{"categories":null,"contents":"Lit Survey Pipeline Segmentation ","html":"\u003ch2 id=\"lit-survey\"\u003eLit Survey\u003c/h2\u003e\n\u003ch3 id=\"pipeline\"\u003ePipeline\u003c/h3\u003e\n\u003ch3 id=\"segmentation\"\u003eSegmentation\u003c/h3\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtalkbank_pipeline_project/","tags":null,"title":"TalkBank Pipeline Project"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtariffs/","tags":null,"title":"tariffs"},{"categories":null,"contents":"Step 0: know what you are building.\nbreaking tasks The process of breaking tasks down.\nWe need to research tasks to see how complex they are + how to break them down Research takes time! It should be its own task Over the process of research, the task becomes much simpler estimating tasks Requirement: tasks should always be estimated by the person doing the work.\nTask Estimation should be done each time! tasks shift Estimate only in powers of 2: 30 minutes, 1h, 2h, 4h, 8h, etc. If you never done something before, double the time than you estimate If you are teaching someone to do something, quadruple the time than you estimate Add buffer time (*1.5), especially if you think yourself as a procrastinator Focus is draining! You need breaks. Take breaks. Things will go wrong! Plan for it. time iterating If anything is longer than 8 hours, that\u0026rsquo;s a good sign you need to break it down! 
Likely that you have to break things down MVP You probably don\u0026rsquo;t have time to build your feature list\nMVP: minimum viable product We need the basic set of features; you probably have more features than you have time to build Prioritize what you build based on\u0026hellip; Dependencies: is this required for other stuff to work Viability: can the product exist without this? Time: how long does it take? Be ruthless about what you cut; talk to your user.\n","html":"\u003cp\u003eStep 0: know what you are building.\u003c/p\u003e\n\u003ch2 id=\"breaking-tasks\"\u003ebreaking tasks\u003c/h2\u003e\n\u003cp\u003eThe process of \u003ca href=\"#breaking-tasks\"\u003ebreaking tasks\u003c/a\u003e down.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eWe need to research tasks to see how complex they are + how to break them down\u003c/li\u003e\n\u003cli\u003eResearch takes time! It should be its own task\u003c/li\u003e\n\u003cli\u003eOver the process of research, the task becomes much simpler\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"estimating-tasks\"\u003eestimating tasks\u003c/h2\u003e\n\u003cp\u003eRequirement: tasks should \u003cem\u003ealways\u003c/em\u003e be estimated by the person doing the work.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtask_estimation/\"\u003eTask Estimation\u003c/a\u003e should be done \u003cem\u003eeach time!\u003c/em\u003e tasks shift\u003c/li\u003e\n\u003cli\u003eEstimate only in powers of 2: 30 minutes, 1h, 2h, 4h, 8h, etc.\u003c/li\u003e\n\u003cli\u003eIf you never done something before, \u003cstrong\u003edouble\u003c/strong\u003e the time than you estimate\u003c/li\u003e\n\u003cli\u003eIf you are teaching someone to do something, \u003cstrong\u003equadruple\u003c/strong\u003e the time than you estimate\u003c/li\u003e\n\u003cli\u003eAdd buffer time (*1.5), especially if you think yourself as a procrastinator\n\u003cul\u003e\n\u003cli\u003eFocus is draining! You need breaks. 
Take breaks.\u003c/li\u003e\n\u003cli\u003eThings will go wrong! Plan for it.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"time-iterating\"\u003etime iterating\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIf anything is longer than 8 hours, that\u0026rsquo;s a good sign you need to break it down!\u003c/li\u003e\n\u003cli\u003eLikely that you have to break things down\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mvp\"\u003eMVP\u003c/h2\u003e\n\u003cp\u003eYou probably don\u0026rsquo;t have time to build your feature list\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMVP: minimum viable product\u003c/li\u003e\n\u003cli\u003eWe need the basic set of features; you probably have more features than you have time to build\u003c/li\u003e\n\u003cli\u003ePrioritize what you build based on\u0026hellip;\n\u003cul\u003e\n\u003cli\u003eDependencies: is this required for other stuff to work\u003c/li\u003e\n\u003cli\u003eViability: can the product exist without this?\u003c/li\u003e\n\u003cli\u003eTime: how long does it take?\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eBe ruthless about what you cut; talk to your user.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtask_estimation/","tags":null,"title":"Task Estimation"},{"categories":null,"contents":"The taxicab norm is a norm against a gridded system; it should follow the same properties of the norm, but not inner products.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhtaxicab_norm/\"\u003etaxicab norm\u003c/a\u003e is a norm against a gridded system; it should follow the same properties of the \u003ca href=\"/posts/kbhnorm/\"\u003enorm\u003c/a\u003e, but not \u003ca href=\"/posts/kbhinner_product/\"\u003einner product\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtaxicab_norm/","tags":null,"title":"taxicab 
norm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtaylor_se/","tags":null,"title":"taylor se"},{"categories":null,"contents":"Hostname: baboon.jemoka.com\nTechnology: MacBook Pro A2338\nSerial: C02FX4W2Q05N\nDescription: Space-Grey MacBook Pro 2020\nThis piece of technology is no longer managed by me, and should not be registered under this domain anymore.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebaboon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: MacBook Pro A2338\u003c/p\u003e\n\u003cp\u003eSerial: C02FX4W2Q05N\u003c/p\u003e\n\u003cp\u003eDescription: Space-Grey MacBook Pro 2020\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eThis piece of technology is no longer managed by me, and should not be registered under this domain anymore.\u003c/strong\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_baboon_jemoka_com/","tags":null,"title":"Technology: baboon.jemoka.com"},{"categories":null,"contents":"Hostname: balloon.jemoka.com\nTechnology: MacBook Pro A2779\nSerial: Q5491WTGGM\nDescription: Space-Grey MacBook Pro 2023\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003eballoon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: MacBook Pro A2779\u003c/p\u003e\n\u003cp\u003eSerial: Q5491WTGGM\u003c/p\u003e\n\u003cp\u003eDescription: Space-Grey MacBook Pro 2023\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. 
Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_balloon_jemoka_com/","tags":null,"title":"Technology: balloon.jemoka.com"},{"categories":null,"contents":"Hostname: bassoon.jemoka.com\nTechnology: Teenage Engineering OP-Z\nSerial: X3C-KJFBB\nDescription: Gray Portable Synthesizer\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebassoon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: Teenage Engineering OP-Z\u003c/p\u003e\n\u003cp\u003eSerial: X3C-KJFBB\u003c/p\u003e\n\u003cp\u003eDescription: Gray Portable Synthesizer\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bassoon_jemoka_com/","tags":null,"title":"Technology: bassoon.jemoka.com"},{"categories":null,"contents":"Hostname: bilon.jemoka.com\nTechnology: Trek Fx1\nDescription: Black Bicycle with front and rear Deralieurs\nSerial: WTU 270 XC1581 S\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. 
Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebilon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: Trek Fx1\u003c/p\u003e\n\u003cp\u003eDescription: Black Bicycle with front and rear Deralieurs\u003c/p\u003e\n\u003cp\u003eSerial: WTU 270 XC1581 S\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bilon_jemoka_com/","tags":null,"title":"Technology: bilon.jemoka.com"},{"categories":null,"contents":"Hostname: bison.jemoka.com\nTechnology: iPhone MLHT3LL/A\nSerial: LXQM93HWLC\nDescription: iPhone 13 Mini Midnight\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebison.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: iPhone MLHT3LL/A\u003c/p\u003e\n\u003cp\u003eSerial: LXQM93HWLC\u003c/p\u003e\n\u003cp\u003eDescription: iPhone 13 Mini Midnight\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. 
Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bison_jemoka_com/","tags":null,"title":"Technology: bison.jemoka.com"},{"categories":null,"contents":"Hostname: bonbon.jemoka.com\nTechnology: iPhone MT972LL/A\nSerial: C39Z2HS9KPFT\nDescription: iPhone Xs Black\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003ebonbon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eTechnology: iPhone MT972LL/A\u003c/p\u003e\n\u003cp\u003eSerial: C39Z2HS9KPFT\u003c/p\u003e\n\u003cp\u003eDescription: iPhone Xs Black\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_bonbon_jemoka_com/","tags":null,"title":"Technology: bonbon.jemoka.com"},{"categories":null,"contents":"Hostname: boon.jemoka.com\nLost If you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to houjun@jemoka.com or +1 650 209-0966 to get in touch. Please do the right thing.\nThank you.\n","html":"\u003cp\u003eHostname: \u003cstrong\u003eboon.jemoka.com\u003c/strong\u003e\u003c/p\u003e\n\u003ch2 id=\"lost\"\u003eLost\u003c/h2\u003e\n\u003cp\u003eIf you have stumbled upon this page due to finding this device in the wild, thank you so much! Reach out to \u003ccode\u003ehoujun@jemoka.com\u003c/code\u003e or \u003ccode\u003e+1 650 209-0966\u003c/code\u003e to get in touch. 
Please do the right thing.\u003c/p\u003e\n\u003cp\u003eThank you.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtechnology_boon_jemoka_com/","tags":null,"title":"Technology: boon.jemoka.com"},{"categories":null,"contents":"Teddy Roosevelt was an American president.\nLarge personality: expanded scope of the Presidency \u0026mdash; \u0026ldquo;if it doesn\u0026rsquo;t explicit say its belong to the congress, it belongs to me\u0026rdquo; Moralist (Support American People), Imperialist (Believes in American Righteousness), Progressive Monroe Doctrine \u0026amp; Roosevelt Corollary: America for Americans The Panama Canal - engineered coup! to build the panama canal ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhteddy_roosevelt/\"\u003eTeddy Roosevelt\u003c/a\u003e was an American president.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eLarge personality: expanded scope of the Presidency \u0026mdash; \u0026ldquo;if it doesn\u0026rsquo;t explicit say its belong to the congress, it belongs to me\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eMoralist (Support American People), Imperialist (Believes in American Righteousness), Progressive\u003c/li\u003e\n\u003cli\u003eMonroe Doctrine \u0026amp; Roosevelt Corollary: America for Americans\u003c/li\u003e\n\u003cli\u003eThe Panama Canal - engineered \u003cem\u003ecoup!\u003c/em\u003e to build the panama canal\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhteddy_roosevelt/","tags":null,"title":"Teddy Roosevelt"},{"categories":null,"contents":"\u0026hellip;is a series for which pairs cancel out:\n\\begin{equation} (1-x)(1+x+ \\dots + x^{n-1}) \\end{equation}\nyou will note that, though the expansion of this result, pairs of:\n\\begin{equation} -x^{j} + x^{j} \\end{equation}\nform. And you will note those cancel. 
Hence this is a telescoping series.\n","html":"\u003cp\u003e\u0026hellip;is a series for which pairs cancel out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(1-x)(1+x+ \\dots + x^{n-1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eyou will note that, though the expansion of this result, pairs of:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n-x^{j} + x^{j}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eform. And you will note those cancel. Hence this is a telescoping series.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhteelscoping_series/","tags":null,"title":"Teelscoping Series"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtemperal_abstraction/","tags":null,"title":"Temperal Abstraction"},{"categories":null,"contents":"A Term-Document Matrix is a boolean matrix of: rows\u0026mdash;\u0026ldquo;terms\u0026rdquo;, the search keywords\u0026mdash;and columns\u0026mdash;\u0026ldquo;documents\u0026rdquo;, which is the document. Each element \\((x,y)\\) is \\(1\\) if \\(y\\) contains term \\(x\\), and \\(0\\) otherwise.\nTo perform a search, we take a boolean operation over each row (usually either complement for NOT or identity), and AND it with all other terms. The resulting boolean string are the valid documents.\nNotably, this is quite intractable because the matrix is quite (words times documents) blows up. However, this representation is QUITE SPARSE. So, ideally we only store it sparsely.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e is a boolean matrix of: rows\u0026mdash;\u0026ldquo;terms\u0026rdquo;, the search keywords\u0026mdash;and columns\u0026mdash;\u0026ldquo;documents\u0026rdquo;, which is the document. 
Each element \\((x,y)\\) is \\(1\\) if \\(y\\) contains term \\(x\\), and \\(0\\) otherwise.\u003c/p\u003e\n\u003cp\u003eTo perform a search, we take a boolean operation over each row (usually either complement for NOT or identity), and AND it with all other terms. The resulting boolean string are the valid documents.\u003c/p\u003e\n\u003cp\u003eNotably, this is quite intractable because the matrix is quite (words times documents) blows up. However, this representation is \u003cstrong\u003eQUITE SPARSE\u003c/strong\u003e. So, ideally we only store it sparsely.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhterm_document_matrix/","tags":null,"title":"Term-Document Matrix"},{"categories":null,"contents":"Given what you claim as a normal distribution, we can test for its normality. Any distribution you claim as normal has to follow that:\n\\begin{equation} np \\geq 10 \u0026amp; n(1-p) \\geq 10 \\end{equation}\nthat number of successes and failures need both be greater than or equal to ten.\n","html":"\u003cp\u003eGiven what you claim as a \u003ca href=\"/posts/kbhnormal_distribution/\"\u003enormal distribution\u003c/a\u003e, we can test for its normality. 
Any distribution you claim as normal has to follow that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nnp \\geq 10 \u0026amp; n(1-p) \\geq 10\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethat number of successes and failures need both be greater than or equal to ten.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtest_for_normality/","tags":null,"title":"test for normality (statistics)"},{"categories":null,"contents":"How many bugs are in 1,000 lines of code?\nTypical code: 1-10 Platform code: 0.1-1 The best\u0026mdash;NASA: 0.01-0.1 Never assume your software doesn\u0026rsquo;t have bugs.\nTest-Driven Development Test before you build!\nSpecs are already written We know what the expected behavior is We can write tests for the expected behavior first All tests fail to start We know we are done writing code when all tests pass \u0026ldquo;NYI\u0026rdquo; (not-yet implemented)\noften, writing test exposes gaps in your specs How NOT! not write tests Random Sampling Pick one or two inputs and show your code works on it Why it doesn\u0026rsquo;t work: there maybe specific inputs that break your code Exhaustive Testing Test for the domain of inputs Why it doesn\u0026rsquo;t work: tests run forever How DO you write tsets Black-Box Testing Pretend the code implementation is a black box All you know is what the specification; and what the input/output produces White-Box Testing You can see the implementation You test for specific edge cases Off-by-one, running time, specific inputs, etc. 
Malicious Testing What happens if a user is trying to break your system Sometimes, this is known as \u0026ldquo;pen-testing\u0026rdquo; or \u0026ldquo;white-hack hacking\u0026rdquo; Take CS340 Compsec How BIG are your tests Unit Testing Backbone of testing Typically, that means one test per function Tests choose representative inputs Idempotent: the state of the testing system should be a the beginning and end of the test (tests should revert) (setup + teardown tests) Subsystem Testing Exercise multiple functions working together in a system Often takes longer OK to run these less frequently End-to-End Integration Exercise the entire workflow May involve external libraries, hardware, etc. Regression Testing Isolate the cause of the bug to the smallest possible test case Write a test assuming the bug is fixed Fix the bug Add the test to your test suite How MUCH do we run tests Ideally, run tests every time code is committed Ideally\u0026mdash;run tests that address the function Schedule long tests What to test for see also here\nequivalence partitioning Come up with one test case per equivalence class. For instance, for a function that uppercases letters, analyze the following:\nLowercase letters Uppercase letters Non-alpha letters Non-printable letters Combinations Each group will therefore have nicely the requirements covered\nboundary value analysis In addition to just testing 1 element per class in equivalence partitioning, try to test boundary values (off-by-one, etc.) 
cases for each equivalence class if you can come up with them.\nArrange, Act, Assert arrange for setup by setting up variables, etc., and define the expected result (yes we do it before to be more readable) act do the thing assert correctness by checking the expected result ","html":"\u003cp\u003eHow many bugs are in 1,000 lines of code?\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTypical code: 1-10\u003c/li\u003e\n\u003cli\u003ePlatform code: 0.1-1\u003c/li\u003e\n\u003cli\u003eThe best\u0026mdash;NASA: 0.01-0.1\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eNever \u003cem\u003eassume\u003c/em\u003e your software doesn\u0026rsquo;t have bugs.\u003c/p\u003e\n\u003ch2 id=\"test-driven-development\"\u003eTest-Driven Development\u003c/h2\u003e\n\u003cp\u003eTest \u003cem\u003ebefore\u003c/em\u003e you build!\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSpecs are already written\u003c/li\u003e\n\u003cli\u003eWe know what the expected behavior is\u003c/li\u003e\n\u003cli\u003eWe can write tests for the expected behavior \u003cem\u003efirst\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eAll tests fail to start\u003c/li\u003e\n\u003cli\u003eWe know we are done writing code when all tests pass\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u0026ldquo;NYI\u0026rdquo; (not-yet implemented)\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-text\" data-lang=\"text\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003eoften, writing test exposes gaps in your specs\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch2 id=\"how-not-not-write-tests\"\u003eHow \u003cem\u003eNOT!\u003c/em\u003e not write tests\u003c/h2\u003e\n\u003ch3 id=\"random-sampling\"\u003eRandom Sampling\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePick one or two inputs and show your code works on 
it\u003c/li\u003e\n\u003cli\u003eWhy it doesn\u0026rsquo;t work: there maybe specific inputs that break your code\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"exhaustive-testing\"\u003eExhaustive Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTest for the domain of inputs\u003c/li\u003e\n\u003cli\u003eWhy it doesn\u0026rsquo;t work: tests run forever\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-do-you-write-tsets\"\u003eHow \u003cem\u003eDO\u003c/em\u003e you write tsets\u003c/h2\u003e\n\u003ch3 id=\"black-box-testing\"\u003eBlack-Box Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ePretend the code implementation is a black box\u003c/li\u003e\n\u003cli\u003eAll you know is what the specification; and what the input/output produces\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"white-box-testing\"\u003eWhite-Box Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eYou can see the implementation\u003c/li\u003e\n\u003cli\u003eYou test for specific edge cases\u003c/li\u003e\n\u003cli\u003eOff-by-one, running time, specific inputs, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"malicious-testing\"\u003eMalicious Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eWhat happens if a user is \u003cem\u003etrying\u003c/em\u003e to break your system\u003c/li\u003e\n\u003cli\u003eSometimes, this is known as \u0026ldquo;pen-testing\u0026rdquo; or \u0026ldquo;white-hack hacking\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eTake CS340 Compsec\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-big-are-your-tests\"\u003eHow \u003cem\u003eBIG\u003c/em\u003e are your tests\u003c/h2\u003e\n\u003ch3 id=\"unit-testing\"\u003eUnit Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eBackbone of testing\u003c/li\u003e\n\u003cli\u003eTypically, that means one test per function\u003c/li\u003e\n\u003cli\u003eTests choose representative inputs\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eIdempotent\u003c/strong\u003e\u003c/strong\u003e: the 
state of the testing system should be a the beginning and end of the test (tests should revert) (setup + teardown tests)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"subsystem-testing\"\u003eSubsystem Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExercise multiple functions working together in a system\u003c/li\u003e\n\u003cli\u003eOften takes longer\u003c/li\u003e\n\u003cli\u003eOK to run these less frequently\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"end-to-end-integration\"\u003eEnd-to-End Integration\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExercise the entire workflow\u003c/li\u003e\n\u003cli\u003eMay involve external libraries, hardware, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"regression-testing\"\u003eRegression Testing\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eIsolate the cause of the bug to the smallest possible test case\u003c/li\u003e\n\u003cli\u003eWrite a test assuming the bug is fixed\u003c/li\u003e\n\u003cli\u003eFix the bug\u003c/li\u003e\n\u003cli\u003eAdd the test to your test suite\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"how-much-do-we-run-tests\"\u003eHow \u003cem\u003eMUCH\u003c/em\u003e do we run tests\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eIdeally, run tests every time code is committed\u003c/li\u003e\n\u003cli\u003eIdeally\u0026mdash;run tests that address the function\u003c/li\u003e\n\u003cli\u003eSchedule long tests\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"what-to-test-for\"\u003e\u003cem\u003eWhat\u003c/em\u003e to test for\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"https://softwareengineering.stackexchange.com/questions/750/what-should-you-test-with-unit-tests\"\u003esee also here\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"equivalence-partitioning\"\u003eequivalence partitioning\u003c/h3\u003e\n\u003cp\u003eCome up with one test case per equivalence class. 
For instance, for a function that uppercases letters, analyze the following:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eLowercase letters\u003c/li\u003e\n\u003cli\u003eUppercase letters\u003c/li\u003e\n\u003cli\u003eNon-alpha letters\u003c/li\u003e\n\u003cli\u003eNon-printable letters\u003c/li\u003e\n\u003cli\u003eCombinations\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eEach group will therefore have nicely the requirements covered\u003c/p\u003e\n\u003ch3 id=\"boundary-value-analysis\"\u003eboundary value analysis\u003c/h3\u003e\n\u003cp\u003eIn addition to just testing 1 element per class in \u003ca href=\"#equivalence-partitioning\"\u003eequivalence partitioning\u003c/a\u003e, try to test boundary values (off-by-one, etc.) cases for each equivalence class if you can come up with them.\u003c/p\u003e\n\u003ch3 id=\"arrange-act-assert\"\u003eArrange, Act, Assert\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003earrange\u003c/strong\u003e for setup by setting up variables, etc., and \u003cstrong\u003edefine the expected result\u003c/strong\u003e (yes we do it before to be more readable)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eact\u003c/strong\u003e do the thing\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eassert\u003c/strong\u003e correctness by checking the expected result\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtesting/","tags":null,"title":"Testing"},{"categories":null,"contents":"Take a document \\(d\\) and assign a fixed set of classes \\(\\{c_1, c_2, \u0026hellip;, c_{j}\\}\\) to that document. You want to predict \\(f(d) = c \\in C\\).\n","html":"\u003cp\u003eTake a document \\(d\\) and assign a fixed set of classes \\(\\{c_1, c_2, \u0026hellip;, c_{j}\\}\\) to that document. 
You want to predict \\(f(d) = c \\in C\\).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtext_classification/","tags":null,"title":"Text Classification"},{"categories":null,"contents":"two main parts:\ntokenization lemmatization ","html":"\u003cp\u003etwo main parts:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003etokenization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhlemmatization/\"\u003elemmatization\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtext_normalization/","tags":null,"title":"text normalization"},{"categories":null,"contents":"The Unreasonable Effectiveness of Mathematics in the Natural Sciences is an article by the famous mathematician Eugene Wigner. (Wigner 1990)\nReflection What I found most peculiarly interesting is the focus on many mathematical/physics texts on the idea of the \u0026ldquo;beauty\u0026rdquo; of the expressions; and, it seems, the clear pleasure that Wigner gets from analyzing the systems with the aforementioned \u0026ldquo;beauty.\u0026rdquo;\nSetting aside whether or not this beauty is \u0026ldquo;deserved\u0026rdquo;/appropriate, I love that my attraction to physics is somewhat similar to what Wigner describes. Under the appropriate conditions, with constraints, it is possible to build a solution to physics problems simply through the evolution of mathematics.\nIt is not to say that the models mathematics provides is correct. I like that Winger ended on the note about how \u0026ldquo;false\u0026rdquo; theories, even despite their falseness, provided shockingly accurate estimations of physical phenomena. 
Perhaps mathematics provides an almost-fully solid foundation to creating physical systems, but then the entire \u0026ldquo;flaw\u0026rdquo; we see with mathematical modeling is in our (in)ability to provide the limitations to scope.\nFor instance, Bohr\u0026rsquo;s model, an example of \u0026ldquo;falsehood\u0026rdquo; modeled, is an over-limitation to scope which\u0026mdash;thought reducing mathematical complexity\u0026mdash;resulted in a \u0026ldquo;wrong\u0026rdquo; theory. However, the mathematics behind the theory remains to be solid despite the scope limitation, making the result work in a reasonable manner (except for the pitfalls).\nThe inherent concern behind this statement, then, is that there is a case where we can build a perfectly reasonable system to model something, but it turns out that the system is correct only in the limited scope which we are used to operating; when suddenly the scope becomes broken, we are so used to the mathematical tools that we have came to rely on that we don\u0026rsquo;t notice their failures.\nI like that this entire point is brought up before our start in DiffEq, perhaps as a \u0026ldquo;with great power comes great responsibility\u0026rdquo; type of caution to us in terms of how our modeling may go awry while at the same time acting as a preview of the usefulness of the principles provided taken as a whole.\nReading notes Maths show up at entirely random places The first point is that mathematical concepts turn up in entirely unexpected connections. 
Moreover, they often permit an unexpectedly close and accurate description of the phenomena in these connections.\nWondering whether or not the theory is unique due to its applicability He became skeptical concerning the uniqueness of the coordination between keys and doors.\nThat math is really useful, its weird The first point is that the enormous usefulness of mathematics in the natural sciences is something bordering on the mysterious and that there is no rational explanation for it.\nIt also raises the question of how actually unique our theories are given they are all so applicable Second, it is just this uncanny usefulness of mathematical concepts that raises the question of the uniqueness of our physical theories.\nThe goal of mathematics is maximize the space of usefulness The great mathematician fully, almost ruthlessly, exploits the domain of permissible reasoning and skirts the impermissible.\nRegularity is suprising because its\u0026hellip; regularly found, which is unique The second surprising feature is that the regularity which we are discussing is independent of so many conditions which could have an effect on it.\nLaws of Nature are all highly conditional The principal purpose of the preceding discussion is to point out that the laws of nature are all conditional statements and they relate only to a very small part of our knowledge of the world.\nThat maths is just a fallback for \u0026ldquo;beatiful\u0026rdquo; physics happening the connection is that discussed in mathematics simply because he does not know of any other similar connection.\nApart from invarients, we just scope-limit ourselves to get the remaining bits that we need to make stuff work \u0026ldquo;beautifully\u0026rdquo; propose to refer to the observation which these examples illustrate as the empirical law of epistemology. 
Together with the laws of invariance of physical theories, it is an indispensable foundation of these theories.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhthe_unreasonable_effectiveness_of_mathematics_in_the_natural_sciences/\"\u003eThe Unreasonable Effectiveness of Mathematics in the Natural Sciences\u003c/a\u003e is an article by the famous mathematician \u003ca href=\"/posts/kbheugene_wigner/\"\u003eEugene Wigner\u003c/a\u003e. (\u003ca href=\"#citeproc_bib_item_1\"\u003eWigner 1990\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"reflection\"\u003eReflection\u003c/h2\u003e\n\u003cp\u003eWhat I found most peculiarly interesting is the focus on many mathematical/physics texts on the idea of the \u0026ldquo;beauty\u0026rdquo; of the expressions; and, it seems, the clear pleasure that Wigner gets from analyzing the systems with the aforementioned \u0026ldquo;beauty.\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eSetting aside whether or not this beauty is \u0026ldquo;deserved\u0026rdquo;/appropriate, I love that my attraction to physics is somewhat similar to what Wigner describes. Under the appropriate conditions, with constraints, it is possible to build a solution to physics problems simply through the evolution of mathematics.\u003c/p\u003e\n\u003cp\u003eIt is not to say that the models mathematics provides is correct. I like that Winger ended on the note about how \u0026ldquo;false\u0026rdquo; theories, even despite their falseness, provided shockingly accurate estimations of physical phenomena. 
Perhaps mathematics provides an almost-fully solid foundation to creating physical systems, but then the entire \u0026ldquo;flaw\u0026rdquo; we see with mathematical modeling is in our (in)ability to provide the limitations to scope.\u003c/p\u003e\n\u003cp\u003eFor instance, Bohr\u0026rsquo;s model, an example of \u0026ldquo;falsehood\u0026rdquo; modeled, is an over-limitation to scope which\u0026mdash;thought reducing mathematical complexity\u0026mdash;resulted in a \u0026ldquo;wrong\u0026rdquo; theory. However, the mathematics behind the theory remains to be solid despite the scope limitation, making the result work in a reasonable manner (except for the pitfalls).\u003c/p\u003e\n\u003cp\u003eThe inherent concern behind this statement, then, is that there is a case where we can build a perfectly reasonable system to model something, but it turns out that the system is correct only in the limited scope which we are used to operating; when suddenly the scope becomes broken, we are so used to the mathematical tools that we have came to rely on that we don\u0026rsquo;t notice their failures.\u003c/p\u003e\n\u003cp\u003eI like that this entire point is brought up before our start in DiffEq, perhaps as a \u0026ldquo;with great power comes great responsibility\u0026rdquo; type of caution to us in terms of how our modeling may go awry while at the same time acting as a preview of the usefulness of the principles provided taken as a whole.\u003c/p\u003e\n\u003ch2 id=\"reading-notes\"\u003eReading notes\u003c/h2\u003e\n\u003ch3 id=\"maths-show-up-at-entirely-random-places\"\u003eMaths show up at entirely random places\u003c/h3\u003e\n\u003cp\u003eThe first point is that mathematical concepts turn up in entirely unexpected connections. 
Moreover, they often permit an unexpectedly close and accurate description of the phenomena in these connections.\u003c/p\u003e\n\u003ch3 id=\"wondering-whether-or-not-the-theory-is-unique-due-to-its-applicability\"\u003eWondering whether or not the theory is unique due to its applicability\u003c/h3\u003e\n\u003cp\u003eHe became skeptical concerning the uniqueness of the coordination between keys and doors.\u003c/p\u003e\n\u003ch3 id=\"that-math-is-really-useful-its-weird\"\u003eThat math is really useful, its weird\u003c/h3\u003e\n\u003cp\u003eThe first point is that the enormous usefulness of mathematics in the natural sciences is something bordering on the mysterious and that there is no rational explanation for it.\u003c/p\u003e\n\u003ch3 id=\"it-also-raises-the-question-of-how-actually-unique-our-theories-are-given-they-are-all-so-applicable\"\u003eIt also raises the question of how actually unique our theories are given they are all so applicable\u003c/h3\u003e\n\u003cp\u003eSecond, it is just this uncanny usefulness of mathematical concepts that raises the question of the uniqueness of our physical theories.\u003c/p\u003e\n\u003ch3 id=\"the-goal-of-mathematics-is-maximize-the-space-of-usefulness\"\u003eThe goal of mathematics is maximize the space of usefulness\u003c/h3\u003e\n\u003cp\u003eThe great mathematician fully, almost ruthlessly, exploits the domain of permissible reasoning and skirts the impermissible.\u003c/p\u003e\n\u003ch3 id=\"regularity-is-suprising-because-its-dot-dot-dot-regularly-found-which-is-unique\"\u003eRegularity is suprising because its\u0026hellip; regularly found, which is unique\u003c/h3\u003e\n\u003cp\u003eThe second surprising feature is that the regularity which we are discussing is independent of so many conditions which could have an effect on it.\u003c/p\u003e\n\u003ch3 id=\"laws-of-nature-are-all-highly-conditional\"\u003eLaws of Nature are all highly conditional\u003c/h3\u003e\n\u003cp\u003eThe principal purpose of the 
preceding discussion is to point out that the laws of nature are all conditional statements and they relate only to a very small part of our knowledge of the world.\u003c/p\u003e\n\u003ch3 id=\"that-maths-is-just-a-fallback-for-beatiful-physics-happening\"\u003eThat maths is just a fallback for \u0026ldquo;beatiful\u0026rdquo; physics happening\u003c/h3\u003e\n\u003cp\u003ethe connection is that discussed in mathematics simply because he does not know of any other similar connection.\u003c/p\u003e\n\u003ch3 id=\"apart-from-invarients-we-just-scope-limit-ourselves-to-get-the-remaining-bits-that-we-need-to-make-stuff-work-beautifully\"\u003eApart from invarients, we just scope-limit ourselves to get the remaining bits that we need to make stuff work \u0026ldquo;beautifully\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003epropose to refer to the observation which these examples illustrate as the empirical law of epistemology. Together with the laws of invariance of physical theories, it is an indispensable foundation of these theories.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhthe_unreasonable_effectiveness_of_mathematics_in_the_natural_sciences/","tags":null,"title":"The Unreasonable Effectiveness of Mathematics in the Natural Sciences"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhtherma/","tags":null,"title":"therma"},{"categories":null,"contents":"thermoregulation is the brain\u0026rsquo;s regulation of body temperature to respond to heat, cold events.\nStudies indicate that cold exposure cold exposure can activate AgRP (stimulate food intake) as a means for the brain leveraging CNS regulation to which would lower the glucose level and maintain glucose homeostatis.\nHowever, cold exposure also trigger energy expenditure, and seems contradictory but not really why?.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhthermoregulation/\"\u003ethermoregulation\u003c/a\u003e is the brain\u0026rsquo;s regulation of 
body temperature to respond to heat, cold events.\u003c/p\u003e\n\u003cp\u003eStudies indicate that cold exposure cold exposure can activate \u003ca href=\"/posts/kbhagrp/\"\u003eAgRP\u003c/a\u003e (stimulate food intake) as a means for the brain leveraging \u003ca href=\"/posts/kbhcns_regulation/\"\u003eCNS regulation\u003c/a\u003e to which would lower the \u003ca href=\"\"\u003eglucose\u003c/a\u003e level and maintain \u003ca href=\"\"\u003eglucose homeostatis\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHowever, cold exposure also trigger energy expenditure, and seems contradictory but \u003cem\u003enot really why?\u003c/em\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhthermoregulation/","tags":null,"title":"thermoregulation"},{"categories":null,"contents":"The theta/alpha ratio is the ratio between two oscillations measurable by an EEG that is shown to be a possible indicator for AD development.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhtheta_alpha_ratio/\"\u003etheta/alpha ratio\u003c/a\u003e is the ratio between two oscillations measurable by an EEG that \u003ca href=\"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3793211/\"\u003eis shown\u003c/a\u003e to be a possible indicator for AD development.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtheta_alpha_ratio/","tags":null,"title":"theta/alpha ratio"},{"categories":null,"contents":" Because this chapter is not about linear algebra, your instructor may go through it rapidly. You may not be asked to scrutinize all the proofs. Make sure, however, that you at least read and understand the statements of all the results in this chapter—they will be used in later chapters.\nSo we are not going to go through everything very very carefully. Instead, I\u0026rsquo;m just going to go through some interesting results at my own leisure. 
This also means that this note is not very complete.\nfacts \u0026ldquo;you can factor out every root\u0026rdquo;: \\(p(\\alpha) = 0 \\implies p(z)=(z-\\alpha)q(z)\\)\nfundamental theorem of algebra: \u0026ldquo;if you have an nth-degree polynomial, you can factor it into n factors\u0026rdquo; (over the complex numbers, you have as many roots as the degree of the polynomials)\nthese coefficients are unique barring ordering factoring real polynomials can be treated in two pieces: one piece, the reals, which can be treated usually; then one other piece, the complexes, can be multiplied pairwise together (as over real coeffs they always come in conjugate pairs) into quadratics. This is why all real polynormials can always be factored as \\((x-\\lambda)(x-\\lambda) \\dots (x^{2}+ax+b) (x^{2}+ax+b)\\dots\\) the number of complex polynomials has to be even complex polynomials have \\(deg\\ p\\) factors\nreal polynomials have \\(deg\\ p\\) real/complex factors, but complex factors come in pairs\nwe can squish the complex part of the real polynomials together, and get\u0026mdash;wlog $m$\u0026mdash;first-degree real roots and \\(\\frac{deg\\ p - m}{2}\\) second-degree real roots where \\(b^{2} \u0026lt; 4c\\)\n\\(x^{2} + bx + c\\) has a factor of \\((x-\\lambda_{1})(x-\\lambda_{2})\\) under reals \\(b^{2} \\geq 4c\\)\nkey sequence complex numbers we defined: complex numbers, conjugates, and absolute value 9 properties of complexes (see below) polynomial coefficients polynomial coefficients are unique; namely, if a polynomial is the zero function, all of its coefficients have to be \\(0\\) division, zero, and factoring polynomial division: given two polynomials \\(p,s \\in \\mathcal{P}(\\mathbb{F})\\), with \\(s\\neq 0\\), then \\(\\exists q,r \\in \\mathcal{P}(\\mathbb{F})\\) such that: \\(p = s q +r\\); that is, given two polynomials, you can always divide one by the other with some remainder as long as the \u0026ldquo;other\u0026rdquo; is not \\(0\\) we defined zero 
(\\(p \\lambda =0\\), then \\(\\lambda\\) is a \u0026ldquo;zero\u0026rdquo;) and factor which is some polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) for another polynomial \\(p\\) such that there exists some \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p = s q\\) we show that each zero corresponds to a factor of the shape \\(p(z) = (z-\\lambda)q(z)\\) we show that a polynomial with degree \\(m\\) has at most \\(m\\) distinct zeros FToA and corollaries FToA: every non-constant polynomial under the complexes has a zero and that means every polynomial over the complexes has a unique factorization \\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m})\\) polynomials with zero coefficients have zeros in pairs: if \\(\\lambda \\in \\mathbb{C}\\) is a factor of the polynomial, so is \\(\\bar{\\lambda}\\) Is a real polynomial factorable? A polynomial \\(x^{2}+bx+c\\) is factorable into \\((x-\\lambda_{1})(x-\\lambda_{2})\\) IFF \\(b^{2} \u0026gt; 4c\\). All polynomials over the reals can be factored into at least second degree polynomials \\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m}) \\dots (x^{2}+b_{M}x+c_{M})\\) first, review complex numbers \\(z+\\bar{z} = 2 \\text{Re}\\ z\\) \\(z-\\bar{z} =2(\\text{Im}\\ z)i\\) \\(z\\bar{z} = |z|^{2}\\) \\(\\bar{x+z} = \\bar{w}+\\bar{z}\\), \\(\\bar{wz} = \\bar{w}\\bar{z}\\) \\(\\bar{\\bar{z}} = z\\) \\(| \\text{\\{Re,Im\\}}\\ z| \\leq |z|\\) both real and imaginary components are smaller than the actual absolute value \\(|\\bar{z}| = |z|\\) \\(|wz| = |w| |z|\\) \\(|w+z| \\leq |w| + |z|\\), the triangle inequality triangle inequality (complexes) For \\(w, z \\in \\mathbb{C}\\), we do route algebra:\npolynomial division Suppose \\(p,s \\in \\mathcal{P}(\\mathbb{F}), s\\neq 0\\), then, \\(\\exists\\) polynomials \\(q,r \\in \\mathcal{P(\\mathbb{F})}\\) such that:\n\\begin{equation} p = s q +r \\end{equation}\nand \\(\\deg r \u0026lt; \\deg s\\).\nProof:\nLet: \\(n = \\deg p\\), and \\(m = 
\\deg s\\). So, if \\(n \u0026lt; m\\) (i.e. it is not a division), then take \\(q=0\\) and \\(r=p\\).\nNow, let\u0026rsquo;s make ???\nFactoring A polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) is a factor of \\(p \\in \\mathcal{P}(\\mathbb{F})\\) if \\(\\exists\\) \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p=s q\\).\nquestions proofs: wut if the FToA holds, isn\u0026rsquo;t the polynomials over the reals a \u0026ldquo;subset\u0026rdquo;(ish) of the polynomials over the complexes? so there is going to be at least complex roots to all polynormials always no? ","html":"\u003cblockquote\u003e\n\u003cp\u003eBecause this chapter is not about linear algebra, your instructor may go through it rapidly. You may not be asked to scrutinize all the proofs. Make sure, however, that you at least read and understand the statements of all the results in this chapter—they will be used in later chapters.\u003c/p\u003e\n\u003c/blockquote\u003e\n\u003cp\u003eSo we are not going to go through everything very very carefully. Instead, I\u0026rsquo;m just going to go through some interesting results at my own leisure. 
This also means that this note is not very complete.\u003c/p\u003e\n\u003ch2 id=\"facts\"\u003efacts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003e\u0026ldquo;you can factor out every root\u0026rdquo;: \\(p(\\alpha) = 0 \\implies p(z)=(z-\\alpha)q(z)\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\u003ca href=\"\"\u003efundamental theorem of algebra\u003c/a\u003e: \u0026ldquo;if you have an nth-degree polynomial, you can factor it into n factors\u0026rdquo; (over the complex numbers, you have as many roots as the degree of the polynomials)\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethese coefficients are unique barring ordering\u003c/li\u003e\n\u003cli\u003efactoring real polynomials can be treated in two pieces: one piece, the reals, which can be treated usually; then one other piece, the complexes, can be multiplied pairwise together (as over real coeffs they always come in conjugate pairs) into quadratics. This is why all real polynormials can always be factored as \\((x-\\lambda)(x-\\lambda) \\dots (x^{2}+ax+b) (x^{2}+ax+b)\\dots\\)\u003c/li\u003e\n\u003cli\u003ethe number of complex polynomials has to be even\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ecomplex polynomials have \\(deg\\ p\\) factors\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ereal polynomials have \\(deg\\ p\\) real/complex factors, but complex factors come in pairs\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewe can squish the complex part of the real polynomials together, and get\u0026mdash;wlog $m$\u0026mdash;first-degree real roots and \\(\\frac{deg\\ p - m}{2}\\) second-degree real roots where \\(b^{2} \u0026lt; 4c\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003e\\(x^{2} + bx + c\\) has a factor of \\((x-\\lambda_{1})(x-\\lambda_{2})\\) under reals \\(b^{2} \\geq 4c\\)\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-sequence\"\u003ekey 
sequence\u003c/h2\u003e\n\u003ch3 id=\"complex-numbers\"\u003ecomplex numbers\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ewe defined: complex numbers, conjugates, and absolute value\n\u003cul\u003e\n\u003cli\u003e9 properties of complexes (see below)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"polynomial-coefficients\"\u003epolynomial coefficients\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003epolynomial coefficients are unique\u003c/strong\u003e; namely, if a polynomial is the zero function, all of its coefficients have to be \\(0\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"division-zero-and-factoring\"\u003edivision, zero, and factoring\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003epolynomial division\u003c/strong\u003e: given two polynomials \\(p,s \\in \\mathcal{P}(\\mathbb{F})\\), with \\(s\\neq 0\\), then \\(\\exists q,r \\in \\mathcal{P}(\\mathbb{F})\\) such that: \\(p = s q +r\\); that is, given two polynomials, you can always divide one by the other with some remainder as long as the \u0026ldquo;other\u0026rdquo; is not \\(0\\)\u003c/li\u003e\n\u003cli\u003ewe defined \u003cstrong\u003ezero\u003c/strong\u003e (\\(p \\lambda =0\\), then \\(\\lambda\\) is a \u0026ldquo;zero\u0026rdquo;) and \u003cstrong\u003efactor\u003c/strong\u003e which is some polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) for another polynomial \\(p\\) such that there exists some \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p = s q\\)\n\u003cul\u003e\n\u003cli\u003ewe show that each zero corresponds to a factor of the shape \\(p(z) = (z-\\lambda)q(z)\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003ewe show that a polynomial with degree \\(m\\) has at most \\(m\\) distinct zeros\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"ftoa-and-corollaries\"\u003eFToA and corollaries\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eFToA\u003c/strong\u003e: every non-constant 
polynomial under the complexes has a zero\u003c/li\u003e\n\u003cli\u003eand that means every polynomial over the complexes has a unique factorization \\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m})\\)\u003c/li\u003e\n\u003cli\u003epolynomials with zero coefficients have zeros in pairs: if \\(\\lambda \\in \\mathbb{C}\\) is a factor of the polynomial, so is \\(\\bar{\\lambda}\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"is-a-real-polynomial-factorable\"\u003eIs a real polynomial factorable?\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eA polynomial \\(x^{2}+bx+c\\) is factorable into \\((x-\\lambda_{1})(x-\\lambda_{2})\\) IFF \\(b^{2} \u0026gt; 4c\\).\u003c/li\u003e\n\u003cli\u003eAll polynomials over the reals can be factored into at least second degree polynomials\n\u003cul\u003e\n\u003cli\u003e\\(p(z) = c(z-\\lambda_{1})(z-\\lambda_{2}) \\dots (z-\\lambda_{m}) \\dots (x^{2}+b_{M}x+c_{M})\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"first-review-complex-number--kbhcomplex-number-dot-md--s\"\u003efirst, review \u003ca href=\"/posts/kbhcomplex_number/\"\u003ecomplex number\u003c/a\u003es\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(z+\\bar{z} = 2 \\text{Re}\\ z\\)\u003c/li\u003e\n\u003cli\u003e\\(z-\\bar{z} =2(\\text{Im}\\ z)i\\)\u003c/li\u003e\n\u003cli\u003e\\(z\\bar{z} = |z|^{2}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\bar{x+z} = \\bar{w}+\\bar{z}\\), \\(\\bar{wz} = \\bar{w}\\bar{z}\\)\u003c/li\u003e\n\u003cli\u003e\\(\\bar{\\bar{z}} = z\\)\u003c/li\u003e\n\u003cli\u003e\\(| \\text{\\{Re,Im\\}}\\ z| \\leq |z|\\) both real and imaginary components are smaller than the actual absolute value\u003c/li\u003e\n\u003cli\u003e\\(|\\bar{z}| = |z|\\)\u003c/li\u003e\n\u003cli\u003e\\(|wz| = |w| |z|\\)\u003c/li\u003e\n\u003cli\u003e\\(|w+z| \\leq |w| + |z|\\), the \u003ca href=\"#triangle-inequality--complexes\"\u003etriangle inequality\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 
id=\"triangle-inequality--complexes\"\u003etriangle inequality (complexes)\u003c/h2\u003e\n\u003cp\u003eFor \\(w, z \\in \\mathbb{C}\\), we do route algebra:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-01-30_20-46-35_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"polynomial-division\"\u003epolynomial division\u003c/h2\u003e\n\u003cp\u003eSuppose \\(p,s \\in \\mathcal{P}(\\mathbb{F}), s\\neq 0\\), then, \\(\\exists\\) polynomials \\(q,r \\in \\mathcal{P(\\mathbb{F})}\\) such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\np = s q +r\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(\\deg r \u0026lt; \\deg s\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eLet: \\(n = \\deg p\\), and \\(m = \\deg s\\). So, if \\(n \u0026lt; m\\) (i.e. it is not a division), then take \\(q=0\\) and \\(r=p\\).\u003c/p\u003e\n\u003cp\u003eNow, let\u0026rsquo;s make\n???\u003c/p\u003e\n\u003ch2 id=\"factoring\"\u003eFactoring\u003c/h2\u003e\n\u003cp\u003eA polynomial \\(s \\in \\mathcal{P}(\\mathbb{F})\\) is a \u003cstrong\u003efactor\u003c/strong\u003e of \\(p \\in \\mathcal{P}(\\mathbb{F})\\) if \\(\\exists\\) \\(q \\in \\mathcal{P}(\\mathbb{F})\\) such that \\(p=s q\\).\u003c/p\u003e\n\u003ch2 id=\"questions\"\u003equestions\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eproofs: wut\u003c/li\u003e\n\u003cli\u003eif the FToA holds, isn\u0026rsquo;t the polynomials over the reals a \u0026ldquo;subset\u0026rdquo;(ish) of the polynomials over the complexes? 
so there is going to be at least complex roots to all polynormials always no?\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhthoughts_on_axler_4/","tags":null,"title":"Thoughts on Axler 4"},{"categories":null,"contents":"A\n","html":"\u003cp\u003eA\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtiago_forte/","tags":null,"title":"Tiago Forte"},{"categories":null,"contents":"Every NLP task involve some kind of text normalization.\ntokenizing words normalizing word formats (lemmatize?) sentence and paragraph segmentation For Latin, Arabic, Cyrillic, Greek systems, spaces can usually be used for tokenization. Other writing systems can\u0026rsquo;t do this. See morpheme\nSubword Tokenization Algorithms for breaking up tokens using corpus statistics which acts on lower-than-word level.\nBPE Unigram Language Modeling tokenization WordPiece They all work in 2 parst:\na token learner: takes training corpus and derives a vocabulary set a token segmenter that tokenizes text according to the vocab tr For those languages, you can use these systems to perform tokenization.\ntr -sc \u0026#34;A-Za-z\u0026#34; \u0026#34;\\n\u0026#34; \u0026lt; input.txt this takes every form which is not text (-c is the complement operator) and replaces it with a newline. 
-s squeezes the text so that there are not multiple newlines.\nThis turns the text into one word per line.\nSorting it (because uniq requires it) and piping into uniq gives word count\ntr -sc \u0026#34;A-Za-z\u0026#34; \u0026#34;\\n\u0026#34; \u0026lt; input.txt | sort | uniq We can then do a reverse numerical sort:\ntr -sc \u0026#34;A-Za-z\u0026#34; \u0026#34;\\n\u0026#34; \u0026lt; input.txt | sort | uniq | sort -r -n which gives a list of words per frequency.\nThis is a BAD RESULT most of the time: some words have punctuation with meaning that\u0026rsquo;s not tokenizaiton: m.p.h., or AT\u0026amp;T, or John's, or 1/1/12.\nWhat to Tokenize \u0026ldquo;I do uh main- mainly business data processing\u0026rdquo;\nuh: filled pause main-: fragments Consider:\n\u0026ldquo;Seuss\u0026rsquo;s cat in the cat is different from other cats!\u0026rdquo;\ncat and cats: same lemma (i.e. stem + part of speech + word sense) cat and cats: different wordforms We usually consider a token as distinct wordform, counting duplicates; whereas, we usually consider word types as unique, non-duplicated distinct wordforms.\nclitics John's: word that doesn\u0026rsquo;t stand on its own.\n","html":"\u003cp\u003eEvery NLP task involve some kind of text normalization.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003etokenizing words\u003c/li\u003e\n\u003cli\u003enormalizing word formats (lemmatize?)\u003c/li\u003e\n\u003cli\u003esentence and paragraph segmentation\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eFor Latin, Arabic, Cyrillic, Greek systems, spaces can usually be used for tokenization. Other writing systems can\u0026rsquo;t do this. 
See \u003ca href=\"/posts/kbhmorpheme/\"\u003emorpheme\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"subword-tokenization\"\u003eSubword Tokenization\u003c/h2\u003e\n\u003cp\u003eAlgorithms for breaking up tokens using \u003ca href=\"/posts/kbhcorpus/\"\u003ecorpus\u003c/a\u003e statistics which acts on lower-than-word level.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhbpe/\"\u003eBPE\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eUnigram Language Modeling tokenization\u003c/li\u003e\n\u003cli\u003eWordPiece\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThey all work in 2 parst:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ea token \u003cstrong\u003elearner\u003c/strong\u003e: takes training corpus and derives a vocabulary set\u003c/li\u003e\n\u003cli\u003ea token \u003cstrong\u003esegmenter\u003c/strong\u003e that tokenizes text according to the vocab\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tr\"\u003etr\u003c/h2\u003e\n\u003cp\u003eFor those languages, you can use these systems to perform tokenization.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etr -sc \u003cspan style=\"color:#d88200\"\u003e\u0026#34;A-Za-z\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\n\u0026#34;\u003c/span\u003e \u0026lt; input.txt\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ethis takes every form which is not text (\u003ccode\u003e-c\u003c/code\u003e is the complement operator) and replaces it with a newline. 
\u003ccode\u003e-s\u003c/code\u003e squeezes the text so that there are not multiple newlines.\u003c/p\u003e\n\u003cp\u003eThis turns the text into one word per line.\u003c/p\u003e\n\u003cp\u003eSorting it (because \u003ccode\u003euniq\u003c/code\u003e requires it) and piping into \u003ccode\u003euniq\u003c/code\u003e gives word count\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etr -sc \u003cspan style=\"color:#d88200\"\u003e\u0026#34;A-Za-z\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\n\u0026#34;\u003c/span\u003e \u0026lt; input.txt \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e sort \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e uniq\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eWe can then do a reverse numerical sort:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-bash\" data-lang=\"bash\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003etr -sc \u003cspan style=\"color:#d88200\"\u003e\u0026#34;A-Za-z\u0026#34;\u003c/span\u003e \u003cspan style=\"color:#d88200\"\u003e\u0026#34;\\n\u0026#34;\u003c/span\u003e \u0026lt; input.txt \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e sort \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e uniq \u003cspan style=\"color:#111\"\u003e|\u003c/span\u003e sort -r -n\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhich gives a list of words per frequency.\u003c/p\u003e\n\u003cp\u003eThis is a \u003cstrong\u003eBAD RESULT\u003c/strong\u003e most of the time: some words 
have punctuation with meaning that\u0026rsquo;s not tokenizaiton: \u003ccode\u003em.p.h.\u003c/code\u003e, or \u003ccode\u003eAT\u0026amp;T\u003c/code\u003e, or \u003ccode\u003eJohn's\u003c/code\u003e, or \u003ccode\u003e1/1/12\u003c/code\u003e.\u003c/p\u003e\n\u003ch2 id=\"what-to-tokenize\"\u003eWhat to Tokenize\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;I do uh main- mainly business data processing\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003euh\u003c/code\u003e: filled pause\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003emain-\u003c/code\u003e: fragments\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eConsider:\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Seuss\u0026rsquo;s cat in the cat is different from other cats!\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ccode\u003ecat\u003c/code\u003e and \u003ccode\u003ecats\u003c/code\u003e: same \u003ca href=\"/posts/kbhtokenization/\"\u003elemma\u003c/a\u003e (i.e. stem + part of speech + word sense)\u003c/li\u003e\n\u003cli\u003e\u003ccode\u003ecat\u003c/code\u003e and \u003ccode\u003ecats\u003c/code\u003e: different \u003ca href=\"/posts/kbhtokenization/\"\u003ewordform\u003c/a\u003es\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eWe usually consider a \u003ca href=\"/posts/kbhtokenization/\"\u003etoken\u003c/a\u003e as distinct \u003ca href=\"/posts/kbhtokenization/\"\u003ewordform\u003c/a\u003e, counting duplicates; whereas, we usually consider \u003ca href=\"/posts/kbhtokenization/\"\u003eword type\u003c/a\u003es as unique, non-duplicated distinct \u003ca href=\"/posts/kbhtokenization/\"\u003ewordform\u003c/a\u003es.\u003c/p\u003e\n\u003ch3 id=\"clitics\"\u003eclitics\u003c/h3\u003e\n\u003cp\u003e\u003ccode\u003eJohn's\u003c/code\u003e: word that doesn\u0026rsquo;t stand on its own.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtokenization/","tags":null,"title":"tokenization"},{"categories":null,"contents":"For directed acyclic graphs, a topological sort of a 
directed graph is such that if there\u0026rsquo;s an edge \\(A \\to B\\), then \\(A\\) comes before \\(B\\) in the sort.\nUnder direct acyclic graphs, a topological sort always exist.\n","html":"\u003cp\u003eFor directed acyclic graphs, a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e of a directed graph is such that if there\u0026rsquo;s an edge \\(A \\to B\\), then \\(A\\) comes before \\(B\\) in the sort.\u003c/p\u003e\n\u003cp\u003eUnder direct acyclic graphs, a \u003ca href=\"/posts/kbhtopological_sort/\"\u003etopological sort\u003c/a\u003e always exist.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtopological_sort/","tags":null,"title":"topological sort"},{"categories":null,"contents":"Finding training data for AI is hard. So instead:\nIntentional training data curated for training data Spent time thinking about bias, control, etc. Training set of convenience Dataset that just comes about Problematic: Accidentally introduce bias into the data: Googling images of CEOs, which is convenient, results in all white males for a bit.\n","html":"\u003cp\u003eFinding training data for AI is hard. 
So instead:\u003c/p\u003e\n\u003ch2 id=\"intentional-training-data\"\u003eIntentional training data\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecurated for training data\u003c/li\u003e\n\u003cli\u003eSpent time thinking about bias, control, etc.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"training-set-of-convenience\"\u003eTraining set of convenience\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eDataset that just comes about\u003c/li\u003e\n\u003cli\u003eProblematic:\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eAccidentally introduce bias into the data: Googling images of CEOs, which is convenient, results in all white males for a bit.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtraining_data_sourcing/","tags":null,"title":"Training Data Sourcing"},{"categories":null,"contents":"\u0026ldquo;What we have been building since ChatGPT at H4.\nNo pretraining in any way Basic Three Steps Goal: \u0026ldquo;helpful, harmless, honest, and huggy\u0026rdquo; bots.\nRetraining step: large-scale next token prediction Incontext learning: few shot learning without updating parameters \u0026ldquo;Helpful\u0026rdquo; steps Taking supervised data to perform supervised fine tuning \u0026ldquo;Harmless\u0026rdquo; steps Training a classifier for result ranking RLHF Benchmarking Before we started to train, we have a problem. Most benchmarks are on generic reasoning, which evaluates 1), 2). Therefore, we need new metrics for steps 4) and 5).\nSo:\nEvaluating instruction and \u0026ldquo;chatty-ness\u0026rdquo; Pairwise Elo Ratings leaderboard from 🤗 + AlpacaEval. Both use GPT4 as the automated evaluator + as well as humans. 
MTBench from LMSYS has a new benchmark for the same thing, but supports multi-turn evaluation.\nThree main effects observed:\nresults improve slightly the longer the prompt GPT4 MTBench assigns worse scores on gpt4 like data adding more data into fine tuning had diminishing returns after thousands of samples TruthfulQA is the most differentiating benchmark; most others score about the same\nEvaluating Reward Model There are not any open source reward models. Nor is there anything on evaluating or dataset on red teaming. The only dataset out there is Anthropic\u0026rsquo;s red teaming data.\nhttps://huggingface.co/blog/red-teaming\nWackiness GPT4 as an evaluator Why is everybody using GPT4 as a proxy for humans?\nGPT4 has a left positional bias (if you admonish GPT about this, it will prefer the second one instead :/), while humans provide pretty much uniform rating \u0026ldquo;Doping\u0026rdquo;: GPT4 prefers model trained on data that it itself generated GPT4 prefers a large variance in unique tokens GPT4 has bad correlation with humans with \u0026ldquo;low entropy\u0026rdquo; factual tasks: QA, Summarization, Code; it has better correlation with humans in brainstorming and creative generation arxiv:2306.05685\nSupervised Fine Tuning Data \u0026ldquo;Self-Instruct\u0026rdquo; dataset, Wang et all 2022 =\u0026gt; \u0026ldquo;Surge Instruct\u0026rdquo;, huggingface 2023\nInstruction (what to do) Input (what to do it on) Output (what you are supposed to do) Goal: \u0026ldquo;helpful and chatty\u0026rdquo;\nBootstrapping Data Generation 175 seed tasks: 1 instruction + 1 input/output pair Give it to a language model to generate more instructions Language mode Human-In-The-loop Data Generation Ultrachat, Ding et al 2023\nhuman doing some research on the topic and create a prompt ask LLM to generate the output if result not good, rephrase the prompt repeat until good Roleplaying Data Generation Have two models role play to get and correct data.\nHuggingface 
Surge-Instruct Humans write everything from scratch. With a pretrained model, diminishing return is seen after a few thousand high quality examples.\nTask Distribution\nWhat should the topics be?\nUse InstructGPT as guidance: largestest section is a generation task (12%), OpenQA the second largest one (12.4%).\nHF replaced InstructGPT distribution\u0026rsquo;s \u0026ldquo;Other\u0026rdquo; section (3.5%) with code work.\nLength Distribution\nHow long should the prompts be? Collected distributions, and Surge Instruct seems to be closest with InstructionGPT.\nBoth Anthropic and InstructGPT used a US based task force, and so so did 🤗\nus based taskforce roughly even gender slpit 19 to 62 years old primarily white technical degree to PhD Only used one turn. Multi-turn fine tuning wasn\u0026rsquo;t a thing a few mounths ago.\nTraining starcoder, falcon, llama2 True fine tuning + PEFT (LoRA) The HF part of RLHF Scale agreed with Surge and H4 a lot more, but mostly no one agreed with anyone.\nGoal: \u0026ldquo;safe and factual\u0026rdquo;\nTask Distribution Distribution: a lot more about factual things (because we want to encourage factualness) so much more math and code than the general generation. 
Its also easier to score.\nRanking Guidelines OpenAI have guidelines about how to rate Rate every turn from the diaglogue Smaller total length (\u0026lt;2048 tokens) Helpfulness OVER honesty \u0026ndash; this is the opposite of OpenAI because the model wasn\u0026rsquo;t large enough to be very honest Two step selection: \u0026ldquo;which one is better\u0026rdquo; =\u0026gt; \u0026ldquo;how much is better\u0026rdquo;\n","html":"\u003cp\u003e\u0026ldquo;What we have been building since ChatGPT at \u003ca href=\"/posts/kbhh4/\"\u003eH4\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eNo pretraining in any way\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"basic-three-steps\"\u003eBasic Three Steps\u003c/h2\u003e\n\u003cp\u003eGoal: \u0026ldquo;helpful, harmless, honest, and huggy\u0026rdquo; bots.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eRetraining step: large-scale next token prediction\u003c/li\u003e\n\u003cli\u003eIncontext learning: few shot learning without updating parameters\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Helpful\u0026rdquo; steps\n\u003col\u003e\n\u003cli\u003eTaking supervised data to perform supervised fine tuning\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Harmless\u0026rdquo; steps\n\u003col\u003e\n\u003cli\u003eTraining a classifier for result ranking\u003c/li\u003e\n\u003cli\u003eRLHF\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"benchmarking\"\u003eBenchmarking\u003c/h2\u003e\n\u003cp\u003eBefore we started to train, we have a problem. Most benchmarks are on generic reasoning, which evaluates 1), 2). Therefore, we need new metrics for steps 4) and 5).\u003c/p\u003e\n\u003cp\u003eSo:\u003c/p\u003e\n\u003ch3 id=\"evaluating-instruction-and-chatty-ness\"\u003eEvaluating instruction and \u0026ldquo;chatty-ness\u0026rdquo;\u003c/h3\u003e\n\u003cp\u003ePairwise \u003ca href=\"/posts/kbhelo_ratings/\"\u003eElo Ratings\u003c/a\u003e leaderboard from 🤗 + AlpacaEval. 
Both use GPT4 as the automated evaluator + as well as humans. MTBench from LMSYS has a new benchmark for the same thing, but supports multi-turn evaluation.\u003c/p\u003e\n\u003cp\u003eThree main effects observed:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eresults improve \u003cem\u003eslightly\u003c/em\u003e the longer the prompt\u003c/li\u003e\n\u003cli\u003eGPT4 MTBench assigns worse scores on gpt4 like data\u003c/li\u003e\n\u003cli\u003eadding more data into fine tuning had diminishing returns after thousands of samples\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eTruthfulQA is the most differentiating benchmark; most others score about the same\u003c/p\u003e\n\u003ch3 id=\"evaluating-reward-model\"\u003eEvaluating Reward Model\u003c/h3\u003e\n\u003cp\u003eThere are not any open source reward models. Nor is there anything on evaluating or dataset on red teaming. The only dataset out there is Anthropic\u0026rsquo;s red teaming data.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"https://huggingface.co/blog/red-teaming\"\u003ehttps://huggingface.co/blog/red-teaming\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"wackiness-gpt4-as-an-evaluator\"\u003eWackiness GPT4 as an evaluator\u003c/h3\u003e\n\u003cp\u003eWhy is everybody using GPT4 as a proxy for humans?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eGPT4 has a left positional bias (if you admonish GPT about this, it will prefer the second one instead :/), while humans provide pretty much uniform rating\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Doping\u0026rdquo;: GPT4 prefers model trained on data that it itself generated\u003c/li\u003e\n\u003cli\u003eGPT4 prefers a large variance in unique tokens\u003c/li\u003e\n\u003cli\u003eGPT4 has bad correlation with humans with \u0026ldquo;low entropy\u0026rdquo; factual tasks: QA, Summarization, Code; it has better correlation with humans in brainstorming and creative generation\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003earxiv:2306.05685\u003c/p\u003e\n\u003ch2 
id=\"supervised-fine-tuning\"\u003eSupervised Fine Tuning\u003c/h2\u003e\n\u003ch3 id=\"data\"\u003eData\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Self-Instruct\u0026rdquo; dataset, Wang et all 2022 =\u0026gt; \u0026ldquo;Surge Instruct\u0026rdquo;, huggingface 2023\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eInstruction (what to do)\u003c/li\u003e\n\u003cli\u003eInput (what to do it on)\u003c/li\u003e\n\u003cli\u003eOutput (what you are supposed to do)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eGoal: \u0026ldquo;helpful and chatty\u0026rdquo;\u003c/p\u003e\n\u003ch4 id=\"bootstrapping-data-generation\"\u003eBootstrapping Data Generation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e175 seed tasks: 1 instruction + 1 input/output pair\u003c/li\u003e\n\u003cli\u003eGive it to a language model to generate more instructions\u003c/li\u003e\n\u003cli\u003eLanguage mode\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"human-in-the-loop-data-generation\"\u003eHuman-In-The-loop Data Generation\u003c/h4\u003e\n\u003cp\u003eUltrachat, Ding et al 2023\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehuman doing some research on the topic and create a prompt\u003c/li\u003e\n\u003cli\u003eask LLM to generate the output\u003c/li\u003e\n\u003cli\u003eif result not good, rephrase the prompt\u003c/li\u003e\n\u003cli\u003erepeat until good\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch4 id=\"roleplaying-data-generation\"\u003eRoleplaying Data Generation\u003c/h4\u003e\n\u003cp\u003eHave two models role play to get and correct data.\u003c/p\u003e\n\u003ch4 id=\"huggingface-surge-instruct\"\u003eHuggingface Surge-Instruct\u003c/h4\u003e\n\u003cp\u003eHumans write everything from scratch. 
With a pretrained model, diminishing return is seen after a few thousand high quality examples.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eTask Distribution\u003c/p\u003e\n\u003cp\u003eWhat should the topics be?\u003c/p\u003e\n\u003cp\u003eUse InstructGPT as guidance: largestest section is a generation task (12%), OpenQA the second largest one (12.4%).\u003c/p\u003e\n\u003cp\u003eHF replaced InstructGPT distribution\u0026rsquo;s \u0026ldquo;Other\u0026rdquo; section (3.5%) with code work.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eLength Distribution\u003c/p\u003e\n\u003cp\u003eHow long should the prompts be? Collected distributions, and Surge Instruct seems to be closest with InstructionGPT.\u003c/p\u003e\n\u003cp\u003eBoth Anthropic and InstructGPT used a US based task force, and so so did 🤗\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eus based taskforce\u003c/li\u003e\n\u003cli\u003eroughly even gender slpit\u003c/li\u003e\n\u003cli\u003e19 to 62 years old\u003c/li\u003e\n\u003cli\u003eprimarily white\u003c/li\u003e\n\u003cli\u003etechnical degree to PhD\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOnly used one turn. 
Multi-turn fine tuning wasn\u0026rsquo;t a thing a few mounths ago.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"training\"\u003eTraining\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003estarcoder, falcon, llama2\u003c/li\u003e\n\u003cli\u003eTrue fine tuning + \u003ca href=\"/posts/kbhpeft/\"\u003ePEFT\u003c/a\u003e (LoRA)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"the-hf-part-of-rlhf\"\u003eThe HF part of RLHF\u003c/h2\u003e\n\u003cp\u003eScale agreed with Surge and \u003ca href=\"/posts/kbhh4/\"\u003eH4\u003c/a\u003e a lot more, but mostly no one agreed with anyone.\u003c/p\u003e\n\u003cp\u003eGoal: \u0026ldquo;safe and factual\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"task-distribution\"\u003eTask Distribution\u003c/h3\u003e\n\u003cp\u003eDistribution: a lot more about factual things (because we want to encourage factualness) so much more math and code than the general generation. Its also easier to score.\u003c/p\u003e\n\u003ch3 id=\"ranking-guidelines\"\u003eRanking Guidelines\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eOpenAI have guidelines about how to rate\u003c/li\u003e\n\u003cli\u003eRate every turn from the diaglogue\u003c/li\u003e\n\u003cli\u003eSmaller total length (\u0026lt;2048 tokens)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eHelpfulness OVER honesty\u003c/strong\u003e \u0026ndash; this is the opposite of OpenAI because the model wasn\u0026rsquo;t large enough to be very honest\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eTwo step selection: \u0026ldquo;which one is better\u0026rdquo; =\u0026gt; \u0026ldquo;how much is better\u0026rdquo;\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtraining_helpful_chatbots/","tags":null,"title":"Training Helpful Chatbots"},{"categories":null,"contents":"the transformational generative syntax is a linguistical precept proposed by Noam Chomsky which has the interesting conclusion that meaning is supported by structure, rather than the other way around as generative 
semantics suggests.\nThis means that you can first come up with generic, independent structure to a sentence, then fill in the sentence with meaning.\nFor instance, \u0026ldquo;colorless green ideas sleep furiously\u0026rdquo; is a sentence Noam Chomsky proposes to have perfect structure but failes to be filled with meaning, supporting the transformational generative syntax theory.\nThis supports the Lexicalist Hypothesis, which is the theory that lexicalization transformations are independent of structural transformations. This would therefore support the proof for the existence of semantic primes.\n","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e is a linguistical precept proposed by \u003ca href=\"/posts/kbhchomsky/\"\u003eNoam Chomsky\u003c/a\u003e which has the interesting conclusion that \u003cstrong\u003e\u003cstrong\u003emeaning\u003c/strong\u003e\u003c/strong\u003e is supported by \u003cstrong\u003e\u003cstrong\u003estructure\u003c/strong\u003e\u003c/strong\u003e, rather than the other way around as \u003ca href=\"/posts/kbhgenerative_semantics/\"\u003egenerative semantics\u003c/a\u003e suggests.\u003c/p\u003e\n\u003cp\u003eThis means that you can first come up with generic, independent structure to a sentence, then fill in the sentence with meaning.\u003c/p\u003e\n\u003cp\u003eFor instance, \u0026ldquo;colorless green ideas sleep furiously\u0026rdquo; is a sentence \u003ca href=\"/posts/kbhchomsky/\"\u003eNoam Chomsky\u003c/a\u003e proposes to have perfect structure but failes to be filled with meaning, supporting the \u003ca href=\"/posts/kbhtransformational_generative_syntax/\"\u003etransformational generative syntax\u003c/a\u003e theory.\u003c/p\u003e\n\u003cp\u003eThis supports the \u003ca href=\"/posts/kbhlexicalization_hypothesis/\"\u003eLexicalist Hypothesis\u003c/a\u003e, which is the theory that lexicalization transformations are independent of 
structural transformations. This would therefore support the \u003ca href=\"/posts/kbhsemantic_primes/#proof-for-the-existence-of-id-4e587814-bfd1-458e-ba5f-67882832107b-semantic-prime-s\"\u003eproof for the existence of semantic primes\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtransformational_generative_syntax/","tags":null,"title":"transformational generative syntax"},{"categories":null,"contents":"Background Current deep-learning first approaches have shown promising results for the speech text diarization task. For ASR-independent diarization, specifically, two main methods appear as yielding fruitful conclusions:\nAuditory feature extraction using deep learning to create a trained, fixed-size latent representation via Mel-frequency cepstral coefficients slices that came from any existing voice-activity detection (VAD) scheme ((Snyder et al. 2018)), where the features extracted with the neural network are later used with traditional clustering and Variational Bayes refinement ((Sell et al. 2018; Landini et al. 2022)) approaches to produce groups of diarized speakers\nEnd-to-end neural approaches which takes temporally-dependent log-mel-frequency cepstrum and perform voice activity detection, speaker recognition, and diarization directly on the same neural network ((Fujita, Kanda, Horiguchi, Xue, et al. 2019))\nFigure 1: \u0026lt;\u0026amp;fujita2019end1\u0026gt;\nThe latter, end-to-end approach (EEND), offers lower Diarization Error Rate (DER) than former clustering ((Fujita, Kanda, Horiguchi, Xue, et al. 2019)), achiving 10.76 vs. 11.53 DER on the CALLHOME dataset respectively. However, it confers a few disadvantages: the end-to-end system produces a diarization result directly dependent on the time dimension of the input Log-Mel (i.e. 
it outputs probability per speaker per time slice), so its error could include both the error in voice activity detection and diarization; furthermore, the one-shot nature of this method allows no interpretation or manipulation of its actual outputs\u0026mdash;such as specifying the number of speakers after diarization is completed (as is possible with clustering because one could simply choose the number of centroids to calculate) (Park et al. 2021).\nWe therefore desire here to combine the advantages of both methods discussed here in producing a diarization technique that both retains the flexible nature of vector-based approaches but also seeks to generate as complete and performant (in terms of DER) a pipeline as possible with deep learning.\nMotivations The discussion here is motivated by a few facts:\nExcellent ((Radford et al. 2022)) ASR models exist without being pre-trained on the diarization task, meaning they produce well-timed transcriptions without the speakers labels Well performing forced-alignment tools exist (McAuliffe et al. 2017), which can be applied on-top-of rough voice activity segments from assumption #1 (for instance, by reading attention activations; or by concatenating rough word timings). The number of speakers is not exogenously known, yet could be specified after diarization completes. Proposal One of the latest advances for EEND-class models leverages the reasonably novel Convolutional Transformer (\u0026ldquo;Conformer\u0026rdquo;) architecture ((Gulati et al. 2020)) to improve the performance of the model. Specifically, the model swaps the time-delayed fully connected blocks in favor of Conformer blocks, and mixes in the SpecAugment data augmentation technique for the Log-Mel frequency input ((Liu et al. 2021)). 
We will use this new model both as the basis of our work, as well as the benchmark to improve upon for diarization results.\nText-Aware (Near) End-to-End Approach Contextual and positional information (for instance, raving pronoun use) provides a useful basis by which humans recognize the flow of an utterance used to diarize speech.\nAssumption #2 above indicates that one could identify segments of text transcripts corresponding to the input audio\u0026mdash;albeit not diarized. We hypothesize that leveraging the information from text transcripts (even if not tightly aligned) will help the model track the flow of conversation and get better diarization performance.\nIn the figure above, we specifically chose the Transformer BERT encoder ((Devlin et al. 2018)) to process a segment of text ASR corresponding to the input log-mel audio. The processed Bert latents are added and statistically pooled to the Conformer outputs from processing the audio signals; the fused embeddings are then passed through a fully-connected classification head for the usual labeling consistent with EEND ((Fujita, Kanda, Horiguchi, Nagamatsu, et al. 2019)).\nBy training a multimodal scheme in this manner, we hope to demonstrate an improved level of performance which fusing ASR text can provide to the diarization task.\nImproved X-Vector via Conformers Design constraint #3 which we outlined earlier was the desire to identify extogenously the number of speakers. While an extension below explores the possibility of this in an end-to-end architecture, conventional probabilistic clustering methods (even including neural components, such as (Snyder et al. 2018)) allow manually specified clusters to be created and tagged using PLDA ((Kenny et al. 2013)) or HMMs ((Landini et al. 2022)).\nOne direct extension to this approach would be the use of the Conformer architecture highlighted above in place of the fully-connected network ((Snyder et al. 
2017)) which forms the basis of the x-vector approach.\nTo perform this, the x-vector representations would be swapped directly for the final latent from the EEND Conformer architecture prior to the fully-connected prediction head. All other post-processing of x-vectors, we hypothesize, could be applied to the new latents with minimal changes.\nSpecifically, convolution self-attention in the Conformer architecture work in a similar pattern to ((Peddinti, Povey, and Khudanpur 2015)) to scan across time frames; however, self-attention is a trained parameter, allowing the timescale dependence to be adaptive to the context provided.\nFurther adaptive training\u0026mdash;including training on previously segmented voice activity, and/or taking MFCC instead of Log-Mel as input\u0026mdash;maybe needed mostly following the training objectives in ((Snyder et al. 2018)) in order for the latent vectors to reflect the characteristics of new, unknown speakers.\nOther Possibilities Text and Vectors One direct correlary of the two proposals above is simply concatenating the novelty of each: creating text+audio transformer based latent embeddings as the basis for speaker clustering.\nSpeaker-Count Signal Clustering approaches, although more explainable, does confer some disadvantages. For instance, it will have no good way forward to predict overlapping speakers (as \u0026ldquo;a speaker similar to both A and B\u0026rdquo; would appear in a similar place in the latent space as \u0026ldquo;A and B are crosstalking\u0026rdquo;).\nReturning to the EEND approach, however, brings into focus the question regarding speaker count. One possibility for addressing this involves injecting an extra token\u0026mdash;either in the \u0026ldquo;text\u0026rdquo; portion of the multimodal implementation, or perhaps simply fused into the input of the original diarizing Conformer (i.e. (Liu et al. 
2021))\u0026mdash;representing the number of speakers.\nThen, we will add a large negative positive term to the loss associated with incorrectly-used (i.e. out of bounds) speaker ID classes.\nUnfortunately, because of the minimal weight of one speaker-count feature compared to the audio sample, and the Gaussian nature of neural networks, this method will provide no garantees regarding the actual diarization outputs.\nDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” arXiv Preprint arXiv:1810.04805. Fujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Kenji Nagamatsu, and Shinji Watanabe. 2019. “End-to-End Neural Speaker Diarization with Permutation-Free Objectives.” arXiv Preprint arXiv:1909.05952. Fujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Yawen Xue, Kenji Nagamatsu, and Shinji Watanabe. 2019. “End-to-End Neural Speaker Diarization with Self-Attention.” In 2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), 296–303. IEEE. Gulati, Anmol, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, et al. 2020. “Conformer: Convolution-Augmented Transformer for Speech Recognition.” arXiv. http://arxiv.org/abs/2005.08100. Kenny, Patrick, Themos Stafylakis, Pierre Ouellet, Md. Jahangir Alam, and Pierre Dumouchel. 2013. “PLDA for Speaker Verification with Utterances of Arbitrary Duration.” In 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, 7649–53. Vancouver, BC, Canada: IEEE. doi:10.1109/ICASSP.2013.6639151. Landini, Federico, Ján Profant, Mireia Diez, and Lukáš Burget. 2022. “Bayesian HMM Clustering of X-Vector Sequences (VBx) in Speaker Diarization: Theory, Implementation and Analysis on Standard Tasks.” Computer Speech \u0026#38; Language 71 (January): 101254. doi:10.1016/j.csl.2021.101254. Liu, Yi Chieh, Eunjung Han, Chul Lee, and Andreas Stolcke. 2021. 
“End-to-End Neural Diarization: From Transformer to Conformer.” In Interspeech 2021, 3081–85. doi:10.21437/Interspeech.2021-1909. McAuliffe, Michael, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. 2017. “Montreal Forced Aligner: Trainable Text-Speech Alignment Using Kaldi.” In Interspeech, 2017:498–502. Park, Tae Jin, Naoyuki Kanda, Dimitrios Dimitriadis, Kyu J. Han, Shinji Watanabe, and Shrikanth Narayanan. 2021. “A Review of Speaker Diarization: Recent Advances with Deep Learning.” arXiv. http://arxiv.org/abs/2101.09624. Peddinti, Vijayaditya, Daniel Povey, and Sanjeev Khudanpur. 2015. “A Time Delay Neural Network Architecture for Efficient Modeling of Long Temporal Contexts.” In Interspeech 2015, 3214–18. ISCA. doi:10.21437/Interspeech.2015-647. Radford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. “Robust Speech Recognition via Large-Scale Weak Supervision.” arXiv Preprint arXiv:2212.04356. Sell, Gregory, David Snyder, Alan McCree, Daniel Garcia-Romero, Jesús Villalba, Matthew Maciejewski, Vimal Manohar, et al. 2018. “Diarization Is Hard: Some Experiences and Lessons Learned for the JHU Team in the Inaugural DIHARD Challenge.” In Interspeech 2018, 2808–12. ISCA. doi:10.21437/Interspeech.2018-1893. Snyder, David, Daniel Garcia-Romero, Daniel Povey, and Sanjeev Khudanpur. 2017. “Deep Neural Network Embeddings for Text-Independent Speaker Verification.” In Interspeech 2017, 999–1003. ISCA. doi:10.21437/Interspeech.2017-620. Snyder, David, Daniel Garcia-Romero, Gregory Sell, Daniel Povey, and Sanjeev Khudanpur. 2018. “X-Vectors: Robust DNN Embeddings for Speaker Recognition.” In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 5329–33. Calgary, AB: IEEE. doi:10.1109/ICASSP.2018.8461375. 
","html":"\u003ch2 id=\"background\"\u003eBackground\u003c/h2\u003e\n\u003cp\u003eCurrent deep-learning first approaches have shown promising results for the speech text diarization task. For ASR-independent diarization, specifically, two main methods appear as yielding fruitful conclusions:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\n\u003cp\u003eAuditory feature extraction using deep learning to create a trained, fixed-size latent representation via Mel-frequency cepstral coefficients slices that came from any existing voice-activity detection (VAD) scheme ((\u003ca href=\"#citeproc_bib_item_14\"\u003eSnyder et al. 2018\u003c/a\u003e)), where the features extracted with the neural network are later used with traditional clustering and Variational Bayes refinement ((\u003ca href=\"#citeproc_bib_item_12\"\u003eSell et al. 2018\u003c/a\u003e; \u003ca href=\"#citeproc_bib_item_6\"\u003eLandini et al. 2022\u003c/a\u003e)) approaches to produce groups of diarized speakers\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eEnd-to-end neural approaches which takes temporally-dependent log-mel-frequency cepstrum and perform voice activity detection, speaker recognition, and diarization directly on the same neural network ((\u003ca href=\"#citeproc_bib_item_3\"\u003eFujita, Kanda, Horiguchi, Xue, et al. 2019\u003c/a\u003e))\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-09_23-26-04_screenshot.png\"\n alt=\"Figure 1: \u0026amp;lt;\u0026amp;amp;fujita2019end1\u0026amp;gt;\"\u003e\u003cfigcaption\u003e\n \u003cp\u003e\u003cspan class=\"figure-number\"\u003eFigure 1: \u003c/span\u003e\u0026lt;\u0026amp;fujita2019end1\u0026gt;\u003c/p\u003e\n \u003c/figcaption\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThe latter, end-to-end approach (EEND), offers lower Diarization Error Rate (DER) than former clustering ((\u003ca href=\"#citeproc_bib_item_3\"\u003eFujita, Kanda, Horiguchi, Xue, et al. 
2019\u003c/a\u003e)), achiving 10.76 vs. 11.53 DER on the CALLHOME dataset respectively. However, it confers a few disadvantages: the end-to-end system produces a diarization result directly dependent on the time dimension of the input Log-Mel (i.e. it outputs probability per speaker per time slice), so its error could include \u003cem\u003eboth\u003c/em\u003e the error in voice activity detection and diarization; furthermore, the one-shot nature of this method allows no interpretation or manipulation of its actual outputs\u0026mdash;such as specifying the number of speakers \u003cem\u003eafter\u003c/em\u003e diarization is completed (as is possible with clustering because one could simply choose the number of centroids to calculate) (\u003ca href=\"#citeproc_bib_item_9\"\u003ePark et al. 2021\u003c/a\u003e).\u003c/p\u003e\n\u003cp\u003eWe therefore desire here to combine the advantages of both methods discussed here in producing a diarization technique that both retains the flexible nature of vector-based approaches but also seeks to generate as complete and performant (in terms of DER) a pipeline as possible with deep learning.\u003c/p\u003e\n\u003ch2 id=\"motivations\"\u003eMotivations\u003c/h2\u003e\n\u003cp\u003eThe discussion here is motivated by a few facts:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eExcellent ((\u003ca href=\"#citeproc_bib_item_11\"\u003eRadford et al. 2022\u003c/a\u003e)) ASR models exist without being pre-trained on the diarization task, meaning they produce well-timed transcriptions without the speakers labels\u003c/li\u003e\n\u003cli\u003eWell performing forced-alignment tools exist (\u003ca href=\"#citeproc_bib_item_8\"\u003eMcAuliffe et al. 
2017\u003c/a\u003e), which can be applied on-top-of rough voice activity segments from assumption \u003ccode\u003e#1\u003c/code\u003e (for instance, by reading attention activations; or by concatenating rough word timings).\u003c/li\u003e\n\u003cli\u003eThe number of speakers is not exogenously known, yet could be specified after diarization completes.\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"proposal\"\u003eProposal\u003c/h2\u003e\n\u003cp\u003eOne of the latest advances for EEND-class models leverages the reasonably novel Convolutional Transformer (\u0026ldquo;Conformer\u0026rdquo;) architecture ((\u003ca href=\"#citeproc_bib_item_4\"\u003eGulati et al. 2020\u003c/a\u003e)) to improve the performance of the model. Specifically, the model swaps the time-delayed fully connected blocks in favor of Conformer blocks, and mixes in the SpecAugment data augmentation technique for the Log-Mel frequency input ((\u003ca href=\"#citeproc_bib_item_7\"\u003eLiu et al. 2021\u003c/a\u003e)). We will use this new model both as the basis of our work, as well as the benchmark to improve upon for diarization results.\u003c/p\u003e\n\u003ch3 id=\"text-aware--near--end-to-end-approach\"\u003eText-Aware (Near) End-to-End Approach\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-04-11_22-49-01_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eContextual and positional information (for instance, raving pronoun use) provides a useful basis by which humans recognize the flow of an utterance used to diarize speech.\u003c/p\u003e\n\u003cp\u003eAssumption \u003ccode\u003e#2\u003c/code\u003e above indicates that one could identify segments of text transcripts corresponding to the input audio\u0026mdash;albeit not diarized. 
We hypothesize that leveraging the information from text transcripts (even if not tightly aligned) will help the model track the flow of conversation and get better diarization performance.\u003c/p\u003e\n\u003cp\u003eIn the figure above, we specifically chose the Transformer BERT encoder ((\u003ca href=\"#citeproc_bib_item_1\"\u003eDevlin et al. 2018\u003c/a\u003e)) to process a segment of text ASR corresponding to the input log-mel audio. The processed Bert latents are added and statistically pooled to the Conformer outputs from processing the audio signals; the fused embeddings are then passed through a fully-connected classification head for the usual labeling consistent with EEND ((\u003ca href=\"#citeproc_bib_item_2\"\u003eFujita, Kanda, Horiguchi, Nagamatsu, et al. 2019\u003c/a\u003e)).\u003c/p\u003e\n\u003cp\u003eBy training a multimodal scheme in this manner, we hope to demonstrate an improved level of performance which fusing ASR text can provide to the diarization task.\u003c/p\u003e\n\u003ch3 id=\"improved-x-vector-via-conformers\"\u003eImproved X-Vector via Conformers\u003c/h3\u003e\n\u003cp\u003eDesign constraint \u003ccode\u003e#3\u003c/code\u003e which we outlined earlier was the desire to identify extogenously the number of speakers. While an extension below explores the possibility of this in an end-to-end architecture, conventional probabilistic clustering methods (even including neural components, such as (\u003ca href=\"#citeproc_bib_item_14\"\u003eSnyder et al. 2018\u003c/a\u003e)) allow manually specified clusters to be created and tagged using PLDA ((\u003ca href=\"#citeproc_bib_item_5\"\u003eKenny et al. 2013\u003c/a\u003e)) or HMMs ((\u003ca href=\"#citeproc_bib_item_6\"\u003eLandini et al. 2022\u003c/a\u003e)).\u003c/p\u003e\n\u003cp\u003eOne direct extension to this approach would be the use of the Conformer architecture highlighted above in place of the fully-connected network ((\u003ca href=\"#citeproc_bib_item_13\"\u003eSnyder et al. 
2017\u003c/a\u003e)) which forms the basis of the x-vector approach.\u003c/p\u003e\n\u003cp\u003eTo perform this, the x-vector representations would be swapped directly for the final latent from the EEND Conformer architecture prior to the fully-connected prediction head. All other post-processing of x-vectors, we hypothesize, could be applied to the new latents with minimal changes.\u003c/p\u003e\n\u003cp\u003eSpecifically, convolution self-attention in the Conformer architecture work in a similar pattern to ((\u003ca href=\"#citeproc_bib_item_10\"\u003ePeddinti, Povey, and Khudanpur 2015\u003c/a\u003e)) to scan across time frames; however, self-attention is a trained parameter, allowing the timescale dependence to be adaptive to the context provided.\u003c/p\u003e\n\u003cp\u003eFurther adaptive training\u0026mdash;including training on previously segmented voice activity, and/or taking MFCC instead of Log-Mel as input\u0026mdash;maybe needed mostly following the training objectives in ((\u003ca href=\"#citeproc_bib_item_14\"\u003eSnyder et al. 2018\u003c/a\u003e)) in order for the latent vectors to reflect the characteristics of new, unknown speakers.\u003c/p\u003e\n\u003ch3 id=\"other-possibilities\"\u003eOther Possibilities\u003c/h3\u003e\n\u003ch4 id=\"text-and-vectors\"\u003eText and Vectors\u003c/h4\u003e\n\u003cp\u003eOne direct correlary of the two proposals above is simply concatenating the novelty of each: creating text+audio transformer based latent embeddings as the basis for speaker clustering.\u003c/p\u003e\n\u003ch4 id=\"speaker-count-signal\"\u003eSpeaker-Count Signal\u003c/h4\u003e\n\u003cp\u003eClustering approaches, although more explainable, does confer some disadvantages. 
For instance, it will have no good way forward to predict overlapping speakers (as \u0026ldquo;a speaker similar to both A and B\u0026rdquo; would appear in a similar place in the latent space as \u0026ldquo;A and B are crosstalking\u0026rdquo;).\u003c/p\u003e\n\u003cp\u003eReturning to the EEND approach, however, brings into focus the question regarding speaker count. One possibility for addressing this involves injecting an extra token\u0026mdash;either in the \u0026ldquo;text\u0026rdquo; portion of the multimodal implementation, or perhaps simply fused into the input of the original diarizing Conformer (i.e. (\u003ca href=\"#citeproc_bib_item_7\"\u003eLiu et al. 2021\u003c/a\u003e))\u0026mdash;representing the number of speakers.\u003c/p\u003e\n\u003cp\u003eThen, we will add a large negative positive term to the loss associated with incorrectly-used (i.e. out of bounds) speaker ID classes.\u003c/p\u003e\n\u003cp\u003eUnfortunately, because of the minimal weight of one speaker-count feature compared to the audio sample, and the Gaussian nature of neural networks, this method will provide no garantees regarding the actual diarization outputs.\u003c/p\u003e\n\u003cstyle\u003e.csl-entry{text-indent: -1.5em; margin-left: 1.5em;}\u003c/style\u003e\u003cdiv class=\"csl-bib-body\"\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_1\"\u003e\u003c/a\u003eDevlin, Jacob, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. “Bert: Pre-Training of Deep Bidirectional Transformers for Language Understanding.” \u003ci\u003earXiv Preprint arXiv:1810.04805\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_2\"\u003e\u003c/a\u003eFujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Kenji Nagamatsu, and Shinji Watanabe. 2019. 
“End-to-End Neural Speaker Diarization with Permutation-Free Objectives.” \u003ci\u003earXiv Preprint arXiv:1909.05952\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_3\"\u003e\u003c/a\u003eFujita, Yusuke, Naoyuki Kanda, Shota Horiguchi, Yawen Xue, Kenji Nagamatsu, and Shinji Watanabe. 2019. “End-to-End Neural Speaker Diarization with Self-Attention.” In \u003ci\u003e2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)\u003c/i\u003e, 296–303. IEEE.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_4\"\u003e\u003c/a\u003eGulati, Anmol, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, et al. 2020. “Conformer: Convolution-Augmented Transformer for Speech Recognition.” arXiv. \u003ca href=\"http://arxiv.org/abs/2005.08100\"\u003ehttp://arxiv.org/abs/2005.08100\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_5\"\u003e\u003c/a\u003eKenny, Patrick, Themos Stafylakis, Pierre Ouellet, Md. Jahangir Alam, and Pierre Dumouchel. 2013. “PLDA for Speaker Verification with Utterances of Arbitrary Duration.” In \u003ci\u003e2013 IEEE International Conference on Acoustics, Speech and Signal Processing\u003c/i\u003e, 7649–53. Vancouver, BC, Canada: IEEE. doi:\u003ca href=\"https://doi.org/10.1109/ICASSP.2013.6639151\"\u003e10.1109/ICASSP.2013.6639151\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_6\"\u003e\u003c/a\u003eLandini, Federico, Ján Profant, Mireia Diez, and Lukáš Burget. 2022. “Bayesian HMM Clustering of X-Vector Sequences (VBx) in Speaker Diarization: Theory, Implementation and Analysis on Standard Tasks.” \u003ci\u003eComputer Speech \u0026#38; Language\u003c/i\u003e 71 (January): 101254. 
doi:\u003ca href=\"https://doi.org/10.1016/j.csl.2021.101254\"\u003e10.1016/j.csl.2021.101254\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_7\"\u003e\u003c/a\u003eLiu, Yi Chieh, Eunjung Han, Chul Lee, and Andreas Stolcke. 2021. “End-to-End Neural Diarization: From Transformer to Conformer.” In \u003ci\u003eInterspeech 2021\u003c/i\u003e, 3081–85. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2021-1909\"\u003e10.21437/Interspeech.2021-1909\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_8\"\u003e\u003c/a\u003eMcAuliffe, Michael, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. 2017. “Montreal Forced Aligner: Trainable Text-Speech Alignment Using Kaldi.” In \u003ci\u003eInterspeech\u003c/i\u003e, 2017:498–502.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_9\"\u003e\u003c/a\u003ePark, Tae Jin, Naoyuki Kanda, Dimitrios Dimitriadis, Kyu J. Han, Shinji Watanabe, and Shrikanth Narayanan. 2021. “A Review of Speaker Diarization: Recent Advances with Deep Learning.” arXiv. \u003ca href=\"http://arxiv.org/abs/2101.09624\"\u003ehttp://arxiv.org/abs/2101.09624\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_10\"\u003e\u003c/a\u003ePeddinti, Vijayaditya, Daniel Povey, and Sanjeev Khudanpur. 2015. “A Time Delay Neural Network Architecture for Efficient Modeling of Long Temporal Contexts.” In \u003ci\u003eInterspeech 2015\u003c/i\u003e, 3214–18. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2015-647\"\u003e10.21437/Interspeech.2015-647\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_11\"\u003e\u003c/a\u003eRadford, Alec, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. 
“Robust Speech Recognition via Large-Scale Weak Supervision.” \u003ci\u003earXiv Preprint arXiv:2212.04356\u003c/i\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_12\"\u003e\u003c/a\u003eSell, Gregory, David Snyder, Alan McCree, Daniel Garcia-Romero, Jesús Villalba, Matthew Maciejewski, Vimal Manohar, et al. 2018. “Diarization Is Hard: Some Experiences and Lessons Learned for the JHU Team in the Inaugural DIHARD Challenge.” In \u003ci\u003eInterspeech 2018\u003c/i\u003e, 2808–12. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2018-1893\"\u003e10.21437/Interspeech.2018-1893\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_13\"\u003e\u003c/a\u003eSnyder, David, Daniel Garcia-Romero, Daniel Povey, and Sanjeev Khudanpur. 2017. “Deep Neural Network Embeddings for Text-Independent Speaker Verification.” In \u003ci\u003eInterspeech 2017\u003c/i\u003e, 999–1003. ISCA. doi:\u003ca href=\"https://doi.org/10.21437/Interspeech.2017-620\"\u003e10.21437/Interspeech.2017-620\u003c/a\u003e.\u003c/div\u003e\n \u003cdiv class=\"csl-entry\"\u003e\u003ca id=\"citeproc_bib_item_14\"\u003e\u003c/a\u003eSnyder, David, Daniel Garcia-Romero, Gregory Sell, Daniel Povey, and Sanjeev Khudanpur. 2018. “X-Vectors: Robust DNN Embeddings for Speaker Recognition.” In \u003ci\u003e2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)\u003c/i\u003e, 5329–33. Calgary, AB: IEEE. 
doi:\u003ca href=\"https://doi.org/10.1109/ICASSP.2018.8461375\"\u003e10.1109/ICASSP.2018.8461375\u003c/a\u003e.\u003c/div\u003e\n\u003c/div\u003e\n","permalink":"https://www.jemoka.com/posts/kbhspeech_diarization/","tags":null,"title":"Transformer Speech Diarization"},{"categories":null,"contents":"Transformers has replaced large pipelines into a single system.\n\u0026ldquo;Transformers verticalized tasks in 2013 EMNLP; various domains\u0026rdquo;\nProcess Multiple manual systems that talk to each other has been replaced by neurons talking to each other General word embeddings like Word2Vec Sequence to sequence modeling from those vecs that are more general: learning variable length representations From LSTMs to Encoder-Decoder architectures: Google Neural Machine Translation System 2016 (LSTM seq2seq SoTA) So: big complicated pipelines turn into one homogeneous system.\nBig LSTM Problems and their Solutinos LSTMs crush the entire sequence into one embedding, which is bad because there\u0026rsquo;s no representation between inputs.\nConvolutions begin to solve this problem by looking at the local interactions to learn about the structure of the problem.\nSelf-attention does this massively: capturing token-to-token interactions in a parallelization fashion.\nTransformer Motivation Motivation: convolutions allows parallelism, \u0026ldquo;can we read and write in parallel instead of left to right generation?\u0026rdquo;\nno. 
decoding in parallel sucks apparently.\nthe ordering is hard: we don\u0026rsquo;t know how the outputs should be ordered; generating all at once assumes the output are conditionally independent each ordering selection narrows the posterior space and it makes generation easier But we can still read in parallel unlike LSTMs which is BASED.\nSelf attention is actually faster too, because convolutions are \\(O(knd^{2})\\) but self attention happens without convolving with only \\(O(nd^{2})\\).\n\u0026ldquo;Processing\u0026rdquo; happens through contractions/expansions like ResNet.\nMulti-Head Attention Language modeling: \u0026ldquo;who did what to whom\u0026rdquo;?\nA single self-attention only can capture one of those W relationships. The best in can do (because of softmax) is to do a weighted average of the inputs.\nPosition encodings Because adding is commutative, attention is permutation invariant so we have to add a positional encoding.\nIn theory we want length invariant models, which requires long term embeddings. Absolute embeddings, when generation length becomes too long, you end up with degration as length increases\nNext Steps Long-Form Retrival There are ways of doing \u0026ldquo;structured sparse attention\u0026rdquo;, an input modulated sparse matrix for attention that saves a lot of flops. So, we can do long form contexts eventually by playing with this area of retrival.\nLiking Physics \u0026ldquo;You want to make physics your friend\u0026rdquo;\nConvolutions and self attention moves memory between GPU HBM and GPU SRAM a lot: four different move/load operations. That\u0026rsquo;s not a FLOP problem. 
How do we fix that?\nmulti-query/group query approach: reduce read heads: n\u0026lt;d key and n\u0026lt;d values; a bunch of queries attend to the same keys and values\u0026mdash;loose fidelity but less loads Softmax improvements to improve performance ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtransformers/\"\u003eTransformers\u003c/a\u003e has replaced large pipelines into a single system.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;\u003ca href=\"/posts/kbhtransformers/\"\u003eTransformers\u003c/a\u003e verticalized tasks in 2013 EMNLP; various domains\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"process\"\u003eProcess\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eMultiple manual systems that talk to each other has been replaced by neurons talking to each other\u003c/li\u003e\n\u003cli\u003eGeneral word embeddings like Word2Vec\u003c/li\u003e\n\u003cli\u003eSequence to sequence modeling from those vecs that are more general: learning variable length representations\u003c/li\u003e\n\u003cli\u003eFrom LSTMs to Encoder-Decoder architectures: Google Neural Machine Translation System 2016 (LSTM seq2seq SoTA)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eSo: big complicated pipelines turn into one homogeneous system.\u003c/p\u003e\n\u003ch3 id=\"big-lstm-problems-and-their-solutinos\"\u003eBig LSTM Problems and their Solutinos\u003c/h3\u003e\n\u003cp\u003eLSTMs crush the entire sequence into one embedding, which is bad because there\u0026rsquo;s no representation between inputs.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhrandom_variables/#adding-random-variables\"\u003eConvolution\u003c/a\u003es begin to solve this problem by looking at the local interactions to learn about the structure of the problem.\u003c/p\u003e\n\u003cp\u003eSelf-attention does this massively: capturing token-to-token interactions in a parallelization fashion.\u003c/p\u003e\n\u003ch3 id=\"transformer-motivation\"\u003eTransformer Motivation\u003c/h3\u003e\n\u003cp\u003eMotivation: 
convolutions allows parallelism, \u0026ldquo;can we read and write in parallel instead of left to right generation?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eno. decoding in parallel sucks apparently.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe ordering is hard: we don\u0026rsquo;t know how the outputs should be ordered; generating all at once assumes the output are conditionally independent\u003c/li\u003e\n\u003cli\u003eeach ordering selection narrows the posterior space and it makes generation easier\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eBut we can still read in parallel unlike LSTMs which is BASED.\u003c/p\u003e\n\u003cp\u003eSelf attention is actually faster too, because convolutions are \\(O(knd^{2})\\) but self attention happens without convolving with only \\(O(nd^{2})\\).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;Processing\u0026rdquo; happens through contractions/expansions like ResNet.\u003c/p\u003e\n\u003ch3 id=\"multi-head-attention\"\u003eMulti-Head Attention\u003c/h3\u003e\n\u003cp\u003eLanguage modeling: \u0026ldquo;who did what to whom\u0026rdquo;?\u003c/p\u003e\n\u003cp\u003eA single self-attention only can capture one of those W relationships. The best in can do (because of softmax) is to do a weighted average of the inputs.\u003c/p\u003e\n\u003ch3 id=\"position-encodings\"\u003ePosition encodings\u003c/h3\u003e\n\u003cp\u003eBecause adding is commutative, attention is permutation invariant so we have to add a positional encoding.\u003c/p\u003e\n\u003cp\u003eIn theory we want length invariant models, which requires long term embeddings. 
Absolute embeddings, when generation length becomes too long, you end up with degration as length increases\u003c/p\u003e\n\u003ch2 id=\"next-steps\"\u003eNext Steps\u003c/h2\u003e\n\u003ch3 id=\"long-form-retrival\"\u003eLong-Form Retrival\u003c/h3\u003e\n\u003cp\u003eThere are ways of doing \u0026ldquo;structured sparse attention\u0026rdquo;, an input modulated sparse matrix for attention that saves a lot of flops. So, we can do long form contexts eventually by playing with this area of retrival.\u003c/p\u003e\n\u003ch3 id=\"liking-physics\"\u003eLiking Physics\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;You want to make physics your friend\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eConvolutions and self attention moves memory between GPU HBM and GPU SRAM a lot: four different move/load operations. That\u0026rsquo;s not a FLOP problem. How do we fix that?\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003emulti-query/group query approach: reduce read heads: n\u0026lt;d key and n\u0026lt;d values; a bunch of queries attend to the same keys and values\u0026mdash;loose fidelity but less loads\u003c/li\u003e\n\u003cli\u003eSoftmax improvements to improve performance\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtransformers/","tags":null,"title":"Transformers"},{"categories":null,"contents":"Translation Theory\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtranslation_theory/\"\u003eTranslation Theory\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtranslation_studies_index/","tags":null,"title":"Translation Studies Index"},{"categories":null,"contents":"Translation Theory is the theory that studies how translation works.\nSpectrum of Translation domestication and foreignization are processes by which a translator can choose to alter the style of a translation for a purpose.\nforeignization trying to bring the target language closer to the source language\nbring in foreign words use colourful idioms use old words 
domestication trying to bring he source language closer to the target language\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtranslation_theory/\"\u003eTranslation Theory\u003c/a\u003e is the theory that studies how \u003ca href=\"/posts/kbhtranslation_studies_index/\"\u003etranslation\u003c/a\u003e works.\u003c/p\u003e\n\u003ch2 id=\"spectrum-of-translation\"\u003eSpectrum of Translation\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#spectrum-of-translation\"\u003edomestication\u003c/a\u003e and \u003ca href=\"#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e are processes by which a translator can choose to alter the style of a translation for a purpose.\u003c/p\u003e\n\u003ch3 id=\"foreignization--org36767d3\"\u003e\u003ca href=\"#spectrum-of-translation\"\u003eforeignization\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003etrying to bring the target language closer to the source language\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ebring in foreign words\u003c/li\u003e\n\u003cli\u003euse colourful idioms\u003c/li\u003e\n\u003cli\u003euse old words\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"domestication--org36767d3\"\u003e\u003ca href=\"#spectrum-of-translation\"\u003edomestication\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003etrying to bring he source language closer to the target language\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtranslation_theory/","tags":null,"title":"Translation Theory"},{"categories":null,"contents":"A load perpendicular to the long end of a rod. Think of a metal rod lying flat on the ground; a transverse\n","html":"\u003cp\u003eA load perpendicular to the \u003cem\u003elong\u003c/em\u003e end of a rod. 
Think of a metal rod lying flat on the ground; a transverse\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtransverse_loaod/","tags":null,"title":"transverse load"},{"categories":null,"contents":"Humans either over-rely (drive a Tesla while sleeping) or under rely (interfering) with robot\u0026rsquo;s actions.\nhuman and robot interactions may depend on entire history trust is a proxy for the full interaction history the human\u0026rsquo;s policy must be modeled by th robot trust is demonstrated through real-world experimentation Formulation Add two variable\nTrust: \\(\\theta_{t}\\), the robot\u0026rsquo;s ability to succeed in a task Performance: \\(e_{t+1}\\), success or failure in attempting a task the trust model probabilities for model\u0026rsquo;s correct modeling of humans are low: high variance between participants.\nTrust Dynamics models human\u0026rsquo;s trust in the robot as a linear gaussian.\nHuman Model Results Sadly, the system didn\u0026rsquo;t actually increase in trust score, but the performance was better through lower human intervention.\n","html":"\u003cp\u003eHumans either over-rely (drive a Tesla while sleeping) or under rely (interfering) with robot\u0026rsquo;s actions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ehuman and robot interactions may depend on entire history\n\u003col\u003e\n\u003cli\u003e\u003cstrong\u003etrust is a proxy for the full interaction history\u003c/strong\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ethe \u003cstrong\u003ehuman\u0026rsquo;s\u003c/strong\u003e policy must be modeled by th robot\u003c/li\u003e\n\u003cli\u003etrust is demonstrated through real-world experimentation\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"formulation\"\u003eFormulation\u003c/h2\u003e\n\u003cp\u003eAdd two variable\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eTrust: \\(\\theta_{t}\\), the robot\u0026rsquo;s ability to succeed in a task\u003c/li\u003e\n\u003cli\u003ePerformance: 
\\(e_{t+1}\\), success or failure in attempting a task\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003cstrong\u003ethe trust model probabilities for model\u0026rsquo;s correct modeling of humans are low: high variance between participants.\u003c/strong\u003e\u003c/p\u003e\n\u003ch3 id=\"trust-dynamics\"\u003eTrust Dynamics\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_10-10-19_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003emodels human\u0026rsquo;s trust in the robot as a linear gaussian.\u003c/p\u003e\n\u003ch3 id=\"human-model\"\u003eHuman Model\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-20_10-13-33_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"results\"\u003eResults\u003c/h2\u003e\n\u003cp\u003eSadly, the system didn\u0026rsquo;t actually increase in trust score, but the performance was better through lower human intervention.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtrustpomdp/","tags":null,"title":"TrustPOMDP"},{"categories":null,"contents":"Tuning Forks (funing torks!) is a Tuning Fork. You smack it and it goes \u0026ldquo;biiing!\u0026rdquo;\nLet\u0026rsquo;s figure out how it works. For us to be one same page, let\u0026rsquo;s define some vocab:\nVocab \u0026ldquo;Tine\u0026rdquo;: one of the two prongs of the fork A Cursory Explanation Source: here and here. Both are not very scientific but a good first step.\nFrom a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.\nOk but then this raises the question of why there\u0026rsquo;s two tines. 
The explanation this website gives is essentially that the actual mechanism of the Tuning Fork is in squishing the air immediately around the fork, so\u0026hellip;\nif the tines are push towards together, it creates a void in the space it just was; this creates a low pressure rarefaction area if the tines snap back apart, it compresses the air creating compression by squishing the air around it And therefore, the air around the funing tork is essentially being played like a two-way slingy. To adjust the pitch of the Tuning Fork, you lengthen or shorten it: longer tuning forks have larger tines, which vibrate more slowly.\nOk but now many, many questions why does smacking one side of the Tuning Fork make both sides vibrate presumably the base is not vibrating; hence, how does the downward-bendy vibration cause perpendicular oscillation (does it?) A Detour on Rigid Body Harmonic Motion Let\u0026rsquo;s talk about Bending. How does this relate to springs/slinkies? read this. A Better Detour on Cantilever Beams Cantilever Beams\nA Detour on the Temperature We are really worried about two different things here.\nMetal expands/contracts based on the temperature Temperature affects speed of sound A Detour on Material Science Why are our Tuning Forks out of tune? Fun, Relevant Factoids About the World The range of human hearing from a youngen is about 20Hz to 20,000Hz. Look into Young\u0026rsquo;s Modulus\nDensity Second overtones: six and a quarter; why?\nprove the equations given in Rossing 1990\nwhy do high frequencies die faster?\nWhy are they FORKS? What\u0026rsquo;s wrong with one prong\nLagrangian Mechanics\nexperiments to do in the end measuring in water measuring questions to ask why no free vibrations just standing? do the various tuning fork modes compose what happened to the harmonics of the fundimental? I know the overotens are 6/14, but where did the harmonics go? do they compose? what if we did it in a vaccume? 
of course the tuning fork is not going to be heard, but will it keep vibrating forever? Nyquist limit (FFT is only accurate to half the sampling rate; 10000 hz sampling (default on logger pro) means max is 5000 Hz) things we can use far field because the wavelength is much mucm much much larger than the seperation between the two tines; what is the wavelength? function of frequency and hertz Questions for Mark cuw tuning forks\u0026rsquo; freq is not the predicted freq of its shortest tine. urg how driven oscellation. how would it actually work? last minute tuning forks easy explanation of FFT \u0026ldquo;wrapping around circle\u0026rdquo; backup slide on octahedral scress explain beta how to get wavelength from sinusoidal equation how does wavelength change with temp; how does our ear compensate? https://en.wikipedia.org/wiki/Residual_stress ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Forks\u003c/a\u003e (funing torks!) is a \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e. You smack it and it goes \u0026ldquo;biiing!\u0026rdquo;\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-08-27_14-02-11_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eLet\u0026rsquo;s figure out how it works. For us to be one same page, let\u0026rsquo;s define some vocab:\u003c/p\u003e\n\u003ch2 id=\"vocab\"\u003eVocab\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u0026ldquo;Tine\u0026rdquo;: one of the two prongs of the fork\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"a-cursory-explanation\"\u003eA Cursory Explanation\u003c/h2\u003e\n\u003cp\u003eSource: \u003ca href=\"https://science.howstuffworks.com/tuning-fork2.htm\"\u003ehere\u003c/a\u003e and \u003ca href=\"https://americanhistory.si.edu/science/tuningfork.htm\"\u003ehere\u003c/a\u003e. 
Both are not very scientific but a good first step.\u003c/p\u003e\n\u003cp\u003eFrom a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.\u003c/p\u003e\n\u003cp\u003eOk but then this raises the question of why there\u0026rsquo;s two tines. The explanation this website gives is essentially that the actual mechanism of the \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e is in squishing the air \u003cstrong\u003eimmediately around\u003c/strong\u003e the fork, so\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eif the tines are push towards together, it creates a void in the space it just was; this creates a low pressure \u003cstrong\u003e\u003cstrong\u003erarefaction\u003c/strong\u003e\u003c/strong\u003e area\u003c/li\u003e\n\u003cli\u003eif the tines snap back apart, it compresses the air creating \u003cstrong\u003e\u003cstrong\u003ecompression\u003c/strong\u003e\u003c/strong\u003e by squishing the air around it\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd therefore, the air around the \u003ca href=\"/posts/kbhtuning_forks/\"\u003efuning tork\u003c/a\u003e is essentially being played like a two-way slingy. 
To adjust the pitch of the \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e, you lengthen or shorten it: longer tuning forks have larger tines, which vibrate more slowly.\u003c/p\u003e\n\u003ch2 id=\"ok-but-now-many-many-questions\"\u003eOk but now many, many questions\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ewhy does smacking one side of the \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003e make both sides vibrate\u003c/li\u003e\n\u003cli\u003epresumably the base is not vibrating; hence, how does the downward-bendy vibration cause perpendicular oscillation (does it?)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"a-detour-on-rigid-body-harmonic-motion\"\u003eA Detour on Rigid Body Harmonic Motion\u003c/h2\u003e\n\u003cp\u003eLet\u0026rsquo;s talk about \u003ca href=\"/posts/kbhbending/\"\u003eBending.\n\u003c/a\u003e\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eHow does this relate to springs/slinkies? \u003ca href=\"https://ccrma.stanford.edu/~jos/pasp/Young_s_Modulus_Spring_Constant.html\"\u003eread this.\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"a-better-detour-on-cantilever-beams\"\u003eA Better Detour on Cantilever Beams\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhcantilever_beams/\"\u003eCantilever Beams\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"a-detour-on-the-temperature\"\u003eA Detour on the Temperature\u003c/h2\u003e\n\u003cp\u003eWe are really worried about two different things here.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eMetal expands/contracts based on the temperature\u003c/li\u003e\n\u003cli\u003eTemperature affects speed of sound\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"a-detour-on-material-science\"\u003eA Detour on Material Science\u003c/h2\u003e\n\u003ch2 id=\"why-are-our-tuning-fork--kbhtuning-forks-dot-md--s-out-of-tune\"\u003eWhy are our \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003es out of tune?\u003c/h2\u003e\n\u003ch2 
id=\"fun-relevant-factoids-about-the-world\"\u003eFun, Relevant Factoids About the World\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eThe range of human hearing from a youngen is about 20Hz to 20,000Hz.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"look-into\"\u003eLook into\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eYoung\u0026rsquo;s Modulus\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDensity\u003c/li\u003e\n\u003cli\u003eSecond\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eovertones: six and a quarter; why?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eprove the equations given in \u003ca href=\"/posts/kbhrossing_1990/\"\u003eRossing 1990\u003c/a\u003e\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003ewhy do high frequencies die faster?\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eWhy are they FORKS? What\u0026rsquo;s wrong with one prong\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-25_16-17-35_screenshot.png\"\u003e\n \u003c/figure\u003e\n\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhlagrangian_mechanics/\"\u003eLagrangian Mechanics\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"experiments-to-do-in-the-end\"\u003eexperiments to do in the end\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003emeasuring in water\u003c/li\u003e\n\u003cli\u003emeasuring\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-to-ask\"\u003equestions to ask\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewhy no free vibrations just standing?\u003c/li\u003e\n\u003cli\u003edo the various tuning fork modes compose\u003c/li\u003e\n\u003cli\u003ewhat happened to the harmonics of the fundimental? I know the overotens are 6/14, but where did the harmonics go? do they compose?\u003c/li\u003e\n\u003cli\u003ewhat if we did it in a vaccume? 
of course the tuning fork is not going to be heard, but will it keep vibrating forever?\u003c/li\u003e\n\u003cli\u003eNyquist limit (FFT is only accurate to half the sampling rate; 10000 hz sampling (default on logger pro) means max is 5000 Hz)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"things\"\u003ethings\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ewe can use far field because the wavelength is much mucm much much larger than the seperation between the two tines; what is the wavelength? function of frequency and hertz\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"questions-for-mark\"\u003eQuestions for Mark\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ecuw tuning forks\u0026rsquo; freq is not the predicted freq of its shortest tine. urg how\u003c/li\u003e\n\u003cli\u003edriven oscellation. how would it actually work?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"last-minute-tuning-forks\"\u003elast minute tuning forks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eeasy explanation of FFT \u0026ldquo;wrapping around circle\u0026rdquo;\u003c/li\u003e\n\u003cli\u003ebackup slide on octahedral scress\u003c/li\u003e\n\u003cli\u003eexplain beta\u003c/li\u003e\n\u003cli\u003ehow to get wavelength from sinusoidal equation\u003c/li\u003e\n\u003cli\u003ehow does wavelength change with temp; how does our ear compensate?\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://en.wikipedia.org/wiki/Residual_stress\"\u003ehttps://en.wikipedia.org/wiki/Residual_stress\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtuning_forks/","tags":null,"title":"Tuning Fork"},{"categories":null,"contents":"what if heat, but plate\n\\begin{equation} \\pdv{u}{t} = \\pdv[2]{u}{x} + \\pdv[2]{u}{y} \\end{equation}\nFor some heat distribution that has arbitrary shape, on some domain \\(\\Omega \\times [0, \\infty]_{t}\\) (i.e. 
argumentation of some space dimensions by time).\nDirichlet Conditions: edges have heat \\(0\\) OR Neumann Conditions: normal derivative (flux) is \\(0\\) at the edge If \\(\\Omega\\) is a general blob, you are actually kinda fucked. Because the bounds on \\(x\\) depend on \\(y\\), and \\(y\\) on \\(x\\), so you can\u0026rsquo;t just separate them into a product.\nHowever, if we cut a rectangle, life is better.\nwhere:\n\\begin{equation} 0 \\leq x \\leq l_1 \\end{equation}\n\\begin{equation} 0 \\leq y \\leq l_2 \\end{equation}\nwhere the Dirichlet condition is now described as the four line segments along the curve at \\(l_1\\) and \\(l_2\\) having constant (or vanishing) temperature.\nIts general solution is:\n\\begin{equation} u(t,x,y) = \\sum_{n_1=1}^{\\infty}\\sum_{n_2=1}^{\\infty} A_{n_1, n_2} e^{-\\qty(\\qty( \\frac{{n_{1}}^{2}}{{l_{1}}^{2}}) + ( \\frac{{n_{2}}^{2}}{{l_{2}}^{2}})) \\pi^{2}t} \\sin \\qty(\\qty(n_1 \\frac{\\pi}{l_{1}})x )\\sin \\qty(\\qty(n_2 \\frac{\\pi}{l_{2}})y ) \\end{equation}\nwhere:\n\\begin{equation} \\lambda = \\lambda_{1} + \\lambda_{2} = - \\qty( \\frac{{n_{1}}^{2}}{{l_1}^{2}} + \\frac{{n_{2}}^{2}}{{l_2}^{2}}) \\pi^{2} \\end{equation}\nsolving \\begin{equation} U(t,x,y) = A(t)B(x)C(y) \\end{equation}\nSo now with much prayer and plugging:\n\\begin{equation} A\u0026rsquo;(t) B(x) C(y) = A(t) B\u0026rsquo;\u0026rsquo;(X)C(y) + A(t)B(x)C\u0026rsquo;\u0026rsquo;(y) \\end{equation}\nwhich gives:\n\\begin{equation} \\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda \\end{equation}\nWhich causes two problems to arise:\n\\begin{equation} \\begin{cases} A\u0026rsquo;(t) = \\lambda A(t) = 0 \\\\ \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda \\end{cases} \\end{equation}\nthe second expression gives:\n\\begin{equation} \\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - 
\\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} \\end{equation}\nMeaning:\n\\begin{equation} \\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda_{1} \\end{equation}\nMeaning:\n\\begin{equation} B\u0026rsquo;\u0026rsquo;(x) - \\lambda_{1} B(x) = 0 \\end{equation}\nand:\n\\begin{equation} C\u0026rsquo;\u0026rsquo;(y) - \\lambda_{2} C = 0 \\end{equation}\nwhere \\(\\lambda - \\lambda_{1} = \\lambda_{2}\\).\nNow, recall our boundary conditions:\n\\begin{equation} B(0) = B(l_1) = 0 \\end{equation}\nand\n\\begin{equation} C(0) = C(\\lambda_{2}) = 0 \\end{equation}\nSo, for the expression in \\(B\\), we obtain:\n\\begin{equation} \\lambda_{1} = \\frac{-k_{1}^{2}\\pi^{2}}{l_{1}^{2}}} \\end{equation}\n\\begin{equation} \\lambda_{2} = \\frac{-k_{2}^{2}\\pi^{2}}{l_{2}^{2}}} \\end{equation}\nso:\n\\begin{equation} \\lambda = \\lambda_{1} + \\lambda_{2} \\end{equation}\nAll together, we obtain:\n\\begin{equation} B(x) = \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}}) \\end{equation}\nand:\n\\begin{equation} C(y) = \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\end{equation}\nfinally, where:\n\\begin{equation} A\u0026rsquo; + \\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})A = 0 \\end{equation}\nwhich gives us:\n\\begin{equation} A(t) = e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t} \\end{equation}\nso then multiply them together:\n\\begin{equation} \\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}}) \\end{equation}\nat \\(u(0,x,y)\\), we obtain:\n\\begin{equation} u(0,x,y) = \\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}}) \\end{equation}\nfor every \\(f(x,y)\\), we can solve for \\(E_{k_1, k_2}\\) by fixing \\(y\\), for 
instance, then writing a Fourier series as a function that depends on the coefficients you left out. This gives:\n\\begin{equation} f(x,y) = \\sum a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\end{equation}\nand then, each of THESE internal functions a function \\(a_{k_1}(y)\\) , which you can obtain over \\(y\\) and expand as a fourier series.\nTo solve for each, you do the susual:\n\\begin{equation} a_{k_1}(y) = \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x} \\end{equation}\nwhich you can expand:\n\\begin{equation} E_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y} \\end{equation}\nwhich means that, substituting it in, the whole thing can be written together as:\n\\begin{equation} E_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x} \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y} \\end{equation}\n","html":"\u003cp\u003ewhat if heat, but plate\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\pdv[2]{u}{x} + \\pdv[2]{u}{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eFor some heat distribution that has arbitrary shape, on some domain \\(\\Omega \\times [0, \\infty]_{t}\\) (i.e. argumentation of some space dimensions by time).\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e: edges have heat \\(0\\)\u003c/li\u003e\n\u003cli\u003eOR \u003ca href=\"/posts/kbhsu_math53_feb232024/#neumann-conditions\"\u003eNeumann Conditions\u003c/a\u003e: normal derivative (flux) is \\(0\\) at the edge\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eIf \\(\\Omega\\) is a general blob, you are actually kinda fucked. 
Because the bounds on \\(x\\) depend on \\(y\\), and \\(y\\) on \\(x\\), so you can\u0026rsquo;t just separate them into a product.\u003c/p\u003e\n\u003cp\u003eHowever, if we cut a rectangle, life is better.\u003c/p\u003e\n\u003chr\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2024-02-28_22-57-25_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq x \\leq l_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 \\leq y \\leq l_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere the Dirichlet condition is now described as the four line segments along the curve at \\(l_1\\) and \\(l_2\\) having constant (or vanishing) temperature.\u003c/p\u003e\n\u003cp\u003eIts general solution is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x,y) = \\sum_{n_1=1}^{\\infty}\\sum_{n_2=1}^{\\infty} A_{n_1, n_2} e^{-\\qty(\\qty( \\frac{{n_{1}}^{2}}{{l_{1}}^{2}}) + ( \\frac{{n_{2}}^{2}}{{l_{2}}^{2}})) \\pi^{2}t} \\sin \\qty(\\qty(n_1 \\frac{\\pi}{l_{1}})x )\\sin \\qty(\\qty(n_2 \\frac{\\pi}{l_{2}})y )\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\lambda_{1} + \\lambda_{2} = - \\qty( \\frac{{n_{1}}^{2}}{{l_1}^{2}} + \\frac{{n_{2}}^{2}}{{l_2}^{2}}) \\pi^{2}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"solving\"\u003esolving\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x,y) = A(t)B(x)C(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo now with much prayer and plugging:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;(t) B(x) C(y) = A(t) B\u0026rsquo;\u0026rsquo;(X)C(y) + A(t)B(x)C\u0026rsquo;\u0026rsquo;(y)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;(t)}{A(t)} = \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWhich causes two problems to 
arise:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nA\u0026rsquo;(t) = \\lambda A(t) = 0 \\\\\n\\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(x)} + \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe second expression gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{B\u0026rsquo;\u0026rsquo;(X)}{B(X)} = \\lambda - \\frac{C\u0026rsquo;\u0026rsquo;(y)}{C(y)} = \\lambda_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eMeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB\u0026rsquo;\u0026rsquo;(x) - \\lambda_{1} B(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC\u0026rsquo;\u0026rsquo;(y) - \\lambda_{2} C = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda - \\lambda_{1} = \\lambda_{2}\\).\u003c/p\u003e\n\u003cp\u003eNow, recall our boundary conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(0) = B(l_1) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC(0) = C(\\lambda_{2}) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo, for the expression in \\(B\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{1} = \\frac{-k_{1}^{2}\\pi^{2}}{l_{1}^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda_{2} = \\frac{-k_{2}^{2}\\pi^{2}}{l_{2}^{2}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\lambda_{1} + \\lambda_{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAll together, we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = \\sin \\qty( \\frac{k_{1} \\pi 
x}{l_{1}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nC(y) = \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efinally, where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo; + \\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})A = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives us:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA(t) = e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eso then multiply them together:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} e^{-\\qty( \\frac{k_{1}^{2} \\pi^{2}}{ l_1^{2}} + \\frac{k_{2}^{2} \\pi^{2}}{ l_2^{2}})t} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eat \\(u(0,x,y)\\), we obtain:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(0,x,y) = \\sum_{k_1}^{} \\sum_{k_2}^{}E_{k_1, k_2} \\sin \\qty( \\frac{k_{2} \\pi y}{l_{2}}) \\sin \\qty( \\frac{k_{1} \\pi x}{l_{1}})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor every \\(f(x,y)\\), we can solve for \\(E_{k_1, k_2}\\) by fixing \\(y\\), for instance, then writing a Fourier series as a function that depends on the coefficients you left out. 
This gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nf(x,y) = \\sum a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand then, each of \u003cstrong\u003eTHESE\u003c/strong\u003e internal functions a function \\(a_{k_1}(y)\\) , which you can obtain over \\(y\\) and expand as a fourier series.\u003c/p\u003e\n\u003cp\u003eTo solve for each, you do the susual:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k_1}(y) = \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich you can expand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} a_{k_1}(y) \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich means that, substituting it in, the whole thing can be written together as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE_{k_1, k_2} = \\frac{2}{l_2} \\int_{0}^{l_2} \\frac{2}{l_1} \\int_{0}^{l_1} f(x,y) \\sin \\qty( \\frac{k_1 \\pi x}{l_1}) \\dd{x} \\sin \\qty( \\frac{k_1 \\pi y}{l_2}) \\dd{y}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtwo_dimensional_heat_equation/","tags":null,"title":"two-dimensional heat equation"},{"categories":null,"contents":"Say we want to find the number which is the additive inverse (\u0026ldquo;negative\u0026rdquo;) of a number.\nWe can just flip each of the digit, and then add 1:\ntake \\(0101\\), invert it to get \\(1010\\) adding these two numbers will give you \\(1111\\). If we just added one more \\(0001\\), it will flip over to be \\(0000\\). Therefore, \\(1010+0001 = 1011\\) is the additive inverse of \\(0101\\). The left most bit being one: still a mark of whether or not something is negative. 
It just works backwards:\npros and cons of twos complement con: more difficult to represent and difficult to convert pro: only 1 representation for 0 pro: the most significant bit still indicates the sign of a number pro: addition works for any combination of positive/negative tricks all zeros: its always 0 zero plus all ones (011111\u0026hellip;111): it always is the largest signed value and some middle value for unsigned all ones: its always -1 (11111 =\u0026gt; 00000 +1 =\u0026gt; 1) for signed one plus all zeros mnemonic for remembering where overflows happened Unsigned Signed ","html":"\u003cp\u003eSay we want to find the number which is the \u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e (\u0026ldquo;negative\u0026rdquo;) of a number.\u003c/p\u003e\n\u003cp\u003eWe can just flip each of the digit, and then add 1:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-02_11-17-26_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cul\u003e\n\u003cli\u003etake \\(0101\\), invert it to get \\(1010\\)\u003c/li\u003e\n\u003cli\u003eadding these two numbers will give you \\(1111\\). If we just added one more \\(0001\\), it will flip over to be \\(0000\\).\u003c/li\u003e\n\u003cli\u003eTherefore, \\(1010+0001 = 1011\\) is the additive inverse of \\(0101\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe left most bit being one: still a mark of whether or not something is negative. 
It just works backwards:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-36-15_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"pros-and-cons-of-twos-complement\"\u003epros and cons of twos complement\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003econ: more difficult to represent and difficult to convert\u003c/li\u003e\n\u003cli\u003epro: only 1 representation for 0\u003c/li\u003e\n\u003cli\u003epro: the most significant bit still indicates the sign of a number\u003c/li\u003e\n\u003cli\u003epro: addition works for any combination of positive/negative\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"tricks\"\u003etricks\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eall zeros: its always 0\u003c/li\u003e\n\u003cli\u003ezero plus all ones (011111\u0026hellip;111): it always is the largest signed value and some middle value for unsigned\u003c/li\u003e\n\u003cli\u003eall ones: its always -1 (11111 =\u0026gt; 00000 +1 =\u0026gt; 1) for signed\u003c/li\u003e\n\u003cli\u003eone plus all zeros\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"mnemonic-for-remembering-where-overflows-happened\"\u003emnemonic for remembering where overflows happened\u003c/h2\u003e\n\u003ch3 id=\"unsigned\"\u003eUnsigned\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-58-13_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch3 id=\"signed\"\u003eSigned\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-03_16-58-27_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhtwo_s_complement/","tags":null,"title":"two's complement"},{"categories":null,"contents":" quality of service harm distributive harm existential harm ","html":"\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhquality_of_service_harm/\"\u003equality of service harm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdistributive_harm/\"\u003edistributive 
harm\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eexistential harm\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtypes_of_harm/","tags":null,"title":"types of harm"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhu1_c/","tags":null,"title":"u1.c"},{"categories":null,"contents":"If you are given the problem, you can learn the parameters by just computing them. For instance, to estimate the parameters of a gaussian, we can compute the mean and variance and shove it in.\n","html":"\u003cp\u003eIf you are given the problem, you can learn the \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es by just computing them. For instance, to estimate the \u003ca href=\"/posts/kbhparameter/\"\u003eparameter\u003c/a\u003es of a \u003ca href=\"/posts/kbhgaussian_distribution/\"\u003egaussian\u003c/a\u003e, we can compute the mean and variance and shove it in.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunbiased_parameter_learning/","tags":null,"title":"unbiased parameter learning"},{"categories":null,"contents":"There are many different types of uncertainty.\nOutcome Uncertainty: actions may not have known results Model Uncertainty: best action in a state may not be known State Uncertainty: current state may not be precisely known Interaction Uncertainty: interference between models ","html":"\u003cp\u003eThere are many different types of \u003ca href=\"/posts/kbhuncertainty/\"\u003euncertainty\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhoutcome_uncertainty/\"\u003eOutcome Uncertainty\u003c/a\u003e: actions may not have known results\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eModel Uncertainty\u003c/a\u003e: best action in a state may not be known\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"\"\u003eState Uncertainty\u003c/a\u003e: current state may not be precisely 
known\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinteraction_uncertainty/\"\u003eInteraction Uncertainty\u003c/a\u003e: interference between models\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuncertainty/","tags":null,"title":"uncertainty"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhunconc/","tags":null,"title":"unconc"},{"categories":null,"contents":"base epsilon-greedy:\nchoose a random action with probability \\(\\epsilon\\) otherwise, we choose the action with the best expectation \\(\\arg\\max_{a} Q(s,a)\\) epsilon-greedy exploration with decay Sometimes, approaches are suggested to decay \\(\\epsilon\\) whereby, at each timestamp:\n\\begin{equation} \\epsilon \\leftarrow \\alpha \\epsilon \\end{equation}\nwhereby \\(\\alpha \\in (0,1)\\) is called the \u0026ldquo;decay factor.\u0026rdquo;\nExplore-then-commit Select actions uniformly at random for \\(k\\) steps; then, go to greedy and stay there\n","html":"\u003cp\u003ebase \u003ca href=\"/posts/kbhundirected_exploration/\"\u003eepsilon-greedy\u003c/a\u003e:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003echoose a random action with probability \\(\\epsilon\\)\u003c/li\u003e\n\u003cli\u003eotherwise, we choose the action with the best expectation \\(\\arg\\max_{a} Q(s,a)\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"epsilon-greedy-exploration-with-decay\"\u003eepsilon-greedy exploration with decay\u003c/h2\u003e\n\u003cp\u003eSometimes, approaches are suggested to decay \\(\\epsilon\\) whereby, at each timestamp:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon \\leftarrow \\alpha \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhereby \\(\\alpha \\in (0,1)\\) is called the \u0026ldquo;decay factor.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"explore-then-commit\"\u003eExplore-then-commit\u003c/h2\u003e\n\u003cp\u003eSelect actions uniformly at random for \\(k\\) steps; then, go to greedy and stay 
there\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhundirected_exploration/","tags":null,"title":"Undirected Exploration"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhunimodal/","tags":null,"title":"unimodal"},{"categories":null,"contents":"the unique_lock is a mutex management type. Its a lock management system whereby the type will unlock the mutex on your behalf whenever the unique lock goes out of scope.\nthis is useful if there are multiple paths to exit a function, where an edge case made you forget when to unlock:\nvoid my_scope(mutex \u0026amp;mut, condition_variable_any \u0026amp;cv) { unique_lock\u0026lt;mutex\u0026gt; lck(mut); // do stuff, you can even pass it to a condition variable! cv.wait(lck); } ","html":"\u003cp\u003ethe \u003ca href=\"/posts/kbhunique_lock/\"\u003eunique_lock\u003c/a\u003e is a mutex management type. Its a lock management system whereby the type will unlock the mutex on your behalf whenever the unique lock goes out of scope.\u003c/p\u003e\n\u003cp\u003ethis is useful if there are multiple paths to exit a function, where an edge case made you forget when to unlock:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C++\" data-lang=\"C++\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003evoid\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003emy_scope\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emut\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003econdition_variable_any\u003c/span\u003e \u003cspan 
style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ecv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eunique_lock\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emutex\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e\u0026gt;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003elck\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003emut\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e// do stuff, you can even pass it to a condition variable!\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ecv\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ewait\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003elck\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhunique_lock/","tags":null,"title":"unique_lock"},{"categories":null,"contents":"Questions of Uniqueness and Existance are important elements in Differential Equations.\nHere\u0026rsquo;s a very general form of a differential 
equations. First, here\u0026rsquo;s the:\nfunction behavior tests continuity Weakest statement.\nA function is continuous if and only if:\n\\begin{equation} \\lim_{x \\to y} f(x) =f(y) \\end{equation}\nLipschitz Condition Stronger statement.\nThe Lipschitz Condition is a stronger test of Continuity such that:\n\\begin{equation} || F(t,x)-F(t,y)|| \\leq L|| x- y|| \\end{equation}\nfor all \\(t \\in I\\), \\(x,y \\in \\omega\\), with \\(L \\in (0,\\infty)\\) is a Lipschitz Condition in the dependent variable \\(x\\).\nReshaping this into linear one-dimensional function, we have that:\n\\begin{equation} \\left | \\frac{F(t,x)-F(t,y)}{x-y} \\right | \\leq L \u0026lt; \\infty \\end{equation}\nThe important thing here is that its the same \\(L\\) of convergence \\(\\forall t\\). However, \\(L\\) may not be stable\u0026mdash;in can oscillate\nDifferentiable We finally have the strongest statement.\n\\begin{equation} \\lim_{x \\to y} \\frac{f(x)-f(y)}{x-y} = C \\end{equation}\nTo make something Differentiable, it has to not only converge but converge to a constant \\(C\\).\nExistence and Uniqueness Check for differential equation Assume some \\(F:I \\times \\omega \\to \\mathbb{R}^{n}\\) (a function \\(F\\) whose domain is in some space \\(I \\times \\omega\\)) is bounded and continuous and satisfies the Lipschitz Condition, and let \\(x_{0} \\in \\omega\\), then, there exists \\(T_{0} \u0026gt; 0\\) and a unique solution for \\(x(t)\\) that touches \\(x_{0}\\) to the standard First-Order Differential Equation \\(\\dv{x}{t} = F(t,x), x(t_{0}) = t_{0}\\) for some \\(|t-t_{0}| \u0026lt; T_{0}\\).\nTo actually check that \\(F\\) satisfies Lipschitz Condition, we pretty much usually just go and take the partial derivative w.r.t. 
\\(x\\) (dependent variable, yes its \\(x\\)) of \\(F\\) on \\(x\\), which\u0026mdash;if exists on some bound\u0026mdash;satisfies the Lipschitz condition on that bound.\nProof So we started at:\n\\begin{equation} \\dv{x}{t} = F(t,x), x(t_{0}) = x_{0} \\end{equation}\nWe can separate this expression and integrate:\n\\begin{align} \u0026amp; \\dv{x}{t} = F(t,x) \\\\ \\Rightarrow\\ \u0026amp; \\dd{x} = F(t,x)\\dd{t} \\\\ \\Rightarrow\\ \u0026amp; \\int_{x_{0)}}^{x(t)} \\dd{x} = \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\\\ \\Rightarrow\\ \u0026amp; x(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\end{align}\nAt this point, if \\(F\\) is seperable, we can then seperate it out by \\(\\dd{t}\\) and taking the right integral. However, we are only interested in existance and uniquness, so we will do something named\u0026hellip;\nPicard Integration Picard Integration is a inductive iteration scheme which leverages the Lipschitz Condition to show that a function integral converges. Begin with the result that all First-Order Differential Equations have shape (after forcibly separating):\n\\begin{equation} x(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\end{equation}\nWe hope that the inductive sequence:\n\\begin{equation} x_{n+1}(t) = x_{0} + \\int_{t_{0}}^{t} F(s,x_{n}(s)) \\dd{s} \\end{equation}\nconverges to the same result above (that is, the functions \\(x_{n}(s)\\) stop varying and therefore we converge to a solution \\(x(s)\\) to show existance.\nThis is hard!\nHere\u0026rsquo;s a digression/example:\nif we fix a time \\(t=10\\):\nwe hope to say that:\n\\begin{equation} \\lim_{n \\to \\infty } G_{n}(10) = G(10) \\end{equation}\n\\(\\forall \\epsilon \u0026gt; 0\\), \\(\\exists M \u0026lt; \\infty\\), \\(\\forall n\u0026gt;M\\),\n\\begin{equation} |G_{n}(10)-G(10)| \u0026lt; \\epsilon \\end{equation}\nNow, the thing is, for the integral above to converge uniformly, we hope that \\(M\\) stays fixed \\(\\forall t\\) (that all of the domain converges at once after the same 
under of the iterations.\nTaking the original expression, and applying the following page of algebra to it:\nFinally, we then apply the Lipschitz Condition because our setup is that \\(F\\) satisfies the Lipschitz Condition, we have that:\n\\begin{equation} ||x_{n+1}(t)-x_{n}(t)|| \\leq L\\int_{x_{0}}^{t} ||x_{n}(s)-x_{n-1}(s)||ds \\end{equation}\n","html":"\u003cp\u003eQuestions of \u003ca href=\"/posts/kbhuniqueness_and_existance/\"\u003eUniqueness and Existance\u003c/a\u003e are important elements in \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003eDifferential Equations\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eHere\u0026rsquo;s a very general form of a differential equations. First, here\u0026rsquo;s the:\u003c/p\u003e\n\u003ch2 id=\"function-behavior-tests\"\u003efunction behavior tests\u003c/h2\u003e\n\u003ch3 id=\"continuity\"\u003econtinuity\u003c/h3\u003e\n\u003cp\u003eWeakest statement.\u003c/p\u003e\n\u003cp\u003eA function is \u003ca href=\"#continuity\"\u003econtinuous\u003c/a\u003e if and only if:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{x \\to y} f(x) =f(y)\n\\end{equation}\u003c/p\u003e\n\u003ch3 id=\"lipschitz-condition\"\u003eLipschitz Condition\u003c/h3\u003e\n\u003cp\u003eStronger statement.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e is a stronger test of \u003ca href=\"#continuity\"\u003eContinuity\u003c/a\u003e such that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|| F(t,x)-F(t,y)|| \\leq L|| x- y||\n\\end{equation}\u003c/p\u003e\n\u003cp\u003efor all \\(t \\in I\\), \\(x,y \\in \\omega\\), with \\(L \\in (0,\\infty)\\) is a \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e in the \u003cstrong\u003edependent\u003c/strong\u003e variable \\(x\\).\u003c/p\u003e\n\u003cp\u003eReshaping this into linear one-dimensional function, we have that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\left | \\frac{F(t,x)-F(t,y)}{x-y} \\right | \\leq L 
\u0026lt; \\infty\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe important thing here is that its the same \\(L\\) of convergence \\(\\forall t\\). However, \\(L\\) may not be stable\u0026mdash;in can oscillate\u003c/p\u003e\n\u003ch3 id=\"differentiable\"\u003eDifferentiable\u003c/h3\u003e\n\u003cp\u003eWe finally have the strongest statement.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{x \\to y} \\frac{f(x)-f(y)}{x-y} = C\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTo make something \u003ca href=\"#differentiable\"\u003eDifferentiable\u003c/a\u003e, it has to not only converge but converge to a constant \\(C\\).\u003c/p\u003e\n\u003ch2 id=\"existence-and-uniqueness-check-for-differential-equation--kbhdiffeq-intro-dot-md\"\u003eExistence and Uniqueness Check for \u003ca href=\"/posts/kbhdiffeq_intro/\"\u003edifferential equation\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eAssume some \\(F:I \\times \\omega \\to \\mathbb{R}^{n}\\) (a function \\(F\\) whose domain is in some space \\(I \\times \\omega\\)) is \u003cstrong\u003ebounded\u003c/strong\u003e and \u003cstrong\u003econtinuous\u003c/strong\u003e and \u003cstrong\u003esatisfies the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e\u003c/strong\u003e, and let \\(x_{0} \\in \\omega\\), then, there exists \\(T_{0} \u0026gt; 0\\) and a unique solution for \\(x(t)\\) that touches \\(x_{0}\\) to the standard \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equation\u003c/a\u003e \\(\\dv{x}{t} = F(t,x), x(t_{0}) = t_{0}\\) for some \\(|t-t_{0}| \u0026lt; T_{0}\\).\u003c/p\u003e\n\u003cp\u003eTo actually check that \\(F\\) satisfies \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e, we pretty much usually just go and take the partial derivative w.r.t. 
\\(x\\) (\u003cstrong\u003e\u003cstrong\u003edependent\u003c/strong\u003e\u003c/strong\u003e variable, yes its \\(x\\)) of \\(F\\) on \\(x\\), which\u0026mdash;if exists on some bound\u0026mdash;satisfies the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz condition\u003c/a\u003e on that bound.\u003c/p\u003e\n\u003ch3 id=\"proof\"\u003eProof\u003c/h3\u003e\n\u003cp\u003eSo we started at:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\dv{x}{t} = F(t,x), x(t_{0}) = x_{0}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe can separate this expression and integrate:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\n\u0026amp; \\dv{x}{t} = F(t,x) \\\\\n\\Rightarrow\\ \u0026amp; \\dd{x} = F(t,x)\\dd{t} \\\\\n\\Rightarrow\\ \u0026amp; \\int_{x_{0)}}^{x(t)} \\dd{x} = \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s} \\\\\n\\Rightarrow\\ \u0026amp; x(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s}\n\\end{align}\u003c/p\u003e\n\u003cp\u003eAt this point, if \\(F\\) is \u003ca href=\"/posts/kbhlinear_constant_coefficient_equation/#solving-separable-differential-equations\"\u003eseperable\u003c/a\u003e, we can then seperate it out by \\(\\dd{t}\\) and taking the right integral. However, we are only interested in existance and uniquness, so we will do something named\u0026hellip;\u003c/p\u003e\n\u003ch4 id=\"picard-integration\"\u003ePicard Integration\u003c/h4\u003e\n\u003cp\u003e\u003ca href=\"#picard-integration\"\u003ePicard Integration\u003c/a\u003e is a inductive iteration scheme which leverages the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e to show that a function integral converges. 
Begin with the result that all \u003ca href=\"/posts/kbhlinear_non_seperable_equation/#solving-differential-equations\"\u003eFirst-Order Differential Equations\u003c/a\u003e have shape (after forcibly separating):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx(t)-x_{0}= \\int_{t_{0}}^{t} F(s,x(s)) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe hope that the inductive sequence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nx_{n+1}(t) = x_{0} + \\int_{t_{0}}^{t} F(s,x_{n}(s)) \\dd{s}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003econverges to the same result above (that is, the functions \\(x_{n}(s)\\) stop varying and therefore we converge to a solution \\(x(s)\\) to show existance.\u003c/p\u003e\n\u003cp\u003eThis is hard!\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eHere\u0026rsquo;s a digression/example:\u003c/p\u003e\n\u003cp\u003eif we fix a time \\(t=10\\):\u003c/p\u003e\n\u003cp\u003ewe hope to say that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lim_{n \\to \\infty } G_{n}(10) = G(10)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\(\\forall \\epsilon \u0026gt; 0\\), \\(\\exists M \u0026lt; \\infty\\), \\(\\forall n\u0026gt;M\\),\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|G_{n}(10)-G(10)| \u0026lt; \\epsilon\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNow, the thing is, for the integral above to converge uniformly, we hope that \\(M\\) stays fixed \\(\\forall t\\) (that all of the domain converges at once after the same under of the iterations.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eTaking the original expression, and applying the following page of algebra to it:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-09-13_13-34-58_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eFinally, we then apply the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e because our setup is that \\(F\\) satisfies the \u003ca href=\"#lipschitz-condition\"\u003eLipschitz Condition\u003c/a\u003e, we have 
that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n||x_{n+1}(t)-x_{n}(t)|| \\leq L\\int_{x_{0}}^{t} ||x_{n}(s)-x_{n-1}(s)||ds\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuniqueness_and_existance/","tags":null,"title":"Uniqueness and Existance"},{"categories":null,"contents":"A constructor built out of quantum theory which can replicate itself. It is considered a universal computer.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhconstructor_theory/\"\u003econstructor\u003c/a\u003e built out of \u003ca href=\"/posts/kbhquantum_theory/\"\u003equantum theory\u003c/a\u003e which can replicate itself. It is considered a \u003ca href=\"/posts/kbhquantum_information_theory/#universal-computer\"\u003euniversal computer\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuniversal_quantum_constructor/","tags":null,"title":"universal quantum constructor"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhuniversity_of_georgia/","tags":null,"title":"University of Georgia"},{"categories":null,"contents":"Unix is a standard set of tools commonly used in software development.\nmacOS and Linux are on top of Unix Windows comes Unix now lol You can navigate Unix inside a command line.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e is a standard set of tools commonly used in software development.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003emacOS and Linux are on top of Unix\u003c/li\u003e\n\u003cli\u003eWindows comes Unix now lol\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eYou can navigate \u003ca href=\"/posts/kbhunix/\"\u003eUnix\u003c/a\u003e inside a \u003ca href=\"/posts/kbhunix/\"\u003ecommand line\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunix/","tags":null,"title":"Unix"},{"categories":null,"contents":"its a File Payload Data with smartness.\nSector Size Block Size Inode Size Inodes Per Block Address 
Type 512 512 32 16 Short, 2 bytes Notably, the entire file system only supports \\(2^{16} = 32MB\\) worth of space due to short address types.\nFor each file on the disk, we store payload data in a bunch of places scattered across the disk, and a single inode which stores the location of each block for the file in an array.\ninodes contain an ordered list of block numbers, file size, permissions. all inodes are stored together in an inode table, which starts at block 2. Blocks 0 and 1 are disk metadata. inode can be read into memory individally to cache 10% of harddrive is used to inode Unix V6 Filesystem limits the maximum file size in order to keep the inode a finite size.\nThe inode table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. inodes are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. usually this packs 16 inodes per block\ninodes are 1 indexed in order to make.\ninode struct inode { uint16_t i_addr[8]; uint16_t i_mode[8]; uint16_t file_size; } Each inode contains 8 addresses in shorts in file order.\nreading inode tables from disk We read the raw 16-block inode data from the right sector, type coerce it into the inode type, and then read from it.\nconst size_t INODE_PER_BLOCK = SECTOR_SIZE / sizeof(struct inode); struct inode inodes[INODE_PER_BLOCK]; char buf[SECTOR_SIZE]; readsector(2, \u0026amp;inodes); // recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode printf(\u0026#34;addr: %d\\n\u0026#34;, inodes[0].i_add); inode modes inodes have two modes\nif ((inode.i_mode \u0026amp; ILARG) != 0) == // node is in \u0026#34;large mode\u0026#34; in small mode, the inode stores in i_addr the block numbers to the data in large mode, the inode stores in the first seven numbers in i_addr block numbers to blocks that contain block numbers (512/2 = 256 block numbers, which are chars); the eighth number points to doubly indirect blocks that contain block numbers that point to other blocks this 
is called indirect addressing\nindirect addressing uses more steps to get to the data, and requires more blocks to get to the block numbers.\nin large mode, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which is fine now.\nDirectory Folders! Directory is a container that contains files, folders, directories, etc.! Its a file container.\nAll files ultimately live within root directory /. Absolute paths start with root directory, and gets you to the file. Relative paths start at the current folder, and gets you to the file File names are NOT stored within the inode. They are stored in directories.\nUnix stores 16 byte unsorted \u0026ldquo;directory entires\u0026rdquo; to represent directories:\nDirectory Entries struct dirent { uint16_t d_inumber; // inode number of this file char d_name[14]; // the name; *NOT NECESSARILY NULL TERMINATED* } THE NAME MAY NOT BE NULL TERMINATED to cram max things. You have to use strncmp because it may not be terminated.\nLookup Start at the root directory, /. We want to go to the root directory, and find the entry named /classes/, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\nA directory is basically just a file whose payload is a list of dirent.\nThe inode tells you whether something is a file or a directory. They can be small or large, as usual. 
Root directory always have inode number 1; 0 is reserved to NULL.\nBecause the directory entries are not sorted, in each direcotry the find is a linear search.\nKey Points modularity: subdivision of a system into a collection of smaller systems layers: layer several modules over each other name resolution: system resolves human friendly name to machine friendly names visualization: making one thing look like another Overall theme: multi-level index\nAdvantages can access all block numbers for a file still supports easy sequential access easy to grow files Disadvantages lots of linear directory search Caching Freelist Linked List Linked list of free-blocks\nBitmap Take a bit for every block in the disk, if its 1, its free. If 0, its not free. This allows locality: data likely used next is closed by, we can search local, continuous spaces.\nproblem: as the disk becomes full, we have to search basically \\(O(n)\\) for each bit until we find the free block\u0026mdash;as the disk fills up, it becomes harder to find free space.\nsolution: lie to the user. don\u0026rsquo;t let the disk used up. grantee that there are some free space at all times. we typically reserve \\(10\\%\\).\nBlock Cache Getting blocks is very expensive. We can keep blocks around in memory because we may need to use them in the near future.\nWe will use part of the main memory to retain recently-accessed disk blocks. 
This is NOT at the granularity of individual files.\nLRU When you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\nBlock Cache Modification we can either write asap, or delay.\nwrite asap\nsafer: less risk of data loss, written as soon as possible slow: program must wait to proceed until disk I/O completes write delay\ndangerous: may loose data after crash efficient: memory writes is faster ","html":"\u003cp\u003eits a \u003ca href=\"/posts/kbhfile_payload_data/\"\u003eFile Payload Data\u003c/a\u003e with smartness.\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003eSector Size\u003c/th\u003e\n\u003cth\u003eBlock Size\u003c/th\u003e\n\u003cth\u003eInode Size\u003c/th\u003e\n\u003cth\u003eInodes Per Block\u003c/th\u003e\n\u003cth\u003eAddress Type\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e512\u003c/td\u003e\n\u003ctd\u003e32\u003c/td\u003e\n\u003ctd\u003e16\u003c/td\u003e\n\u003ctd\u003eShort, 2 bytes\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003eNotably, the entire file system only supports \\(2^{16} = 32MB\\) worth of space due to short address types.\u003c/p\u003e\n\u003cp\u003eFor each file on the disk, we store payload data in a bunch of places scattered across the disk, and a \u003cstrong\u003esingle\u003c/strong\u003e \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e which stores the location of each block for the file in an array.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es contain an \u003cstrong\u003eordered\u003c/strong\u003e list of block numbers, file size, permissions.\u003c/li\u003e\n\u003cli\u003eall \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are stored together in an \u003ca 
href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e table, which starts at \u003cstrong\u003eblock 2\u003c/strong\u003e. Blocks 0 and 1 are disk metadata.\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e can be read into memory individally to cache\u003c/li\u003e\n\u003cli\u003e10% of harddrive is used to \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003eUnix V6 Filesystem\u003c/a\u003e limits the maximum file size in order to keep the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e a finite size.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e table for each file only contains space to point to \\(8\\) block. 1 block = 1 sector on Unix v6. \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are usualy 32 bytes big, and 1 block = 1 sector = 512 bytes. 
usually this packs 16 inodes per block\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003e\u003cstrong\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es are 1 indexed\u003c/strong\u003e\u003c/strong\u003e in order to make.\u003c/p\u003e\n\u003ch2 id=\"inode\"\u003einode\u003c/h2\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_addr\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e8\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003efile_size\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan 
style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eEach inode contains 8 addresses in shorts in \u003cstrong\u003efile order\u003c/strong\u003e.\u003c/p\u003e\n\u003ch3 id=\"reading-inode-tables-from-disk\"\u003ereading inode tables from disk\u003c/h3\u003e\n\u003cp\u003eWe read the raw 16-block inode data from the right sector, type coerce it into the inode type, and then read from it.\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003econst\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esize_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e/\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003esizeof\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eINODE_PER_BLOCK\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ebuf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eSECTOR_SIZE\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003ereadsector\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e);\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// recall this is the first 16 inodes: sec0 is fs info, sec1 is supernode\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75af00\"\u003eprintf\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;addr: %d\u003c/span\u003e\u003cspan style=\"color:#8045ff\"\u003e\\n\u003c/span\u003e\u003cspan style=\"color:#d88200\"\u003e\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003einodes\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e].\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_add\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e);\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003ch3 id=\"inode-modes\"\u003einode modes\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003es have two modes\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e((\u003c/span\u003e\u003cspan style=\"color:#111\"\u003einode\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e.\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei_mode\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e\u0026amp;\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eILARG\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e!=\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// node is in \u0026#34;large mode\u0026#34;\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cul\u003e\n\u003cli\u003ein \u003cstrong\u003esmall mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in \u003ccode\u003ei_addr\u003c/code\u003e the block numbers to the data\u003c/li\u003e\n\u003cli\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, the \u003ca href=\"/posts/kbhunix_v6_filesystem/\"\u003einode\u003c/a\u003e stores in the \u003cstrong\u003efirst seven\u003c/strong\u003e numbers in \u003ccode\u003ei_addr\u003c/code\u003e block numbers to 
\u003cem\u003eblocks that contain block numbers\u003c/em\u003e (512/2 = 256 block numbers, which are chars); the \u003cstrong\u003eeighth number\u003c/strong\u003e points to \u003cstrong\u003edoubly indirect\u003c/strong\u003e \u003cem\u003eblocks that contain block numbers that point to other blocks\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ethis is called \u003ca href=\"#inode-modes\"\u003eindirect addressing\u003c/a\u003e\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"#inode-modes\"\u003eindirect addressing\u003c/a\u003e uses more steps to get to the data, and requires more blocks to get to the block numbers.\u003c/p\u003e\n\u003cp\u003ein \u003cstrong\u003elarge mode\u003c/strong\u003e, this system can store \\((7+256) \\cdot (256 \\cdot 512) = 34MB\\), which is as large as the file system itself, which is fine now.\u003c/p\u003e\n\u003ch2 id=\"directory\"\u003eDirectory\u003c/h2\u003e\n\u003cp\u003eFolders! Directory is a container that contains files, folders, directories, etc.! Its a \u003cstrong\u003efile container\u003c/strong\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eAll files ultimately live within root directory \u003ccode\u003e/\u003c/code\u003e.\u003c/li\u003e\n\u003cli\u003eAbsolute paths start with root directory, and gets you to the file.\u003c/li\u003e\n\u003cli\u003eRelative paths start at the current folder, and gets you to the file\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eFile names are \u003cstrong\u003eNOT\u003c/strong\u003e stored within the inode. 
They are stored in directories.\u003c/p\u003e\n\u003cp\u003eUnix stores 16 byte unsorted \u0026ldquo;directory entires\u0026rdquo; to represent directories:\u003c/p\u003e\n\u003ch3 id=\"directory-entries\"\u003eDirectory Entries\u003c/h3\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-C\" data-lang=\"C\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003estruct\u003c/span\u003e \u003cspan style=\"color:#111\"\u003edirent\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e{\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003euint16_t\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_inumber\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e;\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// inode number of this file\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003echar\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed_name\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e14\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e];\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e// the name; *NOT NECESSARILY NULL TERMINATED*\n\u003c/span\u003e\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#75715e\"\u003e\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e}\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003e\u003cstrong\u003eTHE NAME MAY NOT BE NULL TERMINATED\u003c/strong\u003e to cram max things. 
You have to use \u003cstrong\u003estrncmp\u003c/strong\u003e because it may not be terminated.\u003c/p\u003e\n\u003ch3 id=\"lookup\"\u003eLookup\u003c/h3\u003e\n\u003cp\u003eStart at the root directory, \u003ccode\u003e/\u003c/code\u003e. We want to go to the root directory, and find the entry named \u003ccode\u003e/classes/\u003c/code\u003e, and then, in that directory, find the file. etc. Traverse recursively. Directory could have metadata.\u003c/p\u003e\n\u003cp\u003eA directory is basically just a \u003cstrong\u003efile whose payload is a list of \u003ccode\u003edirent\u003c/code\u003e\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eThe inode tells you whether something is a file or a directory. They can be small or large, as usual. Root directory always have inode number \u003ccode\u003e1\u003c/code\u003e; \u003ccode\u003e0\u003c/code\u003e is reserved to NULL.\u003c/p\u003e\n\u003cp\u003eBecause the directory entries are not sorted, in each direcotry the find is a linear search.\u003c/p\u003e\n\u003ch2 id=\"key-points\"\u003eKey Points\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emodularity\u003c/strong\u003e: subdivision of a system into a collection of smaller systems\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elayers\u003c/strong\u003e: layer several modules over each other\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ename resolution\u003c/strong\u003e: system resolves human friendly name to machine friendly names\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evisualization\u003c/strong\u003e: making one thing look like another\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eOverall theme: \u003cem\u003emulti-level index\u003c/em\u003e\u003c/p\u003e\n\u003ch3 id=\"advantages\"\u003eAdvantages\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003ecan access all block numbers for a file\u003c/li\u003e\n\u003cli\u003estill supports easy sequential access\u003c/li\u003e\n\u003cli\u003eeasy to grow 
files\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"disadvantages\"\u003eDisadvantages\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003elots of linear directory search\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"caching\"\u003eCaching\u003c/h2\u003e\n\u003ch3 id=\"freelist\"\u003eFreelist\u003c/h3\u003e\n\u003ch4 id=\"linked-list\"\u003eLinked List\u003c/h4\u003e\n\u003cp\u003eLinked list of free-blocks\u003c/p\u003e\n\u003ch4 id=\"bitmap\"\u003eBitmap\u003c/h4\u003e\n\u003cp\u003eTake a bit for every block in the disk, if its 1, its free. If 0, its not free. This allows \u003cem\u003elocality\u003c/em\u003e: data likely used next is closed by, we can search local, continuous spaces.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eproblem\u003c/strong\u003e: as the disk becomes full, we have to search basically \\(O(n)\\) for each bit until we find the free block\u0026mdash;as the disk fills up, it becomes harder to find free space.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003esolution\u003c/strong\u003e: lie to the user. don\u0026rsquo;t let the disk used up. grantee that there are some free space at all times. we typically reserve \\(10\\%\\).\u003c/p\u003e\n\u003ch3 id=\"block-cache\"\u003eBlock Cache\u003c/h3\u003e\n\u003cp\u003eGetting blocks is very expensive. We can keep blocks around in memory because we may need to use them in the near future.\u003c/p\u003e\n\u003cp\u003eWe will use part of the main memory to retain recently-accessed disk blocks. 
This is \u003cstrong\u003eNOT\u003c/strong\u003e at the granularity of individual files.\u003c/p\u003e\n\u003ch4 id=\"lru\"\u003eLRU\u003c/h4\u003e\n\u003cp\u003eWhen you insert a new element into the cache, kick out the element on the cache that hasn\u0026rsquo;t been touched in the longest time.\u003c/p\u003e\n\u003ch4 id=\"block-cache-modification\"\u003eBlock Cache Modification\u003c/h4\u003e\n\u003cp\u003ewe can either \u003cstrong\u003ewrite asap\u003c/strong\u003e, or \u003cstrong\u003edelay\u003c/strong\u003e.\u003c/p\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ewrite asap\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esafer\u003c/strong\u003e: less risk of data loss, written as soon as possible\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eslow\u003c/strong\u003e: program must wait to proceed until disk I/O completes\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ewrite delay\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003edangerous\u003c/strong\u003e: may loose data after crash\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eefficient\u003c/strong\u003e: memory writes is faster\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhunix_v6_filesystem/","tags":null,"title":"Unix V6 Filesystem"},{"categories":null,"contents":"A matrix is upper-triangular if the entries below the diagonal are \\(0\\):\n\\begin{equation} \\mqty(\\lambda_{1} \u0026amp; \u0026amp; * \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp; \u0026amp; \\lambda_{n}) \\end{equation}\nproperties of upper-triangular matrix Suppose \\(T \\in \\mathcal{L}(V)\\), and \\(v_1 \u0026hellip; v_{n}\\) is a basis of \\(V\\). Then:\nthe matrix of \\(T\\) w.r.t. 
\\(v_1 \u0026hellip; v_{n}\\) is upper-triangular \\(Tv_{j} \\in span(v_1 \\dots v_{j})\\) for each \\(v_{j}\\) \\(span(v_{1}, \u0026hellip; v_{j})\\) is invariant under \\(T\\) for each \\(v_{j}\\) \\(1 \\implies 2\\) Recall that our matrix \\(A=\\mathcal{M}(T)\\) is upper-triangular. So, for any \\(v_{j}\\) sent through \\(A\\), it will be multiplied to the $j$-th column vector of the matrix. Now, that $j$-th column has \\(0\\) for rows \\(j+1 \u0026hellip; n\\), meaning that only through a linear combination of the first \\(j\\) vectors we can construct \\(T v_{j}\\). Hence, \\(Tv_{j} \\in span(v_1 \u0026hellip; v_{j})\\)\n\\(3 \\implies 2\\) \u0026ldquo;obviously\u0026rdquo;\nAll \\(v_{j} \\in span(v_1, \\dots v_{j})\\), and yet \\(T v_{j} \\in span (v_{1}, \u0026hellip; v_{j})\\) as it is given. Hence, \\(span(v_1, \u0026hellip; v_{j})\\) is invariant under \\(T\\).\n\\(2 \\implies 3\\) Let \\(v \\in span(v_1, \u0026hellip; v_{j})\\); meaning: \\(v = a_1 v_1 + \u0026hellip; + a_{j} v_{j}\\). Now, \\(Tv = a_1 T v_{1} + \u0026hellip; + a_{j} T v_{j}\\). Recall now we are given \\(T v_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for each \\(v_{j}\\) (of course if \\(T{v_{1}} \\in span(v_{1})\\) it is also in \\(span(v_1, \u0026hellip; v_{j})\\) so the statement make sense.) Therefore, a linear combinations of \\(T v_{j}\\) also is in \\(span(v_1 \u0026hellip; v_{j})\\). Making the latter invariant under \\(T\\). \\(\\blacksquare\\)\nevery complex operator has an upper-triangular matrix Suppose \\(V\\) is a finite-dimensional complex vector space, with an operator \\(T \\in \\mathcal{L}(V)\\). Then, \\(T\\) has an upper-triangular matrix w.r.t. 
some basis of \\(V\\).\nProof:\nWe will use induction.\nInductive hypothesis: given dimension of \\(V\\), \\(T \\in \\mathcal{L}(V)\\) has an upper-triangular matrix for a basis of \\(V\\).\nBase case: \\(\\dim V=1\\)\nIf \\(\\dim V = 1\\), any matrix of \\(T\\) is technically upper-triangular because its just one number \\(\\mqty(a)\\).\nStep: \\(\\dim V = n\\), and \\(T \\in \\mathcal{L}(V)\\)\nBecause operators on complex vector spaces have an eigenvalue, let \\(v_1\\) be an eigenvector corresponding to an eigenvalue of \\(T\\). Now, create an invariant subspace \\(U = span(v_1)\\). (it is invariant because \\(v_1\\) is an eigenvalue). Now, evidently \\(\\dim U =1\\).\nNow, \\(\\dim V / U = n-1\\), the previous step from induction tells us that there exists a upper-triangular matrix for \\(T/U \\in \\mathcal{L}(V / U)\\). Specifically, because of the properties of upper-triangular matrix, it tells us that there is a basis \\(v_{2} + U \u0026hellip; v_{n} + U\\) such that its span is invariant under \\(T / U\\). Meaning:\n\\begin{equation} (T / U) (v_{j} + U ) \\in span( v_{2} + U \\dots v_{j} + U) \\end{equation}\nWriting it out:\n\\begin{equation} T v_{j} + U = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + U \\end{equation}\nSpecifically, this means, there exists at least one pair \\(u_1, u_2\\) for which:\n\\begin{equation} T v_{j} + u_1 = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + u_2 \\end{equation}\nAnd so:\n\\begin{equation} T v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + (u_2 - u_1) \\end{equation}\nAnd since \\(\\{v_1\\}\\) is a basis of \\(U\\), and \\(u_2 - u_1 \\in U\\), we can say:\n\\begin{equation} T v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + a_1 v_1 \\end{equation}\nHence:\n\\begin{equation} T v_{j} \\in span(v_1, \\dots v_{j}) \\end{equation}\nIt has been shown in the past (see Linear Algebra Errors) that if a list form a basis of \\(V /U\\) and another form a basis of \\(U\\) then the two lists combined form a basis of the whole thing \\(V\\). 
So \\(v_1 \u0026hellip; v_{j}\\) is a basis of \\(V\\).\nNow, by the properties of upper-triangular matrix again, we have that there exists an upper-triangular matrix of \\(T\\) for \\(\\dim V = n\\). \\(\\blacksquare\\)\noperator is only invertible if diagonal of its upper-triangular matrix is nonzero Suppose \\(T \\in \\mathcal{L}(V)\\) has an upper-triangular matrix w.r.t. a basis of \\(V\\). Then, \\(T\\) is invertable IFF all the entries on the diagonal of the upper-triangular matrix is nonzero.\nassume nonzero diagonal Let \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the diagonal entries of \\(T\\). Per given, let there be an upper-triangular matrix of \\(T\\) under the basis \\(v_1 \u0026hellip; v_{n}\\). The matrix w.r.t. \\(T\\)\u0026rsquo;s matrix being upper-triangular under the list of \\(v_{j}\\) means that:\n\\begin{equation} T v_1 = \\lambda_{1} v_1 \\end{equation}\n(because \\(T v_{j} \\in span(v_1 \u0026hellip; v_{j})\\), and let \\(j=1\\)). And so:\n\\begin{equation} T \\frac{v_1}{\\lambda_{1}} = v_{1} \\end{equation}\n(legal as \\(\\lambda_{j} \\neq 0\\) per given).\nThus, \\(v_1 \\in range(T)\\).\nIn a similar fashion, let:\n\\begin{equation} T v_{2} = a v_{1} + \\lambda_{2} v_{2} \\end{equation}\n(\\(a\\) being the element just to the right of the \\(\\lambda_{1}\\) diagonal; recall again that \\(T\\)\u0026rsquo;s matrix under \\(v_{j}\\) is upper-triangular)\nNow:\n\\begin{equation} T \\frac{v_2}{\\lambda 2} = \\frac{a}{\\lambda_{2}} v_{1} + v_{2} \\end{equation}\nThe left side is in range \\(T\\) by definition; the right side\u0026rsquo;s \\(\\frac{a}{\\lambda 2} v_{1} \\in range\\ T\\) and hence so is its scaled versions. Thus, \\(v_2 \\in range\\ T\\).\nContinuing in this fashion, we have all \\(v_{j} \\in range\\ T\\). So \\(T\\) is surjective as it can hit all basis of \\(V\\). Now, injectivity is surjectivity in finite-dimensional operators, so \\(T\\) is invertable, as desired.\nassume invertible We will prove this by induction. 
Let \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the diagonal entries of \\(T\\).\nInductive hypothesis: \\(\\lambda_{j} \\neq 0\\)\nBase case: \\(\\lambda_{1} \\neq 0\\) because if not, \\(T v_{1} = 0\\) and \\(v_{1} \\neq 0\\) as it is part of a basis so that would make \\(T\\) not injective and hence not invertable. Hence, by contradiction, \\(\\lambda_{1} = 0\\).\nStep: \\(\\lambda_{j}\\)\nSuppose for the sake of contradiction \\(\\lambda_{j} = 0\\). This means that the basis \\(v_{j}\\) is mapped to somewhere in \\(span(v_{1}, \u0026hellip; v_{j-1})\\) as only the top \\(j-1\\) slots are non-zero for the $j$-th column. And so, \\(T\\), under the assumption, would map \\(span(v_1, \u0026hellip; v_{j})\\) into \\(span(v_1, \u0026hellip; v_{j-1})\\).\nNow, because \\(v_{j}\\) are linearly independent (they form a basis after all), \\(\\dim span(v_1, \u0026hellip; v_{j}) = j\\) and \\(\\dim span(v_1, \u0026hellip;, v_{j-1}) = j-1\\). Now, as \\(T\\) restricted on \\(span(v_1, ..v_{j})\\) maps to a smaller subspace, it is not injective. So, \\(T\\) as a whole is not injective, so it is not invertable. Reaching contradiction, \\(\\blacksquare\\).\neigenvalues of a map are the entries of the diagonal of its upper-triangular matrix The matrix of \\(T-\\lambda I\\) for an upper-triangular form of \\(T\\) would look like:\n\\begin{equation} \\mqty(\\lambda_{1} - \\lambda \u0026amp;\u0026amp;* \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp;\u0026amp;\\lambda_{n} - \\lambda) \\end{equation}\nwhere \\(\\lambda_{j}\\) are the diagonals of the upper-triangular form of \\(T\\), and \\(\\lambda\\) an eigenvalue of \\(T\\).\nRecall that operator is only invertible if diagonal of its upper-triangular matrix is nonzero; so if \\(\\lambda\\) equals any of the \\(\\lambda_{j}\\), it will make the matrix above for \\(T - \\lambda I\\) not invertable as one of its diagonal will be \\(0\\). 
Recall the properties of eigenvalues, specifically that \\(\\lambda\\) is an eigenvalue IFF \\((T-\\lambda I)\\) is not invertable. Hence, each \\(\\lambda_{j}\\) is an eigenvalue of \\(T\\). \\(\\blacksquare\\)\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e if the entries below the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e are \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(\\lambda_{1} \u0026amp; \u0026amp; * \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp; \u0026amp; \\lambda_{n})\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"properties-of-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eproperties of \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\), and \\(v_1 \u0026hellip; v_{n}\\) is a basis of \\(V\\). Then:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethe matrix of \\(T\\) w.r.t. \\(v_1 \u0026hellip; v_{n}\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\\(Tv_{j} \\in span(v_1 \\dots v_{j})\\) for each \\(v_{j}\\)\u003c/li\u003e\n\u003cli\u003e\\(span(v_{1}, \u0026hellip; v_{j})\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\) for each \\(v_{j}\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"1-implies-2\"\u003e\\(1 \\implies 2\\)\u003c/h3\u003e\n\u003cp\u003eRecall that our matrix \\(A=\\mathcal{M}(T)\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e. So, for any \\(v_{j}\\) sent through \\(A\\), it will be multiplied to the $j$-th column \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e of the \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e. 
Now, that $j$-th column has \\(0\\) for rows \\(j+1 \u0026hellip; n\\), meaning that only through a linear combination of the first \\(j\\) vectors we can construct \\(T v_{j}\\). Hence, \\(Tv_{j} \\in span(v_1 \u0026hellip; v_{j})\\)\u003c/p\u003e\n\u003ch3 id=\"3-implies-2\"\u003e\\(3 \\implies 2\\)\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;obviously\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eAll \\(v_{j} \\in span(v_1, \\dots v_{j})\\), and yet \\(T v_{j} \\in span (v_{1}, \u0026hellip; v_{j})\\) as it is given. Hence, \\(span(v_1, \u0026hellip; v_{j})\\) is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\).\u003c/p\u003e\n\u003ch3 id=\"2-implies-3\"\u003e\\(2 \\implies 3\\)\u003c/h3\u003e\n\u003cp\u003eLet \\(v \\in span(v_1, \u0026hellip; v_{j})\\); meaning: \\(v = a_1 v_1 + \u0026hellip; + a_{j} v_{j}\\). Now, \\(Tv = a_1 T v_{1} + \u0026hellip; + a_{j} T v_{j}\\). Recall now we are given \\(T v_{j} \\in span(v_1, \u0026hellip; v_{j})\\) for each \\(v_{j}\\) (of course if \\(T{v_{1}} \\in span(v_{1})\\) it is also in \\(span(v_1, \u0026hellip; v_{j})\\) so the statement make sense.) Therefore, a linear combinations of \\(T v_{j}\\) also is in \\(span(v_1 \u0026hellip; v_{j})\\). Making the latter \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T\\). \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"every-complex-operator-has-an-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eevery complex operator has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eSuppose \\(V\\) is a finite-dimensional \u003ca href=\"/posts/kbhvector_space/#vector-space-over-fields\"\u003ecomplex vector space\u003c/a\u003e, with an \u003ca href=\"/posts/kbhoperator/\"\u003eoperator\u003c/a\u003e \\(T \\in \\mathcal{L}(V)\\). 
Then, \\(T\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e w.r.t. some \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eProof:\u003c/p\u003e\n\u003cp\u003eWe will use induction.\u003c/p\u003e\n\u003cp\u003eInductive hypothesis: given dimension of \\(V\\), \\(T \\in \\mathcal{L}(V)\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e for a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eBase case: \\(\\dim V=1\\)\u003c/p\u003e\n\u003cp\u003eIf \\(\\dim V = 1\\), any matrix of \\(T\\) is technically \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e because its just one number \\(\\mqty(a)\\).\u003c/p\u003e\n\u003cp\u003eStep: \\(\\dim V = n\\), and \\(T \\in \\mathcal{L}(V)\\)\u003c/p\u003e\n\u003cp\u003eBecause \u003ca href=\"/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/\"\u003eoperators on complex vector spaces have an eigenvalue\u003c/a\u003e, let \\(v_1\\) be an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvector\u003c/a\u003e corresponding to an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). Now, create an \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant subspace\u003c/a\u003e \\(U = span(v_1)\\). (it is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e because \\(v_1\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e). Now, evidently \\(\\dim U =1\\).\u003c/p\u003e\n\u003cp\u003eNow, \\(\\dim V / U = n-1\\), the previous step from induction tells us that there exists a \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e for \\(T/U \\in \\mathcal{L}(V / U)\\). 
Specifically, because of the \u003ca href=\"#properties-of-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eproperties of upper-triangular matrix\u003c/a\u003e, it tells us that there is a basis \\(v_{2} + U \u0026hellip; v_{n} + U\\) such that its \u003ca href=\"/posts/kbhspan/\"\u003espan\u003c/a\u003e is \u003ca href=\"/posts/kbhinvariant_subspace/\"\u003einvariant\u003c/a\u003e under \\(T / U\\). Meaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n(T / U) (v_{j} + U ) \\in span( v_{2} + U \\dots v_{j} + U)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWriting it out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} + U = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + U\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSpecifically, this means, there exists at least one pair \\(u_1, u_2\\) for which:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} + u_1 = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + u_2\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + (u_2 - u_1)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAnd since \\(\\{v_1\\}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(U\\), and \\(u_2 - u_1 \\in U\\), we can say:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} = (a_{2} v_{2} + \\dots + a_{j} v_{j}) + a_1 v_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eHence:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{j} \\in span(v_1, \\dots v_{j})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eIt has been shown in the past (see \u003ca href=\"/posts/kbhlinear_algebra_errors/\"\u003eLinear Algebra Errors\u003c/a\u003e) that if a list form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V /U\\) and another form a basis of \\(U\\) then the two lists combined form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of the whole thing \\(V\\). 
So \\(v_1 \u0026hellip; v_{j}\\) is a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\).\u003c/p\u003e\n\u003cp\u003eNow, by the \u003ca href=\"#properties-of-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eproperties of upper-triangular matrix\u003c/a\u003e again, we have that there exists an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e of \\(T\\) for \\(\\dim V = n\\). \\(\\blacksquare\\)\u003c/p\u003e\n\u003ch2 id=\"operator-is-only-invertible-if-diagonal--kbhmatricies-dot-md--of-its-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md--is-nonzero\"\u003eoperator is only invertible if \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of its \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e is nonzero\u003c/h2\u003e\n\u003cp\u003eSuppose \\(T \\in \\mathcal{L}(V)\\) has an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e w.r.t. a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). Then, \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e IFF all the entries on the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of the \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e is nonzero.\u003c/p\u003e\n\u003ch3 id=\"assume-nonzero-diagonal\"\u003eassume nonzero diagonal\u003c/h3\u003e\n\u003cp\u003eLet \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e entries of \\(T\\). Per given, let there be an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T\\) under the basis \\(v_1 \u0026hellip; v_{n}\\). 
The \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e w.r.t. \\(T\\)\u0026rsquo;s matrix being \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e under the list of \\(v_{j}\\) means that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_1 = \\lambda_{1} v_1\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(because \\(T v_{j} \\in span(v_1 \u0026hellip; v_{j})\\), and let \\(j=1\\)). And so:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT \\frac{v_1}{\\lambda_{1}} = v_{1}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(legal as \\(\\lambda_{j} \\neq 0\\) per given).\u003c/p\u003e\n\u003cp\u003eThus, \\(v_1 \\in range(T)\\).\u003c/p\u003e\n\u003cp\u003eIn a similar fashion, let:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT v_{2} = a v_{1} + \\lambda_{2} v_{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(\\(a\\) being the element just to the right of the \\(\\lambda_{1}\\) diagonal; recall again that \\(T\\)\u0026rsquo;s matrix under \\(v_{j}\\) is \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e)\u003c/p\u003e\n\u003cp\u003eNow:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nT \\frac{v_2}{\\lambda 2} = \\frac{a}{\\lambda_{2}} v_{1} + v_{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe left side is in \u003ca href=\"/posts/kbhrange/\"\u003erange\u003c/a\u003e \\(T\\) by definition; the right side\u0026rsquo;s \\(\\frac{a}{\\lambda 2} v_{1} \\in range\\ T\\) and hence so is its scaled versions. Thus, \\(v_2 \\in range\\ T\\).\u003c/p\u003e\n\u003cp\u003eContinuing in this fashion, we have all \\(v_{j} \\in range\\ T\\). So \\(T\\) is \u003ca href=\"/posts/kbhsurjectivity/\"\u003esurjective\u003c/a\u003e as it can hit all \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e of \\(V\\). 
Now, \u003ca href=\"/posts/kbhoperator/#id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injectivity-is-id-1af529ce-e2fb-43a4-8f13-aee1dc743b5f-surjectivity-in-id-4ed27ed5-4edc-4ef4-afd7-9b8e3bcd9b96-finite-dimensional-id-36e84a46-76f1-481e-b031-8ab2f0da0aa8-operator-s\"\u003einjectivity is surjectivity in finite-dimensional operators\u003c/a\u003e, so \\(T\\) is \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e, as desired.\u003c/p\u003e\n\u003ch3 id=\"assume-invertible\"\u003eassume invertible\u003c/h3\u003e\n\u003cp\u003eWe will prove this by induction. Let \\(\\lambda_{1} \u0026hellip; \\lambda_{n}\\) be the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e entries of \\(T\\).\u003c/p\u003e\n\u003cp\u003eInductive hypothesis: \\(\\lambda_{j} \\neq 0\\)\u003c/p\u003e\n\u003cp\u003eBase case: \\(\\lambda_{1} \\neq 0\\) because if not, \\(T v_{1} = 0\\) and \\(v_{1} \\neq 0\\) as it is part of a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e so that would make \\(T\\) not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e and hence not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. Hence, by contradiction, \\(\\lambda_{1} = 0\\).\u003c/p\u003e\n\u003cp\u003eStep: \\(\\lambda_{j}\\)\u003c/p\u003e\n\u003cp\u003eSuppose for the sake of contradiction \\(\\lambda_{j} = 0\\). This means that the basis \\(v_{j}\\) is mapped to somewhere in \\(span(v_{1}, \u0026hellip; v_{j-1})\\) as only the top \\(j-1\\) slots are non-zero for the $j$-th column. 
And so, \\(T\\), under the assumption, would map \\(span(v_1, \u0026hellip; v_{j})\\) into \\(span(v_1, \u0026hellip; v_{j-1})\\).\u003c/p\u003e\n\u003cp\u003eNow, because \\(v_{j}\\) are \u003ca href=\"/posts/kbhlinear_independence/\"\u003elinearly independent\u003c/a\u003e (they form a \u003ca href=\"/posts/kbhbasis/\"\u003ebasis\u003c/a\u003e after all), \\(\\dim span(v_1, \u0026hellip; v_{j}) = j\\) and \\(\\dim span(v_1, \u0026hellip;, v_{j-1}) = j-1\\). Now, as \\(T\\) \u003ca href=\"/posts/kbhmap_restriction_operator/\"\u003erestricted\u003c/a\u003e on \\(span(v_1, ..v_{j})\\) maps to a smaller \u003ca href=\"/posts/kbhsubspace/\"\u003esubspace\u003c/a\u003e, \u003ca href=\"/posts/kbhlinear_map/#map-to-smaller-space-is-not-id-e3ff3c90-e719-4c5b-afc4-efcec3169fb2-injective\"\u003eit is not injective\u003c/a\u003e. So, \\(T\\) as a whole is not \u003ca href=\"/posts/kbhinjectivity/\"\u003einjective\u003c/a\u003e, so it is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. 
Reaching contradiction, \\(\\blacksquare\\).\u003c/p\u003e\n\u003ch2 id=\"eigenvalues-of-a-map-are-the-entries-of-the-diagonal--kbhmatricies-dot-md--of-its-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md\"\u003eeigenvalues of a map are the entries of the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e of its \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhmatricies/\"\u003ematrix\u003c/a\u003e of \\(T-\\lambda I\\) for an \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e form of \\(T\\) would look like:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\mqty(\\lambda_{1} - \\lambda \u0026amp;\u0026amp;* \\\\ \u0026amp; \\ddots \u0026amp; \\\\ 0 \u0026amp;\u0026amp;\\lambda_{n} - \\lambda)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda_{j}\\) are the \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003es of the \u003ca href=\"/posts/kbhupper_triangular_matrix/\"\u003eupper-triangular\u003c/a\u003e form of \\(T\\), and \\(\\lambda\\) an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\).\u003c/p\u003e\n\u003cp\u003eRecall that \u003ca href=\"#operator-is-only-invertible-if-diagonal--kbhmatricies-dot-md--of-its-upper-triangular-matrix--kbhupper-triangular-matrix-dot-md--is-nonzero\"\u003eoperator is only invertible if diagonal of its upper-triangular matrix is nonzero\u003c/a\u003e; so if \\(\\lambda\\) equals any of the \\(\\lambda_{j}\\), it will make the matrix above for \\(T - \\lambda I\\) not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e as one of its \u003ca href=\"/posts/kbhmatricies/#diagonal\"\u003ediagonal\u003c/a\u003e will be \\(0\\). 
Recall the \u003ca href=\"/posts/kbheigenvalue/#properties-of-id-7d742b39-4a4a-4a9d-a55b-07e2030dfdeb-eigenvalue-s\"\u003eproperties of eigenvalues\u003c/a\u003e, specifically that \\(\\lambda\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e \u003ca href=\"/posts/kbhequivalence/\"\u003eIFF\u003c/a\u003e \\((T-\\lambda I)\\) is not \u003ca href=\"/posts/kbhinvertability/\"\u003einvertable\u003c/a\u003e. Hence, each \\(\\lambda_{j}\\) is an \u003ca href=\"/posts/kbheigenvalue/\"\u003eeigenvalue\u003c/a\u003e of \\(T\\). \\(\\blacksquare\\)\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhupper_triangular_matrix/","tags":null,"title":"upper-triangular matrix"},{"categories":null,"contents":" Investment: Paid for 50% of war bonds Production: ships, tanks, airplanes, etc. \u0026mdash; encourages production Conservation: 5% of the world\u0026rsquo;s population production, 50% of the world\u0026rsquo;s manufactured goods \u0026mdash; rationing, grow goods, etc. ","html":"\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eInvestment\u003c/strong\u003e\u003c/strong\u003e: Paid for 50% of war bonds\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eProduction\u003c/strong\u003e\u003c/strong\u003e: ships, tanks, airplanes, etc. 
\u0026mdash; encourages production\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003e\u003cstrong\u003eConservation\u003c/strong\u003e\u003c/strong\u003e: 5% of the world\u0026rsquo;s population production, 50% of the world\u0026rsquo;s manufactured goods \u0026mdash; rationing, grow goods, etc.\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhus_wwii_propaganda/","tags":null,"title":"US WWII Propaganda"},{"categories":null,"contents":"USAYPT or USIYPT is a physics research competition ran by Greg Jacobs.\n2022 My own work doc for the 2022 Tuning Forks problem is here.\nGeneral Tips When in doubt, ask about error prop ANSWER THE RESEARCH QUESTION (elevator) Convey that you understand basics via presentation Have intuition regarding phenomenon Be able to explain every formula from first principles Order of magnitude and dimension analysis Have clear variance in parameters (what did you vary and why) What does the intercepts mean on graphs? \u0026ldquo;Don\u0026rsquo;t be obtuse\u0026rdquo; Connect to simple physics terms Explanations needs to be simple Engage discussion ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhusaypt/\"\u003eUSAYPT\u003c/a\u003e or \u003ca href=\"/posts/kbhusaypt/\"\u003eUSIYPT\u003c/a\u003e is a \u003ca href=\"/posts/kbhphysics/\"\u003ephysics\u003c/a\u003e research competition ran by Greg Jacobs.\u003c/p\u003e\n\u003ch2 id=\"2022\"\u003e2022\u003c/h2\u003e\n\u003cp\u003eMy own work doc for the 2022 \u003ca href=\"/posts/kbhtuning_forks/\"\u003eTuning Fork\u003c/a\u003es problem is \u003ca href=\"/posts/kbhtuning_forks/\"\u003ehere\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"general-tips\"\u003eGeneral Tips\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eWhen in doubt, ask about error prop\u003c/li\u003e\n\u003cli\u003eANSWER THE RESEARCH QUESTION (elevator)\u003c/li\u003e\n\u003cli\u003eConvey that you understand basics via presentation\u003c/li\u003e\n\u003cli\u003eHave intuition regarding 
phenomenon\u003c/li\u003e\n\u003cli\u003eBe able to explain every formula from first principles\u003c/li\u003e\n\u003cli\u003eOrder of magnitude and dimension analysis\u003c/li\u003e\n\u003cli\u003eHave clear variance in parameters (what did you vary and why)\u003c/li\u003e\n\u003cli\u003eWhat does the intercepts mean on graphs?\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Don\u0026rsquo;t be obtuse\u0026rdquo;\u003c/li\u003e\n\u003cli\u003eConnect to simple physics terms\u003c/li\u003e\n\u003cli\u003eExplanations needs to be simple\u003c/li\u003e\n\u003cli\u003eEngage discussion\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhusaypt/","tags":null,"title":"USAYPT"},{"categories":null,"contents":"User Experience is the\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhuser_experience/\"\u003eUser Experience\u003c/a\u003e is the\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuser_experience/","tags":null,"title":"User Experience"},{"categories":null,"contents":"The User Experience design sprung out of WWII\u0026mdash;in Aerospace engineering.\nThe Design Process The \u0026ldquo;Double Diamond\u0026rdquo; Process\nFirst Round of Going Broad Explore the problem space (what are you users trying to do? why? why is it hard?) Decide what to fix (what is the most high impact problem?) Second Round of Going Broad Test potential solutions (does this fix the problem?) Refine final solution (do all users understand this? can they use them?) 
Usability Heuristics Usability Heuristics is a set of principles used in User Experience design to identify problems and potential solutions.\nVisibility of System Status Keep the users informed about what\u0026rsquo;s actively going on, through appropriate visual feedback placed at an appropriate amount of time.\nMatch Between System and the Real World Use language that\u0026rsquo;s familiar to the user, using words, phrases, concepts familiar to the users rather than internal jargon.\nBalance User Control and Freedom User often perform actions by mistake; mark \u0026ldquo;emergency exits\u0026rdquo; to leave unwanted pathways/actions without causing side effects.\nConsistency and Standards Having consistency between different versions/family of products: putting buttons that do the same thing to the same place across the app, at the same region.\nError Prevention Eliminate error-prone conditions (prevent the users from doing it), or present users with a confirmation before they commit to an erroneous action\nRecognition vs. Recall Users should\u0026rsquo;t need to remember when they are going through an UI; instead they should be able to recognize the intended behavior from the UI\nFlexibility and Efficiency of User Catering functionality to both novice and advanced users. 
Make advanced actions hidden to novice users, but easily accessible for advanced users.\nMinimalism Keep the UI focused on essential actions and information\u0026mdash;maintaining an aesthetic and minimalist design\nHelp Users Recognize, Diagnoses, and Recover from Errors Errors should\u0026hellip;\ngive context for what the problem is instruct the user for possible next actions Help It maybe necessary to provide documentation to help users understand how to complete their tasks; the documentation should be clear\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhux_design/\"\u003eUser Experience\u003c/a\u003e design sprung out of WWII\u0026mdash;in Aerospace engineering.\u003c/p\u003e\n\u003ch2 id=\"the-design-process\"\u003eThe Design Process\u003c/h2\u003e\n\u003cp\u003eThe \u0026ldquo;Double Diamond\u0026rdquo; Process\u003c/p\u003e\n\u003ch3 id=\"first-round-of-going-broad\"\u003eFirst Round of Going Broad\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eExplore the problem space (what are you users trying to do? why? why is it hard?)\u003c/li\u003e\n\u003cli\u003eDecide what to fix (what is the most high impact problem?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"second-round-of-going-broad\"\u003eSecond Round of Going Broad\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eTest potential solutions (does this fix the problem?)\u003c/li\u003e\n\u003cli\u003eRefine final solution (do all users understand this? 
can they use them?)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"usability-heuristics\"\u003eUsability Heuristics\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#usability-heuristics\"\u003eUsability Heuristics\u003c/a\u003e is a set of principles used in \u003ca href=\"/posts/kbhux_design/\"\u003eUser Experience\u003c/a\u003e design to identify problems and potential solutions.\u003c/p\u003e\n\u003ch3 id=\"visibility-of-system-status\"\u003eVisibility of System Status\u003c/h3\u003e\n\u003cp\u003eKeep the users informed about what\u0026rsquo;s actively going on, through appropriate visual feedback placed at an appropriate amount of time.\u003c/p\u003e\n\u003ch3 id=\"match-between-system-and-the-real-world\"\u003eMatch Between System and the Real World\u003c/h3\u003e\n\u003cp\u003eUse language that\u0026rsquo;s familiar to the user, using words, phrases, concepts familiar to the users rather than internal jargon.\u003c/p\u003e\n\u003ch3 id=\"balance-user-control-and-freedom\"\u003eBalance User Control and Freedom\u003c/h3\u003e\n\u003cp\u003eUser often perform actions by mistake; mark \u0026ldquo;emergency exits\u0026rdquo; to leave unwanted pathways/actions without causing side effects.\u003c/p\u003e\n\u003ch3 id=\"consistency-and-standards\"\u003eConsistency and Standards\u003c/h3\u003e\n\u003cp\u003eHaving consistency between different versions/family of products: putting buttons that do the same thing to the same place across the app, at the same region.\u003c/p\u003e\n\u003ch3 id=\"error-prevention\"\u003eError Prevention\u003c/h3\u003e\n\u003cp\u003eEliminate error-prone conditions (prevent the users from doing it), or present users with a confirmation before they commit to an erroneous action\u003c/p\u003e\n\u003ch3 id=\"recognition-vs-dot-recall\"\u003eRecognition vs. 
Recall\u003c/h3\u003e\n\u003cp\u003eUsers should\u0026rsquo;t need to remember when they are going through an UI; instead they should be able to recognize the intended behavior from the UI\u003c/p\u003e\n\u003ch3 id=\"flexibility-and-efficiency-of-user\"\u003eFlexibility and Efficiency of User\u003c/h3\u003e\n\u003cp\u003eCatering functionality to both novice and advanced users. Make advanced actions hidden to novice users, but easily accessible for advanced users.\u003c/p\u003e\n\u003ch3 id=\"minimalism\"\u003eMinimalism\u003c/h3\u003e\n\u003cp\u003eKeep the UI focused on essential actions and information\u0026mdash;maintaining an aesthetic and minimalist design\u003c/p\u003e\n\u003ch3 id=\"help-users-recognize-diagnoses-and-recover-from-errors\"\u003eHelp Users Recognize, Diagnoses, and Recover from Errors\u003c/h3\u003e\n\u003cp\u003eErrors should\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003egive context for what the problem is\u003c/li\u003e\n\u003cli\u003einstruct the user for possible next actions\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"help\"\u003eHelp\u003c/h3\u003e\n\u003cp\u003eIt maybe necessary to provide documentation to help users understand how to complete their tasks; the documentation should be clear\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhux_design/","tags":null,"title":"User Experience"},{"categories":null,"contents":"Goal: understand the user.\nFind out\u0026hellip;\nMotivation Context Deeper need? The goal of user interviews is to understand the user even if they know what they want!\nGood User Interviews Make person feel welcome/safe/appreciated\nAsk open-ended \u0026ldquo;questions\u0026rdquo;\nDescribe a time that\u0026hellip; Tell me more about.. 
Leave space: awkward silences (not too awkward)\nReally listen!; repress the urge to think of what you want to say next\nRepeat statements back to people\nAsk about examples, context, etc.\nA roadmap 1: create a comfortable entry point 2: go wide, deep into more personal and complex questions 3: focus on the problem, not the solution 4: focus on feelings\u0026mdash;feelings matter, how nice matters 5: end with conclusions and statements for what you User Story The user story should contain\u0026hellip;.\nA main character (your user) Character background (motivation) A plot (context) Climax and Resolution Framework describe the user; who are they; what do they like or not like an iStudio classic need statement finish with a description of the emotional impact of using our software ","html":"\u003cp\u003eGoal: \u003cstrong\u003e\u003cstrong\u003eunderstand the user.\u003c/strong\u003e\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eFind out\u0026hellip;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eMotivation\u003c/li\u003e\n\u003cli\u003eContext\u003c/li\u003e\n\u003cli\u003eDeeper need?\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe goal of user interviews is to understand the user even if they know what they want!\u003c/p\u003e\n\u003ch2 id=\"good-user-interviews\"\u003eGood User Interviews\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003eMake person feel welcome/safe/appreciated\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAsk open-ended \u0026ldquo;questions\u0026rdquo;\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eDescribe a time that\u0026hellip;\u003c/li\u003e\n\u003cli\u003eTell me more about..\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eLeave space: awkward silences (not too awkward)\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eReally \u003cem\u003elisten!\u003c/em\u003e; repress the urge to think of what you want to say 
next\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eRepeat statements back to people\u003c/p\u003e\n\u003c/li\u003e\n\u003cli\u003e\n\u003cp\u003eAsk about examples, context, etc.\u003c/p\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"a-roadmap\"\u003eA roadmap\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e1: create a comfortable entry point\u003c/li\u003e\n\u003cli\u003e2: go wide, deep into more personal and complex questions\u003c/li\u003e\n\u003cli\u003e3: focus on the problem, not the solution\u003c/li\u003e\n\u003cli\u003e4: focus on feelings\u0026mdash;feelings matter, how nice matters\u003c/li\u003e\n\u003cli\u003e5: end with conclusions and statements for what you\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"user-story\"\u003eUser Story\u003c/h2\u003e\n\u003cp\u003eThe user story should contain\u0026hellip;.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eA main character (your user)\u003c/li\u003e\n\u003cli\u003eCharacter background (motivation)\u003c/li\u003e\n\u003cli\u003eA plot (context)\u003c/li\u003e\n\u003cli\u003eClimax and Resolution\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"framework\"\u003eFramework\u003c/h3\u003e\n\u003col\u003e\n\u003cli\u003edescribe the user; who are they; what do they like or not like\u003c/li\u003e\n\u003cli\u003ean iStudio classic need statement\u003c/li\u003e\n\u003cli\u003efinish with a description of the emotional impact of using our software\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhuser_interviews/","tags":null,"title":"User Interviews"},{"categories":null,"contents":"utility elicitation is the process to go from Rational Preferences to a utility function. 
Its a bad idea to use money to do this, because money is not linear.\nConsider the best and worst possible events:\n\\begin{equation} \\overline{S}, \\underline{S} \\end{equation}\nWe assign the best event to have utility \\(1\\), and worst to have utility \\(0\\):\n\\begin{equation} \\begin{cases} U(\\overline{S}) = 1 \\\\ U(\\underline{S}) = 0 \\end{cases} \\end{equation}\nGiven some test event now \\(S\\), we try to find the \\(p\\) such that we can set up a lottery:\n\\begin{equation} S \\sim [\\overline{S}:p; \\underline{S}:(1-p)] \\end{equation}\nbecause the desirability of \\(S\\) is between the best and worst possible events, the continuity von Neumann and Morgenstern Axiom states that this \\(p\\) exists.\nOnce this \\(p\\) has been figured, we then assign:\n\\begin{equation} U(S) = p \\end{equation}\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility elicitation\u003c/a\u003e is the process to go from \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preference\u003c/a\u003es to a \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function. 
\u003cstrong\u003eIts a bad idea to use money to do this, because money is not linear.\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eConsider the best and worst possible events:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\overline{S}, \\underline{S}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eWe assign the best event to have \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e \\(1\\), and worst to have utility \\(0\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\begin{cases}\nU(\\overline{S}) = 1 \\\\\nU(\\underline{S}) = 0\n\\end{cases}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eGiven some test event now \\(S\\), we try to find the \\(p\\) such that we can set up a \u003ca href=\"/posts/kbhlottery/\"\u003elottery\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nS \\sim [\\overline{S}:p; \\underline{S}:(1-p)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ebecause the desirability of \\(S\\) is between the best and worst possible events, the \u003ca href=\"/posts/kbhuniqueness_and_existance/#continuity\"\u003econtinuity\u003c/a\u003e \u003ca href=\"/posts/kbhrational_preference/#von-neumann-and-morgenstern-axioms\"\u003evon Neumann and Morgenstern Axiom\u003c/a\u003e states that this \\(p\\) exists.\u003c/p\u003e\n\u003cp\u003eOnce this \\(p\\) has been figured, we then assign:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(S) = p\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_elicitation/","tags":null,"title":"utility elicitation"},{"categories":null,"contents":"quadratic utility \\begin{equation} U(x) = \\lambda x - x^{2} \\end{equation}\nwhere, \\(\\lambda\u0026gt;0\\) controls risk aversion: as risk increases, utility increases concavely, then eventually utility falls\nexponential utility \\begin{equation} U(x) = 1 - e^{-\\lambda x} \\end{equation}\nwhere \\(\\lambda \u0026gt;0\\) controls risk aversion. 
This is usually not plausible as utility because people\u0026rsquo;s utility doesn\u0026rsquo;t grow exponentially ever\npower utility see power utility\n","html":"\u003ch2 id=\"quadratic-utility\"\u003equadratic utility\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU(x) = \\lambda x - x^{2}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(\\lambda\u0026gt;0\\) controls \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e: as risk increases, utility increases concavely, then eventually utility falls\u003c/p\u003e\n\u003ch2 id=\"exponential-utility\"\u003eexponential utility\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nU(x) = 1 - e^{-\\lambda x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(\\lambda \u0026gt;0\\) controls \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e. This is usually not plausible as utility because people\u0026rsquo;s utility doesn\u0026rsquo;t grow exponentially ever\u003c/p\u003e\n\u003ch2 id=\"power-utility\"\u003epower utility\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhpower_utility/\"\u003epower utility\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_function/","tags":null,"title":"utility function"},{"categories":null,"contents":"Take the utility function from a bunch of POMDPs and combine them together using a fusion function.\n\\begin{equation} U^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a) \\end{equation}\nwhere \\(f\\) can be sum or min. 
The overall belief \\(b\\) is simply \\(B_1 \\times \u0026hellip; \\times B_{n}\\), which combines all beliefs together.\n","html":"\u003cp\u003eTake the utility function from a bunch of \u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003es and combine them together using a fusion function.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{*}(b,a) = f(U^{*}(b_1, a) \u0026hellip; U^{*}(b_{n}, a)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(f\\) can be \u003ccode\u003esum\u003c/code\u003e or \u003ccode\u003emin\u003c/code\u003e. The overall belief \\(b\\) is simply \\(B_1 \\times \u0026hellip; \\times B_{n}\\), which combines all beliefs together.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_fusion/","tags":null,"title":"utility fusion"},{"categories":null,"contents":"utility theory is a set of theories that deals with rational decision making through maximizing the expected utility.\nutility theory can be leveraged to choose the right actions in the observe-act cycle in a graphical network via decision networks\nadditional information never have a utility function that\u0026rsquo;s infinite If something has infinite utility, doing two of the good things is the same as doing one good thing, which is wrong.\nSay going to a Taylor concert has \\(+\\infty\\) utility. Then, you would be indifferent to the difference between Taylor + Harry vs. Taylor only. However, the former case clearly has higher utility as long as Harry concert doesn\u0026rsquo;t have negative utility.\nutility elicitation see utility elicitation\nexpected utility expected utility is the utility we expect from taking an action \\(a\\) at a state \\(o\\). 
To compute it based on transition probabilities:\n\\begin{equation} EU(a|o) = \\sum_{s\u0026rsquo;} p(s\u0026rsquo; | a,o) U(s\u0026rsquo;) \\end{equation}\nthe expected utility of taking some action \\(a\\) at an observation \\(o\\) is the probability of any given next state \\(s\u0026rsquo;\\) happening times the utility of being in that state \\(U(s\u0026rsquo;)\\).\nSee also expected utility of wealth.\nmaximum expected utility principle MEU states that a rational agent should choose an action which maximizes expected utility. That is,\n\\begin{equation} a^{*} = \\arg\\max_{a} EU(a|o) \\end{equation}\nNotably, this is not always the best action. This action maximizes utility NOT outcome.\nutility of Rational Preference For rational values, for two situations, \\(A, B\\), we have, with utility function \\(U\\):\n\\begin{equation} U(A) \u0026gt; U(B) \\iff A \\succ B \\end{equation}\n\\begin{equation} U(A) = U(B) \\iff A \\sim B \\end{equation}\nand this \\(U\\) is unique up to the same affine transformation\nrisk aversion see risk aversion\ncommon utility functions see utility function\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility theory\u003c/a\u003e is a set of theories that deals with rational decision making through maximizing the \u003cstrong\u003eexpected utility\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhutility_theory/\"\u003eutility theory\u003c/a\u003e can be leveraged to choose the right actions in the \u003ca href=\"/posts/kbhobserve_act_cycle/\"\u003eobserve-act cycle\u003c/a\u003e in a graphical network via \u003ca href=\"/posts/kbhdecision_networks/\"\u003edecision networks\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"never-have-a-utility-function-that-s-infinite\"\u003enever have a utility function that\u0026rsquo;s infinite\u003c/h3\u003e\n\u003cp\u003eIf something has infinite utility, doing two of the 
good things is the same as doing one good thing, which is wrong.\u003c/p\u003e\n\u003cp\u003eSay going to a Taylor concert has \\(+\\infty\\) utility. Then, you would be indifferent to the difference between Taylor + Harry vs. Taylor only. However, the former case clearly has higher utility as long as Harry concert doesn\u0026rsquo;t have negative utility.\u003c/p\u003e\n\u003ch3 id=\"utility-elicitation--kbhutility-elicitation-dot-md\"\u003e\u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility elicitation\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhutility_elicitation/\"\u003eutility elicitation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"expected-utility\"\u003eexpected utility\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#expected-utility\"\u003eexpected utility\u003c/a\u003e is the utility we expect from taking an action \\(a\\) at a state \\(o\\). To compute it based on transition probabilities:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEU(a|o) = \\sum_{s\u0026rsquo;} p(s\u0026rsquo; | a,o) U(s\u0026rsquo;)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethe expected \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of taking some action \\(a\\) at an observation \\(o\\) is the \u003ca href=\"/posts/kbhprobability/\"\u003eprobability\u003c/a\u003e of any given next state \\(s\u0026rsquo;\\) happening times the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e of being in that state \\(U(s\u0026rsquo;)\\).\u003c/p\u003e\n\u003cp\u003eSee also \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003eexpected utility of wealth\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"maximum-expected-utility-principle\"\u003emaximum expected utility principle\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"#maximum-expected-utility-principle\"\u003eMEU\u003c/a\u003e states that a rational agent should choose an action which maximizes \u003ca href=\"#expected-utility\"\u003eexpected utility\u003c/a\u003e. 
That is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na^{*} = \\arg\\max_{a} EU(a|o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eNotably, this is \u003cstrong\u003enot always the best action\u003c/strong\u003e. This action maximizes \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e \u003cstrong\u003eNOT\u003c/strong\u003e outcome.\u003c/p\u003e\n\u003ch3 id=\"utility-of-rational-preference--kbhrational-preference-dot-md\"\u003eutility of \u003ca href=\"/posts/kbhrational_preference/\"\u003eRational Preference\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003eFor rational values, for two situations, \\(A, B\\), we have, with \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e function \\(U\\):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(A) \u0026gt; U(B) \\iff A \\succ B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(A) = U(B) \\iff A \\sim B\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand this \\(U\\) is unique up to the same \u003ca href=\"/posts/kbhaffine_transformation/\"\u003eaffine transformation\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"risk-aversion--kbhexpected-utility-of-wealth-dot-md\"\u003e\u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhexpected_utility_of_wealth/\"\u003erisk aversion\u003c/a\u003e\u003c/p\u003e\n\u003ch3 id=\"common-utility-functions\"\u003ecommon utility functions\u003c/h3\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility function\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhutility_theory/","tags":null,"title":"utility theory"},{"categories":null,"contents":"We apply the Bellman Expectation Equation and selecting the utility that is calculated by taking the most optimal action given the current utility:\n\\begin{equation} U_{k+1}(s) = \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) 
U_{k}(s\u0026rsquo;)) \\end{equation}\nThis iterative process is called the Bellman backup, or Bellman update.\n\\begin{equation} U_1 \\dots U_{k} \\dots U^{*} \\end{equation}\neventually will converge into the optimal value function. After which, we just extract the greedy policy from the utility to get a policy to use.\nWe stop when the Bellman Residual hits a the desired error threshold:\nBellman Residual Take the L-\\(\\infty\\) norm of \\(U^{k+1}-U^{k}\\) (that is, take \\(||U_{k+1} - U_{k}||_{\\infty}\\). We call that the Bellman Residual. If this Bellman Residual drops below \\(\\delta\\), it is shown that the error between \\(U^{*}\\) (convergence) and \\(U_{k}\\) will only be:\n\\begin{equation} \\epsilon = \\frac{\\delta \\gamma}{(1-\\gamma)} \\end{equation}\nSo as long as the Bellman Residual between your two updates \\(\\leq \\delta\\), you know that you are at most \\(\\epsilon\\) away from the optimal utility.\nYou will note that as future discount \\(\\gamma \\to 1\\), this error bound becomes much larger. Therefore, you have to iterate more to get to the same \\(\\epsilon\\). You need more iterations when \\(\\gamma \\to 1\\).\nNotably, the loss of some arbitrary utility derived from policy evaluation is:\n\\begin{equation} || U^{\\pi} - U^{*} || \u0026lt; \\frac{2\\epsilon \\gamma}{1-\\gamma} \\end{equation}\nasynchronous value iteration We choose an ordering of states. We then loop through the entire list, updating the value function. 
Then, we loop through this system multiple times until the system converged.\nThat is, instead of creating a list of things \\(U_{k}\\), keeping only the current current one in memory, we come up with some:\n\\begin{equation} U(s) \\leftarrow \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;)) \\end{equation}\nThe idea is, instead of keeping all of the \\(U_{k-1}\\) until you have calculated all of \\(U_{k}\\) for each state, we just use an ordering of the states to just use whatever value we calculated last.\ntime complexity \\begin{equation} O(S^{2}A) \\end{equation}\nwhere \\(S\\) is the number of states and \\(A\\) the number of actions.\nloop over all states in each update loop over all actions to figure out the max loop over all next states and calculate their utility POMDP value-iteration compute alpha vectors for all one-step plans (i.e. conditional plans that does just one action and gives up) alpha vector pruning on any plans that are dominated generate all possible two-step conditional plans over all actions using combinations of non-pruned one-step plans above as SUBPLANS (yes, you can use a one-step plan twice) repeat steps 2-3 see also performing value-iteration naively with one-step lookahead in POMDP.\nPOMDP Bellman Update Say you want to extract a policy out of a bunch of alpha vectors.\nLet \\(\\alpha \\in \\Gamma\\), a set of alpha vectors; we obtain a new alpha vector \\(U\u0026rsquo;(b) = [U(s_0) \\dots U(s_{n})]\\) by:\n\\begin{equation} U\u0026rsquo;(b) = \\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U(b))] \\end{equation}\nwhere:\n\\begin{equation} R(b,a) = \\sum_{s}^{} R(s,a)b(s) \\end{equation}\n\\begin{align} P(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\ \u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) b(s) \\end{align}\nand\n\\begin{equation} U^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b 
\\end{equation}\n","html":"\u003cp\u003eWe apply the \u003ca href=\"/posts/kbhpolicy_evaluation/#bellman-expectation-equation\"\u003eBellman Expectation Equation\u003c/a\u003e and selecting the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e that is calculated by taking the most optimal action given the current \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_{k+1}(s) = \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) U_{k}(s\u0026rsquo;))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis iterative process is called the \u003ca href=\"/posts/kbhvalue_iteration/\"\u003eBellman backup\u003c/a\u003e, or \u003ca href=\"/posts/kbhvalue_iteration/\"\u003eBellman update\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU_1 \\dots U_{k} \\dots U^{*}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eeventually will converge into the \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal value function\u003c/a\u003e. After which, we just extract the \u003ca href=\"/posts/kbhaction_value_function/#value-function-policy\"\u003egreedy policy\u003c/a\u003e from the \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e to get a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e to use.\u003c/p\u003e\n\u003cp\u003eWe stop when the \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e hits a the desired error threshold:\u003c/p\u003e\n\u003ch2 id=\"bellman-residual\"\u003eBellman Residual\u003c/h2\u003e\n\u003cp\u003eTake the \u003ca href=\"/posts/kbhl_infty/\"\u003eL-\\(\\infty\\)\u003c/a\u003e norm of \\(U^{k+1}-U^{k}\\) (that is, take \\(||U_{k+1} - U_{k}||_{\\infty}\\). We call that the \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e. 
If this \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e drops below \\(\\delta\\), it is shown that the error between \\(U^{*}\\) (convergence) and \\(U_{k}\\) will only be:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\epsilon = \\frac{\\delta \\gamma}{(1-\\gamma)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eSo as long as the \u003ca href=\"#bellman-residual\"\u003eBellman Residual\u003c/a\u003e between your two updates \\(\\leq \\delta\\), you know that you are at most \\(\\epsilon\\) away from the \u003ca href=\"/posts/kbhpolicy/#optimal-policy\"\u003eoptimal utility\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003eYou will note that as future discount \\(\\gamma \\to 1\\), this error bound becomes much larger. Therefore, you have to iterate more to get to the same \\(\\epsilon\\).\u003c/strong\u003e You need more iterations when \\(\\gamma \\to 1\\).\u003c/p\u003e\n\u003cp\u003eNotably, the loss of some arbitrary \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e derived from \u003ca href=\"/posts/kbhpolicy_evaluation/\"\u003epolicy evaluation\u003c/a\u003e is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n|| U^{\\pi} - U^{*} || \u0026lt; \\frac{2\\epsilon \\gamma}{1-\\gamma}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"asynchronous-value-iteration\"\u003easynchronous value iteration\u003c/h2\u003e\n\u003cp\u003eWe choose an ordering of states. We then loop through the entire list, updating the value function. 
Then, we loop through this system multiple times until the system converged.\u003c/p\u003e\n\u003cp\u003eThat is, instead of creating a list of things \\(U_{k}\\), keeping only the current current one in memory, we come up with some:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(s) \\leftarrow \\max_{a} \\qty(R(s,a) + \\gamma \\sum_{s\u0026rsquo;} T(s\u0026rsquo; | s,a) U(s\u0026rsquo;))\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThe idea is, instead of keeping all of the \\(U_{k-1}\\) until you have calculated all of \\(U_{k}\\) for each state, we just use an ordering of the states to just use whatever value we calculated last.\u003c/p\u003e\n\u003ch2 id=\"time-complexity\"\u003etime complexity\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nO(S^{2}A)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(S\\) is the number of states and \\(A\\) the number of actions.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eloop over all states in each update\u003c/li\u003e\n\u003cli\u003eloop over all actions to figure out the max\u003c/li\u003e\n\u003cli\u003eloop over all next states and calculate their \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"pomdp--kbhpartially-observable-markov-decision-process-dot-md--value-iteration\"\u003e\u003ca href=\"/posts/kbhpartially_observable_markov_decision_process/\"\u003ePOMDP\u003c/a\u003e value-iteration\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003ecompute \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es for all one-step plans (i.e. 
\u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es that does just one action and gives up)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhalpha_vector/#id-a11af4cf-7e36-4b3f-876f-e6a26cf6817e-alpha-vector-pruning\"\u003ealpha vector pruning\u003c/a\u003e on any plans that are dominated\u003c/li\u003e\n\u003cli\u003egenerate all possible two-step \u003ca href=\"/posts/kbhconditional_plan/\"\u003econditional plan\u003c/a\u003es over all actions using combinations of non-pruned one-step plans above as \u003cstrong\u003e\u003cstrong\u003eSUBPLANS\u003c/strong\u003e\u003c/strong\u003e (yes, you can use a one-step plan twice)\u003c/li\u003e\n\u003cli\u003erepeat steps 2-3\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003esee also performing value-iteration naively with \u003ca href=\"/posts/kbhalpha_vector/#one-step-lookahead-in-pomdp\"\u003eone-step lookahead in POMDP\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"pomdp-bellman-update\"\u003ePOMDP Bellman Update\u003c/h2\u003e\n\u003cp\u003eSay you want to extract a \u003ca href=\"/posts/kbhpolicy/\"\u003epolicy\u003c/a\u003e out of a bunch of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es.\u003c/p\u003e\n\u003cp\u003eLet \\(\\alpha \\in \\Gamma\\), a set of \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003es; we obtain a new \u003ca href=\"/posts/kbhalpha_vector/\"\u003ealpha vector\u003c/a\u003e \\(U\u0026rsquo;(b) = [U(s_0) \\dots U(s_{n})]\\) by:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU\u0026rsquo;(b) = \\max_{a}\\qty[R(b,a)+\\gamma \\qty(\\sum_{o}^{}P(o|b,a) U(b))]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nR(b,a) = \\sum_{s}^{} R(s,a)b(s)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nP(o|b,a) \u0026amp;= \\sum_{s}^{} p(o|s,a) b(s) \\\\\n\u0026amp;= \\sum_{s}^{} \\sum_{s\u0026rsquo;}^{} T(s\u0026rsquo;|s,a) O(o|s\u0026rsquo;,a) 
b(s)\n\\end{align}\u003c/p\u003e\n\u003cp\u003eand\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU^{\\Gamma}(b) = \\max_{\\alpha \\in \\Gamma} \\alpha^{\\top} b\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvalue_iteration/","tags":null,"title":"value iteration"},{"categories":null,"contents":"Say we have a system:\nStates: 4\u0026mdash;school, internship, job, jungle Actions: 2\u0026mdash;stay, graduate create transition model Create tables of size \\(S \\times S\\) (that is, 4x4), one for each action. These are our transition models. Rows are the states where we took the action, columns are the states which are the results of the action, and the values are the probability of that transition happening given you took the action.\nEach row should sum up to \\(1\\): after an action, you should always end up at some state.\nenumerate rewards and discount for us, we are going to say that:\n\\(R(s_1)= -1\\) \\(R(s_2)= +1\\) \\(R(s_3) = +5\\) the rest of this should work if your states are parameterized by action.\nWe are going to discount by \\(0.9\\)\niterate! for each state\u0026hellip; calculate the values within the sum of the Bellman update for each action as well as the instantaneous reward for being in that state get the maximum value of that store for the next iteration ","html":"\u003cp\u003eSay we have a system:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-20_12-11-30_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003col\u003e\n\u003cli\u003eStates: 4\u0026mdash;school, internship, job, jungle\u003c/li\u003e\n\u003cli\u003eActions: 2\u0026mdash;stay, graduate\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"create-transition-model\"\u003ecreate transition model\u003c/h2\u003e\n\u003cp\u003eCreate tables of size \\(S \\times S\\) (that is, 4x4), one for each action. These are our transition models. 
Rows are the states where we took the action, columns are the states which are the results of the action, and the values are the probability of that transition happening given you took the action.\u003c/p\u003e\n\u003cp\u003eEach row should sum up to \\(1\\): after an action, you should always end up at \u003cem\u003esome\u003c/em\u003e state.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-20_12-18-18_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003ch2 id=\"enumerate-rewards-and-discount\"\u003eenumerate rewards and discount\u003c/h2\u003e\n\u003cp\u003efor us, we are going to say that:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(R(s_1)= -1\\)\u003c/li\u003e\n\u003cli\u003e\\(R(s_2)= +1\\)\u003c/li\u003e\n\u003cli\u003e\\(R(s_3) = +5\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ethe rest of this should work if your states are parameterized by action.\u003c/p\u003e\n\u003cp\u003eWe are going to discount by \\(0.9\\)\u003c/p\u003e\n\u003ch2 id=\"iterate\"\u003eiterate!\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003efor each state\u0026hellip;\n\u003col\u003e\n\u003cli\u003ecalculate the values within the sum of the \u003ca href=\"/posts/kbhvalue_iteration/\"\u003eBellman update\u003c/a\u003e for each action as well as the instantaneous reward for being in that state\u003c/li\u003e\n\u003cli\u003eget the maximum value of that\u003c/li\u003e\n\u003cli\u003estore for the next iteration\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ol\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvalue_iteration_in_practice/","tags":null,"title":"value iteration, in practice"},{"categories":null,"contents":"VOI is a measure of how much observing something changes your action if you are a rational agent.\nThe value of information a measure for how much observing an additional variable is expected to increase our utility. 
VOI can never be negative, and does not take into account the COST of performing the observation.\nconstituents \\(o\\): an observation \\(O\u0026rsquo;\\): a possible observation to run which yield \\(o\u0026rsquo;_{j}\\) different outcomes requirements \\begin{equation} VOI(O\u0026rsquo;|o) = (\\sum_{o\u0026rsquo;} P(o\u0026rsquo;|o) EU^{*}(o, o\u0026rsquo;)) - EU^{*}(o) \\end{equation}\nwhere, \\(EU^{*}(o_{1} \\dots o_{n})\\) is the maximum expected utility given observations \\(o_1, \u0026hellip;, o_{n}\\), that is:\n\\begin{equation} EU^{*}(o_1, \\dots, o_{n}) = \\max_{a} EU(o_1, \\dots, o_{n}) \\end{equation}\n\u0026ldquo;the value of an observation is the sum of the MEU of each possible outcome from that new observation, time their probability of occurance, subtracted by the MEU of the current observation\u0026rdquo;\nadditional information process of observation selection Here\u0026rsquo;s how you would select what variables to observe.\nmake observation determine value of information of anything you haven\u0026rsquo;t observed yet select the next feature to observe repeat 1-3 wait until its no longer beneficial to observe any more variables make decision based on observations This is not the true optimum: its only a heuristic!\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvalue_of_information/\"\u003eVOI\u003c/a\u003e is a measure of how much observing something changes your action if you are a rational agent.\u003c/p\u003e\n\u003cp\u003eThe \u003ca href=\"/posts/kbhvalue_of_information/\"\u003evalue of information\u003c/a\u003e a measure for how much observing an additional variable is expected to \u003cstrong\u003eincrease\u003c/strong\u003e our \u003ca href=\"/posts/kbhutility_theory/\"\u003eutility\u003c/a\u003e. 
\u003ca href=\"/posts/kbhvalue_of_information/\"\u003eVOI\u003c/a\u003e can never be negative, and does not take into account the \u003cstrong\u003eCOST\u003c/strong\u003e of performing the observation.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\\(o\\): an observation\u003c/li\u003e\n\u003cli\u003e\\(O\u0026rsquo;\\): a possible observation to run which yield \\(o\u0026rsquo;_{j}\\) different outcomes\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nVOI(O\u0026rsquo;|o) = (\\sum_{o\u0026rsquo;} P(o\u0026rsquo;|o) EU^{*}(o, o\u0026rsquo;)) - EU^{*}(o)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere, \\(EU^{*}(o_{1} \\dots o_{n})\\) is the \u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003emaximum expected utility\u003c/a\u003e given observations \\(o_1, \u0026hellip;, o_{n}\\), that is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nEU^{*}(o_1, \\dots, o_{n}) = \\max_{a} EU(o_1, \\dots, o_{n})\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the value of an observation is the sum of the \u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003eMEU\u003c/a\u003e of each possible outcome from that new observation, time their probability of occurance, subtracted by the \u003ca href=\"/posts/kbhutility_theory/#maximum-expected-utility-principle\"\u003eMEU\u003c/a\u003e of the current observation\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003ch3 id=\"process-of-observation-selection\"\u003eprocess of observation selection\u003c/h3\u003e\n\u003cp\u003eHere\u0026rsquo;s how you would select what variables to observe.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003emake observation\u003c/li\u003e\n\u003cli\u003edetermine \u003ca href=\"/posts/kbhvalue_of_information/\"\u003evalue of 
information\u003c/a\u003e of anything you haven\u0026rsquo;t observed yet\u003c/li\u003e\n\u003cli\u003eselect the next feature to observe\u003c/li\u003e\n\u003cli\u003erepeat 1-3\u003c/li\u003e\n\u003cli\u003ewait until its no longer beneficial to observe any more variables\u003c/li\u003e\n\u003cli\u003emake decision based on observations\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is not the true optimum: its only a heuristic!\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvalue_of_information/","tags":null,"title":"value of information"},{"categories":null,"contents":"variance (also known as second central moment) is a way of measuring spread:\n\\begin{align} Var(X) \u0026amp;= E[(X-E(X))^{2}] \\\\ \u0026amp;= E[X^{2}] - (E[X])^{2} \\\\ \u0026amp;= \\qty(\\sum_{x}^{} x^{2} p\\qty(X=x)) - (E[X])^{2} \\end{align}\n\u0026ldquo;on average, how far is the probability of \\(X\\) from its expectation\u0026rdquo;\nThe expression(s) are derived below. Recall that standard deviation is a square root of the variance.\ncomputing variance: \\begin{align} Var(X) \u0026amp;= E[(X - \\mu)^{2}] \\\\ \u0026amp;= \\sum_{x}^{} (x-\\mu)^{2} p(X) \\end{align}\nbased on the law of the Unconscious statistician. 
And then, we do algebra:\nSo, for any random variable \\(X\\), we say:\n\\begin{align} Var(X) \u0026amp;= E[X^{2}] - (E[X])^{2} \\\\ \u0026amp;= \\qty(\\sum_{x}^{} x^{2} p(X=x)) - (E[X])^{2} \\end{align}\nbased on the law of Unconscious statistician.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e (also known as \u003ca href=\"/posts/kbhvariance/\"\u003esecond central moment\u003c/a\u003e) is a way of measuring spread:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(X) \u0026amp;= E[(X-E(X))^{2}] \\\\\n\u0026amp;= E[X^{2}] - (E[X])^{2} \\\\\n\u0026amp;= \\qty(\\sum_{x}^{} x^{2} p\\qty(X=x)) - (E[X])^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;on average, how far is the probability of \\(X\\) from its \u003ca href=\"/posts/kbhexpectation/\"\u003eexpectation\u003c/a\u003e\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eThe expression(s) are derived below. Recall that \u003ca href=\"\"\u003estandard deviation\u003c/a\u003e is a square root of the \u003ca href=\"/posts/kbhvariance/\"\u003evariance\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"computing-variance\"\u003ecomputing variance:\u003c/h2\u003e\n\u003cp\u003e\\begin{align}\nVar(X) \u0026amp;= E[(X - \\mu)^{2}] \\\\\n\u0026amp;= \\sum_{x}^{} (x-\\mu)^{2} p(X)\n\\end{align}\u003c/p\u003e\n\u003cp\u003ebased on the law of the \u003ca href=\"/posts/kbhexpectation/#unconscious-statistician\"\u003eUnconscious statistician\u003c/a\u003e. 
And then, we do algebra:\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2023-10-13_15-43-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSo, for any random variable \\(X\\), we say:\u003c/p\u003e\n\u003cp\u003e\\begin{align}\nVar(X) \u0026amp;= E[X^{2}] - (E[X])^{2} \\\\\n\u0026amp;= \\qty(\\sum_{x}^{} x^{2} p(X=x)) - (E[X])^{2}\n\\end{align}\u003c/p\u003e\n\u003cp\u003ebased on the law of \u003ca href=\"/posts/kbhexpectation/#unconscious-statistician\"\u003eUnconscious statistician\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvariance/","tags":null,"title":"variance"},{"categories":null,"contents":" Secrets of Silicon Valley - Horowitz Looking for people who have feel for the problem: people need to believe in the problem Team: can people come with execution? people that are good at startups which are usually not good at later stage stuff Buy a startup and kick out the founders This is very typical Team and idea are easy to decouple Vetting problems Lack of market Technically insatiability \u0026ldquo;Unbelievable stupidity\u0026rdquo;: calcium is so cheap Idea goes through many morphs; getting the credit back People wiling to have a meeting? Decoupling value proposition =\u0026gt; iStudio as a service\nRandom Need: Nueva Alumni Network Maybe set up a Nueva alumni network? What could we do to facilitate the Nueva alumni network; extraction of mutual value from the next work.\nNueva alumni as a service.\nInnovation consultants Ideas are no longer valuable, which ideas to peruse is better. 
\u0026ldquo;helping people along in their relationship with the idea or with each other.\u0026rdquo; Decoupling solution with the customer with the most value.\n","html":"\u003col\u003e\n\u003cli\u003eSecrets of Silicon Valley - Horowitz\n\u003col\u003e\n\u003cli\u003eLooking for people who have \u003cem\u003efeel\u003c/em\u003e for the problem: people need to believe in the problem\u003c/li\u003e\n\u003cli\u003eTeam: can people come with execution? people that are good at startups which are usually not good at later stage stuff\n\u003col\u003e\n\u003cli\u003eBuy a startup and kick out the founders\u003c/li\u003e\n\u003cli\u003eThis is very typical\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eTeam and idea are easy to decouple\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003eVetting problems\n\u003col\u003e\n\u003cli\u003eLack of market\u003c/li\u003e\n\u003cli\u003eTechnically insatiability\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;Unbelievable stupidity\u0026rdquo;: calcium is so cheap\u003c/li\u003e\n\u003cli\u003eIdea goes through many morphs; getting the credit back\u003c/li\u003e\n\u003c/ol\u003e\n\u003c/li\u003e\n\u003cli\u003ePeople wiling to have a meeting?\u003c/li\u003e\n\u003cli\u003eDecoupling value proposition\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003e=\u0026gt; iStudio as a service\u003c/p\u003e\n\u003ch2 id=\"random-need-nueva-alumni-network\"\u003eRandom Need: Nueva Alumni Network\u003c/h2\u003e\n\u003cp\u003eMaybe set up a Nueva alumni network? What could we do to facilitate the Nueva alumni network; extraction of mutual value from the next work.\u003c/p\u003e\n\u003cp\u003eNueva alumni as a service.\u003c/p\u003e\n\u003ch2 id=\"innovation-consultants\"\u003eInnovation consultants\u003c/h2\u003e\n\u003cp\u003eIdeas are no longer valuable, which ideas to peruse is better. 
\u0026ldquo;helping people along in their relationship with the idea or with each other.\u0026rdquo; Decoupling solution with the customer with the most value.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvc_thing/","tags":null,"title":"vc thing"},{"categories":null,"contents":"A vector is an element of a vector space. They are also called a point.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e is an element of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e. They are also called a \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvector/","tags":null,"title":"vector"},{"categories":null,"contents":"vector semantics is a sense encoding method.\n\u0026ldquo;a meaning of the word should be tied to how they are used\u0026rdquo;\nwe measure similarity between word vectors with cosine similarity. see also vector-space model.\nmotivation idea 1 neighboring words can help infer semantic meaning of new words: \u0026ldquo;we can define a word based on its distribution in language use\u0026rdquo;\nidea 2 meaning should be in a point in space, just like affective meaning (i.e. a score in each dimension).\nthat is: a word should be a vector in n space\nvector semantics Each word is a point based on distribution; each word is a vector and similar words are nearby in semantic space.\nThe intuition is that classifiers can generalize to similar, but unseen words more easily by processing embeddings.\ntransposing a Term-Document Matrix Typically we read a Term-Document Matrix column-wise, to understand what each document can be encoded in terms of words.\nHowever, if you read it row-wise, you can see a distribution for words over the documents.\nterm-term matrix a term-term matrix is a \\(|V| \\times |V|\\) matrix that measures co-occurrence in some context. 
So each cell would be the number of times the two words co-occur in some small window.\npoint-wise mutual information we usually normalize a Term-Document Matrix via TF-IDF. However, for term-term matrix, we usually normalize it as:\n\\begin{equation} PMI(w_1, w_2) = \\log \\frac{p(w_1,w_2)}{p(w_1)p(w_2)} \\end{equation}\n\u0026ldquo;would something appear more often then change\u0026rdquo;\nword2vec see word2vec\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvector_semantics/\"\u003evector semantics\u003c/a\u003e is a \u003ca href=\"/posts/kbhsense/\"\u003esense\u003c/a\u003e encoding method.\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;a meaning of the word should be tied to how they are used\u0026rdquo;\u003c/p\u003e\n\u003cp\u003ewe measure similarity between word \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es with \u003ca href=\"/posts/kbhranked_information_retrieval/#cosine-similarity\"\u003ecosine similarity\u003c/a\u003e. see also \u003ca href=\"/posts/kbhranked_information_retrieval/#vector-space-model\"\u003evector-space model\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003emotivation\u003c/h2\u003e\n\u003ch3 id=\"idea-1\"\u003eidea 1\u003c/h3\u003e\n\u003cp\u003eneighboring words can help infer semantic meaning of new words: \u0026ldquo;we can define a word based on its distribution in language use\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"idea-2\"\u003eidea 2\u003c/h3\u003e\n\u003cp\u003emeaning should be in a point in space, just like \u003ca href=\"/posts/kbhsense/#affective-meaning\"\u003eaffective meaning\u003c/a\u003e (i.e. 
a score in each dimension).\u003c/p\u003e\n\u003cp\u003ethat is: a word should be a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e in n space\u003c/p\u003e\n\u003ch2 id=\"vector-semantics--kbhvector-semantics-dot-md\"\u003e\u003ca href=\"/posts/kbhvector_semantics/\"\u003evector semantics\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eEach word is a point based on distribution; each word is a \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003e and similar words are nearby in semantic space.\u003c/p\u003e\n\u003cp\u003eThe intuition is that classifiers can generalize to similar, but unseen words more easily by processing embeddings.\u003c/p\u003e\n\u003ch2 id=\"transposing-a-term-document-matrix--kbhterm-document-matrix-dot-md\"\u003etransposing a \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e\u003c/h2\u003e\n\u003cp\u003eTypically we read a \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e column-wise, to understand what each document can be encoded in terms of words.\u003c/p\u003e\n\u003cp\u003eHowever, if you read it row-wise, you can see a distribution for words over the documents.\u003c/p\u003e\n\u003ch2 id=\"term-term-matrix\"\u003eterm-term matrix\u003c/h2\u003e\n\u003cp\u003ea \u003ca href=\"#term-term-matrix\"\u003eterm-term matrix\u003c/a\u003e is a \\(|V| \\times |V|\\) matrix that measures co-occurrence in some context. So each cell would be the number of times the two words co-occur in some small window.\u003c/p\u003e\n\u003ch3 id=\"point-wise-mutual-information\"\u003epoint-wise mutual information\u003c/h3\u003e\n\u003cp\u003ewe usually normalize a \u003ca href=\"/posts/kbhterm_document_matrix/\"\u003eTerm-Document Matrix\u003c/a\u003e via \u003ca href=\"/posts/kbhranked_information_retrieval/#tf-idf\"\u003eTF-IDF\u003c/a\u003e. 
However, for \u003ca href=\"#term-term-matrix\"\u003eterm-term matrix\u003c/a\u003e, we usually normalize it as:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPMI(w_1, w_2) = \\log \\frac{p(w_1,w_2)}{p(w_1)p(w_2)}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;would something appear more often then change\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"word2vec\"\u003eword2vec\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"/posts/kbhword2vec/\"\u003eword2vec\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvector_semantics/","tags":null,"title":"vector semantics"},{"categories":null,"contents":"A vector space is an object between a field and a group; it has two ops\u0026mdash;addition and scalar multiplication. Its not quite a field and its more than a group.\nconstituents A set \\(V\\) An addition on \\(V\\) An scalar multiplication on \\(V\\) such that\u0026hellip;\nrequirements commutativity in add.: \\(u+v=v+u\\) associativity in add. and mult.: \\((u+v)+w=u+(v+w)\\); \\((ab)v=a(bv)\\): \\(\\forall u,v,w \\in V\\) and \\(a,b \\in \\mathbb{F}\\) distributivity: goes both ways \\(a(u+v) = au+av\\) AND!! \\((a+b)v=av+bv\\): \\(\\forall a,b \\in \\mathbb{F}\\) and \\(u,v \\in V\\) additive identity: \\(\\exists 0 \\in V: v+0=v \\forall v \\in V\\) additive inverse: \\(\\forall v \\in V, \\exists w \\in V: v+w=0\\) multiplicative identity: \\(1v=v \\forall v \\in V\\) additional information Elements of a vector space are called vectors or points. vector space \u0026ldquo;over\u0026rdquo; fields Scalar multiplication is not in the set \\(V\\); instead, \u0026ldquo;scalars\u0026rdquo; \\(\\lambda\\) come from this magic faraway land called \\(\\mathbb{F}\\). 
The choice of \\(\\mathbb{F}\\) for each vector space makes it different; so, when precision is needed, we can say that a vector space is \u0026ldquo;over\u0026rdquo; some \\(\\mathbb{F}\\) which contributes its scalars.\nTherefore:\nA vector space over \\(\\mathbb{R}\\) is called a real vector space A vector space over \\(\\mathbb{C}\\) is called a real vector space ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e is an object between a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e and a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e; it has two ops\u0026mdash;addition and scalar multiplication. Its not quite a \u003ca href=\"/posts/kbhfield/\"\u003efield\u003c/a\u003e and its more than a \u003ca href=\"/posts/kbhgroup/\"\u003egroup\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"constituents\"\u003econstituents\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eA set \\(V\\)\u003c/li\u003e\n\u003cli\u003eAn \u003ca href=\"/posts/kbhadding/\"\u003eaddition\u003c/a\u003e on \\(V\\)\u003c/li\u003e\n\u003cli\u003eAn \u003ca href=\"/posts/kbhscalar_multiplication/\"\u003escalar multiplication\u003c/a\u003e on \\(V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003esuch that\u0026hellip;\u003c/p\u003e\n\u003ch2 id=\"requirements\"\u003erequirements\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhcommutivity/\"\u003ecommutativity\u003c/a\u003e in add.: \\(u+v=v+u\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhassociative/\"\u003eassociativity\u003c/a\u003e in add. and mult.: \\((u+v)+w=u+(v+w)\\); \\((ab)v=a(bv)\\): \\(\\forall u,v,w \\in V\\) and \\(a,b \\in \\mathbb{F}\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhdistributivity/\"\u003edistributivity\u003c/a\u003e: goes both ways \\(a(u+v) = au+av\\) AND!! 
\\((a+b)v=av+bv\\): \\(\\forall a,b \\in \\mathbb{F}\\) and \\(u,v \\in V\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhadditive_identity/\"\u003eadditive identity\u003c/a\u003e: \\(\\exists 0 \\in V: v+0=v \\forall v \\in V\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhinverses/\"\u003eadditive inverse\u003c/a\u003e: \\(\\forall v \\in V, \\exists w \\in V: v+w=0\\)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhmultiplicative_identity/\"\u003emultiplicative identity\u003c/a\u003e: \\(1v=v \\forall v \\in V\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"additional-information\"\u003eadditional information\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eElements of a \u003ca href=\"/posts/kbhvector_space/\"\u003evector space\u003c/a\u003e are called \u003ca href=\"/posts/kbhvector/\"\u003evector\u003c/a\u003es or \u003ca href=\"/posts/kbhvector/\"\u003epoint\u003c/a\u003es.\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"vector-space-over-fields\"\u003evector space \u0026ldquo;over\u0026rdquo; fields\u003c/h3\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhscalar_multiplication/\"\u003eScalar multiplication\u003c/a\u003e is not in the set \\(V\\); instead, \u0026ldquo;scalars\u0026rdquo; \\(\\lambda\\) come from this magic faraway land called \\(\\mathbb{F}\\). 
The choice of \\(\\mathbb{F}\\) for each vector space makes it different; so, when precision is needed, we can say that a vector space is \u0026ldquo;over\u0026rdquo; some \\(\\mathbb{F}\\) which contributes its scalars.\u003c/p\u003e\n\u003cp\u003eTherefore:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eA vector space over \\(\\mathbb{R}\\) is called a \u003cem\u003ereal vector space\u003c/em\u003e\u003c/li\u003e\n\u003cli\u003eA vector space over \\(\\mathbb{C}\\) is called a \u003cem\u003ereal vector space\u003c/em\u003e\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvector_space/","tags":null,"title":"vector space"},{"categories":null,"contents":"this is worse ","html":"\u003ch2 id=\"this-is-worse\"\u003ethis is worse\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhcraintech/","tags":null,"title":"VFUA"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhvgg/","tags":null,"title":"VGG"},{"categories":null,"contents":"VGGish is VGG, ish. VGGish is a network based on VGG which is pretrained on the audio-feature-extraction task.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e is \u003ca href=\"/posts/kbhvgg/\"\u003eVGG\u003c/a\u003e, ish. 
\u003ca href=\"/posts/kbhvggish/\"\u003eVGGish\u003c/a\u003e is a network based on \u003ca href=\"/posts/kbhvgg/\"\u003eVGG\u003c/a\u003e which is pretrained on the audio-feature-extraction task.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-23_23-29-00_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhvggish/","tags":null,"title":"VGGish"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhvietnam/","tags":null,"title":"Vietnam"},{"categories":null,"contents":"vietnamization is a political position held by Richard Nixon which is characterized by the slow replacement of American troops with Vietnamese ones.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvietnamization/\"\u003evietnamization\u003c/a\u003e is a political position held by \u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e which is characterized by the slow replacement of American troops with Vietnamese ones.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvietnamization/","tags":null,"title":"vietnamization"},{"categories":null,"contents":"We are trying to share a resource: memory; memory allows multiple processes to use a share pool of memory.\nkey goals multitasking: multiple processes should be able to use memory transparency: no process need to know that memory is shared; each process should be able to run regardless of the number/locations of processes isolation: processes shouldn\u0026rsquo;t be able to corrupt other processes\u0026rsquo; memory efficiency: shouldn\u0026rsquo;t be degraded by sharing virtual memory The operating system will translate virtual addresses (which are 0 based for every program, which isn\u0026rsquo;t a problem) to physical addresses in memory.\nthe OS doesn\u0026rsquo;t need to map all virtual addresses unless its needed (i.e. 
if the program is asking for it) worst case: we can kick out unused memory into disk, and load it back when needed This is an example of virtualization.\nOS memory Whenever a process makes a syscall, OS will be handed virtual memory addresses. How do we resolve it?\nSolution: *every process reserves some virtual memory for the OS\u0026mdash;all of these virtual addresses maps to the SAME PHYSICAL REGION for the OS.\nThen, the page map will have a kernel bit which marks this virtual region no read and no write.\ndynamic address translation The system will die if we try to do virtual mapping to physical mapping.\nSo we have a Memory Management Unit (MMU) to do:\nHow does an MMU work?\nbase and bound This is basically load-time relocation, but with virtual memory.\nassign a location in physical memory, call the base; during translation, we just add every virtual address by the base we can cap the virtual address space for each process by a bound, we can raise a bus error/segfault if it goes above the highest allowable The bound is a virtual address (the first invalid address in the virtual world), whereas the base is a physical address. This is both stored in the process control block.\nlast possible address: is (bound - 1)+base\ntranslation compare virtual address to bound, trap and raise if \u0026gt;= bound then, return virtual address + base importantly, we can arbitrary adjust base and bound.\ntradeoff good news\ninexpensive: just doing addition doesn\u0026rsquo;t require additional space: (just two addresses) separation: virtualization. 
bad news\none contiguous region: need to allocate free spcae fragmentation: because of the above growing can only happens upwards with bounds (and its kind of useless)\u0026mdash;we can\u0026rsquo;t move the stack up in virtual space, and we can\u0026rsquo;t give more space downwards, because that would cause negative addresses no read only memory (we\u0026rsquo;ll want to limit access to code segment, for instance) multiple segments Let\u0026rsquo;s break up multiple virtual address space into segments, and map each of those segments separately. EACH SEGMENT will have its own base and bound. So, you will store each struct in a map: [segment number: [segment base, segment bound, read only or not]].\ntranslation look up what segment a virtual address is in (we can do this by making the top couple bits of the virtual address the segment number, and the next bits as the offset into the segment) get that segment\u0026rsquo;s info compare that address\u0026rsquo; offset to that segment\u0026rsquo;s bound, if its \u0026gt;= limit, trap otherwise, to go the base of that segment and fetch data tradeoff features\nyou can recycle segments: if you have two instances of a program running, we can actually share read-only segments (such as code). you can not map the middle: because stack and data segments are independent, we can not map the hole in the middle until more data is asked you can grow things: if you run out of continuous space, you can grow the segment by either just doing it or by moving it and growing it (and indeed we now can move the stack down as the stack is addressed as the highest address) drawbacks\ngrowing can only happens upwards with bounds\u0026mdash;now that we can move the heap independently, growing the heap makes sense now; however, growing the STACK still is impossible because growing the stack would entail moving the base address in order to go downwards variable length segments\u0026mdash;extrernal fragmentation! 
small number of segments\u0026mdash;the [segment, offset] deign divides virtual addresses, so you have to decide segment number exogenously paging So let\u0026rsquo;s instead allocate memory in pages. Instead of variable-length segments that can GROW in base and bound and multiple segments, let\u0026rsquo;s force a specific size of memory in each chunk.\nvirtual address: virtual page number + offset physical address: physical page number + offset we map each page independently, and keep the offset. If a page is unused, internal fragmentation but not too bad. The stack can now grow downwards: because if it reaches into lower page numbers we can just map that page somewhere too.\nTo store page mappings, in a seperate storage location, we store a page map/page table: its an array of tuples, where the index is the virtual page number, and each entry has [(physical page, writable)].\nNotice that page continuity isn\u0026rsquo;t a problem: the upper digits just count up, and the lower digits tells you offset in that chunk:\n0x0000 - 0x0fff 0x1000 - 0x1fff 0x2000 - 0x2fff where, the first digit tells you the page number\n0x0 - 0x0 0x1 - 0x1 0x2 - 0x2 and the rest is the offset.\nAnd everything is contiunous, and automatically paged.\nFor instance, typically page sizes are 4kb\nPage Size Offset Number Digits 4096 bytes (16^3) 3 then the rest of the address would just be the page number.\nIntel\u0026rsquo;s implementation Virtual Addresses\nUnused (16 bits) Virtual page number (36 bits) Offset (12 bits) Physical Addresses\nPage number (40 bits) Offset (12 bits) translation chop off page number and offset translate the page number concat the two together internal fragmentation why not something simpler? single-tasking memory very bad idea:\nASSUME that there is only one process. Stack grows down, data grows up, and code sits at the bottom.\ntradeoff no isolation: even in this case, nothing is stopping the program from accessing memory in the OS reserve segment; which is bad. 
no multitasking: because, well, we have one program fragmentation: little bits of space all over the place load-time relocation separate processes.\nWhen program is compiled, it assumes that its initial address is 0x0; so, at load time, we have to go into the code segment when the program is set up and increment all of its memory addresses up.\ntradeoff no isolation: nothing is stopping the program from accessing memory in otherbody\u0026rsquo;s segments must decide the memory usage of a program ahead of time + cannot grow if needs more memory (we can\u0026rsquo;t move because the addresses would be in stack) external fragmentation (normal alloc problems) ","html":"\u003cp\u003eWe are trying to share a resource: memory; memory allows multiple processes to use a share pool of memory.\u003c/p\u003e\n\u003ch2 id=\"key-goals\"\u003ekey goals\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003emultitasking\u003c/strong\u003e: multiple processes should be able to use memory\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003etransparency\u003c/strong\u003e: no process need to know that memory is shared; each process should be able to run regardless of the number/locations of processes\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eisolation\u003c/strong\u003e: processes shouldn\u0026rsquo;t be able to corrupt other processes\u0026rsquo; memory\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eefficiency\u003c/strong\u003e: shouldn\u0026rsquo;t be degraded by sharing\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"virtual-memory\"\u003evirtual memory\u003c/h2\u003e\n\u003cp\u003eThe operating system will translate \u003cstrong\u003evirtual\u003c/strong\u003e addresses (which are 0 based for every program, which isn\u0026rsquo;t a problem) to \u003cstrong\u003ephysical\u003c/strong\u003e addresses in memory.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ethe OS doesn\u0026rsquo;t need to map all virtual addresses unless its needed (i.e. 
if the program is asking for it)\u003c/li\u003e\n\u003cli\u003eworst case: we can kick out unused memory into disk, and load it back when needed\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThis is an example of \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtualization\u003c/a\u003e.\u003c/p\u003e\n\u003ch2 id=\"os-memory\"\u003eOS memory\u003c/h2\u003e\n\u003cp\u003eWhenever a process makes a \u003cstrong\u003esyscall\u003c/strong\u003e, OS will be handed virtual memory addresses. How do we resolve it?\u003c/p\u003e\n\u003cp\u003eSolution: \u003cstrong\u003e*every process reserves some \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e for the OS\u003c/strong\u003e\u0026mdash;all of these virtual addresses maps to the \u003cstrong\u003eSAME PHYSICAL REGION\u003c/strong\u003e for the OS.\u003c/p\u003e\n\u003cp\u003eThen, the \u003ca href=\"/posts/kbhdemand_paging/#page-map\"\u003epage map\u003c/a\u003e will have a \u003cstrong\u003ekernel bit\u003c/strong\u003e which marks this virtual region no read and no write.\u003c/p\u003e\n\u003ch2 id=\"dynamic-address-translation\"\u003edynamic address translation\u003c/h2\u003e\n\u003cp\u003eThe system will die if we try to do virtual mapping to physical mapping.\u003c/p\u003e\n\u003cp\u003eSo we have a \u003ca href=\"#dynamic-address-translation\"\u003eMemory Management Unit\u003c/a\u003e (\u003ca href=\"#dynamic-address-translation\"\u003eMMU\u003c/a\u003e) to do:\u003c/p\u003e\n\u003cp\u003eHow does an \u003ca href=\"#dynamic-address-translation\"\u003eMMU\u003c/a\u003e work?\u003c/p\u003e\n\u003ch3 id=\"base-and-bound\"\u003ebase and bound\u003c/h3\u003e\n\u003cp\u003eThis is basically \u003ca href=\"#load-time-relocation\"\u003eload-time relocation\u003c/a\u003e, but with \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtual memory\u003c/a\u003e.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eassign a location in physical memory, call the \u003cstrong\u003ebase\u003c/strong\u003e; during 
translation, we just add every virtual address by the \u003cstrong\u003ebase\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ewe can cap the virtual address space for each process by a \u003cstrong\u003ebound\u003c/strong\u003e, we can raise a bus error/segfault if it goes above the highest allowable\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eThe \u003cstrong\u003ebound\u003c/strong\u003e is a virtual address (the first invalid address in the virtual world), whereas the \u003cstrong\u003ebase\u003c/strong\u003e is a physical address. This is both stored in the \u003cstrong\u003eprocess control block\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003elast possible address\u003c/strong\u003e: is (bound - 1)+base\u003c/p\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003ecompare virtual address to bound, \u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e and raise if \u0026gt;= \u003cstrong\u003ebound\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003ethen, return virtual address + \u003cstrong\u003ebase\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eimportantly, we can arbitrary adjust base and bound.\u003c/p\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003egood news\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003einexpensive\u003c/strong\u003e: just doing addition\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003edoesn\u0026rsquo;t require additional space\u003c/strong\u003e: (just two addresses)\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eseparation\u003c/strong\u003e: \u003ca href=\"/posts/kbhvirtual_memory/\"\u003evirtualization\u003c/a\u003e.\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003ebad news\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eone 
contiguous region\u003c/strong\u003e: need to allocate free spcae\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efragmentation\u003c/strong\u003e: because of the above\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003egrowing can only happens upwards with bounds\u003c/strong\u003e (and its kind of useless)\u0026mdash;we can\u0026rsquo;t move the stack up in virtual space, and we can\u0026rsquo;t give more space downwards, because that would cause negative addresses\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno read only memory\u003c/strong\u003e (we\u0026rsquo;ll want to limit access to code segment, for instance)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"multiple-segments\"\u003emultiple segments\u003c/h3\u003e\n\u003cp\u003eLet\u0026rsquo;s break up multiple virtual address space into segments, and map each of those segments separately. \u003cstrong\u003eEACH SEGMENT\u003c/strong\u003e will have its own \u003ca href=\"#base-and-bound\"\u003ebase and bound\u003c/a\u003e. 
So, you will store each struct in a map: \u003ccode\u003e[segment number: [segment base, segment bound, read only or not]]\u003c/code\u003e.\u003c/p\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003elook up what segment a virtual address is in (we can do this by making the top couple bits of the virtual address the segment number, and the next bits as the offset into the segment)\u003c/li\u003e\n\u003cli\u003eget that segment\u0026rsquo;s info\u003c/li\u003e\n\u003cli\u003ecompare that address\u0026rsquo; offset to that segment\u0026rsquo;s bound, if its \u0026gt;= limit, \u003ca href=\"/posts/kbhdispatching/#trap\"\u003etrap\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003eotherwise, to go the base of that segment and fetch data\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003efeatures\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eyou can recycle segments\u003c/strong\u003e: if you have two instances of a program running, we can actually share read-only segments (such as code).\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eyou can not map the middle\u003c/strong\u003e: because stack and data segments are independent, we can not map the hole in the middle until more data is asked\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eyou can grow things\u003c/strong\u003e: if you run out of continuous space, you can grow the segment by either just doing it or by moving it and growing it (and indeed we now can move the stack down as the stack is addressed as the highest address)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--list-separator--\u003e\n\u003cul\u003e\n\u003cli\u003e\n\u003cp\u003edrawbacks\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003egrowing can only happens upwards with bounds\u003c/strong\u003e\u0026mdash;now that we can move the heap 
independently, growing the heap makes sense now; however, growing the STACK \u003cstrong\u003estill\u003c/strong\u003e is impossible because growing the stack would entail moving the base address in order to go downwards\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003evariable length segments\u003c/strong\u003e\u0026mdash;extrernal fragmentation!\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003esmall number of segments\u003c/strong\u003e\u0026mdash;the [segment, offset] deign divides virtual addresses, so you have to decide segment number exogenously\u003c/li\u003e\n\u003c/ul\u003e\n\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"paging\"\u003epaging\u003c/h3\u003e\n\u003cp\u003eSo let\u0026rsquo;s instead allocate memory in pages. Instead of variable-length segments that can GROW in \u003ca href=\"#base-and-bound\"\u003ebase and bound\u003c/a\u003e and \u003ca href=\"#multiple-segments\"\u003emultiple segments\u003c/a\u003e, let\u0026rsquo;s force a specific size of memory in each chunk.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003evirtual address\u003c/strong\u003e: \u003cstrong\u003evirtual page number\u003c/strong\u003e + \u003cstrong\u003eoffset\u003c/strong\u003e\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003ephysical address\u003c/strong\u003e: \u003cstrong\u003ephysical page number\u003c/strong\u003e + \u003cstrong\u003eoffset\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe map each page independently, and keep the offset. If a page is unused, internal fragmentation but not too bad. 
The \u003cstrong\u003estack can now grow downwards\u003c/strong\u003e: because if it reaches into lower page numbers we can just map that page somewhere too.\u003c/p\u003e\n\u003cp\u003eTo store page mappings, in a seperate storage location, we store a \u003ca href=\"#paging\"\u003epage map\u003c/a\u003e/\u003ca href=\"#paging\"\u003epage table\u003c/a\u003e: its an array of tuples, where the index is the virtual page number, and each entry has [(physical page, writable)].\u003c/p\u003e\n\u003cp\u003eNotice that page continuity isn\u0026rsquo;t a problem: the upper digits just count up, and the lower digits tells you offset in that chunk:\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-asm\" data-lang=\"asm\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex0000\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x0fff\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex1000\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x1fff\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex2000\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan 
style=\"color:#ae81ff\"\u003e0x2fff\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003ewhere, the first digit tells you the page number\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-asm\" data-lang=\"asm\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex0\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x0\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex1\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x1\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#960050;background-color:#1e0010\"\u003e0\u003c/span\u003e\u003cspan style=\"color:#75af00\"\u003ex2\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e-\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e0x2\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eand the rest is the offset.\u003c/p\u003e\n\u003cp\u003eAnd everything is contiunous, and automatically paged.\u003c/p\u003e\n\u003cp\u003eFor instance, typically page sizes are 4kb\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003ePage Size\u003c/th\u003e\n\u003cth\u003eOffset Number Digits\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003e4096 bytes 
(16^3)\u003c/td\u003e\n\u003ctd\u003e3\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003ethen the rest of the address would just be the page number.\u003c/p\u003e\n\u003ch4 id=\"intel-s-implementation\"\u003eIntel\u0026rsquo;s implementation\u003c/h4\u003e\n\u003cp\u003e\u003cstrong\u003eVirtual Addresses\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003eUnused (16 bits)\u003c/td\u003e\n\u003ctd\u003eVirtual page number (36 bits)\u003c/td\u003e\n\u003ctd\u003eOffset (12 bits)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003cp\u003e\u003cstrong\u003ePhysical Addresses\u003c/strong\u003e\u003c/p\u003e\n\u003ctable\u003e\n\u003cthead\u003e\n\u003ctr\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003cth\u003e\u003c/th\u003e\n\u003c/tr\u003e\n\u003c/thead\u003e\n\u003ctbody\u003e\n\u003ctr\u003e\n\u003ctd\u003ePage number (40 bits)\u003c/td\u003e\n\u003ctd\u003eOffset (12 bits)\u003c/td\u003e\n\u003c/tr\u003e\n\u003c/tbody\u003e\n\u003c/table\u003e\n\u003ch4 id=\"translation\"\u003etranslation\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003echop off page number and offset\u003c/li\u003e\n\u003cli\u003etranslate the page number\u003c/li\u003e\n\u003cli\u003econcat the two together\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003einternal fragmentation\u003c/strong\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"why-not-something-simpler\"\u003ewhy not something simpler?\u003c/h2\u003e\n\u003ch3 id=\"single-tasking-memory\"\u003esingle-tasking memory\u003c/h3\u003e\n\u003cp\u003every bad idea:\u003c/p\u003e\n\u003cp\u003eASSUME that there is only one process. 
Stack grows down, data grows up, and code sits at the bottom.\u003c/p\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eno isolation\u003c/strong\u003e: even in this case, nothing is stopping the program from accessing memory in the OS reserve segment; which is bad.\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003eno multitasking\u003c/strong\u003e: because, well, we have one program\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003efragmentation\u003c/strong\u003e: little bits of space all over the place\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"load-time-relocation\"\u003eload-time relocation\u003c/h3\u003e\n\u003cp\u003eseparate processes.\u003c/p\u003e\n\u003cp\u003eWhen program is compiled, it assumes that its initial address is \u003ccode\u003e0x0\u003c/code\u003e; so, at load time, we have to go into the code segment when the program is set up and increment all of its memory addresses up.\u003c/p\u003e\n\u003ch4 id=\"tradeoff\"\u003etradeoff\u003c/h4\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003eno isolation\u003c/strong\u003e: nothing is stopping the program from accessing memory in otherbody\u0026rsquo;s segments\u003c/li\u003e\n\u003cli\u003emust decide the memory usage of a program ahead of time + cannot grow if needs more memory (we can\u0026rsquo;t move because the addresses would be in stack)\u003c/li\u003e\n\u003cli\u003eexternal fragmentation (normal alloc problems)\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvirtual_memory/","tags":null,"title":"virtual memory"},{"categories":null,"contents":"voltage is a measure of difference in electric potential energy across two points\n\\begin{equation} V = \\frac{1}{4\\pi \\epsilon_{0}} \\sum_{i} \\frac{q_{i}}{r_{i}} \\end{equation}\nor,\n\\begin{equation} PE = qV \\end{equation}\npotential energy experienced by \\(q\\) at the point.\n\\begin{equation} E_{x} = -\\dv{V}{x} 
\\end{equation}\n\\begin{equation} \\Delta V = - \\int E \\cdot dr \\end{equation}\n\\begin{equation} PE = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q_1q_2}{r} \\end{equation}\nequipotentential lines and electric field lines should align at right angles.\ncurrent through series is the same voltage through parallel is the same ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhvoltage/\"\u003evoltage\u003c/a\u003e is a measure of difference in \u003ca href=\"/posts/kbhelectric_potential_energy/\"\u003eelectric potential energy\u003c/a\u003e across two points\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nV = \\frac{1}{4\\pi \\epsilon_{0}} \\sum_{i} \\frac{q_{i}}{r_{i}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eor,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPE = qV\n\\end{equation}\u003c/p\u003e\n\u003cp\u003epotential energy experienced by \\(q\\) at the point.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE_{x} = -\\dv{V}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\Delta V = - \\int E \\cdot dr\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nPE = \\frac{1}{4\\pi \\epsilon_{0}} \\frac{q_1q_2}{r}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eequipotentential lines and electric field lines should align at right angles.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003ecurrent through series is the same\u003c/li\u003e\n\u003cli\u003evoltage through parallel is the same\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvoltage/","tags":null,"title":"voltage"},{"categories":null,"contents":"The VWAP is a Financial Market metric that stands for \u0026ldquo;volume-weighted average price.\u0026rdquo; It is given by (sumshares brought(shares bought at price*price its at)/(total shares bought in period)).\n\u0026ldquo;the price we care the most about, is the price where the most volume is traded.\u0026rdquo;\nMotivation Its a weighted-by volume trading price. 
Though the closing price is the price used for accounting, it isn\u0026rsquo;t a good metric for large-volume trades.\nTrading at the VWAP We Trade at the VWAP because a LARGE trade will move the market around, and we don\u0026rsquo;t want that if we are a large trader. So we trade at the VWAP to ensure that we are getting the best possible value.\nBuild a volume a profile Slicing the orders to match Control for volume deviations Volume Profile We use the volume-profile: \u0026ldquo;how much/what percentage of today\u0026rsquo;s volume happened in this chunk of the day\u0026rdquo; to predict today\u0026rsquo;s trading by matching by historical data. This often results in looking like a J curve: lots of trading happen at the beginning of the day, very little towards the middle, and LOTS in the end.\nSlicing Orders Slice your funds needed to trade, volume-wise, according to the Volume Profile. Set limit orders per slice at the best price for the market.\nControl Deviations from Expectation If you were\u0026rsquo;t able to trade by the limit order you posted at that slice, by the end of the slice, cancel your limit order and just send in a market order to ensure your participation with the desired volume at that slice.\n","html":"\u003cp\u003eThe \u003ca href=\"/posts/kbhvwap/\"\u003eVWAP\u003c/a\u003e is a \u003ca href=\"/posts/kbhfinancial_markets_intro/\"\u003eFinancial Market\u003c/a\u003e metric that stands for \u0026ldquo;volume-weighted average price.\u0026rdquo; It is given by (sum\u003csub\u003eshares brought\u003c/sub\u003e(shares bought at price*price its at)/(total shares bought in period)).\u003c/p\u003e\n\u003cp\u003e\u0026ldquo;the \u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e we care the most about, is the price where the most volume is traded.\u0026rdquo;\u003c/p\u003e\n\u003ch2 id=\"motivation\"\u003eMotivation\u003c/h2\u003e\n\u003cp\u003eIts a weighted-by volume trading price. 
Though the \u003ca href=\"/posts/kbhaccounting_price/\"\u003eclosing price\u003c/a\u003e is the \u003ca href=\"/posts/kbhprice/\"\u003eprice\u003c/a\u003e used for accounting, it isn\u0026rsquo;t a good metric for large-volume trades.\u003c/p\u003e\n\u003ch2 id=\"trading-at-the-vwap\"\u003eTrading at the VWAP\u003c/h2\u003e\n\u003cp\u003eWe \u003ca href=\"#trading-at-the-vwap\"\u003eTrade at the VWAP\u003c/a\u003e because a LARGE trade will move the market around, and we don\u0026rsquo;t want that if we are a large trader. So we trade at the \u003ca href=\"/posts/kbhvwap/\"\u003eVWAP\u003c/a\u003e to ensure that we are getting the best possible value.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eBuild a volume a profile\u003c/li\u003e\n\u003cli\u003eSlicing the orders to match\u003c/li\u003e\n\u003cli\u003eControl for volume deviations\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch3 id=\"volume-profile\"\u003eVolume Profile\u003c/h3\u003e\n\u003cp\u003eWe use the volume-profile: \u0026ldquo;how much/what percentage of today\u0026rsquo;s volume happened in this chunk of the day\u0026rdquo; to predict today\u0026rsquo;s trading by matching by historical data. This often results in looking like a J curve: lots of trading happen at the beginning of the day, very little towards the middle, and LOTS in the end.\u003c/p\u003e\n\u003ch3 id=\"slicing-orders\"\u003eSlicing Orders\u003c/h3\u003e\n\u003cp\u003eSlice your funds needed to trade, volume-wise, according to the \u003ca href=\"#volume-profile\"\u003eVolume Profile\u003c/a\u003e. 
Set limit orders per slice at the best price for the market.\u003c/p\u003e\n\u003ch3 id=\"control-deviations-from-expectation\"\u003eControl Deviations from Expectation\u003c/h3\u003e\n\u003cp\u003eIf you were\u0026rsquo;t able to trade by the limit order you posted at that slice, by the end of the slice, cancel your limit order and just send in a market order to ensure your participation with the desired volume at that slice.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhvwap/","tags":null,"title":"VWAP"},{"categories":null,"contents":"(Walker and Davies 2013)\nOne-Liner Emergency of life corresponds with the time for physical transition.\nNovelty Notable Methods Key Figs New Concepts Notes ","html":"\u003cp\u003e(\u003ca href=\"#citeproc_bib_item_1\"\u003eWalker and Davies 2013\u003c/a\u003e)\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eEmergency of life corresponds with the time for physical transition.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwalker_2018/","tags":null,"title":"Walker 2018"},{"categories":null,"contents":"DOI: 10.21437/Interspeech.2019-2414\n","html":"\u003cp\u003eDOI: 10.21437/Interspeech.2019-2414\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwang_2019/","tags":null,"title":"Wang 2019"},{"categories":null,"contents":"One-Liner Modeling carbon storage operations as a POMDP to show how different monitoring strategies can influence decision quality. 
Evalutae\nNovelty Applying POMDP to the task of carbon capture monitor planning.\nNotable Methods POMDP formulation\nSolver: POMCPOW Reward: trapped, free, and exited Co2 Action: injector placement Observation: CO2 saturation Belief: the permeability of the rock POMDP Solution: particle filter tree.\nExperimental design validated by simulations of CO2 sperad through injectors\nFourier Network Simulation The actual fluid dynamics is really really hard to solve. As such, we do the evaluation over a lot of scenarios and then train a neural network to act as surrogate.\nKey Figs New Concepts Notes ","html":"\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eModeling carbon storage operations as a POMDP to show how different monitoring strategies can influence decision quality. Evalutae\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cp\u003eApplying POMDP to the task of carbon capture monitor planning.\u003c/p\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cp\u003ePOMDP formulation\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eSolver: POMCPOW\u003c/li\u003e\n\u003cli\u003eReward: trapped, free, and exited Co2\u003c/li\u003e\n\u003cli\u003eAction: injector placement\u003c/li\u003e\n\u003cli\u003eObservation: CO2 saturation\u003c/li\u003e\n\u003cli\u003eBelief: the permeability of the rock\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ePOMDP Solution: particle filter tree.\u003c/p\u003e\n\u003cp\u003eExperimental design validated by simulations of CO2 sperad through injectors\u003c/p\u003e\n\u003ch3 id=\"fourier-network-simulation\"\u003eFourier Network Simulation\u003c/h3\u003e\n\u003cp\u003eThe actual fluid dynamics is really really hard to solve. 
As such, we do the evaluation over a lot of scenarios and then train a neural network to act as surrogate.\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwang_2023/","tags":null,"title":"Wang 2023"},{"categories":null,"contents":"Richard Nixon does not like democratic policies. Therefore, he had 5 operatives break into the DNC. Woodward and Berstein reports on the issue. Nixon rebounds and fires his investigator.\nThen, he released the \u0026ldquo;smoking gun\u0026rdquo; tape with the middle missing\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhrichard_nixon/\"\u003eRichard Nixon\u003c/a\u003e does not like democratic policies. Therefore, he had 5 operatives break into the DNC. Woodward and Berstein reports on the issue. Nixon rebounds and fires his investigator.\u003c/p\u003e\n\u003cp\u003eThen, he released the \u0026ldquo;smoking gun\u0026rdquo; tape with the middle missing\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwatergate/","tags":null,"title":"watergate"},{"categories":null,"contents":"If we write it in a single set of variables:\n\\begin{equation} \\pdv[2]{u}{t} = \\pdv[2]{u}{x} \\end{equation}\nAt a glance, for Dirichlet Conditions:\n\\begin{equation} u(t,x) = \\sum_{k} \\qty(a_{k} \\sin \\qty(\\frac{ck\\pi}{l} t) + b_{k} \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x) \\end{equation}\nthis takes two initial condition:\n\\begin{equation} u(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x) \\end{equation}\n\\begin{equation} \\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x) \\end{equation}\nmeaning:\n\\begin{equation} b_{k} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x} 
\\end{equation}\nand:\n\\begin{equation} a_{k} = \\frac{2}{k\\pi c} \\int_{0}^{l} h(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x} \\end{equation}\nwhich now finishes our initial conditions.\nImportantly, as we have a SECOND ORDER expression now, we need two initial conditions with initial amplitude and velocity.\nd\u0026rsquo;alembert\u0026rsquo;s formula The general solution to the wave equation, with:\n\\begin{equation} \\pdv[2]{U}{t} = c^{2} \\pdv[2]{U}{x} \\end{equation}\nwith \\(U(0,x) = f_0(x)\\), and \\(\\pdv{U}{t}(0,x) = f_1(x)\\) is:\n\\begin{equation} U(t,x) = \\frac{1}{2} \\qty(f_0 (x+ct) + f_0 (x-ct)) + \\frac{1}{2c} \\int_{x-ct}^{x+ct} f_1(y) \\dd{y} \\end{equation}\ndamping see damped wave equation\nsolving wave equation Recall:\n\\begin{equation} \\pdv[2]{u}{t} = c^{2} \\pdv[2]{u}{x} \\end{equation}\nwhere \\(c^{2}\\) is called the \u0026ldquo;wave speed\u0026rdquo;. Let\u0026rsquo;s start with the Dirichlet Conditions.\nUnlike the Heat Equation, Wave Equation are time reversible (i.e. time going forward and backwards should have no difference). Any solutions that go forward in time also satisfy for going backwards in time.\nLet\u0026rsquo;s try to solve it. 
Guess:\n\\begin{equation} u = A(t) B(x) \\end{equation}\nmeaning, we have:\n\\begin{equation} A\u0026rsquo;\u0026rsquo;(t) B(x) = c^{2} A(t)B\u0026rsquo;\u0026rsquo;(x) \\end{equation}\nThis finally gives:\n\\begin{equation} \\frac{A\u0026rsquo;\u0026rsquo;(t)}{A(t)} = c^{2} \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(X)} = \\lambda \\end{equation}\nwhich gives:\n\\begin{equation} B\u0026rsquo;\u0026rsquo;(x) - \\frac{\\lambda}{c^{2}} B(x) = 0 \\end{equation}\nwe can only solve this, given our boundary conditions:\n\\begin{equation} \\lambda = \\frac{-c^{2} k^{2} \\pi^{2}}{l^{2}} \\end{equation}\nwhich gives:\n\\begin{equation} B(x) = \\sin \\qty( \\frac{k \\pi}{l}x) \\end{equation}\nand \\(A\\) will result in a second order equation (unlike before):\n\\begin{equation} A\u0026rsquo;\u0026rsquo;(t) + \\frac{c^{2} h^{2} \\pi^{2}}{l^{2}} A(t) = 0 \\end{equation}\nThis gives generally a solution:\n\\begin{equation} A(t) = c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t) \\end{equation}\nTherefore, multiplying everything out:\n\\begin{equation} u(t,x) = \\sum_{k} \\qty(c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x) \\end{equation}\nmeaning: the overall oscillation is controlled by the wave speed, which changes in time but not space.\nFinally, note that:\n\\begin{equation} u(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x) \\end{equation}\nConsider the \\(t\\) derivative as well:\n\\begin{equation} \\pdv{u}{t} = \\sum \\qty(a_{n} \\frac{ck\\pi}{l} \\cos \\qty( \\frac{ck \\pi}{l} t) - b_{k}\\frac{k\\pi}{l} \\sin \\qty( \\frac{k\\pi}{l}t)) \\sin \\qty( \\frac{k\\pi}{l} x) \\end{equation}\nnow, this gives us another initial condition:\n\\begin{equation} \\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x) \\end{equation}\nwhich now finishes our initial conditions.\nGeneral Standing Wave Solution Because the PDE given is linear, solutions 
compose, and we note that any scale of \\(\\cos kt \\sin kx\\) will compose.\n\\begin{equation} u(t,x) = \\sum_{k=0}^{\\infty} a_{k} \\cos kt \\sin kx \\end{equation}\nFourier Series \\begin{equation} u(o,x) \\sum_{k} a_{k}\\sin kx \\end{equation}\nBIG stunning conclusion: every single function, including wack ones, can be decomposed. See Fourier Series\nGeneral Traveling Wave Solution \\begin{equation} u(t,x) = \\sin (x-t) w(x-t) \\end{equation}\nas long as \\(w\\) is a valid twice-differentiable solution, plugging its derivative in will resolve as well.\nComposition \\begin{equation} \\sin (x-t) + \\sin (x+t) = \\sin x \\cos t - \\cos x \\sin t + \\sin x \\cos t + \\cos x \\sin t = 2 \\sin x \\cos t \\end{equation}\n","html":"\u003cp\u003eIf we write it in a single set of variables:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{t} = \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eAt a glance, for \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sum_{k} \\qty(a_{k} \\sin \\qty(\\frac{ck\\pi}{l} t) + b_{k} \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ethis takes two initial condition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nb_{k} = \\frac{2}{l} \\int_{0}^{l} f(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\na_{k} = \\frac{2}{k\\pi c} \\int_{0}^{l} h(x) \\sin \\qty( \\frac{k\\pi}{l} x) \\dd{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich now 
finishes our initial conditions.\u003c/p\u003e\n\u003cp\u003eImportantly, as we have a \u003cstrong\u003eSECOND ORDER\u003c/strong\u003e expression now, we need \u003cstrong\u003etwo\u003c/strong\u003e initial conditions with initial amplitude and velocity.\u003c/p\u003e\n\u003ch2 id=\"d-alembert--kbhwave-equation-dot-md--s-formula\"\u003e\u003ca href=\"/posts/kbhwave_equation/\"\u003ed\u0026rsquo;alembert\u003c/a\u003e\u0026rsquo;s formula\u003c/h2\u003e\n\u003cp\u003eThe general solution to the wave equation, with:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{U}{t} = c^{2} \\pdv[2]{U}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewith \\(U(0,x) = f_0(x)\\), and \\(\\pdv{U}{t}(0,x) = f_1(x)\\) is:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nU(t,x) = \\frac{1}{2} \\qty(f_0 (x+ct) + f_0 (x-ct)) + \\frac{1}{2c} \\int_{x-ct}^{x+ct} f_1(y) \\dd{y}\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"damping\"\u003edamping\u003c/h2\u003e\n\u003cp\u003esee \u003ca href=\"\"\u003edamped wave equation\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"solving-wave-equation\"\u003esolving wave equation\u003c/h2\u003e\n\u003chr\u003e\n\u003cp\u003eRecall:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv[2]{u}{t} = c^{2} \\pdv[2]{u}{x}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhere \\(c^{2}\\) is called the \u0026ldquo;wave speed\u0026rdquo;. Let\u0026rsquo;s start with the \u003ca href=\"/posts/kbhsu_math53_feb232024/#dirichlet-conditions\"\u003eDirichlet Conditions\u003c/a\u003e.\u003c/p\u003e\n\u003cp\u003eUnlike the \u003ca href=\"/posts/kbhheat_equation/\"\u003eHeat Equation\u003c/a\u003e, \u003ca href=\"/posts/kbhwave_equation/\"\u003eWave Equation\u003c/a\u003e are time reversible (i.e. time going forward and backwards should have no difference). Any solutions that go forward in time also satisfy for going backwards in time.\u003c/p\u003e\n\u003cp\u003eLet\u0026rsquo;s try to solve it. 
Guess:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu = A(t) B(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning, we have:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;\u0026rsquo;(t) B(x) = c^{2} A(t)B\u0026rsquo;\u0026rsquo;(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis finally gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\frac{A\u0026rsquo;\u0026rsquo;(t)}{A(t)} = c^{2} \\frac{B\u0026rsquo;\u0026rsquo;(x)}{B(X)} = \\lambda\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB\u0026rsquo;\u0026rsquo;(x) - \\frac{\\lambda}{c^{2}} B(x) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewe can only solve this, given our boundary conditions:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\lambda = \\frac{-c^{2} k^{2} \\pi^{2}}{l^{2}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich gives:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nB(x) = \\sin \\qty( \\frac{k \\pi}{l}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eand \\(A\\) will result in a second order equation (unlike before):\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA\u0026rsquo;\u0026rsquo;(t) + \\frac{c^{2} h^{2} \\pi^{2}}{l^{2}} A(t) = 0\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThis gives generally a solution:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nA(t) = c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eTherefore, multiplying everything out:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sum_{k} \\qty(c_1 \\sin \\qty(\\frac{ck\\pi}{l} t) + c_2 \\cos \\qty(\\frac{ck\\pi}{l} t)) \\sin \\qty( \\frac{k \\pi}{l}x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003emeaning: the overall oscillation is controlled by the wave speed, which changes in \u003cstrong\u003etime\u003c/strong\u003e but not \u003cstrong\u003espace\u003c/strong\u003e.\u003c/p\u003e\n\u003cp\u003eFinally, note 
that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(0,x) = \\sum b_{k} \\sin \\qty( \\frac{k\\pi x}{l}) = f(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eConsider the \\(t\\) derivative as well:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t} = \\sum \\qty(a_{n} \\frac{ck\\pi}{l} \\cos \\qty( \\frac{ck \\pi}{l} t) - b_{k}\\frac{k\\pi}{l} \\sin \\qty( \\frac{k\\pi}{l}t)) \\sin \\qty( \\frac{k\\pi}{l} x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003enow, this gives us another initial condition:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{u}{t}(0,x) = \\sum a_{k} \\frac{ck \\pi}{l} \\sin \\qty( \\frac{k\\pi x}{l}) = g(x)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003ewhich now finishes our initial conditions.\u003c/p\u003e\n\u003ch2 id=\"general-standing-wave-solution\"\u003eGeneral Standing Wave Solution\u003c/h2\u003e\n\u003cp\u003eBecause the \u003ca href=\"/posts/kbhpartial_differential_equations/\"\u003ePDE\u003c/a\u003e given is linear, solutions compose, and we note that any scale of \\(\\cos kt \\sin kx\\) will compose.\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sum_{k=0}^{\\infty} a_{k} \\cos kt \\sin kx\n\\end{equation}\u003c/p\u003e\n\u003ch2 id=\"fourier-series\"\u003eFourier Series\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nu(o,x) \\sum_{k} a_{k}\\sin kx\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eBIG \u003cstrong\u003estunning conclusion\u003c/strong\u003e: \u003cstrong\u003eevery single function, including wack ones, can be decomposed\u003c/strong\u003e. 
See \u003ca href=\"/posts/kbhsu_math53_feb252024/#fourier-decomposition\"\u003eFourier Series\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"general-traveling-wave-solution\"\u003eGeneral Traveling Wave Solution\u003c/h2\u003e\n\u003cp\u003e\\begin{equation}\nu(t,x) = \\sin (x-t) w(x-t)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eas long as \\(w\\) is a valid twice-differentiable solution, plugging its derivative in will resolve as well.\u003c/p\u003e\n\u003ch3 id=\"composition\"\u003eComposition\u003c/h3\u003e\n\u003cp\u003e\\begin{equation}\n\\sin (x-t) + \\sin (x+t) = \\sin x \\cos t - \\cos x \\sin t + \\sin x \\cos t + \\cos x \\sin t = 2 \\sin x \\cos t\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwave_equation/","tags":null,"title":"Wave Equation"},{"categories":null,"contents":"let\u0026rsquo;s consider the web as a directed graph, where\u0026hellip;\na hyper-link denotes perceived relevance (\u0026ldquo;quality\u0026rdquo;) anchor of the hyper-link describe the target page (\u0026ldquo;textual context\u0026rdquo;) anchor text consider:\nIBM\u0026rsquo;s mostly graphical homepage IBM\u0026rsquo;s copyright page Rival\u0026rsquo;s span IBM page Consider, a million picees of anchor text saying \u0026ldquo;IBM\u0026rdquo; pointing to ibm.com, suddenly, that legitimizes the home page\n\u0026lt;a href=\u0026#34;target\u0026#34;\u0026gt;[archor text that says IBM]\u0026lt;/a\u0026gt; So, when we index a website, we index not just the website but 1) all links pointing to it and 2) the text of those links.\nside effects \u0026ldquo;Google Bombing\u0026rdquo;\u0026mdash;a lot of people artificially increasing the rank of the website by pointing a lot to it on more fake websites and writing in anchor text about the spoofed website\nsolution: weight each web page\u0026rsquo;s target anchors based on their \u0026ldquo;authoritativeness\u0026rdquo;\u0026mdash;either curated or calculated.\nuses of anchor text synonym usage (collect 
multiple ways of referring to the same website) finding translations (collect multiple languages referring to the same website) providing constituency boundaries (i.e. the anchor text is a NP within the larger sentence) PageRank \u0026ldquo;A page that\u0026rsquo;s a very popular is a good page.\u0026rdquo;\nPage Rank Solves the LinkCount problem by using the intuition that we want to weight Directed Popularity based on the importance of the page where the link is from.\nPrecisely: after starting at a random page, we walk along each link with equal probability and continue until we reach a time where a page\u0026rsquo;s visitation rate converges. We will use this as PageRank\nTeleporting To resolve the problem of dead ends, if we reach a dead end we jump to a random page.\nEven if we didn\u0026rsquo;t reach a dead end, with probability \\(\\alpha\\) we still jump to a random page.\nif the node has no out-link, the transition probability to each other node is \\(\\frac{1}{N}\\) if the node does have \\(K\\) out links, the probability of telephoning to a random node is \\(\\frac{\\alpha}{N}\\), and the probability of going to a normal out link is \\(\\frac{1-\\alpha}{k}\\). Building PageRank Matrix For some matrix A, \\(A_{ij}\\) is \\(1\\) if there is a hyper-link from \\(i\\) to \\(j\\).\nIf row \\(A\\) has no 1s, then we will replace each element by \\(\\frac{1}{N}\\). For all other rows, divide each row by the sum of the row, and multplying each entry by \\((1-\\alpha)\\). 
Then, add \\(\\frac{a}{N}\\) to the whole row.\ncalculating PageRank uses the fact that the matrix you built in the previous step is Ergotic to compute its steady state.\nLink Count page that is pointed to by lots of other pages\nFailure: this is very easy to spam\u0026ndash;we can just create a bunch of pages and add arbitrary number of links.\nUndirected Popularity \u0026ldquo;Degree\u0026rdquo;: number of in links plus the number of out-links.\nDirected Popularity Number of in-links\n","html":"\u003cp\u003elet\u0026rsquo;s consider the web as a \u003cstrong\u003edirected graph\u003c/strong\u003e, where\u0026hellip;\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ea hyper-link denotes perceived relevance (\u0026ldquo;quality\u0026rdquo;)\u003c/li\u003e\n\u003cli\u003eanchor of the hyper-link describe the target page (\u0026ldquo;textual context\u0026rdquo;)\u003c/li\u003e\n\u003c/ol\u003e\n\u003ch2 id=\"anchor-text\"\u003eanchor text\u003c/h2\u003e\n\u003cp\u003econsider:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eIBM\u0026rsquo;s mostly graphical homepage\u003c/li\u003e\n\u003cli\u003eIBM\u0026rsquo;s copyright page\u003c/li\u003e\n\u003cli\u003eRival\u0026rsquo;s span IBM page\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eConsider, a million picees of \u003ca href=\"#anchor-text\"\u003eanchor text\u003c/a\u003e saying \u0026ldquo;IBM\u0026rdquo; pointing to ibm.com, suddenly, that legitimizes the home page\u003c/p\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-html\" data-lang=\"html\"\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#111\"\u003e\u0026lt;\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003ea\u003c/span\u003e \u003cspan style=\"color:#75af00\"\u003ehref\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e\u003cspan 
style=\"color:#d88200\"\u003e\u0026#34;target\u0026#34;\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e\u0026gt;\u003c/span\u003e[archor text that says IBM]\u003cspan style=\"color:#111\"\u003e\u0026lt;/\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003ea\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e\u0026gt;\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e\u003cp\u003eSo, when we index a website, we index not just the website but 1) all links pointing to it and 2) the text of those links.\u003c/p\u003e\n\u003ch3 id=\"side-effects\"\u003eside effects\u003c/h3\u003e\n\u003cp\u003e\u0026ldquo;Google Bombing\u0026rdquo;\u0026mdash;a lot of people artificially increasing the rank of the website by pointing a lot to it on more fake websites and writing in anchor text about the spoofed website\u003c/p\u003e\n\u003cp\u003esolution: weight each web page\u0026rsquo;s target anchors based on their \u0026ldquo;authoritativeness\u0026rdquo;\u0026mdash;either curated or calculated.\u003c/p\u003e\n\u003ch3 id=\"uses-of-anchor-text\"\u003euses of anchor text\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003esynonym usage (collect multiple ways of referring to the same website)\u003c/li\u003e\n\u003cli\u003efinding translations (collect multiple languages referring to the same website)\u003c/li\u003e\n\u003cli\u003eproviding constituency boundaries (i.e. 
the anchor text is a NP within the larger sentence)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"pagerank\"\u003ePageRank\u003c/h2\u003e\n\u003cp\u003e\u0026ldquo;A page that\u0026rsquo;s a very popular is a good page.\u0026rdquo;\u003c/p\u003e\n\u003ch3 id=\"page-rank\"\u003ePage Rank\u003c/h3\u003e\n\u003cp\u003eSolves the LinkCount problem by using the intuition that we want to weight \u003ca href=\"#directed-popularity\"\u003eDirected Popularity\u003c/a\u003e based on the importance of the page where the link is from.\u003c/p\u003e\n\u003cp\u003e\u003cstrong\u003ePrecisely\u003c/strong\u003e: after starting at a random page, we walk along each link with equal probability and continue until we reach a time where a page\u0026rsquo;s visitation rate converges. We will use this as PageRank\u003c/p\u003e\n\u003ch4 id=\"teleporting\"\u003eTeleporting\u003c/h4\u003e\n\u003cp\u003eTo resolve the problem of dead ends, if we reach a dead end we jump to a random page.\u003c/p\u003e\n\u003cp\u003eEven if we didn\u0026rsquo;t reach a dead end, with probability \\(\\alpha\\) we still jump to a random page.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eif the node has no out-link, the transition probability to each other node is \\(\\frac{1}{N}\\)\u003c/li\u003e\n\u003cli\u003eif the node does have \\(K\\) out links, the probability of telephoning to a random node is \\(\\frac{\\alpha}{N}\\), and the probability of going to a normal out link is \\(\\frac{1-\\alpha}{k}\\).\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"building-pagerank-matrix\"\u003eBuilding PageRank Matrix\u003c/h4\u003e\n\u003cp\u003eFor some matrix A, \\(A_{ij}\\) is \\(1\\) if there is a hyper-link from \\(i\\) to \\(j\\).\u003c/p\u003e\n\u003cp\u003eIf row \\(A\\) has no 1s, then we will replace each element by \\(\\frac{1}{N}\\). For all other rows, divide each row by the sum of the row, and multplying each entry by \\((1-\\alpha)\\). 
Then, add \\(\\frac{a}{N}\\) to the whole row.\u003c/p\u003e\n\u003ch4 id=\"calculating-pagerank--org69f1397\"\u003ecalculating \u003ca href=\"#pagerank\"\u003ePageRank\u003c/a\u003e\u003c/h4\u003e\n\u003cp\u003euses the fact that the matrix you built in the previous step is \u003ca href=\"/posts/kbhmarkov_chain/#ergotic-markov-chain\"\u003eErgotic\u003c/a\u003e to \u003ca href=\"/posts/kbhmarkov_chain/#computing-steady-state\"\u003ecompute its steady state\u003c/a\u003e.\u003c/p\u003e\n\u003ch3 id=\"link-count\"\u003eLink Count\u003c/h3\u003e\n\u003cp\u003e\u003cstrong\u003epage that is pointed to by lots of other pages\u003c/strong\u003e\u003c/p\u003e\n\u003cp\u003eFailure: this is \u003cstrong\u003every easy to spam\u003c/strong\u003e\u0026ndash;we can just create a bunch of pages and add arbitrary number of links.\u003c/p\u003e\n\u003ch4 id=\"undirected-popularity\"\u003eUndirected Popularity\u003c/h4\u003e\n\u003cp\u003e\u0026ldquo;Degree\u0026rdquo;: number of in links plus the number of out-links.\u003c/p\u003e\n\u003ch4 id=\"directed-popularity\"\u003eDirected Popularity\u003c/h4\u003e\n\u003cp\u003eNumber of in-links\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhweb_graph/","tags":null,"title":"Web Graph"},{"categories":null,"contents":"For instance, in spellcheck, you are more likely to confuse say \\(a\\) and \\(e\\) than \\(a\\) and \\(b\\). 
Therefore, sometimes we want to weight our edit distance with DP to account for these \u0026ldquo;common\u0026rdquo; paths to make certain corrections more \u0026ldquo;jarring\u0026rdquo;.\nFor two strings, let\u0026rsquo;s define:\n\\(X\\) of length \\(n\\) \\(Y\\) of length \\(m\\) we define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\nLet:\n\\(D(i,0) = i, \\forall i\\) \\(D(0,j) = j, \\forall j\\) for i in range(1,M): for j in range(1,N): # deletion: ignoring one char of previous string d1 = D(i-1,j) + 1 # (cost) # insertion: insertion into string before using rest of j d2 = D(i,j-1) + 1 # (cost) # keep same if char is same or substitute current d3 = D(i-1,j-1) + (0 if X[i] == Y[j] else 2) # cache D(i,j) = min(d1, d2, d3) ","html":"\u003cp\u003eFor instance, in spellcheck, you are more likely to confuse say \\(a\\) and \\(e\\) than \\(a\\) and \\(b\\). Therefore, sometimes we want to weight our \u003ca href=\"/posts/kbhedit_distance_with_dp/\"\u003eedit distance with DP\u003c/a\u003e to account for these \u0026ldquo;common\u0026rdquo; paths to make certain corrections more \u0026ldquo;jarring\u0026rdquo;.\u003c/p\u003e\n\u003cp\u003eFor two strings, let\u0026rsquo;s define:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(X\\) of length \\(n\\)\u003c/li\u003e\n\u003cli\u003e\\(Y\\) of length \\(m\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003ewe define some \\(D(i,j)\\) as the edit distance between substring \\(X[1:i]\\) and \\(Y[1:j]\\).\u003c/p\u003e\n\u003cp\u003eLet:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\\(D(i,0) = i, \\forall i\\)\u003c/li\u003e\n\u003cli\u003e\\(D(0,j) = j, \\forall j\\)\u003c/li\u003e\n\u003c/ul\u003e\n\u003c!--listend--\u003e\n\u003cdiv class=\"highlight\"\u003e\u003cpre tabindex=\"0\" style=\"color:#272822;background-color:#fafafa;-moz-tab-size:4;-o-tab-size:4;tab-size:4;\"\u003e\u003ccode class=\"language-python\" data-lang=\"python\"\u003e\u003cspan 
style=\"display:flex;\"\u003e\u003cspan\u003e\u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eM\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#00a8c8\"\u003efor\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003ein\u003c/span\u003e \u003cspan style=\"color:#111\"\u003erange\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003eN\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e):\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# deletion: ignoring one char of previous string\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# insertion: insertion into string before using rest of j\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e \u003cspan style=\"color:#75715e\"\u003e# (cost)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# keep same if char is same or substitute current\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#f92672\"\u003e-\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e+\u003c/span\u003e \u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#ae81ff\"\u003e0\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eif\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eX\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e==\u003c/span\u003e \u003cspan style=\"color:#111\"\u003eY\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e[\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e]\u003c/span\u003e \u003cspan style=\"color:#00a8c8\"\u003eelse\u003c/span\u003e \u003cspan style=\"color:#ae81ff\"\u003e2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#75715e\"\u003e# cache\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003cspan style=\"display:flex;\"\u003e\u003cspan\u003e \u003cspan style=\"color:#111\"\u003eD\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ei\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ej\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e \u003cspan style=\"color:#f92672\"\u003e=\u003c/span\u003e \u003cspan style=\"color:#111\"\u003emin\u003c/span\u003e\u003cspan 
style=\"color:#111\"\u003e(\u003c/span\u003e\u003cspan style=\"color:#111\"\u003ed1\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed2\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e,\u003c/span\u003e \u003cspan style=\"color:#111\"\u003ed3\u003c/span\u003e\u003cspan style=\"color:#111\"\u003e)\u003c/span\u003e\n\u003c/span\u003e\u003c/span\u003e\u003c/code\u003e\u003c/pre\u003e\u003c/div\u003e","permalink":"https://www.jemoka.com/posts/kbhweighted_edit_distance/","tags":null,"title":"weighted edit distance"},{"categories":null,"contents":"Fireside a series of articles that I\u0026rsquo;m writing to consolidate my learning.\nI have always dreamed of blogging. I have even tried once called 20MinuteRants. They worked quite well as a basic format whereby I can write about things in a fairly efficient manner (hence the 20 minutes), and be able to reflect about the things I\u0026rsquo;m up to.\nThe problem with the project is that I rarely had the motivation to do one. Once I was too busy, or out of ideas to write about, I stop. If there\u0026rsquo;s not anything to rant about, why is there a 20MinuteRant?\nIndeed that has been why the blog has been on hiatus for the past many months. I suppose we can consider this entry the last of the 20MinuteRants and the first one of a new series of writings\u0026mdash;Fireside\u0026mdash;which I hope will continue for a long time.\nImpetus My mentor D has always told me to start arguments with why. The larger system in which Fireside is located, my knowlegebase system, has such a page arguing why its a good idea.\nAnd here\u0026rsquo;s the why: starting university has made me surprisingly lost in terms of what I want to do. In that: there\u0026rsquo;s so much of the vicissitudes of daily life that I no longer have the same intellectual curiosity that I think I had during middle and high school. 
And, in doing this, I hope to get it back.\nThis illustrates the key goal of Fireside: a once+ per week posting, illustrating some new thing I\u0026rsquo;m aiming to learn for the week. It can even be in the classroom: but something I\u0026rsquo;m going above and beyond to try to understand. Each article will either plan something to learn, or summarize my learning in it.\nI remember seeing an article through HN at one point (its not this one, it was significantly less \u0026ldquo;ha ha! business\u0026rdquo; language, but this will do) that \u0026ldquo;You never really learn something until you write about it.\u0026rdquo; A dormmate of mine working on ray tracing also said something to the same effect. And, so, what the heck. Let\u0026rsquo;s try this out.\nA part of me wishes that this fulfills the \u0026ldquo;deliverable\u0026rdquo; in my head of \u0026ldquo;YOU AREN\u0026rsquo;T DOING ENOUGH!\u0026rdquo; whenever I spend time wandering aimlessly to try to learn something. If it has become a Fireside, it counts. I guess.\nI should also add that Fireside is named Fireside because of FDR\u0026rsquo;s Fireside Chats, where he got to directly talk to people unfiltered about his views.\nParameters Frequency At a minimum once a week. No promises though.\nNames You have already noticed one of the parameters. Unless it is a general concept or a well known thing I did, I won\u0026rsquo;t be using names throughout the articles. Somewhat contrarily, I firmly believe that the process of building shit is a very personal one. Hence, following the example of one of my favorite essayists Zhu Ziqing, I will be using the first letter of the name I refer to them to refer to all people mentioned. Of course, if you don\u0026rsquo;t want to be included, I\u0026rsquo;d be happy to pull things down.\nThemes technology in general random nerdism deep learning and language models, methods and applications the shit I get up to We Begin I hope to begin this weekend. 
I\u0026rsquo;ve spent the last while trying to train a serious deep learning model (read: OpenAI Whisper Large V2), and dying because all I have access to is 2 32GB V-100s on the PSC (and yes, I point out that this is terribly privileged statement: woe is me with a cutting edge GPU).\nHowever, the Language Model literally doesn\u0026rsquo;t fit in the damned box. So, I\u0026rsquo;m trying to learn about distributed training methods like Ray, methods of efficient tuning with LoRA, and new-fangled memory sharing things like DeepSpeed.\nStay tuned.\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e a series of articles that I\u0026rsquo;m writing to consolidate my learning.\u003c/p\u003e\n\u003cp\u003eI have always dreamed of blogging. I have even tried once \u003ca href=\"https://medium.com/20minuterants\"\u003ecalled 20MinuteRants\u003c/a\u003e. They worked quite well as a basic format whereby I can write about things in a fairly efficient manner (hence the 20 minutes), and be able to reflect about the things I\u0026rsquo;m up to.\u003c/p\u003e\n\u003cp\u003eThe problem with the project is that I rarely had the motivation to do one. Once I was too busy, or out of ideas to write about, I stop. If there\u0026rsquo;s not anything to rant about, why is there a 20MinuteRant?\u003c/p\u003e\n\u003cp\u003eIndeed that has been why the blog has been on hiatus for the past many months. I suppose we can consider this entry the last of the 20MinuteRants and the first one of a new series of writings\u0026mdash;\u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e\u0026mdash;which I hope will continue for a long time.\u003c/p\u003e\n\u003ch2 id=\"impetus\"\u003eImpetus\u003c/h2\u003e\n\u003cp\u003eMy mentor D has always told me to start arguments with why. 
The larger system in which \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e is located, my knowlegebase system, has \u003ca href=\"/posts/kbhstarting_with_why_the_knowledgebase/\"\u003esuch a page\u003c/a\u003e arguing why its a good idea.\u003c/p\u003e\n\u003cp\u003eAnd here\u0026rsquo;s the why: starting university has made me surprisingly lost in terms of what I want to do. In that: there\u0026rsquo;s so much of the vicissitudes of daily life that I no longer have the same intellectual curiosity that I think I had during middle and high school. And, in doing this, I hope to get it back.\u003c/p\u003e\n\u003cp\u003eThis illustrates the key goal of \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e: a once+ per week posting, illustrating some new thing I\u0026rsquo;m aiming to learn for the week. It can even be in the classroom: but something I\u0026rsquo;m going above and beyond to try to understand. Each article will either plan something to learn, or summarize my learning in it.\u003c/p\u003e\n\u003cp\u003eI remember seeing an article through HN at one point (\u003ca href=\"https://addyosmani.com/blog/write-learn/\"\u003eits not this one, it was significantly less \u0026ldquo;ha ha! business\u0026rdquo; language, but this will do\u003c/a\u003e) that \u0026ldquo;You never really learn something until you write about it.\u0026rdquo; A dormmate of mine working on ray tracing also said something to the same effect. And, so, what the heck. Let\u0026rsquo;s try this out.\u003c/p\u003e\n\u003cp\u003eA part of me wishes that this fulfills the \u0026ldquo;deliverable\u0026rdquo; in my head of \u0026ldquo;YOU AREN\u0026rsquo;T DOING ENOUGH!\u0026rdquo; whenever I spend time wandering aimlessly to try to learn something. If it has become a \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e, it counts. 
I guess.\u003c/p\u003e\n\u003cp\u003eI should also add that \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e is named \u003ca href=\"/posts/kbhfireside/\"\u003eFireside\u003c/a\u003e because of FDR\u0026rsquo;s \u003ca href=\"/posts/kbhfireside_chats/\"\u003eFireside Chats\u003c/a\u003e, where he got to directly talk to people unfiltered about his views.\u003c/p\u003e\n\u003ch2 id=\"parameters\"\u003eParameters\u003c/h2\u003e\n\u003ch3 id=\"frequency\"\u003eFrequency\u003c/h3\u003e\n\u003cp\u003eAt a minimum once a week. No promises though.\u003c/p\u003e\n\u003ch3 id=\"names\"\u003eNames\u003c/h3\u003e\n\u003cp\u003eYou have already noticed one of the parameters. Unless it is a general concept or a well known thing I did, I won\u0026rsquo;t be using names throughout the articles. Somewhat contrarily, I firmly believe that the process of building shit is a very personal one. Hence, following the example of one of my favorite essayists \u003ca href=\"https://en.wikipedia.org/wiki/Zhu_Ziqing\"\u003eZhu Ziqing\u003c/a\u003e, I will be using the first letter of the name I refer to them to refer to all people mentioned. Of course, if you don\u0026rsquo;t want to be included, I\u0026rsquo;d be happy to pull things down.\u003c/p\u003e\n\u003ch3 id=\"themes\"\u003eThemes\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003etechnology in general\u003c/li\u003e\n\u003cli\u003erandom nerdism\u003c/li\u003e\n\u003cli\u003edeep learning and language models, methods and applications\u003c/li\u003e\n\u003cli\u003ethe shit I get up to\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"we-begin\"\u003eWe Begin\u003c/h2\u003e\n\u003cp\u003eI hope to begin this weekend. 
I\u0026rsquo;ve spent the last while trying to train a serious deep learning model (read: OpenAI Whisper Large V2), and dying because all I have access to is 2 32GB V-100s on the PSC (and yes, I point out that this is terribly privileged statement: woe is me with a cutting edge GPU).\u003c/p\u003e\n\u003cp\u003eHowever, the \u003ca href=\"/posts/kbhnlp/#language-model\"\u003eLanguage Model\u003c/a\u003e literally doesn\u0026rsquo;t fit in the damned box. So, I\u0026rsquo;m trying to learn about distributed training methods like Ray, methods of efficient tuning with LoRA, and new-fangled memory sharing things like DeepSpeed.\u003c/p\u003e\n\u003cp\u003eStay tuned.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhfireside_article/","tags":["fireside"],"title":"Welcome to the Fireside"},{"categories":null,"contents":"Under Construction\n","html":"\u003cp\u003eUnder Construction\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwho_s_talking_when/","tags":null,"title":"Who's Talking When?"},{"categories":null,"contents":"A study with the goal of identifying semantic primes.\n","html":"\u003cp\u003eA study with the goal of identifying \u003ca href=\"/posts/kbhsemantic_primes/\"\u003esemantic prime\u003c/a\u003es.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwhole_metalanguage_study/","tags":null,"title":"whole metalanguage study"},{"categories":null,"contents":"Why are Todo Lists (a.k.a. personal productivity systems) so hard to build well?\nI\u0026rsquo;m genuinely curious. I was listening to the last episode of Cortex, and one of the hosts (CGP Grey) brought up a similar point regarding personal productivity platforms. OmniFocus, the reigning champion of the industry for professionals looking for a deeply customized system, has been staggering in their ability to ship the next version of their application. Much of the market consists of various different packagings of the same offering. 
Grey\u0026rsquo;s thesis of these platforms essentially boils down to this:\nTodo Lists are very personal systems that requires deep customizations Yet, they need to stay very out of the way (\u0026ldquo;fit into the workflow\u0026rdquo;) of their user Plus, a point that I have noticed which was brought up briefly:\nits very easy to build a crappy one, and very hard to build a good one I particularly like Grey\u0026rsquo;s phrasing of point 3.: \u0026ldquo;there\u0026rsquo;s high desire market saturation, but no practical market saturation in the offering.\u0026rdquo; That is, \u0026ldquo;everyone has a good idea of what a good to-do list software is, and no one has been able to write one that generally works very well for \u0026ldquo;most\u0026rdquo; people.\nLearnings and Next Steps I tried once. Condution was an early experiment between me and a few of my friends to try to solve some of the issues we saw in the to-do list market at the time. There was some response within the community: within about 6 months, we got about 1000+ MAU, 10,000+ registered users; but I think there\u0026rsquo;s an undeniable sense in me that, while Condution solved the specific problem we saw in the market, it still doesn\u0026rsquo;t solve the fundamental issue that to-do list platforms have:\nCondution, like all other platforms, isn\u0026rsquo;t for everyone. And that is a problem. Peter Thiel gave this famous talk in 183B which outlines the fact that much of the illusion of \u0026ldquo;no competition\u0026rdquo; comes from the small companies trying desperatly to dominate a fiercely competitive market; to win, one truly has to dominate a market. 
The person productivity space is one which the battle for hearts and minds have created an exciting explosion of choices for the consumer, but no one system has yet emerged to engulf it all.\nThe feedback that I tracked from building Condution boils down to a few asks:\n\u0026ldquo;can we have this repeating task/date/filter behavior?\u0026rdquo; (yes, but it has to be a ticket for each one of these and god knows when it can happen) \u0026ldquo;can you be a calendar app with scheduling?\u0026rdquo; (aaaaaaaaaa so much API work; plus, scheduling is hard) \u0026ldquo;why is [this native feature] not supported?\u0026rdquo; (because we run a PWA wrapped in a WebView on mobile) \u0026ldquo;self hosting when?\u0026rdquo; / \u0026ldquo;API when?\u0026rdquo; (oh, we tried. alas) I think this boils down to a few things:\nusers want myriad behavior that we don\u0026rsquo;t possibly have time to implement ourselves users want their ability to process their own data (i.e. API, calendar, etc.), at their own pacing, without going through our server And, to be honest, I don\u0026rsquo;t think anyone on the market (barring lots of elisp code + Org mode, which really fails at \u0026ldquo;users want an experience that\u0026rsquo;s actually not limited to Desktop Emacs nuts like me\u0026rdquo;) achieves these two objectives.\nWe\u0026rsquo;ve Seen This Before Here\u0026rsquo;s the thing. This is not the world\u0026rsquo;s first rodeo to this problem. Visual Studio Code, the text editing sensation which today holds something like 75% market share, was released in 2015. Let\u0026rsquo;s examine two random blogs I found in 2013 writing about text editors:\nthis one and this one I\u0026rsquo;m of the humble opinion that Text Editors circa 2013 has the same exact set of problems as to-do lists now. 
Technology was mature enough for everyone to build one; the space is crowded enough that its was worth writing a listicle literally titled (\u0026ldquo;10 most fascinating text editors\u0026rdquo;); and each of us developers had a persnickety opinion about what our Text Editor/IDE should do: from editing text, to running code, to making coffee.\nAnd then, a disruptor.\nthat has easy to implement plugins and a web-based core which allows any and all users to customize their experience down to the tat and which makes the core experience of editing text and setting up autocomplete (bare bones basics) so easy that its inconceivable you would use anything else Our friend Visual Studio Code.\nAnd so, a proposal I don\u0026rsquo;t know what such a disruptor of the field would look like, but I\u0026rsquo;d like to try again. I would like to take a crack at building a to-do list app (YET AGAIN!) that solely focuses on these two tenants:\nultimate and intuitive extensibility by the user absolutly smooth down to a tat introductory (\u0026ldquo;basic\u0026rdquo;) experience, which is minimally opinionated but can accommodate as many modalities as possible I hope to do this while not compromising the platforms the product is available on, and the nativeness of this experience.\nIt\u0026rsquo;s a lot to ask, and its yet unclear what a solution may look like. But it never hurts to take a crack. If you want to help out with this probably open-source effort, reach out.\nand also, defer/start dates. We should have those.\n","html":"\u003cp\u003eWhy are \u003ca href=\"/posts/kbhtodo_lists/\"\u003eTodo Lists\u003c/a\u003e (a.k.a. personal productivity systems) so hard to build well?\u003c/p\u003e\n\u003cp\u003eI\u0026rsquo;m genuinely curious. I was listening to the last episode of \u003ca href=\"https://www.relay.fm/cortex/\"\u003eCortex\u003c/a\u003e, and one of the hosts (CGP Grey) brought up a similar point regarding personal productivity platforms. 
OmniFocus, the reigning champion of the industry for professionals looking for a deeply customized system, has been staggering in their ability to ship the next version of their application. Much of the market consists of various different packagings of the same offering. Grey\u0026rsquo;s thesis of these platforms essentially boils down to this:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtodo_lists/\"\u003eTodo Lists\u003c/a\u003e are very personal systems that requires deep customizations\u003c/li\u003e\n\u003cli\u003eYet, they need to stay very out of the way (\u0026ldquo;fit into the workflow\u0026rdquo;) of their user\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ePlus, a point that I have noticed which was brought up briefly:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eits very easy to build a crappy one, and very hard to build a good one\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI particularly like Grey\u0026rsquo;s phrasing of point 3.: \u0026ldquo;there\u0026rsquo;s high desire market saturation, but no practical market saturation in the offering.\u0026rdquo; That is, \u0026ldquo;everyone has a good idea of what a good to-do list software is, and no one has been able to write one that generally works very well for \u0026ldquo;most\u0026rdquo; people.\u003c/p\u003e\n\u003ch2 id=\"learnings-and-next-steps\"\u003eLearnings and Next Steps\u003c/h2\u003e\n\u003cp\u003eI tried once. \u003ca href=\"https://www.condution.com/\"\u003eCondution\u003c/a\u003e was an early experiment between me and a few of my friends to try to solve some of the issues we saw in the to-do list market at the time. 
There was some response within the community: within about 6 months, we got about 1000+ MAU, 10,000+ registered users; but I think there\u0026rsquo;s an undeniable sense in me that, while Condution solved the specific problem we saw in the market, it still doesn\u0026rsquo;t solve the fundamental issue that to-do list platforms have:\u003c/p\u003e\n\u003cp\u003eCondution, like all other platforms, isn\u0026rsquo;t for everyone. And that is a problem. Peter Thiel gave this \u003ca href=\"https://www.youtube.com/watch?v=3Fx5Q8xGU8k\"\u003efamous talk\u003c/a\u003e in 183B which outlines the fact that much of the illusion of \u0026ldquo;no competition\u0026rdquo; comes from the small companies trying desperatly to dominate a fiercely competitive market; to win, one truly has to dominate a market. The person productivity space is one which the battle for hearts and minds have created an exciting explosion of choices for the consumer, but no one system has yet emerged to engulf it all.\u003c/p\u003e\n\u003cp\u003eThe feedback that I tracked from building Condution boils down to a few asks:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003e\u0026ldquo;can we have this repeating task/date/filter behavior?\u0026rdquo; (yes, but it has to be a ticket for each one of these and god knows when it can happen)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;can you be a calendar app with scheduling?\u0026rdquo; (aaaaaaaaaa so much API work; plus, scheduling is hard)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;why is [this native feature] not supported?\u0026rdquo; (because we run a PWA wrapped in a WebView on mobile)\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;self hosting when?\u0026rdquo; / \u0026ldquo;API when?\u0026rdquo; (oh, we tried. 
alas)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI think this boils down to a few things:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eusers want myriad behavior that we don\u0026rsquo;t possibly have time to implement ourselves\u003c/li\u003e\n\u003cli\u003eusers want their ability to process their own data (i.e. API, calendar, etc.), at their own pacing, without going through our server\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eAnd, to be honest, I don\u0026rsquo;t think anyone on the market (barring lots of elisp code + Org mode, which really fails at \u0026ldquo;users want an experience that\u0026rsquo;s actually not limited to Desktop Emacs nuts like me\u0026rdquo;) achieves these two objectives.\u003c/p\u003e\n\u003ch2 id=\"we-ve-seen-this-before\"\u003eWe\u0026rsquo;ve Seen This Before\u003c/h2\u003e\n\u003cp\u003eHere\u0026rsquo;s the thing. This is not the world\u0026rsquo;s first rodeo to this problem. Visual Studio Code, the text editing sensation which today holds something like 75% market share, was released in 2015. Let\u0026rsquo;s examine two random blogs I found in 2013 writing about text editors:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"https://web.archive.org/web/20230509053757/https://www.theregister.com/2013/03/11/verity_stob_text_editor/\"\u003ethis one\u003c/a\u003e\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"https://web.archive.org/web/20230608151954/https://www.bloggersentral.com/2013/09/awesome-text-editors-for-web-developers.html\"\u003eand this one\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003eI\u0026rsquo;m of the humble opinion that Text Editors circa 2013 has the same exact set of problems as to-do lists now. 
Technology was mature enough for everyone to build one; the space is crowded enough that its was worth writing a listicle literally titled (\u0026ldquo;10 most fascinating text editors\u0026rdquo;); and each of us developers had a persnickety opinion about what our Text Editor/IDE should do: from editing text, to running code, to making coffee.\u003c/p\u003e\n\u003cp\u003eAnd then, a disruptor.\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ethat has easy to implement plugins and a web-based core which allows any and all users to customize their experience down to the tat\u003c/li\u003e\n\u003cli\u003eand which makes the core experience of editing text and setting up autocomplete (bare bones basics) so easy that its inconceivable you would use anything else\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eOur friend Visual Studio Code.\u003c/p\u003e\n\u003ch2 id=\"and-so-a-proposal\"\u003eAnd so, a proposal\u003c/h2\u003e\n\u003cp\u003eI don\u0026rsquo;t know what such a disruptor of the field would look like, but I\u0026rsquo;d like to try again. I would like to take a crack at building a to-do list app (YET AGAIN!) that solely focuses on these two tenants:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003eultimate and intuitive extensibility by the user\u003c/li\u003e\n\u003cli\u003eabsolutly smooth down to a tat introductory (\u0026ldquo;basic\u0026rdquo;) experience, which is minimally opinionated but can accommodate as many modalities as possible\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eI hope to do this while not compromising the platforms the product is available on, and the nativeness of this experience.\u003c/p\u003e\n\u003cp\u003eIt\u0026rsquo;s a lot to ask, and its yet unclear what a solution may look like. But it never hurts to take a crack. If you want to help out with this probably open-source effort, reach out.\u003c/p\u003e\n\u003chr\u003e\n\u003cp\u003eand also, defer/start dates. 
We should have those.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhtodo_lists/","tags":["writing","fireside"],"title":"Why is building a to-do list app so darn hard?"},{"categories":null,"contents":"linked files architecture for filesystem, but it caches the file links in memory when the OS is running.\nproblems data is still scattered across the disk we had to construct the file allocation table though its must faster because jumping to the middle of the file is now in memory, we are still doing O(n) search for a specific sub part ","html":"\u003cp\u003e\u003ca href=\"/posts/kbhlinked_files/\"\u003elinked files\u003c/a\u003e architecture for \u003ca href=\"/posts/kbhfilesystem/\"\u003efilesystem\u003c/a\u003e, but it caches the file links in memory when the OS is running.\u003c/p\u003e\n\u003ch2 id=\"problems\"\u003eproblems\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003edata is \u003cstrong\u003estill\u003c/strong\u003e scattered across the disk\u003c/li\u003e\n\u003cli\u003ewe had to construct the file allocation table\u003c/li\u003e\n\u003cli\u003ethough its must faster because jumping to the middle of the file is now in memory, we are still doing O(n) search for a specific sub part\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwindows_fat/","tags":null,"title":"Windows FAT"},{"categories":null,"contents":"Pay attention to:\ncases (all letters to lower case?) 
lemmatization This is often done with morphological parsing, for instance, you can try stemming.\n","html":"\u003cp\u003ePay attention to:\u003c/p\u003e\n\u003col\u003e\n\u003cli\u003ecases (all letters to lower case?)\u003c/li\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhtokenization/\"\u003elemma\u003c/a\u003etization\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003eThis is often done with \u003ca href=\"/posts/kbhmorphological_parsing/\"\u003emorphological parsing\u003c/a\u003e, for instance, you can try \u003ca href=\"/posts/kbhmorphological_parsing/#stemming\"\u003estemming\u003c/a\u003e.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhword_normalization/","tags":null,"title":"Word Normalization"},{"categories":null,"contents":"we will train a classifier on a binary prediction task: \u0026ldquo;is context words \\(c_{1:L}\\) likely to show up near some target word \\(W_0\\)?\u0026rdquo;\nWe estimate the probability that \\(w_{0}\\) occurs within this window based on the product of the probabilities of the similarity of the embeddings between each context word and the target word.\nTo turn cosine similarity dot products into probability, we squish the dot product via the sigmoid function.\nimportantly, we don\u0026rsquo;t actually use these results. 
we simply take the resulting embeddings.\nproperties window size smaller windows: captures more syntax level information large windows: capture more semantic field information parallelogram model simple way to solve analogies problems with vector semantics: get the difference between two word vectors, and add it somewhere else to get an analogous transformation.\nonly words for frequent words small distances but not quite for large systems allocational harm embeddings bake in existing biases, which leads to bias in hiring practices, etc.\nskip-gram with negative sampling skip-gram trains vectors separately for word being used as target and word being used as context.\nthe mechanism for training the embedding:\nselect some \\(k\\), which is the multiplier of the negative examples (if \\(k=2\\), ever one positive example will be matched with 2 negative examples) sample a target word, and generate positive samples paired by words in its immediate window sample window size times \\(k\\) negative examples, where the noise words are chosen explicitly as not being near our target word, and weighted based on unigram frequency for each paired training sample, we minimize the loss via cross entropy loss:\n\\begin{equation} L_{CE} = -\\qty[ \\log (\\sigma(c_{pos} \\cdot w)) + \\sum_{i=1}^{k} \\log \\sigma\\qty(-c_{neg} \\cdot w)] \\end{equation}\nrecall that:\n\\begin{equation} \\pdv{L_{CE}}{w} = \\qty[\\sigma(c_{pos} \\cdot w) -1]c_{pos} + \\sum_{i=1}^{k} \\qty[\\sigma(c_{neg_{i}}\\cdot w)]c_{neg_{i}} \\end{equation}\n","html":"\u003cp\u003ewe will train a classifier on a binary prediction task: \u0026ldquo;is context words \\(c_{1:L}\\) likely to show up near some target word \\(W_0\\)?\u0026rdquo;\u003c/p\u003e\n\u003cp\u003eWe estimate the probability that \\(w_{0}\\) occurs within this window based on the product of the probabilities of the similarity of the embeddings between each context word and the target word.\u003c/p\u003e\n\u003cp\u003eTo turn \u003ca 
href=\"/posts/kbhranked_information_retrieval/#cosine-similarity\"\u003ecosine similarity\u003c/a\u003e \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003es into probability, we squish the \u003ca href=\"/posts/kbhdot_product/\"\u003edot product\u003c/a\u003e via the \u003ca href=\"/posts/kbhsigmoid/\"\u003esigmoid\u003c/a\u003e function.\u003c/p\u003e\n\u003cp\u003eimportantly, we don\u0026rsquo;t actually use these results. we simply take the resulting embeddings.\u003c/p\u003e\n\u003ch2 id=\"properties\"\u003eproperties\u003c/h2\u003e\n\u003ch3 id=\"window-size\"\u003ewindow size\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003e\u003cstrong\u003esmaller windows\u003c/strong\u003e: captures more syntax level information\u003c/li\u003e\n\u003cli\u003e\u003cstrong\u003elarge windows\u003c/strong\u003e: capture more semantic field information\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"parallelogram-model\"\u003eparallelogram model\u003c/h3\u003e\n\u003cp\u003esimple way to solve analogies problems with vector semantics: get the difference between two word vectors, and add it somewhere else to get an analogous transformation.\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eonly words for frequent words\u003c/li\u003e\n\u003cli\u003esmall distances\u003c/li\u003e\n\u003cli\u003ebut not quite for large systems\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch4 id=\"allocational-harm\"\u003eallocational harm\u003c/h4\u003e\n\u003cp\u003eembeddings bake in existing biases, which leads to bias in hiring practices, etc.\u003c/p\u003e\n\u003ch2 id=\"skip-gram-with-negative-sampling\"\u003eskip-gram with negative sampling\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"#skip-gram-with-negative-sampling\"\u003eskip-gram\u003c/a\u003e trains vectors separately for word being used as target and word being used as context.\u003c/p\u003e\n\u003cp\u003ethe mechanism for training the embedding:\u003c/p\u003e\n\u003cul\u003e\n\u003cli\u003eselect some \\(k\\), which is the 
multiplier of the negative examples (if \\(k=2\\), ever one positive example will be matched with 2 negative examples)\u003c/li\u003e\n\u003cli\u003esample a target word, and generate positive samples paired by words in its immediate window\u003c/li\u003e\n\u003cli\u003esample window size times \\(k\\) negative examples, where the noise words are chosen explicitly as not being near our target word, and weighted based on unigram frequency\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003efor each paired training sample, we minimize the loss via \u003ca href=\"/posts/kbhcross_entropy_loss/\"\u003ecross entropy loss\u003c/a\u003e:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nL_{CE} = -\\qty[ \\log (\\sigma(c_{pos} \\cdot w)) + \\sum_{i=1}^{k} \\log \\sigma\\qty(-c_{neg} \\cdot w)]\n\\end{equation}\u003c/p\u003e\n\u003cp\u003erecall that:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n\\pdv{L_{CE}}{w} = \\qty[\\sigma(c_{pos} \\cdot w) -1]c_{pos} + \\sum_{i=1}^{k} \\qty[\\sigma(c_{neg_{i}}\\cdot w)]c_{neg_{i}}\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhword2vec/","tags":null,"title":"word2vec"},{"categories":null,"contents":"WPA is the largest relief program ever in the Great Depression New Deal, to promote public infrastructure and create artistic murals. It helped unskilled men to carry out public works infrastructure.\nThe project started 5/1935 and dissolved 6/1943.\n","html":"\u003cp\u003eWPA is the largest relief program ever in the \u003ca href=\"/posts/kbhgreat_depression/\"\u003eGreat Depression\u003c/a\u003e \u003ca href=\"/posts/kbhnew_deal/\"\u003eNew Deal\u003c/a\u003e, to promote public infrastructure and create artistic murals. 
It helped unskilled men to carry out public works infrastructure.\u003c/p\u003e\n\u003cp\u003eThe project started 5/1935 and dissolved 6/1943.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhwpa/","tags":null,"title":"Works Progress Administration"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhwriting_index/","tags":null,"title":"Writing Index"},{"categories":null,"contents":" vertibre backbone: 3 points to remember \u0026ldquo;we are in the business of looking for outliers\u0026rdquo; tarpit ides vision with world + good team iStudio Meeting Notes\n","html":"\u003cul\u003e\n\u003cli\u003evertibre backbone: 3 points to remember\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;we are in the business of looking for outliers\u0026rdquo;\u003c/li\u003e\n\u003cli\u003etarpit ides\u003c/li\u003e\n\u003cli\u003evision with world + good team\u003c/li\u003e\n\u003c/ul\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhistudio_meeting_notes/\"\u003eiStudio Meeting Notes\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhycomb/","tags":null,"title":"ycomb"},{"categories":null,"contents":"Young\u0026rsquo;s Modulus is a mechanical property that measures the stiffness of a solid material.\nIt measures the ratio between mechanical stress \\(\\sigma\\) and the relative resulting strain \\(\\epsilon\\).\nAnd so, very simply:\n\\begin{equation} E = \\frac{\\sigma }{\\epsilon } \\end{equation}\nThinking about this, silly puddy deforms very easily given a little stress, so it would have low Young\u0026rsquo;s Modulus (\\(\\sigma \\ll \\epsilon\\)); and visa versa. 
https://aapt.scitation.org/doi/10.1119/1.17116?cookieSet=1\n","html":"\u003cp\u003e\u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e is a mechanical property that measures the stiffness of a solid material.\u003c/p\u003e\n\u003cp\u003e\u003cimg src=\"/ox-hugo/2022-09-05_22-27-31_screenshot.png\" alt=\"\"\u003e\nIt measures the ratio between mechanical \u003ca href=\"/posts/kbhstress/\"\u003estress\u003c/a\u003e \\(\\sigma\\) and the relative resulting \u003ca href=\"/posts/kbhstrain/\"\u003estrain\u003c/a\u003e \\(\\epsilon\\).\u003c/p\u003e\n\u003cp\u003eAnd so, very simply:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nE = \\frac{\\sigma }{\\epsilon }\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eThinking about this, silly puddy deforms very easily given a little stress, so it would have \u003cem\u003elow\u003c/em\u003e \u003ca href=\"/posts/kbhyoung_s_modulus/\"\u003eYoung\u0026rsquo;s Modulus\u003c/a\u003e (\\(\\sigma \\ll \\epsilon\\)); and visa versa.\n\u003ca href=\"https://aapt.scitation.org/doi/10.1119/1.17116?cookieSet=1\"\u003ehttps://aapt.scitation.org/doi/10.1119/1.17116?cookieSet=1\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhyoung_s_modulus/","tags":null,"title":"Young's Modulus"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2020.624488\nOne-Liner Used an ERNIE trained on transcripts for classification; inclusion of pause encoding made results better.\nNovelty Instead of just looking at actual speech content, look at pauses specific as a feature engineering task \\(89.6\\%\\) on the ADReSS Challenge dataset Notable Methods Applied FA with pause encoding with standard .cha semantics (short pauses, medium pauses, long pauses). 
Shoved all of this into an ERNIE.\nAssay for performance was LOO\nKey Figs Fig 1 This figure motivates the point that subjects with AD says oh and um more often; which prompted Table 1\nTable 1 Subjects with AD says uh a lot more often; no significance level calculations but ok.\nFigure 5 This figure is the result of a LOO study on the proposed model and presumably others before. X axis is the validation accuracy in question, Y is the density by which the score in X appears in an \\(N=35\\) LOO measurement.\nThis figure tells us that either way the ERNIE model is better than state of the art; furthermore, transcripts with pause encoding did better and did it better more of the time; that\u0026rsquo;s where the 89.6% came from.\nNew Concepts Leave-One-Out cross validation Notes Glorious.\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2020.624488\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003eUsed an ERNIE trained on transcripts for classification; inclusion of pause encoding made results better.\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eInstead of just looking at actual speech content, look at pauses specific as a feature engineering task\u003c/li\u003e\n\u003cli\u003e\\(89.6\\%\\) on the \u003ca href=\"/posts/kbhadress_challenge/\"\u003eADReSS Challenge\u003c/a\u003e dataset\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-45-47_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eApplied FA with pause encoding with standard \u003ccode\u003e.cha\u003c/code\u003e semantics (short pauses, medium pauses, long pauses). 
Shoved all of this into an ERNIE.\u003c/p\u003e\n\u003cp\u003eAssay for performance was \u003ca href=\"/posts/kbhloo/\"\u003eLOO\u003c/a\u003e\u003c/p\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"fig-1\"\u003eFig 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-43-43_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure motivates the point that subjects with AD says oh and um more often; which prompted Table 1\u003c/p\u003e\n\u003ch3 id=\"table-1\"\u003eTable 1\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-44-31_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eSubjects with AD says uh a lot more often; no \u003ca href=\"/posts/kbhhypothesis_testing/#significance-level\"\u003esignificance level\u003c/a\u003e calculations but ok.\u003c/p\u003e\n\u003ch3 id=\"figure-5\"\u003eFigure 5\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-55-10_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure is the result of a \u003ca href=\"/posts/kbhloo/\"\u003eLOO\u003c/a\u003e study on the proposed model and presumably others before. 
X axis is the validation accuracy in question, Y is the density by which the score in X appears in an \\(N=35\\) \u003ca href=\"/posts/kbhloo/\"\u003eLOO\u003c/a\u003e measurement.\u003c/p\u003e\n\u003cp\u003eThis figure tells us that either way the ERNIE model is better than state of the art; furthermore, transcripts with pause encoding did better and did it better more of the time; that\u0026rsquo;s where the 89.6% came from.\u003c/p\u003e\n\u003ch2 id=\"new-concepts\"\u003eNew Concepts\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003e\u003ca href=\"/posts/kbhloo/\"\u003eLeave-One-Out cross validation\u003c/a\u003e\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notes\"\u003eNotes\u003c/h2\u003e\n\u003cp\u003eGlorious.\u003c/p\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-24_20-41-41_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n","permalink":"https://www.jemoka.com/posts/kbhyuan_2021/","tags":["ntj"],"title":"Yuan 2021"},{"categories":null,"contents":"A z-test is a hypothesis test for statistical significance between two sample proportions. Before it can be conducted, it must meet the conditions for inference for a z-test.\nconditions for inference (z-test) has to be random has to be reasonably normal (vis a vi test for normality) each sample has to be independent (or 10% rule) use a z-statistic to find p-value Given a sample proportion, calculate the sample proportion standard deviation (given on the formula sheet) Then, divide the difference between measured and null proportions to figure \\(z\\) that is,\n\\begin{equation} z = \\frac{\\hat{p}-p_0}{\\sqrt{\\frac{p_0(1-p_0)}{n}}} \\end{equation}\nLook up the probability of \\(z\\) taking place on a \\(z\\) table. Then, \\(1-z\\) would yield the \\(p\\) vaule.\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhz_test/\"\u003ez-test\u003c/a\u003e is a \u003ca href=\"/posts/kbhhypothesis_testing/\"\u003ehypothesis test\u003c/a\u003e for statistical significance between two sample proportions. 
Before it can be conducted, it must meet the \u003ca href=\"#conditions-for-inference--z-test\"\u003econditions for inference\u003c/a\u003e for a z-test.\u003c/p\u003e\n\u003ch2 id=\"conditions-for-inference--z-test\"\u003econditions for inference (z-test)\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ehas to be random\u003c/li\u003e\n\u003cli\u003ehas to be reasonably normal (vis a vi \u003ca href=\"/posts/kbhtest_for_normality/\"\u003etest for normality\u003c/a\u003e)\u003c/li\u003e\n\u003cli\u003eeach sample has to be independent (or 10% rule)\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"use-a-z-statistic-to-find-p-value\"\u003euse a z-statistic to find p-value\u003c/h2\u003e\n\u003col\u003e\n\u003cli\u003eGiven a sample proportion, calculate the sample proportion standard deviation (given on the formula sheet)\u003c/li\u003e\n\u003cli\u003eThen, divide the difference between measured and null proportions to figure \\(z\\)\u003c/li\u003e\n\u003c/ol\u003e\n\u003cp\u003ethat is,\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nz = \\frac{\\hat{p}-p_0}{\\sqrt{\\frac{p_0(1-p_0)}{n}}}\n\\end{equation}\u003c/p\u003e\n\u003cp\u003eLook up the probability of \\(z\\) taking place on a \\(z\\) table. 
Then, \\(1-z\\) would yield the \\(p\\) vaule.\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhz_test/","tags":null,"title":"z-test"},{"categories":null,"contents":"\\(0\\) is a list of length \\(n\\) whose coordinates are all zero\nFormally\u0026mdash;\n\\begin{equation} 0 = (0,\\ldots,0) \\end{equation}\n","html":"\u003cp\u003e\\(0\\) is a \u003ca href=\"/posts/kbhlist/\"\u003elist\u003c/a\u003e of length \\(n\\) whose coordinates are all zero\u003c/p\u003e\n\u003cp\u003eFormally\u0026mdash;\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\n0 = (0,\\ldots,0)\n\\end{equation}\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzero/","tags":null,"title":"zero"},{"categories":null,"contents":"A Zero-Sum Game happens during the following situation:\nWe have two distributions \\(X\\) and \\(Y\\). A \u0026ldquo;Zero-Sum Game\u0026rdquo; is a case where:\n\\begin{equation} P(success) = P(Y \u0026gt; X) \\end{equation}\n(because \\(Y\u0026gt;X \\implies X\u0026lt; Y\\), so there\u0026rsquo;s no case whereby a situation can both \u0026ldquo;cause success\u0026rdquo;).\n","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhzero_sum_game/\"\u003eZero-Sum Game\u003c/a\u003e happens during the following situation:\u003c/p\u003e\n\u003cp\u003eWe have two distributions \\(X\\) and \\(Y\\). 
A \u0026ldquo;\u003ca href=\"/posts/kbhzero_sum_game/\"\u003eZero-Sum Game\u003c/a\u003e\u0026rdquo; is a case where:\u003c/p\u003e\n\u003cp\u003e\\begin{equation}\nP(success) = P(Y \u0026gt; X)\n\\end{equation}\u003c/p\u003e\n\u003cp\u003e(because \\(Y\u0026gt;X \\implies X\u0026lt; Y\\), so there\u0026rsquo;s no case whereby a situation can both \u0026ldquo;cause success\u0026rdquo;).\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzero_sum_game/","tags":null,"title":"Zero-Sum Game"},{"categories":null,"contents":"A zettlekasten is an atomic notetaking system.\nSteps:\nLit notes brief: \u0026lt; 3 sentences write it in your own words Reference notes Take reference notes? Fleeting Notes shower notes Permanent nodes Go through each notes from above, think about how it matters to your research Try to explicitly add value to existing ideas Try to find meaningful connection between ideas finding connections How does this fit into what I know? Can this be explained? find keywords not just to store a note, but how to retrieve it \u0026ldquo;in which circumstance will I need this note\u0026rdquo; \u0026ldquo;when and how will I need this idea\u0026rdquo; ","html":"\u003cp\u003eA \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e is an atomic notetaking system.\u003c/p\u003e\n\u003cp\u003eSteps:\u003c/p\u003e\n\u003ch2 id=\"lit-notes\"\u003eLit notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003ebrief: \u0026lt; 3 sentences\u003c/li\u003e\n\u003cli\u003ewrite it in your own words\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"reference-notes\"\u003eReference notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eTake reference notes?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"fleeting-notes\"\u003eFleeting Notes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eshower notes\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"permanent-nodes\"\u003ePermanent nodes\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eGo through each notes from above, 
think about how it matters to your research\u003c/li\u003e\n\u003cli\u003eTry to explicitly add value to existing ideas\u003c/li\u003e\n\u003cli\u003eTry to find meaningful connection between ideas\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"finding-connections\"\u003efinding connections\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003eHow does this fit into what I know?\u003c/li\u003e\n\u003cli\u003eCan this be explained?\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch3 id=\"find-keywords\"\u003efind keywords\u003c/h3\u003e\n\u003cul\u003e\n\u003cli\u003enot just to store a note, but how to retrieve it\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;in which circumstance will I need this note\u0026rdquo;\u003c/li\u003e\n\u003cli\u003e\u0026ldquo;when and how will I need this idea\u0026rdquo;\u003c/li\u003e\n\u003c/ul\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzettlekasten/","tags":null,"title":"zettlekasten"},{"categories":null,"contents":"a zettlekasten index is an index in a zettlekasten file format; it keeps track of all lists of notes. Head to Index Index for an index of indexes in this particular zettlekasten.\n","html":"\u003cp\u003ea \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003ezettlekasten index\u003c/a\u003e is an index in a \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten\u003c/a\u003e file format; it keeps track of all lists of notes. 
Head to \u003ca href=\"/posts/kbhindex_index/\"\u003eIndex Index\u003c/a\u003e for an \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003eindex\u003c/a\u003e of \u003ca href=\"/posts/kbhzettlekasten_index/\"\u003eindex\u003c/a\u003ees in this particular \u003ca href=\"/posts/kbhzettlekasten/\"\u003ezettlekasten.\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzettlekasten_index/","tags":null,"title":"zettlekasten index"},{"categories":null,"contents":"DOI: 10.3389/fcomp.2021.624683\nOne-Liner late fusion of multimodal signal on the CTP task using transformers, mobilnet, yamnet, and mockingjay\nNovelty Similar to Martinc 2021 and Shah 2021 but actually used the the current Neural-Network state of the art Used late fusion again after the base model training Proposed that inconsistency in the diagnoses of MMSE scores could be a great contributing factor to multi-task learning performance hindrance Notable Methods Proposed base model for transfer learning from text based on MobileNet (image), YAMNet (audio), Mockingjay (speech) and BERT (text) Data all sourced from recording/transcribing/recognizing CTP task Key Figs Figure 3 and 4 This figure tells us the late fusion architecture used\nTable 2 Pre-training with an existing dataset had (not statistically quantified) improvement against a randomly seeded model.\nTable 3 Concat/Add fusion methods between audio and text provided even better results; confirms Martinc 2021 on newer data\n","html":"\u003cp\u003eDOI: 10.3389/fcomp.2021.624683\u003c/p\u003e\n\u003ch2 id=\"one-liner\"\u003eOne-Liner\u003c/h2\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e of multimodal signal on the \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task using transformers, mobilnet, yamnet, and mockingjay\u003c/p\u003e\n\u003ch2 id=\"novelty\"\u003eNovelty\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eSimilar to \u003ca 
href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e and \u003ca href=\"/posts/kbhshah_2021/\"\u003eShah 2021\u003c/a\u003e but actually used the the current Neural-Network state of the art\u003c/li\u003e\n\u003cli\u003eUsed \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e again after the base model training\u003c/li\u003e\n\u003cli\u003eProposed that inconsistency in the diagnoses of \u003ca href=\"/posts/kbhmmse/\"\u003eMMSE\u003c/a\u003e scores could be a great contributing factor to multi-task learning performance hindrance\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"notable-methods\"\u003eNotable Methods\u003c/h2\u003e\n\u003cul\u003e\n\u003cli\u003eProposed base model for transfer learning from text based on MobileNet (image), YAMNet (audio), Mockingjay (speech) and BERT (text)\u003c/li\u003e\n\u003cli\u003eData all sourced from recording/transcribing/recognizing \u003ca href=\"/posts/kbhctp/\"\u003eCTP\u003c/a\u003e task\u003c/li\u003e\n\u003c/ul\u003e\n\u003ch2 id=\"key-figs\"\u003eKey Figs\u003c/h2\u003e\n\u003ch3 id=\"figure-3-and-4\"\u003eFigure 3 and 4\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_10-54-21_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eThis figure tells us the \u003ca href=\"/posts/kbhfusion/#late-fusion\"\u003elate fusion\u003c/a\u003e architecture used\u003c/p\u003e\n\u003ch3 id=\"table-2\"\u003eTable 2\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_10-55-53_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003ePre-training with an existing dataset had (not statistically quantified) improvement against a randomly seeded model.\u003c/p\u003e\n\u003ch3 id=\"table-3\"\u003eTable 3\u003c/h3\u003e\n\u003cfigure\u003e\u003cimg src=\"/ox-hugo/2022-06-25_10-56-22_screenshot.png\"\u003e\n\u003c/figure\u003e\n\n\u003cp\u003eConcat/Add fusion methods between audio and text provided even better results; confirms \u003ca 
href=\"/posts/kbhmartinc_2021/\"\u003eMartinc 2021\u003c/a\u003e on newer data\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzhu_2021/","tags":["ntj"],"title":"Zhu 2021"},{"categories":null,"contents":"Zinc is toxic in excess; so to manage Zinc is important.\nregulating zinc uptake\n","html":"\u003cp\u003eZinc is toxic in excess; so to manage Zinc is important.\u003c/p\u003e\n\u003cp\u003e\u003ca href=\"/posts/kbhregulating_zinc_uptake/\"\u003eregulating zinc uptake\u003c/a\u003e\u003c/p\u003e\n","permalink":"https://www.jemoka.com/posts/kbhzinc_abc_transporters/","tags":null,"title":"Zinc ABC Transporters"},{"categories":null,"contents":"","html":"","permalink":"https://www.jemoka.com/posts/kbhgaussian_mixture_model/","tags":null,"title":"zzzzzz"}] \ No newline at end of file diff --git a/posts/kbhangelman_syndrome/index.html b/posts/kbhangelman_syndrome/index.html index 5bb8b2f4d..b7ce85a0b 100644 --- a/posts/kbhangelman_syndrome/index.html +++ b/posts/kbhangelman_syndrome/index.html @@ -3,4 +3,4 @@ cause of Angelman Syndrome Angelman Syndrome is primarily caused by the UBE3A and the ubiquitin proteasome system. Poly-ubiquitin chain asks to discard cells.">
Houjun Liu
-

Angelman Syndrome

\ No newline at end of file +

Angelman Syndrome

\ No newline at end of file diff --git a/posts/kbhargmax/index.html b/posts/kbhargmax/index.html index 047cf04ac..ce4a4d4d7 100644 --- a/posts/kbhargmax/index.html +++ b/posts/kbhargmax/index.html @@ -6,6 +6,6 @@ additional information argmax of log see argmax of log">
Houjun Liu
-

argmax

function that returns the input that maximizes the expression.

finding argmax

direct optimization

Typical maximization system. Take derivative, set it to 0, solve, plug in, solve. THis is pretty bad during times are not differentiable.

gradient ascent

We take steps following the direction

\begin{equation} +

argmax

function that returns the input that maximizes the expression.

finding argmax

direct optimization

Typical maximization system. Take derivative, set it to 0, solve, plug in, solve. THis is pretty bad during times are not differentiable.

gradient ascent

We take steps following the direction

\begin{equation} \theta_{1j} = \theta_{0j} + \eta \pdv{LL(\theta_{0})}{\theta_{0j}} \end{equation}

additional information

argmax of log

see argmax of log

\ No newline at end of file diff --git a/posts/kbhautism/index.html b/posts/kbhautism/index.html index 6e94ef485..3d96962dd 100644 --- a/posts/kbhautism/index.html +++ b/posts/kbhautism/index.html @@ -4,4 +4,4 @@ autism gene signature The gene signature of autism can be measured in clean and quantitative assays.">
Houjun Liu
-

autism

autism is a spectrum disorder that are caused by both environmental and genetic factors.

Key Question: how can different chromatin regulators lead to the same set of symptoms named “autism”.

autism gene signature

The gene signature of autism can be measured in clean and quantitative assays.

\ No newline at end of file +

autism

autism is a spectrum disorder that are caused by both environmental and genetic factors.

Key Question: how can different chromatin regulators lead to the same set of symptoms named “autism”.

autism gene signature

The gene signature of autism can be measured in clean and quantitative assays.

\ No newline at end of file diff --git a/posts/kbhbasis/index.html b/posts/kbhbasis/index.html index 551d3c545..b564b1f1e 100644 --- a/posts/kbhbasis/index.html +++ b/posts/kbhbasis/index.html @@ -4,7 +4,7 @@ \begin{equation} v = a_1v_1+ \dots + a_{n}v_{n} \end{equation}">
Houjun Liu
-

basis

A basis is a list of vectors in \(V\) that spans \(V\) and is linearly independent

constituents

requirements

additional information

criteria for basis

A list \(v_1, \dots v_{n}\) of vectors in \(V\) is a basis of \(V\) IFF every \(v \in V\) can be written uniquely as:

\begin{equation} +

basis

A basis is a list of vectors in \(V\) that spans \(V\) and is linearly independent

constituents

requirements

additional information

criteria for basis

A list \(v_1, \dots v_{n}\) of vectors in \(V\) is a basis of \(V\) IFF every \(v \in V\) can be written uniquely as:

\begin{equation} v = a_1v_1+ \dots + a_{n}v_{n} \end{equation}

where \(a_1, \dots, a_{n} \in \mathbb{F}\).

forward direction

Suppose we have \(v_1, \dots, v_{n}\) as the basis in \(V\). We desire that \(v_1, \dots v_{n}\) uniquely constructs each \(v \in V\).

By definition, they span \(V\) and are linear independent in \(V\).

Because of the spanning quality, there exists at least one set of \(a_1, \dots, a_{n} \in \mathbb{F}\) such that we can write:

\begin{equation} v \in V = a_1v_1+ \dots + a_{n}v_{n} diff --git a/posts/kbhbernoulli_random_variable/index.html b/posts/kbhbernoulli_random_variable/index.html index 5183f55ed..4e170db5f 100644 --- a/posts/kbhbernoulli_random_variable/index.html +++ b/posts/kbhbernoulli_random_variable/index.html @@ -8,7 +8,7 @@ Which emulates the behavior of your function at \(0\) and \(1\) and we kinda don’t care any other place.">

Houjun Liu
-

Bernoulli distribution

Consider a case where there’s only a single binary outcome:

  • “success”, with probability \(p\)
  • “failure”, with probability \(1-p\)

constituents

\begin{equation} +

Bernoulli distribution

Consider a case where there’s only a single binary outcome:

  • “success”, with probability \(p\)
  • “failure”, with probability \(1-p\)

constituents

\begin{equation} X \sim Bern(p) \end{equation}

requirements

the probability mass function:

\begin{equation} P(X=k) = diff --git a/posts/kbhbetazero/index.html b/posts/kbhbetazero/index.html index cbf326c64..7d89b0a8b 100644 --- a/posts/kbhbetazero/index.html +++ b/posts/kbhbetazero/index.html @@ -7,6 +7,6 @@ Ground truth policy Action Selection Uses Double Progressive Widening">

Houjun Liu
-

BetaZero

Background

recall AlphaZero

  1. Selection (UCB 1, or DTW, etc.)
  2. Expansion (generate possible belief notes)
  3. Simulation (if its a brand new node, Rollout, etc.)
  4. Backpropegation (backpropegate your values up)

Key Idea

Remove the need for heuristics for MCTS—removing inductive bias

Approach

We keep the ol’ neural network:

\begin{equation} +

BetaZero

Background

recall AlphaZero

  1. Selection (UCB 1, or DTW, etc.)
  2. Expansion (generate possible belief notes)
  3. Simulation (if its a brand new node, Rollout, etc.)
  4. Backpropegation (backpropegate your values up)

Key Idea

Remove the need for heuristics for MCTS—removing inductive bias

Approach

We keep the ol’ neural network:

\begin{equation} f_{\theta}(b_{t}) = (p_{t}, v_{t}) \end{equation}

Policy Evaluation

Do \(n\) episodes of MCTS, then use cross entropy to improve \(f\)

Ground truth policy

Action Selection

Uses Double Progressive Widening

Importantly, no need to use a heuristic (or worst yet random Rollouts) for action selection.

Difference vs. LetsDrive

\ No newline at end of file diff --git a/posts/kbhbioinformatics/index.html b/posts/kbhbioinformatics/index.html index bbe928a23..f5f1bd5ad 100644 --- a/posts/kbhbioinformatics/index.html +++ b/posts/kbhbioinformatics/index.html @@ -4,4 +4,4 @@ possible use for bioinformatics Find the start/stop codons of known gene, and determine the gene and protein length ">
Houjun Liu
-

bioinformatics

\ No newline at end of file +

bioinformatics

\ No newline at end of file diff --git a/posts/kbhcantilever_beams/index.html b/posts/kbhcantilever_beams/index.html index 648803197..3dc88490c 100644 --- a/posts/kbhcantilever_beams/index.html +++ b/posts/kbhcantilever_beams/index.html @@ -7,7 +7,7 @@ is the change in deflection over location. “How much deviation of the beam from the resting axi is there as you run along it?">
Houjun Liu
-

Cantilever Beams

A Cantilever beam is a rigid structure which is extended horizontally and supported on one end.


Working with Cantilever Beams

curvature

Let’s first define a function:

\begin{equation} +

Cantilever Beams

A Cantilever beam is a rigid structure which is extended horizontally and supported on one end.


Working with Cantilever Beams

curvature

Let’s first define a function:

\begin{equation} w(x) \end{equation}

this represents the deflection of the beam at point \(x\). We will begin by taking its derivative by location:

\begin{equation} \Delta w = \pdv{w}{x} diff --git a/posts/kbhchatbot/index.html b/posts/kbhchatbot/index.html index f10bbbe45..d428e9fd3 100644 --- a/posts/kbhchatbot/index.html +++ b/posts/kbhchatbot/index.html @@ -6,4 +6,4 @@ LLM Chatbots Training Corpus C4: colossal clean crawled corpus">

Houjun Liu
-

Chatbot

Two main Dialogue Systems architectures:

  • frame based systems: talk to users + accomplish specific tasks
  • LLM: reasoning as agents

Dialogue Systems vs Chatbot

Previously, when we say Chatbot we mean task-based systems

humans and chat

humans tend to think of Dialogue Systems as human-like even if they know its not. this makes users more prone to share private information and worry less about its disclosure.

ELIZA

see ELIZA

LLM Chatbots

Training Corpus

C4: colossal clean crawled corpus

patent, wikipedia, news

Chatbots

  • EmphaticDialogues
  • SaFeRDialogues
  • Pseudo-conversations: reddit, twitter, weibo

Fine-Tuning

  • quality: improving sensible and interesting responses
  • safety: prevention of suggesting harmful actions

IFT: perhaps you can add positive data as fine tuning as a part of instruction-finetuning step.

Filtering: build a filter for whether something is safe/unsafe, etc.

Retrieval Augmented Generation

  1. call search engine
  2. get back a retrieved passages
  3. shove them into prompt
  4. “based on this tasks, answer:”

we can make Chatbots use RAG by adding “pseudo-participants” to make the chat bots, which the system should add.

Evaluation

  • task based systems: measure task performance
  • chatbot: enjoyability by humans

we evaluate chatbots by asking a human to assign a score, and observer is a third party that assigns a score via a transcript of a conversation.

participants scoring

interact with 6 turns, then score:

  • avoiding repetition
  • interestingness
  • sensemaking
  • fluency
  • listening
  • inquisitiveness
  • humanness
  • engagingness

ACUTE-EVAL: choosing who you would like to speak to

adversarial evaluation

train a human/robot classifier, use it, use the inverse of its score at the metric of the chat bot

task evaluatino

measure overall task success, or measure slot error rate

design system design

Don’t build Frankenstein: safety (ensure people aren’t crashing cars), limiting representation harm (don’t demean social groups), privacy

study users and task

what are their values? how do they interact?

build simulations

wizard of oz study: observe user interaction with a HUMAN pretending to be a chat bot

test the design

test on users

info leakage

  • accidentally leaking information (microphone, etc.)
  • intentionally leaking information due to advertising, etc.
\ No newline at end of file +

Chatbot

Two main Dialogue Systems architectures:

  • frame based systems: talk to users + accomplish specific tasks
  • LLM: reasoning as agents

Dialogue Systems vs Chatbot

Previously, when we say Chatbot we mean task-based systems

humans and chat

humans tend to think of Dialogue Systems as human-like even if they know its not. this makes users more prone to share private information and worry less about its disclosure.

ELIZA

see ELIZA

LLM Chatbots

Training Corpus

C4: colossal clean crawled corpus

patent, wikipedia, news

Chatbots

  • EmphaticDialogues
  • SaFeRDialogues
  • Pseudo-conversations: reddit, twitter, weibo

Fine-Tuning

  • quality: improving sensible and interesting responses
  • safety: prevention of suggesting harmful actions

IFT: perhaps you can add positive data as fine tuning as a part of instruction-finetuning step.

Filtering: build a filter for whether something is safe/unsafe, etc.

Retrieval Augmented Generation

  1. call search engine
  2. get back a retrieved passages
  3. shove them into prompt
  4. “based on this tasks, answer:”

we can make Chatbots use RAG by adding “pseudo-participants” to make the chat bots, which the system should add.

Evaluation

  • task based systems: measure task performance
  • chatbot: enjoyability by humans

we evaluate chatbots by asking a human to assign a score, and observer is a third party that assigns a score via a transcript of a conversation.

participants scoring

interact with 6 turns, then score:

  • avoiding repetition
  • interestingness
  • sensemaking
  • fluency
  • listening
  • inquisitiveness
  • humanness
  • engagingness

ACUTE-EVAL: choosing who you would like to speak to

adversarial evaluation

train a human/robot classifier, use it, use the inverse of its score at the metric of the chat bot

task evaluatino

measure overall task success, or measure slot error rate

design system design

Don’t build Frankenstein: safety (ensure people aren’t crashing cars), limiting representation harm (don’t demean social groups), privacy

study users and task

what are their values? how do they interact?

build simulations

wizard of oz study: observe user interaction with a HUMAN pretending to be a chat bot

test the design

test on users

info leakage

  • accidentally leaking information (microphone, etc.)
  • intentionally leaking information due to advertising, etc.
\ No newline at end of file diff --git a/posts/kbhcivil_rights/index.html b/posts/kbhcivil_rights/index.html index 466e151c6..a44ca3859 100644 --- a/posts/kbhcivil_rights/index.html +++ b/posts/kbhcivil_rights/index.html @@ -4,4 +4,4 @@ educational integration in the civil rights movement K-12 disintegration: Brown v. Board of Education University of Georgia was the first disintegrated university in the south service integration in the civil rights movement Lunch counter boycotts.">
Houjun Liu
-

civil rights movement

civil rights movement starting

civil rights moment was kicked off by the Rosa Parks incident, which caused the Montomery Bus Boycott.

Martin Luther King capitalized the incident to kick start civil rights movement. He employed the method of nonviolence movement.

educational integration in the civil rights movement

service integration in the civil rights movement

Lunch counter boycotts. Nashville became the first desegregated lunch counter.

SNICK

SNICK is a student organization founded by Ella Baker in the civil rights movement that sent students into the most dangerous areas of segregation and leading protests.

Motown Records

Motown Records is an African-American owned Detroit record business

Malcom X

A civil rights movement activist, calling for more violent forms of protest and prosecuting specific white actions. Malcom X and Martin Luther King contradicted each other in methods of active persecution vs. nonviolent integration.

Bloody Sunday

Bloody Sunday was a voting rights march from Selma to Montgomery. Peaceful protesters were attacked with nightsticks and tear gas. The event was widely televised: transforming the movement as a televised morality play.

Nonviolence helps getting the clergy leaders as a form of leveraging religion in a show of unity.

Black Power Movement

A new chapter in the civil rights movement which incorporated less of the elements of integration but instead in wanted more sense of self-determination. nonviolence movement, which the Black Power Movement overrided, had ran its course when Martin Luther King was assassinated.

\ No newline at end of file +

civil rights movement

civil rights movement starting

civil rights moment was kicked off by the Rosa Parks incident, which caused the Montomery Bus Boycott.

Martin Luther King capitalized the incident to kick start civil rights movement. He employed the method of nonviolence movement.

educational integration in the civil rights movement

service integration in the civil rights movement

Lunch counter boycotts. Nashville became the first desegregated lunch counter.

SNICK

SNICK is a student organization founded by Ella Baker in the civil rights movement that sent students into the most dangerous areas of segregation and leading protests.

Motown Records

Motown Records is an African-American owned Detroit record business

Malcom X

A civil rights movement activist, calling for more violent forms of protest and prosecuting specific white actions. Malcom X and Martin Luther King contradicted each other in methods of active persecution vs. nonviolent integration.

Bloody Sunday

Bloody Sunday was a voting rights march from Selma to Montgomery. Peaceful protesters were attacked with nightsticks and tear gas. The event was widely televised: transforming the movement as a televised morality play.

Nonviolence helps getting the clergy leaders as a form of leveraging religion in a show of unity.

Black Power Movement

A new chapter in the civil rights movement which incorporated less of the elements of integration but instead in wanted more sense of self-determination. nonviolence movement, which the Black Power Movement overrided, had ran its course when Martin Luther King was assassinated.

\ No newline at end of file diff --git a/posts/kbhcold_war_in_vietnam/index.html b/posts/kbhcold_war_in_vietnam/index.html index c436282cf..113807127 100644 --- a/posts/kbhcold_war_in_vietnam/index.html +++ b/posts/kbhcold_war_in_vietnam/index.html @@ -4,4 +4,4 @@ 1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off 1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership 1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region 1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change 1963: Kennedy assisinated 1964: Vietnam situation worsens 1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control 1965: US fighting was effective though unpresistent; viet cong just went in after US leaves 1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on 1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam.">
Houjun Liu
-

cold war in vietnam

A fact sheet on the progress of the cold war in Vietnam.

progression of US escalation in the war, an overview

Reading: encyclopedia Britannica

  • 1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off
  • 1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership
  • 1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region
  • 1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change
  • 1963: Kennedy assisinated
  • 1964: Vietnam situation worsens
  • 1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control
  • 1965: US fighting was effective though unpresistent; viet cong just went in after US leaves
  • 1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on
  • 1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam. Though it failed, strong anti-war sentiments were stirred.
  • 1969: Anti-War protests pick up force
  • 1970: Ohio National Guard opens fire on unarmed protesters
  • 1973: Peace Pact Signed after the US giving up, essentially
  • 1975: Saigon falls, US evacuates

anti-war protest motivation in Vietnam

Reading: Protest against the War in Vietnam

The first protests rose in the 1950 and picked up force by the late 1960s when LBJ decided not to seek re-election.

Foreign policy is usually hard to change, but the strength of domestic dissent in Vietnam represents an usual shift which drove foreign policy changes.

  • Right-wing sentiment: seeing the war as a means of future-proofing the American government from Communistic influences.
  • Left-wing protest
    • More organized than the spontaneous of the right-wing protest
    • Split between moralistic + legalistic interests vs. national interest

domestic political influence of the Vietnam War

Reading: The War that Killed Trust, Karl Marlantes, 2017

  • “Of course presidents lie”—that the Vietnam War represented the shift away from genuine truthfulness as a part of American politics
  • Killed 58,000 service-members, and made Americans cynical and distrustful of governmental institutions

Systemic Cynicism

Johnson’s “credibility gap”: that the president maybe lying. Nowadays this is commonplace, but back then it was quite unusual.

CLAIM: engendered Cynicism threatened inaction.

Racial Integration

The cold war promised higher degrees of racial integration because of collective service.

Repeated Touring

That, post-draft, the American working class became much more likely to serve “voluntarily” by being recruited. Unlike the draft, which is some ways is universal service, the volunteer system is much more reliant upon th emiddle class.

social impacts of the Vietnam War

Reading: The Social Impact of War, Modell and Haggerty, 1991

  • Wars’ effects can be treated with a lens of social manifestation
  • The Vietnam war had an impact on the last 20 years of primary war literature

draft

The draft is the principle mechanism by which people into the war. The system facilitating the draft in the United States, the Selective Service System, is a good case study for such a system in the Vietnam War.

By its design, the draft is supposed to be an equitable process (baring gender and age.) However, the Vietnam War reveals that the military services was not straightforwardly distributed: often drafting children of lower socioeconomic status.

experience of servicemen in Vietnam

Soldiers in the Vietnam War have shown some negative psychological side effects. Solders are shown to be “working through” the ideas to process, creating a larger effects.

effects on the economy

War veterans generally had higher incomes than non-vets, mostly because they have more income per level of educational attanment.

historiographical school of Vietnam War

Reading: James McLeroy, Small Wars Journal, (Army Special Forces Officer in I Corps, Vietnam, in 1968)

Orthodox treatment

Vietnam War as an extension/afterthought of late-20th century cold war history

  • Vietnam War escalated only because of United States involvement
  • “anti-war” is not opposition against communistic conquest but opposition against war in itself

Revisionist treatment

Vietnam War as a calculable implementation of escalator revolutionary strategy modeled after Mao.

  • Vietnam War is not an insurgency or a civil war, but instead a part of the three-step guerrilla warfare
  • Provocation of the United States is a part of the strategy—to force them to move out of Vietnam and to encourage the communist bloc to provide more support
\ No newline at end of file +

cold war in vietnam

A fact sheet on the progress of the cold war in Vietnam.

progression of US escalation in the war, an overview

Reading: encyclopedia Britannica

  • 1959-1960: VCs initiated a group of ambushes which the exiled government led by Ngô Đình Diệm can no longer fend off
  • 1961: Kennedy takes office, institutes a plan to put American advisors at all levels of Vietnam leadership
  • 1963: Buddest monks rebelled, Ngô family rule became bizarre and leveraged their Roman Catholic views to persecute Buddhists in the region
  • 1963: Ngô Đình Diệm is assassinated after the support of the US (Kennedy) via Cable 243 seeking a regime change
  • 1963: Kennedy assisinated
  • 1964: Vietnam situation worsens
  • 1965: American government fully went in, and Ky was eased out of power when Neuyen Van Thieu ad full control
  • 1965: US fighting was effective though unpresistent; viet cong just went in after US leaves
  • 1967: Protests in the US lead to a growing anti-war sentiment, which the VietCong picked up on
  • 1968: the Tet Offensive, a VietCong operation, tried to pillage South Vietnam. Though it failed, strong anti-war sentiments were stirred.
  • 1969: Anti-War protests pick up force
  • 1970: Ohio National Guard opens fire on unarmed protesters
  • 1973: Peace Pact Signed after the US giving up, essentially
  • 1975: Saigon falls, US evacuates

anti-war protest motivation in Vietnam

Reading: Protest against the War in Vietnam

The first protests rose in the 1950 and picked up force by the late 1960s when LBJ decided not to seek re-election.

Foreign policy is usually hard to change, but the strength of domestic dissent in Vietnam represents an usual shift which drove foreign policy changes.

  • Right-wing sentiment: seeing the war as a means of future-proofing the American government from Communistic influences.
  • Left-wing protest
    • More organized than the spontaneous of the right-wing protest
    • Split between moralistic + legalistic interests vs. national interest

domestic political influence of the Vietnam War

Reading: The War that Killed Trust, Karl Marlantes, 2017

  • “Of course presidents lie”—that the Vietnam War represented the shift away from genuine truthfulness as a part of American politics
  • Killed 58,000 service-members, and made Americans cynical and distrustful of governmental institutions

Systemic Cynicism

Johnson’s “credibility gap”: that the president maybe lying. Nowadays this is commonplace, but back then it was quite unusual.

CLAIM: engendered Cynicism threatened inaction.

Racial Integration

The cold war promised higher degrees of racial integration because of collective service.

Repeated Touring

That, post-draft, the American working class became much more likely to serve “voluntarily” by being recruited. Unlike the draft, which is some ways is universal service, the volunteer system is much more reliant upon th emiddle class.

social impacts of the Vietnam War

Reading: The Social Impact of War, Modell and Haggerty, 1991

  • Wars’ effects can be treated with a lens of social manifestation
  • The Vietnam war had an impact on the last 20 years of primary war literature

draft

The draft is the principle mechanism by which people into the war. The system facilitating the draft in the United States, the Selective Service System, is a good case study for such a system in the Vietnam War.

By its design, the draft is supposed to be an equitable process (baring gender and age.) However, the Vietnam War reveals that the military services was not straightforwardly distributed: often drafting children of lower socioeconomic status.

experience of servicemen in Vietnam

Soldiers in the Vietnam War have shown some negative psychological side effects. Solders are shown to be “working through” the ideas to process, creating a larger effects.

effects on the economy

War veterans generally had higher incomes than non-vets, mostly because they have more income per level of educational attanment.

historiographical school of Vietnam War

Reading: James McLeroy, Small Wars Journal, (Army Special Forces Officer in I Corps, Vietnam, in 1968)

Orthodox treatment

Vietnam War as an extension/afterthought of late-20th century cold war history

  • Vietnam War escalated only because of United States involvement
  • “anti-war” is not opposition against communistic conquest but opposition against war in itself

Revisionist treatment

Vietnam War as a calculable implementation of escalator revolutionary strategy modeled after Mao.

  • Vietnam War is not an insurgency or a civil war, but instead a part of the three-step guerrilla warfare
  • Provocation of the United States is a part of the strategy—to force them to move out of Vietnam and to encourage the communist bloc to provide more support
\ No newline at end of file diff --git a/posts/kbhcomplex_exponential/index.html b/posts/kbhcomplex_exponential/index.html index 451150fcc..42476f70b 100644 --- a/posts/kbhcomplex_exponential/index.html +++ b/posts/kbhcomplex_exponential/index.html @@ -8,7 +8,7 @@ inner product over complex-valued functions recall all of the inner product properties. Now, for functions periodic over \([0,L]\) (recall we have double this if the function is period over \([-L, L]\):">
Houjun Liu
-

Complex Exponential

Recall that Euler’s Equation exists:

\begin{equation} +

Complex Exponential

Recall that Euler’s Equation exists:

\begin{equation} f(x) = e^{i k \omega x} = \cos (k\omega x) + i \sin(k\omega x) \end{equation}

and, for \(\omega = \frac{2\pi}{L}\), this is still \(L\) periodic!

Next up, we make an important note:

\begin{equation} e^{ik\omega x}, e^{-i k \omega x} diff --git a/posts/kbhcomplex_number/index.html b/posts/kbhcomplex_number/index.html index 643381ef1..e299e0c7f 100644 --- a/posts/kbhcomplex_number/index.html +++ b/posts/kbhcomplex_number/index.html @@ -7,7 +7,7 @@ properties of complex arithmetic there are 6. For all statements below, we assume \(\alpha = a+bi\) and \(\beta=c+di\), \(\lambda = e+fi\), where \(a,b,c,d,e,f \in \mathbb{R}\) and therefore \(\alpha, \beta,\lambda \in \mathbb{C}\).">

Houjun Liu
-

complex number

A complex number is a type of number. They are usually written as \(a+bi\).

Formally—

\begin{equation} +

complex number

A complex number is a type of number. They are usually written as \(a+bi\).

Formally—

\begin{equation} \mathbb{C} = \left\{a+bi\ \middle |\ a,b \in \mathbb{R} \right\} \end{equation}

This set generates solutions to every single polynomial with unique solutions. Its plane looks like \(\mathbb{R}^{2}\).

constituents

an order pair of two elements \((a,b)\) where \(a,b\in \mathbb{R}\).

properties of complex arithmetic

there are 6. For all statements below, we assume \(\alpha = a+bi\) and \(\beta=c+di\), \(\lambda = e+fi\), where \(a,b,c,d,e,f \in \mathbb{R}\) and therefore \(\alpha, \beta,\lambda \in \mathbb{C}\).

commutativity

\(\alpha + \beta = \beta + \alpha\) and \(\alpha\beta = \beta\alpha\) for all \(\alpha,\beta \in \mathbb{C}\).

Proof of complex number commutativity

We desire \(\alpha + \beta = \beta + \alpha\).

\begin{align} \alpha + \beta &= (a+bi)+(c+di) \\ diff --git a/posts/kbhconditional_plan/index.html b/posts/kbhconditional_plan/index.html index 80a0263a2..8c279c389 100644 --- a/posts/kbhconditional_plan/index.html +++ b/posts/kbhconditional_plan/index.html @@ -4,7 +4,7 @@ actions: feed, ignore reward: if hungry, negative reward state: two states: is the baby hungry or not observation: noisy crying (she maybe crying because she’s genuinely hungry or crying just for kicks) formulate a conditional plan we can create a conditional plan by generating a exponential tree based on the observations.">

Houjun Liu
-

conditional plan

conditional plan is a POMDP representation technique. We can represent a conditional plan as a tree.

toy problem

crying baby POMDP problem:

  • actions: feed, ignore
  • reward: if hungry, negative reward
  • state: two states: is the baby hungry or not
  • observation: noisy crying (she maybe crying because she’s genuinely hungry or crying just for kicks)

formulate a conditional plan

we can create a conditional plan by generating a exponential tree based on the observations. This is a policy which tells you what you should do given the sequence of observations you get, with no knowledge of the underlying state.

We call this plan \(\pi\) (shock suprise). We define two notations:

  • \(\pi()\): the ACTION at the head of this tree (in this case, “ignore”)
  • \(\pi(o)\): the SUBTREE which is one-level below the first action. For instance, for both observations of the tree above, \(\pi(o)()\) is ignore for both \(o\).

conditional plan evaluation

Assume we have a starting at some given true state \(s\). We can evaluate a conditional plan at that state by formulating:

\begin{equation} +

conditional plan

conditional plan is a POMDP representation technique. We can represent a conditional plan as a tree.

toy problem

crying baby POMDP problem:

  • actions: feed, ignore
  • reward: if hungry, negative reward
  • state: two states: is the baby hungry or not
  • observation: noisy crying (she may be crying because she’s genuinely hungry or crying just for kicks)

formulate a conditional plan

we can create a conditional plan by generating an exponential tree based on the observations. This is a policy which tells you what you should do given the sequence of observations you get, with no knowledge of the underlying state.

We call this plan \(\pi\) (shock surprise). We define two notations:

  • \(\pi()\): the ACTION at the head of this tree (in this case, “ignore”)
  • \(\pi(o)\): the SUBTREE which is one-level below the first action. For instance, for both observations of the tree above, \(\pi(o)()\) is ignore for both \(o\).

conditional plan evaluation

Assume we start at some given true state \(s\). We can evaluate a conditional plan at that state by formulating:

\begin{equation} U^{\pi} (s) = R(s, \pi()) + \gamma \qty[\sum_{s’} T(s’|s, \pi()) \sum_{o} O(o|\pi(), s’) U^{\pi(o)}(s’)] \end{equation}

where, \(\pi()\) is the action at the root node of the tree; and \(\pi(o)\) is the subtree for subplan at observation \(o\); essentially, at each point where we evaluate \(U\), we move the root node forward and recalculate. If we run out of depth, the utility is \(0\) and hence the whole right term is \(0\).

Of course this assumes we know what our initial state is. Which is lame. So now:

\begin{equation} U^{\pi}(b) = \sum_{s}^{} b(s) U^{\pi}(s) diff --git a/posts/kbhcross_product/index.html b/posts/kbhcross_product/index.html index a6232b89c..4eaabb14c 100644 --- a/posts/kbhcross_product/index.html +++ b/posts/kbhcross_product/index.html @@ -5,6 +5,6 @@ The length of the resulting vector in the cross product is the area of the parallelogram formed by the two vectors.">

Houjun Liu
-

cross product

constituents

additional information

lack of inverse of cross product

The cross product doesn’t have an inverse

geometric interpretation of cross product

\begin{equation} +

cross product

constituents

additional information

lack of inverse of cross product

The cross product doesn’t have an inverse

geometric interpretation of cross product

\begin{equation} a \times b = |\vec{a}| |\vec{b}| \sin \theta n \end{equation}

where, \(n\) is the unit vector in some direction.

The length of the resulting vector in the cross product is the area of the parallelogram formed by the two vectors.

\ No newline at end of file diff --git a/posts/kbhdecision_making/index.html b/posts/kbhdecision_making/index.html index 9ad063879..4ab1823f2 100644 --- a/posts/kbhdecision_making/index.html +++ b/posts/kbhdecision_making/index.html @@ -4,4 +4,4 @@ Applications Stock shelving Automated driving Space missions Sports Congestion modeling Online dating Traffic light control decision making methods explicit programming: “just code it up” — try this first if you are building something, which should establish a baseline: guess all possible states, and hard code strategies for all of them supervised learning: manually solve representative states, hard code strategies for them, make model interpolate between them optimization: create optimization objective connected to a model of the environment, optimize that objective planning: using model of the environment directly to predict best moves reinforcement learning: make agent interact with environment directly, and optimize its score of success in the environment without a model Method Model Visible?">
Houjun Liu
-

decision making

Key components

  • Task/Objective (“Automated Driving to reach destination [here]”)
  • Resources (state) (“sensors, fuel, etc.”)
  • Uncertainties (“What in the world is happening”)
  • Actions (“turn left”)

In one line: an agent makes decisions via the balance of observation with uncertainty. This is called the observe-act cycle.

See also connectionism

Applications

  • Stock shelving
  • Automated driving
  • Space missions
  • Sports
  • Congestion modeling
  • Online dating
  • Traffic light control

decision making methods

  • explicit programming: “just code it up” — try this first if you are building something, which should establish a baseline: guess all possible states, and hard code strategies for all of them
  • supervised learning: manually solve representative states, hard code strategies for them, make model interpolate between them
  • optimization: create optimization objective connected to a model of the environment, optimize that objective
  • planning: using model of the environment directly to predict best moves
  • reinforcement learning: make agent interact with environment directly, and optimize its score of success in the environment without a model
MethodModel Visible?Strategy Hard-Coded?
explicit programmingyes, all states fully knownyes
supervised learningno, only a sample of ityes, only a sample of it
optimizationno, except rewardno
planningyesno
reinforcement learning

history

see decision making history

\ No newline at end of file +

decision making

Key components

  • Task/Objective (“Automated Driving to reach destination [here]”)
  • Resources (state) (“sensors, fuel, etc.”)
  • Uncertainties (“What in the world is happening”)
  • Actions (“turn left”)

In one line: an agent makes decisions via the balance of observation with uncertainty. This is called the observe-act cycle.

See also connectionism

Applications

  • Stock shelving
  • Automated driving
  • Space missions
  • Sports
  • Congestion modeling
  • Online dating
  • Traffic light control

decision making methods

  • explicit programming: “just code it up” — try this first if you are building something, which should establish a baseline: guess all possible states, and hard code strategies for all of them
  • supervised learning: manually solve representative states, hard code strategies for them, make model interpolate between them
  • optimization: create optimization objective connected to a model of the environment, optimize that objective
  • planning: using model of the environment directly to predict best moves
  • reinforcement learning: make agent interact with environment directly, and optimize its score of success in the environment without a model
MethodModel Visible?Strategy Hard-Coded?
explicit programmingyes, all states fully knownyes
supervised learningno, only a sample of ityes, only a sample of it
optimizationno, except rewardno
planningyesno
reinforcement learning

history

see decision making history

\ No newline at end of file diff --git a/posts/kbhdecision_making_index/index.html b/posts/kbhdecision_making_index/index.html index 8de2b1bed..1e0ce49d9 100644 --- a/posts/kbhdecision_making_index/index.html +++ b/posts/kbhdecision_making_index/index.html @@ -4,4 +4,4 @@

Decision Making Index

# -index

Lecture notes taking during CS238, decision making. Stanford Intelligence Systems Laboratory (SISL: planning and validation of intelligent systems).

Big Ideas

Themes

  1. There’s a principled mathematical framework for defining rational behavior
  2. There are computational techniques that could lead to better, and perhaps counter-intuitive decisions
  3. Successful application depends on your choice of representation and approximation
    • you typically can’t solve mathematical models exactly
    • so, we have to rely on good models of approximations
  4. The same computational approaches can be applied to different application domains
    • the same set of abstractions can be carried through life
    • send Mykel a note about how these topics about where this stuff is applied

These algorithms drive high quality decisions on a tight timeline. You can’t fuck up: people die.

Contents

  • Fundamental understanding of mathematical models and solution methods—ungraded book exercises
    • Three quizzes: one question per chapter
      1. chapters 2, 3, 5
  • Implement and extend key algorithms for learning and decision making
  • Identify an application of the theory of this course and formulate it mathematically (proposal)
    • what are the i/o
    • what are the sensor measurements
    • what are the decisions to be made
  • [one other thing]

Course Outline

1-shot: Probabilistic Reasoning

  • models of distributions over many variables
  • using distributions to make inferences
  • utility theory

n-shot: Sequential Problems

Model Uncertainty

  • deal with situations where we don’t know what the best action is at any given step
  • i.e.: future rewards, etc.
  • introduce reinforcement learning and its challenges
    1. Rewards may be received long after important decisions
    2. Agents must generalize from limited exploration experience

State Uncertainty

  • deal with situations where we don’t know what is actually happening: we only have a probabilistic state
  • introduce Partially Observable Markov Decision Process
      1. keep a distribution of beliefs
      2. update the distribution of beliefs
      3. make decisions based on the distribution

Multiagent Systems

Lectures

probabilistic reasoning relating to single decisions

Bayesian Networks, and how to deal with them.

a chain of reasoning with feedback

Markov Decision Process uses policies that are evaluated with policy evaluation via utility, Bellman Equation, value function, etc.

If we know the state space fully, we can use policy iteration and value iteration to determine an objectively optimal policy. If we don’t (or if the state space is too large), we can try to discretize our state space and appropriate through Approximate Value Functions, or use online planning approaches to compute good policy as we go.

If none of those things are feasible (i.e. your state space is too big or complex to be discretized (i.e. sampling will cause you to lose the structure of the problem)), you can do some lovely Policy Optimization which will keep you in continuous space while iterating on the policy directly. Some nerds lmao like Policy Gradient methods if your policy is differentiable.

Now, Policy Optimization methods all require sampling a certain set of trajectories and optimizing over them in order to work. How do we know how much sampling to do before we start optimizing? That’s an Exploration and Exploitation question. We can try really hard to collect trajectories, but then we’d lose out on collecting intermediate reward.

POMDP bomp bomp bomp

Failures?

  • Change the action space
  • Change the reward function
  • Change the transition function
  • Improve the solver
  • Don’t worry about it
  • Don’t deploy the system

Words of Wisdom from Mykel

“The belief update is central to learning. The point of education is to change your beliefs; look for opportunities to change your belief.”

“What’s in the action space, how do we maximize it?”

From MDPs, “we can learn from the past, but the past doesn’t influence you.”

“Optimism under uncertainty”: Exploration and Exploitation “you should try things”

Worksheets

\ No newline at end of file +index

Lecture notes taking during CS238, decision making. Stanford Intelligence Systems Laboratory (SISL: planning and validation of intelligent systems).

Big Ideas

Themes

  1. There’s a principled mathematical framework for defining rational behavior
  2. There are computational techniques that could lead to better, and perhaps counter-intuitive decisions
  3. Successful application depends on your choice of representation and approximation
    • you typically can’t solve mathematical models exactly
    • so, we have to rely on good models of approximations
  4. The same computational approaches can be applied to different application domains
    • the same set of abstractions can be carried through life
    • send Mykel a note about how these topics about where this stuff is applied

These algorithms drive high quality decisions on a tight timeline. You can’t fuck up: people die.

Contents

  • Fundamental understanding of mathematical models and solution methods—ungraded book exercises
    • Three quizzes: one question per chapter
      1. chapters 2, 3, 5
  • Implement and extend key algorithms for learning and decision making
  • Identify an application of the theory of this course and formulate it mathematically (proposal)
    • what are the i/o
    • what are the sensor measurements
    • what are the decisions to be made
  • [one other thing]

Course Outline

1-shot: Probabilistic Reasoning

  • models of distributions over many variables
  • using distributions to make inferences
  • utility theory

n-shot: Sequential Problems

Model Uncertainty

  • deal with situations where we don’t know what the best action is at any given step
  • i.e.: future rewards, etc.
  • introduce reinforcement learning and its challenges
    1. Rewards may be received long after important decisions
    2. Agents must generalize from limited exploration experience

State Uncertainty

  • deal with situations where we don’t know what is actually happening: we only have a probabilistic state
  • introduce Partially Observable Markov Decision Process
      1. keep a distribution of beliefs
      2. update the distribution of beliefs
      3. make decisions based on the distribution

Multiagent Systems

Lectures

probabilistic reasoning relating to single decisions

Bayesian Networks, and how to deal with them.

a chain of reasoning with feedback

Markov Decision Process uses policies that are evaluated with policy evaluation via utility, Bellman Equation, value function, etc.

If we know the state space fully, we can use policy iteration and value iteration to determine an objectively optimal policy. If we don’t (or if the state space is too large), we can try to discretize our state space and appropriate through Approximate Value Functions, or use online planning approaches to compute good policy as we go.

If none of those things are feasible (i.e. your state space is too big or complex to be discretized (i.e. sampling will cause you to lose the structure of the problem)), you can do some lovely Policy Optimization which will keep you in continuous space while iterating on the policy directly. Some nerds lmao like Policy Gradient methods if your policy is differentiable.

Now, Policy Optimization methods all require sampling a certain set of trajectories and optimizing over them in order to work. How do we know how much sampling to do before we start optimizing? That’s an Exploration and Exploitation question. We can try really hard to collect trajectories, but then we’d lose out on collecting intermediate reward.

POMDP bomp bomp bomp

Failures?

  • Change the action space
  • Change the reward function
  • Change the transition function
  • Improve the solver
  • Don’t worry about it
  • Don’t deploy the system

Words of Wisdom from Mykel

“The belief update is central to learning. The point of education is to change your beliefs; look for opportunities to change your belief.”

“What’s in the action space, how do we maximize it?”

From MDPs, “we can learn from the past, but the past doesn’t influence you.”

“Optimism under uncertainty”: Exploration and Exploitation “you should try things”

Worksheets

\ No newline at end of file diff --git a/posts/kbhdot_product/index.html b/posts/kbhdot_product/index.html index 45f38dbed..df7348536 100644 --- a/posts/kbhdot_product/index.html +++ b/posts/kbhdot_product/index.html @@ -3,7 +3,7 @@ constituents \(x, y \in \mathbb{R}^{n}\) (NOTE the realness) where, \(x = (x_1, \dots, x_{n})\) and \(y = (y_1, …, y_{n})\) requirements As we are familiar with, element-wise product and sum">
Houjun Liu
-

dot product

The dot product is a property of real vector spaces which is a simplified version of an inner product; specifically, it obviates the need to complex-conjugate anything because, well, \(\bar{n} = n, n \in \mathbb{R}\). The dot-product also yields a real number.

constituents

  • \(x, y \in \mathbb{R}^{n}\) (NOTE the realness)
    • where, \(x = (x_1, \dots, x_{n})\) and \(y = (y_1, …, y_{n})\)

requirements

As we are familiar with, element-wise product and sum

\begin{equation} +

dot product

The dot product is a property of real vector spaces which is a simplified version of an inner product; specifically, it obviates the need to complex-conjugate anything because, well, \(\bar{n} = n, n \in \mathbb{R}\). The dot-product also yields a real number.

constituents

  • \(x, y \in \mathbb{R}^{n}\) (NOTE the realness)
    • where, \(x = (x_1, \dots, x_{n})\) and \(y = (y_1, …, y_{n})\)

requirements

As we are familiar with, element-wise product and sum

\begin{equation} x\cdot y = x_1y_1 + \dots + x_{n}y_{n} \end{equation}

additional information

properties of the dot product

  1. For fixed \(y \in \mathbb{R}^{n}\), the dot product map that sends \(x\) to \(x \cdot y\) is linear (inheriting add. and homo. from algebra)
  2. \(x \cdot x = 0\) IFF \(x =0\) (no negs allowed (above), so every slot has to have a zero to multiply to 0)
  3. \(x \cdot x > 0\) for all \(x \in \mathbb{R}^{n}\) (neg times neg is pos)
  4. \(x \cdot y = y \cdot x\) for reals; by inheriting from each element’s field

orthogonality test

The dot product is an orthogonality test. If the dot product between the two vectors is \(0\), they are definitely orthogonal.

geometric interpretation of the dot product

Well, we have some shape between two vectors; then, we can first write out the law of cosines. Then, we can see that, for two vectors from the same origin, we can say that the projection of vector \(\vec{A}\) onto \(\vec{B}\) is written as:

\begin{equation} |\vec{A}||\vec{B}|\cos \theta diff --git a/posts/kbhdrug_resistance/index.html b/posts/kbhdrug_resistance/index.html index ed43a4ec3..530e7a925 100644 --- a/posts/kbhdrug_resistance/index.html +++ b/posts/kbhdrug_resistance/index.html @@ -3,4 +3,4 @@ occurrence of Drug Resistance Drug Resistance occurs when there’s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function">

Houjun Liu
-

Drug Resistance

Drug Resistance is the process of developing resistance to drugs after some time of use

occurrence of Drug Resistance

Drug Resistance occurs when there’s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function

\ No newline at end of file +

Drug Resistance

Drug Resistance is the process of developing resistance to drugs after some time of use

occurrence of Drug Resistance

Drug Resistance occurs when there’s a mutation that causes macromolecular changes which causes virus to no longer bind the drug but still maintain the same function

\ No newline at end of file diff --git a/posts/kbheigenvalue/index.html b/posts/kbheigenvalue/index.html index 9d7e12785..2a81f0a63 100644 --- a/posts/kbheigenvalue/index.html +++ b/posts/kbheigenvalue/index.html @@ -7,7 +7,7 @@ Why is eigenvalue consistent per eigenvector?">
Houjun Liu
-

1-d invariant subspace

eigenvalue is the scalar needed to scale the basis element of a one dimensional invariant subspace of a Linear Map to represent the behavior of the map:

\begin{equation} +

1-d invariant subspace

eigenvalue is the scalar needed to scale the basis element of a one dimensional invariant subspace of a Linear Map to represent the behavior of the map:

\begin{equation} Tv = \lambda v \end{equation}

Note we require \(v \neq 0\) because otherwise all scalars count.

eigenvector is a vector that forms the basis list of length 1 of that 1-D invariant subspace under \(T\).

operators own eigenvalues, eigenvalues own eigenvectors”

Why is eigenvalue consistent per eigenvector? Because a linear map has to act on the same way to something’s basis as it does to the whole space.

Motivation

Take some subspace \(U \subset V\):

\begin{equation} U = \{\lambda v\ |\ \lambda \in \mathbb{F}, v \in V\} = span(v) diff --git a/posts/kbhelectric_potential_energy/index.html b/posts/kbhelectric_potential_energy/index.html index 57a93be52..1fba99dae 100644 --- a/posts/kbhelectric_potential_energy/index.html +++ b/posts/kbhelectric_potential_energy/index.html @@ -6,7 +6,7 @@ additional information electric potential is analogous to gravitational potential Let \(A, B, C\) be positrons, and the lines are the electric field.">

Houjun Liu
-

electric potential energy

electric potential is analogous to gravitational potential energy, but with electrostatics!

\begin{equation} +

electric potential energy

electric potential is analogous to gravitational potential energy, but with electrostatics!

\begin{equation} P_{E} = qV \end{equation}

where \(q\) is the change on the particle in question, and \(V\) is the voltage, the difference in electric potential between two places.

Yes, voltage is defined vis-à-vis electric potential: that is, it represents a differential of electric potential.

additional information

electric potential is analogous to gravitational potential

Let \(A, B, C\) be positrons, and the lines are the electric field. Which one has the highest electric potential? \(A\), because it has the most distance to travel to until it can get all the way to the right.

connecting electric potential and electric field

parallel plates

\begin{equation} E = \frac{V}{d} diff --git a/posts/kbhexpectation/index.html b/posts/kbhexpectation/index.html index b613b339f..142ecda7c 100644 --- a/posts/kbhexpectation/index.html +++ b/posts/kbhexpectation/index.html @@ -5,7 +5,7 @@ properties of expectation these holds REGARDLESS of whether or not the variables you are doing is independent, IID, etc.">

Houjun Liu
-

expectation

expectation is the calculation of the “intended” or “target” value given a random variable:

\begin{equation} +

expectation

expectation is the calculation of the “intended” or “target” value given a random variable:

\begin{equation} \mathbb{E}[M] = \sum_{x} x\ p(X=x) \end{equation}

  1. Standardize variables to \(z\) by dividing
  2. The correlation is simply their “product”: means of positive and negative groups

The expectation is the average of the counts of the data you have.

properties of expectation

these holds REGARDLESS of whether or not the variables you are doing is independent, IID, etc.

Linearity in the first slot

expectation has additivity and homogeneity.

\begin{equation} \mathbb{E}[aX+b] = a\mathbb{E}[X] + b diff --git a/posts/kbhexploration_and_exploitation/index.html b/posts/kbhexploration_and_exploitation/index.html index 4d6c44003..3728e290e 100644 --- a/posts/kbhexploration_and_exploitation/index.html +++ b/posts/kbhexploration_and_exploitation/index.html @@ -4,6 +4,6 @@ Sometimes, you don’t have a way of getting data.">

Houjun Liu
-

Exploration and Exploitation

You are the president, and you are trying to choose the secretary of state. You can only interview people in sequence, and you have to hire on the spot. There are a known number of candidates. We want to maximize the probability of selecting the best candidate. You are given no priors.

How do we know which candidates we explore, and which candidates we exploit?

Sometimes, you don’t have a way of getting data.


Binary Bandit

We are playing with \(n\) binary slot machines.

  1. arm \(j\) pays off \(1\) with probability \(\theta_{j}\), and pays off \(0\) otherwise. We do not know $θj$s exogenously and have to learn it
  2. we only have \(h\) pulls in total across all \(n\) slot machines

As we perform \(k\) pulls, we can keep track of a separate Beta Distribution representing the probability of success for each of the slot machines.

Essentially, we have a problem whereby we are at a stationary Markov Decision Process whereby the only difference between actions is how much reward we get.

Bayesian Model Estimation

We don’t actually know the probability of winning (called “\(\theta\)” in the figure above), and therefore have to “explore” the system to actually know about it.

We want to compute \(\rho_{a}\):

\begin{equation} +

Exploration and Exploitation

You are the president, and you are trying to choose the secretary of state. You can only interview people in sequence, and you have to hire on the spot. There are a known number of candidates. We want to maximize the probability of selecting the best candidate. You are given no priors.

How do we know which candidates we explore, and which candidates we exploit?

Sometimes, you don’t have a way of getting data.


Binary Bandit

We are playing with \(n\) binary slot machines.

  1. arm \(j\) pays off \(1\) with probability \(\theta_{j}\), and pays off \(0\) otherwise. We do not know $θj$s exogenously and have to learn it
  2. we only have \(h\) pulls in total across all \(n\) slot machines

As we perform \(k\) pulls, we can keep track of a separate Beta Distribution representing the probability of success for each of the slot machines.

Essentially, we have a problem whereby we are at a stationary Markov Decision Process whereby the only difference between actions is how much reward we get.

Bayesian Model Estimation

We don’t actually know the probability of winning (called “\(\theta\)” in the figure above), and therefore have to “explore” the system to actually know about it.

We want to compute \(\rho_{a}\):

\begin{equation} \rho_{a} = P(win_{a} | w_{a}, l_{a}) = \int_{0}^{1} \theta \times Beta(\theta | w_{a}+1, l_{a}+1) \dd{\theta} \end{equation}

where, \(w_{a}\) is the number of successes for arm \(a\), and \(l_{a}\) is the number of failures observed.

This is exactly the \(\mathbb{E}[Beta(w_{a}+1, l_{a}+1)] = \frac{w_{a}+1}{(w_{a}+1)+(l_{a}+1)}\)

A “greedy action” is an action which simply chooses the \(a\) out of all \(\rho_{a}\) which maximizes this probability. We often don’t want that because we want to explore the space.

Approximate Exploration Strategies

Optimal Exploration

Optimal Exploration is not always possible because it is computationally too complex. But it is in theory possible. See Optimal Exploration.

\ No newline at end of file diff --git a/posts/kbhexponential_distribution/index.html b/posts/kbhexponential_distribution/index.html index c209d6d1a..29fb77689 100644 --- a/posts/kbhexponential_distribution/index.html +++ b/posts/kbhexponential_distribution/index.html @@ -4,7 +4,7 @@ constituents $λ$—“rate”: event rate (mean occurrence per time) requirements \begin{equation} f(x) = \begin{cases} \lambda e^{-\lambda x}, x\geq 0\\ 0, x< 0 \end{cases} \end{equation}">
Houjun Liu
-

exponential distribution

Analogous to poisson distribution, but for continuous random variable. Consider a distribution which lasts a duration of time until success; what’s the probability that success is found in some range of times:

“What’s the probability that there are an earthquake in \(k\) years if there’s on average \(2\) earthquakes in 1 year?”

constituents

  • $λ$—“rate”: event rate (mean occurrence per time)

requirements

\begin{equation} +

exponential distribution

Analogous to poisson distribution, but for continuous random variable. Consider a distribution which lasts a duration of time until success; what’s the probability that success is found in some range of times:

“What’s the probability that there are an earthquake in \(k\) years if there’s on average \(2\) earthquakes in 1 year?”

constituents

  • $λ$—“rate”: event rate (mean occurrence per time)

requirements

\begin{equation} f(x) = \begin{cases} \lambda e^{-\lambda x}, x\geq 0\\ 0, x< 0 diff --git a/posts/kbhfactored_mdps/index.html b/posts/kbhfactored_mdps/index.html index 5f0beab06..39b697caa 100644 --- a/posts/kbhfactored_mdps/index.html +++ b/posts/kbhfactored_mdps/index.html @@ -4,4 +4,4 @@ Possible Approaches Using a traditional MDP: an MDP considers “action” as a joint action between all agents (exponential blow up because the agent actions multiply) Local Optimization: share rewards/values among agents Local Optimization: search and maximize joint utility explicitly (no need to model the entire action space) Problems with single Reward Sharing:">

Houjun Liu
-

Factored MDPs

Motivation

Multiple agents need to collaborate to achieve common goal.

Joint Utility Maximization: maximize the joint utility between various agents.

Possible Approaches

  • Using a traditional MDP: an MDP considers “action” as a joint action between all agents (exponential blow up because the agent actions multiply)
  • Local Optimization: share rewards/values among agents
  • Local Optimization: search and maximize joint utility explicitly (no need to model the entire action space)

Problems with single Reward Sharing:

Credit Assignment Problem

In collective reward situations, determining which action out of the cohort actually contributed to the award is hard.

Free Ride Problem

Agents can benefit from reward without actually doing anything by being carried.

Factored MDPs Representation

  • Using factored linear value function to approximate the joint value function
  • Using linear programming to avoid exponential blow up

Background

Coordination Graphs

  • modeling each agent as a node
  • each edge is a dependency

factored Markov Decision Process

  • MDPs are not good at large problems
  • factor the state and action spaces as a random variable factors, etc.

action selection

  • each agent maintains a local \(Q\) function indicating its population
  • the \(Q\) function of each agent may be influenced by other agents:
    • the coordination graph of the agent is used to calculate contribution

We optimize by using one agent at a time: we optimize one agent, then

\ No newline at end of file +

Factored MDPs

Motivation

Multiple agents need to collaborate to achieve common goal.

Joint Utility Maximization: maximize the joint utility between various agents.

Possible Approaches

  • Using a traditional MDP: an MDP considers “action” as a joint action between all agents (exponential blow up because the agent actions multiply)
  • Local Optimization: share rewards/values among agents
  • Local Optimization: search and maximize joint utility explicitly (no need to model the entire action space)

Problems with single Reward Sharing:

Credit Assignment Problem

In collective reward situations, determining which action out of the cohort actually contributed to the award is hard.

Free Ride Problem

Agents can benefit from reward without actually doing anything by being carried.

Factored MDPs Representation

  • Using factored linear value function to approximate the joint value function
  • Using linear programming to avoid exponential blow up

Background

Coordination Graphs

  • modeling each agent as a node
  • each edge is a dependency

factored Markov Decision Process

  • MDPs are not good at large problems
  • factor the state and action spaces as a random variable factors, etc.

action selection

  • each agent maintains a local \(Q\) function indicating its population
  • the \(Q\) function of each agent may be influenced by other agents:
    • the coordination graph of the agent is used to calculate contribution

We optimize by using one agent at a time: we optimize one agent, then

\ No newline at end of file diff --git a/posts/kbhfinite_dimensional_vector_space/index.html b/posts/kbhfinite_dimensional_vector_space/index.html index ad23aba73..7ee34e723 100644 --- a/posts/kbhfinite_dimensional_vector_space/index.html +++ b/posts/kbhfinite_dimensional_vector_space/index.html @@ -4,4 +4,4 @@ additional information every finite-dimensional vector space has a basis Begin with a spanning list in the finite-dimensional vector space you are working with. Apply the fact that all spanning lists contains a basis of which you are spanning.">
Houjun Liu
-

finite-dimensional vector space

A finite-dimensional vector space is a vector space where some actual list (which remember, has finite length) of vectors spans the space.

An infinite-dimensional vector space is a vector space that’s not a finite-dimensional vector space.

additional information

every finite-dimensional vector space has a basis

Begin with a spanning list in the finite-dimensional vector space you are working with. Apply the fact that every spanning list contains a basis of the space it spans. Therefore, some elements of that list form a basis of the finite-dimensional vector space you are working with. \(\blacksquare\)

finite-dimensional subspaces

finite-dimensional subspaces

\ No newline at end of file +

finite-dimensional vector space

A finite-dimensional vector space is a vector space where some actual list (which remember, has finite length) of vectors spans the space.

An infinite-dimensional vector space is a vector space that’s not a finite-dimensional vector space.

additional information

every finite-dimensional vector space has a basis

Begin with a spanning list in the finite-dimensional vector space you are working with. Apply the fact that every spanning list contains a basis of the space it spans. Therefore, some elements of that list form a basis of the finite-dimensional vector space you are working with. \(\blacksquare\)

finite-dimensional subspaces

finite-dimensional subspaces

\ No newline at end of file diff --git a/posts/kbhfirst_order_odes/index.html b/posts/kbhfirst_order_odes/index.html index 59086fb84..66384b8f9 100644 --- a/posts/kbhfirst_order_odes/index.html +++ b/posts/kbhfirst_order_odes/index.html @@ -9,7 +9,7 @@ \begin{equation} y(x) = \int_{0}^{x} e^{-s{2}} \dd{s} \end{equation}">
Houjun Liu
-

First Order ODEs

First Order ODEs are Differential Equations that involve only the first derivative.

Typically, by the nature of how they are modeled, we usually state it in an equation relating three things:

\begin{equation} +

First Order ODEs

First Order ODEs are Differential Equations that involve only the first derivative.

Typically, by the nature of how they are modeled, we usually state it in an equation relating three things:

\begin{equation} t, y(t), y’(t) \end{equation}

as in—we only take one derivative.

Sometimes the solution may not be analytic, but is well-defined:

\begin{equation} y’ = e^{-x^{2}} diff --git a/posts/kbhfourier_series/index.html b/posts/kbhfourier_series/index.html index 9469adb4e..f9b22fa6b 100644 --- a/posts/kbhfourier_series/index.html +++ b/posts/kbhfourier_series/index.html @@ -7,7 +7,7 @@ Recall that because sin and cos are even and odd parts, the functions above force an even and oddness to your expansions. They will be particularly helpful for Dirichlet Conditions and Neumann Conditions.">

Houjun Liu
-

Fourier Series

Fourier Series and how to find them.

For a function given on some interval of length \(l\), the function can be written as:

\begin{equation} +

Fourier Series

Fourier Series and how to find them.

For a function given on some interval of length \(l\), the function can be written as:

\begin{equation} f(x) = \sum_{k=1}^{\infty} a_{k} \sin \qty( \frac{k\pi x}{l}) \end{equation}

or

\begin{equation} f(x) = \sum_{k=1}^{\infty} b_{k} \cos \qty( \frac{k\pi x}{l}) diff --git a/posts/kbhg_dice/index.html b/posts/kbhg_dice/index.html index 07ca38e8a..76ef0b947 100644 --- a/posts/kbhg_dice/index.html +++ b/posts/kbhg_dice/index.html @@ -5,4 +5,4 @@ sample a value function \(k\) takes \(n\) highest sampled values update parameter \(\theta\) resample until distribution convergence take the best sample \(x\) G-DICE create a graph with exogenous \(N\) nodes, and \(O\) outgoing edges (designed before) use Direct Cross Entropy to solve for the best policy Results demonstrates improved performance over MMCS and MCTS does not need robot communication garantees convergence for both finite and infiinte horizon can choose exogenous number of nodes in order to gain computational savings ">

Houjun Liu
-

G-DICE

Motivation

It’s the same. It hasn’t changed: curses of dimensionality and history.

Goal: to solve decentralized multi-agent MDPs.

Key Insights

  1. macro-actions (MAs) to reduce computational complexity (like hierarchical planning)
  2. uses cross entropy to make infinite horizon problem tractable

Prior Approaches

  • masked Monte Carlo search: heuristic-based, no optimality guarantees
  • MCTS: poor performance

Direct Cross Entropy

see also Cross Entropy Method

  1. sample a value function \(k\)
  2. takes \(n\) highest sampled values
  3. update parameter \(\theta\)
  4. resample until distribution convergence
  5. take the best sample \(x\)

G-DICE

  1. create a graph with exogenous \(N\) nodes, and \(O\) outgoing edges (designed before)
  2. use Direct Cross Entropy to solve for the best policy

Results

  1. demonstrates improved performance over MMCS and MCTS
  2. does not need robot communication
  3. guarantees convergence for both finite and infinite horizons
  4. can choose exogenous number of nodes in order to gain computational savings
\ No newline at end of file +

G-DICE

Motivation

It’s the same. It hasn’t changed: curses of dimensionality and history.

Goal: to solve decentralized multi-agent MDPs.

Key Insights

  1. macro-actions (MAs) to reduce computational complexity (like hierarchical planning)
  2. uses cross entropy to make infinite horizon problem tractable

Prior Approaches

  • masked Monte Carlo search: heuristic-based, no optimality guarantees
  • MCTS: poor performance

Direct Cross Entropy

see also Cross Entropy Method

  1. sample a value function \(k\)
  2. takes \(n\) highest sampled values
  3. update parameter \(\theta\)
  4. resample until distribution convergence
  5. take the best sample \(x\)

G-DICE

  1. create a graph with exogenous \(N\) nodes, and \(O\) outgoing edges (designed before)
  2. use Direct Cross Entropy to solve for the best policy

Results

  1. demonstrates improved performance over MMCS and MCTS
  2. does not need robot communication
  3. guarantees convergence for both finite and infinite horizons
  4. can choose exogenous number of nodes in order to gain computational savings
\ No newline at end of file diff --git a/posts/kbhgarch/index.html b/posts/kbhgarch/index.html index 22f2173a2..de14234b1 100644 --- a/posts/kbhgarch/index.html +++ b/posts/kbhgarch/index.html @@ -12,7 +12,7 @@ \begin{equation} {\sigma_{t}}^{2} = \omega + \lambda {\sigma_{t-1}}^{2} + \beta {\sigma_{t-1}}^{2} \end{equation}">
Houjun Liu
-

GARCH

The GARCH model is a model for the heteroskedastic variations where the change in variance is assumed to be autocorrelated: that is, though the variance changes, it changes in a predictable manner.

It is especially useful to

GARCH 1,1

Conditional mean:

\begin{equation} +

GARCH

The GARCH model is a model for the heteroskedastic variations where the change in variance is assumed to be autocorrelated: that is, though the variance changes, it changes in a predictable manner.

It is especially useful to

GARCH 1,1

Conditional mean:

\begin{equation} y_{t} = x’_{t} \theta + \epsilon_{t} \end{equation}

Then, the epsilon parameter:

\begin{equation} \epsilon_{t} = \sigma_{t}z_{t} diff --git a/posts/kbhgauss_law/index.html b/posts/kbhgauss_law/index.html index 9abf58a5c..092d91a17 100644 --- a/posts/kbhgauss_law/index.html +++ b/posts/kbhgauss_law/index.html @@ -5,7 +5,7 @@ somewhat motivating Gauss’ Law Consider a sphere with uniformly distributed charge on its surface. It has surface area \(4 \pi r^{2}\).">

Houjun Liu
-

Gauss' Law

The Gauss’ Law is a principle of electric flux of uniformly distributed electric field along a surface: that, the electric flux through a closed surface is the sum of the electric charge enclosed divided by the permittivity of free space.

That is:

\begin{equation} +

Gauss' Law

The Gauss’ Law is a principle of electric flux of uniformly distributed electric field along a surface: that, the electric flux through a closed surface is the sum of the electric charge enclosed divided by the permittivity of free space.

That is:

\begin{equation} \oint E \cdot dA = \frac{\sum Q}{\epsilon_{0}} \end{equation}

somewhat motivating Gauss’ Law

Consider a sphere with uniformly distributed charge on its surface. It has surface area \(4 \pi r^{2}\). Given the expression of electric flux and the fact that the origin change is in the center, and the test change is evenly distributed (i.e. \(E\) is held constant):

\begin{align} \Phi_{E} &= \int E \cdot dA \\ diff --git a/posts/kbhgaussian/index.html b/posts/kbhgaussian/index.html index 4c9136bcd..2a7fa82f1 100644 --- a/posts/kbhgaussian/index.html +++ b/posts/kbhgaussian/index.html @@ -12,7 +12,7 @@ You will note that \(H\) does satisfy the heat equation:">

Houjun Liu
-

Gaussian

The Gaussian, in general, gives:

\begin{equation} +

Gaussian

The Gaussian, in general, gives:

\begin{equation} e^{-\frac{ax^{2}}{2}} \end{equation}

which is a Bell-Shaped curve. It’s pretty darn important

solving heat equation without boundary

for general expression:

\begin{equation} \pdv{U}{t} = \alpha \pdv[2]{U}{x} diff --git a/posts/kbhgram_schmidt/index.html b/posts/kbhgram_schmidt/index.html index f96de30e6..4699c5036 100644 --- a/posts/kbhgram_schmidt/index.html +++ b/posts/kbhgram_schmidt/index.html @@ -5,7 +5,7 @@ The Procedure We do this process inductively.">

Houjun Liu
-

Gram-Schmidt

OMG its Gram-Schmidtting!!! Ok so like orthonormal basis are so nice, don’t you want to make them out of boring-ass normal basis? Of course you do.

Suppose \(v_1, … v_{m}\) is a linearly independent list in \(V\). Now let us define some \(e_{1} … e_{m}\) using the procedure below such that \(e_{j}\) are orthonormal and, importantly:

\begin{equation} +

Gram-Schmidt

OMG its Gram-Schmidtting!!! Ok so like orthonormal basis are so nice, don’t you want to make them out of boring-ass normal basis? Of course you do.

Suppose \(v_1, … v_{m}\) is a linearly independent list in \(V\). Now let us define some \(e_{1} … e_{m}\) using the procedure below such that \(e_{j}\) are orthonormal and, importantly:

\begin{equation} span(v_1, \dots, v_{m}) = span(e_{1}, \dots, e_{m}) \end{equation}

The Procedure

We do this process inductively. Let:

\begin{equation} e_1 = \frac{v_1}{\|v_1\|} diff --git a/posts/kbhgrammar/index.html b/posts/kbhgrammar/index.html index eaf670361..a0673bd18 100644 --- a/posts/kbhgrammar/index.html +++ b/posts/kbhgrammar/index.html @@ -4,4 +4,4 @@ semantic accountability generativity ">

Houjun Liu
-

grammar

\ No newline at end of file +

grammar

\ No newline at end of file diff --git a/posts/kbhguilded_age/index.html b/posts/kbhguilded_age/index.html index 9985d3d83..8454d8425 100644 --- a/posts/kbhguilded_age/index.html +++ b/posts/kbhguilded_age/index.html @@ -4,4 +4,4 @@ The Guilded Age consists of three different sections:">
Houjun Liu
-

Guilded Age

The Guilded Age is a period in history between 1877 and 1900. This period deepened divide in racism, deepened the split between poor and rich, and the fluidity of American social classes became more set in this time.

Why is the “Guilded Age” “Guilded”?

Guilded: Outside Lined with Gold, Inside Contains Metal and is Less Valuable.

The Guilded Age consists of three different sections:

  • Business (Top!)
  • Labour
  • Government

Contributors to the Guilded Age

There are three pieces

  • “Homestead Act”: legal way to give people land in the west
  • “National Banking Act”: established a uniform economic system and connected markets
  • “Pacific Railroad Act”: expansion of connection; also formed the first “Corporations” based on railroad organization structures.

Issues of the Guilded Age

Immigration

“They are coming to take our jobs!” (Irish Edition.)

USCIS processed people in Ellis (Irish processing, took about 2 days to process) and Angel (Chinese processing, took about 6 months to process) islands: beginning having racial immigrant discrimination.

Urbanization

Populations in the United States tripled in about 50 years. Immigrants were stuffed into tenements. The Guilded age saw the beginning of skyscrapers.

Social Activism

Because of the issues that began during the Guilded Age, more people essentially stepped in instead of the government to play a role in supporting welfare.

Industrialization

“Pulling yourself up by your bootstraps.” Steel is invented. All of the technology has been moved to provide maximum output; this comes, of course, at an environmental cost. Large unions are starting to take off. Railroad Strike: federal troops fired upon railroad workers; argued the case for union but also for corp. influence on politics.

Politics

  • Democrats: racist, states rights, limited federal government.
  • Republicans: supported businesses, immigrations.

Yes, they are still flipped.

But either way, democracy was rampant: 80% turnout! This is, however, one of the most corrupt times in American politics. The party system: local political bosses would provide favours for a vote — in an absence of welfare, in exchange for a vote, would provide protection and social welfare. At this time, there was mass lack of reliance.

Culture

Victorianism! Proper manners, conservatism, etc. Bikes and contraceptives! There is also a fear that “manliness was declining”: that no more farming means need for more sports, body building, etc. Also, “name brands”, “sears catalogue”, and consumerism is taking hold.

Corportization

Corporations, as an idea, took hold. Because the owners of a company are separated from its management, companies could expand in size. Monopolies in industries ran wild: concentrated wealth in addition to corrupted politics.

Taylorism: Taylor decided to make a shovel for each type of movement — which makes people repeat the same task over again but increased efficiency. “Taylor-made” comes from this.

Omaha Platform

  • Expanding Credit
  • Bracketed income tax
  • Social reforms

Lays the groundwork for the progressive movement. This was a socialist movement!

The West

  • Transcontinental railroad: power over towns and concessions
  • Rise of Cowboys and “cattle bonanza”
  • Prairies settled with new farming equipment and new Russian wheat strains: “Americanization”

The “turner thesis”: American democracy is formed at the frontier. However, Western Expansion is actually much of a tragedy, and this actually leads to Imperialism.

Indian Removal

  • Policy of Indian removal to force into treaties + reservation
  • Sioux Wars (crazy horse, etc.): Native American resistance
  • Native Americans of California extreme violence; as well as slave labour
  • Dawes Act of 1887 and forced “assimilation”: forced the breakup of many reservations

Guilded Age Commentary Historians

Rebekah Edwards

  • The late 19th century was not entirely laissez faire
  • “Progressive Era”: not always progressive
  • Issues that lead to the “Guilded age” name that was not specific to the Guilded age

“Guilded age”: “eh, nothing else to deal with, so let’s deal with racism!”

Richard John

  • Guilded age was a period of rapid industrialization
  • Very caricatured, unequal + vulgar time
  • The resulting changes are very concentrated; all of the changes that are 80 years apart

This is super disconnected to social, political aspects of life. It doesn’t talk about how the economy affects the social standings and ladders that people lived in => that movement comes from a lot of social change.

Made a point about the positive/negatives effects of the guilded age: don’t focus the individuals but instead the structures.

He did not want the “progressive era” as a classification in line with the guilded age. “Guilded age” is the only pejorative term for an era: so one negative description does not do it justice.

Richard Benzel

Richard Benzel claims that the textbook industry primes people; that a title for an age shoehorns the age and changes the reflection of the reader.

\ No newline at end of file +

Guilded Age

The Guilded Age is a period in history between 1877 and 1900. This period deepened divide in racism, deepened the split between poor and rich, and the fluidity of American social classes became more set in this time.

Why is the “Guilded Age” “Guilded”?

Guilded: Outside Lined with Gold, Inside Contains Metal and is Less Valuable.

The Guilded Age consists of three different sections:

  • Business (Top!)
  • Labour
  • Government

Contributors to the Guilded Age

There are three pieces

  • “Homestead Act”: legal way to give people land in the west
  • “National Banking Act”: established a uniform economic system and connected markets
  • “Pacific Railroad Act”: expansion of connection; also formed the first “Corporations” based on railroad organization structures.

Issues of the Guilded Age

Immigration

“They are coming to take our jobs!” (Irish Edition.)

USCIS processed people in Ellis (Irish processing, took about 2 days to process) and Angel (Chinese processing, took about 6 months to process) islands: beginning having racial immigrant discrimination.

Urbanization

Populations in the United States tripled in about 50 years. Immigrants were stuffed into tenements. The Guilded age saw the beginning of skyscrapers.

Social Activism

Because of the issues that began during the Guilded Age, more people essentially stepped in instead of the government to play a role in supporting welfare.

Industrialization

“Pulling yourself up by your bootstraps.” Steel is invented. All of the technology has been moved to provide maximum output; this comes, of course, at an environmental cost. Large unions are starting to take off. Railroad Strike: federal troops fired upon railroad workers; argued the case for union but also for corp. influence on politics.

Politics

  • Democrats: racist, states rights, limited federal government.
  • Republicans: supported businesses, immigrations.

Yes, they are still flipped.

But either way, democracy was rampant: 80% turnout! This is, however, one of the most corrupt times in American politics. The party system: local political bosses would provide favours for a vote — in an absence of welfare, in exchange for a vote, would provide protection and social welfare. At this time, there was mass lack of reliance.

Culture

Victorianism! Proper manners, conservatism, etc. Bikes and contraceptives! There is also a fear that “manliness was declining”: that no more farming means need for more sports, body building, etc. Also, “name brands”, “sears catalogue”, and consumerism is taking hold.

Corportization

Corporations, as an idea, took hold. Because the owners of a company are separated from its management, companies could expand in size. Monopolies in industries ran wild: concentrated wealth in addition to corrupted politics.

Taylorism: Taylor decided to make a shovel for each type of movement — which makes people repeat the same task over again but increased efficiency. “Taylor-made” comes from this.

Omaha Platform

  • Expanding Credit
  • Bracketed income tax
  • Social reforms

Lays the groundwork for the progressive movement. This was a socialist movement!

The West

  • Transcontinental railroad: power over towns and concessions
  • Rise of Cowboys and “cattle bonanza”
  • Prairies settled with new farming equipment and new Russian wheat strains: “Americanization”

The “turner thesis”: American democracy is formed at the frontier. However, Western Expansion is actually much of a tragedy, and this actually leads to Imperialism.

Indian Removal

  • Policy of Indian removal to force into treaties + reservation
  • Sioux Wars (crazy horse, etc.): Native American resistance
  • Native Americans of California extreme violence; as well as slave labour
  • Dawes Act of 1887 and forced “assimilation”: forced the breakup of many reservations

Guilded Age Commentary Historians

Rebekah Edwards

  • The late 19th century was not entirely laissez faire
  • “Progressive Era”: not always progressive
  • Issues that lead to the “Guilded age” name that was not specific to the Guilded age

“Guilded age”: “eh, nothing else to deal with, so let’s deal with racism!”

Richard John

  • Guilded age was a period of rapid industrialization
  • Very caricatured, unequal + vulgar time
  • The resulting changes are very concentrated; all of the changes that are 80 years apart

This is super disconnected to social, political aspects of life. It doesn’t talk about how the economy affects the social standings and ladders that people lived in => that movement comes from a lot of social change.

Made a point about the positive/negatives effects of the guilded age: don’t focus the individuals but instead the structures.

He did not want the “progressive era” as a classification in line with the guilded age. “Guilded age” is the only pejorative term for an era: so one negative description does not do it justice.

Richard Benzel

Richard Benzel claims that the textbook industry primes people; that a title for an age shoehorns the age and changes the reflection of the reader.

\ No newline at end of file diff --git a/posts/kbhhindsight_optimization/index.html b/posts/kbhhindsight_optimization/index.html index c4dcf76c2..645156ab9 100644 --- a/posts/kbhhindsight_optimization/index.html +++ b/posts/kbhhindsight_optimization/index.html @@ -4,6 +4,6 @@ system does not know the goal the user may not change their goal on a whim Hindsight Optimization To solve this, we use QMDP: “select the most optimal actions to estimating cost-to-go assuming full observability”.">
Houjun Liu
-

Hindsight Optimization

If we are tele-operating a robot, we ideally want to minimize cost. We want to estimate a user’s goal via user inputs. Predict the most likely goal + assist for it.

“find a cost function for which user input \(u\) is optimal”.

  • system does not know the goal
  • the user may not change their goal on a whim

Hindsight Optimization

To solve this, we use QMDP: “select the most optimal actions to estimating cost-to-go assuming full observability”.

\begin{equation} +

Hindsight Optimization

If we are tele-operating a robot, we ideally want to minimize cost. We want to estimate a user’s goal via user inputs. Predict the most likely goal + assist for it.

“find a cost function for which user input \(u\) is optimal”.

  • system does not know the goal
  • the user may not change their goal on a whim

Hindsight Optimization

To solve this, we use QMDP: “select the most optimal actions to estimating cost-to-go assuming full observability”.

\begin{equation} Q(b,a,u) = \sum_{g}^{} b(g) Q_{g}(x,a,u) \end{equation}

Result

users felt less in control with Hindsight Optimization, despite reaching the goal faster with this policy.

Challenging the results between “task completion” vs. “user satisfaction”.

\ No newline at end of file diff --git a/posts/kbhhow_did_economists_get_it_so_wrong/index.html b/posts/kbhhow_did_economists_get_it_so_wrong/index.html index 5877f2423..510dab27e 100644 --- a/posts/kbhhow_did_economists_get_it_so_wrong/index.html +++ b/posts/kbhhow_did_economists_get_it_so_wrong/index.html @@ -4,5 +4,5 @@ One particular statement that resonated with me in the essay was the fact that a crisis simply “pushed the freshwater economists into further absurdity.” It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.">
Houjun Liu
-

How Did Economists Get It So Wrong?

A reading: (Krugman 2009)

Reflection

The discussion here of the conflict between “saltwater” and “freshwater” (Keynesian and Neoclassical) economists is very interesting when evaluated from the perspective of our recent impending recession.

One particular statement that resonated with me in the essay was the fact that a crisis simply “pushed the freshwater economists into further absurdity.” It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.

At the same time, the forcibly-correcting “fudge” inconsistencies of the Keynesian model are also a strong weakness which perhaps further exacerbated the freshwater economists’ descent into their models. Modeling human behavior has been consistently quite messy, so it is unsurprising that both neoclassical and Keynesian economists strayed away from those models.

Circling back to the COVID-trigger economic downturn: we definitely see a push towards increased “absurdity” in terms of increased polarization in the US; but not only that, the deeply rooted idea of “pandemics don’t affect the States” or at least “the Feds/our supply chain have preparation for absurd events” is again shown to be false—despite the Obaman re-discovery of Keynesian management earlier.

This all raises a question: under what circumstances is a tangibly “better” result going to surface and be accepted when one model is tangibly perfect yet wrong, the other requiring flawed corrections or unrigorous analysis. Must we reject one model completely before the other one can be used?

I don’t believe behavioral economics, though providing a partial solution as Krugman outlines, is the be-and-end-all of macroeconomic models during a depression. All of the models which were theorized (bar pure neoclassicalist “perfect agents”) ostensibly do one thing: trying to “rationally” model the “irrational” behavior of market participants. I don’t believe that this is ultimately going to be feasible on a macroeconomic scale to create models that will last (sans repeated, empirical testing—but there are not enough depressions to go around.) Perhaps, then, the basic Keynesian idea of simply creating fiscal corrections may very well be the best second thing.

Reading notes

the main problem was the fact that nobody saw a catastrophe coming

More important was the profession’s blindness to the very possibility of catastrophic failures in a market economy.

people either believed that the market would never go wrong or the Fed fixes everything

free-market economies never go astray and those who believed that economies may stray now and then but that any major deviations from the path of prosperity could and would be corrected by the all-powerful Fed.

The economists thought the humans are perfectly rational, and the fact that they are not is what leads to failures

Unfortunately, this romanticized and sanitized vision of the economy led most economists to ignore all the things that can go wrong. They turned a blind eye to the limitations of human rationality that often lead to bubbles and busts

Keynesian Economics was not trying to entirely replace markets

Keynes did not, despite what you may have heard, want the government to run the economy. … He wanted to fix capitalism, not replace it.

Milton Friedman led the return to Neoclassical Economics

The neoclassical revival was initially led by Milton Friedman of the University of Chicago, who asserted as early as 1953 that neoclassical economics works well enough as a description of the way the economy actually functions

Neoclassical Economics with the monetarist theory under Milton asserted that keeping the money supply growing is all that needed

Monetarists asserted, however, that a very limited, circumscribed form of government intervention — namely, instructing central banks to keep the nation’s money supply, the sum of cash in circulation and bank deposits, growing on a steady path — is all that’s required to prevent depressions.

Milton Friedman believed that large-scale expansion would lead to inflation and high unemployment

excessively expansionary policies, he predicted, would lead to a combination of inflation and high unemployment

Anti-Keynesian sentiments overtook Friedman’s original proposition

Eventually, however, the anti-Keynesian counterrevolution went far beyond Friedman’s position, which came to seem relatively moderate compared with what his successors were saying.

#question why is this obvious?

for obvious reasons

Because the new economists believed that the market is right, the advice was for businesses to maximize stock price

finance economists believed that we should put the capital development of the nation in the hands of what Keynes had called a “casino.”

Major stock events didn’t blunt the disregard to Keynesian policy

These events, however, which Keynes would have considered evidence of the unreliability of markets, did little to blunt the force of a beautiful idea.

New “perfect” economic models earned large respect in industry

mild-mannered business-school professors could and did become Wall Street rocket scientists, earning Wall Street paychecks.

New models often analyzed financial systems independently of their real-world worth

Finance economists rarely asked the seemingly obvious (though not easily answered) question of whether asset prices made sense given real-world fundamentals like earnings. Instead, they asked only whether asset prices made sense given other asset prices

Macro split into two factions: the Keynes recessionists or the anti-Keynesians

macroeconomics has divided into two great factions: “saltwater” economists (mainly in coastal U.S. universities), who have a more or less Keynesian vision of what recessions are all about; and “freshwater” economists (mainly at inland schools), who consider that vision nonsense.

Freshwater economists’ theory: recessions were just people confused?

Nobel laureate Robert Lucas, argued that recessions were caused by temporary confusion: workers and companies had trouble distinguishing overall changes in the level of prices

Under freshwater theories, unemployment is just people electing not to work due to unfavorable environment

amplified by the rational response of workers, who voluntarily work more when the environment is favorable and less when it’s unfavorable. Unemployment is a deliberate decision by workers to take time off.

Put baldly like that, this theory sounds foolish — was the Great Depression really the Great +

How Did Economists Get It So Wrong?

A reading: (Krugman 2009)

Reflection

The discussion here of the conflict between “saltwater” and “freshwater” (Keynesian and Neoclassical) economists is very interesting when evaluated from the perspective of our recent impending recession.

One particular statement that resonated with me in the essay was the fact that a crisis simply “pushed the freshwater economists into further absurdity.” It is interesting to see that, once a theory has been well-established and insulated in a community, it becomes much more difficult to parcel out as something that could be wrong.

At the same time, the forcibly-correcting “fudge” inconsistencies of the Keynesian model are also a strong weakness which perhaps further exacerbated the freshwater economists’ descent into their models. Modeling human behavior has been consistently quite messy, so it is unsurprising that both neoclassical and Keynesian economists strayed away from those models.

Circling back to the COVID-trigger economic downturn: we definitely see a push towards increased “absurdity” in terms of increased polarization in the US; but not only that, the deeply rooted idea of “pandemics don’t affect the States” or at least “the Feds/our supply chain have preparation for absurd events” is again shown to be false—despite the Obaman re-discovery of Keynesian management earlier.

This all raises a question: under what circumstances is a tangibly “better” result going to surface and be accepted when one model is tangibly perfect yet wrong, the other requiring flawed corrections or unrigorous analysis. Must we reject one model completely before the other one can be used?

I don’t believe behavioral economics, though providing a partial solution as Krugman outlines, is the be-and-end-all of macroeconomic models during a depression. All of the models which were theorized (bar pure neoclassicalist “perfect agents”) ostensibly do one thing: trying to “rationally” model the “irrational” behavior of market participants. I don’t believe that this is ultimately going to be feasible on a macroeconomic scale to create models that will last (sans repeated, empirical testing—but there are not enough depressions to go around.) Perhaps, then, the basic Keynesian idea of simply creating fiscal corrections may very well be the best second thing.

Reading notes

the main problem was the fact that nobody saw a catastrophe coming

More important was the profession’s blindness to the very possibility of catastrophic failures in a market economy.

people either believed that the market would never go wrong or the Fed fixes everything

free-market economies never go astray and those who believed that economies may stray now and then but that any major deviations from the path of prosperity could and would be corrected by the all-powerful Fed.

The economists thought the humans are perfectly rational, and the fact that they are not is what leads to failures

Unfortunately, this romanticized and sanitized vision of the economy led most economists to ignore all the things that can go wrong. They turned a blind eye to the limitations of human rationality that often lead to bubbles and busts

Keynesian Economics was not trying to entirely replace markets

Keynes did not, despite what you may have heard, want the government to run the economy. … He wanted to fix capitalism, not replace it.

Milton Friedman led the return to Neoclassical Economics

The neoclassical revival was initially led by Milton Friedman of the University of Chicago, who asserted as early as 1953 that neoclassical economics works well enough as a description of the way the economy actually functions

Neoclassical Economics with the monetarist theory under Friedman asserted that keeping the money supply growing is all that is needed

Monetarists asserted, however, that a very limited, circumscribed form of government intervention — namely, instructing central banks to keep the nation’s money supply, the sum of cash in circulation and bank deposits, growing on a steady path — is all that’s required to prevent depressions.

Milton Friedman believed that large-scale expansion would lead to inflation and high unemployment

excessively expansionary policies, he predicted, would lead to a combination of inflation and high unemployment

Anti-Keynesian sentiments overtook Friedman’s original proposition

Eventually, however, the anti-Keynesian counterrevolution went far beyond Friedman’s position, which came to seem relatively moderate compared with what his successors were saying.

#question why is this obvious?

for obvious reasons

Because the new economists believed that the market is right, the advice was for businesses to maximize stock price

finance economists believed that we should put the capital development of the nation in the hands of what Keynes had called a “casino.”

Major stock events didn’t blunt the disregard to Keynesian policy

These events, however, which Keynes would have considered evidence of the unreliability of markets, did little to blunt the force of a beautiful idea.

New “perfect” economic models earned large respect in industry

mild-mannered business-school professors could and did become Wall Street rocket scientists, earning Wall Street paychecks.

New models often analyzed financial systems independently of their real-world worth

Finance economists rarely asked the seemingly obvious (though not easily answered) question of whether asset prices made sense given real-world fundamentals like earnings. Instead, they asked only whether asset prices made sense given other asset prices

Macro split into two factions: the Keynes recessionists or the anti-Keynesians

macroeconomics has divided into two great factions: “saltwater” economists (mainly in coastal U.S. universities), who have a more or less Keynesian vision of what recessions are all about; and “freshwater” economists (mainly at inland schools), who consider that vision nonsense.

Freshwater economists’ theory: recessions were just people confused?

Nobel laureate Robert Lucas, argued that recessions were caused by temporary confusion: workers and companies had trouble distinguishing overall changes in the level of prices

Under freshwater theories, unemployment is just people electing not to work due to unfavorable environment

amplified by the rational response of workers, who voluntarily work more when the environment is favorable and less when it’s unfavorable. Unemployment is a deliberate decision by workers to take time off.

Put baldly like that, this theory sounds foolish — was the Great Depression really the Great Vacation?

The new Keynesians still kept more or less to non-dramatic thinking

They tried to keep their deviations from neoclassical orthodoxy as limited as possible. This meant that there was no room in the prevailing models for such things as bubbles and banking-system collapse.

New Keynesians believed entirely in the Fed, without need for large fiscal policy

They believed that monetary policy, administered by the technocrats at the Fed, could provide whatever remedies the economy needed.

People just thought that there can’t be a bubble in housing

What’s striking, when you reread Greenspan’s assurances, is that they weren’t based on evidence — they were based on the a priori assertion that there simply can’t be a bubble in housing.

Obama’s economic policies are much more on the Keynes side

Such Keynesian thinking underlies the Obama administration’s economic policies — and the freshwater economists are furious.

A failure of neoclassicalist theory is that breaking from Keynesian economic behavior requires perfect rationality, which is absurd

if you start from the assumption that people are perfectly rational and markets are perfectly efficient, you have to conclude that unemployment is voluntary and recessions are desirable.

Economists thought that economics would have been perfect

Economics, as a field, got in trouble because economists were seduced by the vision of a perfect, frictionless market system.

Behavioral Economics

Behavioral Economics is a study of economics which hinges on the irrationality of human behavior. It’s an answer both to Neoclassical Economics’ poor assumption that humans and markets are perfect, and also to Keynesian Economics’ increasingly large need for a random “fudge” to get its models working right.

pillars of Behavioral Economics

  1. “Many real-world investors bear little resemblance to the cool calculators of efficient-market theory: they’re all too subject to herd behavior, to bouts of irrational exuberance and unwarranted panic.”
  2. “even those who try to base their decisions on cool calculation often find that they can’t, that problems of trust, credibility and limited collateral force them to run with the herd.”

Good arbitrageurs are just forced out of the economy in large downward spirals

As a result, the smart money is forced out of the market, and prices may go into a downward spiral.

\ No newline at end of file diff --git a/posts/kbhilqr/index.html b/posts/kbhilqr/index.html index 651aba351..0063047a3 100644 --- a/posts/kbhilqr/index.html +++ b/posts/kbhilqr/index.html @@ -6,7 +6,7 @@ dim(belief) >> dim(state) dim(belief) >> dim(action) Belief iLQR “determinize and replan”: simplify the dynamics at each step, plan, take action, and replan">
Houjun Liu
-

Belief iLQR

Motivation

  • Imperfect sensors in robot control: partial observations
  • Manipulators face tradeoff between sensing + acting

curse of dimensionality and curse of history.

Belief-Space Planning

Perhaps we should plan over all possible distributions of state space, making a belief-state MDP.

But: this is a nonlinear, stochastic dynamic. In fact, there may be stochastic events that affect the dynamics.

Big problem:

Belief iLQR

“determinize and replan”: simplify the dynamics at each step, plan, take action, and replan

  1. tracks belief via observations
  2. simplifies belief state dynamics based on linear MLE

When the dynamics is linear, you can use Linear-Quadratic Regulator to solve. This results in a worse policy but will give you a policy.

Previous Work

  • “just solve most-likely state”: doesn’t take action to explore and understand the state.
  • “belief roadmap”: not really planning in the belief space itself

Approach

Belief Update

We use Bayesian updates for the state probability updates:

\begin{equation} +

Belief iLQR

Motivation

  • Imperfect sensors in robot control: partial observations
  • Manipulators face tradeoff between sensing + acting

curse of dimensionality and curse of history.

Belief-Space Planning

Perhaps we should plan over all possible distributions of state space, making a belief-state MDP.

But: this is a nonlinear, stochastic dynamic. In fact, there may be stochastic events that affect the dynamics.

Big problem:

Belief iLQR

“determinize and replan”: simplify the dynamics at each step, plan, take action, and replan

  1. tracks belief via observations
  2. simplifies belief state dynamics based on linear MLE

When the dynamics is linear, you can use Linear-Quadratic Regulator to solve. This results in a worse policy but will give you a policy.

Previous Work

  • “just solve most-likely state”: doesn’t take action to explore and understand the state.
  • “belief roadmap”: not really planning in the belief space itself

Approach

Belief Update

We use Bayesian updates for the state probability updates:

\begin{equation} P(s_{t+1}) = \eta P(o_{t+1}|s_{t+1}) \int_{x} p(s_{t+1}|x, a_{t}) P(x) \, dx \end{equation}

and then the actual beliefs are updated with Extended Kalman Filter.

Importantly, the Extended Kalman Filter usually requires us to take an expectation of each observation O over all O; instead, we assume that the future states are uniform linearly distributed.

Belief Update Cost

Ideally, we want to lower covariance of the belief vectors in order to be more confident.

  1. first term: reduce large trajectories (verify)
  2. second: stabilization

Replanning Strategy

while b not at goal:
     # replan at where we are at now
diff --git a/posts/kbhindependently_and_identically_distributed/index.html b/posts/kbhindependently_and_identically_distributed/index.html
index 869c5f321..dfa2a4030 100644
--- a/posts/kbhindependently_and_identically_distributed/index.html
+++ b/posts/kbhindependently_and_identically_distributed/index.html
@@ -4,4 +4,4 @@
 \(X_i\) all have the same PMF / PDF and therefore, all have the same expectation and variance central limit theorem when things are IID, you can use central limit theorem.">
Houjun Liu
-

independently and identically distributed

\ No newline at end of file +

independently and identically distributed

\ No newline at end of file diff --git a/posts/kbhinductors_in_circuits/index.html b/posts/kbhinductors_in_circuits/index.html index f8e34880c..5ba409489 100644 --- a/posts/kbhinductors_in_circuits/index.html +++ b/posts/kbhinductors_in_circuits/index.html @@ -5,7 +5,7 @@ energy stored in an inductor \begin{equation} E = \frac{1}{2} LI^{2} \end{equation}">
Houjun Liu
-

inductor

voltage across a inductor

\begin{equation} +

inductor

voltage across a inductor

\begin{equation} V = \epsilon = -L \dv{I}{t} \end{equation}

this is kind of a formulation of faraday’s law.

\begin{equation} I(t) = \frac{V_0}{R_1} (1-e^{\frac{-t}{\frac{L}{R}}}) diff --git a/posts/kbhinjectivity/index.html b/posts/kbhinjectivity/index.html index 69d158cef..e5fb18184 100644 --- a/posts/kbhinjectivity/index.html +++ b/posts/kbhinjectivity/index.html @@ -6,4 +6,4 @@ Now, we know that \(0\), because it indeed gets mapped by \(T\) to \(0\), is in the null space of \(T\).">

Houjun Liu
-

injectivity

An injective function is one which is one-to-one: that it maps distinct inputs to distinct outputs.

constituents

requirements

\(T\) is injective if \(Tu = Tv\) implies \(u=v\).

additional information

injectivity implies that null space is \(\{0\}\)

Proof: let \(T \in \mathcal{L}(V,W)\); \(T\) is injective IFF \(null\ T = \{0\}\).

given injectivity

Suppose \(T\) is injective.

Now, we know that \(0\), because it indeed gets mapped by \(T\) to \(0\), is in the null space of \(T\).

Because linear maps take \(0\) to \(0\), \(T0=0\). Now, because \(T\) is injective, for any \(v\) that \(Tv = 0 = T 0\) implies \(v=0\).

So \(0\) is the only thing that an injective \(T\) can map to \(0\), and it is indeed in the null space, so the null space is just \(\{0\}\).

given \(null\ T=\{0\}\)

Suppose we have some \(Tu = Tv\); we desire to prove that \(u=v\) to show that \(T\) is injective.

Given \(Tu=Tv\), we have that \(Tu-Tv=0\). Given additivity, \(T(u-v) = 0\). This makes \((u-v) \in\ null\ T\).

Given only \(0\) is in the null space of \(T\), \(u-v = 0\), so \(u=v\), as desired. \(\blacksquare\).

map to smaller space is not injective

See map to smaller space is not injective

\ No newline at end of file +

injectivity

An injective function is one which is one-to-one: that it maps distinct inputs to distinct outputs.

constituents

requirements

\(T\) is injective if \(Tu = Tv\) implies \(u=v\).

additional information

injectivity implies that null space is \(\{0\}\)

Proof: let \(T \in \mathcal{L}(V,W)\); \(T\) is injective IFF \(null\ T = \{0\}\).

given injectivity

Suppose \(T\) is injective.

Now, we know that \(0\), because it indeed gets mapped by \(T\) to \(0\), is in the null space of \(T\).

Because linear maps take \(0\) to \(0\), \(T0=0\). Now, because \(T\) is injective, for any \(v\) that \(Tv = 0 = T 0\) implies \(v=0\).

So \(0\) is the only thing that an injective \(T\) can map to \(0\), and it is indeed in the null space, so the null space is just \(\{0\}\).

given \(null\ T=\{0\}\)

Suppose we have some \(Tu = Tv\); we desire to prove that \(u=v\) to show that \(T\) is injective.

Given \(Tu=Tv\), we have that \(Tu-Tv=0\). Given additivity, \(T(u-v) = 0\). This makes \((u-v) \in\ null\ T\).

Given only \(0\) is in the null space of \(T\), \(u-v = 0\), so \(u=v\), as desired. \(\blacksquare\).

map to smaller space is not injective

See map to smaller space is not injective

\ No newline at end of file diff --git a/posts/kbhinner_product/index.html b/posts/kbhinner_product/index.html index 373fb12f9..1995863cb 100644 --- a/posts/kbhinner_product/index.html +++ b/posts/kbhinner_product/index.html @@ -3,7 +3,7 @@ positivity: \(\langle v, v\rangle \geq 0, \forall v \in V\) definiteness: \(\langle v, v\rangle = 0\) IFF \(v = 0\) additivity in the first slot: \(\langle u+v, w\rangle = \langle u, w \rangle + \langle v, w \rangle\) homogeneity in the first slot: \(\langle \lambda u, v \rangle = \lambda \langle u, v \rangle\) conjugate symmetry: \(\langle u,v \rangle = \overline{\langle v,u \rangle}\) additional information Inner Product Space An Inner Product Space is a vector space with a well-defined inner product.">
Houjun Liu
-

inner product

constituents

requirements

We define \(\langle u, v \rangle \in \mathbb{F}\) as the inner product of \((u,v)\) in that order!. It carries the following properties:

  1. positivity: \(\langle v, v\rangle \geq 0, \forall v \in V\)
  2. definiteness: \(\langle v, v\rangle = 0\) IFF \(v = 0\)
  3. additivity in the first slot: \(\langle u+v, w\rangle = \langle u, w \rangle + \langle v, w \rangle\)
  4. homogeneity in the first slot: \(\langle \lambda u, v \rangle = \lambda \langle u, v \rangle\)
  5. conjugate symmetry: \(\langle u,v \rangle = \overline{\langle v,u \rangle}\)

additional information

Inner Product Space

An Inner Product Space is a vector space with a well-defined inner product. For instance, \(\mathbb{F}^{n}\) has the canonical inner product named Euclidean Inner Product (see below, a.k.a. dot product for reals). The existence of such a well-defined inner product makes \(\mathbb{F}^{n}\) an Inner Product Space.

Rare Axler moment, instead of “well-defined”, he says we want a vector space with an inner product “lurking nearby”; james bond style.

properties of inner product

  1. For a fixed \(u \in V\), the function takes \(v\) to \(\langle v,u \rangle\) is a Linear Map \(V \to \mathbb{F}\)
  2. \(\langle 0,u \rangle = 0\)
  3. \(\langle u,0 \rangle = 0\)
  4. \(\langle u,v+w \rangle = \langle u,v \rangle + \langle u,w \rangle\)
  5. \(\langle u,\lambda v \rangle = \bar{\lambda}\langle u,v \rangle\)

Proof:

  1. Inheriting the additivity and homogeneity of the definition of inner products
  2. Set \(u\) to be the fixed element for 1), set \(0\) to be the input, linear maps take \(0\) to \(0\)
  3. Apply conjugate symmetry to 2)
  4. Apply conjugate symmetry, inner product additivity, then conjugate back
  5. Apply conjugate symmetry, inner product homogeneity in the first slot, then conjugate back (of course leaving \(\lambda\) out conjugated)

Euclidean Inner Product

For \(x,y \in \mathbb{F}^{n}\), one can define a pretty well-defined inner product by

\begin{equation} +

inner product

constituents

requirements

We define \(\langle u, v \rangle \in \mathbb{F}\) as the inner product of \((u,v)\) in that order!. It carries the following properties:

  1. positivity: \(\langle v, v\rangle \geq 0, \forall v \in V\)
  2. definiteness: \(\langle v, v\rangle = 0\) IFF \(v = 0\)
  3. additivity in the first slot: \(\langle u+v, w\rangle = \langle u, w \rangle + \langle v, w \rangle\)
  4. homogeneity in the first slot: \(\langle \lambda u, v \rangle = \lambda \langle u, v \rangle\)
  5. conjugate symmetry: \(\langle u,v \rangle = \overline{\langle v,u \rangle}\)

additional information

Inner Product Space

An Inner Product Space is a vector space with a well-defined inner product. For instance, \(\mathbb{F}^{n}\) has the canonical inner product named Euclidean Inner Product (see below, a.k.a. dot product for reals). The existence of such a well-defined inner product makes \(\mathbb{F}^{n}\) an Inner Product Space.

Rare Axler moment, instead of “well-defined”, he says we want a vector space with an inner product “lurking nearby”; james bond style.

properties of inner product

  1. For a fixed \(u \in V\), the function takes \(v\) to \(\langle v,u \rangle\) is a Linear Map \(V \to \mathbb{F}\)
  2. \(\langle 0,u \rangle = 0\)
  3. \(\langle u,0 \rangle = 0\)
  4. \(\langle u,v+w \rangle = \langle u,v \rangle + \langle u,w \rangle\)
  5. \(\langle u,\lambda v \rangle = \bar{\lambda}\langle u,v \rangle\)

Proof:

  1. Inheriting the additivity and homogeneity of the definition of inner products
  2. Set \(u\) to be the fixed element for 1), set \(0\) to be the input, linear maps take \(0\) to \(0\)
  3. Apply conjugate symmetry to 2)
  4. Apply conjugate symmetry, inner product additivity, then conjugate back
  5. Apply conjugate symmetry, inner product homogeneity in the first slot, then conjugate back (of course leaving \(\lambda\) out conjugated)

Euclidean Inner Product

For \(x,y \in \mathbb{F}^{n}\), one can define a pretty well-defined inner product by

\begin{equation} x \cdot y = x_1 \bar{y_{1}} + … + x_{n} \bar{y_{n}} \end{equation}

similar to dot product for the reals. This is called the Euclidean Inner Product and has the nice parallelity properties we saw.

complex number shenanigans that motivate the inner product

…as both relevant and more general than the dot product, but also different in key areas.

First, review complex numbers from our discussion in chapter 4. The main problem here is this:

for \(z = (z_1, \dots, z_{n}) \in \mathbb{C}^{n}\), simply squaring each slot to take the norm may cause us to take a square root of a negative number (as each slot would then be \(a^{2}-b^{2}\) for a complex number). That’s no bueno because we want \(\|z\|\) to be real and non-negative.

This, therefore, suggests something similar for our inner product definition; to make sure that each slot end up being a real and non-negative number, we simply conjugate the second value:

\begin{equation} x \cdot y = x_1 \bar{y_{1}} + … + x_{n} \bar{y_{n}} diff --git a/posts/kbhinvariant_subspace/index.html b/posts/kbhinvariant_subspace/index.html index 0a7093806..fa6daec41 100644 --- a/posts/kbhinvariant_subspace/index.html +++ b/posts/kbhinvariant_subspace/index.html @@ -5,4 +5,4 @@ additional information nontrivial invariant subspace (i.">

Houjun Liu
-

invariant subspace

invariant subspaces are a property of operators; it is a subspace for which the operator in question on the overall space is also an operator of the subspace.

constituents

requirements

\(U\) is considered invariant on \(T\) if \(u \in U \implies Tu \in U\)

(i.e. \(U\) is invariant under \(T\) if \(T |_{U}\) is an operator on \(U\))

additional information

nontrivial invariant subspace

(i.e. eigenstuff)

A proof is not given yet, but \(T \in \mathcal{L}(V)\) has an invariant subspace that’s not \(V\) nor \(\{0\}\) if \(\dim V > 1\) for complex number vector spaces and \(\dim V > 2\) for real number vector spaces.

\ No newline at end of file +

invariant subspace

invariant subspaces are a property of operators; it is a subspace for which the operator in question on the overall space is also an operator of the subspace.

constituents

requirements

\(U\) is considered invariant on \(T\) if \(u \in U \implies Tu \in U\)

(i.e. \(U\) is invariant under \(T\) if \(T |_{U}\) is an operator on \(U\))

additional information

nontrivial invariant subspace

(i.e. eigenstuff)

A proof is not given yet, but \(T \in \mathcal{L}(V)\) has an invariant subspace that’s not \(V\) nor \(\{0\}\) if \(\dim V > 1\) for complex number vector spaces and \(\dim V > 2\) for real number vector spaces.

\ No newline at end of file diff --git a/posts/kbhinvertability/index.html b/posts/kbhinvertability/index.html index 1eee21f08..959b704e0 100644 --- a/posts/kbhinvertability/index.html +++ b/posts/kbhinvertability/index.html @@ -6,6 +6,6 @@ additional information matrix invertability Matrices whose determinants are not \(0\) (i.">
Houjun Liu
-

invertability

A Linear Map is invertable if it can be undone. It is called a nonsingular matrix

constituents

A linear map \(T \in \mathcal{L}(V,W)\)

requirements

A Linear Map \(T \in \mathcal{L}(V,W)\) is called invertable if \(\exists T^{-1} \in \mathcal{L}(W,V): T^{-1}T=I \in \mathcal{L}(V), TT^{-1} = I \in \mathcal{L}(W)\).

“a map is invertable if there is an inverse”: that combining the commutable inverse and itself will result in the identity map.

additional information

matrix invertability

Matrices whose determinants are not \(0\) (i.e. it is invertable) is called “nonsingular matrix”. If it doesn’t have an inverse, it is called a singular matrix.

linear map inverse is unique

An invertable Linear Map has an unique inverse:

Proof:

Suppose \(T \in \mathcal{L}(V,W)\), and \(\exists S_1, S_2\) which are both inverses of \(T\). We desire \(S_1=S_2\).

So:

\begin{equation} +

invertability

A Linear Map is invertable if it can be undone. It is called a nonsingular matrix

constituents

A linear map \(T \in \mathcal{L}(V,W)\)

requirements

A Linear Map \(T \in \mathcal{L}(V,W)\) is called invertable if \(\exists T^{-1} \in \mathcal{L}(W,V): T^{-1}T=I \in \mathcal{L}(V), TT^{-1} = I \in \mathcal{L}(W)\).

“a map is invertable if there is an inverse”: that combining the commutable inverse and itself will result in the identity map.

additional information

matrix invertability

Matrices whose determinants are not \(0\) (i.e. it is invertable) is called “nonsingular matrix”. If it doesn’t have an inverse, it is called a singular matrix.

linear map inverse is unique

An invertable Linear Map has an unique inverse:

Proof:

Suppose \(T \in \mathcal{L}(V,W)\), and \(\exists S_1, S_2\) which are both inverses of \(T\). We desire \(S_1=S_2\).

So:

\begin{equation} S_1 = S_1(TS_2) = (S_1T)S_2 = IS_{2} = S_2 \end{equation}

given Product of Linear Maps is associative.

\(S_1=S_2\), as desired. \(\blacksquare\)

injectivity and surjectivity implies invertability

Suppose \(T \in \mathcal{L}(V,W)\); we desire that \(T\) is invertable IFF it is both injective and surjective.

First, suppose \(T\) is invertible; that is, \(\exists T^{-1}: T^{-1}T=I, TT^{-1}=I\) We desire that \(T\) is both injective and surjective.

Injectivity: Suppose \(Tv=Tu\); we desire \(u=v\). \(u = T^{-1}(Tu) = T^{-1}(Tv) = v\). We essentially use the fact that \(T^{-1}\) is a function to “revert” the map of \(T\); as \(T^{-1}\) is a map, we know it has to revert to the same result.

Surjectivity: Recall \(T: V\to W\). WLOG let \(w \in W\), \(w=T(T^{-1}w)\). Therefore, all \(w\) is in range of \(T\).

Second, suppose \(T\) is both injective and surjective. Define a transition \(S\) such that \(T(Sw) = w\) for all \(w \in W\) (i.e. it hits just the right element to hit \(w\) as an input of \(T\).) This is made possible because \(T\) is surjective (because you can hit all \(W\)) and injective (which makes \(S\) not need to hit two different things or have two non-equal things accidentally map to the same thing.)

Evidently, \(T(Sw)=w \forall w \in W \implies (TS) = I\) by definition.

We now desire \(ST = I\). We have \((TSTv) = (TS)(Tv) = ITv = Tv\) by associativity of map multiplication. Now, \((TSTv) = Tv \implies T(ST)v = Tv\) by associativity again. This implies that \((ST)v=v\) again because \(T\) is injective: so the same input will not produce two unique outputs.

We then can show \(S\) is a linear map in the usual way.

Having constructed the desired result, \(\blacksquare\)

Alternate Proof for Finite Dimensional \(T\)

So given map to bigger space is not surjective and map to smaller space is not injective, we have that the dimension of \(W = V\), we leverage the basis of each and build the using the basis of domain.

\ No newline at end of file diff --git a/posts/kbhjoint_probability_distribution/index.html b/posts/kbhjoint_probability_distribution/index.html index 9e92ff72f..b43a45222 100644 --- a/posts/kbhjoint_probability_distribution/index.html +++ b/posts/kbhjoint_probability_distribution/index.html @@ -5,7 +5,7 @@ probability of the joint of a Bayes Net \begin{equation} p(joint) = \prod_{i \in BN}^{} p(x_{i} | parents(x_{i})) \end{equation}">
Houjun Liu
-

joint probability distribution

for random variables \(X, Y\), the joint probability distribution is the probability of both of them happening at once.

\begin{equation} +

joint probability distribution

for random variables \(X, Y\), the joint probability distribution is the probability of both of them happening at once.

\begin{equation} p(x,y) \end{equation}

The most fundamental solution can be derived with a table where all complete probabilities are listed. They are going to be too large to practically store.

probability of the joint of a Bayes Net

\begin{equation} p(joint) = \prod_{i \in BN}^{} p(x_{i} | parents(x_{i})) diff --git a/posts/kbhkernel_smoothing/index.html b/posts/kbhkernel_smoothing/index.html index caf549cba..ee23867a3 100644 --- a/posts/kbhkernel_smoothing/index.html +++ b/posts/kbhkernel_smoothing/index.html @@ -6,7 +6,7 @@ where, \(k\) is the kernel function, a function inversely proportional to how close the two states are:">

Houjun Liu
-

kernel smoothing

kernel smoothing is a way of smoothing a utility function over continuous state space despite only sampling a discrete set of the states.

\begin{equation} +

kernel smoothing

kernel smoothing is a way of smoothing a utility function over continuous state space despite only sampling a discrete set of the states.

\begin{equation} U_{\theta}(s) = \theta^{T} \beta(s) \end{equation}

We multiply a vector \(\theta_{j}\), the utility of being in each state \(s_{j}\) a basis function, which smears, generated for each \(i\) of known discrete state we have:

\begin{equation} \beta_{i}(s) = \frac{k(s, s_{i})}{\sum_{j}^{} k(s, s_{j})} diff --git a/posts/kbhmapreduce/index.html b/posts/kbhmapreduce/index.html index 1c91df77a..d18d0c620 100644 --- a/posts/kbhmapreduce/index.html +++ b/posts/kbhmapreduce/index.html @@ -5,4 +5,4 @@ Map: \((document\_name, document\_contents) \Rightarrow list(word, #\ occurrences)\) You can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a single document. We have now broken the contents into divide and conquerable groups.">

Houjun Liu
-

MapReduce

MapReduce is a distributed algorithm.

https://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf

  • Map: \((in\_key, in\_value) \Rightarrow list(out\_key, intermediate\_value)\).
  • Reduce:
    • Group map outputs by \(out\_key\)
    • \((out\_key, list(intermediate\_value)) \Rightarrow list(out\_value)\)

example of MapReduce

Say, if you want to count word frequencies in a set of documents.

  • Map: \((document\_name, document\_contents) \Rightarrow list(word, #\ occurrences)\)

You can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a single document. We have now broken the contents into divide and conquerable groups.

  • Reduce: \((word, list\ (occurrences\_per\_document)) \Rightarrow (word,sum)\)

We just add up the occurrences that each of the nodes’ output for word frequency.

\ No newline at end of file +

MapReduce

MapReduce is a distributed algorithm.

https://www.psc.edu/wp-content/uploads/2023/07/A-Brief-History-of-Big-Data.pdf

  • Map: \((in\_key, in\_value) \Rightarrow list(out\_key, intermediate\_value)\).
  • Reduce:
    • Group map outputs by \(out\_key\)
    • \((out\_key, list(intermediate\_value)) \Rightarrow list(out\_value)\)

example of MapReduce

Say, if you want to count word frequencies in a set of documents.

  • Map: \((document\_name, document\_contents) \Rightarrow list(word, #\ occurrences)\)

You can see that this can be distributed to multiple processors. You can have each processor count the word frequencies in a single document. We have now broken the contents into divide and conquerable groups.

  • Reduce: \((word, list\ (occurrences\_per\_document)) \Rightarrow (word,sum)\)

We just add up the occurrences that each of the nodes’ output for word frequency.

\ No newline at end of file diff --git a/posts/kbhmarkov_decision_process/index.html b/posts/kbhmarkov_decision_process/index.html index 7e127be5e..e67cad2d3 100644 --- a/posts/kbhmarkov_decision_process/index.html +++ b/posts/kbhmarkov_decision_process/index.html @@ -4,7 +4,7 @@ constituents \(S\): state space (assuming discrete for now, there are \(n\) states) — “minimum set of information that allows you to solve a problem” \(A\): action space — set of things your agent can do \(T(s’ | s,a)\): “dynamics”, state-transition model “probability that we end up in \(s’\) given \(s\) and action \(a\)”: good idea to make a table of probabilities of source vs.">
Houjun Liu
-

Markov Decision Process

An MDP is a decision network whereby a sequence of actions causes a sequence of states. Each state is dependent on the action we take and the state we are in, and each utility is dependent on the action taken and the state we are in.

Note that, unlike a POMDP, we know what state we are in—the observations from the states are just unclear.

constituents

  • \(S\): state space (assuming discrete for now, there are \(n\) states) — “minimum set of information that allows you to solve a problem”
  • \(A\): action space — set of things your agent can do
  • \(T(s’ | s,a)\): “dynamics”, state-transition model “probability that we end up in \(s’\) given \(s\) and action \(a\)”: good idea to make a table of probabilities of source vs. destination variables
  • \(R(s,a,s’)\): expected reward given in an action and a state (real world reward maybe stochastic)
  • \(\pi_{t}(s_{1:t}, a_{1:t-1})\): the policy, returning an action, a system of assigning actions based on states
    • however, our past states are d-separated from our current action given knowing the state, so really we have \(\pi_{t}(s_{t})\)

additional information

We assume policy to be exact right now.

stationary Markov Decision Process

This is a stationary Markov Decision Process because at each node \(S_{n}\), we have: \(P(S_{n+1} | A_n, S_n)\). Time is not a variable: as long as you know what state you are in, and what you did, you know the transition probability.

(that is, the set of states is not dependent on time)

calculating utility with instantaneous rewards

Because, typically, in decision networks you sum all the utilities together, you’d think that we should sum the utilities together.

finite-horizon models

We want to maximize reward over time, over a finite horizon \(n\). Therefore, we try to maximize:

\begin{equation} +

Markov Decision Process

An MDP is a decision network whereby a sequence of actions causes a sequence of states. Each state is dependent on the action we take and the state we are in, and each utility is dependent on the action taken and the state we are in.

Note that, unlike a POMDP, we know what state we are in—the observations from the states are just unclear.

constituents

  • \(S\): state space (assuming discrete for now, there are \(n\) states) — “minimum set of information that allows you to solve a problem”
  • \(A\): action space — set of things your agent can do
  • \(T(s’ | s,a)\): “dynamics”, state-transition model “probability that we end up in \(s’\) given \(s\) and action \(a\)”: good idea to make a table of probabilities of source vs. destination variables
  • \(R(s,a,s’)\): expected reward given an action and a state (the real-world reward may be stochastic)
  • \(\pi_{t}(s_{1:t}, a_{1:t-1})\): the policy, returning an action, a system of assigning actions based on states
    • however, our past states are d-separated from our current action given knowing the state, so really we have \(\pi_{t}(s_{t})\)

additional information

We assume policy to be exact right now.

stationary Markov Decision Process

This is a stationary Markov Decision Process because at each node \(S_{n}\), we have: \(P(S_{n+1} | A_n, S_n)\). Time is not a variable: as long as you know what state you are in, and what you did, you know the transition probability.

(that is, the set of states is not dependent on time)

calculating utility with instantaneous rewards

Because, typically, in decision networks you sum all the utilities together, you’d think that we should sum the utilities together.

finite-horizon models

We want to maximize reward over time, over a finite horizon \(n\). Therefore, we try to maximize:

\begin{equation} \sum_{t=1}^{n}r_{t} \end{equation}

this function is typically called “return”.

infinite-horizon models

If you lived forever, small positive \(r_{t}\) and large \(r_{t}\) makes no utility difference. We therefore add discounting:

\begin{equation} \sum_{t=1}^{\infty} \gamma^{t-1} r_{t} diff --git a/posts/kbhmarkov_equivalence_classes/index.html b/posts/kbhmarkov_equivalence_classes/index.html index bcd4b6bbb..29c5c6993 100644 --- a/posts/kbhmarkov_equivalence_classes/index.html +++ b/posts/kbhmarkov_equivalence_classes/index.html @@ -5,4 +5,4 @@ some edges without regard to direction (“same skeleton”) the same set of immoral v-structures ">

Houjun Liu
-

Markov Equivalence Classes

\ No newline at end of file +

Markov Equivalence Classes

\ No newline at end of file diff --git a/posts/kbhmartingale_model/index.html b/posts/kbhmartingale_model/index.html index 834d4b30e..830b842b5 100644 --- a/posts/kbhmartingale_model/index.html +++ b/posts/kbhmartingale_model/index.html @@ -6,7 +6,7 @@ This is not a for sure! modeling statement: this is simply the expected value!! That means, after \(\infty\) times of re-running the universe starting “yesterday”, the new opening price will converge to the last closing price.">
Houjun Liu
-

Martingale Model

The Martingale Model states: if we observed the closing price of the market yesterday, we expect that the market is going to open at the close price yesterday.

Formally:

\begin{equation} +

Martingale Model

The Martingale Model states: if we observed the closing price of the market yesterday, we expect that the market is going to open at the close price yesterday.

Formally:

\begin{equation} E\qty [X_{k}|X_{k-1}, X_{k-2},\ldots] = X_{k-1} \end{equation}

“irrespective of what you know, no matter how long the history, the best expectation of today’s price is yesterday’s price.”

This is not a for sure! modeling statement: this is simply the expected value!! That means, after \(\infty\) times of re-running the universe starting “yesterday”, the new opening price will converge to the last closing price.

Two important conclusions:

  1. If we know the closing price yesterday (it is observed), the price today will be DETERMINED and not!!! a random variable
  2. If the closing price yesterday is a random variable, the price today will be IN-DETERMINED and also a random variable

Therefore, the “randomness is fair”, and therefore the “market is not drifting in favor/against you.”

The Martingale Model comes from the idea that “true gambling is true equal conditions (money, opponents, bystanders, situations, die, and dice.)” Therefore, any amount of bias towards one direction/party is advantageous for that person.

In fact, it was theorized that an efficient market should follow exactly this behavior.

changes in history

Of course, the difference between the expression:

\begin{equation} E\qty [X_{k}|X_{k-1}, X_{k-2},\ldots] = X_{k-1} diff --git a/posts/kbhmodel_based_reinforcement_learning/index.html b/posts/kbhmodel_based_reinforcement_learning/index.html index a2a770d9e..5f5ebb98c 100644 --- a/posts/kbhmodel_based_reinforcement_learning/index.html +++ b/posts/kbhmodel_based_reinforcement_learning/index.html @@ -8,7 +8,7 @@ the sum of rewards when taking \(s,a\). To calculate a reward, we take the average:">

Houjun Liu
-

model-based reinforcement learning

Step 1: Getting Model

We want a model

  • \(T\): transition probability
  • \(R\): rewards

Maximum Likelihood Parameter Learning Method

\begin{equation} +

model-based reinforcement learning

Step 1: Getting Model

We want a model

  • \(T\): transition probability
  • \(R\): rewards

Maximum Likelihood Parameter Learning Method

\begin{equation} N(s,a,s’) \end{equation}

which is the count of transitions from \(s,a\) to \(s’\) and increment it as \(s, a, s’\) gets observed. This makes, with Maximum Likelihood Parameter Learning:

\begin{equation} T(s’ | s,a) = \frac{N(s,a,s’)}{\sum_{s’’}^{} N(s,a,s’’)} diff --git a/posts/kbhmodel_free_reinforcement_learning/index.html b/posts/kbhmodel_free_reinforcement_learning/index.html index 550baa74c..907d80ca1 100644 --- a/posts/kbhmodel_free_reinforcement_learning/index.html +++ b/posts/kbhmodel_free_reinforcement_learning/index.html @@ -5,7 +5,7 @@ \begin{equation} \hat{x}_{m} = \hat{x}_{m-1} + \frac{1}{m} (x^{(m)} - \hat{x}_{m-1}) \end{equation}">

Houjun Liu
-

model-free reinforcement learning

In model-based reinforcement learning, we tried real hard to get \(T\) and \(R\). What if we just estimated \(Q(s,a)\) directly? model-free reinforcement learning tends to be quite slow, compared to model-based reinforcement learning methods.

review: estimating mean of a random variable

we got \(m\) points \(x^{(1 \dots m)} \in X\) , what is the mean of \(X\)?

\begin{equation} +

model-free reinforcement learning

In model-based reinforcement learning, we tried real hard to get \(T\) and \(R\). What if we just estimated \(Q(s,a)\) directly? model-free reinforcement learning tends to be quite slow, compared to model-based reinforcement learning methods.

review: estimating mean of a random variable

we got \(m\) points \(x^{(1 \dots m)} \in X\) , what is the mean of \(X\)?

\begin{equation} \hat{x_{m}} = \frac{1}{m} \sum_{i=1}^{m} x^{(i)} \end{equation}

\begin{equation} \hat{x}_{m} = \hat{x}_{m-1} + \frac{1}{m} (x^{(m)} - \hat{x}_{m-1}) diff --git a/posts/kbhmodular_arithmetic/index.html b/posts/kbhmodular_arithmetic/index.html index e413303ad..cbb8ad872 100644 --- a/posts/kbhmodular_arithmetic/index.html +++ b/posts/kbhmodular_arithmetic/index.html @@ -6,7 +6,7 @@ additional information basic modular arithmetic operations \begin{align} (a+b)\ \text{mod}\ c &= ((a\ \text{mod}\ c) + (b\ \text{mod}\ c))\ \text{mod}\ c \\ (ab) \ \text{mod}\ c &= ((a\ \text{mod}\ c) (b \ \text{mod}\ c)) \ \text{mod}\ c \end{align}">

Houjun Liu
-

modular arithmetic

Clock math.

We say that \(a\ \text{mod}\ b = r\) if \(a=bq+r\), such that \(b>0\) and \(0 \leq r <b\). More specifically, we denote:

\begin{equation} +

modular arithmetic

Clock math.

We say that \(a\ \text{mod}\ b = r\) if \(a=bq+r\), such that \(b>0\) and \(0 \leq r <b\). More specifically, we denote:

\begin{equation} a \equiv a’\ \text{mod}\ b \end{equation}

if \(b|(a-a’)\).

additional information

basic modular arithmetic operations

\begin{align} (a+b)\ \text{mod}\ c &= ((a\ \text{mod}\ c) + (b\ \text{mod}\ c))\ \text{mod}\ c \\ diff --git a/posts/kbhmultithreading/index.html b/posts/kbhmultithreading/index.html index 39f2d2bf2..89a83b3f1 100644 --- a/posts/kbhmultithreading/index.html +++ b/posts/kbhmultithreading/index.html @@ -3,7 +3,7 @@ never race condition never deadlock thread you can spawn a thread using the thread() can even pass function parameters threads share all virtual address space: bugs can arise when multiple threads modify the same thing at the same time—each thread has access to a small chunk of the stack threads are actually the unit of concurrency: the OS actually chooses threads to run // now the thread can execute at any time: once a thread is made, it will run in any order thread myThread(function_to_run, arg1, arg2, .">

Houjun Liu
-

multithreading

  • we can have concurrency within a single process—each running a single function

We will solve problems:

thread

  • you can spawn a thread using the thread() can even pass function parameters
  • threads share all virtual address space: bugs can arise when multiple threads modify the same thing at the same time—each thread has access to a small chunk of the stack
  • threads are actually the unit of concurrency: the OS actually chooses threads to run
// now the thread can execute at any time: once a thread is made, it will run in any order
+

multithreading

  • we can have concurrency within a single process—each running a single function

We will solve problems:

thread

  • you can spawn a thread using the thread() can even pass function parameters
  • threads share all virtual address space: bugs can arise when multiple threads modify the same thing at the same time—each thread has access to a small chunk of the stack
  • threads are actually the unit of concurrency: the OS actually chooses threads to run
// now the thread can execute at any time: once a thread is made, it will run in any order
 thread myThread(function_to_run, arg1, arg2, ...);
 // threads run AS SOON AS SPAWNED: so
 

We can wait for a thread:

myThread.join()
diff --git a/posts/kbhmutually_exclusive/index.html b/posts/kbhmutually_exclusive/index.html
index d982f601e..b817446be 100644
--- a/posts/kbhmutually_exclusive/index.html
+++ b/posts/kbhmutually_exclusive/index.html
@@ -7,7 +7,7 @@
 We basically need to alternate between adding and subtracting. (i.e.: in our case here, we add all the odd-group pairs (for P(x) and P(xyz)), we subtract the even-number pairs (for p(xy))).">
Houjun Liu
-

mutually exclusive

probability of “or”

If it’s not possible for two events to happen at the same time, they are called mutually exclusive:

\begin{equation} +

mutually exclusive

probability of “or”

If it’s not possible for two events to happen at the same time, they are called mutually exclusive:

\begin{equation} P(E\ or\ F) = P(E)+P(F) - P(E \cap F) \end{equation}

This is called the inclusion exclusion principle. This is what motivates inclusion exclusion counting.

General inclusion exclusion principle

Its scary. Think about this:

We basically need to alternate between adding and subtracting. (i.e.: in our case here, we add all the odd-group pairs (for P(x) and P(xyz)), we subtract the even-number pairs (for p(xy))).

And so:

\begin{equation} P(E_1\ or\ \dots\ or\ E_{n}) = \sum_{r=1}^{n} (-1)^{r+1} Y_{r} diff --git a/posts/kbhnaive_bayes/index.html b/posts/kbhnaive_bayes/index.html index 18390d08d..9ff967153 100644 --- a/posts/kbhnaive_bayes/index.html +++ b/posts/kbhnaive_bayes/index.html @@ -4,7 +4,7 @@ (Why is this backwards(ish)? Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. Therefore, the observations are a RESULT of the class happening.">

Houjun Liu
-

Naive Bayes

Naive Bayes is a special class of Baysian Network inference problem which follows a specific structure used to solve classification problems.

The Naive Bayes classifier is a Baysian Network of the shape:

(Why is this backwards(ish)? Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. Therefore, the observations are a RESULT of the class happening.)

We consider, naively, \(o_{1:n}\) are all conditionally independent on \(c\). From this graph, we can therefore use the probability chain rule + conditional probability to write that:

\begin{equation} +

Naive Bayes

Naive Bayes is a special class of Baysian Network inference problem which follows a specific structure used to solve classification problems.

The Naive Bayes classifier is a Baysian Network of the shape:

(Why is this backwards(ish)? Though we typically think about models as a function M(obs) = cls, the real world is almost kind of opposite; it kinda works like World(thing happening) = things we observe. Therefore, the observations are a RESULT of the class happening.)

We consider, naively, \(o_{1:n}\) are all conditionally independent on \(c\). From this graph, we can therefore use the probability chain rule + conditional probability to write that:

\begin{equation} P(c, o_{1:n}) = P( c) \prod_{i=1}^{n} P(o_{i} | c) \end{equation}

so, to actually compute this, we don’t want to bother going over all the multiplications because of underflow, we write:

\begin{equation} \hat{y} = \arg\max_{y} \log \hat{P}(y) + \sum_{i=1}^{m} \log \hat{P}(x|y) diff --git a/posts/kbhneural_networks/index.html b/posts/kbhneural_networks/index.html index ada34a1a1..1b01d0723 100644 --- a/posts/kbhneural_networks/index.html +++ b/posts/kbhneural_networks/index.html @@ -8,7 +8,7 @@ Tanh \begin{equation} y(z) = \frac{e^{z} - e^{-z}}{e^{z}+e^{-z}} \end{equation}">

Houjun Liu
-

Neural Networks

Neural Network Unit

A real-valued vector as input, each multiplied by some weights, summed, and squashed by some non-linear transform.

\begin{equation} +

Neural Networks

Neural Network Unit

A real-valued vector as input, each multiplied by some weights, summed, and squashed by some non-linear transform.

\begin{equation} z = w\cdot x + b \end{equation}

and then, we will squash this using it as an “activation”

\begin{equation} y = \sigmoid(z) diff --git a/posts/kbhnlp/index.html b/posts/kbhnlp/index.html index 8f3e5d6af..9dbc5991c 100644 --- a/posts/kbhnlp/index.html +++ b/posts/kbhnlp/index.html @@ -8,6 +8,6 @@ Branch and Bound See Branch and Bound">

Houjun Liu
-

NLP

Complex System

Language Model

A Language Model is a large neural network trained to predict the next token given some context.

“Language models can discriminate behavior that they can’t reliably generate.”

Coherence

Generative REVOLUTION

Why probability maximization sucks

Its expensive!

  1. Take \(k\) candidates
  2. Expand \(k\) expansions for each of the \(k\) candidates
  3. Choose the highest probability \(k\) candidates

\(k\) should be small: we are trying to maximize

Branch and Bound

See Branch and Bound

Challenges of Direct Sampling

Direct Sampling sucks. It sucks. It sucks. Just sampling from the distribution sucks. This has to do with the fact that assigning slightly lower scores “being less confident” is exponentially worse.

The model has to therefore be VERY conservative about giving low confidences; so, it is over confident about worst tokens.

Top-K

Top-k is too broad, and top

Nucleus Sampling

Find the smallest set of tokens that make up to \(p\) probability.

Correctness

  • The highest probability answer isn’t always right
  • Generative models consider every answer, so we want another model to compute the correct answer

Surface Form Competition

The Surface Form Competition problem results when the top-probability token “steals” probability from the other tokens.

The predicted frequency of a possible string is a main confounder. And so we can use models to decompose their own predictions:

Turns out:

\(P(answer|question) \approx P(answer\ is\ valid)P(answer|domain)\)

So…

\begin{equation} +

NLP

Complex System

Language Model

A Language Model is a large neural network trained to predict the next token given some context.

“Language models can discriminate behavior that they can’t reliably generate.”

Coherence

Generative REVOLUTION

Why probability maximization sucks

Its expensive!

  1. Take \(k\) candidates
  2. Expand \(k\) expansions for each of the \(k\) candidates
  3. Choose the highest probability \(k\) candidates

\(k\) should be small: we are trying to maximize

Branch and Bound

See Branch and Bound

Challenges of Direct Sampling

Direct Sampling sucks. It sucks. It sucks. Just sampling from the distribution sucks. This has to do with the fact that assigning slightly lower scores “being less confident” is exponentially worse.

The model has to therefore be VERY conservative about giving low confidences; so, it is over confident about worst tokens.

Top-K

Top-k is too broad, and top

Nucleus Sampling

Find the smallest set of tokens that make up to \(p\) probability.

Correctness

  • The highest probability answer isn’t always right
  • Generative models consider every answer, so we want another model to compute the correct answer

Surface Form Competition

The Surface Form Competition problem results when the top-probability token “steals” probability from the other tokens.

The predicted frequency of a possible string is a main confounder. And so we can use models to decompose their own predictions:

Turns out:

\(P(answer|question) \approx P(answer\ is\ valid)P(answer|domain)\)

So…

\begin{equation} P(answer\ is\ valid) = \frac{P(answer|question)}{P(answer|domain)} \end{equation}

This is better :point_up:. Further reading: (Holtzman et al. 2021)

Domain

Domain is the context in which that the text may occur.

Coverage

Why aren’t models controllable

Hallucination

  • Language models predict what’s most likely
  • We hope to control them with natural-language semantics

In-Context Learning

If we show the model some context which has example input output pairs, it can output. (Language Model model are few shot learners)

Correct Scoring

We can reverse the output to predict the input to prevent the model from losing information, and use that to rerank the info. Of course, if the model can’t generate the desired input, the output is probably missing information.

Smaller models can be made better because of info reranking.

The Degenerative-Discriminative Gap.

Future Work

The fact that a single comma can shift the input. What we need is a language to control language behavior.

The Ability to Control a Model are the Goal of Understand the Model

We should only claim to understand a model when we can make a theory map about it: “when X is fed into the model, we get Y”

So:

  • we should look at what the model is biased about (Surface Form Competition, for instance)
  • we would be closer to prime behaviors such that they mimic the human behavior (in pieces, not just “complete these tokens”) in completion
  • We see success as the actual evaluation metrics; we can use machines vs. other machines as the results

Questions

ahai@uw.edu

Marcel Just

anthropic ai papers

percy liang

\ No newline at end of file diff --git a/posts/kbhnon_homogeneous_linear_differential_equation/index.html b/posts/kbhnon_homogeneous_linear_differential_equation/index.html index c3312a8fd..643ed44b0 100644 --- a/posts/kbhnon_homogeneous_linear_differential_equation/index.html +++ b/posts/kbhnon_homogeneous_linear_differential_equation/index.html @@ -6,7 +6,7 @@ where the left is a particular solution, and the right is any homogeneous solution. We can do this because, say if we derivate it; the left derivative (the particular solution) gives \(f(t)\), and the right, because its homogeneous, gives 0.">
Houjun Liu
-

non-homogeneous linear differential equation

\begin{equation} +

non-homogeneous linear differential equation

\begin{equation} y’ + ay = f(t) \end{equation}

The general solution for this would be

  1. any solution specifically which gives \(f(t)\), plus
  2. any homogeneous solutions

specifically:

\begin{equation} y = y_{p}(t) + y_{n}(t) diff --git a/posts/kbhnon_linear_systems/index.html b/posts/kbhnon_linear_systems/index.html index 2c7e0ca52..ba54d799c 100644 --- a/posts/kbhnon_linear_systems/index.html +++ b/posts/kbhnon_linear_systems/index.html @@ -5,7 +5,7 @@ \begin{equation} \dv t \mqty(x \\y) = A \mqty(x \\ y) +b \end{equation}">

Houjun Liu
-

Non-Linear System

“Chaotic Dynamics” Because the world is sadly nonlinear.

motivating non-linearity

\begin{equation} +

Non-Linear System

“Chaotic Dynamics” Because the world is sadly nonlinear.

motivating non-linearity

\begin{equation} \dv t \mqty(x \\ y) = f\qty(\mqty(x\\y)) \end{equation}

This function is a function from \(f: \mathbb{R}^{2}\to \mathbb{R}^{2}\). All the work on Second-Order Linear Differential Equations, has told us that the above system can serve as a “linearization” of a second order differential equation that looks like the follows:

\begin{equation} \dv t \mqty(x \\y) = A \mqty(x \\ y) +b diff --git a/posts/kbhnull_space/index.html b/posts/kbhnull_space/index.html index 7cf19f8c8..01070dc58 100644 --- a/posts/kbhnull_space/index.html +++ b/posts/kbhnull_space/index.html @@ -6,7 +6,7 @@ additional information the null space is a subspace of the domain It should probably not be a surprise, given a Null Space is called a Null Space, that the Null Space is a subspace of the domain.">

Houjun Liu
-

null space

The Null Space, also known as the kernel, is the subset of vectors which get mapped to \(0\) by some Linear Map.

constituents

Some linear map \(T \in \mathcal{L}(V,W)\)

requirements

The subset of \(V\) which \(T\) maps to \(0\) is called the “Null Space”:

\begin{equation} +

null space

The Null Space, also known as the kernel, is the subset of vectors which get mapped to \(0\) by some Linear Map.

constituents

Some linear map \(T \in \mathcal{L}(V,W)\)

requirements

The subset of \(V\) which \(T\) maps to \(0\) is called the “Null Space”:

\begin{equation} null\ T = \{v \in V: Tv = 0\} \end{equation}

additional information

the null space is a subspace of the domain

It should probably not be a surprise, given a Null Space is called a Null Space, that the Null Space is a subspace of the domain.

zero

As linear maps take \(0\) to \(0\), \(T 0=0\) so \(0\) is in the Null Space of \(T\).

closure under addition

We have that:

\begin{equation} 0+0 = 0 diff --git a/posts/kbhode_linearilzation/index.html b/posts/kbhode_linearilzation/index.html index 1a5ae1af7..c7c46bc27 100644 --- a/posts/kbhode_linearilzation/index.html +++ b/posts/kbhode_linearilzation/index.html @@ -6,7 +6,7 @@ if at least one \(Re[\lambda] > 0\) of \(\qty(\nabla F)(p)\) then \(p\) is considered unstable—that is, points initially near \(p\) will go somewhere else">

Houjun Liu
-

linearilzation

For some non-linear function, we can use its first Jacobian to create a linear system. Then, we can use that system to write the first order Taylor:

\begin{equation} +

linearilzation

For some non-linear function, we can use its first Jacobian to create a linear system. Then, we can use that system to write the first order Taylor:

\begin{equation} y’ = \nabla F(crit)y \end{equation}

where \(crit\) are critical points.

Phase Portrait stability

  • if all \(Re[\lambda] < 0\) of \(\qty(\nabla F)(p)\) then \(p\) is considered stable—that is, points initially near \(p\) will exponentially approach \(p\)

  • if at least one \(Re[\lambda] > 0\) of \(\qty(\nabla F)(p)\) then \(p\) is considered unstable—that is, points initially near \(p\) will go somewhere else

  • if all \(Re[\lambda] \leq 0\) and at least one \(\lambda\) is pure imaginary of \(\qty(\nabla F)(p)\), then there are no conclusions and \(p\) is considered marginal

    If there are no purely imaginary values, then the solution paths of the ODE look like that of \(y’ = (\nabla F)(p) y\).

Worked Example

Let’s use the Lotka-Volterra Predator-Prey Equation again as an example

\begin{equation} \begin{cases} diff --git a/posts/kbhparallel_linear_algebra/index.html b/posts/kbhparallel_linear_algebra/index.html index bcf05e37c..02e7db29e 100644 --- a/posts/kbhparallel_linear_algebra/index.html +++ b/posts/kbhparallel_linear_algebra/index.html @@ -4,6 +4,6 @@ that is, an affine subset for \(U \subset V\) and \(v \in V\):">

Houjun Liu
-

affine subset

an affine subset of \(V\) is a subset of \(V\) that is the sum of a vector and one of its subspace; that is, an affine subset of \(V\) is a subset of \(V\) of the form \(v+U\) for \(v \in V\) and subspace \(U \subset V\).

for \(v \in V\) and \(U \subset V\), an affine subset \(v+U\) is said to be parallel to \(U\).

that is, an affine subset for \(U \subset V\) and \(v \in V\):

\begin{equation} +

affine subset

an affine subset of \(V\) is a subset of \(V\) that is the sum of a vector and one of its subspace; that is, an affine subset of \(V\) is a subset of \(V\) of the form \(v+U\) for \(v \in V\) and subspace \(U \subset V\).

for \(v \in V\) and \(U \subset V\), an affine subset \(v+U\) is said to be parallel to \(U\).

that is, an affine subset for \(U \subset V\) and \(v \in V\):

\begin{equation} v + U = \{v+u : u \in U\} \end{equation}

additional information

two affine subsets parallel to \(U\) are either equal or disjoint

Suppose \(U\) is a subspace of \(V\); and \(v,w \in V\), then, if one of the following is true all of them are true:

  1. \(v-w \in U\)
  2. \(v+U = w+U\)
  3. \((v+U) \cap (w+U) \neq \emptyset\)

\(1 \implies 2\)

Given \(v-w \in U\)….

For an element in \(v+U\), we have that \(v+u = (w-w)+v+u = w+((v-w)+u) \in w + U\). This is because \(U\) is closed so adding \(v-w \in U\) and \(u\) will remain being in \(U\). \(w-w=0\) just by everything being in \(V\).

We now have \(v+u \in w+U\ \forall u \in U\); we now can reverse the argument to argue in a similar fashion that \(w+u \in v+U\ \forall u \in U\). So, we have that \(v+U \subset w+U\) and \(w+U \subset v+U\). So \(v+U = w+U\), as desired.

\(2 \implies 3\)

By definition of \(v+U=w+U\) as long as \(v+U\) and \(w+U\) is not empty sets, which they can’t be because \(U\) is a vector space so guaranteed nonempty.

\(3\implies 1\)

Given \((v+U) \cap (w+U) \neq \emptyset\), we have that there exists some \(u_1, u_2 \in U\) such that \(v+u_1 = w+u_2\). Because everything here is in \(V\), we can add their respective inverses (“move them around”) such that: \(v-w = u_2-u_1\). Therefore \(u_2-u_1 \in U \implies v-w \in U\).

\ No newline at end of file diff --git a/posts/kbhparameter/index.html b/posts/kbhparameter/index.html index 986e0a960..3c10b8329 100644 --- a/posts/kbhparameter/index.html +++ b/posts/kbhparameter/index.html @@ -6,6 +6,6 @@ \begin{equation} p(x|a) \end{equation}">
Houjun Liu
-

parameter

a parameter of probability distribution govern the probabilities associated with different conditions in that distribution. It is usually a vector:

For instance, for uniform \(Uni(\alpha, \beta)\), parameter \(\theta = [\alpha, \beta]\).

importantly, for a discrete distribution system with 6 parameters, we only need 5 independent parameters to be able to satisfy the entire system. This is because a probability distribution must sum to 1.

however, for a conditional probability:

\begin{equation} +

parameter

a parameter of probability distribution govern the probabilities associated with different conditions in that distribution. It is usually a vector:

For instance, for uniform \(Uni(\alpha, \beta)\), parameter \(\theta = [\alpha, \beta]\).

importantly, for a discrete distribution system with 6 parameters, we only need 5 independent parameters to be able to satisfy the entire system. This is because a probability distribution must sum to 1.

however, for a conditional probability:

\begin{equation} p(x|a) \end{equation}

we need to specify \((n-1)m\) parameters, where \(m\) is the number of states \(a\) can take, and \(n\) the number of states \(x\) can take. Each group of \(m\) has to add up to \(1\).

parameter learning

see parameter learning

\ No newline at end of file diff --git a/posts/kbhpegasus/index.html b/posts/kbhpegasus/index.html index 5c9f08769..2813df6e9 100644 --- a/posts/kbhpegasus/index.html +++ b/posts/kbhpegasus/index.html @@ -6,7 +6,7 @@ To actually ensure that \(V\) has deterministic transitions…">
Houjun Liu
-

Pegasus

Memoryless policy search through fake determinism.

Primary contribution: transforming stochastic POMDP to a deterministic simulative function; foregos alpha vectors.

Suppose you have \(m\) initial states that you sampled, you can then just try to get the set of actions that maximize:

\begin{equation} +

Pegasus

Memoryless policy search through fake determinism.

Primary contribution: transforming stochastic POMDP to a deterministic simulative function; foregos alpha vectors.

Suppose you have \(m\) initial states that you sampled, you can then just try to get the set of actions that maximize:

\begin{equation} \arg\max_{\theta} \tilde{V} = \frac{1}{m} \sum_{n}^{m} V_{\theta}(s_{m}) \end{equation}

To actually ensure that \(V\) has deterministic transitions…

deterministic simulative function

Typically, a generative model takes random actions from the action distribution. However, what we do is have a simulator which takes a RANDOM NUMBER as INPUT, and also the action distribution, and DETERMINISTICALLY gives an action.

Pegasus procedure

We augment the state:

\begin{equation} s \in (S, \mathbb{R}^{[0,1]}, \mathbb{R}^{[0,1]}, \dots) diff --git a/posts/kbhpermittivity_of_free_space/index.html b/posts/kbhpermittivity_of_free_space/index.html index a8068614c..cad9a7e6c 100644 --- a/posts/kbhpermittivity_of_free_space/index.html +++ b/posts/kbhpermittivity_of_free_space/index.html @@ -3,6 +3,6 @@ redefinition of Coulomb’s Constant based on permittivity of free space \begin{equation} k = \frac{1}{4\pi \epsilon_{0}} \end{equation}">

Houjun Liu
-

permittivity of free space

permittivity of free space is a constant \(\epsilon_{0} \approx 8.85 \times 10^{-12} \frac{C^{2}}{N \cdot m^{2}}\).

redefinition of Coulomb’s Constant based on permittivity of free space

\begin{equation} +

permittivity of free space

\ No newline at end of file diff --git a/posts/kbhpointer/index.html b/posts/kbhpointer/index.html index eac174475..461062ab1 100644 --- a/posts/kbhpointer/index.html +++ b/posts/kbhpointer/index.html @@ -5,7 +5,7 @@ int x = 2; // declare object int *xptr = &x; // get location of object (&: address of) printf("%d\n", *xptr); // dereference the pointer address operator You will note, in the line above:'>
Houjun Liu
-

pointer

A pointer is a variable which stores memory addresses. Because there are no pass-by reference, we use pointers to emulate pass by reference: by sharing addresses with other functions.

A pointer can identify a single byte OR some large data structures. We can dynamically allocate pointers, and also identify memory generically without types.

C is always pass-by-copy. Therefore, to pass-by-reference, you basically have to

int x = 2; // declare object
+

pointer

A pointer is a variable which stores memory addresses. Because there are no pass-by reference, we use pointers to emulate pass by reference: by sharing addresses with other functions.

A pointer can identify a single byte OR some large data structures. We can dynamically allocate pointers, and also identify memory generically without types.

C is always pass-by-copy. Therefore, to pass-by-reference, you basically have to

int x = 2; // declare object
 int *xptr = &x; // get location of object (&: address of)
 
 printf("%d\n", *xptr); // dereference the pointer
diff --git a/posts/kbhpolicy/index.html b/posts/kbhpolicy/index.html
index 33add8439..f92e3aca4 100644
--- a/posts/kbhpolicy/index.html
+++ b/posts/kbhpolicy/index.html
@@ -8,7 +8,7 @@
 instead of telling you something to do at a specific point, it tells you what the probability it chooses of doing \(a_{t}\) is given the history.">
Houjun Liu
-

policy

constituents

the history: last states and actions \(h_{t} = (s_{1:t}, a_{1:t-1})\)

requirements

typically:

\begin{equation} +

policy

constituents

the history: last states and actions \(h_{t} = (s_{1:t}, a_{1:t-1})\)

requirements

typically:

\begin{equation} a_{t} = \pi_{t}(h_{t}) \end{equation}

for a Markov Decision Process, our past states are d-seperated from our current action given knowing the state, so really we have \(\pi_{t}(s_{t})\)

Some policies can be stochastic:

\begin{equation} P(a_{t}) = \pi_{t}(a_{t} | h_{t}) diff --git a/posts/kbhpolicy_gradient/index.html b/posts/kbhpolicy_gradient/index.html index 463c5a062..d3471308e 100644 --- a/posts/kbhpolicy_gradient/index.html +++ b/posts/kbhpolicy_gradient/index.html @@ -7,7 +7,7 @@ \begin{equation} \nabla U(\theta) = \qty[ \frac{U(\theta + \delta e^{1}) - U(\theta)}{\delta} , \dots, \frac{U(\theta + \delta e^{n}) - U(\theta)}{\delta} ] \end{equation}">

Houjun Liu
-

Policy Gradient

Two steps:

  1. obtaining a function for the gradient of policy against some parameters \(\theta\)
  2. making them more based than they are right now by optimization

Thoughout all of this, \(U(\theta)\) is \(U(\pi_{\theta})\).

Obtaining a policy gradient

Finite-Difference Gradient Estimation

We want some expression for:

\begin{equation} +

Policy Gradient

Two steps:

  1. obtaining a function for the gradient of policy against some parameters \(\theta\)
  2. making them more based than they are right now by optimization

Thoughout all of this, \(U(\theta)\) is \(U(\pi_{\theta})\).

Obtaining a policy gradient

Finite-Difference Gradient Estimation

We want some expression for:

\begin{equation} \nabla U(\theta) = \qty[\pdv{U}{\theta_{1}} (\theta), \dots, \pdv{U}{\theta_{n}}] \end{equation}

we can estimate that with the finite-difference “epsilon trick”:

\begin{equation} \nabla U(\theta) = \qty[ \frac{U(\theta + \delta e^{1}) - U(\theta)}{\delta} , \dots, \frac{U(\theta + \delta e^{n}) - U(\theta)}{\delta} ] diff --git a/posts/kbhpomdp_approximation/index.html b/posts/kbhpomdp_approximation/index.html index 69290c973..d4faefdbe 100644 --- a/posts/kbhpomdp_approximation/index.html +++ b/posts/kbhpomdp_approximation/index.html @@ -7,4 +7,4 @@ Point-Based Value Iteration “Perseus”: Randomized PBVI HSVI SARSOP point selection see point selection">

Houjun Liu
-

POMDP Approximation

\ No newline at end of file +

POMDP Approximation

\ No newline at end of file diff --git a/posts/kbhproduct_summation_map/index.html b/posts/kbhproduct_summation_map/index.html index 27888dacc..41c4d5858 100644 --- a/posts/kbhproduct_summation_map/index.html +++ b/posts/kbhproduct_summation_map/index.html @@ -6,7 +6,7 @@ \(U_1 + \dots + U_{m}\) is a direct sum IFF \(\Gamma\) is injective Proof:">
Houjun Liu
-

product summation map

Let \(U_1, \dots, U_{m}\) be subspaces of \(V\); we define a linear

We define \(\Gamma\) to be a map \(U_1 \times \dots U_{m} \to U_1 + \dots + U_{m}\) such that:

\begin{equation} +

product summation map

Let \(U_1, \dots, U_{m}\) be subspaces of \(V\); we define a linear

We define \(\Gamma\) to be a map \(U_1 \times \dots U_{m} \to U_1 + \dots + U_{m}\) such that:

\begin{equation} \Gamma (u_1, \dots, u_{m}) = u_1 + \dots + u_{m} \end{equation}

Essentially, \(\Gamma\) is the sum operation of the elements of the tuple made by the Product of Vector Spaces.

\(U_1 + \dots + U_{m}\) is a direct sum IFF \(\Gamma\) is injective

Proof:

Given \(\Gamma\) is injective: Given injectivity, we have that injectivity implies that null space is \(\{0\}\). Now, because the only way to produce \(0\) is to have the input product/tuple be 0, \(u_1 \dots u_{m} = 0\). So, given a sum of subsets is a direct sum IFF there is only one way to write \(0\), the sum is a direct sum.

Given direct sum: diff --git a/posts/kbhproject80/index.html b/posts/kbhproject80/index.html index 3abe5c966..b626ed7f7 100644 --- a/posts/kbhproject80/index.html +++ b/posts/kbhproject80/index.html @@ -3,4 +3,4 @@ College Application w.r.t. Project80 Cheese mission statement: Project80 is a good way of creating a self-propegating set of learning that would serve to benefit and educate future generations in hopes of creating a more equitable planet.">

Houjun Liu
-

Project80

\ No newline at end of file +

Project80

\ No newline at end of file diff --git a/posts/kbhprojects/index.html b/posts/kbhprojects/index.html index 460ea185f..94267497c 100644 --- a/posts/kbhprojects/index.html +++ b/posts/kbhprojects/index.html @@ -6,4 +6,4 @@

Projects Index

# -index

Projects Index is a index that contains a list of almost all projects for which I have ever worked on. Major categories are highlighted from chapter titles.

Research Projects

I end up doing a lot of research these days, and so have isolated that to a different, academic homepage.

For a list of my recent research, please head to the Research Index.

Media Production Projects

I produce a lot of media (videos, podcasts, blogs, live events/talks) as a part of publicizing my work or for other purposes. For those types of projects, head on over to Production Index.

Large-Scale Endeavors

Condution

An open-source task management app. Website.

Motivation: I got really tired with most other to-do apps after swapping them out over and over again, until I got fed up and built one with some friends.

  • Role: Co-Founder, Lead Developer.
  • Technologies: React, Ionic, Firebase, Typescript, Swift, PostgreSQL
  • Key facts: 10,000+ users, 8-person team, featured in the Bay Area almanac, praised by Asana’s head of developer relations for “open-source advocacy”

MODAP

A R&D team for fireline safety during emergency fires. Repository.

Motivation: a friend approached me with an opportunity to help our local community, especially with the increased influx of fires.

  • Role: Team Lead
  • Technologies: Rust, Torch, ARM, electronics (i2C, UART, messaging protocols, etc.)
  • Key facts: coordinated 5 engineers in developing new technology, supported by Dr. Robert G. Gann, Deputy Director, Center of Excellence for Advanced Technology Aerial Firefighting at the state of Colorado as well as Captain Mason of CalFire

CMU batchalign

A pipeline for the automated preparation of annotated CHAT transcripts from raw audio. Repository.

Motivation: my work over the summer.

  • Role: Author
  • Technologies: Torch, Huggingface, NLTK, CLAN, computational linguistics
  • Key facts: work developed with and maintained under Prof. Brian MacWhinney at CMU’s psycolinguistics department.

AIBridge

A bootcamp for non-CS students in data science. Website

Motivation:

  • Role: Co-Founder, Lecturer
  • Technologies: Python, ScyPy, Scikit-learn, Pandas
  • Key facts: worked with Prof. Xin Liu at UC Davis to develop an introductary one-week bootcamp in ML. We piloted the program this summer at Davis to an in-person group of 20 PhD students in food science sponsored by AIFS.

Full-Stack Projects

Simon

Augmenting the functionality of large-language-models with Elastic. Repository.

Motivation: LLMs have become more and more prominent, and frameworks like ReAct is finally mature enough to produce coherent, well reasoned responses.

  • Role: Author
  • Technologies: Huggingface, GPT-3.5, ElasticSearch

tractotato

CommonLisp macroset for time tracking. Repo.

Motivation: I wanted to learn CommonLisp macros syntax after reading the Land of Lisp book.

  • Role: author
  • Technologies: CommonLisp

Scratchathon Portal

Portal to submit projects for a scratch hackathon I hosted. Repo.

Motivation: my friends McGuy and fuelvin, both content creators on Scratch on YouTube, put together a Scratch hackathon summer of 2020. This is the submission portal.

  • Role: author
  • Technologies: React, Vercel, Firebase

syzygy

Library rethinking to-do list dating to be more flexible and powerful. Repo.

Motivation: a friend and I wanted to innovate beyond the scope of Condution to see how we can abstract away a to-do list system to its bare minimum.

  • Role: co-founder, co-author
  • Technologies: Rust

positron

Library for building lightweight native apps using web tech. Repo.

Motivation: I wanted to re-make electron to be more lightweight using Suckless’ Surf browser concept.

  • Role: author
  • Technologies: C++, GTK

OS/Driver Development

Broadcom Wifi/Bluetooth 4377 Chip Linux Driver

A driver patchset to support cutting-edge Broadcom 4377 Wifi/Bluetooth chips. Repo.

Motivation: I needed to be able to use Wifi on my laptop while running Arch Linux.

  • Role: author
  • Technologies: C, (small amounts of) Assembly
  • Key facts: integrated into the t2linux pipeline used to make WiFi possible on Linux for most MacBooks released after 2018

Distributed Algorithms and Parallel Computing

coveather

An encrypted, anonymized system for protected health information verification. Preprint, Repo, and internal note.

Motivation: I wanted to be able to make vaccine passports more feasible because the current COVID testing/vaccine verification scheme is really bad.

  • Role: author
  • Technologies: Clojure, core.async concurrency, Monte-Carlo simulations, blockchain, PGP
  • Key facts: project won first place at the California STEM Fair, and got special recognition from the Yale Science and Engineering assoc. Total award $3000.

multischedule

A multiple-asynchronous scheduling and delegation algorithm. Repo.

Motivation: (didn’t even come close to getting there) I wanted to create a way to solve or simplify debugging loop overrun problems in robotics codebases.

  • Role: author
  • Technologies: Clojure, core.async concurrency

rotifer

A work-in-progress distributed algorithm for taproot. Repo.

Motivation: I wanted to make taproot even more distributed if possible.

  • Role: author
  • Technologies: Clojure, XML, UDP, ICE

simian

Exploring OT/CRDT and collaborative text editing for taproot. Repo.

Motivation: I wanted to learn about how apps like Google Docs work, and explore Operational Transformation/CRDT, in hopes of putting it into taproot.

  • Role: author
  • Technologies: Clojure, OT, CRDT

aron

A distributed multi-dimensional optimization tool. Repo.

Motivation: Nueva’s course scheduling was quite a mess, and I wanted to help. It is a very complex problem and this project is in the freezer at the moment.

  • Role: author
  • Technologies: CommonLisp

mitte

Easy UDP sockets. Repo, Docs.

Motivation: a friend and I wanted to explore UDP.

  • Role: co-author
  • Technologies: Rust, UDP, ICE (connection)

Cryptography and security

See also: coveather.

jrainbow

An implementation of a MD5 rainbow table. Repo, Crate.

Motivation: I wanted to understand how Rainbow Tables worked.

  • Role: author
  • Technologies: Rust, MD5

Note-taking Systems and \(\LaTeX\) improvements

taproot

A shared zettlekasten of notes and learning resources put together by some friends and I. there has been a few iterations. Current Repo, Current Site, Legacy Site, Even More Legacy Site.

Motivation: I started writing nice \(\LaTeX\) PDFs of my homework, and some friends wanted to have access to it. Later when I mentioned it, another friend had a similar need; so we asked many people to pool our notes and work together to share.

  • Role: co-founder, co-lead, developer
  • Technologies: Next.JS, XeLaTeX, GNU Make, Firn, Hugo, Emacs Org, Org-Publish, Markdown

blag

The zettlekasten you are currently in! My currently maintained personal knowledgebase. Repo, Site.

Motivation: I wanted to experiment with more advanced note-taking techniques after developing taproot, and it ended up superseeding the note-taking abilities of taproot.

  • Role: author
  • Technologies: Next.js, Emacs Org, Hugo

gdoc.el

A utility to enable GNU Emacs to edit Google Doc documents based on the gdrive utility. Repo.

Motivation: I wanted to edit Google Docs in Emacs!

  • Role: author
  • Technologies: GNU Emacs, elisp

interesting

Things that my friends and I find interesting, chucked on the web and builds itself. Repo, Site. No longer maintained.

Motivation: many text channels were too clogged with stuff my friend group found interesting, so I wanted to take initiative to collect them.

  • Role: co-founder, author
  • Technologies: Next.js, Vercel, remark, CommonMark Markdown

Public Configurations

borg

Automatically configure terminals. Repo.

Motivation: I needed a way to copy my system terminal config onto a system quickly.

  • Role: author
  • Technologies: Bash, Zsh, OhMyZsh

.config

A group of sane configuration files. Repo.

Motivation: some Redditors asked for my Config, and I thought I’d share it to benefit the community; also for personal backup.

  • Role: author, maintainer
  • Technologies: Unix administration, Perl, Ruby, LISP

.emacs.d

Simple, powerful, and semantic GNU Emacs configuration for personal use. Repo.

Motivation: I wanted to track my progress in developing a working Emacs config.

  • Role: author, maintainer
  • Technologies: GNU Emacs, elisp
\ No newline at end of file +index

Projects Index is a index that contains a list of almost all projects for which I have ever worked on. Major categories are highlighted from chapter titles.

Research Projects

I end up doing a lot of research these days, and so have isolated that to a different, academic homepage.

For a list of my recent research, please head to the Research Index.

Media Production Projects

I produce a lot of media (videos, podcasts, blogs, live events/talks) as a part of publicizing my work or for other purposes. For those types of projects, head on over to Production Index.

Large-Scale Endeavors

Condution

An open-source task management app. Website.

Motivation: I got really tired with most other to-do apps after swapping them out over and over again, until I got fed up and built one with some friends.

  • Role: Co-Founder, Lead Developer.
  • Technologies: React, Ionic, Firebase, Typescript, Swift, PostgreSQL
  • Key facts: 10,000+ users, 8-person team, featured in the Bay Area almanac, praised by Asana’s head of developer relations for “open-source advocacy”

MODAP

A R&D team for fireline safety during emergency fires. Repository.

Motivation: a friend approached me with an opportunity to help our local community, especially with the increased influx of fires.

  • Role: Team Lead
  • Technologies: Rust, Torch, ARM, electronics (i2C, UART, messaging protocols, etc.)
  • Key facts: coordinated 5 engineers in developing new technology, supported by Dr. Robert G. Gann, Deputy Director, Center of Excellence for Advanced Technology Aerial Firefighting at the state of Colorado as well as Captain Mason of CalFire

CMU batchalign

A pipeline for the automated preparation of annotated CHAT transcripts from raw audio. Repository.

Motivation: my work over the summer.

  • Role: Author
  • Technologies: Torch, Huggingface, NLTK, CLAN, computational linguistics
  • Key facts: work developed with and maintained under Prof. Brian MacWhinney at CMU’s psycolinguistics department.

AIBridge

A bootcamp for non-CS students in data science. Website

Motivation:

  • Role: Co-Founder, Lecturer
  • Technologies: Python, ScyPy, Scikit-learn, Pandas
  • Key facts: worked with Prof. Xin Liu at UC Davis to develop an introductary one-week bootcamp in ML. We piloted the program this summer at Davis to an in-person group of 20 PhD students in food science sponsored by AIFS.

Full-Stack Projects

Simon

Augmenting the functionality of large-language-models with Elastic. Repository.

Motivation: LLMs have become more and more prominent, and frameworks like ReAct is finally mature enough to produce coherent, well reasoned responses.

  • Role: Author
  • Technologies: Huggingface, GPT-3.5, ElasticSearch

tractotato

CommonLisp macroset for time tracking. Repo.

Motivation: I wanted to learn CommonLisp macros syntax after reading the Land of Lisp book.

  • Role: author
  • Technologies: CommonLisp

Scratchathon Portal

Portal to submit projects for a scratch hackathon I hosted. Repo.

Motivation: my friends McGuy and fuelvin, both content creators on Scratch on YouTube, put together a Scratch hackathon summer of 2020. This is the submission portal.

  • Role: author
  • Technologies: React, Vercel, Firebase

syzygy

Library rethinking to-do list dating to be more flexible and powerful. Repo.

Motivation: a friend and I wanted to innovate beyond the scope of Condution to see how we can abstract away a to-do list system to its bare minimum.

  • Role: co-founder, co-author
  • Technologies: Rust

positron

Library for building lightweight native apps using web tech. Repo.

Motivation: I wanted to re-make electron to be more lightweight using Suckless’ Surf browser concept.

  • Role: author
  • Technologies: C++, GTK

OS/Driver Development

Broadcom Wifi/Bluetooth 4377 Chip Linux Driver

A driver patchset to support cutting-edge Broadcom 4377 Wifi/Bluetooth chips. Repo.

Motivation: I needed to be able to use Wifi on my laptop while running Arch Linux.

  • Role: author
  • Technologies: C, (small amounts of) Assembly
  • Key facts: integrated into the t2linux pipeline used to make WiFi possible on Linux for most MacBooks released after 2018

Distributed Algorithms and Parallel Computing

coveather

An encrypted, anonymized system for protected health information verification. Preprint, Repo, and internal note.

Motivation: I wanted to be able to make vaccine passports more feasible because the current COVID testing/vaccine verification scheme is really bad.

  • Role: author
  • Technologies: Clojure, core.async concurrency, Monte-Carlo simulations, blockchain, PGP
  • Key facts: project won first place at the California STEM Fair, and got special recognition from the Yale Science and Engineering assoc. Total award $3000.

multischedule

A multiple-asynchronous scheduling and delegation algorithm. Repo.

Motivation: (didn’t even come close to getting there) I wanted to create a way to solve or simplify debugging loop overrun problems in robotics codebases.

  • Role: author
  • Technologies: Clojure, core.async concurrency

rotifer

A work-in-progress distributed algorithm for taproot. Repo.

Motivation: I wanted to make taproot even more distributed if possible.

  • Role: author
  • Technologies: Clojure, XML, UDP, ICE

simian

Exploring OT/CRDT and collaborative text editing for taproot. Repo.

Motivation: I wanted to learn about how apps like Google Docs work, and explore Operational Transformation/CRDT, in hopes of putting it into taproot.

  • Role: author
  • Technologies: Clojure, OT, CRDT

aron

A distributed multi-dimensional optimization tool. Repo.

Motivation: Nueva’s course scheduling was quite a mess, and I wanted to help. It is a very complex problem and this project is in the freezer at the moment.

  • Role: author
  • Technologies: CommonLisp

mitte

Easy UDP sockets. Repo, Docs.

Motivation: a friend and I wanted to explore UDP.

  • Role: co-author
  • Technologies: Rust, UDP, ICE (connection)

Cryptography and security

See also: coveather.

jrainbow

An implementation of a MD5 rainbow table. Repo, Crate.

Motivation: I wanted to understand how Rainbow Tables worked.

  • Role: author
  • Technologies: Rust, MD5

Note-taking Systems and \(\LaTeX\) improvements

taproot

A shared zettlekasten of notes and learning resources put together by some friends and I. there has been a few iterations. Current Repo, Current Site, Legacy Site, Even More Legacy Site.

Motivation: I started writing nice \(\LaTeX\) PDFs of my homework, and some friends wanted to have access to it. Later when I mentioned it, another friend had a similar need; so we asked many people to pool our notes and work together to share.

  • Role: co-founder, co-lead, developer
  • Technologies: Next.JS, XeLaTeX, GNU Make, Firn, Hugo, Emacs Org, Org-Publish, Markdown

blag

The zettlekasten you are currently in! My currently maintained personal knowledgebase. Repo, Site.

Motivation: I wanted to experiment with more advanced note-taking techniques after developing taproot, and it ended up superseeding the note-taking abilities of taproot.

  • Role: author
  • Technologies: Next.js, Emacs Org, Hugo

gdoc.el

A utility to enable GNU Emacs to edit Google Doc documents based on the gdrive utility. Repo.

Motivation: I wanted to edit Google Docs in Emacs!

  • Role: author
  • Technologies: GNU Emacs, elisp

interesting

Things that my friends and I find interesting, chucked on the web and builds itself. Repo, Site. No longer maintained.

Motivation: many text channels were too clogged with stuff my friend group found interesting, so I wanted to take initiative to collect them.

  • Role: co-founder, author
  • Technologies: Next.js, Vercel, remark, CommonMark Markdown

Public Configurations

borg

Automatically configure terminals. Repo.

Motivation: I needed a way to copy my system terminal config onto a system quickly.

  • Role: author
  • Technologies: Bash, Zsh, OhMyZsh

.config

A group of sane configuration files. Repo.

Motivation: some Redditors asked for my Config, and I thought I’d share it to benefit the community; also for personal backup.

  • Role: author, maintainer
  • Technologies: Unix administration, Perl, Ruby, LISP

.emacs.d

Simple, powerful, and semantic GNU Emacs configuration for personal use. Repo.

Motivation: I wanted to track my progress in developing a working Emacs config.

  • Role: author, maintainer
  • Technologies: GNU Emacs, elisp
\ No newline at end of file diff --git a/posts/kbhquotient_group/index.html b/posts/kbhquotient_group/index.html index 81784e1f8..3fbd3578b 100644 --- a/posts/kbhquotient_group/index.html +++ b/posts/kbhquotient_group/index.html @@ -6,6 +6,6 @@ actual quotient groups We can use the subgroup above to mask out a group.">
Houjun Liu
-

quotient group

a quotient group is a group which is the product of mapping things out.

subgroups

The set of integers \(\mathbb{Z}\) is obviously a group. You can show it to yourself that multiples of any number in the group is a subgroup of that group.

For instance:

\(3 \mathbb{Z}\), the set \(\{\dots -6, -3, 0, 3, 6, \dots\}\) is a subgroup

actual quotient groups

We can use the subgroup above to mask out a group. The resulting product is NOT a subgroup, but its a new group with individual elements being subsets of our original group.

For instance, the \(\mod 3\) quotient group is written as:

\begin{equation} +

quotient group

a quotient group is a group which is the product of mapping things out.

subgroups

The set of integers \(\mathbb{Z}\) is obviously a group. You can show it to yourself that multiples of any number in the group is a subgroup of that group.

For instance:

\(3 \mathbb{Z}\), the set \(\{\dots -6, -3, 0, 3, 6, \dots\}\) is a subgroup

actual quotient groups

We can use the subgroup above to mask out a group. The resulting product is NOT a subgroup, but its a new group with individual elements being subsets of our original group.

For instance, the \(\mod 3\) quotient group is written as:

\begin{equation} \mathbb{Z}} / 3 \mathbb{Z} \end{equation}

Each element in this new group is a set; for instance, in \(\mathbb{Z} / 3\mathbb{Z}\), \(0\) is actually the set \(\{\dots -6, -3, 0, 3, 6, \dots\}\) (i.e. the subgroup that we were masking by). Other elements in the quotient space (“1”, a.k.a. \(\{ \dots, -2, 1, 4, 7 \dots \}\), or “2”, a.k.a. \(\{\dots, -1, 2, 5, 8 \dots \}\)) are called “cosets” of \(3 \mathbb{Z}\). You will notice they are not a subgroups.

\ No newline at end of file diff --git a/posts/kbhrational_preference/index.html b/posts/kbhrational_preference/index.html index e15ab75ce..73bf957e6 100644 --- a/posts/kbhrational_preference/index.html +++ b/posts/kbhrational_preference/index.html @@ -5,6 +5,6 @@ von Neumann and Morgenstern Axioms Axioms for checking if a set of preferences are rational. The axioms allow you to check if a set of decisions are Rational Preferences.">
Houjun Liu
-

rational preference

Motivation

Suppose we would like to say that “we prefer all to well \(A\) more than bad blood \(B\)”

\begin{equation} +

rational preference

Motivation

Suppose we would like to say that “we prefer all to well \(A\) more than bad blood \(B\)”

\begin{equation} A \succ B \end{equation}

No right or wrong answers in this statement by itself, but we can check whether or not your preferences are inconsistent with itself.

von Neumann and Morgenstern Axioms

Axioms for checking if a set of preferences are rational. The axioms allow you to check if a set of decisions are Rational Preferences.

For three conditions \(A, B, C\), we have:

completeness

universal comparability

either \(A \succ B\), \(A \prec B\), \(A \sim B\) (you have to like either better, or be indifferent)

transitivity

If \(A \succeq B\), \(B \succeq C\), then \(A \succeq C\)

continuity

If \(A \succeq C \succeq B\), then there exists some probability \(p\) such that we can form a lottery of shape \([A:p; B:1-p] \sim C\)

That is, if \(C\) is between \(A, B\), then we can create a situation where we mix the chance of \(A\) and \(B\) happening such that selecting from that situation feels equally as good as selecting from \(C\)

independence

for \(A \succ B\), then for any \(C\) and probability \(b\) and any probability \(p\), then the lotteries \([A:p; c:1-p] \geq [B:p; C:1-p]\)

As in, if you swap out a component of a lottery with something less desirable, your new lottery should be more undesirable as well.

\ No newline at end of file diff --git a/posts/kbhresearch/index.html b/posts/kbhresearch/index.html index ccb73d975..95568d424 100644 --- a/posts/kbhresearch/index.html +++ b/posts/kbhresearch/index.html @@ -3,4 +3,4 @@ The Why of Research from Brian Thomas Your discipline is not your topic You should do research to find out whether or not you have chosen the right area of focus You can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise Research is the act of taming unruly problems ">
Houjun Liu
-

Research

Learning to ask questions in a reasonable way. Changing you from a consumer of knowledge to a producer of researcher.

The Why of Research from Brian Thomas

  • Your discipline is not your topic
  • You should do research to find out whether or not you have chosen the right area of focus
  • You can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise
  • Research is the act of taming unruly problems
\ No newline at end of file +

Research

Learning to ask questions in a reasonable way. Changing you from a consumer of knowledge to a producer of researcher.

The Why of Research from Brian Thomas

  • Your discipline is not your topic
  • You should do research to find out whether or not you have chosen the right area of focus
  • You can interact with faculty in a new way: everyone is here because of fantastic scholarship; you are mining a completely novel area of their expertise
  • Research is the act of taming unruly problems
\ No newline at end of file diff --git a/posts/kbhrfdiffusion/index.html b/posts/kbhrfdiffusion/index.html index d52199bef..0bb448f0b 100644 --- a/posts/kbhrfdiffusion/index.html +++ b/posts/kbhrfdiffusion/index.html @@ -3,4 +3,4 @@ RFDiffusion is available starting THIS WEEK!">
Houjun Liu
-

RFDiffusion

  1. Starting with random residue noise: coordinates + backbones
  2. Diffusion happens: train like diffusion, with the goal of increasing binding affinities
  3. Eventually resolves to valid protein structures given the binding environments

Basically, start with only the desired substraight, and the diffuse the sequence around that small sequence with the goal of higher affinity binding: i.e. allow only the binding site to stay and regenerating the rest.

RFDiffusion is available starting THIS WEEK!

advantages over RoseTTAFold2 inpainting

The starting point is random for diffusion, so if you do it multiple times you will get new results instead of the same thing.

\ No newline at end of file +

RFDiffusion

  1. Starting with random residue noise: coordinates + backbones
  2. Diffusion happens: train like diffusion, with the goal of increasing binding affinities
  3. Eventually resolves to valid protein structures given the binding environments

Basically, start with only the desired substraight, and the diffuse the sequence around that small sequence with the goal of higher affinity binding: i.e. allow only the binding site to stay and regenerating the rest.

RFDiffusion is available starting THIS WEEK!

advantages over RoseTTAFold2 inpainting

The starting point is random for diffusion, so if you do it multiple times you will get new results instead of the same thing.

\ No newline at end of file diff --git a/posts/kbhrho_pomdps/index.html b/posts/kbhrho_pomdps/index.html index 714d09923..469151f41 100644 --- a/posts/kbhrho_pomdps/index.html +++ b/posts/kbhrho_pomdps/index.html @@ -4,7 +4,7 @@ To do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:">
Houjun Liu
-

rho-POMDPs

POMDPs to solve Active Sensing Problem: where gathering information is the explicit goal and not a means to do something. Meaning, we can’t train them using state-only reward functions (i.e. reward is based on belief and not state).

Directly reward the reduction of uncertainty: belief-based reward framework which you can just tack onto the existing solvers.

To do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:

\begin{equation} +

rho-POMDPs

POMDPs to solve Active Sensing Problem: where gathering information is the explicit goal and not a means to do something. Meaning, we can’t train them using state-only reward functions (i.e. reward is based on belief and not state).

Directly reward the reduction of uncertainty: belief-based reward framework which you can just tack onto the existing solvers.

To do this, we want to define some reward directly over the belief space which assigns rewards based on uncertainty reduction:

\begin{equation} r(b,a) = \rho(b,a) \end{equation}

\(\rho\) should be some measure of uncertainty, like entropy.

key question: how does our POMDP formulations change given this change?

Don’t worry about the Value Function

result: if reward function is convex, then Bellman updates should preserve the convexity of the value function

So, we now just need to make sure that however we compute our rewards the reward function \(\rho\) has to be piecewise linear convex.

PWLC rewards

One simple PWLC rewards are alpha vectors:

\begin{equation} \rho(b,a) = \max_{\alpha in \Gamma} \qty[\sum_{ss}^{} b(s) \alpha(s)] diff --git a/posts/kbhrosetta/index.html b/posts/kbhrosetta/index.html index 7cef8938d..1f4ac5dde 100644 --- a/posts/kbhrosetta/index.html +++ b/posts/kbhrosetta/index.html @@ -5,4 +5,4 @@ more! You take something like a trimer; you shove a peptide between each “point”, and boom structal change to a quadromer">

Houjun Liu
-

Rosetta

Rosetta is a set of physical-based protein folding models.

protein binding with Rosetta

  • check a protein surface
  • check how protein side-chains interact with the binding surface

peptide binding with Rosetta

The difficulty with this is that we don’t know what the overall tertiary structure of a group of peptides are; unlike whole protein binding.

sequence-specific DNA binding

???

more!

You take something like a trimer; you shove a peptide between each “point”, and boom structural change to a quadromer

\ No newline at end of file +

Rosetta

Rosetta is a set of physical-based protein folding models.

protein binding with Rosetta

  • check a protein surface
  • check how protein side-chains interact with the binding surface

peptide binding with Rosetta

The difficulty with this is that we don’t know what the overall tertiary structure of a group of peptides are; unlike whole protein binding.

sequence-specific DNA binding

???

more!

You take something like a trimer; you shove a peptide between each “point”, and boom structural change to a quadromer

\ No newline at end of file diff --git a/posts/kbhrosettafold2/index.html b/posts/kbhrosettafold2/index.html index acf5cade2..72ad2a97f 100644 --- a/posts/kbhrosettafold2/index.html +++ b/posts/kbhrosettafold2/index.html @@ -4,4 +4,4 @@ application: de-novo luciferase design come up with the correct shaped scaffolds use old Rosetta to jam a residue sequence into the scaffold refold application: RoseTTAFold2 in-painting Train the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and">
Houjun Liu
-

RoseTTAFold2

RoseTTAFold2 is a three-track folding tool, which also handles multimer!

  1. inputs: amino acid sequence + CHEMICAL structure (WOAH! how?)
  2. “RF2 all-atom embedding”
  3. fold!

The model does really well!

application: de-novo luciferase design

  1. come up with the correct shaped scaffolds
  2. use old Rosetta to jam a residue sequence into the scaffold
  3. refold

application: RoseTTAFold2 in-painting

Train the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and

\ No newline at end of file +

RoseTTAFold2

RoseTTAFold2 is a three-track folding tool, which also handles multimer!

  1. inputs: amino acid sequence + CHEMICAL structure (WOAH! how?)
  2. “RF2 all-atom embedding”
  3. fold!

The model does really well!

application: de-novo luciferase design

  1. come up with the correct shaped scaffolds
  2. use old Rosetta to jam a residue sequence into the scaffold
  3. refold

application: RoseTTAFold2 in-painting

Train the model to recover the missing bits of sequence from the overall structure (i.e. training backwards), and

\ No newline at end of file diff --git a/posts/kbhsarsa_lambda/index.html b/posts/kbhsarsa_lambda/index.html index 74b572abc..3345634a5 100644 --- a/posts/kbhsarsa_lambda/index.html +++ b/posts/kbhsarsa_lambda/index.html @@ -7,7 +7,7 @@ Recall that, sparse rewards with SARSA can take a long time to learn because it takes time to backpropgate.">
Houjun Liu
-

Sarsa (Lambda)

Sarsa (Lambda) is SARSA with Eligibility Traces (\(\lambda\)).

Previous approaches to deal with Partially Observable Markov Decision Process:

  • memory-based state estimation (beliefs)
  • special planning methods

Key question: Can we use MDP reinforcement learning to deal with POMDPs?

Background

Recall MDP SARSA:

\begin{equation} +

Sarsa (Lambda)

Sarsa (Lambda) is SARSA with Eligibility Traces (\(\lambda\)).

Previous approaches to deal with Partially Observable Markov Decision Process:

  • memory-based state estimation (beliefs)
  • special planning methods

Key question: Can we use MDP reinforcement learning to deal with POMDPs?

Background

Recall MDP SARSA:

\begin{equation} Q(s,a) \leftarrow Q(s,a) + \alpha \qty [(r + \gamma Q(s’, a’)) - Q(s,a)] \end{equation}

Recall that sparse rewards with SARSA can take a long time to learn because it takes time to backpropagate.

Hence, we use Eligibility Traces, which keeps track of what’s “eligible” for updates:

let \(\lambda\) be some decay parameter, we have:

\begin{equation} \delta = r + \gamma Q(s’,a’) - Q(s,a) diff --git a/posts/kbhscheduling/index.html b/posts/kbhscheduling/index.html index e87b0ccf6..acc259e44 100644 --- a/posts/kbhscheduling/index.html +++ b/posts/kbhscheduling/index.html @@ -4,4 +4,4 @@ ready => running blocked => running blocked => ready => running You can’t go from ready to blocked because you have to do something to know you are blocked.">

Houjun Liu
-

scheduling

scheduling is the tool to figure out which thread can run. Because threads exist in different thread states:

  1. running
  2. blocked - waiting for an event like disk, network, etc.
  3. ready - able to run, but not on CPU yet

a scheduler needs to do the task of ordering ready threads to run, moving running threads to ready when it has ran for enough time. Possible pathways:

  1. ready => running
  2. blocked => running
  3. blocked => ready => running

You can’t go from ready to blocked because you have to do something to know you are blocked.

scheduling “ready” threads

The following assumes one core.

Tradeoffs:

  1. minimize time to a useful result—(assumption: a “useful result” = a thread blocking or completes)
  2. using resources efficiently (keeping cores/disks busy)
  3. fairness (multiple users / many jobs for one users)

Typically, we focus on (1); approaches that maximize useful results quickly are unfair because you are prioritizing. We can measure this based on “average completion time”: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.

first-come first-serve

  • keep all threads in ready in a queue
  • run the first thread on the front until it finishes/it blocks for however long
  • repeat

Problem: a thread can run away with the entire system, accidentally, through infinite loops

round robin

  • keep all threads in a round robin
  • each thread can run for a set amount of time called a time slice (10ms or so)
  • if a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin

Problem: what’s a good time slice?

  • too small: the overhead of context switching is higher than the overhead of running the program
  • too large: threads can monopolize cores, can’t handle user input, etc.

Linux uses 4ms. Generally, you want 5-10ms range.

You can think about this as dividing each time slot by time slices, and add as fcfs

shortest remaining processing time

Run first the thread in the queue that will finish the most quickly and run it fully to completion.

It gives preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.

THIS IS not implementable—we can’t build this because we have to know which thread will finish the most quickly, which we can’t because you have to solve the halting problem to know.

Our goal, then is to get as close as possible to the performance of SRPT.

Problem:

  1. we don’t know which one will finish the most quickly
  2. if we have many threads and one long-running thread, the long running thread won’t be able to run ever

priority based scheduling

Key idea: behavior tends to be consistent in a thread. We build multiple priority queues to address this.

priority based scheduling is an approximation of SRPT, using the past performance of the thread to estimate the running time of the thread. Over time, threads will move between priority queues, and we run the topmost thread from the highest priority queue

  1. threads that aren’t using much CPU stay in higher priority queue
  2. threads that are using much CPU gets bumped down to lower priority queues

Similar to SRPT, this also has the good property of giving preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.

implement based on time slice usage

a thread always enters in the highest priority queue

  1. if the thread uses all of its time slice and didn’t exit, bump them down a priority queue
  2. if a thread blocked before it used all of its time slice, bump them up a priority queue

implement based on aggregate time used: fixing neglect

a thread has a number for “how much time did you use on the CPU recently”? The priorities are sorted by that value, and the thread with the smallest time use will run.

\ No newline at end of file +

scheduling

scheduling is the tool to figure out which thread can run. Because threads exist in different thread states:

  1. running
  2. blocked - waiting for an event like disk, network, etc.
  3. ready - able to run, but not on CPU yet

a scheduler needs to do the task of ordering ready threads to run, moving running threads to ready when it has ran for enough time. Possible pathways:

  1. ready => running
  2. blocked => running
  3. blocked => ready => running

You can’t go from ready to blocked because you have to do something to know you are blocked.

scheduling “ready” threads

The following assumes one core.

Tradeoffs:

  1. minimize time to a useful result—(assumption: a “useful result” = a thread blocking or completes)
  2. using resources efficiently (keeping cores/disks busy)
  3. fairness (multiple users / many jobs for one users)

Typically, we focus on (1); approaches that maximize useful results quickly are unfair because you are prioritizing. We can measure this based on “average completion time”: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.

first-come first-serve

  • keep all threads in ready in a queue
  • run the first thread on the front until it finishes/it blocks for however long
  • repeat

Problem: a thread can run away with the entire system, accidentally, through infinite loops

round robin

  • keep all threads in a round robin
  • each thread can run for a set amount of time called a time slice (10ms or so)
  • if a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin

Problem: what’s a good time slice?

  • too small: the overhead of context switching is higher than the overhead of running the program
  • too large: threads can monopolize cores, can’t handle user input, etc.

Linux uses 4ms. Generally, you want 5-10ms range.

You can think about this as dividing each time slot by time slices, and add as fcfs

shortest remaining processing time

Run first the thread in the queue that will finish the most quickly and run it fully to completion.

It gives preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.

THIS IS not implementable—we can’t build this because we have to know which thread will finish the most quickly, which we can’t because you have to solve the halting problem to know.

Our goal, then is to get as close as possible to the performance of SRPT.

Problem:

  1. we don’t know which one will finish the most quickly
  2. if we have many threads and one long-running thread, the long running thread won’t be able to run ever

priority based scheduling

Key idea: behavior tends to be consistent in a thread. We build multiple priority queues to address this.

priority based scheduling is an approximation of SRPT, using the past performance of the thread to estimate the running time of the thread. Over time, threads will move between priority queues, and we run the topmost thread from the highest priority queue

  1. threads that aren’t using much CPU stay in higher priority queue
  2. threads that are using much CPU gets bumped down to lower priority queues

Similar to SRPT, this also has the good property of giving preference to those that need it the least: a good side effect is that it gives preference to I/O Bound Thread first, so we can wait on them during disk operations while CPU Threads run after the I/O Bound Thread has ran.

implement based on time slice usage

a thread always enters in the highest priority queue

  1. if the thread uses all of its time slice and didn’t exit, bump them down a priority queue
  2. if a thread blocked before it used all of its time slice, bump them up a priority queue

implement based on aggregate time used: fixing neglect

a thread has a number for “how much time did you use on the CPU recently”? The priorities are sorted by that value, and the thread with the smallest time use will run.

\ No newline at end of file diff --git a/posts/kbhsecond_order_linear_differential_equation/index.html b/posts/kbhsecond_order_linear_differential_equation/index.html index bd1ea8396..40d17d74f 100644 --- a/posts/kbhsecond_order_linear_differential_equation/index.html +++ b/posts/kbhsecond_order_linear_differential_equation/index.html @@ -7,7 +7,7 @@ The general goal to solve in this case is to make this a system of First-Order Differential Equations.">
Houjun Liu
-

Second-Order Linear Differential Equations

Here’s a general form:

\begin{equation} +

Second-Order Linear Differential Equations

Here’s a general form:

\begin{equation} a\dv[2]{x}{t} + b \dv{x}{t} + cx = f(t) \end{equation}

see:

solving homogeneous higher-order differential equations

This problem becomes easier if the right side is \(0\).

\begin{equation} a\dv[2]{x}{t} + b \dv{x}{t} + cx = 0 diff --git a/posts/kbhsemantic_primes/index.html b/posts/kbhsemantic_primes/index.html index 0cb6cc85b..3e45ff400 100644 --- a/posts/kbhsemantic_primes/index.html +++ b/posts/kbhsemantic_primes/index.html @@ -5,4 +5,4 @@ guidelines for identifying semantic primes A semantic prime has to be found in every(ish?) natural language A semantic prime has to be indefinable by other primes proof for the existence of semantic primes Proof: given if the Strong Lexicalization Hypothesis holds, semantic primes must exist.">

Houjun Liu
-

semantic prime

In NSM, semantic primes are the most fundamental “lexical units” (so they can be words, or morphemes, etc. the size doesn’t matter) across languages.

They are the “core of a universal mental lexicon”.

There are…

guidelines for identifying semantic primes

  1. A semantic prime has to be found in every(ish?) natural language
  2. A semantic prime has to be indefinable by other primes

proof for the existence of semantic primes

Proof: given if the Strong Lexicalization Hypothesis holds, semantic primes must exist.

Assume for the sake of contradiction no semantic primes exist.

Because Strong Lexicalization Hypothesis holds, there does not exist syntactic transformations which can take original single words and transform them into newly lexicalized words to express a different meaning.

At the same time, again because of the Strong Lexicalization Hypothesis, one must only leverage syntactic transformation on syntactic constituents when forming ideas.

Therefore, given a word to lexicalize, it has to be defined by a syntactic transformation on a set of previously lexicalized words.

(by definition) there are no words lexicalizable from the empty set of words.

Therefore, there exists some word that needs to be lexicalized by words that are not previously defined, which is absurd. (instead, these words are lexicalized via semantic primes.)

QED

problems with semantic primes

  1. the list has grown over time
  2. the problem of allolexy: formal restrictions of a language resulting in the same concept needing to be realized multiple times (I vs. me)

finding semantic primes

According to (Geeraerts 2009), (Goddard 2009) provides a “practical” (though flawed) way of establishing primes. Something to do with large-scale comparisons in “whole metalanguage studies”, which requires pairwise language comparison

Locating primes are seen as an enforcement of NSM theories (Vanhatalo, Tissari, and Idström, n.d.). Recent prime locations: in Amharic (Amberber 2008), East Cree (Junker 2008), French (Peeters 1994), Japanese (Onishi 1994), Korean (Yoon 2008), Lao (Enfield 2002), Mandarin (Chappell 2002), Mangaaba-Mbula (Bugenhagen 2002), Malay (Goddard 2002), Polish (Wierzbicka 2002), Russian (Gladkova 2010, for the latest set, see the NSM home page), Spanish (Travis 2002), and Thai (Diller 1994).

\ No newline at end of file +

semantic prime

In NSM, semantic primes are the most fundamental “lexical units” (so they can be words, or morphemes, etc. the size doesn’t matter) across languages.

They are the “core of a universal mental lexicon”.

There are…

guidelines for identifying semantic primes

  1. A semantic prime has to be found in every(ish?) natural language
  2. A semantic prime has to be indefinable by other primes

proof for the existence of semantic primes

Proof: given if the Strong Lexicalization Hypothesis holds, semantic primes must exist.

Assume for the sake of contradiction no semantic primes exist.

Because Strong Lexicalization Hypothesis holds, there does not exist syntactic transformations which can take original single words and transform them into newly lexicalized words to express a different meaning.

At the same time, again because of the Strong Lexicalization Hypothesis, one must only leverage syntactic transformation on syntactic constituents when forming ideas.

Therefore, given a word to lexicalize, it has to be defined by a syntactic transformation on a set of previously lexicalized words.

(by definition) there are no words lexicalizable from the empty set of words.

Therefore, there exists some word that needs to be lexicalized by words that are not previously defined, which is absurd. (instead, these words are lexicalized via semantic primes.)

QED

problems with semantic primes

  1. the list has grown over time
  2. the problem of allolexy: formal restrictions of a language resulting in the same concept needing to be realized multiple times (I vs. me)

finding semantic primes

According to (Geeraerts 2009), (Goddard 2009) provides a “practical” (though flawed) way of establishing primes. Something to do with large-scale comparisons in “whole metalanguage studies”, which requires pairwise language comparison

Locating primes are seen as an enforcement of NSM theories (Vanhatalo, Tissari, and Idström, n.d.). Recent prime locations: in Amharic (Amberber 2008), East Cree (Junker 2008), French (Peeters 1994), Japanese (Onishi 1994), Korean (Yoon 2008), Lao (Enfield 2002), Mandarin (Chappell 2002), Mangaaba-Mbula (Bugenhagen 2002), Malay (Goddard 2002), Polish (Wierzbicka 2002), Russian (Gladkova 2010, for the latest set, see the NSM home page), Spanish (Travis 2002), and Thai (Diller 1994).

\ No newline at end of file diff --git a/posts/kbhsingular_value_decomposition/index.html b/posts/kbhsingular_value_decomposition/index.html index cfc061163..ee83ea0c4 100644 --- a/posts/kbhsingular_value_decomposition/index.html +++ b/posts/kbhsingular_value_decomposition/index.html @@ -5,7 +5,7 @@ where, \(U\) is an unitary matrix, \(D^{\frac{1}{2}}\) a diagonalish (i.">
Houjun Liu
-

singular value decomposition

Singular value decomposition is a factorization of a matrix, which is a generalization of the eigendecomposition of normal matrices (i.e. where \(A = V^{-1} D V\) when \(A\) is diagonalizable, i.e. by the spectral theorem possible when matrices are normal).

Definitions

Singular value decomposition Every \(m \times n\) matrix has a factorization of the form:

\begin{equation} +

singular value decomposition

Singular value decomposition is a factorization of a matrix, which is a generalization of the eigendecomposition of normal matrices (i.e. where \(A = V^{-1} D V\) when \(A\) is diagonalizable, i.e. by the spectral theorem possible when matrices are normal).

Definitions

Singular value decomposition Every \(m \times n\) matrix has a factorization of the form:

\begin{equation} M = U D^{\frac{1}{2}} V^{*} \end{equation}

where, \(U\) is an unitary matrix, \(D^{\frac{1}{2}}\) a diagonalish (i.e. rectangular diagonal) matrix with non-negative numbers on its diagonal called singular values, which are the positive square roots of eigenvalues of \(M^{* }M\) — meaning the diagonal of \(D^{\frac{1}{2}}\) is non-negative (\(\geq 0\)). Finally, \(V\) is formed columns of orthonormal bases of eigenvectors of \(M^{*}M\).

SVD is not technically unique, but we like to force a specific (convenient, see proof for why) ordering: where \(D^{\frac{1}{2}}\) (and the corresponding values in \(V^{*}\)) is sorted such that the zero values are to the right.

Doing It

Doing SVD is not actually super duper hard, but it takes some thinking on why it works, which we shall do below.

Recall that \(V^{* }\) is the conjugate transpose of the orthonormal eigenvectors of \(M^{*} M\). Then, we construct the square roots of the corresponding eigenvalues and arrange them into \(D^{\frac{1}{2}}\).


Tangent:

Why is it we can take square roots of these values (i.e. the eigenvalues are guaranteed positive or zero?) Recall the definition of adjoint:

\begin{equation} \langle Tv, w \rangle = \langle v, T^{*}w \rangle diff --git a/posts/kbhsoftware_development_methodologies/index.html b/posts/kbhsoftware_development_methodologies/index.html index 9e3e58ca2..0322f3ef2 100644 --- a/posts/kbhsoftware_development_methodologies/index.html +++ b/posts/kbhsoftware_development_methodologies/index.html @@ -5,4 +5,4 @@ Agile Agile are designed to work with minimum specification before code. Spec is updated constantly as code changes and get user feedback.">

Houjun Liu
-

Software Development Methodologies

The software development models, or Software Development Life-cycles (SDLCs), are methodologies to approach organizing a software project.

Waterfall

The waterfall specification gets written before any code written. We hand off spec and code directly to tester, and code should behave like spec.

  • Code specification exactly
  • Spec does not update

Code happens only after stuff is done

Agile

Agile are designed to work with minimum specification before code. Spec is updated constantly as code changes and get user feedback.

Spiral (Software Development)

The Spiral model is an SDLC that combines the iterative development approach of Agile and the structure of Waterfall.

It focuses on Risk to mitigate it.

  1. Waterfall style requirements detailing
  2. Preliminary design
  3. First prototype: scaled down system
  4. Second prototype
    1. Mitigates strengths, weaknesses, and risks of 1st prototype
    2. Augmented requirements that got scaled down during the first prototype
  5. “The entire project can be aborted if the risk is deemed too great.”
    1. Budget
    2. Operating cost
  6. Repeat until customer likes it
  7. Construct final system using the prototype as a spec

Other Non-Canonical SDLCs

Test-Driven Development

See Test-Driven Development

Extreme Programming

TDD + continually integrating code and pair programming to review code

\ No newline at end of file +

Software Development Methodologies

The software development models, or Software Development Life-cycles (SDLCs), are methodologies to approach organizing a software project.

Waterfall

The waterfall specification gets written before any code written. We hand off spec and code directly to tester, and code should behave like spec.

  • Code specification exactly
  • Spec does not update

Code happens only after stuff is done

Agile

Agile are designed to work with minimum specification before code. Spec is updated constantly as code changes and get user feedback.

Spiral (Software Development)

The Spiral model is an SDLC that combines the iterative development approach of Agile and the structure of Waterfall.

It focuses on Risk to mitigate it.

  1. Waterfall style requirements detailing
  2. Preliminary design
  3. First prototype: scaled down system
  4. Second prototype
    1. Mitigates strengths, weaknesses, and risks of 1st prototype
    2. Augmented requirements that got scaled down during the first prototype
  5. “The entire project can be aborted if the risk is deemed too great.”
    1. Budget
    2. Operating cost
  6. Repeat until customer likes it
  7. Construct final system using the prototype as a spec

Other Non-Canonical SDLCs

Test-Driven Development

See Test-Driven Development

Extreme Programming

TDD + continually integrating code and pair programming to review code

\ No newline at end of file diff --git a/posts/kbhsoftware_engineering/index.html b/posts/kbhsoftware_engineering/index.html index 352e429d7..9d31fced7 100644 --- a/posts/kbhsoftware_engineering/index.html +++ b/posts/kbhsoftware_engineering/index.html @@ -3,4 +3,4 @@

Software Engineering Index

# -index

process of Engineering: chronological order

Other topics

  • Query optimization (TODO)

Fucking acronyms to know

fundamental trade-off of Software Engineering

The MIT vs. New Jersey problem: in Software Engineering, you can only choose one of FAST or ROBUST.

ProblemFast (“Bell Labs/NJ”)Robust (“MIT”)
SpecsWhatever it looks likescreens, states, UI elements documented; transitions
Time“whenever”precise projections, track work and dependencies
Testing“ran it + didn’t crash”black, white box, code coverage, edge/adv. cases
ModularGiant functionobject/data model, grouped function, abstraction barriers
FailureUnpredictable + silentGraceful, noisy, error reporting + logging
LanguageScripting, high levelLow-level, assembly/bare metal, control, can be difficult
Proto.Many/QuicklyFew/Slowly
Being DoneNowLater

Source: here.

how to choose?

Which is the better approach? There isn’t one. However, here are some critical questions for you to answer:

  • Deadline: what happens if you don’t finish today?
  • Release cycle: if you ship a bug, how long can you fix it?
  • Consequences: if the software malfunctions, how bad is it?
  • Life-cycle: how long will the software get used?

So—

As consequences for deadline gets worse, trend towards fast; as consequences for failure gets worse, trend towards robust.

\ No newline at end of file +index

process of Engineering: chronological order

Other topics

  • Query optimization (TODO)

Fucking acronyms to know

fundamental trade-off of Software Engineering

The MIT vs. New Jersey problem: in Software Engineering, you can only choose one of FAST or ROBUST.

ProblemFast (“Bell Labs/NJ”)Robust (“MIT”)
SpecsWhatever it looks likescreens, states, UI elements documented; transitions
Time“whenever”precise projections, track work and dependencies
Testing“ran it + didn’t crash”black, white box, code coverage, edge/adv. cases
ModularGiant functionobject/data model, grouped function, abstraction barriers
FailureUnpredictable + silentGraceful, noisy, error reporting + logging
LanguageScripting, high levelLow-level, assembly/bare metal, control, can be difficult
Proto.Many/QuicklyFew/Slowly
Being DoneNowLater

Source: here.

how to choose?

Which is the better approach? There isn’t one. However, here are some critical questions for you to answer:

  • Deadline: what happens if you don’t finish today?
  • Release cycle: if you ship a bug, how long can you fix it?
  • Consequences: if the software malfunctions, how bad is it?
  • Life-cycle: how long will the software get used?

So—

As consequences for deadline gets worse, trend towards fast; as consequences for failure gets worse, trend towards robust.

\ No newline at end of file diff --git a/posts/kbhsolving_pdes_via_fourier_transform/index.html b/posts/kbhsolving_pdes_via_fourier_transform/index.html index 6b59adc73..fb554de75 100644 --- a/posts/kbhsolving_pdes_via_fourier_transform/index.html +++ b/posts/kbhsolving_pdes_via_fourier_transform/index.html @@ -7,6 +7,6 @@ Apply a Fourier Transform on \(f(x)\) This allows you to plug the initial conditions into your transformed expression above.">
Houjun Liu
-

Solving PDEs via Fourier Transform

This will have no explicit boundary conditions in \(x\)!

Assume \(|U(t,x)|\) decays quickly as \(|x| \to \infty\).

Apply Fourier Transform

Step one is to apply the Fourier Transform on our PDE

\begin{equation} +

Solving PDEs via Fourier Transform

This will have no explicit boundary conditions in \(x\)!

Assume \(|U(t,x)|\) decays quickly as \(|x| \to \infty\).

Apply Fourier Transform

Step one is to apply the Fourier Transform on our PDE

\begin{equation} \hat{U}(t, \lambda) = \int_{R} U(t,x) e^{-i\lambda x} \dd{x} \end{equation}

Leveraging the fact that Derivative of Fourier Transform is a multiplication, we can simply our Fourier transform in terms of one expression in \(x\).

Apply a Fourier Transform on \(f(x)\)

This allows you to plug the initial conditions into your transformed expression above.

Solve for \(\hat{U}(t,\lambda)\), and then convert back

This uses the inverse Fourier transform.

\ No newline at end of file diff --git a/posts/kbhstable_matching_problem/index.html b/posts/kbhstable_matching_problem/index.html index b526ebb0f..77d46a754 100644 --- a/posts/kbhstable_matching_problem/index.html +++ b/posts/kbhstable_matching_problem/index.html @@ -4,4 +4,4 @@ We want to discover a stable matching, where pairs are most unwilling to move.">
Houjun Liu
-

The Stable Matching Problem is Wes Chao’s favourite algorithm.

Consider two populations, \(A\) and \(B\), who want to form paired relationships between a person \(A\) and \(B\). \(A_i\) has a list of their ranked order matches (I want to be paired with \(B_1\) most, \(B_4\) second, etc.), and so does \(B_i\) (I want to be paired with \(A_4\) most \(A_9\) second, etc.)

We want to discover a stable matching, where pairs are most unwilling to move. We can solve it using the stable matching algorithm.

Nueva Invention Studio speed-dating noises?

applications of the stable matching problem

  • Dating
  • Applying to college
  • Both of these are high-stress situations, especially if you are doing asking
  • You can mathematically prove that person doing the asking gets the best result

Hence, it shows us that the best possible outcomes go to the people who are willing to ask and get rejected.

extensions to the stable matching problem

the stable matching problem can be extended to the rural hospitals problem, which is slightly better.

\ No newline at end of file +

The Stable Matching Problem is Wes Chao’s favourite algorithm.

Consider two populations, \(A\) and \(B\), who want to form paired relationships between a person \(A\) and \(B\). \(A_i\) has a list of their ranked order matches (I want to be paired with \(B_1\) most, \(B_4\) second, etc.), and so does \(B_i\) (I want to be paired with \(A_4\) most \(A_9\) second, etc.)

We want to discover a stable matching, where pairs are most unwilling to move. We can solve it using the stable matching algorithm.

Nueva Invention Studio speed-dating noises?

applications of the stable matching problem

  • Dating
  • Applying to college
  • Both of these are high-stress situations, especially if you are doing asking
  • You can mathematically prove that person doing the asking gets the best result

Hence, it shows us that the best possible outcomes go to the people who are willing to ask and get rejected.

extensions to the stable matching problem

the stable matching problem can be extended to the rural hospitals problem, which is slightly better.

\ No newline at end of file diff --git a/posts/kbhstring/index.html b/posts/kbhstring/index.html index cb965c823..16a37992b 100644 --- a/posts/kbhstring/index.html +++ b/posts/kbhstring/index.html @@ -4,7 +4,7 @@ String Pointer Syntax Sugar Synonyms char str[6]; // these are equivalent char *ptr = str; char *ptr = &str[0]; char *ptr = &str; // DON'T DO THIS // these are equivalent char thirdLetter = str[3]; char thirdLetter = *(str + 3); seven commandments of c strings if we create a string as char[], we can modify its characters because its memory lives in our stack instead of living in a global data segment we can’t set char[] as equaling to something, because its not strictly a pointer and instead it refers to an entire block of memory instead of a pointer to the first element (in a same vein, an array’s size is fixed and travels with the variable) if we pass char[] as a parameter, it is converted to a char * if we create a string with new string literal as char *thing = "thing", we can’t modify it because its on the global data segment we can set char * equaling to another value because its a pointer adding an offset to a c string gives a substring that’s places past the first character if we change characters in a string parameter, these changes will persist passing strings around Strings are passed as a pointer to their first character.'>
Houjun Liu
-

string

In C, a string is an array of chars. C strings don’t track their length; each C string always ends in a null-terminating character: \0, which represents the zero byte.

There’s a built in function strlen which checks the length of a string without the null-terminating character. This function is O(n)!!!

String Pointer Syntax Sugar Synonyms

char str[6];
+

string

In C, a string is an array of chars. C strings don’t track their length; each C string always ends in a null-terminating character: \0, which represents the zero byte.

There’s a built in function strlen which checks the length of a string without the null-terminating character. This function is O(n)!!!

String Pointer Syntax Sugar Synonyms

char str[6];
 
 // these are equivalent
 char *ptr = str;
diff --git a/posts/kbhsu_cs111_final_sheet/index.html b/posts/kbhsu_cs111_final_sheet/index.html
index 3662248f4..1f00b7fc9 100644
--- a/posts/kbhsu_cs111_final_sheet/index.html
+++ b/posts/kbhsu_cs111_final_sheet/index.html
@@ -3,7 +3,7 @@
 internal v. external fragmentation internal: a file can be no less than a single block of text. external: no space is available even if the space in aggregate is available main designs contiguous allocation IBM used this?">
Houjun Liu
-

SU-CS111 Final Sheet

FS

main challenges

  • naming: how do users name files
  • reliability: surviving OS crashes and hardware failures
  • protection: isolation between users, controlled sharing
  • disk space management: minimize seeks, sharing space (“preventing fragmentation”)

seeks

to wait until the platter go under the arm and read.

internal v. external fragmentation

  • internal: a file can be no less than a single block of text.
  • external: no space is available even if the space in aggregate is available

main designs

contiguous allocation

IBM used this? puts files and meta-data together + implement an explicit free list allocator. benefit: simple; drawback: 1) external fragmentation 2) hard to grow files

linked files

in every block, store the location of the next block; don’t store files continuously—instead, store a pointer to where the next block of the file is. benefit: solves fragmentation and file growth; drawback: 1) huge seek time 2) random access from the middle is hard (i.e. O(n))

Windows FAT

linked files, but cached the file links in memory when using it. benefits: same as linked files, and a bit faster drawback: data still fragmented and now you have a whole ass table to deal with! but its at least faster

File Payload Data

Kind of what we do—instead of storing file data in order OR using links, store the file BLOCK information contiguously.

multi-level index: store all block numbers for a given file down a tree (EXT2/3, Unix V6, NTFS)

Unix V6 + MLI

Sector SizeBlock SizeInode SizeInodes Per BlockAddress Type
5125123216Short, 2 bytes

block

const size_t INODE_PER_BLOCK = SECTOR_SIZE / sizeof(struct inode);
+

SU-CS111 Final Sheet

FS

main challenges

  • naming: how do users name files
  • reliability: surviving OS crashes and hardware failures
  • protection: isolation between users, controlled sharing
  • disk space management: minimize seeks, sharing space (“preventing fragmentation”)

seeks

to wait until the platter go under the arm and read.

internal v. external fragmentation

  • internal: a file can be no less than a single block of text.
  • external: no space is available even if the space in aggregate is available

main designs

contiguous allocation

IBM used this? puts files and meta-data together + implement an explicit free list allocator. benefit: simple; drawback: 1) external fragmentation 2) hard to grow files

linked files

in every block, store the location of the next block; don’t store files continuously—instead, store a pointer to where the next block of the file is. benefit: solves fragmentation and file growth; drawback: 1) huge seek time 2) random access from the middle is hard (i.e. O(n))

Windows FAT

linked files, but cached the file links in memory when using it. benefits: same as linked files, and a bit faster drawback: data still fragmented and now you have a whole ass table to deal with! but its at least faster

File Payload Data

Kind of what we do—instead of storing file data in order OR using links, store the file BLOCK information contiguously.

multi-level index: store all block numbers for a given file down a tree (EXT2/3, Unix V6, NTFS)

Unix V6 + MLI

Sector SizeBlock SizeInode SizeInodes Per BlockAddress Type
5125123216Short, 2 bytes

block

const size_t INODE_PER_BLOCK = SECTOR_SIZE / sizeof(struct inode);
 struct inode inodes[INODE_PER_BLOCK];
 
 char buf[SECTOR_SIZE];
@@ -35,4 +35,199 @@
     }
 }
 
int open(const char *pathname, int flags);
-

Flags are combined with bitwise OR: you have to open with O_RDONLY (read only), O_WRONLY (write only), or O_RDWR (both read and write). This returns \(-1\) if opening the file fails.

Other flags:

  • O_TRUNC (truncate file)
  • O_CREAT (creating a file if not exist), which will require a mode_t mode parameter to set the permission
  • O_EXCL (file must not exist)

Block Cache

We will use part of the main memory to retain recently-accessed disk blocks. This is NOT at the granularity of individual files.

Least Recently Used (LRU) Cache

When you insert a new element into the cache, kick out the element on the cache that hasn’t been touched in the longest time.

Block Cache Modification

we can either write asap, or delay.

write asap: safer: less risk of data loss, written as soon as possible; slow: program must wait to proceed until disk I/O completes

write delay: dangerous: may lose data after a crash; efficient: memory writes are faster

Crash Recovery

main challenges

main designs

goal design

implementation

MP

main challenges

main designs

goal design

implementation

MT

main challenges

main designs

goal design

implementation

Virtual Memory

main challenges

main designs

goal design

implementation

Multicore + Flash

main challenges

main designs

goal design

implementation

Ethics

main challenges

main designs

goal design

implementation

Crash Recovery: tradeoffs, data loss and inconsistency, atomic operations, free list and block cache, fsck, ordered writes, write-ahead logging, transactions, checkpoints, idempotency, durability and consistency

Multiprocessing: processes, PIDs, fork, execution order, copy on write, waitpid, zombie processes, execvp, pipes and pipe / pipe2, I/O redirection

Multithreading: processes vs. threads, C++ threads and .join(), thread safety, race conditions, atomicity, critical sections, mutexes, deadlock, busy waiting, condition variables, notify_all, unique_lock, monitor pattern; dining philosophers

Dispatching / Scheduling: Process control blocks, traps and interrupts, context switching, thread state (running / blocked / ready), I/O-bound and CPU-bound threads, scheduling algorithms, first-come-first-serve, round robin, shortest remaining processing time (SRPT), priority-based scheduling, preemption, interrupts, implementing single-core locks and condition variables

Virtual memory: single-tasking, process memory, memory sharing goals, load-time relocation, dynamic address translation and MMU, virtual and physical addresses, base and bound, multiple segments, paging, demand paging, page maps, page faults, thrashing, fragmentation, disk swap, page replacement policies, random replacement, FIFO replacement, LRU replacement, clock algorithm, per process vs. global replacement, virtualization.

Modern technologies: multicore processors (multicore scheduling, work stealing, core affinity, gang scheduling, multicore locks (for multicore locks, just high level ideas about interrupts being insufficient to prevent races, atomic operations, and that busy waiting is necessary)), flash storage (quirks of erase + write operations, wear-out, wear-leveling, flash translation layer high-level idea)

Ethics and trust: trust and agency, trust by assumption, trust by inference, trust by substitution, agential gullibility, violations of trust, stakeholders, pervasiveness, time.

\ No newline at end of file +

Flags are combined with bitwise OR: you have to open with O_RDONLY (read only), O_WRONLY (write only), or O_RDWR (both read and write). This returns \(-1\) if opening the file fails.

Other flags:

  • O_TRUNC (truncate file)
  • O_CREAT (creating a file if not exist), which will require a mode_t mode parameter to set the permission
  • O_EXCL (file must not exist)

open file table

the open-file table is system-wide: each entry records the mode the file was opened with, the cursor (offset) into the open file, and a refcount of the number of file descriptors pointing to it.

why is refcount ever higher than 1? because forks.

Block Cache

We will use part of the main memory to retain recently-accessed disk blocks. This is NOT at the granularity of individual files.

Least Recently Used (LRU) Cache

When you insert a new element into the cache, kick out the element on the cache that hasn’t been touched in the longest time.

Block Cache Modification

we can either write asap, or delay.

write asap: safer: less risk of data loss, written as soon as possible; slow: program must wait to proceed until disk I/O completes

write delay: dangerous: may lose data after a crash; efficient: memory writes are faster

Crash Recovery

main challenges

  • data loss: crashes can happen, and not all data could be saved to disk
  • inconsistency: crashes can happen in the middle of operations

Ideally, filesystem operations should be atomic. Every operation should happen or not happen at all—but not halfway.

fsck

  • Check whether or not there is a clean shutdown: setting a disk flag on clean shutdown; so if the flag isn’t set there isn’t a clean shutdown.
  • If it wasn’t a clean shutdown, identify inconsistencies
  • Scans meta data (inodes, indirect blocks, free list, directory blocks) and handle any of the following situations—
    1. block in an inode and in free list; solution: pull the block off of free list
    2. block is a part of two inodes; solution: give to newest, random, copy, remove (bad idea)
    3. inode claims one dirent refers to it, but there are no such dirent; solution: put in lost and found

limitations

  • takes long because can’t restart until done
  • doesn’t prevent loss of actual file info
  • filesystem may still be unusable (core files moved to lost+found)
  • a block could migrate during recovery, leaking info

ordered writes

  1. Always initialize the TARGET before initializing the REFERENCE
    • Initialize the inode before initializing the directory entry that refers to it
  2. Never reuse a resource before NULLIFYING all existing REFERENCES
    • Remove the inode reference before putting a block on the free list
  3. Never clear the LAST REFERENCE to a live resource before setting a NEW REFERENCE (“its better to have 2 copies instead of none”)
    • Make the new directory entry before get rid of the old one

limitations

  • performance: we need to do operations synchronously
    • if we really want to do caching async, we can track dependencies
    • circular dependencies are possible
  • leak: it could leak resources (reference nullification happens but resource not added)
    • We can run fsck in the background

journaling

journaling keeps a paper trail of disk operations in the event of a crash. We have an append-only log on disk that stores disk operations.

  • before performing an operation, record its info in the log
  • and write that to disk

The log will always record what’s happening ahead. The actual block updates can eventually be carried out in any order.

what do we log?

  • we only log metadata changes (inodes, moving stuff around, etc.)
  • payload operations are not saved

structure

We typically have a LSN: log serial number, operations, and metadata.

  • LogPatch: changes something
  • LogBlockFree: mark something as free
  • LogBlockAlloc: mark something as allocated, optionally zeroing data if its a data block (DO NOT zero if its a dirent or ino)
[offset 335050]
+LSN 18384030
+operation = "LogBlockAlloc"
+blockno = 1027
+zero_on_replay = 0
+
+[offset 23232]
+LSN N
+operation = "LogPatch"
+blockno = 8
+offset = 137
+bytes = 0.04
+inode = 52
+

limitations and fixes

  • multiple log entries: each atomic operation will be wrapped into a unit transaction to make idempotent
  • checkpoints: we can truncate the log occasionally at a checkpoint—when it is no longer needed
  • where do we start replaying: log entries should be idempotent—doing something multiple times should have the same effect of doing them once. Logs cannot have external dependencies
  • log entries may take time: when finally we write stuff to disk, we write the logs first. So no problems there.

tradeoffs

  • durability - the data needs to be safe (which is slow, and may require manual crash recovery (sans cache, etc.))
  • performance - it needs to be fast (which may mean less error checking)
  • consistency - the filesystem needs to be uniform (which means that we need to be slower and we may drop data in favor of previous checkpoints that worked)

MP

Multiprocessing: processes, PIDs, fork, execution order, copy on write, waitpid, zombie processes, execvp, pipes and pipe / pipe2, I/O redirection

forking

pid_t child_pid = fork();
+

fork returns the child PID if parent; returns 0 if child.

The arguments list have to BEGIN WITH EXECUTABLE NAME and END WITH NULL.

char *args[] = { "/bin/ls", "-l", "~/hewo", NULL };
+execvp(args[0], args);
+

execvp LEAVES THE FILE DESCRIPTOR TABLE.

every fork has to be waited on by waitpid:

pid_t waitpid(pid_t pid, int *status, int options);
+
  • pid
  • status: pointer to store return about the child
  • options (0 for now)

if the PID has died, this returns immediately. Otherwise, this blocks.

the status int

is a bitmap with a bunch of stuff, which we can check with a series of macros

int status;
+int pid_act = waitpid(pid, &status, 0);
+
+if (WIFEXISTED(status)) {
+    // child normal exit
+    int statuscode = WEXITSTATUS(status);
+} else {
+   // abnormal exist
+}
+

the returned PID is the PID that got waited on; if the input PID is -1, it will wait on any process

fork mechanics

The act of copying the stack and heap sounds really expensive. So… what happens?

The child will map the parent’s memory addresses to different physical addresses than for the parent. The copies are LAZY—if the child writes to an area in memory, its virtual address are mapped to different addresses. If no writes by the child happen, the virtual address are mapped to the same address.

during file reading, the file descriptors gets cloned, the underlying open file table doesn’t close.

pipes

int pipes[2];
+
+// create the pipes
+int ret = pipe(pipes);
+/* int ret = pipe2(pipes, O_CLOEXEC); */
+
+// an so
+int read_from_here = ret[0];
+int write_to_here = ret[1];
+// i.e. ret[1] writes to => ret[0] read
+
+// fork!
+pid_t pid_p = fork();
+
+if(pid_p == 0) {
+    // child subroutine
+    // because child is READING, and not READINg
+    // we want to close the write
+    close(write_to_here);
+
+    // we want to then make a buffer
+    char buf[num_bytes];
+    // if the child reads before the parents write
+    // it will block until some data is available
+    // if the write ends are closed globally, read
+    // will also stop.
+    read(read_from_here, buffer, sizeof(buffer));
+    close(read_from_here);
+
+    return 0;
+}
+
+// parent subroutine
+// because parent is WRITING and not READING
+// we don't want the read to block, we will
+// close the parent immediately.
+close(read_from_here);
+
+    // write some data
+write(write_to_here, "msg", num_bytes);
+
+// close now we are done writing
+close(write_to_here);
+
+// clean up child
+waitpid(pid_p, NULL, 0);
+

Recall that dup2 exists:

dup2(fds[0], STDIN_FILENO);
+close(fds[0]);
+

it will close the second file descriptor, if already in use, before binding the first file descriptor to it.

shell

while (true) {
+    char *command = { "ls", "things" };
+
+    pid_t child_pid = fork();
+    if (!child_pid) {
+        // this is the child; execvp will check PATH for you
+        execvp(command.argv[0], command.argv);
+        // if we got here, the PID didn't do well
+        throw STSHException(string(command.argv[0])+": not found or didn't succeed to fork.");
+    }
+
+    waitpid(child_pid);
+
+    // do cleanup
+}
+

MT

// now the thread can execute at any time: once a thread is made, it will run in any order
+thread myThread(function_to_run, arg1, arg2, ...);
+// threads run AS SOON AS SPAWENED: so
+

We can wait for a thread:

myThread.join()
+

You can also start a bunch on a loop:

thread threads[3];
+for (thread& cf : threads) {
+    cf = thread(func, ...);
+}
+

passing by reference

threading doesn’t know the type of arguments being passed into a function; this is especially prevalent when passing by reference.

static void mythingref(int &pbr);
+thread(myfunc, ref(myint));
+

Remember: ref will SHARE MEMORY, and you have no control over when the thread runs. So once a pointer is passed all bets are off in terms of what values things take on.

mutex

it would be nice if a critical section can only be executed once; a mutex can be shared across threads, but can only be “owned” by a single thread at once.

mutex tmp;
+
tmp.lock();
+tmp.unlock();
+

importantly, if multiple threads are waiting on a mutex, there is no guarantee about which thread is going to get the mutex next

  • when there are multiple threads writing to a value
  • when there is a thread writing and one or more threads reading
  • if there are no writes, you don’t need a mutex
    int locked = 0;
    +Queue blocked_queue;
    +
    +void Lock::Lock() {
    +    // disable interrupts: otherwise multiple threads
    +    // could come and lock the mutex (such as between
    +    // the locked check and lock =1
    +    IntrGuard grd;
    +
    +    if (!locked) {
    +        // if our thread is not locked, just lock it
    +        locked = 1;
    +    } else {
    +        // if our thread is locked, we need to prevent our current
    +        // thread from going to the ready queue, and push it to the current thread
    +        blocked_queue.push(CURRENT_THREAD);
    +
    +        // remember this isn't an issue even if IntrGuard
    +        // didn't yet go out of scope; because it will either
    +        // land on a context_switch which will enable interrupts for you
    +        // or land on the beginning of a threadfunc helper, which
    +        // is also going to enable interrupts for you
    +
    +        // nicely, the interrupts are here are *off* as required because switching
    +        // to another thread always will result in reenabling (either by new thread,
    +        // by timer handler, or by IntrGuard)
    +        mark_block_and_call_schedule(CURRENT_THREAD);
    +    }
    +}
    +
    +void Lock::Unlock() {
    +    // disable interrupts: otherwise multiple threads
    +    // could come and lock the mutex (such as between
    +    // the locked check and lock =1
    +    IntrGuard grd;
    +
    +    // if our thread is locked and nobody is waiting for it
    +    if (q.empty()) {
    +        locked = 0;
    +    } else {
    +        unblock_thread(q.pop());
    +        // we do not switch to the unblocked thread, just add it to the
    +        // ready queue. we are entrusting the scheduler to start this thread
    +        // whenever we feel right
    +    }
    +}
    +

CV

condition_variable_any permitsCV;
+
+// ...
+
+thread(ref(permitsCV))
+

Identify the ISOLATED event to notify; notify absolutely only when needed. To notify:

permitsCV.notify_all();
+

To listen:

permits.lock();
+while (permits == 0) {
+    permitsCV.wait(permitsLock);
+}
+
+permits--;
+permitsLock.unlock();
+

the condition variable will…

  1. start sleeping FIRST
  2. unlock a lock FOR US AFTER the sleeping starts
  3. after waiting ends, tries to reacquire the lock
  4. blocks until we have the lock again

unique_lock

void my_scope(mutex &mut, condition_variable_any &cv) {
+    unique_lock<mutex> lck(mut);
+    // do stuff, you can even pass it to a condition variable!
+    cv.wait(lck);
+}
+

Thread States and Contexts

Recall that threads are the unit of execution. The process control block keeps track of the *stack pointer* of the thread %rsp, which means if a thread is put to sleep the state can be stored somewhere on the stack.

Three states:

  1. running (could switch to ready/blocked)
  2. ready able to run, but not on CPU yet (could switch to running only)
  3. blocked: waiting for something (could switch to ready/running)

trap

a trap is a user request for OS attention explicitly from the user thread, swapping the user process off the CPU.

  1. system calls
  2. errors
  3. page fault (memory errors)

interrupt

a interrupt takes place outside the current thread, it forces the OS’ attention even if the user thread isn’t asking for it

  1. character typed at keyboard
  2. completion of a disk operations
  3. a hardware timer that fires an interrupt

what if a timer goes off during an interrupt

interrupts are disabled during interrupt handling, otherwise, this causes an infinite loop.

preemption

We use interrupts to implement preemption, “preempting” threads in order to swap on another thread to CPU. This enables scheduling to happen.

// brand new thread
+
+void interrupt_handler() {
+    /* disables interupts, automatically by timer handler */
+
+    // future spawns start here
+    context_switch(...);
+
+    /* enables interupts, automatically by timer handler */
+}
+
+void threadfunc_wrapper() {
+    // manually enable interrupts before first run
+    intr_enable(true);
+    // start thread's actual business
+    threadfunc();
+}
+

Scheduling

main challenges

  1. minimize time to a useful result—(assumption: a “useful result” = a thread blocking or completes)
  2. using resources efficiently (keeping cores/disks busy)
  3. fairness (multiple users / many jobs for one users)

We can measure 1) based on “average completion time”: tracking the average time elapsed for a particular queue based on the start of scheduling that queue to the time when each thread ends.

main designs

first-come first-serve

  • keep all threads in ready in a queue
  • run the first thread on the front until it finishes/it blocks for however long
  • repeat

Problem: a thread can run away with the entire system, accidentally, through infinite loops

round robin

  • keep all threads in a round robin
  • each thread can run for a set amount of time called a time slice (10ms or so)
  • if a thread terminates before that time, great; if a thread does not, we swap it off and put it to the end of the round robin

Problem: what’s a good time slice?

  • too small: the overhead of context switching is higher than the overhead of running the program
  • too large: threads can monopolize cores, can’t handle user input, etc.

Linux uses 4ms. Generally, you want 5-10ms range.

gold: shortest remaining processing time

Run first the thread in the queue that will finish the most quickly, and run it fully to completion.

It gives preference to those that need it the least (i.e. because it runs the smallest one); of course this is not implementable without an oracle to guess running times.

Our goal, then is to get as close as possible to the performance of SRPT.

Problem:

  1. we don’t know which one will finish the most quickly
  2. if we have many threads and one long-running thread, the long running thread won’t be able to run ever

priority based scheduling

Key idea: behavior tends to be consistent in a thread. We build multiple priority queues to address this.

priority based scheduling is an approximation of SRPT, using the past performance of the thread to estimate the running time of the thread. Over time, threads will move between priority queues, and we run the topmost thread from the highest priority queue

implement based on time slice usage

a thread always enters in the highest priority queue

  1. if the thread uses all of its time slice and didn’t exit, bump them down a priority queue
  2. if a thread blocked before it used all of its time slice, bump them up a priority queue

implement based on aggregate time used: fixing neglect

a thread has a number for “how much time did you use on the CPU recently”? The priories are sorted by that value, and the smallest time use will be ran.

context switch

  1. (in asm) push all callee saved registers except %rsp into the bottom of the old thread’s stack
  2. store the stack pointer %rsp into the process control block for that process corresponding to thread
  3. read the new thread’s stack pointer from the process control block, and load that into %rsp
  4. (in asm) pop all callee saved registers stored on the bottom of our new stack back onto the registers

To deal with new threads, we create a fake freeze frame on the stack for that new thread which looks like you are just about to call the thread function, and calls context_switch normally.

Virtual Memory

main challenges

  • multitasking: multiple processes should be able to use memory
  • transparency: no process need to know that memory is shared; each process should be able to run regardless of the number/locations of processes
  • isolation: processes shouldn’t be able to corrupt other processes’ memory
  • efficiency: shouldn’t be degraded by sharing

crappy designs with no DMT

  • single tasking: assume there’s one process 1) no isolation 2) no multitasking 3) bad fragmentation
  • load time relocation: move the entire program somewhere on load time 1) no isolation 2) can’t grow memory after load 3) external fragmentation after frees

main designs

base and bound

load time relocation + virtual memory

  • assign a location in physical memory, call the base; during translation, we just add every virtual address by the base
  • we can cap the virtual address space for each process by a bound, we can raise a bus error/segfault if it goes above the highest allowable

last possible address: is (bound - 1)+base

  1. compare virtual address to bound, trap and raise if >= bound
  2. then, return virtual address + base

tradeoffs: good - 1) inexpensive 2) doesn’t need more space 3) virtualized; bad - 1) can’t really move either (i.e. need to allocate) 2) fragmentation 3) no read only memory

multiple segments

break stack, heap, etc. into multiple segments; then do base and bound for each segment

tradeoffs: good - 1) you can now recycle segments 2) you can not map the middle 3) you can grow the heap (but not the stack, because it moves downwards); bad - 1) you need to decide segment size and location ahead of time

goal design

paging: fixed segment size, and just split each thing.

we map each page independently, and keep the offset. If a page is unused, internal fragmentation but not too bad. The stack can now grow downwards: because if it reaches into lower page numbers we can just map that page somewhere too.

For instance, typically page sizes are 4kb

Page SizeOffset Number Digits
4096 bytes (16^3)3

then the rest of the address would just be the page number.

Intel’s implementation

Virtual Addresses

Unused (16 bits)Virtual page number (36 bits)Offset (12 bits)

Physical Addresses

Page number (40 bits)Offset (12 bits)

translation

  • chop off page number and offset
  • translate the page number
  • concat the two together

implementation

IndexPhysical AddressWritablePresent/Mapped?Last AccessKernelDirty
00x202310000
10x002311100

Swap

  1. pick a page to kick out
  2. write kicked page to disk
  3. mark the old page entry as not present
  4. give the physical address to the new virtual page

choosing what to swap

  • randomly! (works apparently kinda fine)
  • First-in-first-out (fair, but bad — throw out the page that has been in memory longest; but what if it’s heavily used?)
  • least recently used - clock algorithm

clock algorithm

rotate through all pages until we find one that hasn’t been referenced since last time

  1. we add a reference bit to the page table—its set to \(1\) if the program wrote or read each page, otherwise its set to \(0\)
  2. when page kick is needed, clock algorithm starts where it left off before and scan through physical pages
    1. each page it checks with reference bit 1, it sets the reference bit as 0
    2. if it checked a page and its reference bit is 0, we kick it out (because we’ve gone through two )

We now save the position of the hand—we want to begin checking with the page that hasn’t been checked for the longest time. If every page has a reference bit is one, running this algorithm doesn’t break because it would set its immediately next bit of memory.

page replacement

  • we don’t use per process replacement because we need to allocate max pages per process
  • we use global replacement to maximise usage

demand fetching

most modern OSes start with no pages loaded—load pages only when referenced; this is tempered by the type of page that’s needed:

Page TypeNeed Content on First LoadSave to Swap (“Swap?”)
codeyesno (read from exe)
datayesyes
stack/heapnoyes

We only write to disk if its dirty.

Multicore + Flash

Scheduling Multi-Core CPUs

main approaches

  • one queue for everyone 1) need to figure out what is the priory of things on that queue (for preemption)
  • one queue per core: 1) where do we put a thread? 2) how do we move between cores?

One Ready Queue per Core

  1. where do we put a given thread?
  2. moving threads between cores is expensive

Big tension:

  • Work Stealing: if one core is free (even if there is things in the ready queue), check other cores’ ready queues and try to do thread communism.
  • Core Affinity ideally, because moving threads between cores is expensive (need to rebuild cache), we keep each thread running on the same core.

Gang Scheduling

When you have a thread you are trying to schedule, try to see if there are other threads from the same process in the ready queue and schedule all of them on various cores.

Locking Multi-Core CPUs

disabling interrupts are not enough

hardware atomic operation exchange + busy waiting, which reads, returns, and swaps the value of some memory in a single atomic operation AND which is never ran in parallel; it returns the previous value of the memory before it was set:

class Lock {
+    std::automic<int> sync(0);
+}
+
+void Lock::lock() {
+    while (sync.exchange(1)) {}
+
+    // we are now the only one using it
+    // do work ....
+
+    sync = 0;
+}
+

The exchange function returns the old value.

Flash Storage

writing

You have two operations.

  • erase: You can set ALL SEGMENT of an “erase unit” to \(1\) (“erase unit” size is usually 256k)
  • write: You can modify one “page” at a time (which is smaller than a erase unit)—but you can ONLY set individual bits in the page into 0 (“page” size is usually 512 bytes or 4k bytes)

wear-out

wear leveling: make sure each part of the drive wears out at roughly the same rate as the other parts, by moving commonly written (“hot”) data around

FTL limitations

  • no hardware access (can’t optimize around flash storage)
  • sacrifices performance for compatibility
  • wastes capacity (to look like a hard drive)
  • many layers

Ethics

trusting software is the task of extending your own AGENCY to a piece of software: “agential gullibility”.

pathways to trust

  • trust by assumption: 1) trust absent any clues to warrant it due to timing 2) trust because there is imminent danger
  • trust by inference: trust based on information you had before (brands, affiliation, performance)
  • trust by substitution: having a backup plan

accountability

accountability is in a chain

  • hardware designer (intel)
  • OS developer (iOS, ec.)
  • app developer
  • users

stakeholder

  1. direct stakeholders (people who are operating, technicians, etc.)
  2. indirect stakeholders: patients

purchase = long-term support —- what do you do to get it fixed/repaired.

scales of trust

scale of impact

  • a bug in an OS can be tremendously bad
  • “root access” — privileged access

scale of longevity

  • people maybe on very very old OS
  • it requires keeping older OSes secure against modern technologies
\ No newline at end of file diff --git a/posts/kbhsu_math53_feb122024/index.html b/posts/kbhsu_math53_feb122024/index.html index a6fa7198b..28b05785c 100644 --- a/posts/kbhsu_math53_feb122024/index.html +++ b/posts/kbhsu_math53_feb122024/index.html @@ -7,7 +7,7 @@ \begin{equation} |x(t+h) - (x(t) + h x’(t))| \leq Ch \end{equation}">
Houjun Liu
-

SU-MATH53 FEB122024

How would we solve equations like:

\begin{equation} +

SU-MATH53 FEB122024

How would we solve equations like:

\begin{equation} \begin{cases} y’’ - 2xy’ + 2\lambda y = 0 \\ y’’ - xy = 0 diff --git a/posts/kbhsu_math53_feb212024/index.html b/posts/kbhsu_math53_feb212024/index.html index 4416799ef..79e55a01d 100644 --- a/posts/kbhsu_math53_feb212024/index.html +++ b/posts/kbhsu_math53_feb212024/index.html @@ -6,7 +6,7 @@ Examples Heat Equation See Heat Equation">

Houjun Liu
-

SU-MATH53 FEB212024

A Partial Differential Equation is a Differential Equation which has more than one independent variable: $u(x,y), u(t,x,y), …$

For instance:

\begin{equation} +

SU-MATH53 FEB212024

A Partial Differential Equation is a Differential Equation which has more than one independent variable: $u(x,y), u(t,x,y), …$

For instance:

\begin{equation} \pdv{U}{t} = \alpha \pdv[2]{U}{x} \end{equation}

Key Intuition

  • PDEs may have no solutions (unlike Uniqueness and Existance for ODEs)
  • yet, usually, there are too many solutions—so… how do you describe all solutions?
  • usually, there are no explicit formulas

Laplacian of \(u(x,y)\)

Laplacian of \(u(x,y)\)

Examples

Heat Equation

See Heat Equation

Wave Equation

see Wave Equation

Transport Equation

\begin{equation} \pdv{u}{t} = \pdv{u}{x} diff --git a/posts/kbhsu_math53_jan262023/index.html b/posts/kbhsu_math53_jan262023/index.html index e0263de36..faf098b2c 100644 --- a/posts/kbhsu_math53_jan262023/index.html +++ b/posts/kbhsu_math53_jan262023/index.html @@ -7,7 +7,7 @@ find \(v\), \(\lambda\) for \(A\) guess \(x = u(t)v\), this is “magical substitution” and now, we can see that \(x’ = u’v = A(uv) = \lambda u v\) meaning \(u’ = \lambda u\) finaly, \(u(t) = ce^{\lambda} t\) Eigenbasis case Suppose \(A\) has a basis of eigenvectors, and real eigenvalues.">

Houjun Liu
-

SU-MATh53 JAN262023

Underdetermined ODEs

Finding eigenvectors

\(A = n \times n\) matrix, the task of finding eigenvalues and eigenvectors is a linear algebra problem:

\begin{equation} +

SU-MATh53 JAN262023

Underdetermined ODEs

Finding eigenvectors

\(A = n \times n\) matrix, the task of finding eigenvalues and eigenvectors is a linear algebra problem:

\begin{equation} A v = \lambda v \end{equation}

Finding specific solutions to IVPs with special substitution

For some:

\begin{equation} \begin{cases} diff --git a/posts/kbhsu_math53_practice_1_problem_4/index.html b/posts/kbhsu_math53_practice_1_problem_4/index.html new file mode 100644 index 000000000..796cb58ea --- /dev/null +++ b/posts/kbhsu_math53_practice_1_problem_4/index.html @@ -0,0 +1,42 @@ +SU-MATH53 Practice 1 Problem 4 +

Houjun Liu
+ + +

SU-MATH53 Practice 1 Problem 4

We have:

\begin{equation} +\pdv[2]{u}{x} + \pdv[2]{u}{y} = 0 +\end{equation}

Ignoring the boundary condition at \(x = 0\) (that is, the condition on \(u(0,y)\)) for now, we know that we have Dirichlet boundaries in \(y\). This gives:

\begin{equation} +u(x,0) = u(x,\pi) = 0 +\end{equation}

Assuming our solution takes on the shape of \(u=X(x)Y(y)\), we obtain:

\begin{equation} +X’’(x)Y(y) + Y’’(y)X(x) = 0 +\end{equation}

by plugging in derivatives of that assumption; meaning:

\begin{equation} +X’’(x)Y(y) = -Y’’(y)X(x) +\end{equation}

This gives rise to:

\begin{align} +\frac{X’’(x)}{X(x)} = -\frac{Y’’(y)}{Y(y)} = c +\end{align}

[you know why \(c>0\), so let’s skip to]

We have \(c>0\), meaning:

\begin{equation} +X’’(x) = cX(x) +\end{equation}

for some positive \(c\); this will result in a linear combination of exponentials:

\begin{equation} +X(x) = a_{1} e^{\sqrt{c}x} + a_2 e^{-\sqrt{c}x} +\end{equation}

this is because… try it! try solving \(X’’(x) = cX(x)\).

Now, importantly, let’s declare:

\begin{equation} +\lambda = \sqrt{c} +\end{equation}

This gives:

\begin{equation} +c = \lambda^{2} +\end{equation}

Meaning, we have:

\begin{equation} +\frac{Y’’(y)}{Y(y)} = -\lambda^{2} +\end{equation}

meaning:

\begin{equation} +Y’’(y) = -\lambda^{2} Y(y) +\end{equation}

Now, given we now have a negative sign in front of our second order ODE, we can see that this falls into the sinusoid case, whereby:

\begin{equation}
+Y = a_3 \cos \qty(\lambda y) + a_4 \sin \qty(\lambda y)
+\end{equation}

Our boundary condition gives:

\begin{equation}
+Y(0) = Y(\pi) = 0 \implies a_3 = 0
+\end{equation}

meaning

\begin{equation}
+Y = a_4 \sin \qty(\lambda y)
+\end{equation}

and so on. You multiply them together and all’s well that ends well.

\ No newline at end of file diff --git a/posts/kbhsum_of_subsets/index.html b/posts/kbhsum_of_subsets/index.html index d313d8b73..1e4d2b433 100644 --- a/posts/kbhsum_of_subsets/index.html +++ b/posts/kbhsum_of_subsets/index.html @@ -6,7 +6,7 @@ requirements The sum of subsets \(U_1, \dots, U_{m}\) is defined as:">
Houjun Liu
-

sum of subsets

The sum of subsets is the definition of addition upon two subsets.

Apparently, the unions of subsets are almost never subspaces (they don’t produce linearity?) Therefore, we like to work with sum of subsets more.

Remember this has arbitrarily many things!! as a part of the content. When defining, remember to open that possibility.

constituents

Sub-sets of \(V\) named \(U_1, U_2, \dots, U_{m}\)

requirements

The sum of subsets \(U_1, \dots, U_{m}\) is defined as:

\begin{equation} +

sum of subsets

The sum of subsets is the definition of addition upon two subsets.

Apparently, the unions of subsets are almost never subspaces (they don’t produce linearity?) Therefore, we like to work with sum of subsets more.

Remember this has arbitrarily many things!! as a part of the content. When defining, remember to open that possibility.

constituents

Sub-sets of \(V\) named \(U_1, U_2, \dots, U_{m}\)

requirements

The sum of subsets \(U_1, \dots, U_{m}\) is defined as:

\begin{equation}
U_1 + \dots + U_{m} = \{u_1+\dots+u_{m}: u_1\in U_1, \dots, u_{m} \in U_{m}\}
\end{equation}

“all elements formed by taking one element from each and add it.”

additional information

sum of subspaces is the smallest subspace with both subspaces

Suppose \(U_1, \dots U_{m}\) are subspaces of \(V\), then \(U_1+\dots +U_{m}\) is the smallest subspace of \(V\) containing \(U_1, \dots, U_{m}\).

Proof:

Is a subspace—

Smallest containing subspace—

Because a subspace is closed under addition, if a subspace contains \(U_{1}, \dots, U_{m}\) you can always add each of the constituent elements manually to form every \(U_1+\dots+U_{m}\).

Conversely, the subspace \(U_1+\dots +U_{m}\) should contain \(U_1, \dots, U_{m}\) by simply setting the coefficients except for the one you are interested in to \(0\).

Therefore, as both subsets contain each other; they are equivalent.

dimension of sums

Let there be two finite-dimensional subspaces: \(U_1\) and \(U_2\). Then:

\begin{equation} \dim(U_1+U_2)=\dim U_1+\dim U_{2} - \dim(U_1 \cap U_2) diff --git a/posts/kbhsurjectivity/index.html b/posts/kbhsurjectivity/index.html index 4fd8bfd13..92ed8a6f0 100644 --- a/posts/kbhsurjectivity/index.html +++ b/posts/kbhsurjectivity/index.html @@ -5,6 +5,6 @@ map to bigger space is not surjective See map to bigger space is not surjective'>

Houjun Liu
-

surjectivity

A function \(T: V\to W\) is surjective if its range equals its codomain \(W\). “onto”

“For any possible output, \(w \in W\) for \(T \in \mathcal{L}(V,W)\), there is at LEAST one input \(v\) such that \(Tv = w\). "

\begin{equation} +

surjectivity

\ No newline at end of file diff --git a/posts/kbht_twiddle/index.html b/posts/kbht_twiddle/index.html index 8b02eb300..dfc9b1d6e 100644 --- a/posts/kbht_twiddle/index.html +++ b/posts/kbht_twiddle/index.html @@ -5,7 +5,7 @@ \(\widetilde{T}\) is well defined Same problem as that with operations on quotient space. We need to make sure that \(\widetilde{T}\) behave the same way on distinct but equivalent representations of the same affine subset.">
Houjun Liu
-

T twiddle

Suppose \(T \in \mathcal{L}(V,W)\). Define a \(\widetilde{T}: V / (null\ T) \to W\) such that:

\begin{align} +

T twiddle

Suppose \(T \in \mathcal{L}(V,W)\). Define a \(\widetilde{T}: V / (null\ T) \to W\) such that:

\begin{align} \widetilde{T}(v+ null\ T) = Tv \end{align}

so \(\widetilde{T}\) is the map that recovers the mapped result from an affine subset from the null space of the map.

\(\widetilde{T}\) is well defined

Same problem as that with operations on quotient space. We need to make sure that \(\widetilde{T}\) behave the same way on distinct but equivalent representations of the same affine subset.

Suppose \(u,v \in V\) such that \(u+null\ T = v+null\ T\). Because two affine subsets parallel to \(U\) are either equal or disjoint, we have that \(u-v \in null\ T\). This means that \(Tu-Tv = 0 \implies Tu= Tv\). So applying \(\widetilde{T}\) on equivalent representations of the same affine subset would yield the same result, as desired. \(\blacksquare\)

properties of \(\widetilde{T}\)

it is a linear map

TBD proof. Basically just like do it inheriting operations from the operations on quotient space.

it is injective

We desire here that \(null\ \widetilde{T} = \{0\}\) which will tell us that \(\widetilde{T}\) is injective.

Suppose some \(v + null\ T\) is in the null space of \(\widetilde{T}\). So, we have that:

\begin{equation} \widetilde{T}(v+null\ T) = Tv = 0 diff --git a/posts/kbhthoughts_on_axler_4/index.html b/posts/kbhthoughts_on_axler_4/index.html index 2add627b1..9f1afe1a2 100644 --- a/posts/kbhthoughts_on_axler_4/index.html +++ b/posts/kbhthoughts_on_axler_4/index.html @@ -3,7 +3,7 @@ So we are not going to go through everything very very carefully. Instead, I’m just going to go through some interesting results at my own leisure.">

Houjun Liu
-

Thoughts on Axler 4

Because this chapter is not about linear algebra, your instructor may go through it rapidly. You may not be asked to scrutinize all the proofs. Make sure, however, that you at least read and understand the statements of all the results in this chapter—they will be used in later chapters.

So we are not going to go through everything very very carefully. Instead, I’m just going to go through some interesting results at my own leisure. This also means that this note is not very complete.

facts

  • “you can factor out every root”: \(p(\alpha) = 0 \implies p(z)=(z-\alpha)q(z)\)

  • fundamental theorem of algebra: “if you have an nth-degree polynomial, you can factor it into n factors” (over the complex numbers, you have as many roots as the degree of the polynomials)

    • these coefficients are unique barring ordering
    • factoring real polynomials can be treated in two pieces: one piece, the reals, which can be treated usually; then one other piece, the complexes, can be multiplied pairwise together (as over real coeffs they always come in conjugate pairs) into quadratics. This is why all real polynormials can always be factored as \((x-\lambda)(x-\lambda) \dots (x^{2}+ax+b) (x^{2}+ax+b)\dots\)
    • the number of complex polynomials has to be even
  • complex polynomials have \(deg\ p\) factors

  • real polynomials have \(deg\ p\) real/complex factors, but complex factors come in pairs

  • we can squish the complex part of the real polynomials together, and get—wlog $m$—first-degree real roots and \(\frac{deg\ p - m}{2}\) second-degree real roots where \(b^{2} < 4c\)

  • \(x^{2} + bx + c\) has a factor of \((x-\lambda_{1})(x-\lambda_{2})\) under reals \(b^{2} \geq 4c\)

key sequence

complex numbers

  • we defined: complex numbers, conjugates, and absolute value
    • 9 properties of complexes (see below)

polynomial coefficients

  • polynomial coefficients are unique; namely, if a polynomial is the zero function, all of its coefficients have to be \(0\)

division, zero, and factoring

  • polynomial division: given two polynomials \(p,s \in \mathcal{P}(\mathbb{F})\), with \(s\neq 0\), then \(\exists q,r \in \mathcal{P}(\mathbb{F})\) such that: \(p = s q +r\); that is, given two polynomials, you can always divide one by the other with some remainder as long as the “other” is not \(0\)
  • we defined zero (\(p \lambda =0\), then \(\lambda\) is a “zero”) and factor which is some polynomial \(s \in \mathcal{P}(\mathbb{F})\) for another polynomial \(p\) such that there exists some \(q \in \mathcal{P}(\mathbb{F})\) such that \(p = s q\)
    • we show that each zero corresponds to a factor of the shape \(p(z) = (z-\lambda)q(z)\)
  • we show that a polynomial with degree \(m\) has at most \(m\) distinct zeros

FToA and corollaries

  • FToA: every non-constant polynomial under the complexes has a zero
  • and that means every polynomial over the complexes has a unique factorization \(p(z) = c(z-\lambda_{1})(z-\lambda_{2}) \dots (z-\lambda_{m})\)
  • polynomials with real coefficients have complex zeros in conjugate pairs: if \(\lambda \in \mathbb{C}\) is a zero of the polynomial, so is \(\bar{\lambda}\)

Is a real polynomial factorable?

  • A polynomial \(x^{2}+bx+c\) is factorable into \((x-\lambda_{1})(x-\lambda_{2})\) over the reals IFF \(b^{2} \geq 4c\).
  • All polynomials over the reals can be factored into at least second degree polynomials
    • \(p(z) = c(z-\lambda_{1})(z-\lambda_{2}) \dots (z-\lambda_{m}) \dots (x^{2}+b_{M}x+c_{M})\)

first, review complex numbers

  • \(z+\bar{z} = 2 \text{Re}\ z\)
  • \(z-\bar{z} =2(\text{Im}\ z)i\)
  • \(z\bar{z} = |z|^{2}\)
  • \(\overline{w+z} = \bar{w}+\bar{z}\), \(\overline{wz} = \bar{w}\bar{z}\)
  • \(\bar{\bar{z}} = z\)
  • \(| \text{\{Re,Im\}}\ z| \leq |z|\) both real and imaginary components are smaller than the actual absolute value
  • \(|\bar{z}| = |z|\)
  • \(|wz| = |w| |z|\)
  • \(|w+z| \leq |w| + |z|\), the triangle inequality

triangle inequality (complexes)

For \(w, z \in \mathbb{C}\), we do route algebra:

polynomial division

Suppose \(p,s \in \mathcal{P}(\mathbb{F}), s\neq 0\), then, \(\exists\) polynomials \(q,r \in \mathcal{P(\mathbb{F})}\) such that:

\begin{equation} +

Thoughts on Axler 4

Because this chapter is not about linear algebra, your instructor may go through it rapidly. You may not be asked to scrutinize all the proofs. Make sure, however, that you at least read and understand the statements of all the results in this chapter—they will be used in later chapters.

So we are not going to go through everything very very carefully. Instead, I’m just going to go through some interesting results at my own leisure. This also means that this note is not very complete.

facts

  • “you can factor out every root”: \(p(\alpha) = 0 \implies p(z)=(z-\alpha)q(z)\)

  • fundamental theorem of algebra: “if you have an nth-degree polynomial, you can factor it into n factors” (over the complex numbers, you have as many roots as the degree of the polynomials)

    • these coefficients are unique barring ordering
    • factoring real polynomials can be treated in two pieces: one piece, the reals, which can be treated usually; then one other piece, the complexes, can be multiplied pairwise together (as over real coeffs they always come in conjugate pairs) into quadratics. This is why all real polynormials can always be factored as \((x-\lambda)(x-\lambda) \dots (x^{2}+ax+b) (x^{2}+ax+b)\dots\)
    • the number of complex polynomials has to be even
  • complex polynomials have \(deg\ p\) factors

  • real polynomials have \(deg\ p\) real/complex factors, but complex factors come in pairs

  • we can squish the complex part of the real polynomials together, and get—wlog $m$—first-degree real roots and \(\frac{deg\ p - m}{2}\) second-degree real roots where \(b^{2} < 4c\)

  • \(x^{2} + bx + c\) has a factor of \((x-\lambda_{1})(x-\lambda_{2})\) under reals \(b^{2} \geq 4c\)

key sequence

complex numbers

  • we defined: complex numbers, conjugates, and absolute value
    • 9 properties of complexes (see below)

polynomial coefficients

  • polynomial coefficients are unique; namely, if a polynomial is the zero function, all of its coefficients have to be \(0\)

division, zero, and factoring

  • polynomial division: given two polynomials \(p,s \in \mathcal{P}(\mathbb{F})\), with \(s\neq 0\), then \(\exists q,r \in \mathcal{P}(\mathbb{F})\) such that: \(p = s q +r\); that is, given two polynomials, you can always divide one by the other with some remainder as long as the “other” is not \(0\)
  • we defined zero (\(p \lambda =0\), then \(\lambda\) is a “zero”) and factor which is some polynomial \(s \in \mathcal{P}(\mathbb{F})\) for another polynomial \(p\) such that there exists some \(q \in \mathcal{P}(\mathbb{F})\) such that \(p = s q\)
    • we show that each zero corresponds to a factor of the shape \(p(z) = (z-\lambda)q(z)\)
  • we show that a polynomial with degree \(m\) has at most \(m\) distinct zeros

FToA and corollaries

  • FToA: every non-constant polynomial under the complexes has a zero
  • and that means every polynomial over the complexes has a unique factorization \(p(z) = c(z-\lambda_{1})(z-\lambda_{2}) \dots (z-\lambda_{m})\)
  • polynomials with real coefficients have complex zeros in conjugate pairs: if \(\lambda \in \mathbb{C}\) is a zero of the polynomial, so is \(\bar{\lambda}\)

Is a real polynomial factorable?

  • A polynomial \(x^{2}+bx+c\) is factorable into \((x-\lambda_{1})(x-\lambda_{2})\) over the reals IFF \(b^{2} \geq 4c\).
  • All polynomials over the reals can be factored into at least second degree polynomials
    • \(p(z) = c(z-\lambda_{1})(z-\lambda_{2}) \dots (z-\lambda_{m}) \dots (x^{2}+b_{M}x+c_{M})\)

first, review complex numbers

  • \(z+\bar{z} = 2 \text{Re}\ z\)
  • \(z-\bar{z} =2(\text{Im}\ z)i\)
  • \(z\bar{z} = |z|^{2}\)
  • \(\bar{x+z} = \bar{w}+\bar{z}\), \(\bar{wz} = \bar{w}\bar{z}\)
  • \(\bar{\bar{z}} = z\)
  • \(| \text{\{Re,Im\}}\ z| \leq |z|\) both real and imaginary components are smaller than the actual absolute value
  • \(|\bar{z}| = |z|\)
  • \(|wz| = |w| |z|\)
  • \(|w+z| \leq |w| + |z|\), the triangle inequality

triangle inequality (complexes)

For \(w, z \in \mathbb{C}\), we do route algebra:

polynomial division

Suppose \(p,s \in \mathcal{P}(\mathbb{F}), s\neq 0\), then, \(\exists\) polynomials \(q,r \in \mathcal{P(\mathbb{F})}\) such that:

\begin{equation} p = s q +r \end{equation}

and \(\deg r < \deg s\).

Proof:

Let: \(n = \deg p\), and \(m = \deg s\). So, if \(n < m\) (i.e. it is not a division), then take \(q=0\) and \(r=p\).

Now, let’s make ???

Factoring

A polynomial \(s \in \mathcal{P}(\mathbb{F})\) is a factor of \(p \in \mathcal{P}(\mathbb{F})\) if \(\exists\) \(q \in \mathcal{P}(\mathbb{F})\) such that \(p=s q\).

questions

  • proofs: wut
  • if the FToA holds, isn’t the polynomials over the reals a “subset”(ish) of the polynomials over the complexes? so there is going to be at least complex roots to all polynormials always no?
\ No newline at end of file diff --git a/posts/kbhtuning_forks/index.html b/posts/kbhtuning_forks/index.html index 4e37398a0..5b2b7a64a 100644 --- a/posts/kbhtuning_forks/index.html +++ b/posts/kbhtuning_forks/index.html @@ -5,4 +5,4 @@ From a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.">
Houjun Liu
-

Tuning Fork

Tuning Forks (funing torks!) is a Tuning Fork. You smack it and it goes “biiing!”

Let’s figure out how it works. For us to be one same page, let’s define some vocab:

Vocab

  • “Tine”: one of the two prongs of the fork

A Cursory Explanation

Source: here and here. Both are not very scientific but a good first step.

From a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.

Ok but then this raises the question of why there’s two tines. The explanation this website gives is essentially that the actual mechanism of the Tuning Fork is in squishing the air immediately around the fork, so…

  1. if the tines are push towards together, it creates a void in the space it just was; this creates a low pressure rarefaction area
  2. if the tines snap back apart, it compresses the air creating compression by squishing the air around it

And therefore, the air around the funing tork is essentially being played like a two-way slingy. To adjust the pitch of the Tuning Fork, you lengthen or shorten it: longer tuning forks have larger tines, which vibrate more slowly.

Ok but now many, many questions

  1. why does smacking one side of the Tuning Fork make both sides vibrate
  2. presumably the base is not vibrating; hence, how does the downward-bendy vibration cause perpendicular oscillation (does it?)

A Detour on Rigid Body Harmonic Motion

Let’s talk about Bending.

  1. How does this relate to springs/slinkies? read this.

A Better Detour on Cantilever Beams

Cantilever Beams

A Detour on the Temperature

We are really worried about two different things here.

  1. Metal expands/contracts based on the temperature
  2. Temperature affects speed of sound

A Detour on Material Science

Why are our Tuning Forks out of tune?

Fun, Relevant Factoids About the World

  • The range of human hearing from a youngen is about 20Hz to 20,000Hz.

Look into

  • Young’s Modulus

    • Density
    • Second
  • overtones: six and a quarter; why?

  • prove the equations given in Rossing 1990

  • why do high frequencies die faster?

  • Why are they FORKS? What’s wrong with one prong

Lagrangian Mechanics

experiments to do in the end

  • measuring in water
  • measuring

questions to ask

  • why no free vibrations just standing?
  • do the various tuning fork modes compose
  • what happened to the harmonics of the fundimental? I know the overotens are 6/14, but where did the harmonics go? do they compose?
  • what if we did it in a vaccume? of course the tuning fork is not going to be heard, but will it keep vibrating forever?
  • Nyquist limit (FFT is only accurate to half the sampling rate; 10000 hz sampling (default on logger pro) means max is 5000 Hz)

things

  • we can use far field because the wavelength is much mucm much much larger than the seperation between the two tines; what is the wavelength? function of frequency and hertz

Questions for Mark

  • cuw tuning forks’ freq is not the predicted freq of its shortest tine. urg how
  • driven oscellation. how would it actually work?

last minute tuning forks

  • easy explanation of FFT “wrapping around circle”
  • backup slide on octahedral scress
  • explain beta
  • how to get wavelength from sinusoidal equation
  • how does wavelength change with temp; how does our ear compensate?
  • https://en.wikipedia.org/wiki/Residual_stress
\ No newline at end of file +

Tuning Fork

Tuning Forks (funing torks!) is a Tuning Fork. You smack it and it goes “biiing!”

Let’s figure out how it works. For us to be one same page, let’s define some vocab:

Vocab

  • “Tine”: one of the two prongs of the fork

A Cursory Explanation

Source: here and here. Both are not very scientific but a good first step.

From a very basic perspective, hiting a tuning fork creates a transverse wave on the tine you hit, which vibrates and then compresses the air around it in a longitudinal fashion at a set frequency, which we hear as a sound.

Ok but then this raises the question of why there’s two tines. The explanation this website gives is essentially that the actual mechanism of the Tuning Fork is in squishing the air immediately around the fork, so…

  1. if the tines are push towards together, it creates a void in the space it just was; this creates a low pressure rarefaction area
  2. if the tines snap back apart, it compresses the air creating compression by squishing the air around it

And therefore, the air around the funing tork is essentially being played like a two-way slingy. To adjust the pitch of the Tuning Fork, you lengthen or shorten it: longer tuning forks have larger tines, which vibrate more slowly.

Ok but now many, many questions

  1. why does smacking one side of the Tuning Fork make both sides vibrate
  2. presumably the base is not vibrating; hence, how does the downward-bendy vibration cause perpendicular oscillation (does it?)

A Detour on Rigid Body Harmonic Motion

Let’s talk about Bending.

  1. How does this relate to springs/slinkies? read this.

A Better Detour on Cantilever Beams

Cantilever Beams

A Detour on the Temperature

We are really worried about two different things here.

  1. Metal expands/contracts based on the temperature
  2. Temperature affects speed of sound

A Detour on Material Science

Why are our Tuning Forks out of tune?

Fun, Relevant Factoids About the World

  • The range of human hearing from a youngen is about 20Hz to 20,000Hz.

Look into

  • Young’s Modulus

    • Density
    • Second
  • overtones: six and a quarter; why?

  • prove the equations given in Rossing 1990

  • why do high frequencies die faster?

  • Why are they FORKS? What’s wrong with one prong

Lagrangian Mechanics

experiments to do in the end

  • measuring in water
  • measuring

questions to ask

  • why no free vibrations just standing?
  • do the various tuning fork modes compose
  • what happened to the harmonics of the fundamental? I know the overtones are 6/14, but where did the harmonics go? do they compose?
  • what if we did it in a vacuum? of course the tuning fork is not going to be heard, but will it keep vibrating forever?
  • Nyquist limit (FFT is only accurate to half the sampling rate; 10000 hz sampling (default on logger pro) means max is 5000 Hz)

things

  • we can use far field because the wavelength is much, much larger than the separation between the two tines; what is the wavelength? function of frequency and hertz

Questions for Mark

  • cuw tuning forks’ freq is not the predicted freq of its shortest tine. urg how
  • driven oscillation. how would it actually work?

last minute tuning forks

  • easy explanation of FFT “wrapping around circle”
  • backup slide on octahedral scress
  • explain beta
  • how to get wavelength from sinusoidal equation
  • how does wavelength change with temp; how does our ear compensate?
  • https://en.wikipedia.org/wiki/Residual_stress
\ No newline at end of file diff --git a/posts/kbhuniqueness_and_existance/index.html b/posts/kbhuniqueness_and_existance/index.html index 124be87e7..4c824c161 100644 --- a/posts/kbhuniqueness_and_existance/index.html +++ b/posts/kbhuniqueness_and_existance/index.html @@ -10,7 +10,7 @@ for all \(t \in I\), \(x,y \in \omega\), with \(L \in (0,\infty)\) is a Lipschitz Condition in the dependent variable \(x\).">
Houjun Liu
-

Uniqueness and Existance

Questions of Uniqueness and Existance are important elements in Differential Equations.

Here’s a very general form of a differential equations. First, here’s the:

function behavior tests

continuity

Weakest statement.

A function is continuous if and only if:

\begin{equation} +

Uniqueness and Existance

Questions of Uniqueness and Existance are important elements in Differential Equations.

Here’s a very general form of a differential equations. First, here’s the:

function behavior tests

continuity

Weakest statement.

A function is continuous if and only if:

\begin{equation} \lim_{x \to y} f(x) =f(y) \end{equation}

Lipschitz Condition

Stronger statement.

The Lipschitz Condition is a stronger test of Continuity such that:

\begin{equation} || F(t,x)-F(t,y)|| \leq L|| x- y|| diff --git a/posts/kbhupper_triangular_matrix/index.html b/posts/kbhupper_triangular_matrix/index.html index a60e41f36..b3678531f 100644 --- a/posts/kbhupper_triangular_matrix/index.html +++ b/posts/kbhupper_triangular_matrix/index.html @@ -5,7 +5,7 @@ the matrix of \(T\) w.r.t. \(v_1 … v_{n}\) is upper-triangular \(Tv_{j} \in span(v_1 \dots v_{j})\) for each \(v_{j}\) \(span(v_{1}, … v_{j})\) is invariant under \(T\) for each \(v_{j}\) \(1 \implies 2\) Recall that our matrix \(A=\mathcal{M}(T)\) is upper-triangular.">

Houjun Liu
-

upper-triangular matrix

A matrix is upper-triangular if the entries below the diagonal are \(0\):

\begin{equation} +

upper-triangular matrix

A matrix is upper-triangular if the entries below the diagonal are \(0\):

\begin{equation} \mqty(\lambda_{1} & & * \\ & \ddots & \\ 0 & & \lambda_{n}) \end{equation}

properties of upper-triangular matrix

Suppose \(T \in \mathcal{L}(V)\), and \(v_1 … v_{n}\) is a basis of \(V\). Then:

  1. the matrix of \(T\) w.r.t. \(v_1 … v_{n}\) is upper-triangular
  2. \(Tv_{j} \in span(v_1 \dots v_{j})\) for each \(v_{j}\)
  3. \(span(v_{1}, … v_{j})\) is invariant under \(T\) for each \(v_{j}\)

\(1 \implies 2\)

Recall that our matrix \(A=\mathcal{M}(T)\) is upper-triangular. So, for any \(v_{j}\) sent through \(A\), it will be multiplied to the $j$-th column vector of the matrix. Now, that $j$-th column has \(0\) for rows \(j+1 … n\), meaning that only through a linear combination of the first \(j\) vectors we can construct \(T v_{j}\). Hence, \(Tv_{j} \in span(v_1 … v_{j})\)

\(3 \implies 2\)

“obviously”

All \(v_{j} \in span(v_1, \dots v_{j})\), and yet \(T v_{j} \in span (v_{1}, … v_{j})\) as it is given. Hence, \(span(v_1, … v_{j})\) is invariant under \(T\).

\(2 \implies 3\)

Let \(v \in span(v_1, … v_{j})\); meaning: \(v = a_1 v_1 + … + a_{j} v_{j}\). Now, \(Tv = a_1 T v_{1} + … + a_{j} T v_{j}\). Recall now we are given \(T v_{j} \in span(v_1, … v_{j})\) for each \(v_{j}\) (of course if \(T{v_{1}} \in span(v_{1})\) it is also in \(span(v_1, … v_{j})\) so the statement make sense.) Therefore, a linear combinations of \(T v_{j}\) also is in \(span(v_1 … v_{j})\). Making the latter invariant under \(T\). \(\blacksquare\)

every complex operator has an upper-triangular matrix

Suppose \(V\) is a finite-dimensional complex vector space, with an operator \(T \in \mathcal{L}(V)\). Then, \(T\) has an upper-triangular matrix w.r.t. some basis of \(V\).

Proof:

We will use induction.

Inductive hypothesis: given dimension of \(V\), \(T \in \mathcal{L}(V)\) has an upper-triangular matrix for a basis of \(V\).

Base case: \(\dim V=1\)

If \(\dim V = 1\), any matrix of \(T\) is technically upper-triangular because its just one number \(\mqty(a)\).

Step: \(\dim V = n\), and \(T \in \mathcal{L}(V)\)

Because operators on complex vector spaces have an eigenvalue, let \(v_1\) be an eigenvector corresponding to an eigenvalue of \(T\). Now, create an invariant subspace \(U = span(v_1)\). (it is invariant because \(v_1\) is an eigenvalue). Now, evidently \(\dim U =1\).

Now, \(\dim V / U = n-1\), the previous step from induction tells us that there exists a upper-triangular matrix for \(T/U \in \mathcal{L}(V / U)\). Specifically, because of the properties of upper-triangular matrix, it tells us that there is a basis \(v_{2} + U … v_{n} + U\) such that its span is invariant under \(T / U\). Meaning:

\begin{equation} (T / U) (v_{j} + U ) \in span( v_{2} + U \dots v_{j} + U) diff --git a/posts/kbhutility_theory/index.html b/posts/kbhutility_theory/index.html index 0ca381cef..9548b61af 100644 --- a/posts/kbhutility_theory/index.html +++ b/posts/kbhutility_theory/index.html @@ -4,7 +4,7 @@ additional information never have a utility function that’s infinite If something has infinite utility, doing two of the good things is the same as doing one good thing, which is wrong.">

Houjun Liu
-

utility theory

utility theory is a set of theories that deals with rational decision making through maximizing the expected utility.

utility theory can be leveraged to choose the right actions in the observe-act cycle in a graphical network via decision networks

additional information

never have a utility function that’s infinite

If something has infinite utility, doing two of the good things is the same as doing one good thing, which is wrong.

Say going to a Taylor concert has \(+\infty\) utility. Then, you would be indifferent to the difference between Taylor + Harry vs. Taylor only. However, the former case clearly has higher utility as long as Harry concert doesn’t have negative utility.

utility elicitation

see utility elicitation

expected utility

expected utility is the utility we expect from taking an action \(a\) given an observation \(o\). To compute it based on transition probabilities:

\begin{equation} +

utility theory

utility theory is a set of theories that deals with rational decision making through maximizing the expected utility.

utility theory can be leveraged to choose the right actions in the observe-act cycle in a graphical network via decision networks

additional information

never have a utility function that’s infinite

If something has infinite utility, doing two of the good things is the same as doing one good thing, which is wrong.

Say going to a Taylor concert has \(+\infty\) utility. Then, you would be indifferent to the difference between Taylor + Harry vs. Taylor only. However, the former case clearly has higher utility as long as Harry concert doesn’t have negative utility.

utility elicitation

see utility elicitation

expected utility

expected utility is the utility we expect from taking an action \(a\) given an observation \(o\). To compute it based on transition probabilities:

\begin{equation} EU(a|o) = \sum_{s’} p(s’ | a,o) U(s’) \end{equation}

the expected utility of taking some action \(a\) at an observation \(o\) is the probability of any given next state \(s’\) happening times the utility of being in that state \(U(s’)\).

See also expected utility of wealth.

maximum expected utility principle

MEU states that a rational agent should choose an action which maximizes expected utility. That is,

\begin{equation} a^{*} = \arg\max_{a} EU(a|o) diff --git a/posts/kbhvalue_iteration/index.html b/posts/kbhvalue_iteration/index.html index 5f0d9b0ec..2a4f9f8bf 100644 --- a/posts/kbhvalue_iteration/index.html +++ b/posts/kbhvalue_iteration/index.html @@ -6,7 +6,7 @@ eventually will converge into the optimal value function. After which, we just extract the greedy policy from the utility to get a policy to use.">

Houjun Liu
-

value iteration

We apply the Bellman Expectation Equation and select the utility that is calculated by taking the most optimal action given the current utility:

\begin{equation} +

value iteration

We apply the Bellman Expectation Equation and select the utility that is calculated by taking the most optimal action given the current utility:

\begin{equation} U_{k+1}(s) = \max_{a} \qty(R(s,a) + \gamma \sum_{s’} T(s’ | s,a) U_{k}(s’)) \end{equation}

This iterative process is called the Bellman backup, or Bellman update.

\begin{equation} U_1 \dots U_{k} \dots U^{*} diff --git a/posts/kbhvector_semantics/index.html b/posts/kbhvector_semantics/index.html index 8e8c12fe4..78e4bd0ce 100644 --- a/posts/kbhvector_semantics/index.html +++ b/posts/kbhvector_semantics/index.html @@ -6,6 +6,6 @@ idea 2 meaning should be in a point in space, just like affective meaning (i.">

Houjun Liu
-

vector semantics

vector semantics is a sense encoding method.

“a meaning of the word should be tied to how they are used”

we measure similarity between word vectors with cosine similarity. see also vector-space model.

motivation

idea 1

neighboring words can help infer semantic meaning of new words: “we can define a word based on its distribution in language use”

idea 2

meaning should be in a point in space, just like affective meaning (i.e. a score in each dimension).

that is: a word should be a vector in n space

vector semantics

Each word is a point based on distribution; each word is a vector and similar words are nearby in semantic space.

The intuition is that classifiers can generalize to similar, but unseen words more easily by processing embeddings.

transposing a Term-Document Matrix

Typically we read a Term-Document Matrix column-wise, to understand what each document can be encoded in terms of words.

However, if you read it row-wise, you can see a distribution for words over the documents.

term-term matrix

a term-term matrix is a \(|V| \times |V|\) matrix that measures co-occurrence in some context. So each cell would be the number of times the two words co-occur in some small window.

point-wise mutual information

we usually normalize a Term-Document Matrix via TF-IDF. However, for term-term matrix, we usually normalize it as:

\begin{equation} +

vector semantics

vector semantics is a sense encoding method.

“a meaning of the word should be tied to how they are used”

we measure similarity between word vectors with cosine similarity. see also vector-space model.

motivation

idea 1

neighboring words can help infer semantic meaning of new words: “we can define a word based on its distribution in language use”

idea 2

meaning should be in a point in space, just like affective meaning (i.e. a score in each dimension).

that is: a word should be a vector in n space

vector semantics

Each word is a point based on distribution; each word is a vector and similar words are nearby in semantic space.

The intuition is that classifiers can generalize to similar, but unseen words more easily by processing embeddings.

transposing a Term-Document Matrix

Typically we read a Term-Document Matrix column-wise, to understand what each document can be encoded in terms of words.

However, if you read it row-wise, you can see a distribution for words over the documents.

term-term matrix

a term-term matrix is a \(|V| \times |V|\) matrix that measures co-occurrence in some context. So each cell would be the number of times the two words co-occur in some small window.

point-wise mutual information

we usually normalize a Term-Document Matrix via TF-IDF. However, for term-term matrix, we usually normalize it as:

\begin{equation} PMI(w_1, w_2) = \log \frac{p(w_1,w_2)}{p(w_1)p(w_2)} \end{equation}

“would the two words appear together more often than expected by chance?”

word2vec

see word2vec

\ No newline at end of file diff --git a/posts/kbhwave_equation/index.html b/posts/kbhwave_equation/index.html index ae19ed574..4c67fea08 100644 --- a/posts/kbhwave_equation/index.html +++ b/posts/kbhwave_equation/index.html @@ -8,7 +8,7 @@ \begin{equation} \pdv{u}{t}(0,x) = \sum a_{k} \frac{ck \pi}{l} \sin \qty( \frac{k\pi x}{l}) = g(x) \end{equation}">
Houjun Liu
-

Wave Equation

If we write it in a single set of variables:

\begin{equation} +

Wave Equation

If we write it in a single set of variables:

\begin{equation} \pdv[2]{u}{t} = \pdv[2]{u}{x} \end{equation}

At a glance, for Dirichlet Conditions:

\begin{equation} u(t,x) = \sum_{k} \qty(a_{k} \sin \qty(\frac{ck\pi}{l} t) + b_{k} \cos \qty(\frac{ck\pi}{l} t)) \sin \qty( \frac{k \pi}{l}x) diff --git a/sitemap.xml b/sitemap.xml index 93fdc4b93..76014987d 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -1 +1 @@ -https://www.jemoka.com/posts/kbhassembly/2023-10-31T00:44:54-07:00https://www.jemoka.com/posts/kbhistudio_meeting_nodes/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmaking_qubits_interact/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhpoint_estimate/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhproperties_of_the_stable_matching_algorithm/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhrnn_notes/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhrural_hospitals_problem/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhstable_matching_problem/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhz_score/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbh1v_1/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbhq/2022-08-27T09:38:59-07:00https://www.jemoka.com/posts/kbhw/2023-03-28T21:31:23-07:00https://www.jemoka.com/posts/kbhzero_times_vector/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbheigenvalue/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbh1980s_political_alignment/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/kbh1a/2022-08-30T14:23:25-07:00https://www.jemoka.com/posts/2023-02-26/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhnus_math530_3_e_problem_1/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbh776/2023-03-01T13:39:00-08:00https://www.jemoka.com/posts/kbhaaa/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhaaai_talk_contacts/2024-02-26T11:25:20-08:00https://www.jemoka.com/posts/kbhaaai2024_index/2024-02-27T13:50:55-08:00https://www.jemoka.com/posts/kbhindex/2024-01-17T12:24:19-08:00https://www.jemoka.com/posts/kbhabsolute_value_function/2023-10-06T16:57:26-07:00https://www.jemo
ka.com/posts/kbhaccounting_price/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhaction_of_capecitabmine/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhaction_research/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhaction_value_function/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhactive_data_representation/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhactive_learning_molecule_iteration/2023-03-27T10:23:02-07:00https://www.jemoka.com/posts/kbhactive_listening/2022-12-01T14:28:25-08:00https://www.jemoka.com/posts/kbhactive_recall/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhactor_critic/2023-10-31T10:20:58-07:00https://www.jemoka.com/posts/kbhadaops/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhadding/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhadditive_identity/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhadditive_identity_is_unique_in_a_vector_space/2022-09-13T23:23:56-07:00https://www.jemoka.com/posts/kbhadditive_inverse_is_unique_in_a_vector_space/2022-09-13T23:23:56-07:00https://www.jemoka.com/posts/kbhadhd/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhadme/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhadress_challenge/2022-06-24T23:35:50-07:00https://www.jemoka.com/posts/kbhadress_literature_survey/2022-06-26T10:25:12-07:00https://www.jemoka.com/posts/kbhadvantage_function/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhadvertising/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhparallel_linear_algebra/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbhaffine_transformation/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhagent/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhagricultural_adjustment_administration/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhagrp/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhai/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhai_ethics/20
22-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhai_healthcare_safety/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhai_intepretability/2023-10-26T12:04:45-07:00https://www.jemoka.com/posts/kbhai_master_class/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhai_medicine/2024-02-26T14:44:52-08:00https://www.jemoka.com/posts/kbhaibridge/2023-03-12T17:23:29-07:00https://www.jemoka.com/posts/kbhaibridge_course_website/2022-06-30T21:14:10-07:00https://www.jemoka.com/posts/kbhaibridge_final_project/2022-06-29T23:42:15-07:00https://www.jemoka.com/posts/kbhaibridge_iris_variance_worksheet/2022-06-30T10:37:38-07:00https://www.jemoka.com/posts/kbhaibridge_packages/2022-06-27T11:07:38-07:00https://www.jemoka.com/posts/kbhaibridge_student_presentations/2022-07-02T00:07:55-07:00https://www.jemoka.com/posts/kbhaibridgelab_d1aft/2022-06-27T16:03:53-07:00https://www.jemoka.com/posts/kbhaibridgelab_d3_d4/2022-06-23T14:37:07-07:00https://www.jemoka.com/posts/kbhaibridgelab_d2aft/2022-06-29T10:21:38-07:00https://www.jemoka.com/posts/kbhaibridgelab_d4aft/2022-06-29T23:29:28-07:00https://www.jemoka.com/posts/kbhaifs/2022-06-13T22:05:51-07:00https://www.jemoka.com/posts/kbhair_a_greek_style_myth/2022-07-05T23:31:37-07:00https://www.jemoka.com/posts/kbhalexis_ohanian/2023-03-01T13:39:00-08:00https://www.jemoka.com/posts/kbhalgebra/2022-08-26T14:58:53-07:00https://www.jemoka.com/posts/kbhalgebreic_equation/2024-01-08T11:23:11-08:00https://www.jemoka.com/posts/kbhalgebreic_multiplicity/2023-03-22T14:02:07-07:00https://www.jemoka.com/posts/kbhali_partovi/2023-09-12T21:52:20-07:00https://www.jemoka.com/posts/kbhalign_with_new_vocab/2022-06-13T13:25:37-07:00https://www.jemoka.com/posts/kbhalivio_april_checkin/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhalpha_vector/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhalternating_least_squares/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhabulance_trajectories/2024-02-26T10:36:33-08:00https://w
ww.jemoka.com/tags/aml/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhaml_dipping_into_pytorch/2023-09-02T15:08:45-07:00https://www.jemoka.com/posts/kbhaml_iris_strikes_bath/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhaml_it_takes_two/2023-09-02T15:08:45-07:00https://www.jemoka.com/posts/kbhaml_reinforce/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhaml_time_to_convolve/2023-09-02T15:08:45-07:00https://www.jemoka.com/posts/kbhaml_your_first_article/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhanatomy/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanatomy_learning/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanca_ae/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhangelman_syndrome/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanna_s_team_checkin/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhanotehuaoeu/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanoushka_krishnan/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhanthony_badger/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhantonsson_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhany_name_here/2022-04-25T11:59:35-07:00https://www.jemoka.com/posts/kbhaems/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhaosneuhasoneuh/2022-09-10T21:05:25-07:00https://www.jemoka.com/posts/kbhap_phys_c_em_index/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhap_phys_c_em_things_to_do/2023-05-08T10:14:24-07:00https://www.jemoka.com/posts/kbhap_phys_c_mech_index/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhap_physi/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhapstats/2022-04-21T16:21:45-07:00https://www.jemoka.com/posts/kbhapplying_eigenspace/2022-09-30T14:51:29-07:00https://www.jemoka.com/posts/kbhapproximate_inference/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhapproximate_value_function/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kb
hapr_paradox/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhaps/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbharbitrage_pricing/2022-10-04T13:12:46-07:00https://www.jemoka.com/posts/kbhargmax/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbharray/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbharrival_movie/2023-02-19T11:21:06-08:00https://www.jemoka.com/posts/kbharthur_m_schlesinger/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhartificial_intelligence/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhasbmb/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhasbmb2023_index/2023-07-09T21:21:12+08:00https://www.jemoka.com/posts/kbhascii/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhasee_prism/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhasip/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhasr/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhasr_disordered_speech/2023-06-05T23:44:58-07:00https://www.jemoka.com/posts/kbhassociative/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhasymtotic_analysis/2022-09-21T10:43:02-07:00https://www.jemoka.com/posts/kbhatoms_as_qubits/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhafib/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhauthoritarianism/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhautism/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhautonomous_odes/2024-01-17T11:19:33-08:00https://www.jemoka.com/posts/kbhaxler_a/2022-09-02T12:35:27-07:00https://www.jemoka.com/posts/kbhaxler_1_b/2022-09-17T16:42:48-07:00https://www.jemoka.com/posts/kbhaxler_1_c/2022-09-27T14:35:57-07:00https://www.jemoka.com/posts/kbhaxler_1_c_excercises/2022-10-08T16:24:13-07:00https://www.jemoka.com/posts/kbhaxler_2_a/2022-10-12T17:34:39-07:00https://www.jemoka.com/posts/kbhaxler_2_b/2022-10-16T14:41:14-07:00https://www.jemoka.com/posts/kbhaxler_2_c/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kb
haxler_3_a/2022-11-02T23:49:12-07:00https://www.jemoka.com/posts/kbhaxler_3_b/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhaxler_3_c/2022-11-28T23:56:18-08:00https://www.jemoka.com/posts/kbhaxler_3_d/2023-01-12T10:11:08-08:00https://www.jemoka.com/posts/kbhaxler_3_e/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhaxler_3_f/2023-06-21T16:34:47+08:00https://www.jemoka.com/posts/kbhaxler_5_a/2023-02-16T10:06:41-08:00https://www.jemoka.com/posts/kbhaxler_5_b/2023-03-15T10:17:51-07:00https://www.jemoka.com/posts/kbhaxler_5_c/2023-03-23T09:42:23-07:00https://www.jemoka.com/posts/kbhaxler_6_a/2023-04-08T23:40:21-07:00https://www.jemoka.com/posts/kbhaxler_6_b/2023-05-01T11:30:10-07:00https://www.jemoka.com/posts/kbhaxler_7_a/2023-05-12T00:42:25-07:00https://www.jemoka.com/posts/kbhbackpacks/2023-04-10T21:36:38-07:00https://www.jemoka.com/posts/kbhbacktracing/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhbag_of_words/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhbalagopalan_2021/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhbasis/2022-10-16T12:40:03-07:00https://www.jemoka.com/posts/kbhbasis_of_domain/2022-11-08T15:26:32-08:00https://www.jemoka.com/posts/kbhbatchalign/2022-07-02T00:07:55-07:00https://www.jemoka.com/posts/kbhbatchalign_paper_outline/2022-08-07T12:24:33-07:00https://www.jemoka.com/posts/kbhbayes_normalization_constant/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhbayes_theorem/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhbayes_theorem_over_random_variable/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhbaysian_network/2023-10-10T10:20:50-07:00https://www.jemoka.com/posts/kbhbaysian_networks_for_healthcare/2024-02-27T11:58:42-08:00https://www.jemoka.com/posts/kbhbaysian_parameter_learning/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhbelief/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhilqr/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhbelief_s
tate_mdp/2023-11-14T13:34:03-08:00https://www.jemoka.com/posts/kbhbending/2022-09-25T17:26:49-07:00https://www.jemoka.com/posts/kbhbernoulli_random_variable/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhbessel_s_equation/2024-01-08T11:23:11-08:00https://www.jemoka.com/posts/kbhworst_possible_state/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhbetazero/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhbig_data/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhbinary_operation/2022-09-14T14:29:05-07:00https://www.jemoka.com/posts/kbhbinomial_distribution/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhbioinformatics/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhbitmask/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhbitwise_operations/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhblack_thursday/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhblack_scholes_formula/2022-10-09T19:28:34-07:00https://www.jemoka.com/posts/kbhblb/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhblind_lower_bound/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhbloch_sphere/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhbluest_eye/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhenglish_bluest_eye/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhsecondary_source_comparison_activity/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhbool/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhboostrap/2023-12-06T15:27:17-08:00https://www.jemoka.com/posts/kbhboston_naming_test/2022-06-25T11:00:19-07:00https://www.jemoka.com/posts/kbhbouton_2018/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhbranch_and_bound/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhbraun_and_clarke_thematic_analysis/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhbrian_macwinney/2022-11-09T22:10:18-08:00https://www.jemoka.com/posts/kbhbrown_v_board_of_education/2
022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhbrownian_motion/2022-09-26T00:15:10-07:00https://www.jemoka.com/posts/kbhbuffer_overflow/2023-10-11T11:20:31-07:00https://www.jemoka.com/posts/kbhbuild_a_system_not_a_monolyth/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhbpe/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhc/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhcaching/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhcal_com/2023-03-01T13:39:00-08:00https://www.jemoka.com/posts/kbhcalculating_shear_s_modulus/2022-12-08T21:46:35-08:00https://www.jemoka.com/posts/kbhcalp/2024-01-18T10:15:38-08:00https://www.jemoka.com/posts/kbhcalpains_afib/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhcanciones/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhcantilever_beam/2022-10-02T13:54:53-07:00https://www.jemoka.com/posts/kbhcantilever_beams/2022-12-12T15:20:22-08:00https://www.jemoka.com/posts/kbhcapacitance/2023-03-18T21:42:56-07:00https://www.jemoka.com/posts/kbhcapacitor/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhcapecitabmine/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcapm/2022-10-29T18:57:09-07:00https://www.jemoka.com/posts/kbhcasting/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhcategorical_grammar/2023-02-26T12:21:16-08:00https://www.jemoka.com/posts/kbhcategorical_grammars_index/2023-02-25T23:04:37-08:00https://www.jemoka.com/categories/https://www.jemoka.com/posts/kbhcategory/2023-02-26T17:02:13-08:00https://www.jemoka.com/posts/kbhcategory_theory/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhcauses_of_the_great_depression/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcell/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhcell_free_biocatalysis/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhcentral_limit_theorem/2023-11-06T16:42:28-08:00https://www.jemoka.com/posts/kbhchanges_to_central_dogma/2022-04-17T15:13:02-07:00ht
tps://www.jemoka.com/posts/kbhchar/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhcharacteristic_polynomial/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhcharged/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhchatbot/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhchi_square/2022-04-20T23:19:11-07:00https://www.jemoka.com/posts/kbhchiara_marletto/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhchild_labour/2022-07-10T11:34:23-07:00https://www.jemoka.com/posts/kbhchlasta_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhchromatin/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcivil_rights/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhcivillian_conservation_corps/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhclinical_skin_disease_imaging/2024-02-26T11:42:26-08:00https://www.jemoka.com/posts/kbhclock_algorthium/2024-03-06T14:21:19-08:00https://www.jemoka.com/posts/kbhclosed/2022-08-26T20:34:46-07:00https://www.jemoka.com/posts/kbhclrs_index/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhclustering/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhcmu/2022-06-22T22:22:13-07:00https://www.jemoka.com/posts/kbhcns_regulation/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcode_review/2022-11-03T14:37:47-07:00https://www.jemoka.com/posts/kbhcoherence_time/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcold_sites/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhcold_war/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhcold_war_in_vietnam/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhcollectivist_economy/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcollege_application/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhcollege101_index/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhcollegeboard/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhcollocation_extractio/2023-03-05T23:17:11-08:0
0https://www.jemoka.com/posts/kbhcolumn_space/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhcombination/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhcommon_spark_actions/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhcommon_spark_transformations/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhcommutivity/2022-08-26T23:09:36-07:00https://www.jemoka.com/posts/kbhcomparison_function/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhcomplex_exponential/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhcomplex_number/2024-01-20T11:35:00-08:00https://www.jemoka.com/posts/kbhunderdetermined_ode_system/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhcomplex_system/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhcomplexity_theory/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhcomposite_system/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhscene_representation/2023-03-20T14:43:38-07:00https://www.jemoka.com/posts/kbhcomputational_biology_index/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhbinary_number_system/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhcomputer_systems_index/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhconceptual_grammar/2022-08-28T11:55:03-07:00https://www.jemoka.com/posts/kbhcondef_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhconditional_gaussian_models/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhconditional_plan/2023-11-16T10:38:43-08:00https://www.jemoka.com/posts/kbhconditions_in_the_great_depression/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhconfidence_interval/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhconjugation/2023-03-20T14:27:00-07:00https://www.jemoka.com/posts/kbhconnectionism/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhconstructor_theory/2022-05-10T10:01:02-07:00https://www.jemoka.com/posts/kbhcontiguous_allocation/2024-01-11T11:49:36-08:00htt
ps://www.jemoka.com/posts/kbhcontinuity_correct/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhcontinuity_correction/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhcontinuous_distribution/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhcontroller/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhcontroller_gradient_ascent/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhconvolution/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhctp/2022-06-24T23:34:46-07:00https://www.jemoka.com/posts/kbhcornucopia_of_analysis/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhcorpus/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhcortex/2023-03-08T14:16:45-08:00https://www.jemoka.com/posts/kbhcoulomb_s_law/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhcounterfactual/2022-05-10T20:54:46-07:00https://www.jemoka.com/posts/kbhcounting/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhcourses_to_take_for_qnlp/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhcovariance/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhcoveather/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhcoveather_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhcovid_19/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhcpomdp/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhcram/2022-04-22T17:19:23-07:00https://www.jemoka.com/posts/kbhcrap_to_remember_for_ap_stats/2022-05-05T07:50:29-07:00https://www.jemoka.com/posts/kbhcrash_recovery/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhcredit/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcredit_suisse/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhcritical_value/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcriticism_of_the_new_deal/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcross_entropy_loss/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhcross_entropy_method/2023-
10-31T10:20:58-07:00https://www.jemoka.com/posts/kbhcross_product/2022-09-13T14:47:30-07:00https://www.jemoka.com/posts/kbhcrossfinder/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhcyro_em/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhcrystels/2023-01-09T21:50:29-08:00https://www.jemoka.com/posts/kbhcs_probability_index/2023-12-06T15:27:17-08:00https://www.jemoka.com/posts/kbhcs124/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhcultural_revolution/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcurrent/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhcurse_of_dimensionality/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhcustomer_journey_map/2023-07-15T00:48:46+08:00https://www.jemoka.com/posts/kbhcynthia_lee/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhcyrodrgn/2023-03-26T09:24:46-07:00https://www.jemoka.com/posts/kbhd_see/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhdamped_heat_equation/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhdarkpool/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhdata_inference/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhdcgan/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhde_novo_biosensors/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhde_novo_protein_design/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhdeadlock/2024-02-07T14:20:33-08:00https://www.jemoka.com/posts/kbhdecision_making/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhdecision_making_index/2023-12-05T10:16:47-08:00https://www.jemoka.com/posts/kbhdecision_networks/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhdeep_approach/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdeep_learning/2024-02-13T23:59:46-08:00https://www.jemoka.com/posts/kbhdefensive_programming/2022-10-07T00:44:00-07:00https://www.jemoka.com/posts/kbhprobability_theory/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhdemand_pagin
g/2024-03-06T14:46:11-08:00https://www.jemoka.com/posts/kbhdemand_driven_theory/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdementiabank/2022-06-22T23:48:35-07:00https://www.jemoka.com/posts/kbhdementiabank_acoustics_brainstoming/2022-07-02T00:07:55-07:00https://www.jemoka.com/posts/kbhdementiabank_acoustics_project/2022-07-11T17:45:58-07:00https://www.jemoka.com/posts/kbhdemorgan_s_law/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhdepression/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhderivational_words/2022-08-27T17:10:35-07:00https://www.jemoka.com/posts/kbhderivatives/2022-10-09T12:18:43-07:00https://www.jemoka.com/posts/kbhderivative_pricing/2022-10-07T00:44:00-07:00https://www.jemoka.com/posts/kbhderived_variable/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhdeterminants/2023-03-20T09:03:31-07:00https://www.jemoka.com/posts/kbhdespot/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhdiabetes/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdiagonal_matrix/2023-03-20T23:37:00-07:00https://www.jemoka.com/posts/kbhdialogue/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhdialogue_state_architecture/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhdiffeomorphism/2023-06-01T16:59:54-07:00https://www.jemoka.com/posts/kbhchallenge_1/2022-09-01T22:41:54-07:00https://www.jemoka.com/posts/kbhgenerative_vs_discriminitive_classifier/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhdifference_equation/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhdiffeq_intro/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhodes_index/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhdifferential_privacy/2023-11-10T16:48:11-08:00https://www.jemoka.com/posts/kbhdiffusion_map/2023-03-26T09:24:46-07:00https://www.jemoka.com/posts/kbhdiffusion_models_for_laproscopic_surgeries/2024-02-26T11:20:05-08:00https://www.jemoka.com/posts/kbhdigital_origin_for_life/2023-03-15T20:36:33-07:00ht
tps://www.jemoka.com/posts/kbhdimension/2022-10-24T18:59:59-07:00https://www.jemoka.com/posts/kbhdirect_sampling/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhdirect_sum/2022-10-16T14:35:38-07:00https://www.jemoka.com/posts/kbhdirected_evolution/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhdirected_exploration/2023-11-30T00:00:07-08:00https://www.jemoka.com/posts/kbhdiscourse_features/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhdiscourse_completion_task/2022-06-25T11:00:09-07:00https://www.jemoka.com/posts/kbhdiscrete_distribution/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhdispatching/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhdissociating_language_and_thought/2023-11-16T12:01:38-08:00https://www.jemoka.com/posts/kbhdistributed_algorithum/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhdistributed_morphology/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhdistributive_harm/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhdistributivity/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhdivide/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhdivide_by_2pi/2023-02-01T18:22:50-08:00https://www.jemoka.com/posts/kbhdlight_1/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdocumentation_and_specification/2022-09-15T22:11:01-07:00https://www.jemoka.com/posts/kbhdopamine/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdopamine_circuitry_in_nf1/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdost/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhdot_product/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhdouble_envelope_problem/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhdouble_progressive_widening/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhdouble_slit_experiment/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhdpyd/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdriving/2022-07-25T16:59:
07-07:00https://www.jemoka.com/posts/kbhdriving_practice/2022-07-30T15:46:23-07:00https://www.jemoka.com/posts/kbhdrug_resistance/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhdual_space/2023-06-21T16:34:47+08:00https://www.jemoka.com/posts/kbhdup15q/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdynamic_programming/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhdynamic_rc_circuts/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhdyson_s_model_of_life/2023-03-15T20:36:33-07:00https://www.jemoka.com/posts/kbhe/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhe_pca/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbhe_coli/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbheb_emails/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhecon320_architecture/2022-08-25T11:12:44-07:00https://www.jemoka.com/posts/kbheconomy_of_credit/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhedit_distance_with_dp/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbheffability/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbheffects_of_the_new_deal/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbheigensolutions/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbheigenspace/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhekf/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhelastic_modulus/2022-10-02T13:54:53-07:00https://www.jemoka.com/posts/kbheleanor_roosevelt/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhelectric_field/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhelectric_potential_energy/2023-03-18T21:42:56-07:00https://www.jemoka.com/posts/kbhelectron/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhelie_adam/2023-03-08T14:19:50-08:00https://www.jemoka.com/posts/kbheliza/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhella_baker/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhelo_ratings/2023-10-05T11:57:25-07:00https://ww
w.jemoka.com/posts/kbhempty_binding_site/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhenergy_homeostasis/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhenglish/2023-01-12T09:00:43-08:00https://www.jemoka.com/posts/kbhentangled/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhepigenetics/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhepitophs/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhequal_rights_act/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhetf/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbheuclidean_algorithm/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbheugene_wigner/2022-08-26T21:21:26-07:00https://www.jemoka.com/posts/kbheukareotyic_cell/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbheuler_bernoulli_theory/2022-09-27T13:55:17-07:00https://www.jemoka.com/posts/kbheuler_s_equation/2024-01-22T14:24:00-08:00https://www.jemoka.com/posts/kbheurope/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhevent/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhpgapset/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/2023-03-09T10:10:56-08:00https://www.jemoka.com/posts/kbhexpectation/2023-11-10T16:48:11-08:00https://www.jemoka.com/posts/kbhexplicit_programming/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhexploration_and_exploitation/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhexponential_distribution/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhlists_over_fields/2022-08-30T14:49:40-07:00https://www.jemoka.com/posts/kbhfs_is_a_vector_space/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbhfactor/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhfactored_mdps/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhprocedural_vs_distributive_fairness/2023-12-13T13:36:16-08:00https://www.jemoka.com/posts/kbhfaraday_s_law/2023-04-19T11:49:49-07:00https://ww
w.jemoka.com/posts/kbhfast_informed_bound/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhfederal_housing_administration/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhfederal_project_number_one/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhfield/2023-02-26T17:02:13-08:00https://www.jemoka.com/posts/kbhfile_payload_data/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhfilesystem/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhfilters/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhfilter_bank/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhfilterb/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhfilterba/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhnus_math570_finance_eigen/2022-12-26T17:01:44-08:00https://www.jemoka.com/posts/kbhfinancial_markets_intro/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhfinfty_is_a_vector_space_over_f/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbhfinite_difference_method/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhfinite_state_machine/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfinite_dimensional_vector_space/2022-10-24T15:30:43-07:00https://www.jemoka.com/tags/fireside/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhfireside_chats/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfireside/2023-10-31T00:37:51-07:00https://www.jemoka.com/posts/kbhfirst_order_odes/2024-01-17T11:19:33-08:00https://www.jemoka.com/posts/kbhsystems_of_odes/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhflexua/2022-11-06T21:05:19-08:00https://www.jemoka.com/posts/kbhfloyd_s_invariant_method/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhflux/2023-02-19T11:21:06-08:00https://www.jemoka.com/posts/kbhdementiabank_acoustics_project_proposal/2022-06-26T12:59:44-07:00https://www.jemoka.com/posts/kbhfork/2024-02-02T17:24:38-08:00https://www.jemoka.com/posts/kbhforward_search/2023-10-24T14:11:17-07:00ht
tps://www.jemoka.com/posts/kbhforward_forward_algorithm/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhfoundational_model/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis-1/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhlearn_more/2024-02-25T15:15:43-08:00https://www.jemoka.com/posts/kbhfourier_series/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhfourier_transform/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhfdr/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfreshwater_economists/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhfunction/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhfunctor/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfundimental_investing/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhfundamental_theorem_of_arithmetic/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhfundamental_theorem_of_calculus/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhfundamental_theorem_of_linear_maps/2022-11-14T22:52:42-08:00https://www.jemoka.com/posts/kbhfusion/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhfv_pomcps/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhg_dice/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhgalactica/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhgalton_board/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhgarch/2022-12-03T23:08:07-08:00https://www.jemoka.com/posts/kbhgauss_law/2023-03-18T21:42:56-07:00https://www.jemoka.com/posts/kbhgaussian/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhgaussian_distribution/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhgaussian_elimination/2022-11-28T23:24:09-08:00https://www.jemoka.com/posts/kbhgdb/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhgeneral_inference/2023-10-
27T18:02:19-07:00https://www.jemoka.com/posts/kbhgeneral_relativity/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhgenerative_adversarial_network/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhgenerative_semantics/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhgenerativity/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhgeneric/2023-10-23T13:17:19-07:00https://www.jemoka.com/posts/kbhgenetic_algorithum/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhgenetic_policy_search/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhgenslms-1/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhgenslms/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhgeometric_brownian_motion/2022-10-14T14:08:23-07:00https://www.jemoka.com/posts/kbhgeometric_random_variable/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbhgeometric_multplicity/2023-03-22T14:02:07-07:00https://www.jemoka.com/posts/kbhgetting_started_with_pytorch/2022-11-29T22:42:04-08:00https://www.jemoka.com/posts/kbhgolden_gate_bridge/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhgood_restaurants_in_the_bay_area/2023-06-15T11:57:34-07:00https://www.jemoka.com/posts/kbhgoogle_nerd_snipe/2022-10-01T17:52:29-07:00https://www.jemoka.com/posts/kbhgorup/2022-08-27T08:39:29-07:00https://www.jemoka.com/posts/kbhgram_schmidt/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhgrammar/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhgravitational_entanglement/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhgravitational_potential_energy/2023-03-05T20:26:53-08:00https://www.jemoka.com/posts/kbhgreat_depression/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhgreatest_common_divisor/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhgreedy_programming/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhgreenswing_april_checkin/2022-04-17T16:18:17-07:00https://www.jemoka.com/posts/kbhgregarious_abstract/2022-06-22T2
3:25:11-07:00https://www.jemoka.com/posts/kbhgrid_search/2022-06-26T10:25:12-07:00https://www.jemoka.com/posts/kbhgroup/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhgroup_theory_index/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhgrouping/2023-10-01T22:08:54-07:00https://www.jemoka.com/tags/guide/2022-11-29T22:42:04-08:00https://www.jemoka.com/posts/kbhguilded_age/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhkolobov_2018/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhguo_2021/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhgus/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbh5_fluoropyrimidine_maybe_inactivated_by_gut_microbiome/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhh4/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhhansen/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhhaplmmune/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhharmonic_mean/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhharms_in_classification/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhheap/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhheap_allocator/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhheat_equation/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhhello_internet/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhherber_hoover/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhheteroskedastic/2022-12-03T23:08:07-08:00https://www.jemoka.com/posts/kbhhidden_markov_model/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhhierarchical_multi_label_clsf_for_vaccine/2024-02-27T11:58:42-08:00https://www.jemoka.com/posts/kbhhigh_chemical_activity/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhhindsight_optimization/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhdecision_making_history/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhhistory_readings_index/2022-05-29T19:48:27-07:00https://www.jem
oka.com/posts/kbhhomestead_act/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhhomogeneity/2022-11-14T23:45:36-08:00https://www.jemoka.com/posts/kbhhomset/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhhonore_s_statistic/2022-09-03T21:08:40-07:00https://www.jemoka.com/posts/kbhhoover_dam/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhhooverviles/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhhopfield_networks/2023-03-08T14:19:50-08:00https://www.jemoka.com/posts/kbhhoujun_liu/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhresearch_index/2024-01-28T23:50:19-08:00https://www.jemoka.com/posts/kbhhow_did_economists_get_it_so_wrong/2022-08-25T21:09:40-07:00https://www.jemoka.com/posts/kbhhsbi/2024-01-18T10:15:38-08:00https://www.jemoka.com/posts/kbhhsvi/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhhybplan/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhhypothesis_testing/2023-11-08T16:46:31-08:00https://www.jemoka.com/posts/kbhidentity/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhactivism_during_the_1970s/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhequivalence/2022-08-30T14:23:25-07:00https://www.jemoka.com/posts/kbhimmoral_v_structure/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhimmunogen_design-1/2023-03-27T10:54:24-07:00https://www.jemoka.com/posts/kbhimmunogen_design/2023-03-27T10:54:24-07:00https://www.jemoka.com/posts/kbhimperialism/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhinbox/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhsum_rule_of_counting/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhindependently_and_identically_distributed/2023-12-06T15:27:17-08:00https://www.jemoka.com/tags/index/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhindex_index/2022-06-22T22:22:13-07:00https://www.jemoka.com/posts/kbhinductors_in_circuits/2023-04-19T11:49:49-07:00https://www.jemoka.com/posts/kbhinference/2023-10-23T16:49:33-07:
00https://www.jemoka.com/posts/kbhinference_for_gaussian_models/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhinflectional_words/2022-08-27T17:10:35-07:00https://www.jemoka.com/posts/kbhinformation_retrival/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhinformation_theory/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhiu/2022-06-24T23:34:46-07:00https://www.jemoka.com/posts/kbhinitial_value_problems/2024-01-24T11:23:54-08:00https://www.jemoka.com/posts/kbhinjectivity/2022-11-14T23:45:36-08:00https://www.jemoka.com/posts/kbhinjectivity_implies_that_null_space_is_0/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhinner_product/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhinsertion_sort/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhinteger/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhintegrating_factor/2022-09-02T23:06:31-07:00https://www.jemoka.com/posts/risk_apetite_preso/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhinteraction_uncertainty/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhinteractive_agent/2023-11-09T12:00:18-08:00https://www.jemoka.com/posts/kbhintersession_2023/2023-01-03T11:19:33-08:00https://www.jemoka.com/posts/kbhinvariant_subspace/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhinverses/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhinverse_transform_sampling/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhinvertability/2023-01-12T09:12:11-08:00https://www.jemoka.com/posts/kbhinverted_index/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhiob/2022-12-23T14:17:31-08:00https://www.jemoka.com/posts/kbhiptv/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhirrational_number/2022-08-26T23:09:36-07:00https://www.jemoka.com/posts/kbhis_despot/2024-02-25T15:15:43-08:00https://www.jemoka.com/posts/kbhisomorphism/2023-01-12T10:11:08-08:00https://www.jemoka.com/posts/kbhistudio_meeting_notes/2023-10-19T01:16:45-07:00http
s://www.jemoka.com/posts/kbhitem_response_theory/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhito_intergral/2022-10-14T14:08:23-07:00https://www.jemoka.com/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhjohn_corso/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhjoint_probability_distribution/2023-11-03T11:22:40-07:00https://www.jemoka.com/posts/kbhjokes/2023-04-08T09:22:37-07:00https://www.jemoka.com/posts/kbhjonell_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhjsj/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhka_chava/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhkepler_s_laws_of_planetary_motion/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhkernel_smoothing/2023-11-03T11:22:40-07:00https://www.jemoka.com/posts/kbhkeynsian_politics/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhkeys/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhkirchoff_s_laws/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhkl_divergence/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhkla/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhknowledge_editing/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhknowledgebase_testing/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhkolmogorov_smirnov_test/2022-07-12T15:56:06-07:00https://www.jemoka.com/posts/kbhl_infty/2023-10-19T10:22:45-07:00https://www.jemoka.com/posts/kbhlagrangian_mechanics/2022-10-25T00:35:07-07:00https://www.jemoka.com/posts/kbhlaguarta_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhlambek_calculus/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhlanguage/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhlanguage_agents/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhlanguage_information_index/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhlaplae/2022-12-13T14:32:50-08:00https://www.jemoka.com/posts/kbhlaw_of_cosines/2022-09-13T14:47:30-07:00http
s://www.jemoka.com/posts/kbhlaw_of_large_numbers/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhloo/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhlegacy_of_mccarthyism/2022-04-25T11:59:35-07:00https://www.jemoka.com/posts/kbhlemmatization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhlength_of_basis_doesn_t_depend_on_basis/2022-10-24T15:30:43-07:00https://www.jemoka.com/posts/kbhletsdrive/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhlevel_set/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhlexicalization_hypothesis/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhlexicon/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhliberal_center/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/kbhlikelyhood/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhlina/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhlindsay_2021/2022-06-24T23:34:46-07:00https://www.jemoka.com/posts/kbhlinear_algea/2023-03-02T10:38:30-08:00https://www.jemoka.com/posts/kbhlinear_algebra_errors-1/2023-02-13T09:02:49-08:00https://www.jemoka.com/posts/kbhlinear_algebra_errors/2023-05-18T15:04:47-07:00https://www.jemoka.com/posts/kbhlinear_algebra_index/2023-06-21T16:34:47+08:00https://www.jemoka.com/posts/kbhlinear_combination/2022-10-01T17:52:29-07:00https://www.jemoka.com/posts/kbhlinear_constant_coefficient_equation/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhlinear_dependence_lemma/2022-10-11T15:00:54-07:00https://www.jemoka.com/posts/kbhlinear_functional/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhlinear_gaussian_model/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhlinear_independence/2022-10-16T12:26:23-07:00https://www.jemoka.com/posts/kbhlinear_map/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhlinear_non_seperable_equation/2022-09-13T13:51:27-07:00https://www.jemoka.com/posts/kbhlinear_quadratic_regulator/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhode
_linearilzation/2024-02-08T00:22:51-08:00https://www.jemoka.com/posts/linearity_tests_preso/2022-10-30T00:25:57-07:00https://www.jemoka.com/posts/kbhlinked_files/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhliquidnet/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhlist/2022-08-30T14:33:23-07:00https://www.jemoka.com/posts/kbhlist_of_american_presidents/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhlittle_endian/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhliving/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhllama/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhdora/2024-02-13T15:44:04-08:00https://www.jemoka.com/posts/kbhllms_are_text_matchers/2023-09-11T17:06:48-07:00https://www.jemoka.com/posts/kbhlm_alignment/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhlocal_policy_search/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhlog_laws/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhlogan_s_team_check_in/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhlogistic_equations/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhlogistic_regression/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhloop_invariant/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhlottery/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhltrdp/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhluccage/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhluz_2021/2022-06-26T10:25:12-07:00https://www.jemoka.com/posts/kbhlyrics_ping/2022-08-11T16:44:41-07:00https://www.jemoka.com/posts/kbhlyrics_laws/2023-10-31T10:20:58-07:00https://www.jemoka.com/posts/kbhlyu_2018/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhmachine_learning/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhmacroaverage/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhmagnetism/2023-04-19T11:17:16-07:00https://www.jemoka.com/posts/kbhmahajan_2021/2022-06-25T12:18:38-07:
00https://www.jemoka.com/posts/kbhmahatma_ghandi/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhmake_models_go_brrr/2023-10-23T13:17:19-07:00https://www.jemoka.com/posts/kbhmap_restriction_operator/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhmapreduce/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhmarkov_chain/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhmarkov_decision_process/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhmarkov_equivalence_classes/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhmarkov_game/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhmarkovian_process/2022-09-07T11:02:32-07:00https://www.jemoka.com/posts/kbhmartin_luther_king/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhmartinc_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhmartingale_model/2022-09-07T11:25:50-07:00https://www.jemoka.com/posts/kbhmath5_how/2024-02-27T13:50:55-08:00https://www.jemoka.com/posts/kbhmatricies/2023-03-20T23:37:00-07:00https://www.jemoka.com/posts/kbhmatrix_adjectives/2023-05-04T16:10:16-07:00https://www.jemoka.com/posts/kbhmatrix_exponentiation/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbhmatrix_multiplication/2023-01-11T00:22:15-08:00https://www.jemoka.com/posts/kbhmaximal_interval/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhmaximum_a_posteriori_estimate/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhmaximum_likelihood_parameter_learning/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhmaxq/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhmbp/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhmcvi/2024-01-30T20:06:03-08:00https://www.jemoka.com/posts/kbhmeal_replacement/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhmean_average_precision/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhmedblindtuner/2024-02-27T10:06:32-08:00https://www.jemoka.com/posts/kbhmedical_dialogue_generation/2024-02-
27T10:22:48-08:00https://www.jemoka.com/posts/kbhmedical_knowledge_extraction/2024-02-27T10:06:32-08:00https://www.jemoka.com/posts/kbhmeghanani_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhmel_scale/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhmemory/2023-10-11T11:20:31-07:00https://www.jemoka.com/posts/kbhmemory_allocation/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhmencius_philosophy/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhmesoscopic_region/2024-02-07T11:24:17-08:00https://www.jemoka.com/posts/kbhmetabolism/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhmethods/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhmfa_disfluency_measurement/2022-07-12T15:56:50-07:00https://www.jemoka.com/posts/kbhmfa_performance_statistics/2022-08-17T22:38:03-07:00https://www.jemoka.com/posts/kbhmia_tavares/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmicah_brown/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmilton_freedman/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhmmse/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhminimum_edit_distance/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhminimum_spanning_tree/2022-05-05T10:41:32-07:00https://www.jemoka.com/posts/kbhminimum_user_base_requirements_for_coveather/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhminimum_wage/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmisc_financial_market_questions/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhml_drug_discovery/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhmlib/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhmlk_and_malcom_x_reading/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhmodal/2023-09-19T19:47:37-07:00https://www.jemoka.com/posts/kbhmodalization/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhmodel_bae/2023-11-06T16:42:28-08:00https://www.jemoka.com/posts/kbhmodel_evaluation/2024-01
-20T11:35:00-08:00https://www.jemoka.com/posts/kbhmodel_based_reinforcement_learning/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhmodel_free_reinforcement_learning/2024-02-08T10:17:20-08:00https://www.jemoka.com/posts/kbhmodeling/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhmodern_os/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhmodular_arithmetic/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhmolecular_drug_resistance/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhmomdp/2024-02-25T15:15:43-08:00https://www.jemoka.com/posts/kbhmonetarist_theory/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhmonitor_pattern/2024-02-14T14:21:39-08:00https://www.jemoka.com/posts/kbhmonte_carlo_tree_search/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhmontomery_bus_boycott/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhmorpheme/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhmorphism/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhmorphological_parsing/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhmulti_lstm_for_clinical_report_generation/2024-02-27T11:58:42-08:00https://www.jemoka.com/posts/kbhmultiagent_reasoning/2023-11-30T10:20:10-08:00https://www.jemoka.com/posts/kbhmultimodal_ai_for_real_world_signals/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhmultinomial_coefficient/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhmultiple_instance_learning/2024-02-26T12:02:19-08:00https://www.jemoka.com/posts/kbhmultiplicative_identity/2022-08-26T15:07:17-07:00https://www.jemoka.com/posts/kbhmultiplying/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhmultiprocessing/2024-02-16T14:21:52-08:00https://www.jemoka.com/posts/kbhmultithreading/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhmutual_information/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhmutually_exclusive/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhmy_day/202
2-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhn_grams/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhnacc/2022-06-16T20:06:51-07:00https://www.jemoka.com/posts/kbhnaive_bayes/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhnational_banking_act/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhnatural_numbers/2022-08-26T20:36:54-07:00https://www.jemoka.com/posts/kbhnatural_semantic_metalanguage/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhnatural_transformations/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhnbbo/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhnebula/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhneedfinding/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhnegative_binomial_distribution/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhneoclassical_economics/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhner_tagging/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhneural_networks/2024-02-13T23:59:46-08:00https://www.jemoka.com/posts/kbhneuroscience_and_ai/2023-03-20T14:43:38-07:00https://www.jemoka.com/posts/kbhneutral_stability/2022-11-26T00:54:35-08:00https://www.jemoka.com/posts/kbhnew_american_south/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhnew_deal/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhnew_right/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/kbhnewton_s_first_law_of_motion/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhnewton_s_law_of_cooling/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhnewton_s_method/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhnlp/2023-12-04T16:51:01-08:00https://www.jemoka.com/posts/kbhnlp_semantics_timeline/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhchomsky/2022-08-23T11:06:27-07:00https://www.jemoka.com/posts/kbhnon_homogeneous_linear_differential_equation/2024-02-07T11:24:17-08:00https://www.jemoka.com/posts/kbhnon_intersecting_
graphs/2022-10-14T14:08:23-07:00https://www.jemoka.com/posts/kbhnon_linear_ode/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhnon_linear_systems/2022-11-04T15:10:26-07:00https://www.jemoka.com/posts/kbhnon_parametric_learning/2023-10-10T10:20:50-07:00https://www.jemoka.com/posts/kbhnon_pathological_matricies/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhnonsingular_matricies/2022-09-02T21:42:58-07:00https://www.jemoka.com/posts/kbhnonviolence_movement/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhnorm/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhnormal_distribution/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhnormal_random_variable/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhnorman_an_epic_tale_in_n_parts/2022-05-10T21:05:54-07:00https://www.jemoka.com/posts/kbhnsm_proposal/2022-08-28T23:06:33-07:00https://www.jemoka.com/tags/ntj/2022-09-12T12:54:42-07:00https://www.jemoka.com/posts/kbhnueva_courses_index/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhnull_space/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhnumber/2022-08-26T23:09:36-07:00https://www.jemoka.com/posts/kbhnumerical_approximation_schemes/2024-02-16T11:20:46-08:00https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations-1/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhnus_econ320_capm_problem_set/2022-10-29T16:57:58-07:00https://www.jemoka.com/posts/kbhnus_econ320_currency_arbitrage/2022-12-08T21:46:35-08:00https://www.jemoka.com/posts/kbhnus_econ320_risk_appetite/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhnus_econ320_linearity_tests/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhnus_econ320_stochastic_integration/2022-09-26T00:20:34-07:00https://www.jemoka.com/posts/kbhnus_econ320_volatility_hedging/2022-10-16T12:26:23-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_1/2022-12-11
T23:08:07-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_0/2022-12-11T23:06:39-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_5/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_film_analysis/2023-03-14T13:43:38-07:00https://www.jemoka.com/posts/kbhnus_eng401_film_analysis_outline/2023-03-13T19:10:08-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_2/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_utility/2022-12-11T23:10:58-08:00https://www.jemoka.com/posts/kbhi_tituba_essay_planning/2022-09-19T12:28:12-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_6/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_3/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_racialization_outline/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_4/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_math530_1_c_proof_preso/2022-10-07T00:44:00-07:00https://www.jemoka.com/posts/kbhnus_math530_2_c_problem_17/2022-11-04T14:01:46-07:00https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20-1/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20/2022-11-29T22:42:04-08:00https://www.jemoka.com/posts/kbhnus_math530_5_a_and_discussion/2023-02-15T08:49:38-08:00https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_14/2023-02-27T10:18:22-08:00https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_35_36/2023-02-27T22:55:40-08:00https://www.jemoka.com/posts/kbhnus_math530_5_c_problem_7/2023-04-04T11:34:33-07:00https://www.jemoka.com/posts/kbhnus_math530_changing_bases/2023-03-16T13:58:47-07:00https://www.jemoka.com/posts/kbhnus_math530_geometric_intepretations/2022-09-07T00:15:39-07:00https://www.jemoka.com/posts/kbhnus_math530_geometric_multiplicity/2023-04-08T09:22:37-07:00https://www.jemoka.com/posts/kbhnus_math530_homework_index/2023-05-04T16:10:16-07:00https://www.jemoka.com/posts/kbhnus_math530_linear_vehicles/2022-09-09T23:36
:04-07:00https://www.jemoka.com/posts/kbhnus_math530_matrix_adjectives/2023-05-10T21:41:46-07:00https://www.jemoka.com/posts/kbhnus_math530_plane_and_1_b/2022-09-15T22:11:01-07:00https://www.jemoka.com/posts/kbhnus_math530_similar_to_diagonal/2023-05-12T00:42:25-07:00https://www.jemoka.com/posts/kbhnus_math530_solving_systems/2022-09-07T00:15:39-07:00https://www.jemoka.com/posts/kbhnus_math530_some_6_a_problems/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhnus_math530_some_matrix_manipulation/2022-08-30T21:21:17-07:00https://www.jemoka.com/posts/kbhnus_math570_circuts/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhnus_math570_finance/2022-12-17T22:39:27-08:00https://www.jemoka.com/posts/kbhnus_math570_problem_set_1/2022-09-17T16:42:48-07:00https://www.jemoka.com/posts/kbhnus_math570_problem_set_2/2022-10-09T19:23:18-07:00https://www.jemoka.com/posts/kbhnus_math570_research_question_1/2022-09-17T16:42:48-07:00https://www.jemoka.com/posts/kbhnus_math570_supply_demand/2022-11-27T22:32:43-08:00https://www.jemoka.com/posts/kbhnus_mus150_critical_listening/2023-01-09T21:50:29-08:00https://www.jemoka.com/posts/kbhnus_span502_plastico_biodegrable/2023-03-14T10:16:14-07:00https://www.jemoka.com/posts/kbhnus_span502_tarea_2/2022-08-30T21:21:17-07:00https://www.jemoka.com/posts/kbhnus_span502_tarea_4/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhnus_span502_vocab/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhobjects/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhobserve_act_cycle/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhof_our_spiritual_strivings/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhohm_s_law/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhproductivity/2024-02-05T11:23:43-08:00https://www.jemoka.com/posts/kbhone_shot_deformation/2022-12-12T15:20:22-08:00https://www.jemoka.com/posts/kbhonline_m/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhonline_planning/2024-01-09T12:1
6:02-08:00https://www.jemoka.com/posts/kbhonline_pomdp_methods/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhopen_voice_brain_model/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhopensmile/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhos_index/2024-03-13T13:57:37-07:00https://www.jemoka.com/posts/kbhoperation/2022-08-26T15:07:17-07:00https://www.jemoka.com/posts/kbhoperation_linebacker/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhoperator/2023-03-03T21:04:56-08:00https://www.jemoka.com/posts/kbhopsins/2023-03-08T14:16:45-08:00https://www.jemoka.com/posts/kbhoptimal_exploration/2023-11-01T16:45:09-07:00https://www.jemoka.com/posts/kbhoptimal_stopping_problem/2023-11-02T10:20:22-07:00https://www.jemoka.com/posts/kbhoptimization/2023-12-13T13:36:16-08:00https://www.jemoka.com/posts/kbhoptimizing_spark/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhoptions/2022-10-09T19:23:18-07:00https://www.jemoka.com/posts/kbhoption/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhoptogenetics/2023-03-08T14:16:45-08:00https://www.jemoka.com/posts/kbhoral_lexical_retrival/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhordinary_differential_equations/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhrise_of_american_conservatism/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhorthogonal/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhorthonormal/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhorthonormal_basis/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhotc_markets/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhoutcome_uncertainty/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhoverfitting/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhpolynomial_operator/2023-03-03T21:04:56-08:00https://www.jemoka.com/posts/kbhpace/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhpacific_railroad_act/2022-10-29T00:17:59-07:00https://www.jemoka.com/
posts/kbhpagin_q/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhpapyrus/2023-03-03T22:44:51-08:00https://www.jemoka.com/posts/kbhparameter/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhparameter_learning/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhparkingson_s_classification_with_eeg/2024-02-27T10:22:48-08:00https://www.jemoka.com/posts/kbhparry/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhpartial_differential_equations/2024-01-08T11:23:11-08:00https://www.jemoka.com/posts/kbhpartially_observable_markov_decision_process/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhpartially_observable_markov_game/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhparvin_2020/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhpatient_risk_prediction/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhpcp_april_checkin/2022-04-17T16:18:17-07:00https://www.jemoka.com/posts/kbhpeft/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhpegasus/2024-02-26T13:48:45-08:00https://www.jemoka.com/posts/kbhpermits_model/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhpermittivity_of_free_space/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhpermutation/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhperplexity/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhpet/2023-02-27T10:18:04-08:00https://www.jemoka.com/posts/kbhpetri_dish/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhpga/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhphase_line/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhphysical_qubits/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhphysics/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhpineau_2006/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhpipe/2024-02-02T14:21:08-08:00https://www.jemoka.com/posts/kbhpitch_a_project/2022-09-01T22:41:54-07:00https://www.jemoka.com/posts/kbhpsc/2023-07-31T14:47:03-04:00https://
www.jemoka.com/posts/kbhpkm/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhplanning/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhpoint_selection/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhpoint_based_value_iteration/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhpointer/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhprobability_of_k_in_x_time/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhpolicy/2023-10-19T10:22:45-07:00https://www.jemoka.com/posts/kbhpolicy_evaluation/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhpolicy_gradient/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhpolicy_iteration/2023-11-01T16:45:09-07:00https://www.jemoka.com/posts/kbhpolicy_optimization/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhpolio/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhpolynomial/2023-03-03T21:04:56-08:00https://www.jemoka.com/posts/kbhpomcp/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhpomcpow/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhpomdp_approximation/2024-01-18T10:15:38-08:00https://www.jemoka.com/posts/kbhpomdp_lite/2024-02-25T15:46:27-08:00https://www.jemoka.com/posts/kbhpomdps_index/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhpos_tagging/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhpower_math/2022-08-27T09:54:42-07:00https://www.jemoka.com/posts/kbhpower_series_o/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhpower_series/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhpower_utility/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhpreemption/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhpretraining_data/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhpretraining_long_transformers/2023-11-02T11:50:25-07:00https://www.jemoka.com/posts/kbhprice/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhprime/202
3-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhprime_factorization/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhprinciple_of_induction/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhc_basic_operations/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhprivacy/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhprobability/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhprobability_distributions/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhprobability_mass_function/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhprobablistic_model/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhproblem_with_gravity/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhprocess_control_block/2024-02-21T14:22:23-08:00https://www.jemoka.com/posts/kbhproduct_of_linear_maps/2022-11-08T15:26:32-08:00https://www.jemoka.com/posts/kbhproduct_of_vector_spaces/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhproduct_summation_map/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhproduction_index/2023-10-23T10:30:41-07:00https://www.jemoka.com/posts/kbhproductivity_starter_pack/2023-03-12T10:30:03-07:00https://www.jemoka.com/posts/kbhproducts_and_quotients_the_intuition/2023-01-23T09:43:58-08:00https://www.jemoka.com/posts/kbhprof_xin_liu/2022-06-13T22:05:51-07:00https://www.jemoka.com/posts/kbhloop_of_thoughts/2024-03-14T14:31:16-07:00https://www.jemoka.com/posts/kbhproject80/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhproject80_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhprojects/2024-01-30T16:52:00-08:00https://www.jemoka.com/posts/kbhprokateotic_cell/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhproof/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhproof_by_induction/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhproof_design_patterns-1/2022-09-27T22:11:29-07:00https://www.jemoka.com/posts/kbhproof_design_patterns/2023-04-08T23:00:43
-07:00https://www.jemoka.com/posts/kbhproof_of_work/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhpropaganda/2022-04-17T17:42:52-07:00https://www.jemoka.com/posts/kbhprotease/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhprotected_group/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhprotons/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhprototyping/2022-09-07T13:20:23-07:00https://www.jemoka.com/posts/kbhpsc_big_data_workshop_july_2023/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhsu_math53_pset_1/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhpset_2/2024-01-19T10:11:39-08:00https://www.jemoka.com/posts/kbhpset_3/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhpset_4/2024-02-02T17:24:38-08:00https://www.jemoka.com/posts/kbhpset_5/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhpset_6/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhpset_7/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhpset_8/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhpset_9/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhpsycoacoustics/2023-01-03T11:19:33-08:00https://www.jemoka.com/posts/kbhptsd/2023-05-10T21:41:46-07:00https://www.jemoka.com/posts/kbhpwr_notes/2024-01-17T12:24:19-08:00https://www.jemoka.com/posts/kbhpwr1_rba_planning/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhpwr1_rhetorical_analysis_planning/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhpwr1_texts_in_conversation/2024-02-09T11:20:17-08:00https://www.jemoka.com/posts/kbhqmdp/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhquality_of_service_harm/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhcorrelation/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhquantum_group_project/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbhquantum_information_theory/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhquantum_supremecy/2022-05-10T09:28:34-07:00https:
//www.jemoka.com/posts/kbhquantum_theory/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhquantumnlp/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhqubits/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhquotient_group/2022-12-13T14:48:13-08:00https://www.jemoka.com/posts/kbhquotient_map/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhquotient_operator/2023-02-16T10:06:41-08:00https://www.jemoka.com/posts/kbhquotient_space/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbhr_n_abstract/2022-11-09T22:10:18-08:00https://www.jemoka.com/posts/kbhr_n_meeting_with_angi/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhraising_e_to_a_matrix/2022-10-09T12:18:43-07:00https://www.jemoka.com/posts/kbhrandom/2022-11-06T21:05:19-08:00https://www.jemoka.com/posts/kbhrandom_variables/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhrandom_walk/2022-09-07T11:14:44-07:00https://www.jemoka.com/posts/kbhrandom_wol/2022-09-07T11:02:32-07:00https://www.jemoka.com/posts/kbhrandomized_algorithum/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrandomized_pbvi/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhperseus/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhrange/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhranked_information_retrieval/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhrational_number/2022-08-26T14:58:53-07:00https://www.jemoka.com/posts/kbhrational_preference/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhreal_number/2022-08-26T14:58:53-07:00https://www.jemoka.com/posts/kbhreceeding_horizon/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhrecommender_system/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhreduce/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhreductive_paraphrase/2022-08-28T11:55:03-07:00https://www.jemoka.com/posts/kbhresearch_at_nueva_notes_06_09_2022/2022-06-12T22:29:49-07:00https://www.jemoka.com/posts/kbhregex/20
24-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhregulating_zinc_uptake/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhreinforcement_learning/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhrejection_sampling/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhrelative_probability/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhrelaxation_algorithums/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhreplication/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhreplier_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhrepresentation_learning/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhrepresenting_large_computation/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhrequirements_analysis/2023-07-15T00:48:46+08:00https://www.jemoka.com/posts/kbhresearch/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhresearch_tips/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhrdd/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhresistors/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhreticle/2022-05-27T14:08:35-07:00https://www.jemoka.com/posts/kbhrfdiffusion/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrho_pomdps/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhrichard_nixon/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrichard_nixon_s_foreign_policy/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrichard_nixon_s_treatment_against_the_vietnam_war/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrick_wallace/2022-05-27T14:08:35-07:00https://www.jemoka.com/posts/kbhring/2023-03-09T09:56:26-08:00https://www.jemoka.com/posts/kbhexpected_utility_of_wealth/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhrobotics_assisted_directed_evolution/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhrollout_with_lookahead/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhronald_raegan/2022-06-07T13:38:12-07:
00https://www.jemoka.com/posts/kbhrosa_parks/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhroseta/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrosetta/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrosettafold2/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrossing_1990/2022-09-20T23:31:24-07:00https://www.jemoka.com/posts/kbhrotational_energy/2022-04-20T22:23:58-07:00https://www.jemoka.com/posts/kbhrural_electrification_administration/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhrussel_howard/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhsadeghian_2021/2022-09-12T12:54:42-07:00https://www.jemoka.com/posts/kbhsaic_speech_anonomyzation/2024-02-27T14:18:25-08:00https://www.jemoka.com/posts/kbhsalus_april_checkin/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhsample_space/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhsars_cov2/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhsars_cov2_structural_analysis/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhsarsa_lambda/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhsarsop/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhscalander_notes-1/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhscalander_notes-2/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhscalander_notes/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhscalar_multiplication/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhscheduling/2024-02-25T10:45:33-08:00https://www.jemoka.com/search/2023-04-02T23:20:11-07:000.1https://www.jemoka.com/posts/kbhsecond_moment_of_area/2022-10-02T13:54:53-07:00https://www.jemoka.com/posts/kbhsecond_order_differential_equations/2022-10-09T12:23:02-07:00https://www.jemoka.com/posts/kbhsecond_order_linear_differential_equation/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhselective_service_system/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhsemantic_accountab
ility/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhsemantic_health_risk_prediction/2024-02-27T13:50:55-08:00https://www.jemoka.com/posts/kbhsemantic_primes/2022-08-28T11:55:03-07:00https://www.jemoka.com/posts/kbhsemantic_verbal_fluency/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhsemiconductor/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhsense/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhsentence_segmentation/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhseperable_diffequ/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhserver_clients/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhset/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhsets/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhshah_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhshort_selling/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhsigmoid/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhsimple_differential_equations/2022-09-06T13:01:21-07:00https://www.jemoka.com/posts/kbhsimple_game/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhsingle_party_control/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhsingular_value_decomposition/2023-05-14T00:00:30-07:00https://www.jemoka.com/posts/kbhsir_model/2024-02-19T14:29:19-08:00https://www.jemoka.com/posts/kbhslopes/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhsmith/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhsmooth_function/2022-10-24T23:46:49-07:00https://www.jemoka.com/posts/kbhsocial_network/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsocial_security_administration/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhsoftware_design_and_architecture_patterns/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhsoftware_dev_starter_pack/2023-02-01T18:22:50-08:00https://www.jemoka.com/posts/kbhsoftware_development_methodologies/2023-07-09T21:20:36+08:00https://www.jemoka.
com/posts/kbhsoftware_engineering/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhsolving_pdes_via_fourier_transform/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsolving_systems/2022-09-09T13:01:35-07:00https://www.jemoka.com/posts/kbhsongs_that_need_lyrics/2022-08-11T16:44:41-07:00https://www.jemoka.com/posts/kbhsorting_functions/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsound/2023-01-03T11:19:33-08:00https://www.jemoka.com/posts/kbhsoviet_perspective_on_cold_war/2022-05-05T07:50:29-07:00https://www.jemoka.com/posts/kbhspaan_2005/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhspan/2022-10-11T14:20:10-07:00https://www.jemoka.com/posts/kbhspanish/2023-01-12T09:00:43-08:00https://www.jemoka.com/posts/kbhspark/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhsparse_sampling/2023-11-30T00:00:07-08:00https://www.jemoka.com/posts/kbhspeech_feature_extraction/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhspeech_processing_index/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhspinal_tap/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhstability/2024-01-17T11:22:00-08:00https://www.jemoka.com/posts/kbhstack/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhstack_trace/2022-04-21T10:53:09-07:00https://www.jemoka.com/posts/kbhstandard_error/2022-04-20T23:19:11-07:00https://www.jemoka.com/posts/kbhstanford/2023-03-17T19:18:57-07:00https://www.jemoka.com/posts/kbhstanford_factoids_index/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhstanford_courses_index/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhstanford_ug_research_program/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhstarting_with_why_the_knowledgebase/2022-04-17T16:54:46-07:00https://www.jemoka.com/posts/kbhstartup/2023-09-12T21:52:20-07:00https://www.jemoka.com/posts/kbhstationary_action_principle/2022-10-24T23:46:49-07:00https://www.jemoka.com/posts/kbhstastistic/2022-04-20T21:40:40-07:00https://www.
jemoka.com/posts/kbhstepwise_evolution/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhstochastic_discount_factor/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhstochastic_gradient_descent/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhstochat/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhstock_indicies/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhstock_issues_debate/2022-09-24T16:25:49-07:00https://www.jemoka.com/posts/kbhstock_market_survey/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhstrain/2022-09-05T22:37:28-07:00https://www.jemoka.com/posts/kbhstrategies_to_revise_an_essay/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhstress/2022-09-05T22:37:28-07:00https://www.jemoka.com/posts/kbhstring/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhstrips_style_planning/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhstrong_free_will/2022-05-27T14:08:35-07:00https://www.jemoka.com/posts/kbhstrong_induction/2023-03-16T09:57:46-07:00https://www.jemoka.com/posts/kbhstructure_learning/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhstructure_of_covid_replication/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhsu_cs107_dec012023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs107_midterm_sheet/2023-10-29T17:56:04-07:00https://www.jemoka.com/posts/kbhsu_cs107_nov102023/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhsu_cs107_nov132023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs107_nov272023/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhsu_cs107_oct022023/2023-10-02T11:21:26-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct032023/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct042023/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct062023/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct092023/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhsu_cs107_o
ct112023/2023-10-11T11:20:31-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct132023/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct162023/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct182023/2023-10-23T10:30:41-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct2023/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct232023/2023-10-23T13:17:19-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct252023/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct272023/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsu_cs107_sep272023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_cs107_sep292023/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhsu_cs109_dec012023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs109_dec042023/2023-12-05T10:05:22-08:00https://www.jemoka.com/posts/kbhsu_cs109_midterm/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhsu_cs109_midterm_sheet/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhsu_cs109_nov012023/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhsu_cs109_nov032023/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov062023/2023-11-06T16:42:28-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov082023/2023-11-08T16:46:31-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov102023/2023-11-10T16:48:11-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov132023/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov152023/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov172023/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov272023/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov292023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs109_oct022023/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct042023/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct062023/2023-10-09T16:4
9:53-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct092023/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct112023/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhsu_109_oct132023/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct162023/2023-10-17T00:13:23-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct182023/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct202023/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct232023/2023-10-23T16:49:49-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct252023/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct272023/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsu_cs109_sep272023/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhsu_cs109_sep292023/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhsu_cs111_final_sheet/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhsu_cs111_outline/2024-03-13T14:06:28-07:00https://www.jemoka.com/posts/kbhsu_cs238_nov022023/2023-11-02T10:20:22-07:00https://www.jemoka.com/posts/kbhsu_cs238_nov092023/2023-11-09T12:00:18-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov142023/2023-11-14T13:34:03-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov162023/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov282023/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov302023/2023-11-30T10:20:10-08:00https://www.jemoka.com/posts/kbhsu_cs238_oct032023/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct052023/2023-10-08T23:37:58-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct102023/2023-10-10T10:20:50-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct122023/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct172023/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct192023/2023-10-23T10:30:41-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct242023/2023-10-24T14:11:17-07:00https://www.jemo
ka.com/posts/kbhsu_cs238_oct262023/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct212023/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhsu_cs238_q0q3/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhsu_cs238_sep262023/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhsu_cs238_sep272023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_cs238_sep282023/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhsu_cs239_jan092023/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhsu_cs239_midterm_1/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhsu_math109_problem_set_1/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhsu_math109_sep272023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_math109_sep272023_exp/2023-09-27T23:30:36-07:00https://www.jemoka.com/posts/kbhsu_math109_sep292023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_math53_feb022024/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhsu_math53_feb052024/2024-02-05T11:23:43-08:00https://www.jemoka.com/posts/kbhsu_math53_feb072024/2024-02-09T11:20:17-08:00https://www.jemoka.com/posts/kbhsu_math53_feb092024/2024-02-09T11:20:17-08:00https://www.jemoka.com/posts/kbhsu_math53_feb122024/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhsu_math53_feb142024/2024-02-16T11:20:46-08:00https://www.jemoka.com/posts/kbhsu_math53_feb162024/2024-02-16T11:20:46-08:00https://www.jemoka.com/posts/kbhsu_math53_feb212024/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhsu_math53_feb232024/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhsu_math53_feb252024/2024-02-28T11:36:19-08:00https://www.jemoka.com/posts/kbhsu_math53_feb282024/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhsu_math53_homework_index/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhsu_math53_jan082023/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhsu_math53_jan102023/2024-01-11T11:49:36-08:00https://ww
w.jemoka.com/posts/kbhsu_math53_jan122023/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhsu_math53_jan172024/2024-01-17T11:19:33-08:00https://www.jemoka.com/posts/kbhsu_math53_jan192023/2024-01-20T11:35:00-08:00https://www.jemoka.com/posts/kbhsu_math53_jan202024/2024-01-24T11:23:54-08:00https://www.jemoka.com/posts/kbhsu_math53_jan262023/2024-01-26T14:19:53-08:00https://www.jemoka.com/posts/kbhsu_math53_jan292024/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbhsu_math53_jan312024/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhsu_math53_mar012024/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhsu_math53_mar042024/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsu_math53_mar062024/2024-03-06T11:19:33-08:00https://www.jemoka.com/posts/kbhsu_math53_mar082024/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsu_math53_mar112024/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhsu_math53_midterm_sheet/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhsu_math53_problem_session/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhsubgroup/2022-12-13T14:32:50-08:00https://www.jemoka.com/posts/kbhsubspace/2022-10-01T23:44:57-07:00https://www.jemoka.com/posts/kbhsubtrait_envelope/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhsum_of_subsets/2022-10-24T21:49:16-07:00https://www.jemoka.com/posts/kbhsum_of_two_dice/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhsum_of_vector_and_subspace/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhspersite/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhsupervised_learning/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhsupport/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbhsurjectivity/2022-11-26T00:54:35-08:00https://www.jemoka.com/posts/kbhsyscalls/2024-02-03T23:42:26-08:00https://www.jemoka.com/posts/kbht_twiddle/2023-01-21T00:35:25-08:00https://www.jemoka.com/posts/kbht_statistics/2022-04-17T22:11:05-07:00https
://www.jemoka.com/posts/kbht_test/2022-04-21T10:53:09-07:00https://www.jemoka.com/posts/kbhraising_operators_to_powers/2023-03-03T21:04:56-08:00https://www.jemoka.com/tags/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhtalk_contacts/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhtalkbank/2023-06-13T10:29:05-07:00https://www.jemoka.com/posts/kbhtalkbank_pipeline_project/2022-07-30T15:46:23-07:00https://www.jemoka.com/posts/kbhtariffs/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhtask_estimation/2022-09-13T13:51:27-07:00https://www.jemoka.com/posts/kbhtaxicab_norm/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhtaylor_se/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhtechnology_baboon_jemoka_com/2023-06-06T21:50:18-07:00https://www.jemoka.com/posts/kbhtechnology_balloon_jemoka_com/2023-02-02T21:27:48-08:00https://www.jemoka.com/posts/kbhtechnology_bassoon_jemoka_com/2023-01-29T11:18:03-08:00https://www.jemoka.com/posts/kbhtechnology_bilon_jemoka_com/2023-11-29T13:34:35-08:00https://www.jemoka.com/posts/kbhtechnology_bison_jemoka_com/2023-06-17T13:35:21-07:00https://www.jemoka.com/posts/kbhtechnology_bonbon_jemoka_com/2023-02-12T19:05:57-08:00https://www.jemoka.com/posts/kbhtechnology_boon_jemoka_com/2023-08-15T00:10:08-07:00https://www.jemoka.com/posts/kbhteddy_roosevelt/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhteelscoping_series/2023-03-20T09:39:51-07:00https://www.jemoka.com/posts/kbhtemperal_abstraction/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhterm_document_matrix/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhtest_for_normality/2022-04-17T20:34:22-07:00https://www.jemoka.com/posts/kbhtesting/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhtext_classification/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhtext_normalization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhthe_unreasonable_effectiveness_of_mathematics_in_the_natural_sciences/2022-08
-26T22:22:05-07:00https://www.jemoka.com/posts/kbhtherma/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhthermoregulation/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhtheta_alpha_ratio/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhthoughts_on_axler_4/2023-04-08T23:40:21-07:00https://www.jemoka.com/posts/kbhtiago_forte/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhtokenization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhtopological_sort/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhtraining_data_sourcing/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhtraining_helpful_chatbots/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhtransformational_generative_syntax/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhspeech_diarization/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhtransformers/2023-11-08T16:46:31-08:00https://www.jemoka.com/posts/kbhtranslation_studies_index/2023-01-09T21:50:29-08:00https://www.jemoka.com/posts/kbhtranslation_theory/2023-07-15T00:48:46+08:00https://www.jemoka.com/posts/kbhtransverse_loaod/2022-09-05T22:24:09-07:00https://www.jemoka.com/posts/kbhtrustpomdp/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhtuning_forks/2023-01-30T22:45:16-08:00https://www.jemoka.com/posts/kbhtwo_dimensional_heat_equation/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhtwo_s_complement/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhtypes_of_harm/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhu1_c/2022-10-24T22:04:57-07:00https://www.jemoka.com/posts/kbhunbiased_parameter_learning/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhuncertainty/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhunconc/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhundirected_exploration/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhunimodal/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhunique_lock/2024-
02-14T14:21:39-08:00https://www.jemoka.com/posts/kbhuniqueness_and_existance/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhuniversal_quantum_constructor/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhuniversity_of_georgia/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhunix/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhunix_v6_filesystem/2024-01-20T11:35:00-08:00https://www.jemoka.com/posts/kbhupper_triangular_matrix/2023-03-15T10:17:51-07:00https://www.jemoka.com/posts/kbhus_wwii_propaganda/2022-04-17T17:42:52-07:00https://www.jemoka.com/posts/kbhusaypt/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhuser_experience/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhux_design/2022-12-01T14:28:25-08:00https://www.jemoka.com/posts/kbhuser_interviews/2022-09-01T22:41:54-07:00https://www.jemoka.com/posts/kbhutility_elicitation/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhutility_function/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhutility_fusion/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhutility_theory/2023-10-29T17:56:04-07:00https://www.jemoka.com/posts/kbhvalue_iteration/2024-01-27T21:34:06-08:00https://www.jemoka.com/posts/kbhvalue_iteration_in_practice/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhvalue_of_information/2023-10-29T17:56:04-07:00https://www.jemoka.com/posts/kbhvariance/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhvc_thing/2022-05-10T20:54:46-07:00https://www.jemoka.com/posts/kbhvector/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhvector_semantics/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhvector_space/2022-09-14T14:29:05-07:00https://www.jemoka.com/posts/kbhcraintech/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhvgg/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhvggish/2022-06-24T00:35:24-07:00https://www.jemoka.com/posts/kbhvietnam/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbh
vietnamization/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhvirtual_memory/2024-03-06T14:21:19-08:00https://www.jemoka.com/posts/kbhvoltage/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhvwap/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhwalker_2018/2023-03-15T20:36:33-07:00https://www.jemoka.com/posts/kbhwang_2019/2022-07-02T00:15:04-07:00https://www.jemoka.com/posts/kbhwang_2023/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhwatergate/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhwave_equation/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhweb_graph/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhweighted_edit_distance/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhfireside_article/2023-10-17T00:16:44-07:00https://www.jemoka.com/posts/kbhwho_s_talking_when/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhwhole_metalanguage_study/2022-08-27T17:10:35-07:00https://www.jemoka.com/posts/kbhtodo_lists/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhwindows_fat/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhword_normalization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhword2vec/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhwpa/2022-04-17T15:13:02-07:00https://www.jemoka.com/tags/writing/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhwriting_index/2022-04-17T17:42:52-07:00https://www.jemoka.com/posts/kbhycomb/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhyoung_s_modulus/2022-10-08T16:24:13-07:00https://www.jemoka.com/posts/kbhyuan_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhz_test/2022-04-17T22:11:05-07:00https://www.jemoka.com/posts/kbhzero/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhzero_sum_game/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhzettlekasten/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhzettlekasten_index/2022-06-22T21:51:22-07:00https://www.jemoka.com/
posts/kbhzhu_2021/2022-06-25T11:00:09-07:00https://www.jemoka.com/posts/kbhzinc_abc_transporters/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhgaussian_mixture_model/2023-09-28T16:07:11-07:00 \ No newline at end of file +https://www.jemoka.com/posts/kbhassembly/2023-10-31T00:44:54-07:00https://www.jemoka.com/posts/kbhistudio_meeting_nodes/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmaking_qubits_interact/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhpoint_estimate/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhproperties_of_the_stable_matching_algorithm/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhrnn_notes/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhrural_hospitals_problem/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhstable_matching_problem/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhz_score/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbh1v_1/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbhq/2022-08-27T09:38:59-07:00https://www.jemoka.com/posts/kbhw/2023-03-28T21:31:23-07:00https://www.jemoka.com/posts/kbhzero_times_vector/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbheigenvalue/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbh1980s_political_alignment/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/kbh1a/2022-08-30T14:23:25-07:00https://www.jemoka.com/posts/2023-02-26/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhnus_math530_3_e_problem_1/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbh776/2023-03-01T13:39:00-08:00https://www.jemoka.com/posts/kbhaaa/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhaaai_talk_contacts/2024-02-26T11:25:20-08:00https://www.jemoka.com/posts/kbhaaai2024_index/2024-02-27T13:50:55-08:00https://www.jemoka.com/posts/kbhindex/2024-01-17T12:24:19-08:00https://www.jemoka.com/posts/kbhabsolute_value_function/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhaccounting_price
/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhaction_of_capecitabmine/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhaction_research/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhaction_value_function/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhactive_data_representation/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhactive_learning_molecule_iteration/2023-03-27T10:23:02-07:00https://www.jemoka.com/posts/kbhactive_listening/2022-12-01T14:28:25-08:00https://www.jemoka.com/posts/kbhactive_recall/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhactor_critic/2023-10-31T10:20:58-07:00https://www.jemoka.com/posts/kbhadaops/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhadding/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhadditive_identity/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhadditive_identity_is_unique_in_a_vector_space/2022-09-13T23:23:56-07:00https://www.jemoka.com/posts/kbhadditive_inverse_is_unique_in_a_vector_space/2022-09-13T23:23:56-07:00https://www.jemoka.com/posts/kbhadhd/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhadme/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhadress_challenge/2022-06-24T23:35:50-07:00https://www.jemoka.com/posts/kbhadress_literature_survey/2022-06-26T10:25:12-07:00https://www.jemoka.com/posts/kbhadvantage_function/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhadvertising/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhparallel_linear_algebra/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbhaffine_transformation/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhagent/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhagricultural_adjustment_administration/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhagrp/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhai/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhai_ethics/2022-04-17T15:13:02-07:00https://w
ww.jemoka.com/posts/kbhai_healthcare_safety/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhai_intepretability/2023-10-26T12:04:45-07:00https://www.jemoka.com/posts/kbhai_master_class/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhai_medicine/2024-02-26T14:44:52-08:00https://www.jemoka.com/posts/kbhaibridge/2023-03-12T17:23:29-07:00https://www.jemoka.com/posts/kbhaibridge_course_website/2022-06-30T21:14:10-07:00https://www.jemoka.com/posts/kbhaibridge_final_project/2022-06-29T23:42:15-07:00https://www.jemoka.com/posts/kbhaibridge_iris_variance_worksheet/2022-06-30T10:37:38-07:00https://www.jemoka.com/posts/kbhaibridge_packages/2022-06-27T11:07:38-07:00https://www.jemoka.com/posts/kbhaibridge_student_presentations/2022-07-02T00:07:55-07:00https://www.jemoka.com/posts/kbhaibridgelab_d1aft/2022-06-27T16:03:53-07:00https://www.jemoka.com/posts/kbhaibridgelab_d3_d4/2022-06-23T14:37:07-07:00https://www.jemoka.com/posts/kbhaibridgelab_d2aft/2022-06-29T10:21:38-07:00https://www.jemoka.com/posts/kbhaibridgelab_d4aft/2022-06-29T23:29:28-07:00https://www.jemoka.com/posts/kbhaifs/2022-06-13T22:05:51-07:00https://www.jemoka.com/posts/kbhair_a_greek_style_myth/2022-07-05T23:31:37-07:00https://www.jemoka.com/posts/kbhalexis_ohanian/2023-03-01T13:39:00-08:00https://www.jemoka.com/posts/kbhalgebra/2022-08-26T14:58:53-07:00https://www.jemoka.com/posts/kbhalgebreic_equation/2024-01-08T11:23:11-08:00https://www.jemoka.com/posts/kbhalgebreic_multiplicity/2023-03-22T14:02:07-07:00https://www.jemoka.com/posts/kbhali_partovi/2023-09-12T21:52:20-07:00https://www.jemoka.com/posts/kbhalign_with_new_vocab/2022-06-13T13:25:37-07:00https://www.jemoka.com/posts/kbhalivio_april_checkin/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhalpha_vector/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhalternating_least_squares/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhabulance_trajectories/2024-02-26T10:36:33-08:00https://www.jemoka.com/tags/aml/2023-10-3
1T12:46:14-07:00https://www.jemoka.com/posts/kbhaml_dipping_into_pytorch/2023-09-02T15:08:45-07:00https://www.jemoka.com/posts/kbhaml_iris_strikes_bath/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhaml_it_takes_two/2023-09-02T15:08:45-07:00https://www.jemoka.com/posts/kbhaml_reinforce/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhaml_time_to_convolve/2023-09-02T15:08:45-07:00https://www.jemoka.com/posts/kbhaml_your_first_article/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhanatomy/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanatomy_learning/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanca_ae/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhangelman_syndrome/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanna_s_team_checkin/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhanotehuaoeu/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhanoushka_krishnan/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhanthony_badger/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhantonsson_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhany_name_here/2022-04-25T11:59:35-07:00https://www.jemoka.com/posts/kbhaems/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhaosneuhasoneuh/2022-09-10T21:05:25-07:00https://www.jemoka.com/posts/kbhap_phys_c_em_index/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhap_phys_c_em_things_to_do/2023-05-08T10:14:24-07:00https://www.jemoka.com/posts/kbhap_phys_c_mech_index/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhap_physi/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhapstats/2022-04-21T16:21:45-07:00https://www.jemoka.com/posts/kbhapplying_eigenspace/2022-09-30T14:51:29-07:00https://www.jemoka.com/posts/kbhapproximate_inference/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhapproximate_value_function/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhapr_paradox/2022-04-16T23:31:57
-07:00https://www.jemoka.com/posts/kbhaps/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbharbitrage_pricing/2022-10-04T13:12:46-07:00https://www.jemoka.com/posts/kbhargmax/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbharray/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbharrival_movie/2023-02-19T11:21:06-08:00https://www.jemoka.com/posts/kbharthur_m_schlesinger/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhartificial_intelligence/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhasbmb/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhasbmb2023_index/2023-07-09T21:21:12+08:00https://www.jemoka.com/posts/kbhascii/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhasee_prism/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhasip/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhasr/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhasr_disordered_speech/2023-06-05T23:44:58-07:00https://www.jemoka.com/posts/kbhassociative/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhasymtotic_analysis/2022-09-21T10:43:02-07:00https://www.jemoka.com/posts/kbhatoms_as_qubits/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhafib/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhauthoritarianism/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhautism/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhautonomous_odes/2024-01-17T11:19:33-08:00https://www.jemoka.com/posts/kbhaxler_a/2022-09-02T12:35:27-07:00https://www.jemoka.com/posts/kbhaxler_1_b/2022-09-17T16:42:48-07:00https://www.jemoka.com/posts/kbhaxler_1_c/2022-09-27T14:35:57-07:00https://www.jemoka.com/posts/kbhaxler_1_c_excercises/2022-10-08T16:24:13-07:00https://www.jemoka.com/posts/kbhaxler_2_a/2022-10-12T17:34:39-07:00https://www.jemoka.com/posts/kbhaxler_2_b/2022-10-16T14:41:14-07:00https://www.jemoka.com/posts/kbhaxler_2_c/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhaxler_3_a/2022-11-02T23:49:12-0
7:00https://www.jemoka.com/posts/kbhaxler_3_b/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhaxler_3_c/2022-11-28T23:56:18-08:00https://www.jemoka.com/posts/kbhaxler_3_d/2023-01-12T10:11:08-08:00https://www.jemoka.com/posts/kbhaxler_3_e/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhaxler_3_f/2023-06-21T16:34:47+08:00https://www.jemoka.com/posts/kbhaxler_5_a/2023-02-16T10:06:41-08:00https://www.jemoka.com/posts/kbhaxler_5_b/2023-03-15T10:17:51-07:00https://www.jemoka.com/posts/kbhaxler_5_c/2023-03-23T09:42:23-07:00https://www.jemoka.com/posts/kbhaxler_6_a/2023-04-08T23:40:21-07:00https://www.jemoka.com/posts/kbhaxler_6_b/2023-05-01T11:30:10-07:00https://www.jemoka.com/posts/kbhaxler_7_a/2023-05-12T00:42:25-07:00https://www.jemoka.com/posts/kbhbackpacks/2023-04-10T21:36:38-07:00https://www.jemoka.com/posts/kbhbacktracing/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhbag_of_words/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhbalagopalan_2021/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhbasis/2022-10-16T12:40:03-07:00https://www.jemoka.com/posts/kbhbasis_of_domain/2022-11-08T15:26:32-08:00https://www.jemoka.com/posts/kbhbatchalign/2022-07-02T00:07:55-07:00https://www.jemoka.com/posts/kbhbatchalign_paper_outline/2022-08-07T12:24:33-07:00https://www.jemoka.com/posts/kbhbayes_normalization_constant/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhbayes_theorem/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhbayes_theorem_over_random_variable/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhbaysian_network/2023-10-10T10:20:50-07:00https://www.jemoka.com/posts/kbhbaysian_networks_for_healthcare/2024-02-27T11:58:42-08:00https://www.jemoka.com/posts/kbhbaysian_parameter_learning/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhbelief/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhilqr/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhbelief_state_mdp/2023-11-14T13:34:03-08:
00https://www.jemoka.com/posts/kbhbending/2022-09-25T17:26:49-07:00https://www.jemoka.com/posts/kbhbernoulli_random_variable/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhbessel_s_equation/2024-01-08T11:23:11-08:00https://www.jemoka.com/posts/kbhworst_possible_state/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhbetazero/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhbig_data/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhbinary_operation/2022-09-14T14:29:05-07:00https://www.jemoka.com/posts/kbhbinomial_distribution/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhbioinformatics/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhbitmask/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhbitwise_operations/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhblack_thursday/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhblack_scholes_formula/2022-10-09T19:28:34-07:00https://www.jemoka.com/posts/kbhblb/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhblind_lower_bound/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhbloch_sphere/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhbluest_eye/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhenglish_bluest_eye/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhsecondary_source_comparison_activity/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhbool/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhboostrap/2023-12-06T15:27:17-08:00https://www.jemoka.com/posts/kbhboston_naming_test/2022-06-25T11:00:19-07:00https://www.jemoka.com/posts/kbhbouton_2018/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhbranch_and_bound/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhbraun_and_clarke_thematic_analysis/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhbrian_macwinney/2022-11-09T22:10:18-08:00https://www.jemoka.com/posts/kbhbrown_v_board_of_education/2022-05-09T13:20:16-07:00https://
www.jemoka.com/posts/kbhbrownian_motion/2022-09-26T00:15:10-07:00https://www.jemoka.com/posts/kbhbuffer_overflow/2023-10-11T11:20:31-07:00https://www.jemoka.com/posts/kbhbuild_a_system_not_a_monolyth/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhbpe/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhc/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhcaching/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhcal_com/2023-03-01T13:39:00-08:00https://www.jemoka.com/posts/kbhcalculating_shear_s_modulus/2022-12-08T21:46:35-08:00https://www.jemoka.com/posts/kbhcalp/2024-01-18T10:15:38-08:00https://www.jemoka.com/posts/kbhcalpains_afib/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhcanciones/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhcantilever_beam/2022-10-02T13:54:53-07:00https://www.jemoka.com/posts/kbhcantilever_beams/2022-12-12T15:20:22-08:00https://www.jemoka.com/posts/kbhcapacitance/2023-03-18T21:42:56-07:00https://www.jemoka.com/posts/kbhcapacitor/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhcapecitabmine/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcapm/2022-10-29T18:57:09-07:00https://www.jemoka.com/posts/kbhcasting/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhcategorical_grammar/2023-02-26T12:21:16-08:00https://www.jemoka.com/posts/kbhcategorical_grammars_index/2023-02-25T23:04:37-08:00https://www.jemoka.com/categories/https://www.jemoka.com/posts/kbhcategory/2023-02-26T17:02:13-08:00https://www.jemoka.com/posts/kbhcategory_theory/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhcauses_of_the_great_depression/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcell/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhcell_free_biocatalysis/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhcentral_limit_theorem/2023-11-06T16:42:28-08:00https://www.jemoka.com/posts/kbhchanges_to_central_dogma/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhch
ar/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhcharacteristic_polynomial/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhcharged/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhchatbot/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhchi_square/2022-04-20T23:19:11-07:00https://www.jemoka.com/posts/kbhchiara_marletto/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhchild_labour/2022-07-10T11:34:23-07:00https://www.jemoka.com/posts/kbhchlasta_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhchromatin/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcivil_rights/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhcivillian_conservation_corps/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhclinical_skin_disease_imaging/2024-02-26T11:42:26-08:00https://www.jemoka.com/posts/kbhclock_algorthium/2024-03-06T14:21:19-08:00https://www.jemoka.com/posts/kbhclosed/2022-08-26T20:34:46-07:00https://www.jemoka.com/posts/kbhclrs_index/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhclustering/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhcmu/2022-06-22T22:22:13-07:00https://www.jemoka.com/posts/kbhcns_regulation/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcode_review/2022-11-03T14:37:47-07:00https://www.jemoka.com/posts/kbhcoherence_time/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcold_sites/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhcold_war/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhcold_war_in_vietnam/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhcollectivist_economy/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcollege_application/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhcollege101_index/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhcollegeboard/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhcollocation_extractio/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kb
hcolumn_space/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhcombination/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhcommon_spark_actions/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhcommon_spark_transformations/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhcommutivity/2022-08-26T23:09:36-07:00https://www.jemoka.com/posts/kbhcomparison_function/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhcomplex_exponential/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhcomplex_number/2024-01-20T11:35:00-08:00https://www.jemoka.com/posts/kbhunderdetermined_ode_system/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhcomplex_system/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhcomplexity_theory/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhcomposite_system/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhscene_representation/2023-03-20T14:43:38-07:00https://www.jemoka.com/posts/kbhcomputational_biology_index/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhbinary_number_system/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhcomputer_systems_index/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhconceptual_grammar/2022-08-28T11:55:03-07:00https://www.jemoka.com/posts/kbhcondef_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhconditional_gaussian_models/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhconditional_plan/2023-11-16T10:38:43-08:00https://www.jemoka.com/posts/kbhconditions_in_the_great_depression/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhconfidence_interval/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhconjugation/2023-03-20T14:27:00-07:00https://www.jemoka.com/posts/kbhconnectionism/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhconstructor_theory/2022-05-10T10:01:02-07:00https://www.jemoka.com/posts/kbhcontiguous_allocation/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhcon
tinuity_correct/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhcontinuity_correction/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhcontinuous_distribution/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhcontroller/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhcontroller_gradient_ascent/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhconvolution/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhctp/2022-06-24T23:34:46-07:00https://www.jemoka.com/posts/kbhcornucopia_of_analysis/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhcorpus/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhcortex/2023-03-08T14:16:45-08:00https://www.jemoka.com/posts/kbhcoulomb_s_law/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhcounterfactual/2022-05-10T20:54:46-07:00https://www.jemoka.com/posts/kbhcounting/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhcourses_to_take_for_qnlp/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhcovariance/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhcoveather/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhcoveather_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhcovid_19/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhcpomdp/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhcram/2022-04-22T17:19:23-07:00https://www.jemoka.com/posts/kbhcrap_to_remember_for_ap_stats/2022-05-05T07:50:29-07:00https://www.jemoka.com/posts/kbhcrash_recovery/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhcredit/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcredit_suisse/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhcritical_value/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcriticism_of_the_new_deal/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcross_entropy_loss/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhcross_entropy_method/2023-10-31T10:20:58-07:00https://www.
jemoka.com/posts/kbhcross_product/2022-09-13T14:47:30-07:00https://www.jemoka.com/posts/kbhcrossfinder/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhcyro_em/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhcrystels/2023-01-09T21:50:29-08:00https://www.jemoka.com/posts/kbhcs_probability_index/2023-12-06T15:27:17-08:00https://www.jemoka.com/posts/kbhcs124/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhcultural_revolution/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhcurrent/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhcurse_of_dimensionality/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhcustomer_journey_map/2023-07-15T00:48:46+08:00https://www.jemoka.com/posts/kbhcynthia_lee/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhcyrodrgn/2023-03-26T09:24:46-07:00https://www.jemoka.com/posts/kbhd_see/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhdamped_heat_equation/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhdarkpool/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhdata_inference/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhdcgan/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhde_novo_biosensors/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhde_novo_protein_design/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhdeadlock/2024-02-07T14:20:33-08:00https://www.jemoka.com/posts/kbhdecision_making/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhdecision_making_index/2023-12-05T10:16:47-08:00https://www.jemoka.com/posts/kbhdecision_networks/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhdeep_approach/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdeep_learning/2024-02-13T23:59:46-08:00https://www.jemoka.com/posts/kbhdefensive_programming/2022-10-07T00:44:00-07:00https://www.jemoka.com/posts/kbhprobability_theory/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhdemand_paging/2024-03-06T14:46:11-08:00https
://www.jemoka.com/posts/kbhdemand_driven_theory/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdementiabank/2022-06-22T23:48:35-07:00https://www.jemoka.com/posts/kbhdementiabank_acoustics_brainstoming/2022-07-02T00:07:55-07:00https://www.jemoka.com/posts/kbhdementiabank_acoustics_project/2022-07-11T17:45:58-07:00https://www.jemoka.com/posts/kbhdemorgan_s_law/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhdepression/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhderivational_words/2022-08-27T17:10:35-07:00https://www.jemoka.com/posts/kbhderivatives/2022-10-09T12:18:43-07:00https://www.jemoka.com/posts/kbhderivative_pricing/2022-10-07T00:44:00-07:00https://www.jemoka.com/posts/kbhderived_variable/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhdeterminants/2023-03-20T09:03:31-07:00https://www.jemoka.com/posts/kbhdespot/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhdiabetes/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdiagonal_matrix/2023-03-20T23:37:00-07:00https://www.jemoka.com/posts/kbhdialogue/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhdialogue_state_architecture/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhdiffeomorphism/2023-06-01T16:59:54-07:00https://www.jemoka.com/posts/kbhchallenge_1/2022-09-01T22:41:54-07:00https://www.jemoka.com/posts/kbhgenerative_vs_discriminitive_classifier/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhdifference_equation/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhdiffeq_intro/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhodes_index/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhdifferential_privacy/2023-11-10T16:48:11-08:00https://www.jemoka.com/posts/kbhdiffusion_map/2023-03-26T09:24:46-07:00https://www.jemoka.com/posts/kbhdiffusion_models_for_laproscopic_surgeries/2024-02-26T11:20:05-08:00https://www.jemoka.com/posts/kbhdigital_origin_for_life/2023-03-15T20:36:33-07:00https://www.jemoka.com/posts/kbhdi
mension/2022-10-24T18:59:59-07:00https://www.jemoka.com/posts/kbhdirect_sampling/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhdirect_sum/2022-10-16T14:35:38-07:00https://www.jemoka.com/posts/kbhdirected_evolution/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhdirected_exploration/2023-11-30T00:00:07-08:00https://www.jemoka.com/posts/kbhdiscourse_features/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhdiscourse_completion_task/2022-06-25T11:00:09-07:00https://www.jemoka.com/posts/kbhdiscrete_distribution/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhdispatching/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhdissociating_language_and_thought/2023-11-16T12:01:38-08:00https://www.jemoka.com/posts/kbhdistributed_algorithum/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhdistributed_morphology/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhdistributive_harm/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhdistributivity/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhdivide/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhdivide_by_2pi/2023-02-01T18:22:50-08:00https://www.jemoka.com/posts/kbhdlight_1/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdocumentation_and_specification/2022-09-15T22:11:01-07:00https://www.jemoka.com/posts/kbhdopamine/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdopamine_circuitry_in_nf1/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdost/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhdot_product/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhdouble_envelope_problem/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhdouble_progressive_widening/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhdouble_slit_experiment/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhdpyd/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdriving/2022-07-25T16:59:07-07:00https://www.jemoka.com/p
osts/kbhdriving_practice/2022-07-30T15:46:23-07:00https://www.jemoka.com/posts/kbhdrug_resistance/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhdual_space/2023-06-21T16:34:47+08:00https://www.jemoka.com/posts/kbhdup15q/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhdynamic_programming/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhdynamic_rc_circuts/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhdyson_s_model_of_life/2023-03-15T20:36:33-07:00https://www.jemoka.com/posts/kbhe/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhe_pca/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbhe_coli/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbheb_emails/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhecon320_architecture/2022-08-25T11:12:44-07:00https://www.jemoka.com/posts/kbheconomy_of_credit/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhedit_distance_with_dp/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbheffability/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbheffects_of_the_new_deal/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbheigensolutions/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbheigenspace/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhekf/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhelastic_modulus/2022-10-02T13:54:53-07:00https://www.jemoka.com/posts/kbheleanor_roosevelt/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhelectric_field/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhelectric_potential_energy/2023-03-18T21:42:56-07:00https://www.jemoka.com/posts/kbhelectron/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhelie_adam/2023-03-08T14:19:50-08:00https://www.jemoka.com/posts/kbheliza/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhella_baker/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhelo_ratings/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhempty_bind
ing_site/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhenergy_homeostasis/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhenglish/2023-01-12T09:00:43-08:00https://www.jemoka.com/posts/kbhentangled/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhepigenetics/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhepitophs/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhequal_rights_act/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhetf/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbheuclidean_algorithm/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbheugene_wigner/2022-08-26T21:21:26-07:00https://www.jemoka.com/posts/kbheukareotyic_cell/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbheuler_bernoulli_theory/2022-09-27T13:55:17-07:00https://www.jemoka.com/posts/kbheuler_s_equation/2024-01-22T14:24:00-08:00https://www.jemoka.com/posts/kbheurope/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhevent/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhpgapset/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhoperators_on_complex_vector_spaces_have_an_eigenvalue/2023-03-09T10:10:56-08:00https://www.jemoka.com/posts/kbhexpectation/2023-11-10T16:48:11-08:00https://www.jemoka.com/posts/kbhexplicit_programming/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhexploration_and_exploitation/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhexponential_distribution/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhlists_over_fields/2022-08-30T14:49:40-07:00https://www.jemoka.com/posts/kbhfs_is_a_vector_space/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbhfactor/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhfactored_mdps/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhprocedural_vs_distributive_fairness/2023-12-13T13:36:16-08:00https://www.jemoka.com/posts/kbhfaraday_s_law/2023-04-19T11:49:49-07:00https://www.jemoka.com/posts/kbhfast_infor
med_bound/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhfederal_housing_administration/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhfederal_project_number_one/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhfield/2023-02-26T17:02:13-08:00https://www.jemoka.com/posts/kbhfile_payload_data/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhfilesystem/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhfilters/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhfilter_bank/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhfilterb/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhfilterba/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhnus_math570_finance_eigen/2022-12-26T17:01:44-08:00https://www.jemoka.com/posts/kbhfinancial_markets_intro/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhfinfty_is_a_vector_space_over_f/2022-09-14T14:30:08-07:00https://www.jemoka.com/posts/kbhfinite_difference_method/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhfinite_state_machine/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfinite_dimensional_vector_space/2022-10-24T15:30:43-07:00https://www.jemoka.com/tags/fireside/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhfireside_chats/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfireside/2023-10-31T00:37:51-07:00https://www.jemoka.com/posts/kbhfirst_order_odes/2024-01-17T11:19:33-08:00https://www.jemoka.com/posts/kbhsystems_of_odes/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhflexua/2022-11-06T21:05:19-08:00https://www.jemoka.com/posts/kbhfloyd_s_invariant_method/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhflux/2023-02-19T11:21:06-08:00https://www.jemoka.com/posts/kbhdementiabank_acoustics_project_proposal/2022-06-26T12:59:44-07:00https://www.jemoka.com/posts/kbhfork/2024-02-02T17:24:38-08:00https://www.jemoka.com/posts/kbhforward_search/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhfo
rward_forward_algorithm/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhfoundational_model/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis-1/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhfundational_models_of_interaction_analysis/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhlearn_more/2024-02-25T15:15:43-08:00https://www.jemoka.com/posts/kbhfourier_series/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhfourier_transform/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhfdr/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfreshwater_economists/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhfunction/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhfunctor/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhfundimental_investing/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhfundamental_theorem_of_arithmetic/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhfundamental_theorem_of_calculus/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhfundamental_theorem_of_linear_maps/2022-11-14T22:52:42-08:00https://www.jemoka.com/posts/kbhfusion/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhfv_pomcps/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhg_dice/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhgalactica/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhgalton_board/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhgarch/2022-12-03T23:08:07-08:00https://www.jemoka.com/posts/kbhgauss_law/2023-03-18T21:42:56-07:00https://www.jemoka.com/posts/kbhgaussian/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhgaussian_distribution/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhgaussian_elimination/2022-11-28T23:24:09-08:00https://www.jemoka.com/posts/kbhgdb/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhgeneral_inference/2023-10-27T18:02:19-07:00https://www.jem
oka.com/posts/kbhgeneral_relativity/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhgenerative_adversarial_network/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhgenerative_semantics/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhgenerativity/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhgeneric/2023-10-23T13:17:19-07:00https://www.jemoka.com/posts/kbhgenetic_algorithum/2022-06-24T00:34:53-07:00https://www.jemoka.com/posts/kbhgenetic_policy_search/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhgenslms-1/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhgenslms/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhgeometric_brownian_motion/2022-10-14T14:08:23-07:00https://www.jemoka.com/posts/kbhgeometric_random_variable/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbhgeometric_multplicity/2023-03-22T14:02:07-07:00https://www.jemoka.com/posts/kbhgetting_started_with_pytorch/2022-11-29T22:42:04-08:00https://www.jemoka.com/posts/kbhgolden_gate_bridge/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhgood_restaurants_in_the_bay_area/2023-06-15T11:57:34-07:00https://www.jemoka.com/posts/kbhgoogle_nerd_snipe/2022-10-01T17:52:29-07:00https://www.jemoka.com/posts/kbhgorup/2022-08-27T08:39:29-07:00https://www.jemoka.com/posts/kbhgram_schmidt/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhgrammar/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhgravitational_entanglement/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhgravitational_potential_energy/2023-03-05T20:26:53-08:00https://www.jemoka.com/posts/kbhgreat_depression/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhgreatest_common_divisor/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhgreedy_programming/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhgreenswing_april_checkin/2022-04-17T16:18:17-07:00https://www.jemoka.com/posts/kbhgregarious_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.
com/posts/kbhgrid_search/2022-06-26T10:25:12-07:00https://www.jemoka.com/posts/kbhgroup/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhgroup_theory_index/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhgrouping/2023-10-01T22:08:54-07:00https://www.jemoka.com/tags/guide/2022-11-29T22:42:04-08:00https://www.jemoka.com/posts/kbhguilded_age/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhkolobov_2018/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhguo_2021/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhgus/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbh5_fluoropyrimidine_maybe_inactivated_by_gut_microbiome/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhh4/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhhansen/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhhaplmmune/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhharmonic_mean/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhharms_in_classification/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhheap/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhheap_allocator/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhheat_equation/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhhello_internet/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhherber_hoover/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhheteroskedastic/2022-12-03T23:08:07-08:00https://www.jemoka.com/posts/kbhhidden_markov_model/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhhierarchical_multi_label_clsf_for_vaccine/2024-02-27T11:58:42-08:00https://www.jemoka.com/posts/kbhhigh_chemical_activity/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhhindsight_optimization/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhdecision_making_history/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhhistory_readings_index/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhhomestead_act/2
022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhhomogeneity/2022-11-14T23:45:36-08:00https://www.jemoka.com/posts/kbhhomset/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhhonore_s_statistic/2022-09-03T21:08:40-07:00https://www.jemoka.com/posts/kbhhoover_dam/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhhooverviles/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhhopfield_networks/2023-03-08T14:19:50-08:00https://www.jemoka.com/posts/kbhhoujun_liu/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhresearch_index/2024-01-28T23:50:19-08:00https://www.jemoka.com/posts/kbhhow_did_economists_get_it_so_wrong/2022-08-25T21:09:40-07:00https://www.jemoka.com/posts/kbhhsbi/2024-01-18T10:15:38-08:00https://www.jemoka.com/posts/kbhhsvi/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhhybplan/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhhypothesis_testing/2023-11-08T16:46:31-08:00https://www.jemoka.com/posts/kbhidentity/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhactivism_during_the_1970s/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhequivalence/2022-08-30T14:23:25-07:00https://www.jemoka.com/posts/kbhimmoral_v_structure/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhimmunogen_design-1/2023-03-27T10:54:24-07:00https://www.jemoka.com/posts/kbhimmunogen_design/2023-03-27T10:54:24-07:00https://www.jemoka.com/posts/kbhimperialism/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhinbox/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhsum_rule_of_counting/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhindependently_and_identically_distributed/2023-12-06T15:27:17-08:00https://www.jemoka.com/tags/index/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhindex_index/2022-06-22T22:22:13-07:00https://www.jemoka.com/posts/kbhinductors_in_circuits/2023-04-19T11:49:49-07:00https://www.jemoka.com/posts/kbhinference/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/k
bhinference_for_gaussian_models/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhinflectional_words/2022-08-27T17:10:35-07:00https://www.jemoka.com/posts/kbhinformation_retrival/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhinformation_theory/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhiu/2022-06-24T23:34:46-07:00https://www.jemoka.com/posts/kbhinitial_value_problems/2024-01-24T11:23:54-08:00https://www.jemoka.com/posts/kbhinjectivity/2022-11-14T23:45:36-08:00https://www.jemoka.com/posts/kbhinjectivity_implies_that_null_space_is_0/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhinner_product/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhinsertion_sort/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhinteger/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhintegrating_factor/2022-09-02T23:06:31-07:00https://www.jemoka.com/posts/risk_apetite_preso/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhinteraction_uncertainty/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhinteractive_agent/2023-11-09T12:00:18-08:00https://www.jemoka.com/posts/kbhintersession_2023/2023-01-03T11:19:33-08:00https://www.jemoka.com/posts/kbhinvariant_subspace/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhinverses/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhinverse_transform_sampling/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhinvertability/2023-01-12T09:12:11-08:00https://www.jemoka.com/posts/kbhinverted_index/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhiob/2022-12-23T14:17:31-08:00https://www.jemoka.com/posts/kbhiptv/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhirrational_number/2022-08-26T23:09:36-07:00https://www.jemoka.com/posts/kbhis_despot/2024-02-25T15:15:43-08:00https://www.jemoka.com/posts/kbhisomorphism/2023-01-12T10:11:08-08:00https://www.jemoka.com/posts/kbhistudio_meeting_notes/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhitem
_response_theory/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhito_intergral/2022-10-14T14:08:23-07:00https://www.jemoka.com/2024-03-18T22:44:04-07:00https://www.jemoka.com/posts/kbhjohn_corso/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhjoint_probability_distribution/2023-11-03T11:22:40-07:00https://www.jemoka.com/posts/kbhjokes/2023-04-08T09:22:37-07:00https://www.jemoka.com/posts/kbhjonell_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhjsj/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhka_chava/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhkepler_s_laws_of_planetary_motion/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhkernel_smoothing/2023-11-03T11:22:40-07:00https://www.jemoka.com/posts/kbhkeynsian_politics/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhkeys/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhkirchoff_s_laws/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhkl_divergence/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhkla/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhknowledge_editing/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhknowledgebase_testing/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhkolmogorov_smirnov_test/2022-07-12T15:56:06-07:00https://www.jemoka.com/posts/kbhl_infty/2023-10-19T10:22:45-07:00https://www.jemoka.com/posts/kbhlagrangian_mechanics/2022-10-25T00:35:07-07:00https://www.jemoka.com/posts/kbhlaguarta_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhlambek_calculus/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhlanguage/2023-03-03T15:36:02-08:00https://www.jemoka.com/posts/kbhlanguage_agents/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhlanguage_information_index/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhlaplae/2022-12-13T14:32:50-08:00https://www.jemoka.com/posts/kbhlaw_of_cosines/2022-09-13T14:47:30-07:00https://www.jemoka.com/posts/kbhlaw_
of_large_numbers/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhloo/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhlegacy_of_mccarthyism/2022-04-25T11:59:35-07:00https://www.jemoka.com/posts/kbhlemmatization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhlength_of_basis_doesn_t_depend_on_basis/2022-10-24T15:30:43-07:00https://www.jemoka.com/posts/kbhletsdrive/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhlevel_set/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhlexicalization_hypothesis/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhlexicon/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhliberal_center/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/kbhlikelyhood/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhlina/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhlindsay_2021/2022-06-24T23:34:46-07:00https://www.jemoka.com/posts/kbhlinear_algea/2023-03-02T10:38:30-08:00https://www.jemoka.com/posts/kbhlinear_algebra_errors-1/2023-02-13T09:02:49-08:00https://www.jemoka.com/posts/kbhlinear_algebra_errors/2023-05-18T15:04:47-07:00https://www.jemoka.com/posts/kbhlinear_algebra_index/2023-06-21T16:34:47+08:00https://www.jemoka.com/posts/kbhlinear_combination/2022-10-01T17:52:29-07:00https://www.jemoka.com/posts/kbhlinear_constant_coefficient_equation/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhlinear_dependence_lemma/2022-10-11T15:00:54-07:00https://www.jemoka.com/posts/kbhlinear_functional/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhlinear_gaussian_model/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhlinear_independence/2022-10-16T12:26:23-07:00https://www.jemoka.com/posts/kbhlinear_map/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhlinear_non_seperable_equation/2022-09-13T13:51:27-07:00https://www.jemoka.com/posts/kbhlinear_quadratic_regulator/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhode_linearilzation/2024-02-08T00:22
:51-08:00https://www.jemoka.com/posts/linearity_tests_preso/2022-10-30T00:25:57-07:00https://www.jemoka.com/posts/kbhlinked_files/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhliquidnet/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhlist/2022-08-30T14:33:23-07:00https://www.jemoka.com/posts/kbhlist_of_american_presidents/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhlittle_endian/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhliving/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhllama/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhdora/2024-02-13T15:44:04-08:00https://www.jemoka.com/posts/kbhllms_are_text_matchers/2023-09-11T17:06:48-07:00https://www.jemoka.com/posts/kbhlm_alignment/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhlocal_policy_search/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhlog_laws/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhlogan_s_team_check_in/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhlogistic_equations/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhlogistic_regression/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhloop_invariant/2022-12-23T11:30:23-08:00https://www.jemoka.com/posts/kbhlottery/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhltrdp/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhluccage/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhluz_2021/2022-06-26T10:25:12-07:00https://www.jemoka.com/posts/kbhlyrics_ping/2022-08-11T16:44:41-07:00https://www.jemoka.com/posts/kbhlyrics_laws/2023-10-31T10:20:58-07:00https://www.jemoka.com/posts/kbhlyu_2018/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhmachine_learning/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhmacroaverage/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhmagnetism/2023-04-19T11:17:16-07:00https://www.jemoka.com/posts/kbhmahajan_2021/2022-06-25T12:18:38-07:00https://www.jemoka.com/posts/k
bhmahatma_ghandi/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhmake_models_go_brrr/2023-10-23T13:17:19-07:00https://www.jemoka.com/posts/kbhmap_restriction_operator/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhmapreduce/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhmarkov_chain/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhmarkov_decision_process/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhmarkov_equivalence_classes/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhmarkov_game/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhmarkovian_process/2022-09-07T11:02:32-07:00https://www.jemoka.com/posts/kbhmartin_luther_king/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhmartinc_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhmartingale_model/2022-09-07T11:25:50-07:00https://www.jemoka.com/posts/kbhmath5_how/2024-02-27T13:50:55-08:00https://www.jemoka.com/posts/kbhmatricies/2023-03-20T23:37:00-07:00https://www.jemoka.com/posts/kbhmatrix_adjectives/2023-05-04T16:10:16-07:00https://www.jemoka.com/posts/kbhmatrix_exponentiation/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbhmatrix_multiplication/2023-01-11T00:22:15-08:00https://www.jemoka.com/posts/kbhmaximal_interval/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhmaximum_a_posteriori_estimate/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhmaximum_likelihood_parameter_learning/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhmaxq/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhmbp/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhmcvi/2024-01-30T20:06:03-08:00https://www.jemoka.com/posts/kbhmeal_replacement/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhmean_average_precision/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhmedblindtuner/2024-02-27T10:06:32-08:00https://www.jemoka.com/posts/kbhmedical_dialogue_generation/2024-02-27T10:22:48-08:00https://www.jem
oka.com/posts/kbhmedical_knowledge_extraction/2024-02-27T10:06:32-08:00https://www.jemoka.com/posts/kbhmeghanani_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhmel_scale/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhmemory/2023-10-11T11:20:31-07:00https://www.jemoka.com/posts/kbhmemory_allocation/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhmencius_philosophy/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhmesoscopic_region/2024-02-07T11:24:17-08:00https://www.jemoka.com/posts/kbhmetabolism/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhmethods/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhmfa_disfluency_measurement/2022-07-12T15:56:50-07:00https://www.jemoka.com/posts/kbhmfa_performance_statistics/2022-08-17T22:38:03-07:00https://www.jemoka.com/posts/kbhmia_tavares/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmicah_brown/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmilton_freedman/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhmmse/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhminimum_edit_distance/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhminimum_spanning_tree/2022-05-05T10:41:32-07:00https://www.jemoka.com/posts/kbhminimum_user_base_requirements_for_coveather/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhminimum_wage/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhmisc_financial_market_questions/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhml_drug_discovery/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhmlib/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhmlk_and_malcom_x_reading/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhmodal/2023-09-19T19:47:37-07:00https://www.jemoka.com/posts/kbhmodalization/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhmodel_bae/2023-11-06T16:42:28-08:00https://www.jemoka.com/posts/kbhmodel_evaluation/2024-01-20T11:35:00-08:00https://www.je
moka.com/posts/kbhmodel_based_reinforcement_learning/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhmodel_free_reinforcement_learning/2024-02-08T10:17:20-08:00https://www.jemoka.com/posts/kbhmodeling/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhmodern_os/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhmodular_arithmetic/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhmolecular_drug_resistance/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhmomdp/2024-02-25T15:15:43-08:00https://www.jemoka.com/posts/kbhmonetarist_theory/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhmonitor_pattern/2024-02-14T14:21:39-08:00https://www.jemoka.com/posts/kbhmonte_carlo_tree_search/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhmontomery_bus_boycott/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhmorpheme/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhmorphism/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhmorphological_parsing/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhmulti_lstm_for_clinical_report_generation/2024-02-27T11:58:42-08:00https://www.jemoka.com/posts/kbhmultiagent_reasoning/2023-11-30T10:20:10-08:00https://www.jemoka.com/posts/kbhmultimodal_ai_for_real_world_signals/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhmultinomial_coefficient/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhmultiple_instance_learning/2024-02-26T12:02:19-08:00https://www.jemoka.com/posts/kbhmultiplicative_identity/2022-08-26T15:07:17-07:00https://www.jemoka.com/posts/kbhmultiplying/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhmultiprocessing/2024-02-16T14:21:52-08:00https://www.jemoka.com/posts/kbhmultithreading/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhmutual_information/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhmutually_exclusive/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhmy_day/2022-04-16T23:31:57-07:00https://ww
w.jemoka.com/posts/kbhn_grams/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhnacc/2022-06-16T20:06:51-07:00https://www.jemoka.com/posts/kbhnaive_bayes/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhnational_banking_act/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhnatural_numbers/2022-08-26T20:36:54-07:00https://www.jemoka.com/posts/kbhnatural_semantic_metalanguage/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhnatural_transformations/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhnbbo/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhnebula/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhneedfinding/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhnegative_binomial_distribution/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhneoclassical_economics/2022-08-25T20:58:47-07:00https://www.jemoka.com/posts/kbhner_tagging/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhneural_networks/2024-02-13T23:59:46-08:00https://www.jemoka.com/posts/kbhneuroscience_and_ai/2023-03-20T14:43:38-07:00https://www.jemoka.com/posts/kbhneutral_stability/2022-11-26T00:54:35-08:00https://www.jemoka.com/posts/kbhnew_american_south/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhnew_deal/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhnew_right/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/kbhnewton_s_first_law_of_motion/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhnewton_s_law_of_cooling/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhnewton_s_method/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhnlp/2023-12-04T16:51:01-08:00https://www.jemoka.com/posts/kbhnlp_semantics_timeline/2024-03-16T23:10:28-07:00https://www.jemoka.com/posts/kbhchomsky/2022-08-23T11:06:27-07:00https://www.jemoka.com/posts/kbhnon_homogeneous_linear_differential_equation/2024-02-07T11:24:17-08:00https://www.jemoka.com/posts/kbhnon_intersecting_graphs/2022-10-14T14:08:23-07:00
https://www.jemoka.com/posts/kbhnon_linear_ode/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhnon_linear_systems/2022-11-04T15:10:26-07:00https://www.jemoka.com/posts/kbhnon_parametric_learning/2023-10-10T10:20:50-07:00https://www.jemoka.com/posts/kbhnon_pathological_matricies/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhnonsingular_matricies/2022-09-02T21:42:58-07:00https://www.jemoka.com/posts/kbhnonviolence_movement/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhnorm/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhnormal_distribution/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhnormal_random_variable/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhnorman_an_epic_tale_in_n_parts/2022-05-10T21:05:54-07:00https://www.jemoka.com/posts/kbhnsm_proposal/2022-08-28T23:06:33-07:00https://www.jemoka.com/tags/ntj/2022-09-12T12:54:42-07:00https://www.jemoka.com/posts/kbhnueva_courses_index/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhnull_space/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhnumber/2022-08-26T23:09:36-07:00https://www.jemoka.com/posts/kbhnumerical_approximation_schemes/2024-02-16T11:20:46-08:00https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations-1/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhnumerical_cantileaver_simulations/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhnus_econ320_capm_problem_set/2022-10-29T16:57:58-07:00https://www.jemoka.com/posts/kbhnus_econ320_currency_arbitrage/2022-12-08T21:46:35-08:00https://www.jemoka.com/posts/kbhnus_econ320_risk_appetite/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhnus_econ320_linearity_tests/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhnus_econ320_stochastic_integration/2022-09-26T00:20:34-07:00https://www.jemoka.com/posts/kbhnus_econ320_volatility_hedging/2022-10-16T12:26:23-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_1/2022-12-11T23:08:07-08:00https://www.jemok
a.com/posts/kbhnus_eng401_gift_0/2022-12-11T23:06:39-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_5/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_film_analysis/2023-03-14T13:43:38-07:00https://www.jemoka.com/posts/kbhnus_eng401_film_analysis_outline/2023-03-13T19:10:08-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_2/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_utility/2022-12-11T23:10:58-08:00https://www.jemoka.com/posts/kbhi_tituba_essay_planning/2022-09-19T12:28:12-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_6/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_gift_3/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_eng401_racialization_outline/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhnus_eng401_gift_4/2022-12-11T23:05:58-08:00https://www.jemoka.com/posts/kbhnus_math530_1_c_proof_preso/2022-10-07T00:44:00-07:00https://www.jemoka.com/posts/kbhnus_math530_2_c_problem_17/2022-11-04T14:01:46-07:00https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20-1/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhnus_math530_3_b_problem_20/2022-11-29T22:42:04-08:00https://www.jemoka.com/posts/kbhnus_math530_5_a_and_discussion/2023-02-15T08:49:38-08:00https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_14/2023-02-27T10:18:22-08:00https://www.jemoka.com/posts/kbhnus_math530_5_a_problem_35_36/2023-02-27T22:55:40-08:00https://www.jemoka.com/posts/kbhnus_math530_5_c_problem_7/2023-04-04T11:34:33-07:00https://www.jemoka.com/posts/kbhnus_math530_changing_bases/2023-03-16T13:58:47-07:00https://www.jemoka.com/posts/kbhnus_math530_geometric_intepretations/2022-09-07T00:15:39-07:00https://www.jemoka.com/posts/kbhnus_math530_geometric_multiplicity/2023-04-08T09:22:37-07:00https://www.jemoka.com/posts/kbhnus_math530_homework_index/2023-05-04T16:10:16-07:00https://www.jemoka.com/posts/kbhnus_math530_linear_vehicles/2022-09-09T23:36:04-07:00https://www.jemoka.com/
posts/kbhnus_math530_matrix_adjectives/2023-05-10T21:41:46-07:00https://www.jemoka.com/posts/kbhnus_math530_plane_and_1_b/2022-09-15T22:11:01-07:00https://www.jemoka.com/posts/kbhnus_math530_similar_to_diagonal/2023-05-12T00:42:25-07:00https://www.jemoka.com/posts/kbhnus_math530_solving_systems/2022-09-07T00:15:39-07:00https://www.jemoka.com/posts/kbhnus_math530_some_6_a_problems/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhnus_math530_some_matrix_manipulation/2022-08-30T21:21:17-07:00https://www.jemoka.com/posts/kbhnus_math570_circuts/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhnus_math570_finance/2022-12-17T22:39:27-08:00https://www.jemoka.com/posts/kbhnus_math570_problem_set_1/2022-09-17T16:42:48-07:00https://www.jemoka.com/posts/kbhnus_math570_problem_set_2/2022-10-09T19:23:18-07:00https://www.jemoka.com/posts/kbhnus_math570_research_question_1/2022-09-17T16:42:48-07:00https://www.jemoka.com/posts/kbhnus_math570_supply_demand/2022-11-27T22:32:43-08:00https://www.jemoka.com/posts/kbhnus_mus150_critical_listening/2023-01-09T21:50:29-08:00https://www.jemoka.com/posts/kbhnus_span502_plastico_biodegrable/2023-03-14T10:16:14-07:00https://www.jemoka.com/posts/kbhnus_span502_tarea_2/2022-08-30T21:21:17-07:00https://www.jemoka.com/posts/kbhnus_span502_tarea_4/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhnus_span502_vocab/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhobjects/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhobserve_act_cycle/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhof_our_spiritual_strivings/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhohm_s_law/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhproductivity/2024-02-05T11:23:43-08:00https://www.jemoka.com/posts/kbhone_shot_deformation/2022-12-12T15:20:22-08:00https://www.jemoka.com/posts/kbhonline_m/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhonline_planning/2024-01-09T12:16:02-08:00https://www.jemoka.com
/posts/kbhonline_pomdp_methods/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhopen_voice_brain_model/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhopensmile/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhos_index/2024-03-13T13:57:37-07:00https://www.jemoka.com/posts/kbhoperation/2022-08-26T15:07:17-07:00https://www.jemoka.com/posts/kbhoperation_linebacker/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhoperator/2023-03-03T21:04:56-08:00https://www.jemoka.com/posts/kbhopsins/2023-03-08T14:16:45-08:00https://www.jemoka.com/posts/kbhoptimal_exploration/2023-11-01T16:45:09-07:00https://www.jemoka.com/posts/kbhoptimal_stopping_problem/2023-11-02T10:20:22-07:00https://www.jemoka.com/posts/kbhoptimization/2023-12-13T13:36:16-08:00https://www.jemoka.com/posts/kbhoptimizing_spark/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhoptions/2022-10-09T19:23:18-07:00https://www.jemoka.com/posts/kbhoption/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhoptogenetics/2023-03-08T14:16:45-08:00https://www.jemoka.com/posts/kbhoral_lexical_retrival/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhordinary_differential_equations/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhrise_of_american_conservatism/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhorthogonal/2023-04-08T23:00:43-07:00https://www.jemoka.com/posts/kbhorthonormal/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhorthonormal_basis/2023-04-26T00:03:18-07:00https://www.jemoka.com/posts/kbhotc_markets/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhoutcome_uncertainty/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhoverfitting/2023-08-11T11:57:33-07:00https://www.jemoka.com/posts/kbhpolynomial_operator/2023-03-03T21:04:56-08:00https://www.jemoka.com/posts/kbhpace/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhpacific_railroad_act/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhpagin_q/2024-03-02T17:5
3:00-08:00https://www.jemoka.com/posts/kbhpapyrus/2023-03-03T22:44:51-08:00https://www.jemoka.com/posts/kbhparameter/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhparameter_learning/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhparkingson_s_classification_with_eeg/2024-02-27T10:22:48-08:00https://www.jemoka.com/posts/kbhparry/2024-03-01T11:27:29-08:00https://www.jemoka.com/posts/kbhpartial_differential_equations/2024-01-08T11:23:11-08:00https://www.jemoka.com/posts/kbhpartially_observable_markov_decision_process/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhpartially_observable_markov_game/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhparvin_2020/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhpatient_risk_prediction/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhpcp_april_checkin/2022-04-17T16:18:17-07:00https://www.jemoka.com/posts/kbhpeft/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhpegasus/2024-02-26T13:48:45-08:00https://www.jemoka.com/posts/kbhpermits_model/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhpermittivity_of_free_space/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhpermutation/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhperplexity/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhpet/2023-02-27T10:18:04-08:00https://www.jemoka.com/posts/kbhpetri_dish/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhpga/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhphase_line/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhphysical_qubits/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhphysics/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhpineau_2006/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhpipe/2024-02-02T14:21:08-08:00https://www.jemoka.com/posts/kbhpitch_a_project/2022-09-01T22:41:54-07:00https://www.jemoka.com/posts/kbhpsc/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhpkm/2022
-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhplanning/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhpoint_selection/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhpoint_based_value_iteration/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhpointer/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhprobability_of_k_in_x_time/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhpolicy/2023-10-19T10:22:45-07:00https://www.jemoka.com/posts/kbhpolicy_evaluation/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhpolicy_gradient/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhpolicy_iteration/2023-11-01T16:45:09-07:00https://www.jemoka.com/posts/kbhpolicy_optimization/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhpolio/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhpolynomial/2023-03-03T21:04:56-08:00https://www.jemoka.com/posts/kbhpomcp/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhpomcpow/2024-01-30T10:40:31-08:00https://www.jemoka.com/posts/kbhpomdp_approximation/2024-01-18T10:15:38-08:00https://www.jemoka.com/posts/kbhpomdp_lite/2024-02-25T15:46:27-08:00https://www.jemoka.com/posts/kbhpomdps_index/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhpos_tagging/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/2024-03-18T22:44:04-07:00https://www.jemoka.com/posts/kbhpower_math/2022-08-27T09:54:42-07:00https://www.jemoka.com/posts/kbhpower_series_o/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhpower_series/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhpower_utility/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhpreemption/2024-02-27T09:50:26-08:00https://www.jemoka.com/posts/kbhpretraining_data/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhpretraining_long_transformers/2023-11-02T11:50:25-07:00https://www.jemoka.com/posts/kbhprice/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhprime/2023-09-27T23:21:11-07:00https://ww
w.jemoka.com/posts/kbhprime_factorization/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhprinciple_of_induction/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhc_basic_operations/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhprivacy/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhprobability/2023-10-06T16:57:26-07:00https://www.jemoka.com/posts/kbhprobability_distributions/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhprobability_mass_function/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhprobablistic_model/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhproblem_with_gravity/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhprocess_control_block/2024-02-21T14:22:23-08:00https://www.jemoka.com/posts/kbhproduct_of_linear_maps/2022-11-08T15:26:32-08:00https://www.jemoka.com/posts/kbhproduct_of_vector_spaces/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhproduct_summation_map/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhproduction_index/2023-10-23T10:30:41-07:00https://www.jemoka.com/posts/kbhproductivity_starter_pack/2023-03-12T10:30:03-07:00https://www.jemoka.com/posts/kbhproducts_and_quotients_the_intuition/2023-01-23T09:43:58-08:00https://www.jemoka.com/posts/kbhprof_xin_liu/2022-06-13T22:05:51-07:00https://www.jemoka.com/posts/kbhloop_of_thoughts/2024-03-14T14:31:16-07:00https://www.jemoka.com/posts/kbhproject80/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhproject80_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhprojects/2024-01-30T16:52:00-08:00https://www.jemoka.com/posts/kbhprokateotic_cell/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhproof/2022-08-26T20:31:40-07:00https://www.jemoka.com/posts/kbhproof_by_induction/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhproof_design_patterns-1/2022-09-27T22:11:29-07:00https://www.jemoka.com/posts/kbhproof_design_patterns/2023-04-08T23:00:43-07:00https://www.jemoka.com/pos
ts/kbhproof_of_work/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhpropaganda/2022-04-17T17:42:52-07:00https://www.jemoka.com/posts/kbhprotease/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhprotected_group/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhprotons/2023-02-12T15:19:43-08:00https://www.jemoka.com/posts/kbhprototyping/2022-09-07T13:20:23-07:00https://www.jemoka.com/posts/kbhpsc_big_data_workshop_july_2023/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhsu_math53_pset_1/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhpset_2/2024-01-19T10:11:39-08:00https://www.jemoka.com/posts/kbhpset_3/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhpset_4/2024-02-02T17:24:38-08:00https://www.jemoka.com/posts/kbhpset_5/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhpset_6/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhpset_7/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhpset_8/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhpset_9/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhpsycoacoustics/2023-01-03T11:19:33-08:00https://www.jemoka.com/posts/kbhptsd/2023-05-10T21:41:46-07:00https://www.jemoka.com/posts/kbhpwr_notes/2024-01-17T12:24:19-08:00https://www.jemoka.com/posts/kbhpwr1_rba_planning/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhpwr1_rhetorical_analysis_planning/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhpwr1_texts_in_conversation/2024-02-09T11:20:17-08:00https://www.jemoka.com/posts/kbhqmdp/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhquality_of_service_harm/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhcorrelation/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhquantum_group_project/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbhquantum_information_theory/2023-03-05T23:17:11-08:00https://www.jemoka.com/posts/kbhquantum_supremecy/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhquantu
m_theory/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhquantumnlp/2023-02-25T23:04:37-08:00https://www.jemoka.com/posts/kbhqubits/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhquotient_group/2022-12-13T14:48:13-08:00https://www.jemoka.com/posts/kbhquotient_map/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhquotient_operator/2023-02-16T10:06:41-08:00https://www.jemoka.com/posts/kbhquotient_space/2023-01-29T10:52:51-08:00https://www.jemoka.com/posts/kbhr_n_abstract/2022-11-09T22:10:18-08:00https://www.jemoka.com/posts/kbhr_n_meeting_with_angi/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhraising_e_to_a_matrix/2022-10-09T12:18:43-07:00https://www.jemoka.com/posts/kbhrandom/2022-11-06T21:05:19-08:00https://www.jemoka.com/posts/kbhrandom_variables/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhrandom_walk/2022-09-07T11:14:44-07:00https://www.jemoka.com/posts/kbhrandom_wol/2022-09-07T11:02:32-07:00https://www.jemoka.com/posts/kbhrandomized_algorithum/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrandomized_pbvi/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhperseus/2024-01-27T20:26:16-08:00https://www.jemoka.com/posts/kbhrange/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhranked_information_retrieval/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhrational_number/2022-08-26T14:58:53-07:00https://www.jemoka.com/posts/kbhrational_preference/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhreal_number/2022-08-26T14:58:53-07:00https://www.jemoka.com/posts/kbhreceeding_horizon/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhrecommender_system/2024-03-02T17:53:00-08:00https://www.jemoka.com/posts/kbhreduce/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhreductive_paraphrase/2022-08-28T11:55:03-07:00https://www.jemoka.com/posts/kbhresearch_at_nueva_notes_06_09_2022/2022-06-12T22:29:49-07:00https://www.jemoka.com/posts/kbhregex/2024-02-28T10:28:39-08:00https://w
ww.jemoka.com/posts/kbhregulating_zinc_uptake/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhreinforcement_learning/2023-11-02T17:00:07-07:00https://www.jemoka.com/posts/kbhrejection_sampling/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhrelative_probability/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhrelaxation_algorithums/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhreplication/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhreplier_abstract/2022-06-22T23:25:11-07:00https://www.jemoka.com/posts/kbhrepresentation_learning/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhrepresenting_large_computation/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhrequirements_analysis/2023-07-15T00:48:46+08:00https://www.jemoka.com/posts/kbhresearch/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhresearch_tips/2024-02-22T10:15:57-08:00https://www.jemoka.com/posts/kbhrdd/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhresistors/2023-03-21T22:17:34-07:00https://www.jemoka.com/posts/kbhreticle/2022-05-27T14:08:35-07:00https://www.jemoka.com/posts/kbhrfdiffusion/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrho_pomdps/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhrichard_nixon/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrichard_nixon_s_foreign_policy/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrichard_nixon_s_treatment_against_the_vietnam_war/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhrick_wallace/2022-05-27T14:08:35-07:00https://www.jemoka.com/posts/kbhring/2023-03-09T09:56:26-08:00https://www.jemoka.com/posts/kbhexpected_utility_of_wealth/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhrobotics_assisted_directed_evolution/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhrollout_with_lookahead/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhronald_raegan/2022-06-07T13:38:12-07:00https://www.jemoka.com/posts/k
bhrosa_parks/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhroseta/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrosetta/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrosettafold2/2023-03-27T10:07:03-07:00https://www.jemoka.com/posts/kbhrossing_1990/2022-09-20T23:31:24-07:00https://www.jemoka.com/posts/kbhrotational_energy/2022-04-20T22:23:58-07:00https://www.jemoka.com/posts/kbhrural_electrification_administration/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhrussel_howard/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhsadeghian_2021/2022-09-12T12:54:42-07:00https://www.jemoka.com/posts/kbhsaic_speech_anonomyzation/2024-02-27T14:18:25-08:00https://www.jemoka.com/posts/kbhsalus_april_checkin/2022-05-02T10:28:52-07:00https://www.jemoka.com/posts/kbhsample_space/2023-10-03T10:22:01-07:00https://www.jemoka.com/posts/kbhsars_cov2/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhsars_cov2_structural_analysis/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhsarsa_lambda/2024-02-26T10:36:33-08:00https://www.jemoka.com/posts/kbhsarsop/2024-01-27T23:59:53-08:00https://www.jemoka.com/posts/kbhscalander_notes-1/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhscalander_notes-2/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhscalander_notes/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhscalar_multiplication/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhscheduling/2024-02-25T10:45:33-08:00https://www.jemoka.com/search/2023-04-02T23:20:11-07:000.1https://www.jemoka.com/posts/kbhsecond_moment_of_area/2022-10-02T13:54:53-07:00https://www.jemoka.com/posts/kbhsecond_order_differential_equations/2022-10-09T12:23:02-07:00https://www.jemoka.com/posts/kbhsecond_order_linear_differential_equation/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhselective_service_system/2022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhsemantic_accountability/2023-02-25T23:04:37-08:00h
ttps://www.jemoka.com/posts/kbhsemantic_health_risk_prediction/2024-02-27T13:50:55-08:00https://www.jemoka.com/posts/kbhsemantic_primes/2022-08-28T11:55:03-07:00https://www.jemoka.com/posts/kbhsemantic_verbal_fluency/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhsemiconductor/2022-05-29T19:48:27-07:00https://www.jemoka.com/posts/kbhsense/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhsentence_segmentation/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhseperable_diffequ/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhserver_clients/2023-02-26T20:31:50-08:00https://www.jemoka.com/posts/kbhset/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhsets/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhshah_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhshort_selling/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhsigmoid/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhsimple_differential_equations/2022-09-06T13:01:21-07:00https://www.jemoka.com/posts/kbhsimple_game/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhsingle_party_control/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhsingular_value_decomposition/2023-05-14T00:00:30-07:00https://www.jemoka.com/posts/kbhsir_model/2024-02-19T14:29:19-08:00https://www.jemoka.com/posts/kbhslopes/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhsmith/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhsmooth_function/2022-10-24T23:46:49-07:00https://www.jemoka.com/posts/kbhsocial_network/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsocial_security_administration/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhsoftware_design_and_architecture_patterns/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhsoftware_dev_starter_pack/2023-02-01T18:22:50-08:00https://www.jemoka.com/posts/kbhsoftware_development_methodologies/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhsoftware_engineerin
g/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhsolving_pdes_via_fourier_transform/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsolving_systems/2022-09-09T13:01:35-07:00https://www.jemoka.com/posts/kbhsongs_that_need_lyrics/2022-08-11T16:44:41-07:00https://www.jemoka.com/posts/kbhsorting_functions/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsound/2023-01-03T11:19:33-08:00https://www.jemoka.com/posts/kbhsoviet_perspective_on_cold_war/2022-05-05T07:50:29-07:00https://www.jemoka.com/posts/kbhspaan_2005/2024-01-17T00:44:47-08:00https://www.jemoka.com/posts/kbhspan/2022-10-11T14:20:10-07:00https://www.jemoka.com/posts/kbhspanish/2023-01-12T09:00:43-08:00https://www.jemoka.com/posts/kbhspark/2023-07-31T14:47:03-04:00https://www.jemoka.com/posts/kbhsparse_sampling/2023-11-30T00:00:07-08:00https://www.jemoka.com/posts/kbhspeech_feature_extraction/2023-10-20T16:51:21-07:00https://www.jemoka.com/posts/kbhspeech_processing_index/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhspinal_tap/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhstability/2024-01-17T11:22:00-08:00https://www.jemoka.com/posts/kbhstack/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhstack_trace/2022-04-21T10:53:09-07:00https://www.jemoka.com/posts/kbhstandard_error/2022-04-20T23:19:11-07:00https://www.jemoka.com/posts/kbhstanford/2023-03-17T19:18:57-07:00https://www.jemoka.com/posts/kbhstanford_factoids_index/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhstanford_courses_index/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhstanford_ug_research_program/2023-09-20T18:14:07-07:00https://www.jemoka.com/posts/kbhstarting_with_why_the_knowledgebase/2022-04-17T16:54:46-07:00https://www.jemoka.com/posts/kbhstartup/2023-09-12T21:52:20-07:00https://www.jemoka.com/posts/kbhstationary_action_principle/2022-10-24T23:46:49-07:00https://www.jemoka.com/posts/kbhstastistic/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhstepwise_evo
lution/2023-03-11T21:22:31-08:00https://www.jemoka.com/posts/kbhstochastic_discount_factor/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhstochastic_gradient_descent/2024-02-28T10:28:39-08:00https://www.jemoka.com/posts/kbhstochat/2024-01-27T14:33:57-08:00https://www.jemoka.com/posts/kbhstock_indicies/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhstock_issues_debate/2022-09-24T16:25:49-07:00https://www.jemoka.com/posts/kbhstock_market_survey/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhstrain/2022-09-05T22:37:28-07:00https://www.jemoka.com/posts/kbhstrategies_to_revise_an_essay/2024-01-26T11:24:29-08:00https://www.jemoka.com/posts/kbhstress/2022-09-05T22:37:28-07:00https://www.jemoka.com/posts/kbhstring/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhstrips_style_planning/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhstrong_free_will/2022-05-27T14:08:35-07:00https://www.jemoka.com/posts/kbhstrong_induction/2023-03-16T09:57:46-07:00https://www.jemoka.com/posts/kbhstructure_learning/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhstructure_of_covid_replication/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhsu_cs107_dec012023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs107_midterm_sheet/2023-10-29T17:56:04-07:00https://www.jemoka.com/posts/kbhsu_cs107_nov102023/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhsu_cs107_nov132023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs107_nov272023/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhsu_cs107_oct022023/2023-10-02T11:21:26-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct032023/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct042023/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct062023/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct092023/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct112023/2023-10-11T11:20:31-07:
00https://www.jemoka.com/posts/kbhsu_cs107_oct132023/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct162023/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct182023/2023-10-23T10:30:41-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct2023/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct232023/2023-10-23T13:17:19-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct252023/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhsu_cs107_oct272023/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsu_cs107_sep272023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_cs107_sep292023/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhsu_cs109_dec012023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs109_dec042023/2023-12-05T10:05:22-08:00https://www.jemoka.com/posts/kbhsu_cs109_midterm/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhsu_cs109_midterm_sheet/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhsu_cs109_nov012023/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhsu_cs109_nov032023/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov062023/2023-11-06T16:42:28-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov082023/2023-11-08T16:46:31-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov102023/2023-11-10T16:48:11-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov132023/2023-11-15T11:23:18-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov152023/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov172023/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov272023/2023-11-27T17:00:44-08:00https://www.jemoka.com/posts/kbhsu_cs109_nov292023/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhsu_cs109_oct022023/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct042023/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct062023/2023-10-09T16:49:53-07:00https://www.jemoka.com
/posts/kbhsu_cs109_oct092023/2023-10-09T16:49:53-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct112023/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhsu_109_oct132023/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct162023/2023-10-17T00:13:23-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct182023/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct202023/2023-10-23T16:49:33-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct232023/2023-10-23T16:49:49-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct252023/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhsu_cs109_oct272023/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsu_cs109_sep272023/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhsu_cs109_sep292023/2023-10-01T22:08:54-07:00https://www.jemoka.com/posts/kbhsu_cs111_final_sheet/2024-03-18T22:44:04-07:00https://www.jemoka.com/posts/kbhsu_cs111_outline/2024-03-13T14:06:28-07:00https://www.jemoka.com/posts/kbhsu_cs238_nov022023/2023-11-02T10:20:22-07:00https://www.jemoka.com/posts/kbhsu_cs238_nov092023/2023-11-09T12:00:18-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov142023/2023-11-14T13:34:03-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov162023/2023-11-27T14:51:05-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov282023/2023-11-29T11:20:39-08:00https://www.jemoka.com/posts/kbhsu_cs238_nov302023/2023-11-30T10:20:10-08:00https://www.jemoka.com/posts/kbhsu_cs238_oct032023/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct052023/2023-10-08T23:37:58-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct102023/2023-10-10T10:20:50-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct122023/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct172023/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct192023/2023-10-23T10:30:41-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct242023/2023-10-24T14:11:17-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct2620
23/2023-10-27T18:02:19-07:00https://www.jemoka.com/posts/kbhsu_cs238_oct212023/2023-11-01T13:24:02-07:00https://www.jemoka.com/posts/kbhsu_cs238_q0q3/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhsu_cs238_sep262023/2023-11-16T10:19:53-08:00https://www.jemoka.com/posts/kbhsu_cs238_sep272023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_cs238_sep282023/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhsu_cs239_jan092023/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhsu_cs239_midterm_1/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhsu_math109_problem_set_1/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhsu_math109_sep272023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_math109_sep272023_exp/2023-09-27T23:30:36-07:00https://www.jemoka.com/posts/kbhsu_math109_sep292023/2023-09-27T23:21:11-07:00https://www.jemoka.com/posts/kbhsu_math53_feb022024/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhsu_math53_feb052024/2024-02-05T11:23:43-08:00https://www.jemoka.com/posts/kbhsu_math53_feb072024/2024-02-09T11:20:17-08:00https://www.jemoka.com/posts/kbhsu_math53_feb092024/2024-02-09T11:20:17-08:00https://www.jemoka.com/posts/kbhsu_math53_feb122024/2024-02-14T11:23:36-08:00https://www.jemoka.com/posts/kbhsu_math53_feb142024/2024-02-16T11:20:46-08:00https://www.jemoka.com/posts/kbhsu_math53_feb162024/2024-02-16T11:20:46-08:00https://www.jemoka.com/posts/kbhsu_math53_feb212024/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhsu_math53_feb232024/2024-02-25T10:45:33-08:00https://www.jemoka.com/posts/kbhsu_math53_feb252024/2024-02-28T11:36:19-08:00https://www.jemoka.com/posts/kbhsu_math53_feb282024/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhsu_math53_homework_index/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhsu_math53_jan082023/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhsu_math53_jan102023/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhsu_math53_
jan122023/2024-01-12T11:21:05-08:00https://www.jemoka.com/posts/kbhsu_math53_jan172024/2024-01-17T11:19:33-08:00https://www.jemoka.com/posts/kbhsu_math53_jan192023/2024-01-20T11:35:00-08:00https://www.jemoka.com/posts/kbhsu_math53_jan202024/2024-01-24T11:23:54-08:00https://www.jemoka.com/posts/kbhsu_math53_jan262023/2024-01-26T14:19:53-08:00https://www.jemoka.com/posts/kbhsu_math53_jan292024/2024-01-29T11:26:06-08:00https://www.jemoka.com/posts/kbhsu_math53_jan312024/2024-02-02T11:32:31-08:00https://www.jemoka.com/posts/kbhsu_math53_mar012024/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhsu_math53_mar042024/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsu_math53_mar062024/2024-03-06T11:19:33-08:00https://www.jemoka.com/posts/kbhsu_math53_mar082024/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhsu_math53_mar112024/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhsu_math53_midterm_sheet/2024-01-09T12:16:02-08:00https://www.jemoka.com/posts/kbhsu_math53_practice_1_problem_4/2024-03-18T22:44:04-07:00https://www.jemoka.com/posts/kbhsu_math53_problem_session/2024-03-13T11:25:11-07:00https://www.jemoka.com/posts/kbhsubgroup/2022-12-13T14:32:50-08:00https://www.jemoka.com/posts/kbhsubspace/2022-10-01T23:44:57-07:00https://www.jemoka.com/posts/kbhsubtrait_envelope/2023-03-26T10:54:47-07:00https://www.jemoka.com/posts/kbhsum_of_subsets/2022-10-24T21:49:16-07:00https://www.jemoka.com/posts/kbhsum_of_two_dice/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhsum_of_vector_and_subspace/2023-01-21T00:33:30-08:00https://www.jemoka.com/posts/kbhspersite/2023-03-25T16:40:20-07:00https://www.jemoka.com/posts/kbhsupervised_learning/2023-09-28T10:20:46-07:00https://www.jemoka.com/posts/kbhsupport/2023-10-16T16:34:18-07:00https://www.jemoka.com/posts/kbhsurjectivity/2022-11-26T00:54:35-08:00https://www.jemoka.com/posts/kbhsyscalls/2024-02-03T23:42:26-08:00https://www.jemoka.com/posts/kbht_twiddle/2023-01-21T00:35:25-08:00https://www.jemoka.
com/posts/kbht_statistics/2022-04-17T22:11:05-07:00https://www.jemoka.com/posts/kbht_test/2022-04-21T10:53:09-07:00https://www.jemoka.com/posts/kbhraising_operators_to_powers/2023-03-03T21:04:56-08:00https://www.jemoka.com/tags/2024-02-15T12:02:30-08:00https://www.jemoka.com/posts/kbhtalk_contacts/2023-09-28T16:07:11-07:00https://www.jemoka.com/posts/kbhtalkbank/2023-06-13T10:29:05-07:00https://www.jemoka.com/posts/kbhtalkbank_pipeline_project/2022-07-30T15:46:23-07:00https://www.jemoka.com/posts/kbhtariffs/2022-04-16T23:31:57-07:00https://www.jemoka.com/posts/kbhtask_estimation/2022-09-13T13:51:27-07:00https://www.jemoka.com/posts/kbhtaxicab_norm/2023-04-25T22:54:49-07:00https://www.jemoka.com/posts/kbhtaylor_se/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhtechnology_baboon_jemoka_com/2023-06-06T21:50:18-07:00https://www.jemoka.com/posts/kbhtechnology_balloon_jemoka_com/2023-02-02T21:27:48-08:00https://www.jemoka.com/posts/kbhtechnology_bassoon_jemoka_com/2023-01-29T11:18:03-08:00https://www.jemoka.com/posts/kbhtechnology_bilon_jemoka_com/2023-11-29T13:34:35-08:00https://www.jemoka.com/posts/kbhtechnology_bison_jemoka_com/2023-06-17T13:35:21-07:00https://www.jemoka.com/posts/kbhtechnology_bonbon_jemoka_com/2023-02-12T19:05:57-08:00https://www.jemoka.com/posts/kbhtechnology_boon_jemoka_com/2023-08-15T00:10:08-07:00https://www.jemoka.com/posts/kbhteddy_roosevelt/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhteelscoping_series/2023-03-20T09:39:51-07:00https://www.jemoka.com/posts/kbhtemperal_abstraction/2024-02-13T10:20:04-08:00https://www.jemoka.com/posts/kbhterm_document_matrix/2024-02-01T14:24:47-08:00https://www.jemoka.com/posts/kbhtest_for_normality/2022-04-17T20:34:22-07:00https://www.jemoka.com/posts/kbhtesting/2023-07-09T21:20:36+08:00https://www.jemoka.com/posts/kbhtext_classification/2024-01-22T10:38:52-08:00https://www.jemoka.com/posts/kbhtext_normalization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhthe_unreasonable_effe
ctiveness_of_mathematics_in_the_natural_sciences/2022-08-26T22:22:05-07:00https://www.jemoka.com/posts/kbhtherma/2022-04-17T15:13:02-07:00https://www.jemoka.com/posts/kbhthermoregulation/2022-11-02T10:16:31-07:00https://www.jemoka.com/posts/kbhtheta_alpha_ratio/2022-06-25T11:50:47-07:00https://www.jemoka.com/posts/kbhthoughts_on_axler_4/2023-04-08T23:40:21-07:00https://www.jemoka.com/posts/kbhtiago_forte/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhtokenization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhtopological_sort/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhtraining_data_sourcing/2023-04-29T15:36:35-07:00https://www.jemoka.com/posts/kbhtraining_helpful_chatbots/2023-10-05T11:57:25-07:00https://www.jemoka.com/posts/kbhtransformational_generative_syntax/2022-08-27T23:45:32-07:00https://www.jemoka.com/posts/kbhspeech_diarization/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhtransformers/2023-11-08T16:46:31-08:00https://www.jemoka.com/posts/kbhtranslation_studies_index/2023-01-09T21:50:29-08:00https://www.jemoka.com/posts/kbhtranslation_theory/2023-07-15T00:48:46+08:00https://www.jemoka.com/posts/kbhtransverse_loaod/2022-09-05T22:24:09-07:00https://www.jemoka.com/posts/kbhtrustpomdp/2024-02-20T10:19:26-08:00https://www.jemoka.com/posts/kbhtuning_forks/2023-01-30T22:45:16-08:00https://www.jemoka.com/posts/kbhtwo_dimensional_heat_equation/2024-03-04T14:20:51-08:00https://www.jemoka.com/posts/kbhtwo_s_complement/2023-10-03T17:26:55-07:00https://www.jemoka.com/posts/kbhtypes_of_harm/2023-12-01T16:47:47-08:00https://www.jemoka.com/posts/kbhu1_c/2022-10-24T22:04:57-07:00https://www.jemoka.com/posts/kbhunbiased_parameter_learning/2023-11-14T10:59:27-08:00https://www.jemoka.com/posts/kbhuncertainty/2023-09-26T15:31:27-07:00https://www.jemoka.com/posts/kbhunconc/2023-10-19T01:16:45-07:00https://www.jemoka.com/posts/kbhundirected_exploration/2023-11-10T11:46:12-08:00https://www.jemoka.com/posts/kbhunimodal/2023-09-28T16:07:1
1-07:00https://www.jemoka.com/posts/kbhunique_lock/2024-02-14T14:21:39-08:00https://www.jemoka.com/posts/kbhuniqueness_and_existance/2023-10-12T14:52:11-07:00https://www.jemoka.com/posts/kbhuniversal_quantum_constructor/2022-05-10T09:28:34-07:00https://www.jemoka.com/posts/kbhuniversity_of_georgia/2022-05-09T13:20:16-07:00https://www.jemoka.com/posts/kbhunix/2023-09-29T11:23:35-07:00https://www.jemoka.com/posts/kbhunix_v6_filesystem/2024-01-20T11:35:00-08:00https://www.jemoka.com/posts/kbhupper_triangular_matrix/2023-03-15T10:17:51-07:00https://www.jemoka.com/posts/kbhus_wwii_propaganda/2022-04-17T17:42:52-07:00https://www.jemoka.com/posts/kbhusaypt/2022-10-29T00:17:59-07:00https://www.jemoka.com/posts/kbhuser_experience/2022-12-04T21:39:41-08:00https://www.jemoka.com/posts/kbhux_design/2022-12-01T14:28:25-08:00https://www.jemoka.com/posts/kbhuser_interviews/2022-09-01T22:41:54-07:00https://www.jemoka.com/posts/kbhutility_elicitation/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhutility_function/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhutility_fusion/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhutility_theory/2023-10-29T17:56:04-07:00https://www.jemoka.com/posts/kbhvalue_iteration/2024-01-27T21:34:06-08:00https://www.jemoka.com/posts/kbhvalue_iteration_in_practice/2023-10-20T12:43:40-07:00https://www.jemoka.com/posts/kbhvalue_of_information/2023-10-29T17:56:04-07:00https://www.jemoka.com/posts/kbhvariance/2023-10-13T16:27:37-07:00https://www.jemoka.com/posts/kbhvc_thing/2022-05-10T20:54:46-07:00https://www.jemoka.com/posts/kbhvector/2022-09-13T22:27:22-07:00https://www.jemoka.com/posts/kbhvector_semantics/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhvector_space/2022-09-14T14:29:05-07:00https://www.jemoka.com/posts/kbhcraintech/2022-04-20T21:40:40-07:00https://www.jemoka.com/posts/kbhvgg/2022-06-23T23:41:34-07:00https://www.jemoka.com/posts/kbhvggish/2022-06-24T00:35:24-07:00https://www.jemoka.com/posts/kbhvietnam/2
022-05-04T18:21:19-07:00https://www.jemoka.com/posts/kbhvietnamization/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhvirtual_memory/2024-03-06T14:21:19-08:00https://www.jemoka.com/posts/kbhvoltage/2023-04-16T10:57:27-07:00https://www.jemoka.com/posts/kbhvwap/2023-01-07T18:19:03-08:00https://www.jemoka.com/posts/kbhwalker_2018/2023-03-15T20:36:33-07:00https://www.jemoka.com/posts/kbhwang_2019/2022-07-02T00:15:04-07:00https://www.jemoka.com/posts/kbhwang_2023/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhwatergate/2022-05-26T21:43:30-07:00https://www.jemoka.com/posts/kbhwave_equation/2024-03-09T13:37:34-08:00https://www.jemoka.com/posts/kbhweb_graph/2024-03-08T11:21:58-08:00https://www.jemoka.com/posts/kbhweighted_edit_distance/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhfireside_article/2023-10-17T00:16:44-07:00https://www.jemoka.com/posts/kbhwho_s_talking_when/2023-10-26T10:40:35-07:00https://www.jemoka.com/posts/kbhwhole_metalanguage_study/2022-08-27T17:10:35-07:00https://www.jemoka.com/posts/kbhtodo_lists/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhwindows_fat/2024-01-11T11:49:36-08:00https://www.jemoka.com/posts/kbhword_normalization/2024-01-13T15:50:03-08:00https://www.jemoka.com/posts/kbhword2vec/2024-02-12T11:25:19-08:00https://www.jemoka.com/posts/kbhwpa/2022-04-17T15:13:02-07:00https://www.jemoka.com/tags/writing/2023-10-31T12:46:14-07:00https://www.jemoka.com/posts/kbhwriting_index/2022-04-17T17:42:52-07:00https://www.jemoka.com/posts/kbhycomb/2023-02-11T18:38:34-08:00https://www.jemoka.com/posts/kbhyoung_s_modulus/2022-10-08T16:24:13-07:00https://www.jemoka.com/posts/kbhyuan_2021/2022-06-24T23:18:03-07:00https://www.jemoka.com/posts/kbhz_test/2022-04-17T22:11:05-07:00https://www.jemoka.com/posts/kbhzero/2022-08-27T10:50:07-07:00https://www.jemoka.com/posts/kbhzero_sum_game/2023-11-03T16:40:48-07:00https://www.jemoka.com/posts/kbhzettlekasten/2022-11-18T13:13:30-08:00https://www.jemoka.com/posts/kbhzettlekaste
n_index/2022-06-22T21:51:22-07:00https://www.jemoka.com/posts/kbhzhu_2021/2022-06-25T11:00:09-07:00https://www.jemoka.com/posts/kbhzinc_abc_transporters/2023-03-26T23:05:09-07:00https://www.jemoka.com/posts/kbhgaussian_mixture_model/2023-09-28T16:07:11-07:00 \ No newline at end of file